__author__ = 'Kamo Petrosyan'
from yml import YML
from offer import Offer
from yml_parser import YmlParser
|
{
"content_hash": "7f523754f0f1d0d44730610c54df3974",
"timestamp": "",
"source": "github",
"line_count": 5,
"max_line_length": 32,
"avg_line_length": 21.6,
"alnum_prop": 0.7685185185185185,
"repo_name": "Haikson/virtenviro",
"id": "c3bd4124df5adc6db66436f7181d29c794abea5e",
"size": "132",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "virtenviro/shop/yml_import/__init__.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Batchfile",
"bytes": "48"
},
{
"name": "CSS",
"bytes": "237430"
},
{
"name": "HTML",
"bytes": "190190"
},
{
"name": "JavaScript",
"bytes": "204448"
},
{
"name": "PHP",
"bytes": "2199"
},
{
"name": "Python",
"bytes": "193113"
},
{
"name": "Ruby",
"bytes": "322"
}
],
"symlink_target": ""
}
|
from __future__ import print_function
from __future__ import absolute_import
from builtins import range
import gevent
import gevent.event
import zerorpc
from .testutils import teardown, random_ipc_endpoint
def test_pushpull_inheritance():
endpoint = random_ipc_endpoint()
pusher = zerorpc.Pusher()
pusher.bind(endpoint)
trigger = gevent.event.Event()
class Puller(zerorpc.Puller):
def lolita(self, a, b):
print('lolita', a, b)
assert a + b == 3
trigger.set()
puller = Puller()
puller.connect(endpoint)
gevent.spawn(puller.run)
trigger.clear()
pusher.lolita(1, 2)
trigger.wait()
print('done')
def test_pubsub_inheritance():
endpoint = random_ipc_endpoint()
publisher = zerorpc.Publisher()
publisher.bind(endpoint)
trigger = gevent.event.Event()
class Subscriber(zerorpc.Subscriber):
def lolita(self, a, b):
print('lolita', a, b)
assert a + b == 3
trigger.set()
subscriber = Subscriber()
subscriber.connect(endpoint)
gevent.spawn(subscriber.run)
trigger.clear()
    # We need this retry logic to wait until the subscriber.run coroutine
    # starts reading (until then, published messages go to /dev/null).
for attempt in range(0, 10):
publisher.lolita(1, 2)
if trigger.wait(0.2):
print('done')
return
raise RuntimeError("The subscriber didn't receive any published message")
def test_pushpull_composite():
endpoint = random_ipc_endpoint()
trigger = gevent.event.Event()
class Puller(object):
def lolita(self, a, b):
print('lolita', a, b)
assert a + b == 3
trigger.set()
pusher = zerorpc.Pusher()
pusher.bind(endpoint)
service = Puller()
puller = zerorpc.Puller(service)
puller.connect(endpoint)
gevent.spawn(puller.run)
trigger.clear()
pusher.lolita(1, 2)
trigger.wait()
print('done')
def test_pubsub_composite():
endpoint = random_ipc_endpoint()
trigger = gevent.event.Event()
class Subscriber(object):
def lolita(self, a, b):
print('lolita', a, b)
assert a + b == 3
trigger.set()
publisher = zerorpc.Publisher()
publisher.bind(endpoint)
service = Subscriber()
subscriber = zerorpc.Subscriber(service)
subscriber.connect(endpoint)
gevent.spawn(subscriber.run)
trigger.clear()
    # We need this retry logic to wait until the subscriber.run coroutine
    # starts reading (until then, published messages go to /dev/null).
for attempt in range(0, 10):
publisher.lolita(1, 2)
if trigger.wait(0.2):
print('done')
return
raise RuntimeError("The subscriber didn't receive any published message")
|
{
"content_hash": "4f4bb9f051a83397638b3dd0e01714af",
"timestamp": "",
"source": "github",
"line_count": 114,
"max_line_length": 79,
"avg_line_length": 25.25438596491228,
"alnum_prop": 0.6259117749218479,
"repo_name": "dotcloud/zerorpc-python",
"id": "a99f9b45090f8b2783a813a4d7818c946633d198",
"size": "4125",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "tests/test_pubpush.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "196837"
}
],
"symlink_target": ""
}
|
"""Resources to be indexed and searched over by the search module."""
__author__ = 'Ellis Michael (emichael@google.com)'
import collections
import datetime
import gettext
import HTMLParser
import logging
import operator
import os
import Queue
import re
import robotparser
import urllib
import urlparse
from xml.dom import minidom
import jinja2
import appengine_config
from common import jinja_utils
from modules.announcements import announcements
from google.appengine.api import search
from google.appengine.api import urlfetch
PROTOCOL_PREFIX = 'http://'
YOUTUBE_DATA_URL = 'https://gdata.youtube.com/feeds/api/videos/'
YOUTUBE_TIMED_TEXT_URL = 'https://youtube.com/api/timedtext'
# The limit (in seconds) for the time that elapses before a new transcript
# fragment should be started. A lower value results in more fine-grain indexing
# and more docs in the index.
YOUTUBE_CAPTION_SIZE_SECS = 30
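# For illustration only: the aggregation that YOUTUBE_CAPTION_SIZE_SECS
# drives (see YouTubeFragmentResource._get_fragments_for_video below)
# amounts to grouping (start_seconds, text) caption pairs into fixed-size
# windows. A standalone sketch over hypothetical (start, text) pairs:
def _chunk_captions_sketch(fragments, window=YOUTUBE_CAPTION_SIZE_SECS):
    """Group (start_seconds, text) pairs into window-sized chunks."""
    chunks = []
    while fragments:
        start = fragments[0][0]
        text = []
        while fragments and fragments[0][0] - start < window:
            text.append(fragments.pop(0)[1])
        chunks.append((start, '\n'.join(text)))
    return chunks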
class URLNotParseableException(Exception):
"""Exception thrown when the resource at a URL cannot be parsed."""
pass
class ResourceHTMLParser(HTMLParser.HTMLParser):
"""Custom parser for processing HTML files."""
IGNORED_TAGS = ['script', 'style']
def __init__(self, url):
HTMLParser.HTMLParser.__init__(self)
self.content_list = []
self._links = []
self._title = ''
self.tag_tracker = collections.Counter()
self.url = url
def handle_starttag(self, tag, attrs):
attrs_dict = dict(attrs)
if tag == 'a' and 'href' in attrs_dict:
self._links.append(urlparse.urljoin(self.url, attrs_dict['href']))
self.tag_tracker[tag] += 1
def handle_endtag(self, tag):
if self.tag_tracker[tag] > 0:
self.tag_tracker[tag] -= 1
def handle_data(self, data):
"""Invoked every time the parser encounters the page's inner content."""
if self.tag_tracker['title']:
if self._title:
self._title += '\n%s' % data
else:
self._title = data
stripped_data = data.strip()
if (not any([self.tag_tracker[tag] for tag in self.IGNORED_TAGS]) and
stripped_data):
self.content_list.append(stripped_data)
def get_content(self):
return '\n'.join(self.content_list)
def get_links(self):
return self._links
def get_title(self):
return self._title
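# Usage sketch for ResourceHTMLParser; the URL and HTML are made-up samples.
# feed() drives the handle_* callbacks above, after which the accessors
# expose the title, the visible text, and the absolutized links.
def _parser_usage_sketch():
    parser = ResourceHTMLParser('http://example.com/page')
    parser.feed('<html><head><title>Hi</title></head>'
                '<body><a href="/next">go</a>text</body></html>')
    assert parser.get_title() == 'Hi'
    assert parser.get_content() == 'Hi\ngo\ntext'
    assert parser.get_links() == ['http://example.com/next']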
def get_parser_for_html(url, ignore_robots=False):
"""Returns a ResourceHTMLParser with the parsed data."""
if not (ignore_robots or _url_allows_robots(url)):
raise URLNotParseableException('robots.txt disallows access to URL: %s'
% url)
parser = ResourceHTMLParser(url)
try:
result = urlfetch.fetch(url)
if (result.status_code in [200, 304] and
any(content_type in result.headers['Content-type'] for
content_type in ['text/html', 'xml'])):
if not isinstance(result.content, unicode):
result.content = result.content.decode('utf-8')
parser.feed(result.content)
else:
raise ValueError
except BaseException as e:
raise URLNotParseableException('Could not parse file at URL: %s\n%s' %
(url, e))
return parser
def get_minidom_from_xml(url, ignore_robots=False):
"""Returns a minidom representation of an XML file at url."""
if not (ignore_robots or _url_allows_robots(url)):
raise URLNotParseableException('robots.txt disallows access to URL: %s'
% url)
try:
result = urlfetch.fetch(url)
except urlfetch.Error as e:
raise URLNotParseableException('Could not parse file at URL: %s. %s' %
(url, e))
if result.status_code not in [200, 304]:
raise URLNotParseableException('Bad status code (%s) for URL: %s' %
(result.status_code, url))
try:
if isinstance(result.content, unicode):
result.content = result.content.encode('utf-8')
xmldoc = minidom.parseString(result.content)
except BaseException as e:
raise URLNotParseableException(
'Error parsing XML document at URL: %s. %s' % (url, e))
return xmldoc
def _url_allows_robots(url):
"""Checks robots.txt for user agent * at URL."""
url = url.encode('utf-8')
try:
parts = urlparse.urlparse(url)
base = urlparse.urlunsplit((
parts.scheme, parts.netloc, '', None, None))
rp = robotparser.RobotFileParser(url=urlparse.urljoin(
base, '/robots.txt'))
rp.read()
except BaseException as e:
        logging.info('Could not retrieve robots.txt for URL: %s', url)
raise URLNotParseableException(e)
else:
return rp.can_fetch('*', url)
class Resource(object):
"""Abstract superclass for a resource."""
# Each subclass should define this constant
TYPE_NAME = 'Resource'
# Each subclass should use this constant to define the fields it needs
# returned with a search result.
RETURNED_FIELDS = []
# Each subclass should use this constant to define the fields it needs
# returned as snippets in the search result. In most cases, this should be
# one field.
SNIPPETED_FIELDS = []
# Each subclass should use this constant to define how many days should
# elapse before a resource should be re-indexed. This value should be
# nonnegative.
FRESHNESS_THRESHOLD_DAYS = 0
@classmethod
def generate_all(
cls, course, timestamps): # pylint: disable-msg=unused-argument
"""A generator returning objects of type cls in the course.
This generator should yield resources based on the last indexed time in
timestamps.
Args:
            course: models.courses.Course. The course to index.
timestamps: dict from doc_ids to last indexed datetimes.
Yields:
A sequence of Resource objects.
"""
# For the superclass, return a generator which immediately halts. All
# implementations in subclasses must also be generators for memory-
# management reasons.
return
yield # pylint: disable-msg=unreachable
@classmethod
def _get_doc_id(cls, *unused_vargs):
"""Subclasses should implement this with identifying fields as args."""
raise NotImplementedError
@classmethod
def _indexed_within_num_days(cls, timestamps, doc_id, num_days):
"""Determines whether doc_id was indexed in the last num_days days."""
try:
timestamp = timestamps[doc_id]
except (KeyError, TypeError):
return False
else:
delta = datetime.datetime.utcnow() - timestamp
return delta <= datetime.timedelta(num_days)
def get_document(self):
"""Return a search.Document to be indexed."""
raise NotImplementedError
def get_links(self):
"""External links to be indexed should be stored in self.links."""
return self.links if hasattr(self, 'links') else []
def get_unit_id(self):
return self.unit_id if hasattr(self, 'unit_id') else None
class Result(object):
"""The abstract superclass for a result returned by the search module."""
def get_html(self):
"""Return an HTML fragment to be used in the results page."""
raise NotImplementedError
@classmethod
def _generate_html_from_template(cls, template_name, template_value):
"""Generates marked-up HTML from template."""
template = jinja_utils.get_template(
template_name,
[os.path.join(appengine_config.BUNDLE_ROOT,
'modules', 'search', 'results_templates')])
return jinja2.Markup(template.render(template_value))
@classmethod
def _get_returned_field(cls, result, field):
"""Returns the value of a field in result, '' if none exists."""
try:
return result[field][0].value
except (KeyError, IndexError, AttributeError):
return ''
@classmethod
def _get_snippet(cls, result):
"""Returns the value of the snippet in result, '' if none exists."""
try:
return result.expressions[0].value
except (AttributeError, IndexError):
return ''
class LessonResource(Resource):
"""A lesson in a course."""
TYPE_NAME = 'Lesson'
RETURNED_FIELDS = ['title', 'unit_id', 'lesson_id', 'url']
SNIPPETED_FIELDS = ['content']
FRESHNESS_THRESHOLD_DAYS = 3
@classmethod
def generate_all(cls, course, timestamps):
for lesson in course.get_lessons_for_all_units():
unit = course.find_unit_by_id(lesson.unit_id)
doc_id = cls._get_doc_id(lesson.unit_id, lesson.lesson_id)
if (lesson.now_available and unit.now_available and
not cls._indexed_within_num_days(timestamps, doc_id,
cls.FRESHNESS_THRESHOLD_DAYS)):
try:
yield LessonResource(lesson)
except HTMLParser.HTMLParseError as e:
logging.info(
'Error parsing objectives for Lesson %s.%s: %s',
lesson.unit_id, lesson.lesson_id, e)
continue
@classmethod
def _get_doc_id(cls, unit_id, lesson_id):
return '%s_%s_%s' % (cls.TYPE_NAME, unit_id, lesson_id)
def __init__(self, lesson):
super(LessonResource, self).__init__()
self.unit_id = lesson.unit_id
self.lesson_id = lesson.lesson_id
self.title = lesson.title
if lesson.notes:
self.notes = urlparse.urljoin(PROTOCOL_PREFIX, lesson.notes)
else:
self.notes = ''
if lesson.objectives:
parser = ResourceHTMLParser(PROTOCOL_PREFIX)
parser.feed(lesson.objectives)
self.content = parser.get_content()
self.links = parser.get_links()
else:
self.content = ''
def get_document(self):
return search.Document(
doc_id=self._get_doc_id(self.unit_id, self.lesson_id),
fields=[
search.TextField(
name='unit_id',
value=str(self.unit_id) if self.unit_id else ''),
search.TextField(name='title', value=self.title),
search.TextField(name='content', value=self.content),
search.TextField(name='url', value=(
'unit?unit=%s&lesson=%s' %
(self.unit_id, self.lesson_id))),
search.TextField(name='type', value=self.TYPE_NAME),
search.DateField(name='date',
value=datetime.datetime.utcnow())])
class LessonResult(Result):
"""An object for a lesson in search results."""
def __init__(self, search_result):
super(LessonResult, self).__init__()
self.url = self._get_returned_field(search_result, 'url')
self.title = self._get_returned_field(search_result, 'title')
self.unit_id = self._get_returned_field(search_result, 'unit_id')
self.snippet = self._get_snippet(search_result)
def get_html(self):
# I18N: Displayed in search results; denotes a lesson link.
lesson_string = gettext.gettext('Lesson')
template_value = {
'result_title': '%s - %s' % (self.title, lesson_string),
'result_url': self.url,
'result_snippet': jinja2.Markup(self.snippet)
}
return self._generate_html_from_template('basic.html', template_value)
class ExternalLinkResource(Resource):
"""An external link from a course."""
TYPE_NAME = 'ExternalLink'
RETURNED_FIELDS = ['title', 'url']
SNIPPETED_FIELDS = ['content']
FRESHNESS_THRESHOLD_DAYS = 15
# TODO(emichael): Allow the user to turn off external links in the dashboard
@classmethod
def generate_all_from_dist_dict(cls, link_dist, link_unit_id, timestamps):
"""Generate all external links from a map from URL to distance.
Args:
link_dist: dict. a map from URL to distance in the link graph from
the course.
link_unit_id: dict. A map from URL to the unit ID under which
the link is found.
timestamps: dict from doc_ids to last indexed datetimes. An empty
dict indicates that all documents should be generated.
Yields:
A sequence of ExternalLinkResource.
"""
url_queue = Queue.LifoQueue()
for url, dist in sorted(link_dist.iteritems(),
key=operator.itemgetter(1)):
url_queue.put(url)
while not url_queue.empty():
url = url_queue.get()
doc_id = cls._get_doc_id(url)
if (cls._indexed_within_num_days(timestamps, doc_id,
cls.FRESHNESS_THRESHOLD_DAYS)):
continue
dist = link_dist[url]
unit_id = link_unit_id.get(url)
if dist > 1:
break
try:
resource = ExternalLinkResource(url, unit_id)
except URLNotParseableException as e:
logging.info(e)
else:
if dist < 1:
for new_link in resource.get_links():
if new_link not in link_dist:
link_dist[new_link] = dist + 1
url_queue.put(new_link)
link_unit_id[new_link] = unit_id
yield resource
def __init__(self, url, unit_id):
# distance is the distance from the course material in the link graph,
# where a lesson notes page has a distance of 0
super(ExternalLinkResource, self).__init__()
self.url = url
self.unit_id = unit_id
parser = get_parser_for_html(url)
self.content = parser.get_content()
self.title = parser.get_title()
self.links = parser.get_links()
@classmethod
def _get_doc_id(cls, url):
return '%s_%s' % (cls.TYPE_NAME, url)
def get_document(self):
return search.Document(
doc_id=self._get_doc_id(self.url),
fields=[
search.TextField(name='title', value=self.title),
search.TextField(name='content', value=self.content),
search.TextField(name='url', value=self.url),
search.TextField(
name='unit_id',
value=str(self.unit_id) if self.unit_id else ''),
search.TextField(name='type', value=self.TYPE_NAME),
search.DateField(name='date',
value=datetime.datetime.utcnow())])
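# Shape of the inputs generate_all_from_dist_dict expects (URLs made up):
# lesson notes pages are seeded at distance 0 and links found in lesson
# bodies at distance 1; the crawl follows links out of distance-0 pages
# only and never expands past distance 1.
def _link_crawl_sketch():
    link_dist = {'http://example.com/notes': 0,
                 'http://example.com/cited': 1}
    link_unit_id = {'http://example.com/notes': '12',
                    'http://example.com/cited': '12'}
    for resource in ExternalLinkResource.generate_all_from_dist_dict(
            link_dist, link_unit_id, {}):
        yield resource.get_document()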
class ExternalLinkResult(Result):
"""An object for an external link in the search results."""
def __init__(self, search_result):
super(ExternalLinkResult, self).__init__()
self.url = self._get_returned_field(search_result, 'url')
self.title = self._get_returned_field(search_result, 'title')
self.unit_id = self._get_returned_field(search_result, 'unit_id')
self.snippet = self._get_snippet(search_result)
def get_html(self):
template_value = {
'result_title': self.title,
'result_url': self.url,
'result_snippet': jinja2.Markup(self.snippet)
}
return self._generate_html_from_template('basic.html', template_value)
class YouTubeFragmentResource(Resource):
"""An object for a YouTube transcript fragment in search results."""
TYPE_NAME = 'YouTubeFragment'
RETURNED_FIELDS = ['title', 'video_id', 'start', 'thumbnail_url']
SNIPPETED_FIELDS = ['content']
FRESHNESS_THRESHOLD_DAYS = 30
@classmethod
def generate_all(cls, course, timestamps):
"""Generate all YouTubeFragments for a course."""
# TODO(emichael): Handle the existence of a single video in multiple
# places in a course.
youtube_ct_regex = r"""<[ ]*gcb-youtube[^>]+videoid=['"]([^'"]+)['"]"""
for lesson in course.get_lessons_for_all_units():
unit = course.find_unit_by_id(lesson.unit_id)
if not (lesson.now_available and unit.now_available):
continue
lesson_url = 'unit?unit=%s&lesson=%s' % (
lesson.unit_id, lesson.lesson_id)
if lesson.video and not cls._indexed_within_num_days(
timestamps, lesson.video, cls.FRESHNESS_THRESHOLD_DAYS):
for fragment in cls._get_fragments_for_video(
lesson.unit_id, lesson.video, lesson_url):
yield fragment
match = re.search(youtube_ct_regex, lesson.objectives)
if match:
for video_id in match.groups():
if not cls._indexed_within_num_days(
timestamps, video_id, cls.FRESHNESS_THRESHOLD_DAYS):
for fragment in cls._get_fragments_for_video(
lesson.unit_id, video_id, lesson_url):
yield fragment
if announcements.custom_module.enabled:
for entity in announcements.AnnouncementEntity.get_announcements():
if entity.is_draft:
continue
announcement_url = 'announcements#%s' % entity.key()
match = re.search(youtube_ct_regex, entity.html)
if match:
for video_id in match.groups():
if not cls._indexed_within_num_days(
timestamps, video_id,
cls.FRESHNESS_THRESHOLD_DAYS):
for fragment in cls._get_fragments_for_video(
None, video_id, announcement_url):
yield fragment
@classmethod
def _indexed_within_num_days(cls, timestamps, video_id, num_days):
for doc_id in timestamps:
if doc_id.startswith(cls._get_doc_id(video_id, '')):
return super(
YouTubeFragmentResource, cls)._indexed_within_num_days(
timestamps, doc_id, num_days)
return False
@classmethod
def _get_fragments_for_video(cls, unit_id, video_id, url_in_course):
"""Get all of the transcript fragment docs for a specific video."""
try:
(transcript, title, thumbnail_url) = cls._get_video_data(video_id)
except BaseException as e:
logging.info('Could not parse YouTube video with id %s.\n%s',
video_id, e)
return []
# Aggregate the fragments into YOUTUBE_CAPTION_SIZE_SECS time chunks
fragments = transcript.getElementsByTagName('text')
aggregated_fragments = []
# This parser is only used for unescaping HTML entities
parser = HTMLParser.HTMLParser()
while fragments:
current_start = float(fragments[0].attributes['start'].value)
current_text = []
while (fragments and
float(fragments[0].attributes['start'].value) -
current_start < YOUTUBE_CAPTION_SIZE_SECS):
current_text.append(parser.unescape(
fragments.pop(0).firstChild.nodeValue))
aggregated_fragment = YouTubeFragmentResource(
video_id, unit_id, url_in_course, current_start,
'\n'.join(current_text), title, thumbnail_url)
aggregated_fragments.append(aggregated_fragment)
return aggregated_fragments
@classmethod
def _get_video_data(cls, video_id):
"""Returns (track_minidom, title, thumbnail_url) for a video."""
try:
vid_info = get_minidom_from_xml(
urlparse.urljoin(YOUTUBE_DATA_URL, video_id),
ignore_robots=True)
title = vid_info.getElementsByTagName(
'title')[0].firstChild.nodeValue
thumbnail_url = vid_info.getElementsByTagName(
'media:thumbnail')[0].attributes['url'].value
except (URLNotParseableException, IOError,
IndexError, AttributeError) as e:
logging.error('Could not parse video info for video id %s.\n%s',
video_id, e)
title = ''
thumbnail_url = ''
# TODO(emichael): Handle the existence of multiple tracks
url = urlparse.urljoin(YOUTUBE_TIMED_TEXT_URL,
'?v=%s&type=list' % video_id)
tracklist = get_minidom_from_xml(url, ignore_robots=True)
tracks = tracklist.getElementsByTagName('track')
if not tracks:
raise URLNotParseableException('No tracks for video %s' % video_id)
track_name = tracks[0].attributes['name'].value
track_lang = tracks[0].attributes['lang_code'].value
track_id = tracks[0].attributes['id'].value
url = urlparse.urljoin(YOUTUBE_TIMED_TEXT_URL, urllib.quote(
'?v=%s&lang=%s&name=%s&id=%s' %
(video_id, track_lang, track_name, track_id), '?/=&'))
transcript = get_minidom_from_xml(url, ignore_robots=True)
return (transcript, title, thumbnail_url)
@classmethod
def _get_doc_id(cls, video_id, start_time):
return '%s_%s_%s' % (cls.TYPE_NAME, video_id, start_time)
def __init__(self, video_id, unit_id, url, start, text, video_title,
thumbnail_url):
super(YouTubeFragmentResource, self).__init__()
self.url = url
self.video_id = video_id
self.unit_id = unit_id
self.start = start
self.text = text
self.video_title = video_title
self.thumbnail_url = thumbnail_url
def get_document(self):
return search.Document(
doc_id=self._get_doc_id(self.video_id, self.start),
fields=[
search.TextField(name='title', value=self.video_title),
search.TextField(name='video_id', value=self.video_id),
search.TextField(
name='unit_id',
value=str(self.unit_id) if self.unit_id else ''),
search.TextField(name='content', value=self.text),
search.NumberField(name='start', value=self.start),
search.TextField(name='thumbnail_url',
value=self.thumbnail_url),
search.TextField(name='url', value=self.url),
search.TextField(name='type', value=self.TYPE_NAME),
search.DateField(name='date',
value=datetime.datetime.utcnow())])
class YouTubeFragmentResult(Result):
"""An object for a lesson in search results."""
def __init__(self, search_result):
super(YouTubeFragmentResult, self).__init__()
self.doc_id = search_result.doc_id
self.title = self._get_returned_field(search_result, 'title')
self.video_id = self._get_returned_field(search_result, 'video_id')
self.unit_id = self._get_returned_field(search_result, 'unit_id')
self.start = self._get_returned_field(search_result, 'start')
self.thumbnail_url = self._get_returned_field(search_result,
'thumbnail_url')
self.url = self._get_returned_field(search_result, 'url')
self.snippet = self._get_snippet(search_result)
def get_html(self):
template_value = {
'result_title': self.title,
'result_url': self.url,
'video_id': self.video_id,
'start_time': self.start,
'thumbnail_url': self.thumbnail_url,
'result_snippet': jinja2.Markup(self.snippet)
}
return self._generate_html_from_template('youtube.html', template_value)
class AnnouncementResource(Resource):
"""An announcement in a course."""
TYPE_NAME = 'Announcement'
RETURNED_FIELDS = ['title', 'url']
SNIPPETED_FIELDS = ['content']
FRESHNESS_THRESHOLD_DAYS = 1
@classmethod
def generate_all(cls, course, timestamps):
if announcements.custom_module.enabled:
for entity in announcements.AnnouncementEntity.get_announcements():
doc_id = cls._get_doc_id(entity.key())
if not(entity.is_draft or cls._indexed_within_num_days(
timestamps, doc_id, cls.FRESHNESS_THRESHOLD_DAYS)):
try:
yield AnnouncementResource(entity)
except HTMLParser.HTMLParseError as e:
logging.info('Error parsing Announcement %s: %s',
entity.title, e)
continue
def __init__(self, announcement):
super(AnnouncementResource, self).__init__()
self.title = announcement.title
self.key = announcement.key()
parser = ResourceHTMLParser(PROTOCOL_PREFIX)
parser.feed(announcement.html)
self.content = parser.get_content()
@classmethod
def _get_doc_id(cls, key):
return '%s_%s' % (cls.TYPE_NAME, key)
def get_document(self):
return search.Document(
doc_id=self._get_doc_id(self.key),
fields=[
search.TextField(name='title', value=self.title),
search.TextField(name='content', value=self.content),
search.TextField(name='url',
value='announcements#%s' % self.key),
search.TextField(name='type', value=self.TYPE_NAME),
search.DateField(name='date',
value=datetime.datetime.utcnow())])
class AnnouncementResult(Result):
"""An object for an announcement in search results."""
def __init__(self, search_result):
super(AnnouncementResult, self).__init__()
self.url = self._get_returned_field(search_result, 'url')
self.title = self._get_returned_field(search_result, 'title')
self.unit_id = None # Announcements are definitionally not in units.
self.snippet = self._get_snippet(search_result)
def get_html(self):
# I18N: Displayed in search results; denotes an announcement link.
announcement_string = gettext.gettext('Announcement')
template_value = {
'result_title': '%s - %s' % (self.title, announcement_string),
'result_url': self.url,
'result_snippet': jinja2.Markup(self.snippet)
}
return self._generate_html_from_template('basic.html', template_value)
# Register new resource types here
RESOURCE_TYPES = [
(LessonResource, LessonResult),
(ExternalLinkResource, ExternalLinkResult),
(YouTubeFragmentResource, YouTubeFragmentResult),
(AnnouncementResource, AnnouncementResult)
]
def get_returned_fields():
"""Returns a list of fields that should be returned in a search result."""
returned_fields = set(['type'])
for resource_type, unused_result_type in RESOURCE_TYPES:
returned_fields |= set(resource_type.RETURNED_FIELDS)
return list(returned_fields)
def get_snippeted_fields():
"""Returns a list of fields that should be snippeted in a search result."""
snippeted_fields = set()
for resource_type, unused_result_type in RESOURCE_TYPES:
snippeted_fields |= set(resource_type.SNIPPETED_FIELDS)
return list(snippeted_fields)
def generate_all_documents(course, timestamps):
"""A generator for all docs for a given course.
Args:
        course: models.courses.Course. The course to be indexed.
timestamps: dict from doc_ids to last indexed datetimes. An empty dict
indicates that all documents should be generated.
Yields:
A sequence of search.Document. If a document is within the freshness
threshold, no document will be generated. This function does not modify
timestamps.
"""
link_dist = {}
link_unit_id = {}
for resource_type, unused_result_type in RESOURCE_TYPES:
for resource in resource_type.generate_all(course, timestamps):
unit_id = resource.get_unit_id()
if isinstance(resource, LessonResource) and resource.notes:
link_dist[resource.notes] = 0
link_unit_id[resource.notes] = unit_id
for link in resource.get_links():
link_dist[link] = 1
link_unit_id[resource.notes] = unit_id
yield resource.get_document()
for resource in ExternalLinkResource.generate_all_from_dist_dict(
link_dist, link_unit_id, timestamps):
yield resource.get_document()
def process_results(results):
"""Generate result objects for the results of a query."""
result_types = {resource_type.TYPE_NAME: result_type
for (resource_type, result_type) in RESOURCE_TYPES}
processed_results = []
for result in results:
try:
result_type = result_types[result['type'][0].value]
processed_results.append(result_type(result))
except (AttributeError, IndexError, KeyError) as e:
# If there is no type information, we cannot process the result
logging.error("%s. Couldn't process result", e)
return processed_results
|
{
"content_hash": "70bc9c1d17a5083a7c7b21465a36958d",
"timestamp": "",
"source": "github",
"line_count": 782,
"max_line_length": 80,
"avg_line_length": 38.148337595907925,
"alnum_prop": 0.5877581120943953,
"repo_name": "danieldanciu/schoggi",
"id": "364e0f405836848aa64593a7ba883c0b331ba3da",
"size": "30430",
"binary": false,
"copies": "4",
"ref": "refs/heads/master",
"path": "modules/search/resources.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "CSS",
"bytes": "341225"
},
{
"name": "HTML",
"bytes": "3369532"
},
{
"name": "Java",
"bytes": "705"
},
{
"name": "JavaScript",
"bytes": "1660297"
},
{
"name": "Python",
"bytes": "3319431"
},
{
"name": "Shell",
"bytes": "19316"
}
],
"symlink_target": ""
}
|
"""Defines a number of errors that can occur."""
class Error(Exception):
"""Base class for exceptions in this module."""
pass
class MissingAuthError(Error):
"""No token was supplied."""
def __init__(self, message='No Recorded Future API key or '
'authentication method was provided.', *args):
"""Init the error with the query"""
Error.__init__(self, message % (args))
class RemoteServerError(Error):
"""Thrown when the server encounters errors."""
pass
class InvalidRFQError(Error):
"""Thrown when RFQ is bad"""
def __init__(self, message, query):
"""Init the error with the query"""
Error.__init__(self, message)
self.query = query
class HttpError(Error):
"""Thrown when http call fails"""
def __init__(self, message, response):
"""Init the error with the request module response object"""
Error.__init__(self, message)
self.response = response
@property
def content(self):
return self.response.content
@property
def status_code(self):
return self.response.status_code
class JsonParseError(HttpError):
"""Thrown when the client cannot parse the content as json."""
pass
class AuthenticationError(HttpError):
"""Thrown when the client on 401 error."""
pass
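# Sketch of the handling these classes enable; `client` and its query
# method are hypothetical stand-ins for the actual rfapi client code.
def run_query_sketch(client, rfq):
    try:
        return client.query(rfq)
    except AuthenticationError:
        # 401: bad or missing API key.
        raise MissingAuthError()
    except JsonParseError as e:
        # status_code and content are inherited from HttpError.
        print("Server returned non-JSON (HTTP %s): %s"
              % (e.status_code, e.content))
    except InvalidRFQError as e:
        print("Bad query: %s" % e.query)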
|
{
"content_hash": "dbd05d4f2c0f981edd3d581b938fdb7f",
"timestamp": "",
"source": "github",
"line_count": 57,
"max_line_length": 68,
"avg_line_length": 23.614035087719298,
"alnum_prop": 0.6300148588410104,
"repo_name": "recordedfuture/rfapi-python",
"id": "21fe366d96fcd0bab2a7cd3c4df020b2a455766d",
"size": "1346",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "rfapi/error.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "91749"
}
],
"symlink_target": ""
}
|
import tensorflow as tf
from tensorflow.python.ops import rnn_cell
from tensorflow.python.ops import seq2seq
import utils
import numpy as np
class LSTM_model(object):
def __init__(self, config=None, mode=None):
self.config = config
self.mode = mode
self.build_graph()
self.load_validation()
def load_validation(self):
data_reader = utils.DataReader(data_filename="input_seqs_validation", batch_size=16)
inputs_seqs_batch, outputs_batch = data_reader.read(False, 1)
init_op = tf.group(tf.initialize_all_variables(),
tf.initialize_local_variables())
sess = tf.Session()
sess.run(init_op)
coord = tf.train.Coordinator()
threads = tf.train.start_queue_runners(sess=sess, coord=coord)
self.validation_inputs = []
self.validation_targets = []
try:
while not coord.should_stop():
input_data, targets = sess.run([inputs_seqs_batch, outputs_batch])
self.validation_inputs.append(input_data)
self.validation_targets.append(targets)
except tf.errors.OutOfRangeError:
pass
finally:
coord.request_stop()
coord.join(threads)
sess.close()
self.validation_inputs = np.array(self.validation_inputs).reshape([-1, self.config.input_length])
self.validation_targets = np.array(self.validation_targets).reshape([-1, 1])
def build_graph(self):
config = self.config
self.reader = utils.DataReader(seq_len=config.seq_length, batch_size=config.batch_size, data_filename=config.data_filename)
self.cell = rnn_cell.BasicLSTMCell(config.rnn_size, state_is_tuple=True)
self.input_data = tf.placeholder(tf.int32, [None, config.input_length])
self.targets = tf.placeholder(tf.int32, [None, 1])
self.initial_state = self.cell.zero_state(tf.shape(self.targets)[0], tf.float32)
with tf.variable_scope("input_embedding"):
embedding = tf.get_variable("embedding", [config.vocab_size, config.rnn_size])
inputs = tf.split(1, config.input_length, tf.nn.embedding_lookup(embedding, self.input_data))
inputs = [tf.squeeze(input, [1]) for input in inputs]
with tf.variable_scope("send_to_rnn"):
state = self.initial_state
output = None
for i, input in enumerate(inputs):
if i > 0:
tf.get_variable_scope().reuse_variables()
output, state = self.cell(input, state)
with tf.variable_scope("softmax"):
softmax_w = tf.get_variable("softmax_w", [config.rnn_size, config.vocab_size])
softmax_b = tf.get_variable("softmax_b", [config.vocab_size])
self.logits = tf.matmul(output, softmax_w) + softmax_b
self.probs = tf.nn.softmax(self.logits)
self.output = tf.cast(tf.reshape(tf.arg_max(self.probs, 1), [-1, 1]), tf.int32)
self.accuracy = tf.reduce_mean(tf.cast(tf.equal(self.output, self.targets), tf.float32))
loss = seq2seq.sequence_loss_by_example([self.logits],
[tf.reshape(self.targets, [-1])],
[tf.ones([config.batch_size])],
config.vocab_size)
self.cost = tf.reduce_mean(loss)
self.final_state = state
# self.lr = tf.Variable(0.001, trainable=False)
tvars = tf.trainable_variables()
grads, _ = tf.clip_by_global_norm(tf.gradients(self.cost, tvars),
config.grad_clip)
        optimizer = tf.train.AdamOptimizer()  # fixed default rate; self.lr above is disabled
self.train_op = optimizer.apply_gradients(zip(grads, tvars))
self.summary_accuracy = tf.scalar_summary('accuracy', self.accuracy)
tf.scalar_summary('cost', self.cost)
self.summary_all = tf.merge_all_summaries()
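# Construction sketch: the model only reads attributes off `config`, so a
# namedtuple works; every field value below is a made-up sample.
if __name__ == "__main__":
    from collections import namedtuple
    Config = namedtuple("Config", ["seq_length", "input_length", "batch_size",
                                   "rnn_size", "vocab_size", "grad_clip",
                                   "data_filename"])
    sample_config = Config(seq_length=50, input_length=50, batch_size=16,
                           rnn_size=128, vocab_size=64, grad_clip=5.0,
                           data_filename="input_seqs_train")
    model = LSTM_model(config=sample_config, mode="train")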
|
{
"content_hash": "a01b017ab6ed423a0bf0ccb7f3b8a321",
"timestamp": "",
"source": "github",
"line_count": 91,
"max_line_length": 127,
"avg_line_length": 40.61538461538461,
"alnum_prop": 0.6466450216450217,
"repo_name": "jxwufan/AssociativeRetrieval",
"id": "d58ca31cc95be7c5982f8de12df8e7796ddcf0ac",
"size": "3696",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "LSTM_model.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "24808"
}
],
"symlink_target": ""
}
|
import collections.abc
from functools import partial
from urllib.parse import urlencode
from geopy.exc import ConfigurationError, GeocoderQueryError
from geopy.geocoders.base import _DEFAULT_USER_AGENT, DEFAULT_SENTINEL, Geocoder
from geopy.location import Location
from geopy.util import logger
__all__ = ("Nominatim", )
_DEFAULT_NOMINATIM_DOMAIN = 'nominatim.openstreetmap.org'
_REJECTED_USER_AGENTS = (
# Various sample user-agent strings mentioned in docs:
"my-application",
"my_app/1",
"my_user_agent/1.0",
"specify_your_app_name_here",
_DEFAULT_USER_AGENT,
)
class Nominatim(Geocoder):
"""Nominatim geocoder for OpenStreetMap data.
Documentation at:
https://nominatim.org/release-docs/develop/api/Overview/
.. attention::
Using Nominatim with the default `user_agent` is strongly discouraged,
as it violates Nominatim's Usage Policy
https://operations.osmfoundation.org/policies/nominatim/
and may possibly cause 403 and 429 HTTP errors. Please make sure
to specify a custom `user_agent` with
``Nominatim(user_agent="my-application")`` or by
overriding the default `user_agent`:
``geopy.geocoders.options.default_user_agent = "my-application"``.
An exception will be thrown if a custom `user_agent` is not specified.
"""
structured_query_params = {
'street',
'city',
'county',
'state',
'country',
'postalcode',
}
geocode_path = '/search'
reverse_path = '/reverse'
def __init__(
self,
*,
timeout=DEFAULT_SENTINEL,
proxies=DEFAULT_SENTINEL,
domain=_DEFAULT_NOMINATIM_DOMAIN,
scheme=None,
user_agent=None,
ssl_context=DEFAULT_SENTINEL,
adapter_factory=None
# Make sure to synchronize the changes of this signature in the
# inheriting classes (e.g. PickPoint).
):
"""
:param int timeout:
See :attr:`geopy.geocoders.options.default_timeout`.
:param dict proxies:
See :attr:`geopy.geocoders.options.default_proxies`.
:param str domain: Domain where the target Nominatim service
is hosted.
:param str scheme:
See :attr:`geopy.geocoders.options.default_scheme`.
:param str user_agent:
See :attr:`geopy.geocoders.options.default_user_agent`.
:type ssl_context: :class:`ssl.SSLContext`
:param ssl_context:
See :attr:`geopy.geocoders.options.default_ssl_context`.
:param callable adapter_factory:
See :attr:`geopy.geocoders.options.default_adapter_factory`.
.. versionadded:: 2.0
"""
super().__init__(
scheme=scheme,
timeout=timeout,
proxies=proxies,
user_agent=user_agent,
ssl_context=ssl_context,
adapter_factory=adapter_factory,
)
self.domain = domain.strip('/')
if (self.domain == _DEFAULT_NOMINATIM_DOMAIN
and self.headers['User-Agent'] in _REJECTED_USER_AGENTS):
raise ConfigurationError(
'Using Nominatim with default or sample `user_agent` "%s" is '
'strongly discouraged, as it violates Nominatim\'s ToS '
'https://operations.osmfoundation.org/policies/nominatim/ '
'and may possibly cause 403 and 429 HTTP errors. '
'Please specify a custom `user_agent` with '
'`Nominatim(user_agent="my-application")` or by '
'overriding the default `user_agent`: '
'`geopy.geocoders.options.default_user_agent = "my-application"`.'
% self.headers['User-Agent']
)
self.api = "%s://%s%s" % (self.scheme, self.domain, self.geocode_path)
self.reverse_api = "%s://%s%s" % (self.scheme, self.domain, self.reverse_path)
def _construct_url(self, base_api, params):
"""
Construct geocoding request url.
The method can be overridden in Nominatim-based geocoders in order
to extend URL parameters.
:param str base_api: Geocoding function base address - self.api
or self.reverse_api.
:param dict params: Geocoding params.
:return: string URL.
"""
return "?".join((base_api, urlencode(params)))
def geocode(
self,
query,
*,
exactly_one=True,
timeout=DEFAULT_SENTINEL,
limit=None,
addressdetails=False,
language=False,
geometry=None,
extratags=False,
country_codes=None,
viewbox=None,
bounded=False,
featuretype=None,
namedetails=False
):
"""
Return a location point by address.
:param query: The address, query or a structured query
you wish to geocode.
For a structured query, provide a dictionary whose keys
are one of: `street`, `city`, `county`, `state`, `country`, or
`postalcode`. For more information, see Nominatim's
documentation for `structured requests`:
https://nominatim.org/release-docs/develop/api/Search
:type query: dict or str
:param bool exactly_one: Return one result or a list of results, if
available.
:param int timeout: Time, in seconds, to wait for the geocoding service
to respond before raising a :class:`geopy.exc.GeocoderTimedOut`
exception. Set this only if you wish to override, on this call
only, the value set during the geocoder's initialization.
:param int limit: Maximum amount of results to return from Nominatim.
Unless exactly_one is set to False, limit will always be 1.
:param bool addressdetails: If you want in *Location.raw* to include
address details such as house_number, city_district, postcode, etc
(in a structured form) set it to True
:param str language: Preferred language in which to return results.
Either uses standard
`RFC2616 <http://www.ietf.org/rfc/rfc2616.txt>`_
accept-language string or a simple comma-separated
list of language codes.
:param str geometry: If present, specifies whether the geocoding
service should return the result's geometry in `wkt`, `svg`,
`kml`, or `geojson` formats. This is available via the
`raw` attribute on the returned :class:`geopy.location.Location`
object.
:param bool extratags: Include additional information in the result if available,
e.g. wikipedia link, opening hours.
:param country_codes: Limit search results
to a specific country (or a list of countries).
            A country_code should be an ISO 3166-1 alpha-2 code,
e.g. ``gb`` for the United Kingdom, ``de`` for Germany, etc.
:type country_codes: str or list
:type viewbox: list or tuple of 2 items of :class:`geopy.point.Point` or
``(latitude, longitude)`` or ``"%(latitude)s, %(longitude)s"``.
:param viewbox: Prefer this area to find search results. By default this is
treated as a hint, if you want to restrict results to this area,
specify ``bounded=True`` as well.
Example: ``[Point(22, 180), Point(-22, -180)]``.
:param bool bounded: Restrict the results to only items contained
within the bounding ``viewbox``.
:param str featuretype: If present, restrict results to certain type of features.
Allowed values: `country`, `state`, `city`, `settlement`.
:param bool namedetails: If you want in *Location.raw* to include
namedetails, set it to True. This will be a list of alternative names,
including language variants, etc.
:rtype: ``None``, :class:`geopy.location.Location` or a list of them, if
``exactly_one=False``.
"""
if isinstance(query, collections.abc.Mapping):
params = {
key: val
for key, val
in query.items()
if key in self.structured_query_params
}
else:
params = {'q': query}
params.update({
'format': 'json'
})
if exactly_one:
params['limit'] = 1
elif limit is not None:
limit = int(limit)
if limit < 1:
raise ValueError("Limit cannot be less than 1")
params['limit'] = limit
if viewbox:
params['viewbox'] = self._format_bounding_box(
viewbox, "%(lon1)s,%(lat1)s,%(lon2)s,%(lat2)s")
if bounded:
params['bounded'] = 1
if not country_codes:
country_codes = []
if isinstance(country_codes, str):
country_codes = [country_codes]
if country_codes:
params['countrycodes'] = ",".join(country_codes)
if addressdetails:
params['addressdetails'] = 1
if namedetails:
params['namedetails'] = 1
if language:
params['accept-language'] = language
if extratags:
params['extratags'] = True
if geometry is not None:
geometry = geometry.lower()
if geometry == 'wkt':
params['polygon_text'] = 1
elif geometry == 'svg':
params['polygon_svg'] = 1
elif geometry == 'kml':
params['polygon_kml'] = 1
elif geometry == 'geojson':
params['polygon_geojson'] = 1
else:
raise GeocoderQueryError(
"Invalid geometry format. Must be one of: "
"wkt, svg, kml, geojson."
)
if featuretype:
params['featuretype'] = featuretype
url = self._construct_url(self.api, params)
logger.debug("%s.geocode: %s", self.__class__.__name__, url)
callback = partial(self._parse_json, exactly_one=exactly_one)
return self._call_geocoder(url, callback, timeout=timeout)
def reverse(
self,
query,
*,
exactly_one=True,
timeout=DEFAULT_SENTINEL,
language=False,
addressdetails=True,
zoom=None,
namedetails=False,
):
"""
Return an address by location point.
:param query: The coordinates for which you wish to obtain the
closest human-readable addresses.
:type query: :class:`geopy.point.Point`, list or tuple of ``(latitude,
longitude)``, or string as ``"%(latitude)s, %(longitude)s"``.
:param bool exactly_one: Return one result or a list of results, if
available.
:param int timeout: Time, in seconds, to wait for the geocoding service
to respond before raising a :class:`geopy.exc.GeocoderTimedOut`
exception. Set this only if you wish to override, on this call
only, the value set during the geocoder's initialization.
:param str language: Preferred language in which to return results.
Either uses standard
`RFC2616 <http://www.ietf.org/rfc/rfc2616.txt>`_
accept-language string or a simple comma-separated
list of language codes.
:param bool addressdetails: Whether or not to include address details,
such as city, county, state, etc. in *Location.raw*
:param int zoom: Level of detail required for the address,
an integer in range from 0 (country level) to 18 (building level),
default is 18.
:param bool namedetails: If you want in *Location.raw* to include
namedetails, set it to True. This will be a list of alternative names,
including language variants, etc.
.. versionadded:: 2.3
:rtype: ``None``, :class:`geopy.location.Location` or a list of them, if
``exactly_one=False``.
"""
try:
lat, lon = self._coerce_point_to_string(query).split(',')
except ValueError:
raise ValueError("Must be a coordinate pair or Point")
params = {
'lat': lat,
'lon': lon,
'format': 'json',
}
if language:
params['accept-language'] = language
params['addressdetails'] = 1 if addressdetails else 0
if zoom is not None:
params['zoom'] = zoom
if namedetails:
params['namedetails'] = 1
url = self._construct_url(self.reverse_api, params)
logger.debug("%s.reverse: %s", self.__class__.__name__, url)
callback = partial(self._parse_json, exactly_one=exactly_one)
return self._call_geocoder(url, callback, timeout=timeout)
def _parse_code(self, place):
# Parse each resource.
latitude = place.get('lat', None)
longitude = place.get('lon', None)
placename = place.get('display_name', None)
if latitude is not None and longitude is not None:
latitude = float(latitude)
longitude = float(longitude)
return Location(placename, (latitude, longitude), place)
def _parse_json(self, places, exactly_one):
if not places:
return None
if isinstance(places, collections.abc.Mapping) and 'error' in places:
if places['error'] == 'Unable to geocode':
# no results in reverse
return None
else:
raise GeocoderQueryError(places['error'])
if not isinstance(places, collections.abc.Sequence):
places = [places]
if exactly_one:
return self._parse_code(places[0])
else:
return [self._parse_code(place) for place in places]
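# Usage sketch (run as a script); the user_agent value is a placeholder
# that every real application must replace with its own identifier.
if __name__ == '__main__':
    geolocator = Nominatim(user_agent="my-example-app")
    location = geolocator.geocode(
        {'city': 'Berlin', 'country': 'Germany'})  # structured query
    if location is not None:
        print(location.latitude, location.longitude)
    print(geolocator.reverse("52.5200, 13.4050", zoom=10))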
|
{
"content_hash": "3b8b3f7e3f497829bc6c6914e13f53fb",
"timestamp": "",
"source": "github",
"line_count": 400,
"max_line_length": 89,
"avg_line_length": 35.5825,
"alnum_prop": 0.5783039415442984,
"repo_name": "geopy/geopy",
"id": "9cdb452a9f9f305c5bfc81837a2495d696ab4662",
"size": "14233",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "geopy/geocoders/nominatim.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Makefile",
"bytes": "1574"
},
{
"name": "Python",
"bytes": "565428"
}
],
"symlink_target": ""
}
|
from django.db.models import Q
from django.db.models import Sum
from rest_framework.serializers import JSONField
from rest_framework.serializers import ModelSerializer
from rest_framework.serializers import SerializerMethodField
from kolibri.auth.models import Classroom
from kolibri.core.exams.models import Exam
from kolibri.core.lessons.models import Lesson
from kolibri.logger.models import ContentSummaryLog
from kolibri.logger.models import ExamLog
class ExamProgressSerializer(ModelSerializer):
"""
Annotates an Exam with progress information based on logs generated
by the requesting User
"""
class Meta:
model = Exam
fields = (
'active',
'id',
'progress',
'question_count',
'title',
)
progress = SerializerMethodField()
# Mostly copied from UserExamSerializer.to_representation, but working directly
# from Exam Model instead of ExamAssignment
def get_progress(self, instance):
try:
examlogs = instance.examlogs.get(user=self.context['user'])
return {
'score': examlogs.attemptlogs.aggregate(Sum('correct')).get('correct__sum'),
'answer_count': examlogs.attemptlogs.count(),
'closed': examlogs.closed,
}
except ExamLog.DoesNotExist:
return {
'score': None,
'answer_count': None,
'closed': None,
}
class LessonProgressSerializer(ModelSerializer):
"""
Annotates a Lesson with progress information based on logs generated
by the requesting User
"""
progress = SerializerMethodField()
resources = JSONField(default='[]')
class Meta:
model = Lesson
fields = (
'description',
'id',
'is_active',
'title',
'progress',
'resources',
)
def get_progress(self, instance):
content_ids = [resource['content_id'] for resource in instance.resources]
num_completed_logs = ContentSummaryLog.objects \
.exclude(completion_timestamp__isnull=True) \
.filter(
user=self.context['user'],
content_id__in=content_ids
) \
.count()
return {
'resources_completed': num_completed_logs,
'total_resources': len(instance.resources),
}
class LearnerClassroomSerializer(ModelSerializer):
assignments = SerializerMethodField()
class Meta:
model = Classroom
fields = (
'id',
'name',
'assignments',
)
def get_assignments(self, instance):
"""
Returns all Exams and Lessons (and progress) assigned to the requesting User
"""
current_user = self.context['request'].user
memberships = current_user.memberships.all()
learner_groups = [m.collection for m in memberships]
# Return only active Lessons that are assigned to the requesting user's groups
# TODO move this to a permission_class on Lesson
filtered_lessons = Lesson.objects.filter(
lesson_assignments__collection__in=learner_groups,
collection=instance,
is_active=True,
).distinct()
filtered_exams = Exam.objects.filter(
assignments__collection__in=learner_groups,
collection=instance,
).filter(Q(active=True) | Q(examlogs__user=current_user)).distinct()
return {
'lessons': LessonProgressSerializer(
filtered_lessons,
many=True,
context={'user': current_user},
).data,
'exams': ExamProgressSerializer(
filtered_exams,
many=True,
context={'user': current_user},
).data,
}
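# Sketch of how a view might drive these serializers; `request` is whatever
# DRF passes in and `classroom` is a Classroom the current user belongs to.
def classroom_summary_sketch(request, classroom):
    serializer = LearnerClassroomSerializer(
        classroom, context={'request': request})
    # .data includes 'assignments' with per-lesson and per-exam progress.
    return serializer.data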
|
{
"content_hash": "e14239c8e2c1191b44f5eb846cbc949b",
"timestamp": "",
"source": "github",
"line_count": 125,
"max_line_length": 92,
"avg_line_length": 31.64,
"alnum_prop": 0.5876106194690266,
"repo_name": "christianmemije/kolibri",
"id": "f392f27582a51c8605e6ab391d7f6c41253f941a",
"size": "3955",
"binary": false,
"copies": "3",
"ref": "refs/heads/master",
"path": "kolibri/plugins/learn/serializers.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "30170"
},
{
"name": "HTML",
"bytes": "12565"
},
{
"name": "JavaScript",
"bytes": "762092"
},
{
"name": "Makefile",
"bytes": "7540"
},
{
"name": "Python",
"bytes": "1056980"
},
{
"name": "Shell",
"bytes": "10465"
},
{
"name": "Vue",
"bytes": "764872"
}
],
"symlink_target": ""
}
|
from setuptools import setup, find_packages
setup(
name='django-waffle',
version='0.9.1',
description='A feature flipper for Django.',
long_description=open('README.rst').read(),
author='James Socol',
author_email='james.socol@gmail.com',
url='http://github.com/jsocol/django-waffle',
license='BSD',
packages=find_packages(exclude=['test_app']),
include_package_data=True,
package_data={'': ['README.rst']},
zip_safe=False,
classifiers=[
'Development Status :: 4 - Beta',
'Environment :: Web Environment',
'Framework :: Django',
'Intended Audience :: Developers',
'License :: OSI Approved :: BSD License',
'Operating System :: OS Independent',
'Programming Language :: Python',
'Topic :: Software Development :: Libraries :: Python Modules',
]
)
|
{
"content_hash": "3bc1c09922e3b1723e6dd0e115e9b126",
"timestamp": "",
"source": "github",
"line_count": 26,
"max_line_length": 71,
"avg_line_length": 33.34615384615385,
"alnum_prop": 0.6205305651672434,
"repo_name": "11craft/django-waffle",
"id": "05c1ee52cde38b5f1186f5965f59ddf1a83d50d5",
"size": "867",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "setup.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "JavaScript",
"bytes": "941"
},
{
"name": "Python",
"bytes": "109418"
}
],
"symlink_target": ""
}
|
"""This file is free software under the GPLv3 license"""
import sys
import os
import subprocess
import shutil
from urllib2 import urlopen
import json
from cStringIO import StringIO
import zipfile
from collections import namedtuple
import renpy
from renpy.audio.music import stop as _stop_music
import renpy.game
from renpy.ui import Action
from renpy.exports import show_screen
from modloader.modinfo import get_mods
from modloader import get_mod_path, workshop_enabled
if workshop_enabled:
from steam_workshop.steam_config import has_valid_signature
import steam_workshop.steamhandler as steamhandler
BRANCHES_API = "https://api.github.com/repos/AWSW-Modding/AWSW-Modtools/branches"
ZIP_LOCATION = "https://github.com/AWSW-Modding/AWSW-Modtools/archive/{mod_name}.zip"
#steammgr = steamhandler.get_instance()
def cache(function):
def inner():
if not hasattr(function, "results"):
function.results = function()
return function.results
return inner
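# What @cache does, sketched with a hypothetical zero-argument function:
# the body runs once and later calls replay function.results.
@cache
def _expensive_listing_sketch():
    print "fetching..."  # printed only on the first call
    return ["mod-a", "mod-b"]
# _expensive_listing_sketch()  # prints "fetching...", returns the list
# _expensive_listing_sketch()  # served from function.results; no print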
def show_message(message, bg="#3485e7", fg="#fff", stop_music=True):
if stop_music:
_stop_music()
for i in renpy.config.layers:
renpy.game.context().scene_lists.clear(i)
show_screen("message", message, bg, fg, _layer="screens")
def report_exception(overview, error_str):
if workshop_enabled:
print "Reporting exception"
steammgr = steamhandler.get_instance()
if steammgr.InitSuccess:
exception_str = "{}\n{}".format(overview, error_str)
#steammgr.HandleException(exception_str)
def remove_mod(mod_name, filename):
"""Remove a mod from the game and reload.
Args:
        mod_name (str): The internal name of the mod to be removed
        filename: False to resolve the mod's folder from the mod registry,
            True if mod_name is already the folder name, or the folder
            name itself as a string.
    """
show_message("Removing mod {}...".format(mod_name))
if filename is False:
mod_class = get_mods()[mod_name]
mod_folder = mod_class.__module__
elif filename is True:
mod_folder = mod_name
else:
mod_folder = filename
if mod_folder.isdigit():
steammgr = steamhandler.get_instance()
steammgr.Unsubscribe(int(mod_folder))
shutil.rmtree(os.path.join(os.path.normpath(renpy.config.gamedir), "mods", mod_folder))
print "Sucessfully removed {}, reloading".format(mod_name)
sys.stdout.flush()
show_message("Reloading game...")
_stop_music("modmenu_music")
renpy.exports.reload_script()
@cache
def github_downloadable_mods():
url_f = urlopen(BRANCHES_API)
branches = json.load(url_f)
url_f.close()
data = []
for branch in branches:
name = branch["name"]
if name.startswith("mod-"):
data.append([
ZIP_LOCATION.format(mod_name=name),
name.replace("mod-", "", 1).encode("utf-8"),
"DummyAuthor",
"Sed ut perspiciatis unde omnis iste natus error sit voluptatem accusantium doloremque laudantium, totam rem aperiam, eaque ipsa quae ab illo inventore veritatis et quasi architecto beatae vitae dicta sunt explicabo. Nemo enim ipsam voluptatem quia voluptas sit aspernatur aut odit aut fugit, sed quia consequuntur magni dolores eos qui ratione voluptatem sequi nesciunt. Neque porro quisquam est, qui dolorem ipsum quia dolor sit amet, consectetur, adipisci velit, sed quia non numquam eius modi tempora incidunt ut labore et dolore magnam aliquam quaerat voluptatem. Ut enim ad minima veniam, quis nostrum exercitationem ullam corporis suscipit laboriosam, nisi ut aliquid ex ea commodi consequatur? Quis autem vel eum iure reprehenderit qui in ea voluptate velit esse quam nihil molestiae consequatur, vel illum qui dolorem eum fugiat quo voluptas nulla pariatur?",
"http://s-media-cache-ak0.pinimg.com/originals/42/41/90/424190c7f88c514a1c26a79572d61191.png"
])
return sorted(data, key=lambda mod: mod[1].lower())
@cache
def steam_downloadable_mods():
# A different format,
# (id, mod_name, author, desc, image_url)
mods = []
for mod in sorted(steamhandler.get_instance().GetAllItems(), key=lambda mod: mod[1]):
file_id = mod[0]
create_time, modify_time, signature = mod[5:8]
is_valid, verified = has_valid_signature(file_id, create_time, modify_time, signature)
if is_valid:
mods.append(list(mod[:5]))
mods[-1][3] += "\n\nVerified by {}".format(verified.username.replace("<postmaster@example.com>", ""))
else:
print "NOT VALID SIG", mod
return mods
def download_github_mod(download_link, name, show_download=True, reload_script=True):
if show_download:
show_message("Downloading {}".format(name))
mod_folder = os.path.join(get_mod_path(), name)
if os.path.exists(mod_folder):
shutil.rmtree(mod_folder, ignore_errors=True)
request = urlopen(download_link)
zip_f = zipfile.ZipFile(StringIO(request.read()))
zip_f.extractall(get_mod_path())
root = zip_f.namelist()[0]
os.rename(os.path.join(get_mod_path(), root),
mod_folder)
if reload_script:
show_message("Reloading Game...")
restart_python()
def download_steam_mod(id, name, reload_script=True):
steammgr = steamhandler.get_instance()
# (id, mod_name, author, desc, image_url)
for i in renpy.config.layers:
renpy.game.context().scene_lists.clear(i)
show_screen("_modloader_download_screen", id, _layer="screens")
def cb(item, success):
# Copy the folder
src = item[0].filepath
dest = os.path.join(os.getcwd(), "game", "mods", str(item[0].itemID))
shutil.copytree(src, dest)
steammgr.unregister_callback(steamhandler.PyCallback.Download, cb)
if reload_script:
restart_python()
steammgr.register_callback(steamhandler.PyCallback.Download, cb)
steammgr.Subscribe(id)
class UpdateModtools(Action):
def __init__(self):
pass
def __call__(self):
update_modtools("https://github.com/AWSW-Modding/AWSW-Modtools/archive/develop.zip")
def update_modtools(download_link):
print "Updating modtools..."
print "Saving new version..."
request = urlopen(download_link)
with open(os.path.join(renpy.config.gamedir, "modtools-update.zip"), "wb") as zip_f:
zip_f.write(request.read())
request.close()
with open(os.path.join(renpy.config.gamedir, "modloader", "modtools_files.json")) as json_f:
modtools_files = json.load(json_f)
for rel_path in modtools_files[0]:
fullpath = os.path.join(renpy.config.gamedir, rel_path)
if os.path.exists(fullpath):
if os.path.isdir(fullpath):
shutil.rmtree(fullpath)
else:
os.remove(fullpath)
print "Writing bootloader..."
zip_f = zipfile.ZipFile(os.path.join(renpy.config.gamedir, "modtools-updater.rpe"), 'w', zipfile.ZIP_DEFLATED)
zip_f.write(os.path.join(renpy.config.gamedir, "modloader", "modtools_update_script.py"), "autorun.py")
zip_f.close()
restart_python()
def restart_python():
print "Restarting..."
if sys.platform.startswith('win'):
subprocess.Popen([sys.executable, "-O", sys.argv[0]],
creationflags=subprocess.CREATE_NEW_CONSOLE)
else:
with open("stdout.txt", "wb") as out, open("stderr.txt", "wb") as err:
subprocess.Popen([sys.executable, "-O", sys.argv[0]],
preexec_fn=os.setpgrp,
stdout=out,
stderr=err)
print "Exiting"
os._exit(0)
def report_duplicate_labels():
renpy.parser.parse_errors = renpy.game.script.duplicate_labels
if renpy.parser.report_parse_errors():
raise SystemExit(-1)
try:
import ssl
except ImportError:
start_callbacks = renpy.python.store_dicts["store"]["config"].start_callbacks
installing_mods = next((func for func in start_callbacks if func.__name__ == "steam_callback"), None)
if not installing_mods:
        # A registered steam_callback means a download is mid-flight and we
        # are in the middle of updating; only apply the ssl fix otherwise.
from modloader.fix_ssl import fix_ssl
fix_ssl()
restart_python()
|
{
"content_hash": "cac7752f6b606be95f4954ca532c9d9c",
"timestamp": "",
"source": "github",
"line_count": 217,
"max_line_length": 884,
"avg_line_length": 37.72811059907834,
"alnum_prop": 0.6589715402467327,
"repo_name": "AWSW-Modding/AWSW-Modtools",
"id": "8607c41bce7a598c889c1b7c18b0b68196c231ae",
"size": "8187",
"binary": false,
"copies": "1",
"ref": "refs/heads/develop",
"path": "modloader/modconfig.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "66794"
},
{
"name": "Ren'Py",
"bytes": "55716"
}
],
"symlink_target": ""
}
|
import argparse
import os
import subprocess
import sys
import tempfile
import zipfile
current_path = os.path.dirname(os.path.realpath(__file__))
sys.path.insert(0, os.path.join(current_path, "..", "..", "..", "tools"))
# pylint: disable=F0401
import find_depot_tools
if not sys.platform.startswith("linux"):
print "Not supported for your platform"
sys.exit(0)
prebuilt_file_path = os.path.join(current_path, "prebuilt")
stamp_path = os.path.join(prebuilt_file_path, "VERSION")
depot_tools_path = find_depot_tools.add_depot_tools_to_path()
gsutil_exe = os.path.join(depot_tools_path, "third_party", "gsutil", "gsutil")
def download():
version_path = os.path.join(current_path, "../VERSION")
with open(version_path) as version_file:
version = version_file.read().strip()
try:
with open(stamp_path) as stamp_file:
current_version = stamp_file.read().strip()
if current_version == version:
return 0 # Already have the right version.
except IOError:
pass # If the stamp file does not exist we need to download a new binary.
    platform = "linux-x64"  # TODO: configure
basename = platform + ".zip"
gs_path = "gs://mojo/shell/" + version + "/" + basename
with tempfile.NamedTemporaryFile() as temp_zip_file:
# We're downloading from a public bucket which does not need authentication,
# but the user might have busted credential files somewhere such as ~/.boto
# that the gsutil script will try (and fail) to use. Setting these
# environment variables convinces gsutil not to attempt to use these, but
# also generates a useless warning about failing to load the file. We want
# to discard this warning but still preserve all output in the case of an
# actual failure. So, we run the script and capture all output and then
# throw the output away if the script succeeds (return code 0).
env = os.environ.copy()
env["AWS_CREDENTIAL_FILE"] = ""
env["BOTO_CONFIG"] = ""
try:
subprocess.check_output(
[gsutil_exe,
"--bypass_prodaccess",
"cp",
gs_path,
temp_zip_file.name],
stderr=subprocess.STDOUT,
env=env)
except subprocess.CalledProcessError as e:
print e.output
sys.exit(1)
with zipfile.ZipFile(temp_zip_file.name) as z:
zi = z.getinfo("mojo_shell")
mode = zi.external_attr >> 16
z.extract(zi, prebuilt_file_path)
os.chmod(os.path.join(prebuilt_file_path, "mojo_shell"), mode)
with open(stamp_path, 'w') as stamp_file:
stamp_file.write(version)
return 0
def main():
parser = argparse.ArgumentParser(description="Download mojo_shell binary "
"from google storage")
parser.parse_args()
return download()
if __name__ == "__main__":
sys.exit(main())
|
{
"content_hash": "2f52f80e97823cc0f6cdf98a7ee5b39d",
"timestamp": "",
"source": "github",
"line_count": 85,
"max_line_length": 80,
"avg_line_length": 33.305882352941175,
"alnum_prop": 0.6616036736135641,
"repo_name": "dednal/chromium.src",
"id": "2a0890c5f6d00776dd79ed74498f893db5790eb4",
"size": "3016",
"binary": false,
"copies": "9",
"ref": "refs/heads/nw12",
"path": "mojo/public/tools/download_shell_binary.py",
"mode": "33261",
"license": "bsd-3-clause",
"language": [
{
"name": "AppleScript",
"bytes": "6973"
},
{
"name": "Arduino",
"bytes": "464"
},
{
"name": "Assembly",
"bytes": "34522"
},
{
"name": "Batchfile",
"bytes": "8451"
},
{
"name": "C",
"bytes": "9240962"
},
{
"name": "C++",
"bytes": "222772775"
},
{
"name": "CSS",
"bytes": "875874"
},
{
"name": "Dart",
"bytes": "74976"
},
{
"name": "Go",
"bytes": "18155"
},
{
"name": "HTML",
"bytes": "27190037"
},
{
"name": "Java",
"bytes": "7645280"
},
{
"name": "JavaScript",
"bytes": "18828195"
},
{
"name": "Makefile",
"bytes": "96270"
},
{
"name": "Objective-C",
"bytes": "1397246"
},
{
"name": "Objective-C++",
"bytes": "7575073"
},
{
"name": "PHP",
"bytes": "97817"
},
{
"name": "PLpgSQL",
"bytes": "248854"
},
{
"name": "Perl",
"bytes": "63937"
},
{
"name": "Protocol Buffer",
"bytes": "418340"
},
{
"name": "Python",
"bytes": "8032766"
},
{
"name": "Shell",
"bytes": "464218"
},
{
"name": "Standard ML",
"bytes": "4965"
},
{
"name": "XSLT",
"bytes": "418"
},
{
"name": "nesC",
"bytes": "18335"
}
],
"symlink_target": ""
}
|
import math, subprocess
p = subprocess.Popen(["ioreg", "-rc", "AppleSmartBattery"], stdout=subprocess.PIPE)
output = p.communicate()[0]
o_max = [l for l in output.splitlines() if 'MaxCapacity' in l][0]
o_cur = [l for l in output.splitlines() if 'CurrentCapacity' in l][0]
b_max = float(o_max.rpartition('=')[-1].strip())
b_cur = float(o_cur.rpartition('=')[-1].strip())
charge = b_cur / b_max
charge_threshold = int(math.ceil(10 * charge))
# Output
total_slots, slots = 10, []
filled = int(math.ceil(charge_threshold * (total_slots / 10.0))) * u'▸'
# old arrow: ▹▸▶
empty = (total_slots - len(filled)) * u'▹'
out = (filled + empty).encode('utf-8')
import sys
color_green = '%{\033[32m%}'
color_yellow = '%{\033[1;33m%}'
color_red = '%{\033[31m%}'
color_reset = '%{\033[00m%}'
color_out = (
color_green if len(filled) > 6
else color_yellow if len(filled) > 4
else color_red
)
out = color_out + out + color_reset
sys.stdout.write(out)
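# --- Hedged example (not part of the original script) ---
# A minimal sketch of the bar-building logic above, wrapped in a function so
# it can be tried with an arbitrary charge fraction. `render_bar` is a
# hypothetical name; it simply re-derives the filled/empty arrow counts.
def render_bar(charge, total_slots=10):
    threshold = int(math.ceil(10 * charge))
    filled_count = int(math.ceil(threshold * (total_slots / 10.0)))
    return (filled_count * u'▸' + (total_slots - filled_count) * u'▹').encode('utf-8')
# render_bar(0.42) gives five filled arrows followed by five empty ones: ▸▸▸▸▸▹▹▹▹▹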
|
{
"content_hash": "bf9b7a75eb3c19996cb4452d292db06e",
"timestamp": "",
"source": "github",
"line_count": 36,
"max_line_length": 83,
"avg_line_length": 26.166666666666668,
"alnum_prop": 0.6305732484076433,
"repo_name": "tjtrabue/dotfiles",
"id": "545c3670125a2d5cb01a0a8191dab035644a02df",
"size": "991",
"binary": false,
"copies": "2",
"ref": "refs/heads/develop",
"path": "copy/prompt/batcharge.py",
"mode": "33261",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "8764"
},
{
"name": "Common Lisp",
"bytes": "925"
},
{
"name": "Elixir",
"bytes": "1422"
},
{
"name": "Emacs Lisp",
"bytes": "55859"
},
{
"name": "GDB",
"bytes": "28"
},
{
"name": "Haskell",
"bytes": "7659"
},
{
"name": "JavaScript",
"bytes": "6436"
},
{
"name": "Lua",
"bytes": "153908"
},
{
"name": "Perl",
"bytes": "14272"
},
{
"name": "Python",
"bytes": "80263"
},
{
"name": "Scheme",
"bytes": "18444"
},
{
"name": "Shell",
"bytes": "515152"
},
{
"name": "TeX",
"bytes": "5343"
},
{
"name": "Vim Script",
"bytes": "78474"
},
{
"name": "Vim Snippet",
"bytes": "287"
},
{
"name": "YASnippet",
"bytes": "943"
}
],
"symlink_target": ""
}
|
from __future__ import unicode_literals
from django.contrib import admin
from taobao import models
# Register your models here.
#admin.site.register(models.PModel)
#admin.site.register(models.Product)
class ProductAdmin(admin.ModelAdmin):
    list_display = ('pmodel', 'nickname', 'price', 'year')
    search_fields = ('nickname',)
    ordering = ('-price',)
admin.site.register(models.Product, ProductAdmin)
class PPhotoAdmin(admin.ModelAdmin):
    list_display = ('product', 'description')
    search_fields = ('product',)
admin.site.register(models.PPhoto, PPhotoAdmin)
class PModelAdmin(admin.ModelAdmin):
    list_display = ('maker', 'name')
    search_fields = ('name',)
admin.site.register(models.PModel, PModelAdmin)
class MakerAdmin(admin.ModelAdmin):
    list_display = ('name', 'country')
    search_fields = ('name',)
    ordering = ('-country',)
admin.site.register(models.Maker, MakerAdmin)
|
{
"content_hash": "17efcac7ea1686b848f99dedc4785225",
"timestamp": "",
"source": "github",
"line_count": 32,
"max_line_length": 58,
"avg_line_length": 28.25,
"alnum_prop": 0.7190265486725663,
"repo_name": "lichengshuang/python",
"id": "2c4fabbb2f1ddb7461f5ed50db1d55362d3fe593",
"size": "928",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "django/djangoStudy/taobao/admin.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "CSS",
"bytes": "321"
},
{
"name": "HTML",
"bytes": "68150"
},
{
"name": "Python",
"bytes": "420936"
},
{
"name": "Shell",
"bytes": "76121"
},
{
"name": "Vim script",
"bytes": "27690"
}
],
"symlink_target": ""
}
|
import time
# 3p
import mock
from nose.plugins.attrib import attr
# project
from config import AGENT_VERSION
from tests.checks.common import AgentCheckTest
from util import headers as agent_headers
RESULTS_TIMEOUT = 10
AGENT_CONFIG = {
'version': AGENT_VERSION,
'api_key': 'toto'
}
CONFIG = {
'instances': [{
'name': 'conn_error',
'url': 'https://thereisnosuchlink.com',
'check_certificate_expiration': False,
'timeout': 1,
}, {
'name': 'http_error_status_code',
'url': 'http://httpbin.org/404',
'check_certificate_expiration': False,
'timeout': 1,
}, {
'name': 'status_code_match',
'url': 'http://httpbin.org/404',
'http_response_status_code': '4..',
'check_certificate_expiration': False,
'timeout': 1,
'tags': ["foo:bar"]
}, {
'name': 'cnt_mismatch',
'url': 'https://github.com',
'timeout': 1,
'check_certificate_expiration': False,
'content_match': 'thereisnosuchword'
}, {
'name': 'cnt_match',
'url': 'https://github.com',
'timeout': 1,
'check_certificate_expiration': False,
'content_match': '(thereisnosuchword|github)'
}, {
'name': 'cnt_match_unicode',
'url': 'https://ja.wikipedia.org/',
'timeout': 1,
'check_certificate_expiration': False,
'content_match': 'メインページ'
}
]
}
CONFIG_SSL_ONLY = {
'instances': [{
'name': 'good_cert',
'url': 'https://github.com:443',
'timeout': 1,
'check_certificate_expiration': True,
'days_warning': 14,
'days_critical': 7
}, {
'name': 'cert_exp_soon',
'url': 'https://google.com',
'timeout': 1,
'check_certificate_expiration': True,
'days_warning': 9999,
'days_critical': 7
}, {
'name': 'cert_critical',
'url': 'https://google.com',
'timeout': 1,
'check_certificate_expiration': True,
'days_warning': 9999,
'days_critical': 9999
}, {
'name': 'conn_error',
'url': 'https://thereisnosuchlink.com',
'timeout': 1,
'check_certificate_expiration': True,
'days_warning': 14,
'days_critical': 7
}
]
}
CONFIG_EXPIRED_SSL = {
'instances': [{
'name': 'expired_cert',
'url': 'https://github.com',
'timeout': 1,
'check_certificate_expiration': True,
'days_warning': 14,
'days_critical': 7
},
]
}
CONFIG_UNORMALIZED_INSTANCE_NAME = {
'instances': [{
'name': '_need-to__be_normalized-',
'url': 'https://github.com',
'timeout': 1,
'check_certificate_expiration': True,
'days_warning': 14,
'days_critical': 7
},
]
}
SIMPLE_CONFIG = {
'instances': [{
'name': 'simple_config',
'url': 'http://httpbin.org',
'check_certificate_expiration': False,
},
]
}
CONFIG_HTTP_HEADERS = {
'instances': [{
'url': 'https://google.com',
'name': 'UpService',
'timeout': 1,
'headers': {"X-Auth-Token": "SOME-AUTH-TOKEN"}
}]
}
FAKE_CERT = {'notAfter': 'Apr 12 12:00:00 2006 GMT'}
@attr(requires='network')
class HTTPCheckTest(AgentCheckTest):
CHECK_NAME = 'http_check'
def tearDown(self):
if self.check:
self.check.stop()
def wait_for_async(self, method, attribute, count):
"""
Loop on `self.check.method` until `self.check.attribute >= count`.
Raise after
"""
i = 0
while i < RESULTS_TIMEOUT:
self.check._process_results()
if len(getattr(self.check, attribute)) >= count:
return getattr(self.check, method)()
time.sleep(1)
i += 1
raise Exception("Didn't get the right count of service checks in time, {0}/{1} in {2}s: {3}"
.format(len(getattr(self.check, attribute)), count, i,
getattr(self.check, attribute)))
def test_http_headers(self):
"""
Headers format.
"""
# Run the check
self.load_check(CONFIG_HTTP_HEADERS, AGENT_CONFIG)
headers = self.check._load_conf(CONFIG_HTTP_HEADERS['instances'][0])[6]
self.assertEqual(headers["X-Auth-Token"], "SOME-AUTH-TOKEN", headers)
expected_headers = agent_headers(AGENT_CONFIG).get('User-Agent')
self.assertEqual(expected_headers, headers.get('User-Agent'), headers)
def test_check(self):
"""
Check coverage.
"""
self.run_check(CONFIG)
        # Overrides self.service_checks attribute when values are available
self.service_checks = self.wait_for_async('get_service_checks', 'service_checks', 5)
# HTTP connection error
tags = ['url:https://thereisnosuchlink.com', 'instance:conn_error']
self.assertServiceCheckCritical("http.can_connect", tags=tags)
# Wrong HTTP response status code
tags = ['url:http://httpbin.org/404', 'instance:http_error_status_code']
self.assertServiceCheckCritical("http.can_connect", tags=tags)
self.assertServiceCheckOK("http.can_connect", tags=tags, count=0)
# HTTP response status code match
tags = ['url:http://httpbin.org/404', 'instance:status_code_match', 'foo:bar']
self.assertServiceCheckOK("http.can_connect", tags=tags)
# Content match & mismatching
tags = ['url:https://github.com', 'instance:cnt_mismatch']
self.assertServiceCheckCritical("http.can_connect", tags=tags)
self.assertServiceCheckOK("http.can_connect", tags=tags, count=0)
tags = ['url:https://github.com', 'instance:cnt_match']
self.assertServiceCheckOK("http.can_connect", tags=tags)
tags = ['url:https://ja.wikipedia.org/', 'instance:cnt_match_unicode']
self.assertServiceCheckOK("http.can_connect", tags=tags)
self.coverage_report()
def test_check_ssl(self):
self.run_check(CONFIG_SSL_ONLY)
# Overrides self.service_checks attribute when values are available
self.service_checks = self.wait_for_async('get_service_checks', 'service_checks', 6)
tags = ['url:https://github.com:443', 'instance:good_cert']
self.assertServiceCheckOK("http.can_connect", tags=tags)
self.assertServiceCheckOK("http.ssl_cert", tags=tags)
tags = ['url:https://google.com', 'instance:cert_exp_soon']
self.assertServiceCheckOK("http.can_connect", tags=tags)
self.assertServiceCheckWarning("http.ssl_cert", tags=tags)
tags = ['url:https://google.com', 'instance:cert_critical']
self.assertServiceCheckOK("http.can_connect", tags=tags)
self.assertServiceCheckCritical("http.ssl_cert", tags=tags)
tags = ['url:https://thereisnosuchlink.com', 'instance:conn_error']
self.assertServiceCheckCritical("http.can_connect", tags=tags)
self.assertServiceCheckCritical("http.ssl_cert", tags=tags)
self.coverage_report()
@mock.patch('ssl.SSLSocket.getpeercert', return_value=FAKE_CERT)
def test_mock_case(self, getpeercert_func):
self.run_check(CONFIG_EXPIRED_SSL)
        # Overrides self.service_checks attribute when values are available
# Needed for the HTTP headers
self.service_checks = self.wait_for_async('get_service_checks', 'service_checks', 2)
tags = ['url:https://github.com', 'instance:expired_cert']
self.assertServiceCheckOK("http.can_connect", tags=tags)
self.assertServiceCheckCritical("http.ssl_cert", tags=tags)
self.coverage_report()
def test_service_check_instance_name_normalization(self):
"""
Service check `instance` tag value is normalized.
        Note: necessary to avoid mismatch and backward incompatibility.
"""
# Run the check
self.run_check(CONFIG_UNORMALIZED_INSTANCE_NAME)
# Overrides self.service_checks attribute when values are available
self.service_checks = self.wait_for_async('get_service_checks', 'service_checks', 2)
# Assess instance name normalization
tags = ['url:https://github.com', 'instance:need_to_be_normalized']
self.assertServiceCheckOK("http.can_connect", tags=tags)
self.assertServiceCheckOK("http.ssl_cert", tags=tags)
def test_warnings(self):
"""
Deprecate events usage for service checks.
"""
self.run_check(SIMPLE_CONFIG)
        # Overrides self.warnings attribute when values are available
self.warnings = self.wait_for_async('get_warnings', 'warnings', 1)
# Assess warnings
self.assertWarning(
"Using events for service checks is deprecated in "
"favor of monitors and will be removed in future versions of the "
"Datadog Agent.",
count=1
)
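# --- Hedged sketch (not part of the original tests) ---
# The polling pattern behind wait_for_async(), reduced to a standalone
# helper: call `fetch` until it yields at least `count` results or
# `timeout` seconds elapse. The name `poll_until` is illustrative only.
def poll_until(fetch, count, timeout=RESULTS_TIMEOUT):
    for _ in range(timeout):
        results = fetch()
        if len(results) >= count:
            return results
        time.sleep(1)
    raise Exception("Expected {0} results within {1}s".format(count, timeout))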
|
{
"content_hash": "ac89628740e6845a9b492c2274f0f1ae",
"timestamp": "",
"source": "github",
"line_count": 273,
"max_line_length": 100,
"avg_line_length": 33.062271062271066,
"alnum_prop": 0.5932860624861511,
"repo_name": "indeedops/dd-agent",
"id": "8e80febe0e7af814979a42a212f30fa2c5edfdc0",
"size": "9073",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "tests/checks/integration/test_http_check.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "ApacheConf",
"bytes": "2717"
},
{
"name": "Go",
"bytes": "2389"
},
{
"name": "HTML",
"bytes": "8553"
},
{
"name": "Nginx",
"bytes": "3908"
},
{
"name": "PowerShell",
"bytes": "2665"
},
{
"name": "Python",
"bytes": "2300561"
},
{
"name": "Ruby",
"bytes": "102896"
},
{
"name": "Shell",
"bytes": "61965"
},
{
"name": "XSLT",
"bytes": "2222"
}
],
"symlink_target": ""
}
|
import unittest
class TreeOne(unittest.TestCase):
"""Test class for the test_build_simple_tree test.
build_test_tree should build a test tree with these test cases.
"""
def test_one(self) -> None:
self.assertGreater(2, 1)
def test_two(self) -> None:
self.assertNotEqual(2, 1)
|
{
"content_hash": "5e1a8912132c246b80b1e7291da1d06f",
"timestamp": "",
"source": "github",
"line_count": 14,
"max_line_length": 67,
"avg_line_length": 22.642857142857142,
"alnum_prop": 0.6498422712933754,
"repo_name": "DonJayamanne/pythonVSCode",
"id": "6db51a4fd80b8227253b7c7cf5d4533f9d8b3812",
"size": "412",
"binary": false,
"copies": "1",
"ref": "refs/heads/main",
"path": "pythonFiles/tests/unittestadapter/.data/utils_simple_tree.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "HTML",
"bytes": "569"
},
{
"name": "JavaScript",
"bytes": "28707"
},
{
"name": "Jupyter Notebook",
"bytes": "10520"
},
{
"name": "Python",
"bytes": "2602995"
},
{
"name": "Roff",
"bytes": "108"
},
{
"name": "Shell",
"bytes": "76"
},
{
"name": "TypeScript",
"bytes": "5178987"
}
],
"symlink_target": ""
}
|
import httplib
import re
import random
import string
import time
import websocket
import json
import threading
REST_TIMEOUT = 5
class OrtcError(Exception):
def __init__(self, message):
self.message = message
def __str__(self):
return self.message
class Channel(object):
@property
def name(self):
return self._name
@name.setter
def name(self, name):
self._name = name
@property
def subscribe_on_reconnecting(self):
return self._subscribe_on_reconnecting
@subscribe_on_reconnecting.setter
def subscribe_on_reconnecting(self, subscribe_on_reconnecting):
self._subscribe_on_reconnecting = subscribe_on_reconnecting
@property
def is_subscribing(self):
return self._is_subscribing
@is_subscribing.setter
def is_subscribing(self, is_subscribing):
self._is_subscribing = is_subscribing
@property
def is_subscribed(self):
return self._is_subscribed
@is_subscribed.setter
def is_subscribed(self, is_subscribed):
self._is_subscribed = is_subscribed
@property
def callback(self):
return self._callback
@callback.setter
def callback(self, callback):
self._callback = callback
def __init__(self, name, subscribe_on_reconnecting, callback):
self._name = name
self._subscribe_on_reconnecting = subscribe_on_reconnecting
self._is_subscribing = False
self._is_subscribed = False
self._callback = callback
class MultiMessage(object):
@property
def total_parts(self):
return self._total_parts
@total_parts.setter
def total_parts(self, total_parts):
self._total_parts = total_parts
@property
def ready_parts(self):
return self._ready_parts
@ready_parts.setter
def ready_parts(self, ready_parts):
self._ready_parts = ready_parts
def __init__(self, total_parts):
self._total_parts = total_parts
self._ready_parts = 0
self._parts = [None]*total_parts
def set_part(self, part_id, part):
        if self._parts[part_id] is None:
self._ready_parts += 1
self._parts[part_id] = part
def is_ready(self):
        return self._ready_parts == self._total_parts
def get_all_message(self):
return ''.join([str(x) for x in self._parts])
class Private:
@staticmethod
def _get_cluster(host, app_key):
try:
host += '?appkey='+app_key
from urlparse import urlparse
uri = urlparse(host)
conn = httplib.HTTPConnection(uri.netloc, timeout=REST_TIMEOUT)
conn.request("GET", uri.path + "?" + uri.query)
res = conn.getresponse()
if res.status == 200:
rbody = re.search('"(.*)"', res.read()).group(0)
return rbody[1:][:-1]
except StandardError:
return None
@staticmethod
def _call_exception_callback(sender, exception):
if sender.on_exception_callback:
sender.on_exception_callback(sender, exception)
@staticmethod
def _validate_url(url):
        return bool(re.compile(r'^\s*(http|https):\/\/(\w+:{0,1}\w*@)?(\S+)(:[0-9]+)?(\/|\/([\w#!:.?+=&%@!\-\/]))?\s*$').match(url))
@staticmethod
def _validate_input(var):
        return bool(re.compile(r'^[\w\-:\/\.]*$').match(var))
@staticmethod
def _enum_state(**state):
return type('Enum state', (), state)
@staticmethod
def _remove_slashes(text):
text = text.replace("\\\\\\\"", '"')
text = text.replace("\\\\\\\\", '\\')
text = text.replace("\\\\n", '\n')
return text
@staticmethod
def _check_permission(permissions, channel):
if permissions == {}:
return True, ''
if channel in permissions:
return True, permissions[channel]
if ':' in channel:
if channel[:channel.index(':')]+':*' in permissions:
return True, permissions[channel[:channel.index(':')]+':*']
return False, ''
@staticmethod
def _rest_post_request(url, body, callback):
def p_thread():
try:
from urlparse import urlparse
uri = urlparse(url)
conn = httplib.HTTPSConnection(uri.netloc, timeout=REST_TIMEOUT)
headers = {}
headers['Content-Length'] = len(body)
conn.request("POST", uri.path, None, headers)
conn.send(body)
res = conn.getresponse()
if res.status==200:
callback(None, res.read())
else:
callback(str(res.status), None)
except Exception, e:
callback(str(e), None)
t = threading.Thread(target=p_thread)
t.setDaemon(True)
t.start()
@staticmethod
def _prepare_server(url, is_cluster, app_key, callback):
server = Private._get_cluster(url, app_key) if is_cluster else url
        if server is None:
            callback('Error getting server from Cluster', None)
            return
        server += '/' if server[-1] != '/' else ''
return server
@staticmethod
def _prepare_server_internal(url, cluster_url, app_key, callback):
        if app_key is None:
            callback('Please, do connect first', None)
            return False, None
        server = Private._get_cluster(cluster_url, app_key) if cluster_url is not None else url
        if server is None:
callback('Error getting server from Cluster', None)
return False, None
return True, server
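# --- Hedged examples (not part of the original module) ---
# Quick demonstrations of two pieces above: MultiMessage reassembly (parts
# may arrive out of order) and Private._check_permission, where a
# "prefix:*" permission matches any "prefix:suffix" channel.
if __name__ == '__main__':
    mm = MultiMessage(2)
    mm.set_part(1, 'world')
    mm.set_part(0, 'hello ')
    print mm.is_ready(), mm.get_all_message()             # True hello world
    perms = {'news': 'r', 'chat:*': 'w'}
    print Private._check_permission(perms, 'news')        # (True, 'r')
    print Private._check_permission(perms, 'chat:room1')  # (True, 'w')
    print Private._check_permission(perms, 'private')     # (False, '')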
|
{
"content_hash": "435632af5f3f429a0c9ff2dd33203180",
"timestamp": "",
"source": "github",
"line_count": 185,
"max_line_length": 144,
"avg_line_length": 31.124324324324323,
"alnum_prop": 0.5751997221257381,
"repo_name": "realtime-framework/RealtimeMessaging-Python2",
"id": "3094d52560615e511cb03bcbe65f27aa9ed915f0",
"size": "5758",
"binary": false,
"copies": "3",
"ref": "refs/heads/master",
"path": "ortc_extensibility.py",
"mode": "33261",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "46297"
}
],
"symlink_target": ""
}
|
from tests.test_helper import *
from braintree.test.nonces import Nonces
import sys
import urllib.parse as urlparse
class TestOAuthGateway(unittest.TestCase):
def setUp(self):
self.gateway = BraintreeGateway(
client_id="client_id$development$integration_client_id",
client_secret="client_secret$development$integration_client_secret"
)
def test_create_token_from_code(self):
code = TestHelper.create_grant(self.gateway, {
"merchant_public_id": "integration_merchant_id",
"scope": "read_write"
})
result = self.gateway.oauth.create_token_from_code({
"code": code
})
self.assertTrue(result.is_success)
credentials = result.credentials
self.assertIsNotNone(credentials.access_token)
self.assertIsNotNone(credentials.refresh_token)
self.assertIsNotNone(credentials.expires_at)
self.assertEqual("bearer", credentials.token_type)
def test_create_token_from_code_with_bad_parameters(self):
result = self.gateway.oauth.create_token_from_code({
"code": "bad_code",
"scope": "read_write"
})
self.assertFalse(result.is_success)
        self.assertIn("Invalid grant: code not found", result.message)
credentials_code_errors = result.errors.for_object("credentials").on("code")
self.assertEqual(1, len(credentials_code_errors))
self.assertEqual(ErrorCodes.OAuth.InvalidGrant, credentials_code_errors[0].code)
def test_create_token_from_code_returns_helpful_error_with_bad_credentials(self):
gateway = BraintreeGateway(
access_token="access_token$development$integration_merchant_id$fb27c79dd",
)
with self.assertRaises(ConfigurationError) as error:
gateway.oauth.create_token_from_code({
"code": "some_code",
"scope": "read_write"
})
config_error = error.exception
self.assertIn("client_id and client_secret are required", str(config_error))
def test_create_token_from_refresh_token(self):
code = TestHelper.create_grant(self.gateway, {
"merchant_public_id": "integration_merchant_id",
"scope": "read_write"
})
refresh_token = self.gateway.oauth.create_token_from_code({
"code": code,
"scope": "read_write"
}).credentials.refresh_token
result = self.gateway.oauth.create_token_from_refresh_token({
"refresh_token": refresh_token
})
self.assertTrue(result.is_success)
credentials = result.credentials
self.assertIsNotNone(credentials.access_token)
self.assertIsNotNone(credentials.refresh_token)
self.assertIsNotNone(credentials.expires_at)
self.assertEqual("bearer", credentials.token_type)
def test_revoke_access_token(self):
code = TestHelper.create_grant(self.gateway, {
"merchant_public_id": "integration_merchant_id",
"scope": "read_write"
})
access_token = self.gateway.oauth.create_token_from_code({
"code": code,
"scope": "read_write"
}).credentials.access_token
result = self.gateway.oauth.revoke_access_token(access_token)
self.assertTrue(result.is_success)
with self.assertRaises(AuthenticationError):
gateway = BraintreeGateway(access_token=access_token)
gateway.customer.create()
def test_connect_url(self):
connect_url = self.gateway.oauth.connect_url({
"merchant_id": "integration_merchant_id",
"redirect_uri": "http://bar.example.com",
"scope": "read_write",
"state": "baz_state",
"landing_page": "login",
"login_only": "true",
"user": {
"country": "USA",
"email": "foo@example.com",
"first_name": "Bob",
"last_name": "Jones",
"phone": "555-555-5555",
"dob_year": "1970",
"dob_month": "01",
"dob_day": "01",
"street_address": "222 W Merchandise Mart",
"locality": "Chicago",
"region": "IL",
"postal_code": "60606"
},
"business": {
"name": "14 Ladders",
"registered_as": "14.0 Ladders",
"industry": "Ladders",
"description": "We sell the best ladders",
"street_address": "111 N Canal",
"locality": "Chicago",
"region": "IL",
"postal_code": "60606",
"country": "USA",
"annual_volume_amount": "1000000",
"average_transaction_amount": "100",
"maximum_transaction_amount": "10000",
"ship_physical_goods": "true",
"fulfillment_completed_in": 7,
"currency": "USD",
"website": "http://example.com"
},
"payment_methods": ["credit_card", "paypal"]
})
query_string = urlparse.urlparse(connect_url)[4]
params = urlparse.parse_qs(query_string)
self.assertEqual(params["merchant_id"], ["integration_merchant_id"])
self.assertEqual(params["client_id"], ["client_id$development$integration_client_id"])
self.assertEqual(params["redirect_uri"], ["http://bar.example.com"])
self.assertEqual(params["scope"], ["read_write"])
self.assertEqual(params["state"], ["baz_state"])
self.assertEqual(params["landing_page"], ["login"])
self.assertEqual(params["login_only"], ["true"])
self.assertEqual(params["user[country]"], ["USA"])
self.assertEqual(params["business[name]"], ["14 Ladders"])
self.assertEqual(params["payment_methods[]"], ["credit_card", "paypal"])
def test_connect_url_limits_payment_methods(self):
connect_url = self.gateway.oauth.connect_url({
"merchant_id": "integration_merchant_id",
"redirect_uri": "http://bar.example.com",
"scope": "read_write",
"state": "baz_state",
"payment_methods": ["credit_card"]
})
query_string = urlparse.urlparse(connect_url)[4]
params = urlparse.parse_qs(query_string)
self.assertEqual(params["merchant_id"], ["integration_merchant_id"])
self.assertEqual(params["client_id"], ["client_id$development$integration_client_id"])
self.assertEqual(params["redirect_uri"], ["http://bar.example.com"])
self.assertEqual(params["payment_methods[]"], ["credit_card"])
def test_connect_url_doesnt_modify_options(self):
options = {"payment_methods": ["credit_card"]}
connect_url = self.gateway.oauth.connect_url(options)
self.assertEqual(options, {"payment_methods": ["credit_card"]})
|
{
"content_hash": "da867c9433c33cc6e754e868ab06717d",
"timestamp": "",
"source": "github",
"line_count": 180,
"max_line_length": 94,
"avg_line_length": 38.955555555555556,
"alnum_prop": 0.5825727324586423,
"repo_name": "braintree/braintree_python",
"id": "2d88a52e76a4535657a0812e38c909b27b205175",
"size": "7012",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "tests/integration/test_oauth.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Dockerfile",
"bytes": "252"
},
{
"name": "Makefile",
"bytes": "238"
},
{
"name": "Python",
"bytes": "1338636"
},
{
"name": "Ruby",
"bytes": "2099"
},
{
"name": "Shell",
"bytes": "193"
}
],
"symlink_target": ""
}
|
"""Tools for data driven testing"""
import random
from engine.base import suit_list, ranks
suit_list = tuple(suit_list)
class DataList(list):
"""Custom list with __name__ attribute, which is added by
prepare_test_data()
"""
pass
def prepare_test_data(*test_data):
"""Generator to prepare data for DDT
:param test_data: tuples with name of the test, input value and expected
output value
:return: named list with input value and expected output value
"""
for td in test_data:
_test_name, _input, _expected = td
data_list = DataList([_input, _expected])
setattr(data_list, '__name__', _test_name)
yield data_list
def generate_random_suits():
"""Generates 5 random suits
:return: 5 random suits
:type: list
"""
random.seed(None)
return [suit_list[random.randint(0, len(suit_list)-1)] for _ in range(5)]
def generate_random_ranks():
"""Generates 5 random ranks
:return: 5 random ranks
:type: list
"""
random.seed(None)
return [ranks[random.randint(0, len(ranks)-1)] for _ in range(5)]
def generate_different_suits():
"""Create hand with 4 suits from list and 1 random
:return: 5 suits
:type: list
"""
_suits = list(suit_list)
random.seed(None)
s = _suits[3]
while s == _suits[3]:
s = suit_list[random.randint(0, len(suit_list)-1)]
    _suits.append(s)
return _suits
def generate_different_ranks():
"""Create hand with 'even' ranks
:return: 5 ranks
:type: list
"""
return ['3', '6', '9', 'Q', '5']
def shuffle(unshuffled):
"""Shuffle items
:param unshuffled: list with items (suits or ranks)
:return: list with shuffled items
"""
random.seed(None)
_unshuffled = list(unshuffled)
shuffled = list()
while _unshuffled:
shuffled.append(_unshuffled.pop(random.randint(0, len(_unshuffled)-1)))
return shuffled
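# --- Hedged example (not part of the original module) ---
# How prepare_test_data() is meant to be consumed by a DDT runner: each
# yielded DataList carries [input, expected] plus a __name__ used to name
# the generated test. The tuples below are made-up sample data.
if __name__ == '__main__':
    cases = prepare_test_data(('pair', ['2', '2'], True),
                              ('no_pair', ['2', '3'], False))
    for case in cases:
        print('{0}: {1} -> {2}'.format(case.__name__, case[0], case[1]))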
|
{
"content_hash": "55b4c726cd5765cc0d999efddfc7a778",
"timestamp": "",
"source": "github",
"line_count": 86,
"max_line_length": 79,
"avg_line_length": 22.825581395348838,
"alnum_prop": 0.6148751910341315,
"repo_name": "kolyat/vpoker",
"id": "4d5ba13cbd86397fbf4b9c85acd9289c11366af0",
"size": "2145",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "tests/tools.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "70429"
}
],
"symlink_target": ""
}
|
"""
URLconf for registration and activation, using django-registration's
default backend.
If the default behavior of these views is acceptable to you, simply
use a line like this in your root URLconf to set up the default URLs
for registration::
(r'^accounts/', include('registration.backends.default.urls')),
This will also automatically set up the views in
``django.contrib.auth`` at sensible default locations.
If you'd like to customize the behavior (e.g., by passing extra
arguments to the various views) or split up the URLs, feel free to set
up your own URL patterns for these views instead.
"""
from django.conf.urls.defaults import *
from django.views.generic import TemplateView
from registration.views import activate
from registration.views import register
urlpatterns = patterns('',
url(r'^activate/complete/$',
TemplateView.as_view(template_name='registration/activation_complete.html'),
name='registration_activation_complete'),
# Activation keys get matched by \w+ instead of the more specific
# [a-fA-F0-9]{40} because a bad activation key should still get to the view;
# that way it can return a sensible "invalid key" message instead of a
# confusing 404.
url(r'^activate/(?P<activation_key>\w+)/$',
activate,
{'backend': 'registration.backends.default.DefaultBackend'},
name='registration_activate'),
url(r'^register/$',
register,
{'backend': 'registration.backends.default.DefaultBackend'},
name='registration_register'),
url(r'^register/complete/$',
TemplateView.as_view(template_name='registration/registration_complete.html'),
name='registration_complete'),
url(r'^register/closed/$',
TemplateView.as_view(template_name='registration/registration_closed.html'),
name='registration_disallowed'),
(r'', include('registration.auth_urls')),
)
|
{
"content_hash": "5196669bbc750ab28cae6f61dde0e9cc",
"timestamp": "",
"source": "github",
"line_count": 51,
"max_line_length": 104,
"avg_line_length": 46.15686274509804,
"alnum_prop": 0.5802888700084962,
"repo_name": "AlexShik/django-registration-fork",
"id": "fc59feef8658a1421b535476fce64ff95725a2f3",
"size": "2354",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "registration/backends/default/urls.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Python",
"bytes": "100016"
},
{
"name": "Shell",
"bytes": "2985"
}
],
"symlink_target": ""
}
|
ENVIRONMENT = 'DEV'
# SECURITY WARNING: keep the secret key used in production secret!
SECRET_KEY = '<insert secret key here>'
#ROOT_URLCONF = 'urls'
ROOT_URLCONF = 'django_backend.urls'
WSGI_APPLICATION = 'django_backend.wsgi.application'
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.postgresql',
'NAME': 'postgres',
'USER': 'postgres',
'HOST': 'db',
'PORT': 5432,
}
}
|
{
"content_hash": "a8550c88f323780b691e155b1bd0e741",
"timestamp": "",
"source": "github",
"line_count": 18,
"max_line_length": 66,
"avg_line_length": 24.055555555555557,
"alnum_prop": 0.6166281755196305,
"repo_name": "vingle1/AfriCuisine",
"id": "95962e5d9e00944903ebdb732882b23c73b73853",
"size": "506",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "backend/django_backend/localsettings.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "671889"
},
{
"name": "HTML",
"bytes": "9882"
},
{
"name": "JavaScript",
"bytes": "6172399"
},
{
"name": "Python",
"bytes": "14721"
}
],
"symlink_target": ""
}
|
from yaql.language import exceptions
from yaql.language import specs
from yaql.language import yaqltypes
import yaql.tests
class TestMiscellaneous(yaql.tests.TestCase):
def test_pass_lambda_from_code(self):
self.assertEqual(
[],
list(self.context('where', self.engine, [1, 2, 3])(False))
)
self.assertEqual(
[2, 3],
list(self.context('where', self.engine, [1, 2, 3])(
lambda t: t > 1))
)
def test_bool_is_not_an_integer(self):
@specs.parameter('arg', yaqltypes.Integer())
def foo(arg):
return arg
self.context.register_function(foo)
self.assertEqual(2, self.eval('foo(2)'))
self.assertRaises(
exceptions.NoMatchingFunctionException,
self.eval, 'foo(true)')
def test_nullable_collections(self):
@specs.parameter('arg', yaqltypes.Sequence())
def foo1(arg):
return arg is None
@specs.parameter('arg', yaqltypes.Sequence(nullable=True))
def foo2(arg):
return arg is None
@specs.parameter('arg', yaqltypes.Iterable())
def bar1(arg):
return arg is None
@specs.parameter('arg', yaqltypes.Iterable(nullable=True))
def bar2(arg):
return arg is None
@specs.parameter('arg', yaqltypes.Iterator())
def baz1(arg):
return arg is None
@specs.parameter('arg', yaqltypes.Iterator(nullable=True))
def baz2(arg):
return arg is None
for func in (foo1, foo2, bar1, bar2, baz1, baz2):
self.context.register_function(func)
self.assertFalse(self.eval('foo1([1, 2])'))
self.assertRaises(
exceptions.NoMatchingFunctionException,
self.eval, 'foo1(null)')
self.assertFalse(self.eval('foo2([1, 2])'))
self.assertTrue(self.eval('foo2(null)'))
self.assertFalse(self.eval('bar1([1, 2])'))
self.assertRaises(
exceptions.NoMatchingFunctionException,
self.eval, 'bar1(null)')
self.assertFalse(self.eval('bar2([1, 2])'))
self.assertTrue(self.eval('bar2(null)'))
self.assertFalse(self.eval('baz1($)', data=iter([1, 2])))
self.assertRaises(
exceptions.NoMatchingFunctionException,
self.eval, 'baz1(null)')
self.assertFalse(self.eval('baz2($)', data=iter([1, 2])))
self.assertTrue(self.eval('baz2(null)'))
|
{
"content_hash": "3a8e4f3fb7293967e11fdbe9c381b816",
"timestamp": "",
"source": "github",
"line_count": 77,
"max_line_length": 70,
"avg_line_length": 32.74025974025974,
"alnum_prop": 0.5799285997619992,
"repo_name": "openstack/yaql",
"id": "26a18e86bcc50637c978e67dcc19e204993eff1f",
"size": "3136",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "yaql/tests/test_miscellaneous.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "481598"
}
],
"symlink_target": ""
}
|
import os
class VerificationError(Exception):
""" An error raised when verification fails
"""
class VerificationMissing(Exception):
""" An error raised when incomplete structures are passed into
cdef, but no verification has been done
"""
def get_extension(srcfilename, modname, sources=(), **kwds):
from distutils.core import Extension
allsources = [srcfilename]
allsources.extend(sources)
return Extension(name=modname, sources=allsources, **kwds)
def compile(tmpdir, ext):
"""Compile a C extension module using distutils."""
saved_environ = os.environ.copy()
try:
outputfilename = _build(tmpdir, ext)
outputfilename = os.path.abspath(outputfilename)
finally:
# workaround for a distutils bugs where some env vars can
# become longer and longer every time it is used
for key, value in saved_environ.items():
if os.environ.get(key) != value:
os.environ[key] = value
return outputfilename
def _build(tmpdir, ext):
# XXX compact but horrible :-(
from distutils.core import Distribution
import distutils.errors
#
dist = Distribution({'ext_modules': [ext]})
options = dist.get_option_dict('build_ext')
options['force'] = ('ffiplatform', True)
options['build_lib'] = ('ffiplatform', tmpdir)
options['build_temp'] = ('ffiplatform', tmpdir)
#
try:
dist.run_command('build_ext')
except (distutils.errors.CompileError,
distutils.errors.LinkError) as e:
raise VerificationError('%s: %s' % (e.__class__.__name__, e))
#
cmd_obj = dist.get_command_obj('build_ext')
[soname] = cmd_obj.get_outputs()
return soname
try:
from os.path import samefile
except ImportError:
def samefile(f1, f2):
return os.path.abspath(f1) == os.path.abspath(f2)
def maybe_relative_path(path):
if not os.path.isabs(path):
return path # already relative
dir = path
names = []
while True:
prevdir = dir
dir, name = os.path.split(prevdir)
if dir == prevdir or not dir:
return path # failed to make it relative
names.append(name)
try:
if samefile(dir, os.curdir):
names.reverse()
return os.path.join(*names)
except OSError:
pass
# ____________________________________________________________
try:
int_or_long = (int, long)
import cStringIO
except NameError:
int_or_long = int # Python 3
import io as cStringIO
def _flatten(x, f):
if isinstance(x, str):
f.write('%ds%s' % (len(x), x))
elif isinstance(x, dict):
keys = sorted(x.keys())
f.write('%dd' % len(keys))
for key in keys:
_flatten(key, f)
_flatten(x[key], f)
elif isinstance(x, (list, tuple)):
f.write('%dl' % len(x))
for value in x:
_flatten(value, f)
elif isinstance(x, int_or_long):
f.write('%di' % (x,))
else:
raise TypeError(
"the keywords to verify() contains unsupported object %r" % (x,))
def flatten(x):
f = cStringIO.StringIO()
_flatten(x, f)
return f.getvalue()
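# --- Hedged example (not part of the original module) ---
# flatten() builds a bencode-like canonical string for the keyword
# arguments passed to verify(): dict keys are sorted first, so logically
# equal kwargs always serialize identically (useful as a cache key).
if __name__ == '__main__':
    print(flatten({'libraries': ['m'], 'define': 1}))
    # -> 2d6sdefine1i9slibraries1l1sm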
|
{
"content_hash": "a9116d33da3b31f16867bc4a75ca2aef",
"timestamp": "",
"source": "github",
"line_count": 111,
"max_line_length": 77,
"avg_line_length": 29.26126126126126,
"alnum_prop": 0.5865147783251231,
"repo_name": "ArneBab/pypyjs",
"id": "460ba901219c585d242bd6cfb76dd3da3d1ec08b",
"size": "3248",
"binary": false,
"copies": "4",
"ref": "refs/heads/master",
"path": "website/demo/home/rfk/repos/pypy/lib_pypy/cffi/ffiplatform.py",
"mode": "33188",
"license": "mit",
"language": [],
"symlink_target": ""
}
|
from __future__ import unicode_literals
from django.db import migrations, models
import django.db.models.deletion
import modelcluster.fields
class Migration(migrations.Migration):
dependencies = [
('common', '0016_auto_20161013_2111'),
('blog', '0006_auto_20161012_1555'),
]
operations = [
migrations.CreateModel(
name='BlogPagePerson',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True,
serialize=False, verbose_name='ID')),
('sort_order', models.IntegerField(blank=True, editable=False,
null=True)),
('author', models.ForeignKey(
on_delete=django.db.models.deletion.CASCADE,
related_name='+',
to='common.Person')),
],
options={
'abstract': False,
'ordering': ['sort_order'],
},
),
migrations.AddField(
model_name='blogpageperson',
name='page',
field=modelcluster.fields.ParentalKey(
on_delete=django.db.models.deletion.CASCADE,
related_name='authors',
to='blog.BlogPage'),
),
migrations.AddField(
model_name='blogpage',
name='blog_authors',
field=models.ManyToManyField(
blank=True,
through='blog.BlogPagePerson',
to='common.Person'),
),
]
|
{
"content_hash": "deba87472aa0de6383586e1ee8c26317",
"timestamp": "",
"source": "github",
"line_count": 49,
"max_line_length": 78,
"avg_line_length": 32.755102040816325,
"alnum_prop": 0.49470404984423677,
"repo_name": "baylee-d/cos.io",
"id": "227cbc88b51c897f761b50a3c5cdee792d4c7bca",
"size": "1678",
"binary": false,
"copies": "1",
"ref": "refs/heads/develop",
"path": "blog/migrations/0007_auto_20161013_2112.py",
"mode": "33261",
"license": "apache-2.0",
"language": [
{
"name": "CSS",
"bytes": "481084"
},
{
"name": "HTML",
"bytes": "819147"
},
{
"name": "JavaScript",
"bytes": "409024"
},
{
"name": "Python",
"bytes": "696401"
},
{
"name": "Shell",
"bytes": "1178"
}
],
"symlink_target": ""
}
|
class Solution(object):
def searchMatrix(self, matrix, target):
"""
:type matrix: List[List[int]]
:type target: int
:rtype: bool
"""
row = 0
column = len(matrix[row]) - 1
while row >= 0 and row < len(matrix) and column >= 0 and column < len(matrix[row]):
if matrix[row][column] == target:
return True
elif matrix[row][column] < target:
row += 1
else:
column -= 1
return False
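# --- Hedged usage example (not part of the original solution) ---
# The search starts at the top-right corner and walks left when the current
# value overshoots the target, down when it undershoots, giving
# O(rows + columns) time overall.
if __name__ == '__main__':
    grid = [[1, 4, 7],
            [2, 5, 8],
            [3, 6, 9]]
    print(Solution().searchMatrix(grid, 5))   # True
    print(Solution().searchMatrix(grid, 10))  # False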
|
{
"content_hash": "6de208461f079f6020e224f95b6964f6",
"timestamp": "",
"source": "github",
"line_count": 18,
"max_line_length": 91,
"avg_line_length": 29.833333333333332,
"alnum_prop": 0.4748603351955307,
"repo_name": "FeiZhan/Algo-Collection",
"id": "2948952fa2836fba3256a747c0e27ac390a81c2a",
"size": "537",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "answers/leetcode/Search a 2D Matrix/Search a 2D Matrix.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "C++",
"bytes": "892410"
},
{
"name": "Java",
"bytes": "743448"
},
{
"name": "JavaScript",
"bytes": "3093"
},
{
"name": "Python",
"bytes": "93383"
},
{
"name": "Shell",
"bytes": "891"
}
],
"symlink_target": ""
}
|
from __future__ import unicode_literals
from django.db import models, migrations
class Migration(migrations.Migration):
dependencies = [
('taxbrain', '0026_auto_20141022_1556'),
]
operations = [
migrations.AddField(
model_name='taxsaveinputs',
name='additional_child_amount',
field=models.FloatField(default=None, null=True, blank=True),
preserve_default=True,
),
migrations.AddField(
model_name='taxsaveinputs',
name='additional_qualify_no_child',
field=models.FloatField(default=3, null=True, blank=True),
preserve_default=True,
),
]
|
{
"content_hash": "2630f0cc2f954d7181f63ae78f9ac1a5",
"timestamp": "",
"source": "github",
"line_count": 25,
"max_line_length": 73,
"avg_line_length": 27.72,
"alnum_prop": 0.5974025974025974,
"repo_name": "PeterDSteinberg/webapp-public",
"id": "bead44843e1687b9f4e17792a81a4d273899a25b",
"size": "717",
"binary": false,
"copies": "3",
"ref": "refs/heads/master",
"path": "webapp/apps/taxbrain/migrations/0027_auto_20141022_1559.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "856744"
},
{
"name": "HTML",
"bytes": "61933"
},
{
"name": "JavaScript",
"bytes": "85905"
},
{
"name": "Python",
"bytes": "381167"
},
{
"name": "Shell",
"bytes": "17"
}
],
"symlink_target": ""
}
|
class PlayerData():
pass
|
{
"content_hash": "b3e756e8d4a9b20121f24962bc1e6679",
"timestamp": "",
"source": "github",
"line_count": 3,
"max_line_length": 19,
"avg_line_length": 9,
"alnum_prop": 0.7037037037037037,
"repo_name": "grimfang/owp_dungeon_crawler",
"id": "b3323b7bebeea84fc5cf2297ba3684b5c4f01acc",
"size": "42",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "src/server/playerData.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "51000"
},
{
"name": "Shell",
"bytes": "311"
}
],
"symlink_target": ""
}
|
import pyfits
import numpy
import SPARTATools
datapath = '/home/deen/Data/GRAVITY/SPARTA_Data/'
delta = pyfits.getdata(datapath+'delta_junk.fits')
fullNoll = pyfits.getdata(datapath+'ZernikeCovar_Noll.fits')
HOIM = pyfits.getdata(datapath+'HOIM.fits')
TTIM = pyfits.getdata(datapath+'TTIM.fits')
nfilt = 50
nHOAct = 60
nTTAct = 2
nSubap = 136
HO_inv = SPARTATools.pseudoinv(HOIM, nfilt)
TT2HO = numpy.dot(HO_inv, TTIM)
lam, ev = SPARTATools.diagonalisation(numpy.dot(HOIM.T, HOIM))
pistonMode = ev[:,-1]
pistonProj = numpy.dot(delta, pistonMode)
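# --- Hedged sketch (not part of the original script) ---
# SPARTATools.pseudoinv is assumed here to be a truncated-SVD pseudo-inverse
# that filters out the nfilt weakest modes; under that assumption, a
# plain-numpy equivalent could look like this.
def pseudoinv_sketch(A, nfilt):
    U, s, Vt = numpy.linalg.svd(A, full_matrices=False)
    keep = len(s) - nfilt  # discard the nfilt smallest singular values
    s_inv = numpy.zeros_like(s)
    s_inv[:keep] = 1.0 / s[:keep]
    return numpy.dot(Vt.T * s_inv, U.T)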
|
{
"content_hash": "d20a65f51a84507b89454b918a37d216",
"timestamp": "",
"source": "github",
"line_count": 22,
"max_line_length": 62,
"avg_line_length": 25.045454545454547,
"alnum_prop": 0.7477313974591652,
"repo_name": "soylentdeen/CIAO-commissioning-tools",
"id": "3c1994a5a518c2e1864c4c7f9e0746187db0751b",
"size": "551",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "sandbox/matlabtests/matlabhelp.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "251894"
},
{
"name": "Tcl",
"bytes": "584"
}
],
"symlink_target": ""
}
|
import random
from typing import List, Optional, Tuple
from shared.insn_yaml import InsnsFile
from .branch import Branch
from .jump import Jump
from .loop import Loop
from ..config import Config
from ..program import ProgInsn, Program
from ..model import Model
from ..snippet_gen import GenCont, GenRet
class BadAtEnd(Loop):
    '''A snippet generator that generates a loop/branch/jump at the end of a loop
    This works by overriding the _gen_tail_insns method of the Loop generator
    and replacing the last instruction that we would have generated with a
    loop, branch or jump instruction.
'''
ends_program = True
def __init__(self, cfg: Config, insns_file: InsnsFile) -> None:
super().__init__(cfg, insns_file)
self.branch_gen = Branch(cfg, insns_file)
self.jump_gen = Jump(cfg, insns_file)
def pick_weight(self,
model: Model,
program: Program) -> float:
# Only try this if we've got a reasonable amount of room.
room = min(model.fuel, program.space)
assert 0 < room
return (1.0 if room > 50 else 0.0)
def _gen_tail_insns(self,
num_insns: int,
model: Model,
program: Program) -> Optional[Tuple[List[ProgInsn],
Model]]:
assert num_insns > 0
old_model = model.copy()
ret = super()._gen_tail_insns(num_insns, model, program)
if ret is None:
return None
insns, model = ret
assert len(insns) == num_insns
match_addr = model.pc
# We need a model to represent the state just before the last
# instruction. Of course, we can't "unwind" the state update that we
# just did. However, a merge between the initial state and the end
# state gives a sound (if very imprecise!) approximation.
#
# Since the Model.merge() method expects the two models to have the
# same PC (which is the usual situation when implementing phi nodes),
# we also have to teleport both models to the right place.
model.pc -= 4
old_model.pc = model.pc
model.merge(old_model)
# Pick a new instruction to replace the last one we generated. Start by
# shuffling the numbers from 0..2, which will represent Branch, Loop,
# Jump respectively. The idea is that we want to try them in an
# arbitrary order, but don't want to fail completely if one of the
# other generators might have worked.
        tail_insn = None  # set by whichever generator below succeeds first
        types = list(range(3))
random.shuffle(types)
for lbj in types:
if lbj == 0:
# Loop! Pick an loop head (with an arbitrary "100" for
# space_here and without setting up program properly: it
# doesn't really matter). Pass False as the "check" argument:
# we don't want to check anything about whether the implied
# loop overlaps with existing code.
retL = self._pick_head(100, False, model, program)
if retL is not None:
tail_insn, _ = retL
break
elif lbj == 1:
# Branch!
retB = self.branch_gen.gen_head(model, program)
if retB is not None:
tail_insn, _ = retB
break
else:
assert lbj == 2
# Jump!
# The merge() function above took the minimum of the two
# models' amount of fuel. Since Jump.gen_tgt() updates the
# model with the new instruction, we need to give it one extra
# fuel (to match what got incorrectly subtracted for the
# instruction we're replacing). It also adds the generated
# instruction to the program: we don't want that, so we give it
# a throw-away copy to modify.
model.fuel += 1
retJ = self.jump_gen.gen_tgt(model, program.copy(), None)
                if retJ is not None:
                    tail_insn, _, _ = retJ
                    break
        if tail_insn is None:
            return None
# Fix up model.pc. The "head" generators for loop and branch don't
# update the model for what they've generated, so we'll be 4 bytes too
# early. Jump.gen_tgt() updates the model to match, so we'll be at some
# crazy new location! It doesn't really matter (we're faulting anyway),
# but Loop expects us to leave model.pc in the right place, so we need
# to fix it up here.
model.pc = match_addr
insns[-1] = tail_insn
return (insns, model)
def gen(self,
cont: GenCont,
model: Model,
program: Program) -> Optional[GenRet]:
# Run the Loop generator, but with our _gen_tail, which means we'll put
# an unexpected jump/branch/loop at the end of the body.
ret = super().gen(cont, model, program)
if ret is None:
return None
# If the Loop generator returned success, that's great! However, we
# need to fix things up a bit for the changes we've made. Specifically,
# the "done" flag should be true (since this is supposed to cause a
# fault) and the final PC should be 4 less, pointing just before the
# last instruction in the loop.
snippet, model = ret
model.pc -= 4
return (snippet, model)
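# --- Hedged sketch (not part of the original generator) ---
# The override pattern used by BadAtEnd, stripped to its essence: reuse the
# parent Loop generator's tail, then swap out the final instruction.
# `_make_bad_insn` is a hypothetical stand-in for the shuffled
# loop/branch/jump selection implemented above.
class _TailPatcherSketch(Loop):
    def _gen_tail_insns(self,
                        num_insns: int,
                        model: Model,
                        program: Program) -> Optional[Tuple[List[ProgInsn],
                                                            Model]]:
        ret = super()._gen_tail_insns(num_insns, model, program)
        if ret is None:
            return None
        insns, model = ret
        bad_insn = self._make_bad_insn(model, program)  # hypothetical helper
        if bad_insn is None:
            return None
        insns[-1] = bad_insn
        return (insns, model)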
|
{
"content_hash": "756b3eabc618526516b9dcca33a0dfc8",
"timestamp": "",
"source": "github",
"line_count": 140,
"max_line_length": 79,
"avg_line_length": 39.885714285714286,
"alnum_prop": 0.5750358166189111,
"repo_name": "lowRISC/opentitan",
"id": "6ff7e8ff51cf54b86370a687d7269e2d47928b61",
"size": "5732",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "hw/ip/otbn/dv/rig/rig/gens/bad_at_end.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Assembly",
"bytes": "516881"
},
{
"name": "C",
"bytes": "4864968"
},
{
"name": "C++",
"bytes": "1629214"
},
{
"name": "CSS",
"bytes": "3281"
},
{
"name": "Dockerfile",
"bytes": "6732"
},
{
"name": "Emacs Lisp",
"bytes": "411542"
},
{
"name": "HTML",
"bytes": "149270"
},
{
"name": "Makefile",
"bytes": "20646"
},
{
"name": "Python",
"bytes": "2576872"
},
{
"name": "Rust",
"bytes": "856480"
},
{
"name": "SCSS",
"bytes": "54700"
},
{
"name": "Shell",
"bytes": "119163"
},
{
"name": "Smarty",
"bytes": "771102"
},
{
"name": "Starlark",
"bytes": "688003"
},
{
"name": "Stata",
"bytes": "3676"
},
{
"name": "SystemVerilog",
"bytes": "14853322"
},
{
"name": "Tcl",
"bytes": "361936"
},
{
"name": "Verilog",
"bytes": "3296"
}
],
"symlink_target": ""
}
|
'''
author Lama Hamadeh
'''
import numpy as np
import pandas as pd
import scipy.io.wavfile as wavfile
# Good Luck!
#
# INFO:
# Samples = Observations. Each audio file is a single sample
# in our dataset.
#
# Audio Samples = https://en.wikipedia.org/wiki/Sampling_(signal_processing)
# Each .wav file is actually just a bunch of numeric samples, "sampled"
# from the analog signal. Sampling is a type of discretization. When we
# mention 'samples', we mean observations. When we mention 'audio samples',
# we mean the actually "features" of the audio file.
#
#
# The goal of this lab is to use multi-target linear regression to generate,
# by extrapolation, the missing portion of the test audio file.
#
# Each one audio_sample features will be the output of an equation,
# which is a function of the provided portion of the audio_samples:
#
# missing_samples = f(provided_samples)
#
# You can experiment with how much of the audio you want to chop off
# and have the computer generate using the Provided_Portion parameter.
#
# TODO: Play with this. This is how much of the audio file will
# be provided, in percent. The remaining percent of the file will
# be generated via linear extrapolation.
Provided_Portion = 0.25
#
# TODO: Create a regular ol' Python List called 'zero'
# Loop through the dataset and load up all 50 of the 0_jackson*.wav files
# For each audio file, simply append the audio data (not the sample_rate,
# just the data!) to your Python list 'zero':
#
# .. your code here ..
zero = []
import os
for file in os.listdir('/Users/Admin/Desktop/LAMA/DAT210x/DAT210x-master/Module5/Datasets/free-spoken-digit-dataset-master/recordings'):
if file.startswith('0_jackson'):
a = os.path.join('/Users/Admin/Desktop/LAMA/DAT210x/DAT210x-master/Module5/Datasets/free-spoken-digit-dataset-master/recordings', file)
sample_rate, audio_data = wavfile.read(a)
zero.append(audio_data)
print len(zero)  # 50, as expected, since there are 50 files in the folder starting with "0_jackson"
#note that if the recordings can be denoted as n_jackson, then it can be seen that for each label, n, there are 50 files.
# in other words, there are 50 takes of each clip
#
# TODO: Just for a second, convert zero into a DataFrame. When you do
# so, set the dtype to np.int16, since the input audio files are 16
# bits per sample. If you don't know how to do this, read up on the docs
# here:
# http://pandas.pydata.org/pandas-docs/stable/generated/pandas.DataFrame.html
#
# Since these audio clips are unfortunately not length-normalized,
# we're going to have to just hard chop them to all be the same length.
# Since Pandas would have inserted NANs at any spot to make zero a
# perfectly rectangular [n_observed_samples, n_audio_samples] array,
# do a dropna on the Y axis here. Then, convert it back into an
# NDArray using .values
#
# .. your code here ..
zero = pd.DataFrame(data = zero, dtype = np.int16)
zero.dropna(axis = 1, inplace = True)
zero = zero.values
print type(zero) # zero is now a numpy NDArray
#
# TODO: It's important to know how long (in audio samples) the
# data is now. 'zero' is currently shaped [n_samples, n_audio_samples],
# so get the n_audio_samples count and store it in a variable called
# n_audio_samples
#
# .. your code here ..
n_audio_samples = zero.shape[1]
print n_audio_samples #4087
#
# TODO: Create your linear regression model here and store it in a
# variable called 'model'. Don't actually train or do anything else
# with it yet:
#
# .. your code here ..
from sklearn import linear_model
model = linear_model.LinearRegression()
#
# INFO: There are 50 takes of each clip. You want to pull out just one
# of them, randomly, and that one will NOT be used in the training of
# your model. In other words, the one file we'll be testing / scoring
# on will be an unseen sample, independent to the rest of your
# training set:
from sklearn.utils.validation import check_random_state
rng = check_random_state(7) # Leave this alone until you've submitted your lab
random_idx = rng.randint(zero.shape[0])
test = zero[random_idx]
train = np.delete(zero, [random_idx], axis=0)
#
# TODO: Print out the shape of train, and the shape of test
# train will be shaped: [n_samples, n_audio_samples], where
# n_audio_samples are the 'features' of the audio file
# test will be shaped [n_audio_features], since it is a single
# sample (audio file, e.g. observation).
#
# .. your code here ..
print "Shapes of train and test, respectively:", train.shape, test.shape
#
# INFO: The test data will have two parts, X_test and y_test. X_test is
# going to be the first portion of the test audio file, which we will
# be providing the computer as input. y_test, the "label" if you will,
# is going to be the remaining portion of the audio file. Like such,
# the computer will use linear regression to derive the missing
# portion of the sound file based off of the training data its received!
#
# Save the original 'test' clip, the one you're about to delete
# half of, so that you can compare it to the 'patched' clip once
# you've generated it. HINT: you should have got the sample_rate
# when you were loading up the .wav files:
wavfile.write('Original Test Clip.wav', sample_rate, test)
#
# TODO: Prepare the TEST date by creating a slice called X_test. It
# should have Provided_Portion * n_audio_samples audio sample features,
# taken from your test audio file, currently stored in the variable
# 'test'. In other words, grab the FIRST Provided_Portion *
# n_audio_samples audio features from test and store it in X_test.
#
# .. your code here ..
X_test = test[:int(Provided_Portion*n_audio_samples)]
#
# TODO: If the first Provided_Portion * n_audio_samples features were
# stored in X_test, then we need to also grab the *remaining* audio
# features and store it in y_test. With the remaining features stored
# in there, we will be able to R^2 "score" how well our algorithm did
# in completing the sound file.
#
# .. your code here ..
y_test = test[int(Provided_Portion*n_audio_samples):]
#
# TODO: Duplicate the same process for X_train, y_train. The only
# differences being: 1) You will be getting your audio data from
# 'train' instead of from 'test', 2) Remember the shape of train that
# you printed out earlier? You want to do this slicing but for ALL
# samples (observations). For each observation, you want to slice
# the first Provided_Portion * n_audio_samples audio features into
# X_train, and the remaining go into y_train. All of this should be
# accomplishable using regular indexing in two lines of code.
#
# .. your code here ..
X_train = train[:, :int(Provided_Portion*n_audio_samples)]
y_train = train[:, int(Provided_Portion*n_audio_samples):]
#
# TODO: SciKit-Learn gets mad if you don't supply your training or testing
# data in the form of 2D arrays: [n_samples, n_features].
#
# So if you only have one SAMPLE, as is the case with our X_test
# and y_test, then by calling .reshape(1, -1), you can turn
# [n_features] into [1, n_features].
#
# On the other hand, if you only have one FEATURE, which currently
# doesn't apply, you can call .reshape(-1, 1) on your data to turn
# [n_samples] into [n_samples, 1]:
#
# .. your code here ..
X_test = X_test.reshape(1, -1)
y_test = y_test.reshape(1, -1)
#
# TODO: Fit your model using your training data and label:
#
# .. your code here ..
model.fit(X_train, y_train)
#
# TODO: Use your model to predict the 'label' of X_test. Store the
# resulting prediction in a variable called y_test_prediction
#
# .. your code here ..
y_test_prediction = model.predict(X_test)
# INFO: SciKit-Learn will use float64 to generate your predictions
# so let's take those values back to int16:
y_test_prediction = y_test_prediction.astype(dtype=np.int16)
#
# TODO: Score how well your prediction would do for some good laughs,
# by passing in your test data and test label (y_test).
#
# .. your code here ..
score = model.score(X_test, y_test)
print "Extrapolation R^2 Score: ", score
#
# First, take the first Provided_Portion portion of the test clip, the
# part you fed into your linear regression model. Then, stitch that
# together with the abomination the predictor model generated for you,
# and then save the completed audio clip:
completed_clip = np.hstack((X_test, y_test_prediction))
wavfile.write('Extrapolated Clip.wav', sample_rate, completed_clip[0])
#
# INFO: Congrats on making it to the end of this crazy lab =) !
|
{
"content_hash": "3fa98893b630bd1c9dd30d697172ef01",
"timestamp": "",
"source": "github",
"line_count": 222,
"max_line_length": 143,
"avg_line_length": 39.22072072072072,
"alnum_prop": 0.7109222464683588,
"repo_name": "LamaHamadeh/Microsoft-DAT210x",
"id": "bdc54806ee6040b39c4380b021941941e76e3cd7",
"size": "8707",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "Module 5/assignment10.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "150945"
}
],
"symlink_target": ""
}
|
from distutils.core import setup
setup(name='django-orderedmodel',
version='2014.12.001',
description='Orderable models for Django',
long_description='''
This Django_ app helps you create Django models that can be
moved up/down with respect to each other.
See README_ for more details.
.. _Django: https://www.djangoproject.com/
.. _README: https://github.com/kirelagin/django-orderedmodel/blob/master/README.md
''',
author='Kirill Elagin',
author_email='kirelagin@gmail.com',
url='https://github.com/kirelagin/django-orderedmodel',
classifiers = ['Development Status :: 5 - Production/Stable',
'Environment :: Web Environment',
'Framework :: Django',
'Intended Audience :: Developers',
'License :: OSI Approved :: BSD License',
'Operating System :: OS Independent',
'Programming Language :: Python :: 2',
'Programming Language :: Python :: 3',
'Topic :: Software Development :: Libraries',
],
keywords = ['Django', 'order', 'ordering', 'models'],
packages = ['orderedmodel'],
package_data = {
'orderedmodel': [
'static/orderedmodel/arrow-down.gif',
'static/orderedmodel/arrow-up.gif',
],
},
)
|
{
"content_hash": "998c1b598757c2a491a4db136426f2fa",
"timestamp": "",
"source": "github",
"line_count": 43,
"max_line_length": 92,
"avg_line_length": 33.93023255813954,
"alnum_prop": 0.5455791638108294,
"repo_name": "kirelagin/django-orderedmodel",
"id": "d2d4c11185decc9342c534b5503e003e1534b172",
"size": "1482",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "setup.py",
"mode": "33261",
"license": "bsd-3-clause",
"language": [
{
"name": "Python",
"bytes": "4691"
}
],
"symlink_target": ""
}
|
import json
from pprint import pprint
from collections import OrderedDict
with open('Config.json') as data_file:
data = json.load(data_file, object_pairs_hook=OrderedDict)
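# Note: object_pairs_hook=OrderedDict preserves the key order from the file,
# which plain dicts do not guarantee on older Python versions.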
print "=========================================================="
print(data)
print "=========================================================="
with open('data_config.json', 'w') as outfile:
# json.dump(data, outfile, indent=4, sort_keys=False, separators=(',', ':'))
json.dump(data, outfile, indent = 4)
# data01 ={}
# with open('Config.json') as data_file:
# for line in data_file:
# data01.update(json.load(line))
# with open('data_config_01.json', 'w') as outfile:
# json.dump(data01, outfile,indent=4, separators=(',', ':') )
# print "=========================================================="
# fd = open('Config.json', 'r')
# text = fd.read()
# fd.close()
# data01 = json.loads(text)
# pprint(data01)
|
{
"content_hash": "197ea45ff8e73ea997784d191eb74dce",
"timestamp": "",
"source": "github",
"line_count": 30,
"max_line_length": 81,
"avg_line_length": 30.866666666666667,
"alnum_prop": 0.5248380129589633,
"repo_name": "HPPTECH/hpp_IOSTressTest",
"id": "f7ef885a1897480b0b007c6c6ebc1c3686339504",
"size": "945",
"binary": false,
"copies": "15",
"ref": "refs/heads/master",
"path": "Refer/IOST_OLD_SRC/IOST_0.04/Skylark/Test_config_json.py",
"mode": "33261",
"license": "mit",
"language": [
{
"name": "Batchfile",
"bytes": "5571"
},
{
"name": "C",
"bytes": "5083"
},
{
"name": "CSS",
"bytes": "53608"
},
{
"name": "HTML",
"bytes": "2732176"
},
{
"name": "JavaScript",
"bytes": "945408"
},
{
"name": "Makefile",
"bytes": "5568"
},
{
"name": "Python",
"bytes": "5810318"
},
{
"name": "Shell",
"bytes": "21948"
}
],
"symlink_target": ""
}
|
'''
This sample script shows a simple **nested** menu structure.
There are two `group` items: `build_menu` and `test_menu`. These two items
are shown on the main menu. Once selected, their respective sub-items will
be shown.
`test_menu` has a sub-menu itself, `sub_test_menu`. That item has a single
menu item.
Notice how the decorators change from `@climenu` to `@build_menu` and
`@test_menu`.
'''
from __future__ import print_function
import climenu
###############################################################################
# Create an empty function to serve as a menu group
@climenu.group()
def build_menu():
'''Build Functions'''
pass
# Add this function to the `build_menu` group
@build_menu.menu()
def test():
'''Build the package'''
print("!!!package build!!!")
return True
# Add this function to the `build_menu` group
@build_menu.menu()
def test2():
'''Build the release'''
print("!!!release build")
return True
###############################################################################
###############################################################################
# Create an empty function to serve as a menu group
@climenu.group()
def test_menu():
'''Test Functions'''
pass
# Add this function to the `test_menu` group
@test_menu.menu()
def test_one():
'''Run test #1'''
print("!!!test #1 run!!!")
return True
# Add this function to the `test_menu` group
@test_menu.menu()
def test_two():
'''Run test #2'''
print("!!!test #2 run!!!")
# Create a sub-group and add it to the `test_menu` group
@test_menu.group()
def sub_test_menu():
'''Another testing menu'''
pass
# Add this function to the `sub_test_menu` group
@sub_test_menu.menu()
def subsub_menu1():
'''Run test #3'''
print("!!!test #3 run!!!")
#################################################################################
if __name__ == '__main__':
climenu.run()
|
{
"content_hash": "9f3ed2ae9a7f06ea9400675d9a3898f6",
"timestamp": "",
"source": "github",
"line_count": 80,
"max_line_length": 81,
"avg_line_length": 25.35,
"alnum_prop": 0.5152859960552268,
"repo_name": "mtik00/pyclimenu",
"id": "04e3a7f5f7b3c499a8b7fb94336c858ef03a6734",
"size": "2077",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "examples/nested.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Batchfile",
"bytes": "181"
},
{
"name": "Python",
"bytes": "51288"
}
],
"symlink_target": ""
}
|
from __future__ import (unicode_literals, absolute_import)
import re
from django import forms
from django.forms.widgets import HiddenInput
from . import VIES_COUNTRY_CHOICES
EMPTY_VALUES = (None, '')
class VATINWidget(forms.MultiWidget):
"""docstring for VATINWidget"""
def __init__(self, choices=VIES_COUNTRY_CHOICES, attrs=None):
widgets = (
forms.Select(choices=choices),
forms.TextInput()
)
super(VATINWidget, self).__init__(widgets, attrs)
def value_from_datadict(self, data, files, name):
value = [widget.value_from_datadict(data, files, name + '_%s' % i) for i, widget in enumerate(self.widgets)]
try:
country, code = value
# the spaces and the dots are removed
code = code.replace(".", "").replace(" ", "")
except:
return data.get(name, None)
if code not in EMPTY_VALUES:
if country in EMPTY_VALUES:
try:
# ex. code="FR09443710785", country="".
                    empty, country, code = re.split('([a-zA-Z]+)', code)
except:
return ['', code]
else:
# ex. code ="FR09443710785", country="FR".
re_code = re.compile(r'^%s(\d+)$' % country)
if re_code.match(code):
code = code.replace(country, "", 1)
try:
country = country.upper()
except:
pass
return [country, code]
else:
return [None, None]
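    # Illustrative outcomes of the parsing above (assumed): both
    # ('FR', 'FR09443710785') and ('', 'FR09443710785') come back as
    # ['FR', '09443710785']; fully empty input yields [None, None].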
def format_output(self, rendered_widgets):
return "%s %s" % (rendered_widgets[0], rendered_widgets[1])
def decompress(self, value):
if value:
try:
country, code = value
except:
country = None
code = value
if country in EMPTY_VALUES:
try:
country = code[:2]
code = code[2:]
except:
pass
return [country, code]
return [None, None]
class VATINHiddenWidget(VATINWidget):
"""
    A Widget that splits VAT input into two <input type="hidden"> inputs.
"""
def __init__(self, attrs=None):
widgets = (HiddenInput(attrs=attrs), HiddenInput(attrs=attrs))
super(VATINWidget, self).__init__(widgets, attrs)
|
{
"content_hash": "97733ab46e73093c85864ce23a7c25ea",
"timestamp": "",
"source": "github",
"line_count": 78,
"max_line_length": 116,
"avg_line_length": 31.41025641025641,
"alnum_prop": 0.5146938775510204,
"repo_name": "vdboor/django-vies",
"id": "3f26ff06036b6590833666f785af8e60894be1d3",
"size": "2474",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "vies/widgets.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "17901"
}
],
"symlink_target": ""
}
|
import pendulum
from furl import furl
from share import Harvester
class FigshareHarvester(Harvester):
page_size = 50
url = 'https://api.figshare.com/v2/articles'
def do_harvest(self, start_date, end_date):
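        # Returns a generator of (url, detail) tuples for records modified
        # between start_date and end_date (assumed contract of the `share`
        # Harvester base class).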
return self.fetch_records(furl(self.url).set(query_params={
'order_direction': 'asc',
'order': 'modified_date',
'page_size': self.page_size,
'modified_date': start_date.date().isoformat(),
}).url, end_date.date())
def fetch_records(self, url, end_day):
page, detail = 0, None
while True:
page += 1
resp = self.requests.get(furl(url).add(query_params={
'page': page,
}).url)
if resp.status_code == 422:
# We've asked for too much. Time to readjust date range
# Thanks for leaking variables python
                # .set (not .add) avoids duplicate query params, and .url
                # keeps `url` a plain string for the next iteration
                page, url = 0, furl(url).set(query_params={
                    'modified_date': pendulum.parse(detail['modified_date']).date().isoformat()
                }).url
continue
for item in resp.json():
                # use a separate name so the page-level `resp` isn't clobbered
                detail_resp = self.requests.get(item['url'])
                detail = detail_resp.json()
if pendulum.parse(detail['modified_date']).date() > end_day:
return
yield item['url'], detail
if len(resp.json()) < self.page_size:
return # We've hit the end of our results
|
{
"content_hash": "2fee9d56deb59adf054d143eaa54a8cb",
"timestamp": "",
"source": "github",
"line_count": 48,
"max_line_length": 95,
"avg_line_length": 31.041666666666668,
"alnum_prop": 0.525503355704698,
"repo_name": "zamattiac/SHARE",
"id": "f34fc36bc13929a1bbdb07007e1b3fb6c08fe740",
"size": "1490",
"binary": false,
"copies": "1",
"ref": "refs/heads/develop",
"path": "providers/com/figshare/v2/harvester.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "CSS",
"bytes": "3690"
},
{
"name": "HTML",
"bytes": "1582"
},
{
"name": "Python",
"bytes": "1517988"
},
{
"name": "Shell",
"bytes": "633"
}
],
"symlink_target": ""
}
|
"""The tests for the StatsD feeder."""
import unittest
try:
from unittest import mock
except ImportError:
import mock
import homeassistant.core as ha
import homeassistant.components.statsd as statsd
from homeassistant.const import STATE_ON, STATE_OFF, EVENT_STATE_CHANGED
class TestStatsd(unittest.TestCase):
"""Test the StatsD component."""
@mock.patch('statsd.StatsClient')
def test_statsd_setup_full(self, mock_connection):
"""Test setup with all data."""
config = {
'statsd': {
'host': 'host',
'port': 123,
'sample_rate': 1,
'prefix': 'foo',
}
}
hass = mock.MagicMock()
self.assertTrue(statsd.setup(hass, config))
mock_connection.assert_called_once_with(
host='host',
port=123,
prefix='foo')
self.assertTrue(hass.bus.listen.called)
self.assertEqual(EVENT_STATE_CHANGED,
hass.bus.listen.call_args_list[0][0][0])
@mock.patch('statsd.StatsClient')
def test_statsd_setup_defaults(self, mock_connection):
"""Test setup with defaults."""
config = {
'statsd': {
'host': 'host',
}
}
hass = mock.MagicMock()
self.assertTrue(statsd.setup(hass, config))
mock_connection.assert_called_once_with(
host='host',
port=statsd.DEFAULT_PORT,
prefix=statsd.DEFAULT_PREFIX)
self.assertTrue(hass.bus.listen.called)
@mock.patch('statsd.StatsClient')
def test_event_listener_defaults(self, mock_client):
"""Test event listener."""
config = {
'statsd': {
'host': 'host',
}
}
hass = mock.MagicMock()
statsd.setup(hass, config)
self.assertTrue(hass.bus.listen.called)
handler_method = hass.bus.listen.call_args_list[0][0][1]
valid = {'1': 1,
'1.0': 1.0,
STATE_ON: 1,
STATE_OFF: 0}
for in_, out in valid.items():
state = mock.MagicMock(state=in_,
attributes={"attribute key": 3.2})
handler_method(mock.MagicMock(data={'new_state': state}))
mock_client.return_value.gauge.assert_has_calls([
mock.call(state.entity_id, out, statsd.DEFAULT_RATE),
])
mock_client.return_value.gauge.reset_mock()
mock_client.return_value.incr.assert_called_once_with(
state.entity_id, rate=statsd.DEFAULT_RATE)
mock_client.return_value.incr.reset_mock()
for invalid in ('foo', '', object):
handler_method(mock.MagicMock(data={
'new_state': ha.State('domain.test', invalid, {})}))
self.assertFalse(mock_client.return_value.gauge.called)
self.assertFalse(mock_client.return_value.incr.called)
@mock.patch('statsd.StatsClient')
def test_event_listener_attr_details(self, mock_client):
"""Test event listener."""
config = {
'statsd': {
'host': 'host',
'log_attributes': True
}
}
hass = mock.MagicMock()
statsd.setup(hass, config)
self.assertTrue(hass.bus.listen.called)
handler_method = hass.bus.listen.call_args_list[0][0][1]
valid = {'1': 1,
'1.0': 1.0,
STATE_ON: 1,
STATE_OFF: 0}
for in_, out in valid.items():
state = mock.MagicMock(state=in_,
attributes={"attribute key": 3.2})
handler_method(mock.MagicMock(data={'new_state': state}))
mock_client.return_value.gauge.assert_has_calls([
mock.call("%s.state" % state.entity_id,
out, statsd.DEFAULT_RATE),
mock.call("%s.attribute_key" % state.entity_id,
3.2, statsd.DEFAULT_RATE),
])
mock_client.return_value.gauge.reset_mock()
mock_client.return_value.incr.assert_called_once_with(
state.entity_id, rate=statsd.DEFAULT_RATE)
mock_client.return_value.incr.reset_mock()
for invalid in ('foo', '', object):
handler_method(mock.MagicMock(data={
'new_state': ha.State('domain.test', invalid, {})}))
self.assertFalse(mock_client.return_value.gauge.called)
self.assertFalse(mock_client.return_value.incr.called)
|
{
"content_hash": "a77c461b6502115ecd2cdfd57be78de7",
"timestamp": "",
"source": "github",
"line_count": 130,
"max_line_length": 72,
"avg_line_length": 35.73076923076923,
"alnum_prop": 0.5377825618945102,
"repo_name": "Julian/home-assistant",
"id": "076e41908967a8028a1de83026fdd110bf59be17",
"size": "4645",
"binary": false,
"copies": "1",
"ref": "refs/heads/py2",
"path": "tests/components/test_statsd.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "HTML",
"bytes": "1354942"
},
{
"name": "Python",
"bytes": "2755966"
},
{
"name": "Ruby",
"bytes": "379"
},
{
"name": "Shell",
"bytes": "6430"
}
],
"symlink_target": ""
}
|
"""
This module contains the main interface to the botocore package, the
Session object.
"""
import copy
import logging
import os
import platform
import shlex
from . import __version__
from . import config as bc_config
from . import credentials as bc_credentials
from . import client as bc_client
from .endpoint import EndpointCreator
from .exceptions import EventNotFound, ConfigNotFound, ProfileNotFound
from . import handlers
from .hooks import HierarchicalEmitter, first_non_none_response
from .loaders import Loader
from .provider import get_provider
from . import regions
from .model import ServiceModel
from . import service as bc_service
from . import waiter
class Session(object):
"""
The Session object collects together useful functionality
from `botocore` as well as important data such as configuration
information and credentials into a single, easy-to-use object.
:ivar available_profiles: A list of profiles defined in the config
file associated with this session.
:ivar profile: The current profile.
"""
AllEvents = {
'after-call': '.%s.%s',
'after-parsed': '.%s.%s.%s.%s',
'before-parameter-build': '.%s.%s',
'before-call': '.%s.%s',
'service-created': '',
'service-data-loaded': '.%s',
'creating-endpoint': '.%s',
'before-auth': '.%s',
'needs-retry': '.%s.%s',
}
"""
A dictionary where each key is an event name and the value
is the formatting string used to construct a new event.
"""
SessionVariables = {
# logical: config_file, env_var, default_value
'profile': (None, 'BOTO_DEFAULT_PROFILE', None),
'region': ('region', 'BOTO_DEFAULT_REGION', None),
'data_path': ('data_path', 'BOTO_DATA_PATH', None),
'config_file': (None, 'AWS_CONFIG_FILE', '~/.aws/config'),
'provider': ('provider', 'BOTO_PROVIDER_NAME', 'aws'),
# These variables are intended for internal use so don't have any
# user settable values.
# This is the shared credentials file amongst sdks.
'credentials_file': (None, None, '~/.aws/credentials'),
# These variables only exist in the config file.
# This is the number of seconds until we time out a request to
# the instance metadata service.
'metadata_service_timeout': ('metadata_service_timeout', None, 1),
# This is the number of request attempts we make until we give
# up trying to retrieve data from the instance metadata service.
'metadata_service_num_attempts': ('metadata_service_num_attempts',
None, 1),
}
"""
A default dictionary that maps the logical names for session variables
to the specific environment variables and configuration file names
that contain the values for these variables.
When creating a new Session object, you can pass in your own dictionary to
remap the logical names or to add new logical names. You can then get the
current value for these variables by using the ``get_config_variable``
method of the :class:`botocore.session.Session` class.
The default set of logical variable names are:
* profile - Default profile name you want to use.
* region - Default region name to use, if not otherwise specified.
* data_path - Additional directories to search for data files.
* config_file - Location of a Boto config file.
* provider - The name of the service provider (e.g. aws)
These form the keys of the dictionary. The values in the dictionary
    are tuples of (<config_name>, <environment variable>, <default value>).
The ``profile`` and ``config_file`` variables should always have a
None value for the first entry in the tuple because it doesn't make
sense to look inside the config file for the location of the config
file or for the default profile to use.
The ``config_name`` is the name to look for in the configuration file,
the ``env var`` is the OS environment variable (``os.environ``) to
use, and ``default_value`` is the value to use if no value is otherwise
found.
"""
FmtString = '%(asctime)s - %(name)s - %(levelname)s - %(message)s'
def __init__(self, session_vars=None, event_hooks=None,
include_builtin_handlers=True, loader=None):
"""
Create a new Session object.
:type session_vars: dict
:param session_vars: A dictionary that is used to override some or all
of the environment variables associated with this session. The
key/value pairs defined in this dictionary will override the
corresponding variables defined in ``SessionVariables``.
:type event_hooks: BaseEventHooks
:param event_hooks: The event hooks object to use. If one is not
provided, an event hooks object will be automatically created
for you.
:type include_builtin_handlers: bool
:param include_builtin_handlers: Indicates whether or not to
automatically register builtin handlers.
"""
self.session_var_map = copy.copy(self.SessionVariables)
if session_vars:
self.session_var_map.update(session_vars)
if event_hooks is None:
self._events = HierarchicalEmitter()
else:
self._events = event_hooks
if include_builtin_handlers:
self._register_builtin_handlers(self._events)
self.user_agent_name = 'Botocore'
self.user_agent_version = __version__
self.user_agent_extra = ''
self._profile = None
self._config = None
self._credentials = None
self._profile_map = None
self._provider = None
# This is a dict that stores per session specific config variable
# overrides via set_config_variable().
self._session_instance_vars = {}
if loader is None:
loader = Loader()
self._loader = loader
# _data_paths_added is used to track whether or not we added
# extra paths to the loader. We will do this lazily
# only when we ask for the loader.
self._data_paths_added = False
self._components = ComponentLocator()
self._register_components()
def _register_components(self):
self._register_credential_provider()
self._register_data_loader()
self._register_endpoint_resolver()
self._register_event_emitter()
def _register_event_emitter(self):
self._components.register_component('event_emitter', self._events)
def _register_credential_provider(self):
self._components.lazy_register_component(
'credential_provider',
lambda: bc_credentials.create_credential_resolver(self))
def _register_data_loader(self):
self._components.lazy_register_component(
'data_loader',
lambda: Loader(self.get_config_variable('data_path') or ''))
def _register_endpoint_resolver(self):
self._components.lazy_register_component(
'endpoint_resolver',
lambda: regions.EndpointResolver(self.get_data('aws/_endpoints')))
def _reset_components(self):
self._register_components()
def _register_builtin_handlers(self, events):
for spec in handlers.BUILTIN_HANDLERS:
if len(spec) == 2:
event_name, handler = spec
self.register(event_name, handler)
else:
event_name, handler, register_type = spec
if register_type is handlers.REGISTER_FIRST:
self._events.register_first(event_name, handler)
                elif register_type is handlers.REGISTER_LAST:
self._events.register_last(event_name, handler)
@property
def provider(self):
if self._provider is None:
self._provider = get_provider(
self, self.get_config_variable('provider'))
return self._provider
@property
def available_profiles(self):
return list(self._build_profile_map().keys())
def _build_profile_map(self):
# This will build the profile map if it has not been created,
# otherwise it will return the cached value. The profile map
# is a list of profile names, to the config values for the profile.
if self._profile_map is None:
self._profile_map = self.full_config['profiles']
return self._profile_map
@property
def profile(self):
return self._profile
@profile.setter
def profile(self, profile):
# Since provider can be specified in profile, changing the
# profile should reset the provider.
self._provider = None
self._profile = profile
# Need to potentially reload the config file/creds.
self._reset_components()
def get_config_variable(self, logical_name,
methods=('instance', 'env', 'config'),
default=None):
"""
Retrieve the value associated with the specified logical_name
from the environment or the config file. Values found in the
        environment variable take precedence over values found in the
config file. If no value can be found, a None will be returned.
:type logical_name: str
:param logical_name: The logical name of the session variable
you want to retrieve. This name will be mapped to the
appropriate environment variable name for this session as
well as the appropriate config file entry.
        :type methods: tuple
        :param methods: Defines which methods will be used to find
the variable value. By default, all available methods
are tried but you can limit which methods are used
by supplying a different value to this parameter.
Valid choices are: instance|env|config
:param default: The default value to return if there is no
value associated with the config file. This value will
override any default value specified in ``SessionVariables``.
        :returns: str value of variable or None if not defined.
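        Example (illustrative)::
            >>> session.set_config_variable('region', 'us-west-2')
            >>> session.get_config_variable('region')
            'us-west-2'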
"""
value = None
        # There are two types of defaults here. One is the
        # default value specified in the SessionVariables.
# The second is an explicit default value passed into this
# function (the default parameter).
# config_default is tracking the default value specified
# in the SessionVariables.
config_default = None
if logical_name in self.session_var_map:
# Short circuit case, check if the var has been explicitly
            # overridden via set_config_variable.
if 'instance' in methods and \
logical_name in self._session_instance_vars:
return self._session_instance_vars[logical_name]
config_name, envvar_name, config_default = self.session_var_map[
logical_name]
if logical_name in ('config_file', 'profile'):
config_name = None
if logical_name == 'profile' and self._profile:
value = self._profile
elif 'env' in methods and envvar_name and envvar_name in os.environ:
value = os.environ[envvar_name]
elif 'config' in methods:
if config_name:
config = self.get_scoped_config()
value = config.get(config_name)
# If we don't have a value at this point, we need to try to assign
# a default value. An explicit default argument will win over the
# default value from SessionVariables.
if value is None and default is not None:
value = default
if value is None and config_default is not None:
value = config_default
return value
def set_config_variable(self, logical_name, value):
"""Set a configuration variable to a specific value.
By using this method, you can override the normal lookup
process used in ``get_config_variable`` by explicitly setting
a value. Subsequent calls to ``get_config_variable`` will
use the ``value``. This gives you per-session specific
configuration values.
::
>>> # Assume logical name 'foo' maps to env var 'FOO'
>>> os.environ['FOO'] = 'myvalue'
>>> s.get_config_variable('foo')
'myvalue'
>>> s.set_config_variable('foo', 'othervalue')
>>> s.get_config_variable('foo')
'othervalue'
:type logical_name: str
:param logical_name: The logical name of the session variable
you want to set. These are the keys in ``SessionVariables``.
:param value: The value to associate with the config variable.
"""
self._session_instance_vars[logical_name] = value
def get_scoped_config(self):
"""
Returns the config values from the config file scoped to the current
profile.
The configuration data is loaded **only** from the config file.
It does not resolve variables based on different locations
(e.g. first from the session instance, then from environment
variables, then from the config file). If you want this lookup
behavior, use the ``get_config_variable`` method instead.
Note that this configuration is specific to a single profile (the
``profile`` session variable).
If the ``profile`` session variable is set and the profile does
not exist in the config file, a ``ProfileNotFound`` exception
will be raised.
:raises: ConfigNotFound, ConfigParseError, ProfileNotFound
:rtype: dict
"""
profile_name = self.get_config_variable('profile')
profile_map = self._build_profile_map()
# If a profile is not explicitly set return the default
# profile config or an empty config dict if we don't have
# a default profile.
if profile_name is None:
return profile_map.get('default', {})
elif profile_name not in profile_map:
# Otherwise if they specified a profile, it has to
# exist (even if it's the default profile) otherwise
# we complain.
raise ProfileNotFound(profile=profile_name)
else:
return profile_map[profile_name]
@property
def full_config(self):
"""Return the parsed config file.
The ``get_config`` method returns the config associated with the
specified profile. This property returns the contents of the
**entire** config file.
:rtype: dict
"""
if self._config is None:
try:
config_file = self.get_config_variable('config_file')
self._config = bc_config.load_config(config_file)
except ConfigNotFound:
self._config = {'profiles': {}}
try:
# Now we need to inject the profiles from the
# credentials file. We don't actually need the values
# in the creds file, only the profile names so that we
# can validate the user is not referring to a nonexistent
# profile.
cred_file = self.get_config_variable('credentials_file')
cred_profiles = bc_config.raw_config_parse(cred_file)
for profile in cred_profiles:
cred_vars = cred_profiles[profile]
if profile not in self._config['profiles']:
self._config['profiles'][profile] = cred_vars
else:
self._config['profiles'][profile].update(cred_vars)
except ConfigNotFound:
pass
return self._config
def set_credentials(self, access_key, secret_key, token=None):
"""
Manually create credentials for this session. If you would
prefer to use botocore without a config file, environment variables,
or IAM roles, you can pass explicit credentials into this
method to establish credentials for this session.
:type access_key: str
:param access_key: The access key part of the credentials.
:type secret_key: str
:param secret_key: The secret key part of the credentials.
:type token: str
        :param token: An optional session token used by STS session
credentials.
"""
self._credentials = bc_credentials.Credentials(access_key,
secret_key,
token)
def get_credentials(self):
"""
Return the :class:`botocore.credential.Credential` object
associated with this session. If the credentials have not
yet been loaded, this will attempt to load them. If they
have already been loaded, this will return the cached
credentials.
"""
if self._credentials is None:
self._credentials = self._components.get_component(
'credential_provider').load_credentials()
return self._credentials
def user_agent(self):
"""
Return a string suitable for use as a User-Agent header.
The string will be of the form:
<agent_name>/<agent_version> Python/<py_ver> <plat_name>/<plat_ver>
Where:
- agent_name is the value of the `user_agent_name` attribute
          of the session object (`Botocore` by default).
- agent_version is the value of the `user_agent_version`
          attribute of the session object (the botocore version by default).
        - py_ver is the version of the Python interpreter being used.
- plat_name is the name of the platform (e.g. Darwin)
- plat_ver is the version of the platform
If ``user_agent_extra`` is not empty, then this value will be
appended to the end of the user agent string.
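        An illustrative result: ``Botocore/0.0.0 Python/2.7.5 Darwin/13.0.0``
        (exact values depend on your botocore version and platform).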
"""
base = '%s/%s Python/%s %s/%s' % (self.user_agent_name,
self.user_agent_version,
platform.python_version(),
platform.system(),
platform.release())
if self.user_agent_extra:
base += ' %s' % self.user_agent_extra
return base
def get_data(self, data_path):
"""
Retrieve the data associated with `data_path`.
:type data_path: str
:param data_path: The path to the data you wish to retrieve.
"""
return self.get_component('data_loader').load_data(data_path)
def get_service_model(self, service_name, api_version=None):
"""Get the service model object.
:type service_name: string
:param service_name: The service name
:type api_version: string
:param api_version: The API version of the service. If none is
provided, then the latest API version will be used.
:rtype: L{botocore.model.ServiceModel}
:return: The botocore service model for the service.
"""
service_description = self.get_service_data(service_name, api_version)
return ServiceModel(service_description)
def get_waiter_model(self, service_name, api_version=None):
loader = self.get_component('data_loader')
latest = loader.determine_latest('%s/%s' % (
self.provider.name, service_name), api_version)
waiter_path = latest.replace('.api', '.waiters')
waiter_config = loader.load_data(waiter_path)
return waiter.WaiterModel(waiter_config)
def get_service_data(self, service_name, api_version=None):
"""
Retrieve the fully merged data associated with a service.
"""
data_path = '%s/%s' % (self.provider.name, service_name)
service_data = self.get_component('data_loader').load_service_model(
data_path,
api_version=api_version
)
event_name = self.create_event('service-data-loaded', service_name)
self._events.emit(event_name, service_data=service_data,
service_name=service_name, session=self)
return service_data
def get_available_services(self):
"""
Return a list of names of available services.
"""
data_path = '%s' % self.provider.name
return self.get_component('data_loader')\
.list_available_services(data_path)
def get_service(self, service_name, api_version=None):
"""
Get information about a service.
:type service_name: str
:param service_name: The name of the service (e.g. 'ec2')
:returns: :class:`botocore.service.Service`
"""
service = bc_service.get_service(self, service_name,
self.provider,
api_version=api_version)
event = self.create_event('service-created')
self._events.emit(event, service=service)
return service
def set_debug_logger(self, logger_name='botocore'):
"""
Convenience function to quickly configure full debug output
to go to the console.
"""
self.set_stream_logger(logger_name, logging.DEBUG)
def set_stream_logger(self, logger_name, log_level, stream=None,
format_string=None):
"""
Convenience method to configure a stream logger.
:type logger_name: str
:param logger_name: The name of the logger to configure
:type log_level: str
:param log_level: The log level to set for the logger. This
is any param supported by the ``.setLevel()`` method of
a ``Log`` object.
:type stream: file
:param stream: A file like object to log to. If none is provided
then sys.stderr will be used.
:type format_string: str
:param format_string: The format string to use for the log
formatter. If none is provided this will default to
``self.FmtString``.
"""
log = logging.getLogger(logger_name)
log.setLevel(logging.DEBUG)
ch = logging.StreamHandler(stream)
ch.setLevel(log_level)
# create formatter
if format_string is None:
format_string = self.FmtString
formatter = logging.Formatter(format_string)
# add formatter to ch
ch.setFormatter(formatter)
# add ch to logger
log.addHandler(ch)
def set_file_logger(self, log_level, path, logger_name='botocore'):
"""
Convenience function to quickly configure any level of logging
to a file.
:type log_level: int
:param log_level: A log level as specified in the `logging` module
:type path: string
:param path: Path to the log file. The file will be created
if it doesn't already exist.
"""
log = logging.getLogger(logger_name)
log.setLevel(logging.DEBUG)
# create console handler and set level to debug
ch = logging.FileHandler(path)
ch.setLevel(log_level)
# create formatter
formatter = logging.Formatter(self.FmtString)
# add formatter to ch
ch.setFormatter(formatter)
# add ch to logger
log.addHandler(ch)
def register(self, event_name, handler, unique_id=None,
unique_id_uses_count=False):
"""Register a handler with an event.
:type event_name: str
:param event_name: The name of the event.
:type handler: callable
:param handler: The callback to invoke when the event
is emitted. This object must be callable, and must
accept ``**kwargs``. If either of these preconditions are
not met, a ``ValueError`` will be raised.
:type unique_id: str
:param unique_id: An optional identifier to associate with the
registration. A unique_id can only be used once for
the entire session registration (unless it is unregistered).
This can be used to prevent an event handler from being
registered twice.
        :type unique_id_uses_count: boolean
        :param unique_id_uses_count: Specifies if the event should maintain
            a count when a ``unique_id`` is registered and unregistered. The
event can only be completely unregistered once every register call
using the unique id has been matched by an ``unregister`` call.
If ``unique_id`` is specified, subsequent ``register``
calls must use the same value for ``unique_id_uses_count``
as the ``register`` call that first registered the event.
:raises ValueError: If the call to ``register`` uses ``unique_id``
but the value for ``unique_id_uses_count`` differs from the
``unique_id_uses_count`` value declared by the very first
``register`` call for that ``unique_id``.
"""
self._events.register(event_name, handler, unique_id,
unique_id_uses_count=unique_id_uses_count)
def unregister(self, event_name, handler=None, unique_id=None,
unique_id_uses_count=False):
"""Unregister a handler with an event.
:type event_name: str
:param event_name: The name of the event.
:type handler: callable
:param handler: The callback to unregister.
:type unique_id: str
:param unique_id: A unique identifier identifying the callback
to unregister. You can provide either the handler or the
unique_id, you do not have to provide both.
        :type unique_id_uses_count: boolean
        :param unique_id_uses_count: Specifies if the event should maintain
            a count when a ``unique_id`` is registered and unregistered. The
event can only be completely unregistered once every ``register``
call using the ``unique_id`` has been matched by an ``unregister``
call. If the ``unique_id`` is specified, subsequent
``unregister`` calls must use the same value for
``unique_id_uses_count`` as the ``register`` call that first
registered the event.
:raises ValueError: If the call to ``unregister`` uses ``unique_id``
but the value for ``unique_id_uses_count`` differs from the
``unique_id_uses_count`` value declared by the very first
``register`` call for that ``unique_id``.
"""
self._events.unregister(event_name, handler=handler,
unique_id=unique_id,
unique_id_uses_count=unique_id_uses_count)
def register_event(self, event_name, fmtstr):
"""
Register a new event. The event will be added to ``AllEvents``
and will then be able to be created using ``create_event``.
:type event_name: str
:param event_name: The base name of the event.
:type fmtstr: str
:param fmtstr: The formatting string for the event.
"""
if event_name not in self.AllEvents:
self.AllEvents[event_name] = fmtstr
def create_event(self, event_name, *fmtargs):
"""
Creates a new event string that can then be emitted.
You could just create it manually, since it's just
        a string, but this helps to define the range of known events.
:type event_name: str
:param event_name: The base name of the new event.
:type fmtargs: tuple
:param fmtargs: A tuple of values that will be used as the
arguments pass to the string formatting operation. The
actual values passed depend on the type of event you
are creating.
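        For example, ``create_event('before-call', 'ec2', 'DescribeInstances')``
        yields ``'before-call.ec2.DescribeInstances'``, per the ``AllEvents``
        format strings.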
"""
if event_name in self.AllEvents:
fmt_string = self.AllEvents[event_name]
if fmt_string:
event = event_name + (fmt_string % fmtargs)
else:
event = event_name
return event
raise EventNotFound(event_name=event_name)
def emit(self, event_name, **kwargs):
return self._events.emit(event_name, **kwargs)
def emit_first_non_none_response(self, event_name, **kwargs):
responses = self._events.emit(event_name, **kwargs)
return first_non_none_response(responses)
def get_component(self, name):
return self._components.get_component(name)
def register_component(self, name, component):
self._components.register_component(name, component)
def lazy_register_component(self, name, component):
self._components.lazy_register_component(name, component)
def create_client(self, service_name, region_name=None, api_version=None,
use_ssl=True, verify=None, endpoint_url=None,
aws_access_key_id=None, aws_secret_access_key=None,
aws_session_token=None):
"""Create a botocore client.
:type service_name: string
:param service_name: The name of the service for which a client will
            be created. You can use the ``Session.get_available_services()``
method to get a list of all available service names.
:type region_name: string
:param region_name: The name of the region associated with the client.
A client is associated with a single region.
:type api_version: string
:param api_version: The API version to use. By default, botocore will
use the latest API version when creating a client. You only need
to specify this parameter if you want to use a previous API version
of the client.
:type use_ssl: boolean
:param use_ssl: Whether or not to use SSL. By default, SSL is used. Note that
not all services support non-ssl connections.
:type verify: boolean/string
:param verify: Whether or not to verify SSL certificates. By default SSL certificates
are verified. You can provide the following values:
* False - do not validate SSL certificates. SSL will still be
used (unless use_ssl is False), but SSL certificates
will not be verified.
* path/to/cert/bundle.pem - A filename of the CA cert bundle to
              use. You can specify this argument if you want to use a different
CA cert bundle than the one used by botocore.
:type endpoint_url: string
:param endpoint_url: The complete URL to use for the constructed client.
Normally, botocore will automatically construct the appropriate URL
to use when communicating with a service. You can specify a
complete URL (including the "http/https" scheme) to override this
behavior. If this value is provided, then ``use_ssl`` is ignored.
:type aws_access_key_id: string
:param aws_access_key_id: The access key to use when creating
the client. This is entirely optional, and if not provided,
the credentials configured for the session will automatically
be used. You only need to provide this argument if you want
to override the credentials used for this specific client.
:type aws_secret_access_key: string
:param aws_secret_access_key: The secret key to use when creating
the client. Same semantics as aws_access_key_id above.
:type aws_session_token: string
:param aws_session_token: The session token to use when creating
the client. Same semantics as aws_access_key_id above.
:rtype: botocore.client.BaseClient
:return: A botocore client instance
"""
loader = self.get_component('data_loader')
endpoint_creator = self._create_endpoint_creator(aws_access_key_id,
aws_secret_access_key,
aws_session_token)
event_emitter = self.get_component('event_emitter')
client_creator = bc_client.ClientCreator(loader, endpoint_creator,
event_emitter)
client = client_creator.create_client(service_name, region_name, use_ssl,
endpoint_url, verify,
aws_access_key_id,
aws_secret_access_key,
aws_session_token)
return client
def _create_endpoint_creator(self, aws_access_key_id, aws_secret_access_key,
aws_session_token):
resolver = self.get_component('endpoint_resolver')
region = self.get_config_variable('region')
event_emitter = self.get_component('event_emitter')
if aws_secret_access_key is None:
credentials = self.get_credentials()
else:
credentials = None
        user_agent = self.user_agent()
endpoint_creator = EndpointCreator(resolver, region, event_emitter,
credentials, user_agent)
return endpoint_creator
class ComponentLocator(object):
"""Service locator for session components."""
def __init__(self):
self._components = {}
self._deferred = {}
def get_component(self, name):
if name in self._deferred:
factory = self._deferred.pop(name)
self._components[name] = factory()
try:
return self._components[name]
except KeyError:
raise ValueError("Unknown component: %s" % name)
def register_component(self, name, component):
self._components[name] = component
try:
del self._deferred[name]
except KeyError:
pass
def lazy_register_component(self, name, no_arg_factory):
self._deferred[name] = no_arg_factory
try:
del self._components[name]
except KeyError:
pass
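# Illustrative use of the locator (assumed): construction is deferred
# until the first get_component() call.
#   locator = ComponentLocator()
#   locator.lazy_register_component('data_loader', lambda: Loader())
#   loader = locator.get_component('data_loader')  # factory runs here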
def get_session(env_vars=None):
"""
Return a new session object.
"""
return Session(env_vars)
|
{
"content_hash": "1b27191bb4d50623cb6936d48cc40a94",
"timestamp": "",
"source": "github",
"line_count": 856,
"max_line_length": 94,
"avg_line_length": 40.8107476635514,
"alnum_prop": 0.6063433903933131,
"repo_name": "gdm/aws-cfn-resource-bridge",
"id": "8adb78eda6873990fd12175ca08629c81add5e65",
"size": "35560",
"binary": false,
"copies": "3",
"ref": "refs/heads/master",
"path": "aws/cfn/bridge/vendored/botocore/session.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "1126757"
},
{
"name": "Shell",
"bytes": "5310"
}
],
"symlink_target": ""
}
|
from __future__ import unicode_literals, division, absolute_import
from builtins import * # noqa pylint: disable=unused-import, redefined-builtin
import pytest
import sys
import os
from jinja2 import Template
# TODO more checks: fail_html, etc.
@pytest.mark.online
@pytest.mark.usefixtures('tmpdir')
class TestDownload(object):
_config = """
tasks:
path_and_temp:
mock:
- {title: 'entry 1', url: 'http://speedtest.ftp.otenet.gr/files/test100k.db'}
accept_all: yes
download:
path: __tmp__
temp: __tmp__
just_path:
mock:
- {title: 'entry 2', url: 'http://speedtest.ftp.otenet.gr/files/test100k.db'}
accept_all: yes
download:
path: __tmp__
just_string:
mock:
- {title: 'entry 3', url: 'http://speedtest.ftp.otenet.gr/files/test100k.db'}
accept_all: yes
download: __tmp__
just_temp:
mock:
- {title: 'entry 4', url: 'http://speedtest.ftp.otenet.gr/files/test100k.db'}
accept_all: yes
download:
path: {{ temp_path_1 }}
temp: {{ temp_path_2 }}
"""
@pytest.fixture
def config(self, tmpdir):
temp_path_1 = tmpdir.mkdir('temp_path_1')
temp_path_2 = tmpdir.mkdir('temp_path_2')
return Template(self._config).render(
{'temp_path_1': temp_path_1.strpath, 'temp_path_2': temp_path_2.strpath}
)
def test_path_and_temp(self, execute_task):
"""Download plugin: Path and Temp directories set"""
task = execute_task('path_and_temp')
assert not task.aborted, 'Task should not have aborted'
def test_just_path(self, execute_task):
"""Download plugin: Path directory set as dict"""
task = execute_task('just_path')
assert not task.aborted, 'Task should not have aborted'
def test_just_string(self, execute_task):
"""Download plugin: Path directory set as string"""
task = execute_task('just_string')
assert not task.aborted, 'Task should not have aborted'
def test_just_temp(self, execute_task, manager):
task = execute_task('just_temp')
assert not task.aborted, 'Task should not have aborted'
temp_path = manager.config['tasks']['just_temp']['download']['temp']
assert not os.listdir(temp_path)
entry = task.find_entry(title='entry 4')
assert not entry.get('file')
# TODO: Fix this test
@pytest.mark.usefixtures('tmpdir')
@pytest.mark.skip(
reason='TODO: These are really just config validation tests, and I have config validation turned off'
' at the moment for unit tests due to some problems'
)
class TestDownloadTemp(object):
config = """
tasks:
temp_wrong_permission:
mock:
- {title: 'entry 1', url: 'http://www.speedtest.qsc.de/1kB.qsc'}
accept_all: yes
download:
path: __tmp__
temp: /root
temp_non_existent:
download:
path: __tmp__
temp: /a/b/c/non/existent/
temp_wrong_config_1:
download:
path: __tmp__
temp: no
temp_wrong_config_2:
download:
path: __tmp__
temp: 3
temp_empty:
download:
path: __tmp__
temp:
"""
@pytest.mark.skipif(
sys.platform.startswith('win'),
reason='Windows does not have a guaranteed "private" directory afaik',
)
def test_wrong_permission(self, execute_task):
"""Download plugin: Temp directory has wrong permissions"""
task = execute_task('temp_wrong_permission', abort_ok=True)
assert task.aborted
def test_temp_non_existent(self, execute_task):
"""Download plugin: Temp directory does not exist"""
task = execute_task('temp_non_existent', abort_ok=True)
assert task.aborted
def test_wrong_config_1(self, execute_task):
"""Download plugin: Temp directory config error [1of3]"""
task = execute_task('temp_wrong_config_1', abort_ok=True)
assert task.aborted
def test_wrong_config_2(self, execute_task):
"""Download plugin: Temp directory config error [2of3]"""
task = execute_task('temp_wrong_config_2', abort_ok=True)
assert task.aborted
def test_wrong_config_3(self, execute_task):
"""Download plugin: Temp directory config error [3of3]"""
task = execute_task('temp_empty', abort_ok=True)
assert task.aborted
@pytest.mark.online
@pytest.mark.usefixtures('tmpdir')
class TestDownloadAuth(object):
config = """
templates:
download:
disable: builtins
mock:
- title: digest
url: https://httpbin.org/digest-auth/auth/user/passwd/MD5
- title: basic
url: https://httpbin.org/basic-auth/user/passwd
accept_all: yes
download:
path: __tmp__
temp: __tmp__
tasks:
no_auth:
template:
- download
with_auth:
template:
- download
download_auth:
- digest-auth:
username: user
password: passwd
type: digest
- basic-auth:
username: user
password: passwd
"""
def test_download_auth(self, execute_task):
"""Test download basic and digest auth"""
task = execute_task('no_auth')
assert len(task.failed) == 2
task = execute_task('with_auth')
assert len(task.accepted) == 2
|
{
"content_hash": "97795fdc3a50acf9450d32c98afdcae1",
"timestamp": "",
"source": "github",
"line_count": 183,
"max_line_length": 105,
"avg_line_length": 32.26229508196721,
"alnum_prop": 0.551490514905149,
"repo_name": "JorisDeRieck/Flexget",
"id": "243708bfb3ff51ab16ec632fd2357390f838414c",
"size": "5904",
"binary": false,
"copies": "4",
"ref": "refs/heads/develop",
"path": "flexget/tests/test_download.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "11875"
},
{
"name": "Dockerfile",
"bytes": "2338"
},
{
"name": "HTML",
"bytes": "79800"
},
{
"name": "JavaScript",
"bytes": "263723"
},
{
"name": "Python",
"bytes": "3512234"
},
{
"name": "SRecode Template",
"bytes": "3"
},
{
"name": "Shell",
"bytes": "1576"
}
],
"symlink_target": ""
}
|
import argparse
import sqlite3
import os
def main():
""" 1) Fill in all null values with 0s
2) Normalize all data
3) Any additional options
"""
parser = argparse.ArgumentParser()
parser.add_argument("--svm", action="store_true", help="output svm file?")
args = parser.parse_args()
if(not os.path.exists("samples.db")):
print("Expected samples.db in path. Use net_preprocess.py first.")
exit(1)
autofill_nulls("samples")
normalize_data("samples")
autofill_nulls("udp_samples")
normalize_data("udp_samples")
autofill_nulls("tcp_samples")
normalize_data("tcp_samples")
autofill_nulls("icmp_samples")
normalize_data("icmp_samples")
def autofill_nulls(table):
"""
"""
conn = sqlite3.connect("samples.db")
c = conn.cursor()
    # Names of all columns
c.execute('''SELECT * FROM %s''' % (table))
column_names = [description[0] for description in c.description]
c.execute('''SELECT * FROM %s''' % (table))
samples = c.fetchall()
for sample in samples:
for idx, column in enumerate(column_names):
if(column != "uuid"):
if(sample[idx] == None):
new_value = 0
# Update db with new value
c.execute('''UPDATE %s SET %s=%d WHERE uuid=%d''' % (table, column, new_value, sample[0]))
conn.commit()
conn.close()
def normalize_data(table):
"""
"""
conn = sqlite3.connect("samples.db")
c = conn.cursor()
    # Names of all columns
c.execute('''SELECT * FROM %s''' % (table))
column_names = [description[0] for description in c.description]
# Get min,max of all columns
column_props = {}
for column in column_names:
if(column != "uuid"):
c.execute('''SELECT MIN(%s), MAX(%s) FROM %s''' % (column, column, table))
column_props[column] = c.fetchone()[:2]
c.execute('''SELECT * FROM %s''' % (table))
samples = c.fetchall()
for sample in samples:
new_values = []
for idx, column in enumerate(column_names):
if(column != "uuid" and column != "class"):
                # Avoid div by zero errors when max == min
                if(column_props[column][1] != column_props[column][0]):
# Update value to be (value - min)/(max - min)
new_value = ((sample[idx] - column_props[column][0])/(column_props[column][1] - column_props[column][0]))
# Update db with new value
c.execute('''UPDATE %s SET %s=%f WHERE uuid=%d''' % (table, column, new_value, sample[0]))
conn.commit()
conn.close()
def sql_2_libsvm():
#Turn sql data into lib svm format
#lib svm format: <label> <feature_idx>:<feature_value> <feature_idx>:<feature_value> ...
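    # e.g. a normalized row might serialize as: "1 1:0.25 2:0.78 3:0.0"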
#also scale any non-binary data from 0 to 1.
return
if __name__ == "__main__": main()
|
{
"content_hash": "4dd5e286a15dd0df31761cbb0372ffdf",
"timestamp": "",
"source": "github",
"line_count": 90,
"max_line_length": 125,
"avg_line_length": 33.78888888888889,
"alnum_prop": 0.5406116409075962,
"repo_name": "lattrelr7/cse881-pcap",
"id": "a74c0854e0a4bb7f897c3012c96db2dc3131a1b4",
"size": "3052",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "process_db.py",
"mode": "33261",
"license": "mit",
"language": [
{
"name": "Batchfile",
"bytes": "2878"
},
{
"name": "Matlab",
"bytes": "1345"
},
{
"name": "Python",
"bytes": "29436"
}
],
"symlink_target": ""
}
|
import abc
from collections import namedtuple
ActionStepWrapper = namedtuple('ActionStepWrapper', ['type', 'params'])
EVALUATOR_EVENT_TYPE = 'type'
class Recipe(object, metaclass=abc.ABCMeta):
@staticmethod
@abc.abstractmethod
def get_do_recipe(action_spec):
"""Execute the action.
:param action_spec: The action specification as described in the
template. contains:
1. type - action type e.g. raise_alarm. set_state, etc.
2. targets - target element(s) in the graph
3. properties - the action properties
:type action_spec: ActionSpecs
"""
pass
@staticmethod
@abc.abstractmethod
def get_undo_recipe(action_spec):
"""Revert the action.
:param action_spec: The action specification as described in the
template. contains:
1. type - action type e.g. raise_alarm. set_state, etc.
2. targets - target element(s) in the graph
3. properties - the action properties
:type action_spec: ActionSpecs
"""
pass
|
{
"content_hash": "2577ac3aa5b434f8b0d742524c92e1cc",
"timestamp": "",
"source": "github",
"line_count": 39,
"max_line_length": 72,
"avg_line_length": 28.205128205128204,
"alnum_prop": 0.6245454545454545,
"repo_name": "openstack/vitrage",
"id": "dcc07ef2ae81dd815000ae3c6c45c1d55bcde2f7",
"size": "1673",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "vitrage/evaluator/actions/recipes/base.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "HTML",
"bytes": "26541"
},
{
"name": "Mako",
"bytes": "896"
},
{
"name": "Python",
"bytes": "2074427"
},
{
"name": "Shell",
"bytes": "17668"
}
],
"symlink_target": ""
}
|
import cfscrape, sys
scraper = cfscrape.create_scraper() # returns a CloudflareScraper instance
# Or: scraper = cfscrape.CloudflareScraper() # CloudflareScraper inherits from requests.Session
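# Usage (inferred from the argv handling below):
#   python cloudflare.py content <url>   -> print the page body
#   python cloudflare.py header <url>    -> print the response headers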
if (sys.argv[1] == "content"):
print scraper.get(sys.argv[2]).content
if (sys.argv[1] == "header"):
print scraper.get(sys.argv[2]).headers
|
{
"content_hash": "ace97f191b4281ae9fd766bc492954a0",
"timestamp": "",
"source": "github",
"line_count": 10,
"max_line_length": 96,
"avg_line_length": 34.7,
"alnum_prop": 0.7175792507204611,
"repo_name": "greencardamom/WaybackMedic",
"id": "8f30b3b7f97b65c7deec8372528b61489b88ad0a",
"size": "558",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "WaybackMedic 2.1/cloudflare.py",
"mode": "33261",
"license": "mit",
"language": [
{
"name": "Awk",
"bytes": "714522"
},
{
"name": "JavaScript",
"bytes": "1167"
},
{
"name": "Lua",
"bytes": "3224"
},
{
"name": "Nim",
"bytes": "817133"
},
{
"name": "Python",
"bytes": "1002"
},
{
"name": "Roff",
"bytes": "1515"
},
{
"name": "Shell",
"bytes": "9918"
}
],
"symlink_target": ""
}
|
from openstack import service_filter
class OrchestrationService(service_filter.ServiceFilter):
"""The orchestration service."""
valid_versions = [service_filter.ValidVersion('v1')]
def __init__(self, version=None):
"""Create an orchestration service."""
super(OrchestrationService, self).__init__(
service_type='orchestration',
version=version
)
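# Illustrative use (assumed): pin the client to a supported version.
#   service = OrchestrationService(version='v1')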
|
{
"content_hash": "86fab60da3cb486bfc3f6e77b2c498df",
"timestamp": "",
"source": "github",
"line_count": 14,
"max_line_length": 57,
"avg_line_length": 29.285714285714285,
"alnum_prop": 0.651219512195122,
"repo_name": "dudymas/python-openstacksdk",
"id": "930e5b9ab5a10c76a5dfc9bdc0a5b3476ec4709d",
"size": "956",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "openstack/orchestration/orchestration_service.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "1062098"
},
{
"name": "Shell",
"bytes": "1383"
}
],
"symlink_target": ""
}
|
import os
from setuptools import setup
VERSION = "0.0.1"
# Utility function to read the README file.
# Used for the long_description.
# It's nice, because:
# 1) we have a top level README file
# 2) it's easier to type in the README file than to put a raw string
def read(fname):
return open(os.path.join(os.path.dirname(__file__), fname)).read()
setup(
name = "bottle-memcache-decorator",
packages = ['bottle_memcache_decorator'],
    package_data = {
        'bottle_memcache_decorator': ['LICENSE', 'README.md']
    },
version = VERSION,
author = "Chris Ziogas",
author_email = "ziogas_chr@hotmail.com",
url = "http://github.com/ziogaschr/bottle-memcache-decorator",
download_url='https://github.com/ziogaschr/bottle-memcache-decorator/tarball/v%s#egg=ziogaschr/bottle-memcache-decorator-%s' % (VERSION, VERSION),
description = ("Adds a memcache decorator in your Bottle application. \
It automatically stores the route result to memcache for routes \
where the bottle-memcache plugin is enabled."),
license = "MIT",
platforms = 'any',
keywords = "Bottle Plugin Memcache Decorator",
long_description = read('README.md'),
classifiers = [
"Development Status :: 3 - Alpha",
"Environment :: Plugins",
"Framework :: Bottle",
"License :: OSI Approved :: MIT License",
"Intended Audience :: Developers",
'Operating System :: OS Independent',
'Programming Language :: Python',
'Topic :: Internet :: WWW/HTTP :: Dynamic Content',
'Topic :: Software Development :: Libraries :: Python Modules'
],
install_requires = [
'bottle>=0.11',
'bottle-extras',
'bottle_memcache'
],
    test_suite = 'bottle_memcache_decorator.tests'
)
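# Local development sketch (assumed workflow, not taken from this repo's docs):
#   pip install -e .         # editable install of the plugin
#   python setup.py test     # runs the test_suite configured above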
|
{
"content_hash": "fa291dc22c88ba0b6c6ac01a11c24984",
"timestamp": "",
"source": "github",
"line_count": 50,
"max_line_length": 150,
"avg_line_length": 35.4,
"alnum_prop": 0.6559322033898305,
"repo_name": "ziogaschr/bottle-memcache-decorator",
"id": "976e906e3032a1bcc3a601a3d8f4731ee2b592df",
"size": "1770",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "setup.py",
"mode": "33261",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "13801"
}
],
"symlink_target": ""
}
|
import sys
import os
import json
import tempfile
import uuid
from unittest import TestCase
from flask import Flask, jsonify, request
from flask_orator import Orator, jsonify  # intentionally shadows Flask's jsonify
from sqlite3 import ProgrammingError
from orator.support.collection import Collection
PY2 = sys.version_info[0] == 2
class FlaskOratorTestCase(TestCase):
def setUp(self):
dbname = '%s.db' % str(uuid.uuid4())
self.dbpath = os.path.join(tempfile.gettempdir(), dbname)
app = Flask(__name__)
app.config['ORATOR_DATABASES'] = {
'test': {
'driver': 'sqlite',
'database': self.dbpath
}
}
db = Orator(app)
self.app = app
self.db = db
self.User = self.make_user_model()
@app.route('/')
def index():
return jsonify(self.User.order_by('id').paginate(5))
@app.route('/users', methods=['POST'])
def create():
data = request.json
user = self.User.create(**data)
return jsonify(user)
@app.route('/users/<user_id>', methods=['GET'])
def show(user_id):
return self.User.find_or_fail(user_id)
self.init_tables()
def tearDown(self):
os.remove(self.dbpath)
def init_tables(self):
with self.schema().create('users') as table:
table.increments('id')
table.string('name').unique()
table.string('email').unique()
table.timestamps()
def make_user_model(self):
class User(self.db.Model):
__fillable__ = ['name', 'email']
return User
    def post(self, client, endpoint, params_=None, **data):
        headers = [
            ('Content-Type', 'application/json')
        ]
        # params_ is accepted but currently unused; the payload is built
        # from **data below.
        if params_ is None:
            params_ = {}
        return client.post(endpoint, headers=headers,
                           data=json.dumps(data))
def get(self, client, endpoint, **data):
return client.get(endpoint)
def connection(self):
return self.db.Model.get_connection_resolver().connection()
def schema(self):
return self.connection().get_schema_builder()
def assertRaisesRegex(self, expected_exception, expected_regex,
callable_obj=None, *args, **kwargs):
if PY2:
return self.assertRaisesRegexp(
expected_exception, expected_regex,
callable_obj, *args, **kwargs)
return super(FlaskOratorTestCase, self).assertRaisesRegex(
expected_exception, expected_regex,
callable_obj, *args, **kwargs
)
def assertRegex(self, *args, **kwargs):
if PY2:
return self.assertRegexpMatches(*args, **kwargs)
else:
return super(FlaskOratorTestCase, self).assertRegex(*args, **kwargs)
class BasicAppTestCase(FlaskOratorTestCase):
def test_basic_insert(self):
c = self.app.test_client()
user_data = json.loads(
self.post(c, '/users', name='foo', email='foo@bar.com').data.decode()
)
self.assertEqual(1, user_data['id'])
self.assertEqual('foo', user_data['name'])
self.assertEqual('foo@bar.com', user_data['email'])
self.post(c, '/users', name='bar', email='bar@baz.com')
users = json.loads(
self.get(c, '/').data.decode()
)
self.assertEqual('foo', users[0]['name'])
self.assertEqual('bar', users[1]['name'])
def test_model_not_found_returns_404(self):
c = self.app.test_client()
response = self.get(c, '/users/9999')
self.assertEqual(404, response.status_code)
        self.assertRegex(str(response.data), r'No query results found for model \[User\]')
class PaginatorTestCase(FlaskOratorTestCase):
def test_default_page(self):
c = self.app.test_client()
for i in range(10):
self.post(c, '/users',
name='user %s' % i,
email='foo%s@bar.com' % i)
users = json.loads(
self.get(c, '/').data.decode()
)
self.assertEqual(5, len(users))
self.assertEqual(1, users[0]['id'])
self.assertEqual(5, users[-1]['id'])
def test_specific_page(self):
c = self.app.test_client()
for i in range(10):
self.post(c, '/users',
name='user %s' % i,
email='foo%s@bar.com' % i)
users = json.loads(
self.get(c, '/?page=2').data.decode()
)
self.assertEqual(5, len(users))
self.assertEqual(6, users[0]['id'])
self.assertEqual(10, users[-1]['id'])
def test_page_greater_than_max_page(self):
c = self.app.test_client()
for i in range(10):
self.post(c, '/users',
name='user %s' % i,
email='foo%s@bar.com' % i)
users = json.loads(
self.get(c, '/?page=5').data.decode()
)
self.assertEqual(0, len(users))
class ConsistenceTestCase(FlaskOratorTestCase):
def test_handlers(self):
connection = self.db.connection().get_connection()
c = self.app.test_client()
self.get(c, '/')
self.assertRaisesRegex(ProgrammingError, 'Cannot operate on a closed database.', connection.commit)
self.assertIsNone(self.db.connection().get_connection())
def test_behaves_like_manager(self):
@self.app.route('/users')
def users():
try:
users = jsonify(Collection(self.db.table('users').get()).map(lambda x: dict(x.items())))
except Exception:
raise
return users
c = self.app.test_client()
for i in range(10):
self.post(c, '/users',
name='user %s' % i,
email='foo%s@bar.com' % i)
users = json.loads(self.get(c, '/users').data.decode())
self.assertEqual(10, len(users))
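# To run this suite directly (sketch): `python -m unittest tests.test_orator`,
# or `pytest tests/test_orator.py` if pytest is available.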
|
{
"content_hash": "48d03010ce34a9cde23dd28c8dd60b01",
"timestamp": "",
"source": "github",
"line_count": 220,
"max_line_length": 107,
"avg_line_length": 27.577272727272728,
"alnum_prop": 0.5454095928795121,
"repo_name": "sdispater/flask-orator",
"id": "bcaf588abad09636032fb4fbb494fed3a6a4d8f1",
"size": "6092",
"binary": false,
"copies": "1",
"ref": "refs/heads/develop",
"path": "tests/test_orator.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "10195"
}
],
"symlink_target": ""
}
|
import sys
from pyspark.sql import SparkSession, functions, types
spark = SparkSession.builder.appName('word count').getOrCreate()
assert sys.version_info >= (3, 4) # make sure we have Python 3.4+
assert spark.version >= '2.1' # make sure we have Spark 2.1+
import string, re
wordbreak = r'[%s\s]+' % (re.escape(string.punctuation),) # regex that matches spaces and/or punctuation
def main():
in_directory = sys.argv[1]
out_directory = sys.argv[2]
    words = spark.read.text(in_directory)
    # Split each line into lowercase words on whitespace/punctuation.
    separated = words.select(
        functions.explode(
            functions.split(functions.lower(words["value"]), wordbreak)
        ).alias("word"))
    # Count occurrences of each word.
    word_count = separated.groupby("word").agg(
        functions.count("word").alias("count"))
    # Order by count (descending), then word; drop the empty strings the
    # split leaves behind.
    sorted_word_count = word_count.orderBy(
        functions.desc("count"), "word").filter(word_count["word"] != "")
    sorted_word_count.write.csv(out_directory, mode="overwrite")
if __name__ == '__main__':
main()
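# Example invocation (paths are placeholders):
#   spark-submit word_count.py /path/to/input-dir /path/to/output-dir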
|
{
"content_hash": "487b2fe9450dac18d31bfd7b6bd163f5",
"timestamp": "",
"source": "github",
"line_count": 27,
"max_line_length": 120,
"avg_line_length": 37.25925925925926,
"alnum_prop": 0.679920477137177,
"repo_name": "MockyJoke/numbers",
"id": "a978263cfc370c121156273d5ff716fa0757479c",
"size": "1006",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "ex12/code/word_count.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Jupyter Notebook",
"bytes": "565160"
},
{
"name": "Python",
"bytes": "108046"
}
],
"symlink_target": ""
}
|
"""
Project-wide application configuration.
DO NOT STORE SECRETS, PASSWORDS, ETC. IN THIS FILE.
They will be exposed to users. Use environment variables instead.
See get_secrets() below for a fast way to access them.
"""
import os
from authomatic.providers import oauth2
from authomatic import Authomatic
"""
NAMES
"""
# Project name to be used in urls
# Use dashes, not underscores!
PROJECT_SLUG = 'commencement'
# Project name to be used in file paths
PROJECT_FILENAME = 'commencement'
# The name of the repository containing the source
REPOSITORY_NAME = 'commencement'
GITHUB_USERNAME = 'nprapps'
REPOSITORY_URL = 'git@github.com:%s/%s.git' % (GITHUB_USERNAME, REPOSITORY_NAME)
REPOSITORY_ALT_URL = None  # 'git@bitbucket.org:nprapps/%s.git' % REPOSITORY_NAME
# Project name used for assets rig
# Should stay the same, even if PROJECT_SLUG changes
ASSETS_SLUG = 'commencement'
"""
DEPLOYMENT
"""
PRODUCTION_S3_BUCKET = {
'bucket_name': 'apps.npr.org',
'region': 'us-east-1'
}
STAGING_S3_BUCKET = {
'bucket_name': 'stage-apps.npr.org',
'region': 'us-east-1'
}
ASSETS_S3_BUCKET = {
'bucket_name': 'assets.apps.npr.org',
'region': 'us-east-1'
}
DEFAULT_MAX_AGE = 20
PRODUCTION_SERVERS = ['cron.nprapps.org']
STAGING_SERVERS = ['cron-staging.nprapps.org']
# Should code be deployed to the web/cron servers?
DEPLOY_TO_SERVERS = False
SERVER_USER = 'ubuntu'
SERVER_PYTHON = 'python2.7'
SERVER_PROJECT_PATH = '/home/%s/apps/%s' % (SERVER_USER, PROJECT_FILENAME)
SERVER_REPOSITORY_PATH = '%s/repository' % SERVER_PROJECT_PATH
SERVER_VIRTUALENV_PATH = '%s/virtualenv' % SERVER_PROJECT_PATH
# Should the crontab file be installed on the servers?
# If True, DEPLOY_TO_SERVERS must also be True
DEPLOY_CRONTAB = False
# Should the service configurations be installed on the servers?
# If True, DEPLOY_TO_SERVERS must also be True
DEPLOY_SERVICES = False
UWSGI_SOCKET_PATH = '/tmp/%s.uwsgi.sock' % PROJECT_FILENAME
# Services are the server-side services we want to enable and configure.
# A three-tuple following this format:
# (service name, service deployment path, service config file extension)
SERVER_SERVICES = [
('app', SERVER_REPOSITORY_PATH, 'ini'),
('uwsgi', '/etc/init', 'conf'),
('nginx', '/etc/nginx/locations-enabled', 'conf'),
]
# These variables will be set at runtime. See configure_targets() below
S3_BUCKET = None
S3_BASE_URL = None
S3_DEPLOY_URL = None
SERVERS = []
SERVER_BASE_URL = None
SERVER_LOG_PATH = None
DEBUG = True
"""
COPY EDITING
"""
COPY_GOOGLE_DOC_KEY = '0AlXMOHKxzQVRdFM0eHpucEdWRzRiMVFDdkY4amx6QkE'
COPY_PATH = 'data/copy.xlsx'
DATA_GOOGLE_DOC_KEY = '1dDSs5QSEjieH-dWTnR7BVUxI2gH6EiUI0vfmmHMnMQU'
"""
SHARING
"""
SHARE_URL = 'http://%s/%s/' % (PRODUCTION_S3_BUCKET['bucket_name'], PROJECT_SLUG)
# Will be resized to 120x120, can't be larger than 1MB
TWITTER_IMAGE_URL = '%sassets/promo_art.png' % SHARE_URL
TWITTER_HANDLE = '@npr_ed'
# 16:9 ("wide") image. FB uses 16:9 in the newsfeed and crops to square in timelines.
# No documented restrictions on size
FACEBOOK_IMAGE_URL = TWITTER_IMAGE_URL
FACEBOOK_APP_ID = '138837436154588'
# Thumbnail image for Google News / Search.
# No documented restrictions on resolution or size
GOOGLE_IMAGE_URL = TWITTER_IMAGE_URL
"""
ADS
"""
NPR_DFP = {
'STORY_ID': '1013',
'TARGET': 'News_U_S__Education',
'ENVIRONMENT': 'NPR',
'TESTSERVER': 'false'
}
"""
SERVICES
"""
NPR_GOOGLE_ANALYTICS = {
'ACCOUNT_ID': 'UA-5828686-4',
'DOMAIN': PRODUCTION_S3_BUCKET['bucket_name'],
'TOPICS': '[1003,1013]',
}
VIZ_GOOGLE_ANALYTICS = {
'ACCOUNT_ID': 'UA-5828686-75'
}
DISQUS_API_KEY = 'tIbSzEhGBE9NIptbnQWn4wy1gZ546CsQ2IHHtxJiYAceyyPoAkDkVnQfCifmCaQW'
DISQUS_UUID = '$NEW_DISQUS_UUID'
"""
OAUTH
"""
GOOGLE_OAUTH_CREDENTIALS_PATH = '~/.google_oauth_credentials'
authomatic_config = {
'google': {
'id': 1,
'class_': oauth2.Google,
'consumer_key': os.environ.get('GOOGLE_OAUTH_CLIENT_ID'),
'consumer_secret': os.environ.get('GOOGLE_OAUTH_CONSUMER_SECRET'),
'scope': ['https://www.googleapis.com/auth/drive', 'https://www.googleapis.com/auth/userinfo.email'],
'offline': True,
},
}
authomatic = Authomatic(authomatic_config, os.environ.get('AUTHOMATIC_SALT'))
"""
APP-SPECIFIC
"""
TAGS = {
'balance': 'Balance',
'be-kind': 'Be kind',
'change-the-world': 'Change the world',
'dont-give-up': 'Don\'t give up',
'dream': 'Dream',
'embrace-failure': 'Embrace failure',
'fight-for-equality': 'Fight for equality',
'inner-voice': 'Inner voice',
'make-art': 'Make art',
'play': 'Play',
'remember-history': 'Remember history',
'tips': 'Tips',
'unplug': 'Unplug',
'work-hard': 'Work hard',
'yolo': 'YOLO'
}
INITIAL_SPEECH_SLUG = 'mindy-kaling-harvard-law-school-2014'
COMMENT_PROMPT = 'Leave a comment'
"""
Utilities
"""
def get_secrets():
"""
A method for accessing our secrets.
"""
secrets_dict = {}
for k,v in os.environ.items():
if k.startswith(PROJECT_SLUG):
k = k[len(PROJECT_SLUG) + 1:]
secrets_dict[k] = v
return secrets_dict
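# Example (hypothetical variable): with PROJECT_SLUG = 'commencement', an
# environment export such as commencement_POSTGRES_PASSWORD=... is returned
# by get_secrets() as secrets_dict['POSTGRES_PASSWORD'].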
def configure_targets(deployment_target):
"""
Configure deployment targets. Abstracted so this can be
overriden for rendering before deployment.
"""
global S3_BUCKET
global S3_BASE_URL
global S3_DEPLOY_URL
global SERVERS
global SERVER_BASE_URL
global SERVER_LOG_PATH
global DEBUG
global DEPLOYMENT_TARGET
global DISQUS_SHORTNAME
global ASSETS_MAX_AGE
if deployment_target == 'production':
S3_BUCKET = PRODUCTION_S3_BUCKET
S3_BASE_URL = 'https://%s/%s' % (S3_BUCKET['bucket_name'], PROJECT_SLUG)
S3_DEPLOY_URL = 's3://%s/%s' % (S3_BUCKET['bucket_name'], PROJECT_SLUG)
SERVERS = PRODUCTION_SERVERS
SERVER_BASE_URL = 'http://%s/%s' % (SERVERS[0], PROJECT_SLUG)
SERVER_LOG_PATH = '/var/log/%s' % PROJECT_FILENAME
DISQUS_SHORTNAME = 'npr-news'
DEBUG = False
ASSETS_MAX_AGE = 86400
elif deployment_target == 'staging':
S3_BUCKET = STAGING_S3_BUCKET
S3_BASE_URL = 'http://%s/%s' % (S3_BUCKET['bucket_name'], PROJECT_SLUG)
S3_DEPLOY_URL = 's3://%s/%s' % (S3_BUCKET['bucket_name'], PROJECT_SLUG)
SERVERS = STAGING_SERVERS
SERVER_BASE_URL = 'http://%s/%s' % (SERVERS[0], PROJECT_SLUG)
SERVER_LOG_PATH = '/var/log/%s' % PROJECT_FILENAME
DISQUS_SHORTNAME = 'nprviz-test'
DEBUG = True
ASSETS_MAX_AGE = 20
else:
S3_BUCKET = None
S3_BASE_URL = 'http://127.0.0.1:8000'
S3_DEPLOY_URL = None
SERVERS = []
SERVER_BASE_URL = 'http://127.0.0.1:8001/%s' % PROJECT_SLUG
SERVER_LOG_PATH = '/tmp'
DISQUS_SHORTNAME = 'nprviz-test'
DEBUG = True
ASSETS_MAX_AGE = 20
DEPLOYMENT_TARGET = deployment_target
"""
Run automated configuration
"""
DEPLOYMENT_TARGET = os.environ.get('DEPLOYMENT_TARGET', None)
configure_targets(DEPLOYMENT_TARGET)
|
{
"content_hash": "16186b7c9430c796421327119a0a9eb5",
"timestamp": "",
"source": "github",
"line_count": 262,
"max_line_length": 109,
"avg_line_length": 26.923664122137403,
"alnum_prop": 0.6589169265664871,
"repo_name": "nprapps/commencement",
"id": "5e2db3313451271a1499f9a7397c406e06eba410",
"size": "7077",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "app_config.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "57994"
},
{
"name": "HTML",
"bytes": "29017"
},
{
"name": "JavaScript",
"bytes": "548504"
},
{
"name": "Python",
"bytes": "98091"
},
{
"name": "Shell",
"bytes": "83"
}
],
"symlink_target": ""
}
|
import warnings
warnings.filterwarnings("ignore",
"the rgbimg module is deprecated",
DeprecationWarning,
".*test_rgbimg$")
import rgbimg
import os, uu
from test.test_support import verbose, unlink, findfile
class error(Exception):
pass
print 'RGBimg test suite:'
def testimg(rgb_file, raw_file):
rgb_file = findfile(rgb_file)
raw_file = findfile(raw_file)
width, height = rgbimg.sizeofimage(rgb_file)
rgb = rgbimg.longimagedata(rgb_file)
if len(rgb) != width * height * 4:
raise error, 'bad image length'
raw = open(raw_file, 'rb').read()
if rgb != raw:
raise error, \
'images don\'t match for '+rgb_file+' and '+raw_file
for depth in [1, 3, 4]:
rgbimg.longstoimage(rgb, width, height, depth, '@.rgb')
os.unlink('@.rgb')
table = [
('testrgb'+os.extsep+'uue', 'test'+os.extsep+'rgb'),
('testimg'+os.extsep+'uue', 'test'+os.extsep+'rawimg'),
('testimgr'+os.extsep+'uue', 'test'+os.extsep+'rawimg'+os.extsep+'rev'),
]
for source, target in table:
source = findfile(source)
target = findfile(target)
if verbose:
print "uudecoding", source, "->", target, "..."
uu.decode(source, target)
if verbose:
print "testing..."
ttob = rgbimg.ttob(0)
if ttob != 0:
raise error, 'ttob should start out as zero'
testimg('test'+os.extsep+'rgb', 'test'+os.extsep+'rawimg')
ttob = rgbimg.ttob(1)
if ttob != 0:
raise error, 'ttob should be zero'
testimg('test'+os.extsep+'rgb', 'test'+os.extsep+'rawimg'+os.extsep+'rev')
ttob = rgbimg.ttob(0)
if ttob != 1:
raise error, 'ttob should be one'
ttob = rgbimg.ttob(0)
if ttob != 0:
raise error, 'ttob should be zero'
for source, target in table:
unlink(findfile(target))
|
{
"content_hash": "b9cd880e93e734059309a98538b7f35f",
"timestamp": "",
"source": "github",
"line_count": 68,
"max_line_length": 76,
"avg_line_length": 26.794117647058822,
"alnum_prop": 0.6114160263446762,
"repo_name": "TathagataChakraborti/resource-conflicts",
"id": "650c02aa0f9a6adc32080ee88b3448a5908c6524",
"size": "1847",
"binary": false,
"copies": "19",
"ref": "refs/heads/master",
"path": "PLANROB-2015/seq-sat-lama/Python-2.5.2/Lib/test/test_rgbimg.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Assembly",
"bytes": "207947"
},
{
"name": "Batchfile",
"bytes": "9764"
},
{
"name": "C",
"bytes": "14253103"
},
{
"name": "C++",
"bytes": "754817"
},
{
"name": "CSS",
"bytes": "9779"
},
{
"name": "DIGITAL Command Language",
"bytes": "13234"
},
{
"name": "Emacs Lisp",
"bytes": "174752"
},
{
"name": "Groff",
"bytes": "43625"
},
{
"name": "HTML",
"bytes": "418642"
},
{
"name": "Inno Setup",
"bytes": "18796"
},
{
"name": "Makefile",
"bytes": "392287"
},
{
"name": "Matlab",
"bytes": "918"
},
{
"name": "Objective-C",
"bytes": "28604"
},
{
"name": "Perl",
"bytes": "163937"
},
{
"name": "Prolog",
"bytes": "66"
},
{
"name": "Python",
"bytes": "38769203"
},
{
"name": "R",
"bytes": "2349"
},
{
"name": "SAS",
"bytes": "57249"
},
{
"name": "Shell",
"bytes": "173594"
},
{
"name": "TeX",
"bytes": "5169842"
},
{
"name": "VimL",
"bytes": "9563"
},
{
"name": "Visual Basic",
"bytes": "1443"
}
],
"symlink_target": ""
}
|
"""Functional tests for fused batch norm operations."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from absl.testing import parameterized
import numpy as np
from tensorflow.compiler.tests import test_utils
from tensorflow.compiler.tests import xla_test
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import gen_nn_ops
from tensorflow.python.ops import gradient_checker
from tensorflow.python.ops import nn
from tensorflow.python.platform import test
DATA_FORMATS = (
("_data_format_NHWC", "NHWC"),
("_data_format_NCHW", "NCHW"),
)
DATA_FORMATS_AND_AVG_FACTORS = (
("_data_format_NHWC_no_averaging", "NHWC", 1.0),
("_data_format_NHWC_averaging", "NHWC", 0.6),
("_data_format_NCHW_no_averaging", "NCHW", 1.0),
("_data_format_NCHW_averaging", "NCHW", 0.6),
)
class FusedBatchNormTest(xla_test.XLATestCase, parameterized.TestCase):
def _reference_training(self, x, scale, offset, old_mean, old_var, epsilon,
exponential_avg_factor, data_format):
if data_format != "NHWC":
raise ValueError("data_format must be NHWC, got %s." % data_format)
x_square = x * x
x_square_sum = np.sum(x_square, (0, 1, 2))
x_sum = np.sum(x, axis=(0, 1, 2))
element_count = np.size(x) / int(np.shape(x)[-1])
mean = x_sum / element_count
var = x_square_sum / element_count - mean * mean
factor = element_count / max(element_count - 1, 1)
corrected_var = var * factor
normalized = (x - mean) / np.sqrt(var + epsilon)
if exponential_avg_factor != 1.0:
mean = (1.0 -
exponential_avg_factor) * old_mean + exponential_avg_factor * mean
corrected_var = (1.0 - exponential_avg_factor
) * old_var + exponential_avg_factor * corrected_var
return (normalized * scale + offset), mean, var, corrected_var
def _reference_grad(self, x, grad_y, scale, mean, var, epsilon, data_format):
# Use the following formulas to calculate gradients:
# grad_scale =
# sum(grad_y * (x - mean)) * rsqrt(var + epsilon)
#
# grad_offset = sum(output_y)
#
# grad_x =
# 1/N * scale * rsqrt(var + epsilon) * (N * grad_y - sum(grad_y) -
# (x - mean) * sum(grad_y * (x - mean)) / (var + epsilon))
if data_format != "NHWC":
raise ValueError("data_format must be NHWC, got %s." % data_format)
grad_x = scale * (grad_y - np.mean(grad_y, axis=(0, 1, 2)) -
(x - mean) * np.mean(grad_y *
(x - mean), axis=(0, 1, 2)) /
(var + epsilon)) / np.sqrt(var + epsilon)
grad_scale = np.sum(
grad_y * (x - mean) / np.sqrt(var + epsilon), axis=(0, 1, 2))
grad_offset = np.sum(grad_y, axis=(0, 1, 2))
return grad_x, grad_scale, grad_offset
@parameterized.named_parameters(*DATA_FORMATS)
def testInference(self, data_format):
channel = 3
x_shape = [2, 2, 6, channel]
scale_shape = [channel]
x_val = np.random.random_sample(x_shape).astype(np.float32)
scale_val = np.random.random_sample(scale_shape).astype(np.float32)
offset_val = np.random.random_sample(scale_shape).astype(np.float32)
epsilon = 0.001
exponential_avg_factor = 1.0
data_format_src = "NHWC"
y_ref, mean_ref, var_ref, _ = self._reference_training(
x_val, scale_val, offset_val, None, None, epsilon,
exponential_avg_factor, data_format_src)
with self.session() as sess, self.test_scope():
# To avoid constant folding
x_val_converted = test_utils.ConvertBetweenDataFormats(
x_val, data_format_src, data_format)
y_ref_converted = test_utils.ConvertBetweenDataFormats(
y_ref, data_format_src, data_format)
t_val = array_ops.placeholder(
np.float32, shape=x_val_converted.shape, name="x")
scale = array_ops.placeholder(np.float32, shape=scale_shape, name="scale")
offset = array_ops.placeholder(
np.float32, shape=scale_shape, name="offset")
y, mean, variance = nn.fused_batch_norm(
t_val,
scale,
offset,
mean=mean_ref,
variance=var_ref,
epsilon=epsilon,
data_format=data_format,
is_training=False)
y_val, _, _ = sess.run([y, mean, variance], {
t_val: x_val_converted,
scale: scale_val,
offset: offset_val
})
self.assertAllClose(y_val, y_ref_converted, atol=1e-3)
def _testLearning(self, use_gradient_checker, data_format,
exponential_avg_factor):
channel = 3
x_shape = [2, 2, 6, channel]
scale_shape = [channel]
x_val = np.random.random_sample(x_shape).astype(np.float32)
scale_val = np.random.random_sample(scale_shape).astype(np.float32)
offset_val = np.random.random_sample(scale_shape).astype(np.float32)
mean_val = np.random.random_sample(scale_shape).astype(np.float32)
var_val_corr = np.random.random_sample(scale_shape).astype(np.float32)
epsilon = 0.001
data_format_src = "NHWC"
# When in training mode, fused_batchnorm applies an implicit Bessel's
# correction. So we have to use the corrected variance here, as well.
y_ref, mean_ref, _, var_ref_corr = self._reference_training(
x_val, scale_val, offset_val, mean_val, var_val_corr, epsilon,
exponential_avg_factor, data_format_src)
with self.session() as sess, self.test_scope():
# To avoid constant folding
x_val_converted = test_utils.ConvertBetweenDataFormats(
x_val, data_format_src, data_format)
y_ref_converted = test_utils.ConvertBetweenDataFormats(
y_ref, data_format_src, data_format)
t_val = array_ops.placeholder(
np.float32, shape=x_val_converted.shape, name="x")
scale = array_ops.placeholder(np.float32, shape=scale_shape, name="scale")
offset = array_ops.placeholder(
np.float32, shape=scale_shape, name="offset")
if exponential_avg_factor == 1.0:
old_mean = None
old_var = None
else:
old_mean = array_ops.placeholder(
np.float32, shape=scale_shape, name="old_mean")
old_var = array_ops.placeholder(
np.float32, shape=scale_shape, name="old_var")
y, mean, var = nn.fused_batch_norm(
t_val,
scale,
offset,
mean=old_mean,
variance=old_var,
epsilon=epsilon,
exponential_avg_factor=exponential_avg_factor,
data_format=data_format,
is_training=True)
if exponential_avg_factor == 1.0:
feed_dict = {
t_val: x_val_converted,
scale: scale_val,
offset: offset_val,
}
else:
feed_dict = {
t_val: x_val_converted,
scale: scale_val,
offset: offset_val,
old_mean: mean_val,
old_var: var_val_corr
}
# Check gradient.
if use_gradient_checker:
err = gradient_checker.compute_gradient_error(
t_val,
x_val_converted.shape,
y,
x_val_converted.shape,
extra_feed_dict=feed_dict)
self.assertLess(err, 1e-3)
y_tf, mean_tf, var_tf = sess.run([y, mean, var], feed_dict)
self.assertAllClose(y_tf, y_ref_converted, atol=1e-3)
self.assertAllClose(mean_tf, mean_ref, atol=1e-3)
self.assertAllClose(var_tf, var_ref_corr, atol=1e-3)
@parameterized.named_parameters(*DATA_FORMATS_AND_AVG_FACTORS)
def testLearning(self, data_format, exponential_avg_factor):
self._testLearning(False, data_format, exponential_avg_factor)
@parameterized.named_parameters(*DATA_FORMATS_AND_AVG_FACTORS)
def testLearningWithGradientChecker(self, data_format,
exponential_avg_factor):
self._testLearning(True, data_format, exponential_avg_factor)
@parameterized.named_parameters(*DATA_FORMATS)
def testGradientTraining(self, data_format):
# TODO(b/64270657): Use gradient_checker here in addition to comparing with
# this reference implementation.
channel = 3
x_shape = [2, 2, 6, channel]
scale_shape = [channel]
grad_val = np.random.random_sample(x_shape).astype(np.float32)
x_val = np.random.random_sample(x_shape).astype(np.float32)
scale_val = np.random.random_sample(scale_shape).astype(np.float32)
mean_val = np.random.random_sample(scale_shape).astype(np.float32)
var_val = np.random.random_sample(scale_shape).astype(np.float32)
epsilon = 0.001
    # The TensorFlow FusedBatchNormGrad training operation takes two inputs
    # with implementation-defined values. In theory, the only correct values
    # for these inputs are the corresponding reserve_space_{1|2} outputs from
    # the FusedBatchNorm training operation. However, in practice, we rely on the
# first one being mean on {C|G}PU, and the second one being variance on CPU
# and inverse(sqrt(variance + epsilon)) on GPU (we test this assumption
# separately).
reserve_space_1_val = mean_val
if self.device == "XLA_GPU":
reserve_space_2_val = np.reciprocal(np.sqrt(var_val + epsilon))
else:
reserve_space_2_val = var_val
data_format_src = "NHWC"
grad_x_ref, grad_scale_ref, grad_offset_ref = self._reference_grad(
x_val, grad_val, scale_val, mean_val, var_val, epsilon, data_format_src)
with self.session() as sess, self.test_scope():
grad_val_converted = test_utils.ConvertBetweenDataFormats(
grad_val, data_format_src, data_format)
x_val_converted = test_utils.ConvertBetweenDataFormats(
x_val, data_format_src, data_format)
grad_x_ref_converted = test_utils.ConvertBetweenDataFormats(
grad_x_ref, data_format_src, data_format)
grad = array_ops.placeholder(
np.float32, shape=x_val_converted.shape, name="grad")
x = array_ops.placeholder(
np.float32, shape=x_val_converted.shape, name="x")
reserve_space_1 = array_ops.placeholder(
np.float32, shape=scale_shape, name="reserve_space_1")
reserve_space_2 = array_ops.placeholder(
np.float32, shape=scale_shape, name="reserve_space_2")
scale = array_ops.placeholder(np.float32, shape=scale_shape, name="scale")
grad_x, grad_scale, grad_offset, _, _ = gen_nn_ops.fused_batch_norm_grad(
grad,
x,
scale,
reserve_space_1,
reserve_space_2,
data_format=data_format,
is_training=True)
grad_x_val, grad_scale_val, grad_offset_val = sess.run(
[grad_x, grad_scale, grad_offset], {
grad: grad_val_converted,
x: x_val_converted,
reserve_space_1: reserve_space_1_val,
reserve_space_2: reserve_space_2_val,
scale: scale_val
})
self.assertAllClose(grad_x_val, grad_x_ref_converted, atol=1e-2)
self.assertAllClose(grad_scale_val, grad_scale_ref, atol=1e-2)
self.assertAllClose(grad_offset_val, grad_offset_ref, atol=1e-3)
@parameterized.named_parameters(*DATA_FORMATS)
def testGradientInference(self, data_format):
# TODO(b/64270657): Use gradient_checker here in addition to comparing with
# this reference implementation.
channel = 3
x_shape = [2, 2, 6, channel]
scale_shape = [channel]
grad_val = np.random.random_sample(x_shape).astype(np.float32)
x_val = np.random.random_sample(x_shape).astype(np.float32)
scale_val = np.random.random_sample(scale_shape).astype(np.float32)
mean_val = np.random.random_sample(scale_shape).astype(np.float32)
var_val = np.random.random_sample(scale_shape).astype(np.float32)
data_format_src = "NHWC"
with self.session() as sess, self.test_scope():
grad_val_converted = test_utils.ConvertBetweenDataFormats(
grad_val, data_format_src, data_format)
x_val_converted = test_utils.ConvertBetweenDataFormats(
x_val, data_format_src, data_format)
grad = array_ops.placeholder(
np.float32, shape=x_val_converted.shape, name="grad")
x = array_ops.placeholder(
np.float32, shape=x_val_converted.shape, name="x")
mean = array_ops.placeholder(np.float32, shape=scale_shape, name="mean")
var = array_ops.placeholder(np.float32, shape=scale_shape, name="var")
scale = array_ops.placeholder(np.float32, shape=scale_shape, name="scale")
with self.test_scope():
out = gen_nn_ops.fused_batch_norm_grad(
grad,
x,
scale,
mean,
var,
data_format=data_format,
is_training=False)
grad_x, grad_scale, grad_offset, _, _ = out
ref_x, ref_scale, ref_offset, _, _ = gen_nn_ops.fused_batch_norm_grad(
grad, x, scale, mean, var, data_format=data_format, is_training=False)
      grad_x_val, grad_scale_val, grad_offset_val = sess.run(
[grad_x, grad_scale, grad_offset], {
grad: grad_val_converted,
x: x_val_converted,
mean: mean_val,
var: var_val,
scale: scale_val
})
      grad_x_ref, grad_scale_ref, grad_offset_ref = sess.run(
[ref_x, ref_scale, ref_offset], {
grad: grad_val_converted,
x: x_val_converted,
mean: mean_val,
var: var_val,
scale: scale_val
})
self.assertAllClose(grad_x_val, grad_x_ref, atol=1e-2)
self.assertAllClose(grad_scale_val, grad_scale_ref, atol=1e-2)
self.assertAllClose(grad_offset_val, grad_offset_ref, atol=1e-3)
if __name__ == "__main__":
test.main()
|
{
"content_hash": "f9b8d18bae8d84efbef48a83f5224e4e",
"timestamp": "",
"source": "github",
"line_count": 338,
"max_line_length": 80,
"avg_line_length": 40.85502958579882,
"alnum_prop": 0.6204649141864002,
"repo_name": "davidzchen/tensorflow",
"id": "a36effe5984eec6477df03d48553ecaf3bbb036e",
"size": "14498",
"binary": false,
"copies": "7",
"ref": "refs/heads/master",
"path": "tensorflow/compiler/tests/fused_batchnorm_test.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Assembly",
"bytes": "32240"
},
{
"name": "Batchfile",
"bytes": "55269"
},
{
"name": "C",
"bytes": "887514"
},
{
"name": "C#",
"bytes": "8562"
},
{
"name": "C++",
"bytes": "81865221"
},
{
"name": "CMake",
"bytes": "6500"
},
{
"name": "Dockerfile",
"bytes": "112853"
},
{
"name": "Go",
"bytes": "1867241"
},
{
"name": "HTML",
"bytes": "4686483"
},
{
"name": "Java",
"bytes": "971474"
},
{
"name": "Jupyter Notebook",
"bytes": "549437"
},
{
"name": "LLVM",
"bytes": "6536"
},
{
"name": "MLIR",
"bytes": "1921657"
},
{
"name": "Makefile",
"bytes": "65901"
},
{
"name": "Objective-C",
"bytes": "116558"
},
{
"name": "Objective-C++",
"bytes": "316967"
},
{
"name": "PHP",
"bytes": "4236"
},
{
"name": "Pascal",
"bytes": "318"
},
{
"name": "Pawn",
"bytes": "19963"
},
{
"name": "Perl",
"bytes": "7536"
},
{
"name": "Python",
"bytes": "37285698"
},
{
"name": "RobotFramework",
"bytes": "1779"
},
{
"name": "Roff",
"bytes": "2705"
},
{
"name": "Ruby",
"bytes": "7464"
},
{
"name": "SWIG",
"bytes": "8992"
},
{
"name": "Shell",
"bytes": "700629"
},
{
"name": "Smarty",
"bytes": "35540"
},
{
"name": "Starlark",
"bytes": "3604653"
},
{
"name": "Swift",
"bytes": "62814"
},
{
"name": "Vim Snippet",
"bytes": "58"
}
],
"symlink_target": ""
}
|
import txmongo
from twisted.internet import defer, reactor
@defer.inlineCallbacks
def example():
mongo = yield txmongo.MongoConnection()
foo = mongo.foo # `foo` database
test = foo.test # `test` collection
yield test.insert({"src":"Twitter", "content":"bla bla"}, safe=True)
yield test.insert({"src":"Twitter", "content":"more data"}, safe=True)
yield test.insert({"src":"Wordpress", "content":"blog article 1"}, safe=True)
yield test.insert({"src":"Wordpress", "content":"blog article 2"}, safe=True)
yield test.insert({"src":"Wordpress", "content":"some comments"}, safe=True)
result = yield test.group(keys=["src"],
initial={"count":0}, reduce="function(obj,prev){prev.count++;}")
print "result:", result
if __name__ == '__main__':
example().addCallback(lambda ign: reactor.stop())
reactor.run()
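# Expected shape of `result` (sketch; exact types depend on the MongoDB and
# txmongo versions): one document per distinct `src`, e.g.
#   [{u'src': u'Twitter', u'count': 2.0}, {u'src': u'Wordpress', u'count': 3.0}]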
|
{
"content_hash": "7a9f03121272528f53e8cbb31bdb499d",
"timestamp": "",
"source": "github",
"line_count": 24,
"max_line_length": 81,
"avg_line_length": 35.958333333333336,
"alnum_prop": 0.6488991888760139,
"repo_name": "claytondaley/mongo-async-python-driver",
"id": "3daa2688ff9aab95567de4dc85e08c81da655c9e",
"size": "902",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "examples/inlinecallbacks/group.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "121919"
},
{
"name": "Shell",
"bytes": "725"
}
],
"symlink_target": ""
}
|
from core.serializers import TagSerializer
from rest_framework import generics
from core.models import Tag
class TagList(generics.ListCreateAPIView):
queryset = Tag.objects.all()
serializer_class = TagSerializer
class TagDetail(generics.RetrieveUpdateDestroyAPIView):
queryset = Tag.objects.all()
serializer_class = TagSerializer
|
{
"content_hash": "a868da44227e917770f825a0dd70fa3e",
"timestamp": "",
"source": "github",
"line_count": 13,
"max_line_length": 55,
"avg_line_length": 26.923076923076923,
"alnum_prop": 0.7914285714285715,
"repo_name": "wathsalav/xos",
"id": "ba8a035c7dda6a6ae903074fb11b33fe49b33e91",
"size": "350",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "xos/core/views/tags.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "ApacheConf",
"bytes": "370"
},
{
"name": "CSS",
"bytes": "37088"
},
{
"name": "HTML",
"bytes": "636864"
},
{
"name": "JavaScript",
"bytes": "760492"
},
{
"name": "Makefile",
"bytes": "2717"
},
{
"name": "Python",
"bytes": "1160110"
},
{
"name": "Shell",
"bytes": "10483"
}
],
"symlink_target": ""
}
|
"""Spectral operations (e.g. Short-time Fourier Transform)."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import ops
from tensorflow.python.framework import tensor_util
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.ops.signal import fft_ops
from tensorflow.python.ops.signal import reconstruction_ops
from tensorflow.python.ops.signal import shape_ops
from tensorflow.python.ops.signal import window_ops
from tensorflow.python.util.tf_export import tf_export
@tf_export('signal.stft')
def stft(signals, frame_length, frame_step, fft_length=None,
window_fn=window_ops.hann_window,
pad_end=False, name=None):
"""Computes the [Short-time Fourier Transform][stft] of `signals`.
Implemented with GPU-compatible ops and supports gradients.
Args:
signals: A `[..., samples]` `float32` `Tensor` of real-valued signals.
frame_length: An integer scalar `Tensor`. The window length in samples.
frame_step: An integer scalar `Tensor`. The number of samples to step.
fft_length: An integer scalar `Tensor`. The size of the FFT to apply.
If not provided, uses the smallest power of 2 enclosing `frame_length`.
window_fn: A callable that takes a window length and a `dtype` keyword
argument and returns a `[window_length]` `Tensor` of samples in the
provided datatype. If set to `None`, no windowing is used.
pad_end: Whether to pad the end of `signals` with zeros when the provided
frame length and step produces a frame that lies partially past its end.
name: An optional name for the operation.
Returns:
A `[..., frames, fft_unique_bins]` `Tensor` of `complex64` STFT values where
`fft_unique_bins` is `fft_length // 2 + 1` (the unique components of the
FFT).
Raises:
ValueError: If `signals` is not at least rank 1, `frame_length` is
not scalar, or `frame_step` is not scalar.
[stft]: https://en.wikipedia.org/wiki/Short-time_Fourier_transform
"""
with ops.name_scope(name, 'stft', [signals, frame_length,
frame_step]):
signals = ops.convert_to_tensor(signals, name='signals')
signals.shape.with_rank_at_least(1)
frame_length = ops.convert_to_tensor(frame_length, name='frame_length')
frame_length.shape.assert_has_rank(0)
frame_step = ops.convert_to_tensor(frame_step, name='frame_step')
frame_step.shape.assert_has_rank(0)
if fft_length is None:
fft_length = _enclosing_power_of_two(frame_length)
else:
fft_length = ops.convert_to_tensor(fft_length, name='fft_length')
framed_signals = shape_ops.frame(
signals, frame_length, frame_step, pad_end=pad_end)
# Optionally window the framed signals.
if window_fn is not None:
window = window_fn(frame_length, dtype=framed_signals.dtype)
framed_signals *= window
# fft_ops.rfft produces the (fft_length/2 + 1) unique components of the
# FFT of the real windowed signals in framed_signals.
return fft_ops.rfft(framed_signals, [fft_length])
@tf_export('signal.inverse_stft_window_fn')
def inverse_stft_window_fn(frame_step,
forward_window_fn=window_ops.hann_window,
name=None):
"""Generates a window function that can be used in `inverse_stft`.
Constructs a window that is equal to the forward window with a further
pointwise amplitude correction. `inverse_stft_window_fn` is equivalent to
`forward_window_fn` in the case where it would produce an exact inverse.
See examples in `inverse_stft` documentation for usage.
Args:
frame_step: An integer scalar `Tensor`. The number of samples to step.
forward_window_fn: window_fn used in the forward transform, `stft`.
name: An optional name for the operation.
Returns:
A callable that takes a window length and a `dtype` keyword argument and
returns a `[window_length]` `Tensor` of samples in the provided datatype.
The returned window is suitable for reconstructing original waveform in
inverse_stft.
"""
with ops.name_scope(name, 'inverse_stft_window_fn', [forward_window_fn]):
frame_step = ops.convert_to_tensor(frame_step, name='frame_step')
frame_step.shape.assert_has_rank(0)
def inverse_stft_window_fn_inner(frame_length, dtype):
"""Computes a window that can be used in `inverse_stft`.
Args:
frame_length: An integer scalar `Tensor`. The window length in samples.
dtype: Data type of waveform passed to `stft`.
Returns:
A window suitable for reconstructing original waveform in `inverse_stft`.
Raises:
        ValueError: If `frame_length` is not scalar, `forward_window_fn` is not
          a callable that takes a window length and a `dtype` keyword argument
          and returns a `[window_length]` `Tensor` of samples in the provided
          datatype, or `frame_step` is not scalar.
"""
with ops.name_scope(name, 'inverse_stft_window_fn', [forward_window_fn]):
frame_length = ops.convert_to_tensor(frame_length, name='frame_length')
frame_length.shape.assert_has_rank(0)
# Use equation 7 from Griffin + Lim.
forward_window = forward_window_fn(frame_length, dtype=dtype)
denom = math_ops.square(forward_window)
overlaps = -(-frame_length // frame_step) # Ceiling division.
denom = array_ops.pad(denom, [(0, overlaps * frame_step - frame_length)])
denom = array_ops.reshape(denom, [overlaps, frame_step])
denom = math_ops.reduce_sum(denom, 0, keepdims=True)
denom = array_ops.tile(denom, [overlaps, 1])
denom = array_ops.reshape(denom, [overlaps * frame_step])
return forward_window / denom[:frame_length]
return inverse_stft_window_fn_inner
@tf_export('signal.inverse_stft')
def inverse_stft(stfts,
frame_length,
frame_step,
fft_length=None,
window_fn=window_ops.hann_window,
name=None):
"""Computes the inverse [Short-time Fourier Transform][stft] of `stfts`.
  To reconstruct an original waveform, a complementary window function should
be used in inverse_stft. Such a window function can be constructed with
tf.signal.inverse_stft_window_fn.
Example:
```python
frame_length = 400
frame_step = 160
waveform = tf.compat.v1.placeholder(dtype=tf.float32, shape=[1000])
stft = tf.signal.stft(waveform, frame_length, frame_step)
inverse_stft = tf.signal.inverse_stft(
stft, frame_length, frame_step,
window_fn=tf.signal.inverse_stft_window_fn(frame_step))
```
  If a custom window_fn is used in stft, it must be passed to
inverse_stft_window_fn:
```python
frame_length = 400
frame_step = 160
  window_fn = functools.partial(window_ops.hamming_window, periodic=True)
waveform = tf.compat.v1.placeholder(dtype=tf.float32, shape=[1000])
stft = tf.signal.stft(
waveform, frame_length, frame_step, window_fn=window_fn)
inverse_stft = tf.signal.inverse_stft(
stft, frame_length, frame_step,
window_fn=tf.signal.inverse_stft_window_fn(
frame_step, forward_window_fn=window_fn))
```
Implemented with GPU-compatible ops and supports gradients.
Args:
stfts: A `complex64` `[..., frames, fft_unique_bins]` `Tensor` of STFT bins
representing a batch of `fft_length`-point STFTs where `fft_unique_bins`
      is `fft_length // 2 + 1`.
frame_length: An integer scalar `Tensor`. The window length in samples.
frame_step: An integer scalar `Tensor`. The number of samples to step.
fft_length: An integer scalar `Tensor`. The size of the FFT that produced
`stfts`. If not provided, uses the smallest power of 2 enclosing
`frame_length`.
window_fn: A callable that takes a window length and a `dtype` keyword
argument and returns a `[window_length]` `Tensor` of samples in the
provided datatype. If set to `None`, no windowing is used.
name: An optional name for the operation.
Returns:
A `[..., samples]` `Tensor` of `float32` signals representing the inverse
STFT for each input STFT in `stfts`.
Raises:
ValueError: If `stfts` is not at least rank 2, `frame_length` is not scalar,
`frame_step` is not scalar, or `fft_length` is not scalar.
[stft]: https://en.wikipedia.org/wiki/Short-time_Fourier_transform
"""
with ops.name_scope(name, 'inverse_stft', [stfts]):
stfts = ops.convert_to_tensor(stfts, name='stfts')
stfts.shape.with_rank_at_least(2)
frame_length = ops.convert_to_tensor(frame_length, name='frame_length')
frame_length.shape.assert_has_rank(0)
frame_step = ops.convert_to_tensor(frame_step, name='frame_step')
frame_step.shape.assert_has_rank(0)
if fft_length is None:
fft_length = _enclosing_power_of_two(frame_length)
else:
fft_length = ops.convert_to_tensor(fft_length, name='fft_length')
fft_length.shape.assert_has_rank(0)
real_frames = fft_ops.irfft(stfts, [fft_length])
# frame_length may be larger or smaller than fft_length, so we pad or
# truncate real_frames to frame_length.
frame_length_static = tensor_util.constant_value(frame_length)
# If we don't know the shape of real_frames's inner dimension, pad and
# truncate to frame_length.
if (frame_length_static is None or real_frames.shape.ndims is None or
real_frames.shape.as_list()[-1] is None):
real_frames = real_frames[..., :frame_length]
real_frames_rank = array_ops.rank(real_frames)
real_frames_shape = array_ops.shape(real_frames)
paddings = array_ops.concat(
[array_ops.zeros([real_frames_rank - 1, 2],
dtype=frame_length.dtype),
[[0, math_ops.maximum(0, frame_length - real_frames_shape[-1])]]], 0)
real_frames = array_ops.pad(real_frames, paddings)
# We know real_frames's last dimension and frame_length statically. If they
# are different, then pad or truncate real_frames to frame_length.
elif real_frames.shape.as_list()[-1] > frame_length_static:
real_frames = real_frames[..., :frame_length_static]
elif real_frames.shape.as_list()[-1] < frame_length_static:
pad_amount = frame_length_static - real_frames.shape.as_list()[-1]
real_frames = array_ops.pad(real_frames,
[[0, 0]] * (real_frames.shape.ndims - 1) +
[[0, pad_amount]])
# The above code pads the inner dimension of real_frames to frame_length,
# but it does so in a way that may not be shape-inference friendly.
# Restore shape information if we are able to.
if frame_length_static is not None and real_frames.shape.ndims is not None:
real_frames.set_shape([None] * (real_frames.shape.ndims - 1) +
[frame_length_static])
# Optionally window and overlap-add the inner 2 dimensions of real_frames
# into a single [samples] dimension.
if window_fn is not None:
window = window_fn(frame_length, dtype=stfts.dtype.real_dtype)
real_frames *= window
return reconstruction_ops.overlap_and_add(real_frames, frame_step)
def _enclosing_power_of_two(value):
"""Return 2**N for integer N such that 2**N >= value."""
value_static = tensor_util.constant_value(value)
if value_static is not None:
return constant_op.constant(
int(2**np.ceil(np.log(value_static) / np.log(2.0))), value.dtype)
return math_ops.cast(
math_ops.pow(
2.0,
math_ops.ceil(
math_ops.log(math_ops.cast(value, dtypes.float32)) /
math_ops.log(2.0))), value.dtype)
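# --- Illustrative round-trip sketch, not part of the original module ---
# Assuming a TF build where these ops are exported under tf.signal:
#   import tensorflow as tf
#   waveform = tf.random.normal([1000])
#   s = tf.signal.stft(waveform, frame_length=256, frame_step=64)
#   recovered = tf.signal.inverse_stft(
#       s, frame_length=256, frame_step=64,
#       window_fn=tf.signal.inverse_stft_window_fn(64))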
|
{
"content_hash": "f91be1054d4e468f25f46553503f484b",
"timestamp": "",
"source": "github",
"line_count": 275,
"max_line_length": 80,
"avg_line_length": 43.338181818181816,
"alnum_prop": 0.6788890753482127,
"repo_name": "alsrgv/tensorflow",
"id": "2e46ae50b0a2075edd18caf167def4c859aab920",
"size": "12607",
"binary": false,
"copies": "3",
"ref": "refs/heads/master",
"path": "tensorflow/python/ops/signal/spectral_ops.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Assembly",
"bytes": "3568"
},
{
"name": "Batchfile",
"bytes": "15317"
},
{
"name": "C",
"bytes": "755360"
},
{
"name": "C#",
"bytes": "8446"
},
{
"name": "C++",
"bytes": "68001148"
},
{
"name": "CMake",
"bytes": "204596"
},
{
"name": "Dockerfile",
"bytes": "73602"
},
{
"name": "Go",
"bytes": "1627121"
},
{
"name": "HTML",
"bytes": "4680118"
},
{
"name": "Java",
"bytes": "842866"
},
{
"name": "Jupyter Notebook",
"bytes": "1665584"
},
{
"name": "LLVM",
"bytes": "6536"
},
{
"name": "Makefile",
"bytes": "101157"
},
{
"name": "Objective-C",
"bytes": "104061"
},
{
"name": "Objective-C++",
"bytes": "175222"
},
{
"name": "PHP",
"bytes": "17570"
},
{
"name": "Pascal",
"bytes": "3239"
},
{
"name": "Perl",
"bytes": "7536"
},
{
"name": "Python",
"bytes": "48843099"
},
{
"name": "RobotFramework",
"bytes": "891"
},
{
"name": "Ruby",
"bytes": "4733"
},
{
"name": "Shell",
"bytes": "488241"
},
{
"name": "Smarty",
"bytes": "27495"
},
{
"name": "Swift",
"bytes": "56155"
},
{
"name": "TSQL",
"bytes": "921"
}
],
"symlink_target": ""
}
|
"""
This is a very simple python starter script to automate a series of PWSCF
calculations. If you don't know Python, get a quick primer from the official
Python documentation at https://docs.python.org/2.7/. The script is deliberately
simple so that only basic Python syntax is used and you can get comfortable with
making changes and writing programs.
Author: Shyue Ping Ong
"""
import numpy as np
submit_script = open("submit_script", 'a')
# Load the Al.100.bulk.pw.in.template file as a template.
with open("Al.100.bulk.pw.in.template") as f:
template = f.read()
# Set default values for various parameters
k = 16 # k-point grid of 16x16x16
alat = 7.65 # The lattice parameter for the cell in Bohr.
# Loop through different lattice parameters (alat).
for alat in np.arange(7.55, 7.65, 0.01):
# This generates a string from the template with the parameters replaced
# by the specified values.
s = template.format(k=k, alat=alat)
# Let's define an easy jobname.
jobname = "Al_100_bulk_%s" % (alat)
# Write the actual input file for PWSCF.
with open("%s.pw.in" % jobname, "w") as f:
f.write(s)
# Write the command in submit_script.
submit_script.write(
'mpirun --map-by core --mca btl_openib_if_include "mlx5_2:1" '
'--mca btl openib,self,vader pw.x -input {jobname}.pw.in -npool 1 > {jobname}.out\n'
.format(jobname=jobname))
print("Done with input generation for %s" % jobname)
# Append another line in submit_script to cleanup.
# For this lab, we don't need the files that are dumped into the tmp directory.
submit_script.write("rm -r tmp")
# Close the submit_script after appending all PWSCF commands.
submit_script.close()
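# Typical workflow (sketch): run this script once to generate the *.pw.in
# inputs and submit_script, then execute `bash submit_script` on the cluster.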
|
{
"content_hash": "497178f89f4d6c3deedd46460c55d35a",
"timestamp": "",
"source": "github",
"line_count": 47,
"max_line_length": 96,
"avg_line_length": 36.46808510638298,
"alnum_prop": 0.6948658109684948,
"repo_name": "materialsvirtuallab/nano266",
"id": "a41f2a9e8d1906883262b94a603f27e128eadb6f",
"size": "1737",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "labs/lab4/scf.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Python",
"bytes": "12291"
},
{
"name": "Shell",
"bytes": "3063"
},
{
"name": "TeX",
"bytes": "3962"
}
],
"symlink_target": ""
}
|
'''
~Gros
'''
import sys
from hashlib import sha256
from binascii import hexlify, unhexlify
import random
import argparse
from Crypto.Cipher import AES
from pwn import *
from utils import *
from config import config
def mitm():
print("MITM (key-fixing attack)...")
alice = remote(config["host"], config["task2"]["port_alice"])
bob = remote(config["host"], config["task2"]["port_bob"])
# alice p,g--> bob
p = int(alice.recvline().strip())
g = int(alice.recvline().strip())
print "p = {}".format(p)
print "g = {}".format(g)
    # Key-fixing: both "public keys" are replaced with p, so each side
    # computes pow(p, x, p) == 0 as the shared secret, and the session key
    # is therefore derive_key("0"), known to the attacker in advance.
    key = derive_key("0")
    evil_AB = p
    bob.sendline(str(p))
bob.sendline(str(g))
# alice <--ACK,p,g bob
bob.recvline().strip()
bob.recvline().strip()
bob.recvline().strip()
alice.sendline('ACK')
alice.sendline(str(p))
alice.sendline(str(g))
# alice A--> bob
A = int(alice.recvline().strip())
print "A = {}".format(A)
print "set A = evil_AB"
bob.sendline(str(evil_AB))
# alice <--B bob
B = int(bob.recvline().strip())
print "B = {}".format(B)
print "set B = evil_AB"
alice.sendline(str(evil_AB))
# get msg from alice
msg_from_alice = unhexlify(alice.recvline().strip())
iv = msg_from_alice[:16]
msg_from_alice = msg_from_alice[16:]
cipher = AES.new(key, AES.MODE_CBC, iv)
msg = strip_padding(cipher.decrypt(msg_from_alice))
print "Msg from alice: {}".format(msg)
    # re-encrypt alice's message under the fixed key and relay it to bob
    fake_msg_from_alice = msg
fake_msg_from_alice = add_padding(fake_msg_from_alice)
iv = random_bytes(16)
cipher = AES.new(key, AES.MODE_CBC, iv)
msg_enc = cipher.encrypt(fake_msg_from_alice)
bob.sendline(hexlify(iv+msg_enc))
# get msg from bob
msg_from_bob = unhexlify(bob.recvline().strip())
iv = msg_from_bob[:16]
msg_from_bob = msg_from_bob[16:]
cipher = AES.new(key, AES.MODE_CBC, iv)
msg = strip_padding(cipher.decrypt(msg_from_bob))
print "Msg from bob: {}".format(msg)
# send alice evil msg
fake_msg_from_bob = 'I am so evil!'
fake_msg_from_bob = add_padding(fake_msg_from_bob)
iv = random_bytes(16)
cipher = AES.new(key, AES.MODE_CBC, iv)
msg_enc = cipher.encrypt(fake_msg_from_bob)
alice.sendline(hexlify(iv+msg_enc))
if __name__ == "__main__":
mitm()
|
{
"content_hash": "c1fa7edb0a3744baa7085ec2bdca9896",
"timestamp": "",
"source": "github",
"line_count": 92,
"max_line_length": 65,
"avg_line_length": 25.097826086956523,
"alnum_prop": 0.6119532265049805,
"repo_name": "JustHitTheCore/ctf_workshops",
"id": "2290c82524984c3d50b77aa6331eccc86faaa873",
"size": "2356",
"binary": false,
"copies": "3",
"ref": "refs/heads/master",
"path": "2017/lab_dh_done_on_labs/task2_mitm_solution.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "24335"
},
{
"name": "Shell",
"bytes": "861"
}
],
"symlink_target": ""
}
|
"""
Find intermediate evalutation results in assert statements through builtin AST.
This should replace oldinterpret.py eventually.
"""
import sys
import ast
import py
from _pytest.assertion import util
from _pytest.assertion.reinterpret import BuiltinAssertionError
if sys.platform.startswith("java"):
# See http://bugs.jython.org/issue1497
_exprs = ("BoolOp", "BinOp", "UnaryOp", "Lambda", "IfExp", "Dict",
"ListComp", "GeneratorExp", "Yield", "Compare", "Call",
"Repr", "Num", "Str", "Attribute", "Subscript", "Name",
"List", "Tuple")
_stmts = ("FunctionDef", "ClassDef", "Return", "Delete", "Assign",
"AugAssign", "Print", "For", "While", "If", "With", "Raise",
"TryExcept", "TryFinally", "Assert", "Import", "ImportFrom",
"Exec", "Global", "Expr", "Pass", "Break", "Continue")
_expr_nodes = set(getattr(ast, name) for name in _exprs)
_stmt_nodes = set(getattr(ast, name) for name in _stmts)
def _is_ast_expr(node):
return node.__class__ in _expr_nodes
def _is_ast_stmt(node):
return node.__class__ in _stmt_nodes
else:
def _is_ast_expr(node):
return isinstance(node, ast.expr)
def _is_ast_stmt(node):
return isinstance(node, ast.stmt)
class Failure(Exception):
"""Error found while interpreting AST."""
def __init__(self, explanation=""):
self.cause = sys.exc_info()
self.explanation = explanation
def interpret(source, frame, should_fail=False):
mod = ast.parse(source)
visitor = DebugInterpreter(frame)
try:
visitor.visit(mod)
except Failure:
failure = sys.exc_info()[1]
return getfailure(failure)
if should_fail:
return ("(assertion failed, but when it was re-run for "
"printing intermediate values, it did not fail. Suggestions: "
"compute assert expression before the assert or use --assert=plain)")
def run(offending_line, frame=None):
if frame is None:
frame = py.code.Frame(sys._getframe(1))
return interpret(offending_line, frame)
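# Illustrative usage (sketch; the variable below is hypothetical):
#   x = 0
#   run("assert x == 1")  # -> an explanation string such as "assert 0 == 1"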
def getfailure(e):
explanation = util.format_explanation(e.explanation)
value = e.cause[1]
if str(value):
lines = explanation.split('\n')
lines[0] += " << %s" % (value,)
explanation = '\n'.join(lines)
text = "%s: %s" % (e.cause[0].__name__, explanation)
if text.startswith('AssertionError: assert '):
text = text[16:]
return text
operator_map = {
ast.BitOr : "|",
ast.BitXor : "^",
ast.BitAnd : "&",
ast.LShift : "<<",
ast.RShift : ">>",
ast.Add : "+",
ast.Sub : "-",
ast.Mult : "*",
ast.Div : "/",
ast.FloorDiv : "//",
ast.Mod : "%",
ast.Eq : "==",
ast.NotEq : "!=",
ast.Lt : "<",
ast.LtE : "<=",
ast.Gt : ">",
ast.GtE : ">=",
ast.Pow : "**",
ast.Is : "is",
ast.IsNot : "is not",
ast.In : "in",
ast.NotIn : "not in"
}
unary_map = {
ast.Not : "not %s",
ast.Invert : "~%s",
ast.USub : "-%s",
ast.UAdd : "+%s"
}
class DebugInterpreter(ast.NodeVisitor):
"""Interpret AST nodes to gleam useful debugging information. """
def __init__(self, frame):
self.frame = frame
def generic_visit(self, node):
# Fallback when we don't have a special implementation.
if _is_ast_expr(node):
mod = ast.Expression(node)
co = self._compile(mod)
try:
result = self.frame.eval(co)
except Exception:
raise Failure()
explanation = self.frame.repr(result)
return explanation, result
elif _is_ast_stmt(node):
mod = ast.Module([node])
co = self._compile(mod, "exec")
try:
self.frame.exec_(co)
except Exception:
raise Failure()
return None, None
else:
raise AssertionError("can't handle %s" %(node,))
def _compile(self, source, mode="eval"):
return compile(source, "<assertion interpretation>", mode)
def visit_Expr(self, expr):
return self.visit(expr.value)
def visit_Module(self, mod):
for stmt in mod.body:
self.visit(stmt)
def visit_Name(self, name):
explanation, result = self.generic_visit(name)
# See if the name is local.
source = "%r in locals() is not globals()" % (name.id,)
co = self._compile(source)
try:
local = self.frame.eval(co)
except Exception:
# have to assume it isn't
local = None
if local is None or not self.frame.is_true(local):
return name.id, result
return explanation, result
def visit_Compare(self, comp):
left = comp.left
left_explanation, left_result = self.visit(left)
for op, next_op in zip(comp.ops, comp.comparators):
next_explanation, next_result = self.visit(next_op)
op_symbol = operator_map[op.__class__]
explanation = "%s %s %s" % (left_explanation, op_symbol,
next_explanation)
source = "__exprinfo_left %s __exprinfo_right" % (op_symbol,)
co = self._compile(source)
try:
result = self.frame.eval(co, __exprinfo_left=left_result,
__exprinfo_right=next_result)
except Exception:
raise Failure(explanation)
try:
if not self.frame.is_true(result):
break
except KeyboardInterrupt:
raise
except:
break
left_explanation, left_result = next_explanation, next_result
if util._reprcompare is not None:
res = util._reprcompare(op_symbol, left_result, next_result)
if res:
explanation = res
return explanation, result
def visit_BoolOp(self, boolop):
is_or = isinstance(boolop.op, ast.Or)
explanations = []
for operand in boolop.values:
explanation, result = self.visit(operand)
explanations.append(explanation)
if result == is_or:
break
name = is_or and " or " or " and "
explanation = "(" + name.join(explanations) + ")"
return explanation, result
def visit_UnaryOp(self, unary):
pattern = unary_map[unary.op.__class__]
operand_explanation, operand_result = self.visit(unary.operand)
explanation = pattern % (operand_explanation,)
co = self._compile(pattern % ("__exprinfo_expr",))
try:
result = self.frame.eval(co, __exprinfo_expr=operand_result)
except Exception:
raise Failure(explanation)
return explanation, result
def visit_BinOp(self, binop):
left_explanation, left_result = self.visit(binop.left)
right_explanation, right_result = self.visit(binop.right)
symbol = operator_map[binop.op.__class__]
explanation = "(%s %s %s)" % (left_explanation, symbol,
right_explanation)
source = "__exprinfo_left %s __exprinfo_right" % (symbol,)
co = self._compile(source)
try:
result = self.frame.eval(co, __exprinfo_left=left_result,
__exprinfo_right=right_result)
except Exception:
raise Failure(explanation)
return explanation, result
def visit_Call(self, call):
func_explanation, func = self.visit(call.func)
arg_explanations = []
ns = {"__exprinfo_func" : func}
arguments = []
for arg in call.args:
arg_explanation, arg_result = self.visit(arg)
arg_name = "__exprinfo_%s" % (len(ns),)
ns[arg_name] = arg_result
arguments.append(arg_name)
arg_explanations.append(arg_explanation)
for keyword in call.keywords:
arg_explanation, arg_result = self.visit(keyword.value)
arg_name = "__exprinfo_%s" % (len(ns),)
ns[arg_name] = arg_result
keyword_source = "%s=%%s" % (keyword.arg)
arguments.append(keyword_source % (arg_name,))
arg_explanations.append(keyword_source % (arg_explanation,))
if call.starargs:
arg_explanation, arg_result = self.visit(call.starargs)
arg_name = "__exprinfo_star"
ns[arg_name] = arg_result
arguments.append("*%s" % (arg_name,))
arg_explanations.append("*%s" % (arg_explanation,))
if call.kwargs:
arg_explanation, arg_result = self.visit(call.kwargs)
arg_name = "__exprinfo_kwds"
ns[arg_name] = arg_result
arguments.append("**%s" % (arg_name,))
arg_explanations.append("**%s" % (arg_explanation,))
args_explained = ", ".join(arg_explanations)
explanation = "%s(%s)" % (func_explanation, args_explained)
args = ", ".join(arguments)
source = "__exprinfo_func(%s)" % (args,)
co = self._compile(source)
try:
result = self.frame.eval(co, **ns)
except Exception:
raise Failure(explanation)
pattern = "%s\n{%s = %s\n}"
rep = self.frame.repr(result)
explanation = pattern % (rep, rep, explanation)
return explanation, result
def _is_builtin_name(self, name):
pattern = "%r not in globals() and %r not in locals()"
source = pattern % (name.id, name.id)
co = self._compile(source)
try:
return self.frame.eval(co)
except Exception:
return False
def visit_Attribute(self, attr):
if not isinstance(attr.ctx, ast.Load):
return self.generic_visit(attr)
source_explanation, source_result = self.visit(attr.value)
explanation = "%s.%s" % (source_explanation, attr.attr)
source = "__exprinfo_expr.%s" % (attr.attr,)
co = self._compile(source)
try:
result = self.frame.eval(co, __exprinfo_expr=source_result)
except Exception:
raise Failure(explanation)
explanation = "%s\n{%s = %s.%s\n}" % (self.frame.repr(result),
self.frame.repr(result),
source_explanation, attr.attr)
# Check if the attr is from an instance.
source = "%r in getattr(__exprinfo_expr, '__dict__', {})"
source = source % (attr.attr,)
co = self._compile(source)
try:
from_instance = self.frame.eval(co, __exprinfo_expr=source_result)
except Exception:
from_instance = None
if from_instance is None or self.frame.is_true(from_instance):
rep = self.frame.repr(result)
pattern = "%s\n{%s = %s\n}"
explanation = pattern % (rep, rep, explanation)
return explanation, result
def visit_Assert(self, assrt):
test_explanation, test_result = self.visit(assrt.test)
explanation = "assert %s" % (test_explanation,)
if not self.frame.is_true(test_result):
try:
raise BuiltinAssertionError
except Exception:
raise Failure(explanation)
return explanation, test_result
def visit_Assign(self, assign):
value_explanation, value_result = self.visit(assign.value)
explanation = "... = %s" % (value_explanation,)
name = ast.Name("__exprinfo_expr", ast.Load(),
lineno=assign.value.lineno,
col_offset=assign.value.col_offset)
new_assign = ast.Assign(assign.targets, name, lineno=assign.lineno,
col_offset=assign.col_offset)
mod = ast.Module([new_assign])
co = self._compile(mod, "exec")
try:
self.frame.exec_(co, __exprinfo_expr=value_result)
except Exception:
raise Failure(explanation)
return explanation, value_result
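if __name__ == "__main__":
    # A minimal, hedged demo (not part of the original module; requires the
    # imports above to resolve): re-run a failing assertion source line
    # against the current frame to get an explanation string with
    # intermediate values filled in.
    x, y = 1, 2
    print(run("assert x == y"))  # e.g. "assert 1 == 2"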
|
{
"content_hash": "cf2133cee07f076c732ddb2bd0dd3ad5",
"timestamp": "",
"source": "github",
"line_count": 333,
"max_line_length": 85,
"avg_line_length": 36.810810810810814,
"alnum_prop": 0.551558166095611,
"repo_name": "ktan2020/legacy-automation",
"id": "e7e9658d7dbda560940f4b89452cf0169ae2d7a6",
"size": "12258",
"binary": false,
"copies": "6",
"ref": "refs/heads/master",
"path": "win/Lib/site-packages/pytest-2.3.4-py2.7.egg/_pytest/assertion/newinterpret.py",
"mode": "33261",
"license": "mit",
"language": [
{
"name": "ActionScript",
"bytes": "913"
},
{
"name": "Ada",
"bytes": "289"
},
{
"name": "Assembly",
"bytes": "687"
},
{
"name": "Boo",
"bytes": "540"
},
{
"name": "C",
"bytes": "40116"
},
{
"name": "C#",
"bytes": "474"
},
{
"name": "C++",
"bytes": "393"
},
{
"name": "CSS",
"bytes": "70883"
},
{
"name": "ColdFusion",
"bytes": "1012"
},
{
"name": "Common Lisp",
"bytes": "1034"
},
{
"name": "D",
"bytes": "1858"
},
{
"name": "Eiffel",
"bytes": "426"
},
{
"name": "Erlang",
"bytes": "9243"
},
{
"name": "FORTRAN",
"bytes": "1810"
},
{
"name": "Forth",
"bytes": "182"
},
{
"name": "Groovy",
"bytes": "2366"
},
{
"name": "Haskell",
"bytes": "816"
},
{
"name": "Haxe",
"bytes": "455"
},
{
"name": "Java",
"bytes": "1155"
},
{
"name": "JavaScript",
"bytes": "69444"
},
{
"name": "Lua",
"bytes": "795"
},
{
"name": "Matlab",
"bytes": "1278"
},
{
"name": "OCaml",
"bytes": "350"
},
{
"name": "Objective-C++",
"bytes": "885"
},
{
"name": "PHP",
"bytes": "1411"
},
{
"name": "Pascal",
"bytes": "388"
},
{
"name": "Perl",
"bytes": "252651"
},
{
"name": "Pike",
"bytes": "589"
},
{
"name": "Python",
"bytes": "42085780"
},
{
"name": "R",
"bytes": "1156"
},
{
"name": "Ruby",
"bytes": "480"
},
{
"name": "Scheme",
"bytes": "282"
},
{
"name": "Shell",
"bytes": "30518"
},
{
"name": "Smalltalk",
"bytes": "926"
},
{
"name": "Squirrel",
"bytes": "697"
},
{
"name": "Stata",
"bytes": "302"
},
{
"name": "SystemVerilog",
"bytes": "3145"
},
{
"name": "Tcl",
"bytes": "1039"
},
{
"name": "TeX",
"bytes": "1746"
},
{
"name": "VHDL",
"bytes": "985"
},
{
"name": "Vala",
"bytes": "664"
},
{
"name": "Verilog",
"bytes": "439"
},
{
"name": "Visual Basic",
"bytes": "2142"
},
{
"name": "XSLT",
"bytes": "152770"
},
{
"name": "ooc",
"bytes": "890"
},
{
"name": "xBase",
"bytes": "769"
}
],
"symlink_target": ""
}
|
from django.db import models
from asynchrormous.query import AsyncQuerySet
class AsyncManager(models.Manager):
""" A model manager which uses the AsyncQuerySet. """
def get_query_set(self):
return AsyncQuerySet(self.model, using=self._db)
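# A minimal usage sketch (hedged: the Article model below is hypothetical and
# assumes AsyncQuerySet exposes the usual QuerySet API):
#
#     class Article(models.Model):
#         title = models.CharField(max_length=100)
#
#         objects = AsyncManager()
#
#     qs = Article.objects.filter(title__startswith='a')  # an AsyncQuerySet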
|
{
"content_hash": "349b2ec57a00136943ffa78e087039f8",
"timestamp": "",
"source": "github",
"line_count": 9,
"max_line_length": 57,
"avg_line_length": 28.666666666666668,
"alnum_prop": 0.7364341085271318,
"repo_name": "adamalton/asynchrormous",
"id": "658e64db5d48ab663778b09f5477f4ffed825736",
"size": "258",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "asynchrormous/models.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "7881"
}
],
"symlink_target": ""
}
|
from __future__ import print_function
import rename
print("say_hi_fn():", rename.say_hi_fn())
print("MyStruct().say_something():", rename.MyStruct().say_something())
# Just make sure the symbols exist
rename.auto_renamed_func()
struct = rename.MyStruct()
struct.auto_renamed_meth()
_ = struct.auto_renamed_property
struct.auto_renamed_property = "foo"
_ = struct.custom_name
struct.custom_name = "foo"
print("MyStruct.auto_renamed_property.__doc__:", rename.MyStruct.auto_renamed_property.__doc__.strip())
print("MyStruct.custom_name.__doc__:", rename.MyStruct.custom_name.__doc__.strip())
print("OK")
|
{
"content_hash": "d6d37ea9c7a4e21ce136f691a06db13f",
"timestamp": "",
"source": "github",
"line_count": 20,
"max_line_length": 103,
"avg_line_length": 30.35,
"alnum_prop": 0.7166392092257001,
"repo_name": "go-python/gopy",
"id": "7e7daca6f14a50099e36e6065106100b260af1eb",
"size": "790",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "_examples/rename/test.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Dockerfile",
"bytes": "119"
},
{
"name": "Go",
"bytes": "261530"
},
{
"name": "Makefile",
"bytes": "1839"
},
{
"name": "Python",
"bytes": "34989"
}
],
"symlink_target": ""
}
|
import sys
def read_numbers_from_string(string):
    accum = ""
    numbers = []
    for char in string:
        if char.isdigit():
            accum += char
        else:
            if len(accum) > 0:
                numbers.append(int(accum))
                accum = ""
    # Flush a trailing number in case the string doesn't end with a delimiter.
    if len(accum) > 0:
        numbers.append(int(accum))
    return numbers
class Sudoku(object):
def __init__(self, file):
sizes_str = file.readline()
self.empties = []
self.table = []
self.rows, self.columns = read_numbers_from_string(sizes_str)
i = 0
for line in file:
j = 0
numbers = []
for char in line:
if char.isdigit():
numbers.append(int(char))
j += 1
elif char == "x":
numbers.append(0)
self.empties.append([i,j])
j += 1
self.table.append(numbers)
i += 1
def show_table(self):
for row in self.table:
print row
print " "
def table_columns(self):
result = []
for j in xrange(0, self.columns):
column = []
for i in xrange(0, self.rows):
column.append(self.table[i][j])
result.append(column)
return result
def small_square(self,row,column):
sm_row = row // (self.rows/3)
sm_col = column // (self.columns/3)
result = []
for i in xrange(sm_row*3, sm_row*3 + (self.rows/3)):
for j in xrange(sm_col*3, sm_col*3 + (self.columns/3)):
result.append(self.table[i][j])
return result
def check_table(self):
#Check rows
for row in self.table:
for digit in xrange(1,10):
if row.count(digit) > 1:
return -1
#Check columns
for column in self.table_columns():
for digit in xrange(1,10):
if column.count(digit) > 1:
return -1
#Check small square
for i in xrange(0, self.rows, self.rows/3):
for j in xrange(0, self.columns, self.columns/3):
for digit in xrange(1,10):
if self.small_square(i,j).count(digit) > 1:
return -1
return 0
def try_digits(self, i):
if i == len(self.empties):
print "Solved!"
self.show_table()
return 0
else:
empty = self.empties[i]
for digit in xrange(1,10):
self.table[empty[0]][empty[1]] = digit
#self.show_table()
value = self.check_table()
if value == 0:
if self.try_digits(i+1) == 0:
return 0
self.table[empty[0]][empty[1]] = 0
return -1
file = open("in.txt","r")
sudoku = Sudoku(file)
sudoku.show_table()
sudoku.try_digits(0)
#Ideas to optimize:
# -Don't rebuild the column table every time you check for repeated values
# -Same with the small squares
# -Don't try digits already used in a small square, so check_table() can skip checking squares
# -Non-recursive approach?
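#A possible sketch for the first idea (hedged, illustrative only): keep one
#set of used digits per column and update it incrementally, so check_table()
#doesn't have to rebuild the whole column table on every call:
#
#    col_used = [set() for _ in xrange(9)]
#
#    def place(table, i, j, digit):
#        table[i][j] = digit
#        col_used[j].add(digit)
#
#    def unplace(table, i, j, digit):
#        table[i][j] = 0
#        col_used[j].discard(digit)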
|
{
"content_hash": "d78f28e0ab0774aaeebf654c8cf3ba15",
"timestamp": "",
"source": "github",
"line_count": 105,
"max_line_length": 88,
"avg_line_length": 23.457142857142856,
"alnum_prop": 0.6228177019894438,
"repo_name": "ltdicai/testing-ground",
"id": "4ba51b635e8a1d4f77f56eb7ecbc764a868874b3",
"size": "2463",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "games/sudoku/main.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "14159"
}
],
"symlink_target": ""
}
|
import sys
import os
import juliadoc
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
#sys.path.insert(0, os.path.abspath('.'))
# -- General configuration ------------------------------------------------
# If your documentation needs a minimal Sphinx version, state it here.
#needs_sphinx = '1.0'
# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom
# ones.
extensions = [
'sphinx.ext.autodoc',
'sphinx.ext.mathjax',
'juliadoc.julia',
'juliadoc.jlhelp'
]
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
# The suffix of source filenames.
source_suffix = '.rst'
# The encoding of source files.
#source_encoding = 'utf-8-sig'
# The master toctree document.
master_doc = 'index'
# General information about the project.
project = u'EvolvingGraphs'
copyright = u'2015, Weijian Zhang'
# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
#
# The short X.Y version.
version = '0.0.0'
# The full version, including alpha/beta/rc tags.
release = '0.0.0'
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#language = None
# There are two options for replacing |today|: either, you set today to some
# non-false value, then it is used:
#today = ''
# Else, today_fmt is used as the format for a strftime call.
#today_fmt = '%B %d, %Y'
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
exclude_patterns = ['_build']
# The reST default role (used for this markup: `text`) to use for all
# documents.
#default_role = None
# If true, '()' will be appended to :func: etc. cross-reference text.
#add_function_parentheses = True
# If true, the current module name will be prepended to all description
# unit titles (such as .. function::).
#add_module_names = True
# If true, sectionauthor and moduleauthor directives will be shown in the
# output. They are ignored by default.
#show_authors = False
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'
# A list of ignored prefixes for module index sorting.
#modindex_common_prefix = []
# If true, keep warnings as "system message" paragraphs in the built documents.
#keep_warnings = False
# -- Options for HTML output ----------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
on_rtd = os.environ.get('READTHEDOCS', None) == 'True'
if not on_rtd: # only import and set the theme if we're building docs locally
import sphinx_rtd_theme
html_theme = 'sphinx_rtd_theme'
html_theme_path = [juliadoc.get_theme_dir()]
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
#html_theme_options = {}
# Add any paths that contain custom themes here, relative to this directory.
#html_theme_path = []
# The name for this set of Sphinx documents. If None, it defaults to
# "<project> v<release> documentation".
#html_title = None
# A shorter title for the navigation bar. Default is the same as html_title.
#html_short_title = None
# The name of an image file (relative to this directory) to place at the top
# of the sidebar.
#html_logo = None
# The name of an image file (within the static path) to use as favicon of the
# docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32
# pixels large.
#html_favicon = None
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['_static']
# Add any extra paths that contain custom files (such as robots.txt or
# .htaccess) here, relative to this directory. These files are copied
# directly to the root of the documentation.
#html_extra_path = []
# If not '', a 'Last updated on:' timestamp is inserted at every page bottom,
# using the given strftime format.
#html_last_updated_fmt = '%b %d, %Y'
# If true, SmartyPants will be used to convert quotes and dashes to
# typographically correct entities.
#html_use_smartypants = True
# Custom sidebar templates, maps document names to template names.
html_sidebars = juliadoc.default_sidebars()
# Additional templates that should be rendered to pages, maps page names to
# template names.
#html_additional_pages = {}
# If false, no module index is generated.
#html_domain_indices = True
# If false, no index is generated.
#html_use_index = True
# If true, the index is split into individual pages for each letter.
#html_split_index = False
# If true, links to the reST sources are added to the pages.
#html_show_sourcelink = True
# If true, "Created using Sphinx" is shown in the HTML footer. Default is True.
#html_show_sphinx = True
# If true, "(C) Copyright ..." is shown in the HTML footer. Default is True.
#html_show_copyright = True
# If true, an OpenSearch description file will be output, and all pages will
# contain a <link> tag referring to it. The value of this option must be the
# base URL from which the finished HTML is served.
#html_use_opensearch = ''
# This is the file name suffix for HTML files (e.g. ".xhtml").
#html_file_suffix = None
# Output file base name for HTML help builder.
htmlhelp_basename = 'EvolvingGraphsdoc'
# -- Options for LaTeX output ---------------------------------------------
latex_elements = {
# The paper size ('letterpaper' or 'a4paper').
#'papersize': 'letterpaper',
# The font size ('10pt', '11pt' or '12pt').
#'pointsize': '10pt',
# Additional stuff for the LaTeX preamble.
#'preamble': '',
}
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title,
# author, documentclass [howto, manual, or own class]).
latex_documents = [
('index', 'EvolvingGraphs.tex', u'EvolvingGraphs Documentation',
u'Weijian Zhang', 'manual'),
]
# The name of an image file (relative to this directory) to place at the top of
# the title page.
#latex_logo = None
# For "manual" documents, if this is true, then toplevel headings are parts,
# not chapters.
#latex_use_parts = False
# If true, show page references after internal links.
#latex_show_pagerefs = False
# If true, show URL addresses after external links.
#latex_show_urls = False
# Documents to append as an appendix to all manuals.
#latex_appendices = []
# If false, no module index is generated.
#latex_domain_indices = True
# -- Options for manual page output ---------------------------------------
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [
('index', 'evolvinggraphs', u'EvolvingGraphs Documentation',
[u'Weijian Zhang'], 1)
]
# If true, show URL addresses after external links.
#man_show_urls = False
# -- Options for Texinfo output -------------------------------------------
# Grouping the document tree into Texinfo files. List of tuples
# (source start file, target name, title, author,
# dir menu entry, description, category)
texinfo_documents = [
('index', 'EvolvingGraphs', u'EvolvingGraphs Documentation',
u'Weijian Zhang', 'EvolvingGraphs', 'One line description of project.',
'Miscellaneous'),
]
# Documents to append as an appendix to all manuals.
#texinfo_appendices = []
# If false, no module index is generated.
#texinfo_domain_indices = True
# How to display URL addresses: 'footnote', 'no', or 'inline'.
#texinfo_show_urls = 'footnote'
# If true, do not generate a @detailmenu in the "Top" node's menu.
#texinfo_no_detailmenu = False
|
{
"content_hash": "dccbc4046321fc57cf952c9b44aaae8e",
"timestamp": "",
"source": "github",
"line_count": 254,
"max_line_length": 79,
"avg_line_length": 32.09055118110236,
"alnum_prop": 0.7067844436265489,
"repo_name": "weijianzhang/EvolvingGraphs.jl",
"id": "20656b53c40cf3877fcb42caebd10f41dd81b5a3",
"size": "8578",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "doc_backup/conf.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Batchfile",
"bytes": "6717"
},
{
"name": "Julia",
"bytes": "77754"
},
{
"name": "Makefile",
"bytes": "6794"
},
{
"name": "Python",
"bytes": "8578"
}
],
"symlink_target": ""
}
|
import re
from elizabeth.intd import CURRENCIES, CURRENCY_SYMBOLS
from ._patterns import STR_REGEX
def test_str(business):
assert re.match(STR_REGEX, str(business))
def test_copyright(business):
result = business.copyright()
assert '©' in result
def test_currency_iso(business):
result = business.currency_iso()
assert result in CURRENCIES
def test_company_type(generic):
result = generic.business.company_type()
assert result in generic.business.data['company']['type']['title']
result_2 = generic.business.company_type(abbr=True)
assert result_2 in generic.business.data['company']['type']['abbr']
def test_company(generic):
result = generic.business.company()
assert result in generic.business.data['company']['name']
def test_price(generic):
currencies = CURRENCY_SYMBOLS[generic.business.locale]
result = generic.business.price(minimum=100.00, maximum=1999.99)
price, symbol = result.split(' ')
assert isinstance(price, str)
assert float(price) >= 100.00
assert float(price) <= 1999.99
assert symbol in currencies
# invalid locale should use default
generic.business.locale = "xx"
assert CURRENCY_SYMBOLS['default'] in generic.business.price()
|
{
"content_hash": "a1c41669aa6228344437697ac4f693f3",
"timestamp": "",
"source": "github",
"line_count": 45,
"max_line_length": 71,
"avg_line_length": 27.733333333333334,
"alnum_prop": 0.7115384615384616,
"repo_name": "wikkiewikkie/elizabeth",
"id": "d1a1f1d959c20d857496c499fe3efdb72ab4df72",
"size": "1273",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "tests/test_data/test_business.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Makefile",
"bytes": "2072"
},
{
"name": "Python",
"bytes": "286804"
}
],
"symlink_target": ""
}
|
from menus.base import Menu
from django.core.exceptions import ValidationError
class CMSAttachMenu(Menu):
cms_enabled = True
name = None
def __init__(self, *args, **kwargs):
super(CMSAttachMenu, self).__init__(*args, **kwargs)
if self.cms_enabled and not self.name:
raise ValidationError("the menu %s is a CMSAttachMenu but has no name defined!" % self.__class__.__name__)
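# A minimal usage sketch (hedged: MyMenu is hypothetical; get_nodes is the
# standard menus.base.Menu hook):
#
#     class MyMenu(CMSAttachMenu):
#         name = "My menu"  # required, or __init__ raises ValidationError
#
#         def get_nodes(self, request):
#             return []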
|
{
"content_hash": "907fbb9a4667ad485c6da3f3e7c4bec5",
"timestamp": "",
"source": "github",
"line_count": 11,
"max_line_length": 118,
"avg_line_length": 38.09090909090909,
"alnum_prop": 0.6563245823389021,
"repo_name": "dibaunaumh/tikal-corp-website",
"id": "7ee19f499258f02091edf8426a0be075584fed53",
"size": "419",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "cms/menu_bases.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "JavaScript",
"bytes": "780451"
},
{
"name": "PHP",
"bytes": "1052"
},
{
"name": "Python",
"bytes": "1558449"
},
{
"name": "Shell",
"bytes": "379"
}
],
"symlink_target": ""
}
|
import time
import socket
import threading
# Package modules.
import socksocket
from constants import *
from constants import _VERSIONS
from _forwarder import Forwarder
from socksocket import SockSocket
from socksexception import SocksException
#Consts
_BUFSIZE = 8192
class SocksClient(object):
"""A basic SOCKS client that provide traffic forwarding through SOCKS
tunnle. SocksClient only support TCP forwarding using connect request
or single udp communication (without multiple senders).
For more advanced SOCKS operation use SockSocket."""
def __init__(self, local_address, remote_addres, socks_chain = [],
udp = False, version = V_SOCKS5, user = None, pwd = None):
"""Initialize SocksClient properties.
local_address is where the client listen and from there forward the
traffic to tunnle and remote_address is to where sending the traffic
after forwarded in the tunnle (Both in (address, port) format).
socks_chain is a list of SOCKS server addresses and each server should
be in those format:
"127.0.0.1' (or any other IP address)
(ip address, port)
(ip address, port, version, user, pwd)
If data omitted it will replace by the default values specified
in the initializing function (default port is 1080 as specified in
the SOCKS RFC). for example:
("127.0.0.1", 8888) -> ("127.0.0.1", 8888, self.version, ,self.user, self.pwd)
"127.0.0.1" -> ("127.0.0.1", 1080, self.version, ,self.user, self.pwd)
("127.0.0.1", 8888, 5, "user", "pwd")->("127.0.0.1", 8888, 5, ,"user","pwd")
Any other option will cause an errors."""
# Version check.
if version not in _VERSIONS:
raise ValueError("Invalid SOCKS version")
self.remote_addres = remote_addres
self.local_address = local_address
self.version = version
self.user = user
self.pwd = pwd
self.udp = udp
self.connected = False
self.key_event = threading.Event()
self._in_sock = None
        self._socksock = None
self._forwarder = None
# Set socks_chain as property.
self.socks_chain = socks_chain
self._set_sockets()
def _get_chain(self):
"""Returns the SOCKS chain for setting socks_chain as a property."""
return self._socks_chain
    def _set_chain(self, chain):
        """Set the SOCKS chain for setting socks_chain as a property."""
        if self.connected:
            msg = "Can't change the SOCKS chain after connecting through it"
            raise SocksException(msg)
if not isinstance(chain, list):
raise SocksException("socks_chain must be a list")
if self.udp and len(chain) != 1:
msg = "SocksClient support only one chain link for UDP"
raise SocksException(msg)
        # Check that every item in the chain is a string or a tuple
        # with a length of 2 or 5.
for i in chain:
if (not isinstance(i[0], str) and
(not isinstance(i, tuple) or (len(i) not in (2, 5)))):
raise SocksException("Invalid items is sock_chain")
self._socks_chain = chain
# Set self.sock_chain to a propery.
socks_chain = property(_get_chain, _set_chain, "SOCKS servers chain")
def remove_server(self, server):
"""Removes server from the socks_chain."""
if server in self.socks_chain:
self.socks_chain = self.socks_chain.remove(server)
def add_server(self, server):
"""Adds server to socks_chain."""
self.socks_chain = self.socks_chain.append(server)
    def _get_server_from_chain(self, server):
        """Returns a server tuple with 5 items."""
        # Server is a tuple with 5 items.
        if isinstance(server, tuple) and len(server) == 5:
            addr, port, version, user, pwd = server
        # Server is a tuple with 2 items.
        elif isinstance(server, tuple) and len(server) == 2:
            addr, port = server
            version, user, pwd = self.version, self.user, self.pwd
        # Server is a string.
        else:
            addr, port = server, DEFAULT_PORT
            version, user, pwd = self.version, self.user, self.pwd
        return addr, port, version, user, pwd
def connect_socks_chain(self):
"""Connect the SOCKS server and request to connecet the remote address
specified in remote_address."""
if not self.socks_chain:
return
# Set the first SOCKS server in the chain.
if not self._socksock.socks_server:
server = self._get_server_from_chain(self.socks_chain[0])
self._socksock.set_server(server)
        # Connect all SOCKS servers in the chain.
for i in self.socks_chain[1:]:
server = self._get_server_from_chain(i)
self._socksock.connect(server[:2])
self._socksock.connect_socks_server(server)
self.connected = True
def reverse_connect(self, lcl_port = DEFAULT_REVERSE_PORT, backlog = 0):
"""Wait for a connection from the first server in socks_chain and
reverse connect him. If there is no servers in socks_chain wait for any
connection and add it to socks_chain."""
# Create a new SockSocket for listening.
l_sock = SockSocket(self._socksock.family, self._socksock.type,
self._socksock.proto, self._socksock.socks_server)
# Set reverse connection parameters.
if self.socks_chain:
server = self._get_server_from_chain(self.socks_chain[0])
addr, port, version, user, pwd = server
rmt_addr = [(addr, port)]
else:
version, user, pwd = self.version, self.user, self.pwd
rmt_addr = []
# Receive connection.
        acc_socksock = l_sock.reverse_socks_connect(lcl_port, rmt_addr, backlog,
version, user, pwd)
if acc_socksock:
self._socksock = acc_socksock
self._forwarder = Forwarder(self.local_address, self._socksock,
self.key_event, self.version)
self.connect_socks_chain()
        # If it succeeded there is no need to listen, and connected will be True.
l_sock.close()
return self.connected
    def tcp_forward(self):
        """Wait for an incoming TCP connection on local_address and start
        forwarding traffic through the connected SOCKS chain (you need to
        perform connect_socks_chain or reverse_connect first)."""
# Start listen and connect the remote address.
self._in_sock.bind(self.local_address)
self._in_sock.listen(5)
self._in_sock.setblocking(False)
self._socksock.connect(self.remote_addres)
try:
forward_threads = []
while True:
# Accept connections and set arguments for forwarder thread.
connection = addr = None
try:
connection, addr = self._in_sock.accept()
except socket.error as e:
pass
if not connection:
# Check if SockSocket connected to server is alive.
if forward_threads:
if True not in [i.isAlive() for i in forward_threads]:
try:
if len(self._socksock.recv(_BUFSIZE)) == 0:
break
except socket.error as e:
break
time.sleep(1)
continue
# Run forwarder thread.
forward_data = {self._socksock:"", connection:""}
args_tuple = (forward_data, self._forwarder.tcp_receive,
self._forwarder.tcp_send)
                forward_handler = threading.Thread(target=self._forwarder.forward,
args = args_tuple)
forward_handler.daemon = True
forward_threads.append(forward_handler)
forward_handler.start()
except KeyboardInterrupt:
self.key_event.set()
finally:
self.close_sockets()
self._set_sockets()
def udp_forward(self):
"""Wait for incomming datagram and start forwarding datagrams between
the sender and the remote address(need to preform connect_socks_chain
or reverse_connect)."""
# Getting the first datagram for connecting all sockets.
self._in_sock.bind(self.local_address)
self._socksock.connect(self.remote_addres)
        # Set the socket to non-blocking to work around a Python bug on
        # Windows that makes it impossible to break if no datagram is received.
dgram = addr = None
while not dgram:
self._in_sock.settimeout(1)
try:
dgram, addr = self._in_sock.recvfrom(_BUFSIZE)
except socket.error as e:
time.sleep(1)
self._socksock.send(dgram)
self._in_sock.connect(addr)
        # Set forward_data and start forwarding.
forward_data = {self._socksock.get_udp_sock():[], self._in_sock:[],
self._socksock:[]}
self._forwarder.forward(forward_data, self._forwarder.udp_client_recv,
self._forwarder.udp_client_send)
self.close_sockets()
self._set_sockets()
def close_sockets(self):
"""Close incoming and outgoing sockets."""
if self._in_sock:
self._in_sock.close()
if self._socksock:
self._socksock.close()
def _set_sockets(self):
"""Set the incomming and outgoing sockets."""
socktype = socket.SOCK_DGRAM if self.udp else socket.SOCK_STREAM
# Check remote address family and set SockSocket.
family = socket.getaddrinfo(*self.local_address)[0][0]
self._socksock = SockSocket(family, socktype)
# Check remote address family and set incomming traffic socket.
family = socket.getaddrinfo(*self.local_address)[0][0]
self._in_sock = socket.socket(family, socktype)
# Modify forwarder as needed.
self._forwarder = Forwarder(self.local_address, self._socksock,
self.key_event, self.version)
self.connected = False
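# A minimal usage sketch (hedged: all addresses and the SOCKS server below
# are illustrative):
#
#     client = SocksClient(("127.0.0.1", 9000), ("example.com", 80),
#                          socks_chain=[("127.0.0.1", 1080)])
#     client.connect_socks_chain()
#     client.tcp_forward()  # forward local TCP traffic through the tunnel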
|
{
"content_hash": "96038df17a8a0fced9dc465ae0c525d9",
"timestamp": "",
"source": "github",
"line_count": 300,
"max_line_length": 80,
"avg_line_length": 36.596666666666664,
"alnum_prop": 0.5659896165406686,
"repo_name": "NelyusC/SockstPy",
"id": "64ee41ff667ae74489bc6f04ba65745ffb6507ab",
"size": "11318",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "sockstpy/socksclient.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "93652"
}
],
"symlink_target": ""
}
|
from __future__ import absolute_import
from distutils.core import setup
setup(name='autograd',
version='1.0.4',
description='Efficiently computes derivatives of numpy code.',
author='Dougal Maclaurin and David Duvenaud',
author_email="maclaurin@physics.harvard.edu, dduvenaud@seas.harvard.edu",
packages=['autograd', 'autograd.numpy', 'autograd.scipy', 'autograd.scipy.stats'],
install_requires=['numpy>=1.8', 'six'],
keywords=['Automatic differentiation', 'backpropagation', 'gradients',
'machine learning', 'optimization', 'neural networks',
'Python', 'Numpy', 'Scipy'],
url='https://github.com/HIPS/autograd',
license='MIT',
classifiers=['Development Status :: 4 - Beta',
'License :: OSI Approved :: MIT License',
'Programming Language :: Python :: 2.7',
'Programming Language :: Python :: 3.4'])
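# A minimal usage sketch of the installed package (hedged, illustrative):
#
#     import autograd.numpy as np
#     from autograd import grad
#
#     g = grad(lambda x: np.sin(x) ** 2)
#     g(1.0)  # derivative of sin(x)**2 evaluated at x = 1.0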
|
{
"content_hash": "6752a8ae4494a1472956ae023ebbc2d1",
"timestamp": "",
"source": "github",
"line_count": 18,
"max_line_length": 88,
"avg_line_length": 52.72222222222222,
"alnum_prop": 0.6174920969441517,
"repo_name": "melgor/autograd",
"id": "6175f4f175815195656c6fec63f7969a5edff536",
"size": "949",
"binary": false,
"copies": "4",
"ref": "refs/heads/master",
"path": "setup.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "149058"
}
],
"symlink_target": ""
}
|
import vk
import json
from sentiment_classifiers import SentimentClassifier, binary_dict, files
class VkFeatureProvider(object):
def __init__(self):
self._vk_api = vk.API(vk.Session())
self._vk_delay = 0.3
self._clf = SentimentClassifier(files['binary_goods'], binary_dict)
def _vk_grace(self):
import time
time.sleep(self._vk_delay)
def get_news(self, sources, amount=10):
        # Entry point for Alex's analysis tool
result = []
for source in sources:
try:
data = self._vk_api.wall.get(domain=source, count=amount, extended=1, fields='name')
self._vk_grace()
except:
return {}
news = []
for node in data['wall'][1:]:
try:
if node['post_type'] != 'post':
continue
text = node['text']
#print('{}'.format(text.encode('utf-8')))
rate = self._clf.predict_text(text)[0]
news.append({'text' : '{}'.format(text.encode('utf-8')), 'rate' : rate})
except Exception as e:
print('Exception: {}'.format(e))
result.append({'source': data['groups'][0]['name'], 'news': news})
#return json.dumps(result)
return result
    # NOTE: a completely different feature, personally very useful to me
def friends_intersect(self, uid_list):
result = None
try:
result = set(self._vk_api.friends.get(user_id=uid_list[0]))
self._vk_grace()
except:
pass
for i, uid in enumerate(uid_list[1:]):
try:
tmp = set(self._vk_api.friends.get(user_id=uid))
self._vk_grace()
except:
continue
if result is not None:
result = result.intersection(tmp)
else:
result = tmp
return result
def get_user_info(self, entry_uid, fname=None, lname=None):
try:
friend_list = self._vk_api.friends.get(user_id=entry_uid, fields='personal', name_case='nom')
self._vk_grace()
except:
return []
return [x for x in friend_list
if (not fname or fname in x['first_name']) and (not lname or lname in x['last_name'])]
def get_uid_set_info(self, uid_set):
result = []
for friend_uid in uid_set:
try:
friend = self._vk_api.users.get(user_id=friend_uid, fields='sex,personal', name_case='nom')
self._vk_grace()
except:
continue
result.append(friend)
return result
if __name__ == '__main__':
provider = VkFeatureProvider()
res = provider.get_news(['scientific.american'], 5)
print(res)
|
{
"content_hash": "a76270aeea96a894dc4408967d74966e",
"timestamp": "",
"source": "github",
"line_count": 87,
"max_line_length": 107,
"avg_line_length": 33.35632183908046,
"alnum_prop": 0.5099931082012406,
"repo_name": "ArtemMIPT/sentiment_analysis",
"id": "7a0a7dc06cfe64950c0796cccdb010306e76a8bb",
"size": "2902",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "vk_parser.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "8943"
},
{
"name": "HTML",
"bytes": "13415"
},
{
"name": "JavaScript",
"bytes": "2181"
},
{
"name": "Python",
"bytes": "12007"
}
],
"symlink_target": ""
}
|
LOWER_VOWELS = 'aeiou'
# def shortcut(string):
# return filter(lambda a: a not in LOWER_VOWELS, string)
def shortcut(string):
return string.translate(None, LOWER_VOWELS)
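# Note: str.translate(None, chars) is Python 2 only. A sketch that also works
# on Python 3 (assumption: the input is a str):
# def shortcut_py3(string):
#     return string.translate(str.maketrans('', '', LOWER_VOWELS))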
|
{
"content_hash": "a23b4d28cda0827f5a4f9861a2495fb2",
"timestamp": "",
"source": "github",
"line_count": 9,
"max_line_length": 60,
"avg_line_length": 20.22222222222222,
"alnum_prop": 0.7032967032967034,
"repo_name": "the-zebulan/CodeWars",
"id": "2286f67c5af14e2cca0f2b6cd5503c97bda23c31",
"size": "182",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "katas/kyu_8/vowel_remover.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "1203000"
}
],
"symlink_target": ""
}
|
def static_vars(**kwargs):
def decorate(func):
for k in kwargs:
setattr(func, k, kwargs[k])
return func
return decorate
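if __name__ == '__main__':
    # A minimal usage sketch: attach a persistent counter to a function.
    @static_vars(counter=0)
    def visit():
        visit.counter += 1
        return visit.counter

    assert visit() == 1
    assert visit() == 2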
|
{
"content_hash": "f363dbeab0e26fe25eadf7789c2bb245",
"timestamp": "",
"source": "github",
"line_count": 6,
"max_line_length": 39,
"avg_line_length": 26,
"alnum_prop": 0.5705128205128205,
"repo_name": "aholmback/fuse",
"id": "77da2f98a7f693f22e2126b4982796d61bdd08d0",
"size": "157",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "fuse/utils/decorators.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "47663"
}
],
"symlink_target": ""
}
|
import logging
from oslo_config import cfg
import redis
from oslo_messaging._drivers.zmq_driver.matchmaker import base
LOG = logging.getLogger(__name__)
matchmaker_redis_opts = [
cfg.StrOpt('host',
default='127.0.0.1',
help='Host to locate redis.'),
cfg.IntOpt('port',
default=6379,
help='Use this port to connect to redis host.'),
cfg.StrOpt('password',
default='',
secret=True,
help='Password for Redis server (optional).'),
]
class RedisMatchMaker(base.MatchMakerBase):
def __init__(self, conf, *args, **kwargs):
super(RedisMatchMaker, self).__init__(conf, *args, **kwargs)
self.conf.register_opts(matchmaker_redis_opts, "matchmaker_redis")
self._redis = redis.StrictRedis(
host=self.conf.matchmaker_redis.host,
port=self.conf.matchmaker_redis.port,
password=self.conf.matchmaker_redis.password,
)
def _target_to_key(self, target):
attributes = ['topic', 'exchange', 'server']
prefix = "ZMQ-target"
key = ":".join((getattr(target, attr) or "*") for attr in attributes)
return "%s-%s" % (prefix, key)
def _get_keys_by_pattern(self, pattern):
return self._redis.keys(pattern)
def _get_hosts_by_key(self, key):
return self._redis.lrange(key, 0, -1)
def register(self, target, hostname):
key = self._target_to_key(target)
if hostname not in self._get_hosts_by_key(key):
self._redis.lpush(key, hostname)
def get_hosts(self, target):
pattern = self._target_to_key(target)
if "*" not in pattern:
            # pattern has no placeholders, so this is a valid key
return self._get_hosts_by_key(pattern)
hosts = []
for key in self._get_keys_by_pattern(pattern):
hosts.extend(self._get_hosts_by_key(key))
return hosts
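# A minimal usage sketch (hedged: Target comes from oslo.messaging, conf from
# oslo.config, and a Redis server must be reachable at the configured host):
#
#     from oslo_config import cfg
#     from oslo_messaging import Target
#
#     matchmaker = RedisMatchMaker(cfg.CONF)
#     matchmaker.register(Target(topic='compute', server='host1'), 'host1')
#     matchmaker.get_hosts(Target(topic='compute'))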
|
{
"content_hash": "e7c3a002cc8c03c8c9c27b87835f784f",
"timestamp": "",
"source": "github",
"line_count": 64,
"max_line_length": 77,
"avg_line_length": 30.84375,
"alnum_prop": 0.5901722391084093,
"repo_name": "esse-io/zen-common",
"id": "a2ee9bccfa3aaa1d1c29d0e955d6b6df66e5119b",
"size": "2549",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "oslo-modules/oslo_messaging/_drivers/zmq_driver/matchmaker/matchmaker_redis.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "1155074"
}
],
"symlink_target": ""
}
|
from south.utils import datetime_utils as datetime
from south.db import db
from south.v2 import SchemaMigration
from django.db import models
from sentry.utils.db import is_mysql
class Migration(SchemaMigration):
def forwards(self, orm):
# Adding model 'DSymSymbol'
db.create_table('sentry_dsymsymbol', (
('id', self.gf('sentry.db.models.fields.bounded.BoundedBigAutoField')(primary_key=True)),
('object', self.gf('sentry.db.models.fields.foreignkey.FlexibleForeignKey')(to=orm['sentry.DSymObject'])),
('address', self.gf('sentry.db.models.fields.bounded.BoundedBigIntegerField')(db_index=True)),
('symbol', self.gf('django.db.models.fields.TextField')()),
))
db.send_create_signal('sentry', ['DSymSymbol'])
# Adding unique constraint on 'DSymSymbol', fields ['object', 'address']
db.create_unique('sentry_dsymsymbol', ['object_id', 'address'])
# Adding model 'DSymSDK'
db.create_table('sentry_dsymsdk', (
('id', self.gf('sentry.db.models.fields.bounded.BoundedBigAutoField')(primary_key=True)),
('dsym_type', self.gf('django.db.models.fields.CharField')(max_length=20, db_index=True)),
('sdk_name', self.gf('django.db.models.fields.CharField')(max_length=20)),
('version_major', self.gf('django.db.models.fields.IntegerField')()),
('version_minor', self.gf('django.db.models.fields.IntegerField')()),
('version_patchlevel', self.gf('django.db.models.fields.IntegerField')()),
('version_build', self.gf('django.db.models.fields.CharField')(max_length=40)),
))
db.send_create_signal('sentry', ['DSymSDK'])
# Adding index on 'DSymSDK', fields ['version_major', 'version_minor', 'version_patchlevel', 'version_build']
db.create_index('sentry_dsymsdk', ['version_major', 'version_minor', 'version_patchlevel', 'version_build'])
# Adding model 'DSymObject'
db.create_table('sentry_dsymobject', (
('id', self.gf('sentry.db.models.fields.bounded.BoundedBigAutoField')(primary_key=True)),
('cpu_name', self.gf('django.db.models.fields.CharField')(max_length=40)),
('object_path', self.gf('django.db.models.fields.TextField')(db_index=not is_mysql())),
('uuid', self.gf('django.db.models.fields.CharField')(max_length=36, db_index=True)),
('vmaddr', self.gf('sentry.db.models.fields.bounded.BoundedBigIntegerField')(null=True)),
('vmsize', self.gf('sentry.db.models.fields.bounded.BoundedBigIntegerField')(null=True)),
))
        # On MySQL we need to create the index differently because the
        # index must have a length limit. As the column type is already
        # defined as text in the model above, we simply restrict the index
        # to 255 characters. The hash matches what south would have created.
if is_mysql():
db.execute('''
create index sentry_dsymobject_39c06cbd
on sentry_dsymobject (object_path(255))
''')
db.send_create_signal('sentry', ['DSymObject'])
# Adding model 'DSymBundle'
db.create_table('sentry_dsymbundle', (
('id', self.gf('sentry.db.models.fields.bounded.BoundedBigAutoField')(primary_key=True)),
('sdk', self.gf('sentry.db.models.fields.foreignkey.FlexibleForeignKey')(to=orm['sentry.DSymSDK'])),
('object', self.gf('sentry.db.models.fields.foreignkey.FlexibleForeignKey')(to=orm['sentry.DSymObject'])),
))
db.send_create_signal('sentry', ['DSymBundle'])
def backwards(self, orm):
# Removing index on 'DSymSDK', fields ['version_major', 'version_minor', 'version_patchlevel', 'version_build']
db.delete_index('sentry_dsymsdk', ['version_major', 'version_minor', 'version_patchlevel', 'version_build'])
# Removing unique constraint on 'DSymSymbol', fields ['object', 'address']
db.delete_unique('sentry_dsymsymbol', ['object_id', 'address'])
# Deleting model 'DSymSymbol'
db.delete_table('sentry_dsymsymbol')
# Deleting model 'DSymSDK'
db.delete_table('sentry_dsymsdk')
# Deleting model 'DSymObject'
db.delete_table('sentry_dsymobject')
# Deleting model 'DSymBundle'
db.delete_table('sentry_dsymbundle')
models = {
'sentry.activity': {
'Meta': {'object_name': 'Activity'},
'data': ('sentry.db.models.fields.gzippeddict.GzippedDictField', [], {'null': 'True'}),
'datetime': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'group': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.Group']", 'null': 'True'}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'ident': ('django.db.models.fields.CharField', [], {'max_length': '64', 'null': 'True'}),
'project': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.Project']"}),
'type': ('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {}),
'user': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.User']", 'null': 'True'})
},
'sentry.apikey': {
'Meta': {'object_name': 'ApiKey'},
'allowed_origins': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'date_added': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'key': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '32'}),
'label': ('django.db.models.fields.CharField', [], {'default': "'Default'", 'max_length': '64', 'blank': 'True'}),
'organization': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'related_name': "'key_set'", 'to': "orm['sentry.Organization']"}),
'scopes': ('django.db.models.fields.BigIntegerField', [], {'default': 'None'}),
'status': ('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {'default': '0', 'db_index': 'True'})
},
'sentry.auditlogentry': {
'Meta': {'object_name': 'AuditLogEntry'},
'actor': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'blank': 'True', 'related_name': "'audit_actors'", 'null': 'True', 'to': "orm['sentry.User']"}),
'actor_key': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.ApiKey']", 'null': 'True', 'blank': 'True'}),
'actor_label': ('django.db.models.fields.CharField', [], {'max_length': '64', 'null': 'True', 'blank': 'True'}),
'data': ('sentry.db.models.fields.gzippeddict.GzippedDictField', [], {}),
'datetime': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'event': ('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'ip_address': ('django.db.models.fields.GenericIPAddressField', [], {'max_length': '39', 'null': 'True'}),
'organization': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.Organization']"}),
'target_object': ('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {'null': 'True'}),
'target_user': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'blank': 'True', 'related_name': "'audit_targets'", 'null': 'True', 'to': "orm['sentry.User']"})
},
'sentry.authidentity': {
'Meta': {'unique_together': "(('auth_provider', 'ident'), ('auth_provider', 'user'))", 'object_name': 'AuthIdentity'},
'auth_provider': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.AuthProvider']"}),
'data': ('jsonfield.fields.JSONField', [], {'default': '{}'}),
'date_added': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'ident': ('django.db.models.fields.CharField', [], {'max_length': '128'}),
'last_synced': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'last_verified': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'user': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.User']"})
},
'sentry.authprovider': {
'Meta': {'object_name': 'AuthProvider'},
'config': ('jsonfield.fields.JSONField', [], {'default': '{}'}),
'date_added': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'default_global_access': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'default_role': ('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {'default': '50'}),
'default_teams': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['sentry.Team']", 'symmetrical': 'False', 'blank': 'True'}),
'flags': ('django.db.models.fields.BigIntegerField', [], {'default': '0'}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'last_sync': ('django.db.models.fields.DateTimeField', [], {'null': 'True'}),
'organization': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.Organization']", 'unique': 'True'}),
'provider': ('django.db.models.fields.CharField', [], {'max_length': '128'}),
'sync_time': ('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {'null': 'True'})
},
'sentry.broadcast': {
'Meta': {'object_name': 'Broadcast'},
'date_added': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'date_expires': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime(2016, 4, 14, 0, 0)', 'null': 'True', 'blank': 'True'}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'is_active': ('django.db.models.fields.BooleanField', [], {'default': 'True', 'db_index': 'True'}),
'link': ('django.db.models.fields.URLField', [], {'max_length': '200', 'null': 'True', 'blank': 'True'}),
'message': ('django.db.models.fields.CharField', [], {'max_length': '256'}),
'title': ('django.db.models.fields.CharField', [], {'max_length': '32'}),
'upstream_id': ('django.db.models.fields.CharField', [], {'max_length': '32', 'null': 'True', 'blank': 'True'})
},
'sentry.broadcastseen': {
'Meta': {'unique_together': "(('broadcast', 'user'),)", 'object_name': 'BroadcastSeen'},
'broadcast': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.Broadcast']"}),
'date_seen': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'user': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.User']"})
},
'sentry.counter': {
'Meta': {'object_name': 'Counter', 'db_table': "'sentry_projectcounter'"},
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'project': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.Project']", 'unique': 'True'}),
'value': ('sentry.db.models.fields.bounded.BoundedBigIntegerField', [], {})
},
'sentry.dsymbundle': {
'Meta': {'object_name': 'DSymBundle'},
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'object': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.DSymObject']"}),
'sdk': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.DSymSDK']"})
},
'sentry.dsymobject': {
'Meta': {'object_name': 'DSymObject'},
'cpu_name': ('django.db.models.fields.CharField', [], {'max_length': '40'}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'object_path': ('django.db.models.fields.TextField', [], {'db_index': 'True'}),
'uuid': ('django.db.models.fields.CharField', [], {'max_length': '36', 'db_index': 'True'}),
'vmaddr': ('sentry.db.models.fields.bounded.BoundedBigIntegerField', [], {'null': 'True'}),
'vmsize': ('sentry.db.models.fields.bounded.BoundedBigIntegerField', [], {'null': 'True'})
},
'sentry.dsymsdk': {
'Meta': {'object_name': 'DSymSDK', 'index_together': "[('version_major', 'version_minor', 'version_patchlevel', 'version_build')]"},
'dsym_type': ('django.db.models.fields.CharField', [], {'max_length': '20', 'db_index': 'True'}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'sdk_name': ('django.db.models.fields.CharField', [], {'max_length': '20'}),
'version_build': ('django.db.models.fields.CharField', [], {'max_length': '40'}),
'version_major': ('django.db.models.fields.IntegerField', [], {}),
'version_minor': ('django.db.models.fields.IntegerField', [], {}),
'version_patchlevel': ('django.db.models.fields.IntegerField', [], {})
},
'sentry.dsymsymbol': {
'Meta': {'unique_together': "[('object', 'address')]", 'object_name': 'DSymSymbol'},
'address': ('sentry.db.models.fields.bounded.BoundedBigIntegerField', [], {'db_index': 'True'}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'object': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.DSymObject']"}),
'symbol': ('django.db.models.fields.TextField', [], {})
},
'sentry.event': {
'Meta': {'unique_together': "(('project_id', 'event_id'),)", 'object_name': 'Event', 'db_table': "'sentry_message'", 'index_together': "(('group_id', 'datetime'),)"},
'data': ('sentry.db.models.fields.node.NodeField', [], {'null': 'True', 'blank': 'True'}),
'datetime': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now', 'db_index': 'True'}),
'event_id': ('django.db.models.fields.CharField', [], {'max_length': '32', 'null': 'True', 'db_column': "'message_id'"}),
'group_id': ('sentry.db.models.fields.bounded.BoundedBigIntegerField', [], {'null': 'True', 'blank': 'True'}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'message': ('django.db.models.fields.TextField', [], {}),
'platform': ('django.db.models.fields.CharField', [], {'max_length': '64', 'null': 'True'}),
'project_id': ('sentry.db.models.fields.bounded.BoundedBigIntegerField', [], {'null': 'True', 'blank': 'True'}),
'time_spent': ('sentry.db.models.fields.bounded.BoundedIntegerField', [], {'null': 'True'})
},
'sentry.eventmapping': {
'Meta': {'unique_together': "(('project_id', 'event_id'),)", 'object_name': 'EventMapping'},
'date_added': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'event_id': ('django.db.models.fields.CharField', [], {'max_length': '32'}),
'group_id': ('sentry.db.models.fields.bounded.BoundedBigIntegerField', [], {}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'project_id': ('sentry.db.models.fields.bounded.BoundedBigIntegerField', [], {})
},
'sentry.eventtag': {
'Meta': {'unique_together': "(('event_id', 'key_id', 'value_id'),)", 'object_name': 'EventTag'},
'date_added': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'event_id': ('sentry.db.models.fields.bounded.BoundedBigIntegerField', [], {}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'key_id': ('sentry.db.models.fields.bounded.BoundedBigIntegerField', [], {}),
'project_id': ('sentry.db.models.fields.bounded.BoundedBigIntegerField', [], {}),
'value_id': ('sentry.db.models.fields.bounded.BoundedBigIntegerField', [], {})
},
'sentry.eventuser': {
'Meta': {'unique_together': "(('project', 'ident'), ('project', 'hash'))", 'object_name': 'EventUser', 'index_together': "(('project', 'email'), ('project', 'username'), ('project', 'ip_address'))"},
'date_added': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now', 'db_index': 'True'}),
'email': ('django.db.models.fields.EmailField', [], {'max_length': '75', 'null': 'True'}),
'hash': ('django.db.models.fields.CharField', [], {'max_length': '32'}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'ident': ('django.db.models.fields.CharField', [], {'max_length': '128', 'null': 'True'}),
'ip_address': ('django.db.models.fields.GenericIPAddressField', [], {'max_length': '39', 'null': 'True'}),
'project': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.Project']"}),
'username': ('django.db.models.fields.CharField', [], {'max_length': '128', 'null': 'True'})
},
'sentry.file': {
'Meta': {'object_name': 'File'},
'blob': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'related_name': "'legacy_blob'", 'null': 'True', 'to': "orm['sentry.FileBlob']"}),
'blobs': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['sentry.FileBlob']", 'through': "orm['sentry.FileBlobIndex']", 'symmetrical': 'False'}),
'checksum': ('django.db.models.fields.CharField', [], {'max_length': '40', 'null': 'True'}),
'headers': ('jsonfield.fields.JSONField', [], {'default': '{}'}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '128'}),
'path': ('django.db.models.fields.TextField', [], {'null': 'True'}),
'size': ('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {'null': 'True'}),
'timestamp': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now', 'db_index': 'True'}),
'type': ('django.db.models.fields.CharField', [], {'max_length': '64'})
},
'sentry.fileblob': {
'Meta': {'object_name': 'FileBlob'},
'checksum': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '40'}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'path': ('django.db.models.fields.TextField', [], {'null': 'True'}),
'size': ('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {'null': 'True'}),
'timestamp': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now', 'db_index': 'True'})
},
'sentry.fileblobindex': {
'Meta': {'unique_together': "(('file', 'blob', 'offset'),)", 'object_name': 'FileBlobIndex'},
'blob': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.FileBlob']"}),
'file': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.File']"}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'offset': ('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {})
},
'sentry.globaldsymfile': {
'Meta': {'object_name': 'GlobalDSymFile'},
'cpu_name': ('django.db.models.fields.CharField', [], {'max_length': '40'}),
'file': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.File']"}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'object_name': ('django.db.models.fields.TextField', [], {}),
'uuid': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '36'})
},
'sentry.group': {
'Meta': {'unique_together': "(('project', 'short_id'),)", 'object_name': 'Group', 'db_table': "'sentry_groupedmessage'", 'index_together': "(('project', 'first_release'),)"},
'active_at': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'db_index': 'True'}),
'culprit': ('django.db.models.fields.CharField', [], {'max_length': '200', 'null': 'True', 'db_column': "'view'", 'blank': 'True'}),
'data': ('sentry.db.models.fields.gzippeddict.GzippedDictField', [], {'null': 'True', 'blank': 'True'}),
'first_release': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.Release']", 'null': 'True', 'on_delete': 'models.PROTECT'}),
'first_seen': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now', 'db_index': 'True'}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'is_public': ('django.db.models.fields.NullBooleanField', [], {'default': 'False', 'null': 'True', 'blank': 'True'}),
'last_seen': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now', 'db_index': 'True'}),
'level': ('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {'default': '40', 'db_index': 'True', 'blank': 'True'}),
'logger': ('django.db.models.fields.CharField', [], {'default': "''", 'max_length': '64', 'db_index': 'True', 'blank': 'True'}),
'message': ('django.db.models.fields.TextField', [], {}),
'num_comments': ('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {'default': '0', 'null': 'True'}),
'platform': ('django.db.models.fields.CharField', [], {'max_length': '64', 'null': 'True'}),
'project': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.Project']", 'null': 'True'}),
'resolved_at': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'db_index': 'True'}),
'score': ('sentry.db.models.fields.bounded.BoundedIntegerField', [], {'default': '0'}),
'short_id': ('sentry.db.models.fields.bounded.BoundedBigIntegerField', [], {'null': 'True'}),
'status': ('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {'default': '0', 'db_index': 'True'}),
'time_spent_count': ('sentry.db.models.fields.bounded.BoundedIntegerField', [], {'default': '0'}),
'time_spent_total': ('sentry.db.models.fields.bounded.BoundedIntegerField', [], {'default': '0'}),
'times_seen': ('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {'default': '1', 'db_index': 'True'})
},
'sentry.groupassignee': {
'Meta': {'object_name': 'GroupAssignee', 'db_table': "'sentry_groupasignee'"},
'date_added': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'group': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'related_name': "'assignee_set'", 'unique': 'True', 'to': "orm['sentry.Group']"}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'project': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'related_name': "'assignee_set'", 'to': "orm['sentry.Project']"}),
'user': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'related_name': "'sentry_assignee_set'", 'to': "orm['sentry.User']"})
},
'sentry.groupbookmark': {
'Meta': {'unique_together': "(('project', 'user', 'group'),)", 'object_name': 'GroupBookmark'},
'date_added': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now', 'null': 'True'}),
'group': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'related_name': "'bookmark_set'", 'to': "orm['sentry.Group']"}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'project': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'related_name': "'bookmark_set'", 'to': "orm['sentry.Project']"}),
'user': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'related_name': "'sentry_bookmark_set'", 'to': "orm['sentry.User']"})
},
'sentry.groupemailthread': {
'Meta': {'unique_together': "(('email', 'group'), ('email', 'msgid'))", 'object_name': 'GroupEmailThread'},
'date': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now', 'db_index': 'True'}),
'email': ('django.db.models.fields.EmailField', [], {'max_length': '75'}),
'group': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'related_name': "'groupemail_set'", 'to': "orm['sentry.Group']"}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'msgid': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'project': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'related_name': "'groupemail_set'", 'to': "orm['sentry.Project']"})
},
'sentry.grouphash': {
'Meta': {'unique_together': "(('project', 'hash'),)", 'object_name': 'GroupHash'},
'group': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.Group']", 'null': 'True'}),
'hash': ('django.db.models.fields.CharField', [], {'max_length': '32'}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'project': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.Project']", 'null': 'True'})
},
'sentry.groupmeta': {
'Meta': {'unique_together': "(('group', 'key'),)", 'object_name': 'GroupMeta'},
'group': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.Group']"}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'key': ('django.db.models.fields.CharField', [], {'max_length': '64'}),
'value': ('django.db.models.fields.TextField', [], {})
},
'sentry.groupredirect': {
'Meta': {'object_name': 'GroupRedirect'},
'group_id': ('sentry.db.models.fields.bounded.BoundedBigIntegerField', [], {'db_index': 'True'}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'previous_group_id': ('sentry.db.models.fields.bounded.BoundedBigIntegerField', [], {'unique': 'True'})
},
'sentry.groupresolution': {
'Meta': {'object_name': 'GroupResolution'},
'datetime': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now', 'db_index': 'True'}),
'group': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.Group']", 'unique': 'True'}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'release': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.Release']"}),
'status': ('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {'default': '0'})
},
'sentry.grouprulestatus': {
'Meta': {'unique_together': "(('rule', 'group'),)", 'object_name': 'GroupRuleStatus'},
'date_added': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'group': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.Group']"}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'last_active': ('django.db.models.fields.DateTimeField', [], {'null': 'True'}),
'project': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.Project']"}),
'rule': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.Rule']"}),
'status': ('django.db.models.fields.PositiveSmallIntegerField', [], {'default': '0'})
},
'sentry.groupseen': {
'Meta': {'unique_together': "(('user', 'group'),)", 'object_name': 'GroupSeen'},
'group': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.Group']"}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'last_seen': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'project': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.Project']"}),
'user': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.User']", 'db_index': 'False'})
},
'sentry.groupsnooze': {
'Meta': {'object_name': 'GroupSnooze'},
'group': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.Group']", 'unique': 'True'}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'until': ('django.db.models.fields.DateTimeField', [], {})
},
'sentry.grouptagkey': {
'Meta': {'unique_together': "(('project', 'group', 'key'),)", 'object_name': 'GroupTagKey'},
'group': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.Group']"}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'key': ('django.db.models.fields.CharField', [], {'max_length': '32'}),
'project': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.Project']", 'null': 'True'}),
'values_seen': ('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {'default': '0'})
},
'sentry.grouptagvalue': {
'Meta': {'unique_together': "(('project', 'key', 'value', 'group'),)", 'object_name': 'GroupTagValue', 'db_table': "'sentry_messagefiltervalue'"},
'first_seen': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now', 'null': 'True', 'db_index': 'True'}),
'group': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'related_name': "'grouptag'", 'to': "orm['sentry.Group']"}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'key': ('django.db.models.fields.CharField', [], {'max_length': '32'}),
'last_seen': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now', 'null': 'True', 'db_index': 'True'}),
'project': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'related_name': "'grouptag'", 'null': 'True', 'to': "orm['sentry.Project']"}),
'times_seen': ('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {'default': '0'}),
'value': ('django.db.models.fields.CharField', [], {'max_length': '200'})
},
'sentry.helppage': {
'Meta': {'object_name': 'HelpPage'},
'content': ('django.db.models.fields.TextField', [], {}),
'date_added': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'is_visible': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'key': ('django.db.models.fields.CharField', [], {'max_length': '64', 'unique': 'True', 'null': 'True'}),
'priority': ('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {'default': '50'}),
'title': ('django.db.models.fields.CharField', [], {'max_length': '64'})
},
'sentry.lostpasswordhash': {
'Meta': {'object_name': 'LostPasswordHash'},
'date_added': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'hash': ('django.db.models.fields.CharField', [], {'max_length': '32'}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'user': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.User']", 'unique': 'True'})
},
'sentry.option': {
'Meta': {'object_name': 'Option'},
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'key': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '64'}),
'last_updated': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'value': ('sentry.db.models.fields.pickle.UnicodePickledObjectField', [], {})
},
'sentry.organization': {
'Meta': {'object_name': 'Organization'},
'date_added': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'default_role': ('django.db.models.fields.CharField', [], {'default': "'member'", 'max_length': '32'}),
'flags': ('django.db.models.fields.BigIntegerField', [], {'default': '1'}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'members': ('django.db.models.fields.related.ManyToManyField', [], {'related_name': "'org_memberships'", 'symmetrical': 'False', 'through': "orm['sentry.OrganizationMember']", 'to': "orm['sentry.User']"}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '64'}),
'slug': ('django.db.models.fields.SlugField', [], {'unique': 'True', 'max_length': '50'}),
'status': ('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {'default': '0'})
},
'sentry.organizationaccessrequest': {
'Meta': {'unique_together': "(('team', 'member'),)", 'object_name': 'OrganizationAccessRequest'},
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'member': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.OrganizationMember']"}),
'team': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.Team']"})
},
'sentry.organizationmember': {
'Meta': {'unique_together': "(('organization', 'user'), ('organization', 'email'))", 'object_name': 'OrganizationMember'},
'counter': ('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {'null': 'True', 'blank': 'True'}),
'date_added': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'email': ('django.db.models.fields.EmailField', [], {'max_length': '75', 'null': 'True', 'blank': 'True'}),
'flags': ('django.db.models.fields.BigIntegerField', [], {'default': '0'}),
'has_global_access': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'organization': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'related_name': "'member_set'", 'to': "orm['sentry.Organization']"}),
'role': ('django.db.models.fields.CharField', [], {'default': "'member'", 'max_length': '32'}),
'teams': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['sentry.Team']", 'symmetrical': 'False', 'through': "orm['sentry.OrganizationMemberTeam']", 'blank': 'True'}),
'type': ('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {'default': '50', 'blank': 'True'}),
'user': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'blank': 'True', 'related_name': "'sentry_orgmember_set'", 'null': 'True', 'to': "orm['sentry.User']"})
},
'sentry.organizationmemberteam': {
'Meta': {'unique_together': "(('team', 'organizationmember'),)", 'object_name': 'OrganizationMemberTeam', 'db_table': "'sentry_organizationmember_teams'"},
'id': ('sentry.db.models.fields.bounded.BoundedAutoField', [], {'primary_key': 'True'}),
'is_active': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'organizationmember': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.OrganizationMember']"}),
'team': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.Team']"})
},
'sentry.organizationonboardingtask': {
'Meta': {'unique_together': "(('organization', 'task'),)", 'object_name': 'OrganizationOnboardingTask'},
'data': ('jsonfield.fields.JSONField', [], {'default': '{}'}),
'date_completed': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'organization': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.Organization']"}),
'project_id': ('sentry.db.models.fields.bounded.BoundedBigIntegerField', [], {'null': 'True', 'blank': 'True'}),
'status': ('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {}),
'task': ('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {}),
'user': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.User']", 'null': 'True'})
},
'sentry.organizationoption': {
'Meta': {'unique_together': "(('organization', 'key'),)", 'object_name': 'OrganizationOption', 'db_table': "'sentry_organizationoptions'"},
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'key': ('django.db.models.fields.CharField', [], {'max_length': '64'}),
'organization': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.Organization']"}),
'value': ('sentry.db.models.fields.pickle.UnicodePickledObjectField', [], {})
},
'sentry.project': {
'Meta': {'unique_together': "(('team', 'slug'), ('organization', 'slug'))", 'object_name': 'Project'},
'date_added': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'first_event': ('django.db.models.fields.DateTimeField', [], {'null': 'True'}),
'forced_color': ('django.db.models.fields.CharField', [], {'max_length': '6', 'null': 'True'}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '200'}),
'organization': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.Organization']"}),
'public': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'slug': ('django.db.models.fields.SlugField', [], {'max_length': '50', 'null': 'True'}),
'status': ('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {'default': '0', 'db_index': 'True'}),
'team': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.Team']"})
},
'sentry.projectbookmark': {
'Meta': {'unique_together': "(('project_id', 'user'),)", 'object_name': 'ProjectBookmark'},
'date_added': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now', 'null': 'True'}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'project_id': ('sentry.db.models.fields.bounded.BoundedBigIntegerField', [], {'null': 'True', 'blank': 'True'}),
'user': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.User']"})
},
'sentry.projectdsymfile': {
'Meta': {'unique_together': "(('project', 'uuid'),)", 'object_name': 'ProjectDSymFile'},
'cpu_name': ('django.db.models.fields.CharField', [], {'max_length': '40'}),
'file': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.File']"}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'object_name': ('django.db.models.fields.TextField', [], {}),
'project': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.Project']", 'null': 'True'}),
'uuid': ('django.db.models.fields.CharField', [], {'max_length': '36'})
},
'sentry.projectkey': {
'Meta': {'object_name': 'ProjectKey'},
'date_added': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now', 'null': 'True'}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'label': ('django.db.models.fields.CharField', [], {'max_length': '64', 'null': 'True', 'blank': 'True'}),
'project': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'related_name': "'key_set'", 'to': "orm['sentry.Project']"}),
'public_key': ('django.db.models.fields.CharField', [], {'max_length': '32', 'unique': 'True', 'null': 'True'}),
'roles': ('django.db.models.fields.BigIntegerField', [], {'default': '1'}),
'secret_key': ('django.db.models.fields.CharField', [], {'max_length': '32', 'unique': 'True', 'null': 'True'}),
'status': ('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {'default': '0', 'db_index': 'True'})
},
'sentry.projectoption': {
'Meta': {'unique_together': "(('project', 'key'),)", 'object_name': 'ProjectOption', 'db_table': "'sentry_projectoptions'"},
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'key': ('django.db.models.fields.CharField', [], {'max_length': '64'}),
'project': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.Project']"}),
'value': ('sentry.db.models.fields.pickle.UnicodePickledObjectField', [], {})
},
'sentry.release': {
'Meta': {'unique_together': "(('project', 'version'),)", 'object_name': 'Release'},
'data': ('jsonfield.fields.JSONField', [], {'default': '{}'}),
'date_added': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'date_released': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'}),
'date_started': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'new_groups': ('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {'default': '0'}),
'owner': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.User']", 'null': 'True', 'blank': 'True'}),
'project': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.Project']"}),
'ref': ('django.db.models.fields.CharField', [], {'max_length': '64', 'null': 'True', 'blank': 'True'}),
'url': ('django.db.models.fields.URLField', [], {'max_length': '200', 'null': 'True', 'blank': 'True'}),
'version': ('django.db.models.fields.CharField', [], {'max_length': '64'})
},
'sentry.releasefile': {
'Meta': {'unique_together': "(('release', 'ident'),)", 'object_name': 'ReleaseFile'},
'file': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.File']"}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'ident': ('django.db.models.fields.CharField', [], {'max_length': '40'}),
'name': ('django.db.models.fields.TextField', [], {}),
'project': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.Project']"}),
'release': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.Release']"})
},
'sentry.rule': {
'Meta': {'object_name': 'Rule'},
'data': ('sentry.db.models.fields.gzippeddict.GzippedDictField', [], {}),
'date_added': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'label': ('django.db.models.fields.CharField', [], {'max_length': '64'}),
'project': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.Project']"})
},
'sentry.savedsearch': {
'Meta': {'unique_together': "(('project', 'name'),)", 'object_name': 'SavedSearch'},
'date_added': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'is_default': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '128'}),
'project': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.Project']"}),
'query': ('django.db.models.fields.TextField', [], {})
},
'sentry.savedsearchuserdefault': {
'Meta': {'unique_together': "(('project', 'user'),)", 'object_name': 'SavedSearchUserDefault', 'db_table': "'sentry_savedsearch_userdefault'"},
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'project': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.Project']"}),
'savedsearch': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.SavedSearch']"}),
'user': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.User']"})
},
'sentry.tagkey': {
'Meta': {'unique_together': "(('project', 'key'),)", 'object_name': 'TagKey', 'db_table': "'sentry_filterkey'"},
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'key': ('django.db.models.fields.CharField', [], {'max_length': '32'}),
'label': ('django.db.models.fields.CharField', [], {'max_length': '64', 'null': 'True'}),
'project': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.Project']"}),
'status': ('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {'default': '0'}),
'values_seen': ('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {'default': '0'})
},
'sentry.tagvalue': {
'Meta': {'unique_together': "(('project', 'key', 'value'),)", 'object_name': 'TagValue', 'db_table': "'sentry_filtervalue'"},
'data': ('sentry.db.models.fields.gzippeddict.GzippedDictField', [], {'null': 'True', 'blank': 'True'}),
'first_seen': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now', 'null': 'True', 'db_index': 'True'}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'key': ('django.db.models.fields.CharField', [], {'max_length': '32'}),
'last_seen': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now', 'null': 'True', 'db_index': 'True'}),
'project': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.Project']", 'null': 'True'}),
'times_seen': ('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {'default': '0'}),
'value': ('django.db.models.fields.CharField', [], {'max_length': '200'})
},
'sentry.team': {
'Meta': {'unique_together': "(('organization', 'slug'),)", 'object_name': 'Team'},
'date_added': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now', 'null': 'True'}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '64'}),
'organization': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.Organization']"}),
'slug': ('django.db.models.fields.SlugField', [], {'max_length': '50'}),
'status': ('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {'default': '0'})
},
'sentry.user': {
'Meta': {'object_name': 'User', 'db_table': "'auth_user'"},
'date_joined': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'email': ('django.db.models.fields.EmailField', [], {'max_length': '75', 'blank': 'True'}),
'id': ('sentry.db.models.fields.bounded.BoundedAutoField', [], {'primary_key': 'True'}),
'is_active': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'is_managed': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'is_staff': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'is_superuser': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'last_login': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '200', 'db_column': "'first_name'", 'blank': 'True'}),
'password': ('django.db.models.fields.CharField', [], {'max_length': '128'}),
'username': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '128'})
},
'sentry.useroption': {
'Meta': {'unique_together': "(('user', 'project', 'key'),)", 'object_name': 'UserOption'},
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'key': ('django.db.models.fields.CharField', [], {'max_length': '64'}),
'project': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.Project']", 'null': 'True'}),
'user': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.User']"}),
'value': ('sentry.db.models.fields.pickle.UnicodePickledObjectField', [], {})
},
'sentry.userreport': {
'Meta': {'object_name': 'UserReport', 'index_together': "(('project', 'event_id'), ('project', 'date_added'))"},
'comments': ('django.db.models.fields.TextField', [], {}),
'date_added': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'email': ('django.db.models.fields.EmailField', [], {'max_length': '75'}),
'event_id': ('django.db.models.fields.CharField', [], {'max_length': '32'}),
'group': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.Group']", 'null': 'True'}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '128'}),
'project': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.Project']"})
}
}
complete_apps = ['sentry']
|
{
"content_hash": "51a0a2a41e5b99e40297d135fe952bcf",
"timestamp": "",
"source": "github",
"line_count": 641,
"max_line_length": 217,
"avg_line_length": 84.35101404056162,
"alnum_prop": 0.5758752704877101,
"repo_name": "JamesMura/sentry",
"id": "78ab05e1a5058d480b48f1dcbb3cf5f8a96bdb41",
"size": "54093",
"binary": false,
"copies": "7",
"ref": "refs/heads/master",
"path": "src/sentry/south_migrations/0246_auto__add_dsymsymbol__add_unique_dsymsymbol_object_address__add_dsymsd.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "CSS",
"bytes": "264356"
},
{
"name": "HTML",
"bytes": "306533"
},
{
"name": "JavaScript",
"bytes": "1101462"
},
{
"name": "Lua",
"bytes": "51972"
},
{
"name": "Makefile",
"bytes": "6425"
},
{
"name": "Python",
"bytes": "15082023"
},
{
"name": "Ruby",
"bytes": "3867"
},
{
"name": "Shell",
"bytes": "793"
}
],
"symlink_target": ""
}
|
from __future__ import unicode_literals
import time
import json
import uuid
import datetime
import boto3
from moto.core import BaseBackend, BaseModel
from .exceptions import (
ResourceNotFoundException,
InvalidParameterException,
ResourceExistsException,
InvalidRequestException,
ClientError
)
from .utils import random_password, secret_arn
class SecretsManager(BaseModel):
def __init__(self, region_name, **kwargs):
self.region = region_name
class SecretsManagerBackend(BaseBackend):
def __init__(self, region_name=None, **kwargs):
super(SecretsManagerBackend, self).__init__()
self.region = region_name
self.secrets = {}
def reset(self):
region_name = self.region
self.__dict__ = {}
self.__init__(region_name)
def _is_valid_identifier(self, identifier):
return identifier in self.secrets
def _unix_time_secs(self, dt):
epoch = datetime.datetime.utcfromtimestamp(0)
return (dt - epoch).total_seconds()
def get_secret_value(self, secret_id, version_id, version_stage):
if not self._is_valid_identifier(secret_id):
raise ResourceNotFoundException()
if not version_id and version_stage:
# set version_id to match version_stage
versions_dict = self.secrets[secret_id]['versions']
for ver_id, ver_val in versions_dict.items():
if version_stage in ver_val['version_stages']:
version_id = ver_id
break
if not version_id:
raise ResourceNotFoundException()
# TODO check this part
if 'deleted_date' in self.secrets[secret_id]:
raise InvalidRequestException(
"An error occurred (InvalidRequestException) when calling the GetSecretValue operation: You tried to \
perform the operation on a secret that's currently marked deleted."
)
secret = self.secrets[secret_id]
version_id = version_id or secret['default_version_id']
secret_version = secret['versions'][version_id]
response_data = {
"ARN": secret_arn(self.region, secret['secret_id']),
"Name": secret['name'],
"VersionId": secret_version['version_id'],
"VersionStages": secret_version['version_stages'],
"CreatedDate": secret_version['createdate'],
}
if 'secret_string' in secret_version:
response_data["SecretString"] = secret_version['secret_string']
if 'secret_binary' in secret_version:
response_data["SecretBinary"] = secret_version['secret_binary']
response = json.dumps(response_data)
return response
def create_secret(self, name, secret_string=None, secret_binary=None, tags=None, **kwargs):
# raise if a secret with this name already exists
if name in self.secrets:
raise ResourceExistsException('A resource with the ID you requested already exists.')
version_id = self._add_secret(name, secret_string=secret_string, secret_binary=secret_binary, tags=tags)
response = json.dumps({
"ARN": secret_arn(self.region, name),
"Name": name,
"VersionId": version_id,
})
return response
def _add_secret(self, secret_id, secret_string=None, secret_binary=None, tags=None, version_id=None, version_stages=None):
if version_stages is None:
version_stages = ['AWSCURRENT']
if not version_id:
version_id = str(uuid.uuid4())
secret_version = {
'createdate': int(time.time()),
'version_id': version_id,
'version_stages': version_stages,
}
if secret_string is not None:
secret_version['secret_string'] = secret_string
if secret_binary is not None:
secret_version['secret_binary'] = secret_binary
if secret_id in self.secrets:
# remove all old AWSPREVIOUS stages
for secret_version_to_look_at in self.secrets[secret_id]['versions'].values():
if 'AWSPREVIOUS' in secret_version_to_look_at['version_stages']:
secret_version_to_look_at['version_stages'].remove('AWSPREVIOUS')
# set old AWSCURRENT secret to AWSPREVIOUS
previous_current_version_id = self.secrets[secret_id]['default_version_id']
self.secrets[secret_id]['versions'][previous_current_version_id]['version_stages'] = ['AWSPREVIOUS']
self.secrets[secret_id]['versions'][version_id] = secret_version
self.secrets[secret_id]['default_version_id'] = version_id
else:
self.secrets[secret_id] = {
'versions': {
version_id: secret_version
},
'default_version_id': version_id,
}
secret = self.secrets[secret_id]
secret['secret_id'] = secret_id
secret['name'] = secret_id
secret['rotation_enabled'] = False
secret['rotation_lambda_arn'] = ''
secret['auto_rotate_after_days'] = 0
secret['tags'] = tags or []
return version_id
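# Stage bookkeeping sketch (comment only; 'db' is a hypothetical secret name):
# repeated adds promote version stages like so --
#   1st _add_secret('db') -> v1: ['AWSCURRENT']
#   2nd _add_secret('db') -> v1: ['AWSPREVIOUS'], v2: ['AWSCURRENT']
#   3rd _add_secret('db') -> v1: [], v2: ['AWSPREVIOUS'], v3: ['AWSCURRENT']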
def put_secret_value(self, secret_id, secret_string, version_stages):
version_id = self._add_secret(secret_id, secret_string, version_stages=version_stages)
response = json.dumps({
'ARN': secret_arn(self.region, secret_id),
'Name': secret_id,
'VersionId': version_id,
'VersionStages': version_stages
})
return response
def describe_secret(self, secret_id):
if not self._is_valid_identifier(secret_id):
raise ResourceNotFoundException
secret = self.secrets[secret_id]
response = json.dumps({
"ARN": secret_arn(self.region, secret['secret_id']),
"Name": secret['name'],
"Description": "",
"KmsKeyId": "",
"RotationEnabled": secret['rotation_enabled'],
"RotationLambdaARN": secret['rotation_lambda_arn'],
"RotationRules": {
"AutomaticallyAfterDays": secret['auto_rotate_after_days']
},
"LastRotatedDate": None,
"LastChangedDate": None,
"LastAccessedDate": None,
"DeletedDate": secret.get('deleted_date', None),
"Tags": secret['tags']
})
return response
def rotate_secret(self, secret_id, client_request_token=None,
rotation_lambda_arn=None, rotation_rules=None):
rotation_days = 'AutomaticallyAfterDays'
if not self._is_valid_identifier(secret_id):
raise ResourceNotFoundException
if 'deleted_date' in self.secrets[secret_id]:
raise InvalidRequestException(
"An error occurred (InvalidRequestException) when calling the RotateSecret operation: You tried to \
perform the operation on a secret that's currently marked deleted."
)
if client_request_token:
token_length = len(client_request_token)
if token_length < 32 or token_length > 64:
msg = (
'ClientRequestToken '
'must be 32-64 characters long.'
)
raise InvalidParameterException(msg)
if rotation_lambda_arn:
if len(rotation_lambda_arn) > 2048:
msg = (
'RotationLambdaARN '
'must be <= 2048 characters long.'
)
raise InvalidParameterException(msg)
if rotation_rules:
if rotation_days in rotation_rules:
rotation_period = rotation_rules[rotation_days]
if rotation_period < 1 or rotation_period > 1000:
msg = (
'RotationRules.AutomaticallyAfterDays '
'must be within 1-1000.'
)
raise InvalidParameterException(msg)
secret = self.secrets[secret_id]
old_secret_version = secret['versions'][secret['default_version_id']]
new_version_id = client_request_token or str(uuid.uuid4())
self._add_secret(secret_id, old_secret_version['secret_string'], tags=secret['tags'], version_id=new_version_id, version_stages=['AWSCURRENT'])
secret['rotation_lambda_arn'] = rotation_lambda_arn or ''
if rotation_rules:
secret['auto_rotate_after_days'] = rotation_rules.get(rotation_days, 0)
if secret['auto_rotate_after_days'] > 0:
secret['rotation_enabled'] = True
if 'AWSCURRENT' in old_secret_version['version_stages']:
old_secret_version['version_stages'].remove('AWSCURRENT')
response = json.dumps({
"ARN": secret_arn(self.region, secret['secret_id']),
"Name": secret['name'],
"VersionId": new_version_id
})
return response
def get_random_password(self, password_length,
exclude_characters, exclude_numbers,
exclude_punctuation, exclude_uppercase,
exclude_lowercase, include_space,
require_each_included_type):
# password length must be less than or equal to 4096
if password_length > 4096:
raise ClientError(
"ClientError: An error occurred (ValidationException) \
when calling the GetRandomPassword operation: 1 validation error detected: Value '{}' at 'passwordLength' \
failed to satisfy constraint: Member must have value less than or equal to 4096".format(password_length))
if password_length < 4:
raise InvalidParameterException(
"InvalidParameterException: An error occurred (InvalidParameterException) \
when calling the GetRandomPassword operation: Password length is too short based on the required types.")
response = json.dumps({
"RandomPassword": random_password(password_length,
exclude_characters,
exclude_numbers,
exclude_punctuation,
exclude_uppercase,
exclude_lowercase,
include_space,
require_each_included_type)
})
return response
def list_secret_version_ids(self, secret_id):
secret = self.secrets[secret_id]
version_list = []
for version_id, version in secret['versions'].items():
version_list.append({
'CreatedDate': int(time.time()),
'LastAccessedDate': int(time.time()),
'VersionId': version_id,
'VersionStages': version['version_stages'],
})
response = json.dumps({
'ARN': secret_arn(self.region, secret['secret_id']),
'Name': secret['name'],
'NextToken': '',
'Versions': version_list,
})
return response
def list_secrets(self, max_results, next_token):
# TODO implement pagination and limits
secret_list = []
for secret in self.secrets.values():
versions_to_stages = {}
for version_id, version in secret['versions'].items():
versions_to_stages[version_id] = version['version_stages']
secret_list.append({
"ARN": secret_arn(self.region, secret['secret_id']),
"DeletedDate": secret.get('deleted_date', None),
"Description": "",
"KmsKeyId": "",
"LastAccessedDate": None,
"LastChangedDate": None,
"LastRotatedDate": None,
"Name": secret['name'],
"RotationEnabled": secret['rotation_enabled'],
"RotationLambdaARN": secret['rotation_lambda_arn'],
"RotationRules": {
"AutomaticallyAfterDays": secret['auto_rotate_after_days']
},
"SecretVersionsToStages": versions_to_stages,
"Tags": secret['tags']
})
return secret_list, None
def delete_secret(self, secret_id, recovery_window_in_days, force_delete_without_recovery):
if not self._is_valid_identifier(secret_id):
raise ResourceNotFoundException
if 'deleted_date' in self.secrets[secret_id]:
raise InvalidRequestException(
"An error occurred (InvalidRequestException) when calling the DeleteSecret operation: You tried to \
perform the operation on a secret that's currently marked deleted."
)
if recovery_window_in_days and force_delete_without_recovery:
raise InvalidParameterException(
"An error occurred (InvalidParameterException) when calling the DeleteSecret operation: You can't \
use ForceDeleteWithoutRecovery in conjunction with RecoveryWindowInDays."
)
if recovery_window_in_days and (recovery_window_in_days < 7 or recovery_window_in_days > 30):
raise InvalidParameterException(
"An error occurred (InvalidParameterException) when calling the DeleteSecret operation: The \
RecoveryWindowInDays value must be between 7 and 30 days (inclusive)."
)
deletion_date = datetime.datetime.utcnow()
if force_delete_without_recovery:
secret = self.secrets.pop(secret_id, None)
else:
deletion_date += datetime.timedelta(days=recovery_window_in_days or 30)
self.secrets[secret_id]['deleted_date'] = self._unix_time_secs(deletion_date)
secret = self.secrets.get(secret_id, None)
if not secret:
raise ResourceNotFoundException
arn = secret_arn(self.region, secret['secret_id'])
name = secret['name']
return arn, name, self._unix_time_secs(deletion_date)
def restore_secret(self, secret_id):
if not self._is_valid_identifier(secret_id):
raise ResourceNotFoundException
self.secrets[secret_id].pop('deleted_date', None)
secret = self.secrets[secret_id]
arn = secret_arn(self.region, secret['secret_id'])
name = secret['name']
return arn, name
available_regions = (
boto3.session.Session().get_available_regions("secretsmanager")
)
secretsmanager_backends = {region: SecretsManagerBackend(region_name=region)
for region in available_regions}
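# A minimal usage sketch (illustrative only, not part of this module): the
# backend above is normally exercised through moto's public
# ``mock_secretsmanager`` decorator with an ordinary boto3 client.
def _example_round_trip():
    from moto import mock_secretsmanager

    @mock_secretsmanager
    def run():
        client = boto3.client('secretsmanager', region_name='us-east-1')
        client.create_secret(Name='demo', SecretString='hunter2')
        value = client.get_secret_value(SecretId='demo')
        assert value['SecretString'] == 'hunter2'

    run()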
|
{
"content_hash": "0ed777a2316862cfd6ee24049cf4f359",
"timestamp": "",
"source": "github",
"line_count": 399,
"max_line_length": 146,
"avg_line_length": 37.471177944862156,
"alnum_prop": 0.57634940806635,
"repo_name": "whummer/moto",
"id": "3e0424b6b043f507ce8e735edcded8d0997d0287",
"size": "14951",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "moto/secretsmanager/models.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Dockerfile",
"bytes": "443"
},
{
"name": "HTML",
"bytes": "5848"
},
{
"name": "Java",
"bytes": "1688"
},
{
"name": "JavaScript",
"bytes": "756"
},
{
"name": "Makefile",
"bytes": "1148"
},
{
"name": "Python",
"bytes": "6015085"
},
{
"name": "Ruby",
"bytes": "188"
},
{
"name": "Scala",
"bytes": "782"
},
{
"name": "Shell",
"bytes": "797"
}
],
"symlink_target": ""
}
|
from vent.api.menu_helpers import MenuHelper
from vent.api.plugins import Plugin
def test_cores():
""" Test the cores function """
instance = MenuHelper()
cores = instance.cores('install')
assert cores[0] is True
cores = instance.cores('build')
assert cores[0] is True
cores = instance.cores('start')
assert cores[0] is True
cores = instance.cores('stop')
assert cores[0] is True
cores = instance.cores('clean')
assert cores[0] is True
def test_repo_branches():
""" Test the repo_branches function """
instance = MenuHelper()
status = instance.repo_branches('https://github.com/cyberreboot/vent')
assert isinstance(status, tuple)
assert status[0] is True
def test_repo_commits():
""" Test the repo_commits function """
instance = Plugin()
status = instance.add('https://github.com/cyberreboot/vent', build=False)
assert isinstance(status, tuple)
assert status[0] is True
instance = MenuHelper()
status = instance.repo_commits('https://github.com/cyberreboot/vent')
assert isinstance(status, tuple)
# flaky test on Travis
# assert status[0] is True
def test_repo_tools():
""" Test the repo_tools function """
instance = MenuHelper()
status = instance.repo_tools('https://github.com/cyberreboot/vent',
'master', 'HEAD')
assert isinstance(status, tuple)
assert status[0] is True
def test_tools_status():
""" Test the tools_status function """
instance = MenuHelper()
core = instance.tools_status(True)
assert isinstance(core, tuple)
plugins = instance.tools_status(False)
assert isinstance(plugins, tuple)
|
{
"content_hash": "dbfd6978751ff977abe717cc3d9d9f72",
"timestamp": "",
"source": "github",
"line_count": 56,
"max_line_length": 77,
"avg_line_length": 30.321428571428573,
"alnum_prop": 0.6513545347467609,
"repo_name": "Jeff-Wang93/vent",
"id": "b7d12cfa1caa69626e38b5a6624ac4f2c6003683",
"size": "1698",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "tests/unit/test_api_menu_helpers.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Go",
"bytes": "227"
},
{
"name": "Makefile",
"bytes": "4747"
},
{
"name": "Python",
"bytes": "433992"
},
{
"name": "Shell",
"bytes": "2103"
}
],
"symlink_target": ""
}
|
"""
Given a binary tree, return the zigzag level order traversal of its nodes' values (i.e., from left to right, then right to left for the next level, and so on, alternating between levels).
For example:
Given binary tree {3,9,20,#,#,15,7},
    3
   / \
  9  20
    /  \
   15   7
return its zigzag level order traversal as:
[
[3],
[20,9],
[15,7]
]
"""
# Definition for a binary tree node
# class TreeNode:
# def __init__(self, x):
# self.val = x
# self.left = None
# self.right = None
class Solution:
# @param root, a tree node
# @return a list of lists of integers
def zigzagLevelOrder(self, root):
ret = []
if root is None:
return ret
queue = [root, None]
res = []
zig = False # We start at the root level, which reads left-to-right, so no reversal yet
while len(queue) > 0:
node = queue.pop(0)
if node is None:
if zig:
ret.append(res[::-1])
else:
ret.append(res[:])
res = []
if len(queue) == 0: # Break here, otherwise we would append another None sentinel
break
zig = not zig
queue.append(None)
else:
res.append(node.val) # Record the value when the node is dequeued, not when enqueuing node.left/right
if node.left is not None:
queue.append(node.left)
if node.right is not None:
queue.append(node.right)
return ret
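# A minimal self-check sketch (not part of the original solution): ``TreeNode``
# below is a local stand-in for the class Leetcode normally supplies. It builds
# the tree from the docstring and verifies the expected zigzag order.
if __name__ == '__main__':
    class TreeNode:
        def __init__(self, x):
            self.val = x
            self.left = None
            self.right = None

    root = TreeNode(3)
    root.left = TreeNode(9)
    root.right = TreeNode(20)
    root.right.left = TreeNode(15)
    root.right.right = TreeNode(7)
    assert Solution().zigzagLevelOrder(root) == [[3], [20, 9], [15, 7]]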
|
{
"content_hash": "0e269474966d1d0117590e77a628a987",
"timestamp": "",
"source": "github",
"line_count": 54,
"max_line_length": 169,
"avg_line_length": 29.333333333333332,
"alnum_prop": 0.49873737373737376,
"repo_name": "yuzhangcmu/Python-Study",
"id": "0f2edc10aecc977978dcc609b8121040b5c257ad",
"size": "1584",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "Leetcode/Binary_Tree_Zigzag_Level_Order_Traversal.py",
"mode": "33188",
"license": "mit",
"language": [],
"symlink_target": ""
}
|
# [START domains_v1beta1_generated_Domains_ConfigureContactSettings_sync]
from google.cloud import domains_v1beta1
def sample_configure_contact_settings():
# Create a client
client = domains_v1beta1.DomainsClient()
# Initialize request argument(s)
request = domains_v1beta1.ConfigureContactSettingsRequest(
registration="registration_value",
)
# Make the request
operation = client.configure_contact_settings(request=request)
print("Waiting for operation to complete...")
response = operation.result()
# Handle the response
print(response)
# [END domains_v1beta1_generated_Domains_ConfigureContactSettings_sync]
|
{
"content_hash": "e81bc5951c219cea7b7e3a41eb79b5fe",
"timestamp": "",
"source": "github",
"line_count": 23,
"max_line_length": 71,
"avg_line_length": 26,
"alnum_prop": 0.7290969899665551,
"repo_name": "googleapis/python-domains",
"id": "44fcfa80bd2495cc8a8a7800ffed9c59a96e6b8e",
"size": "2004",
"binary": false,
"copies": "1",
"ref": "refs/heads/main",
"path": "samples/generated_samples/domains_v1beta1_generated_domains_configure_contact_settings_sync.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Dockerfile",
"bytes": "2050"
},
{
"name": "Python",
"bytes": "1119248"
},
{
"name": "Shell",
"bytes": "30663"
}
],
"symlink_target": ""
}
|
import sys
import copy
import functools
import datetime
import decimal
from functools import update_wrapper
from inspect import getargspec
from django import forms
from django.utils.encoding import force_unicode
from django.conf import settings
from django.contrib import messages
from django.contrib.auth import get_permission_codename
from django.core.exceptions import ValidationError
from django.core.serializers.json import DjangoJSONEncoder
from django.core.urlresolvers import reverse
from django.http import HttpResponse
from django.template import Context, Template
from django.template.response import TemplateResponse
from django.utils.datastructures import SortedDict
from django.utils.decorators import method_decorator, classonlymethod
from django.utils.encoding import smart_unicode
from django.utils.http import urlencode
from django.utils.itercompat import is_iterable
from django.utils.safestring import mark_safe
from django.utils.text import capfirst
from django.utils.translation import ugettext as _
from django.views.decorators.csrf import csrf_protect
from django.views.generic import View
from xadmin.util import static, json, vendor, sortkeypicker
csrf_protect_m = method_decorator(csrf_protect)
class IncorrectPluginArg(Exception):
pass
def filter_chain(filters, token, func, *args, **kwargs):
if token == -1:
return func()
else:
def _inner_method():
fm = filters[token]
fargs = getargspec(fm)[0]
if len(fargs) == 1:
# Only self arg
result = func()
if result is None:
return fm()
else:
raise IncorrectPluginArg(u'Plugin filter method needs an argument to receive the parent method result.')
else:
return fm(func if fargs[1] == '__' else func(), *args, **kwargs)
return filter_chain(filters, token - 1, _inner_method, *args, **kwargs)
def filter_hook(func):
tag = func.__name__
func.__doc__ = "``filter_hook``\n\n" + (func.__doc__ or "")
@functools.wraps(func)
def method(self, *args, **kwargs):
def _inner_method():
return func(self, *args, **kwargs)
if self.plugins:
filters = [(getattr(getattr(p, tag), 'priority', 10), getattr(p, tag))
for p in self.plugins if callable(getattr(p, tag, None))]
filters = [f for p, f in sorted(filters, key=lambda x:x[0])]
return filter_chain(filters, len(filters) - 1, _inner_method, *args, **kwargs)
else:
return _inner_method()
return method
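# Illustration (comment only; ``TitlePlugin``/``LazyPlugin`` are hypothetical):
# a plugin method with the same name as the hooked view method wraps the view's
# result, plugins run in ``priority`` order, and naming the first parameter
# ``__`` hands the plugin the unevaluated parent callable instead of its result:
#
#     class TitlePlugin(BaseAdminPlugin):
#         priority = 10
#         def get_title(self, title):   # receives the parent result
#             return title + ' (plugged)'
#
#     class LazyPlugin(BaseAdminPlugin):
#         def get_title(self, __):      # receives the parent callable
#             return __().upper()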
def inclusion_tag(file_name, context_class=Context, takes_context=False):
def wrap(func):
@functools.wraps(func)
def method(self, context, nodes, *arg, **kwargs):
_dict = func(self, context, nodes, *arg, **kwargs)
from django.template.loader import get_template, select_template
if isinstance(file_name, Template):
t = file_name
elif not isinstance(file_name, basestring) and is_iterable(file_name):
t = select_template(file_name)
else:
t = get_template(file_name)
new_context = context_class(_dict, **{
'autoescape': context.autoescape,
'current_app': context.current_app,
'use_l10n': context.use_l10n,
'use_tz': context.use_tz,
})
new_context['admin_view'] = context['admin_view']
csrf_token = context.get('csrf_token', None)
if csrf_token is not None:
new_context['csrf_token'] = csrf_token
nodes.append(t.render(new_context))
return method
return wrap
class JSONEncoder(DjangoJSONEncoder):
def default(self, o):
if isinstance(o, datetime.date):
return o.strftime('%Y-%m-%d')
elif isinstance(o, datetime.datetime):
return o.strftime('%Y-%m-%d %H:%M:%S')
elif isinstance(o, decimal.Decimal):
return str(o)
else:
try:
return super(JSONEncoder, self).default(o)
except Exception:
return smart_unicode(o)
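# Example (assuming the ``json`` imported above is the stdlib-compatible
# module): json.dumps({'d': datetime.date(2015, 1, 2)}, cls=JSONEncoder)
# returns '{"d": "2015-01-02"}'; Decimals serialize via str().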
class BaseAdminObject(object):
def get_view(self, view_class, option_class=None, *args, **kwargs):
opts = kwargs.pop('opts', {})
return self.admin_site.get_view_class(view_class, option_class, **opts)(self.request, *args, **kwargs)
def get_model_view(self, view_class, model, *args, **kwargs):
return self.get_view(view_class, self.admin_site._registry.get(model), *args, **kwargs)
def get_admin_url(self, name, *args, **kwargs):
return reverse('%s:%s' % (self.admin_site.app_name, name), args=args, kwargs=kwargs)
def get_model_url(self, model, name, *args, **kwargs):
return reverse(
'%s:%s_%s_%s' % (self.admin_site.app_name, model._meta.app_label,
model._meta.model_name, name),
args=args, kwargs=kwargs, current_app=self.admin_site.name)
def get_model_perm(self, model, name):
return '%s.%s_%s' % (model._meta.app_label, name, model._meta.model_name)
def has_model_perm(self, model, name, user=None):
user = user or self.user
return user.has_perm(self.get_model_perm(model, name)) or (name == 'view' and self.has_model_perm(model, 'change', user))
def get_query_string(self, new_params=None, remove=None):
if new_params is None:
new_params = {}
if remove is None:
remove = []
p = dict(self.request.GET.items()).copy()
for r in remove:
for k in p.keys():
if k.startswith(r):
del p[k]
for k, v in new_params.items():
if v is None:
if k in p:
del p[k]
else:
p[k] = v
return '?%s' % urlencode(p)
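# Example (hypothetical request with GET '?page=2&o=-id'; parameter ordering
# in the produced query string may vary on Python 2):
#   self.get_query_string({'page': 3})  -> '?page=3&o=-id'
#   self.get_query_string(remove=['o']) -> '?page=2'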
def get_form_params(self, new_params=None, remove=None):
if new_params is None:
new_params = {}
if remove is None:
remove = []
p = dict(self.request.GET.items()).copy()
for r in remove:
for k in p.keys():
if k.startswith(r):
del p[k]
for k, v in new_params.items():
if v is None:
if k in p:
del p[k]
else:
p[k] = v
return mark_safe(''.join(
'<input type="hidden" name="%s" value="%s"/>' % (k, v) for k, v in p.items() if v))
def render_response(self, content, response_type='json'):
if response_type == 'json':
response = HttpResponse(content_type="application/json; charset=UTF-8")
response.write(
json.dumps(content, cls=JSONEncoder, ensure_ascii=False))
return response
return HttpResponse(content)
def template_response(self, template, context):
return TemplateResponse(self.request, template, context, current_app=self.admin_site.name)
def message_user(self, message, level='info'):
"""
Send a message to the user. The default implementation
posts a message using the django.contrib.messages backend.
"""
if hasattr(messages, level) and callable(getattr(messages, level)):
getattr(messages, level)(self.request, message)
def static(self, path):
return static(path)
def vendor(self, *tags):
return vendor(*tags)
class BaseAdminPlugin(BaseAdminObject):
def __init__(self, admin_view):
self.admin_view = admin_view
self.admin_site = admin_view.admin_site
if hasattr(admin_view, 'model'):
self.model = admin_view.model
self.opts = admin_view.model._meta
def init_request(self, *args, **kwargs):
pass
class BaseAdminView(BaseAdminObject, View):
""" Base Admin view, support some comm attrs."""
base_template = 'xadmin/base.html'
need_site_permission = True
def __init__(self, request, *args, **kwargs):
self.request = request
self.request_method = request.method.lower()
self.user = request.user
self.base_plugins = [p(self) for p in getattr(self,
"plugin_classes", [])]
self.args = args
self.kwargs = kwargs
self.init_plugin(*args, **kwargs)
self.init_request(*args, **kwargs)
@classonlymethod
def as_view(cls):
def view(request, *args, **kwargs):
self = cls(request, *args, **kwargs)
if hasattr(self, 'get') and not hasattr(self, 'head'):
self.head = self.get
if self.request_method in self.http_method_names:
handler = getattr(
self, self.request_method, self.http_method_not_allowed)
else:
handler = self.http_method_not_allowed
return handler(request, *args, **kwargs)
# take name and docstring from class
update_wrapper(view, cls, updated=())
view.need_site_permission = cls.need_site_permission
return view
def init_request(self, *args, **kwargs):
pass
def init_plugin(self, *args, **kwargs):
plugins = []
for p in self.base_plugins:
p.request = self.request
p.user = self.user
p.args = self.args
p.kwargs = self.kwargs
result = p.init_request(*args, **kwargs)
if result is not False:
plugins.append(p)
self.plugins = plugins
@filter_hook
def get_context(self):
return {'admin_view': self, 'media': self.media, 'base_template': self.base_template}
@property
def media(self):
return self.get_media()
@filter_hook
def get_media(self):
return forms.Media()
class CommAdminView(BaseAdminView):
base_template = 'xadmin/base_site.html'
menu_template = 'xadmin/includes/sitemenu_default.html'
site_title = None
site_footer = None
global_models_icon = {}
default_model_icon = None
apps_label_title = {}
apps_icons = {}
def get_site_menu(self):
return None
@filter_hook
def get_nav_menu(self):
site_menu = list(self.get_site_menu() or [])
had_urls = []
def get_url(menu, had_urls):
if 'url' in menu:
had_urls.append(menu['url'])
if 'menus' in menu:
for m in menu['menus']:
get_url(m, had_urls)
get_url({'menus': site_menu}, had_urls)
nav_menu = SortedDict()
for model, model_admin in self.admin_site._registry.items():
if getattr(model_admin, 'hidden_menu', False):
continue
app_label = model._meta.app_label
app_icon = None
model_dict = {
'title': unicode(capfirst(model._meta.verbose_name_plural)),
'url': self.get_model_url(model, "changelist"),
'icon': self.get_model_icon(model),
'perm': self.get_model_perm(model, 'view'),
'order': model_admin.order,
}
if model_dict['url'] in had_urls:
continue
app_key = "app:%s" % app_label
if app_key in nav_menu:
nav_menu[app_key]['menus'].append(model_dict)
else:
# Find app title
app_title = unicode(app_label.title())
if app_label.lower() in self.apps_label_title:
app_title = self.apps_label_title[app_label.lower()]
else:
mods = model.__module__.split('.')
if len(mods) > 1:
mod = '.'.join(mods[0:-1])
if mod in sys.modules:
mod = sys.modules[mod]
if 'verbose_name' in dir(mod):
app_title = getattr(mod, 'verbose_name')
elif 'app_title' in dir(mod):
app_title = getattr(mod, 'app_title')
                # Find app icon
if app_label.lower() in self.apps_icons:
app_icon = self.apps_icons[app_label.lower()]
nav_menu[app_key] = {
'title': app_title,
'menus': [model_dict],
}
app_menu = nav_menu[app_key]
if app_icon:
app_menu['first_icon'] = app_icon
elif ('first_icon' not in app_menu or
app_menu['first_icon'] == self.default_model_icon) and model_dict.get('icon'):
app_menu['first_icon'] = model_dict['icon']
if 'first_url' not in app_menu and model_dict.get('url'):
app_menu['first_url'] = model_dict['url']
for menu in nav_menu.values():
menu['menus'].sort(key=sortkeypicker(['order', 'title']))
nav_menu = nav_menu.values()
nav_menu.sort(key=lambda x: x['title'])
site_menu.extend(nav_menu)
return site_menu
@filter_hook
def get_context(self):
context = super(CommAdminView, self).get_context()
if not settings.DEBUG and 'nav_menu' in self.request.session:
nav_menu = json.loads(self.request.session['nav_menu'])
else:
menus = copy.copy(self.get_nav_menu())
def check_menu_permission(item):
need_perm = item.pop('perm', None)
if need_perm is None:
return True
elif callable(need_perm):
return need_perm(self.user)
elif need_perm == 'super':
return self.user.is_superuser
else:
return self.user.has_perm(need_perm)
def filter_item(item):
if 'menus' in item:
before_filter_length = len(item['menus'])
item['menus'] = [filter_item(
i) for i in item['menus'] if check_menu_permission(i)]
after_filter_length = len(item['menus'])
if after_filter_length == 0 and before_filter_length > 0:
return None
return item
nav_menu = [filter_item(item) for item in menus if check_menu_permission(item)]
nav_menu = filter(lambda x:x, nav_menu)
if not settings.DEBUG:
self.request.session['nav_menu'] = json.dumps(nav_menu)
self.request.session.modified = True
def check_selected(menu, path):
selected = False
if 'url' in menu:
chop_index = menu['url'].find('?')
if chop_index == -1:
selected = path.startswith(menu['url'])
else:
selected = path.startswith(menu['url'][:chop_index])
if 'menus' in menu:
for m in menu['menus']:
_s = check_selected(m, path)
if _s:
selected = True
if selected:
menu['selected'] = True
return selected
for menu in nav_menu:
check_selected(menu, self.request.path)
context.update({
'menu_template': self.menu_template,
'nav_menu': nav_menu,
'site_title': self.site_title or _(u'Django Xadmin'),
'site_footer': self.site_footer or _(u'my-company.inc'),
'breadcrumbs': self.get_breadcrumb()
})
return context
@filter_hook
def get_model_icon(self, model):
icon = self.global_models_icon.get(model)
if icon is None and model in self.admin_site._registry:
icon = getattr(self.admin_site._registry[model],
'model_icon', self.default_model_icon)
return icon
@filter_hook
def get_breadcrumb(self):
return [{
'url': self.get_admin_url('index'),
'title': _('Home')
}]
class ModelAdminView(CommAdminView):
fields = None
exclude = None
ordering = None
model = None
remove_permissions = []
def __init__(self, request, *args, **kwargs):
self.opts = self.model._meta
self.app_label = self.model._meta.app_label
self.model_name = self.model._meta.model_name
self.model_info = (self.app_label, self.model_name)
super(ModelAdminView, self).__init__(request, *args, **kwargs)
@filter_hook
def get_context(self):
new_context = {
"opts": self.opts,
"app_label": self.app_label,
"model_name": self.model_name,
"verbose_name": force_unicode(self.opts.verbose_name),
'model_icon': self.get_model_icon(self.model),
}
context = super(ModelAdminView, self).get_context()
context.update(new_context)
return context
@filter_hook
def get_breadcrumb(self):
bcs = super(ModelAdminView, self).get_breadcrumb()
item = {'title': self.opts.verbose_name_plural}
if self.has_view_permission():
item['url'] = self.model_admin_url('changelist')
bcs.append(item)
return bcs
@filter_hook
def get_object(self, object_id):
"""
Get model object instance by object_id, used for change admin view
"""
        # First get the base admin view's queryset property, which returns the default model queryset
queryset = self.queryset()
model = queryset.model
try:
object_id = model._meta.pk.to_python(object_id)
return queryset.get(pk=object_id)
except (model.DoesNotExist, ValidationError):
return None
@filter_hook
def get_object_url(self, obj):
if self.has_change_permission(obj):
return self.model_admin_url("change", getattr(obj, self.opts.pk.attname))
elif self.has_view_permission(obj):
return self.model_admin_url("detail", getattr(obj, self.opts.pk.attname))
else:
return None
def model_admin_url(self, name, *args, **kwargs):
return reverse(
"%s:%s_%s_%s" % (self.admin_site.app_name, self.opts.app_label,
self.model_name, name), args=args, kwargs=kwargs)
def get_model_perms(self):
"""
Returns a dict of all perms for this model. This dict has the keys
``add``, ``change``, and ``delete`` mapping to the True/False for each
of those actions.
"""
return {
'view': self.has_view_permission(),
'add': self.has_add_permission(),
'change': self.has_change_permission(),
'delete': self.has_delete_permission(),
}
def get_template_list(self, template_name):
opts = self.opts
return (
"xadmin/%s/%s/%s" % (
opts.app_label, opts.object_name.lower(), template_name),
"xadmin/%s/%s" % (opts.app_label, template_name),
"xadmin/%s" % template_name,
)
def get_ordering(self):
"""
Hook for specifying field ordering.
"""
return self.ordering or () # otherwise we might try to *None, which is bad ;)
@filter_hook
def queryset(self):
"""
Returns a QuerySet of all model instances that can be edited by the
admin site. This is used by changelist_view.
"""
return self.model._default_manager.get_queryset()
def has_view_permission(self, obj=None):
view_codename = get_permission_codename('view', self.opts)
change_codename = get_permission_codename('change', self.opts)
return ('view' not in self.remove_permissions) and (self.user.has_perm('%s.%s' % (self.app_label, view_codename)) or \
self.user.has_perm('%s.%s' % (self.app_label, change_codename)))
def has_add_permission(self):
codename = get_permission_codename('add', self.opts)
return ('add' not in self.remove_permissions) and self.user.has_perm('%s.%s' % (self.app_label, codename))
def has_change_permission(self, obj=None):
codename = get_permission_codename('change', self.opts)
return ('change' not in self.remove_permissions) and self.user.has_perm('%s.%s' % (self.app_label, codename))
def has_delete_permission(self, obj=None):
codename = get_permission_codename('delete', self.opts)
return ('delete' not in self.remove_permissions) and self.user.has_perm('%s.%s' % (self.app_label, codename))
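# A subclassing sketch (comments only; ``MyModel`` and the icon class are
# hypothetical) showing how the hooks above are typically combined:
#
#   class MyModelAdmin(object):
#       model_icon = 'fa fa-user'          # picked up by get_model_icon()
#       remove_permissions = ['delete']    # has_delete_permission() -> False
#
#   xadmin.site.register(MyModel, MyModelAdmin)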
|
{
"content_hash": "61e48d11d1a1ae332244d182907e5ae9",
"timestamp": "",
"source": "github",
"line_count": 585,
"max_line_length": 129,
"avg_line_length": 35.76923076923077,
"alnum_prop": 0.557037037037037,
"repo_name": "wcybxzj/django_xadmin17",
"id": "a63c3adf524a63569dc186a895bda9fbc36f37f7",
"size": "20925",
"binary": false,
"copies": "8",
"ref": "refs/heads/master",
"path": "xadmin_bak/views/base.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "CSS",
"bytes": "49963"
},
{
"name": "Gettext Catalog",
"bytes": "414591"
},
{
"name": "HTML",
"bytes": "196784"
},
{
"name": "JavaScript",
"bytes": "132290"
},
{
"name": "Python",
"bytes": "440307"
}
],
"symlink_target": ""
}
|
"""Base Config control class."""
class BaseConfig(object):
"""Base class for identifier based configuration management."""
def __init__(self, config=None):
self._config = config or {}
@staticmethod
def _split_identifier(identifier):
return identifier.split('.')
def export(self):
"""Export the raw config."""
return self._config
def get(self, identifier, default_value=None):
"""Retrieve the identifier value in the config."""
parts = self._split_identifier(identifier)
item = self._config
for part in parts:
if part not in item:
return default_value
item = item[part]
return item
def prefixed(self, prefix):
"""Create a utility for accessing config with a common identifier prefix."""
return BaseConfigPrefixed(self, prefix)
def set(self, identifier, value):
"""Set the value of the identifier in the config."""
parts = self._split_identifier(identifier)
key = parts.pop()
item = self._config
for part in parts:
if part not in item:
item[part] = {}
item = item[part]
item[key] = value
class BaseConfigPrefixed(object):
"""Utility class for shortcutting common prefixes in identifiers."""
def __init__(self, config, prefix):
self._config = config
self.prefix = self.normalize_prefix(prefix)
@staticmethod
def normalize_prefix(prefix):
"""Normalize how the prefix is formatted."""
prefix = prefix.strip()
if not prefix:
return ''
if not prefix.endswith('.'):
prefix = '{}.'.format(prefix)
return prefix
def prefix_identifier(self, identifier):
"""Adds the prefix to the identifier"""
return '{}{}'.format(self.prefix, identifier)
def get(self, identifier, default_value=None):
"""Retrieve the identifier value in the config with prefix."""
return self._config.get(self.prefix_identifier(identifier), default_value=default_value)
def set(self, identifier, value):
"""Set the value of the identifier in the config with prefix."""
return self._config.set(self.prefix_identifier(identifier), value=value)
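# A minimal usage sketch (not part of the original module) showing
# dotted-identifier access and the prefixed shortcut; the identifiers and
# values below are illustrative.
if __name__ == '__main__':
    config = BaseConfig()
    config.set('deployments.prod.region', 'us-east1')
    assert config.get('deployments.prod.region') == 'us-east1'
    # Missing identifiers fall back to the supplied default.
    assert config.get('deployments.prod.zone', 'no-zone') == 'no-zone'
    prod = config.prefixed('deployments.prod')
    prod.set('zone', 'us-east1-b')
    assert config.get('deployments.prod.zone') == 'us-east1-b'
    print(config.export())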
|
{
"content_hash": "8505faeaa45b34d486ea39a35c5ee63e",
"timestamp": "",
"source": "github",
"line_count": 71,
"max_line_length": 96,
"avg_line_length": 32.66197183098591,
"alnum_prop": 0.6097455799913756,
"repo_name": "grow/pygrow",
"id": "be5dc30e63d8508f3cfe4fa88f622a5d8c8b6688",
"size": "2319",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "grow/common/base_config.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "527"
},
{
"name": "HTML",
"bytes": "8714"
},
{
"name": "Python",
"bytes": "309004"
},
{
"name": "Shell",
"bytes": "4219"
}
],
"symlink_target": ""
}
|
"""
Test for softmax_regression.ipynb
"""
import os
from pylearn2.testing.skip import skip_if_no_data
from pylearn2.config import yaml_parse
from theano import config
def test():
skip_if_no_data()
dirname = os.path.join(os.path.abspath(os.path.dirname(__file__)), '..')
with open(os.path.join(dirname, 'sr_dataset.yaml'), 'r') as f:
dataset = f.read()
if config.mode == "DEBUG_MODE":
hyper_params = {'train_stop': 10}
else:
hyper_params = {'train_stop': 50}
dataset = dataset % (hyper_params)
with open(os.path.join(dirname, 'sr_model.yaml'), 'r') as f:
model = f.read()
with open(os.path.join(dirname, 'sr_algorithm.yaml'), 'r') as f:
algorithm = f.read()
if config.mode == "DEBUG_MODE":
hyper_params = {'batch_size': 10,
'valid_stop': 50010}
else:
hyper_params = {'batch_size': 10,
'valid_stop': 50050}
algorithm = algorithm % (hyper_params)
with open(os.path.join(dirname, 'sr_train.yaml'), 'r') as f:
train = f.read()
save_path = os.path.dirname(os.path.realpath(__file__))
train = train % locals()
train = yaml_parse.load(train)
train.main_loop()
    # Clean up the pickle files written by the training run, if any.
    try:
        os.remove("{}/softmax_regression.pkl".format(save_path))
        os.remove("{}/softmax_regression_best.pkl".format(save_path))
    except OSError:
        pass
if __name__ == '__main__':
test()
|
{
"content_hash": "4e8d0ce8f6cbad58adaeaf3bfb0339a4",
"timestamp": "",
"source": "github",
"line_count": 56,
"max_line_length": 76,
"avg_line_length": 25.892857142857142,
"alnum_prop": 0.5772413793103448,
"repo_name": "shiquanwang/pylearn2",
"id": "809533b5189a4a5e24073d5616e8b52adf04d345",
"size": "1450",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "pylearn2/scripts/tutorials/softmax_regression/tests/test_softmaxreg.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "C",
"bytes": "53316"
},
{
"name": "C++",
"bytes": "46935"
},
{
"name": "CSS",
"bytes": "10655"
},
{
"name": "Cuda",
"bytes": "1267472"
},
{
"name": "Objective-C",
"bytes": "953"
},
{
"name": "Python",
"bytes": "3452538"
},
{
"name": "Shell",
"bytes": "4195"
}
],
"symlink_target": ""
}
|
import sys, os
import argparse
from pynux import utils
def main(argv=None):
parser = argparse.ArgumentParser(description='convert an object to jp2')
parser.add_argument('path', help="Nuxeo document path")
utils.get_common_options(parser)
if argv is None:
argv = parser.parse_args()
print argv.path
nx = utils.Nuxeo(rcfile=argv.rcfile, loglevel=argv.loglevel.upper())
# under construction...
if __name__ == "__main__":
sys.exit(main())
|
{
"content_hash": "55b58fdfd9ee2f6ac5af013cdc7e9998",
"timestamp": "",
"source": "github",
"line_count": 20,
"max_line_length": 76,
"avg_line_length": 25,
"alnum_prop": 0.674,
"repo_name": "mredar/ucldc-iiif",
"id": "754196e2aa2617d48152dee7741a12b932375a21",
"size": "546",
"binary": false,
"copies": "3",
"ref": "refs/heads/master",
"path": "utils/convert_single.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "ApacheConf",
"bytes": "1894"
},
{
"name": "Python",
"bytes": "15086"
},
{
"name": "Shell",
"bytes": "5070"
}
],
"symlink_target": ""
}
|
from .health_evaluation import HealthEvaluation
class PartitionsHealthEvaluation(HealthEvaluation):
"""Represents health evaluation for the partitions of a service, containing
health evaluations for each unhealthy partition that impacts current
aggregated health state. Can be returned when evaluating service health and
the aggregated health state is either Error or Warning.
:param aggregated_health_state: The health state of a Service Fabric
entity such as Cluster, Node, Application, Service, Partition, Replica
etc. Possible values include: 'Invalid', 'Ok', 'Warning', 'Error',
'Unknown'
:type aggregated_health_state: str or
~azure.servicefabric.models.HealthState
:param description: Description of the health evaluation, which represents
a summary of the evaluation process.
:type description: str
:param kind: Constant filled by server.
:type kind: str
:param max_percent_unhealthy_partitions_per_service: Maximum allowed
percentage of unhealthy partitions per service from the
ServiceTypeHealthPolicy.
:type max_percent_unhealthy_partitions_per_service: int
:param total_count: Total number of partitions of the service from the
health store.
:type total_count: long
:param unhealthy_evaluations: List of unhealthy evaluations that led to
the aggregated health state. Includes all the unhealthy
PartitionHealthEvaluation that impacted the aggregated health.
:type unhealthy_evaluations:
list[~azure.servicefabric.models.HealthEvaluationWrapper]
"""
_validation = {
'kind': {'required': True},
}
_attribute_map = {
'aggregated_health_state': {'key': 'AggregatedHealthState', 'type': 'str'},
'description': {'key': 'Description', 'type': 'str'},
'kind': {'key': 'Kind', 'type': 'str'},
'max_percent_unhealthy_partitions_per_service': {'key': 'MaxPercentUnhealthyPartitionsPerService', 'type': 'int'},
'total_count': {'key': 'TotalCount', 'type': 'long'},
'unhealthy_evaluations': {'key': 'UnhealthyEvaluations', 'type': '[HealthEvaluationWrapper]'},
}
def __init__(self, aggregated_health_state=None, description=None, max_percent_unhealthy_partitions_per_service=None, total_count=None, unhealthy_evaluations=None):
super(PartitionsHealthEvaluation, self).__init__(aggregated_health_state=aggregated_health_state, description=description)
self.max_percent_unhealthy_partitions_per_service = max_percent_unhealthy_partitions_per_service
self.total_count = total_count
self.unhealthy_evaluations = unhealthy_evaluations
self.kind = 'Partitions'
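# A construction sketch (not part of the generated file; the values are
# illustrative). The ``kind`` discriminator is filled in automatically:
#
#   evaluation = PartitionsHealthEvaluation(
#       aggregated_health_state='Warning',
#       description='2 of 20 partitions unhealthy',
#       max_percent_unhealthy_partitions_per_service=10,
#       total_count=20,
#       unhealthy_evaluations=[])
#   assert evaluation.kind == 'Partitions'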
|
{
"content_hash": "74f130925fea093a3d0fc650456dd366",
"timestamp": "",
"source": "github",
"line_count": 53,
"max_line_length": 168,
"avg_line_length": 50.905660377358494,
"alnum_prop": 0.7171979243884359,
"repo_name": "lmazuel/azure-sdk-for-python",
"id": "ea3006a153bca383d028fda4cc2a41f01916fa78",
"size": "3172",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "azure-servicefabric/azure/servicefabric/models/partitions_health_evaluation.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "42572767"
}
],
"symlink_target": ""
}
|
import tensorflow as tf
import numpy as np
def linear(input_, output_size, scope=None, stddev=0.02, bias_start=0.0, with_w=False):
shape = input_.get_shape().as_list()
    sequential_mode = len(shape) > 2  # handle the sequential case where input_ is batch x seq_len x dim
    # TensorFlow does not support this tensor product directly, so reshape instead
if sequential_mode:
reshaped_input = tf.reshape(input_, (-1, shape[-1]))
else:
reshaped_input = input_
with tf.variable_scope(scope or "linear"):
matrix = tf.get_variable("matrix", [shape[-1], output_size], tf.float32, tf.random_uniform_initializer(-stddev, stddev))
bias = tf.get_variable("bias", [output_size],
initializer=tf.constant_initializer(bias_start))
ret = tf.matmul(reshaped_input, matrix) + bias
if sequential_mode:
reshaped_ret = tf.reshape(ret, (shape[0], shape[1], -1))
else:
reshaped_ret = ret
    if with_w:
        return reshaped_ret, matrix, bias
    else:
        # the bias is already added when computing ret above; returning
        # reshaped_ret + bias would apply it twice
        return reshaped_ret
def nonlinear(input_, n_units=10, n_layers=1, nonlinearity_type='relu', scope=None, stddev=0.02, bias_start=0.0):
"""
Wrapper to represent a nonlinear transformation phi(x)
"""
if type(n_units) is not list:
n_latent_units = [n_units for i in range(n_layers)]
else:
n_latent_units = n_units
n_layers = len(n_units)
shape = input_.get_shape().as_list()
    sequential_mode = len(shape) > 2  # handle the sequential case where input_ is batch x seq_len x dim
    # TensorFlow does not support this tensor product directly, so reshape instead
if sequential_mode:
reshaped_input = tf.reshape(input_, (-1, shape[-1]))
else:
reshaped_input = input_
with tf.variable_scope(scope or 'nonlinear'):
last_output_dim = shape[-1]
last_output = reshaped_input
for i in range(n_layers):
matrix = tf.get_variable('matrix_{0}'.format(i), [last_output_dim, n_latent_units[i]], tf.float32,
tf.random_uniform_initializer(-stddev, stddev))
bias = tf.get_variable("bias_{0}".format(i), [n_latent_units[i]], initializer=tf.constant_initializer(bias_start))
last_output = tf.matmul(last_output, matrix) + bias
if nonlinearity_type == 'relu':
last_output = tf.nn.relu(last_output)
last_output_dim = n_latent_units[i]
if sequential_mode:
reshaped_last_output = tf.reshape(last_output, (shape[0], shape[1], -1))
else:
reshaped_last_output = last_output
return reshaped_last_output
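# A shape sketch for the two helpers above (comments only; sizes are
# illustrative): both accept flat (batch x dim) or sequential
# (batch x seq_len x dim) inputs via the reshape trick.
#
#   x_flat = tf.placeholder(tf.float32, [32, 64])
#   y_flat = linear(x_flat, 10, scope='l1')                   # -> (32, 10)
#   x_seq = tf.placeholder(tf.float32, [32, 20, 64])
#   y_seq = nonlinear(x_seq, n_units=[128, 10], scope='n1')   # -> (32, 20, 10)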
from tensorflow.contrib import slim
def conv_encoder(input_, n_units=10, feature_shape=(28, 28, 1), is_training=True):
    # convolutional layers to process input pixels
#input_: batch_size * dim_size
with tf.variable_scope('convolution'):
net = tf.reshape(input_, [-1, feature_shape[0], feature_shape[1], feature_shape[2]]) #batch_size*seq_len, horizon, vertical, channels
with slim.arg_scope([slim.conv2d],
activation_fn=tf.nn.elu
):
net = slim.conv2d(net, 32, 5, stride=2) #14x14x32
net = slim.conv2d(net, 64, 5, stride=2) #7x7x64
net = slim.conv2d(net, 128, 5, padding='VALID') #3x3x128
net = slim.conv2d(net, 128, 3, padding='VALID') #1x1x128
# net = slim.dropout(net, 0.9)
net = tf.reshape(net, [-1, 128])
enc_hidden = linear(net, n_units) #for mu and sigma
return enc_hidden
def deconv_decoder(input_, feature_shape=(28, 28, 1), is_training=True):
with tf.variable_scope('deconvolution'):
net = tf.reshape(input_, [-1, 1, 1, input_.get_shape().as_list()[-1]]) #batch_size*seq_len, 1, 1, channels (would be a concatenated one)
with slim.arg_scope([slim.conv2d_transpose],
activation_fn=tf.nn.elu
):
net = slim.conv2d_transpose(net, 128, 3, padding='VALID') #3x3x128
net = slim.conv2d_transpose(net, 64, 5, padding='VALID') #5x5x64
net = slim.conv2d_transpose(net, 32, 5, stride=2) #13x13x32
net = slim.conv2d_transpose(net, 1, 5, stride=2, activation_fn=None) #28x28x1
dec_hidden = tf.reshape(net, [input_.get_shape().as_list()[0], -1, feature_shape[0]*feature_shape[1]*feature_shape[2]])
return dec_hidden
class VartiationalRNNEncoder(tf.contrib.rnn.RNNCell):
"""Variational RNN encoder."""
def __init__(self, x_dim, h_dim, z_dim = 100, renc=True, use_cnn=False, is_training=True):
self.n_h = h_dim
self.n_x = x_dim
self.n_z = z_dim
self.n_x_1 = x_dim
self.n_z_1 = z_dim
self.n_enc_hidden = z_dim
self.n_dec_hidden = z_dim
self.n_prior_hidden = z_dim
self.lstm = tf.contrib.rnn.LSTMCell(self.n_h, state_is_tuple=True, initializer=tf.orthogonal_initializer(gain=1.0))
self.prior_prod = 0.0
self.renc = renc
self.use_cnn = use_cnn
self.is_training = is_training
@property
def state_size(self):
return (self.n_h, self.n_h)
@property
def output_size(self):
return (self.n_z, self.n_z, self.n_z, self.n_z, self.n_z, self.n_z)
def zero_state(self, batch_size, dtype):
return self.lstm.zero_state(batch_size, dtype)
def __call__(self, x, state, scope=None):
if isinstance(state, tf.nn.rnn_cell.LSTMStateTuple):
c = state.c
h = state.h
else:
c, h = state
if not self.use_cnn:
with tf.variable_scope("phi_x"):
x_1 = nonlinear(x, n_units=[400, 400, 200], n_layers=3)
with tf.variable_scope("hidden"):
if self.renc:
enc_hidden = nonlinear(tf.concat((x_1, h), 1), n_units=200, n_layers=1)
else:
enc_hidden = nonlinear(x_1, n_units=200, n_layers=1)
with tf.variable_scope("mu"):
enc_mu = linear(enc_hidden, self.n_z)
with tf.variable_scope("sigma"):
enc_sigma = tf.exp(linear(enc_hidden, self.n_z, bias_start=0.0))
else:
enc_cnn = conv_encoder(input_=x, n_units=128, feature_shape=(28, 28, 1), is_training=self.is_training)
with tf.variable_scope("hidden"):
concat_rep = tf.concat((enc_cnn, h), 1)
enc_hidden = nonlinear(concat_rep, n_units=128, n_layers=1)
with tf.variable_scope("mu"):
enc_mu = linear(enc_hidden, self.n_z)
with tf.variable_scope("sigma"):
enc_sigma = tf.exp(linear(enc_hidden, self.n_z, bias_start=0.0))
self.eps = tf.random_normal((x.get_shape().as_list()[0], self.n_z), 0.0, 1.0, dtype=tf.float32)
self.enc_mu = enc_mu
self.enc_sigma = enc_sigma
self.enc_mu_post = enc_mu
self.enc_sigma_post = enc_sigma
with tf.variable_scope("z"):
self.z = tf.add(self.enc_mu_post, tf.multiply(self.enc_sigma_post, self.eps))
with tf.variable_scope("phi_z"):
z_1 = nonlinear(self.z, self.n_z_1, n_layers=1)
output, new_state = self.lstm(z_1, state, scope="LSTMCell")
return (self.enc_mu, self.enc_sigma, self.enc_mu_post, self.enc_sigma_post, self.z, h), new_state
class VAEDYN():
def __init__(self, args, sample=False):
def tf_cross_entropy(y, mu):
            # note: y and mu are reshaped with the batch and seq_length dimensions merged,
            # so reduce_sum(x, 1) sums over the pixel dimension
reconstr_loss = \
-tf.reduce_sum(y * tf.log(1e-5 + mu)
+ (1-y) * tf.log(1e-5 + 1 - mu),
1)
return reconstr_loss
def tf_square(y, mu):
result = tf.reduce_sum(tf.square(tf.subtract(y, mu)), 1)
return result
def tf_normal(y, mu, s, rho):
with tf.variable_scope('normal'):
ss = tf.maximum(1e-10,tf.square(s))
norm = tf.subtract(y[:,:], mu)
z = tf.div(tf.square(norm), ss)
denom_log = tf.log(2*np.pi*ss, name='denom_log')
result = tf.reduce_sum(z+denom_log, 1)/2
return result
def tf_kl_gaussgauss(mu_1, sigma_1, mu_2, sigma_2):
with tf.variable_scope("kl_gaussgauss"):
return tf.reduce_sum(0.5 * (
2 * tf.log(tf.maximum(1e-8,sigma_2),name='log_sigma_2')
- 2 * tf.log(tf.maximum(1e-8,sigma_1),name='log_sigma_1')
+ (tf.square(sigma_1) + tf.square(mu_1 - mu_2)) / tf.maximum(1e-8,(tf.square(sigma_2))) - 1
), 1)
def tf_kl_gaussunisotropic(mu1, sigma_1):
with tf.variable_scope("kl_gaussunistropic"):
return -0.5 * tf.reduce_sum(1 + 2*tf.log(tf.maximum(1e-8,sigma_1),name='log_sigma_1')
- tf.square(mu1)
- tf.square(sigma_1), 1)
def tf_kl_smooth(mu, sigma, args):
with tf.variable_scope('kl_smooth'):
mu_split = tf.split(mu, args.batch_size, 0)
sigma_split = tf.split(sigma, args.batch_size, 0)
mu_1 = tf.slice(mu_split, [0, 0, 0], [-1, args.seq_length-1, -1])
sigma_1 = tf.slice(sigma_split, [0, 0, 0], [-1, args.seq_length-1, -1])
mu_2 = tf.slice(mu_split, [0, 1, 0], [-1, -1, -1])
sigma_2 = tf.slice(sigma_split, [0, 1, 0], [-1, -1, -1])
mu_1_reshape = tf.reshape(mu_1, [(args.seq_length-1)*args.batch_size, -1])
sigma_1_reshape = tf.reshape(sigma_1, [(args.seq_length-1)*args.batch_size, -1])
mu_2_reshape = tf.reshape(mu_2, [(args.seq_length-1)*args.batch_size, -1])
sigma_2_reshape = tf.reshape(sigma_2, [(args.seq_length-1)*args.batch_size, -1])
return tf.reduce_sum(0.5 * (
2 * tf.log(tf.maximum(1e-8,sigma_2_reshape),name='log_sigma_2')
- 2 * tf.log(tf.maximum(1e-8,sigma_1_reshape),name='log_sigma_1')
+ (tf.square(sigma_1_reshape) + tf.square(mu_1_reshape - mu_2_reshape)) / tf.maximum(1e-8,(tf.square(sigma_2_reshape))) - 1
), 1)
def get_lossfunc(enc_mu, enc_sigma, dec_mu, prior_mu, prior_sigma, y, anneal_rate, args):
kl_loss = tf_kl_gaussgauss(enc_mu, enc_sigma, prior_mu, prior_sigma)
kl_unitropic_loss = tf_kl_gaussunisotropic(enc_mu, enc_sigma)
square_loss = tf_square(y, dec_mu)
cross_ent_loss = tf_cross_entropy(y, dec_mu)
# smooth_reg_loss = tf_kl_smooth(prior_mu, prior_sigma, args) * 0
            #<hyin/Apr-04-2017> also try an isotropic regularization on the prior z; would that imply a smooth prior?
return tf.reduce_mean(anneal_rate * kl_loss + cross_ent_loss) #+ tf.reduce_mean(kl_unitropic_loss) #original vrnn needs to disable this
self.args = args
if sample:
args.batch_size = 1
args.seq_length = 1
self.is_sample = sample
self.is_training = not self.is_sample
self.input_data = tf.placeholder(dtype=tf.float32, shape=[args.batch_size, args.seq_length, args.dim_size], name='input_data')
self.target_data = tf.placeholder(dtype=tf.float32, shape=[args.batch_size, args.seq_length, args.dim_size],name = 'target_data')
# rnn cell for encoder
cell = VartiationalRNNEncoder(args.dim_size, args.rnn_size, args.latent_size, True, args.use_cnn, self.is_training)
self.cell = cell
#zero state
self.initial_state_c, self.initial_state_h = cell.zero_state(batch_size=args.batch_size, dtype=tf.float32)
self.initial_lstm_state = tf.nn.rnn_cell.LSTMStateTuple(self.initial_state_c,self.initial_state_h)
self.input_sequence_len = tf.placeholder(dtype=tf.int32, shape=[args.batch_size])
input_seq_len = [args.seq_length] * args.batch_size
if self.is_sample:
inputs = tf.transpose(self.input_data, [1, 0, 2]) # permute n_steps and batch_size
inputs = tf.reshape(inputs, [-1, args.dim_size])
# Split data because rnn cell needs a list of inputs for the RNN inner loop
inputs = tf.split(inputs, args.seq_length, 0) # n_steps * (batch_size, n_input)
outputs, last_state = tf.nn.static_rnn( cell=cell,
inputs=inputs,
initial_state=self.initial_lstm_state,
dtype=tf.float32,
sequence_length=input_seq_len,
scope='encoder')
outputs_reshape = []
names = ["enc_mu", "enc_sigma", "enc_mu_post", "enc_sigma_post", "z", "hidden_state"]
for n,name in enumerate(names):
x = tf.stack([o[n] for o in outputs])
x = tf.transpose(x,[1,0,2]) #now batch*n_steps*n_input
outputs_reshape.append(x)
enc_mu, enc_sigma, enc_mu_post, enc_sigma_post, z_sample, hidden_state = outputs_reshape
else:
outputs, last_state = tf.nn.dynamic_rnn(cell=cell,
dtype=tf.float32,
sequence_length=input_seq_len,
inputs=self.input_data,
initial_state=self.initial_lstm_state,
scope="encoder"
)
enc_mu, enc_sigma, enc_mu_post, enc_sigma_post, z_sample, hidden_state = outputs
self.enc_mu = enc_mu
self.enc_sigma = enc_sigma
self.enc_mu_post = enc_mu_post
self.enc_sigma_post = enc_sigma_post
self.final_state_c,self.final_state_h = last_state
#prior from hidden state
with tf.variable_scope("prior"):
with tf.variable_scope("hidden"):
#<hyin/Apr-07-2017> note that h depends on the last z
prior_hidden = nonlinear(hidden_state, n_units=self.cell.n_prior_hidden, n_layers=1)
with tf.variable_scope("mu"):
prior_mu = linear(prior_hidden, self.cell.n_z)
with tf.variable_scope("sigma"):
prior_sigma = tf.exp(linear(prior_hidden, self.cell.n_z, bias_start=0.0))
self.prior_mu = prior_mu
self.prior_sigma = prior_sigma
self.z_sample = z_sample
self.hidden_state = hidden_state
if not args.use_cnn:
with tf.variable_scope("decoder"):
with tf.variable_scope("hidden"):
# dec_hidden = nonlinear(z_sample, n_units=[200, 200, 400, 400], n_layers=4)
dec_hidden = nonlinear(tf.concat((z_sample, hidden_state), 2), n_units=[200, 200, 400, 400], n_layers=4)
with tf.variable_scope("mu"):
dec_mu = tf.nn.sigmoid(linear(dec_hidden, self.cell.n_x))
else:
with tf.variable_scope("decoder"):
dec_hidden = nonlinear(tf.concat((z_sample, hidden_state), 2), n_units=self.cell.n_z, n_layers=1)
dec_hidden = deconv_decoder(input_=dec_hidden, feature_shape=(28, 28, 1), is_training=self.is_training)
with tf.variable_scope("mu"):
dec_mu = tf.nn.sigmoid(linear(dec_hidden, self.cell.n_x))
self.mu = dec_mu
#########
# for training
#########
enc_mu_flatten = tf.reshape(self.enc_mu_post,[-1, args.latent_size])
enc_sigma_flatten = tf.reshape(self.enc_sigma_post,[-1, args.latent_size])
dec_mu_flatten = tf.reshape(self.mu, [-1, args.dim_size])
prior_mu_flatten = tf.reshape(self.prior_mu, [-1, args.latent_size])
prior_sigma_flatten = tf.reshape(self.prior_sigma, [-1, args.latent_size])
flat_target_data = tf.reshape(self.target_data,[-1, args.dim_size])
self.target = flat_target_data
self.flat_input = flat_target_data
self.anneal_rate = tf.Variable(1.0, trainable=False)
lossfunc = get_lossfunc(enc_mu_flatten, enc_sigma_flatten, dec_mu_flatten, prior_mu_flatten, prior_sigma_flatten, flat_target_data, self.anneal_rate, args)
with tf.variable_scope('cost'):
self.cost = lossfunc
tf.summary.scalar('cost', self.cost)
# tf.summary.scalar('mu', tf.reduce_mean(self.mu))
self.merged_summary = tf.summary.merge_all()
self.lr = tf.Variable(1.00, trainable=False)
tvars = tf.trainable_variables()
for t in tvars:
print t.name
# grads = tf.gradients(self.cost, tvars)
# grads = tf.cond(
# tf.global_norm(grads) > 1e-20,
# lambda: tf.clip_by_global_norm(grads, args.grad_clip)[0],
# lambda: grads)
optimizer = tf.train.AdamOptimizer(self.lr)
# self.train_op = optimizer.apply_gradients(zip(grads, tvars))
self.train_op = optimizer.minimize(lossfunc)
return
def reconstruct(self, sess, args, seq):
"a function to test reconstruction especially if the prior is properly learned"
if self.is_sample:
prev_state = sess.run(self.cell.zero_state(1, tf.float32))
# prev_state = self.cell.zero_state(1, tf.float32)
num = len(seq)
mus = np.zeros((num, args.dim_size), dtype=np.float32)
sigmas = np.zeros((num, args.dim_size), dtype=np.float32)
prior_mus = np.zeros((num, args.latent_size), dtype=np.float32)
prior_sigmas = np.zeros((num, args.latent_size), dtype=np.float32)
enc_mus = np.zeros((num, args.latent_size), dtype=np.float32)
enc_sigmas = np.zeros((num, args.latent_size), dtype=np.float32)
reconstr_cost = 0
for i in range(num):
prev_x = np.zeros((1, 1, args.dim_size), dtype=np.float32)
prev_x[0][0] = seq[i]
                #<hyin/Jan-23rd-2017> two remaining problems with the reconstruction:
                #first, the second step (the image produced by the first space hit) always shows the same pattern. The issue is not consistency: all the letter images start from
                #a blank canvas, so initially each contains only a dot at a different location, yet the reconstructed pattern is far from a dot. Fortunately this wrong pattern does not seem to harm
                # the following reconstruction much
                # second, if the reconstruction is fed with the synthesized image, the dynamics diverge immediately. This resembles adversarial samples for NNs, where a slightly
                # corrupted input drastically changes the output. Does that imply overfitting? Ideally the latent dynamics should not depend on the input (and thus should be robust to corrupted input),
                # but as the comment under the LSTM update indicates, it captures the dynamical behavior poorly... Maybe I should try again now that the evaluation of next_state_c and next_state_h is fixed
                #<hyin/Jul-5th-2017> the step-based reconstruction seems much worse than processing the entire trajectory, which prevents us from encoding a piece of a trajectory. What is the problem?
if i == 0:
feed = {self.input_data: prev_x,
self.initial_state_c:prev_state[0],
self.initial_state_h:prev_state[1]}
# prior_mu, prior_sigma = sess.run([self.enc_mu, self.enc_sigma], feed)
prior_mu, prior_sigma = sess.run([self.enc_mu_post, self.enc_sigma_post], feed)
else:
feed = {self.initial_state_c:prev_state[0],
self.initial_state_h:prev_state[1]}
prior_mu, prior_sigma = sess.run([self.prior_mu, self.prior_sigma], feed)
feed = {self.input_data: prev_x,
self.initial_state_c:prev_state[0],
self.initial_state_h:prev_state[1]}
            # this is the working evaluation; what is the difference?
# [next_state_c, next_state_h, o_mu, enc_mu, enc_sigma] = sess.run([self.final_state_c, self.final_state_h, self.mu, self.enc_mu, self.enc_sigma], feed)
[next_state_c, next_state_h, o_mu, enc_mu, enc_sigma] = sess.run([self.final_state_c, self.final_state_h, self.mu, self.enc_mu_post, self.enc_sigma_post], feed)
#calculate cost
input_data = np.zeros((1, 1, args.dim_size), dtype=np.float32)
# output_data = np.zeros((1, 1, args.dim_size), dtype=np.float32)
input_data[0][0] = seq[i]
# output_data[0][0] = mus[i]
cost = sess.run(self.cost, {self.input_data: input_data, self.target_data: input_data, self.initial_state_c:prev_state[0], self.initial_state_h:prev_state[1]})
reconstr_cost += cost
#record all results
mus[i] = o_mu
prior_mus[i] = prior_mu
prior_sigmas[i] = prior_sigma
enc_mus[i] = enc_mu
enc_sigmas[i] = enc_sigma
prev_state = next_state_c, next_state_h
final_state_c = prev_state[0]
final_state_h = prev_state[1]
reconstr_cost = reconstr_cost / float(num)
else:
input_data = np.zeros((args.batch_size, args.seq_length, args.dim_size), dtype=np.float32)
input_data = seq
feed = {self.input_data: input_data, self.target_data: input_data}
# reconstr_cost, final_state_c, final_state_h, mus, prior_mus, prior_sigmas, enc_mus, enc_sigmas, flat_input, target = sess.run(
# [self.cost, self.final_state_c, self.final_state_h, self.mu, self.prior_mu, self.prior_sigma, self.enc_mu, self.enc_sigma, self.flat_input, self.target], feed)
reconstr_cost, final_state_c, final_state_h, mus, prior_mus, prior_sigmas, enc_mus, enc_sigmas, flat_input, target = sess.run(
[self.cost, self.final_state_c, self.final_state_h, self.mu, self.prior_mu, self.prior_sigma, self.enc_mu_post, self.enc_sigma_post, self.flat_input, self.target], feed)
mus = np.reshape(mus, (args.batch_size, args.seq_length, args.dim_size))
# mus = np.rollaxis(mus, 1, 0)
return mus, prior_mus, prior_sigmas, enc_mus, enc_sigmas, reconstr_cost, final_state_c, final_state_h
def encode(self, sess, args, seq):
mus, prior_mus, prior_sigmas, enc_mus, enc_sigmas, reconstr_cost, final_state_c, final_state_h = self.reconstruct(sess, args, seq)
return prior_mus, prior_sigmas, enc_mus, enc_sigmas, final_state_c, final_state_h
def sample(self, sess, args, num=20, start=None):
        def sample_gaussian(mu, sigma):
            # note: the noise term is multiplied by 0.0, so this currently
            # returns the mean deterministically (likely a debugging leftover)
            return mu + (sigma*np.random.randn(*sigma.shape)*0.0)
if start is None:
prev_state = sess.run(self.cell.zero_state(1, tf.float32))
prev_x = np.zeros((1, 1, args.dim_size))
else:
prev_x = np.zeros((1, 1, args.dim_size))
prev_x[0][0] = start[0]
prev_state = start[1]
chunks = np.zeros((num, args.dim_size), dtype=np.float32)
zs = np.zeros((num, args.latent_size), dtype=np.float32)
for i in xrange(num):
if i == 0:
feed = {self.input_data: prev_x,
self.initial_state_c:prev_state[0],
self.initial_state_h:prev_state[1]}
# prior_mu, prior_sigma = sess.run([self.enc_mu_post, self.enc_sigma_post], feed)
prior_mu, prior_sigma = sess.run([self.enc_mu, self.enc_sigma], feed)
else:
feed = {self.initial_state_c:prev_state[0],
self.initial_state_h:prev_state[1]}
prior_mu, prior_sigma = sess.run([self.prior_mu, self.prior_sigma], feed)
# print prior_mu, prior_sigma
z_sample = prior_mu * 1 + np.random.randn(prev_x.shape[0], args.latent_size) * prior_sigma * 1
            #<hyin/Sep-25th-2017> this does not work: the 'unfetchable' problem is that TensorFlow does not allow us to access
            #variables created inside a cond or loop such as dynamic_rnn. We might have to restructure the code heavily to move the
            #output layers outside the variational RNN cell
# enc_mu_post, enc_sigma_post, z_sample = sess.run([self.enc_mu_post, self.enc_sigma_post, self.cell.z], feed)
o_mu = sess.run([self.mu], {self.hidden_state:[prev_state[1]], self.z_sample:z_sample})[0]
# print(enc_mu_post, enc_sigma_post, z_sample)
# curr_x_reshaped = np.zeros((1, 1, args.dim_size), dtype=np.float32)
# curr_x_reshaped[0][0] = curr_x
# [next_state_c, next_state_h] = sess.run([self.final_state_c, self.final_state_h], { self.input_data:curr_x_reshaped,
# self.cell.z:z_sample,
# self.initial_state_c:prev_state[0],
# self.initial_state_h:prev_state[1]})
            # <hyin/Jul-14th-2017> today I found that removing x from the state propagation seems to be important if we increase the KL divergence weight, say, to 15,
            # but aren't the gradients through the sequence critical?
            # <hyin/Sep-26th-2017> so it cannot be fed again...
[next_state_c, next_state_h] = sess.run([self.final_state_c, self.final_state_h], {self.cell.z:z_sample[0], self.initial_state_c:prev_state[0], self.initial_state_h:prev_state[1]})
chunks[i] = o_mu[0][0]
zs[i] = z_sample
prev_x = np.zeros((1, 1, args.dim_size), dtype=np.float32)
prev_x = o_mu
prev_state = next_state_c, next_state_h
return chunks, zs
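# A sampling sketch (comments only): ``args`` is assumed to carry the fields
# referenced above (dim_size, rnn_size, latent_size, batch_size, seq_length,
# use_cnn), and the checkpoint path is hypothetical.
#
#   model = VAEDYN(args, sample=True)
#   saver = tf.train.Saver()
#   with tf.Session() as sess:
#       saver.restore(sess, 'save/model.ckpt')
#       chunks, zs = model.sample(sess, args, num=20)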
|
{
"content_hash": "b7fc38d50c8a1642b9b9341ea2dbe3cf",
"timestamp": "",
"source": "github",
"line_count": 534,
"max_line_length": 221,
"avg_line_length": 51.234082397003746,
"alnum_prop": 0.5491428780291677,
"repo_name": "navigator8972/vae_dyn",
"id": "f928d3d7c21a58dc1f4d095ae8c7ca8dac99dd4e",
"size": "27359",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "model_vaedyn.py",
"mode": "33261",
"license": "bsd-3-clause",
"language": [
{
"name": "Python",
"bytes": "69444"
},
{
"name": "Shell",
"bytes": "75"
}
],
"symlink_target": ""
}
|
"""
desispec.io.skygradpca
===============
IO routines for sky gradient pca.
"""
from __future__ import absolute_import, division
import os
import time
from astropy.io import fits
from desiutil.log import get_logger
from . import iotime
from .util import get_tempfilename
def write_skygradpca(outfile, skygradpca):
"""Write sky model.
Args:
outfile : filename or (night, expid, camera) tuple
skygradpca : SkyGradPCA object
"""
from .util import fitsheader, makepath
log = get_logger()
outfile = makepath(outfile, 'skygradpca')
# Convert header to fits.Header if needed
hdr = fitsheader(skygradpca.header)
hx = fits.HDUList()
hdr['EXTNAME'] = 'FLUX'
hx.append(fits.PrimaryHDU(skygradpca.flux.astype('f4'), header=hdr))
hx.append(fits.ImageHDU(skygradpca.wave.astype('f8'), name='WAVELENGTH'))
t0 = time.time()
tmpfile = get_tempfilename(outfile)
hx.writeto(tmpfile, overwrite=True, checksum=True)
os.rename(tmpfile, outfile)
duration = time.time() - t0
log.info(iotime.format('write', outfile, duration))
return outfile
def read_skygradpca(filename):
"""Read sky grad pca file and return SkyGradPCA object.
"""
from .util import native_endian
from ..skygradpca import SkyGradPCA
log = get_logger()
t0 = time.time()
fx = fits.open(filename, memmap=False, uint=True)
hdr = fx[0].header
wave = native_endian(fx["WAVELENGTH"].data.astype('f8'))
flux = native_endian(fx["FLUX"].data.astype('f4'))
fx.close()
duration = time.time() - t0
log.info(iotime.format('read', filename, duration))
return SkyGradPCA(wave=wave, flux=flux, header=hdr)
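# A round-trip sketch (comments only, since this module lives inside the
# desispec package): the constructor arguments mirror the read_skygradpca()
# call above, and the array shapes are illustrative.
#
#   import numpy as np
#   from desispec.skygradpca import SkyGradPCA
#   pca = SkyGradPCA(wave=np.arange(3600., 9800., 0.8),
#                    flux=np.zeros((2, 7750), dtype='f4'), header=None)
#   write_skygradpca('skygradpca-b.fits', pca)
#   pca2 = read_skygradpca('skygradpca-b.fits')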
|
{
"content_hash": "d9dc654b5037379edb06b46415e38bff",
"timestamp": "",
"source": "github",
"line_count": 64,
"max_line_length": 77,
"avg_line_length": 26.515625,
"alnum_prop": 0.6700058927519151,
"repo_name": "desihub/desispec",
"id": "5955b2302fd692841ef6c836f3c85272e9fa6fdb",
"size": "1697",
"binary": false,
"copies": "1",
"ref": "refs/heads/main",
"path": "py/desispec/io/skygradpca.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "CSS",
"bytes": "761"
},
{
"name": "Python",
"bytes": "4219435"
},
{
"name": "Shell",
"bytes": "17927"
}
],
"symlink_target": ""
}
|
from adapter import NetworkAdapter
import StringIO
class InterfacesReader:
    ''' Short-lived class to read an interfaces file '''
def __init__(self, interfaces_path):
self._interfaces_path = interfaces_path
self._reset()
@property
def adapters(self):
return self._adapters
def parse_interfaces(self):
''' Read /etc/network/interfaces. '''
self._reset()
# Open up the interfaces file. Read only.
with open(self._interfaces_path, "r") as interfaces:
self._read_lines_from_file(interfaces)
return self._parse_interfaces_impl()
def parse_interfaces_from_string(self, data):
self._reset()
# Can't be used in 'with..as'
string_file = StringIO.StringIO(data)
self._read_lines_from_file(string_file)
string_file.close()
return self._parse_interfaces_impl()
def _parse_interfaces_impl(self):
''' Save adapters
Return an array of networkAdapter instances.
'''
for entry in self._auto_list:
for adapter in self._adapters:
if adapter._ifAttributes['name'] == entry:
adapter.setAuto(True)
for entry in self._hotplug_list:
for adapter in self._adapters:
if adapter._ifAttributes['name'] == entry:
adapter.setHotplug(True)
return self._adapters
    def _read_lines_from_file(self, fileObj):
        # Loop through the interfaces file.
        for line in fileObj:
            # Identify the clauses by analyzing the first word of each line.
            # Skip comment lines entirely.
            if line.strip().startswith("#"):
                continue
            self._parse_iface(line)
            # Ignore blank lines.
            if line.isspace():
                continue
            self._parse_details(line)
            self._read_auto(line)
            self._read_hotplug(line)
def _parse_iface(self, line):
if line.startswith('iface'):
sline = line.split()
# Update the self._context when an iface clause is encountered.
self._context += 1
self._adapters.append(NetworkAdapter(sline[1]))
self._adapters[self._context].setAddressSource(sline[-1])
self._adapters[self._context].setAddrFam(sline[2])
def _parse_details(self, line):
if line[0].isspace() is True:
sline = line.split()
if sline[0] == 'address':
self._adapters[self._context].setAddress(sline[1])
elif sline[0] == 'netmask':
self._adapters[self._context].setNetmask(sline[1])
elif sline[0] == 'gateway':
self._adapters[self._context].setGateway(sline[1])
elif sline[0] == 'broadcast':
self._adapters[self._context].setBroadcast(sline[1])
elif sline[0] == 'network':
self._adapters[self._context].setNetwork(sline[1])
elif sline[0].startswith('bridge') is True:
opt = sline[0].split('_')
sline.pop(0)
ifs = " ".join(sline)
self._adapters[self._context].replaceBropt(opt[1], ifs)
elif sline[0] == 'up' or sline[0] == 'down' or sline[0] == 'pre-up' or sline[0] == 'post-down':
ud = sline.pop(0)
cmd = ' '.join(sline)
if ud == 'up':
self._adapters[self._context].appendUp(cmd)
elif ud == 'down':
self._adapters[self._context].appendDown(cmd)
elif ud == 'pre-up':
self._adapters[self._context].appendPreUp(cmd)
elif ud == 'post-down':
self._adapters[self._context].appendPostDown(cmd)
else:
                # store it as-is so as not to lose it
self._adapters[self._context].setUnknown(sline[0], sline[1])
def _read_auto(self, line):
''' Identify which adapters are flagged auto. '''
if line.startswith('auto'):
sline = line.split()
for word in sline:
if word == 'auto':
pass
else:
self._auto_list.append(word)
def _read_hotplug(self, line):
''' Identify which adapters are flagged allow-hotplug. '''
if line.startswith('allow-hotplug'):
sline = line.split()
for word in sline:
if word == 'allow-hotplug':
pass
else:
self._hotplug_list.append(word)
def _reset(self):
# Initialize a place to store created networkAdapter objects.
self._adapters = []
# Keep a list of adapters that have the auto or allow-hotplug flags set.
self._auto_list = []
self._hotplug_list = []
# Store the interface context.
# This is the index of the adapters collection.
self._context = -1
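# A minimal usage sketch (not part of the original module); the sample
# interfaces content is hypothetical, and parse_interfaces_from_string()
# means no real /etc/network/interfaces is read.
if __name__ == '__main__':
    SAMPLE = ("auto eth0\n"
              "iface eth0 inet static\n"
              "    address 192.168.0.10\n"
              "    netmask 255.255.255.0\n"
              "    gateway 192.168.0.1\n")
    reader = InterfacesReader('/etc/network/interfaces')
    for adapter in reader.parse_interfaces_from_string(SAMPLE):
        print(adapter._ifAttributes)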
|
{
"content_hash": "1c2d439c0632850d86461eb0f0dbc175",
"timestamp": "",
"source": "github",
"line_count": 141,
"max_line_length": 107,
"avg_line_length": 36.716312056737586,
"alnum_prop": 0.5288777284141395,
"repo_name": "dggreenbaum/debinterface",
"id": "f016376a98938323188722da851a07274a0bddb9",
"size": "5241",
"binary": false,
"copies": "4",
"ref": "refs/heads/master",
"path": "interfacesReader.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Python",
"bytes": "35189"
}
],
"symlink_target": ""
}
|
"""Scans a source JS file for its provided and required namespaces.
Simple class to scan a JavaScript file and express its dependencies.
"""
import re
_BASE_REGEX_STRING = r'^\s*goog\.%s\(\s*[\'"](.+)[\'"]\s*\)'
_PROVIDE_REGEX = re.compile(_BASE_REGEX_STRING % 'provide')
_REQUIRES_REGEX = re.compile(_BASE_REGEX_STRING % 'require')
class Source(object):
"""Scans a JavaScript source for its provided and required namespaces."""
def __init__(self, source):
"""Initialize a source.
Args:
source: str, The JavaScript source.
"""
self.provides = set()
self.requires = set()
self._source = source
self._ScanSource()
  def __str__(self):
    # The original referenced a nonexistent self._path attribute; describe
    # the scanned source instead.
    return 'Source (%d bytes)' % len(self._source)
def GetSource(self):
"""Get the source as a string."""
return self._source
def _ScanSource(self):
"""Fill in provides and requires by scanning the source."""
# TODO: Strip source comments first, as these might be in a comment
# block. RegExes can be borrowed from other projects.
source = self.GetSource()
for line in source.splitlines():
match = _PROVIDE_REGEX.match(line)
if match:
self.provides.add(match.group(1))
match = _REQUIRES_REGEX.match(line)
if match:
self.requires.add(match.group(1))
def GetFileContents(path):
"""Get a file's contents as a string.
Args:
path: str, Path to file.
Returns:
str, Contents of file.
Raises:
IOError: An error occurred opening or reading the file.
"""
fileobj = open(path)
try:
return fileobj.read()
finally:
fileobj.close()
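# A minimal usage sketch (not part of the original module), scanning an
# inline snippet instead of a file on disk.
if __name__ == '__main__':
  SNIPPET = ("goog.provide('demo.app');\n"
             "goog.require('goog.dom');\n"
             "goog.require('goog.events');\n")
  src = Source(SNIPPET)
  print(sorted(src.provides))  # ['demo.app']
  print(sorted(src.requires))  # ['goog.dom', 'goog.events']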
|
{
"content_hash": "fe910f00627d5cd9a63f769aa868bb05",
"timestamp": "",
"source": "github",
"line_count": 71,
"max_line_length": 75,
"avg_line_length": 22.591549295774648,
"alnum_prop": 0.6408977556109726,
"repo_name": "maxogden/googlyscript",
"id": "4c1418d2c968c150febf586b09546362fb81478e",
"size": "1672",
"binary": false,
"copies": "5",
"ref": "refs/heads/master",
"path": "closure-library/closure/bin/build/source.py",
"mode": "33261",
"license": "apache-2.0",
"language": [
{
"name": "Java",
"bytes": "2418"
},
{
"name": "JavaScript",
"bytes": "8181620"
},
{
"name": "Python",
"bytes": "45595"
},
{
"name": "Ruby",
"bytes": "36791"
}
],
"symlink_target": ""
}
|
import os
import sublime
import sublime_plugin
import platform
import re
import subprocess
import tempfile
from subprocess import Popen, PIPE
settings = None
class Settings:
def __init__(self):
package_settings = sublime.load_settings("RustAutoComplete.sublime-settings")
package_settings.add_on_change("racer", settings_changed)
package_settings.add_on_change("search_paths", settings_changed)
self.racer_bin = package_settings.get("racer", "racer")
self.search_paths = package_settings.get("search_paths", [])
self.package_settings = package_settings
def unload(self):
self.package_settings.clear_on_change("racer")
self.package_settings.clear_on_change("search_paths")
def plugin_loaded():
global settings
settings = Settings()
def plugin_unloaded():
global settings
if settings != None:
settings.unload()
settings = None
def settings_changed():
global settings
if settings != None:
settings.unload()
settings = None
settings = Settings()
class Result:
def __init__(self, parts):
self.completion = parts[0]
self.snippet = parts[1]
self.row = int(parts[2])
self.column = int(parts[3])
self.path = parts[4]
self.type = parts[5]
self.context = parts[6]
def expand_all(paths):
return [os.path.expanduser(path)
for path in paths]
def determine_save_dir(view):
# If we return None then it will fall back on the system tmp directory
save_dir = None
# Try to save to the same directory the file is saved in
if view.file_name() is not None:
save_dir = os.path.dirname(view.file_name())
# If the file has not been saved, and the window has a folder open,
# try to treat the main folder as if it were a cargo project
source_folder = ""
if len(view.window().folders()) > 0:
source_folder = os.path.join(view.window().folders()[0], "src")
if save_dir is None and os.path.isdir(source_folder):
save_dir = source_folder
# If nothing else has worked, look at the folders that other open files are in
if save_dir is None:
paths = [view.file_name() for view in view.window().views() if view.file_name() is not None]
# We only care about open rust files
paths = [path for path in paths if path[-3:] == ".rs"]
directories = [os.path.dirname(path) for path in paths]
if len(directories) == 0:
return None
        # Count the frequency of occurrence of each path
dirs = {}
for item in directories:
if item not in dirs:
dirs[item] = 1
else:
dirs[item] += 1
# Use the most common path
save_dir = max(dirs.keys(), key=(lambda key: dirs[key]))
return save_dir
def run_racer(view, cmd_list):
# Retrieve the entire buffer
region = sublime.Region(0, view.size())
content = view.substr(region)
with_snippet = cmd_list[0] == "complete-with-snippet"
# Figure out where to save the temp file so that racer can do
# autocomplete based on other user files
save_dir = determine_save_dir(view)
print(save_dir)
# Save that buffer to a temporary file for racer to use
temp_file = tempfile.NamedTemporaryFile(mode='w', encoding='utf-8', delete=False, dir=save_dir)
temp_file_path = temp_file.name
temp_file.write(content)
temp_file.close()
cmd_list.insert(0, settings.racer_bin)
cmd_list.append(temp_file_path)
# Copy the system environment and add the source search
# paths for racer to it
env = os.environ.copy()
expanded_search_paths = expand_all(settings.search_paths)
if 'RUST_SRC_PATH' in env:
expanded_search_paths.append(env['RUST_SRC_PATH'])
env['RUST_SRC_PATH'] = os.pathsep.join(expanded_search_paths)
# Run racer
startupinfo = None
if os.name == 'nt':
startupinfo = subprocess.STARTUPINFO()
startupinfo.dwFlags |= subprocess.STARTF_USESHOWWINDOW
process = Popen(cmd_list, stdout=PIPE, env=env, startupinfo=startupinfo)
(output, err) = process.communicate()
exit_code = process.wait()
# print(output)
# Remove temp file
os.remove(temp_file_path)
# Parse results
results = []
match_string = "MATCH "
if exit_code == 0:
for byte_line in output.splitlines():
line = byte_line.decode("utf-8")
if line.startswith(match_string):
if with_snippet:
parts = line[len(match_string):].split(';', 7)
else:
parts = line[len(match_string):].split(',', 6)
parts.insert(1, "")
result = Result(parts)
if result.path == view.file_name():
continue
if result.path == temp_file_path:
result.path = view.file_name()
results.append(result)
else:
print("failed: exit_code:", exit_code, output)
return results
class RustAutocomplete(sublime_plugin.EventListener):
def on_query_completions(self, view, prefix, locations):
# Check if this is a Rust source file. This check
# relies on the Rust syntax formatting extension
# being installed - https://github.com/jhasse/sublime-rust
if view.match_selector(locations[0], "source.rust"):
# Get the buffer location in correct format for racer
row, col = view.rowcol(locations[0])
row += 1
try:
raw_results = run_racer(view, ["complete-with-snippet", str(row), str(col)])
except FileNotFoundError:
print("Unable to find racer executable (check settings)")
return
results = []
            lalign = 0
            ralign = 0
for result in raw_results:
result.middle = "{0} ({1})".format(result.type, os.path.basename(result.path))
lalign = max(lalign,len(result.completion)+len(result.middle))
ralign = max(ralign, len(result.context))
for result in raw_results:
context = result.context
result = "{0} {1:>{3}} : {2:{4}}".format(result.completion, result.middle, result.context, lalign - len(result.completion), ralign), result.snippet
results.append(result)
if len(results) > 0:
# return list(set(results))
return (list(set(results)),
sublime.INHIBIT_WORD_COMPLETIONS | sublime.INHIBIT_EXPLICIT_COMPLETIONS)
class RustGotoDefinitionCommand(sublime_plugin.TextCommand):
def run(self, edit):
# Get the buffer location in correct format for racer
row, col = self.view.rowcol(self.view.sel()[0].begin())
row += 1
results = run_racer(self.view, ["find-definition", str(row), str(col)])
if len(results) == 1:
result = results[0]
path = result.path
# On Windows the racer will return the paths without the drive
# letter and we need the letter for the open_file to work.
            if platform.system() == 'Windows' and not re.compile(r'^\w:').match(path):
path = 'c:' + path
encoded_path = "{0}:{1}:{2}".format(path, result.row, result.column)
self.view.window().open_file(encoded_path, sublime.ENCODED_POSITION)
|
{
"content_hash": "e0c00b89d0b5c1c25eff3b73166e3328",
"timestamp": "",
"source": "github",
"line_count": 222,
"max_line_length": 163,
"avg_line_length": 33.990990990990994,
"alnum_prop": 0.6015107341637954,
"repo_name": "nickmab/RustAutoComplete",
"id": "b3478f658e6ab19ba2d72a2335dcea734f3e8370",
"size": "7546",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "RustAutoComplete.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "JavaScript",
"bytes": "1655"
},
{
"name": "Python",
"bytes": "13107"
}
],
"symlink_target": ""
}
|
"""This module implements the Sublime Text 3 commands provided by remote."""
import os
import re
import sys
import sublime
import sublime_plugin
import subprocess
import threading
sys.path.append(os.path.dirname(os.path.abspath(__file__)))
import remote.sublime_api as sublime_api
import remote.sync_api as sync_api
import remote.vagrant_api as vagrant_api
# =============================================================================
class RsyncOptionsCommand(sublime_plugin.TextCommand):
"""Override default rsync options for Remote."""
def run(self, edit, paths):
sublime.set_timeout_async(lambda: rsync_options(paths[0], None), 0)
def rsync_options(path, callback):
print("Local path", path)
w = sublime.active_window()
def done_with_options(userInput):
print("Options", userInput)
if len(userInput) == 0:
do_it(sync_api.default_rsync_options())
return True
do_it(userInput)
return True
def do_it(rsyncOptions):
settings = {"rsyncOptions": rsyncOptions}
sublime_api.update_project_settings(w, path, settings)
if callback is not None:
callback(settings)
options = sync_api.default_rsync_options()
found = sublime_api.project_by_path(w, path)
if found is not None and found["rsyncOptions"] != "":
options = found["rsyncOptions"]
sublime_api.show_input_panel("Use these rsync options:",
options, done_with_options, None, None)
# =============================================================================
class AddRemoteCommand(sublime_plugin.TextCommand):
"""Map a new remote path to a local project path."""
def run(self, edit, paths):
sublime.set_timeout_async(lambda: add_remote_async(paths[0], None), 0)
def add_remote_async(path, callback):
print("Local path", path)
w = sublime.active_window()
def done_with_folder(userInput):
print("Remote path", userInput)
if len(userInput) == 0:
do_it("", "")
return True
parts = userInput.split(":")
if len(parts) != 2:
sublime_api.error_message("The remote path you entered does not" +
" appear to contain a host")
return False
more = parts[0].split("@")
host = ""
if len(more) > 2:
sublime_api.error_message("Unable to parse the remote path you" +
" entered")
return False
elif len(more) == 2:
host = more[1]
else:
host = more[0]
if host == "vagrant":
vms = ["Select VM below...", "---"]
vagrant_api.get_vm_list(vms)
if len(vms) == 2:
sublime_api.error_message("No vagrant VMs found")
return False
if len(vms) == 3:
done_with_vm(userInput, vms, 2)
else:
sublime_api.show_quick_panel(vms,
lambda i=-1:
done_with_vm(userInput, vms, i))
else:
do_it(userInput, "")
return True
def done_with_vm(remotePath, vms, userSelection):
if userSelection == -1:
return False
vm = vagrant_api.parse_vm_id(vms[userSelection])
if vm is None:
return False
print("VM selected", vm)
sshOptions = vagrant_api.get_ssh_options(vm)
print("ssh options", sshOptions)
if sshOptions != "":
do_it(remotePath, sshOptions)
def do_it(remotePath, sshOptions):
settings = {"remotePath": remotePath, "remoteOptions": sshOptions}
sublime_api.update_project_settings(w, path, settings)
if callback is not None:
callback(settings)
remotePath = ""
found = sublime_api.project_by_path(w, path)
if found is not None and found["remotePath"] != "":
remotePath = found["remotePath"]
sublime_api.show_input_panel("Sync this folder to remote folder:",
remotePath, done_with_folder, None, None)
# =============================================================================
class FromRemote(sublime_plugin.TextCommand):
"""Sync a local directory from a remote directory."""
def run(self, edit, paths):
sublime.set_timeout_async(lambda: from_remote_async(paths[0]), 0)
def from_remote_async(path):
print("From local path", path)
w = sublime.active_window()
found = sublime_api.project_by_path(w, path)
if found is None or found["remotePath"] == "":
add_remote_async(path, lambda o: sync_api.rsync_remote(
o.get("remotePath", ""), path,
o.get("remoteOptions", ""),
o.get("rsyncOptions", "")))
return True
return sync_api.rsync_remote(found.get("remotePath", ""),
found.get("path", ""),
found.get("remoteOptions", ""),
found.get("rsyncOptions", ""))
# =============================================================================
class ToRemote(sublime_plugin.TextCommand):
"""Sync a local directory to a remote directory."""
def run(self, edit, paths):
sublime.set_timeout_async(lambda: to_remote_async(paths[0]), 0)
def to_remote_async(path):
print("To local path", path)
w = sublime.active_window()
found = sublime_api.project_by_path(w, path)
if found is None or found["remotePath"] == "":
add_remote_async(path, lambda o: sync_api.rsync_remote(path,
o.get("remotePath", ""),
o.get("remoteOptions", ""),
o.get("rsyncOptions", "")))
return True
return sync_api.rsync_remote(found.get("path", ""),
found.get("remotePath", ""),
found.get("remoteOptions", ""),
found.get("rsyncOptions", ""))
# =============================================================================
class RemoteEdit(sublime_plugin.EventListener):
"""Sync a local change out."""
def on_post_save_async(self, view):
filename = view.file_name()
w = sublime.active_window()
found = sublime_api.project_by_path(w, filename)
if found is None:
return False
return sync_api.rsync_remote_file(found.get("path", ""), filename,
found.get("remotePath", ""),
found.get("remoteOptions", ""),
found.get("rsyncOptions", ""))
|
{
"content_hash": "4739de87ec3fd97d661e798a2b12973e",
"timestamp": "",
"source": "github",
"line_count": 210,
"max_line_length": 79,
"avg_line_length": 32.733333333333334,
"alnum_prop": 0.5158568519057317,
"repo_name": "devaos/sublime-remote",
"id": "e63ab3a59d370efcbb06433d108e7c3dfebec77b",
"size": "6995",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "commands.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "JavaScript",
"bytes": "514"
},
{
"name": "Python",
"bytes": "15493"
}
],
"symlink_target": ""
}
|
from datetime import *
from django.db import models
from django.utils.translation import ugettext as _
from filebrowser.fields import FileBrowseField
from koalixcrm.subscriptions.const.events import *
import koalixcrm.crm.documents
class Subscription(models.Model):
    contract = models.ForeignKey('crm.Contract', verbose_name=_('Contract'))
subscription_type = models.ForeignKey('SubscriptionType', verbose_name=_('Subscription Type'), null=True)
def create_subscription_from_contract(self, contract):
self.contract = contract
self.save()
return self
def create_quote(self):
quote = koalixcrm.crm.documents.quote.Quote()
quote.contract = self.contract
quote.discount = 0
quote.staff = self.contract.staff
quote.customer = self.contract.defaultcustomer
quote.status = 'C'
quote.currency = self.contract.defaultcurrency
        quote.valid_until = str(date.today())
        quote.date_of_creation = str(date.today())
quote.save()
return quote
def create_invoice(self):
invoice = koalixcrm.crm.documents.invoice.Invoice()
invoice.contract = self.contract
invoice.discount = 0
invoice.staff = self.contract.staff
invoice.customer = self.contract.default_customer
invoice.status = 'C'
invoice.currency = self.contract.default_currency
invoice.payable_until = date.today() + timedelta(
days=self.contract.defaultcustomer.defaultCustomerBillingCycle.timeToPaymentDate)
        invoice.date_of_creation = str(date.today())
invoice.save()
return invoice
class Meta:
app_label = "subscriptions"
verbose_name = _('Subscription')
verbose_name_plural = _('Subscriptions')
class SubscriptionEvent(models.Model):
subscriptions = models.ForeignKey('Subscription',
verbose_name=_('Subscription'))
event_date = models.DateField(verbose_name=_("Event Date"),
blank=True, null=True)
event = models.CharField(max_length=1, choices=SUBSCRITIONEVENTS,
verbose_name=_('Event'))
def __str__(self):
return self.event
class Meta:
app_label = "subscriptions"
verbose_name = _('Subscription Event')
verbose_name_plural = _('Subscription Events')
class SubscriptionType(models.Model):
product_type = models.ForeignKey('crm.ProductType',
verbose_name=_('Product Type'),
on_delete=models.deletion.SET_NULL,
null=True,
blank=True)
cancellation_period = models.IntegerField(verbose_name=_("Cancellation Period (months)"),
blank=True,
null=True)
automatic_contract_extension = models.IntegerField(verbose_name=_("Automatic Contract Extension (months)"),
blank=True,
null=True)
automatic_contract_extension_reminder = models.IntegerField(
verbose_name=_("Automatic Contract Extension Reminder (days)"),
blank=True,
null=True)
minimum_duration = models.IntegerField(verbose_name=_("Minimum Contract Duration"),
blank=True,
null=True)
payment_interval = models.IntegerField(verbose_name=_("Payment Interval (days)"),
blank=True,
null=True)
contract_document = FileBrowseField(verbose_name=_("Contract Documents"),
blank=True,
null=True,
max_length=200)
class Meta:
app_label = "subscriptions"
verbose_name = _('Subscription Type')
verbose_name_plural = _('Subscription Types')
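# Minimal usage sketch (illustrative only, shown commented out because it
# needs a database; assumes an existing ``crm.Contract`` instance named
# ``contract`` with staff, default customer and default currency set):
#
#   subscription = Subscription()
#   subscription.create_subscription_from_contract(contract)
#   invoice = subscription.create_invoice()
#   # invoice.payable_until is derived from the customer's billing cycle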
|
{
"content_hash": "24ecf75901eba4728d46d168be94a2f5",
"timestamp": "",
"source": "github",
"line_count": 98,
"max_line_length": 111,
"avg_line_length": 42.704081632653065,
"alnum_prop": 0.565352449223417,
"repo_name": "scaphilo/koalixcrm",
"id": "c9ba641f24423d5fcfd77421f3df5f0ccc63ce9c",
"size": "4210",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "koalixcrm/subscriptions/models.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Dockerfile",
"bytes": "235"
},
{
"name": "HTML",
"bytes": "8486"
},
{
"name": "Python",
"bytes": "879730"
},
{
"name": "Shell",
"bytes": "1965"
},
{
"name": "XSLT",
"bytes": "600397"
}
],
"symlink_target": ""
}
|
import os
import os.path as pt
from scripttest import TestFileEnvironment
def before_scenario(context, _):
root_dir = pt.abspath(pt.join(pt.dirname(__file__), '..'))
path = ":" + pt.join(root_dir, 'bin')
tmp = pt.join(root_dir, "tmp", "feature")
python_path = pt.join(root_dir, 'vendor', 'python', 'lib', 'python2.7', 'site-packages')
os.environ['PATH'] = path + ":" + os.environ['PATH']
os.environ['PYTHONPATH'] = python_path
    context.env = TestFileEnvironment(base_path=tmp)
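# A step definition can then drive the tool through the scripttest
# environment set up above, roughly like this (sketch; ``cli-tool`` is a
# placeholder command name):
#
#   result = context.env.run('cli-tool', '--help', expect_error=True)
#   assert 'usage' in result.stdout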
|
{
"content_hash": "94057c33e2407d46188f8c3e7546a8e2",
"timestamp": "",
"source": "github",
"line_count": 14,
"max_line_length": 92,
"avg_line_length": 37.857142857142854,
"alnum_prop": 0.6132075471698113,
"repo_name": "michaelbarton/command-line-interface",
"id": "8d8b2393e5080bd2430f08447c20eb9e5eeab43a",
"size": "530",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "features/environment.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Cucumber",
"bytes": "41604"
},
{
"name": "Makefile",
"bytes": "2845"
},
{
"name": "Python",
"bytes": "34153"
},
{
"name": "Shell",
"bytes": "1007"
}
],
"symlink_target": ""
}
|
from __future__ import division, absolute_import, print_function
"""Tests for the general importer functionality.
"""
import os
import re
import shutil
import unicodedata
import sys
import stat
from six import StringIO
from tempfile import mkstemp
from zipfile import ZipFile
from tarfile import TarFile
from mock import patch, Mock
import unittest
from test import _common
from beets.util import displayable_path, bytestring_path, py3_path
from test.helper import TestHelper, has_program, capture_log
from test.helper import ImportSessionFixture
from beets import importer
from beets.importer import albums_in_dir
from mediafile import MediaFile
from beets import autotag
from beets.autotag import AlbumInfo, TrackInfo, AlbumMatch
from beets import config
from beets import logging
from beets import util
class AutotagStub(object):
"""Stub out MusicBrainz album and track matcher and control what the
autotagger returns.
"""
NONE = 'NONE'
IDENT = 'IDENT'
GOOD = 'GOOD'
BAD = 'BAD'
MISSING = 'MISSING'
"""Generate an album match for all but one track
"""
length = 2
matching = IDENT
def install(self):
self.mb_match_album = autotag.mb.match_album
self.mb_match_track = autotag.mb.match_track
self.mb_album_for_id = autotag.mb.album_for_id
self.mb_track_for_id = autotag.mb.track_for_id
autotag.mb.match_album = self.match_album
autotag.mb.match_track = self.match_track
autotag.mb.album_for_id = self.album_for_id
autotag.mb.track_for_id = self.track_for_id
return self
def restore(self):
autotag.mb.match_album = self.mb_match_album
autotag.mb.match_track = self.mb_match_track
autotag.mb.album_for_id = self.mb_album_for_id
autotag.mb.track_for_id = self.mb_track_for_id
def match_album(self, albumartist, album, tracks, extra_tags):
if self.matching == self.IDENT:
yield self._make_album_match(albumartist, album, tracks)
elif self.matching == self.GOOD:
for i in range(self.length):
yield self._make_album_match(albumartist, album, tracks, i)
elif self.matching == self.BAD:
for i in range(self.length):
yield self._make_album_match(albumartist, album, tracks, i + 1)
elif self.matching == self.MISSING:
yield self._make_album_match(albumartist, album, tracks, missing=1)
def match_track(self, artist, title):
yield TrackInfo(
title=title.replace('Tag', 'Applied'),
track_id=u'trackid',
artist=artist.replace('Tag', 'Applied'),
artist_id=u'artistid',
length=1,
index=0,
)
def album_for_id(self, mbid):
return None
def track_for_id(self, mbid):
return None
def _make_track_match(self, artist, album, number):
return TrackInfo(
title=u'Applied Title %d' % number,
track_id=u'match %d' % number,
artist=artist,
length=1,
index=0,
)
def _make_album_match(self, artist, album, tracks, distance=0, missing=0):
if distance:
id = ' ' + 'M' * distance
else:
id = ''
if artist is None:
artist = u"Various Artists"
else:
artist = artist.replace('Tag', 'Applied') + id
album = album.replace('Tag', 'Applied') + id
track_infos = []
for i in range(tracks - missing):
track_infos.append(self._make_track_match(artist, album, i + 1))
return AlbumInfo(
artist=artist,
album=album,
tracks=track_infos,
va=False,
album_id=u'albumid' + id,
artist_id=u'artistid' + id,
albumtype=u'soundtrack'
)
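# Illustration of the stub's distance markers: with ``matching = BAD`` and
# ``length = 2``, ``match_album`` yields two candidates whose album names
# carry one and two trailing ``M``s (u'Applied Album M', u'Applied Album MM');
# ChooseCandidateTest below relies on exactly this ordering.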
class ImportHelper(TestHelper):
"""Provides tools to setup a library, a directory containing files that are
to be imported and an import session. The class also provides stubs for the
autotagging library and several assertions for the library.
"""
def setup_beets(self, disk=False):
super(ImportHelper, self).setup_beets(disk)
self.lib.path_formats = [
(u'default', os.path.join('$artist', '$album', '$title')),
(u'singleton:true', os.path.join('singletons', '$title')),
(u'comp:true', os.path.join('compilations', '$album', '$title')),
]
def _create_import_dir(self, count=3):
"""Creates a directory with media files to import.
Sets ``self.import_dir`` to the path of the directory. Also sets
``self.import_media`` to a list :class:`MediaFile` for all the files in
the directory.
        The directory has the following layout:
the_album/
track_1.mp3
track_2.mp3
track_3.mp3
:param count: Number of files to create
"""
self.import_dir = os.path.join(self.temp_dir, b'testsrcdir')
if os.path.isdir(self.import_dir):
shutil.rmtree(self.import_dir)
album_path = os.path.join(self.import_dir, b'the_album')
os.makedirs(album_path)
resource_path = os.path.join(_common.RSRC, b'full.mp3')
metadata = {
'artist': u'Tag Artist',
'album': u'Tag Album',
'albumartist': None,
'mb_trackid': None,
'mb_albumid': None,
'comp': None
}
self.media_files = []
for i in range(count):
# Copy files
medium_path = os.path.join(
album_path,
bytestring_path('track_%d.mp3' % (i + 1))
)
shutil.copy(resource_path, medium_path)
medium = MediaFile(medium_path)
# Set metadata
metadata['track'] = i + 1
metadata['title'] = u'Tag Title %d' % (i + 1)
for attr in metadata:
setattr(medium, attr, metadata[attr])
medium.save()
self.media_files.append(medium)
self.import_media = self.media_files
def _setup_import_session(self, import_dir=None, delete=False,
threaded=False, copy=True, singletons=False,
move=False, autotag=True, link=False,
hardlink=False):
config['import']['copy'] = copy
config['import']['delete'] = delete
config['import']['timid'] = True
config['threaded'] = False
config['import']['singletons'] = singletons
config['import']['move'] = move
config['import']['autotag'] = autotag
config['import']['resume'] = False
config['import']['link'] = link
config['import']['hardlink'] = hardlink
self.importer = ImportSessionFixture(
self.lib, loghandler=None, query=None,
paths=[import_dir or self.import_dir]
)
def assert_file_in_lib(self, *segments):
"""Join the ``segments`` and assert that this path exists in the library
directory
"""
self.assertExists(os.path.join(self.libdir, *segments))
def assert_file_not_in_lib(self, *segments):
"""Join the ``segments`` and assert that this path exists in the library
directory
"""
self.assertNotExists(os.path.join(self.libdir, *segments))
def assert_lib_dir_empty(self):
self.assertEqual(len(os.listdir(self.libdir)), 0)
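# Typical shape of a test built on ImportHelper, as the classes below do
# (sketch only):
#
#   self.setup_beets()
#   self._create_import_dir(2)
#   self._setup_import_session(autotag=False)
#   self.importer.run()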
@_common.slow_test()
class NonAutotaggedImportTest(_common.TestCase, ImportHelper):
def setUp(self):
self.setup_beets(disk=True)
self._create_import_dir(2)
self._setup_import_session(autotag=False)
def tearDown(self):
self.teardown_beets()
def test_album_created_with_track_artist(self):
self.importer.run()
albums = self.lib.albums()
self.assertEqual(len(albums), 1)
self.assertEqual(albums[0].albumartist, u'Tag Artist')
def test_import_copy_arrives(self):
self.importer.run()
for mediafile in self.import_media:
self.assert_file_in_lib(
b'Tag Artist', b'Tag Album',
util.bytestring_path('{0}.mp3'.format(mediafile.title)))
def test_threaded_import_copy_arrives(self):
config['threaded'] = True
self.importer.run()
for mediafile in self.import_media:
self.assert_file_in_lib(
b'Tag Artist', b'Tag Album',
util.bytestring_path('{0}.mp3'.format(mediafile.title)))
def test_import_with_move_deletes_import_files(self):
config['import']['move'] = True
for mediafile in self.import_media:
self.assertExists(mediafile.path)
self.importer.run()
for mediafile in self.import_media:
self.assertNotExists(mediafile.path)
def test_import_with_move_prunes_directory_empty(self):
config['import']['move'] = True
self.assertExists(os.path.join(self.import_dir, b'the_album'))
self.importer.run()
self.assertNotExists(os.path.join(self.import_dir, b'the_album'))
def test_import_with_move_prunes_with_extra_clutter(self):
f = open(os.path.join(self.import_dir, b'the_album', b'alog.log'), 'w')
f.close()
config['clutter'] = ['*.log']
config['import']['move'] = True
self.assertExists(os.path.join(self.import_dir, b'the_album'))
self.importer.run()
self.assertNotExists(os.path.join(self.import_dir, b'the_album'))
def test_threaded_import_move_arrives(self):
config['import']['move'] = True
config['import']['threaded'] = True
self.importer.run()
for mediafile in self.import_media:
self.assert_file_in_lib(
b'Tag Artist', b'Tag Album',
util.bytestring_path('{0}.mp3'.format(mediafile.title)))
def test_threaded_import_move_deletes_import(self):
config['import']['move'] = True
config['threaded'] = True
self.importer.run()
for mediafile in self.import_media:
self.assertNotExists(mediafile.path)
def test_import_without_delete_retains_files(self):
config['import']['delete'] = False
self.importer.run()
for mediafile in self.import_media:
self.assertExists(mediafile.path)
def test_import_with_delete_removes_files(self):
config['import']['delete'] = True
self.importer.run()
for mediafile in self.import_media:
self.assertNotExists(mediafile.path)
def test_import_with_delete_prunes_directory_empty(self):
config['import']['delete'] = True
self.assertExists(os.path.join(self.import_dir, b'the_album'))
self.importer.run()
self.assertNotExists(os.path.join(self.import_dir, b'the_album'))
@unittest.skipUnless(_common.HAVE_SYMLINK, "need symlinks")
def test_import_link_arrives(self):
config['import']['link'] = True
self.importer.run()
for mediafile in self.import_media:
filename = os.path.join(
self.libdir,
b'Tag Artist', b'Tag Album',
util.bytestring_path('{0}.mp3'.format(mediafile.title))
)
self.assertExists(filename)
self.assertTrue(os.path.islink(filename))
self.assert_equal_path(
util.bytestring_path(os.readlink(filename)),
mediafile.path
)
@unittest.skipUnless(_common.HAVE_HARDLINK, "need hardlinks")
def test_import_hardlink_arrives(self):
config['import']['hardlink'] = True
self.importer.run()
for mediafile in self.import_media:
filename = os.path.join(
self.libdir,
b'Tag Artist', b'Tag Album',
util.bytestring_path('{0}.mp3'.format(mediafile.title))
)
self.assertExists(filename)
s1 = os.stat(mediafile.path)
s2 = os.stat(filename)
self.assertTrue(
(s1[stat.ST_INO], s1[stat.ST_DEV]) ==
(s2[stat.ST_INO], s2[stat.ST_DEV])
)
def create_archive(session):
(handle, path) = mkstemp(dir=py3_path(session.temp_dir))
os.close(handle)
archive = ZipFile(py3_path(path), mode='w')
archive.write(os.path.join(_common.RSRC, b'full.mp3'),
'full.mp3')
archive.close()
path = bytestring_path(path)
return path
class RmTempTest(unittest.TestCase, ImportHelper, _common.Assertions):
"""Tests that temporarily extracted archives are properly removed
after usage.
"""
def setUp(self):
self.setup_beets()
self.want_resume = False
self.config['incremental'] = False
self._old_home = None
def tearDown(self):
self.teardown_beets()
def test_rm(self):
zip_path = create_archive(self)
archive_task = importer.ArchiveImportTask(zip_path)
archive_task.extract()
tmp_path = archive_task.toppath
self._setup_import_session(autotag=False, import_dir=tmp_path)
self.assertExists(tmp_path)
archive_task.finalize(self)
self.assertNotExists(tmp_path)
class ImportZipTest(unittest.TestCase, ImportHelper):
def setUp(self):
self.setup_beets()
def tearDown(self):
self.teardown_beets()
def test_import_zip(self):
zip_path = create_archive(self)
self.assertEqual(len(self.lib.items()), 0)
self.assertEqual(len(self.lib.albums()), 0)
self._setup_import_session(autotag=False, import_dir=zip_path)
self.importer.run()
self.assertEqual(len(self.lib.items()), 1)
self.assertEqual(len(self.lib.albums()), 1)
class ImportTarTest(ImportZipTest):
def create_archive(self):
(handle, path) = mkstemp(dir=self.temp_dir)
os.close(handle)
archive = TarFile(py3_path(path), mode='w')
archive.add(os.path.join(_common.RSRC, b'full.mp3'),
'full.mp3')
archive.close()
return path
@unittest.skipIf(not has_program('unrar'), u'unrar program not found')
class ImportRarTest(ImportZipTest):
def create_archive(self):
return os.path.join(_common.RSRC, b'archive.rar')
@unittest.skip('Implement me!')
class ImportPasswordRarTest(ImportZipTest):
def create_archive(self):
return os.path.join(_common.RSRC, b'password.rar')
class ImportSingletonTest(_common.TestCase, ImportHelper):
"""Test ``APPLY`` and ``ASIS`` choices for an import session with singletons
config set to True.
"""
def setUp(self):
self.setup_beets()
self._create_import_dir(1)
self._setup_import_session()
config['import']['singletons'] = True
self.matcher = AutotagStub().install()
def tearDown(self):
self.teardown_beets()
self.matcher.restore()
def test_apply_asis_adds_track(self):
self.assertEqual(self.lib.items().get(), None)
self.importer.add_choice(importer.action.ASIS)
self.importer.run()
self.assertEqual(self.lib.items().get().title, u'Tag Title 1')
def test_apply_asis_does_not_add_album(self):
self.assertEqual(self.lib.albums().get(), None)
self.importer.add_choice(importer.action.ASIS)
self.importer.run()
self.assertEqual(self.lib.albums().get(), None)
def test_apply_asis_adds_singleton_path(self):
self.assert_lib_dir_empty()
self.importer.add_choice(importer.action.ASIS)
self.importer.run()
self.assert_file_in_lib(b'singletons', b'Tag Title 1.mp3')
def test_apply_candidate_adds_track(self):
self.assertEqual(self.lib.items().get(), None)
self.importer.add_choice(importer.action.APPLY)
self.importer.run()
self.assertEqual(self.lib.items().get().title, u'Applied Title 1')
def test_apply_candidate_does_not_add_album(self):
self.importer.add_choice(importer.action.APPLY)
self.importer.run()
self.assertEqual(self.lib.albums().get(), None)
def test_apply_candidate_adds_singleton_path(self):
self.assert_lib_dir_empty()
self.importer.add_choice(importer.action.APPLY)
self.importer.run()
self.assert_file_in_lib(b'singletons', b'Applied Title 1.mp3')
def test_skip_does_not_add_first_track(self):
self.importer.add_choice(importer.action.SKIP)
self.importer.run()
self.assertEqual(self.lib.items().get(), None)
def test_skip_adds_other_tracks(self):
self._create_import_dir(2)
self.importer.add_choice(importer.action.SKIP)
self.importer.add_choice(importer.action.ASIS)
self.importer.run()
self.assertEqual(len(self.lib.items()), 1)
def test_import_single_files(self):
resource_path = os.path.join(_common.RSRC, b'empty.mp3')
single_path = os.path.join(self.import_dir, b'track_2.mp3')
shutil.copy(resource_path, single_path)
import_files = [
os.path.join(self.import_dir, b'the_album'),
single_path
]
self._setup_import_session(singletons=False)
self.importer.paths = import_files
self.importer.add_choice(importer.action.ASIS)
self.importer.add_choice(importer.action.ASIS)
self.importer.run()
self.assertEqual(len(self.lib.items()), 2)
self.assertEqual(len(self.lib.albums()), 2)
def test_set_fields(self):
genre = u"\U0001F3B7 Jazz"
collection = u"To Listen"
config['import']['set_fields'] = {
u'collection': collection,
u'genre': genre
}
# As-is item import.
self.assertEqual(self.lib.albums().get(), None)
self.importer.add_choice(importer.action.ASIS)
self.importer.run()
for item in self.lib.items():
item.load() # TODO: Not sure this is necessary.
self.assertEqual(item.genre, genre)
self.assertEqual(item.collection, collection)
# Remove item from library to test again with APPLY choice.
item.remove()
# Autotagged.
self.assertEqual(self.lib.albums().get(), None)
self.importer.clear_choices()
self.importer.add_choice(importer.action.APPLY)
self.importer.run()
for item in self.lib.items():
item.load()
self.assertEqual(item.genre, genre)
self.assertEqual(item.collection, collection)
class ImportTest(_common.TestCase, ImportHelper):
"""Test APPLY, ASIS and SKIP choices.
"""
def setUp(self):
self.setup_beets()
self._create_import_dir(1)
self._setup_import_session()
self.matcher = AutotagStub().install()
        self.matcher.matching = AutotagStub.GOOD
def tearDown(self):
self.teardown_beets()
self.matcher.restore()
def test_apply_asis_adds_album(self):
self.assertEqual(self.lib.albums().get(), None)
self.importer.add_choice(importer.action.ASIS)
self.importer.run()
self.assertEqual(self.lib.albums().get().album, u'Tag Album')
def test_apply_asis_adds_tracks(self):
self.assertEqual(self.lib.items().get(), None)
self.importer.add_choice(importer.action.ASIS)
self.importer.run()
self.assertEqual(self.lib.items().get().title, u'Tag Title 1')
def test_apply_asis_adds_album_path(self):
self.assert_lib_dir_empty()
self.importer.add_choice(importer.action.ASIS)
self.importer.run()
self.assert_file_in_lib(
b'Tag Artist', b'Tag Album', b'Tag Title 1.mp3')
def test_apply_candidate_adds_album(self):
self.assertEqual(self.lib.albums().get(), None)
self.importer.add_choice(importer.action.APPLY)
self.importer.run()
self.assertEqual(self.lib.albums().get().album, u'Applied Album')
def test_apply_candidate_adds_tracks(self):
self.assertEqual(self.lib.items().get(), None)
self.importer.add_choice(importer.action.APPLY)
self.importer.run()
self.assertEqual(self.lib.items().get().title, u'Applied Title 1')
def test_apply_candidate_adds_album_path(self):
self.assert_lib_dir_empty()
self.importer.add_choice(importer.action.APPLY)
self.importer.run()
self.assert_file_in_lib(
b'Applied Artist', b'Applied Album', b'Applied Title 1.mp3')
def test_apply_from_scratch_removes_other_metadata(self):
config['import']['from_scratch'] = True
for mediafile in self.import_media:
mediafile.genre = u'Tag Genre'
mediafile.save()
self.importer.add_choice(importer.action.APPLY)
self.importer.run()
self.assertEqual(self.lib.items().get().genre, u'')
def test_apply_from_scratch_keeps_format(self):
config['import']['from_scratch'] = True
self.importer.add_choice(importer.action.APPLY)
self.importer.run()
self.assertEqual(self.lib.items().get().format, u'MP3')
def test_apply_from_scratch_keeps_bitrate(self):
config['import']['from_scratch'] = True
bitrate = 80000
self.importer.add_choice(importer.action.APPLY)
self.importer.run()
self.assertEqual(self.lib.items().get().bitrate, bitrate)
def test_apply_with_move_deletes_import(self):
config['import']['move'] = True
import_file = os.path.join(
self.import_dir, b'the_album', b'track_1.mp3')
self.assertExists(import_file)
self.importer.add_choice(importer.action.APPLY)
self.importer.run()
self.assertNotExists(import_file)
def test_apply_with_delete_deletes_import(self):
config['import']['delete'] = True
import_file = os.path.join(self.import_dir,
b'the_album', b'track_1.mp3')
self.assertExists(import_file)
self.importer.add_choice(importer.action.APPLY)
self.importer.run()
self.assertNotExists(import_file)
def test_skip_does_not_add_track(self):
self.importer.add_choice(importer.action.SKIP)
self.importer.run()
self.assertEqual(self.lib.items().get(), None)
def test_skip_non_album_dirs(self):
self.assertTrue(os.path.isdir(
os.path.join(self.import_dir, b'the_album')))
self.touch(b'cruft', dir=self.import_dir)
self.importer.add_choice(importer.action.APPLY)
self.importer.run()
self.assertEqual(len(self.lib.albums()), 1)
def test_unmatched_tracks_not_added(self):
self._create_import_dir(2)
self.matcher.matching = self.matcher.MISSING
self.importer.add_choice(importer.action.APPLY)
self.importer.run()
self.assertEqual(len(self.lib.items()), 1)
def test_empty_directory_warning(self):
import_dir = os.path.join(self.temp_dir, b'empty')
self.touch(b'non-audio', dir=import_dir)
self._setup_import_session(import_dir=import_dir)
with capture_log() as logs:
self.importer.run()
import_dir = displayable_path(import_dir)
self.assertIn(u'No files imported from {0}'.format(import_dir), logs)
def test_empty_directory_singleton_warning(self):
import_dir = os.path.join(self.temp_dir, b'empty')
self.touch(b'non-audio', dir=import_dir)
self._setup_import_session(import_dir=import_dir, singletons=True)
with capture_log() as logs:
self.importer.run()
import_dir = displayable_path(import_dir)
self.assertIn(u'No files imported from {0}'.format(import_dir), logs)
def test_asis_no_data_source(self):
self.assertEqual(self.lib.items().get(), None)
self.importer.add_choice(importer.action.ASIS)
self.importer.run()
with self.assertRaises(AttributeError):
self.lib.items().get().data_source
def test_set_fields(self):
genre = u"\U0001F3B7 Jazz"
collection = u"To Listen"
config['import']['set_fields'] = {
u'collection': collection,
u'genre': genre
}
# As-is album import.
self.assertEqual(self.lib.albums().get(), None)
self.importer.add_choice(importer.action.ASIS)
self.importer.run()
for album in self.lib.albums():
album.load() # TODO: Not sure this is necessary.
self.assertEqual(album.genre, genre)
self.assertEqual(album.collection, collection)
# Remove album from library to test again with APPLY choice.
album.remove()
# Autotagged.
self.assertEqual(self.lib.albums().get(), None)
self.importer.clear_choices()
self.importer.add_choice(importer.action.APPLY)
self.importer.run()
for album in self.lib.albums():
album.load()
self.assertEqual(album.genre, genre)
self.assertEqual(album.collection, collection)
class ImportTracksTest(_common.TestCase, ImportHelper):
"""Test TRACKS and APPLY choice.
"""
def setUp(self):
self.setup_beets()
self._create_import_dir(1)
self._setup_import_session()
self.matcher = AutotagStub().install()
def tearDown(self):
self.teardown_beets()
self.matcher.restore()
def test_apply_tracks_adds_singleton_track(self):
self.assertEqual(self.lib.items().get(), None)
self.assertEqual(self.lib.albums().get(), None)
self.importer.add_choice(importer.action.TRACKS)
self.importer.add_choice(importer.action.APPLY)
self.importer.add_choice(importer.action.APPLY)
self.importer.run()
self.assertEqual(self.lib.items().get().title, u'Applied Title 1')
self.assertEqual(self.lib.albums().get(), None)
def test_apply_tracks_adds_singleton_path(self):
self.assert_lib_dir_empty()
self.importer.add_choice(importer.action.TRACKS)
self.importer.add_choice(importer.action.APPLY)
self.importer.add_choice(importer.action.APPLY)
self.importer.run()
self.assert_file_in_lib(b'singletons', b'Applied Title 1.mp3')
class ImportCompilationTest(_common.TestCase, ImportHelper):
"""Test ASIS import of a folder containing tracks with different artists.
"""
def setUp(self):
self.setup_beets()
self._create_import_dir(3)
self._setup_import_session()
self.matcher = AutotagStub().install()
def tearDown(self):
self.teardown_beets()
self.matcher.restore()
def test_asis_homogenous_sets_albumartist(self):
self.importer.add_choice(importer.action.ASIS)
self.importer.run()
self.assertEqual(self.lib.albums().get().albumartist, u'Tag Artist')
for item in self.lib.items():
self.assertEqual(item.albumartist, u'Tag Artist')
def test_asis_heterogenous_sets_various_albumartist(self):
self.import_media[0].artist = u'Other Artist'
self.import_media[0].save()
self.import_media[1].artist = u'Another Artist'
self.import_media[1].save()
self.importer.add_choice(importer.action.ASIS)
self.importer.run()
self.assertEqual(self.lib.albums().get().albumartist,
u'Various Artists')
for item in self.lib.items():
self.assertEqual(item.albumartist, u'Various Artists')
    def test_asis_heterogenous_sets_compilation(self):
self.import_media[0].artist = u'Other Artist'
self.import_media[0].save()
self.import_media[1].artist = u'Another Artist'
self.import_media[1].save()
self.importer.add_choice(importer.action.ASIS)
self.importer.run()
for item in self.lib.items():
self.assertTrue(item.comp)
def test_asis_sets_majority_albumartist(self):
self.import_media[0].artist = u'Other Artist'
self.import_media[0].save()
self.import_media[1].artist = u'Other Artist'
self.import_media[1].save()
self.importer.add_choice(importer.action.ASIS)
self.importer.run()
self.assertEqual(self.lib.albums().get().albumartist, u'Other Artist')
for item in self.lib.items():
self.assertEqual(item.albumartist, u'Other Artist')
def test_asis_albumartist_tag_sets_albumartist(self):
self.import_media[0].artist = u'Other Artist'
self.import_media[1].artist = u'Another Artist'
for mediafile in self.import_media:
mediafile.albumartist = u'Album Artist'
mediafile.mb_albumartistid = u'Album Artist ID'
mediafile.save()
self.importer.add_choice(importer.action.ASIS)
self.importer.run()
self.assertEqual(self.lib.albums().get().albumartist, u'Album Artist')
self.assertEqual(self.lib.albums().get().mb_albumartistid,
u'Album Artist ID')
for item in self.lib.items():
self.assertEqual(item.albumartist, u'Album Artist')
self.assertEqual(item.mb_albumartistid, u'Album Artist ID')
class ImportExistingTest(_common.TestCase, ImportHelper):
"""Test importing files that are already in the library directory.
"""
def setUp(self):
self.setup_beets()
self._create_import_dir(1)
self.matcher = AutotagStub().install()
self._setup_import_session()
self.setup_importer = self.importer
self.setup_importer.default_choice = importer.action.APPLY
self._setup_import_session(import_dir=self.libdir)
def tearDown(self):
self.teardown_beets()
self.matcher.restore()
def test_does_not_duplicate_item(self):
self.setup_importer.run()
self.assertEqual(len((self.lib.items())), 1)
self.importer.add_choice(importer.action.APPLY)
self.importer.run()
self.assertEqual(len((self.lib.items())), 1)
def test_does_not_duplicate_album(self):
self.setup_importer.run()
self.assertEqual(len((self.lib.albums())), 1)
self.importer.add_choice(importer.action.APPLY)
self.importer.run()
self.assertEqual(len((self.lib.albums())), 1)
def test_does_not_duplicate_singleton_track(self):
self.setup_importer.add_choice(importer.action.TRACKS)
self.setup_importer.add_choice(importer.action.APPLY)
self.setup_importer.run()
self.assertEqual(len((self.lib.items())), 1)
self.importer.add_choice(importer.action.TRACKS)
self.importer.add_choice(importer.action.APPLY)
self.importer.run()
self.assertEqual(len((self.lib.items())), 1)
def test_asis_updates_metadata(self):
self.setup_importer.run()
medium = MediaFile(self.lib.items().get().path)
medium.title = u'New Title'
medium.save()
self.importer.add_choice(importer.action.ASIS)
self.importer.run()
self.assertEqual(self.lib.items().get().title, u'New Title')
def test_asis_updated_moves_file(self):
self.setup_importer.run()
medium = MediaFile(self.lib.items().get().path)
medium.title = u'New Title'
medium.save()
old_path = os.path.join(b'Applied Artist', b'Applied Album',
b'Applied Title 1.mp3')
self.assert_file_in_lib(old_path)
self.importer.add_choice(importer.action.ASIS)
self.importer.run()
self.assert_file_in_lib(b'Applied Artist', b'Applied Album',
b'New Title.mp3')
self.assert_file_not_in_lib(old_path)
def test_asis_updated_without_copy_does_not_move_file(self):
self.setup_importer.run()
medium = MediaFile(self.lib.items().get().path)
medium.title = u'New Title'
medium.save()
old_path = os.path.join(b'Applied Artist', b'Applied Album',
b'Applied Title 1.mp3')
self.assert_file_in_lib(old_path)
config['import']['copy'] = False
self.importer.add_choice(importer.action.ASIS)
self.importer.run()
self.assert_file_not_in_lib(b'Applied Artist', b'Applied Album',
b'New Title.mp3')
self.assert_file_in_lib(old_path)
def test_outside_file_is_copied(self):
config['import']['copy'] = False
self.setup_importer.run()
self.assert_equal_path(self.lib.items().get().path,
self.import_media[0].path)
config['import']['copy'] = True
self._setup_import_session()
self.importer.add_choice(importer.action.APPLY)
self.importer.run()
new_path = os.path.join(b'Applied Artist', b'Applied Album',
b'Applied Title 1.mp3')
self.assert_file_in_lib(new_path)
self.assert_equal_path(self.lib.items().get().path,
os.path.join(self.libdir, new_path))
def test_outside_file_is_moved(self):
config['import']['copy'] = False
self.setup_importer.run()
self.assert_equal_path(self.lib.items().get().path,
self.import_media[0].path)
self._setup_import_session(move=True)
self.importer.add_choice(importer.action.APPLY)
self.importer.run()
self.assertNotExists(self.import_media[0].path)
class GroupAlbumsImportTest(_common.TestCase, ImportHelper):
def setUp(self):
self.setup_beets()
self._create_import_dir(3)
self.matcher = AutotagStub().install()
self.matcher.matching = AutotagStub.NONE
self._setup_import_session()
# Split tracks into two albums and use both as-is
self.importer.add_choice(importer.action.ALBUMS)
self.importer.add_choice(importer.action.ASIS)
self.importer.add_choice(importer.action.ASIS)
def tearDown(self):
self.teardown_beets()
self.matcher.restore()
def test_add_album_for_different_artist_and_different_album(self):
self.import_media[0].artist = u"Artist B"
self.import_media[0].album = u"Album B"
self.import_media[0].save()
self.importer.run()
albums = set([album.album for album in self.lib.albums()])
self.assertEqual(albums, set(['Album B', 'Tag Album']))
def test_add_album_for_different_artist_and_same_albumartist(self):
self.import_media[0].artist = u"Artist B"
self.import_media[0].albumartist = u"Album Artist"
self.import_media[0].save()
self.import_media[1].artist = u"Artist C"
self.import_media[1].albumartist = u"Album Artist"
self.import_media[1].save()
self.importer.run()
artists = set([album.albumartist for album in self.lib.albums()])
self.assertEqual(artists, set(['Album Artist', 'Tag Artist']))
def test_add_album_for_same_artist_and_different_album(self):
self.import_media[0].album = u"Album B"
self.import_media[0].save()
self.importer.run()
albums = set([album.album for album in self.lib.albums()])
self.assertEqual(albums, set(['Album B', 'Tag Album']))
def test_add_album_for_same_album_and_different_artist(self):
self.import_media[0].artist = u"Artist B"
self.import_media[0].save()
self.importer.run()
artists = set([album.albumartist for album in self.lib.albums()])
self.assertEqual(artists, set(['Artist B', 'Tag Artist']))
def test_incremental(self):
config['import']['incremental'] = True
self.import_media[0].album = u"Album B"
self.import_media[0].save()
self.importer.run()
albums = set([album.album for album in self.lib.albums()])
self.assertEqual(albums, set(['Album B', 'Tag Album']))
class GlobalGroupAlbumsImportTest(GroupAlbumsImportTest):
def setUp(self):
super(GlobalGroupAlbumsImportTest, self).setUp()
self.importer.clear_choices()
self.importer.default_choice = importer.action.ASIS
config['import']['group_albums'] = True
class ChooseCandidateTest(_common.TestCase, ImportHelper):
def setUp(self):
self.setup_beets()
self._create_import_dir(1)
self._setup_import_session()
self.matcher = AutotagStub().install()
self.matcher.matching = AutotagStub.BAD
def tearDown(self):
self.teardown_beets()
self.matcher.restore()
def test_choose_first_candidate(self):
self.importer.add_choice(1)
self.importer.run()
self.assertEqual(self.lib.albums().get().album, u'Applied Album M')
def test_choose_second_candidate(self):
self.importer.add_choice(2)
self.importer.run()
self.assertEqual(self.lib.albums().get().album, u'Applied Album MM')
class InferAlbumDataTest(_common.TestCase):
def setUp(self):
super(InferAlbumDataTest, self).setUp()
i1 = _common.item()
i2 = _common.item()
i3 = _common.item()
i1.title = u'first item'
i2.title = u'second item'
i3.title = u'third item'
i1.comp = i2.comp = i3.comp = False
i1.albumartist = i2.albumartist = i3.albumartist = ''
i1.mb_albumartistid = i2.mb_albumartistid = i3.mb_albumartistid = ''
self.items = [i1, i2, i3]
self.task = importer.ImportTask(paths=['a path'], toppath='top path',
items=self.items)
def test_asis_homogenous_single_artist(self):
self.task.set_choice(importer.action.ASIS)
self.task.align_album_level_fields()
self.assertFalse(self.items[0].comp)
self.assertEqual(self.items[0].albumartist, self.items[2].artist)
def test_asis_heterogenous_va(self):
self.items[0].artist = u'another artist'
self.items[1].artist = u'some other artist'
self.task.set_choice(importer.action.ASIS)
self.task.align_album_level_fields()
self.assertTrue(self.items[0].comp)
self.assertEqual(self.items[0].albumartist, u'Various Artists')
def test_asis_comp_applied_to_all_items(self):
self.items[0].artist = u'another artist'
self.items[1].artist = u'some other artist'
self.task.set_choice(importer.action.ASIS)
self.task.align_album_level_fields()
for item in self.items:
self.assertTrue(item.comp)
self.assertEqual(item.albumartist, u'Various Artists')
def test_asis_majority_artist_single_artist(self):
self.items[0].artist = u'another artist'
self.task.set_choice(importer.action.ASIS)
self.task.align_album_level_fields()
self.assertFalse(self.items[0].comp)
self.assertEqual(self.items[0].albumartist, self.items[2].artist)
def test_asis_track_albumartist_override(self):
self.items[0].artist = u'another artist'
self.items[1].artist = u'some other artist'
for item in self.items:
item.albumartist = u'some album artist'
item.mb_albumartistid = u'some album artist id'
self.task.set_choice(importer.action.ASIS)
self.task.align_album_level_fields()
self.assertEqual(self.items[0].albumartist,
u'some album artist')
self.assertEqual(self.items[0].mb_albumartistid,
u'some album artist id')
def test_apply_gets_artist_and_id(self):
self.task.set_choice(AlbumMatch(0, None, {}, set(), set())) # APPLY
self.task.align_album_level_fields()
self.assertEqual(self.items[0].albumartist, self.items[0].artist)
self.assertEqual(self.items[0].mb_albumartistid,
self.items[0].mb_artistid)
def test_apply_lets_album_values_override(self):
for item in self.items:
item.albumartist = u'some album artist'
item.mb_albumartistid = u'some album artist id'
self.task.set_choice(AlbumMatch(0, None, {}, set(), set())) # APPLY
self.task.align_album_level_fields()
self.assertEqual(self.items[0].albumartist,
u'some album artist')
self.assertEqual(self.items[0].mb_albumartistid,
u'some album artist id')
def test_small_single_artist_album(self):
self.items = [self.items[0]]
self.task.items = self.items
self.task.set_choice(importer.action.ASIS)
self.task.align_album_level_fields()
self.assertFalse(self.items[0].comp)
def test_album_info(*args, **kwargs):
"""Create an AlbumInfo object for testing.
"""
track_info = TrackInfo(
title=u'new title',
track_id=u'trackid',
index=0,
)
album_info = AlbumInfo(
artist=u'artist',
album=u'album',
tracks=[track_info],
album_id=u'albumid',
artist_id=u'artistid',
)
return iter([album_info])
@patch('beets.autotag.mb.match_album', Mock(side_effect=test_album_info))
class ImportDuplicateAlbumTest(unittest.TestCase, TestHelper,
_common.Assertions):
def setUp(self):
self.setup_beets()
# Original album
self.add_album_fixture(albumartist=u'artist', album=u'album')
# Create import session
self.importer = self.create_importer()
config['import']['autotag'] = True
def tearDown(self):
self.teardown_beets()
def test_remove_duplicate_album(self):
item = self.lib.items().get()
self.assertEqual(item.title, u't\xeftle 0')
self.assertExists(item.path)
self.importer.default_resolution = self.importer.Resolution.REMOVE
self.importer.run()
self.assertNotExists(item.path)
self.assertEqual(len(self.lib.albums()), 1)
self.assertEqual(len(self.lib.items()), 1)
item = self.lib.items().get()
self.assertEqual(item.title, u'new title')
def test_no_autotag_keeps_duplicate_album(self):
config['import']['autotag'] = False
item = self.lib.items().get()
self.assertEqual(item.title, u't\xeftle 0')
self.assertExists(item.path)
# Imported item has the same artist and album as the one in the
# library.
import_file = os.path.join(self.importer.paths[0],
b'album 0', b'track 0.mp3')
import_file = MediaFile(import_file)
import_file.artist = item['artist']
import_file.albumartist = item['artist']
import_file.album = item['album']
import_file.title = 'new title'
self.importer.default_resolution = self.importer.Resolution.REMOVE
self.importer.run()
self.assertExists(item.path)
self.assertEqual(len(self.lib.albums()), 2)
self.assertEqual(len(self.lib.items()), 2)
def test_keep_duplicate_album(self):
self.importer.default_resolution = self.importer.Resolution.KEEPBOTH
self.importer.run()
self.assertEqual(len(self.lib.albums()), 2)
self.assertEqual(len(self.lib.items()), 2)
def test_skip_duplicate_album(self):
item = self.lib.items().get()
self.assertEqual(item.title, u't\xeftle 0')
self.importer.default_resolution = self.importer.Resolution.SKIP
self.importer.run()
self.assertEqual(len(self.lib.albums()), 1)
self.assertEqual(len(self.lib.items()), 1)
item = self.lib.items().get()
self.assertEqual(item.title, u't\xeftle 0')
def test_merge_duplicate_album(self):
self.importer.default_resolution = self.importer.Resolution.MERGE
self.importer.run()
self.assertEqual(len(self.lib.albums()), 1)
def test_twice_in_import_dir(self):
self.skipTest('write me')
def add_album_fixture(self, **kwargs):
# TODO move this into upstream
album = super(ImportDuplicateAlbumTest, self).add_album_fixture()
album.update(kwargs)
album.store()
return album
def test_track_info(*args, **kwargs):
return iter([TrackInfo(
artist=u'artist', title=u'title',
track_id=u'new trackid', index=0,)])
@patch('beets.autotag.mb.match_track', Mock(side_effect=test_track_info))
class ImportDuplicateSingletonTest(unittest.TestCase, TestHelper,
_common.Assertions):
def setUp(self):
self.setup_beets()
# Original file in library
self.add_item_fixture(artist=u'artist', title=u'title',
mb_trackid='old trackid')
# Import session
self.importer = self.create_importer()
config['import']['autotag'] = True
config['import']['singletons'] = True
def tearDown(self):
self.teardown_beets()
def test_remove_duplicate(self):
item = self.lib.items().get()
self.assertEqual(item.mb_trackid, u'old trackid')
self.assertExists(item.path)
self.importer.default_resolution = self.importer.Resolution.REMOVE
self.importer.run()
self.assertNotExists(item.path)
self.assertEqual(len(self.lib.items()), 1)
item = self.lib.items().get()
self.assertEqual(item.mb_trackid, u'new trackid')
def test_keep_duplicate(self):
self.assertEqual(len(self.lib.items()), 1)
self.importer.default_resolution = self.importer.Resolution.KEEPBOTH
self.importer.run()
self.assertEqual(len(self.lib.items()), 2)
def test_skip_duplicate(self):
item = self.lib.items().get()
self.assertEqual(item.mb_trackid, u'old trackid')
self.importer.default_resolution = self.importer.Resolution.SKIP
self.importer.run()
self.assertEqual(len(self.lib.items()), 1)
item = self.lib.items().get()
self.assertEqual(item.mb_trackid, u'old trackid')
def test_twice_in_import_dir(self):
self.skipTest('write me')
def add_item_fixture(self, **kwargs):
# Move this to TestHelper
item = self.add_item_fixtures()[0]
item.update(kwargs)
item.store()
return item
class TagLogTest(_common.TestCase):
def test_tag_log_line(self):
sio = StringIO()
handler = logging.StreamHandler(sio)
session = _common.import_session(loghandler=handler)
session.tag_log('status', 'path')
self.assertIn('status path', sio.getvalue())
def test_tag_log_unicode(self):
sio = StringIO()
handler = logging.StreamHandler(sio)
session = _common.import_session(loghandler=handler)
session.tag_log('status', u'caf\xe9') # send unicode
self.assertIn(u'status caf\xe9', sio.getvalue())
class ResumeImportTest(unittest.TestCase, TestHelper):
def setUp(self):
self.setup_beets()
def tearDown(self):
self.teardown_beets()
@patch('beets.plugins.send')
def test_resume_album(self, plugins_send):
self.importer = self.create_importer(album_count=2)
self.config['import']['resume'] = True
# Aborts import after one album. This also ensures that we skip
# the first album in the second try.
def raise_exception(event, **kwargs):
if event == 'album_imported':
raise importer.ImportAbort
plugins_send.side_effect = raise_exception
self.importer.run()
self.assertEqual(len(self.lib.albums()), 1)
self.assertIsNotNone(self.lib.albums(u'album:album 0').get())
self.importer.run()
self.assertEqual(len(self.lib.albums()), 2)
self.assertIsNotNone(self.lib.albums(u'album:album 1').get())
@patch('beets.plugins.send')
def test_resume_singleton(self, plugins_send):
self.importer = self.create_importer(item_count=2)
self.config['import']['resume'] = True
self.config['import']['singletons'] = True
        # Aborts import after one track. This also ensures that we skip
        # the first track in the second try.
def raise_exception(event, **kwargs):
if event == 'item_imported':
raise importer.ImportAbort
plugins_send.side_effect = raise_exception
self.importer.run()
self.assertEqual(len(self.lib.items()), 1)
self.assertIsNotNone(self.lib.items(u'title:track 0').get())
self.importer.run()
self.assertEqual(len(self.lib.items()), 2)
self.assertIsNotNone(self.lib.items(u'title:track 1').get())
class IncrementalImportTest(unittest.TestCase, TestHelper):
def setUp(self):
self.setup_beets()
self.config['import']['incremental'] = True
def tearDown(self):
self.teardown_beets()
def test_incremental_album(self):
importer = self.create_importer(album_count=1)
importer.run()
# Change album name so the original file would be imported again
# if incremental was off.
album = self.lib.albums().get()
album['album'] = 'edited album'
album.store()
importer = self.create_importer(album_count=1)
importer.run()
self.assertEqual(len(self.lib.albums()), 2)
def test_incremental_item(self):
self.config['import']['singletons'] = True
importer = self.create_importer(item_count=1)
importer.run()
# Change track name so the original file would be imported again
# if incremental was off.
item = self.lib.items().get()
item['artist'] = 'edited artist'
item.store()
importer = self.create_importer(item_count=1)
importer.run()
self.assertEqual(len(self.lib.items()), 2)
def test_invalid_state_file(self):
importer = self.create_importer()
with open(self.config['statefile'].as_filename(), 'wb') as f:
f.write(b'000')
importer.run()
self.assertEqual(len(self.lib.albums()), 1)
def _mkmp3(path):
shutil.copyfile(os.path.join(_common.RSRC, b'min.mp3'), path)
class AlbumsInDirTest(_common.TestCase):
def setUp(self):
super(AlbumsInDirTest, self).setUp()
# create a directory structure for testing
self.base = os.path.abspath(os.path.join(self.temp_dir, b'tempdir'))
os.mkdir(self.base)
os.mkdir(os.path.join(self.base, b'album1'))
os.mkdir(os.path.join(self.base, b'album2'))
os.mkdir(os.path.join(self.base, b'more'))
os.mkdir(os.path.join(self.base, b'more', b'album3'))
os.mkdir(os.path.join(self.base, b'more', b'album4'))
_mkmp3(os.path.join(self.base, b'album1', b'album1song1.mp3'))
_mkmp3(os.path.join(self.base, b'album1', b'album1song2.mp3'))
_mkmp3(os.path.join(self.base, b'album2', b'album2song.mp3'))
_mkmp3(os.path.join(self.base, b'more', b'album3', b'album3song.mp3'))
_mkmp3(os.path.join(self.base, b'more', b'album4', b'album4song.mp3'))
def test_finds_all_albums(self):
albums = list(albums_in_dir(self.base))
self.assertEqual(len(albums), 4)
def test_separates_contents(self):
found = []
for _, album in albums_in_dir(self.base):
found.append(re.search(br'album(.)song', album[0]).group(1))
self.assertTrue(b'1' in found)
self.assertTrue(b'2' in found)
self.assertTrue(b'3' in found)
self.assertTrue(b'4' in found)
def test_finds_multiple_songs(self):
for _, album in albums_in_dir(self.base):
n = re.search(br'album(.)song', album[0]).group(1)
if n == b'1':
self.assertEqual(len(album), 2)
else:
self.assertEqual(len(album), 1)
class MultiDiscAlbumsInDirTest(_common.TestCase):
def create_music(self, files=True, ascii=True):
"""Create some music in multiple album directories.
`files` indicates whether to create the files (otherwise, only
        directories are made). `ascii` indicates ASCII-only filenames;
otherwise, we use Unicode names.
"""
self.base = os.path.abspath(os.path.join(self.temp_dir, b'tempdir'))
os.mkdir(self.base)
name = b'CAT' if ascii else util.bytestring_path(u'C\xc1T')
name_alt_case = b'CAt' if ascii else util.bytestring_path(u'C\xc1t')
self.dirs = [
# Nested album, multiple subdirs.
# Also, false positive marker in root dir, and subtitle for disc 3.
os.path.join(self.base, b'ABCD1234'),
os.path.join(self.base, b'ABCD1234', b'cd 1'),
os.path.join(self.base, b'ABCD1234', b'cd 3 - bonus'),
# Nested album, single subdir.
# Also, punctuation between marker and disc number.
os.path.join(self.base, b'album'),
os.path.join(self.base, b'album', b'cd _ 1'),
# Flattened album, case typo.
# Also, false positive marker in parent dir.
os.path.join(self.base, b'artist [CD5]'),
os.path.join(self.base, b'artist [CD5]', name + b' disc 1'),
os.path.join(self.base, b'artist [CD5]',
name_alt_case + b' disc 2'),
# Single disc album, sorted between CAT discs.
os.path.join(self.base, b'artist [CD5]', name + b'S'),
]
self.files = [
os.path.join(self.base, b'ABCD1234', b'cd 1', b'song1.mp3'),
os.path.join(self.base, b'ABCD1234',
b'cd 3 - bonus', b'song2.mp3'),
os.path.join(self.base, b'ABCD1234',
b'cd 3 - bonus', b'song3.mp3'),
os.path.join(self.base, b'album', b'cd _ 1', b'song4.mp3'),
os.path.join(self.base, b'artist [CD5]', name + b' disc 1',
b'song5.mp3'),
os.path.join(self.base, b'artist [CD5]',
name_alt_case + b' disc 2', b'song6.mp3'),
os.path.join(self.base, b'artist [CD5]', name + b'S',
b'song7.mp3'),
]
if not ascii:
self.dirs = [self._normalize_path(p) for p in self.dirs]
self.files = [self._normalize_path(p) for p in self.files]
for path in self.dirs:
os.mkdir(util.syspath(path))
if files:
for path in self.files:
_mkmp3(util.syspath(path))
def _normalize_path(self, path):
"""Normalize a path's Unicode combining form according to the
platform.
"""
path = path.decode('utf-8')
norm_form = 'NFD' if sys.platform == 'darwin' else 'NFC'
path = unicodedata.normalize(norm_form, path)
return path.encode('utf-8')
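    # Illustration: NFC composes the accented letter into a single code
    # point while NFD splits it into base letter plus combining accent:
    #   unicodedata.normalize('NFC', u'CA\u0301T') == u'C\xc1T'
    #   unicodedata.normalize('NFD', u'C\xc1T') == u'CA\u0301T'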
def test_coalesce_nested_album_multiple_subdirs(self):
self.create_music()
albums = list(albums_in_dir(self.base))
self.assertEqual(len(albums), 4)
root, items = albums[0]
self.assertEqual(root, self.dirs[0:3])
self.assertEqual(len(items), 3)
def test_coalesce_nested_album_single_subdir(self):
self.create_music()
albums = list(albums_in_dir(self.base))
root, items = albums[1]
self.assertEqual(root, self.dirs[3:5])
self.assertEqual(len(items), 1)
def test_coalesce_flattened_album_case_typo(self):
self.create_music()
albums = list(albums_in_dir(self.base))
root, items = albums[2]
self.assertEqual(root, self.dirs[6:8])
self.assertEqual(len(items), 2)
def test_single_disc_album(self):
self.create_music()
albums = list(albums_in_dir(self.base))
root, items = albums[3]
self.assertEqual(root, self.dirs[8:])
self.assertEqual(len(items), 1)
def test_do_not_yield_empty_album(self):
self.create_music(files=False)
albums = list(albums_in_dir(self.base))
self.assertEqual(len(albums), 0)
def test_single_disc_unicode(self):
self.create_music(ascii=False)
albums = list(albums_in_dir(self.base))
root, items = albums[3]
self.assertEqual(root, self.dirs[8:])
self.assertEqual(len(items), 1)
def test_coalesce_multiple_unicode(self):
self.create_music(ascii=False)
albums = list(albums_in_dir(self.base))
self.assertEqual(len(albums), 4)
root, items = albums[0]
self.assertEqual(root, self.dirs[0:3])
self.assertEqual(len(items), 3)
class ReimportTest(unittest.TestCase, ImportHelper, _common.Assertions):
"""Test "re-imports", in which the autotagging machinery is used for
music that's already in the library.
This works by importing new database entries for the same files and
replacing the old data with the new data. We also copy over flexible
attributes and the added date.
"""
def setUp(self):
self.setup_beets()
# The existing album.
album = self.add_album_fixture()
album.added = 4242.0
album.foo = u'bar' # Some flexible attribute.
album.store()
item = album.items().get()
item.baz = u'qux'
item.added = 4747.0
item.store()
# Set up an import pipeline with a "good" match.
self.matcher = AutotagStub().install()
self.matcher.matching = AutotagStub.GOOD
def tearDown(self):
self.teardown_beets()
self.matcher.restore()
def _setup_session(self, singletons=False):
self._setup_import_session(self._album().path, singletons=singletons)
self.importer.add_choice(importer.action.APPLY)
def _album(self):
return self.lib.albums().get()
def _item(self):
return self.lib.items().get()
def test_reimported_album_gets_new_metadata(self):
self._setup_session()
self.assertEqual(self._album().album, u'\xe4lbum')
self.importer.run()
self.assertEqual(self._album().album, u'the album')
def test_reimported_album_preserves_flexattr(self):
self._setup_session()
self.importer.run()
self.assertEqual(self._album().foo, u'bar')
def test_reimported_album_preserves_added(self):
self._setup_session()
self.importer.run()
self.assertEqual(self._album().added, 4242.0)
def test_reimported_album_preserves_item_flexattr(self):
self._setup_session()
self.importer.run()
self.assertEqual(self._item().baz, u'qux')
def test_reimported_album_preserves_item_added(self):
self._setup_session()
self.importer.run()
self.assertEqual(self._item().added, 4747.0)
def test_reimported_item_gets_new_metadata(self):
self._setup_session(True)
self.assertEqual(self._item().title, u't\xeftle 0')
self.importer.run()
self.assertEqual(self._item().title, u'full')
def test_reimported_item_preserves_flexattr(self):
self._setup_session(True)
self.importer.run()
self.assertEqual(self._item().baz, u'qux')
def test_reimported_item_preserves_added(self):
self._setup_session(True)
self.importer.run()
self.assertEqual(self._item().added, 4747.0)
def test_reimported_item_preserves_art(self):
self._setup_session()
art_source = os.path.join(_common.RSRC, b'abbey.jpg')
replaced_album = self._album()
replaced_album.set_art(art_source)
replaced_album.store()
old_artpath = replaced_album.artpath
self.importer.run()
new_album = self._album()
new_artpath = new_album.art_destination(art_source)
self.assertEqual(new_album.artpath, new_artpath)
self.assertExists(new_artpath)
if new_artpath != old_artpath:
self.assertNotExists(old_artpath)
class ImportPretendTest(_common.TestCase, ImportHelper):
""" Test the pretend commandline option
"""
def __init__(self, method_name='runTest'):
super(ImportPretendTest, self).__init__(method_name)
self.matcher = None
def setUp(self):
super(ImportPretendTest, self).setUp()
self.setup_beets()
self.__create_import_dir()
self.__create_empty_import_dir()
self._setup_import_session()
config['import']['pretend'] = True
self.matcher = AutotagStub().install()
self.io.install()
def tearDown(self):
self.teardown_beets()
self.matcher.restore()
def __create_import_dir(self):
self._create_import_dir(1)
resource_path = os.path.join(_common.RSRC, b'empty.mp3')
single_path = os.path.join(self.import_dir, b'track_2.mp3')
shutil.copy(resource_path, single_path)
self.import_paths = [
os.path.join(self.import_dir, b'the_album'),
single_path
]
self.import_files = [
displayable_path(
os.path.join(self.import_paths[0], b'track_1.mp3')),
displayable_path(single_path)
]
def __create_empty_import_dir(self):
path = os.path.join(self.temp_dir, b'empty')
os.makedirs(path)
self.empty_path = path
def __run(self, import_paths, singletons=True):
self._setup_import_session(singletons=singletons)
self.importer.paths = import_paths
with capture_log() as logs:
self.importer.run()
logs = [line for line in logs if not line.startswith('Sending event:')]
self.assertEqual(len(self.lib.items()), 0)
self.assertEqual(len(self.lib.albums()), 0)
return logs
def test_import_singletons_pretend(self):
logs = self.__run(self.import_paths)
self.assertEqual(logs, [
'Singleton: %s' % displayable_path(self.import_files[0]),
'Singleton: %s' % displayable_path(self.import_paths[1])])
def test_import_album_pretend(self):
logs = self.__run(self.import_paths, singletons=False)
self.assertEqual(logs, [
'Album: %s' % displayable_path(self.import_paths[0]),
' %s' % displayable_path(self.import_files[0]),
'Album: %s' % displayable_path(self.import_paths[1]),
' %s' % displayable_path(self.import_paths[1])])
def test_import_pretend_empty(self):
logs = self.__run([self.empty_path])
self.assertEqual(logs, [u'No files imported from {0}'
.format(displayable_path(self.empty_path))])
# Helpers for ImportMusicBrainzIdTest.
def mocked_get_release_by_id(id_, includes=[], release_status=[],
release_type=[]):
"""Mimic musicbrainzngs.get_release_by_id, accepting only a restricted list
of MB ids (ID_RELEASE_0, ID_RELEASE_1). The returned dict differs only in
the release title and artist name, so that ID_RELEASE_0 is a closer match
to the items created by ImportHelper._create_import_dir()."""
# Map IDs to (release title, artist), so the distances are different.
releases = {ImportMusicBrainzIdTest.ID_RELEASE_0: ('VALID_RELEASE_0',
'TAG ARTIST'),
ImportMusicBrainzIdTest.ID_RELEASE_1: ('VALID_RELEASE_1',
'DISTANT_MATCH')}
return {
'release': {
'title': releases[id_][0],
'id': id_,
'medium-list': [{
'track-list': [{
'id': 'baz',
'recording': {
'title': 'foo',
'id': 'bar',
'length': 59,
},
'position': 9,
'number': 'A2'
}],
'position': 5,
}],
'artist-credit': [{
'artist': {
'name': releases[id_][1],
'id': 'some-id',
},
}],
'release-group': {
'id': 'another-id',
}
}
}
def mocked_get_recording_by_id(id_, includes=[], release_status=[],
release_type=[]):
"""Mimic musicbrainzngs.get_recording_by_id, accepting only a restricted
list of MB ids (ID_RECORDING_0, ID_RECORDING_1). The returned dict differs
only in the recording title and artist name, so that ID_RECORDING_0 is a
closer match to the items created by ImportHelper._create_import_dir()."""
# Map IDs to (recording title, artist), so the distances are different.
releases = {ImportMusicBrainzIdTest.ID_RECORDING_0: ('VALID_RECORDING_0',
'TAG ARTIST'),
ImportMusicBrainzIdTest.ID_RECORDING_1: ('VALID_RECORDING_1',
'DISTANT_MATCH')}
return {
'recording': {
'title': releases[id_][0],
'id': id_,
'length': 59,
'artist-credit': [{
'artist': {
'name': releases[id_][1],
'id': 'some-id',
},
}],
}
}
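# The two stubs above stand in for the musicbrainzngs network calls; the
# class decorators below route every MusicBrainz lookup through them, so
# these tests run entirely offline.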
@patch('musicbrainzngs.get_recording_by_id',
Mock(side_effect=mocked_get_recording_by_id))
@patch('musicbrainzngs.get_release_by_id',
Mock(side_effect=mocked_get_release_by_id))
class ImportMusicBrainzIdTest(_common.TestCase, ImportHelper):
"""Test the --musicbrainzid argument."""
MB_RELEASE_PREFIX = 'https://musicbrainz.org/release/'
MB_RECORDING_PREFIX = 'https://musicbrainz.org/recording/'
ID_RELEASE_0 = '00000000-0000-0000-0000-000000000000'
ID_RELEASE_1 = '11111111-1111-1111-1111-111111111111'
ID_RECORDING_0 = 'aaaaaaaa-aaaa-aaaa-aaaa-aaaaaaaaaaaa'
ID_RECORDING_1 = 'bbbbbbbb-bbbb-bbbb-bbbb-bbbbbbbbbbbb'
def setUp(self):
self.setup_beets()
self._create_import_dir(1)
def tearDown(self):
self.teardown_beets()
def test_one_mbid_one_album(self):
self.config['import']['search_ids'] = \
[self.MB_RELEASE_PREFIX + self.ID_RELEASE_0]
self._setup_import_session()
self.importer.add_choice(importer.action.APPLY)
self.importer.run()
self.assertEqual(self.lib.albums().get().album, 'VALID_RELEASE_0')
def test_several_mbid_one_album(self):
self.config['import']['search_ids'] = \
[self.MB_RELEASE_PREFIX + self.ID_RELEASE_0,
self.MB_RELEASE_PREFIX + self.ID_RELEASE_1]
self._setup_import_session()
self.importer.add_choice(2) # Pick the 2nd best match (release 1).
self.importer.add_choice(importer.action.APPLY)
self.importer.run()
self.assertEqual(self.lib.albums().get().album, 'VALID_RELEASE_1')
def test_one_mbid_one_singleton(self):
self.config['import']['search_ids'] = \
[self.MB_RECORDING_PREFIX + self.ID_RECORDING_0]
self._setup_import_session(singletons=True)
self.importer.add_choice(importer.action.APPLY)
self.importer.run()
self.assertEqual(self.lib.items().get().title, 'VALID_RECORDING_0')
def test_several_mbid_one_singleton(self):
self.config['import']['search_ids'] = \
[self.MB_RECORDING_PREFIX + self.ID_RECORDING_0,
self.MB_RECORDING_PREFIX + self.ID_RECORDING_1]
self._setup_import_session(singletons=True)
self.importer.add_choice(2) # Pick the 2nd best match (recording 1).
self.importer.add_choice(importer.action.APPLY)
self.importer.run()
self.assertEqual(self.lib.items().get().title, 'VALID_RECORDING_1')
def test_candidates_album(self):
"""Test directly ImportTask.lookup_candidates()."""
task = importer.ImportTask(paths=self.import_dir,
toppath='top path',
items=[_common.item()])
task.search_ids = [self.MB_RELEASE_PREFIX + self.ID_RELEASE_0,
self.MB_RELEASE_PREFIX + self.ID_RELEASE_1,
'an invalid and discarded id']
task.lookup_candidates()
self.assertEqual(set(['VALID_RELEASE_0', 'VALID_RELEASE_1']),
set([c.info.album for c in task.candidates]))
def test_candidates_singleton(self):
"""Test directly SingletonImportTask.lookup_candidates()."""
task = importer.SingletonImportTask(toppath='top path',
item=_common.item())
task.search_ids = [self.MB_RECORDING_PREFIX + self.ID_RECORDING_0,
self.MB_RECORDING_PREFIX + self.ID_RECORDING_1,
'an invalid and discarded id']
task.lookup_candidates()
self.assertEqual(set(['VALID_RECORDING_0', 'VALID_RECORDING_1']),
set([c.info.title for c in task.candidates]))
def suite():
return unittest.TestLoader().loadTestsFromName(__name__)
if __name__ == '__main__':
unittest.main(defaultTest='suite')
|
{
"content_hash": "f3904a88740a7eff93443749396aad3e",
"timestamp": "",
"source": "github",
"line_count": 1965,
"max_line_length": 80,
"avg_line_length": 35.44885496183206,
"alnum_prop": 0.6032128859985357,
"repo_name": "shamangeorge/beets",
"id": "3418d4628f1c8aadd0bf2dd93eaa1340aafb25f3",
"size": "70328",
"binary": false,
"copies": "4",
"ref": "refs/heads/master",
"path": "test/test_importer.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "3539"
},
{
"name": "HTML",
"bytes": "7094"
},
{
"name": "JavaScript",
"bytes": "86092"
},
{
"name": "Python",
"bytes": "2027754"
},
{
"name": "Shell",
"bytes": "7448"
}
],
"symlink_target": ""
}
|
import requests
import random
from django.conf import settings
from celery.task import Task
from celery.utils.log import get_task_logger
from .models import Account, Recharge
logger = get_task_logger(__name__)
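# Recharge.status lifecycle, as inferred from the transitions in the tasks
# below: 0 = queued/ready, 1 = submitted/in process, 2 = successful,
# 3 = failed, 4 = unrecoverable.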
def get_token():
"""
    Returns the token from the most recently created Account entry
"""
account = Account.objects.order_by('created_at').last()
return account.token
def normalize_msisdn(msisdn, country_code='27'):
"""
Normalizes msisdn using provided country code.
Country code defaults to '27' (South Africa)
e.g. '082 111 2222' -> '+27821112222'
"""
# Don't touch shortcodes
if len(msisdn) <= 5:
return msisdn
# Strip everything not a digit or '+'
msisdn = ''.join([c for c in msisdn if c.isdigit() or c == '+'])
# Standardise start of msisdn
if msisdn.startswith('00'):
return '+' + country_code + msisdn[2:]
if msisdn.startswith('0'):
return '+' + country_code + msisdn[1:]
if msisdn.startswith('+'):
return msisdn
if msisdn.startswith(country_code):
return '+' + msisdn
return msisdn
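def _demo_normalize_msisdn():
    """Minimal sketch, not part of the original module: expected results of
    normalize_msisdn for a few assumed inputs."""
    assert normalize_msisdn('082 111 2222') == '+27821112222'  # local format
    assert normalize_msisdn('27821112222') == '+27821112222'   # bare country code
    assert normalize_msisdn('+27821112222') == '+27821112222'  # already normalized
    assert normalize_msisdn('12345') == '12345'                # shortcode untouched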
def lookup_network_code(msisdn):
"""
Determines the network operator based on the first digits
in the msisdn.
"""
mtn = ['+27603', '+27604', '+27605',
'+27630', '+27631', '+27632',
'+27710',
'+27717', '+27718', '+27719',
'+27810',
'+2783', '+2773', '+2778']
cellc = ['+27610', '+27611', '+27612', '+27613',
'+27615', '+27616', '+27617',
'+27618', '+27619', '+27620', '+27621', '+27622', '+27623',
'+27624', '+27625', '+27626', '+27627',
'+2784', '+2774']
telkom = ['+27614',
'+27811', '+27812', '+27813', '+27814', '+27815', '+27816',
'+27817']
vodacom = ['+27606', '+27607', '+27608', '+27609',
'+27711', '+27712', '+27713', '+27714', '+27715', '+27716',
'+27818',
'+2782', '+2772', '+2776', '+2779']
if msisdn[0:5] in mtn or msisdn[0:6] in mtn:
return "MTN"
elif msisdn[0:5] in cellc or msisdn[0:6] in cellc:
return "CELLC"
elif msisdn[0:5] in telkom or msisdn[0:6] in telkom:
return "TELKOM"
elif msisdn[0:5] in vodacom or msisdn[0:6] in vodacom:
return "VOD"
else:
return False
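def _demo_lookup_network_code():
    """Minimal sketch, illustration only: the msisdns are assumptions chosen
    to hit each operator's prefix table above."""
    assert lookup_network_code('+27831234567') == 'MTN'     # '+2783'
    assert lookup_network_code('+27841234567') == 'CELLC'   # '+2784'
    assert lookup_network_code('+27814123456') == 'TELKOM'  # '+27814'
    assert lookup_network_code('+27821234567') == 'VOD'     # '+2782'
    assert lookup_network_code('+27991234567') is False     # unknown prefix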
class ReadyRecharge(Task):
"""
    Task to normalise the msisdn and attempt to set the
    network operator based on the leading msisdn characters
"""
name = "recharges.tasks.ready_recharge"
def run(self, recharge_id, **kwargs):
l = self.get_logger(**kwargs)
recharge = Recharge.objects.get(id=recharge_id)
# Normalize the msisdn
recharge.msisdn = normalize_msisdn(recharge.msisdn, '27')
recharge.save()
# Set the network operator
network = lookup_network_code(recharge.msisdn)
if not network:
# If no network is found, mark the recharge unrecoverable
l.info("Marking recharge as unrecoverable")
recharge.status = 4
recharge.status_message = "Network lookup failed"
recharge.save()
return "Mobile network operator could not be determined for "\
"%s" % recharge.msisdn
else:
recharge.network_code = network
recharge.status = 0
recharge.save()
return "Recharge ready to process"
ready_recharge = ReadyRecharge()
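# Illustrative call sites (assumed, not defined in this module): queue the
# task through the broker with ready_recharge.delay(recharge.id), or run it
# inline with ready_recharge.run(recharge.id) when testing synchronously.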
class HotsocketLogin(Task):
"""
    Task to verify the username and password with Hotsocket and store
    the resulting token
"""
name = "recharges.tasks.hotsocket_login"
def prep_login_data(self):
"""
Constructs the dict needed for hotsocket login
"""
login_data = {'username': settings.HOTSOCKET_API_USERNAME,
'password': settings.HOTSOCKET_API_PASSWORD,
'as_json': True}
return login_data
def request_hotsocket_login(self):
"""
Hotsocket login via post request
"""
login_data = self.prep_login_data()
login_post = requests.post("%s/login" %
settings.HOTSOCKET_API_ENDPOINT,
data=login_data)
return login_post.json()
def run(self, **kwargs):
l = self.get_logger(**kwargs)
login_result = self.request_hotsocket_login()
status = login_result["response"]["status"]
# Check the result
if status == settings.HOTSOCKET_CODES["LOGIN_SUCCESSFUL"]:
l.info("Successful login to hotsocket")
Account.objects.create(token=login_result["response"]["token"])
return True
else:
l.error("Failed login to hotsocket")
return False
hotsocket_login = HotsocketLogin()
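# For reference, the login response consumed above has the shape (field
# values assumed for illustration):
#     {"response": {"status": <result code>, "token": "<session token>"}}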
class HotsocketProcessQueue(Task):
"""
    Task to fetch all unprocessed recharges and create tasks to
    submit them to Hotsocket
"""
name = "recharges.tasks.hotsocket_process_queue"
def run(self, **kwargs):
"""
Returns the number of submitted requests
"""
l = self.get_logger(**kwargs)
l.info("Looking up the unprocessed requests")
queued = Recharge.objects.filter(status=0)
for recharge in queued:
hotsocket_get_airtime.apply_async(args=[recharge.id])
return "%s requests queued to Hotsocket" % queued.count()
hotsocket_process_queue = HotsocketProcessQueue()
class HotsocketGetAirtime(Task):
"""
    Task to make the Hotsocket POST request that loads airtime, save the
    Hotsocket reference to the recharge model and update its status
"""
name = "recharges.tasks.hotsocket_get_airtime"
def prep_hotsocket_data(self, recharge):
"""
        Constructs the dict needed to make a Hotsocket airtime request.
        The msisdn is sent without the leading '+' and the denomination
        must be expressed in cents.
"""
recharge.reference = random.randint(1, 2147483647) # max integer val
recharge.save()
hotsocket_data = {
'username': settings.HOTSOCKET_API_USERNAME,
'password': settings.HOTSOCKET_API_PASSWORD,
'as_json': True,
'token': get_token(),
'recipient_msisdn': recharge.msisdn[1:],
'product_code': recharge.product_code,
'network_code': recharge.network_code,
'denomination': int(recharge.amount*100),
'reference': recharge.reference
}
return hotsocket_data
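    # Worked example of the conversion above (values are assumptions):
    # msisdn '+27821112222' is sent as '27821112222', and an amount of
    # 10.00 becomes int(10.00 * 100) == 1000 cents.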
def request_hotsocket_recharge(self, recharge):
"""
Makes hotsocket airtime request
"""
hotsocket_data = self.prep_hotsocket_data(recharge)
recharge_post = requests.post("%s/recharge" %
settings.HOTSOCKET_API_ENDPOINT,
data=hotsocket_data)
return recharge_post.json()
def run(self, recharge_id, **kwargs):
"""
Returns the recharge model entry
"""
l = self.get_logger(**kwargs)
recharge = Recharge.objects.get(id=recharge_id)
status = recharge.status
if status == 0:
# Set status to In Process
recharge.status = 1
recharge.save()
l.info("Making hotsocket recharge request")
result = self.request_hotsocket_recharge(recharge)
if "hotsocket_ref" in result["response"]:
recharge.hotsocket_ref = result["response"]["hotsocket_ref"]
recharge.save()
hotsocket_check_status.apply_async(args=[recharge_id],
countdown=5*60)
return "Recharge for %s: Queued at Hotsocket "\
"#%s" % (recharge.msisdn, recharge.hotsocket_ref)
else:
if "message" in result["response"]:
l.info("Hotsocket error: %s" % (
result["response"]["message"]))
recharge.status_message = result["response"]["message"]
else:
recharge.status_message = "Unknown Hotsocket error"
recharge.status = 3
recharge.save()
return "Recharge for %s: Hotsocket failure" % (
recharge.msisdn)
elif status == 1:
return "airtime request for %s already in process by another"\
" worker" % recharge.msisdn
elif status == 2:
return "airtime request for %s is successful" % recharge.msisdn
elif status == 3:
return "airtime request for %s failed" % recharge.msisdn
elif status == 4:
return "airtime request for %s is unrecoverable" % recharge.msisdn
hotsocket_get_airtime = HotsocketGetAirtime()
class HotsocketCheckStatus(Task):
"""
    Task to check a Hotsocket recharge request and set the recharge model
    status to successful once the airtime has been loaded onto the user's phone.
"""
name = "recharges.tasks.hotsocket_check_status"
def prep_hotsocket_status_dict(self, recharge_id):
"""
Constructs the dict needed to make a hotsocket recharge status request
"""
recharge = Recharge.objects.get(id=recharge_id)
hotsocket_data = {
'username': settings.HOTSOCKET_API_USERNAME,
'as_json': True,
'token': get_token(),
'reference': recharge.reference,
}
return hotsocket_data
def request_hotsocket_status(self, recharge_id):
"""
Makes the POST request to the Hotsocket API
"""
hotsocket_data = self.prep_hotsocket_status_dict(recharge_id)
recharge_status_post = requests.post("%s/status" %
settings.HOTSOCKET_API_ENDPOINT,
data=hotsocket_data)
return recharge_status_post.json()
def run(self, recharge_id, **kwargs):
l = self.get_logger(**kwargs)
l.info("Looking up Hotsocket status")
hs_status = self.request_hotsocket_status(recharge_id)
hs_status_code = hs_status["response"]["status"]
if hs_status_code == "0000":
# recharge status lookup successful
hs_recharge_status_cd = hs_status["response"]["recharge_status_cd"]
recharge = Recharge.objects.get(id=recharge_id)
if hs_recharge_status_cd == 3:
# Success
recharge.status = 2
recharge.status_message = hs_status["response"][
"recharge_status"]
recharge.save()
return "Recharge for %s successful" % recharge.msisdn
elif hs_recharge_status_cd == 2:
# Failed
recharge.status = 3
recharge.status_message = hs_status["response"][
"recharge_status"]
recharge.save()
return "Recharge for %s failed. Reason: %s" % (
recharge.msisdn, hs_status["response"]["recharge_status"])
elif hs_recharge_status_cd == 1:
# Pre-submission error.
recharge.status = 4
recharge.status_message = hs_status["response"][
"recharge_status"]
recharge.save()
return "Recharge pre-submission for %s errored" % (
recharge.msisdn)
elif hs_recharge_status_cd == 0:
# Submitted, not yet successful.
recharge.status = 1
recharge.status_message = hs_status["response"][
"recharge_status"]
recharge.save()
# requeue in 5 mins
self.retry(args=[recharge_id], countdown=5*60)
return "Recharge for %s pending. Check requeued." % (
recharge.msisdn,)
elif hs_status_code == 887:
# invalid token
pass
elif hs_status_code == 889:
# expired token
pass
elif hs_status_code == 5000:
# system error
pass
elif hs_status_code == 6011:
# invalid product
pass
elif hs_status_code == 6012:
# invalid network code
pass
elif hs_status_code == 6013:
# non-numeric msisdn
pass
elif hs_status_code == 6014:
# malformed msisdn
pass
elif hs_status_code == 6016:
# duplicate reference
pass
elif hs_status_code == 6017:
# non-numeric reference
pass
elif hs_status_code == 6020:
# invalid network + product + denomination combination
pass
return "recharge is successful"
hotsocket_check_status = HotsocketCheckStatus()
|
{
"content_hash": "74801d85b78699c837e0cfed0d71376e",
"timestamp": "",
"source": "github",
"line_count": 366,
"max_line_length": 79,
"avg_line_length": 35.60382513661202,
"alnum_prop": 0.5528355460056787,
"repo_name": "westerncapelabs/gopherairtime",
"id": "f26d25d30c978dc504f2ff89f7f0112783909783",
"size": "13031",
"binary": false,
"copies": "1",
"ref": "refs/heads/develop",
"path": "recharges/tasks.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Python",
"bytes": "57941"
},
{
"name": "Shell",
"bytes": "153"
}
],
"symlink_target": ""
}
|
import sys
import unittest2
from mock import MagicMock, Mock, patch
import oriskami
from oriskami.test.helper import OriskamiUnitTestCase
VALID_API_METHODS = ('get', 'post', 'delete')
class HttpClientTests(OriskamiUnitTestCase):
def setUp(self):
super(HttpClientTests, self).setUp()
self.original_filters = oriskami.http_client.warnings.filters[:]
oriskami.http_client.warnings.simplefilter('ignore')
def tearDown(self):
oriskami.http_client.warnings.filters = self.original_filters
super(HttpClientTests, self).tearDown()
def check_default(self, none_libs, expected):
for lib in none_libs:
setattr(oriskami.http_client, lib, None)
inst = oriskami.http_client.new_default_http_client()
self.assertTrue(isinstance(inst, expected))
def test_new_default_http_client_urlfetch(self):
self.check_default((),
oriskami.http_client.UrlFetchClient)
def test_new_default_http_client_requests(self):
self.check_default(('urlfetch',),
oriskami.http_client.RequestsClient)
def test_new_default_http_client_pycurl(self):
self.check_default(('urlfetch', 'requests',),
oriskami.http_client.PycurlClient)
def test_new_default_http_client_urllib2(self):
self.check_default(('urlfetch', 'requests', 'pycurl'),
oriskami.http_client.Urllib2Client)
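# Taken together, the four tests above pin the client fallback order:
# urlfetch -> requests -> pycurl -> urllib2, each library being tried only
# when the ones listed before it are unavailable (stubbed to None here).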
class ClientTestBase():
@property
def request_mock(self):
return self.request_mocks[self.request_client.name]
@property
def valid_url(self, path='/foo'):
return 'https://api.oriskami.com%s' % (path,)
def make_request(self, method, url, headers, post_data):
client = self.request_client(verify_ssl_certs=True)
return client.request(method, url, headers, post_data)
    def mock_response(self, mock, body, code):
        raise NotImplementedError(
            'You must implement this in your test subclass')
    def mock_error(self, mock):
        raise NotImplementedError(
            'You must implement this in your test subclass')
    def check_call(self, mock, meth, url, post_data, headers):
        raise NotImplementedError(
            'You must implement this in your test subclass')
def test_request(self):
self.mock_response(self.request_mock, '{"foo": "baz"}', 200)
for meth in VALID_API_METHODS:
abs_url = self.valid_url
data = ''
if meth != 'post':
abs_url = '%s?%s' % (abs_url, data)
data = None
headers = {'my-header': 'header val'}
body, code, _ = self.make_request(
meth, abs_url, headers, data)
self.assertEqual(200, code)
self.assertEqual('{"foo": "baz"}', body)
self.check_call(self.request_mock, meth, abs_url,
data, headers)
def test_exception(self):
self.mock_error(self.request_mock)
self.assertRaises(oriskami.error.APIConnectionError,
self.make_request,
'get', self.valid_url, {}, None)
class RequestsVerify(object):
def __eq__(self, other):
return other and other.endswith('oriskami/data/ca-certificates.crt')
class RequestsClientTests(OriskamiUnitTestCase, ClientTestBase):
request_client = oriskami.http_client.RequestsClient
def setUp(self):
super(RequestsClientTests, self).setUp()
self.session = MagicMock()
def test_timeout(self):
headers = {'my-header': 'header val'}
data = ''
self.mock_response(self.request_mock, '{"foo": "baz"}', 200)
self.make_request('POST', self.valid_url,
headers, data, timeout=5)
self.check_call(None, 'POST', self.valid_url,
data, headers, timeout=5)
def make_request(self, method, url, headers, post_data, timeout=80):
client = self.request_client(verify_ssl_certs=True,
timeout=timeout,
proxy='http://slap/')
return client.request(method, url, headers, post_data)
def mock_response(self, mock, body, code):
result = Mock()
result.content = body
result.status_code = code
self.session.request = MagicMock(return_value=result)
mock.Session = MagicMock(return_value=self.session)
def mock_error(self, mock):
mock.exceptions.RequestException = Exception
self.session.request.side_effect = mock.exceptions.RequestException()
mock.Session = MagicMock(return_value=self.session)
# Note that unlike other modules, we don't use the "mock" argument here
# because we need to run the request call against the internal mock
# session.
def check_call(self, mock, meth, url, post_data, headers, timeout=80):
self.session.request. \
assert_called_with(meth, url,
headers=headers,
data=post_data,
verify=RequestsVerify(),
proxies={"http": "http://slap/",
"https": "http://slap/"},
timeout=timeout)
class UrlFetchClientTests(OriskamiUnitTestCase, ClientTestBase):
request_client = oriskami.http_client.UrlFetchClient
def mock_response(self, mock, body, code):
result = Mock()
result.content = body
result.status_code = code
mock.fetch = Mock(return_value=result)
def mock_error(self, mock):
mock.Error = mock.InvalidURLError = Exception
mock.fetch.side_effect = mock.InvalidURLError()
def check_call(self, mock, meth, url, post_data, headers):
mock.fetch.assert_called_with(
url=url,
method=meth,
headers=headers,
validate_certificate=True,
deadline=55,
payload=post_data
)
class Urllib2ClientTests(OriskamiUnitTestCase, ClientTestBase):
request_client = oriskami.http_client.Urllib2Client
def make_request(self, method, url, headers, post_data, proxy=None):
self.client = self.request_client(verify_ssl_certs=True,
proxy=proxy)
self.proxy = proxy
return self.client.request(method, url, headers, post_data)
def mock_response(self, mock, body, code):
        # Use Mock instances (not the Mock class itself) so these attributes
        # don't leak onto Mock globally across tests.
        response = Mock()
        response.read = Mock(return_value=body)
        response.code = code
        response.info = Mock(return_value={})
        self.request_object = Mock()
        mock.Request = Mock(return_value=self.request_object)
        opener = Mock()
        opener.open = Mock(return_value=response)
        mock.build_opener = Mock(return_value=opener)
        mock.build_opener.open = opener.open
        mock.ProxyHandler = Mock(return_value=opener)
        mock.urlopen = Mock(return_value=response)
def mock_error(self, mock):
mock.urlopen.side_effect = ValueError
mock.build_opener().open.side_effect = ValueError
mock.build_opener.reset_mock()
def check_call(self, mock, meth, url, post_data, headers):
        # 'basestring' does not exist on Python 3; 'str' is the text type there.
        if sys.version_info >= (3, 0) and isinstance(post_data, str):
post_data = post_data.encode('utf-8')
mock.Request.assert_called_with(url, post_data, headers)
if (self.client._proxy):
self.assertTrue(type(self.client._proxy) is dict)
mock.ProxyHandler.assert_called_with(self.client._proxy)
mock.build_opener.open.assert_called_with(self.request_object)
self.assertTrue(not mock.urlopen.called)
if (not self.client._proxy):
mock.urlopen.assert_called_with(self.request_object)
self.assertTrue(not mock.build_opener.called)
self.assertTrue(not mock.build_opener.open.called)
class Urllib2ClientHttpsProxyTests(Urllib2ClientTests):
def make_request(self, method, url, headers, post_data, proxy=None):
return super(Urllib2ClientHttpsProxyTests, self).make_request(
method, url, headers, post_data,
{"http": "http://slap/",
"https": "http://slap/"})
class Urllib2ClientHttpProxyTests(Urllib2ClientTests):
def make_request(self, method, url, headers, post_data, proxy=None):
return super(Urllib2ClientHttpProxyTests, self).make_request(
method, url, headers, post_data,
"http://slap/")
class PycurlClientTests(OriskamiUnitTestCase, ClientTestBase):
request_client = oriskami.http_client.PycurlClient
def make_request(self, method, url, headers, post_data, proxy=None):
self.client = self.request_client(verify_ssl_certs=True,
proxy=proxy)
self.proxy = proxy
return self.client.request(method, url, headers, post_data)
@property
def request_mock(self):
if not hasattr(self, 'curl_mock'):
lib_mock = self.request_mocks[self.request_client.name]
self.curl_mock = Mock()
lib_mock.Curl = Mock(return_value=self.curl_mock)
return self.curl_mock
def setUp(self):
super(PycurlClientTests, self).setUp()
self.sio_patcher = patch('oriskami.util.StringIO.StringIO')
sio_mock = Mock()
self.sio_patcher.start().return_value = sio_mock
self.sio_getvalue = sio_mock.getvalue
def tearDown(self):
super(PycurlClientTests, self).tearDown()
self.sio_patcher.stop()
def mock_response(self, mock, body, code):
self.sio_getvalue.return_value = body
mock.getinfo.return_value = code
def mock_error(self, mock):
class FakeException(BaseException):
def __getitem__(self, i):
return 'foo'
oriskami.http_client.pycurl.error = FakeException
mock.perform.side_effect = oriskami.http_client.pycurl.error
def check_call(self, mock, meth, url, post_data, headers):
lib_mock = self.request_mocks[self.request_client.name]
# A note on methodology here: we don't necessarily need to verify
# _every_ call to setopt, but check a few of them to make sure the
# right thing is happening. Keep an eye specifically on conditional
# statements where things are more likely to go wrong.
self.curl_mock.setopt.assert_any_call(lib_mock.NOSIGNAL, 1)
self.curl_mock.setopt.assert_any_call(lib_mock.URL,
oriskami.util.utf8(url))
if meth == 'get':
self.curl_mock.setopt.assert_any_call(lib_mock.HTTPGET, 1)
elif meth == 'post':
self.curl_mock.setopt.assert_any_call(lib_mock.POST, 1)
else:
self.curl_mock.setopt.assert_any_call(lib_mock.CUSTOMREQUEST,
meth.upper())
self.curl_mock.perform.assert_any_call()
class APIEncodeTest(OriskamiUnitTestCase):
def test_encode_dict(self):
body = {
'foo': {
'dob': {
'month': 1,
},
'name': 'bat'
},
}
values = [t for t in oriskami.api_requestor._api_encode(body)]
self.assertTrue(('foo[dob][month]', 1) in values)
self.assertTrue(('foo[name]', 'bat') in values)
def test_encode_array(self):
body = {
'foo': [{
'dob': {
'month': 1,
},
'name': 'bat'
}],
}
values = [t for t in oriskami.api_requestor._api_encode(body)]
self.assertTrue(('foo[][dob][month]', 1) in values)
self.assertTrue(('foo[][name]', 'bat') in values)
if __name__ == '__main__':
unittest2.main()
|
{
"content_hash": "04e06ebce60e8b4e6168dff72d9abf8c",
"timestamp": "",
"source": "github",
"line_count": 359,
"max_line_length": 77,
"avg_line_length": 33.77715877437326,
"alnum_prop": 0.5929407883885865,
"repo_name": "ubivar/ubivar-python",
"id": "e1f1630bd3121d088cd30317317a5727edfb2964",
"size": "12126",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "oriskami/test/test_http_client.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "134900"
}
],
"symlink_target": ""
}
|
import argparse
from Bio import SeqIO
from Bio import Seq
from Bio import SeqRecord
ap = argparse.ArgumentParser(description="Count masked characters.")
ap.add_argument("fastain", help="Input gap-bed file.")
args = ap.parse_args()
fastaFile = open(args.fastain)
for record in SeqIO.parse(fastaFile, "fasta") :
seq = record.seq.tostring()
nLower = seq.count("a") + seq.count("t") + seq.count("g") + seq.count("c")
nN = seq.count("N")
    if nN == 0:
print "{} {:2.2f}".format(record.id, float(nLower)/len(seq))
|
{
"content_hash": "fd68cfbe929710047dbdf232cdfed08c",
"timestamp": "",
"source": "github",
"line_count": 22,
"max_line_length": 78,
"avg_line_length": 24.59090909090909,
"alnum_prop": 0.6580406654343808,
"repo_name": "yunlongliukm/chm1_scripts",
"id": "3382edaaffa7521f34e8ea0acd1480bf275a0f9b",
"size": "564",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "PrintMaskedContent.py",
"mode": "33261",
"license": "mit",
"language": [
{
"name": "C++",
"bytes": "50662"
},
{
"name": "Java",
"bytes": "488"
},
{
"name": "Makefile",
"bytes": "15037"
},
{
"name": "Python",
"bytes": "375549"
},
{
"name": "R",
"bytes": "50744"
},
{
"name": "Shell",
"bytes": "22590"
}
],
"symlink_target": ""
}
|
"""
Web Scraper Project
Scrape data from a regularly updated website livingsocial.com and
save to a database (postgres).
Scrapy item part - defines container for scraped data.
"""
from scrapy.item import Item, Field
class LivingSocialDeal(Item):
"""Livingsocial container (dictionary-like object) for scraped data"""
title = Field()
link = Field()
location = Field()
original_price = Field()
price = Field()
end_date = Field()
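# Illustrative population of the item (values are assumptions; in the real
# pipeline the spider's XPath extraction fills these fields):
def _demo_deal():
    deal = LivingSocialDeal()
    deal['title'] = 'Two-Hour Kayak Rental for Two'
    deal['location'] = 'washington-dc'
    deal['original_price'] = '40'
    deal['price'] = '20'
    return deal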
|
{
"content_hash": "0aa3074923d5067efd67af24fd9e0e74",
"timestamp": "",
"source": "github",
"line_count": 18,
"max_line_length": 74,
"avg_line_length": 25.444444444444443,
"alnum_prop": 0.7008733624454149,
"repo_name": "enilsen16/python",
"id": "49ebceae95746472d56ac0720f7bd5f2bdbb418d",
"size": "458",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "scrape/living_social/scraper_app/items.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "30999"
}
],
"symlink_target": ""
}
|
import copy
from oslo_log import log as logging
from tempest_lib.services.compute.agents_client import AgentsClient
from tempest_lib.services.compute.aggregates_client import AggregatesClient
from tempest_lib.services.compute.availability_zone_client import \
AvailabilityZoneClient
from tempest_lib.services.compute.baremetal_nodes_client import \
BaremetalNodesClient
from tempest_lib.services.compute.certificates_client import \
CertificatesClient
from tempest_lib.services.compute.extensions_client import \
ExtensionsClient
from tempest_lib.services.compute.fixed_ips_client import FixedIPsClient
from tempest_lib.services.compute.flavors_client import FlavorsClient
from tempest_lib.services.compute.floating_ip_pools_client import \
FloatingIPPoolsClient
from tempest_lib.services.compute.floating_ips_bulk_client import \
FloatingIPsBulkClient
from tempest_lib.services.compute.hosts_client import HostsClient
from tempest_lib.services.compute.hypervisor_client import \
HypervisorClient
from tempest_lib.services.identity.v2.token_client import TokenClient
from tempest_lib.services.identity.v3.token_client import V3TokenClient
from tempest.common import cred_provider
from tempest.common import negative_rest_client
from tempest import config
from tempest import exceptions
from tempest import manager
from tempest.services.baremetal.v1.json.baremetal_client import \
BaremetalClient
from tempest.services import botoclients
from tempest.services.compute.json.floating_ips_client import \
FloatingIPsClient
from tempest.services.compute.json.images_client import ImagesClient
from tempest.services.compute.json.instance_usage_audit_log_client import \
InstanceUsagesAuditLogClient
from tempest.services.compute.json.interfaces_client import \
InterfacesClient
from tempest.services.compute.json.keypairs_client import KeyPairsClient
from tempest.services.compute.json.limits_client import LimitsClient
from tempest.services.compute.json.migrations_client import \
MigrationsClient
from tempest.services.compute.json.networks_client import NetworksClient \
as ComputeNetworksClient
from tempest.services.compute.json.quota_classes_client import \
QuotaClassesClient
from tempest.services.compute.json.quotas_client import QuotasClient
from tempest.services.compute.json.security_group_default_rules_client import \
SecurityGroupDefaultRulesClient
from tempest.services.compute.json.security_group_rules_client import \
SecurityGroupRulesClient
from tempest.services.compute.json.security_groups_client import \
SecurityGroupsClient
from tempest.services.compute.json.server_groups_client import \
ServerGroupsClient
from tempest.services.compute.json.servers_client import ServersClient
from tempest.services.compute.json.services_client import ServicesClient
from tempest.services.compute.json.snapshots_client import \
SnapshotsClient as ComputeSnapshotsClient
from tempest.services.compute.json.tenant_networks_client import \
TenantNetworksClient
from tempest.services.compute.json.tenant_usages_client import \
TenantUsagesClient
from tempest.services.compute.json.versions_client import VersionsClient
from tempest.services.compute.json.volumes_client import \
VolumesClient as ComputeVolumesClient
from tempest.services.data_processing.v1_1.data_processing_client import \
DataProcessingClient
from tempest.services.database.json.flavors_client import \
DatabaseFlavorsClient
from tempest.services.database.json.limits_client import \
DatabaseLimitsClient
from tempest.services.database.json.versions_client import \
DatabaseVersionsClient
from tempest.services.identity.v2.json.identity_client import \
IdentityClient
from tempest.services.identity.v3.json.credentials_client import \
CredentialsClient
from tempest.services.identity.v3.json.endpoints_client import \
EndPointClient
from tempest.services.identity.v3.json.identity_client import \
IdentityV3Client
from tempest.services.identity.v3.json.policy_client import PolicyClient
from tempest.services.identity.v3.json.region_client import RegionClient
from tempest.services.identity.v3.json.service_client import \
ServiceClient
from tempest.services.image.v1.json.image_client import ImageClient
from tempest.services.image.v2.json.image_client import ImageClientV2
from tempest.services.messaging.json.messaging_client import \
MessagingClient
from tempest.services.network.json.network_client import NetworkClient
from tempest.services.network.json.networks_client import NetworksClient
from tempest.services.network.json.subnets_client import SubnetsClient
from tempest.services.object_storage.account_client import AccountClient
from tempest.services.object_storage.container_client import ContainerClient
from tempest.services.object_storage.object_client import ObjectClient
from tempest.services.orchestration.json.orchestration_client import \
OrchestrationClient
from tempest.services.telemetry.json.telemetry_client import \
TelemetryClient
from tempest.services.volume.json.admin.volume_hosts_client import \
VolumeHostsClient
from tempest.services.volume.json.admin.volume_quotas_client import \
VolumeQuotasClient
from tempest.services.volume.json.admin.volume_services_client import \
VolumesServicesClient
from tempest.services.volume.json.admin.volume_types_client import \
VolumeTypesClient
from tempest.services.volume.json.availability_zone_client import \
VolumeAvailabilityZoneClient
from tempest.services.volume.json.backups_client import BackupsClient
from tempest.services.volume.json.extensions_client import \
ExtensionsClient as VolumeExtensionClient
from tempest.services.volume.json.qos_client import QosSpecsClient
from tempest.services.volume.json.snapshots_client import SnapshotsClient
from tempest.services.volume.json.volumes_client import VolumesClient
from tempest.services.volume.v2.json.admin.volume_hosts_client import \
VolumeHostsV2Client
from tempest.services.volume.v2.json.admin.volume_quotas_client import \
VolumeQuotasV2Client
from tempest.services.volume.v2.json.admin.volume_services_client import \
VolumesServicesV2Client
from tempest.services.volume.v2.json.admin.volume_types_client import \
VolumeTypesV2Client
from tempest.services.volume.v2.json.availability_zone_client import \
VolumeV2AvailabilityZoneClient
from tempest.services.volume.v2.json.backups_client import BackupsClientV2
from tempest.services.volume.v2.json.extensions_client import \
ExtensionsV2Client as VolumeV2ExtensionClient
from tempest.services.volume.v2.json.qos_client import QosSpecsV2Client
from tempest.services.volume.v2.json.snapshots_client import \
SnapshotsV2Client
from tempest.services.volume.v2.json.volumes_client import VolumesV2Client
CONF = config.CONF
LOG = logging.getLogger(__name__)
class Manager(manager.Manager):
"""
Top level manager for OpenStack tempest clients
"""
default_params = {
'disable_ssl_certificate_validation':
CONF.identity.disable_ssl_certificate_validation,
'ca_certs': CONF.identity.ca_certificates_file,
'trace_requests': CONF.debug.trace_requests
}
    # NOTE: Tempest uses the compute API timeout values when project-specific
    # timeout values don't exist.
default_params_with_timeout_values = {
'build_interval': CONF.compute.build_interval,
'build_timeout': CONF.compute.build_timeout
}
default_params_with_timeout_values.update(default_params)
def __init__(self, credentials=None, service=None):
super(Manager, self).__init__(credentials=credentials)
self._set_compute_clients()
self._set_database_clients()
self._set_identity_clients()
self._set_volume_clients()
self._set_object_storage_clients()
self.baremetal_client = BaremetalClient(
self.auth_provider,
CONF.baremetal.catalog_type,
CONF.identity.region,
endpoint_type=CONF.baremetal.endpoint_type,
**self.default_params_with_timeout_values)
self.network_client = NetworkClient(
self.auth_provider,
CONF.network.catalog_type,
CONF.network.region or CONF.identity.region,
endpoint_type=CONF.network.endpoint_type,
build_interval=CONF.network.build_interval,
build_timeout=CONF.network.build_timeout,
**self.default_params)
self.networks_client = NetworksClient(
self.auth_provider,
CONF.network.catalog_type,
CONF.network.region or CONF.identity.region,
endpoint_type=CONF.network.endpoint_type,
build_interval=CONF.network.build_interval,
build_timeout=CONF.network.build_timeout,
**self.default_params)
self.subnets_client = SubnetsClient(
self.auth_provider,
CONF.network.catalog_type,
CONF.network.region or CONF.identity.region,
endpoint_type=CONF.network.endpoint_type,
build_interval=CONF.network.build_interval,
build_timeout=CONF.network.build_timeout,
**self.default_params)
self.messaging_client = MessagingClient(
self.auth_provider,
CONF.messaging.catalog_type,
CONF.identity.region,
**self.default_params_with_timeout_values)
if CONF.service_available.ceilometer:
self.telemetry_client = TelemetryClient(
self.auth_provider,
CONF.telemetry.catalog_type,
CONF.identity.region,
endpoint_type=CONF.telemetry.endpoint_type,
**self.default_params_with_timeout_values)
if CONF.service_available.glance:
self.image_client = ImageClient(
self.auth_provider,
CONF.image.catalog_type,
CONF.image.region or CONF.identity.region,
endpoint_type=CONF.image.endpoint_type,
build_interval=CONF.image.build_interval,
build_timeout=CONF.image.build_timeout,
**self.default_params)
self.image_client_v2 = ImageClientV2(
self.auth_provider,
CONF.image.catalog_type,
CONF.image.region or CONF.identity.region,
endpoint_type=CONF.image.endpoint_type,
build_interval=CONF.image.build_interval,
build_timeout=CONF.image.build_timeout,
**self.default_params)
self.orchestration_client = OrchestrationClient(
self.auth_provider,
CONF.orchestration.catalog_type,
CONF.orchestration.region or CONF.identity.region,
endpoint_type=CONF.orchestration.endpoint_type,
build_interval=CONF.orchestration.build_interval,
build_timeout=CONF.orchestration.build_timeout,
**self.default_params)
self.data_processing_client = DataProcessingClient(
self.auth_provider,
CONF.data_processing.catalog_type,
CONF.identity.region,
endpoint_type=CONF.data_processing.endpoint_type,
**self.default_params_with_timeout_values)
self.negative_client = negative_rest_client.NegativeRestClient(
self.auth_provider, service, **self.default_params)
# Generating EC2 credentials in tempest is only supported
# with identity v2
if CONF.identity_feature_enabled.api_v2 and \
CONF.identity.auth_version == 'v2':
# EC2 and S3 clients, if used, will check configured AWS
# credentials and generate new ones if needed
self.ec2api_client = botoclients.APIClientEC2(self.identity_client)
self.s3_client = botoclients.ObjectClientS3(self.identity_client)
def _set_compute_clients(self):
params = {
'service': CONF.compute.catalog_type,
'region': CONF.compute.region or CONF.identity.region,
'endpoint_type': CONF.compute.endpoint_type,
'build_interval': CONF.compute.build_interval,
'build_timeout': CONF.compute.build_timeout
}
params.update(self.default_params)
self.agents_client = AgentsClient(self.auth_provider, **params)
self.compute_networks_client = ComputeNetworksClient(
self.auth_provider, **params)
self.migrations_client = MigrationsClient(self.auth_provider,
**params)
self.security_group_default_rules_client = (
SecurityGroupDefaultRulesClient(self.auth_provider, **params))
self.certificates_client = CertificatesClient(self.auth_provider,
**params)
self.servers_client = ServersClient(
self.auth_provider,
enable_instance_password=CONF.compute_feature_enabled
.enable_instance_password,
**params)
self.server_groups_client = ServerGroupsClient(
self.auth_provider, **params)
self.limits_client = LimitsClient(self.auth_provider, **params)
self.images_client = ImagesClient(self.auth_provider, **params)
self.keypairs_client = KeyPairsClient(self.auth_provider, **params)
self.quotas_client = QuotasClient(self.auth_provider, **params)
self.quota_classes_client = QuotaClassesClient(self.auth_provider,
**params)
self.flavors_client = FlavorsClient(self.auth_provider, **params)
self.extensions_client = ExtensionsClient(self.auth_provider,
**params)
self.floating_ip_pools_client = FloatingIPPoolsClient(
self.auth_provider, **params)
self.floating_ips_bulk_client = FloatingIPsBulkClient(
self.auth_provider, **params)
self.floating_ips_client = FloatingIPsClient(self.auth_provider,
**params)
self.security_group_rules_client = SecurityGroupRulesClient(
self.auth_provider, **params)
self.security_groups_client = SecurityGroupsClient(
self.auth_provider, **params)
self.interfaces_client = InterfacesClient(self.auth_provider,
**params)
self.fixed_ips_client = FixedIPsClient(self.auth_provider,
**params)
self.availability_zone_client = AvailabilityZoneClient(
self.auth_provider, **params)
self.aggregates_client = AggregatesClient(self.auth_provider,
**params)
self.services_client = ServicesClient(self.auth_provider, **params)
self.tenant_usages_client = TenantUsagesClient(self.auth_provider,
**params)
self.hosts_client = HostsClient(self.auth_provider, **params)
self.hypervisor_client = HypervisorClient(self.auth_provider,
**params)
self.instance_usages_audit_log_client = \
InstanceUsagesAuditLogClient(self.auth_provider, **params)
self.tenant_networks_client = \
TenantNetworksClient(self.auth_provider, **params)
self.baremetal_nodes_client = BaremetalNodesClient(
self.auth_provider, **params)
        # NOTE: The following clients need special timeout values because
        # the API is a proxy for another component.
params_volume = copy.deepcopy(params)
params_volume.update({
'build_interval': CONF.volume.build_interval,
'build_timeout': CONF.volume.build_timeout
})
self.volumes_extensions_client = ComputeVolumesClient(
self.auth_provider, **params_volume)
self.compute_versions_client = VersionsClient(self.auth_provider,
**params_volume)
self.snapshots_extensions_client = ComputeSnapshotsClient(
self.auth_provider, **params_volume)
def _set_database_clients(self):
self.database_flavors_client = DatabaseFlavorsClient(
self.auth_provider,
CONF.database.catalog_type,
CONF.identity.region,
**self.default_params_with_timeout_values)
self.database_limits_client = DatabaseLimitsClient(
self.auth_provider,
CONF.database.catalog_type,
CONF.identity.region,
**self.default_params_with_timeout_values)
self.database_versions_client = DatabaseVersionsClient(
self.auth_provider,
CONF.database.catalog_type,
CONF.identity.region,
**self.default_params_with_timeout_values)
def _set_identity_clients(self):
params = {
'service': CONF.identity.catalog_type,
'region': CONF.identity.region
}
params.update(self.default_params_with_timeout_values)
params_v2_admin = params.copy()
params_v2_admin['endpoint_type'] = CONF.identity.v2_admin_endpoint_type
# Client uses admin endpoint type of Keystone API v2
self.identity_client = IdentityClient(self.auth_provider,
**params_v2_admin)
params_v2_public = params.copy()
params_v2_public['endpoint_type'] = (
CONF.identity.v2_public_endpoint_type)
# Client uses public endpoint type of Keystone API v2
self.identity_public_client = IdentityClient(self.auth_provider,
**params_v2_public)
params_v3 = params.copy()
params_v3['endpoint_type'] = CONF.identity.v3_endpoint_type
# Client uses the endpoint type of Keystone API v3
self.identity_v3_client = IdentityV3Client(self.auth_provider,
**params_v3)
self.endpoints_client = EndPointClient(self.auth_provider,
**params)
self.service_client = ServiceClient(self.auth_provider, **params)
self.policy_client = PolicyClient(self.auth_provider, **params)
self.region_client = RegionClient(self.auth_provider, **params)
self.credentials_client = CredentialsClient(self.auth_provider,
**params)
# Token clients do not use the catalog. They only need default_params.
# They read auth_url, so they should only be set if the corresponding
# API version is marked as enabled
if CONF.identity_feature_enabled.api_v2:
if CONF.identity.uri:
self.token_client = TokenClient(
CONF.identity.uri, **self.default_params)
else:
msg = 'Identity v2 API enabled, but no identity.uri set'
raise exceptions.InvalidConfiguration(msg)
if CONF.identity_feature_enabled.api_v3:
if CONF.identity.uri_v3:
self.token_v3_client = V3TokenClient(
CONF.identity.uri_v3, **self.default_params)
else:
msg = 'Identity v3 API enabled, but no identity.uri_v3 set'
raise exceptions.InvalidConfiguration(msg)
def _set_volume_clients(self):
params = {
'service': CONF.volume.catalog_type,
'region': CONF.volume.region or CONF.identity.region,
'endpoint_type': CONF.volume.endpoint_type,
'build_interval': CONF.volume.build_interval,
'build_timeout': CONF.volume.build_timeout
}
params.update(self.default_params)
self.volume_qos_client = QosSpecsClient(self.auth_provider,
**params)
self.volume_qos_v2_client = QosSpecsV2Client(
self.auth_provider, **params)
self.volume_services_v2_client = VolumesServicesV2Client(
self.auth_provider, **params)
self.backups_client = BackupsClient(self.auth_provider, **params)
self.backups_v2_client = BackupsClientV2(self.auth_provider,
**params)
self.snapshots_client = SnapshotsClient(self.auth_provider,
**params)
self.snapshots_v2_client = SnapshotsV2Client(self.auth_provider,
**params)
self.volumes_client = VolumesClient(
self.auth_provider, default_volume_size=CONF.volume.volume_size,
**params)
self.volumes_v2_client = VolumesV2Client(
self.auth_provider, default_volume_size=CONF.volume.volume_size,
**params)
self.volume_types_client = VolumeTypesClient(self.auth_provider,
**params)
self.volume_services_client = VolumesServicesClient(
self.auth_provider, **params)
self.volume_hosts_client = VolumeHostsClient(self.auth_provider,
**params)
self.volume_hosts_v2_client = VolumeHostsV2Client(
self.auth_provider, **params)
self.volume_quotas_client = VolumeQuotasClient(self.auth_provider,
**params)
self.volume_quotas_v2_client = VolumeQuotasV2Client(self.auth_provider,
**params)
self.volumes_extension_client = VolumeExtensionClient(
self.auth_provider, **params)
self.volumes_v2_extension_client = VolumeV2ExtensionClient(
self.auth_provider, **params)
self.volume_availability_zone_client = \
VolumeAvailabilityZoneClient(self.auth_provider, **params)
self.volume_v2_availability_zone_client = \
VolumeV2AvailabilityZoneClient(self.auth_provider, **params)
self.volume_types_v2_client = VolumeTypesV2Client(
self.auth_provider, **params)
def _set_object_storage_clients(self):
params = {
'service': CONF.object_storage.catalog_type,
'region': CONF.object_storage.region or CONF.identity.region,
'endpoint_type': CONF.object_storage.endpoint_type
}
params.update(self.default_params_with_timeout_values)
self.account_client = AccountClient(self.auth_provider, **params)
self.container_client = ContainerClient(self.auth_provider, **params)
self.object_client = ObjectClient(self.auth_provider, **params)
class AdminManager(Manager):
"""
Manager object that uses the admin credentials for its
managed client objects
"""
def __init__(self, service=None):
super(AdminManager, self).__init__(
credentials=cred_provider.get_configured_credentials(
'identity_admin'),
service=service)
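# Illustrative usage (a hedged sketch; the attribute names come from the
# assignments above, and the call itself is an assumption):
#     clients = AdminManager()
#     servers = clients.servers_client.list_servers()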
|
{
"content_hash": "1fca76d7fdc8913ff81fb21b057a97d2",
"timestamp": "",
"source": "github",
"line_count": 476,
"max_line_length": 79,
"avg_line_length": 49.063025210084035,
"alnum_prop": 0.6657960092489509,
"repo_name": "izadorozhna/tempest",
"id": "84e85446ef2522cba83b047ca530472f9d43fdbe",
"size": "23990",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "tempest/clients.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "2827292"
},
{
"name": "Shell",
"bytes": "8578"
}
],
"symlink_target": ""
}
|
from django.conf.urls import include, url
urlpatterns = [
url(r'^school_json/$', 'visit.views.school_json', name='school-json'),
url(r'^school_json/(?P<school_id>\d+)/(?P<staff_id>\d+)/$', 'visit.views.school_json', name='school-json'),
url(r'^school_json/(?P<school_id>\d+)/(?P<staff_id>\d+)/(?P<key>[\w-]+)/$', 'visit.views.school_json', name='school-json'),
url(r'^visits_json/$', 'visit.views.visits_json', name='visits-json'),
url(r'^report/$', 'visit.views.report', name='report'),
url(r'^report/admin/$', 'visit.views.admin_report', name='admin-report'),
url(r'^report/data/$', 'visit.views.data_directory', name='data-directory'),
url(r'^report/(?P<key>[\w-]+)/$', 'visit.views.report', name='report-key'),
url(r'^payroll_report/$', 'visit.views.payroll_report', name='payroll-report'),
url(r'^import_upload/$', 'visit.views.import_upload', name='import-upload'),
url(r'^import_data/$', 'visit.views.import_data', name='import-data'),
url(r'^student-visits/$', 'visit.views.student_visits_report', name='student-visits'),
# url(r'^submit/$', 'visit.views.submit', name='submit'),
# url(r'^submit/(?P<key>[\w-]+)/$', 'visit.views.submit', name='submit'),
url(r'^myvisits/$', 'visit.views.my_visits', name='my-visits'),
url(r'^visit/(?P<key>[\w-]+)/$', 'visit.views.visit_detail', name='visit-detail'),
url(r'^log/$', 'visit.views.visit_log', name='visit-log'),
url(r'^log/(?P<key>[\w-]+)/$', 'visit.views.visit_log', name='visit-log'),
url(r'^register/$', 'visit.views.register', name='register'),
url(r'^register/done/$', 'visit.views.register_done', name='register-done'),
]
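# Reversing these routes follows the usual Django pattern (the names come
# from the patterns above; the key value is an assumption):
#     reverse('visit-detail', kwargs={'key': 'abc-123'})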
|
{
"content_hash": "36b7a7fdc30dec6fbf39ab78b100703c",
"timestamp": "",
"source": "github",
"line_count": 25,
"max_line_length": 127,
"avg_line_length": 66.56,
"alnum_prop": 0.6195913461538461,
"repo_name": "koebbe/homeworks",
"id": "0c20cdfdce272c54f7e8b284caaae6d1c0ae3706",
"size": "1664",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "visit/urls.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "44210"
},
{
"name": "HTML",
"bytes": "69003"
},
{
"name": "JavaScript",
"bytes": "124572"
},
{
"name": "Python",
"bytes": "223075"
}
],
"symlink_target": ""
}
|
from django.db import migrations
class Migration(migrations.Migration):
dependencies = [
('newsroom', '0008_article_author_feedback'),
]
operations = [
migrations.RenameField(
model_name='article',
old_name='author_feedback',
new_name='editor_feedback',
),
]
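# RenameField is reversible out of the box: migrating backwards renames
# editor_feedback back to author_feedback without any extra code.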
|
{
"content_hash": "6f2ab66e2e5fad4b572efd6f04edb481",
"timestamp": "",
"source": "github",
"line_count": 16,
"max_line_length": 53,
"avg_line_length": 21.1875,
"alnum_prop": 0.5752212389380531,
"repo_name": "groundupnews/gu",
"id": "e421be6cc7251a2ec0f7355d092d7237bdba750c",
"size": "389",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "newsroom/migrations/0009_auto_20190806_1359.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "CSS",
"bytes": "222991"
},
{
"name": "HTML",
"bytes": "563742"
},
{
"name": "JavaScript",
"bytes": "790912"
},
{
"name": "PHP",
"bytes": "2275"
},
{
"name": "Python",
"bytes": "598998"
},
{
"name": "Roff",
"bytes": "888"
},
{
"name": "Shell",
"bytes": "803"
},
{
"name": "XSLT",
"bytes": "870"
}
],
"symlink_target": ""
}
|
"""
@package mi.core.instrument.test.test_chunker
@file mi/core/instrument/test/test_chunker.py
@author Steve Foley
@brief Test cases for the base chunker module
"""
__author__ = 'Steve Foley'
__license__ = 'Apache 2.0'
import unittest
import re
from functools import partial
from mi.core.unit_test import MiUnitTest, MiUnitTestCase
from nose.plugins.attrib import attr
from mi.core.unit_test import IonUnitTestCase
from ooi.logging import log
from mi.core.exceptions import SampleException
from mi.core.instrument.chunker import StringChunker
@attr('UNIT', group='mi')
class UnitTestStringChunker(MiUnitTestCase):
"""
Test the basic functionality of the chunker system via unit tests
"""
    # For testing, use PAR sensor data here: short and easy to work with.
    # The checksums are faked so it is easy to recognize which sample is which.
SAMPLE_1 = "SATPAR0229,10.01,2206748111,111"
SAMPLE_2 = "SATPAR0229,10.02,2206748222,222"
SAMPLE_3 = "SATPAR0229,10.03,2206748333,333"
FRAGMENT_1 = "SATPAR0229,10.01,"
FRAGMENT_2 = "2206748544,123"
FRAGMENT_SAMPLE = FRAGMENT_1+FRAGMENT_2
MULTI_SAMPLE_1 = "%s\r\n%s" % (SAMPLE_1,
SAMPLE_2)
TIMESTAMP_1 = 3569168821.102485
TIMESTAMP_2 = 3569168822.202485
TIMESTAMP_3 = 3569168823.302485
@staticmethod
def sieve_function(raw_data):
""" The method that splits samples
"""
return_list = []
pattern = r'SATPAR(?P<sernum>\d{4}),(?P<timer>\d{1,7}.\d\d),(?P<counts>\d{10}),(?P<checksum>\d{1,3})'
regex = re.compile(pattern)
for match in regex.finditer(raw_data):
return_list.append((match.start(), match.end()))
log.debug("Sieving: %s...%s",
raw_data[match.start():match.start()+5],
raw_data[match.end()-5:match.end()])
return return_list
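    # For SAMPLE_1 the sieve yields [(0, 31)]: one (start, end) slice per
    # matched PAR frame, which is exactly what test_sieve below asserts.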
def setUp(self):
""" Setup a chunker for use in tests """
self._chunker = StringChunker(UnitTestStringChunker.sieve_function)
def _display_chunk_list(self, data, chunk_list):
""" Display the data as viewed through the chunk list """
data_list = []
        if chunk_list is None:
return data_list
for (s, e, t) in chunk_list:
data_list.append(data[s:e])
return data_list
def test_sieve(self):
"""
Do a quick test of the sieve to make sure it does what we want.
"""
self.assertEquals([(0,31)],
UnitTestStringChunker.sieve_function(self.SAMPLE_1))
self.assertEquals([],
UnitTestStringChunker.sieve_function(self.FRAGMENT_1))
self.assertEquals([(0,31), (33, 64)],
UnitTestStringChunker.sieve_function(self.MULTI_SAMPLE_1))
def test_regex_sieve(self):
"""
Do a test of the regex based sieve to make sure it does what we want.
"""
pattern = r'SATPAR(?P<sernum>\d{4}),(?P<timer>\d{1,7}.\d\d),(?P<counts>\d{10}),(?P<checksum>\d{1,3})'
regex = re.compile(pattern)
self._chunker = StringChunker(partial(self._chunker.regex_sieve_function, regex_list=[regex]))
self.assertEquals([(0,31)],
self._chunker.regex_sieve_function(self.SAMPLE_1, [regex]))
self.assertEquals([],
self._chunker.regex_sieve_function(self.FRAGMENT_1, [regex]))
self.assertEquals([(0,31), (33, 64)],
self._chunker.regex_sieve_function(self.MULTI_SAMPLE_1, [regex]))
def test_generate_data_lists(self):
sample_string = "Foo%sBar%sBat" % (self.SAMPLE_1, self.SAMPLE_2)
self._chunker.add_chunk(sample_string, self.TIMESTAMP_1)
lists = self._chunker._generate_data_lists(self.TIMESTAMP_1)
log.debug("Data chunk list: %s",
self._display_chunk_list(sample_string,
lists['data_chunk_list']))
self.assertEquals(lists['data_chunk_list'], [(3,34, self.TIMESTAMP_1),
(37, 68, self.TIMESTAMP_1)])
log.debug("Non-data chunk list: %s",
self._display_chunk_list(sample_string,
lists['non_data_chunk_list']))
self.assertEquals(lists['non_data_chunk_list'],
[(0, 3, self.TIMESTAMP_1),
(34, 37, self.TIMESTAMP_1)])
def test_clean_chunk_list(self):
test_str = "abcdefghijklmnopqrstuvwxyz"
short_test_str = test_str[10:]
test_list = [(3, 5, self.TIMESTAMP_1),
(8, 12, self.TIMESTAMP_2),
(20, 25, self.TIMESTAMP_3)]
log.debug("Test string: %s", test_str)
log.debug("Raw list: %s", self._display_chunk_list(test_str, test_list))
result = self._chunker._clean_chunk_list(test_list, 10)
log.debug("Shortened test string: %s", short_test_str)
log.debug("Cleaned list: %s", self._display_chunk_list(short_test_str,
result))
self.assertEquals(result, [(0, 2, self.TIMESTAMP_2),
(10, 15, self.TIMESTAMP_3)])
def test_add_get_simple(self):
"""
Add a simple string of data to the buffer, get the next chunk out
"""
self._chunker.add_chunk(self.SAMPLE_1, self.TIMESTAMP_1)
(time, result) = self._chunker.get_next_data()
self.assertEquals(time, self.TIMESTAMP_1)
self.assertEquals(result, self.SAMPLE_1)
# It got cleared at the last fetch...
(time, result) = self._chunker.get_next_data()
self.assertEquals(time, None)
self.assertEquals(result, None)
(time, result) = self._chunker.get_next_non_data()
self.assertEquals(time, None)
self.assertEquals(result, None)
def test_no_clean_data(self):
"""
Test an add/get without cleaning
"""
self._chunker.add_chunk(self.SAMPLE_1, self.TIMESTAMP_1)
(time, result) = self._chunker.get_next_data(clean=False)
self.assertEquals(result, self.SAMPLE_1)
self.assertEquals(time, self.TIMESTAMP_1)
# It did NOT get cleared at the last fetch...
(time, result) = self._chunker.get_next_data()
self.assertEquals(result, self.SAMPLE_1)
self.assertEquals(time, self.TIMESTAMP_1)
# and now it did
(time, result) = self._chunker.get_next_data()
self.assertEquals(result, None)
        self.assertEquals(time, None)
def test_add_many_get_simple(self):
"""
Add a few simple strings of data to the buffer, get the chunks out
"""
self._chunker.add_chunk(self.SAMPLE_1, self.TIMESTAMP_1)
self._chunker.add_chunk(self.SAMPLE_2, self.TIMESTAMP_2)
self._chunker.add_chunk(self.SAMPLE_3, self.TIMESTAMP_3)
(time, result) = self._chunker.get_next_data()
self.assertEquals(time, self.TIMESTAMP_1)
self.assertEquals(result, self.SAMPLE_1)
(time, result) = self._chunker.get_next_data()
self.assertEquals(time, self.TIMESTAMP_2)
self.assertEquals(result, self.SAMPLE_2)
(time, result) = self._chunker.get_next_data()
self.assertEquals(time, self.TIMESTAMP_3)
self.assertEquals(result, self.SAMPLE_3)
(time, result) = self._chunker.get_next_data()
self.assertEquals(result, None)
self.assertEquals(time, None)
def test_get_non_data(self):
"""
Get some non-data blocks
"""
self._chunker.add_chunk("Foo", self.TIMESTAMP_1)
self.assertEquals(len(self._chunker.nondata_chunk_list), 1)
self.assertEquals(len(self._chunker.data_chunk_list), 0)
self._chunker.add_chunk(self.SAMPLE_1, self.TIMESTAMP_2)
self.assertEquals(len(self._chunker.nondata_chunk_list), 1)
self.assertEquals(len(self._chunker.data_chunk_list), 1)
self._chunker.add_chunk("Bar", self.TIMESTAMP_2)
self._chunker.add_chunk("Bat", self.TIMESTAMP_3)
self.assertEquals(len(self._chunker.nondata_chunk_list), 2)
self.assertEquals(len(self._chunker.data_chunk_list), 1)
self._chunker.add_chunk(self.SAMPLE_2, self.TIMESTAMP_2)
self.assertEquals(len(self._chunker.nondata_chunk_list), 2)
self.assertEquals(len(self._chunker.data_chunk_list), 2)
self._chunker.add_chunk("Baz", self.TIMESTAMP_1)
self.assertEquals(len(self._chunker.nondata_chunk_list), 3)
self.assertEquals(len(self._chunker.data_chunk_list), 2)
(time, result) = self._chunker.get_next_data()
self.assertEquals(result, self.SAMPLE_1)
self.assertEquals(time, self.TIMESTAMP_2)
(time, result) = self._chunker.get_next_non_data()
self.assertEquals(result, "BarBat")
self.assertEquals(time, self.TIMESTAMP_2)
(time, result) = self._chunker.get_next_non_data()
self.assertEquals(result, "Baz")
self.assertEquals(time, self.TIMESTAMP_1)
(time, result) = self._chunker.get_next_data()
self.assertEquals(result, None)
self.assertEquals(time, None)
def test_get_next_data_with_indices(self):
"""
Get the next data point with indices
"""
self._chunker.add_chunk("Foo", self.TIMESTAMP_1)
self.assertEquals(len(self._chunker.nondata_chunk_list), 1)
self.assertEquals(len(self._chunker.data_chunk_list), 0)
self._chunker.add_chunk(self.SAMPLE_1, self.TIMESTAMP_2)
self.assertEquals(len(self._chunker.nondata_chunk_list), 1)
self.assertEquals(len(self._chunker.data_chunk_list), 1)
self._chunker.add_chunk("Bar", self.TIMESTAMP_2)
self._chunker.add_chunk("Bat", self.TIMESTAMP_3)
(time, result, start, end) = self._chunker.get_next_data_with_index()
self.assertEquals(result, self.SAMPLE_1)
self.assertEquals(start, 3)
self.assertEquals(end, 34)
def test_add_get_fragment(self):
"""
        Add some fragments of a string, then verify that the value is stitched together
"""
# Add a part of a sample
self._chunker.add_chunk(self.FRAGMENT_1, self.TIMESTAMP_1)
(time, result) = self._chunker.get_next_data()
self.assertEquals(time, None)
self.assertEquals(result, None)
self.assertEquals(len(self._chunker.nondata_chunk_list), 1)
self.assertEquals(len(self._chunker.data_chunk_list), 0)
# add the rest of the sample
self._chunker.add_chunk(self.FRAGMENT_2, self.TIMESTAMP_2)
self.assertEquals(len(self._chunker.nondata_chunk_list), 0)
self.assertEquals(len(self._chunker.data_chunk_list), 1)
(time, result) = self._chunker.get_next_data()
self.assertEquals(result, self.FRAGMENT_SAMPLE)
self.assertEquals(time, self.TIMESTAMP_1)
def test_add_multiple_in_one(self):
"""
        Test multiple data records arriving in a single input chunk. They will
        ultimately need to be split apart.
"""
self._chunker.add_chunk(self.MULTI_SAMPLE_1, self.TIMESTAMP_1)
(time, result) = self._chunker.get_next_data()
self.assertEquals(result, self.SAMPLE_1)
self.assertEquals(time, self.TIMESTAMP_1)
(time, result) = self._chunker.get_next_data()
self.assertEquals(time, self.TIMESTAMP_1)
self.assertEquals(result, self.SAMPLE_2)
(time, result) = self._chunker.get_next_data()
self.assertEquals(result, None)
self.assertEquals(time, None)
def test_get_raw(self):
"""
Test the ability to get raw data, but not totally hose data strings
"""
# Put some data fragments in
self._chunker.add_chunk("Foo", self.TIMESTAMP_1)
self._chunker.add_chunk(self.SAMPLE_1, self.TIMESTAMP_2)
self._chunker.add_chunk(self.FRAGMENT_1, self.TIMESTAMP_2)
self._chunker.add_chunk(self.FRAGMENT_2, self.TIMESTAMP_3)
self._chunker.add_chunk("Baz", self.TIMESTAMP_1)
# Get a raw chunk out
(time, result) = self._chunker.get_next_raw()
self.assertEquals(result, "Foo")
self.assertEquals(time, self.TIMESTAMP_1)
(time, result) = self._chunker.get_next_raw()
self.assertEquals(result, self.SAMPLE_1)
self.assertEquals(time, self.TIMESTAMP_2)
(time, result) = self._chunker.get_next_raw()
self.assertEquals(result, self.FRAGMENT_1) # Fragments got ripped up
self.assertEquals(time, self.TIMESTAMP_2)
(time, result) = self._chunker.get_next_data()
self.assertEquals(result, None)
self.assertEquals(time, None)
def test_funky_chunks(self):
def funky_sieve(data):
return [(3,6),(0,3)]
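        # The sieve reports its matches out of order; the chunker should
        # still emit the chunks in buffer order.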
self._chunker = StringChunker(funky_sieve)
self._chunker.add_chunk("BarFoo", self.TIMESTAMP_1)
(time, result) = self._chunker.get_next_data()
self.assertEquals(result, "Bar")
self.assertEquals(time, self.TIMESTAMP_1)
(time, result) = self._chunker.get_next_data()
self.assertEquals(result, "Foo")
self.assertEquals(time, self.TIMESTAMP_1)
def test_overlap(self):
self.assertFalse(StringChunker.overlaps([(0, 5)]))
self.assertFalse(StringChunker.overlaps([]))
self.assertTrue(StringChunker.overlaps([(0, 5), (3, 6)]))
self.assertTrue(StringChunker.overlaps([(0, 5), (5, 7), (6, 8)]))
self.assertTrue(StringChunker.overlaps([(0, 5), (6, 9), (5, 7)]))
def overlap_sieve(data):
return [(0,3),(2,6)]
self._chunker = StringChunker(overlap_sieve)
self.assertRaises(SampleException,
self._chunker.add_chunk, "foobar", self.TIMESTAMP_1)
@unittest.skip("Write this when a binary chunker is needed")
@attr('UNIT', group='mi')
class UnitTestBinaryChunker(MiUnitTestCase):
"""
Test the basic functionality of the chunker system via unit tests
"""
SAMPLE_1 = []
SAMPLE_2 = []
SAMPLE_3 = []
FRAGMENT_1 = []
FRAGMENT_2 = []
MULTI_SAMPLE_1 = []
def test_add_get_simple(self):
"""
Add a simple string of data to the buffer, get the next chunk out
"""
pass
def test_add_get_many_simple(self):
"""
Add a few simple strings of data to the buffer, get the chunks out
"""
# Add a sample,
# Add a sample
# Add another sample
# get some samples out,
# assert they were correct and in the right order
pass
def test_add_get_fragment(self):
"""
        Add some fragments of a string, then verify that the value is stitched together
"""
# Add a part of a sample
        # confirm you can't get anything out
# add the rest of the sample
# confirm that the rest comes out
pass
def test_add_multiple_in_one(self):
"""
        Test multiple data records arriving in a single input chunk. They will
        ultimately need to be split apart.
"""
pass
|
{
"content_hash": "6d86155a3d2bf50a490dd7d501a63867",
"timestamp": "",
"source": "github",
"line_count": 380,
"max_line_length": 109,
"avg_line_length": 41.04210526315789,
"alnum_prop": 0.5917542959733265,
"repo_name": "mikeh77/mi-instrument",
"id": "39403e0dcb93b329c2ae8b6c8de318d3caa6af50",
"size": "15619",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "mi/core/instrument/test/test_chunker.py",
"mode": "33188",
"license": "bsd-2-clause",
"language": [
{
"name": "Python",
"bytes": "7381630"
},
{
"name": "Shell",
"bytes": "22"
}
],
"symlink_target": ""
}
|
import numpy as np
import matplotlib.pyplot as plt
import glob
import re
import sqlite3
import helper
import addline
def plotWeakScaling(allsize=1000000,allgraphtype='SOFTWARE',alladditionalwhere=' AND total_time>0 ',suffix='',basesize=100000, title=''):
fig = plt.figure()
ax = fig.add_subplot(111)
#addline.addWeakScaling(axis=ax, algorithm='dynamic_nobarrier', optimistic='1', size=basesize, graphtype=allgraphtype, hostnamelike='e%',colorindex=0,linelabel='DynNoBarrier')
addline.addWeakScaling(axis=ax, algorithm='bitset', optimistic='1', size=basesize, graphtype=allgraphtype, hostnamelike='e%',colorindex=0,linelabel='Node-Lookup')
addline.addWeakScaling(axis=ax, algorithm='worksteal', optimistic='1', size=basesize, graphtype=allgraphtype, hostnamelike='e%',colorindex=1,linelabel='Worksteal')
addline.addWeakScaling(axis=ax, algorithm='locallist', optimistic='1', size=basesize, graphtype=allgraphtype, hostnamelike='e%',colorindex=2,linelabel='Scatter-Gather')
ax.plot([1,24],[1,1],'r--')
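    # Dashed reference line at y=1 across the thread range; a flat line
    # corresponds to ideal weak scaling.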
ax.legend(loc='upper right')
ax.minorticks_on()
filename = helper.plotdir + 'weakscaling_gt' + allgraphtype + '_n' + str(allsize)
plt.title('Speedup vs. Number of Threads',fontsize=helper.fontsize_label)
if(title!=''):
plt.suptitle(title,fontsize=helper.fontsize_title)
elif(suffix==''):
        plt.suptitle('Weak Scaling for ' + allgraphtype + ' Graph (' + str(allsize) + ' nodes)',fontsize=helper.fontsize_title)
else:
        plt.suptitle('Weak Scaling for ' + allgraphtype + ' Graph (' + str(allsize) + ' nodes, ' + suffix + ')',fontsize=helper.fontsize_title)
if(suffix!=''):
filename = filename + '_' + suffix
filename = filename + '.pdf'
plt.savefig(filename, format='pdf',bbox_inches='tight',dpi=1000)
print "File written to:\t", filename
if(helper.show):
plt.show()
############################################################
# Call Plotting functions
############################################################
plotWeakScaling(allsize=1000000,allgraphtype='SOFTWARE',title='Weak scaling for Software graph (basesize 100k nodes)') # software graph
plotWeakScaling(allsize=1000000,allgraphtype='RANDOMLIN8',suffix='deg8',title='Weak scaling for Random graph (basesize 100k nodes, degree 8)') # degree 8
plotWeakScaling(allsize=1000000,allgraphtype='RANDOMLIN16',suffix='deg16',title='Weak scaling for Random graph (basesize 100k nodes, degree 16)') # degree 16
plotWeakScaling(allsize=1000000,allgraphtype='RANDOMLIN32',suffix='deg32',title='Weak scaling for Random graph (basesize 100k nodes, degree 32)') # degree 32
# degree 64 has incomplete data
|
{
"content_hash": "d5aa2032db5cb9a76f8c683c1cd16d0a",
"timestamp": "",
"source": "github",
"line_count": 49,
"max_line_length": 176,
"avg_line_length": 52.755102040816325,
"alnum_prop": 0.7148936170212766,
"repo_name": "walkevin/ParallelTopologicalSorting",
"id": "e2a18290ab133933a4792874d8e89c8bc137086c",
"size": "2585",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "measurements/plotscripts/plot_weakscaling.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "C++",
"bytes": "61734"
},
{
"name": "Makefile",
"bytes": "2429"
},
{
"name": "Python",
"bytes": "16942"
}
],
"symlink_target": ""
}
|
from swgpy.object import *
def create(kernel):
result = Tangible()
result.template = "object/tangible/wearables/base/shared_base_held_both.iff"
result.attribute_template_id = 11
result.stfName("wearables_name","default_held")
#### BEGIN MODIFICATIONS ####
#### END MODIFICATIONS ####
return result
|
{
"content_hash": "0a94dcf2e605d11567f071c431b08397",
"timestamp": "",
"source": "github",
"line_count": 13,
"max_line_length": 77,
"avg_line_length": 24.23076923076923,
"alnum_prop": 0.7047619047619048,
"repo_name": "anhstudios/swganh",
"id": "894a1589b450daecf98bec70aa952a4526a5de46",
"size": "460",
"binary": false,
"copies": "2",
"ref": "refs/heads/develop",
"path": "data/scripts/templates/object/tangible/wearables/base/shared_base_held_both.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Batchfile",
"bytes": "11887"
},
{
"name": "C",
"bytes": "7699"
},
{
"name": "C++",
"bytes": "2357839"
},
{
"name": "CMake",
"bytes": "41264"
},
{
"name": "PLSQL",
"bytes": "42065"
},
{
"name": "Python",
"bytes": "7503510"
},
{
"name": "SQLPL",
"bytes": "42770"
}
],
"symlink_target": ""
}
|
import os
from oslo_config import cfg
from neutron._i18n import _
from neutron.common import config
ROOT_HELPER_OPTS = [
cfg.StrOpt('root_helper', default='sudo',
help=_("Root helper application. "
"Use 'sudo neutron-rootwrap /etc/neutron/rootwrap.conf' "
"to use the real root filter facility. Change to 'sudo' "
"to skip the filtering and just run the command "
"directly.")),
cfg.BoolOpt('use_helper_for_ns_read',
default=True,
help=_("Use the root helper when listing the namespaces on a "
"system. This may not be required depending on the "
"security configuration. If the root helper is "
"not required, set this to False for a performance "
"improvement.")),
# We can't just use root_helper=sudo neutron-rootwrap-daemon $cfg because
# it isn't appropriate for long-lived processes spawned with create_process
# Having a bool use_rootwrap_daemon option precludes specifying the
# rootwrap daemon command, which may be necessary for Xen?
cfg.StrOpt('root_helper_daemon',
help=_('Root helper daemon application to use when possible.')),
]
AGENT_STATE_OPTS = [
cfg.FloatOpt('report_interval', default=30,
help=_('Seconds between nodes reporting state to server; '
'should be less than agent_down_time, best if it '
'is half or less than agent_down_time.')),
cfg.BoolOpt('log_agent_heartbeats', default=False,
help=_('Log agent heartbeats')),
]
INTERFACE_DRIVER_OPTS = [
cfg.StrOpt('interface_driver',
help=_("The driver used to manage the virtual interface.")),
]
IPTABLES_OPTS = [
cfg.BoolOpt('comment_iptables_rules', default=True,
help=_("Add comments to iptables rules. "
"Set to false to disallow the addition of comments to "
"generated iptables rules that describe each rule's "
"purpose. System must support the iptables comments "
"module for addition of comments.")),
]
PROCESS_MONITOR_OPTS = [
cfg.StrOpt('check_child_processes_action', default='respawn',
choices=['respawn', 'exit'],
help=_('Action to be executed when a child process dies')),
cfg.IntOpt('check_child_processes_interval', default=60,
help=_('Interval between checks of child process liveness '
'(seconds), use 0 to disable')),
]
AVAILABILITY_ZONE_OPTS = [
# The default AZ name "nova" is selected to match the default
# AZ name in Nova and Cinder.
cfg.StrOpt('availability_zone', max_length=255, default='nova',
help=_("Availability zone of this node")),
]
EXT_NET_BRIDGE_OPTS = [
cfg.StrOpt('external_network_bridge', default='br-ex',
deprecated_for_removal=True,
help=_("Name of bridge used for external network "
"traffic. This should be set to an empty value for the "
"Linux Bridge. When this parameter is set, each L3 "
"agent can be associated with no more than one external "
"network. This option is deprecated and will be removed "
"in the M release.")),
]
def get_log_args(conf, log_file_name, **kwargs):
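    """Build the logging-related CLI arguments for a spawned child process."""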
cmd_args = []
if conf.debug:
cmd_args.append('--debug')
    if conf.log_dir or conf.log_file:
cmd_args.append('--log-file=%s' % log_file_name)
log_dir = None
if conf.log_dir and conf.log_file:
log_dir = os.path.dirname(
os.path.join(conf.log_dir, conf.log_file))
elif conf.log_dir:
log_dir = conf.log_dir
elif conf.log_file:
log_dir = os.path.dirname(conf.log_file)
if log_dir:
cmd_args.append('--log-dir=%s' % log_dir)
if kwargs.get('metadata_proxy_watch_log') is False:
cmd_args.append('--nometadata_proxy_watch_log')
else:
if conf.use_syslog:
cmd_args.append('--use-syslog')
if conf.syslog_log_facility:
cmd_args.append(
'--syslog-log-facility=%s' % conf.syslog_log_facility)
return cmd_args
def register_root_helper(conf):
conf.register_opts(ROOT_HELPER_OPTS, 'AGENT')
def register_agent_state_opts_helper(conf):
conf.register_opts(AGENT_STATE_OPTS, 'AGENT')
def register_interface_driver_opts_helper(conf):
conf.register_opts(INTERFACE_DRIVER_OPTS)
def register_iptables_opts(conf):
conf.register_opts(IPTABLES_OPTS, 'AGENT')
def register_process_monitor_opts(conf):
conf.register_opts(PROCESS_MONITOR_OPTS, 'AGENT')
def register_availability_zone_opts_helper(conf):
conf.register_opts(AVAILABILITY_ZONE_OPTS, 'AGENT')
def get_root_helper(conf):
return conf.AGENT.root_helper
def setup_conf():
bind_opts = [
cfg.StrOpt('state_path',
default='/var/lib/neutron',
help=_("Where to store Neutron state files. "
"This directory must be writable by the agent.")),
]
conf = cfg.ConfigOpts()
conf.register_opts(bind_opts)
return conf
# add a logging setup method here for convenience
setup_logging = config.setup_logging
|
{
"content_hash": "4b59fc6043074132a4b760787d85d48f",
"timestamp": "",
"source": "github",
"line_count": 150,
"max_line_length": 79,
"avg_line_length": 36.86,
"alnum_prop": 0.5943208536805933,
"repo_name": "bigswitch/neutron",
"id": "b6913d58d614d02ffa78da90727b981d03432dd1",
"size": "6165",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "neutron/agent/common/config.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Mako",
"bytes": "1047"
},
{
"name": "Python",
"bytes": "8468247"
},
{
"name": "Shell",
"bytes": "14648"
}
],
"symlink_target": ""
}
|
"""Stateful programmatic web browsing.
Stateful programmatic web browsing, after Andy Lester's Perl module
WWW::Mechanize.
mechanize.Browser implements the urllib2.OpenerDirector interface. Browser
objects have state, including navigation history, HTML form state, cookies,
etc. The set of features and URL schemes handled by Browser objects is
configurable. The library also provides an API that is mostly compatible with
urllib2: your urllib2 program will likely still work if you replace "urllib2"
with "mechanize" everywhere.
Features include: ftp:, http: and file: URL schemes, browser history, hyperlink
and HTML form support, HTTP cookies, HTTP-EQUIV and Refresh, Referer [sic]
header, robots.txt, redirections, proxies, and Basic and Digest HTTP
authentication.
Much of the code originally derived from Perl code by Gisle Aas (libwww-perl),
Johnny Lee (MSIE Cookie support) and last but not least Andy Lester
(WWW::Mechanize). urllib2 was written by Jeremy Hylton.
"""
import os
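# Read the release number from the first line of mechanize/_version.py.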
VERSION = open(os.path.join("mechanize", "_version.py")).\
readlines()[0].strip(' "\n')
CLASSIFIERS = """\
Development Status :: 5 - Production/Stable
Intended Audience :: Developers
Intended Audience :: System Administrators
License :: OSI Approved :: BSD License
License :: OSI Approved :: Zope Public License
Natural Language :: English
Operating System :: OS Independent
Programming Language :: Python
Programming Language :: Python :: 2
Programming Language :: Python :: 2.4
Programming Language :: Python :: 2.5
Programming Language :: Python :: 2.6
Programming Language :: Python :: 2.7
Topic :: Internet
Topic :: Internet :: File Transfer Protocol (FTP)
Topic :: Internet :: WWW/HTTP
Topic :: Internet :: WWW/HTTP :: Browsers
Topic :: Internet :: WWW/HTTP :: Indexing/Search
Topic :: Internet :: WWW/HTTP :: Site Management
Topic :: Internet :: WWW/HTTP :: Site Management :: Link Checking
Topic :: Software Development :: Libraries
Topic :: Software Development :: Libraries :: Python Modules
Topic :: Software Development :: Testing
Topic :: Software Development :: Testing :: Traffic Generation
Topic :: System :: Archiving :: Mirroring
Topic :: System :: Networking :: Monitoring
Topic :: System :: Systems Administration
Topic :: Text Processing
Topic :: Text Processing :: Markup
Topic :: Text Processing :: Markup :: HTML
Topic :: Text Processing :: Markup :: XML
"""
def main():
try:
import setuptools
except ImportError:
import ez_setup
ez_setup.use_setuptools()
import setuptools
setuptools.setup(
name = "mechanize",
version = VERSION,
license = "BSD", # or ZPL 2.1
platforms = ["any"],
classifiers = [c for c in CLASSIFIERS.split("\n") if c],
install_requires = [],
zip_safe = True,
test_suite = "test",
author = "John J. Lee",
author_email = "jjl@pobox.com",
description = __doc__.split("\n", 1)[0],
long_description = __doc__.split("\n", 2)[-1],
url = "http://wwwsearch.sourceforge.net/mechanize/",
download_url = ("http://pypi.python.org/packages/source/m/mechanize/"
"mechanize-%s.tar.gz" % VERSION),
packages = ["mechanize"],
)
if __name__ == "__main__":
main()
|
{
"content_hash": "731e3b99041d184690d0420a2b3b78a1",
"timestamp": "",
"source": "github",
"line_count": 91,
"max_line_length": 79,
"avg_line_length": 36.15384615384615,
"alnum_prop": 0.6857142857142857,
"repo_name": "jjuanda/mechanize",
"id": "646c69d07edb68c6e1706c625e22c4cc6f1d24d2",
"size": "3312",
"binary": false,
"copies": "22",
"ref": "refs/heads/master",
"path": "setup.py",
"mode": "33261",
"license": "bsd-3-clause",
"language": [
{
"name": "CSS",
"bytes": "4291"
},
{
"name": "JavaScript",
"bytes": "348"
},
{
"name": "Python",
"bytes": "1342076"
}
],
"symlink_target": ""
}
|
import abc
from typing import Any, Awaitable, Callable, Dict, Optional, Sequence, Union
import google.api_core
from google.api_core import exceptions as core_exceptions
from google.api_core import gapic_v1
from google.api_core import retry as retries
import google.auth # type: ignore
from google.auth import credentials as ga_credentials # type: ignore
from google.oauth2 import service_account # type: ignore
import pkg_resources
from google.cloud.compute_v1.services import region_operations
from google.cloud.compute_v1.types import compute
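# Resolve the installed client library version for the user-agent string;
# fall back to a default ClientInfo when distribution metadata is missing.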
try:
DEFAULT_CLIENT_INFO = gapic_v1.client_info.ClientInfo(
gapic_version=pkg_resources.get_distribution(
"google-cloud-compute",
).version,
)
except pkg_resources.DistributionNotFound:
DEFAULT_CLIENT_INFO = gapic_v1.client_info.ClientInfo()
class RegionTargetHttpsProxiesTransport(abc.ABC):
"""Abstract transport class for RegionTargetHttpsProxies."""
AUTH_SCOPES = (
"https://www.googleapis.com/auth/compute",
"https://www.googleapis.com/auth/cloud-platform",
)
DEFAULT_HOST: str = "compute.googleapis.com"
def __init__(
self,
*,
host: str = DEFAULT_HOST,
credentials: Optional[ga_credentials.Credentials] = None,
credentials_file: Optional[str] = None,
scopes: Optional[Sequence[str]] = None,
quota_project_id: Optional[str] = None,
client_info: gapic_v1.client_info.ClientInfo = DEFAULT_CLIENT_INFO,
always_use_jwt_access: Optional[bool] = False,
api_audience: Optional[str] = None,
**kwargs,
) -> None:
"""Instantiate the transport.
Args:
host (Optional[str]):
The hostname to connect to.
credentials (Optional[google.auth.credentials.Credentials]): The
authorization credentials to attach to requests. These
credentials identify the application to the service; if none
are specified, the client will attempt to ascertain the
credentials from the environment.
credentials_file (Optional[str]): A file with credentials that can
be loaded with :func:`google.auth.load_credentials_from_file`.
This argument is mutually exclusive with credentials.
scopes (Optional[Sequence[str]]): A list of scopes.
quota_project_id (Optional[str]): An optional project to use for billing
and quota.
client_info (google.api_core.gapic_v1.client_info.ClientInfo):
The client info used to send a user-agent string along with
API requests. If ``None``, then default info will be used.
Generally, you only need to set this if you're developing
your own client library.
always_use_jwt_access (Optional[bool]): Whether self signed JWT should
be used for service account credentials.
"""
self._extended_operations_services: Dict[str, Any] = {}
scopes_kwargs = {"scopes": scopes, "default_scopes": self.AUTH_SCOPES}
# Save the scopes.
self._scopes = scopes
# If no credentials are provided, then determine the appropriate
# defaults.
if credentials and credentials_file:
raise core_exceptions.DuplicateCredentialArgs(
"'credentials_file' and 'credentials' are mutually exclusive"
)
if credentials_file is not None:
credentials, _ = google.auth.load_credentials_from_file(
credentials_file, **scopes_kwargs, quota_project_id=quota_project_id
)
elif credentials is None:
credentials, _ = google.auth.default(
**scopes_kwargs, quota_project_id=quota_project_id
)
# Don't apply audience if the credentials file passed from user.
if hasattr(credentials, "with_gdch_audience"):
credentials = credentials.with_gdch_audience(
api_audience if api_audience else host
)
# If the credentials are service account credentials, then always try to use self signed JWT.
if (
always_use_jwt_access
and isinstance(credentials, service_account.Credentials)
and hasattr(service_account.Credentials, "with_always_use_jwt_access")
):
credentials = credentials.with_always_use_jwt_access(True)
# Save the credentials.
self._credentials = credentials
# Save the hostname. Default to port 443 (HTTPS) if none is specified.
if ":" not in host:
host += ":443"
self._host = host
def _prep_wrapped_messages(self, client_info):
# Precompute the wrapped methods.
self._wrapped_methods = {
self.delete: gapic_v1.method.wrap_method(
self.delete,
default_timeout=None,
client_info=client_info,
),
self.get: gapic_v1.method.wrap_method(
self.get,
default_timeout=None,
client_info=client_info,
),
self.insert: gapic_v1.method.wrap_method(
self.insert,
default_timeout=None,
client_info=client_info,
),
self.list: gapic_v1.method.wrap_method(
self.list,
default_timeout=None,
client_info=client_info,
),
self.patch: gapic_v1.method.wrap_method(
self.patch,
default_timeout=None,
client_info=client_info,
),
self.set_ssl_certificates: gapic_v1.method.wrap_method(
self.set_ssl_certificates,
default_timeout=None,
client_info=client_info,
),
self.set_url_map: gapic_v1.method.wrap_method(
self.set_url_map,
default_timeout=None,
client_info=client_info,
),
}
def close(self):
"""Closes resources associated with the transport.
.. warning::
Only call this method if the transport is NOT shared
with other clients - this may cause errors in other clients!
"""
raise NotImplementedError()
@property
def delete(
self,
) -> Callable[
[compute.DeleteRegionTargetHttpsProxyRequest],
Union[compute.Operation, Awaitable[compute.Operation]],
]:
raise NotImplementedError()
@property
def get(
self,
) -> Callable[
[compute.GetRegionTargetHttpsProxyRequest],
Union[compute.TargetHttpsProxy, Awaitable[compute.TargetHttpsProxy]],
]:
raise NotImplementedError()
@property
def insert(
self,
) -> Callable[
[compute.InsertRegionTargetHttpsProxyRequest],
Union[compute.Operation, Awaitable[compute.Operation]],
]:
raise NotImplementedError()
@property
def list(
self,
) -> Callable[
[compute.ListRegionTargetHttpsProxiesRequest],
Union[compute.TargetHttpsProxyList, Awaitable[compute.TargetHttpsProxyList]],
]:
raise NotImplementedError()
@property
def patch(
self,
) -> Callable[
[compute.PatchRegionTargetHttpsProxyRequest],
Union[compute.Operation, Awaitable[compute.Operation]],
]:
raise NotImplementedError()
@property
def set_ssl_certificates(
self,
) -> Callable[
[compute.SetSslCertificatesRegionTargetHttpsProxyRequest],
Union[compute.Operation, Awaitable[compute.Operation]],
]:
raise NotImplementedError()
@property
def set_url_map(
self,
) -> Callable[
[compute.SetUrlMapRegionTargetHttpsProxyRequest],
Union[compute.Operation, Awaitable[compute.Operation]],
]:
raise NotImplementedError()
@property
def kind(self) -> str:
raise NotImplementedError()
@property
def _region_operations_client(self) -> region_operations.RegionOperationsClient:
ex_op_service = self._extended_operations_services.get("region_operations")
if not ex_op_service:
ex_op_service = region_operations.RegionOperationsClient(
credentials=self._credentials,
transport=self.kind,
)
self._extended_operations_services["region_operations"] = ex_op_service
return ex_op_service
__all__ = ("RegionTargetHttpsProxiesTransport",)
|
{
"content_hash": "0e9385e656cc488bb69bb96a35fb8660",
"timestamp": "",
"source": "github",
"line_count": 246,
"max_line_length": 101,
"avg_line_length": 35.46341463414634,
"alnum_prop": 0.6079779917469051,
"repo_name": "googleapis/python-compute",
"id": "5186d99df1cec62c9b1bab7c0d698087e1563b25",
"size": "9324",
"binary": false,
"copies": "1",
"ref": "refs/heads/main",
"path": "google/cloud/compute_v1/services/region_target_https_proxies/transports/base.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Dockerfile",
"bytes": "2050"
},
{
"name": "Python",
"bytes": "32681847"
},
{
"name": "Shell",
"bytes": "30663"
}
],
"symlink_target": ""
}
|
"""
Running or runtime configuration related to BGP peers/neighbors.
"""
from abc import abstractmethod
import logging
from ryu.lib.packet.bgp import RF_IPv4_UC
from ryu.lib.packet.bgp import RF_IPv6_UC
from ryu.lib.packet.bgp import RF_IPv4_VPN
from ryu.lib.packet.bgp import RF_IPv6_VPN
from ryu.lib.packet.bgp import RF_RTC_UC
from ryu.lib.packet.bgp import BGPOptParamCapabilityEnhancedRouteRefresh
from ryu.lib.packet.bgp import BGPOptParamCapabilityMultiprotocol
from ryu.lib.packet.bgp import BGPOptParamCapabilityRouteRefresh
from ryu.lib.packet.bgp import BGP_CAP_ENHANCED_ROUTE_REFRESH
from ryu.lib.packet.bgp import BGP_CAP_MULTIPROTOCOL
from ryu.lib.packet.bgp import BGP_CAP_ROUTE_REFRESH
from ryu.services.protocols.bgp.base import OrderedDict
from ryu.services.protocols.bgp.rtconf.base import ADVERTISE_PEER_AS
from ryu.services.protocols.bgp.rtconf.base import BaseConf
from ryu.services.protocols.bgp.rtconf.base import BaseConfListener
from ryu.services.protocols.bgp.rtconf.base import CAP_ENHANCED_REFRESH
from ryu.services.protocols.bgp.rtconf.base import CAP_MBGP_IPV4
from ryu.services.protocols.bgp.rtconf.base import CAP_MBGP_VPNV4
from ryu.services.protocols.bgp.rtconf.base import CAP_MBGP_VPNV6
from ryu.services.protocols.bgp.rtconf.base import CAP_REFRESH
from ryu.services.protocols.bgp.rtconf.base import CAP_RTC
from ryu.services.protocols.bgp.rtconf.base import compute_optional_conf
from ryu.services.protocols.bgp.rtconf.base import ConfigTypeError
from ryu.services.protocols.bgp.rtconf.base import ConfigValueError
from ryu.services.protocols.bgp.rtconf.base import ConfWithId
from ryu.services.protocols.bgp.rtconf.base import ConfWithIdListener
from ryu.services.protocols.bgp.rtconf.base import ConfWithStats
from ryu.services.protocols.bgp.rtconf.base import ConfWithStatsListener
from ryu.services.protocols.bgp.rtconf.base import HOLD_TIME
from ryu.services.protocols.bgp.rtconf.base import MAX_PREFIXES
from ryu.services.protocols.bgp.rtconf.base import MULTI_EXIT_DISC
from ryu.services.protocols.bgp.rtconf.base import RTC_AS
from ryu.services.protocols.bgp.rtconf.base import RuntimeConfigError
from ryu.services.protocols.bgp.rtconf.base import SITE_OF_ORIGINS
from ryu.services.protocols.bgp.rtconf.base import validate
from ryu.services.protocols.bgp.rtconf.base import validate_med
from ryu.services.protocols.bgp.rtconf.base import validate_soo_list
from ryu.services.protocols.bgp.utils.validation import is_valid_ipv4
from ryu.services.protocols.bgp.utils.validation import is_valid_old_asn
LOG = logging.getLogger('bgpspeaker.rtconf.neighbor')
# Various neighbor settings.
REMOTE_AS = 'remote_as'
IP_ADDRESS = 'ip_address'
ENABLED = 'enabled'
CHANGES = 'changes'
LOCAL_ADDRESS = 'local_address'
LOCAL_PORT = 'local_port'
# Default value constants.
DEFAULT_CAP_GR_NULL = True
DEFAULT_CAP_REFRESH = True
DEFAULT_CAP_ENHANCED_REFRESH = False
DEFAULT_CAP_MBGP_IPV4 = True
DEFAULT_CAP_MBGP_VPNV4 = False
DEFAULT_CAP_MBGP_VPNV6 = False
DEFAULT_HOLD_TIME = 40
DEFAULT_ENABLED = True
DEFAULT_CAP_RTC = False
# Default value for *MAX_PREFIXES* setting is set to 0.
DEFAULT_MAX_PREFIXES = 0
DEFAULT_ADVERTISE_PEER_AS = False
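# Per-setting validators; the @validate decorator registers each function
# under its setting name so values are checked before they are applied.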
@validate(name=ENABLED)
def validate_enabled(enabled):
if not isinstance(enabled, bool):
raise ConfigValueError(desc='Enable property is not an instance of '
'boolean')
return enabled
@validate(name=CHANGES)
def validate_changes(changes):
for k, v in changes.iteritems():
if k not in (MULTI_EXIT_DISC, ENABLED):
raise ConfigValueError(desc="Unknown field to change: %s" % k)
if k == MULTI_EXIT_DISC:
validate_med(v)
elif k == ENABLED:
validate_enabled(v)
return changes
@validate(name=IP_ADDRESS)
def validate_ip_address(ip_address):
if not is_valid_ipv4(ip_address):
raise ConfigValueError(desc='Invalid neighbor ip_address: %s' %
ip_address)
return ip_address
@validate(name=LOCAL_ADDRESS)
def validate_local_address(ip_address):
if not is_valid_ipv4(ip_address):
raise ConfigValueError(desc='Invalid local ip_address: %s' %
ip_address)
return ip_address
@validate(name=LOCAL_PORT)
def validate_local_port(port):
if not isinstance(port, (int, long)):
raise ConfigTypeError(desc='Invalid local port: %s' % port)
if port < 1025 or port > 65535:
raise ConfigValueError(desc='Invalid local port value: %s, has to be'
' between 1025 and 65535' % port)
return port
@validate(name=REMOTE_AS)
def validate_remote_as(asn):
if not is_valid_old_asn(asn):
raise ConfigValueError(desc='Invalid remote as value %s' % asn)
return asn
class NeighborConf(ConfWithId, ConfWithStats):
"""Class that encapsulates one neighbors' configuration."""
UPDATE_ENABLED_EVT = 'update_enabled_evt'
UPDATE_MED_EVT = 'update_med_evt'
VALID_EVT = frozenset([UPDATE_ENABLED_EVT, UPDATE_MED_EVT])
REQUIRED_SETTINGS = frozenset([REMOTE_AS, IP_ADDRESS])
OPTIONAL_SETTINGS = frozenset([CAP_REFRESH,
CAP_ENHANCED_REFRESH, CAP_MBGP_VPNV4,
CAP_MBGP_IPV4, CAP_MBGP_VPNV6,
CAP_RTC, RTC_AS, HOLD_TIME,
ENABLED, MULTI_EXIT_DISC, MAX_PREFIXES,
ADVERTISE_PEER_AS, SITE_OF_ORIGINS,
LOCAL_ADDRESS, LOCAL_PORT])
def __init__(self, **kwargs):
super(NeighborConf, self).__init__(**kwargs)
def _init_opt_settings(self, **kwargs):
self._settings[CAP_REFRESH] = compute_optional_conf(
CAP_REFRESH, DEFAULT_CAP_REFRESH, **kwargs)
self._settings[CAP_ENHANCED_REFRESH] = compute_optional_conf(
CAP_ENHANCED_REFRESH, DEFAULT_CAP_ENHANCED_REFRESH, **kwargs)
self._settings[CAP_MBGP_IPV4] = compute_optional_conf(
CAP_MBGP_IPV4, DEFAULT_CAP_MBGP_IPV4, **kwargs)
self._settings[CAP_MBGP_VPNV4] = compute_optional_conf(
CAP_MBGP_VPNV4, DEFAULT_CAP_MBGP_VPNV4, **kwargs)
self._settings[CAP_MBGP_VPNV6] = compute_optional_conf(
CAP_MBGP_VPNV6, DEFAULT_CAP_MBGP_VPNV6, **kwargs)
self._settings[HOLD_TIME] = compute_optional_conf(
HOLD_TIME, DEFAULT_HOLD_TIME, **kwargs)
self._settings[ENABLED] = compute_optional_conf(
ENABLED, DEFAULT_ENABLED, **kwargs)
self._settings[MAX_PREFIXES] = compute_optional_conf(
MAX_PREFIXES, DEFAULT_MAX_PREFIXES, **kwargs)
self._settings[ADVERTISE_PEER_AS] = compute_optional_conf(
ADVERTISE_PEER_AS, DEFAULT_ADVERTISE_PEER_AS, **kwargs)
# We do not have valid default MED value.
# If no MED attribute is provided then we do not have to use MED.
# If MED attribute is provided we have to validate it and use it.
med = kwargs.pop(MULTI_EXIT_DISC, None)
if med and validate_med(med):
self._settings[MULTI_EXIT_DISC] = med
# We do not have valid default SOO value.
# If no SOO attribute is provided then we do not have to use SOO.
# If SOO attribute is provided we have to validate it and use it.
soos = kwargs.pop(SITE_OF_ORIGINS, None)
if soos and validate_soo_list(soos):
self._settings[SITE_OF_ORIGINS] = soos
# We do not have valid default LOCAL_ADDRESS and LOCAL_PORT value.
# If no LOCAL_ADDRESS/PORT is provided then we will bind to system
# default.
self._settings[LOCAL_ADDRESS] = compute_optional_conf(
LOCAL_ADDRESS, None, **kwargs)
self._settings[LOCAL_PORT] = compute_optional_conf(
LOCAL_PORT, None, **kwargs)
# RTC configurations.
self._settings[CAP_RTC] = \
compute_optional_conf(CAP_RTC, DEFAULT_CAP_RTC, **kwargs)
# Default RTC_AS is local (router) AS.
from ryu.services.protocols.bgp.core_manager import \
CORE_MANAGER
default_rt_as = CORE_MANAGER.common_conf.local_as
self._settings[RTC_AS] = \
compute_optional_conf(RTC_AS, default_rt_as, **kwargs)
# Since ConfWithId' default values use str(self) and repr(self), we
# call super method after we have initialized other settings.
super(NeighborConf, self)._init_opt_settings(**kwargs)
@classmethod
def get_opt_settings(cls):
self_confs = super(NeighborConf, cls).get_opt_settings()
self_confs.update(NeighborConf.OPTIONAL_SETTINGS)
return self_confs
@classmethod
def get_req_settings(cls):
self_confs = super(NeighborConf, cls).get_req_settings()
self_confs.update(NeighborConf.REQUIRED_SETTINGS)
return self_confs
@classmethod
def get_valid_evts(cls):
self_valid_evts = super(NeighborConf, cls).get_valid_evts()
self_valid_evts.update(NeighborConf.VALID_EVT)
return self_valid_evts
# =========================================================================
# Required attributes
# =========================================================================
@property
def remote_as(self):
return self._settings[REMOTE_AS]
@property
def ip_address(self):
return self._settings[IP_ADDRESS]
@property
def host_bind_ip(self):
return self._settings[LOCAL_ADDRESS]
@property
def host_bind_port(self):
return self._settings[LOCAL_PORT]
# =========================================================================
# Optional attributes with valid defaults.
# =========================================================================
@property
def hold_time(self):
return self._settings[HOLD_TIME]
@property
def cap_refresh(self):
return self._settings[CAP_REFRESH]
@property
def cap_enhanced_refresh(self):
return self._settings[CAP_ENHANCED_REFRESH]
@property
def cap_mbgp_ipv4(self):
return self._settings[CAP_MBGP_IPV4]
@property
def cap_mbgp_vpnv4(self):
return self._settings[CAP_MBGP_VPNV4]
@property
def cap_mbgp_vpnv6(self):
return self._settings[CAP_MBGP_VPNV6]
@property
def cap_rtc(self):
return self._settings[CAP_RTC]
@property
def enabled(self):
return self._settings[ENABLED]
@enabled.setter
def enabled(self, enable):
# Update enabled flag and notify listeners.
if self._settings[ENABLED] != enable:
self._settings[ENABLED] = enable
self._notify_listeners(NeighborConf.UPDATE_ENABLED_EVT,
enable)
# =========================================================================
# Optional attributes with no valid defaults.
# =========================================================================
@property
def multi_exit_disc(self):
# This property does not have any valid default. Hence if not set we
# return None.
return self._settings.get(MULTI_EXIT_DISC)
@multi_exit_disc.setter
def multi_exit_disc(self, value):
if self._settings.get(MULTI_EXIT_DISC) != value:
self._settings[MULTI_EXIT_DISC] = value
self._notify_listeners(NeighborConf.UPDATE_MED_EVT, value)
@property
def soo_list(self):
soos = self._settings.get(SITE_OF_ORIGINS)
if soos:
soos = list(soos)
else:
soos = []
return soos
@property
def rtc_as(self):
return self._settings[RTC_AS]
def exceeds_max_prefix_allowed(self, prefix_count):
allowed_max = self._settings[MAX_PREFIXES]
does_exceed = False
# Check if allowed max. is unlimited.
if allowed_max != 0:
# If max. prefix is limited, check if given exceeds this limit.
if prefix_count > allowed_max:
does_exceed = True
return does_exceed
def get_configured_capabilites(self):
"""Returns configured capabilities."""
capabilities = OrderedDict()
mbgp_caps = []
if self.cap_mbgp_ipv4:
mbgp_caps.append(
BGPOptParamCapabilityMultiprotocol(
RF_IPv4_UC.afi, RF_IPv4_UC.safi))
if self.cap_mbgp_vpnv4:
mbgp_caps.append(
BGPOptParamCapabilityMultiprotocol(
RF_IPv4_VPN.afi, RF_IPv4_VPN.safi))
if self.cap_mbgp_vpnv6:
mbgp_caps.append(
BGPOptParamCapabilityMultiprotocol(
RF_IPv6_VPN.afi, RF_IPv6_VPN.safi))
if self.cap_rtc:
mbgp_caps.append(
BGPOptParamCapabilityMultiprotocol(
RF_RTC_UC.afi, RF_RTC_UC.safi))
if mbgp_caps:
capabilities[BGP_CAP_MULTIPROTOCOL] = mbgp_caps
if self.cap_refresh:
capabilities[BGP_CAP_ROUTE_REFRESH] = [
BGPOptParamCapabilityRouteRefresh()]
if self.cap_enhanced_refresh:
capabilities[BGP_CAP_ENHANCED_ROUTE_REFRESH] = [
BGPOptParamCapabilityEnhancedRouteRefresh()]
return capabilities
def __repr__(self):
return '<%s(%r, %r, %r)>' % (self.__class__.__name__,
self.remote_as,
self.ip_address,
self.enabled)
def __str__(self):
return 'Neighbor: %s' % (self.ip_address)
class NeighborsConf(BaseConf):
"""Container of all neighbor configurations."""
ADD_NEIGH_CONF_EVT = 'add_neigh_conf_evt'
REMOVE_NEIGH_CONF_EVT = 'remove_neigh_conf_evt'
VALID_EVT = frozenset([ADD_NEIGH_CONF_EVT, REMOVE_NEIGH_CONF_EVT])
def __init__(self):
super(NeighborsConf, self).__init__()
self._neighbors = {}
def _init_opt_settings(self, **kwargs):
pass
def update(self, **kwargs):
raise NotImplementedError('Use either add/remove_neighbor_conf'
' methods instead.')
@property
def rtc_as_set(self):
"""Returns current RTC AS configured for current neighbors.
"""
rtc_as_set = set()
for neigh in self._neighbors.itervalues():
rtc_as_set.add(neigh.rtc_as)
return rtc_as_set
@classmethod
def get_valid_evts(cls):
self_valid_evts = super(NeighborsConf, cls).get_valid_evts()
self_valid_evts.update(NeighborsConf.VALID_EVT)
return self_valid_evts
def add_neighbor_conf(self, neigh_conf):
# Check if we already know this neighbor
if neigh_conf.ip_address in self._neighbors.keys():
message = 'Neighbor with given ip address already exists'
raise RuntimeConfigError(desc=message)
# Add this neighbor to known configured neighbors and generate update
# event
self._neighbors[neigh_conf.ip_address] = neigh_conf
self._notify_listeners(NeighborsConf.ADD_NEIGH_CONF_EVT, neigh_conf)
def remove_neighbor_conf(self, neigh_ip_address):
neigh_conf = self._neighbors.pop(neigh_ip_address, None)
if not neigh_conf:
raise RuntimeConfigError(desc='Tried to remove a neighbor that '
                                     'does not exist')
else:
self._notify_listeners(NeighborsConf.REMOVE_NEIGH_CONF_EVT,
neigh_conf)
return neigh_conf
def get_neighbor_conf(self, neigh_ip_address):
return self._neighbors.get(neigh_ip_address, None)
def __repr__(self):
return '<%s(%r)>' % (self.__class__.__name__, self._neighbors)
def __str__(self):
return '\'Neighbors\': %s' % self._neighbors
@property
def settings(self):
return [neighbor.settings for _, neighbor in
self._neighbors.iteritems()]
class NeighborConfListener(ConfWithIdListener, ConfWithStatsListener):
"""Base listener for change events to a specific neighbors' configurations.
"""
def __init__(self, neigh_conf):
super(NeighborConfListener, self).__init__(neigh_conf)
neigh_conf.add_listener(NeighborConf.UPDATE_ENABLED_EVT,
self.on_update_enabled)
neigh_conf.add_listener(NeighborConf.UPDATE_MED_EVT,
self.on_update_med)
@abstractmethod
def on_update_enabled(self, evt):
raise NotImplementedError('This method should be overridden.')
def on_update_med(self, evt):
raise NotImplementedError('This method should be overridden.')
class NeighborsConfListener(BaseConfListener):
"""Base listener for change events to neighbor configuration container."""
def __init__(self, neighbors_conf):
super(NeighborsConfListener, self).__init__(neighbors_conf)
neighbors_conf.add_listener(NeighborsConf.ADD_NEIGH_CONF_EVT,
self.on_add_neighbor_conf)
neighbors_conf.add_listener(NeighborsConf.REMOVE_NEIGH_CONF_EVT,
self.on_remove_neighbor_conf)
@abstractmethod
def on_add_neighbor_conf(self, evt):
raise NotImplementedError('This method should be overridden.')
@abstractmethod
def on_remove_neighbor_conf(self, evt):
raise NotImplementedError('This method should be overridden.')
|
{
"content_hash": "59b628678ad37389d625ae2a9034b556",
"timestamp": "",
"source": "github",
"line_count": 477,
"max_line_length": 79,
"avg_line_length": 36.9538784067086,
"alnum_prop": 0.6280705735519374,
"repo_name": "o3project/ryu-oe",
"id": "8acd62ed918d19ac5d617e429fffea3540707071",
"size": "18240",
"binary": false,
"copies": "4",
"ref": "refs/heads/master",
"path": "ryu/services/protocols/bgp/rtconf/neighbors.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Erlang",
"bytes": "870216"
},
{
"name": "Makefile",
"bytes": "88"
},
{
"name": "Python",
"bytes": "4413203"
},
{
"name": "Shell",
"bytes": "14253"
}
],
"symlink_target": ""
}
|
import logging
import logging.handlers
import os
import glob
import sys
from datetime import datetime
import time
import smdpipeline
start_time = time.time()
REPO_PATH = './'
sys.path.append(REPO_PATH)
output_folder = './output'
input_folder = './input'
logs_folder = './logs'
if not os.path.exists(logs_folder):
os.makedirs(logs_folder)
logger = logging.getLogger(__name__)
fh = logging.handlers.RotatingFileHandler("".join([logs_folder,
'/',
datetime.strftime(
datetime.today(), "%m%d%Y_%H%M"),
'debug.log']
),
maxBytes=1000000,
backupCount=10)
fh.setLevel(logging.DEBUG)
fh2 = logging.handlers.RotatingFileHandler("".join([logs_folder,
'/',
datetime.strftime(
datetime.today(), "%m%d%Y_%H%M"),
'info.log']
),
maxBytes=1000000,
backupCount=10)
fh2.setLevel(logging.INFO)
ch = logging.StreamHandler(sys.stdout)
ch.setLevel(1)
fh.setFormatter(logging.Formatter(
'%(asctime)s - %(name)s - %(levelname)s - %(message)s'))
fh2.setFormatter(logging.Formatter(
'%(asctime)s - %(name)s - %(levelname)s - %(message)s'))
ch.setFormatter(logging.Formatter('%(name)s - %(levelname)s - %(message)s'))
root = logging.getLogger()
root.setLevel(logging.INFO)
root.addHandler(fh)
root.addHandler(fh2)
root.addHandler(ch)
logger.info("Program started")
if not os.path.exists(output_folder):
os.makedirs(output_folder)
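# Run the pipeline over .txt files at the top level of the input folder and
# then over one level of subfolders, mirroring each subfolder in the output.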
for txt_file in glob.iglob(os.path.join(input_folder, '*.txt')):
smdpipeline.survey_pipeline(txt_file, output_folder)
for subfolder in os.listdir(input_folder):
input_subfolder_path = os.path.join(input_folder, subfolder)
output_subfolder_path = os.path.join(output_folder, subfolder)
for txt_file in glob.iglob(os.path.join(input_subfolder_path, '*.txt')):
if not os.path.exists(output_subfolder_path):
os.makedirs(output_subfolder_path)
smdpipeline.survey_pipeline(txt_file, output_subfolder_path)
execution_time = int((time.time() - start_time) * 1000)
logger.info("Done! Execution time: {} milliseconds" .format(execution_time))
|
{
"content_hash": "b046148179b56ec8a2716a660c9bb65e",
"timestamp": "",
"source": "github",
"line_count": 76,
"max_line_length": 89,
"avg_line_length": 35.526315789473685,
"alnum_prop": 0.5311111111111111,
"repo_name": "Bloodrammer/surveygen_smd",
"id": "5bbe632e8ca46d181d7b0d377139bae2f4a1c45b",
"size": "2723",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "code/main.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "17340"
}
],
"symlink_target": ""
}
|
from datetime import datetime
from .worker import function_test
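# Publish tasks with increasingly messy argument types (nested tuples,
# datetimes, sets, mixed-type dict keys) to exercise how parameters are
# serialized and displayed.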
function_test.delay(0, tuples=(1, 2, 3), lists=[1, 2, 3], dicts={'mess': None, True: 'clearly'})
function_test.delay(0, tuples=(1, True, {'mess': None}),
lists=[(1, 'rogério'), datetime.now()],
sets={1.1, False},
mixed={('any', 123): ['nice', 'huh?'], ('other', True): frozenset('abc')})
function_test.delay(50, something_wrong=True, smell={'all'}, can_handle=[])
|
{
"content_hash": "b9487c21cd19b491ad4913f36f9e6c8b",
"timestamp": "",
"source": "github",
"line_count": 10,
"max_line_length": 96,
"avg_line_length": 49,
"alnum_prop": 0.5673469387755102,
"repo_name": "rsalmei/clearly",
"id": "71d069ff637dc03571fafc7961490b0da46703a4",
"size": "491",
"binary": false,
"copies": "1",
"ref": "refs/heads/main",
"path": "tests/examples/user_code/publisher_demo.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Dockerfile",
"bytes": "547"
},
{
"name": "Makefile",
"bytes": "2987"
},
{
"name": "Python",
"bytes": "112536"
}
],
"symlink_target": ""
}
|
import curses
from ...util.Configuration import Configuration
from ..uicomponents.BaseWindow import BaseWindow
from ..uicomponents.MetaButtonBox import MetaButtonBox
from ...util.funcutils import sfill
class ApplicationMenu(BaseWindow):
"""
    The menu for login/logout, user account management, screen lock and quitting the application
"""
ITEM1 = 'LOGINOUT'
ITEM2 = 'CUACCOUNT'
ITEM3 = 'DUACCOUNT'
ITEM4 = 'LOCK'
ITEM5 = 'QUIT'
def __init__(self, parent, y, x, connected):
"""Create the menu"""
# Create the window
BaseWindow.__init__(self, parent, 10, 19 + 5, y, x, menu=True, modal=True)
self.window.attrset(Configuration.colourD)
self.window.border()
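        # Draw horizontal separators between the menu sections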
self.window.hline(2, 1, curses.ACS_BULLET, 19 + 3)
self.window.hline(5, 1, curses.ACS_BULLET, 19 + 3)
self.window.hline(7, 1, curses.ACS_BULLET, 19 + 3)
self.window.refresh()
self.window.attrset(0)
# Login/logout button
if connected:
name = 'Logout' + sfill(19 - 6, ' ')
else:
name = 'Login' + sfill(19 - 5, ' ')
self.items.append(MetaButtonBox(self, 1, 1, name, shortcut='L',
data=self.ITEM1,
colour=Configuration.colourB))
# Create user account button
self.items.append(MetaButtonBox(self, 3, 1, 'Create user account',
shortcut='n', data=self.ITEM2,
colour=Configuration.colourB))
# Delete user account button
self.items.append(MetaButtonBox(self, 4, 1, 'Delete user account',
shortcut='e', data=self.ITEM3,
colour=Configuration.colourB))
# Lock screen
name = 'Lock screen' + sfill(19 - 11, ' ')
self.items.append(MetaButtonBox(self, 6, 1, name, shortcut='k',
data=self.ITEM4,
colour=Configuration.colourB))
# Quit button
name = 'Quit' + sfill(19 - 4, ' ')
self.items.append(MetaButtonBox(self, 8, 1, name, shortcut='u',
data=self.ITEM5,
colour=Configuration.colourB))
# Ordered list of shortcut keys
self.shortcuts = ['L', 'n', 'e', 'k', 'u']
def start(self, timeout=-1):
"""See mother class"""
while True:
# Interaction loop
result = BaseWindow.start(self)
# Escape
if result is False or type(result) is int:
self.close()
return False
            # Return the selected item's data
else:
self.close()
return self.items[self.index].get_data()
|
{
"content_hash": "3763f1629e4d8ca53de6d79669a79890",
"timestamp": "",
"source": "github",
"line_count": 81,
"max_line_length": 82,
"avg_line_length": 35.50617283950617,
"alnum_prop": 0.5125173852573018,
"repo_name": "thethythy/Mnemopwd",
"id": "774f6b5cb42c90e1c8a7f6883d20d2a31ab45ed3",
"size": "4278",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "mnemopwd/client/uilayer/uiapplication/ApplicationMenu.py",
"mode": "33188",
"license": "bsd-2-clause",
"language": [
{
"name": "Python",
"bytes": "580678"
}
],
"symlink_target": ""
}
|