| column | type | stats |
|---|---|---|
| commit | string | lengths 40–40 |
| subject | string | lengths 1–3.25k |
| old_file | string | lengths 4–311 |
| new_file | string | lengths 4–311 |
| old_contents | string | lengths 0–26.3k |
| lang | string | 3 classes |
| proba | float64 | 0–1 |
| diff | string | lengths 0–7.82k |

Each record below lists these eight fields in order, separated by `|` delimiter lines.
350bb01c653ab640e586531bba6e4527f4feb622
|
Fix the GzippedWhisperReader
|
graphite_api/finders/whisper.py
|
graphite_api/finders/whisper.py
|
from __future__ import absolute_import

import gzip
import os.path
import time

from structlog import get_logger

from ..carbonlink import CarbonLinkPool
from ..intervals import Interval, IntervalSet
from ..node import BranchNode, LeafNode
from .._vendor import whisper

from . import fs_to_metric, get_real_metric_path, match_entries

logger = get_logger()


class WhisperFinder(object):
    def __init__(self, config):
        self.directories = config['whisper']['directories']
        self.carbonlink = None
        if 'carbon' in config:
            self.carbonlink = CarbonLinkPool(**config['carbon'])
        else:
            self.carbonlink = None

    def find_nodes(self, query):
        logger.debug("find_nodes", finder="whisper", start=query.startTime,
                     end=query.endTime, pattern=query.pattern)
        clean_pattern = query.pattern.replace('\\', '')
        pattern_parts = clean_pattern.split('.')

        for root_dir in self.directories:
            if not os.path.isdir(root_dir):
                os.makedirs(root_dir)
            for absolute_path in self._find_paths(root_dir, pattern_parts):
                if os.path.basename(absolute_path).startswith('.'):
                    continue
                relative_path = absolute_path[len(root_dir):].lstrip(os.sep)
                metric_path = fs_to_metric(relative_path)
                real_metric_path = get_real_metric_path(absolute_path,
                                                        metric_path)
                metric_path_parts = metric_path.split('.')
                for field_index in find_escaped_pattern_fields(query.pattern):
                    metric_path_parts[field_index] = pattern_parts[
                        field_index].replace('\\', '')
                metric_path = '.'.join(metric_path_parts)

                # Now we construct and yield an appropriate Node object
                if os.path.isdir(absolute_path):
                    yield BranchNode(metric_path)
                elif os.path.isfile(absolute_path):
                    if absolute_path.endswith('.wsp'):
                        reader = WhisperReader(absolute_path, real_metric_path,
                                               self.carbonlink)
                        yield LeafNode(metric_path, reader)
                    elif absolute_path.endswith('.wsp.gz'):
                        reader = GzippedWhisperReader(absolute_path,
                                                      real_metric_path,
                                                      self.carbonlink)
                        yield LeafNode(metric_path, reader)

    def _find_paths(self, current_dir, patterns):
        """Recursively generates absolute paths whose components
        underneath current_dir match the corresponding pattern in
        patterns"""
        pattern = patterns[0]
        patterns = patterns[1:]
        entries = os.listdir(current_dir)

        subdirs = [e for e in entries
                   if os.path.isdir(os.path.join(current_dir, e))]
        matching_subdirs = match_entries(subdirs, pattern)

        if patterns:  # we've still got more directories to traverse
            for subdir in matching_subdirs:
                absolute_path = os.path.join(current_dir, subdir)
                for match in self._find_paths(absolute_path, patterns):
                    yield match
        else:  # we've got the last pattern
            files = [e for e in entries
                     if os.path.isfile(os.path.join(current_dir, e))]
            matching_files = match_entries(files, pattern + '.*')

            for _basename in matching_files + matching_subdirs:
                yield os.path.join(current_dir, _basename)


class WhisperReader(object):
    __slots__ = ('fs_path', 'real_metric_path', 'carbonlink')

    def __init__(self, fs_path, real_metric_path, carbonlink=None):
        self.fs_path = fs_path
        self.real_metric_path = real_metric_path
        self.carbonlink = carbonlink

    def get_intervals(self):
        start = time.time() - whisper.info(self.fs_path)['maxRetention']
        end = max(os.stat(self.fs_path).st_mtime, start)
        return IntervalSet([Interval(start, end)])

    def fetch(self, startTime, endTime):  # noqa
        logger.debug("fetch", reader="whisper", path=self.fs_path,
                     metric_path=self.real_metric_path,
                     start=startTime, end=endTime)
        data = whisper.fetch(self.fs_path, startTime, endTime)
        if not data:
            return None

        time_info, values = data
        start, end, step = time_info

        if self.carbonlink:
            cached_datapoints = self.carbonlink.query(self.real_metric_path)
            if isinstance(cached_datapoints, dict):
                cached_datapoints = cached_datapoints.items()
            for timestamp, value in sorted(cached_datapoints):
                interval = timestamp - (timestamp % step)
                i = int(interval - start) // step
                values[i] = value

        return time_info, values


class GzippedWhisperReader(WhisperReader):
    def get_intervals(self):
        fh = gzip.GzipFile(self.fs_path, 'rb')
        try:
            info = whisper.__readHeader(fh)  # evil, but necessary.
        finally:
            fh.close()

        start = time.time() - info['maxRetention']
        end = max(os.stat(self.fs_path).st_mtime, start)
        return IntervalSet([Interval(start, end)])

    def fetch(self, startTime, endTime):
        logger.debug("fetch", reader="gzip_whisper", path=self.fs_path,
                     metric_path=self.real_metric_path,
                     start=startTime, end=endTime)
        fh = gzip.GzipFile(self.fs_path, 'rb')
        try:
            return whisper.file_fetch(fh, startTime, endTime)
        finally:
            fh.close()


def find_escaped_pattern_fields(pattern_string):
    pattern_parts = pattern_string.split('.')
    for index, part in enumerate(pattern_parts):
        if is_escaped_pattern(part):
            yield index


def is_escaped_pattern(s):
    for symbol in '*?[{':
        i = s.find(symbol)
        if i > 0:
            if s[i-1] == '\\':
                return True
    return False
|
Python
| 0
|
@@ -5245,24 +5245,34 @@
info =
+getattr(
whisper
-.
+, '
__readHe
@@ -5275,16 +5275,18 @@
adHeader
+')
(fh) #
|
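Decoded, the two hunks above wrap the header read in `getattr`: `info = getattr(whisper, '__readHeader')(fh)`. The likely reason is Python name mangling. A minimal sketch of the failure mode, using a hypothetical `fake_whisper` stand-in rather than the vendored module:

```python
import types

# Hypothetical stand-in for the vendored whisper module.
fake_whisper = types.ModuleType("fake_whisper")
setattr(fake_whisper, "__readHeader", lambda fh: {"maxRetention": 60})


class GzippedReaderSketch(object):
    def read_direct(self, fh):
        # Compiled inside a class body, this access is mangled to
        # fake_whisper._GzippedReaderSketch__readHeader -> AttributeError.
        return fake_whisper.__readHeader(fh)

    def read_via_getattr(self, fh):
        # The patched form from the diff: look the name up verbatim at runtime.
        return getattr(fake_whisper, '__readHeader')(fh)


print(GzippedReaderSketch().read_via_getattr(None))  # {'maxRetention': 60}
```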
ca7b3b5e91c881c9b435b6391da16177427865bb
|
Remove deprecated unused option in seed job script (#22223)
|
.test-infra/jenkins/committers_list_generator/main.py
|
.test-infra/jenkins/committers_list_generator/main.py
|
#
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#

import ldap
import logging
import jinja2
import os
import argparse
import sys


class CommittersGeneratorException(Exception):
    pass


class _ApacheLDAPException(CommittersGeneratorException):
    pass


_FILENAME = "Committers.groovy"
_PEOPLE_DN = "ou=people,dc=apache,dc=org"
_BEAM_DN = "cn=beam,ou=project,ou=groups,dc=apache,dc=org"
_GITHUB_USERNAME_ATTR = "githubUsername"
_DEFAULT_LDAP_URIS = "ldaps://ldap-us-ro.apache.org:636 ldaps://ldap-eu-ro.apache.org:636"
_DEFAULT_CERT_PATH = os.path.join(os.path.dirname(os.path.realpath(__file__)), "cert.pem")


def generate_groovy(output_dir, ldap_uris, cert_path):
    logging.info(f"Generating {_FILENAME}")
    env = jinja2.Environment(
        loader=jinja2.FileSystemLoader(
            os.path.join(os.path.dirname(os.path.realpath(__file__)), "templates")
        ),
    )
    template = env.get_template(f"{_FILENAME}.template")
    with open(os.path.join(output_dir, _FILENAME), "w") as file:
        file.write(
            template.render(
                github_usernames=get_committers_github_usernames(
                    ldap_uris=ldap_uris,
                    cert_path=cert_path,
                ),
            )
        )
    logging.info(f"{_FILENAME} saved into {output_dir}")


def get_committers_github_usernames(ldap_uris, cert_path):
    connection = None
    try:
        ldap.set_option(ldap.OPT_X_TLS_CACERTFILE, cert_path)
        ldap.set_option(ldap.OPT_X_TLS, ldap.OPT_X_TLS_DEMAND)
        ldap.set_option(ldap.OPT_X_TLS_DEMAND, True)
        ldap.set_option(ldap.OPT_REFERRALS, 0)
        connection = ldap.initialize(ldap_uris)
        people = connection.search_s(
            _PEOPLE_DN,
            ldap.SCOPE_ONELEVEL,
            attrlist=[_GITHUB_USERNAME_ATTR],
        )
        if not people:
            raise _ApacheLDAPException(f"LDAP server returned no people: {repr(people)}")
        github_usernames = {
            person_dn: data.get(_GITHUB_USERNAME_ATTR, [])
            for person_dn, data in people
        }
        committers = connection.search_s(
            _BEAM_DN,
            ldap.SCOPE_BASE,
            attrlist=["member"],
        )
        if not committers or "member" not in committers[0][1]:
            raise _ApacheLDAPException(f"LDAP server returned no committers: {repr(committers)}")
        committers_github_usernames = [
            github_username.decode()
            for committer_dn in committers[0][1]["member"]
            for github_username in github_usernames[committer_dn.decode()]
        ]
        logging.info(f"{len(committers_github_usernames)} committers' GitHub usernames fetched correctly")
        return committers_github_usernames
    except (ldap.LDAPError, _ApacheLDAPException) as e:
        raise CommittersGeneratorException("Could not fetch LDAP data") from e
    finally:
        if connection is not None:
            connection.unbind_s()


def _parse_args():
    parser = argparse.ArgumentParser(
        description="Generates groovy file containing beam committers' usernames."
    )
    parser.add_argument(
        "-o", "--output-dir",
        help="Path to the directory where the output groovy file will be saved",
        metavar="DIR",
        default=os.getcwd(),
    )
    parser.add_argument(
        "-c", "--cert-path",
        help="Path to the file containing SSL certificate of the LDAP server",
        metavar="FILE",
        default=_DEFAULT_CERT_PATH,
    )
    parser.add_argument(
        "-u", "--ldap_uris",
        help="Whitespace separated list of LDAP servers URIs",
        default=_DEFAULT_LDAP_URIS,
    )
    return parser.parse_args()


if __name__ == "__main__":
    try:
        logging.getLogger().setLevel(logging.INFO)
        args = _parse_args()
        generate_groovy(args.output_dir, args.ldap_uris, args.cert_path)
    except CommittersGeneratorException as e:
        logging.exception("Couldn't generate the list of committers")
        sys.exit(1)
|
Python
| 0
|
@@ -2189,71 +2189,8 @@
th)%0A
- ldap.set_option(ldap.OPT_X_TLS, ldap.OPT_X_TLS_DEMAND)%0A
|
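Decoded, the hunk deletes the deprecated `OPT_X_TLS` line. The surviving option block (a sketch of the changed region only; indentation assumed from the file above):

```python
# Patched option setup inside get_committers_github_usernames():
ldap.set_option(ldap.OPT_X_TLS_CACERTFILE, cert_path)
ldap.set_option(ldap.OPT_X_TLS_DEMAND, True)   # OPT_X_TLS line removed
ldap.set_option(ldap.OPT_REFERRALS, 0)
connection = ldap.initialize(ldap_uris)
```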
480e55794c5f06129b8b2fb7ed02a787f70275e2
|
add --silent option to update-toplist
|
mygpo/directory/management/commands/update-toplist.py
|
mygpo/directory/management/commands/update-toplist.py
|
from datetime import datetime

from django.core.management.base import BaseCommand

from mygpo.core.models import Podcast, SubscriberData
from mygpo.users.models import PodcastUserState
from mygpo.utils import progress
from mygpo.decorators import repeat_on_conflict


class Command(BaseCommand):

    def handle(self, *args, **options):

        # couchdbkit doesn't preserve microseconds
        started = datetime.utcnow().replace(microsecond=0)

        podcasts = Podcast.all_podcasts()
        total = Podcast.view('core/podcasts_by_oldid', limit=0).total_rows

        for n, podcast in enumerate(podcasts):
            subscriber_count = self.get_subscriber_count(podcast.get_id())
            self.update(podcast=podcast, started=started, subscriber_count=subscriber_count)
            progress(n, total)

    @repeat_on_conflict(['podcast'])
    def update(self, podcast, started, subscriber_count):

        # We've already updated this podcast
        if started in [e.timestamp for e in podcast.subscribers]:
            return

        data = SubscriberData(
            timestamp = started,
            subscriber_count = max(0, subscriber_count),
        )

        podcast.subscribers = sorted(podcast.subscribers + [data], key=lambda e: e.timestamp)
        podcast.save()

    @staticmethod
    def get_subscriber_count(podcast_id):
        db = PodcastUserState.get_db()
        x = db.view('users/subscriptions_by_podcast',
                    startkey = [podcast_id, None],
                    endkey = [podcast_id, {}],
                    reduce = True,
                    group = True,
                    group_level = 2,
                    )
        return x.count()
|
Python
| 0
|
@@ -22,16 +22,49 @@
datetime
+%0Afrom optparse import make_option
%0A%0Afrom d
@@ -320,24 +320,199 @@
eCommand):%0A%0A
+ option_list = BaseCommand.option_list + (%0A make_option('--silent', action='store_true', dest='silent',%0A default=False, help=%22Don't show any output%22),%0A )%0A%0A
def hand
@@ -536,24 +536,64 @@
*options):%0A%0A
+ silent = options.get('silent')%0A%0A
# co
@@ -1024,16 +1024,48 @@
_count)%0A
+%0A if not silent:%0A
|
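Decoded, the four hunks add an optparse flag and guard the progress output. A reconstruction of the changed parts (whitespace assumed; unchanged body abridged into place):

```python
from optparse import make_option


class Command(BaseCommand):

    option_list = BaseCommand.option_list + (
        make_option('--silent', action='store_true', dest='silent',
                    default=False, help="Don't show any output"),
    )

    def handle(self, *args, **options):

        silent = options.get('silent')

        # couchdbkit doesn't preserve microseconds
        started = datetime.utcnow().replace(microsecond=0)

        podcasts = Podcast.all_podcasts()
        total = Podcast.view('core/podcasts_by_oldid', limit=0).total_rows

        for n, podcast in enumerate(podcasts):
            subscriber_count = self.get_subscriber_count(podcast.get_id())
            self.update(podcast=podcast, started=started, subscriber_count=subscriber_count)

            if not silent:
                progress(n, total)
```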
006e6b67af6cfb2cca214666ac48dc9fd2cc0339
|
Update test values
|
scopus/tests/test_CitationOverview.py
|
scopus/tests/test_CitationOverview.py
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""Tests for `CitationOverview` module."""

from collections import namedtuple

from nose.tools import assert_equal, assert_true

import scopus


co = scopus.CitationOverview("2-s2.0-84930616647", refresh=True,
                             start=2015, end=2017)


def test_authors():
    Author = namedtuple('Author', 'name surname initials id url')
    url = 'https://api.elsevier.com/content/author/author_id/7004212771'
    expected = [Author(name='Kitchin J.R.', surname='Kitchin',
                       initials='J.R.', id='7004212771',
                       url=url)]
    assert_equal(co.authors, expected)


def test_cc():
    assert_equal(co.cc, [(2015, '0'), (2016, '4'), (2017, '2')])


def test_citationType_long():
    assert_equal(co.citationType_long, 'Review')


def test_citationType_short():
    assert_equal(co.citationType_short, 're')


def test_doi():
    assert_equal(co.doi, '10.1021/acscatal.5b00538')


def test_endingPage():
    assert_equal(co.endingPage, '3899')


def test_h_index():
    assert_equal(co.h_index, '1')


def test_issn():
    assert_equal(co.issn, '2155-5435')


def test_issueIdentifier():
    assert_equal(co.issueIdentifier, '6')


def test_lcc():
    assert_equal(co.lcc, '0')


def test_pcc():
    assert_equal(co.pcc, '0')


def test_pii():
    assert_equal(co.pii, None)


def test_publicationName():
    assert_equal(co.publicationName, 'ACS Catalysis')


def test_rangeCount():
    assert_equal(co.rangeCount, '6')


def test_rowTotal():
    assert_equal(co.rowTotal, '6')


def test_scopus_id():
    assert_equal(co.scopus_id, '84930616647')


def test_startingPage():
    assert_equal(co.startingPage, '3894')


def test_title():
    expected = 'Examples of effective data sharing in scientific publishing'
    assert_equal(co.title, expected)


def test_url():
    expected = 'https://api.elsevier.com/content/abstract/scopus_id/84930616647'
    assert_equal(co.url, expected)


def test_volume():
    assert_equal(co.volume, '5')
|
Python
| 0.000001
|
@@ -298,17 +298,17 @@
end=201
-7
+8
)%0A%0A%0Adef
@@ -719,16 +719,29 @@
17, '2')
+, (2018, '1')
%5D)%0A%0A%0Adef
@@ -1504,17 +1504,17 @@
Count, '
-6
+7
')%0A%0A%0Adef
@@ -1562,17 +1562,17 @@
Total, '
-6
+7
')%0A%0A%0Adef
|
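Decoded, the hunks extend the citation window to 2018 and bump the counts accordingly; the affected lines become:

```python
co = scopus.CitationOverview("2-s2.0-84930616647", refresh=True,
                             start=2015, end=2018)


def test_cc():
    assert_equal(co.cc, [(2015, '0'), (2016, '4'), (2017, '2'), (2018, '1')])


def test_rangeCount():
    assert_equal(co.rangeCount, '7')


def test_rowTotal():
    assert_equal(co.rowTotal, '7')
```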
ca8bbd03f57bf6e15abd406533dc7088d449e9ab
|
add published date to properties
|
scrapi/consumers/figshare/consumer.py
|
scrapi/consumers/figshare/consumer.py
|
"""
Figshare harvester of public projects for the SHARE Notification Service
Example API query: http://api.figshare.com/v1/articles/search?search_for=*&from_date=2015-2-1&end_date=2015-2-1
"""
from __future__ import unicode_literals
import time
import json
import logging
from dateutil.parser import parse
from datetime import date, timedelta
import requests
from nameparser import HumanName
from scrapi.linter import lint
from scrapi.linter.document import RawDocument, NormalizedDocument
logging.basicConfig(level=logging.INFO)
logger = logging.getLogger(__name__)
NAME = 'figshare'
URL = 'http://api.figshare.com/v1/articles/search?search_for=*&from_date='
def consume(days_back=0):
start_date = date.today() - timedelta(days_back) - timedelta(1)
end_date = date.today() - timedelta(1)
search_url = '{0}{1}-{2}-{3}&end_date={4}-{5}-{6}'.format(URL,
start_date.year,
start_date.month,
start_date.day,
end_date.year,
end_date.month,
end_date.day)
records = get_records(search_url)
record_list = []
for record in records:
doc_id = record['article_id']
record_list.append(
RawDocument(
{
'doc': json.dumps(record),
'source': NAME,
'docID': unicode(doc_id),
'filetype': 'json'
}
)
)
return record_list
def get_records(search_url):
records = requests.get(search_url)
total_records = records.json()['items_found']
page = 1
all_records = []
while len(all_records) < total_records:
logger.info('Requesting records for url: {}&page={}'.format(search_url, str(page)))
record_list = records.json()['items']
for record in record_list:
if len(all_records) < total_records:
all_records.append(record)
page += 1
records = requests.get(search_url + '&page={}'.format(str(page)))
time.sleep(3)
return all_records
def get_contributors(record):
authors = record['authors']
contributor_list = []
for person in authors:
name = HumanName(person['author_name'])
contributor = {
'prefix': name.title,
'given': name.first,
'middle': name.middle,
'family': name.last,
'suffix': name.suffix,
'email': '',
'ORCID': '',
}
contributor_list.append(contributor)
return contributor_list
def get_ids(record):
# Right now, only take the last DOI - others in properties
doi = record['DOI']
try:
doi = doi.replace('http://dx.doi.org/', '')
except AttributeError:
for item in doi:
item.replace('http://dx.doi.org/', '')
doi = item
return {
'serviceID': unicode(record['article_id']),
'url': record['url'],
'doi': doi
}
def get_properties(record):
return {
'article_id': record['article_id'],
'defined_type': record['defined_type'],
'type': record['type'],
'links': record['links'],
'doi': record['DOI']
}
def normalize(raw_doc):
doc = raw_doc.get('doc')
record = json.loads(doc)
normalized_dict = {
'title': record['title'],
'contributors': get_contributors(record),
'properties': get_properties(record),
'description': record['description'],
'tags': [],
'id': get_ids(record),
'source': NAME,
'dateUpdated': unicode(parse(record['modified_date']).isoformat())
}
return NormalizedDocument(normalized_dict)
if __name__ == '__main__':
print(lint(consume, normalize))
|
Python
| 0
|
@@ -3510,24 +3510,75 @@
ecord%5B'DOI'%5D
+,%0A 'publishedDate': record%5B'published_date'%5D
%0A %7D%0A%0A%0Adef
|
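Decoded, the hunk appends a `publishedDate` key; the patched helper reads:

```python
def get_properties(record):
    return {
        'article_id': record['article_id'],
        'defined_type': record['defined_type'],
        'type': record['type'],
        'links': record['links'],
        'doi': record['DOI'],
        'publishedDate': record['published_date']
    }
```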
4464b72eac2cc995a3276341f066bee30497d621
|
Bump version to 1.1.0 for release
|
globus_sdk/version.py
|
globus_sdk/version.py
|
# single source of truth for package version,
# see https://packaging.python.org/en/latest/single_source_version/
__version__ = "1.0.0"
|
Python
| 0
|
@@ -124,13 +124,13 @@
__ = %221.
-0
+1
.0%22%0A
|
bb9d1255548b46dc2ba7a85e26606b7dd4c926f3
|
Update original "Hello, World!" parser to latest coding, plus runTests
|
examples/greeting.py
|
examples/greeting.py
|
# greeting.py
#
# Demonstration of the pyparsing module, on the prototypical "Hello, World!"
# example
#
# Copyright 2003, by Paul McGuire
#
from pyparsing import Word, alphas
# define grammar
greet = Word( alphas ) + "," + Word( alphas ) + "!"
# input string
hello = "Hello, World!"
# parse input string
print(hello, "->", greet.parseString( hello ))
|
Python
| 0.000077
|
@@ -6,20 +6,18 @@
eting.py
-%0D
%0A#
-%0D
%0A# Demon
@@ -85,17 +85,16 @@
World!%22
-%0D
%0A# examp
@@ -95,20 +95,18 @@
example
-%0D
%0A#
-%0D
%0A# Copyr
@@ -115,16 +115,21 @@
ht 2003,
+ 2019
by Paul
@@ -140,17 +140,17 @@
uire
-%0D
%0A#
-%0D%0Afrom
+%0Aimport
pyp
@@ -160,30 +160,14 @@
ing
-import Word, alphas%0D%0A%0D
+as pp%0A
%0A# d
@@ -179,17 +179,16 @@
grammar
-%0D
%0Agreet =
@@ -188,29 +188,33 @@
greet =
+pp.
Word(
-
+pp.
alphas
-
) + %22,%22
@@ -219,31 +219,47 @@
%22 +
+pp.
Word(
-
+pp.
alphas
-
) +
-%22!%22%0D%0A%0D
+pp.oneOf(%22! ? .%22)%0A
%0A# i
@@ -269,17 +269,16 @@
t string
-%0D
%0Ahello =
@@ -297,11 +297,9 @@
ld!%22
-%0D%0A%0D
+%0A
%0A# p
@@ -315,17 +315,16 @@
t string
-%0D
%0Aprint(h
@@ -366,6 +366,145 @@
o ))
-%0D
%0A
+%0A# parse a bunch of input strings%0Agreet.runTests(%22%22%22%5C%0A Hello, World!%0A Ahoy, Matey!%0A Howdy, Pardner!%0A Morning, Neighbor!%0A %22%22%22)
|
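The hunks here amount to a full modernization of the example (CRLF line endings dropped, `pp` namespace import, an expanded greeting grammar, and a `runTests` batch). Decoding them against the file above yields:

```python
# greeting.py
#
# Demonstration of the pyparsing module, on the prototypical "Hello, World!"
# example
#
# Copyright 2003, 2019 by Paul McGuire
#
import pyparsing as pp

# define grammar
greet = pp.Word(pp.alphas) + "," + pp.Word(pp.alphas) + pp.oneOf("! ? .")

# input string
hello = "Hello, World!"

# parse input string
print(hello, "->", greet.parseString( hello ))

# parse a bunch of input strings
greet.runTests("""\
    Hello, World!
    Ahoy, Matey!
    Howdy, Pardner!
    Morning, Neighbor!
    """)
```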
04d0bb1bf71ee3a17efbb4bb15bb808cc832f04b
|
Update examples.py
|
examples/examples.py
|
examples/examples.py
|
from py_fuzz import *

print random_language(language="russian")

print random_ascii(
    seed="this is a test", randomization="byte_jitter",
    mutation_rate=0.25
)

print random_regex(
    length=20, regex="[a-zA-Z]"
)

print random_utf8(
    min_length=10,
    max_length=50
)

print random_bytes()
print random_utf8()
print random_regex(regex="[a-zA-Z]")

with open("test.png", "wb") as dump:
    dump.write(random_image())

with open("fake.png", 'wb') as dump:
    dump.write(random_image(randomization="byte_jitter", height=300, width=500, mutation_rate=0))

with open("randomLenna.png", "wb") as dump:
    dump.write("")

random_valid_image(seed="Lenna.png", mutation_rate=0.1)
|
Python
| 0
|
@@ -5,16 +5,26 @@
py_fuzz
+.generator
import
|
bc6c3834cd8383f7e1f9e109f0413bb6015a92bf
|
Remove unneeded datetime from view
|
go/scheduler/views.py
|
go/scheduler/views.py
|
import datetime

from django.views.generic import ListView

from go.scheduler.models import Task


class SchedulerListView(ListView):
    paginate_by = 12
    context_object_name = 'tasks'
    template = 'scheduler/task_list.html'

    def get_queryset(self):
        now = datetime.datetime.utcnow()
        return Task.objects.filter(
            account_id=self.request.user_api.user_account_key
        ).order_by('-scheduled_for')
|
Python
| 0
|
@@ -1,20 +1,4 @@
-import datetime%0A
from
@@ -240,49 +240,8 @@
f):%0A
- now = datetime.datetime.utcnow()%0A
|
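Decoded, the two hunks drop the unused `datetime` import and the unused `now` variable; the patched view reduces to:

```python
from django.views.generic import ListView

from go.scheduler.models import Task


class SchedulerListView(ListView):
    paginate_by = 12
    context_object_name = 'tasks'
    template = 'scheduler/task_list.html'

    def get_queryset(self):
        return Task.objects.filter(
            account_id=self.request.user_api.user_account_key
        ).order_by('-scheduled_for')
```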
654034d3a0c6ec4e023af6118d6e628336bc39dd
|
Upgrade to Python 3
|
rpt2csv.py
|
rpt2csv.py
|
import sys
import csv


def convert(inputFile, outputFile):
    """
    Convert a RPT file to a properly escaped CSV file

    RPT files are usually sourced from old versions of Microsoft SQL Server Management Studio

    RPT files are fixed width with column names on the first line, a second line with dashes and spaces,
    and then one row per record.

    The column widths are calculated from the longest field in a column, so the format varies
    depending on the results. Thankfully, we can reliably infer column widths by looking at the indexes
    of spaces on the second line.

    Here we chop each record at the index of the space on the second line and strip the result.
    Note, if the source data has significant whitespace, the stripping will remove this, but likely significant
    whitespace was destroyed by the RPT field padding anyway.
    """
    writer = csv.writer(outputFile)
    fieldIndexes = []
    headers = ""
    for idx, val in enumerate(inputFile):
        if(idx == 0):
            headers = val.decode('utf-8-sig')
        elif(idx == 1):
            fieldIndexes = list(getFieldIndexes(val, " "))
            row = list(getFields(headers, fieldIndexes))
            writer.writerow(row)
        else:
            row = list(getFields(val, fieldIndexes))
            writer.writerow(row)


def getFieldIndexes(input, sep):
    lastIndex = 0
    for idx, c in enumerate(input):
        if(c == sep):
            yield (lastIndex, idx)
            lastIndex = idx+1
    yield lastIndex, len(input)


def getFields(input, indexes):
    for index in indexes:
        yield input[index[0]:index[1]].strip()


if __name__ == '__main__':
    if(len(sys.argv) == 3):
        with open(sys.argv[1]) as inputFile:
            with open(sys.argv[2], 'wb') as outputFile:
                convert(inputFile, outputFile)
    else:
        print("Usage: rpt2csv.py inputFile outputFile")
|
Python
| 0.000672
|
@@ -14,16 +14,30 @@
port csv
+%0Aimport codecs
%0A%0Adef co
@@ -986,28 +986,8 @@
val
-.decode('utf-8-sig')
%0A%09%09e
@@ -1541,16 +1541,37 @@
.argv%5B1%5D
+,encoding='utf-8-sig'
) as inp
@@ -1605,17 +1605,27 @@
gv%5B2%5D,'w
-b
+',newline='
') as ou
|
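Decoded, the hunks port the I/O to Python 3: an `import codecs` is added next to `import csv` (apparently unused by the shown hunks), the manual byte decode goes away, and the files are opened in text mode with explicit encoding and newline handling. A sketch of just the changed lines (not a standalone script):

```python
import codecs  # added by the diff alongside `import csv`

# inside convert(): the header line no longer needs a manual decode
headers = val  # was: val.decode('utf-8-sig')

# entry point: text-mode files with explicit encodings
with open(sys.argv[1], encoding='utf-8-sig') as inputFile:
    with open(sys.argv[2], 'w', newline='') as outputFile:
        convert(inputFile, outputFile)
```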
7571b4519e54e2e747a21f7f900e486ccee19aa0
|
Update job_crud.py
|
examples/job_crud.py
|
examples/job_crud.py
|
# Copyright 2016 The Kubernetes Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

"""
Creates, updates, and deletes a job object.
"""

from os import path

import yaml
from time import sleep

from kubernetes import client, config

JOB_NAME = "pi"


def create_job_object():
    # Configureate Pod template container
    container = client.V1Container(
        name="pi",
        image="perl",
        command=["perl", "-Mbignum=bpi", "-wle", "print bpi(2000)"])
    # Create and configurate a spec section
    template = client.V1PodTemplateSpec(
        metadata=client.V1ObjectMeta(labels={"app": "pi"}),
        spec=client.V1PodSpec(restart_policy="Never", containers=[container]))
    # Create the specification of deployment
    spec = client.V1JobSpec(
        template=template,
        backoff_limit=4)
    # Instantiate the job object
    job = client.V1Job(
        api_version="batch/v1",
        kind="Job",
        metadata=client.V1ObjectMeta(name=JOB_NAME),
        spec=spec)

    return job


def create_job(api_instance, job):
    api_response = api_instance.create_namespaced_job(
        body=job,
        namespace="default")
    # Need to wait for a second for the job status to update
    sleep(1)
    print("Job created. status='%s'" % str(get_job_status(api_instance)))


def get_job_status(api_instance):
    api_response = api_instance.read_namespaced_job_status(
        name=JOB_NAME,
        namespace="default")
    return api_response.status


def update_job(api_instance, job):
    # Update container image
    job.spec.template.spec.containers[0].image = "perl"
    api_response = api_instance.patch_namespaced_job(
        name=JOB_NAME,
        namespace="default",
        body=job)
    print("Job updated. status='%s'" % str(api_response.status))


def delete_job(api_instance):
    api_response = api_instance.delete_namespaced_job(
        name=JOB_NAME,
        namespace="default",
        body=client.V1DeleteOptions(
            propagation_policy='Foreground',
            grace_period_seconds=5))
    print("Job deleted. status='%s'" % str(api_response.status))


def main():
    # Configs can be set in Configuration class directly or using helper
    # utility. If no argument provided, the config will be loaded from
    # default location.
    config.load_kube_config()
    batch_v1 = client.BatchV1Api()
    # Create a job object with client-python API. The job we
    # created is same as the `pi-job.yaml` in the /examples folder.
    job = create_job_object()

    create_job(batch_v1, job)

    update_job(batch_v1, job)

    delete_job(batch_v1)


if __name__ == '__main__':
    main()
|
Python
| 0.000001
|
@@ -1652,82 +1652,8 @@
t%22)%0A
- # Need to wait for a second for the job status to update%0A sleep(1)%0A
@@ -1750,32 +1750,91 @@
(api_instance):%0A
+ job_completed = False%0A while not job_completed:%0A
api_response
@@ -1877,32 +1877,36 @@
status(%0A
+
+
name=JOB_NAME,%0A
@@ -1896,32 +1896,36 @@
name=JOB_NAME,%0A
+
namespac
@@ -1933,24 +1933,153 @@
=%22default%22)%0A
+ if api_response.status.succeeded is not None or api_response.status.failed is not None:%0A job_completed = True%0A
return a
|
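Decoded, the hunks drop the fixed one-second sleep in `create_job` and replace it with a completion poll in `get_job_status`; the patched function reads:

```python
def get_job_status(api_instance):
    job_completed = False
    while not job_completed:
        api_response = api_instance.read_namespaced_job_status(
            name=JOB_NAME,
            namespace="default")
        if api_response.status.succeeded is not None or api_response.status.failed is not None:
            job_completed = True
    return api_response.status
```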
23b9ef799eb0d322c7dcfe796b196cce3cea6435
|
Add a ctx.log on finish
|
examples/har_dump.py
|
examples/har_dump.py
|
"""
This inline script can be used to dump flows as HAR files.
"""
import pprint
import json
import sys
from datetime import datetime
import pytz
import mitmproxy
from netlib import version
from netlib.http import cookies
HAR = {}
def start():
"""
Called once on script startup before any other events.
"""
if len(sys.argv) != 2:
raise ValueError(
'Usage: -s "har_dump.py filename" '
'(- will output to stdout, filenames ending with .zhar '
'will result in compressed har)'
)
HAR.update({
"log": {
"version": "1.2",
"creator": {
"name": "mitmproxy har_dump",
"version": "0.1",
"comment": "mitmproxy version %s" % version.MITMPROXY
},
"entries": []
}
})
def response(flow):
"""
Called when a server response has been received.
"""
# TODO: SSL and Connect Timings
# Calculate raw timings from timestamps.
# DNS timings can not be calculated for lack of a way to measure it.
# The same goes for HAR blocked.
# mitmproxy will open a server connection as soon as it receives the host
# and port from the client connection. So, the time spent waiting is actually
# spent waiting between request.timestamp_end and response.timestamp_start thus it
# correlates to HAR wait instead.
timings_raw = {
'send': flow.request.timestamp_end - flow.request.timestamp_start,
'receive': flow.response.timestamp_end - flow.response.timestamp_start,
'wait': flow.response.timestamp_start - flow.request.timestamp_end,
}
# HAR timings are integers in ms, so we re-encode the raw timings to that format.
timings = dict([(k, int(1000 * v)) for k, v in timings_raw.items()])
# full_time is the sum of all timings.
# Timings set to -1 will be ignored as per spec.
full_time = sum(v for v in timings.values() if v > -1)
started_date_time = format_datetime(datetime.utcfromtimestamp(flow.request.timestamp_start))
# Size calculations
response_body_size = len(flow.response.content)
response_body_decoded_size = len(flow.response.content)
response_body_compression = response_body_decoded_size - response_body_size
HAR["log"]["entries"].append({
"startedDateTime": started_date_time,
"time": full_time,
"request": {
"method": flow.request.method,
"url": flow.request.url,
"httpVersion": flow.request.http_version,
"cookies": format_request_cookies(flow.request.cookies.fields),
"headers": name_value(flow.request.headers),
"queryString": name_value(flow.request.query or {}),
"headersSize": len(str(flow.request.headers)),
"bodySize": len(flow.request.content),
},
"response": {
"status": flow.response.status_code,
"statusText": flow.response.reason,
"httpVersion": flow.response.http_version,
"cookies": format_response_cookies(flow.response.cookies.fields),
"headers": name_value(flow.response.headers),
"content": {
"size": response_body_size,
"compression": response_body_compression,
"mimeType": flow.response.headers.get('Content-Type', '')
},
"redirectURL": flow.response.headers.get('Location', ''),
"headersSize": len(str(flow.response.headers)),
"bodySize": response_body_size,
},
"cache": {},
"timings": timings,
})
def done():
"""
Called once on script shutdown, after any other events.
"""
dump_file = sys.argv[1]
if dump_file == '-':
mitmproxy.ctx.log(pprint.pformat(HAR))
# TODO: .zhar compression
else:
with open(dump_file, "w") as f:
f.write(json.dumps(HAR, indent=2))
# TODO: Log results via mitmproxy.ctx.log
def format_datetime(dt):
return dt.replace(tzinfo=pytz.timezone("UTC")).isoformat()
def format_cookies(cookies):
cookie_list = []
for name, value, attrs in cookies:
cookie_har = {
"name": name,
"value": value,
}
# HAR only needs some attributes
for key in ["path", "domain", "comment"]:
if key in attrs:
cookie_har[key] = attrs[key]
# These keys need to be boolean!
for key in ["httpOnly", "secure"]:
cookie_har[key] = bool(key in attrs)
# Expiration time needs to be formatted
expire_ts = cookies.get_expiration_ts(attrs)
if expire_ts:
cookie_har["expires"] = format_datetime(datetime.fromtimestamp(expire_ts))
cookie_list.append(cookie_har)
return cookie_list
def format_request_cookies(fields):
return format_cookies(cookies.group_cookies(fields))
def format_response_cookies(fields):
return format_cookies((c[0], c[1].value, c[1].attrs) for c in fields)
def name_value(obj):
"""
Convert (key, value) pairs to HAR format.
"""
return [{"name": k, "value": v} for k, v in obj.items()]
|
Python
| 0
|
@@ -3893,16 +3893,63 @@
else:%0A
+ json_dump = json.dumps(HAR, indent=2)%0A%0A
@@ -4008,77 +4008,103 @@
json
-.
+_
dump
-s(HAR, indent=2))%0A%0A # TODO: Log results via mitmproxy.ctx.log
+)%0A%0A mitmproxy.ctx.log(%22HAR log finished (wrote %25s bytes to file)%22 %25 len(json_dump))
%0A%0A%0Ad
|
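Decoded, the hunks stash the serialized HAR in a variable and log a summary line on finish; the patched tail of `done()` reads:

```python
    else:
        json_dump = json.dumps(HAR, indent=2)

        with open(dump_file, "w") as f:
            f.write(json_dump)

        mitmproxy.ctx.log("HAR log finished (wrote %s bytes to file)" % len(json_dump))
```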
f0df0e081aba7e9eb7a39088613d29c7c1e8a596
|
set integration time to 80% of sampling interval
|
examples/liveview.py
|
examples/liveview.py
|
#!/usr/bin/env python
""" File: example_liveview.py
    Author: Andreas Poehlmann
    Last change: 2013/02/27

    Liveview example
"""

import oceanoptics
import time
import numpy as np
from gi.repository import Gtk, GLib


class mpl:
    from matplotlib.figure import Figure
    from matplotlib.backends.backend_gtk3agg import FigureCanvasGTK3Agg as FigureCanvas


class DynamicPlotter(Gtk.Window):

    def __init__(self, sample_interval=0.1, smoothing=1, oversampling=1, raw=False, size=(600,350)):
        # Gtk stuff
        Gtk.Window.__init__(self, title='Ocean Optics Spectrometer')
        self.connect("destroy", lambda x : Gtk.main_quit())
        self.set_default_size(*size)
        # Data stuff
        self.sample_interval = int(sample_interval*1000)
        self.smoothing = int(smoothing)
        self._sample_n = 0
        self.raw = bool(raw)
        self.spectrometer = oceanoptics.get_a_random_spectrometer()
        self.wl = self.spectrometer.wavelengths()
        self.sp = self.spectrometer.intensities()
        self.sp = np.zeros((len(self.sp), int(oversampling)))
        # MPL stuff
        self.figure = mpl.Figure()
        self.ax = self.figure.add_subplot(1, 1, 1)
        self.ax.grid(True)
        self.canvas = mpl.FigureCanvas(self.figure)
        self.line, = self.ax.plot(self.wl, self.sp[:,0])
        # Gtk stuff
        self.add(self.canvas)
        self.canvas.show()
        self.show_all()

    def update_plot(self):
        # -> redraw on new spectrum
        # -> average over self.sample_n spectra
        # -> smooth if self.smoothing
        # remark:
        # > smoothing can be done after averaging
        # get spectrum
        sp = np.array(self.spectrometer.intensities(raw=self.raw))
        self.sp[:,self._sample_n] = sp
        self._sample_n += 1
        self._sample_n %= self.sp.shape[1]
        if self._sample_n != 0:  # do not draw or average
            return
        # average!
        sp = np.mean(self.sp, axis=1)
        if self.smoothing > 1:
            n = self.smoothing
            kernel = np.ones((n,)) / n
            sp = np.convolve(sp, kernel)[(n-1):]
        self.line.set_ydata(sp)
        self.ax.relim()
        self.ax.autoscale_view(False, False, True)
        self.canvas.draw()
        return True

    def run(self):
        GLib.timeout_add(self.sample_interval, self.update_plot)
        Gtk.main()


if __name__ == '__main__':
    import argparse
    parser = argparse.ArgumentParser()
    parser.add_argument('-r', '--raw', action='store_true', help='Show raw detector values')
    parser.add_argument('-i', '--interval', type=float, default=0.1, metavar='SECONDS',
                        help='Update interval')
    parser.add_argument('-s', '--smooth', type=int, default=1, metavar='N',
                        help='Number of spectrum points to average over')
    parser.add_argument('-O', '--oversample', type=int, default=1, metavar='N',
                        help='Average together successive spectra')
    args = parser.parse_args()

    m = DynamicPlotter(sample_interval=args.interval, raw=args.raw, smoothing=args.smooth,
                       oversampling=args.oversample)
    m.run()
|
Python
| 0
|
@@ -941,24 +941,110 @@
ctrometer()%0A
+ self.spectrometer.integration_time(time_us=(sample_interval * 0.8 * 1000000))%0A
self
|
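Decoded, the hunk inserts one call right after the spectrometer is created, setting the integration time to 80% of the sampling interval (converted to microseconds):

```python
self.spectrometer = oceanoptics.get_a_random_spectrometer()
self.spectrometer.integration_time(time_us=(sample_interval * 0.8 * 1000000))
```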
739abd7de8d4f33557265481e1ceeff71f305dfb
|
Initialize TensorFlow.
|
runppo2.py
|
runppo2.py
|
#!/usr/bin/env python
import sys
import os.path as osp
import numpy as np
import argparse
import tensorflow as tf

from belief import BeliefDriveItEnv
from policy import DriveItPolicy
from car import Car
from autopilot import LaneFollowingPilot, ReflexPilot, SharpPilot
from PositionTracking import TruePosition
from utils import Color

sys.path.append(osp.join(osp.dirname(osp.abspath(__file__)), "openai"))
from baselines.ppo2 import ppo2
from baselines.common import set_global_seeds
from baselines.common.vec_env.dummy_vec_env import DummyVecEnv
from baselines.common.vec_env.subproc_vec_env import SubprocVecEnv


def load_model(model_file, checkpoint_path):
    import cloudpickle
    with open(model_file, 'rb') as f:
        make_model = cloudpickle.load(f)
    model = make_model()
    model.load(checkpoint_path)
    return model


def render_one(model, time_limit, nbots, nenvs, nframes, seed, record=False):
    from vec_frame_stack_1 import VecFrameStack
    env0 = create_env(time_limit, nbots, seed)
    env = VecFrameStack(env0, nframes)
    obs = np.zeros((nenvs,) + env.observation_space.shape)
    steps = 0
    reward = 0
    done = False
    o = env.reset()
    if record:
        env0.render()
        input('Start recorder and press enter...')
    while not done:
        env0.render()
        steps += 1
        obs[0,:] = o
        a, _, _, _ = model.step(obs)
        o, r, done, info = env.step(a[0])
        reward += r
        print((steps, reward, info))
    input('Done. Press enter to close.')


pilots = (LaneFollowingPilot, ReflexPilot, SharpPilot)
rank = np.random.randint(0, len(pilots))
bot_colors = [Color.orange, Color.purple, Color.navy]


def create_env(time_limit, nbots, seed):
    cars = [Car.HighPerf(Color.green, v_max=2.0)]
    for i in range(nbots):
        cars.append(Car.Simple(bot_colors[i], v_max=1.2))
    bots = [pilots[(i + rank) % len(pilots)](cars[i], cars) for i in range(1, len(cars))]
    env = BeliefDriveItEnv(cars[0], bots, time_limit, noisy=True, random_position=True, bot_speed_deviation=0.15)
    env.seed(seed)
    # env = bench.Monitor(env, logger.get_dir() and osp.join(logger.get_dir(), str(rank)))
    return env


def create_venv(time_limit, nbots, nenvs, nframes, seed):
    from vec_frame_stack import VecFrameStack

    def make_env(rank):
        def env_fn():
            return create_env(time_limit, nbots, seed + rank)
        return env_fn

    env = SubprocVecEnv([make_env(i) for i in range(nenvs)])
    set_global_seeds(seed)
    env = VecFrameStack(env, nframes)
    return env


def run_many(model, time_limit, nbots, nenvs, nframes, seed):
    env = create_venv(time_limit, nbots, nenvs, nframes, seed)
    o = env.reset()
    steps = 0
    reward = np.zeros(nenvs)
    done = np.zeros(nenvs, dtype=bool)
    while not done.all():
        steps += 1
        a, _, _, _ = model.step(o)
        o, r, done, info = env.step(a)
        reward += r
        print((steps, reward, info))
    print(np.mean(reward))
    env.close()


def main():
    parser = argparse.ArgumentParser(formatter_class=argparse.ArgumentDefaultsHelpFormatter)
    parser.add_argument('-s', '--seed', help='RNG seed', type=int, default=0)
    parser.add_argument('-l', '--log-dir', type=str, default='metrics')
    parser.add_argument('-b', '--batch-name', type=str, default=None)
    parser.add_argument('-m', '--model', type=str, default='make_model.pkl')
    parser.add_argument('-c', '--checkpoint', type=str)
    parser.add_argument('-e', '--envs', help='number of environments', type=int, default=24)
    parser.add_argument('-f', '--frames', help='number of frames', type=int, default=4)
    parser.add_argument('-t', '--time-limit', type=int, default=4)
    parser.add_argument('-n', '--number-bots', type=int, default=1)
    parser.add_argument('-r', '--render', action='store_true', default=False)
    parser.add_argument('-v', '--video-record', action='store_true', default=False)
    args = parser.parse_args()

    model_dir = osp.join(args.log_dir, args.batch_name)
    model_file = osp.join(model_dir, args.model)
    checkpoint_path = osp.join(model_dir, 'checkpoints', args.checkpoint)
    assert(args.number_bots <= len(bot_colors))

    with tf.Session() as sess:
        model = load_model(model_file, checkpoint_path)
        if args.render:
            render_one(model, args.time_limit, args.number_bots, args.envs, args.frames, args.seed, args.video_record)
        else:
            run_many(model, args.time_limit, args.number_bots, args.envs, args.frames, args.seed)
        sess.close()


if __name__ == '__main__':
    main()
|
Python
| 0.000001
|
@@ -4216,39 +4216,477 @@
-with tf.Session() as sess:%0A
+import gym%0A import logging%0A import multiprocessing%0A%0A ncpu = multiprocessing.cpu_count()%0A if sys.platform == 'darwin': ncpu //= 2%0A config = tf.ConfigProto(allow_soft_placement=True,%0A intra_op_parallelism_threads=ncpu,%0A inter_op_parallelism_threads=ncpu)%0A config.gpu_options.allow_growth = True #pylint: disable=E1101%0A gym.logger.setLevel(logging.WARN)%0A tf.Session(config=config).__enter__()%0A%0A
@@ -4733,20 +4733,16 @@
t_path)%0A
-
if a
@@ -4761,20 +4761,16 @@
-
-
render_o
@@ -4868,20 +4868,16 @@
record)%0A
-
else
@@ -4886,20 +4886,16 @@
-
-
run_many
@@ -4977,29 +4977,8 @@
d)%0A%0A
- sess.close()%0A
%0Aif
|
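Decoded, the large hunk replaces the `with tf.Session() as sess:` block with an explicitly configured session entered for the rest of the process; the later hunks just de-indent the calls and drop `sess.close()`. The inserted initialization block reads:

```python
import gym
import logging
import multiprocessing

ncpu = multiprocessing.cpu_count()
if sys.platform == 'darwin': ncpu //= 2
config = tf.ConfigProto(allow_soft_placement=True,
                        intra_op_parallelism_threads=ncpu,
                        inter_op_parallelism_threads=ncpu)
config.gpu_options.allow_growth = True #pylint: disable=E1101
gym.logger.setLevel(logging.WARN)
tf.Session(config=config).__enter__()
```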
3967aa7a472bff0c289ea07e5a0cdd033e1e4abc
|
Fix an #include in the benchmarks to use the correct filename for Boost 1.0.2.
|
extras/benchmark/boost_di_source_generator.py
|
extras/benchmark/boost_di_source_generator.py
|
# Copyright 2016 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS-IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.


def generate_files(injection_graph):
    file_content_by_name = dict()

    for node_id in injection_graph.nodes_iter():
        deps = injection_graph.successors(node_id)
        file_content_by_name['component%s.h' % node_id] = _generate_component_header(node_id, deps)
        file_content_by_name['component%s.cpp' % node_id] = _generate_component_source(node_id, deps)

    [toplevel_node] = [node_id
                       for node_id in injection_graph.nodes_iter()
                       if not injection_graph.predecessors(node_id)]
    file_content_by_name['main.cpp'] = _generate_main(injection_graph, toplevel_node)

    return file_content_by_name


def _generate_component_header(component_index, deps):
    fields = ''.join(['std::shared_ptr<Interface%s> x%s;\n' % (dep, dep)
                      for dep in deps])
    component_deps = ''.join([', std::shared_ptr<Interface%s>' % dep for dep in deps])
    include_directives = ''.join(['#include "component%s.h"\n' % index for index in deps])

    template = """
#ifndef COMPONENT{component_index}_H
#define COMPONENT{component_index}_H

#include <boost/di.hpp>
#include <boost/di/extension/scopes/scoped.hpp>
#include <memory>

// Example include that the code might use
#include <vector>

namespace di = boost::di;

{include_directives}

struct Interface{component_index} {{
    virtual ~Interface{component_index}() = default;
}};

struct X{component_index} : public Interface{component_index} {{
    {fields}
    BOOST_DI_INJECT(X{component_index}{component_deps});
    virtual ~X{component_index}() = default;
}};

auto x{component_index}Component = [] {{
    return di::make_injector(di::bind<Interface{component_index}>().to<X{component_index}>().in(di::extension::scoped));
}};

#endif // COMPONENT{component_index}_H
"""
    return template.format(**locals())


def _generate_component_source(component_index, deps):
    param_initializers = ', '.join('x%s(x%s)' % (dep, dep)
                                   for dep in deps)
    if param_initializers:
        param_initializers = ': ' + param_initializers
    component_deps = ', '.join('std::shared_ptr<Interface%s> x%s' % (dep, dep)
                               for dep in deps)

    template = """
#include "component{component_index}.h"

X{component_index}::X{component_index}({component_deps})
    {param_initializers} {{
}}
"""
    return template.format(**locals())


def _generate_main(injection_graph, toplevel_component):
    include_directives = ''.join('#include "component%s.h"\n' % index
                                 for index in injection_graph.nodes_iter())
    injector_params = ', '.join('x%sComponent()' % index
                                for index in injection_graph.nodes_iter())

    template = """
{include_directives}

#include "component{toplevel_component}.h"
#include <ctime>
#include <iostream>
#include <cstdlib>
#include <iomanip>
#include <chrono>

using namespace std;

void f() {{
    auto injector = di::make_injector({injector_params});
    injector.create<std::shared_ptr<Interface{toplevel_component}>>();
}}

int main(int argc, char* argv[]) {{
    if (argc != 2) {{
        std::cout << "Need to specify num_loops as argument." << std::endl;
        exit(1);
    }}
    size_t num_loops = std::atoi(argv[1]);
    double perRequestTime = 0;
    std::chrono::high_resolution_clock::time_point start_time = std::chrono::high_resolution_clock::now();
    for (size_t i = 0; i < num_loops; i++) {{
        f();
    }}
    perRequestTime += std::chrono::duration_cast<std::chrono::duration<double>>(std::chrono::high_resolution_clock::now() - start_time).count();
    std::cout << std::fixed;
    std::cout << std::setprecision(15);
    std::cout << "Total for setup = " << 0 << std::endl;
    std::cout << "Full injection time = " << perRequestTime / num_loops << std::endl;
    std::cout << "Total per request = " << perRequestTime / num_loops << std::endl;
    return 0;
}}
"""
    return template.format(**locals())
|
Python
| 0
|
@@ -1762,16 +1762,22 @@
s/scoped
+_scope
.hpp%3E%0A#i
|
3eccd311883183457c7deafeeada8d5e270f2cff
|
Remove ipdb
|
scorecard/profiles.py
|
scorecard/profiles.py
|
import requests
import json

API_URL = 'https://data.municipalmoney.org.za/api/cubes/'

Q4 = [10, 11, 12]
current_month = 12


def get_quarter_results(results, amount_field='amount.sum'):
    return sum([r[amount_field] for r in results['cells'] if r['financial_period.period'] in Q4 and r[amount_field]])


def amount_from_results(item, results, line_items):
    """
    Returns the summed value from the results we received from the API.
    If the 'cells' list in the results is empty, no value was returned,
    and for now, we return zero in that case.
    We should be returning None, and checking for None values in the ratio calculation.
    """
    try:
        return results[item]['cells'][0][line_items[item]['aggregate']]
    except IndexError:
        return 0


def get_profile(geo_code, geo_level, profile_name=None):
    api_query_string = '{cube}/aggregate?aggregates={aggregate}&cut={cut}&drilldown=item.code|item.label|financial_period.period&page=0&pagesize=300000'

    line_items = {
        'op_exp_actual': {
            'cube': 'incexp',
            'aggregate': 'amount.sum',
            'cut': {
                'item.code': '4600',
                'amount_type.label': 'Audited Actual',
                'financial_year_end.year': 2015,
                'demarcation.code': str(geo_code),
                'period_length.length': 'year'
            }
        },
        'op_exp_budget': {
            'cube': 'incexp',
            'aggregate': 'amount.sum',
            'cut': {
                'item.code': '4600',
                'amount_type.label': 'Adjusted Budget',
                'financial_year_end.year': 2015,
                'demarcation.code': str(geo_code),
            }
        },
        'cash_flow': {
            'cube': 'cflow',
            'aggregate': 'amount.sum',
            'cut': {
                'item.code': '4200',
                'amount_type.label': 'Audited Actual',
                'financial_year_end.year': 2015,
                'demarcation.code': str(geo_code),
                'period_length.length': 'year'
            }
        },
        'cap_exp_actual': {
            'cube': 'capital',
            'aggregate': 'asset_register_summary.sum',
            'cut': {
                'item.code': '4100',
                'amount_type.label': 'Audited Actual',
                'financial_year_end.year': 2015,
                'demarcation.code': str(geo_code),
                'period_length.length': 'year'
            }
        },
        'cap_exp_budget': {
            'cube': 'capital',
            'aggregate': 'asset_register_summary.sum',
            'cut': {
                'item.code': '4100',
                'amount_type.label': 'Adjusted Budget',
                'financial_year_end.year': 2015,
                'demarcation.code': str(geo_code),
            }
        },
        'rep_maint': {
            'cube': 'repmaint',
            'aggregate': 'amount.sum',
            'cut': {
                'item.code': '5005',
                'amount_type.label': 'Audited Actual',
                'financial_year_end.year': 2015,
                'demarcation.code': str(geo_code),
                'period_length.length': 'year'
            }
        },
        'ppe': {
            'cube': 'bsheet',
            'aggregate': 'amount.sum',
            'cut': {
                'item.code': '1300',
                'amount_type.label': 'Audited Actual',
                'financial_year_end.year': 2015,
                'demarcation.code': str(geo_code),
                'period_length.length': 'year',
            }
        },
        'invest_prop': {
            'cube': 'bsheet',
            'aggregate': 'amount.sum',
            'cut': {
                'item.code': '1401',
                'amount_type.label': 'Audited Actual',
                'financial_year_end.year': 2015,
                'demarcation.code': str(geo_code),
                'period_length.length': 'year',
            }
        }
    }

    results = {}

    for item, details in line_items.iteritems():
        url = API_URL + api_query_string.format(
            cube=details['cube'],
            aggregate=details['aggregate'],
            cut='|'.join('{!s}:{!r}'.format(k, v) for (k, v) in details['cut'].iteritems()).replace("'", '"')
        )
        results[item] = requests.get(url, verify=False).json()

    import ipdb; ipdb.set_trace()

    op_exp_actual = amount_from_results('op_exp_actual', results, line_items)
    op_exp_budget = amount_from_results('op_exp_budget', results, line_items)
    cap_exp_actual = amount_from_results('cap_exp_actual', results, line_items)
    cap_exp_budget = amount_from_results('cap_exp_budget', results, line_items)
    cash_flow = amount_from_results('cash_flow', results, line_items)
    rep_maint = amount_from_results('rep_maint', results, line_items)
    ppe = amount_from_results('ppe', results, line_items)
    invest_prop = amount_from_results('invest_prop', results, line_items)

    cash_coverage = cash_flow / (op_exp_actual / 12)
    op_budget_diff = (op_exp_actual - op_exp_budget) / op_exp_budget if op_exp_budget else 'N/A'
    cap_budget_diff = (cap_exp_actual - cap_exp_budget) / cap_exp_budget if cap_exp_budget else 'N/A'
    rep_maint_perc_ppe = rep_maint * 12 / (ppe + invest_prop)

    return {
        'cash_coverage': cash_coverage,
        'op_budget_diff': op_budget_diff,
        'cap_budget_diff': cap_budget_diff,
        'rep_maint_perc_ppe': rep_maint_perc_ppe}
|
Python
| 0.000003
|
@@ -4361,42 +4361,8 @@
)%0A%0A%0A
- import ipdb; ipdb.set_trace()%0A
|
4d5edd17d7382108b90d3f60f2f11317da228603
|
Add kafka start/stop script
|
script/kafkaServer.py
|
script/kafkaServer.py
|
#!/bin/python
from __future__ import print_function
import subprocess
import sys
import json
from util import appendline, get_ip_address

if __name__ == "__main__":
    # start server one by one
    if len(sys.argv) < 2 or sys.argv[1] not in ['start', 'stop']:
        sys.stderr.write("Usage: python %s start or stop\n" % (sys.argv[0]))
        sys.exit(1)
    else:
        config = json.load(open('cluster-config.json'))
        if sys.argv[1] == 'start':
            for node in config['nodes']:
                subprocess.call(['ssh', 'cloud-user@'+node['ip'], 'bash /usr/local/kafka/bin/kafka-server-start.sh ')
        else:
            for node in config['nodes']:
                subprocess.call(['ssh', 'cloud-user@'+node['ip'], 'bash /usr/local/kafka/bin/kafka-server-stop.sh ')
|
Python
| 0.000001
|
@@ -554,18 +554,18 @@
start.sh
-
'
+%5D
)%0A%09%09else
@@ -699,12 +699,12 @@
-stop.sh
-
'
+%5D
)%0A
|
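Decoded, both hunks close the argument list that the old file left dangling (dropping the stray trailing space inside the command string and adding the missing `]`); the fixed calls parse as:

```python
for node in config['nodes']:
    subprocess.call(['ssh', 'cloud-user@'+node['ip'],
                     'bash /usr/local/kafka/bin/kafka-server-start.sh'])
# ...and likewise for kafka-server-stop.sh in the else branch.
```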
f66ad05b9b85dbb34f4922fb8ea0d606718fbd8a
|
Revert last commit
|
apts/equipment.py
|
apts/equipment.py
|
import igraph as ig
import pandas as pd
#import seaborn as sns

# TODO: make this global and configurable in all apts
#sns.set_style('whitegrid')

from .constants import NodeLabels
from .utils import Utils, Constants
from .models.optical import *
from .optics import *


class Equipment:
    """
    This class represents all possessed astronomical equipment. Allows to compute all possible
    hardware configuration. It uses directed graph for internal processing.
    """

    def __init__(self):
        self.connection_garph = ig.Graph(directed=True)
        # Register standard input and outputs
        self.add_vertex(Constants.SPACE_ID)
        self.add_vertex(Constants.EYE_ID)
        self.add_vertex(Constants.IMAGE_ID)

    def _get_paths(self, output_id):
        # Connect all outputs with inputs
        self._connect()
        # Find input and output nodes
        space_node = self.connection_garph.vs.find(name=Constants.SPACE_ID)
        output_node = self.connection_garph.vs.find(name=output_id)
        results = []
        results_set = set()
        for optical_path in Utils.find_all_paths(self.connection_garph, space_node.index, output_node.index):
            result = [self.connection_garph.vs.find(
                name=id)[NodeLabels.EQUIPMENT] for id in optical_path]
            op = OpticalPath.from_path(
                [item for item in result if item is not None])
            if op.elements() not in results_set:
                results_set.add(op.elements())
                results.append(op)
        return results

    def get_zooms(self, node_id):
        result = [OpticsUtils.compute_zoom(path)
                  for path in self._get_paths(node_id)]
        result.sort()
        return result

    def data(self):
        columns = ['label', 'type', 'zoom', 'useful_zoom',
                   'fov', 'range', 'brightness', 'elements']

        def append(result_data, columns, paths):
            for path in paths:
                data = [[path.label(),
                         path.output.output_type(),
                         path.zoom().magnitude,
                         path.zoom() < path.telescope.max_useful_zoom(),
                         path.fov().magnitude,
                         path.telescope.max_range(),
                         path.brightness().magnitude,
                         path.length()]]
                result_data = result_data.append(pd.DataFrame(
                    data, columns=columns), ignore_index=True)
            return result_data

        # TODO: This is still not best way to add data
        result_data = pd.DataFrame(columns=columns)
        result_data = append(result_data, columns,
                             self._get_paths(Constants.EYE_ID))
        result_data = append(result_data, columns,
                             self._get_paths(Constants.IMAGE_ID))
        return result_data

    def plot(self, to_plot):
        data = self.data()[[to_plot, 'type', 'label']].sort_values(by=to_plot)
        pd.DataFrame([{row[1]:row[0]} for row in data.values], index=data['label'].values).plot(
            kind="bar", stacked=True, title=to_plot)

    def plot_connection_garph(self):
        return ig.plot(self.connection_garph)

    def _connect(self):
        for out_node in self.connection_garph.vs.select(node_type=OpticalType.OUTPUT):
            # Get output type
            connection_type = out_node[NodeLabels.CONNECTION_TYPE]
            for in_node in self.connection_garph.vs.select(node_type=OpticalType.INPUT, connection_type=connection_type):
                # Connect all outputs with all inputs, excluding connecting part to itself
                out_id = OpticalEqipment.get_parent_id(
                    out_node[NodeLabels.NAME])
                in_id = OpticalEqipment.get_parent_id(in_node[NodeLabels.NAME])
                if out_id != in_id:
                    self.add_edge(out_node, in_node)

    def add_vertex(self, node_name, equipment=None, node_type=OpticalType.GENERIC, connection_type=None):
        """
        Add single node to graph. Return new vertex.
        """
        self.connection_garph.add_vertex(node_name)
        node = self.connection_garph.vs.find(name=node_name)

        if equipment is not None:
            node_type = equipment.type()
            node_label = "\n".join([equipment.get_name(), equipment.label()])
        elif node_type == OpticalType.GENERIC:
            node_label = node_name
        else:
            node_label = ""

        node[NodeLabels.TYPE] = node_type
        node[NodeLabels.LABEL] = node_label
        node[NodeLabels.COLOR] = Constants.COLORS[node_type]
        node[NodeLabels.EQUIPMENT] = equipment
        node[NodeLabels.CONNECTION_TYPE] = connection_type

        return node

    def add_edge(self, node_from, node_to):
        # Add edge if only it doesn't exist
        # TODO: are_connected is the right method?
        if not self.connection_garph.are_connected(node_from, node_to):
            self.connection_garph.add_edge(node_from, node_to)

    def register(self, optical_eqipment):
        """
        Register any optical equipment in a optical graph.
        """
        optical_eqipment.register(self)
|
Python
| 0
|
@@ -33,17 +33,16 @@
s as pd%0A
-#
import s
@@ -109,17 +109,16 @@
ll apts%0A
-#
sns.set_
|
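Decoded, the two hunks simply remove the `#` from the seaborn lines (the revert); the module prologue goes back to:

```python
import igraph as ig
import pandas as pd
import seaborn as sns

# TODO: make this global and configurable in all apts
sns.set_style('whitegrid')
```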
ba6d8b2da886762535186a54160795f6531cbf95
|
fix mat_id type to int
|
script/mesh_to_vtk.py
|
script/mesh_to_vtk.py
|
#!/usr/bin/python
# 04.10.2005
# last revision: 03.09.2007
import sys
import os.path as op

if (len( sys.argv ) == 3):
    filename_in = sys.argv[1];
    filename_out = sys.argv[2];
else:
    print 'Usage: ', sys.argv[0], 'file.mesh file.vtk'
    sys.exit()

if (filename_in == '-'):
    file_in = sys.stdin
else:
    file_in = open( filename_in, "r" );

if (filename_out == '-'):
    file_out = sys.stdout
else:
    file_out = open( filename_out, "w" );

file_out.write( r"""# vtk DataFile Version 2.0
generated by %s
ASCII
DATASET UNSTRUCTURED_GRID
""" % op.basename( sys.argv[0] ) )

##
# 1. pass.
n_nod = 0
n_els = {'Edges' : 0, 'Quadrilaterals' : 0, 'Triangles' : 0,
         'Tetrahedra' : 0, 'Hexahedra' : 0 }
sizes = {'Edges' : 3, 'Quadrilaterals' : 5, 'Triangles' : 4,
         'Tetrahedra' : 5, 'Hexahedra' : 9 }
cell_types = {'Edges' : 3, 'Quadrilaterals' : 9, 'Triangles' : 5,
              'Tetrahedra' : 10, 'Hexahedra' : 12 }
keys = n_els.keys()
while 1:
    line = file_in.readline().split()
    if not len( line ):
        break
    elif (line[0] == 'Dimension'):
        if len( line ) == 2:
            dim = int( line[1] )
        else:
            dim = int( file_in.readline() )
    elif (line[0] == 'Vertices'):
        n_nod = int( file_in.readline() )
    elif (line[0] in keys):
        n_els[line[0]] += int( file_in.readline() )

n_el = sum( n_els.values() )
total_size = sum( [sizes[ii] * n_els[ii] for ii in n_els.keys()] )
#print n_nod, n_el, n_els, total_size

if (filename_in != '-'):
    file_in.close()
    file_in = open( filename_in, "r" );

end_node_line = (3 - dim) * '0.0 ' + '\n'

##
# 2. pass.
can_cells = 0
ct = []
mat_ids = []
while 1:
    line = file_in.readline().split()
    if not len( line ):
        break
    elif (line[0] == 'Vertices'):
        n_nod = int( file_in.readline() )
        file_out.write( 'POINTS %d float\n' % n_nod )
        for ii in range( n_nod ):
            line = file_in.readline().split()
            line[-1] = end_node_line
            file_out.write( ' '.join( line ) )
        file_out.write( 'CELLS %d %d\n' % (n_el, total_size) )
        can_cells = 1
    elif (line[0] in keys):
        if not can_cells:
            raise IOError
        nn = int( file_in.readline() )
        ct += [cell_types[line[0]]] * nn
        size = [str( sizes[line[0]] - 1 )]
        for ii in range( nn ):
            line = file_in.readline().split()
            mat_ids.append( line[-1] )
            aux = [str( int( ii ) - 1) for ii in line[:-1]] + ['\n']
            file_out.write( ' '.join( size + aux ) )

file_out.write( 'CELL_TYPES %d\n' % n_el )
file_out.write( ''.join( ['%d\n' % ii for ii in ct] ) )
file_out.write( 'CELL_DATA %d\n' % n_el )
file_out.write( '\nSCALARS mat_id float 1\n' )
file_out.write( 'LOOKUP_TABLE default\n' )
for row in mat_ids:
    file_out.write( '%s\n' % row )

if (filename_in != '-'):
    file_in.close()
if (filename_out != '-'):
    file_out.close()
|
Python
| 0.000005
|
@@ -2751,20 +2751,18 @@
mat_id
-floa
+in
t 1%5Cn' )
|
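Decoded, the hunk changes the scalar type in the VTK header line from `float` to `int`:

```python
file_out.write( '\nSCALARS mat_id int 1\n' )
```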
ebfaf30fca157e83ea9e4bf33173221fc9525caf
|
Fix employees demo salary db error
|
demo/examples/employees/forms.py
|
demo/examples/employees/forms.py
|
from datetime import date

from django import forms
from django.utils import timezone

from .models import Employee, DeptManager, Title, Salary


class ChangeManagerForm(forms.Form):
    manager = forms.ModelChoiceField(queryset=Employee.objects.all()[:100])

    def __init__(self, *args, **kwargs):
        self.department = kwargs.pop('department')
        super(ChangeManagerForm, self).__init__(*args, **kwargs)

    def save(self):
        new_manager = self.cleaned_data['manager']
        DeptManager.objects.filter(
            department=self.department
        ).set(
            department=self.department,
            employee=new_manager
        )


class ChangeTitleForm(forms.Form):
    position = forms.CharField()

    def __init__(self, *args, **kwargs):
        self.employee = kwargs.pop('employee')
        super(ChangeTitleForm, self).__init__(*args, **kwargs)

    def save(self):
        new_title = self.cleaned_data['position']
        Title.objects.filter(
            employee=self.employee,
        ).set(
            employee=self.employee,
            title=new_title
        )


class ChangeSalaryForm(forms.Form):
    salary = forms.IntegerField()

    def __init__(self, *args, **kwargs):
        self.employee = kwargs.pop('employee')
        super(ChangeSalaryForm, self).__init__(*args, **kwargs)

    def save(self):
        new_salary = self.cleaned_data['salary']
        Salary.objects.filter(
            employee=self.employee,
        ).set(
            employee=self.employee,
            salary=new_salary,
        )
|
Python
| 0.000001
|
@@ -3,87 +3,26 @@
om d
-atetime import date%0A%0Afrom django import forms%0Afrom django.utils import timezone
+jango import forms
%0A%0Afr
@@ -1110,24 +1110,41 @@
ntegerField(
+max_value=1000000
)%0A%0A def _
|
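Decoded, the first hunk trims the imports down to `from django import forms` (dropping the unused `date` and `timezone` imports) and the second caps the salary field; the changed lines:

```python
from django import forms

# ChangeSalaryForm: the unbounded field gains an upper limit
salary = forms.IntegerField(max_value=1000000)
```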
1fa85588740a63696ab03e3b267281d2163495ea
|
Fix another misnamed variable
|
scriptorium/papers.py
|
scriptorium/papers.py
|
#!/usr/bin/env python
"""Paper oriented operations."""
import glob
import subprocess
import re
import os
import shutil
import scriptorium
def paper_root(dname):
"""Given a directory, finds the root document for the paper."""
root_doc = None
for fname in glob.glob(os.path.join(dname, '*.mmd')):
#Metadata only exists in the root document
output = subprocess.check_output(['multimarkdown', '-m', fname])
if output:
root_doc = fname
break
return os.path.basename(root_doc) if root_doc else None
def get_template(fname):
"""Attempts to find the template of a paper in a given file."""
output = subprocess.check_output(['multimarkdown', '-e', 'latexfooter', fname])
template_re = re.compile(r'(?P<template>[a-zA-Z0-9._]*)\/footer.tex')
match = template_re.search(output)
return match.group('template') if match else None
def to_pdf(paper_dir, template_dir=None, use_shell_escape=False):
"""Build paper in the given directory, returning the PDF filename if successful."""
template_dir = template_dir if template_dir else scriptorium.TEMPLATE_DIR
paper = os.path.abspath(paper_dir)
if not os.path.isdir(paper):
raise IOError("{0} is not a valid directory".format(paper))
old_cwd = os.getcwd()
if not os.path.samefile(old_cwd, paper):
os.chdir(paper)
fname = paper_root('.')
if not fname:
raise IOError("{0} does not contain a file that appears to be the root of the paper.".format(paper))
all_mmd = glob.glob('*.mmd')
default_mmd = subprocess.check_output(['multimarkdown', '-x', fname], universal_newlines=True)
default_mmd = default_mmd.splitlines()
for mmd in set(all_mmd) - set(default_mmd):
bname = os.path.basename(mmd).split('.')[0]
tname = '{0}.tex'.format(bname)
subprocess.check_call(['multimarkdown', '-t', 'latex', '-o', tname, mmd])
bname = os.path.basename(fname).split('.')[0]
tname = '{0}.tex'.format(bname)
#Need to set up environment here
new_env = dict(os.environ)
texinputs = './:{0}'.format(template_dir + '/.//')
if 'TEXINPUTS' in new_env:
texinputs = '{0}:{1}'.format(texinputs, new_env['TEXINPUTS'])
texinputs = texinputs + ':'
new_env['TEXINPUTS'] = texinputs
pdf_cmd = ['pdflatex', '-halt-on-error', tname]
if use_shell_escape:
pdf_cmd.append('-shell-escape')
try:
output = subprocess.check_output(pdf_cmd, env=new_env)
except subprocess.CalledProcessError:
print('LaTeX conversion failed with the following output:\n', output)
return None
auxname = '{0}.aux'.format(bname)
#Check if bibtex is defined in the frontmatter
bibtex_re = re.compile(r'^bibtex:')
if bibtex_re.search(open(fname).read()):
biber_re = re.compile(r'\\bibdata')
full = open('paper.aux').read()
with open(os.devnull, 'w') as null:
if biber_re.search(full):
subprocess.check_call(['bibtex', auxname], stdout=null, stderr=null)
else:
subprocess.check_call(['biber', bname], stdout=null, stderr=null)
subprocess.check_call(pdf_cmd, env=new_env, stdout=null, stderr=null)
subprocess.check_call(pdf_cmd, env=new_env, stdout=null, stderr=null)
# Revert working directory
if not os.path.samefile(os.getcwd(), old_cwd):
os.chdir(old_cwd)
return os.path.join(paper, '{0}.pdf'.format(bname))
def create(paper_dir, template, force=False, use_git=True, config=None):
"""Create folder with paper skeleton."""
config = config if config else []
if os.path.exists(paper_dir) and not force:
        print('{0} exists, will not overwrite. Use -f to force creation.'.format(paper_dir))
return False
template_dir = scriptorium.find_template(template, scriptorium.TEMPLATES_DIR)
if not template_dir:
print('{0} is not an installed template.'.format(template))
return False
os.makedirs(paper_dir)
if use_git:
here = os.path.dirname(os.path.realpath(__file__))
giname = os.path.join(here, 'data', 'gitignore')
shutil.copyfile(giname, os.path.join(paper_dir))
#Create frontmatter section for paper
front_file = os.path.join(scriptorium.TEMPLATE_DIR, 'frontmatter.mmd')
if os.path.exists(front_file):
with open(front_file, 'r') as fp:
paper = fp.read()
else:
paper = ''
#Create metadata section
metaex_file = os.path.join(scriptorium.TEMPLATE_DIR, 'metadata.tex')
if os.path.exists(metaex_file):
with open(metaex_file, 'r') as fp:
metadata = fp.read()
else:
metadata = ''
for opt in config:
        repl = re.compile(r'\${0}'.format(opt[0].upper()))
        paper = repl.sub(opt[1], paper)
        metadata = repl.sub(opt[1], metadata)
#Regex to find variable names
var_re = re.compile(r'\$[A-Z0-9]+')
paper_file = os.path.join(paper_dir, 'paper.mmd')
with open(paper_file, 'w') as fp:
fp.write(paper)
fp.write('\n')
fp.write('latex input: {0}/setup.tex\n'.format(template))
fp.write('latex footer: {0}/footer.tex\n\n'.format(template))
for ii in var_re.finditer(paper):
print('{0} contains unpopulated variable {1}'.format(paper_file, ii.group(0)))
if metadata:
metadata_file = os.path.join(paper_dir, 'metadata.tex')
with open(metadata_file, 'w') as fp:
fp.write(metadata)
for mtch in var_re.finditer(metadata):
print('{0} contains unpopulated variable {1}'.format(metadata_file, mtch.group(0)))
return True
|
Python
| 0.00157
|
@@ -3883,17 +3883,16 @@
TEMPLATE
-S
_DIR)%0A%0A
|
d82d43a32d770498e802b44089637e774c331c13
|
test for post and terminals
|
busineme/core/tests/test_views.py
|
busineme/core/tests/test_views.py
|
from django.test import TestCase
from django.test import Client
from ..models import Busline
from ..models import Terminal
STATUS_OK = 200
STATUS_NOT_FOUND = 404
BUSLINE_NOT_FOUND_ID = 99999999
class TestSearchResultView(TestCase):
def setUp(self):
self.client = Client()
self.busline = Busline()
self.busline.line_number = '001'
self.busline.description = 'route'
self.busline.route_size = 0.1
self.busline.fee = 3.50
self.terminal = Terminal(description="terminal")
self.terminal.save()
self.busline.save()
self.busline.terminals.add(self.terminal)
def test_get(self):
response = self.client.get("/buslines/")
code = response.status_code
self.assertEquals(code, STATUS_OK)
def test_get_busline(self):
bus = Busline.objects.get(description="route")
response = self.client.get(
"/buslines/" + str(bus.id) + "/")
code = response.status_code
self.assertEquals(code, STATUS_OK)
def test_get_busline_not_found(self):
response = self.client.get(
'/buslines/' + str(BUSLINE_NOT_FOUND_ID) + "/")
code = response.status_code
self.assertEquals(code, STATUS_OK)
class TestTerminalSearchResultView(TestCase):
def setUp(self):
self.terminal = Terminal()
self.terminal.description = "Terminal Description Test String"
self.terminal.addres = "Terminal Adress Test String "
self.terminal.save()
def test_get(self):
response = self.client.get("/terminals/")
code = response.status_code
self.assertEquals(code, STATUS_OK)
def test_get_terminal(self):
terminal = self.terminal.id
response = self.client.get("/terminals/%s/" % str(terminal))
code = response.status_code
self.assertEquals(code, STATUS_OK)
|
Python
| 0
|
@@ -115,16 +115,89 @@
Terminal
+%0Afrom ..models import Post%0Afrom authentication.models import BusinemeUser
%0A%0ASTATUS
@@ -229,23 +229,23 @@
D = 404%0A
-BUSLINE
+GENERIC
_NOT_FOU
@@ -1223,15 +1223,15 @@
str(
-BUSLINE
+GENERIC
_NOT
@@ -1916,32 +1916,32 @@
nse.status_code%0A
-
self.ass
@@ -1947,28 +1947,1340 @@
sertEquals(code, STATUS_OK)%0A
+%0A def test_get_terminal_null(self):%0A response = self.client.get('%5C%0A /terminals/%25s/' %25 (str(GENERIC_NOT_FOUND_ID)))%0A code = response.status_code%0A self.assertEquals(code, STATUS_NOT_FOUND)%0A%0A%0Aclass TestPostView(TestCase):%0A%0A def setUp(self):%0A self.post = Post()%0A%0A self.busline = Busline()%0A self.busline.line_number = %22001%22%0A self.busline.route_size = 0.1%0A self.busline.fee = 3.50%0A self.busline.save()%0A%0A self.user = BusinemeUser()%0A self.user.username = %22TestUser%22%0A self.user.save()%0A%0A self.post.busline = self.busline%0A self.post.traffic = 1%0A self.post.capacity = 1%0A self.post.user = self.user%0A self.post.save()%0A%0A def test_get(self):%0A response = self.client.get(%22/posts/%22)%0A code = response.status_code%0A self.assertEquals(code, STATUS_OK)%0A%0A def test_get_post(self):%0A post_id = self.post.id%0A response = self.client.get(%22/posts/%25s/%22 %25 str(post_id))%0A code = response.status_code%0A self.assertEquals(code, STATUS_OK)%0A%0A def test_get_terminal_null(self):%0A response = self.client.get('%5C%0A /posts/%25s/' %25 (str(GENERIC_NOT_FOUND_ID)))%0A code = response.status_code%0A self.assertEquals(code, STATUS_NOT_FOUND)%0A
|
544c6190fee4ca9f2746862a12f54f01af37148f
|
Fix deprecation warning of scipy.median.
|
neuroimaging/fixes/scipy/stats/models/robust/scale.py
|
neuroimaging/fixes/scipy/stats/models/robust/scale.py
|
import numpy as np
from scipy.stats import norm, median
def unsqueeze(data, axis, oldshape):
"""
unsqueeze a collapsed array
>>> from numpy import mean
>>> from numpy.random import standard_normal
>>> x = standard_normal((3,4,5))
>>> m = mean(x, axis=1)
>>> m.shape
(3, 5)
>>> m = unsqueeze(m, 1, x.shape)
>>> m.shape
(3, 1, 5)
>>>
"""
newshape = list(oldshape)
newshape[axis] = 1
return data.reshape(newshape)
def MAD(a, c=0.6745, axis=0):
"""
Median Absolute Deviation along given axis of an array:
median(abs(a - median(a))) / c
"""
a = np.asarray(a, np.float64)
d = median(a, axis=axis)
d = unsqueeze(d, axis, a.shape)
return median(np.fabs(a - d) / c, axis=axis)
class Huber:
"""
Huber's proposal 2 for estimating scale.
R Venables, B Ripley. \'Modern Applied Statistics in S\'
Springer, New York, 2002.
"""
c = 1.5
tol = 1.0e-06
tmp = 2 * norm.cdf(c) - 1
gamma = tmp + c**2 * (1 - tmp) - 2 * c * norm.pdf(c)
del tmp
niter = 30
def __call__(self, a, mu=None, scale=None, axis=0):
"""
Compute Huber\'s proposal 2 estimate of scale, using an optional
initial value of scale and an optional estimate of mu. If mu
is supplied, it is not reestimated.
"""
self.axis = axis
self.a = np.asarray(a, np.float64)
if mu is None:
self.n = self.a.shape[0] - 1
self.mu = median(self.a, axis=axis)
self.est_mu = True
else:
self.n = self.a.shape[0]
self.mu = mu
self.est_mu = False
if scale is None:
self.scale = MAD(self.a, axis=self.axis)**2
else:
self.scale = scale
self.scale = unsqueeze(self.scale, self.axis, self.a.shape)
self.mu = unsqueeze(self.mu, self.axis, self.a.shape)
for donothing in self:
pass
self.s = np.squeeze(np.sqrt(self.scale))
del(self.scale); del(self.mu); del(self.a)
return self.s
def __iter__(self):
self.iter = 0
return self
def next(self):
a = self.a
subset = self.subset(a)
if self.est_mu:
mu = np.sum(subset * a + (1 - Huber.c) * subset, axis=self.axis) / a.shape[self.axis]
else:
mu = self.mu
        mu = unsqueeze(mu, self.axis, self.a.shape)
scale = np.sum(subset * (a - mu)**2, axis=self.axis) / (self.n * Huber.gamma - np.sum(1. - subset, axis=self.axis) * Huber.c**2)
self.iter += 1
if np.alltrue(np.less_equal(np.fabs(np.sqrt(scale) - np.sqrt(self.scale)), np.sqrt(self.scale) * Huber.tol)) and np.alltrue(np.less_equal(np.fabs(mu - self.mu), np.sqrt(self.scale) * Huber.tol)):
self.scale = scale
self.mu = mu
raise StopIteration
else:
self.scale = scale
self.mu = mu
self.scale = unsqueeze(self.scale, self.axis, self.a.shape)
if self.iter >= self.niter:
raise StopIteration
def subset(self, a):
tmp = (a - self.mu) / np.sqrt(self.scale)
return np.greater(tmp, -Huber.c) * np.less(tmp, Huber.c)
huber = Huber()
|
Python
| 0.000001
|
@@ -40,17 +40,34 @@
ort norm
-,
+%0Afrom numpy import
median%0A
|
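The fix swaps the deprecated scipy.stats.median for numpy's, which accepts the same (a, axis) call pattern used by MAD and Huber above. A quick sketch of the drop-in behaviour:

import numpy as np

a = np.array([[1.0, 2.0, 9.0],
              [3.0, 5.0, 7.0]])
d = np.median(a, axis=0)  # array([2. , 3.5, 8. ]): column-wise medians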
dd7a857c98975eac7930747e0aee34ebcb9f3178
|
Update Evaluation.py
|
src/LiviaNet/Modules/General/Evaluation.py
|
src/LiviaNet/Modules/General/Evaluation.py
|
"""
Copyright (c) 2016, Jose Dolz. All rights reserved.
Redistribution and use in source and binary forms, with or without modification,
are permitted provided that the following conditions are met:
1. Redistributions of source code must retain the above copyright notice,
this list of conditions and the following disclaimer.
2. Redistributions in binary form must reproduce the above copyright notice,
this list of conditions and the following disclaimer in the documentation
and/or other materials provided with the distribution.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES
OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT
HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
OTHER DEALINGS IN THE SOFTWARE.
Jose Dolz. Dec, 2016.
email: jose.dolz.upv@gmail.com
LIVA Department, ETS, Montreal.
"""
import pdb
import numpy as np
# ----- Dice Score -----
def computeDice(autoSeg, groundTruth):
""" Returns
-------
DiceArray : floats array
Dice coefficient as a float on range [0,1].
Maximum similarity = 1
No similarity = 0 """
n_classes = int( np.max(groundTruth) + 1)
DiceArray = []
for c_i in xrange(1,n_classes):
idx_Auto = np.where(autoSeg.flatten() == c_i)[0]
idx_GT = np.where(groundTruth.flatten() == c_i)[0]
autoArray = np.zeros(autoSeg.size,dtype=np.bool)
autoArray[idx_Auto] = 1
gtArray = np.zeros(autoSeg.size,dtype=np.bool)
gtArray[idx_GT] = 1
dsc = dice(autoArray, gtArray, empty_score=1.0)
#dice = np.sum(autoSeg[groundTruth==c_i])*2.0 / (np.sum(autoSeg) + np.sum(groundTruth))
DiceArray.append(dsc)
return DiceArray
def dice(im1, im2, empty_score=1.0):
"""
Computes the Dice coefficient
----------
im1 : boolean array
im2 : boolean array
If they are not boolean, they will be converted.
Returns
-------
dice : float
Dice coefficient as a float on range [0,1].
Maximum similarity = 1
No similarity = 0
Both are empty (sum eq to zero) = empty_score
Notes
-----
The order of inputs for `dice` is irrelevant. The result will be
identical if `im1` and `im2` are switched.
"""
im1 = np.asarray(im1).astype(np.bool)
im2 = np.asarray(im2).astype(np.bool)
if im1.size != im2.size:
raise ValueError("Size mismatch between input arrays!!!")
im_sum = im1.sum() + im2.sum()
    if im_sum == 0:
        return empty_score
# Compute Dice
intersection = np.logical_and(im1, im2)
return 2. * intersection.sum() / im_sum
|
Python
| 0
|
@@ -2270,24 +2270,17 @@
ed.%0A
-Returns
%0A
+
----
@@ -2291,28 +2291,22 @@
-dice : float%0A
+It returns the
Dic
@@ -2325,32 +2325,36 @@
t as a float on
+the
range %5B0,1%5D.%0A
@@ -2362,255 +2362,59 @@
-Maximum similarity = 1%0A No similarity = 0%0A Both are empty (sum eq to zero) = empty_score%0A %0A Notes%0A -----%0A The order of inputs for %60dice%60 is irrelevant. The result will be%0A identical if %60im1%60 and %60im2%60 are switched.
+1: Perfect overlapping %0A 0: Not overlapping
%0A
|
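A toy check of the dice helper above, assuming two boolean masks of equal size that share 2 of their 3 foreground voxels:

import numpy as np

im1 = np.array([1, 1, 1, 0], dtype=np.bool)
im2 = np.array([1, 1, 0, 1], dtype=np.bool)
# intersection = 2 voxels, im_sum = 3 + 3, so dice(im1, im2) == 4 / 6 ≈ 0.667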
87af22b507fd8d17f707b5659e2914709527d35e
|
Refactor client.py. Pass default value to getattr
|
gobiko/apns/client.py
|
gobiko/apns/client.py
|
import importlib
import json
import jwt
import time
import uuid
from collections import namedtuple
from contextlib import closing
from hyper import HTTP20Connection
from .exceptions import (
InternalException,
ImproperlyConfigured,
PayloadTooLarge,
BadDeviceToken,
PartialBulkMessage
)
from .utils import validate_private_key, wrap_private_key
ALGORITHM = 'ES256'
SANDBOX_HOST = 'api.development.push.apple.com:443'
PRODUCTION_HOST = 'api.push.apple.com:443'
MAX_NOTIFICATION_SIZE = 4096
APNS_RESPONSE_CODES = {
'Success': 200,
'BadRequest': 400,
'TokenError': 403,
'MethodNotAllowed': 405,
'TokenInactive': 410,
'PayloadTooLarge': 413,
'TooManyRequests': 429,
'InternalServerError': 500,
'ServerUnavailable': 503,
}
APNSResponseStruct = namedtuple('APNSResponseStruct', APNS_RESPONSE_CODES.keys())
APNSResponse = APNSResponseStruct(**APNS_RESPONSE_CODES)
class APNsClient(object):
def __init__(self, team_id, auth_key_id,
auth_key=None, auth_key_filepath=None, bundle_id=None, use_sandbox=False, force_proto=None, wrap_key=False
):
if not (auth_key_filepath or auth_key):
raise ImproperlyConfigured(
'You must provide either an auth key or a path to a file containing the auth key'
)
if not auth_key:
try:
with open(auth_key_filepath, "r") as f:
auth_key = f.read()
except Exception as e:
raise ImproperlyConfigured("The APNS auth key file at %r is not readable: %s" % (auth_key_filepath, e))
validate_private_key(auth_key)
if wrap_key:
        auth_key = wrap_private_key(auth_key) # Some have had issues with keys that aren't wrapped to 64 lines
self.team_id = team_id
self.bundle_id = bundle_id
self.auth_key = auth_key
self.auth_key_id = auth_key_id
self.force_proto = force_proto
self.host = SANDBOX_HOST if use_sandbox else PRODUCTION_HOST
def send_message(self, registration_id, alert, **kwargs):
return self._send_message(registration_id, alert, **kwargs)
def send_bulk_message(self, registration_ids, alert, **kwargs):
good_registration_ids = []
bad_registration_ids = []
with closing(self._create_connection()) as connection:
for registration_id in registration_ids:
try:
res = self._send_message(registration_id, alert, connection=connection, **kwargs)
good_registration_ids.append(registration_id)
except:
bad_registration_ids.append(registration_id)
if not bad_registration_ids:
return res
elif not good_registration_ids:
            raise BadDeviceToken("None of the registration ids were accepted. "
                "Rerun individual ids with ``send_message()`` "
                "to get more details about why")
else:
raise PartialBulkMessage(
"Some of the registration ids were accepted. Rerun individual "
"ids with ``send_message()`` to get more details about why. "
"The ones that failed: \n:"
"{bad_string}\n"
"The ones that were pushed successfully: \n:"
"{good_string}\n".format(
bad_string="\n".join(bad_registration_ids),
good_string = "\n".join(good_registration_ids)
),
bad_registration_ids
)
def _create_connection(self):
return HTTP20Connection(self.host, force_proto=self.force_proto)
def _create_token(self):
token = jwt.encode(
{
'iss': self.team_id,
'iat': time.time()
},
self.auth_key,
algorithm= ALGORITHM,
headers={
'alg': ALGORITHM,
'kid': self.auth_key_id,
}
)
return token.decode('ascii')
def _send_message(self, registration_id, alert,
badge=None, sound=None, category=None, content_available=False,
mutable_content=False,
action_loc_key=None, loc_key=None, loc_args=[], extra={},
identifier=None, expiration=None, priority=10,
connection=None, auth_token=None, bundle_id=None, topic=None
):
topic = topic or bundle_id or self.bundle_id
if not topic:
raise ImproperlyConfigured(
'You must provide your bundle_id if you do not specify a topic'
)
data = {}
aps_data = {}
if action_loc_key or loc_key or loc_args:
alert = {"body": alert} if alert else {}
if action_loc_key:
alert["action-loc-key"] = action_loc_key
if loc_key:
alert["loc-key"] = loc_key
if loc_args:
alert["loc-args"] = loc_args
if alert is not None:
aps_data["alert"] = alert
if badge is not None:
aps_data["badge"] = badge
if sound is not None:
aps_data["sound"] = sound
if category is not None:
aps_data["category"] = category
if content_available:
aps_data["content-available"] = 1
if mutable_content:
aps_data["mutable-content"] = 1
data["aps"] = aps_data
data.update(extra)
# Convert to json, avoiding unnecessary whitespace with separators (keys sorted for tests)
json_data = json.dumps(data, separators=(",", ":"), sort_keys=True).encode("utf-8")
if len(json_data) > MAX_NOTIFICATION_SIZE:
raise PayloadTooLarge("Notification body cannot exceed %i bytes" % (MAX_NOTIFICATION_SIZE))
# If expiration isn't specified use 1 month from now
expiration_time = expiration if expiration is not None else int(time.time()) + 2592000
auth_token = auth_token or self._create_token()
request_headers = {
'apns-expiration': str(expiration_time),
            'apns-id': str(identifier or uuid.uuid4()),
'apns-priority': str(priority),
'apns-topic': topic,
'authorization': 'bearer {0}'.format(auth_token)
}
if connection:
response = self._send_push_request(connection, registration_id, json_data, request_headers)
else:
with closing(self._create_connection()) as connection:
response = self._send_push_request(connection, registration_id, json_data, request_headers)
return response
def _send_push_request(self, connection, registration_id, json_data, request_headers):
connection.request(
'POST',
'/3/device/{0}'.format(registration_id),
json_data,
headers=request_headers
)
response = connection.get_response()
if response.status != APNSResponse.Success:
body = json.loads(response.read().decode('utf-8'))
reason = body.get("reason")
if reason:
exceptions_module = importlib.import_module("gobiko.apns.exceptions")
try:
exception_class = getattr(exceptions_module, reason)
except AttributeError:
raise InternalException
else:
raise exception_class
return True
|
Python
| 0
|
@@ -7343,232 +7343,113 @@
-try:%0A exception_class = getattr(exceptions_module, reason)%0A except AttributeError:%0A raise InternalException%0A else:%0A raise exception_class
+# get exception class by name%0A raise getattr(exceptions_module, reason, InternalException)
%0A%0A
|
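The refactor relies on getattr's three-argument form: the default is returned when the attribute is missing, so an APNs reason string with no matching exception class falls back to InternalException without a try/except. The patched lines, decoded:

# get exception class by name
raise getattr(exceptions_module, reason, InternalException)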
06f78c21e6b7e3327244e89e90365169f4c32ea1
|
Fix style issues raised by pep8.
|
calaccess_campaign_browser/api.py
|
calaccess_campaign_browser/api.py
|
from tastypie.resources import ModelResource, ALL
from .models import Filer, Filing
from .utils.serializer import CIRCustomSerializer
class FilerResource(ModelResource):
class Meta:
queryset = Filer.objects.all()
serializer = CIRCustomSerializer()
filtering = { 'filer_id_raw': ALL }
excludes = [ 'id' ]
class FilingResource(ModelResource):
class Meta:
queryset = Filing.objects.all()
serializer = CIRCustomSerializer()
filtering = { 'filing_id_raw': ALL }
excludes = [ 'id' ]
|
Python
| 0
|
@@ -280,25 +280,24 @@
iltering = %7B
-
'filer_id_ra
@@ -295,33 +295,32 @@
ler_id_raw': ALL
-
%7D%0A exclud
@@ -325,23 +325,22 @@
udes = %5B
-
'id'
-
%5D
+%0A
%0A%0Aclass
@@ -490,17 +490,16 @@
ring = %7B
-
'filing_
@@ -510,17 +510,16 @@
aw': ALL
-
%7D%0A
@@ -536,12 +536,10 @@
= %5B
-
'id'
-
%5D%0A
|
a473b2cb9af95c1296ecae4d2138142f2be397ee
|
Add variant extension in example script
|
examples/variants.py
|
examples/variants.py
|
#!/usr/bin/env python
# -*- coding: utf8 - *-
from __future__ import print_function, unicode_literals
from cihai.bootstrap import bootstrap_unihan
from cihai.core import Cihai
def variant_list(unihan, field):
for char in unihan.with_fields(field):
print("Character: {}".format(char.char))
for var in char.untagged_vars(field):
print(var)
def script(unihan_options={}):
"""Wrapped so we can test in tests/test_examples.py"""
print("This example prints variant character data.")
c = Cihai()
c.add_dataset('cihai.unihan.Unihan', namespace='unihan')
if not c.sql.is_bootstrapped: # download and install Unihan to db
bootstrap_unihan(c.sql.metadata, options=unihan_options)
c.sql.reflect_db() # automap new table created during bootstrap
print("## ZVariants")
variant_list(c.unihan, "kZVariant")
print("## kSemanticVariant")
variant_list(c.unihan, "kSemanticVariant")
print("## kSpecializedSemanticVariant")
variant_list(c.unihan, "kSpecializedSemanticVariant")
if __name__ == '__main__':
script()
|
Python
| 0
|
@@ -804,16 +804,97 @@
tstrap%0A%0A
+ c.unihan.add_extension('cihai.unihan.UnihanVariants', namespace='variants')%0A%0A
prin
|
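Decoded, the diff registers the variants extension right after the bootstrap block, before any variant_list call; the UnihanVariants path is taken verbatim from the patch:

c.add_dataset('cihai.unihan.Unihan', namespace='unihan')
if not c.sql.is_bootstrapped:  # download and install Unihan to db
    bootstrap_unihan(c.sql.metadata, options=unihan_options)
    c.sql.reflect_db()  # automap new table created during bootstrap

c.unihan.add_extension('cihai.unihan.UnihanVariants', namespace='variants')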
485d33f07bfcc10b9fa460331e84b41ecfc89dc1
|
change some plotting parameters and default to testing some of the more advanced fiteach features
|
examples/n2hp_cube_example.py
|
examples/n2hp_cube_example.py
|
import astropy
import pyspeckit
import os
import astropy.units as u
import warnings
from astropy import wcs
if not os.path.exists('n2hp_cube.fit'):
import astropy.utils.data as aud
from astropy.io import fits
try:
f = aud.download_file('ftp://cdsarc.u-strasbg.fr/pub/cats/J/A%2BA/472/519/fits/opha_n2h.fit')
except Exception as ex:
# this might be any number of different timeout errors (urllib2.URLError, socket.timeout, etc)
# travis-ci can't handle ftp:
# https://blog.travis-ci.com/2018-07-23-the-tale-of-ftp-at-travis-ci
print("Failed to download from ftp. Exception was: {0}".format(ex))
f = aud.download_file('http://cdsarc.u-strasbg.fr/ftp/cats/J/A+A/472/519/fits/opha_n2h.fit')
with fits.open(f) as ff:
ff[0].header['CUNIT3'] = 'm/s'
for kw in ['CTYPE4','CRVAL4','CDELT4','CRPIX4','CROTA4']:
if kw in ff[0].header:
del ff[0].header[kw]
ff.writeto('n2hp_cube.fit')
# Load the spectral cube cropped in the middle for efficiency
with warnings.catch_warnings():
warnings.filterwarnings('ignore', category=wcs.FITSFixedWarning)
spc = pyspeckit.Cube('n2hp_cube.fit')[:,25:29,12:16]
# Set the velocity convention: in the future, this may be read directly from
# the file, but for now it cannot be.
spc.xarr.refX = 93176265000.0*u.Hz
spc.xarr.velocity_convention = 'radio'
spc.xarr.convert_to_unit('km/s')
# Register the fitter
# The N2H+ fitter is 'built-in' but is not registered by default; this example
# shows how to register a fitting procedure
# 'multi' indicates that it is possible to fit multiple components and a
# background will not automatically be fit. 4 is the number of parameters in the
# model (excitation temperature, optical depth, line center, and line width)
spc.Registry.add_fitter('n2hp_vtau', pyspeckit.models.n2hp.n2hp_vtau_fitter, 4)
# Get a measurement of the error per pixel
errmap = spc.slice(20, 28, unit='km/s').cube.std(axis=0)
# A good way to write a cube fitter is to have it load from disk if the cube
# fit was completed successfully in the past
if os.path.exists('n2hp_fitted_parameters.fits'):
spc.load_model_fit('n2hp_fitted_parameters.fits', npars=4, npeaks=1)
else:
# Run the fitter
# Estimated time to completion ~ 2 minutes
spc.fiteach(fittype='n2hp_vtau',
                guesses=[5,0.5,3,1], # Tex=5K, tau=0.5, v_center=3, width=1 km/s
signal_cut=3, # minimize the # of pixels fit for the example
start_from_point=(2,2), # start at a pixel with signal
errmap=errmap,
)
# There are a huge number of parameters for the fiteach procedure. See:
# http://pyspeckit.readthedocs.org/en/latest/example_nh3_cube.html
# http://pyspeckit.readthedocs.org/en/latest/cubes.html?highlight=fiteach#pyspeckit.cubes.SpectralCube.Cube.fiteach
#
# Unfortunately, a complete tutorial on this stuff is on the to-do list;
# right now the use of many of these parameters is at a research level.
# However, pyspeckit@gmail.com will support them! They are being used
# in current and pending publications
# Save the fitted parameters to a FITS file, and overwrite one if one exists
spc.write_fit('n2hp_fitted_parameters.fits', overwrite=True)
# Show an integrated image
spc.mapplot()
# you can click on any pixel to see its spectrum & fit
# plot one of the fitted spectra
spc.plot_spectrum(2, 2, plot_fit=True)
# spc.parcube[:,27,14] = [ 14.82569198, 1.77055642, 3.15740051, 0.16035407]
# Note that the optical depth is the "total" optical depth, which is
# distributed among 15 hyperfine components. You can see this in
# pyspeckit.spectrum.models.n2hp.line_strength_dict
# As a sanity check, you can see that the brightest line has 0.259 of the total
# optical depth, so the peak line brightness is:
# (14.825-2.73) * (1-np.exp(-1.77 * 0.259)) = 4.45
# which matches the peak of 4.67 pretty well
# Show an image of the best-fit velocity
spc.mapplot.plane = spc.parcube[2,:,:]
spc.mapplot(estimator=None)
# running in script mode, the figures won't show by default on some systems
# import pylab as pl
# pl.draw()
# pl.show()
|
Python
| 0
|
@@ -2608,16 +2608,89 @@
errmap,%0A
+ use_neighbor_as_guess=True,%0A multicore=4,%0A
@@ -3382,36 +3382,71 @@
an i
-ntegrated image%0Aspc.mapplot(
+mage of the fitted tex%0Aspc.mapplot(estimator=1, vmin=0, vmax=10
)%0A#
|
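As patched, the fiteach call picks up two further options, both taken from the diff (their semantics are summarized from the pyspeckit docs linked in the script):

spc.fiteach(fittype='n2hp_vtau',
            guesses=[5, 0.5, 3, 1],   # Tex=5K, tau=0.5, v_center=3, width=1 km/s
            signal_cut=3,
            start_from_point=(2, 2),
            errmap=errmap,
            use_neighbor_as_guess=True,  # seed each pixel's fit from a fitted neighbor
            multicore=4,                 # spread the per-pixel fits over 4 cores
            )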
eeebe264c4d873369f3d24b2e7b676e004eb6671
|
Fix path bug in update_source.
|
neuroimaging/externals/pynifti/utils/update_source.py
|
neuroimaging/externals/pynifti/utils/update_source.py
|
#!/usr/bin/env python
"""Copy source files from pynifti git directory into nipy source directory.
We only want to copy the files necessary to build pynifti and the nifticlibs,
and use them within nipy. We will not copy docs, tests, etc...
Pynifti should be build before this script is run so swig generates the
wrapper for nifticlib. We do not want swig as a dependency for nipy.
"""
from os import mkdir
from os.path import join, exists
from shutil import copy2 as copy
"""
The pynifti source should sit at the same directory level as nipy-trunk
Ex:
/Users/cburns/src/nipy
/Users/cburns/src/pynifti
"""
src_dir = '../../../../../pynifti'
# Destination directory is the top-level externals/pynifti directory
dst_dir = '..'
assert exists(src_dir)
copy(join(src_dir, 'AUTHOR'), join(dst_dir, 'AUTHOR'))
copy(join(src_dir, 'COPYING'), join(dst_dir, 'COPYING'))
# pynifti source and swig wrappers
nifti_list = ['niftiformat.py', 'niftiimage.py', 'utils.py',
'nifticlib.py', 'nifticlib_wrap.c']
nifti_src = join(src_dir, 'nifti')
nifti_dst = join(dst_dir, 'nifti')
if not exists(nifti_dst):
mkdir(nifti_dst)
def copynifti(filename):
copy(join(nifti_src, filename), join(nifti_dst, filename))
for nf in nifti_list:
copynifti(nf)
# nifticlib sources
nifticlib_list = ['LICENSE', 'README', 'nifti1.h', 'nifti1_io.c',
'nifti1_io.h', 'znzlib.c', 'znzlib.h']
nifticlib_src = join(src_dir, '3rd', 'nifticlibs')
nifticlib_dst = join(nifti_dst, 'nifticlibs')
if not exists(nifticlib_dst):
mkdir(nifticlib_dst)
def copynifticlib(filename):
copy(join(nifticlib_src, filename), join(nifticlib_dst, filename))
for nf in nifticlib_list:
copynifticlib(nf)
|
Python
| 0
|
@@ -436,16 +436,28 @@
, exists
+, expanduser
%0Afrom sh
@@ -619,16 +619,17 @@
fti%0A%22%22%22%0A
+%0A
src_dir
@@ -634,23 +634,25 @@
r =
-'../../../../..
+expanduser('~/src
/pyn
@@ -656,16 +656,17 @@
pynifti'
+)
%0A%0A# Dest
|
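The path bug: src_dir climbed five '..' levels from the script's location, which only resolved correctly from one working directory. Decoded, the patch pins it to the layout the docstring describes, assuming the pynifti checkout lives under ~/src:

from os.path import expanduser

# e.g. /Users/cburns/src/pynifti in the docstring's example layout
src_dir = expanduser('~/src/pynifti')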
3444a24078f91461f8aed7dfec153bb370f0784c
|
version bump
|
ema_workbench/__init__.py
|
ema_workbench/__init__.py
|
from __future__ import (absolute_import)
from . import analysis
from . import em_framework
from .em_framework import (Model, RealParameter, CategoricalParameter,
IntegerParameter, perform_experiments, optimize,
ScalarOutcome, TimeSeriesOutcome, Constant,
Scenario, Policy, MultiprocessingEvaluator,
IpyparallelEvaluator, ReplicatorModel)
from . import util
from .util import save_results, load_results, ema_logging, EMAError
__version__ = '0.9.5'
|
Python
| 0.000001
|
@@ -554,6 +554,6 @@
0.9.
-5
+9
'
|
ccb6728111a3142830bd4b3fccb8a956002013f0
|
Update example to remove upload, not relevant for plotly!
|
examples/plotly_datalogger.py
|
examples/plotly_datalogger.py
|
from pymoku import Moku, MokuException
from pymoku.instruments import *
import pymoku.plotly_support as pmp
import time, logging, traceback
logging.basicConfig(format='%(asctime)s:%(name)s:%(levelname)s::%(message)s')
logging.getLogger('pymoku').setLevel(logging.DEBUG)
# Use Moku.get_by_serial() or get_by_name() if you don't know the IP
m = Moku.get_by_name('example')
i = m.discover_instrument()
if i is None or i.type != 'oscilloscope':
print "No or wrong instrument deployed"
i = Oscilloscope()
m.attach_instrument(i)
else:
print "Attached to existing Oscilloscope"
linespec = {
'shape' : 'spline',
'width' : '2'
}
try:
i.set_defaults()
i.set_samplerate(10)
i.set_xmode(OSC_ROLL)
i.commit()
if i.datalogger_busy():
i.datalogger_stop()
pmp.stream_init(m, 'benizl.anu', 'na8qic5nqw', 'kdi5h54dhl', 'v7qd9o6bcq', line=linespec)
i.datalogger_start(start=0, duration=60*10, filetype='plot')
print "Plotly URL is: %s" % pmp.stream_url(m)
while True:
time.sleep(1)
trems, treme = i.datalogger_remaining()
samples = i.datalogger_samples()
print "Captured (%d samples); %d seconds from start, %d from end" % (samples, trems, treme)
# TODO: Symbolic constants
if i.datalogger_completed():
break
e = i.datalogger_error()
if e:
print "Error occured: %s" % e
i.datalogger_stop()
i.datalogger_upload()
except Exception as e:
print e
finally:
i.datalogger_stop()
m.close()
|
Python
| 0
|
@@ -263,13 +263,12 @@
ing.
-DEBUG
+INFO
)%0A%0A#
@@ -873,16 +873,17 @@
t(start=
+1
0, durat
@@ -892,10 +892,8 @@
n=60
-*1
0, f
@@ -1297,85 +1297,48 @@
%25 e%0A
-%0A%09i.datalogger_stop()%0A%09i.datalogger_upload()%0A%0Aexcept Exception as e:%0A%09print e
+except Exception:%0A%09traceback.print_exc()
%0Afin
|
09bd40bc8d29fab157630d6411aa8316148a10d6
|
Fix indentation bug
|
src/backend.py
|
src/backend.py
|
import os
import logging
import imp
import translation
#from mpi4py import MPI
class Backend(object):
def __init__(self, config_file):
if(config_file is None):
# Try to load an example configuration file
config_file = os.path.abspath(os.path.dirname(__file__)+
"/../examples/cxitut13/conf.py")
logging.warning("No configuration file given! "
"Loading example configuration from %s" % (config_file))
self.backend_conf = imp.load_source('backend_conf', config_file)
self.translator = translation.init_translator(self.backend_conf.state)
print 'Starting backend...'
def mpi_init(self):
comm = MPI.COMM_WORLD
self.rank = comm.Get_rank()
        print "MPI rank %d inited" % self.rank
def start(self):
self.backend_conf.state['_running'] = True
while(self.backend_conf.state['_running']):
evt = self.translator.nextEvent()
self.backend_conf.onEvent(evt)
|
Python
| 0.000019
|
@@ -516,36 +516,32 @@
))%0A %0A
-
self.backend_con
@@ -589,20 +589,16 @@
g_file)%0A
-
@@ -668,20 +668,16 @@
.state)%0A
-
|
04ca95ea717ca3ab1880797e041fb342e82ef26e
|
Use gpkg keywords to identify Guild packages
|
guild/commands/packages_impl.py
|
guild/commands/packages_impl.py
|
# Copyright 2017-2018 TensorHub, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import absolute_import
from __future__ import division
import logging
import os
from guild import cli
from guild import namespace
from guild import package
from guild import pip_util
from guild import util
log = logging.getLogger("guild")
INTERNAL_PACKAGES = ["guildai"]
def list_packages(args):
installed = pip_util.get_installed()
scope_filtered = [pkg for pkg in installed if _filter_scope(pkg, args)]
formatted = [_format_pkg(pkg) for pkg in scope_filtered]
filtered = [pkg for pkg in formatted if _filter_model(pkg, args)]
cli.table(filtered, cols=["name", "version", "summary"], sort=["name"])
def _filter_scope(pkg, args):
return (
pkg.project_name not in INTERNAL_PACKAGES
and (args.all or package.is_gpkg(pkg.project_name))
)
def _format_pkg(pkg):
return {
"name": pkg.project_name,
"summary": _pkg_summary(pkg),
"version": pkg.version,
}
def _pkg_summary(pkg):
try:
metadata_lines = pkg.get_metadata_lines("METADATA")
except IOError:
# no METADATA
metadata_lines = []
# For efficiency, just look at the first few lines for Summary
for i, line in enumerate(metadata_lines):
if line[:9] == "Summary: ":
return line[9:]
if i == 5:
break
return ""
def _filter_model(pkg, args):
return util.match_filters(args.terms, [pkg["name"], pkg["summary"]])
def install_packages(args):
for reqs, index_urls in _installs(args.packages):
try:
pip_util.install(
reqs,
index_urls=index_urls,
upgrade=args.upgrade or args.reinstall,
pre_releases=args.pre,
no_cache=args.no_cache,
no_deps=args.no_deps,
reinstall=args.reinstall,
target=args.target)
except pip_util.InstallError as e:
cli.error(str(e))
def _installs(pkgs):
index_urls = {}
for pkg in pkgs:
if os.path.exists(pkg):
index_urls.setdefault(None, []).append(pkg)
else:
info = _pip_info(pkg)
urls_key = "\n".join(info.install_urls)
index_urls.setdefault(urls_key, []).append(info.project_name)
return [
(reqs, urls_key.split("\n") if urls_key else [])
for urls_key, reqs in index_urls.items()
]
def _pip_info(pkg):
try:
return namespace.pip_info(pkg)
except namespace.NamespaceError as e:
terms = " ".join(pkg.split("/")[1:])
cli.error(
"unsupported namespace %s in '%s'\n"
"Try 'guild search %s -a' to find matching packages."
% (e.value, pkg, terms))
def uninstall_packages(args):
pip_util.uninstall(args.packages, dont_prompt=args.yes)
def package_info(args):
for i, (project, pkg) in enumerate(_iter_project_names(args.packages)):
if i > 0:
cli.out("---")
exit_status = pip_util.print_package_info(
project,
verbose=args.verbose,
show_files=args.files)
if exit_status != 0:
log.warning("unknown package %s", pkg)
def _iter_project_names(pkgs):
for pkg in pkgs:
try:
ns, name = namespace.split_name(pkg)
except namespace.NamespaceError:
log.warning("unknown namespace in '%s', ignoring", pkg)
else:
yield ns.pip_info(name).project_name, pkg
|
Python
| 0
|
@@ -726,34 +726,8 @@
ace%0A
-from guild import package%0A
from
@@ -811,41 +811,8 @@
%22)%0A%0A
-INTERNAL_PACKAGES = %5B%22guildai%22%5D%0A%0A
def
@@ -877,30 +877,30 @@
d()%0A
-scope_filtered
+guild_packages
= %5Bpkg
@@ -924,38 +924,27 @@
led if _
-filter_scope(pkg, args
+is_gpkg(pkg
)%5D%0A f
@@ -987,22 +987,22 @@
in
-scope_filtered
+guild_packages
%5D%0A
@@ -1155,329 +1155,143 @@
ef _
-filter_scope(pkg, args):%0A return (%0A pkg.project_name not in INTERNAL_PACKAGES%0A and (args.all or package.is_gpkg(pkg.project_name))%0A )%0A%0Adef _format_pkg(pkg):%0A return %7B%0A %22name%22: pkg.project_name,%0A %22summary%22: _pkg_summary(pkg),%0A %22version%22: pkg.version,%0A %7D%0A%0Adef _pkg_summary(pkg
+is_gpkg(pkg):%0A keywords = _pkg_metadata(pkg, %22Keywords%22) or %22%22%0A return %22gpkg%22 in keywords.split(%22 %22)%0A%0Adef _pkg_metadata(pkg, name
):%0A
@@ -1436,78 +1436,88 @@
-# For efficiency, just look at the first few lines for Summary
+line_prefix = %22%7B%7D: %22.format(name)%0A len_line_prefix = len(line_prefix)
%0A for
i,
@@ -1512,19 +1512,16 @@
%0A for
- i,
line in
@@ -1521,26 +1521,16 @@
line in
-enumerate(
metadata
@@ -1535,17 +1535,16 @@
ta_lines
-)
:%0A
@@ -1558,105 +1558,262 @@
ne%5B:
-9%5D == %22Summary: %22:%0A return line%5B9:%5D%0A if i == 5:%0A break%0A return %22%22
+len_line_prefix%5D == line_prefix:%0A return line%5Blen_line_prefix:%5D%0A return None%0A%0Adef _format_pkg(pkg):%0A return %7B%0A %22name%22: pkg.project_name,%0A %22summary%22: _pkg_metadata(pkg, %22Summary%22) or %22%22,%0A %22version%22: pkg.version,%0A %7D
%0A%0Ade
|
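Decoded from the diff, the new predicate replaces both the INTERNAL_PACKAGES list and the --all scope filter: a distribution counts as a Guild package exactly when its METADATA Keywords field contains the token gpkg. (_pkg_metadata is the line-scanning helper the same patch introduces.)

def _is_gpkg(pkg):
    keywords = _pkg_metadata(pkg, "Keywords") or ""
    return "gpkg" in keywords.split(" ")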
e7cab954d320d8ff97c1c422a0f875c924c7b2df
|
fix null-pointer exception
|
scripts/experiment.py
|
scripts/experiment.py
|
#!/usr/bin/python
# ------------------------------------------------------------------------------
#
# Run experiment
#
# Author: Stefan Heule <sheule@cs.stanford.edu>
#
# ------------------------------------------------------------------------------
import sys
import os
import time
import argparse
import json
import threading
import subprocess
import shutil
import re
import signal
# ------------------------------------------
# main entry point
# ------------------------------------------
def get_time():
return time.strftime("%Y-%m-%d %H:%M:%S", time.gmtime())
def main():
parser = argparse.ArgumentParser(description='Run synthesis experiment.')
parser.add_argument('-n', type=int, help='Number of repetitions', default=5)
parser.add_argument('--filter', type=str, help='Filter which experiments to run', default="")
parser.add_argument('-r', '--run', help='Only run the first experiment', action='store_true')
parser.add_argument('--verify', help='Verify that all experiments are successful at least some of the time', action='store_true')
argv = parser.parse_args()
workdir = os.path.abspath(os.path.dirname(__file__) + "/../tests")
n = argv.n
only_run = argv.run
verify = argv.verify
if only_run and verify:
print "Cannot both --run and --verify"
sys.exit(1)
out = ""
if not only_run:
out = workdir + "/out"
if os.path.exists(out):
shutil.rmtree(out)
os.mkdir(out)
categories = []
for f in os.listdir(workdir):
if os.path.isfile(workdir + "/" + f):
categories.append(f)
line = "-" * 80
for e in categories:
category = e[0:-5].replace("/", "-")
try:
examples = json.loads(open(workdir + "/" + e).read())
except ValueError as ex:
print "Failed to parse configuration: " + str(ex)
sys.exit(1)
for example in examples:
title = example['name']
if argv.filter != "" and re.search(argv.filter, title, re.UNICODE) is None:
continue
name = title[title.rfind(".")+1:]
function = "\n".join(example['function'])
argnames = example['argnames']
arguments = example['arguments']
if not only_run:
print line
print "Experiment: " + title
args = '"' + ('" "'.join(arguments)) + '"'
succ_time = 0.0
succ_count = 0
succ_iterations = 0
for i in range(n):
more = ""
if "loop" in example:
more = " --loop " + str(example['loop'])
if only_run:
command = './model-synth synth ' + more + ' --cleanup 1000 --iterations 100000000 "%s" "%s" %s' % (argnames, function, args)
print command
os.system(command)
sys.exit(0)
sys.stdout.write(' Running try #' + str(i+1))
sys.stdout.flush()
t = time.time()
command = './model-synth synth ' + more + ' --out "%s/%s-%s-%d.js" "%s" "%s" %s' % (out, category, name, i, argnames, function, args)
val, output = execute(command, 60)
elapsed_time = time.time() - t
print ". Exit status %d after %.2f seconds." % (val, elapsed_time)
if val == 0:
succ_count += 1
if verify:
break
succ_time += elapsed_time
iters = int([m.group(1) for m in re.finditer('Found in ([0-9]+) iteration', output)][-1])
succ_iterations += iters
if verify:
if succ_count == 0:
print "ERROR: didn't succeed :("
else:
print "Success rate: %.2f%%" % (float(succ_count) * 100.0/float(n))
print "Average time until success: %.2f seconds" % (succ_time / float(succ_count))
print "Average iterations until success: %.1f" % (float(succ_iterations) / float(succ_count))
print line
# print a string to a file
def fprint(f, s):
f = open(f, 'w')
f.write(s)
f.close()
# print a string to a file
def fprinta(f, s):
f = open(f, 'a')
f.write(s)
f.close()
def execute(cmd, timeout=100000000):
out = ""
try:
out = subprocess.check_output("timeout " + str(timeout) + "s " + cmd, shell=True)
return (0, out)
except subprocess.CalledProcessError as ex:
return (ex.returncode, ex.output)
# from http://stackoverflow.com/questions/2281850/timeout-function-if-it-takes-too-long-to-finish
class timeout_c:
def __init__(self, seconds=1, error_message='Timeout'):
self.seconds = seconds
self.error_message = error_message
def handle_timeout(self, signum, frame):
raise TimeoutError(self.error_message)
def __enter__(self):
signal.signal(signal.SIGALRM, self.handle_timeout)
signal.alarm(self.seconds)
def __exit__(self, type, value, traceback):
signal.alarm(0)
class TimeoutError(Exception):
pass
if __name__ == '__main__':
main()
|
Python
| 0.00006
|
@@ -3518,16 +3518,45 @@
oat(n))%0A
+ if succ_count %3E 0:%0A
@@ -3638,16 +3638,18 @@
count))%0A
+
|
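The "null-pointer exception" here is Python's ZeroDivisionError: when every try fails, succ_count stays 0 and both averages divide by it. Decoded, the patch leaves the success-rate line unconditional and guards the averages:

print "Success rate: %.2f%%" % (float(succ_count) * 100.0 / float(n))
if succ_count > 0:
    print "Average time until success: %.2f seconds" % (succ_time / float(succ_count))
    print "Average iterations until success: %.1f" % (float(succ_iterations) / float(succ_count))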
5c44ff83c32d95d4e60513a11ce25f29025599ed
|
Update test_rpi-daemon-gui.py
|
ci_scripts/test_rpi-daemon-gui.py
|
ci_scripts/test_rpi-daemon-gui.py
|
import os
import urllib2
import multiprocessing as mp
import neblio_ci_libs as nci
nci.setup_travis_or_gh_actions_env_vars()
working_dir = os.getcwd()
deploy_dir = os.path.join(os.environ['BUILD_DIR'],'deploy', '')
# If this is a Travis PR, bail out instead of just wasting 45 mins running
if (os.environ.get('TRAVIS_BUILD_DIR') is not None and os.environ['TRAVIS_PULL_REQUEST'] != 'false'):
print('Pull Requests are not built for RPi since ccache cannot be used!')
exit(0)
nci.mkdir_p(deploy_dir)
os.chdir(deploy_dir)
build_target = ''
build_target_alt = ''
if(os.environ['target_v'] == "rpi_daemon"):
build_target = 'nebliod'
build_target_alt = 'nebliod'
elif(os.environ['target_v'] == "rpi_wallet_test"):
build_target = 'tests-neblio-qt'
build_target_alt = 'tests-neblio-Qt'
else:
build_target = 'neblio-qt'
build_target_alt = 'neblio-Qt'
# Install docker
nci.call_with_err_code('curl -fsSL https://get.docker.com -o get-docker.sh && sudo sh get-docker.sh && rm get-docker.sh')
# Prepare qemu
nci.call_with_err_code('docker run --rm --privileged multiarch/qemu-user-static:register --reset')
# move .ccache folder to our deploy directory
nci.mkdir_p(os.path.join(os.environ['HOME'],'.ccache', ''))
nci.call_with_err_code('mv ' + os.path.join(os.environ['HOME'],'.ccache', '') + ' ' + os.path.join(deploy_dir,'.ccache', ''))
# Start Docker Container to Build nebliod or neblio-Qt
if (os.environ.get('TRAVIS_BUILD_DIR') is not None):
nci.call_with_err_code('timeout --signal=SIGKILL 42m sudo docker run -e BRANCH=' + os.environ['BRANCH'] + ' -e BUILD=' + build_target + ' -v ' + os.environ['BUILD_DIR'] + ':/root/vol -t neblioteam/nebliod-build-ccache-rpi')
else:
nci.call_with_err_code('sudo docker run -e BRANCH=' + os.environ['BRANCH'] + ' -e BUILD=' + build_target + ' -v ' + os.environ['BUILD_DIR'] + ':/root/vol -t neblioteam/nebliod-build-ccache-rpi')
nci.call_with_err_code('sleep 15 && sudo docker kill $(sudo docker ps -q);exit 0')
# move .ccache folder back to travis ccache dir
nci.call_with_err_code('mv ' + os.path.join(deploy_dir,'.ccache', '') + ' ' + os.path.join(os.environ['HOME'],'.ccache', ''))
file_name = '$(date +%Y-%m-%d)---' + os.environ['BRANCH'] + '-' + os.environ['COMMIT'][:7] + '---' + build_target_alt + '---RPi-raspbian-stretch.tar.gz'
if (os.environ.get('TRAVIS_BUILD_DIR') is not None):
# since RPi jobs are run several times we need to be creative in our deployment as only the original job can post to GitHub Releases.
# So, start the original job via the GitHub tag and pass its job ID to every subsequent job. Once the build is complete, the final build job
# will restart the original job, which will handle the deployment to GitHub releases.
deploy_job_id = os.environ.get('TRAVIS_DEPLOY_JOB_ID', '0')
travis_tag = os.environ.get('TRAVIS_TAG', '')
# if travis tag is populated, this is the job ID we want
if (travis_tag != ''):
deploy_job_id = os.environ['TRAVIS_JOB_ID']
# Check if binary exists before trying to package it.
# If it does not exist we had a build timeout
if(os.path.isfile(build_target)):
nci.call_with_err_code('tar -zcvf "' + file_name + '" ' + build_target)
nci.call_with_err_code('rm -f ' + build_target)
nci.call_with_err_code('echo "Binary package at ' + deploy_dir + file_name + '"')
# if we are just running tests, delete the deploy package
if(os.environ['target_v'] == "rpi_wallet_test"):
nci.call_with_err_code('rm -f ' + deploy_dir + file_name)
if (os.environ.get('TRAVIS_BUILD_DIR') is not None and travis_tag == '' and deploy_job_id != '0'):
nci.call_with_err_code('echo "Restarting our deploy job. Job ID: ' + deploy_job_id + '"')
nci.call_with_err_code('curl -X POST -H "Content-Type: application/json" -H "Travis-API-Version: 3" -H "Accept: application/json" -H "Authorization: token ' + os.environ["TRAVIS_API_TOKEN"] + '" -d \'{}\' \'https://api.travis-ci.org/job/' + deploy_job_id + '/restart\'')
elif(os.environ.get('TRAVIS_BUILD_DIR') is not None):
nci.call_with_err_code('echo "Binary not found, likely due to a timeout, starting new job..."')
nci.call_with_err_code('curl -X POST -H "Content-Type: application/json" -H "Travis-API-Version: 3" -H "Accept: application/json" -H "Authorization: token ' + os.environ["TRAVIS_API_TOKEN"] + '" -d \'{"request":{"message":"RPi ' + build_target_alt + ' Build Restart","branch":"' + os.environ["TRAVIS_BRANCH"] + '","config":{"merge_mode":"deep_merge","env":{"global":{"TRAVIS_DEPLOY_JOB_ID":"' + deploy_job_id + '"},"matrix":["target_v=' + os.environ["target_v"] + '"]}}}}\' \'https://api.travis-ci.org/repo/NeblioTeam%2Fneblio/requests\'')
|
Python
| 0.000001
|
@@ -2014,16 +2014,102 @@
che dir%0A
+if (os.environ.get('TRAVIS_BUILD_DIR') is not None):%0A # move ccache dir for travis%0A
nci.call
@@ -2225,16 +2225,178 @@
e', ''))
+%0Aelse:%0A # move ccache for github actions%0A nci.call_with_err_code('mv ' + os.path.join(deploy_dir,'.ccache', '') + ' ' + os.path.join(working_dir,'.ccache', ''))
%0A%0Afile_n
@@ -3829,17 +3829,18 @@
'0'):%0A
-%09
+
nci.call
@@ -3927,9 +3927,10 @@
)%0A
-%09
+
nci.
|
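Decoded, the patch branches the ccache restore on the CI host, reusing the same TRAVIS_BUILD_DIR probe as the build step; working_dir is the os.getcwd() captured at the top of the script:

if (os.environ.get('TRAVIS_BUILD_DIR') is not None):
    # move ccache dir for travis
    nci.call_with_err_code('mv ' + os.path.join(deploy_dir, '.ccache', '') + ' ' + os.path.join(os.environ['HOME'], '.ccache', ''))
else:
    # move ccache for github actions
    nci.call_with_err_code('mv ' + os.path.join(deploy_dir, '.ccache', '') + ' ' + os.path.join(working_dir, '.ccache', ''))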
a333ca8964132b3f1830c2ceda8cbb805df78999
|
Fix locale initialization
|
product/runtime/src/main/python/java/android/__init__.py
|
product/runtime/src/main/python/java/android/__init__.py
|
"""Copyright (c) 2018 Chaquo Ltd. All rights reserved."""
from importlib import reload
import os
from os.path import exists, join
import sys
import traceback
from . import stream, importer
def initialize(context, build_json, app_path):
stream.initialize()
importer.initialize(context, build_json, app_path)
initialize_stdlib(context)
def initialize_stdlib(context):
from com.chaquo.python import Common
initialize_sys(context, Common)
initialize_os(context)
initialize_tempfile(context)
initialize_ssl(context)
initialize_ctypes(context)
def initialize_sys(context, Common):
if sys.version_info[0] >= 3:
sys.abiflags = Common.PYTHON_SUFFIX[len(Common.PYTHON_VERSION_SHORT):]
# argv defaults to not existing, which may crash some programs.
sys.argv = [""]
# executable defaults to "python" on 2.7, or "" on 3.6. But neither of these values (or
# None, which is mentioned in the documentation) will allow platform.platform() to run
# without crashing.
try:
sys.executable = os.readlink("/proc/{}/exe".format(os.getpid()))
except Exception:
# Can't be certain that /proc will work on all devices, so try to carry on.
traceback.print_exc()
sys.executable = ""
# Remove default paths (#5410).
invalid_paths = [p for p in sys.path
if not (exists(p) or p.startswith(importer.ASSET_PREFIX))]
for p in invalid_paths:
sys.path.remove(p)
def initialize_os(context):
# By default, os.path.expanduser("~") returns "/data", which is an unwritable directory.
# Make it return something more usable.
os.environ.setdefault("HOME", str(context.getFilesDir()))
def initialize_tempfile(context):
tmpdir = join(str(context.getCacheDir()), "chaquopy/tmp")
if not exists(tmpdir):
os.makedirs(tmpdir)
os.environ["TMPDIR"] = tmpdir
def initialize_ssl(context):
# OpenSSL actually does know the location of the system CA store on Android, but
# unfortunately there are multiple incompatible formats of that location, so we can't rely
# on it (https://blog.kylemanna.com/android/android-ca-certificates/).
os.environ["SSL_CERT_FILE"] = join(str(context.getFilesDir()), "chaquopy/cacert.pem")
    # hashlib may already have been imported during bootstrap: reload it now that the
# OpenSSL interface in `_hashlib` is on sys.path.
import hashlib
reload(hashlib)
def initialize_ctypes(context):
import ctypes.util
import sysconfig
# The standard implementation of find_library requires external tools, so will always fail
# on Android. I can't see any easy way of finding the absolute library pathname ourselves
# (there is no LD_LIBRARY_PATH on Android), but we can at least support the case where the
# user passes the return value of find_library to CDLL().
def find_library_override(name):
filename = "lib{}.so".format(name)
try:
ctypes.CDLL(filename)
except OSError:
return None
else:
return filename
ctypes.util.find_library = find_library_override
ctypes.pythonapi = ctypes.PyDLL(sysconfig.get_config_vars()["LDLIBRARY"])
|
Python
| 0.009032
|
@@ -416,16 +416,73 @@
t Common
+%0A%0A # These are ordered roughly from low to high level.
%0A ini
@@ -621,32 +621,63 @@
ctypes(context)%0A
+ initialize_locale(context)%0A
%0A%0Adef initialize
@@ -3317,8 +3317,352 @@
RARY%22%5D)%0A
+%0A%0Adef initialize_locale(context):%0A import locale%0A # Of the various encoding functions in test_android.py, this only affects %60getlocale%60. All%0A # the others are controlled by the LC_ALL environment variable (set in chaquopy_java.pyx),%0A # and are not modifiable after Python startup.%0A locale.setlocale(locale.LC_ALL, %22en_US.UTF-8%22)%0A
|
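Decoded, the patch appends initialize_locale to the initializer chain in initialize_stdlib and defines it at the bottom of the module:

def initialize_locale(context):
    import locale
    # Of the various encoding functions in test_android.py, this only
    # affects `getlocale`. All the others are controlled by the LC_ALL
    # environment variable (set in chaquopy_java.pyx), and are not
    # modifiable after Python startup.
    locale.setlocale(locale.LC_ALL, "en_US.UTF-8")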
d06c8486c8353a0775259f7342f61964b738f341
|
Add missing symbol.
|
sensibility/language/java/__init__.py
|
sensibility/language/java/__init__.py
|
#!/usr/bin/env python3
# -*- coding: UTF-8 -*-
# Copyright 2017 Eddie Antonio Santos <easantos@ualberta.ca>
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
import token
import tokenize
from io import BytesIO
from keyword import iskeyword
from pathlib import Path
from typing import (
Any, AnyStr, Callable, IO, Iterable, Optional, Tuple, Union,
overload,
)
import javalang # type: ignore
from javalang.parser import JavaSyntaxError # type: ignore
from javalang.tokenizer import LexerError # type: ignore
from .. import Language, SourceSummary
from ...lexical_analysis import Lexeme, Location, Position, Token
from ...vocabulary import Vocabulary
here = Path(__file__).parent
class Java(Language):
"""
Defines the Java 8 programming language.
"""
extensions = {'.java'}
vocabulary = Vocabulary.from_json_file(Path(__file__).parent /
'vocabulary.json')
def tokenize(self, source: Union[str, bytes, IO[bytes]]) -> Iterable[Token]:
tokens = javalang.tokenizer.tokenize(source)
for token in tokens:
loc = Location.from_string(token.value,
line=token.position[0],
column=token.position[1])
yield Token(name=type(token).__name__,
value=token.value,
start=loc.start, end=loc.end)
def check_syntax(self, source: Union[str, bytes]) -> bool:
try:
javalang.parse.parse(source)
return True
except (JavaSyntaxError, LexerError):
return False
def summarize_tokens(self, source: Iterable[Token]) -> SourceSummary:
toks = [tok for tok in source if tok.name != 'EndOfInput']
slines = set(line for tok in toks for line in tok.lines)
return SourceSummary(n_tokens=len(toks), sloc=len(slines))
def vocabularize_tokens(self, source: Iterable[Token]) -> Iterable[Tuple[Location, str]]:
for token in source:
yield token.location, java2sensibility(token)
RESERVED_WORDS = {
'abstract', 'assert', 'boolean', 'break', 'byte', 'case', 'catch',
'char', 'class', 'const', 'continue', 'default', 'do', 'double',
'else', 'enum', 'extends', 'final', 'finally', 'float', 'for', 'goto',
'if', 'implements', 'import', 'instanceof', 'int', 'interface', 'long',
'native', 'new', 'package', 'private', 'protected', 'public', 'return',
'short', 'static', 'strictfp', 'super', 'switch', 'synchronized',
'this', 'throw', 'throws', 'transient', 'try', 'void', 'volatile',
'while', 'abstract', 'default', 'final', 'native', 'private',
'protected', 'public', 'static', 'strictfp', 'synchronized',
'transient', 'volatile', 'boolean', 'byte', 'char', 'double', 'float',
'int', 'long', 'short', 'true', 'false', 'null'
}
SYMBOLS = {
'>>>=', '>>=', '<<=', '%=', '^=', '|=', '&=', '/=',
'*=', '-=', '+=', '<<', '--', '++', '||', '&&', '!=',
'>=', '<=', '==', '%', '^', '|', '&', '/', '*', '-',
'+', ':', '?', '~', '!', '<', '>', '=', '...', '->', '::',
'(', ')', '{', '}', '[', ']', ';', ',', '.'
}
CLOSED_CLASSES = {
'Keyword', 'Modifier', 'BasicType', 'Boolean', 'Null',
'Separator', 'Operator', 'Annotation', 'EndOfInput'
}
INTEGER_LITERALS = {
'Integer',
'DecimalInteger', 'OctalInteger', 'BinaryInteger', 'HexInteger',
}
FLOATING_POINT_LITERALS = {
'FloatingPoint',
'DecimalFloatingPoint', 'HexFloatingPoint',
}
STRING_LITERALS = {
'Character', 'String',
}
OPEN_CLASSES = (
INTEGER_LITERALS | FLOATING_POINT_LITERALS | STRING_LITERALS |
{'Identifier'}
)
def java2sensibility(lex: Lexeme) -> str:
# > Except for comments (§3.7), identifiers, and the contents of character
# > and string literals (§3.10.4, §3.10.5), all input elements (§3.5) in a
# > program are formed only from ASCII characters (or Unicode escapes (§3.3)
# > which result in ASCII characters).
# https://docs.oracle.com/javase/specs/jls/se7/html/jls-3.html
if lex.name == 'EndOfInput':
return '</s>'
if lex.name in CLOSED_CLASSES:
assert lex.value in RESERVED_WORDS | SYMBOLS
return lex.value
else:
assert lex.name in OPEN_CLASSES
if lex.name in INTEGER_LITERALS | FLOATING_POINT_LITERALS:
return '<NUMBER>'
elif lex.name in STRING_LITERALS:
return '<STRING>'
else:
assert lex.name == 'Identifier'
return '<IDENTIFIER>'
java: Language = Java()
|
Python
| 0.000017
|
@@ -3677,16 +3677,21 @@
',', '.'
+, '@'
%0A%7D%0ACLOSE
|
038509c11d029dd1bd0adef2c29f472cad5f0ea6
|
Fix a few problems.
|
scripts/find_error.py
|
scripts/find_error.py
|
#!/usr/bin/python
# -*- coding: utf-8 -*-
"""
Lexing error finder
~~~~~~~~~~~~~~~~~~~
For the source files given on the command line, display
the text where Error tokens are being generated, along
with some context.
:copyright: Copyright 2006-2010 by the Pygments team, see AUTHORS.
:license: BSD, see LICENSE for details.
"""
import sys, os
try:
import pygments
except ImportError:
# try parent path
sys.path.append(os.path.join(os.path.dirname(__file__), '..'))
from pygments.lexer import RegexLexer
from pygments.lexers import get_lexer_for_filename, get_lexer_by_name
from pygments.token import Error, Text, _TokenType
class DebuggingRegexLexer(RegexLexer):
"""Make the state stack, position and current match instance attributes."""
def get_tokens_unprocessed(self, text, stack=('root',)):
"""
Split ``text`` into (tokentype, text) pairs.
``stack`` is the initial stack (default: ``['root']``)
"""
self.pos = 0
tokendefs = self._tokens
self.statestack = list(stack)
statetokens = tokendefs[self.statestack[-1]]
while 1:
for rexmatch, action, new_state in statetokens:
self.m = m = rexmatch(text, self.pos)
if m:
if type(action) is _TokenType:
yield self.pos, action, m.group()
else:
for item in action(self, m):
yield item
self.pos = m.end()
if new_state is not None:
# state transition
if isinstance(new_state, tuple):
for state in new_state:
if state == '#pop':
self.statestack.pop()
elif state == '#push':
self.statestack.append(self.statestack[-1])
else:
self.statestack.append(state)
elif isinstance(new_state, int):
# pop
del self.statestack[new_state:]
elif new_state == '#push':
self.statestack.append(self.statestack[-1])
else:
assert False, 'wrong state def: %r' % new_state
statetokens = tokendefs[self.statestack[-1]]
break
else:
try:
if text[self.pos] == '\n':
# at EOL, reset state to 'root'
self.pos += 1
self.statestack = ['root']
statetokens = tokendefs['root']
yield self.pos, Text, u'\n'
continue
yield self.pos, Error, text[self.pos]
self.pos += 1
except IndexError:
break
def main(fn, lexer=None):
if lexer is not None:
lx = get_lexer_by_name(lexer)
else:
try:
lx = get_lexer_for_filename(os.path.basename(fn))
except ValueError:
try:
name, rest = fn.split('_', 1)
lx = get_lexer_by_name(name)
except ValueError:
raise AssertionError('no lexer found for file %r' % fn)
debug_lexer = False
# does not work for e.g. ExtendedRegexLexers
if lx.__class__.__bases__ == (RegexLexer,):
lx.__class__.__bases__ = (DebuggingRegexLexer,)
debug_lexer = True
lno = 1
text = file(fn, 'U').read()
text = text.strip('\n') + '\n'
text = text.decode('latin1')
ntext = []
states = []
def show_token(tok):
reprs = map(repr, tok)
print ' ' + reprs[1] + ' ' + ' ' * (29-len(reprs[1])) + reprs[0],
if debug_lexer:
print ' ' + ' ' * (29-len(reprs[0])) + repr(states[i]),
print
for type, val in lx.get_tokens(text):
lno += val.count('\n')
if type == Error:
print 'Error parsing', fn, 'on line', lno
print 'Previous tokens' + (debug_lexer and ' and states' or '') + ':'
for i in range(len(ntext) - num, len(ntext)):
show_token(ntext[i])
print 'Error token:'
l = len(repr(val))
print ' ' + repr(val),
if debug_lexer:
print ' ' * (60-l) + repr(lx.statestack),
print
print
return 1
ntext.append((type,val))
if debug_lexer:
states.append(lx.statestack[:])
if showall:
for tok in ntext:
show_token(tok)
return 0
num = 10
showall = False
lexer = None
if __name__ == '__main__':
import getopt
opts, args = getopt.getopt(sys.argv[1:], 'n:l:a')
for opt, val in opts:
if opt == '-n':
num = int(val)
elif opt == '-a':
showall = True
elif opt == '-l':
lexer = val
ret = 0
for f in args:
ret += main(f, lexer)
sys.exit(bool(ret))
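# Hedged usage sketch (hypothetical file names; flags as defined above:
# -n sets how many tokens of context to show, -a dumps every token,
# -l forces a lexer by name):
#
#     python find_error.py -n 5 broken_source.py
#     python find_error.py -l html -a page_fragment.html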
|
Python
| 0.000145
|
@@ -3831,21 +3831,22 @@
1')%0A
-ntext
+tokens
= %5B%5D%0A
@@ -3882,16 +3882,23 @@
oken(tok
+, state
):%0A
@@ -4088,12 +4088,8 @@
tate
-s%5Bi%5D
),%0A
@@ -4337,16 +4337,160 @@
) + ':'%0A
+ if showall:%0A for tok, state in zip(tokens, states):%0A show_token(tok, state)%0A else:%0A
@@ -4512,21 +4512,22 @@
nge(len(
-ntext
+tokens
) - num,
@@ -4535,17 +4535,22 @@
len(
-ntext)):%0A
+tokens)):%0A
@@ -4572,21 +4572,33 @@
w_token(
-ntext
+tokens%5Bi%5D, states
%5Bi%5D)%0A
@@ -4713,32 +4713,62 @@
if debug_lexer
+ and hasattr(lx, 'statestack')
:%0A
@@ -4876,21 +4876,22 @@
-ntext
+tokens
.append(
@@ -4942,80 +4942,201 @@
-states.append(lx.statestack%5B:%5D)%0A if showall:%0A for tok in ntext
+if hasattr(lx, 'statestack'):%0A states.append(lx.statestack%5B:%5D)%0A else:%0A states.append(None)%0A if showall:%0A for tok, state in zip(tokens, states)
:%0A
@@ -5159,16 +5159,23 @@
oken(tok
+, state
)%0A re
|
b75f46549f0a8354772e923873946ec9b7d7be95
|
Remove unneeded absolute pathnames from the arguments to open()
|
scraper.py
|
scraper.py
|
import requests
import os
from bs4 import BeautifulSoup
import sys
import json
def search_CL(bedrooms=None, minAsk=None, maxAsk=None, query=None):
u"""
Return content and encoding of a response to a query of CL.
Submits a request to http://seattle.craigslist.org/search/apa with
the given search parameters and returns the content and encoding of
the server's response.
Keyword arguments:
bedrooms: An int indicating the minimum number of bedrooms.
minAsk: An int indicating the minimum monthly rent.
maxAsk: An int indicating the maximum monthly rent.
query: A string representing other search terms 'parking', 'bus', etc.
"""
url = "http://seattle.craigslist.org/search/apa"
params = {}
for k, v in locals().items():
if v is not None:
params[k] = v
if not params:
raise ValueError(u"No keywords given")
else:
response = requests.get(url, params=params)
if response.ok:
return response.content, response.encoding
else:
response.raise_for_status()
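# Hedged usage sketch (requires network access; values are illustrative):
#
#     content, encoding = search_CL(minAsk=1000, maxAsk=1500, bedrooms=2)
#
# Only the non-None keyword arguments reach the query string, so the
# request above is sent as ?minAsk=1000&maxAsk=1500&bedrooms=2.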
def fetch_json_results(**kwargs):
u"""
Return content of a response to a json query of CL.
Submits a request to http://seattle.craigslist.org/jsonsearch/apa with
the given search parameters and returns the content of the
server's response. The search arguments need to match
search_CL()'s in order for this to collect the corresponding data.
Keyword arguments:
bedrooms: An int indicating the minimum number of bedrooms.
minAsk: An int indicating the minimum monthly rent.
maxAsk: An int indicating the maximum monthly rent.
query: A string representing other search terms 'parking', 'bus', etc.
"""
url = 'http://seattle.craigslist.org/jsonsearch/apa'
results = requests.get(url, params=kwargs)
results.raise_for_status()
return results.json()
def read_search_results(results='apartments.html'):
u"""Return the contents of a local html file."""
with open(os.getcwd() + '/' + results, 'r') as source:
return source.read(), 'utf-8'
def read_json_results(results='apartments.json'):
u"""Return the contents of a local json file."""
with open(os.getcwd() + '/' + results, 'r') as source:
json_string = source.read()
return json.loads(json_string)
def parse_source(body, encoding='utf-8'):
u"""Return HTML parsed by BeautifulSoup."""
return BeautifulSoup(body, from_encoding=encoding)
def extract_listings(parsed_html):
u"""
Yield list of dicts containing attributes of listed apartments.
Accepts BeautifulSoup parsed HTML. Searches and traverses
the parsed HTML for each listing and collects the link to,
description of, price, and size of each apartment.
Yield:
Dictionary containing apartment attributes.
"""
listings = parsed_html.find_all('p', class_="row")
for listing in listings:
link = listing.find('span', class_='pl').find('a')
price = listing.find('span', class_='price')
size = price.next_sibling.strip('\n-/')
this_listing = {
'pid': listing.attrs.get('data-pid', ''),
'link': link.attrs['href'],
'description': link.string.strip(),
'price': price.string.strip(),
'size': size
}
yield this_listing
def add_location(listing, search):
u"""
Merge latt/long search results into the listing's dictionary.
Accepts a dictionary representing a listing on CL and adds the
latitude and longitude specified for that listing in a
CL JSON search.
Return:
True: If listing's identifier (pid) is in the search output.
False: If listing's identifier (pid) is not in the search output.
"""
if listing['pid'] in search:
match = search[listing['pid']]
listing['location'] = {
'data-latitude': match.get('Latitude', ''),
'data-longitude': match.get('Longitude', '')
}
return True
return False
def add_address(listing):
u"""
Return the listing with an address from Google Maps based on lat/long.
Return:
Dictionary with a new key, 'address', that includes an address for the
listing's lat/long if it can be determined or the string 'unavailable' if
it can't.
"""
url = 'http://maps.googleapis.com/maps/api/geocode/json'
latlng = "{data-latitude},{data-longitude}".format(**listing['location'])
parameters = {'latlng': latlng, 'sensor': 'false'}
response = requests.get(url, params=parameters)
response.raise_for_status()
data = response.json()
if data['status'] == 'OK':
best = data['results'][0]
listing['address'] = best['formatted_address']
else:
listing['address'] = 'unavailable'
return listing
if __name__ == "__main__":
import pprint
if len(sys.argv) > 1 and sys.argv[1] == 'test':
response, encoding = read_search_results()
json_res = read_json_results()
else:
response, encoding = search_CL(minAsk=1000, maxAsk=1500, bedrooms=2)
with open('apartments.html', 'w') as outfile:
outfile.write(response)
json_res = fetch_json_results(minAsk=1000, maxAsk=1500, bedrooms=2)
with open('apartments.json', 'w') as outfile:
outfile.write(json.dumps(json_res))
parsed = parse_source(response, encoding)
search = {j['PostingID']: j for j in json_res[0]}
for listing in extract_listings(parsed):
if (add_location(listing, search)):
listing = add_address(listing)
pprint.pprint(listing)
|
Python
| 0
|
@@ -1980,36 +1980,16 @@
th open(
-os.getcwd() + '/' +
results,
@@ -2166,28 +2166,8 @@
pen(
-os.getcwd() + '/' +
resu
|
2e5f5fc689ee55f32556be69dcbf0672ea7fdbed
|
change deprecation warning
|
district42/json_schema/schema.py
|
district42/json_schema/schema.py
|
import warnings
from copy import deepcopy
from ..errors import DeclarationError
from .types import (Any, AnyOf, Array, ArrayOf, Boolean, Enum, Null, Number,
Object, OneOf, SchemaType, String, Timestamp, Undefined)
class Schema:
def ref(self, schema):
return deepcopy(schema)
def from_native(self, value):
if value is None:
return self.null
datatype = type(value)
if datatype is bool:
return self.boolean(value)
elif datatype is int:
return self.integer(value)
elif datatype is float:
return self.float(value)
elif datatype is str:
return self.string(value)
elif datatype is list:
return self.array([self.from_native(elem) for elem in value])
elif datatype is dict:
return self.object({k: self.from_native(v) for k, v in value.items()})
elif datatype is tuple:
return self.enum(*value)
raise DeclarationError('Unknown type "{}"'.format(datatype))
@property
def null(self):
return Null()
@property
def boolean(self):
return Boolean()
@property
def number(self):
return Number()
@property
def integer(self):
return Number().integer
@property
def float(self):
return Number().float
@property
def string(self):
return String()
@property
def timestamp(self):
return Timestamp()
@property
def array(self):
return Array()
@property
def array_of(self):
warnings.warn('deprecated', DeprecationWarning, stacklevel=2)
return ArrayOf()
@property
def object(self):
return Object()
@property
def any(self):
return Any()
@property
def any_of(self):
return AnyOf()
@property
def one_of(self):
return OneOf()
@property
def enum(self):
return Enum()
@property
def undefined(self):
return Undefined()
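# Hedged usage sketch: from_native dispatches on the native Python type,
# so a nested structure declares a matching nested schema (illustration only):
#
#     schema = Schema()
#     declared = schema.from_native({'id': 1, 'tags': ['a', 'b'], 'deleted': None})
#     # -> an Object schema holding a Number, an Array of Strings and a Null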
|
Python
| 0.000001
|
@@ -1613,34 +1613,108 @@
-warnings.warn('deprecated'
+message = 'schema.array_of is deprecated, use schema.array.of instead'%0A warnings.warn(message
, De
|
51b67ad1df17df8a137f32f261a84c94b1b27189
|
Replace hardcoded 'edx' with platform name. (#29657)
|
openedx/core/djangoapps/user_api/accounts/__init__.py
|
openedx/core/djangoapps/user_api/accounts/__init__.py
|
"""
Account constants
"""
from django.conf import settings
from django.utils.text import format_lazy
from django.utils.translation import gettext_lazy as _
# The maximum length for the bio ("about me") account field
BIO_MAX_LENGTH = 300
# The minimum and maximum length for the name ("full name") account field
NAME_MIN_LENGTH = 1
NAME_MAX_LENGTH = 255
# The minimum and maximum length for the username account field
USERNAME_MIN_LENGTH = 2
# Note: 30 chars is the default for historical reasons. Django uses 150 as the username length since 1.10
USERNAME_MAX_LENGTH = getattr(settings, 'USERNAME_MAX_LENGTH', 30)
# The minimum and maximum length for the email account field
EMAIL_MIN_LENGTH = 3
EMAIL_MAX_LENGTH = 254 # Limit per RFCs is 254
ACCOUNT_VISIBILITY_PREF_KEY = 'account_privacy'
# Indicates the user's preference that all users can view the shareable fields in their account information.
ALL_USERS_VISIBILITY = 'all_users'
# Indicates the user's preference that all their account information be private.
PRIVATE_VISIBILITY = 'private'
# Indicates that the user has custom preferences for the visibility of their account information.
CUSTOM_VISIBILITY = 'custom'
# Prefix prepended to user preferences related to custom account visibility preferences.
VISIBILITY_PREFIX = 'visibility.'
# Translators: This message is shown when the Unicode usernames are NOT allowed.
# It is shown to users who attempt to create a new account using invalid characters
# in the username.
USERNAME_INVALID_CHARS_ASCII = _(
"Usernames can only contain letters (A-Z, a-z), numerals (0-9), underscores (_), and hyphens (-)."
)
# Translators: This message is shown only when the Unicode usernames are allowed.
# It is shown to users who attempt to create a new account using invalid characters
# in the username.
USERNAME_INVALID_CHARS_UNICODE = _(
"Usernames can only contain letters, numerals, and @/./+/-/_ characters."
)
# Translators: This message is shown to users who attempt to create a new account using
# an invalid email format.
EMAIL_INVALID_MSG = _('"{email}" is not a valid email address.')
AUTHN_EMAIL_INVALID_MSG = _('Enter a valid email address')
# Translators: This message is shown to users who attempt to create a new
# account using an username/email associated with an existing account.
EMAIL_CONFLICT_MSG = _(
"It looks like {email_address} belongs to an existing account. "
"Try again with a different email address."
)
AUTHN_EMAIL_CONFLICT_MSG = _("This email is already associated with an existing or previous edX account")
AUTHN_PASSWORD_COMPROMISED_MSG = _(
"The password you entered is on a list of known compromised passwords. Please choose a different one."
)
USERNAME_CONFLICT_MSG = _(
"It looks like {username} belongs to an existing account. "
"Try again with a different username."
)
AUTHN_USERNAME_CONFLICT_MSG = _("It looks like this username is already taken")
# Translators: This message is shown to users who enter a username/email/password
# with an inappropriate length (too short or too long).
USERNAME_BAD_LENGTH_MSG = format_lazy(
_("Username must be between {min} and {max} characters long."),
min=USERNAME_MIN_LENGTH,
max=USERNAME_MAX_LENGTH,
)
EMAIL_BAD_LENGTH_MSG = format_lazy(
_("Enter a valid email address that contains at least {min} characters."),
min=EMAIL_MIN_LENGTH,
)
# These strings are normally not user-facing.
USERNAME_BAD_TYPE_MSG = "Username must be a string."
EMAIL_BAD_TYPE_MSG = "Email must be a string."
PASSWORD_BAD_TYPE_MSG = "Password must be a string."
# Translators: These messages are shown to users who do not enter information
# into the required field or enter it incorrectly.
REQUIRED_FIELD_NAME_MSG = _("Enter your full name.")
REQUIRED_FIELD_CONFIRM_EMAIL_MSG = _("The email addresses do not match.")
REQUIRED_FIELD_COUNTRY_MSG = _("Select your country or region of residence.")
REQUIRED_FIELD_PROFESSION_SELECT_MSG = _("Select your profession.")
REQUIRED_FIELD_SPECIALTY_SELECT_MSG = _("Select your specialty.")
REQUIRED_FIELD_PROFESSION_TEXT_MSG = _("Enter your profession.")
REQUIRED_FIELD_SPECIALTY_TEXT_MSG = _("Enter your specialty.")
REQUIRED_FIELD_CITY_MSG = _("Enter your city.")
REQUIRED_FIELD_GOALS_MSG = _("Tell us your goals.")
REQUIRED_FIELD_LEVEL_OF_EDUCATION_MSG = _("Select the highest level of education you have completed.")
REQUIRED_FIELD_MAILING_ADDRESS_MSG = _("Enter your mailing address.")
|
Python
| 0.000011
|
@@ -2485,17 +2485,68 @@
MSG = _(
-%22
+ # pylint: disable=translation-of-non-string%0A f'
This ema
@@ -2603,19 +2603,40 @@
ous
-edX
+%7Bsettings.PLATFORM_NAME%7D
account
%22)%0AA
@@ -2631,17 +2631,17 @@
account
-%22
+'
)%0AAUTHN_
|
b759455bec4b9e231808b704f289f37eb198a895
|
Add guest user account with admin privs
|
ditto/scripts/setup_test_data.py
|
ditto/scripts/setup_test_data.py
|
"""Script to set up test data for a Ditto instance.
As before we tried to do this with migrations but ran into problems
early on with custom permissions not being created.
In any case, it's probably easier/better to have a single bootstrap
script instead of a bunch of data migrations.
"""
from django.conf import settings
from django.contrib.auth.models import Group, Permission
from django.contrib.contenttypes.models import ContentType
from django.contrib.sites.models import Site
import configuration.models
import core
import dittoforms.models
import multitenancy.models
import multitenancy.tenant
from users.models import User
INTERACTIONS = ["Messaging"]
REG_FORM_SPEC = '[{"name":"Name","on":true,"fields":[{"name":"First name"},{"name":"Last name"}]},{"name":"Gender","on":true,"options":["Male","Female","Other"]},{"name":"Ethnicity","on":true,"options":["White British","Other"]},{"name":"How did you hear about us?","on":true,"multiple":true,"options":["Internet search","Magazine","Other"]}]'
def run():
setup_site()
setup_features()
setup_default_roles()
setup_admin_permission()
setup_interactions()
setup_admin_user()
setup_members()
setup_tenants()
setup_reg_form()
def setup_site(name='DITTO.TECHNOLOGY', subdomain=None):
site = Site.objects.get_current()
site.name = name
domain = 'localhost' if settings.DEBUG else site.name.lower()
if subdomain:
domain = '%s.%s' % (subdomain, domain)
site.domain = domain
site.save()
def setup_features():
for slug, name, perms in (
('chatroom', 'Chatroom', [('can_chat', 'Can chat')]),
('news', 'News', [('can_news', 'Can manage news')]),
('blog', 'Blog', [
('can_blog', 'Can Blog'),
('can_comment', 'Can comment'),
]),
):
feature, _ = configuration.models.Feature.objects.get_or_create(
slug=slug, name=name)
content_type = ContentType.objects.get_for_model(configuration.models.Feature)
for codename, name in perms:
perm, _ = Permission.objects.get_or_create(
codename=codename,
content_type=content_type)
perm.name = name
perm.save()
feature.permissions.add(perm)
def setup_default_roles():
for group in core.DEFAULT_ROLES:
group, _ = Group.objects.get_or_create(name=group)
def setup_admin_permission():
content_type = ContentType.objects.get_for_model(User)
perm, _ = Permission.objects.get_or_create(
codename='can_admin',
content_type=content_type)
perm.name = 'Can administer'
perm.save()
Group.objects.get(name=core.ADMIN_ROLE).permissions.add(perm)
def setup_interactions():
for interaction in INTERACTIONS:
configuration.models.Interaction.objects.get_or_create(name=interaction)
def setup_admin_user():
_create_user('admin', core.ADMIN_ROLE)
def setup_members():
for name in ['mark', 'sarah', 'ross', 'emma']:
_create_user(name, core.MEMBER_ROLE)
# 'visitor' is someone who's come to the site to create their own
# network, hence we give them the admin role so they can do all
# the configuration necessary for a new network.
_create_user('visitor', core.ADMIN_ROLE)
def _create_user(username, group_name):
user, created = User.objects.get_or_create(username=username)
user.emailaddress_set.get_or_create(
verified=1,
defaults={'email': '%s@example.com' % username})
if created:
user.set_password("let me in")
user.save()
user.groups.add(Group.objects.get(name=group_name))
def setup_tenants():
user = User.objects.get(username='mark')
multitenancy.models.Tenant.objects.create(
user=user,
network_name='Digital Impacts',
slug='di',
is_configured=True,
)
if not multitenancy.tenant.is_main():
setup_site(name='Digital Impacts', subdomain='di')
def setup_reg_form():
for role in Group.objects.all():
form = dittoforms.models.FormSpec.objects.create(
slug='reg',
spec=REG_FORM_SPEC
)
configuration.models.RegForm.objects.create(
role=role,
form=form
)
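# Hedged sketch: REG_FORM_SPEC above is plain JSON, so the default form
# sections can be inspected with the standard library (illustration only):
#
#     import json
#     spec = json.loads(REG_FORM_SPEC)
#     [section['name'] for section in spec]
#     # -> ['Name', 'Gender', 'Ethnicity', 'How did you hear about us?']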
|
Python
| 0
|
@@ -1158,16 +1158,17 @@
min_user
+s
()%0A s
@@ -2931,16 +2931,17 @@
min_user
+s
():%0A
@@ -2979,16 +2979,59 @@
N_ROLE)%0A
+ _create_user('guest', core.ADMIN_ROLE)%0A
%0A%0Adef se
|
732ac42475b30f51ff672ab43ae0c4789f57ef9c
|
use printinfo
|
scripts/lpod-style.py
|
scripts/lpod-style.py
|
#!/usr/bin/env python
# -*- coding: UTF-8 -*-
#
# Copyright (c) 2009-2010 Ars Aperta, Itaapy, Pierlis, Talend.
#
# Authors: Hervé Cauwelier <herve@itaapy.com>
# Romain Gauthier <romain@itaapy.com>
#
# This file is part of Lpod (see: http://lpod-project.org).
# Lpod is free software; you can redistribute it and/or modify it under
# the terms of either:
#
# a) the GNU General Public License as published by the Free Software
# Foundation, either version 3 of the License, or (at your option)
# any later version.
# Lpod is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
# You should have received a copy of the GNU General Public License
# along with Lpod. If not, see <http://www.gnu.org/licenses/>.
#
# b) the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
#
# Import from the standard library
from optparse import OptionParser
from sys import exit, stdout, stderr
# Import from lpod
from lpod import __version__
from lpod.const import ODF_CLASSES
from lpod.document import odf_get_document
from lpod.scriptutils import add_option_output, StdoutWriter, printinfo
from lpod.scriptutils import check_target_file, printerr
def show_styles(document, target, automatic=True, common=True,
properties=False):
"""Show the different styles of a document and their properties.
"""
output = document.show_styles(automatic=automatic, common=common,
properties=properties)
# Print the output
if target is None:
target = stdout
encoding = target.encoding if target.encoding is not None else 'utf-8'
target.write(output.encode(encoding))
target.flush()
def delete_styles(document, target, pretty=True):
n = document.delete_styles()
document.save(target=target, pretty=pretty)
printinfo(n, "styles removed (0 error, 0 warning).")
def find_presentation_list_style(body):
for frame in body.get_frames(presentation_class='outline'):
first_list = frame.get_list()
if first_list is not None:
return first_list.get_style()
return None
def merge_presentation_styles(document, source):
# Apply master page found
source_body = source.get_body()
first_page = source_body.get_draw_page()
master_page_name = first_page.get_master_page()
first_master_page = document.get_style('master-page',
master_page_name)
print >> stderr, "master page used:", first_master_page.get_display_name()
body = document.get_body()
for page in body.get_draw_pages():
page.set_style(first_page.get_style())
page.set_master_page(first_page.get_master_page())
page.set_presentation_page_layout(
first_page.get_presentation_page_layout())
# Adjust layout -- will obviously work only if content is separated from
# style: use of master pages, layout, etc.
for presentation_class in ODF_CLASSES:
first_frame = source_body.get_frame(
presentation_class=presentation_class)
if first_frame is None:
continue
# Mimic frame style
style = first_frame.get_style()
presentation_style = first_frame.get_presentation_style()
for page in body.get_draw_pages():
for frame in page.get_frames(
presentation_class=presentation_class):
frame.set_style(style)
frame.set_presentation_style(presentation_style)
# Mimic list style (XXX only first level)
if presentation_class == 'outline':
list_style = find_presentation_list_style(source_body)
for page in body.get_draw_pages():
for frame in page.get_frames(
presentation_class='outline'):
for list in frame.get_lists():
list.set_style(list_style)
def merge_styles(document, from_file, target=None, pretty=True):
source = odf_get_document(from_file)
document.delete_styles()
document.merge_styles_from(source)
type = document.get_type()
# Enhance Presentation merge
if type in ('presentation', 'presentation-template'):
print >> stderr, "merging presentation styles..."
merge_presentation_styles(document, source)
document.save(target=target, pretty=pretty)
printinfo("Done (0 error, 0 warning).")
if __name__ == '__main__':
# Options initialisation
usage = '%prog [options] <file>'
description = ("A command line interface to manipulate styles of "
"OpenDocument files. By default prints all the styles to the "
"standard output.")
parser = OptionParser(usage, version=__version__,
description=description)
# --automatic
parser.add_option('-a', '--automatic', action='store_true', default=False,
help="show automatic styles only")
# --common
parser.add_option('-c', '--common', action='store_true', default=False,
help="show common styles only")
# --properties
parser.add_option('-p', '--properties', action='store_true',
help="show properties of styles")
# --delete
help = ("return a copy with all styles (except default) deleted from "
"<file>")
parser.add_option('-d', '--delete', action='store_true', help=help)
# --merge
help = ('copy styles from FILE to <file>. Any style with the same name '
'will be replaced.')
parser.add_option('-m', '--merge-styles-from', dest='merge',
metavar='FILE', help=help)
# --output
add_option_output(parser)
# Parse options
options, args = parser.parse_args()
if len(args) != 1:
parser.print_help()
exit(1)
document = odf_get_document(args[0])
if options.delete:
target = options.output
if target is None:
printerr("Will not delete in-place: ",
'output file needed or "-" for stdout')
exit(1)
elif target == "-":
target = StdoutWriter()
else:
check_target_file(target)
delete_styles(document, target)
elif options.merge:
merge_styles(document, options.merge, target=options.output)
else:
automatic = options.automatic
common = options.common
if not automatic ^ common:
automatic, common = True, True
target = options.output
if target is not None:
target = open(target, 'wb')
show_styles(document, target, automatic=automatic,
common=common, properties=options.properties)
|
Python
| 0.000001
|
@@ -1229,16 +1229,8 @@
dout
-, stderr
%0A%0A#
@@ -2703,28 +2703,21 @@
print
- %3E%3E stderr,
+info(
%22master
@@ -2765,16 +2765,17 @@
y_name()
+)
%0A bod
@@ -4475,20 +4475,13 @@
rint
- %3E%3E stderr,
+info(
%22mer
@@ -4508,16 +4508,17 @@
yles...%22
+)
%0A
|
a0e1183d9da98dd9f79c496b055cab0bb2638532
|
Update h_RNN
|
h_RNN/Mnist.py
|
h_RNN/Mnist.py
|
import time
import tflearn
import numpy as np
import tensorflow as tf
from h_RNN.RNN import RNNWrapper, Generator
from h_RNN.SpRNN import SparseRNN
from Util.Util import DataUtil
class MnistGenerator(Generator):
def __init__(self, im=None, om=None, one_hot=True):
super(MnistGenerator, self).__init__(im, om)
self._x, self._y = DataUtil.get_dataset("mnist", "../_Data/mnist.txt", quantized=True, one_hot=one_hot)
self._x = self._x.reshape(-1, 28, 28)
self._x_train, self._x_test = self._x[:1800], self._x[1800:]
self._y_train, self._y_test = self._y[:1800], self._y[1800:]
def gen(self, batch, test=False, **kwargs):
if batch == 0:
if test:
return self._x_test, self._y_test
return self._x_train, self._y_train
batch = np.random.choice(len(self._x_train), batch)
return self._x_train[batch], self._y_train[batch]
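# Hedged usage sketch (requires the local mnist data file): batch == 0
# returns a whole split, any other value draws that many random samples.
#
#     gen = MnistGenerator()
#     x_train, y_train = gen.gen(0)           # full training split
#     x_test, y_test = gen.gen(0, test=True)  # full test split
#     x_batch, y_batch = gen.gen(64)          # random batch of 64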
if __name__ == '__main__':
n_history = 3
print("=" * 60, "\n" + "Normal LSTM", "\n" + "-" * 60)
generator = MnistGenerator()
t = time.time()
tf.reset_default_graph()
rnn = RNNWrapper()
rnn.fit(28, 10, generator, n_history=n_history, epoch=10, squeeze=True)
print("Time Cost: {}".format(time.time() - t))
rnn.draw_err_logs()
print("=" * 60, "\n" + "Sparse LSTM" + "\n" + "-" * 60)
generator = MnistGenerator(one_hot=False)
t = time.time()
tf.reset_default_graph()
rnn = SparseRNN()
rnn.fit(28, 10, generator, n_history=n_history, epoch=10)
print("Time Cost: {}".format(time.time() - t))
rnn.draw_err_logs()
print("=" * 60, "\n" + "Tflearn", "\n" + "-" * 60)
generator = MnistGenerator()
t = time.time()
tf.reset_default_graph()
net = tflearn.input_data(shape=[None, 28, 28])
net = tf.concat(tflearn.lstm(net, 128, return_seq=True)[-n_history:], axis=1)
net = tflearn.fully_connected(net, 10, activation='softmax')
net = tflearn.regression(net, optimizer='adam', batch_size=64,
loss='categorical_crossentropy')
model = tflearn.DNN(net, tensorboard_verbose=0)
model.fit(*generator.gen(0), n_epoch=10, validation_set=generator.gen(0, True), show_metric=True)
print("Time Cost: {}".format(time.time() - t))
|
Python
| 0.000001
|
@@ -1,19 +1,17 @@
import
-time
+os
%0D%0Aimport
@@ -11,23 +11,133 @@
%0Aimport
-tflearn
+sys%0D%0Aroot_path = os.path.abspath(%22../%22)%0D%0Aif root_path not in sys.path:%0D%0A sys.path.append(root_path)%0D%0A%0D%0Aimport time
%0D%0Aimport
@@ -1755,687 +1755,4 @@
()%0D%0A
-%0D%0A print(%22=%22 * 60, %22%5Cn%22 + %22Tflearn%22, %22%5Cn%22 + %22-%22 * 60)%0D%0A generator = MnistGenerator()%0D%0A t = time.time()%0D%0A tf.reset_default_graph()%0D%0A net = tflearn.input_data(shape=%5BNone, 28, 28%5D)%0D%0A net = tf.concat(tflearn.lstm(net, 128, return_seq=True)%5B-n_history:%5D, axis=1)%0D%0A net = tflearn.fully_connected(net, 10, activation='softmax')%0D%0A net = tflearn.regression(net, optimizer='adam', batch_size=64,%0D%0A loss='categorical_crossentropy')%0D%0A model = tflearn.DNN(net, tensorboard_verbose=0)%0D%0A model.fit(*generator.gen(0), n_epoch=10, validation_set=generator.gen(0, True), show_metric=True)%0D%0A print(%22Time Cost: %7B%7D%22.format(time.time() - t))%0D%0A
|
b9cc76d410ca034918c615402e3fbe82b226859e
|
Add public address validation test.
|
path_and_address/tests/test_validation.py
|
path_and_address/tests/test_validation.py
|
from itertools import product
from ..validation import valid_address, valid_hostname, valid_port
def _join(host_and_port):
return '%s:%s' % host_and_port
def _join_all(hostnames, ports):
return map(_join, product(hostnames, ports))
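# Hedged illustration: product() pairs every hostname with every port, e.g.
#
#     _join_all(['localhost'], [80, 8080])
#     # -> ['localhost:80', 'localhost:8080'] (a list under Python 2; on
#     # Python 3 the map() above would need wrapping in list())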
hostnames = [
'127.0.0.1',
'localhost',
'example.com',
'example.org',
]
invalid_hostnames = [
'http://example.com',
'http://example.com:8080',
'example.com/',
'example.com:8080/',
'example.com:0',
'localhost:0',
'127.0.0.1:0',
]
ports = [1, 80, 5000, 8080, 65535]
invalid_ports = [None, -80, -1, 0, 65536, 75000,
float('nan'), '', 'nan', 'hello', 'a string']
addresses = hostnames + ports + _join_all(hostnames, ports)
invalid_addresses = invalid_hostnames \
+ _join_all(hostnames, invalid_ports) \
+ _join_all(invalid_hostnames, ports) \
+ _join_all(invalid_hostnames, invalid_ports)
def test_valid_address():
for address in addresses:
assert valid_address(address), 'Invalid address, expected to be valid: ' + repr(address)
for address in invalid_addresses:
assert not valid_address(address), 'Valid address, expected to be invalid: ' + repr(address)
def test_valid_hostname():
for hostname in hostnames:
assert valid_hostname(hostname), 'Invalid hostname, expected to be valid: ' + repr(hostname)
for hostname in invalid_hostnames:
assert not valid_hostname(hostname), 'Valid hostname, expected to be invalid: ' + repr(hostname)
def test_valid_port():
for port in ports:
assert valid_port(port), 'Invalid port, expected to be valid: ' + repr(port)
for port in invalid_ports:
assert not valid_port(port), 'Valid port, expected to be invalid: ' + repr(port)
|
Python
| 0
|
@@ -249,24 +249,39 @@
stnames = %5B%0A
+ '0.0.0.0',%0A
'127.0.0
@@ -496,36 +496,15 @@
'
-localhost:0',%0A '127
+0
.0.0.
-1
+0
:0',
|
fd7454610f4cffcfc8c289539b3824f023fe973f
|
change cruise input dim
|
modules/tools/prediction/mlp_train/common/configure.py
|
modules/tools/prediction/mlp_train/common/configure.py
|
#!/usr/bin/env python
###############################################################################
# Copyright 2018 The Apollo Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
###############################################################################
parameters = {
'mlp': {
'train_data_rate': 0.8,
'size_obstacle_feature': 22,
'size_lane_sequence_feature': 40,
'dim_input': 22 + 40,
'dim_hidden_1': 30,
'dim_hidden_2': 15,
'dim_output': 1
},
'cruise_mlp': {
'dim_input': 23 + 180,
'dim_hidden_1': 50,
'dim_hidden_2': 18,
'dim_output': 2
},
'junction_mlp': {
'dim_input': 3 + 60,
'dim_hidden_1': 30,
'dim_hidden_2': 15,
'dim_output': 12
},
'feature': {
'threshold_label_time_delta': 1.0,
'prediction_label_timeframe': 3.0,
'maximum_maneuver_finish_time': 6.0,
# Lane change is defined to be finished if the ratio of deviation
# from center-line to the lane width is within this: (must be < 0.5)
'lane_change_finish_condition': 0.1
}
}
labels = {'go_false': 0, 'go_true': 1, 'cutin_false': -1, 'cutin_true': 2}
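# Hedged usage sketch: training code is assumed to read its layer sizes
# from this dict, e.g.
#
#     dim_input = parameters['cruise_mlp']['dim_input']  # 23 + 180 = 203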
|
Python
| 0.99845
|
@@ -1078,16 +1078,20 @@
t': 23 +
+ 8 +
180,%0A
|
f5aa5f3dae5f10288d67b7e4b51d04bcfb495fc3
|
Use .run for standalone packages.
|
scripts/packageIfw.py
|
scripts/packageIfw.py
|
#!/usr/bin/env python
################################################################################
# Copyright (C) 2013 Digia Plc
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# * Redistributions of source code must retain the above copyright notice,
# this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
# * Neither the name of Digia Plc, nor the names of its contributors
# may be used to endorse or promote products derived from this software
# without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS 'AS IS'
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE
# FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
# OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
################################################################################
import os
import sys
import datetime
import getopt
import subprocess
import fnmatch
import tempfile
import shutil
import inspect
def usage():
print 'Usage: %s [-v|--version-string=versionstring] [-i|--installer-path=/path/to/installerfw] [-a|--archive=archive.7z] <outputname>' % os.path.basename(sys.argv[0])
def substitute_file(infile, outfile, substitutions):
with open(infile, 'r') as f:
template = f.read()
with open(outfile, 'w') as f:
f.write(template.format(**substitutions))
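# Hedged illustration of substitute_file (file contents are hypothetical):
#
#     # in.txt.in contains: Installer {version}, built {date}
#     substitute_file('in.txt.in', 'in.txt',
#                     {'version': '3.0.0', 'date': '2013-12-01'})
#     # in.txt now contains: Installer 3.0.0, built 2013-12-01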
def ifw_template_dir():
script_dir = os.path.dirname(inspect.getfile(inspect.currentframe()))
source_dir = os.path.normpath(os.path.join(script_dir, '..'));
return os.path.normpath(os.path.join(source_dir, 'dist', 'installer', 'ifw'))
def main():
try:
opts, args = getopt.gnu_getopt(sys.argv[1:], 'hv:i:a:', ['help', 'version-string=', 'installer-path=', 'archive'])
except:
usage()
sys.exit(2)
if len(args) < 1:
usage()
sys.exit(2)
version = ''
ifw_location = ''
archive = ''
for o, a in opts:
if o in ('-h', '--help'):
usage()
sys.exit(0)
if o in ('-v', '--version-string'):
version = a
if o in ('-i', '--installer-path'):
ifw_location = a
if o in ('-a', '--archive'):
archive = a
if (version == ''):
raise Exception('Version not specified (--version-string)!')
if (ifw_location == ''):
raise Exception('Installer framework location not specified (--installer-path)!')
if (archive == ''):
raise Exception('Archive not specified (--archive)!')
installer_name = args[0]
config_postfix = ''
if sys.platform == 'darwin':
installer_name = installer_name + '.dmg'
if sys.platform.startswith('win'):
config_postfix = '-windows'
if sys.platform.startswith('linux'):
config_postfix = '-linux'
installer_name = installer_name + '.bin'
config_name = 'config' + config_postfix + '.xml'
try:
temp_dir = tempfile.mkdtemp()
except:
raise Exception('Failed to create a temporary directory!')
try:
substs = {}
substs['version'] = version
substs['date'] = datetime.date.today().isoformat()
template_dir = ifw_template_dir()
out_config_dir = os.path.join(temp_dir,'config')
out_packages_dir = os.path.join(temp_dir, 'packages')
shutil.copytree(os.path.join(template_dir, 'packages'), os.path.join(temp_dir, 'packages'))
shutil.copytree(os.path.join(template_dir, 'config'), os.path.join(temp_dir, 'config'))
for root, dirnames, filenames in os.walk(out_packages_dir):
for template in fnmatch.filter(filenames, '*.in'):
substitute_file(os.path.join(root, template), os.path.join(root, template[:-3]), substs)
os.remove(os.path.join(root, template))
for root, dirnames, filenames in os.walk(out_config_dir):
for template in fnmatch.filter(filenames, '*.in'):
substitute_file(os.path.join(root, template), os.path.join(root, template[:-3]), substs)
os.remove(os.path.join(root, template))
data_path = os.path.join(out_packages_dir, 'org.qtproject.qtcreator.application', 'data')
if not os.path.exists(data_path):
os.makedirs(data_path)
shutil.copy(archive, data_path)
ifw_call = [os.path.join(ifw_location, 'bin', 'binarycreator'), '-c', os.path.join(out_config_dir, config_name), '-p', out_packages_dir, installer_name, '--offline-only' ]
subprocess.check_call(ifw_call, stderr=subprocess.STDOUT)
finally:
print 'Cleaning up...'
shutil.rmtree(temp_dir)
print 'Done.'
if __name__ == '__main__':
main()
|
Python
| 0
|
@@ -3709,18 +3709,18 @@
ame + '.
-bi
+ru
n'%0A%0A
|
8092efdd0bf5f5ca8d5498cf679b019920c00bfd
|
format with black
|
plugins/feeds/public/virustotal_apiv3.py
|
plugins/feeds/public/virustotal_apiv3.py
|
import logging
import re
import json
from datetime import timedelta, datetime
from core import Feed
from core.config.config import yeti_config
from core.observables import Hash, File
# Variable
VTAPI = yeti_config.get('vt', 'key')
headers = {"x-apikey": VTAPI}
limit = 10
params = {'limit': limit}
regex = "[A-Fa-f0-9]{64}" # Find SHA256
class VirusTotalPriv(Feed):
default_values = {
"frequency": timedelta(minutes=5),
"name": "VirusTotalHuntingV3",
"source": "https://www.virustotal.com/api/v3/intelligence/hunting_notifications",
"description": "Feed of hunting for VirusTotal API v3",
}
settings = {
'vt_url_hunting_v3': {
'name': 'VT Url Hunting v3',
'description': 'Hunting feed for VT API v3'
}
}
def update(self):
if VTAPI:
self.source = "https://www.virustotal.com/api/v3/intelligence/hunting_notifications"
for index, item in self.update_json(params=params, headers=headers, key="data"):
self.analyze(item)
else:
logging.error("Your VT API key is not set in the config file!")
def analyze(self, item):
tags = []
context = {'source': self.name}
# Parse value of interest
subject = item["attributes"]["rule_name"]
date = item["attributes"]["date"]
tags2 = item["attributes"]["tags"]
sha2 = re.search(regex, str(tags2)).group()
date_string = datetime.utcfromtimestamp(date).strftime('%d/%m/%Y %H:%M:%S')
tags2.remove(sha2)
# Update to Yeti DB
f_vt3 = File.get_or_create(value='FILE:{}'.format(sha2))
sha256 = Hash.get_or_create(value=sha2)
f_vt3.active_link_to(sha256, 'sha256', self.name)
tags.append(tags2)
context['date_added'] = date_string
context['snippet'] = item["attributes"]['snippet']
# context['source_country'] = item["attributes"]['source_country']
context['raw'] = item
f_vt3.tag(str(tags))
f_vt3.add_context(context)
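# Hedged illustration (self-contained): how the module-level regex pulls a
# SHA256 out of the notification tags, mirroring the re.search call above.
#
#     tags2 = ['my_rule', 'a' * 64]  # hypothetical tag list
#     sha2 = re.search(regex, str(tags2)).group()
#     # sha2 == 'a' * 64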
|
Python
| 0.000001
|
@@ -217,19 +217,19 @@
get(
-'vt', 'key'
+%22vt%22, %22key%22
)%0Ahe
@@ -281,15 +281,15 @@
= %7B
-'
+%22
limit
-'
+%22
: li
@@ -655,21 +655,17 @@
- '
+%22
vt_url_h
@@ -673,17 +673,17 @@
nting_v3
-'
+%22
: %7B%0A
@@ -694,17 +694,17 @@
-'
+%22
name
-': '
+%22: %22
VT U
@@ -716,17 +716,17 @@
nting v3
-'
+%22
,%0A
@@ -731,17 +731,17 @@
-'
+%22
descript
@@ -747,12 +747,12 @@
tion
-': '
+%22: %22
Hunt
@@ -773,17 +773,18 @@
T API v3
-'
+%22,
%0A
@@ -858,16 +858,34 @@
source =
+ (%0A
%22https:
@@ -948,16 +948,30 @@
ations%22%0A
+ )%0A
@@ -1010,16 +1010,33 @@
te_json(
+%0A
params=p
@@ -1069,16 +1069,29 @@
y=%22data%22
+%0A
):%0A
@@ -1282,16 +1282,16 @@
= %7B
-'
+%22
source
-'
+%22
: se
@@ -1584,17 +1584,17 @@
trftime(
-'
+%22
%25d/%25m/%25Y
@@ -1602,17 +1602,17 @@
%25H:%25M:%25S
-'
+%22
)%0A
@@ -1706,17 +1706,17 @@
lue=
-'
+%22
FILE:%7B%7D
-'
+%22
.for
@@ -1815,16 +1815,16 @@
56,
-'
+%22
sha256
-'
+%22
, se
@@ -1876,17 +1876,17 @@
context%5B
-'
+%22
date_add
@@ -1887,17 +1887,17 @@
te_added
-'
+%22
%5D = date
@@ -1920,25 +1920,25 @@
context%5B
-'
+%22
snippet
-'
+%22
%5D = item
@@ -1956,17 +1956,17 @@
s%22%5D%5B
-'
+%22
snippet
-'
+%22
%5D%0A
@@ -2059,13 +2059,13 @@
ext%5B
-'raw'
+%22raw%22
%5D =
|
ddab6ff5760680cffd388fba96a8562baa2f28c8
|
support -l and -s arguments
|
scripts/pub_script.py
|
scripts/pub_script.py
|
# Copyright (c) 2015 Nicolas JOUANIN
#
# See the file license.txt for copying permission.
"""
hbmqtt_pub - MQTT 3.1.1 publisher
Usage:
hbmqtt_pub --version
hbmqtt_pub (-h | --help)
hbmqtt_pub --url BROKER_URL -t TOPIC (-f FILE | -l | -m MESSAGE | -n | -s) [-c CONFIG_FILE] [-i CLIENT_ID] [-q | --qos QOS] [-d] [-k KEEP_ALIVE] [--clean-session] [--ca-file CAFILE] [--ca-path CAPATH] [--ca-data CADATA] [ --will-topic WILL_TOPIC [--will-message WILL_MESSAGE] [--will-qos WILL_QOS] [--will-retain] ]
Options:
-h --help Show this screen.
--version Show version.
--url BROKER_URL Broker connection URL (must conform to the MQTT URI scheme, see <https://github.com/mqtt/mqtt.github.io/wiki/URI-Scheme>)
-c CONFIG_FILE Broker configuration file (YAML format)
-i CLIENT_ID Id to use as client ID.
-q | --qos QOS Quality of service to use for the message, from 0, 1 and 2. Defaults to 0.
-t TOPIC Message topic
-m MESSAGE Message data to send
-f FILE Read file by line and publish message for each line
-s Read from stdin and publish message for each line
-k KEEP_ALIVE Keep alive timeout in seconds
--clean-session Clean session on connect (defaults to False)
--ca-file CAFILE CA file
--ca-path CAPATH CA path
--ca-data CADATA CA data
--will-topic WILL_TOPIC
--will-message WILL_MESSAGE
--will-qos WILL_QOS
--will-retain
-d Enable debug messages
"""
import sys
import logging
import asyncio
import os
from hbmqtt.client import MQTTClient, ConnectException
from hbmqtt.version import get_version
from docopt import docopt
try:
from .utils import read_yaml_config
except:
from utils import read_yaml_config
logger = logging.getLogger(__name__)
def _gen_client_id():
import os
import socket
pid = os.getpid()
hostname = socket.gethostname()
return "hbmqtt_pub/%d-%s" % (pid, hostname)
def _get_qos(arguments):
try:
return int(arguments['--qos'][0])
except:
return None
def _get_message(arguments):
if arguments['-n']:
yield b''
if arguments['-m']:
yield arguments['-m'].encode(encoding='utf-8')
if arguments['-f']:
try:
with open(arguments['-f'], 'r') as f:
for line in f:
yield line.encode(encoding='utf-8')
except:
logger.error("Failed to read file '%s'" % arguments['-f'])
if arguments['-s']:
import sys
for line in sys.stdin:
yield line.encode(encoding='utf-8')
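# Hedged usage sketch: _get_message is a generator; do_pub below simply
# iterates it, publishing one payload per yielded bytes object:
#
#     for message in _get_message({'-n': False, '-m': 'hello',
#                                  '-f': None, '-s': False}):
#         print(message)  # b'hello'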
@asyncio.coroutine
def do_pub(client, arguments):
try:
logger.info("%s Connecting to broker" % client.client_id)
yield from client.connect(uri=arguments['--url'],
cleansession=arguments['--clean-session'],
cafile=arguments['--ca-file'],
capath=arguments['--ca-path'],
cadata=arguments['--ca-data'])
qos = _get_qos(arguments)
topic = arguments['-t']
for message in _get_message(arguments):
logger.info("%s Publishing to '%s'" % (client.client_id, topic))
yield from client.publish(topic, message, qos)
yield from client.disconnect()
logger.info("%s Disconnected from broker" % client.client_id)
except KeyboardInterrupt:
yield from client.disconnect()
logger.info("%s Disconnected from broker" % client.client_id)
except ConnectException as ce:
logger.fatal("connection to '%s' failed: %r" % (arguments['--url'], ce))
def main(*args, **kwargs):
if sys.version_info[:2] < (3, 4):
logger.fatal("Error: Python 3.4+ is required")
sys.exit(-1)
arguments = docopt(__doc__, version=get_version())
#print(arguments)
formatter = "[%(asctime)s] :: %(levelname)s - %(message)s"
if arguments['-d']:
level = logging.DEBUG
else:
level = logging.INFO
logging.basicConfig(level=level, format=formatter)
config = None
if arguments['-c']:
config = read_yaml_config(arguments['-c'])
else:
config = read_yaml_config(os.path.join(os.path.dirname(os.path.realpath(__file__)), 'default_client.yaml'))
logger.debug("Using default configuration")
loop = asyncio.get_event_loop()
client_id = arguments.get("-i", None)
if not client_id:
client_id = _gen_client_id()
if arguments['-k']:
config['keep_alive'] = int(arguments['-k'])
if arguments['--will-topic'] and arguments['--will-message'] and arguments['--will-qos']:
config['will'] = dict()
config['will']['topic'] = arguments['--will-topic']
config['will']['message'] = arguments['--will-message'].encode('utf-8')
config['will']['qos'] = int(arguments['--will-qos'])
config['will']['retain'] = arguments['--will-retain']
client = MQTTClient(client_id=client_id, config=config, loop=loop)
loop.run_until_complete(do_pub(client, arguments))
loop.close()
if __name__ == "__main__":
main()
|
Python
| 0
|
@@ -2577,17 +2577,17 @@
ments%5B'-
-s
+l
'%5D:%0A
@@ -2632,16 +2632,41 @@
.stdin:%0A
+ if line:%0A
@@ -2705,16 +2705,200 @@
utf-8')%0A
+ if arguments%5B'-s'%5D:%0A import sys%0A message = bytearray()%0A for line in sys.stdin:%0A message.extend(line.encode(encoding='utf-8'))%0A yield message%0A
%0A%0A%0A@asyn
|
aae5146bd672fdec9a055666c9742acbc1dddd5b
|
remove obsolete comment
|
planetstack/core/dashboard/views/shell.py
|
planetstack/core/dashboard/views/shell.py
|
# /opt/planetstack/core/dashboard/views/helloworld.py
import datetime
import os
import sys
import time
import json
from django.http import HttpResponse, HttpResponseServerError, HttpResponseForbidden
from django.views.generic import TemplateView, View
from core.models import *
from django.forms.models import model_to_dict
def ensure_serializable(d):
d2={}
for (k,v) in d.items():
# datetime is not json serializable
if isinstance(v, datetime.datetime):
d2[k] = time.mktime(v.timetuple())
elif v.__class__.__name__ == "Geoposition":
pass
else:
d2[k] = v
return d2
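# Hedged illustration: datetimes become POSIX-style timestamps and
# Geoposition values are dropped; everything else passes through, e.g.
#
#     ensure_serializable({'name': 'node1',
#                          'updated': datetime.datetime(2014, 1, 1)})
#     # -> {'name': 'node1', 'updated': 1388534400.0} (timezone-dependent)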
def sliver_to_dict(sliver):
d = model_to_dict(sliver)
d["slice_id"] = sliver.slice.id
d["node_id"] = sliver.node.id
return d
def slice_to_dict(slice):
d = model_to_dict(slice)
d["slivers"] = [sliver_to_dict(x) for x in slice.slivers]
return d
def node_to_dict(node):
d = model_to_dict(node)
d["slivers"] = []
class OpenCloudData:
def __init__(self, user):
self.loadAll()
def loadAll(self):
self.allNodes = list(Node.objects.all())
self.allSlices = list(Slice.objects.all())
self.allSlivers = list(Sliver.objects.all())
self.allSites = list(Site.objects.all())
self.site_id = {}
for site in self.allSites:
d = model_to_dict(site)
d["node_ids"] = []
d["slice_ids"] = []
self.site_id[site.id] = ensure_serializable(d)
self.node_id = {}
for node in self.allNodes:
d = model_to_dict(node)
d["sliver_ids"] = []
self.node_id[node.id] = ensure_serializable(d)
self.site_id[node.site_id]["node_ids"].append(node.id)
self.slice_id = {}
for slice in self.allSlices:
d = model_to_dict(slice)
d["sliver_ids"] = []
self.slice_id[slice.id] = ensure_serializable(d)
self.site_id[slice.site_id]["slice_ids"].append(site.id)
print self.slice_id.keys()
self.sliver_id = {}
for sliver in self.allSlivers:
self.sliver_id[sliver.id] = model_to_dict(sliver)
self.slice_id[sliver.slice_id]["sliver_ids"].append(sliver.id)
self.node_id[sliver.node_id]["sliver_ids"].append(sliver.id)
def get_opencloud_data(self):
return {"slices": self.slice_id.values(),
"slivers": self.sliver_id.values(),
"nodes": self.node_id.values(),
"sites": self.site_id.values()}
class ShellDataView(View):
url = r'^shelldata/'
def get(self, request, **kwargs):
result = OpenCloudData(request.user).get_opencloud_data()
return HttpResponse(json.dumps(result), mimetype='application/json')
|
Python
| 0
|
@@ -1,58 +1,4 @@
-# /opt/planetstack/core/dashboard/views/helloworld.py%0A
impo
|
d60b460928c55c544b18c57c0eb697ae88fde9e0
|
Make masked fill values into nan before further processing to avoid issues with precision leading to different behaviours. (#632)
|
lib/improver/ensemble_calibration/ensemble_calibration_utilities.py
|
lib/improver/ensemble_calibration/ensemble_calibration_utilities.py
|
# -*- coding: utf-8 -*-
# -----------------------------------------------------------------------------
# (C) British Crown Copyright 2017-2018 Met Office.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# * Redistributions of source code must retain the above copyright notice, this
# list of conditions and the following disclaimer.
#
# * Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
#
# * Neither the name of the copyright holder nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
# ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
# SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
"""
This module defines all the utilities used by the "plugins"
specific for ensemble calibration.
"""
import numpy as np
import iris
def convert_cube_data_to_2d(
forecast, coord="realization", transpose=True):
"""
Function to convert data from a N-dimensional cube into a 2d
numpy array. The result can be transposed, if required.
Args:
forecast (iris.cube.Cube):
N-dimensional cube to be reshaped.
coord (string):
The data will be flattened along this coordinate.
transpose (boolean):
If True, the resulting flattened data is transposed.
This will transpose a 2d array of the format [:, coord]
to [coord, :].
If False, the resulting flattened data is not transposed.
This will result in a 2d array of format [:, coord].
Returns:
forecast_data (numpy.array):
Reshaped 2d array.
"""
forecast_data = []
for coord_slice in forecast.slices_over(coord):
forecast_data.append(coord_slice.data.flatten())
if transpose:
forecast_data = np.asarray(forecast_data).T
return np.array(forecast_data)
def check_predictor_of_mean_flag(predictor_of_mean_flag):
"""
Check the predictor_of_mean_flag at the start of the
estimate_coefficients_for_ngr method, to avoid having to check
and raise an error later.
Args:
predictor_of_mean_flag (string):
String to specify the input to calculate the calibrated mean.
Currently the ensemble mean ("mean") and the ensemble realizations
("realizations") are supported as the predictors.
"""
if predictor_of_mean_flag.lower() not in ["mean", "realizations"]:
msg = ("The requested value for the predictor_of_mean_flag {}"
"is not an accepted value."
"Accepted values are 'mean' or 'realizations'").format(
predictor_of_mean_flag.lower())
raise ValueError(msg)
|
Python
| 0
|
@@ -2628,16 +2628,116 @@
ta = %5B%5D%0A
+ if np.ma.is_masked(forecast.data):%0A forecast.data = np.ma.filled(forecast.data, np.nan)%0A%0A
for
|
51a9a02ccf4a133818f14f3ff6e864c1e041ec37
|
Update event_chat.py
|
ark/events/event_chat.py
|
ark/events/event_chat.py
|
from ark.chat_commands import ChatCommands
from ark.cli import *
from ark.database import Db
from ark.rcon import Rcon
class EventChat(object):
@classmethod
def output_chat_from_server(cls,text,line):
out(line)
@classmethod
def parse_chat_command(cls,steam_name,player_name,text,line):
ChatCommands.parse(steam_name,player_name,text)
@classmethod
def update_player_name(cls,steam_name,player_name,text,line):
steam_id = Rcon.find_online_steam_id(steam_name)
if steam_id:
Db.update_player(steam_id, steam_name=steam_name, name=player_name)
@classmethod
def store_chat(cls,steam_name,player_name,text,line):
player = Db.find_player(steam_name=player_name)
player_id = player.id if player is not None else None
Db.create_chat_entry(player_id,player_name,text)
@classmethod
def output_chat(cls,steam_name,player_name,text,line):
out(line)
|
Python
| 0.000002
|
@@ -930,28 +930,714 @@
xt,line):%0A out(line)%0A
+%0A @classmethod%0A def filter_chat(cls,steam_name,player_name,text,line):%0A words=text.split()%0A res=None%0A for word in words:%0A if res is None:%0A res=Db.check_word(word)%0A if res:%0A player=Db.find_player(steam_name=steam_name)%0A steamid=player.steam_id if player is not None else None%0A if steamid is not None:%0A %22%22%22Rcon.kick_player(steamid)%22%22%22%0A %22%22%22msg=Lang.get('chat_filter_player_kicked').format(player_name,res)%22%22%22%0A msg=Lang.get('chat_filter_forbidden_word').format(player_name,res)%0A Rcon.broadcast(msg, rcon.response_callback_response_only)%0A
|
e236b7d34cdf156cc16ba8c95b0526785e717898
|
update scenario
|
enquiry/tests/scenario.py
|
enquiry/tests/scenario.py
|
from enquiry.tests.model_maker import make_enquiry
def default_scenario_enquiry():
make_enquiry(
'Rick',
'Can I buy some hay?',
'',
'07840 538 357',
)
make_enquiry(
'Ryan',
(
'Can I see some of the fencing you have done?\n'
"I would like to see some of your standard agricultural "
"fencing on a local dairy farm. "
"I like this fencing: http://en.wikipedia.org/wiki/Fencing"
),
'test@pkimber.net',
'01234 567 890',
)
|
Python
| 0.000001
|
@@ -1,8 +1,89 @@
+from datetime import datetime%0A%0Afrom dateutil.relativedelta import relativedelta%0A%0A
from enq
@@ -625,14 +625,73 @@
7 890',%0A
+ email_sent=datetime.now() + relativedelta(days=1),%0A
)%0A
|
c13df93862fa154a51659dd119ca2390db10bfef
|
remove short circuit
|
rapidsms_httprouter/management/commands/send_messages.py
|
rapidsms_httprouter/management/commands/send_messages.py
|
import traceback
import time
from django.core.management.base import BaseCommand
from rapidsms.models import Backend, Connection, Contact
from rapidsms_httprouter.models import Message, MessageBatch
from rapidsms_httprouter.router import get_router
from django.conf import settings
from django.core.mail import send_mail
from django.db import transaction, close_connection
from urllib import quote_plus
from urllib2 import urlopen
from rapidsms.log.mixin import LoggerMixin
class Command(BaseCommand, LoggerMixin):
help = """sends messages from all project DBs
"""
def fetch_url(self, url):
"""
Wrapper around url open, mostly here so we can monkey patch over it in unit tests.
"""
response = urlopen(url, timeout=15)
return response.getcode()
def build_send_url(self, router_url, backend, recipients, text, **kwargs):
"""
Constructs an appropriate send url for the given message.
"""
# first build up our list of parameters
params = {
'backend': backend,
'recipient': recipients,
'text': text,
}
# make sure our parameters are URL encoded
params.update(kwargs)
for k, v in params.items():
try:
params[k] = quote_plus(str(v))
except UnicodeEncodeError:
params[k] = quote_plus(str(v.encode('UTF-8')))
# is this actually a dict? if so, we want to look up the appropriate backend
if type(router_url) is dict:
router_dict = router_url
backend_name = backend
# is there an entry for this backend?
if backend_name in router_dict:
router_url = router_dict[backend_name]
# if not, look for a default backend
elif 'default' in router_dict:
router_url = router_dict['default']
# none? blow the hell up
else:
self.error("No router url mapping found for backend '%s', check your settings.ROUTER_URL setting" % backend_name)
raise Exception("No router url mapping found for backend '%s', check your settings.ROUTER_URL setting" % backend_name)
# return our built up url with all our variables substituted in
full_url = router_url % params
return full_url
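# Hedged illustration (the kannel-style URL is hypothetical): parameters
# are quote_plus-encoded and substituted into the %(name)s template, e.g.
#
#     self.build_send_url(
#         'http://kannel/sendsms?to=%(recipient)s&text=%(text)s',
#         'console', '+256771234567', 'hello world')
#     # -> 'http://kannel/sendsms?to=%2B256771234567&text=hello+world'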
def send_backend_chunk(self, router_url, pks, backend_name):
msgs = Message.objects.using(self.db).filter(pk__in=pks).exclude(connection__identity__iregex="[a-z]")
try:
url = self.build_send_url(router_url, backend_name, ' '.join(msgs.values_list('connection__identity', flat=True)), msgs[0].text)
status_code = self.fetch_url(url)
# kannel likes to send 202 responses, really any
# 2xx value means things went okay
if int(status_code / 100) == 2:
self.info("SMS%s SENT" % pks)
msgs.update(status='S')
else:
self.info("SMS%s Message not sent, got status: %s .. queued for later delivery." % (pks, status_code))
msgs.update(status='Q')
except Exception as e:
self.error("SMS%s Message not sent: %s .. queued for later delivery." % (pks, str(e)))
msgs.update(status='Q')
def send_all(self, router_url, to_send):
pks = []
if len(to_send):
backend_name = to_send[0].connection.backend.name
for msg in to_send:
if backend_name != msg.connection.backend.name:
# send all of the same backend
self.send_backend_chunk(router_url, pks, backend_name)
# reset the loop status variables to build the next chunk of messages with the same backend
backend_name = msg.connection.backend.name
pks = [msg.pk]
else:
pks.append(msg.pk)
self.send_backend_chunk(router_url, pks, backend_name)
def send_individual(self, router_url):
to_process = Message.objects.using(self.db).filter(direction='O',
status__in=['Q']).order_by('priority', 'status', 'connection__backend__name')
if len(to_process):
self.send_all(router_url, [to_process[0]])
def handle(self, **options):
"""
"""
DBS = settings.DATABASES.keys()
#DBS.remove('default') # skip the dummy -we now check default DB as well
CHUNK_SIZE = getattr(settings, 'MESSAGE_CHUNK_SIZE', 400)
self.info("starting up")
recipients = getattr(settings, 'ADMINS', None)
if recipients:
recipients = [email for name, email in recipients]
while (True):
self.debug("entering main loop")
for db in DBS:
try:
self.debug("servicing db '%s'" % db)
router_url = settings.DATABASES[db]['ROUTER_URL']
transaction.enter_transaction_management(using=db)
self.db = db
to_process = MessageBatch.objects.using(db).filter(status='Q')
self.debug("looking for batch messages to process")
if to_process.count():
self.info("found %d batches in %s to process" % (to_process.count(), db))
try:
batch = to_process[0]
except IndexError:
self.info("%s is returning index error"% to_process)
batch = to_process
to_process = batch.messages.using(db).filter(direction='O',
status__in=['Q']).order_by('priority', 'status', 'connection__backend__name')[:CHUNK_SIZE]
if to_process.count():
self.debug("found batch message %d with Queued messages to send" % batch.pk)
self.send_all(router_url, to_process)
elif batch.messages.using(db).filter(status__in=['S', 'C']).count() == batch.messages.using(db).count():
self.info("found batch message %d ready to be closed" % batch.pk)
batch.status = 'S'
batch.save()
else:
self.debug("reverting to individual message sending")
self.send_individual(router_url)
else:
self.debug("no batches found, reverting to individual message sending")
self.send_individual(router_url)
transaction.commit(using=db)
except Exception, exc:
transaction.rollback(using=db)
print self.critical(traceback.format_exc(exc))
if recipients:
send_mail('[Django] Error: messenger command', str(traceback.format_exc(exc)), 'root@uganda.rapidsms.org', recipients, fail_silently=True)
continue
# yield from the messages table, messenger can cause
# deadlocks if it's constantly polling the messages table
close_connection()
time.sleep(0.5)
|
Python
| 0.999907
|
@@ -5438,215 +5438,8 @@
-try:%0A batch = to_process%5B0%5D%0A except IndexError:%0A self.info(%22%25s is returning index error%22%25 to_process)%0A
batc
@@ -5452,16 +5452,19 @@
_process
+%5B0%5D
%0A
|
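The command above URL-encodes each parameter with quote_plus and falls back to UTF-8 encoding for unicode values; that fallback is Python 2 specific. A hedged Python 3 equivalent of the encoding step, with made-up parameter values:

from urllib.parse import quote_plus

params = {'backend': 'kannel', 'recipient': '+256700000000', 'text': 'héllo'}
# In Python 3, quote_plus accepts str directly and encodes as UTF-8,
# so no UnicodeEncodeError fallback is needed.
encoded = {k: quote_plus(str(v)) for k, v in params.items()}
url = 'http://router/send?backend=%(backend)s&recipient=%(recipient)s&text=%(text)s' % encoded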
ecceb10500a395ce2cb79d913ab43187921468be
|
move fn towards dictionary comprehension
|
iatidataquality/dqparsetests.py
|
iatidataquality/dqparsetests.py
|
import re
import sys
import itertools
from functools import partial
import iatidataquality.models as models
class TestSyntaxError(Exception): pass
comment = re.compile('#')
blank = re.compile('^$')
def ignore_line(line):
return bool(comment.match(line) or blank.match(line))
def test_functions():
mappings = []
def add(regex):
def append_to_mappings(fn):
mappings.append((re.compile(regex),fn))
return fn
return append_to_mappings
def add_partial(regex):
def append_to_mappings(fn):
def partial_fn(groups):
return partial(fn, groups=groups)
mappings.append((re.compile(regex), partial_fn))
return fn
return append_to_mappings
@add('(\S*) is an? (.*)\?')
def is_an(groups):
if groups[1] == 'iso date':
return None
elif groups[1] == 'integer':
def int_check(x):
try:
int(x)
return True
except ValueError:
return False
def is_an_integer(activity):
return reduce(lambda x,y: x and y,
map(lambda x: int_check(x),
activity.xpath(groups[0])),
False)
return is_an_integer
@add_partial('(\S*) has more than (\S*) characters\?')
def text_chars(activity, groups):
return bool(reduce(lambda x,y: x or y,
map(lambda x: len(x)>int(groups[1]),
activity.xpath(groups[0])),
False))
def rm_blank(alist):
return filter(lambda x: x!='', alist)
@add_partial('(\S*) sum to (\S*)\?')
def sum(activity, groups):
return (reduce(lambda x,y: float(x)+float(y),
rm_blank(activity.xpath(groups[0])),
0)
== float(groups[1]))
@add_partial('(\S*) exists (\S*) times?\?')
def exist_times(activity, groups):
return len(rm_blank(activity.xpath(groups[0]))) == int(groups[1])
@add_partial('(\S*) exists more than (\S*) times?\?')
def exist_times(activity, groups):
return len(rm_blank(activity.xpath(groups[0]))) > int(groups[1])
def exist_check(activity, xpath):
return bool(rm_blank(activity.xpath(xpath)))
@add_partial('only one of (\S*) or (\S*) exists\?')
def exist_xor(activity, groups):
return (exist_check(activity, groups[0]) !=
exist_check(activity, groups[1]))
@add_partial('(\S*) or (\S*) exists\?')
def exist_or(activity, groups):
return (exist_check(activity, groups[0]) or
exist_check(activity, groups[1]))
@add_partial('(\S*) exists\?')
def exist(activity, groups):
return exist_check(activity, groups[0])
@add('(.*)')
def fail(line):
return None
def get_active_tests():
for test in models.Test.query.filter(models.Test.active == True).all():
yield test
def get_mappings(ms, line):
for regex, lam in ms:
yield regex.match(line), lam
first_true = lambda tupl: bool(tupl.__getitem__(0))
test_functions = {}
tests = get_active_tests()
tests = itertools.ifilter(lambda test: test.test_level == 1, tests)
tests = itertools.ifilter(lambda test: not ignore_line(test.name), tests)
for test in tests:
line = test.name
test_id = test.id
match_data = get_mappings(mappings, line)
matching_mappings = itertools.ifilter(first_true, match_data)
try:
m, lam = matching_mappings.next()
except StopIteration:
raise TestSyntaxError(line)
f = lam(m.groups())
test_functions[test_id] = f
return test_functions
|
Python
| 0.000018
|
@@ -3479,25 +3479,35 @@
+def function_
for
-
+_
test
- in
+(
test
-s
+)
:%0A
@@ -3848,44 +3848,131 @@
-test_functions%5B
+return test_id, f%0A%0A for test in tests:%0A
test_id
-%5D
+, f
= f
-%0A
+unction_for_test(test)%0A test_functions%5Btest_id%5D = f%0A
%0A
|
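The diff extracts a function_for_test(test) helper returning (test_id, f) pairs, which is the step that moves the loop toward a dictionary comprehension. A self-contained sketch of that final step, with stand-in definitions:

def function_for_test(test):
    # Stand-in for the helper the diff introduces; returns (test_id, fn)
    return test, (lambda activity: True)

tests = [1, 2, 3]

# Loop form, as in the patched code:
test_functions = {}
for test in tests:
    test_id, f = function_for_test(test)
    test_functions[test_id] = f

# Equivalent single expression over the same (key, value) pairs:
test_functions = dict(function_for_test(test) for test in tests)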
ddcd166b72ef96296a884f63f626c3ffd236059f
|
make tests pass without LMS settings
|
common/djangoapps/status/tests.py
|
common/djangoapps/status/tests.py
|
from django.conf import settings
from django.test import TestCase
from tempfile import NamedTemporaryFile
import os
from override_settings import override_settings
from status import get_site_status_msg
import xmodule.modulestore.django
from xmodule.modulestore.django import modulestore
from xmodule.modulestore import Location
from xmodule.modulestore.xml_importer import import_from_xml
class TestStatus(TestCase):
"""Test that the get_site_status_msg function does the right thing"""
no_file = None
invalid_json = """{
"global" : "Hello, Globe",
}"""
global_only = """{
"global" : "Hello, Globe"
}"""
toy_only = """{
"edX/toy/2012_Fall" : "A toy story"
}"""
global_and_toy = """{
"global" : "Hello, Globe",
"edX/toy/2012_Fall" : "A toy story"
}"""
# json to use, expected results for course=None (e.g. homepage),
# for toy course, for full course. Note that get_site_status_msg
# is supposed to return global message even if course=None. The
# template just happens to not display it outside the courseware
# at the moment...
checks = [
(no_file, None, None, None),
(invalid_json, None, None, None),
(global_only, "Hello, Globe", "Hello, Globe", "Hello, Globe"),
(toy_only, None, "A toy story", None),
(global_and_toy, "Hello, Globe", "Hello, Globe<br>A toy story", "Hello, Globe"),
]
def setUp(self):
xmodule.modulestore.django._MODULESTORES = {}
courses = modulestore().get_courses()
def find_course(course_id):
"""Assumes the course is present"""
return [c for c in courses if c.id==course_id][0]
self.full = find_course("edX/full/6.002_Spring_2012")
self.toy = find_course("edX/toy/2012_Fall")
def create_status_file(self, contents):
"""
Write contents to settings.STATUS_MESSAGE_PATH.
"""
with open(settings.STATUS_MESSAGE_PATH, 'w') as f:
f.write(contents)
def remove_status_file(self):
"""Delete the status file if it exists"""
if os.path.exists(settings.STATUS_MESSAGE_PATH):
os.remove(settings.STATUS_MESSAGE_PATH)
def tearDown(self):
self.remove_status_file()
def test_get_site_status_msg(self):
"""run the tests"""
for (json_str, exp_none, exp_toy, exp_full) in self.checks:
self.remove_status_file()
if json_str:
self.create_status_file(json_str)
print "checking results for {0}".format(json_str)
print "course=None:"
self.assertEqual(get_site_status_msg(None), exp_none)
print "course=toy:"
self.assertEqual(get_site_status_msg(self.toy), exp_toy)
print "course=full:"
self.assertEqual(get_site_status_msg(self.full), exp_full)
|
Python
| 0
|
@@ -68,42 +68,24 @@
rom
-tempfile import NamedTemporaryFile
+mock import Mock
%0Aimp
@@ -138,16 +138,56 @@
settings
+%0Afrom tempfile import NamedTemporaryFile
%0A%0Afrom s
@@ -224,196 +224,220 @@
sg%0A%0A
-import xmodule.modulestore.django%0Afrom xmodule.modulestore.django import modulestore%0Afrom xmodule.modulestore import Location%0Afrom xmodule.modulestore.xml_importer import import_from_xml%0A%0A
+# Get a name where we can put test files%0ATMP_FILE = NamedTemporaryFile(delete=False)%0ATMP_NAME = TMP_FILE.name%0A# Close it--we just want the path.%0ATMP_FILE.close()%0A%0A%0A@override_settings(STATUS_MESSAGE_PATH=TMP_NAME)
%0Acla
@@ -1543,69 +1543,32 @@
-xmodule.modulestore.django._MODULESTORES = %7B%7D%0A
+%22%22%22%0A
-
+Mock
courses
= m
@@ -1567,281 +1567,252 @@
rses
- = modulestore().get_courses()%0A%0A def find_course(course_id):%0A %22%22%22Assumes the course is present%22%22%22%0A return %5Bc for c in courses if c.id==course_id%5D%5B0%5D%0A%0A self.full = find_course(%22edX/full/6.002_Spring_2012%22)%0A self.toy = find_course(%22
+, since we don't have to have full django%0A settings (common tests run without the lms settings imported)%0A %22%22%22%0A self.full = Mock()%0A self.full.id = 'edX/full/2012_Fall'%0A self.toy = Mock()%0A self.toy.id = '
edX/
@@ -1824,18 +1824,17 @@
012_Fall
-%22)
+'
%0A%0A de
|
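The patch replaces real modulestore courses with Mock() objects so the test no longer needs full LMS settings. A minimal sketch of that substitution (using the stdlib unittest.mock here; the patch itself imports the standalone mock package):

from unittest.mock import Mock

course = Mock()
course.id = 'edX/toy/2012_Fall'   # any attribute can be assigned on a Mock
assert course.id == 'edX/toy/2012_Fall'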
c2f99fe178ff853e87b3f034394b18956d395e87
|
Change credits verbose_name to authorship.
|
ideascube/mediacenter/models.py
|
ideascube/mediacenter/models.py
|
from django.core.urlresolvers import reverse
from django.db import models
from django.utils.translation import ugettext_lazy as _
from taggit.managers import TaggableManager
from ideascube.models import (
LanguageField, SortedTaggableManager, TimeStampedModel)
from ideascube.search.models import SearchableQuerySet, SearchMixin
class DocumentQuerySet(SearchableQuerySet, models.QuerySet):
def image(self):
return self.filter(kind=Document.IMAGE)
def video(self):
return self.filter(kind=Document.VIDEO)
def pdf(self):
return self.filter(kind=Document.PDF)
def text(self):
return self.filter(kind=Document.TEXT)
def audio(self):
return self.filter(kind=Document.AUDIO)
class Document(SearchMixin, TimeStampedModel):
IMAGE = 'image'
VIDEO = 'video'
PDF = 'pdf'
EPUB = 'epub'
TEXT = 'text'
AUDIO = 'audio'
APP = 'app'
OTHER = 'other'
KIND_CHOICES = (
(IMAGE, _('image')),
(AUDIO, _('sound')),
(VIDEO, _('video')),
(PDF, _('pdf')),
(TEXT, _('text')),
(EPUB, _('epub')),
(APP, _('app')),
(OTHER, _('other')),
)
KIND_DICT = dict(KIND_CHOICES)
title = models.CharField(verbose_name=_('title'), max_length=100)
summary = models.TextField(verbose_name=_('summary'))
lang = LanguageField(verbose_name=_('Language'), max_length=10, blank=True)
original = models.FileField(verbose_name=_('original'),
upload_to='mediacenter/document',
max_length=10240)
preview = models.ImageField(verbose_name=_('preview'),
upload_to='mediacenter/preview',
max_length=10240,
blank=True)
credits = models.CharField(verbose_name=_('credit'), max_length=300)
kind = models.CharField(verbose_name=_('type'),
max_length=5,
choices=KIND_CHOICES,
default=OTHER)
objects = DocumentQuerySet.as_manager()
tags = TaggableManager(blank=True, manager=SortedTaggableManager)
package_id = models.CharField(verbose_name=_('package'), max_length=100,
blank=True)
class Meta:
ordering = ["-modified_at", ]
def __str__(self):
return self.title
def __repr__(self):
return '<{}: {}>'.format(self.kind, str(self))
def get_absolute_url(self):
return reverse('mediacenter:document_detail', kwargs={'pk': self.pk})
@property
def index_strings(self):
return (self.title, self.summary, self.credits,
u' '.join(self.tags.names()))
@property
def index_lang(self):
return self.lang
@property
def index_kind(self):
return self.kind
@property
def index_source(self):
return self.package_id
@property
def index_tags(self):
return self.tags.slugs()
@property
def slug(self):
return self.get_kind_display()
|
Python
| 0
|
@@ -1866,22 +1866,26 @@
name=_('
-credit
+Authorship
'), max_
|
f113aaae2232d0041e01a6f12ab2ba083df65d44
|
Change submit module to use new interface.
|
autocms/submit.py
|
autocms/submit.py
|
"""Functions to submit and register new jobs."""
import os
import socket
def submit_and_stamp(counter, testname, scheduler, config):
"""Submit a job to the scheduler and produce a newstamp file.
This function should be run from within the test directory.
If the submission fails an output log will be produced with the
standard output of the submitter.
The name of the newstamp file is returned."""
result = scheduler.submit_job(counter, testname, config)
newstamp = str(result.id) + ' ' + str(timestamp) + ' ' + str(returncode)
if returncode != 0:
logfile_name = (testname + '.submission.' + str(counter) +
'.' + str(timestamp) + '.log')
newstamp += ' ' + logfile_name
log = "Job submission failed at {0}\n".format(timestamp)
log += "On node {0}\n".format(socket.gethostname())
log += "Submission command output:\n\n"
for line in output:
log += line + '\n'
with open(logfile_name, 'w') as logfile:
logfile.write(log)
newstamp += "\n"
newstamp_filename = 'newstamp.' + str(timestamp)
with open(newstamp_filename, 'w') as nsfile:
nsfile.write(newstamp)
return newstamp_filename
def get_job_counter():
"""Return an integer for the counter to pass to the next job.
This should be called from within the test directory."""
if os.path.exists('counter'):
with open('counter') as handle:
count = handle.read()
else:
count = 1
return int(count)
def set_job_counter(count):
"""Write the job counter to file.
This should be called from within the test directory."""
with open('counter', 'w') as handle:
handle.write(str(count))
|
Python
| 0
|
@@ -57,22 +57,8 @@
os%0A
-import socket%0A
%0A%0Ade
@@ -192,185 +192,19 @@
Th
-is function should be run from within the test directory.%0A If the submission fails an output log will be produced with the%0A standard output of the submitter.%0A%0A The name
+e full path
of
@@ -306,606 +306,248 @@
-new
stamp
- = str(result.id) + ' ' + str(timestamp) + ' ' + str(returncode)%0A if returncode != 0:%0A logfile_name = (testname + '.submission.' + str(counter) +%0A '.' + str(timestamp) + '.log'
+_filename = ('stamp.' +%0A str(result.submit_time) +%0A str(counter)
)%0A
-
- newstamp += ' ' + logfile_name%0A log = %22Job submission failed at %7B0%7D%5Cn%22.format(timestamp)%0A log += %22On node %7B0%7D%5Cn%22.format(socket.gethostname())%0A log += %22Submission command output:%5Cn%5Cn%22%0A for line in output:%0A log += line + '%5Cn'%0A with open(logfile_name, 'w') as logfile:%0A logfile.write(log)%0A newstamp += %22%5Cn%22%0A
+stamp_path = os.path.join(config%5B'AUTOCMS_BASEDIR'%5D,%0A testname,%0A
-new
stam
@@ -560,38 +560,8 @@
name
- = 'newstamp.' + str(timestamp
)%0A
@@ -572,33 +572,26 @@
th open(
-new
stamp_
-filename
+path
, 'w') a
@@ -592,18 +592,21 @@
'w') as
-n
s
+tamp
file:%0A
@@ -611,18 +611,21 @@
-n
s
+tamp
file.wri
@@ -627,24 +627,30 @@
e.write(
-new
+result.
stamp
+()
)%0A re
@@ -658,25 +658,18 @@
urn
-new
stamp_
-filename
+path
%0A%0A%0Ad
@@ -687,16 +687,32 @@
counter(
+testname, config
):%0A %22
@@ -775,70 +775,155 @@
job.
-%0A
+%22%22%22
%0A
-This should be called from within the test directory.%22%22%22
+counter_path = os.path.join(config%5B'AUTOCMS_BASEDIR'%5D,%0A testname,%0A 'counter')
%0A
@@ -941,25 +941,28 @@
.exists(
-'
counter
-'
+_path
):%0A
@@ -974,25 +974,28 @@
th open(
-'
counter
-'
+_path
) as han
@@ -1110,16 +1110,34 @@
er(count
+, testname, config
):%0A %22
@@ -1172,70 +1172,155 @@
ile.
-%0A
+%22%22%22
%0A
-This should be called from within the test directory.%22%22%22
+counter_path = os.path.join(config%5B'AUTOCMS_BASEDIR'%5D,%0A testname,%0A 'counter')
%0A
@@ -1330,25 +1330,28 @@
th open(
-'
counter
-'
+_path
, 'w') a
|
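The rewrite replaces cwd-relative filenames ('counter', 'newstamp.<timestamp>') with paths anchored at the configured base directory, so the functions no longer have to run inside the test directory. A sketch with a hypothetical config value:

import os

config = {'AUTOCMS_BASEDIR': '/var/autocms'}   # illustrative value
testname = 'mytest'

counter_path = os.path.join(config['AUTOCMS_BASEDIR'], testname, 'counter')
# '/var/autocms/mytest/counter', regardless of the current working directory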
0973acf04fd2fd59db4880d5ba4d994f4c1733db
|
Add length detection for PNG images.
|
identifiers/image_identifier.py
|
identifiers/image_identifier.py
|
# Identifier for basic image files
from identifier import Result
JPEG_PATTERNS = [
'FF D8 FF E0',
'FF D8 FF E1',
'FF D8 FF FE',
]
GIF_PATTERNS = [
'47 49 46 38 39 61',
'47 49 46 38 37 61',
]
PNG_PATTERNS = [
'89 50 4E 47'
]
BMP_PATTERNS = [
'42 4D 62 25',
'42 4D F8 A9',
'42 4D 76 02',
]
ICO_PATTERNS = [
'00 00 01 00'
]
class PngResolver:
def identify(self, stream):
return Result('PNG', 'PNG image file')
class JpegResolver:
def identify(self, stream):
return Result('JPEG', 'JPEG image file')
class GifResolver:
def identify(self, stream):
return Result('GIF', 'GIF image file')
class BmpResolver:
def identify(self, stream):
return Result('BMP', 'BMP image file')
class IcoResolver:
def identity(self, stream):
return Result('ICO', 'Windows icon file')
def load(hound):
# Register JPEGs
hound.add_matches(JPEG_PATTERNS, JpegResolver())
# Register PNGs
hound.add_matches(PNG_PATTERNS, PngResolver())
# Register GIFs
hound.add_matches(GIF_PATTERNS, GifResolver())
# Register BMPs
hound.add_matches(BMP_PATTERNS, BmpResolver())
# Register ICOs
hound.add_matches(ICO_PATTERNS, IcoResolver())
|
Python
| 0
|
@@ -1,69 +1,248 @@
%0A
-# Identifier for basic image files%0Afrom identifier import Result
+import io%0Afrom struct import unpack%0Aimport sys%0Afrom identifier import Result%0A%0A#############%0A# Constants #%0A#############%0A%0APNG_CHUNK_IEND = b'IEND'%0APNG_CHUNK_IHDR = b'IHDR'%0A%0A#######################%0A# Identifier Patterns #%0A#######################
%0A%0AJP
@@ -402,16 +402,28 @@
50 4E 47
+ 0D 0A 1A 0A
'%0A%5D%0A%0ABMP
@@ -526,96 +526,1140 @@
%0A%5D%0A%0A
-class PngResolver:%0A%09def identify(self, stream):%0A%09%09return Result('PNG', 'PNG image file')
+def read4UB(stream):%0A%09return unpack('%3EI', stream.read(4))%5B0%5D%0A%0Aclass PngResolver:%0A%09def next_chunk(self, stream):%0A%09%09%22%22%22%0A%09%09Assumes there is a chunk at the current position in the stream.%0A%09%09Returns the name of the current chunk and its length.%0A%09%09Also advances the stream to the start of the next chunk.%0A%09%09%22%22%22%0A%09%09chunk_len = read4UB(stream)%0A%09%09chunk_name = stream.read(4)%0A%09%09stream.seek(chunk_len + 4, io.SEEK_CUR)%0A%09%09return (chunk_name, chunk_len)%0A%0A%09def identify(self, stream):%0A%09%09try:%0A%09%09%09origin = stream.tell()%0A%09%09%09# Skip to the beginning of the first PNG chunk%0A%09%09%09stream.seek(origin + 8)%0A%09%09%09# Check to make sure the first chunk is the IHDR chunk%0A%09%09%09chunk_name, chunk_len = self.next_chunk(stream)%0A%09%09%09if chunk_name != PNG_CHUNK_IHDR or chunk_len != 0x0D:%0A%09%09%09%09return%0A%0A%09%09%09# Loop through till we find the final chunk%0A%09%09%09while chunk_name != PNG_CHUNK_IEND:%0A%09%09%09%09chunk_name, chunk_len = self.next_chunk(stream)%0A%0A%09%09%09# Now calculate the actual file length%0A%09%09%09end = stream.tell()%0A%09%09%09length = end - origin%0A%09%09%09return Result('PNG', 'PNG image file', length=length)%0A%09%09except BaseException as e:%0A%09%09%09print(e, file=sys.stderr)%0A%09%09%09# Ignore all errors%0A%09%09%09pass
%0A%0Acl
|
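The patch computes a PNG's length by walking its chunk list until IEND. A self-contained sketch of the same traversal (PNG layout: 8-byte signature, then chunks of 4-byte big-endian length, 4-byte type, data, 4-byte CRC):

import io
from struct import pack, unpack

def read4ub(stream):
    # 4-byte big-endian unsigned integer, as used for PNG chunk lengths
    return unpack('>I', stream.read(4))[0]

def png_length(stream):
    origin = stream.tell()
    stream.seek(origin + 8)                    # skip the PNG signature
    chunk_name = b''
    while chunk_name != b'IEND':
        length = read4ub(stream)
        chunk_name = stream.read(4)
        stream.seek(length + 4, io.SEEK_CUR)   # skip chunk data + CRC
    return stream.tell() - origin

# Minimal fake stream: signature plus a single empty IEND chunk
fake = io.BytesIO(b'\x89PNG\r\n\x1a\n' + pack('>I', 0) + b'IEND' + b'\x00' * 4)
print(png_length(fake))   # 8-byte signature + 12-byte IEND chunk -> 20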
d7c5b8784fd747355884e3371f1c85ede9a9bf6f
|
Disable some packages for now, so that packaging can finish on the buildbots as they are. This should let wrench run the Mono test suite.
|
profiles/mono-mac-release-64/packages.py
|
profiles/mono-mac-release-64/packages.py
|
import os
from bockbuild.darwinprofile import DarwinProfile
class MonoReleasePackages:
def __init__(self):
# Toolchain
#package order is very important.
#autoconf and automake don't depend on CC
#ccache uses a different CC since it's not installed yet
#every thing after ccache needs a working ccache
self.packages.extend ([
'autoconf.py',
'automake.py',
'ccache.py',
'libtool.py',
'xz.py',
'tar.py',
'gettext.py',
'pkg-config.py'
])
#needed to autogen gtk+
self.packages.extend ([
'gtk-osx-docbook.py',
'gtk-doc.py',
])
# # Base Libraries
self.packages.extend([
'libpng.py', #needed by cairo
'libjpeg.py',
'libtiff.py',
'libgif.py',
'libxml2.py',
'freetype.py',
'fontconfig.py',
'pixman.py', #needed by cairo
'cairo.py', #needed by Mono graphics functions (System.Drawing)
'libffi.py', #needed by glib
'glib.py',
'pango.py',
'atk.py',
'intltool.py',
'gdk-pixbuf.py',
'gtk+.py',
'libglade.py',
'sqlite.py',
'expat.py',
'ige-mac-integration.py'
])
# # Theme
self.packages.extend([
'libcroco.py',
'librsvg.py',
'hicolor-icon-theme.py',
'gtk-engines.py',
'murrine.py',
'xamarin-gtk-theme.py',
'gtk-quartz-engine.py'
])
# Mono
self.packages.extend([
'mono-llvm.py',
'mono-master.py',
'libgdiplus.py',
'xsp.py',
'gtk-sharp-2.12-release.py',
'boo.py',
# 'nant.py',
'ironlangs.py',
'fsharp-3.1.py',
'mono-addins.py',
'mono-basic.py',
])
self.packages = [os.path.join('..', '..', 'packages', p) for p in self.packages]
|
Python
| 0
|
@@ -1367,24 +1367,25 @@
er.py',%0A%09%09%09%09
+#
'libgdiplus.
@@ -1393,16 +1393,17 @@
y',%0A%09%09%09%09
+#
'xsp.py'
@@ -1404,24 +1404,25 @@
sp.py',%0A%09%09%09%09
+#
'gtk-sharp-2
@@ -1442,16 +1442,17 @@
y',%0A%09%09%09%09
+#
'boo.py'
@@ -1474,16 +1474,17 @@
y',%0A%09%09%09%09
+#
'ironlan
@@ -1495,16 +1495,17 @@
y',%0A%09%09%09%09
+#
'fsharp-
@@ -1513,24 +1513,25 @@
.1.py',%0A%09%09%09%09
+#
'mono-addins
@@ -1536,24 +1536,25 @@
ns.py',%0A%09%09%09%09
+#
'mono-basic.
|
fe0d872c69280b5713a4ad6f0a1cd4a5623fdd75
|
Add createnapartcommand contents
|
cadnano/part/createnapartcommand.py
|
cadnano/part/createnapartcommand.py
|
Python
| 0
|
@@ -0,0 +1,705 @@
+from ast import literal_eval%0A%0Afrom cadnano.cnproxy import UndoCommand%0Afrom cadnano.part.nucleicacidpart import NucleicAcidPart%0A%0A%0Aclass CreateNucleicAcidPartCommand(UndoCommand):%0A def __init__(self, document, grid_type, use_undostack):%0A # TODO%5BNF%5D: Docstring%0A super(CreateNucleicAcidPartCommand, self).__init__(%22Create NA Part%22)%0A self.document = document%0A self.grid_type = grid_type%0A self.use_undostack = use_undostack%0A%0A def redo(self):%0A new_part = NucleicAcidPart(document=self.document, grid_type=self.grid_type)%0A self.document._addPart(new_part, use_undostack=self.use_undostack)%0A%0A%0A def undo(self):%0A self.document.deactivateActivePart()%0A
|
|
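The new file wraps part creation in an UndoCommand whose redo()/undo() apply and revert the change. A generic sketch of that command pattern outside cadnano, with illustrative names:

class AddItemCommand:
    """redo() applies the change, undo() reverts it; an undo stack replays them."""
    def __init__(self, container, item):
        self.container = container
        self.item = item

    def redo(self):
        self.container.append(self.item)

    def undo(self):
        self.container.remove(self.item)

items, undo_stack = [], []
cmd = AddItemCommand(items, 'part')
cmd.redo(); undo_stack.append(cmd)   # perform and remember
undo_stack.pop().undo()              # items is [] again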
72117d55715b80df0a01fa519be09bfeec0bc272
|
fix empty tag generation bug
|
ezblog/blog/views.py
|
ezblog/blog/views.py
|
from django.core.paginator import Paginator, PageNotAnInteger, EmptyPage
from django.core.urlresolvers import reverse
from django.http import Http404, HttpResponse
from django.shortcuts import render, redirect, get_object_or_404
from .models import Post, Category, Tag
# index
def index(request):
per_page = 2
page = request.GET.get('page', 1)
if request.user.is_authenticated():
pg = Paginator(Post.objects.all(), per_page)
else:
pg = Paginator(Post.objects.filter(status='public'), per_page)
try:
contents = pg.page(page)
except PageNotAnInteger:
contents = pg.page(1)
except EmptyPage:
contents = []
ctx = {
'posts': contents,
}
return render(request, 'list.html', ctx)
# posts
def posts(request, pk):
if request.method == 'GET':
return __get_post(request, pk)
elif request.method == 'PUT':
url = reverse('blog:index')
return redirect(url)
elif request.method == 'DELETE':
return __delete_post(request, pk)
else:
raise Http404
def __get_post(request, pk):
post = get_object_or_404(Post, pk=pk)
ctx = {
'post': post,
}
return render(request, 'detail.html', ctx)
def __delete_post(request, pk):
post = get_object_or_404(Post, pk=pk)
post.delete()
response = HttpResponse()
response.status_code = 200
return response
# create_post
def create_post(request):
if request.method == 'POST':
return __create_post(request)
else:
raise Http404
def __create_post(request):
title = request.POST.get('title')
content = request.POST.get('content')
category_pk = request.POST.get('category')
status = request.POST.get('status')
tags = request.POST.get('tags').split(',')
new_post = Post()
new_post.title = title
new_post.content = content
if category_pk:
new_post.category = Category.objects.get(pk=category_pk)
new_post.status = status
new_post.save()
if tags:
for name in tags:
name = name.strip()
print(name)
try:
tag = Tag.objects.get(name=name)
except Tag.DoesNotExist:
tag = Tag()
tag.name = name
tag.save()
new_post.tags.add(tag)
new_post.save()
url = reverse('blog:posts', kwargs={'pk': new_post.pk})
return redirect(url)
# create_form
def create_form(request):
if request.method == 'GET':
return __create_form(request)
else:
raise Http404
def __create_form(request):
categories = Category.objects.all()
post = Post()
status_choices = post.get_status_choices()
ctx = {
'categories': categories,
'status_choices': status_choices,
}
return render(request, 'edit.html', ctx)
|
Python
| 0.000003
|
@@ -1765,16 +1765,69 @@
tags =
+ request.POST.get('tags')%0A if tags:%0A tags =
request
@@ -2157,20 +2157,21 @@
-print(name)%0A
+if name:%0A
@@ -2175,32 +2175,36 @@
try:%0A
+
@@ -2244,24 +2244,28 @@
+
+
except Tag.D
@@ -2285,32 +2285,36 @@
+
tag = Tag()%0A
@@ -2321,24 +2321,28 @@
+
+
tag.name = n
@@ -2357,24 +2357,28 @@
+
tag.save()%0A
@@ -2372,24 +2372,28 @@
tag.save()%0A
+
|
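The root cause of the empty-tag bug is that splitting an empty string still yields one empty element, so the view must guard both the raw field and each name:

>>> ''.split(',')
['']
>>> 'a, ,b'.split(',')
['a', ' ', 'b']          # blank entries survive, hence the `if name:` check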
b0858f61b29d1d4357c61f90cd45fd92a4b72c86
|
Add the configs in the order the user is prompted
|
fabfile/configure.py
|
fabfile/configure.py
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import os
import ConfigParser
from fabric.tasks import Task
from fabric.colors import green
from fabric.api import task, env, sudo
from fabric.operations import put, prompt
# default to configuring DEV environment
os.environ.setdefault("CALACCESS_WEBSITE_ENV", "DEV")
cp_sect = os.getenv('CALACCESS_WEBSITE_ENV').upper()
#
# Tasks
#
@task
def setconfig(key, value):
"""
Add or edit a key-value pair in the .env configuration file.
"""
# Get the existing config
cp = ConfigParser.SafeConfigParser()
cp.read(env.config_file)
# if the config file section is not there, add it
if not cp.has_section(cp_sect):
cp.add_section(cp_sect)
# Set the value provided by the user
cp.set(cp_sect, key, value)
# Write to the .env file
with open(env.config_file, 'wb') as f:
cp.write(f)
@task
def createconfig():
"""
Prompt users for settings to be stored in the config_file.
"""
# Kick it off
print('')
print('Configuration')
print('=================')
print('')
# Request data from the user
config = {}
config['AWS_ACCESS_KEY_ID'] = prompt('Your AWS access key:')
config['AWS_SECRET_ACCESS_KEY'] = prompt('Your AWS secret key:')
config['AWS_REGION_NAME'] = prompt(
'Your AWS region name:',
default='us-west-2',
)
config['KEY_NAME'] = prompt('Your AWS key name:', default='my-key-pair')
config['DB_NAME'] = prompt('Database name:', default='calaccess_website')
config['DB_USER'] = prompt('Database user:', default=env.app_user)
config['DB_PASSWORD'] = prompt('Database user password:')
config['S3_ARCHIVED_DATA_BUCKET'] = prompt(
'Name of the S3 bucket for archived data:',
default='django-calaccess-dev-data-archive',
)
config['S3_BAKED_CONTENT_BUCKET'] = prompt(
'Name of the S3 bucket for baked content:',
default='django-calaccess-dev-baked-content',
)
config['EMAIL_USER'] = prompt('E-mail user:')
config['EMAIL_PASSWORD'] = prompt('E-mail password:')
config['RDS_HOST'] = prompt('RDS host:')
config['EC2_HOST'] = prompt('EC2 host:')
# Save it to the configuration file
[setconfig(k, v) for k, v in config.items()]
# Tell the user they are done
print('')
print(green('That\'s it. All set up!'))
print('Configuration saved in {0}'.format(env.config_file))
print('')
@task
def printconfig():
"""
Print out the configuration settings for the local environment.
"""
# Loop through the current configuration
for k, v in getconfig().items():
# Print out each setting
print("{}:{}".format(k, v))
@task
def printenv():
"""
Print out the Fabric env settings.
"""
# Load settings from the config file
loadconfig()
# Loop through the Fabric env
for k, v in sorted(env.items()):
# Print out each setting
print("{}:{}".format(k, v))
@task
def copyconfig():
"""
Copy configurations in local dev environment to current ec2 instance.
"""
# Load settings from the config file
loadconfig()
put(env.config_file, env.repo_dir, use_sudo=True)
sudo('chown {}:{} {}'.format(
env.app_user,
env.app_group,
os.path.join(env.repo_dir, '.env'))
)
#
# Helpers
#
def getconfig():
"""
Return a dict of the vars currently in the config_file
"""
cp = ConfigParser.SafeConfigParser()
cp.read(env.config_file)
# Uppercase the settings
d = dict((k.upper(), v) for k, v in cp.items(cp_sect))
# Pass it out
return d
def loadconfig():
"""
Load configuration settings into the Fabric env
"""
# If the config file doesn't exist, force its creation
if not os.path.isfile(env.config_file):
createconfig()
# Load all of the configuration settings
config = getconfig()
for k, v in config.iteritems():
env[k] = v
# If there is an EC2_HOST set, patch it onto the Fabric env object
if hasattr(env, 'EC2_HOST'):
env.hosts = [env.EC2_HOST]
env.host = env.EC2_HOST
env.host_string = env.EC2_HOST
# If there is a KEY_NAME set, path it onto the Fabric env object
if hasattr(env, 'KEY_NAME'):
key_path = os.path.join(env.key_file_dir, "%s.pem" % env.KEY_NAME)
env.key_filename = (key_path,)
class ConfigTask(Task):
"""
A custom Fabric @task that loads settings from an external configuration
file before execution.
"""
def __init__(self, func, *args, **kwargs):
super(ConfigTask, self).__init__(*args, **kwargs)
self.func = func
def __call__(self):
self.run()
def run(self, *args, **kwargs):
loadconfig() # <-- the action
return self.func(*args, **kwargs)
|
Python
| 0.000039
|
@@ -49,16 +49,52 @@
port os%0A
+from collections import OrderedDict%0A
import C
@@ -1174,18 +1174,29 @@
onfig =
-%7B%7D
+OrderedDict()
%0A con
|
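The patch swaps the plain dict for an OrderedDict so the settings are written out in the same order the user was prompted. A minimal sketch (plain dicts only guarantee insertion order from Python 3.7; this fabfile targets Python 2):

from collections import OrderedDict

config = OrderedDict()
config['AWS_ACCESS_KEY_ID'] = 'AKIA...'       # illustrative values
config['AWS_SECRET_ACCESS_KEY'] = 'secret'

for key, value in config.items():            # iterates in insertion order
    print('%s = %s' % (key, value))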
899254d3bd064ba8e5653ad9081674b7af1495fa
|
fix capture=True
|
fabfile/openstack.py
|
fabfile/openstack.py
|
#!/usr/bin/env python
# vim: tabstop=8 expandtab shiftwidth=4 softtabstop=4
import os
import yaml
from fabric.api import task, local, settings, warn_only
from cuisine import file_exists
@task
def up():
""" Boot instances """
# call class OpenStack
op = OpenStack()
# Check if fingerprint exists on the list
op.check_key()
class OpenStack:
def __init__(self):
cfg_dir = os.path.dirname(__file__).replace('fabfile','ymlfile')
cfg_file = '{0}/{1}'.format(cfg_dir, 'openstack.yml')
f = open(cfg_file)
self.cfg = yaml.safe_load(f)
self.cfg['key_file'] = os.path.abspath(os.path.expanduser(self.cfg['key_file']))
f.close()
self.key_fingerprint = \
local('ssh-keygen -l -f {}|awk \'{{print $2}}\''.format(self.cfg['key_file']), capture=True)
def check_key(self):
if not os.path.exists(self.cfg['key_file']):
print "{} doesn't exist".format(self.cfg['key_file'])
exit(1)
with settings(warn_only=True):
output = local('nova keypair-list|grep {}'.format(self.key_fingerprint))
print '#### ', output
if not output.return_code == 0:
print "ERROR: your key is not registered yet."
exit(1)
if not output.split()[1] == self.cfg['key_name']:
print "your key is already registered with a different name."
exit(1)
#def check_image(self):
# with settings(warn_only=True):
|
Python
| 0.998992
|
@@ -1133,43 +1133,23 @@
int)
-)%0A print '#### ', output
+, capture=True)
%0A
|
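The fix moves capture=True onto the local() call whose output is parsed. In Fabric 1, capture=True makes local() return the command's stdout (a string carrying a return_code attribute) instead of echoing it. A hedged sketch of the pattern, assuming Fabric 1.x:

from fabric.api import local, settings

with settings(warn_only=True):            # don't abort on a non-zero exit
    output = local('nova keypair-list | grep mykey', capture=True)
if output.return_code == 0:               # the captured string carries the code
    print(output.split())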
0fb32166825d630cc5e87b39588e280737567448
|
Fix AWS Athena Sensor object has no attribute 'mode' (#4844)
|
airflow/contrib/sensors/aws_athena_sensor.py
|
airflow/contrib/sensors/aws_athena_sensor.py
|
# -*- coding: utf-8 -*-
#
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
from airflow.exceptions import AirflowException
from airflow.utils.decorators import apply_defaults
from airflow.contrib.hooks.aws_athena_hook import AWSAthenaHook
from airflow.sensors.base_sensor_operator import BaseSensorOperator
class AthenaSensor(BaseSensorOperator):
"""
Asks for the state of the query until it reaches a failure or success state.
If it fails, the task fails.
:param query_execution_id: query_execution_id to check the state of
:type query_execution_id: str
:param max_retires: Number of times to poll for query state before
returning the current state, defaults to None
:type max_retires: int
:param aws_conn_id: aws connection to use, defaults to 'aws_default'
:type aws_conn_id: str
:param sleep_time: Time to wait between two consecutive call to
check query status on athena, defaults to 10
:type sleep_time: int
"""
INTERMEDIATE_STATES = ('QUEUED', 'RUNNING',)
FAILURE_STATES = ('FAILED', 'CANCELLED',)
SUCCESS_STATES = ('SUCCEEDED',)
template_fields = ['query_execution_id']
template_ext = ()
ui_color = '#66c3ff'
@apply_defaults
def __init__(self,
query_execution_id,
max_retires=None,
aws_conn_id='aws_default',
sleep_time=10,
*args, **kwargs):
super(BaseSensorOperator, self).__init__(*args, **kwargs)
self.aws_conn_id = aws_conn_id
self.query_execution_id = query_execution_id
self.hook = None
self.sleep_time = sleep_time
self.max_retires = max_retires
def poke(self, context):
self.hook = self.get_hook()
self.hook.get_conn()
state = self.hook.poll_query_status(self.query_execution_id, self.max_retires)
if state in self.FAILURE_STATES:
raise AirflowException('Athena sensor failed')
if state in self.INTERMEDIATE_STATES:
return False
return True
def get_hook(self):
return AWSAthenaHook(self.aws_conn_id, self.sleep_time)
|
Python
| 0
|
@@ -2190,32 +2190,26 @@
super(
-BaseSensorOperat
+AthenaSens
or, self
|
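The bug: super(BaseSensorOperator, self) starts the MRO lookup *above* BaseSensorOperator, so its __init__ (which sets attributes such as mode) never runs. A minimal sketch of the failure and the fix, with stand-in classes:

class Base(object):
    def __init__(self):
        self.mode = 'poke'            # attribute set by the parent constructor

class Child(Base):
    def __init__(self):
        # Wrong: super(Base, self).__init__() looks past Base, skipping it,
        # so self.mode would never be set.
        # Right: name the current class (or use bare super() on Python 3).
        super(Child, self).__init__()

assert Child().mode == 'poke'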
0364ddd42b47aed0367746a3081f3064f9d1fa45
|
Fix typo in TypedFlagHolder docstring.
|
fancyflags/_flags.py
|
fancyflags/_flags.py
|
# Copyright 2021 DeepMind Technologies Limited.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
"""Flag classes for defining dict, Item, MultiItem and Auto flags."""
import copy
import functools
from typing import Generic, TypeVar
from absl import flags
_EMPTY = ""
_T = TypeVar("_T")
class DictFlag(flags.Flag):
"""Implements the shared dict mechanism. See also `ItemFlag`."""
def __init__(self, shared_dict, *args, **kwargs):
self._shared_dict = shared_dict
super().__init__(*args, **kwargs)
def _parse(self, value):
# A `DictFlag` should not be overridable from the command line; only the
# dotted `Item` flags should be. However, the _parse() method will still be
# called in two situations:
# 1. Via the base `Flag`'s constructor, which calls `_parse()` to process
# the default value, which will be the shared dict.
# 2. When processing command line overrides. We don't want to allow this
# normally, however some libraries will serialize and deserialize all
# flags, e.g. to pass values between processes, so we accept a dummy
# empty serialized value for these cases. It's unlikely users will try to
# set the dict flag to an empty string from the command line.
if value is self._shared_dict or value == _EMPTY:
return self._shared_dict
raise flags.IllegalFlagValueError(
"Can't override a dict flag directly. Did you mean to override one of "
"its `Item`s instead?")
def serialize(self):
# When serializing flags, we return a sentinel value that the `DictFlag`
# will ignore when parsing. The value of this flag is determined by the
# corresponding `Item` flags for serialization and deserialization.
return _EMPTY
def flag_type(self):
return "dict"
# TODO(b/170423907): Pytype doesn't correctly infer that these have type
# `property`.
_flag_value_property = flags.Flag.value # type: property # pytype: disable=annotation-type-mismatch
_multi_flag_value_property = flags.MultiFlag.value # type: property # pytype: disable=annotation-type-mismatch
class ItemFlag(flags.Flag):
"""Updates a shared dict whenever its own value changes.
See also the `DictFlag` and `ff.Item` classes for usage.
"""
def __init__(self, shared_dict, namespace, *args, **kwargs):
self._shared_dict = shared_dict
self._namespace = namespace
super().__init__(*args, **kwargs)
# `super().value = value` doesn't work, see https://bugs.python.org/issue14965
@_flag_value_property.setter
def value(self, value):
_flag_value_property.fset(self, value)
self._update_shared_dict()
def parse(self, argument):
super().parse(argument)
self._update_shared_dict()
def _update_shared_dict(self):
d = self._shared_dict
for name in self._namespace[:-1]:
d = d[name]
d[self._namespace[-1]] = self.value
class MultiItemFlag(flags.MultiFlag):
"""Updates a shared dict whenever its own value changes.
Used for flags that can appear multiple times on the command line.
See also the `DictFlag` and `ff.Item` classes for usage.
"""
def __init__(self, shared_dict, namespace, *args, **kwargs):
self._shared_dict = shared_dict
self._namespace = namespace
super().__init__(*args, **kwargs)
# `super().value = value` doesn't work, see https://bugs.python.org/issue14965
@_multi_flag_value_property.setter
def value(self, value):
_multi_flag_value_property.fset(self, value)
self._update_shared_dict()
def parse(self, argument):
super().parse(argument)
self._update_shared_dict()
def _update_shared_dict(self):
d = self._shared_dict
for name in self._namespace[:-1]:
d = d[name]
d[self._namespace[-1]] = self.value
class AutoFlag(flags.Flag):
"""Implements the shared dict mechanism."""
def __init__(self, fn, fn_kwargs, *args, **kwargs):
self._fn = fn
self._fn_kwargs = fn_kwargs
super().__init__(*args, **kwargs)
@property
def value(self):
kwargs = copy.deepcopy(self._fn_kwargs)
return functools.partial(self._fn, **kwargs)
@value.setter
def value(self, value):
# The flags `.value` gets set as part of the `flags.FLAG` constructor to a
# default value. However the default value should be given by the initial
# `fn_kwargs` instead, so a) the semantics of setting the value are unclear
# and b) we may not be able to call `self._fn` at this point in execution.
del value
def _parse(self, value):
# An `AutoFlag` should not be overridable from the command line; only the
# dotted `Item` flags should be. However, the `_parse()` method will still
# be called in two situations:
# 1. In the base `Flag`'s constructor, which calls `_parse()` to process the
# default value, which will be None (as set in `DEFINE_auto`).
# 2. When processing command line overrides. We don't want to allow this
# normally, however some libraries will serialize and deserialize all
# flags, e.g. to pass values between processes, so we accept a dummy
# empty serialized value for these cases. It's unlikely users will try to
# set the auto flag to an empty string from the command line.
if value is None or value == _EMPTY:
return None
raise flags.IllegalFlagValueError(
"Can't override an auto flag directly. Did you mean to override one of "
"its `Item`s instead?")
def serialize(self):
# When serializing a `FlagHolder` container, we must return *some* value for
# this flag. We return an empty value that the `AutoFlag` will ignore when
# parsing. The value of this flag is instead determined by the
# corresponding `Item` flags for serialization and deserialization.
return _EMPTY
def flag_type(self):
return "auto"
class TypedFlagHolder(flags.FlagHolder, Generic[_T]):
"""A typed wrapper for a FlagHolder.
This is necessary until either Pytype supports PEP-562 (b/170419518), or
abseill-py drops Python 2 support (b/182444583) and moves their type hints
into the source.
"""
def __init__(self, flag_holder: flags.FlagHolder[_T]):
self._flag_holder = flag_holder
@property
def value(self) -> _T:
return self._flag_holder.value
@property
def default(self) -> _T:
return self._flag_holder.default
@property
def name(self) -> str:
return self._flag_holder.name
@property
def present(self) -> bool:
return self._flag_holder.present
|
Python
| 0.999993
|
@@ -6583,17 +6583,16 @@
abseil
-l
-py drop
|
384b59222c1da09dacdb7ee70218ac407b1a0a68
|
load Travis notification POST payload
|
service.py
|
service.py
|
#!/usr/bin/env python
# vim: set expandtab sw=4 ts=4:
'''
Retrieve Travis CI build data and log to Keen.io
Copyright (C) 2014 Dieter Adriaenssens <ruleant@users.sourceforge.net>
This file is part of buildtimetrend/service
<https://github.com/buildtimetrend/service/>
This program is free software: you can redistribute it and/or modify
it under the terms of the GNU Affero General Public License as published by
the Free Software Foundation, either version 3 of the License, or
any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU Affero General Public License for more details.
You should have received a copy of the GNU Affero General Public License
along with this program. If not, see <http://www.gnu.org/licenses/>.
'''
import os
import cgi
import cherrypy
from buildtimetrend.travis import TravisData
from buildtimetrend.settings import Settings
from buildtimetrend.tools import get_logger
from buildtimetrend.tools import set_loglevel
from buildtimetrend.keenio import log_build_keen
from buildtimetrend.keenio import keen_is_writable
class TravisParser(object):
'''
Retrieve timing data from Travis CI, parse it and store it in Keen.io
'''
def __init__(self):
'''
Initialise class, by loading a config file and setting loglevel
'''
self.settings = Settings()
self.settings.load_config_file("config_service.yml")
# set loglevel
set_loglevel("INFO")
@cherrypy.expose
def index(self):
return "Coming soon, " \
"<a href='https://github.com/buildtimetrend/service'>" \
"Buildtime Trend as a Service</a>."
@cherrypy.expose
def travis(self, repo=None, build=None):
if repo is not None:
self.settings.set_project_name(repo)
if build is not None:
self.settings.add_setting('build', build)
# process travis build
return self.process_travis_buildlog()
def process_travis_buildlog(self):
'''
Check parameters, load build data from Travis CI,
process it and send to Keen.io for storage.
'''
logger = get_logger()
repo = self.settings.get_project_name()
if repo is None:
logger.warning("Repo is not set")
return "Repo is not set, use repo=user/repo"
# check if repo is allowed
allowed_repo = ["buildtimetrend", "ruleant"]
if not any(x in repo for x in allowed_repo):
message = "The supplied repo is not allowed : %s"
logger.warning(message, repo)
return message % cgi.escape(repo)
build = self.settings.get_setting('build')
if build is None:
logger.warning("Build number is not set")
return "Build number is not set, use build=build_id"
travis_data = TravisData(repo, build)
# retrieve build data using Travis CI API
logger.info("Retrieve build #%s data of %s from Travis CI",
build, repo)
travis_data.get_build_data()
# process all build jobs
travis_data.process_build_jobs()
if not keen_is_writable():
return "Keen IO write key not set, no data was sent"
# send build job data to Keen.io
for build_job in travis_data.build_jobs:
logger.info("Send build job #%s data to Keen.io", build_job)
log_build_keen(travis_data.build_jobs[build_job])
message = "Succesfully retrieved build #%s data of %s from Travis CI" \
" and sent to Keen.io"
logger.info(message, build, repo)
return message % (cgi.escape(build), cgi.escape(repo))
if __name__ == "__main__":
# configure cherrypy webserver host and port
# inspired by https://github.com/craigkerstiens/cherrypy-helloworld
cherrypy.config.update({
'server.socket_host': '0.0.0.0',
'server.socket_port': int(os.environ.get('PORT', '5000')),
})
cherrypy.quickstart(TravisParser())
|
Python
| 0
|
@@ -892,16 +892,28 @@
port os%0A
+import json%0A
import c
@@ -1867,16 +1867,52 @@
=None):%0A
+ self.load_travis_payload()%0A%0A
@@ -3880,16 +3880,425 @@
repo))%0A%0A
+ def load_travis_payload(self):%0A '''%0A Load payload from Travis notification%0A '''%0A if 'Content-Length' in cherrypy.request.headers:%0A content_length = cherrypy.request.headers%5B'Content-Length'%5D%0A rawbody = cherrypy.request.body.read(int(content_length))%0A payload = json.load(rawbody)%0A get_logger().info(%22Travis Payload : %25r.%22, payload)%0A%0A
%0Aif __na
|
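The added handler reads the raw POST body via the Content-Length header and parses it as JSON. A hedged sketch of the parsing step outside CherryPy, with a made-up payload (note json.loads is the call that accepts a raw string; json.load expects a file-like object):

import json

rawbody = '{"repository": {"owner_name": "buildtimetrend"}, "number": "42"}'
payload = json.loads(rawbody)                # loads: parse a string
print(payload['repository']['owner_name'])   # -> buildtimetrend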
f2b25679ff906615906552810368092cc5321a3c
|
Add source and issue tracker link warnings
|
fdroidserver/lint.py
|
fdroidserver/lint.py
|
#!/usr/bin/env python2
# -*- coding: utf-8 -*-
#
# rewritemeta.py - part of the FDroid server tool
# Copyright (C) 2010-12, Ciaran Gultnieks, ciaran@ciarang.com
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
from optparse import OptionParser
import common, metadata
config = None
options = None
appid = None
def warn(message):
global appid
if appid:
print "%s:" % appid
appid = None
print(' %s' % message)
def main():
global config, options, appid
# Parse command line...
parser = OptionParser(usage="Usage: %prog [options] [APPID [APPID ...]]")
parser.add_option("-v", "--verbose", action="store_true", default=False,
help="Spew out even more information than normal")
(options, args) = parser.parse_args()
config = common.read_config(options)
# Get all apps...
allapps = metadata.read_metadata(xref=False)
apps = common.read_app_args(args, allapps, False)
for app in apps:
appid = app['id']
lastcommit = ''
for build in app['builds']:
if 'commit' in build and 'disable' not in build:
lastcommit = build['commit']
if (app['Update Check Mode'] == 'RepoManifest' and
any(s in lastcommit for s in ('.', ',', '_', '-', '/'))):
warn("Last used commit '%s' looks like a tag, but Update Check Mode is RepoManifest" % lastcommit)
summ_chars = len(app['Summary'])
if summ_chars > config['char_limits']['Summary']:
warn("Summary of length %s is over the %i char limit" % (
summ_chars, config['char_limits']['Summary']))
if app['Summary']:
lastchar = app['Summary'][-1]
if any(lastchar==c for c in ['.', ',', '!', '?']):
warn("Summary should not end with a %s" % lastchar)
desc_chars = 0
for line in app['Description']:
desc_chars += len(line)
if desc_chars > config['char_limits']['Description']:
warn("Description of length %s is over the %i char limit" % (
desc_chars, config['char_limits']['Description']))
if not appid:
print
print "Finished."
if __name__ == "__main__":
main()
|
Python
| 0
|
@@ -843,16 +843,26 @@
nParser%0A
+import re%0A
import c
@@ -877,16 +877,16 @@
etadata%0A
-
%0Aconfig
@@ -1567,16 +1567,828 @@
False)%0A%0A
+ regex_warnings = %7B%0A 'Source Code': %5B%0A (re.compile(r'.*code%5C.google%5C.com/p/%5B%5E/%5D+/source/.*'),%0A %22/source is enough on its own%22),%0A (re.compile(r'.*code%5C.google%5C.com/p/%5B%5E/%5D+%5B/%5D*$'),%0A %22/source is missing%22)%0A %5D,%0A 'Issue Tracker': %5B%0A (re.compile(r'.*code%5C.google%5C.com/p/%5B%5E/%5D+/issues/.*'),%0A %22/issues is enough on its own%22),%0A (re.compile(r'.*code%5C.google%5C.com/p/%5B%5E/%5D+%5B/%5D*$'),%0A %22/issues is missing%22),%0A (re.compile(r'.*github%5C.com/%5B%5E/%5D+/%5B%5E/%5D+/issues/.*'),%0A %22/issues is enough on its own%22),%0A (re.compile(r'.*github%5C.com/%5B%5E/%5D+/%5B%5E/%5D+%5B/%5D*$'),%0A %22/issues is missing%22)%0A %5D%0A %7D%0A%0A
for
@@ -3204,16 +3204,16 @@
'?'%5D):%0A
-
@@ -3273,16 +3273,273 @@
tchar)%0A%0A
+ for f in %5B'Source Code', 'Issue Tracker'%5D:%0A if f not in regex_warnings:%0A continue%0A for m, r in regex_warnings%5Bf%5D:%0A if m.match(app%5Bf%5D):%0A warn(%22%25s url '%25s': %25s%22 %25 (f, app%5Bf%5D, r))%0A%0A
|
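The patch adds per-field tables of (compiled regex, message) pairs and matches each app URL against them. A tiny self-contained sketch of the same table-driven check, using one of the patterns from the diff:

import re

checks = [
    (re.compile(r'.*code\.google\.com/p/[^/]+/source/.*'),
     "/source is enough on its own"),
]

url = 'http://code.google.com/p/someapp/source/browse'
for regex, message in checks:
    if regex.match(url):
        print("Source Code url '%s': %s" % (url, message))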
926b9aaaf6a3a3f7680377804b42eae953793797
|
call parent constructor in singpost.shipper
|
shipper.py
|
shipper.py
|
"""
Copyright (C) 2009, Tay Ray Chuan
Please see LICENCE for licensing details.
"""
"""
Each shipping option uses the data in an Order object to calculate the shipping cost and return the value
"""
try:
from decimal import Decimal
except:
from django.utils._decimal import Decimal
from django.utils.translation import ugettext as _
from satchmo.configuration import config_value
from satchmo.shipping.modules.base import BaseShipper
WEIGHT_COST_MAP = {
'NONSTANDARD_MAIL': (
(40, Decimal('0.50')),
(100, Decimal('0.80')),
(250, Decimal('1.00')),
(500, Decimal('1.50')),
(1000, Decimal('2.55')),
(2000, Decimal('3.35'))
)
}
class Shipper(BaseShipper):
id = "SingPost"
def __init__(self, service_type='NONSTANDARD_MAIL'):
self.service_type = service_type
def __str__(self):
"""
This is mainly helpful for debugging purposes
"""
return "SingPost"
def description(self):
"""
A basic description that will be displayed to the user when selecting their shipping options
"""
return _("SingPost Shipping")
def _weight_for_shipment(self, shipment):
total_weight = Decimal('0')
for cartitem in shipment:
if cartitem.product.is_shippable:
total_weight += Decimal(cartitem.product.weight)
return total_weight
def _weight(self):
total_weight = Decimal('0')
for cartitem in self.cart.cartitem_set.all():
if cartitem.product.is_shippable:
total_weight += Decimal(cartitem.product.weight) * Decimal(cartitem.quantity)
return total_weight
def _cart_as_shipment(self):
shipment = []
for cartitem in self.cart.cartitem_set.all():
for i in xrange(cartitem.quantity):
shipment.append(cartitem)
return shipment
"""
Returns a list of shipments.
"""
def _partitioned_shipments(self):
pair = reduce(lambda x, y: x if x > y else y, \
WEIGHT_COST_MAP[self.service_type])
max_weight_class = pair[0]
if not self._weight() > max_weight_class:
return [self._cart_as_shipment()]
else:
shipments = []
a_shipment = []
the_weight = Decimal('0')
b = None
for cartitem in self.cart.cartitem_set.all():
for i in xrange(cartitem.quantity):
b = the_weight + Decimal(cartitem.product.weight)
if b <= max_weight_class:
the_weight = b
a_shipment.append(cartitem)
if b == max_weight_class:
shipments.append(a_shipment)
a_shipment = []
else:
shipments.append(a_shipment)
a_shipment = [cartitem]
the_weight = cartitem.product.weight
if len(a_shipment):
shipments.append(a_shipment)
return shipments
def _cost_for_shipment(self, shipment):
total_weight = self._weight_for_shipment(shipment)
prev = None
result_cost = None
for weight_class, weight_class_cost in \
WEIGHT_COST_MAP[self.service_type]:
if total_weight <= Decimal(weight_class):
if prev:
if total_weight > Decimal(prev):
result_cost = weight_class_cost
break
else:
prev = weight_class
return result_cost
def cost(self):
"""
Complex calculations can be done here as long as the return value is a dollar figure
"""
assert(self._calculated)
shipments = self._partitioned_shipments()
total_cost = Decimal('0.00')
for shipment in shipments:
total_cost += self._cost_for_shipment(shipment)
return total_cost
def method(self):
"""
Describes the actual delivery service (Mail, FedEx, DHL, UPS, etc)
"""
return _('SingPost')
def expectedDelivery(self):
"""
Can be a plain string or complex calculation returning an actual date
"""
return _('3-4 business days')
def valid(self, order=None):
"""
Can do complex validation about whether or not this option is valid.
For example, may check to see if the recipient is in an allowed country
or location.
"""
return True
|
Python
| 0
|
@@ -756,16 +756,41 @@
__(self,
+ cart=None, contact=None,
service
@@ -816,16 +816,70 @@
MAIL'):%0A
+ super(Shipper, self).__init__(cart, contact)%0A%0A
|
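The fix forwards cart and contact to the BaseShipper constructor so the shared state is initialised before the subclass adds its own. A minimal sketch of the pattern with a stand-in base class:

class BaseShipper(object):
    def __init__(self, cart=None, contact=None):
        self.cart = cart
        self.contact = contact

class Shipper(BaseShipper):
    def __init__(self, cart=None, contact=None, service_type='NONSTANDARD_MAIL'):
        super(Shipper, self).__init__(cart, contact)  # initialise shared state
        self.service_type = service_type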
9067da2973ff53c801811e0e414565784ab2910c
|
Fix tenant_id mismatch in test object creation
|
neutron_lbaas/tests/tempest/v2/api/test_pools_admin.py
|
neutron_lbaas/tests/tempest/v2/api/test_pools_admin.py
|
# Copyright 2015 Hewlett-Packard Development Company, L.P.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from tempest.common.utils import data_utils
from tempest import test
from neutron_lbaas.tests.tempest.v2.api import base
PROTOCOL_PORT = 80
class TestPools(base.BaseAdminTestCase):
"""
Tests the following operations in the Neutron-LBaaS API using the
REST client for Pools:
list pools
create pool
get pool
update pool
delete pool
"""
@classmethod
def resource_setup(cls):
super(TestPools, cls).resource_setup()
if not test.is_extension_enabled('lbaas', 'network'):
msg = "lbaas extension not enabled."
raise cls.skipException(msg)
network_name = data_utils.rand_name('network-')
cls.network = cls.create_network(network_name)
cls.subnet = cls.create_subnet(cls.network)
cls.load_balancer = cls._create_load_balancer(
tenant_id=cls.subnet.get('tenant_id'),
vip_subnet_id=cls.subnet.get('id'))
def increment_protocol_port(self):
global PROTOCOL_PORT
PROTOCOL_PORT += 1
def _prepare_and_create_pool(self, protocol=None, lb_algorithm=None,
listener_id=None, **kwargs):
self.increment_protocol_port()
if not protocol:
protocol = 'HTTP'
if not lb_algorithm:
lb_algorithm = 'ROUND_ROBIN'
if not listener_id:
listener = self._create_listener(
loadbalancer_id=self.load_balancer.get('id'),
protocol='HTTP', protocol_port=PROTOCOL_PORT)
listener_id = listener.get('id')
response = self._create_pool(protocol=protocol,
lb_algorithm=lb_algorithm,
listener_id=listener_id,
**kwargs)
return response
@test.attr(type='smoke')
def test_create_pool_using_empty_tenant_field(self):
"""Test create pool with empty tenant field"""
new_pool = self._prepare_and_create_pool(
tenant_id="")
pool = self.pools_client.get_pool(new_pool.get('id'))
pool_tenant = pool.get('tenant_id')
self.assertEqual(pool_tenant, '')
self._delete_pool(new_pool.get('id'))
@test.attr(type='smoke')
def test_create_pool_missing_tenant_id_for_other_tenant(self):
"""
Test create pool with a missing tenant id field. Verify
tenant_id does not match when creating pool vs.
pool (admin client)
"""
new_pool = self._prepare_and_create_pool(
protocol='HTTP',
lb_algorithm='ROUND_ROBIN')
pool = self.pools_client.get_pool(new_pool.get('id'))
pool_tenant = pool['tenant_id']
self.assertNotEqual(pool_tenant, self.subnet['tenant_id'])
self._delete_pool(new_pool.get('id'))
@test.attr(type='smoke')
def test_create_pool_missing_tenant_id_for_admin(self):
"""
Test create pool with a missing tenant id field. Verify the
tenant_id of the created pool matches the tenant_id of the pool
fetched with the admin client.
"""
new_pool = self._prepare_and_create_pool(
protocol='HTTP',
lb_algorithm='ROUND_ROBIN')
pool = self.pools_client.get_pool(new_pool.get('id'))
pool_tenant = pool['tenant_id']
self.assertEqual(pool_tenant, pool.get('tenant_id'))
self._delete_pool(new_pool.get('id'))
@test.attr(type='smoke')
def test_create_pool_for_another_tenant(self):
"""Test create pool for other tenant field"""
tenant = 'deffb4d7c0584e89a8ec99551565713c'
new_pool = self._prepare_and_create_pool(
tenant_id=tenant)
pool = self.pools_client.get_pool(new_pool.get('id'))
pool_tenant = pool.get('tenant_id')
self.assertEqual(pool_tenant, tenant)
self._delete_pool(new_pool.get('id'))
|
Python
| 0.000014
|
@@ -2147,16 +2147,26 @@
COL_PORT
+, **kwargs
)%0A
|
4d1c465e5c946ac17334e29e0ded7b6134533d12
|
Disable save in Crop multi roi and show the image instead
|
plugins/Scripts/Plugins/Crop_Multi_Roi.py
|
plugins/Scripts/Plugins/Crop_Multi_Roi.py
|
from ij import IJ
from ij.plugin.frame import RoiManager
from io.scif.config import SCIFIOConfig
from io.scif.img import ImageRegion
from io.scif.img import ImgOpener
from io.scif.img import ImgSaver
from net.imagej.axis import Axes
import os
def main():
# Get current image filename
imp = IJ.getImage()
f = imp.getOriginalFileInfo()
if not f:
IJ.showMessage('Source image needs to match a file on the system.')
return
# Iterate over all ROIs from ROI Manager
rois = RoiManager.getInstance()
if not rois:
IJ.showMessage('No ROIs. Please use Analyze > Tools > ROI Manager...')
return
fname = os.path.join(f.directory, f.fileName)
IJ.log('Image filename is %s' % fname)
rois_array = rois.getRoisAsArray()
for i, roi in enumerate(rois_array):
crop_id = i + 1
IJ.log("Opening crop %i / %i" % (crop_id, len(rois_array)))
# Get ROI bounds
bounds = roi.getBounds()
x = bounds.x
y = bounds.y
w = bounds.width
h = bounds.height
# Import only cropped region of the image
axes = [Axes.X, Axes.Y]
ranges = ["%i-%i" % (x, x+w), "%i-%i" % (y, y+h)]
config = SCIFIOConfig()
config.imgOpenerSetRegion(ImageRegion(axes, ranges))
opener = ImgOpener()
imps = opener.openImgs(fname, config)
imp = imps[0]
# Get filename and basename of the current cropped image
crop_basename = "crop%i_%s" % (crop_id, f.fileName)
crop_fname = os.path.join(f.directory, crop_basename)
IJ.log("Saving crop to %s" % crop_fname)
# Save cropped image
saver = ImgSaver()
saver.saveImg(crop_fname, imp)
IJ.log('Done')
main()
|
Python
| 0
|
@@ -226,16 +226,75 @@
ort Axes
+%0Afrom net.imglib2.img.display.imagej import ImageJFunctions
%0A%0Aimport
@@ -1686,43 +1686,29 @@
-IJ.log(%22Saving crop to %25s%22 %25
+imp.setName(
crop_
-f
+base
name
@@ -1747,24 +1747,75 @@
age%0A
+#IJ.log(%22Saving crop to %25s%22 %25 crop_fname)%0A #
saver = ImgS
@@ -1829,16 +1829,17 @@
+#
saver.sa
@@ -1860,16 +1860,80 @@
me, imp)
+%0A%0A # Show cropped image%0A ImageJFunctions.show(imp)
%0A %0A
|
5b3acc2d35c06554bce1b8d2245b14e1831304df
|
add the tests into the Sage doctest suite
|
sympy/test_external/test_sage.py
|
sympy/test_external/test_sage.py
|
# This testfile tests SymPy <-> Sage compatibility
# Execute this test inside Sage, e.g. with:
# sage -python bin/test sympy/test_external/test_sage.py
# Don't test any SymPy features here. Just pure interaction with Sage.
# Always write regular SymPy tests for anything, that can be tested in pure
# Python (without Sage). Here we test everything, that a user may need when
# using SymPy with Sage.
import os
import re
import sys
try:
import sage.all as sage
except ImportError:
#py.test will not execute any tests now
disabled = True
import sympy
def check_expression(expr, var_symbols):
"""Does eval(expr) both in Sage and SymPy and does other checks."""
# evaluate the expression in the context of Sage:
sage.var(var_symbols)
a = globals().copy()
# safety checks...
assert not "sin" in a
a.update(sage.__dict__)
assert "sin" in a
e_sage = eval(expr, a)
assert not isinstance(e_sage, sympy.Basic)
# evaluate the expression in the context of SymPy:
sympy.var(var_symbols)
b = globals().copy()
assert not "sin" in b
b.update(sympy.__dict__)
assert "sin" in b
e_sympy = eval(expr, b)
assert isinstance(e_sympy, sympy.Basic)
# Do the actual checks:
assert sympy.S(e_sage) == e_sympy
assert e_sage == sage.SR(e_sympy)
def test_basics():
check_expression("x", "x")
check_expression("x**2", "x")
check_expression("x**2+y**3", "x y")
check_expression("1/(x+y)**2-x**3/4", "x y")
def test_complex():
check_expression("I", "")
check_expression("23+I*4", "x")
check_expression("I*y", "y")
check_expression("x+I*y", "x y")
def test_integer():
check_expression("4*x", "x")
check_expression("-4*x", "x")
def test_real():
check_expression("1.123*x", "x")
check_expression("-18.22*x", "x")
def test_E():
assert sympy.sympify(sage.e) == sympy.E
assert sage.e == sage.SR(sympy.E)
def test_pi():
assert sympy.sympify(sage.pi) == sympy.pi
assert sage.pi == sage.SR(sympy.pi)
def test_euler_gamma():
assert sympy.sympify(sage.euler_gamma) == sympy.EulerGamma
assert sage.euler_gamma == sage.SR(sympy.EulerGamma)
def test_oo():
assert sympy.sympify(sage.oo) == sympy.oo
assert sage.oo == sage.SR(sympy.oo)
def test_NaN():
assert sympy.sympify(sage.NaN) == sympy.nan
assert sage.NaN == sage.SR(sympy.nan)
def test_Catalan():
assert sympy.sympify(sage.catalan) == sympy.Catalan
assert sage.catalan == sage.SR(sympy.Catalan)
def test_GoldenRation():
assert sympy.sympify(sage.golden_ratio) == sympy.GoldenRatio
assert sage.golden_ratio == sage.SR(sympy.GoldenRatio)
def test_functions():
check_expression("sin(x)", "x")
check_expression("cos(x)", "x")
check_expression("tan(x)", "x")
check_expression("cot(x)", "x")
check_expression("asin(x)", "x")
check_expression("acos(x)", "x")
check_expression("atan(x)", "x")
check_expression("atan2(y, x)", "x, y")
check_expression("acot(x)", "x")
check_expression("sinh(x)", "x")
check_expression("cosh(x)", "x")
check_expression("tanh(x)", "x")
check_expression("coth(x)", "x")
check_expression("asinh(x)", "x")
check_expression("acosh(x)", "x")
check_expression("atanh(x)", "x")
check_expression("acoth(x)", "x")
check_expression("exp(x)", "x")
check_expression("log(x)", "x")
check_expression("abs(x)", "x")
def test_issue924():
sage.var("a x")
log = sage.log
i = sympy.integrate(log(x)/a, (x, a, a+1))
i2 = sympy.simplify(i)
s = sage.SR(i2)
assert s == (a*log(1+a) - a*log(a) + log(1+a) - 1)/a
|
Python
| 0
|
@@ -44,16 +44,17 @@
ibility%0A
+#
%0A# Execu
@@ -147,16 +147,221 @@
sage.py%0A
+#%0A# This file can be tested by Sage itself by:%0A# sage -t sympy/test_external/test_sage.py%0A# and if all tests pass, it should be copied (verbatim) to Sage, so that it is%0A# automatically doctested by Sage.%0A#
%0A# Don't
@@ -3873,8 +3873,500 @@
- 1)/a%0A
+%0A# This string contains Sage doctests, that execute all the functions above.%0A# When you add a new function, please add it here as well.%0A%22%22%22%0A%0ATESTS::%0A%0A sage: test_basics()%0A sage: test_basics()%0A sage: test_complex()%0A sage: test_integer()%0A sage: test_real()%0A sage: test_E()%0A sage: test_pi()%0A sage: test_euler_gamma()%0A sage: test_oo()%0A sage: test_NaN()%0A sage: test_Catalan()%0A sage: test_GoldenRation()%0A sage: test_functions()%0A sage: test_issue924()%0A%0A%22%22%22%0A
|
9a19c34a104aabd0c5b34734f587573d5766a4bd
|
support multi-file results
|
finishTest/Finish.py
|
finishTest/Finish.py
|
from __future__ import print_function
from BaseTask import BaseTask
from Engine import MasterTbl, Error, get_platform
from Dbg import Dbg
import os, json, time, platform
dbg = Dbg()
validA = ("passed", "failed", "diff")
comment_block = """
Test Results:
'notfinished': means that the test has started but not completed.
'failed': means that the test has run but failed.
'notrun': test has not started running.
'diff' : Test has run but is different from gold copy.
'passed': Test has run and matches gold copy.
"""
class Finish(BaseTask):
def __init__(self,name):
super(Finish, self).__init__(name)
def __parse_input_fn(self, fn):
if (not os.path.exists(fn)):
return "failed"
f = open(fn)
lineA = f.readlines()
f.close()
found = False
result = "passed"
for line in lineA:
line = line.strip()
if (line[0] == "#" or len(line) < 1):
continue
found = True
idx = line.find(",")
if (idx > 0):
line = line[0:idx]
line = line.lower()
if (line != "passed"):
result = line
break
if (not result in validA or not found):
result = "failed"
return result
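# A hypothetical results file that __parse_input_fn would accept
# (illustrative only): lines starting with '#' are skipped, only the
# text before the first comma is kept, and matching is case-insensitive.
#
#     # my test results
#     PASSED, step one ran
#     diff,   gold copy mismatch
#
# For this file the first non-"passed" status wins, so the function
# returns "diff".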
def execute(self, *args, **kwargs):
masterTbl = MasterTbl()
result_fn = masterTbl['result_fn']
runtime_fn = masterTbl['runtime_fn']
input_fn = masterTbl['pargs'][0]
result = self.__parse_input_fn(input_fn)
my_result = { 'testresult' : result, "comment" : comment_block.split('\n') }
f = open(result_fn,"w")
f.write(json.dumps(my_result, sort_keys=True, indent=2, separators=(', ', ': ')))
f.close()
if (not os.path.exists(runtime_fn)):
Error("Unable to open: ", runtime_fn)
f = open(runtime_fn)
runtime = json.loads(f.read())
f.close()
t1 = time.time()
runtime['T1'] = t1
runtime['TT'] = t1 - runtime['T0']
unameT = get_platform()
for k in unameT:
runtime[k] = unameT[k]
f = open(runtime_fn,"w")
f.write(json.dumps(runtime, sort_keys=True, indent=2, separators=(', ', ': ')))
f.close()
|
Python
| 0
|
@@ -697,12 +697,57 @@
, fn
+A
):%0A%0A
+ result = %22passed%22%0A%0A for fn in fnA:%0A
@@ -777,24 +777,26 @@
fn)):%0A
+
+
return %22fail
@@ -804,24 +804,26 @@
d%22%0A %0A
+
f = open(fn)
@@ -823,24 +823,26 @@
pen(fn)%0A
+
+
lineA = f.re
@@ -851,24 +851,26 @@
lines()%0A
+
f.close()%0A%0A
@@ -868,24 +868,26 @@
lose()%0A%0A
+
+
found = Fal
@@ -889,39 +889,19 @@
= False%0A
+%0A
- result = %22passed%22%0A%0A
for
@@ -915,16 +915,18 @@
lineA:%0A
+
li
@@ -941,32 +941,34 @@
e.strip()%0A
+
if (line%5B0%5D == %22
@@ -989,16 +989,18 @@
) %3C 1):%0A
+
@@ -1011,24 +1011,26 @@
inue%0A%0A
+
found = True
@@ -1030,16 +1030,18 @@
= True%0A
+
id
@@ -1061,24 +1061,26 @@
(%22,%22)%0A
+
if (idx %3E 0)
@@ -1081,16 +1081,18 @@
x %3E 0):%0A
+
@@ -1112,24 +1112,26 @@
:idx%5D%0A
+
line = line.
@@ -1138,16 +1138,18 @@
lower()%0A
+
if
@@ -1173,24 +1173,26 @@
%22):%0A
+
result = lin
@@ -1193,16 +1193,18 @@
= line%0A
+
@@ -1210,16 +1210,25 @@
break%0A%0A
+ %0A
if (
@@ -1265,24 +1265,26 @@
und):%0A
+
result = %22fa
@@ -1288,16 +1288,37 @@
%22failed%22
+%0A break%0A
%0A%0A re
@@ -1490,17 +1490,17 @@
input_fn
-
+A
= mast
@@ -1513,19 +1513,16 @@
'pargs'%5D
-%5B0%5D
%0A%0A re
@@ -1562,16 +1562,17 @@
input_fn
+A
)%0A%0A m
|
bd710f64368d1606dbe52783e6fe2156a4c4e986
|
Test get_manager
|
armstrong/core/arm_sections/tests/backends.py
|
armstrong/core/arm_sections/tests/backends.py
|
from django.core.exceptions import ObjectDoesNotExist
from ._utils import *
from ..models import Section
from arm_sections_support.models import *
class ManyToManyBackendTestCase(ArmSectionsTestCase):
"""
Test fetching items for content with a many-to-many relationship to sections.
"""
def setUp(self):
super(ManyToManyBackendTestCase, self).setUp()
self.sports = Section.objects.get(slug='sports')
self.article = Article.objects.create(
title="Test Article",
slug='test_article',
)
self.article.sections = [self.sports]
self.article2 = Article.objects.create(
title="Second Article",
slug='second_article',
)
self.article2.sections = [self.sports]
def test_backend_with_articles(self):
with override_settings(ARMSTRONG_SECTION_ITEM_MODEL='armstrong.core.arm_sections.tests.arm_sections_support.models.Common'):
self.assert_(self.article in self.sports.items)
self.assert_(self.article2 in self.sports.items)
class ForeignKeyBackendTestCase(ArmSectionsTestCase):
"""
Test fetching items for content with a foreign key relationship to sections.
"""
def setUp(self):
super(ForeignKeyBackendTestCase, self).setUp()
self.sports = Section.objects.get(slug='sports')
self.weather = Section.objects.get(slug='weather')
self.foreign_key_article = SectionForeignKeyArticle.objects.create(
title="Test Foreign Key Article",
slug='test_foreign_key_article',
primary_section=self.sports
)
self.foreign_key_article2 = SectionForeignKeyArticle.objects.create(
title="Second Foreign Key Article",
slug='second_foreign_key',
primary_section=self.weather
)
def test_backend_with_foreign_key_articles(self):
with override_settings(ARMSTRONG_SECTION_ITEM_MODEL='armstrong.core.arm_sections.tests.arm_sections_support.models.SectionForeignKeyCommon'):
self.assert_(self.foreign_key_article in self.sports.items)
self.assert_(self.foreign_key_article2 in self.weather.items)
class ComplexBackendTestCase(ArmSectionsTestCase):
"""
Test fetching items for content with foreign key and many-to-many relationships to sections.
"""
def setUp(self):
super(ComplexBackendTestCase, self).setUp()
self.pro_sports = Section.objects.get(slug='pro')
self.weather = Section.objects.get(slug='weather')
self.complex_article = ComplexArticle.objects.create(
title="Test Complex Article",
slug='test_complex_article',
primary_section=self.pro_sports
)
self.complex_article.related_sections = [self.weather]
def test_backend_with_complex_articles(self):
with override_settings(ARMSTRONG_SECTION_ITEM_MODEL='armstrong.core.arm_sections.tests.arm_sections_support.models.ComplexCommon'):
self.assert_(self.complex_article in self.pro_sports.items)
self.assert_(self.complex_article in self.weather.items)
class HierarchyBackendTestCase(ArmSectionsTestCase):
"""
Test fetching items for a parent section of the associated section.
"""
def setUp(self):
super(HierarchyBackendTestCase, self).setUp()
self.sports = Section.objects.get(slug='sports')
self.pro_sports = Section.objects.get(slug='pro')
self.weather = Section.objects.get(slug='weather')
self.complex_article = ComplexArticle.objects.create(
title="Test Complex Article",
slug='test_complex_article',
primary_section=self.pro_sports
)
self.complex_article.related_sections = [self.weather]
def test_backend_with_section_hierarchy(self):
with override_settings(ARMSTRONG_SECTION_ITEM_MODEL='armstrong.core.arm_sections.tests.arm_sections_support.models.ComplexCommon'):
self.assert_(self.complex_article in self.sports.items)
self.assert_(self.complex_article in self.pro_sports.items)
|
Python
| 0.000001
|
@@ -142,16 +142,197 @@
port *%0A%0A
+from armstrong.core.arm_sections.backends import ItemFilter%0Afrom armstrong.core.arm_sections.managers import SectionSlugManager%0Afrom model_utils.managers import InheritanceManager%0A%0A
%0Aclass M
@@ -4391,28 +4391,621 @@
e in self.pro_sports.items)%0A
+%0Aclass ManagerTestCase(ArmSectionsTestCase):%0A %22%22%22%0A Test fetching items for a parent section of the associated section.%0A %22%22%22%0A def setUp(self):%0A super(ManagerTestCase, self).setUp()%0A self.item_filter = ItemFilter()%0A%0A def test_default_manager(self):%0A self.assertEquals(self.item_filter.get_manager(ComplexCommon).__class__,%0A InheritanceManager)%0A%0A def test_custom_manager(self):%0A self.item_filter.manager_attr = 'with_section'%0A self.assertEquals(self.item_filter.get_manager(ComplexCommon).__class__,%0A SectionSlugManager)%0A
|
3879f264e5faca6f96e4b16a30174aac4a5006e9
|
Fix syntax error in tests
|
astropy/coordinates/tests/test_erfa_astrom.py
|
astropy/coordinates/tests/test_erfa_astrom.py
|
import numpy as np
import pytest
import astropy.units as u
from astropy.time import Time
from astropy.utils.exceptions import AstropyWarning
from astropy.coordinates import EarthLocation, AltAz, GCRS, SkyCoord
from astropy.coordinates.erfa_astrom import (
erfa_astrom, ErfaAstrom, ErfaAstromInterpolator
)
def test_science_state():
assert erfa_astrom.get().__class__ is ErfaAstrom
res = 300 * u.s
with erfa_astrom.set(ErfaAstromInterpolator(res)):
assert isinstance(erfa_astrom.get(), ErfaAstromInterpolator)
assert erfa_astrom.get().mjd_resolution == res.to_value(u.day)
# context manager should have switched it back
assert erfa_astrom.get().__class__ is ErfaAstrom
# must be a subclass of BaseErfaAstrom
with pytest.raises(TypeError):
erfa_astrom.set('foo')
def test_warnings():
with pytest.warns(AstropyWarning):
with erfa_astrom.set(ErfaAstromInterpolator(9 * u.us)):
pass
def test_erfa_astrom():
# I was having a pretty hard time coming up with a unit test
# that tests only the astrom provider and does not just check
# its implementation against itself,
# so we test a coordinate conversion using it
location = EarthLocation(
lon=-17.891105 * u.deg,
lat=28.761584 * u.deg,
height=2200 * u.m,
)
obstime = Time('2020-01-01T18:00') + np.linspace(0, 1, 100) * u.hour
altaz = AltAz(location=location, obstime=obstime)
coord = SkyCoord(ra=83.63308333, dec=22.0145, unit=u.deg)
# do the reference transformation, no interpolation
ref = coord.transform_to(altaz)
with erfa_astrom.set(ErfaAstromInterpolator(300 * u.s)):
interp_300s = coord.transform_to(altaz)
# make sure they are actually different
assert np.any(ref.separation(interp_300s) > 0.01 * u.microarcsecond))
# make sure the resolution is as good as we expect
assert np.all(ref.separation(interp_300s) < 1 * u.microarcsecond))
def test_interpolation_nd():
'''
Test that the interpolation also works for nd-arrays
'''
fact = EarthLocation(
lon=-17.891105 * u.deg,
lat=28.761584 * u.deg,
height=2200 * u.m,
)
interp_provider = ErfaAstromInterpolator(300 * u.s)
provider = ErfaAstrom()
for shape in [tuple(), (1, ), (10, ), (3, 2), (2, 10, 5), (4, 5, 3, 2)]:
# create obstimes of the desired shapes
delta_t = np.linspace(0, 12, np.prod(shape, dtype=int)) * u.hour
obstime = (Time('2020-01-01T18:00') + delta_t).reshape(shape)
altaz = AltAz(location=fact, obstime=obstime)
gcrs = GCRS(obstime=obstime)
for frame, tcode in zip([altaz, altaz, gcrs], ['apio13', 'apci', 'apcs']):
without_interp = getattr(provider, tcode)(frame)
assert without_interp.shape == shape
with_interp = getattr(interp_provider, tcode)(frame)
assert with_interp.shape == shape
def test_interpolation_broadcasting():
from astropy.coordinates.tests.utils import randomly_sample_sphere
from astropy.coordinates import SkyCoord, EarthLocation, AltAz
from astropy.time import Time
import astropy.units as u
from astropy.coordinates.erfa_astrom import erfa_astrom, ErfaAstromInterpolator
# 100 random locations on the sky
ra, dec, _ = randomly_sample_sphere(100)
coord = SkyCoord(ra, dec)
# 30 times over the space of 1 hour
times = Time('2020-01-01T20:00') + np.linspace(-0.5, 0.5, 30) * u.hour
lst1 = EarthLocation(
lon=-17.891498 * u.deg,
lat=28.761443 * u.deg,
height=2200 * u.m,
)
# note the use of broadcasting so that 30 times are broadcast against 100 positions
aa_frame = AltAz(obstime=times[:, np.newaxis], location=lst1)
aa_coord = coord.transform_to(aa_frame)
with erfa_astrom.set(ErfaAstromInterpolator(300 * u.s)):
aa_coord_interp = coord.transform_to(aa_frame)
assert aa_coord.shape == aa_coord_interp.shape
assert np.all(aa_coord.separation(aa_coord_interp) < 1 * u.microarcsecond)
|
Python
| 0.000042
|
@@ -1849,17 +1849,16 @@
csecond)
-)
%0A%0A #
@@ -1975,17 +1975,16 @@
csecond)
-)
%0A%0A%0Adef t
|
079c466091fa44000b23f386bd3ff07c048c33e5
|
Move test imports to the top
|
astropy/coordinates/tests/test_erfa_astrom.py
|
astropy/coordinates/tests/test_erfa_astrom.py
|
import numpy as np
import pytest
def test_science_state():
import astropy.units as u
from astropy.coordinates.erfa_astrom import (
erfa_astrom, ErfaAstrom, ErfaAstromInterpolator
)
assert erfa_astrom.get().__class__ is ErfaAstrom
res = 300 * u.s
with erfa_astrom.set(ErfaAstromInterpolator(res)):
assert isinstance(erfa_astrom.get(), ErfaAstromInterpolator)
assert erfa_astrom.get().mjd_resolution == res.to_value(u.day)
# context manager should have switched it back
assert erfa_astrom.get().__class__ is ErfaAstrom
# must be a subclass of BaseErfaAstrom
with pytest.raises(TypeError):
erfa_astrom.set('foo')
def test_warnings():
import astropy.units as u
from astropy.coordinates.erfa_astrom import (
erfa_astrom, ErfaAstromInterpolator
)
from astropy.utils.exceptions import AstropyWarning
with pytest.warns(AstropyWarning):
with erfa_astrom.set(ErfaAstromInterpolator(9 * u.us)):
pass
def test_erfa_astrom():
# I was having a pretty hard time coming up with a unit test
# that tests only the astrom provider and does not just check
# its implementation against itself,
# so we test a coordinate conversion using it
from astropy.coordinates.erfa_astrom import (
erfa_astrom, ErfaAstromInterpolator,
)
from astropy.coordinates import SkyCoord, EarthLocation, AltAz
from astropy.time import Time
import astropy.units as u
location = EarthLocation(
lon=-17.891105 * u.deg,
lat=28.761584 * u.deg,
height=2200 * u.m,
)
obstime = Time('2020-01-01T18:00') + np.linspace(0, 1, 100) * u.hour
altaz = AltAz(location=location, obstime=obstime)
coord = SkyCoord(ra=83.63308333, dec=22.0145, unit=u.deg)
# do the reference transformation, no interpolation
ref = coord.transform_to(altaz)
with erfa_astrom.set(ErfaAstromInterpolator(300 * u.s)):
interp_300s = coord.transform_to(altaz)
# make sure they are actually different
assert np.any(ref.separation(interp_300s) > u.Quantity(0.01, u.microarcsecond))
# make sure the resolution is as good as we expect
assert np.all(ref.separation(interp_300s) < u.Quantity(1, u.microarcsecond))
def test_interpolation_nd():
'''
Test that the interpolation also works for nd-arrays
'''
from astropy.coordinates.erfa_astrom import (
ErfaAstrom, ErfaAstromInterpolator
)
from astropy.coordinates import EarthLocation, AltAz, GCRS
from astropy.time import Time
import astropy.units as u
fact = EarthLocation(
lon=-17.891105 * u.deg,
lat=28.761584 * u.deg,
height=2200 * u.m,
)
interp_provider = ErfaAstromInterpolator(300 * u.s)
provider = ErfaAstrom()
for shape in [tuple(), (1, ), (10, ), (3, 2), (2, 10, 5), (4, 5, 3, 2)]:
# create obstimes of the desired shapes
delta_t = np.linspace(0, 12, np.prod(shape, dtype=int)) * u.hour
obstime = (Time('2020-01-01T18:00') + delta_t).reshape(shape)
altaz = AltAz(location=fact, obstime=obstime)
gcrs = GCRS(obstime=obstime)
for frame, tcode in zip([altaz, altaz, gcrs], ['apio13', 'apci', 'apcs']):
without_interp = getattr(provider, tcode)(frame)
assert without_interp.shape == shape
with_interp = getattr(interp_provider, tcode)(frame)
assert with_interp.shape == shape
def test_interpolation_broadcasting():
from astropy.coordinates.tests.utils import randomly_sample_sphere
from astropy.coordinates import SkyCoord, EarthLocation, AltAz
from astropy.time import Time
import astropy.units as u
from astropy.coordinates.erfa_astrom import erfa_astrom, ErfaAstromInterpolator
# 100 random locations on the sky
ra, dec, _ = randomly_sample_sphere(100)
coord = SkyCoord(ra, dec)
# 30 times over the space of 1 hour
times = Time('2020-01-01T20:00') + np.linspace(-0.5, 0.5, 30) * u.hour
lst1 = EarthLocation(
lon=-17.891498 * u.deg,
lat=28.761443 * u.deg,
height=2200 * u.m,
)
# note the use of broadcasting so that 30 times are broadcast against 100 positions
aa_frame = AltAz(obstime=times[:, np.newaxis], location=lst1)
aa_coord = coord.transform_to(aa_frame)
with erfa_astrom.set(ErfaAstromInterpolator(300 * u.s)):
aa_coord_interp = coord.transform_to(aa_frame)
assert aa_coord.shape == aa_coord_interp.shape
assert np.all(aa_coord.separation(aa_coord_interp) < 1 * u.microarcsecond)
|
Python
| 0
|
@@ -31,69 +31,186 @@
st%0A%0A
-%0Adef test_science_state():%0A import astropy.units as u%0A
+import astropy.units as u%0Afrom astropy.time import Time%0Afrom astropy.utils.exceptions import AstropyWarning%0Afrom astropy.coordinates import EarthLocation, AltAz, GCRS, SkyCoord%0A%0A
from
@@ -243,36 +243,32 @@
astrom import (%0A
-
erfa_astrom,
@@ -299,29 +299,53 @@
nterpolator%0A
- )
+)%0A%0A%0Adef test_science_state():
%0A%0A assert
@@ -837,194 +837,8 @@
s():
-%0A import astropy.units as u%0A from astropy.coordinates.erfa_astrom import (%0A erfa_astrom, ErfaAstromInterpolator%0A )%0A from astropy.utils.exceptions import AstropyWarning
%0A%0A
@@ -1217,241 +1217,8 @@
it%0A%0A
- from astropy.coordinates.erfa_astrom import (%0A erfa_astrom, ErfaAstromInterpolator,%0A )%0A from astropy.coordinates import SkyCoord, EarthLocation, AltAz%0A from astropy.time import Time%0A import astropy.units as u%0A%0A
@@ -2105,235 +2105,8 @@
'''
-%0A from astropy.coordinates.erfa_astrom import (%0A ErfaAstrom, ErfaAstromInterpolator%0A )%0A%0A from astropy.coordinates import EarthLocation, AltAz, GCRS%0A from astropy.time import Time%0A import astropy.units as u
%0A%0A
|
a3750ff624d0f929a53cdb2dcd8f44bd2d9573d4
|
query get_solicitacoes fetches the latest inclusion
|
src/cd/queries/novo_modulo/solicitacoes.py
|
src/cd/queries/novo_modulo/solicitacoes.py
|
from pprint import pprint
from utils.functions.models import dictlist
from utils.functions.queries import debug_cursor_execute
def get_solicitacoes(
cursor,
solicitacao=None,
pedido_destino=None,
ref_destino=None,
ref_reservada=None,
):
filtra_solicitacao = f"""--
AND sl.SOLICITACAO = {solicitacao}
""" if solicitacao else ''
filtra_pedido_destino = f"""--
AND sl.PEDIDO_DESTINO = {pedido_destino}
""" if pedido_destino else ''
filtra_ref_destino = f"""--
AND ( ( sl.GRUPO_DESTINO = '00000'
AND l.PROCONF_GRUPO = '{ref_destino}'
)
OR sl.GRUPO_DESTINO = '{ref_destino}'
)
""" if ref_destino else ''
filtra_ref_reservada = f"""--
AND l.PROCONF_GRUPO = '{ref_reservada}'
""" if ref_reservada else ''
sql = f"""
SELECT DISTINCT
sl.SOLICITACAO
, sum(CASE WHEN sl.SITUACAO = 1 THEN 1 ELSE 0 END) l1
, sum(CASE WHEN sl.SITUACAO = 1 THEN sl.QTDE ELSE 0 END) q1
, sum(CASE WHEN sl.SITUACAO = 2 THEN 1 ELSE 0 END) l2
, sum(CASE WHEN sl.SITUACAO = 2 THEN sl.QTDE ELSE 0 END) q2
, sum(CASE WHEN sl.SITUACAO = 3 THEN 1 ELSE 0 END) l3
, sum(CASE WHEN sl.SITUACAO = 3 THEN sl.QTDE ELSE 0 END) q3
, sum(CASE WHEN sl.SITUACAO = 4 THEN 1 ELSE 0 END) l4
, sum(CASE WHEN sl.SITUACAO = 4 THEN sl.QTDE ELSE 0 END) q4
, sum(CASE WHEN sl.SITUACAO = 5 THEN 1 ELSE 0 END) l5
, sum(CASE WHEN sl.SITUACAO = 5 THEN sl.QTDE ELSE 0 END) q5
, sum(CASE WHEN l.CODIGO_ESTAGIO IS NULL THEN 1 ELSE 0 END) lf
, sum(CASE WHEN l.CODIGO_ESTAGIO IS NULL THEN sl.QTDE ELSE 0 END) qf
, sum(CASE WHEN l.CODIGO_ESTAGIO IS NOT NULL THEN 1 ELSE 0 END) lp
, sum(CASE WHEN l.CODIGO_ESTAGIO IS NOT NULL THEN sl.QTDE ELSE 0 END) qp
, sum(1) lt
, sum(sl.QTDE) qt
FROM pcpc_044 sl -- request / lot
-- In the requests table the shipping OP also shows up as
-- reserved, with status 4. To try to avoid that, we do not
-- list lots belonging to OPs that lack stage 63
-- (shipping OPs do not have stage 63)
JOIN PCPC_040 l_filtro
ON l_filtro.ORDEM_PRODUCAO = sl.ORDEM_PRODUCAO
AND l_filtro.ORDEM_CONFECCAO = sl.ORDEM_CONFECCAO
AND l_filtro.CODIGO_ESTAGIO = 63
LEFT JOIN PCPC_040 l
ON l.QTDE_EM_PRODUCAO_PACOTE > 0
AND l.ORDEM_PRODUCAO = sl.ORDEM_PRODUCAO
AND l.ORDEM_CONFECCAO = sl.ORDEM_CONFECCAO
WHERE sl.SOLICITACAO IS NOT NULL
{filtra_solicitacao} -- filtra_solicitacao
{filtra_pedido_destino} -- filtra_pedido_destino
{filtra_ref_destino} -- filtra_ref_destino
{filtra_ref_reservada} -- filtra_ref_reservada
GROUP BY
sl.SOLICITACAO
ORDER BY
sl.SOLICITACAO
"""
debug_cursor_execute(cursor, sql)
return dictlist(cursor)
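# A minimal usage sketch (assuming an open DB-API cursor; the lowercase
# result keys are an assumption based on how get_solicitacao below
# consumes dictlist rows):
#
#     rows = get_solicitacoes(cursor, ref_destino='12345')
#     for row in rows:
#         print(row['solicitacao'], row['qt'])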
def get_solicitacao(cursor, id):
sql = f"""
SELECT DISTINCT
sl.ORDEM_PRODUCAO
, sl.ORDEM_CONFECCAO
, sl.PEDIDO_DESTINO
, sl.OP_DESTINO
, sl.OC_DESTINO
, sl.DEP_DESTINO
, sl.QTDE
, sl.SITUACAO
, sl.SOLICITACAO
, sl.PERIODO_OC
, sl.GRUPO_DESTINO
, sl.ALTER_DESTINO
, sl.SUB_DESTINO
, sl.COR_DESTINO
, sl.INCLUSAO
, lest.CODIGO_ESTAGIO
, l.PERIODO_PRODUCAO PERIODO
, l.PROCONF_NIVEL99 NIVEL
, l.PROCONF_GRUPO REF
, l.PROCONF_SUBGRUPO TAM
, l.PROCONF_ITEM COR
, l.QTDE_PECAS_PROG QTD_ORI
FROM pcpc_044 sl -- request / lot
-- In the requests table the shipping OP also shows up as
-- reserved, with status 4. To try to avoid that, we do not
-- list lots belonging to OPs that lack stage 63
-- (shipping OPs do not have stage 63)
JOIN PCPC_040 l_filtro
ON l_filtro.ORDEM_PRODUCAO = sl.ORDEM_PRODUCAO
AND l_filtro.ORDEM_CONFECCAO = sl.ORDEM_CONFECCAO
AND l_filtro.CODIGO_ESTAGIO = 63
LEFT JOIN PCPC_040 lest
ON lest.QTDE_EM_PRODUCAO_PACOTE > 0
AND lest.ORDEM_PRODUCAO = sl.ORDEM_PRODUCAO
AND lest.ORDEM_CONFECCAO = sl.ORDEM_CONFECCAO
LEFT JOIN PCPC_040 l
ON l.SEQUENCIA_ESTAGIO = 1
AND l.ORDEM_PRODUCAO = sl.ORDEM_PRODUCAO
AND l.ORDEM_CONFECCAO = sl.ORDEM_CONFECCAO
WHERE sl.SOLICITACAO = {id}
ORDER BY
sl.SITUACAO
, lest.CODIGO_ESTAGIO
, sl.ORDEM_PRODUCAO
, sl.ORDEM_CONFECCAO
"""
debug_cursor_execute(cursor, sql)
dados = dictlist(cursor)
for row in dados:
row['lote'] = '{}{:05}'.format(row['periodo'], row['ordem_confeccao'])
if not row['codigo_estagio']:
row['codigo_estagio'] = 'Finalizado'
row['int_parc'] = 'Inteiro' if row['qtde'] == row['qtd_ori'] else 'parcial'
if row['grupo_destino'] == '00000':
row['grupo_destino'] = row['ref']
if row['sub_destino'] == '0':
row['sub_destino'] = row['tam']
if row['cor_destino'] == '0':
row['cor_destino'] = row['cor']
return dados
|
Python
| 0.999458
|
@@ -1889,16 +1889,52 @@
TDE) qt%0A
+ , max(sl.INCLUSAO) inclusao%0A
|
8481cb40caa896b81386f4a9ddb6fda92e14cc76
|
Fix a typo
|
ironic/tests/unit/db/sqlalchemy/test_types.py
|
ironic/tests/unit/db/sqlalchemy/test_types.py
|
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""Tests for custom SQLAlchemy types via Ironic DB."""
from oslo_db import exception as db_exc
from oslo_utils import uuidutils
import ironic.db.sqlalchemy.api as sa_api
from ironic.db.sqlalchemy import models
from ironic.tests.unit.db import base
class SqlAlchemyCustomTypesTestCase(base.DbTestCase):
# NOTE(max_lobur): Since it's not straightforward to check this in
# isolation these tests use existing db models.
def test_JSONEncodedDict_default_value(self):
# Create chassis w/o extra specified.
ch1_id = uuidutils.generate_uuid()
self.dbapi.create_chassis({'uuid': ch1_id})
# Get chassis manually to test SA types in isolation from UOM.
ch1 = sa_api.model_query(models.Chassis).filter_by(uuid=ch1_id).one()
self.assertEqual({}, ch1.extra)
# Create chassis with extra specified.
ch2_id = uuidutils.generate_uuid()
extra = {'foo1': 'test', 'foo2': 'other extra'}
self.dbapi.create_chassis({'uuid': ch2_id, 'extra': extra})
# Get chassis manually to test SA types in isolation from UOM.
ch2 = sa_api.model_query(models.Chassis).filter_by(uuid=ch2_id).one()
self.assertEqual(extra, ch2.extra)
def test_JSONEncodedDict_type_check(self):
self.assertRaises(db_exc.DBError,
self.dbapi.create_chassis,
{'extra': ['this is not a dict']})
def test_JSONEncodedLict_default_value(self):
# Create conductor w/o extra specified.
cdr1_id = 321321
self.dbapi.register_conductor({'hostname': 'test_host1',
'drivers': None,
'id': cdr1_id})
# Get conductor manually to test SA types in isolation from UOM.
cdr1 = (sa_api
.model_query(models.Conductor)
.filter_by(id=cdr1_id)
.one())
self.assertEqual([], cdr1.drivers)
# Create conductor with drivers specified.
cdr2_id = 623623
drivers = ['foo1', 'other driver']
self.dbapi.register_conductor({'hostname': 'test_host2',
'drivers': drivers,
'id': cdr2_id})
# Get conductor manually to test SA types in isolation from UOM.
cdr2 = (sa_api
.model_query(models.Conductor)
.filter_by(id=cdr2_id)
.one())
self.assertEqual(drivers, cdr2.drivers)
def test_JSONEncodedList_type_check(self):
self.assertRaises(db_exc.DBError,
self.dbapi.register_conductor,
{'hostname': 'test_host3',
'drivers': {'this is not a list': 'test'}})
|
Python
| 0.999988
|
@@ -2032,17 +2032,17 @@
ncodedLi
-c
+s
t_defaul
|
38efb136609b645b0076c0aa1481330f9e28ee51
|
Add a rule for matching packages by regex.
|
fmn/rules/generic.py
|
fmn/rules/generic.py
|
# Generic rules for FMN
import fedmsg
import fmn.rules.utils
def user_filter(config, message, fasnick=None, *args, **kw):
""" All messages for a certain user
Use this rule to include messages that are associated with a
specific user.
"""
fasnick = kw.get('fasnick', fasnick)
if fasnick:
return fasnick in fedmsg.meta.msg2usernames(message, **config)
def not_user_filter(config, message, fasnick=None, *args, **kw):
""" All messages not concerning one or more users
Use this rule to exclude messages that are associated with one or more
users. Specify several users by separating them with a comma ','.
"""
fasnick = kw.get('fasnick', fasnick)
if not fasnick:
return False
# split the comma-separated list into individual nicks
fasnick = fasnick.split(',') if fasnick else []
valid = True
for nick in fasnick:
if nick.strip() in fedmsg.meta.msg2usernames(message, **config):
valid = False
break
return valid
def user_package_filter(config, message, fasnick=None, *args, **kw):
""" All messages concerning user's packages
This rule includes messages that relate to packages where the
specified user has **commit** ACLs.
"""
fasnick = kw.get('fasnick', fasnick)
if fasnick:
user_packages = fmn.rules.utils.get_packages_of_user(config, fasnick)
msg_packages = fedmsg.meta.msg2packages(message, **config)
return user_packages.intersection(msg_packages)
def package_filter(config, message, package=None, *args, **kw):
""" All messages pertaining to a certain package
Use this rule to include messages that relate to a certain package
(*i.e., nethack*).
"""
package = kw.get('package', package)
if package:
return package in fedmsg.meta.msg2packages(message, **config)
def trac_hosted_filter(config, message, project=None, *args, **kw):
""" Filter the messages for one or more fedorahosted projects
Adding this rule allows you to get notifications for one or more
`fedorahosted <https://fedorahosted.org>`_ projects. Specify multiple
projects by separating them with a comma ','.
"""
project = kw.get('project', project)
link = fedmsg.meta.msg2link(message, **config)
if not link:
return False
# split the comma-separated list into individual project names
project = project.split(',') if project else []
valid = False
for proj in project:
if '://fedorahosted.org/%s/' % proj.strip() in link:
valid = True
return valid
|
Python
| 0
|
@@ -17,16 +17,27 @@
for FMN%0A
+import re%0A%0A
import f
@@ -1819,24 +1819,561 @@
**config)%0A%0A%0A
+def package_regex_filter(config, message, pattern=None, *args, **kw):%0A %22%22%22 All messages pertaining to packages matching a given regex%0A%0A Use this rule to include messages that relate to packages that match%0A particular regular expressions%0A (*i.e., (maven%7Cjavapackages-tools%7Cmaven-surefire)*).%0A %22%22%22%0A%0A pattern = kw.get('pattern', pattern)%0A if pattern:%0A packages = fedmsg.meta.msg2packages(message, **config)%0A regex = re.compile(pattern)%0A return any(%5Bregex.match(package) for package in packages%5D)%0A%0A%0A
def trac_hos
|
7f974b87c278ef009535271461b5e49686057a9a
|
Fix for django >= 1.10
|
avatar/management/commands/rebuild_avatars.py
|
avatar/management/commands/rebuild_avatars.py
|
from django.core.management.base import NoArgsCommand
from avatar.conf import settings
from avatar.models import Avatar
class Command(NoArgsCommand):
help = ("Regenerates avatar thumbnails for the sizes specified in "
"settings.AVATAR_AUTO_GENERATE_SIZES.")
def handle_noargs(self, **options):
for avatar in Avatar.objects.all():
for size in settings.AVATAR_AUTO_GENERATE_SIZES:
if options['verbosity'] != 0:
print("Rebuilding Avatar id=%s at size %s." % (avatar.id, size))
avatar.create_thumbnail(size)
|
Python
| 0
|
@@ -33,22 +33,20 @@
import
-NoArgs
+Base
Command%0A
@@ -132,14 +132,12 @@
and(
-NoArgs
+Base
Comm
@@ -285,20 +285,20 @@
ndle
-_noargs(self
+(self, *args
, **
|
f36cad198c45caa40f179e5a9de134610cc3f6fe
|
fix date filter
|
skylines/commands/flights/selector.py
|
skylines/commands/flights/selector.py
|
from flask.ext.script import Option
from sqlalchemy import func
from datetime import datetime
from skylines.model import Airport, Flight
selector_options = (
Option('--date-from', help='Date from (YYYY-MM-DD)'),
Option('--date-to', help='Date to (YYYY-MM-DD)'),
Option('--uploaded-from', help='Upload date from (YYYY-MM-DD)'),
Option('--uploaded-to', help='Upload date to (YYYY-MM-DD)'),
Option('--private', action='store_true',
help='Process private flights, too'),
Option('--country-code', help='Country code of the start airport'),
Option('--airport-name', help='Airport name of the start airport'),
Option('ids', metavar='ID', nargs='*', type=int,
help='Any number of flight IDs.'),
)
def select(q, **kwargs):
if kwargs.get('ids'):
print "ids == " + str(kwargs.get('ids'))
q = q.filter(Flight.id.in_(kwargs.get('ids')))
if kwargs.get('date_from'):
try:
date_from = datetime.strptime(kwargs.get('date_from'), "%Y-%m-%d")
q = q.filter(Flight.takeoff_time >= date_from)
print "takeoff_time >= " + str(date_from)
except:
print "Cannot parse date-from"
return None
if kwargs.get('date_to'):
try:
date_to = datetime.strptime(kwargs.get('date_to'), "%Y-%m-%d")
q = q.filter(Flight.takeoff_time >= date_to)
print "takeoff_time < " + str(date_to)
except:
print "Cannot parse date-to"
return None
if kwargs.get('uploaded_from'):
try:
uploaded_from = datetime.strptime(kwargs.get('uploaded_from'), "%Y-%m-%d")
q = q.filter(Flight.time_created >= uploaded_from)
print "time_created >= " + str(uploaded_from)
except:
print "Cannot parse uploaded-from"
return None
if kwargs.get('uploaded_to'):
try:
uploaded_to = datetime.strptime(kwargs.get('uploaded_to'), "%Y-%m-%d")
q = q.filter(Flight.time_created < uploaded_to)
print "time_created < " + str(uploaded_to)
except:
print "Cannot parse uploaded-to"
return None
if not kwargs.get('private'):
print "privacy_level == PUBLIC"
q = q.filter(Flight.privacy_level == Flight.PrivacyLevel.PUBLIC)
if kwargs.get('country_code'):
country_code = kwargs.get('country_code')
q = q.join(Flight.takeoff_airport)
q = q.filter(func.lower(Airport.country_code) == func.lower(country_code))
print "takeoff_airport country code: " + country_code
if kwargs.get('airport_name'):
airport_name = kwargs.get('airport_name')
q = q.join(Flight.takeoff_airport)
q = q.filter(func.lower(Airport.name) == func.lower(airport_name))
print "takeoff_airport name: " + airport_name
return q
|
Python
| 0.000011
|
@@ -1363,26 +1363,25 @@
akeoff_time
-%3E=
+%3C
date_to)%0A
|
ba1764a7a34e3560f96b857e7c1194a760a4bd86
|
fix layout of docstring, make paragraph a code block
|
feedinlib/weather.py
|
feedinlib/weather.py
|
# -*- coding: utf-8 -*-
"""
Created on Fri Oct 9 16:01:02 2015
@author: uwe
"""
import pandas as pd
class FeedinWeather:
def __init__(self, **kwargs):
r"""
Class containing all meta information regarding the weather data set.
Parameters
----------
data : pandas.DataFrame, optional
Containing the time series of the different parameters as columns
timezone : string, optional
Containing the name of the time zone using the naming of the
IANA (Internet Assigned Numbers Authority) time zone database [40]_
longitude : float, optional
Longitude of the location of the weather data
latitude : float, optional
Latitude of the location of the weather data
geometry : shapely.geometry object
polygon or point representing the zone of the weather data
data_height : dictionary, optional
Containing the heights of the weather measurements or weather
model in meters with the keys of the data parameter
name : string
Name of the weather data object
Notes
-----
Depending on the feedin model used, some of the optional parameters
might be mandatory.
References
----------
.. [40] `IANA time zone database <http://www.iana.org/time-zones>`_.
"""
self.data = kwargs.get('data', None)
try:
self.timezone = self.data.index.tz
except:
self.timezone = kwargs.get('timezone', None)
self.longitude = kwargs.get('longitude', None)
self.latitude = kwargs.get('latitude', None)
self.geometry = kwargs.get('geometry', None)
self.data_height = kwargs.get('data_height', None)
self.name = kwargs.get('name', None)
def read_feedinlib_csv(self, filename, overwrite=True):
r"""
Reading a csv-file with a header containing the meta data of the time
series.
The header has to contain the time zone and has to end with a blank
line. To add data of the data_height dictionary there should be a
space between the parameter name and the key name (e.g. # data_height
v_wind: 10). Furthermore, any number of parameters can be added.
The file should have the following form:
# timezone=
# name: NAME
# longitude: xx.xxx
# latitude: yy.yyy
# timezone: Continent/City
# data_height temp_air: zz
# data_height v_wind: vv
,temp_air,v_wind,.....
2010-01-01 00:00:00+01:00,267.599,5.32697,...
2010-01-01 01:00:00+01:00,267.596,5.46199,....
....
Parameters
----------
filename : string
The filename with the full path and the suffix of the file.
overwrite : boolean
If False, only class attributes of NoneType will be overwritten
with the data of the csv file. If True, all class attributes will
be overwritten with the data of the csv-file.
Raises
------
FileNotFoundError
If the file defined by filename can not be found.
"""
# Read meta data (location of weather data)
meta_dict = {}
skiprows = 0
with open(filename, 'r') as f:
while 1:
tmp = f.readline()[2:-1]
if not tmp.strip():
break
tmp = tmp.replace(' ', '')
[a, b] = tmp.split(':')
meta_dict[a] = b
skiprows += 1
# Define attributes
if self.latitude is None or overwrite:
self.latitude = float(meta_dict.get('latitude'))
if self.longitude is None or overwrite:
self.longitude = float(meta_dict.get('longitude'))
if self.timezone is None or overwrite:
self.timezone = meta_dict.get('timezone')
if self.name is None or overwrite:
self.name = meta_dict.get('name')
# Read weather data
if self.data is None or overwrite:
df = pd.read_csv(filename, skiprows=skiprows)
self.data = df.set_index(
pd.to_datetime(df['Unnamed: 0'])).tz_localize(
'UTC').tz_convert(self.timezone).drop('Unnamed: 0', 1)
# Define height dict
self.data_height = {}
for key in self.data.keys():
self.data_height[key] = float(
meta_dict.get('data_height' + key, 0))
return self
|
Python
| 0.00001
|
@@ -2362,16 +2362,40 @@
g form:%0A
+%0A .. code::%0A%0A
@@ -2414,16 +2414,20 @@
+
# name:
@@ -2431,16 +2431,20 @@
e: NAME%0A
+
@@ -2471,16 +2471,20 @@
+
# latitu
@@ -2494,16 +2494,20 @@
yy.yyy%0A
+
@@ -2533,32 +2533,36 @@
nt/City%0A
+
# data_height te
@@ -2572,16 +2572,20 @@
air: zz%0A
+
@@ -2618,16 +2618,20 @@
+
,temp_ai
@@ -2641,24 +2641,28 @@
_wind,.....%0A
+
2010
@@ -2703,32 +2703,36 @@
697,...%0A
+
2010-01-01 01:00
@@ -2758,24 +2758,28 @@
.46199,....%0A
+
....
|
e9727669db3d71e6cf1805306810ef296b390385
|
fix post_*allocation calls
|
src/clusto/drivers/base/resourcemanager.py
|
src/clusto/drivers/base/resourcemanager.py
|
import clusto
from clusto.schema import select, and_, ATTR_TABLE, Attribute, func, Counter
from clusto.drivers.base import Driver, ClustoMeta
from clusto.exceptions import ResourceTypeException, ResourceNotAvailableException, ResourceException
class ResourceManager(Driver):
"""The ResourceManager driver should be subclassed by a driver that will
manage a resource such as IP allocation, MAC Address lists, etc.
This base class just allocates unique integers.
Resources are attributes on Entities that are managed by a ResourceManger.
The implementation has the following properties:
1. The Entity being assigned the resource gets an attribute whose key is
defined by the resource manager, a number assigned by the resource manager
(sometimes related to the resource being allocated), and a value which is
a representation of the resource.
2. The Entity gets an additional attribute whose key and number match the
allocated resource, but with subkey='manager', and value that is a
reference to the resource manager assigning the resource.
Any additional attributes with same attribute key and number are
considered part of the resource and can be managed by the resource
manager.
"""
_clusto_type = "resourcemanager"
_driver_name = "resourcemanager"
_attr_name = "resource"
_record_allocations = True
def allocator(self, thing=None):
"""return an unused resource from this resource manager"""
raise NotImplementedError("No allocator implemented for %s; you must explicitly specify a resource."
% self.name)
def ensure_type(self, resource, number=True, thing=None):
"""checks the type of a given resourece
if the resource is valid return it and optionally convert it to
another format. The format it returns has to be compatible with
attribute naming
"""
return (resource, number)
def get_resource_number(self, thing, resource):
"""Get the number for a resource on a given entity."""
resource, number = self.ensure_type(resource, thing=thing)
attrs = thing.attrs(self._attr_name, value=resource)
if attrs:
return attrs[0].number
else:
raise ResourceException("%s isn't assigned resource %s"
% (thing.name, str(resource)))
def get_resource_attr_values(self, thing, resource, key, number=True):
"""Get the value for the attrs on the resource assigned to a given entity matching the given key."""
return [x.value for x in self.get_resource_attrs(thing, resource,
key, number)]
def get_resource_attrs(self, thing, resource, key=(), number=True):
"""Get the Attributes for the attrs on the resource assigned to a given enttiy matching the given key."""
resource, number = self.ensure_type(resource, number, thing=thing)
return thing.attrs(self._attr_name, number=number, subkey=key)
def add_resource_attr(self, thing, resource, key, value, number=True):
"""Add an Attribute for the resource assigned to a given entity setting the given key and value"""
resource, number = self.ensure_type(resource, number, thing=thing)
attr = thing.add_attr(self._attr_name, number=number, subkey=key, value=value)
return attr
def set_resource_attr(self, thing, resource, key, value, number=True):
"""Set an Attribute for the resource assigned to a given entity with the given key and value"""
resource, number = self.ensure_type(resource, number, thing=thing)
attr = thing.set_attr(self._attr_name, number=number, subkey=key, value=value)
return attr
def del_resource_attr(self, thing, resource, key, value=(), number=True):
"""Delete an Attribute for the resource assigned to a given entity matching the given key and value"""
resource, number = self.ensure_type(resource, number, thing=thing)
thing.del_attrs(self._attr_name, number=number, subkey=key, value=value)
def additional_attrs(self, thing, resource, number):
pass
def post_automatic_allocation(self, thing, resource, number):
pass
def post_allocation(self, thing, resource, number):
pass
def allocate(self, thing, resource=(), number=True, force=False):
"""allocates a resource element to the given thing.
resource - if passed as an argument, it will be checked
before assignment.
refattr - the attribute name on the entity that will refer back
to this resource manager.
returns the resource that was either passed in and processed
or generated.
"""
try:
clusto.begin_transaction()
if not isinstance(thing, Driver):
raise TypeError("thing is not of type Driver")
if resource is ():
# allocate a new resource
resource, number = self.allocator(thing)
auto_allocated = True
else:
auto_allocated = False
resource, number = self.ensure_type(resource, number, thing)
if not force and not self.available(resource, number, thing):
raise ResourceException("Requested resource is not available.")
if self._record_allocations:
if number == True:
c = Counter.get(ClustoMeta().entity, self._attr_name)
attr = thing.add_attr(self._attr_name,
resource,
number=c.value
)
c.next()
else:
attr = thing.add_attr(self._attr_name, resource, number=number)
clusto.flush()
a=thing.add_attr(self._attr_name,
self.entity,
number=attr.number,
subkey='manager',
)
clusto.flush()
self.additional_attrs(thing, resource, attr.number)
else:
attr = None
clusto.commit()
except Exception, x:
clusto.rollback_transaction()
raise x
if auto_allocated:
self.post_automatic_allocation(thing, resource, attr.number)
self.post_allocation(thing, resource, attr.number)
return attr #resource
def deallocate(self, thing, resource=(), number=True):
"""deallocates a resource from the given thing."""
clusto.begin_transaction()
try:
if resource is ():
for res in self.resources(thing):
thing.del_attrs(self._attr_name, number=res.number)
elif resource and not self.available(resource, number):
resource, number = self.ensure_type(resource, number)
res = thing.attrs(self._attr_name, self, subkey='manager', number=number)
for a in res:
thing.del_attrs(self._attr_name, number=a.number)
clusto.commit()
except Exception, x:
clusto.rollback_transaction()
raise x
def available(self, resource, number=True, thing=None):
"""return True if resource is available, False otherwise.
"""
resource, number = self.ensure_type(resource, number)
if self.owners(resource, number):
return False
return True
def owners(self, resource, number=True):
"""return a list of driver objects for the owners of a given resource.
"""
resource, number = self.ensure_type(resource, number)
return Driver.get_by_attr(self._attr_name, resource, number=number)
@classmethod
def get_resource_manager(cls, resource_attr):
"""Return the resource manager for a given resource_attr"""
thing = Driver(resource_attr.entity)
return thing.attr_value(key=resource_attr.key,
subkey='manager',
number=resource_attr.number)
@classmethod
def resources(cls, thing):
"""return a list of resources from the resource manager that is
associated with the given thing.
A resource is a resource attribute in a resource manager.
"""
attrs = [x for x in thing.attrs(cls._attr_name, subkey='manager')
if isinstance(Driver(x.value), cls)]
res = []
for attr in attrs:
t=thing.attrs(cls._attr_name, number=attr.number, subkey=None)
res.extend(t)
return res
@property
def count(self):
"""Return the number of resources used."""
return len(self.references(self._attr_name, self, subkey='manager'))
|
Python
| 0
|
@@ -6699,37 +6699,32 @@
hing, resource,
-attr.
number)%0A%0A
@@ -6754,37 +6754,32 @@
hing, resource,
-attr.
number)%0A
|
0da189464703837e212bff06c24cc6eb5b62eeea
|
Fix name of room
|
blackbelt/slack.py
|
blackbelt/slack.py
|
from slacker import Slacker
from blackbelt.config import config
class Slack(object):
def __init__(self, token=None):
if not token:
token = config['slack']['access_token']
slack = Slacker(token)
self.slack = slack
if not token:
raise ValueError("Can't do things with Slack without access token. Run bb init.")
self.token = token
def get_user_id(self):
return self.slack.auth.test().body['user_id']
def post_message(self, message, room):
return self.slack.chat.post_message(room, message, username = "Black Belt", icon_emoji = ":blackbelt:")
def post_message(message, room='#sre'):
client = Slack()
msg = "<@%s> %s" % (client.get_user_id(), message)
client.post_message(msg, room)
|
Python
| 0.999953
|
@@ -676,11 +676,19 @@
m='#
-sre
+engine-room
'):%0A
|
f2b6b31702126062d1f930b053cb0f2ba3e46c7c
|
add logging to file loading
|
smif/cli/parse_sector_model_config.py
|
smif/cli/parse_sector_model_config.py
|
# -*- coding: utf-8 -*-
"""Read and parse the config for sector models
"""
import os
from glob import glob
import fiona
from . parse_config import ConfigParser
class SectorModelReader(object):
"""Parses the configuration and input data for a sector model
Arguments
=========
model_name : str
The name of the model
model_path : str
The path to the python module file that contains an implementation
of SectorModel
model_classname : str
The name of the class that implements SectorModel
model_config_dir : str
The root path of model config/data to use
"""
def __init__(self, initial_config):
self.model_name = initial_config["model_name"]
self.model_path = initial_config["model_path"]
self.model_classname = initial_config["model_classname"]
self.model_config_dir = initial_config["model_config_dir"]
self.initial_conditions_paths = initial_config["initial_conditions"]
self.interventions_paths = initial_config["interventions"]
self.inputs = None
self.outputs = None
self.time_intervals = None
self.regions = None
self.initial_conditions = None
self.interventions = None
def load(self):
"""Load and check all config
"""
self.inputs = self._load_inputs()
self.outputs = self._load_outputs()
self.time_intervals = self._load_time_intervals()
self.regions = self._load_regions()
self.initial_conditions = self._load_initial_conditions()
self.interventions = self._load_interventions()
@property
def data(self):
"""Expose all loaded config data
"""
return {
"name": self.model_name,
"path": self.model_path,
"classname": self.model_classname,
"inputs": self.inputs,
"outputs": self.outputs,
"time_intervals": self.time_intervals,
"regions": self.regions,
"initial_conditions": self.initial_conditions,
"interventions": self.interventions
}
def _load_inputs(self):
"""Input spec is located in the ``data/<sectormodel>/inputs.yaml`` file
"""
path = os.path.join(self.model_config_dir, 'inputs.yaml')
if not os.path.exists(path):
msg = "inputs config file not found for {} model"
raise FileNotFoundError(msg.format(self.model_name))
return ConfigParser(path).data
def _load_outputs(self):
"""Output spec is located in ``data/<sectormodel>/output.yaml`` file
"""
path = os.path.join(self.model_config_dir, 'outputs.yaml')
if not os.path.exists(path):
msg = "outputs config file not found for {} model"
raise FileNotFoundError(msg.format(self.model_name))
return ConfigParser(path).data
def _load_initial_conditions(self):
"""Inital conditions are located in yaml files
specified in sector model blocks in the sos model config
"""
data = []
paths = self.initial_conditions_paths
if len(paths) == 0:
msg = "No inital_conditions config files provided for {} model"
raise FileNotFoundError(msg.format(self.model_name))
else:
for path in paths:
new_data = ConfigParser(path).data
data.extend(new_data)
return data
def _load_interventions(self):
"""Interventions are located in yaml files
specified in sector model blocks in the sos model config
"""
data = []
paths = self.interventions_paths
if len(paths) == 0:
msg = "No interventions config files provided for {} model"
raise FileNotFoundError(msg.format(self.model_name))
else:
for path in paths:
new_data = ConfigParser(path).data
data.extend(new_data)
return data
def _load_time_intervals(self):
"""Within-year time intervals are specified in ``data/<sectormodel>/time_intervals.yaml``
These specify the mapping of model timesteps to durations within a year
(assume modelling 365 days: no extra day in leap years, no leap seconds)
Each time interval must have
- start (period since beginning of year)
- end (period since beginning of year)
- id (label to use when passing between integration layer and sector model)
Use the ISO 8601 [1]_ duration format to specify periods::
P[n]Y[n]M[n]DT[n]H[n]M[n]S
References
----------
.. [1] https://en.wikipedia.org/wiki/ISO_8601#Durations
"""
path = os.path.join(self.model_config_dir, 'time_intervals.yaml')
if not os.path.exists(path):
msg = "time_intervals config file not found for {} model"
raise FileNotFoundError(msg.format(self.model_name))
return ConfigParser(path).data
def _load_regions(self):
"""Model regions are specified in ``data/<sectormodel>/regions.*``
The file format must be possible to parse with GDAL, and must contain
an attribute "name" to use as an identifier for the region.
"""
path = os.path.join(self.model_config_dir, 'regions.shp')
if not os.path.exists(path):
paths = glob("{}/regions.*".format(self.model_config_dir))
if len(paths) == 1:
path = paths[0]
else:
msg = "regions config file not found for {} model"
raise FileNotFoundError(msg.format(self.model_name))
with fiona.drivers():
with fiona.open(path) as src:
data = [f for f in src]
return data
|
Python
| 0.000001
|
@@ -68,16 +68,31 @@
els%0A%22%22%22%0A
+import logging%0A
import o
@@ -678,16 +678,66 @@
onfig):%0A
+ self.logger = logging.getLogger(__name__)%0A
@@ -3419,32 +3419,117 @@
path in paths:%0A
+ self.logger.debug(%22Loading initial conditions from %7B%7D%22.format(path))%0A
@@ -4046,32 +4046,112 @@
path in paths:%0A
+ self.logger.debug(%22Loading interventions from %7B%7D%22.format(path))%0A
|
eb3a332cf5aeb6b213c333cbfba78b26b776db49
|
fix facebook api
|
social_publisher/backends/facebook.py
|
social_publisher/backends/facebook.py
|
# -*- coding: utf-8 -*-
from social_publisher import facebook
from social_publisher.backends import base
class FacebookBackend(base.BaseBackend):
name = 'facebook'
auth_provider = 'facebook'
def get_api(self, social_user):
return facebook.GraphAPI(social_user.extra_data.get('access_token'))
def get_api_publisher(self, social_user):
"""
message: <str>
image: <file> as object_attachment
owner_id: <str>
"""
def _post(owner_id=None, **kwargs):
owner_id = owner_id or 'me'
image = kwargs.get('image')
if image:
res = self.get_api(social_user).post(
'{}/photos'.format(owner_id), image=image)
kwargs['object_attachment'] = res['id']
return self.get_api(social_user).post(
'{}/feed'.format(owner_id),
params=kwargs
)
return _post
class FacebookPostImageBackend(FacebookBackend):
name = 'facebook_post_image'
auth_provider = 'facebook'
def get_api_publisher(self, social_user):
"""
message: <str>
image: <file>
owner_id: <str>
"""
def _post(owner_id=None, **kwargs):
owner_id = owner_id or 'me'
return self.get_api(social_user).post(
'{}/photos'.format(owner_id),
params=kwargs
)
return _post
|
Python
| 0.000014
|
@@ -736,20 +736,52 @@
id),
-
+%0A params=%7B'
image
-=
+':
image
+%7D
)%0A
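
Decoded, the hunk above wraps the photo upload's image in the params dict, mirroring the '/feed' call below it (a reconstruction of the changed call, not the full file):

# Before (broken): image passed straight through as a bare kwarg.
res = self.get_api(social_user).post(
    '{}/photos'.format(owner_id), image=image)
# After (per the diff): image wrapped in params, as the wrapper expects.
res = self.get_api(social_user).post(
    '{}/photos'.format(owner_id),
    params={'image': image})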
|
07c8888a3623ea40c4f2047e11445726e61e2438
|
Fix lint.
|
packs/csv/tests/test_action_parse.py
|
packs/csv/tests/test_action_parse.py
|
import unittest2
from parse_csv import ParseCSVAction
__all__ = [
'ParseCSVActionTestCase'
]
MOCK_DATA = """
first,last,year
name1,surename1,1990
""".strip()
class ParseCSVActionTestCase(unittest2.TestCase):
def test_run(self):
result = ParseCSVAction().run(data=MOCK_DATA, delimiter=',')
expected = [
['first', 'last', 'year'],
['name1', 'surename1', '1990']
]
self.assertEqual(result, expected)
|
Python
| 0.000001
|
@@ -159,16 +159,17 @@
trip()%0A%0A
+%0A
class Pa
|
8d288053574753fcd2bc5d163b7035f2bfbd9f8e
|
Make categorical projection code simpler
|
chainerrl/agents/categorical_dqn.py
|
chainerrl/agents/categorical_dqn.py
|
from __future__ import unicode_literals
from __future__ import print_function
from __future__ import division
from __future__ import absolute_import
from future import standard_library
standard_library.install_aliases()
import chainer
import chainer.functions as F
import numpy as np
from chainerrl.agents import dqn
def _apply_categorical_projection(y, y_probs, z):
"""Apply categorical projection.
See Algorithm 1 in https://arxiv.org/abs/1707.06887.
Args:
y (ndarray): Values of atoms before projection. Its shape must be
(batch_size, n_atoms).
y_probs (ndarray): Probabilities of atoms whose values are y.
Its shape must be (batch_size, n_atoms).
z (ndarray): Values of atoms after projection. Its shape must be
(n_atoms,). It is assumed that the values are sorted in ascending
order and evenly spaced.
Returns:
ndarray: Probabilities of atoms whose values are z.
"""
batch_size, n_atoms = y.shape
assert z.shape == (n_atoms,)
assert y_probs.shape == (batch_size, n_atoms)
delta_z = z[1] - z[0]
v_min = z[0]
v_max = z[-1]
xp = chainer.cuda.get_array_module(z)
y = xp.clip(y, v_min, v_max)
# bj: (batch_size, n_atoms)
bj = (y - v_min) / delta_z
assert bj.shape == (batch_size, n_atoms)
# l, u: (batch_size, n_atoms)
l, u = xp.floor(bj), xp.ceil(bj)
assert l.shape == (batch_size, n_atoms)
assert u.shape == (batch_size, n_atoms)
if chainer.cuda.available and xp is chainer.cuda.cupy:
scatter_add = xp.scatter_add
else:
scatter_add = np.add.at
z_probs = xp.zeros((batch_size, n_atoms), dtype=xp.float32)
offset = xp.arange(
0, batch_size * n_atoms, n_atoms, dtype=xp.int32)[..., None]
# Accumulate m_l
scatter_add(
z_probs.ravel(),
(l.astype(xp.int32) + offset).ravel(),
(y_probs * (u - bj)).ravel())
# Accumulate m_u
scatter_add(
z_probs.ravel(),
(u.astype(xp.int32) + offset).ravel(),
(y_probs * (bj - l)).ravel())
# Deal with the case when bj is an integer, i.e., l = u = bj
scatter_add(
z_probs.ravel(),
(u.astype(xp.int32) + offset).ravel(),
(y_probs * (u == l)).ravel())
return z_probs
class CategoricalDQN(dqn.DQN):
"""Categorical DQN.
See https://arxiv.org/abs/1707.06887.
Arguments are the same as those of DQN except q_function must return
DistributionalDiscreteActionValue and clip_delta is ignored.
"""
def _compute_target_values(self, exp_batch, gamma):
"""Compute a batch of target return distributions."""
batch_next_state = exp_batch['next_state']
target_next_qout = self.target_model(batch_next_state)
batch_rewards = exp_batch['reward']
batch_terminal = exp_batch['is_state_terminal']
batch_size = exp_batch['reward'].shape[0]
z_values = target_next_qout.z_values
n_atoms = z_values.size
# next_q_max: (batch_size, n_atoms)
next_q_max = target_next_qout.max_as_distribution.data
assert next_q_max.shape == (batch_size, n_atoms), next_q_max.shape
# Tz: (batch_size, n_atoms)
Tz = (batch_rewards[..., None]
+ (1.0 - batch_terminal[..., None]) * gamma * z_values[None])
return _apply_categorical_projection(Tz, next_q_max, z_values)
def _compute_y_and_t(self, exp_batch, gamma):
"""Compute a batch of predicted/target return distributions."""
batch_size = exp_batch['reward'].shape[0]
# Compute Q-values for current states
batch_state = exp_batch['state']
# (batch_size, n_actions, n_atoms)
qout = self.model(batch_state)
n_atoms = qout.z_values.size
batch_actions = exp_batch['action']
batch_q = qout.evaluate_actions_as_distribution(batch_actions)
assert batch_q.shape == (batch_size, n_atoms)
with chainer.no_backprop_mode():
batch_q_target = self._compute_target_values(exp_batch, gamma)
assert batch_q_target.shape == (batch_size, n_atoms)
return batch_q, batch_q_target
def _compute_loss(self, exp_batch, gamma, errors_out=None):
"""Compute a loss of categorical DQN."""
y, t = self._compute_y_and_t(exp_batch, gamma)
# minimize the cross entropy
eltwise_loss = -t * F.log(F.clip(y, 1e-10, 1.))
if self.batch_accumulator == 'sum':
loss = F.sum(eltwise_loss)
else:
loss = F.mean(F.sum(eltwise_loss, axis=1))
return loss
|
Python
| 0.031496
|
@@ -1813,16 +1813,159 @@
ate m_l%0A
+ # Note that u - bj in the original paper is replaced with 1 - (bj - l) to%0A # deal with the case when bj is an integer, i.e., l = u = bj%0A
scat
@@ -1965,32 +1965,32 @@
scatter_add(%0A
-
z_probs.
@@ -2065,22 +2065,28 @@
robs * (
-u
+1
-
+(
bj
+ - l)
)).ravel
@@ -2191,32 +2191,32 @@
ffset).ravel(),%0A
+
(y_probs
@@ -2241,200 +2241,8 @@
())%0A
- # Deal with the case when bj is an integer, i.e., l = u = bj%0A scatter_add(%0A z_probs.ravel(),%0A (u.astype(xp.int32) + offset).ravel(),%0A (y_probs * (u == l)).ravel())%0A
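
A tiny hand-worked check of the edge case the hunk above handles; the support z and the sample y are made-up values, not from the repository. When bj lands exactly on an atom, l = u = bj, so the old weights u - bj and bj - l are both zero and the probability mass would be dropped; 1 - (bj - l) keeps it:

import numpy as np

z = np.array([0.0, 1.0, 2.0])
delta_z = z[1] - z[0]
bj = (np.array([1.0]) - z[0]) / delta_z      # array([1.])
l, u = np.floor(bj), np.ceil(bj)             # both array([1.])
assert (u - bj) + (bj - l) == 0.0            # mass lost with the old weights
assert (1 - (bj - l)) + (bj - l) == 1.0      # mass preserved with the fix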
|
7c75a9c01aec6427bef573e69605087e7b30ff33
|
test cases for createview
|
parcellate/apps/winparcel/tests.py
|
parcellate/apps/winparcel/tests.py
|
"""
This file demonstrates writing tests using the unittest module. These will pass
when you run "manage.py test".
Replace this with more appropriate tests for your application.
"""
from django.test import TestCase
from .models import (RSSObject,
RSSEntry)
from .lib import ReadRSS
class SimpleTest(TestCase):
test_data = dict(
url = 'http://feeds.feedburner.com/seriouseats',
title = 'Serious Eats'
)
testvals = {'title': 'Test Test Test',
'url': 'http://www.google.com',
'summary': 'This is a test save',
'author': 'Viv',
'uri': 'http://vivyly.github.io',
'content': '<div class="blah">TESTING</div>'
}
def test_basic_addition(self):
"""
Tests that 1 + 1 always equals 2.
"""
self.assertEqual(1 + 1, 2)
def create_rss(self):
rss = RSSObject()
for key, val in self.test_data.iteritems():
setattr(rss, key, val)
rss.save()
return rss
def test_add_rss(self):
rss_obj = self.create_rss()
for key, val in self.test_data.iteritems():
self.assertEqual(getattr(rss_obj, key), val)
def test_add_rss_entry(self):
rss_obj = self.create_rss()
rss_entry = RSSEntry()
for key, val in self.testvals.iteritems():
setattr(rss_entry, key, val)
rss_entry.rssatom = rss_obj
rss_entry.save()
for key, val in self.test_data.iteritems():
self.assertEqual(getattr(rss_obj, key), val)
for key, val in self.testvals.iteritems():
self.assertEqual(getattr(rss_entry, key), val)
def test_add_rss_entry_lib(self):
rss_obj = self.create_rss()
read_rss = ReadRSS(rss=rss_obj)
created = read_rss.save_entries()
self.assertEqual(created, 15)
|
Python
| 0
|
@@ -1,309 +1,1326 @@
-%22%22%22%0AThis file demonstrates writing tests using the unittest module. These will pass%0Awhen you run %22manage.py test%22.%0A%0AReplace this with more appropriate tests for your application.%0A%22%22%22%0A%0Afrom django.test import TestCase%0A%0Afrom .models import (RSSObject,%0A RSSEntry)%0Afrom .lib import ReadRSS
+from django.test import TestCase%0Afrom django.test.client import (Client,%0A RequestFactory)%0A%0Afrom .models import (RSSObject,%0A RSSEntry)%0Afrom .lib import ReadRSS%0Afrom .views import RSSObjectCreateView%0A%0Aclass RSSObjectAddViewTests(TestCase):%0A %22%22%22 RSS Object Add View tests.%22%22%22%0A def test_add_rss_in_the_context(self):%0A client = Client()%0A response = client.get('/rss/add')%0A self.assertEquals(%0A list(response.context.get('object_list')),%5B%5D)%0A RSSObject.objects.create(title='Serious Eats',%0A url='http://feeds.feedburner.com/seriouseats')%0A response = client.get('/rss/add')%0A self.assertEquals(response.context.get('object_list').count(), 1)%0A%0A def test_add_rss_in_the_context_request_factory(self):%0A factory = RequestFactory()%0A request = factory.get('/')%0A response = RSSObjectCreateView.as_view()(request)%0A self.assertEquals(%0A list(response.context_data.get('object_list')),%5B%5D)%0A RSSObject.objects.create(title='Serious Eats',%0A url='http://feeds.feedburner.com/seriouseats')%0A response = RSSObjectCreateView.as_view()(request)%0A self.assertEquals(%0A response.context_data.get('object_list').count(), 1)
%0A%0A%0Ac
|
c68792c50f91445ed733c5e5ed0c226a04b1e173
|
Use chromium snapshots for Linux_64 and Mac.
|
chrome/test/chromedriver/archive.py
|
chrome/test/chromedriver/archive.py
|
# Copyright (c) 2013 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""Downloads items from the Chromium continuous archive."""
import os
import platform
import urllib
import util
CHROME_34_REVISION = '251854'
CHROME_35_REVISION = '260135'
CHROME_36_REVISION = '269696'
_SITE = 'http://commondatastorage.googleapis.com'
class Site(object):
CONTINUOUS = _SITE + '/chromium-browser-continuous'
CHROMIUM_SNAPSHOT = _SITE + '/chromium-browser-snapshots'
BLINK_SNAPSHOT = _SITE + '/chromium-webkit-snapshots'
def GetLatestRevision(site=Site.CONTINUOUS):
"""Returns the latest revision (as a string) available for this platform.
Args:
site: the archive site to check against, default to the continuous one.
"""
url = site + '/%s/LAST_CHANGE'
return urllib.urlopen(url % _GetDownloadPlatform()).read()
def DownloadChrome(revision, dest_dir, site=Site.CONTINUOUS):
"""Downloads the packaged Chrome from the archive to the given directory.
Args:
revision: the revision of Chrome to download.
dest_dir: the directory to download Chrome to.
site: the archive site to download from, default to the continuous one.
Returns:
The path to the unzipped Chrome binary.
"""
def GetZipName():
if util.IsWindows():
return 'chrome-win32'
elif util.IsMac():
return 'chrome-mac'
elif util.IsLinux():
return 'chrome-linux'
def GetChromePathFromPackage():
if util.IsWindows():
return 'chrome.exe'
elif util.IsMac():
return 'Chromium.app/Contents/MacOS/Chromium'
elif util.IsLinux():
return 'chrome'
zip_path = os.path.join(dest_dir, 'chrome-%s.zip' % revision)
if not os.path.exists(zip_path):
url = site + '/%s/%s/%s.zip' % (_GetDownloadPlatform(), revision,
GetZipName())
print 'Downloading', url, '...'
urllib.urlretrieve(url, zip_path)
util.Unzip(zip_path, dest_dir)
return os.path.join(dest_dir, GetZipName(), GetChromePathFromPackage())
def _GetDownloadPlatform():
"""Returns the name for this platform on the archive site."""
if util.IsWindows():
return 'Win'
elif util.IsMac():
return 'Mac'
elif util.IsLinux():
if platform.architecture()[0] == '64bit':
return 'Linux_x64'
else:
return 'Linux'
def GetLatestSnapshotVersion():
"""Returns the latest revision of snapshot build."""
return GetLatestRevision(GetSnapshotDownloadSite())
def GetSnapshotDownloadSite():
"""Returns the site to download snapshot build according to the platform.
For Linux 32-bit, it is the chromium snapshot build.
For other platforms, it is the blink snapshot build,
because there is no linux32 blink snapshot build.
"""
if _GetDownloadPlatform() == 'Linux':
return Site.CHROMIUM_SNAPSHOT
else:
return Site.BLINK_SNAPSHOT
|
Python
| 0.000001
|
@@ -2825,18 +2825,40 @@
m()
-==
+in (
'Linux'
+, 'Linux_x64', 'Mac')
:%0A
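
Decoded, the hunk above widens the platform check so 64-bit Linux and Mac also pull chromium snapshots (a reconstruction of the changed function body):

def GetSnapshotDownloadSite():
    # Reconstructed from the diff: Linux_x64 and Mac now also use the
    # chromium snapshot site; remaining platforms fall through to blink.
    if _GetDownloadPlatform() in ('Linux', 'Linux_x64', 'Mac'):
        return Site.CHROMIUM_SNAPSHOT
    else:
        return Site.BLINK_SNAPSHOT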
|
4fba6f7473014c11558864d9425a790adf45baa5
|
add midnight race condition note per Chris Petrilli
|
parsedatetime/tests/TestPhrases.py
|
parsedatetime/tests/TestPhrases.py
|
"""
Test parsing of strings that are phrases
"""
import unittest, time, datetime
import parsedatetime as pdt
# a special compare function is used to allow us to ignore the seconds as
# the running of the test could cross a minute boundary
def _compareResults(result, check, dateOnly=False, debug=False):
target, t_flag = result
value, v_flag = check
t_yr, t_mth, t_dy, t_hr, t_min, _, _, _, _ = target
v_yr, v_mth, v_dy, v_hr, v_min, _, _, _, _ = value
if dateOnly:
return ((t_yr == v_yr) and (t_mth == v_mth) and (t_dy == v_dy)) and (t_flag == v_flag)
else:
return ((t_yr == v_yr) and (t_mth == v_mth) and (t_dy == v_dy) and
(t_hr == v_hr) and (t_min == v_min)) and (t_flag == v_flag)
class test(unittest.TestCase):
def setUp(self):
self.cal = pdt.Calendar()
self.yr, self.mth, self.dy, self.hr, self.mn, self.sec, self.wd, self.yd, self.isdst = time.localtime()
def testPhrases(self):
start = datetime.datetime(self.yr, self.mth, self.dy, self.hr, self.mn, self.sec).timetuple()
target = datetime.datetime(self.yr, self.mth, self.dy, 16, 0, 0).timetuple()
self.assertTrue(_compareResults(self.cal.parse('flight from SFO at 4pm', start), (target, 2)))
target = datetime.datetime(self.yr, self.mth, self.dy, 17, 0, 0).timetuple()
self.assertTrue(_compareResults(self.cal.parse('eod', start), (target, 2)))
self.assertTrue(_compareResults(self.cal.parse('meeting eod', start), (target, 2)))
self.assertTrue(_compareResults(self.cal.parse('eod meeting', start), (target, 2)))
target = datetime.datetime(self.yr, self.mth, self.dy, 17, 0, 0) + datetime.timedelta(days=1)
target = target.timetuple()
self.assertTrue(_compareResults(self.cal.parse('tomorrow eod', start), (target, 3)))
self.assertTrue(_compareResults(self.cal.parse('eod tomorrow', start), (target, 3)))
def testPhraseWithDays_DOWStyle_1_False(self):
s = datetime.datetime.now()
# find out what day we are currently on
# and determine what the next day of week is
t = s + datetime.timedelta(days=1)
start = s.timetuple()
(yr, mth, dy, _, _, _, wd, yd, isdst) = t.timetuple()
target = (yr, mth, dy, 17, 0, 0, wd, yd, isdst)
d = self.wd + 1
if d > 6:
d = 0
day = self.cal.ptc.Weekdays[d]
self.assertTrue(_compareResults(self.cal.parse('eod %s' % day, start), (target, 3)))
# find out what day we are currently on
# and determine what the previous day of week is
t = s + datetime.timedelta(days=6)
(yr, mth, dy, hr, mn, sec, wd, yd, isdst) = t.timetuple()
target = (yr, mth, dy, 17, 0, 0, wd, yd, isdst)
d = self.wd - 1
if d < 0:
d = 6
day = self.cal.ptc.Weekdays[d]
self.assertTrue(_compareResults(self.cal.parse('eod %s' % day, start), (target, 3)))
def testEndOfPhrases(self):
s = datetime.datetime.now()
# find out what month we are currently on
# set the day to 1 and then go back a day
# to get the end of the current month
(yr, mth, _, hr, mn, sec, _, _, _) = s.timetuple()
mth += 1
if mth > 12:
mth = 1
yr += 1
t = datetime.datetime(yr, mth, 1, 9, 0, 0) + datetime.timedelta(days=-1)
start = s.timetuple()
target = t.timetuple()
self.assertTrue(_compareResults(self.cal.parse('eom', start), (target, 2)))
self.assertTrue(_compareResults(self.cal.parse('meeting eom', start), (target, 2)))
s = datetime.datetime.now()
(yr, mth, dy, hr, mn, sec, wd, yd, isdst) = s.timetuple()
t = datetime.datetime(yr, 12, 31, 9, 0, 0)
start = s.timetuple()
target = t.timetuple()
self.assertTrue(_compareResults(self.cal.parse('eoy', start), (target, 2)))
self.assertTrue(_compareResults(self.cal.parse('meeting eoy', start), (target, 2)))
def testLastPhrases(self):
for day in (11, 12, 13, 14, 15, 16, 17):
start = datetime.datetime(2012, 11, day, 9, 0, 0)
(yr, mth, dy, _, _, _, wd, yd, isdst) = start.timetuple()
n = 4 - wd
if n >= 0:
n -= 7
target = start + datetime.timedelta(days=n)
#print '*********', start, target, n, self.cal.parse('last friday', start.timetuple())
self.assertTrue(_compareResults(self.cal.parse('last friday', start.timetuple()), (target.timetuple(), 1), dateOnly=True))
|
Python
| 0
|
@@ -966,32 +966,437 @@
tPhrases(self):%0A
+ #%0A # NOTE - this test will fail under certain conditions%0A # It is building an absolute date for comparison and then testing%0A # the parsing of relative phrases and as such will fail if run%0A # near the midnight transition.%0A # Thanks to Chris Petrilli for asking about it and prompting me%0A # to create this note!%0A #%0A
start =
@@ -5091,8 +5091,9 @@
y=True))
+%0A
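
The note added above flags tests that build absolute dates while parsing relative phrases. One hedged workaround, not what the commit does, is to pin the base time instead of using now(); a hypothetical sketch assuming the eod behaviour shown in the tests:

# Illustrative only: the commit adds a comment rather than fixing the race.
# Pinning both the parse base and the expected value to one fixed datetime
# keeps the test stable across the midnight transition.
import datetime
import parsedatetime as pdt

cal = pdt.Calendar()
base = datetime.datetime(2012, 11, 13, 9, 0, 0)          # fixed, not now()
start = base.timetuple()
target = base.replace(hour=17, minute=0, second=0).timetuple()
result, flag = cal.parse('eod', start)
assert result[:5] == target[:5]                          # compare to the minute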
|
9c898d7e547b13bb289c0d1cada0bbd4078803dc
|
Allow passing of size_cutoff to preassembler methods.
|
indra/db/pre_assemble_script.py
|
indra/db/pre_assemble_script.py
|
import indra.tools.assemble_corpus as ac
from indra.db.util import get_statements, insert_pa_stmts
from indra.preassembler import Preassembler
from indra.preassembler.hierarchy_manager import hierarchies
def make_unique_statement_set(preassembler, stmts):
stmt_groups = preassembler.get_stmt_matching_groups(stmts)
unique_stmts = []
for _, duplicates in stmt_groups:
# Get the first statement and add the evidence of all subsequent
# Statements to it
for stmt_ix, stmt in enumerate(duplicates):
if stmt_ix == 0:
first_stmt = stmt.get_new_copy()
first_stmt.evidence.append(stmt.uuid)
# This should never be None or anything else
assert isinstance(first_stmt, type(stmt))
unique_stmts.append(first_stmt)
return unique_stmts
def get_match_key_maps(preassembler, unique_stmts, num_procs=1):
id_maps = preassembler.generate_id_maps(unique_stmts, num_procs)
return [[unique_stmts[idx].matches_key() for idx in idx_pair]
for idx_pair in id_maps]
def process_statements(stmts, num_procs=1):
stmts = ac.map_grounding(stmts)
stmts = ac.map_sequence(stmts)
pa = Preassembler(hierarchies)
unique_stmts = make_unique_statement_set(pa, stmts)
match_key_maps = get_match_key_maps(pa, unique_stmts, num_procs)
return unique_stmts, match_key_maps
def preassemble_db_stmts(db, num_procs, *clauses):
"""Run pre-assembly on a set of statements in the database."""
stmts = get_statements(clauses, db=db, do_stmt_count=False)
pa_stmts = process_statements(stmts, num_procs)
insert_pa_stmts(db, pa_stmts)
return pa_stmts
|
Python
| 0
|
@@ -870,35 +870,48 @@
ique_stmts,
-num_procs=1
+**generate_id_map_kwargs
):%0A id_ma
@@ -954,33 +954,92 @@
nique_stmts,
- num_proc
+%0A **generate_id_map_kwarg
s)%0A retur
@@ -1040,17 +1040,23 @@
return
-%5B
+%7Btuple(
%5Bunique_
@@ -1100,16 +1100,17 @@
dx_pair%5D
+)
%0A
@@ -1137,17 +1137,17 @@
id_maps
-%5D
+%7D
%0A%0A%0Adef p
@@ -1175,19 +1175,32 @@
ts,
-num_procs=1
+**generate_id_map_kwargs
):%0A
@@ -1413,33 +1413,88 @@
nique_stmts,
- num_proc
+%0A **generate_id_map_kwarg
s)%0A retur
@@ -1563,17 +1563,16 @@
num_proc
-s
, *claus
@@ -1711,23 +1711,43 @@
se)%0A
-pa_stmt
+unique_stmts, match_key_map
s = proc
@@ -1768,24 +1768,33 @@
(stmts,
+poolsize=
num_proc
s)%0A i
@@ -1785,17 +1785,16 @@
num_proc
-s
)%0A in
@@ -1811,18 +1811,22 @@
mts(db,
-pa
+unique
_stmts)%0A
@@ -1836,17 +1836,37 @@
return
-pa_stmt
+unique_stmts, match_key_map
s%0A
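
Decoded from the hunks above, the refactor threads **generate_id_map_kwargs through in place of the fixed num_procs argument and returns a set of match-key tuples. A reconstruction of the resulting functions (approximate, assembled from the encoded diff, and relying on the module's other definitions):

def get_match_key_maps(preassembler, unique_stmts, **generate_id_map_kwargs):
    id_maps = preassembler.generate_id_maps(unique_stmts,
                                            **generate_id_map_kwargs)
    return {tuple([unique_stmts[idx].matches_key() for idx in idx_pair])
            for idx_pair in id_maps}

def preassemble_db_stmts(db, num_proc, *clauses):
    """Run pre-assembly on a set of statements in the database."""
    stmts = get_statements(clauses, db=db, do_stmt_count=False)
    unique_stmts, match_key_maps = process_statements(stmts, poolsize=num_proc)
    insert_pa_stmts(db, unique_stmts)
    return unique_stmts, match_key_maps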
|
4df8aafb1d4ab12ad795b30f1f75937072216f1b
|
Implement proper event detection, lots of debugging code
|
hdltools/vcd/event.py
|
hdltools/vcd/event.py
|
"""VCD Event tracker."""
from typing import Tuple, Dict
from hdltools.vcd.parser import BaseVCDParser, VCDParserError
from hdltools.vcd.trigger import VCDTriggerDescriptor
from hdltools.vcd.mixins.conditions import VCDConditionMixin
from hdltools.vcd.mixins.time import VCDTimeRestrictionMixin
from hdltools.vcd.trigger.condtable import ConditionTableTrigger
# an event is a VCDTriggerDescriptor
class VCDEventTracker(
BaseVCDParser, VCDConditionMixin, VCDTimeRestrictionMixin
):
"""Event tracker."""
def __init__(
self, events: Dict[str, Tuple[VCDTriggerDescriptor]], **kwargs
):
"""Initialize."""
super().__init__(**kwargs)
self._events = events
self._evt_triggers = {
evt_name: [
ConditionTableTrigger(conditions=conds, evt_name=evt_name),
None,
]
for evt_name, conds in events.items()
}
# arm immediately
for trigger, _ in self._evt_triggers.values():
trigger.trigger_callback = self._evt_trigger_callback
trigger.arm_trigger()
def _evt_trigger_callback(self, trigger_fsm):
"""Event trigger callback."""
# update last triggered time
self._evt_triggers[trigger_fsm.evt_name][1] = self.current_time
print(f"DEBUG: {self.current_time}: evt fired: {trigger_fsm.evt_name}")
def _state_change_handler(self, old_state, new_state):
"""Detect state transition."""
super()._state_change_handler(old_state, new_state)
# when header state finishes, we have list of variables
if old_state == "header":
# add VCD variable identifiers to condition table elements
for _, (condtable, _) in self._evt_triggers.items():
for cond in condtable.conditions:
# post-process now
candidates = self.variable_search(
cond.name, cond.scope, True
)
if not candidates:
raise RuntimeError("cannot locate VCD variable")
# associate with first candidate
cond.vcd_var = list(candidates)[0].identifiers[0]
print("DEBUG: header parsing completed")
def clock_change_handler(self, time):
"""Handle time."""
for condtable, _ in self._evt_triggers.values():
# re-arm
if condtable.trigger_armed is False:
condtable.arm_trigger()
def value_change_handler(self, stmt, fields):
"""Handle value change."""
if self.time_valid is False or self.waiting_precondition:
return
# feed event triggers
var = self.variables[fields["var"]]
for trigger, _ in self._evt_triggers.values():
trigger.match_and_advance(var, fields["value"])
|
Python
| 0.000001
|
@@ -51,16 +51,54 @@
e, Dict%0A
+from colorama import Fore, Back, init%0A
from hdl
@@ -151,16 +151,16 @@
erError%0A
-
from hdl
@@ -393,16 +393,38 @@
rigger%0A%0A
+init(autoreset=True)%0A%0A
# an eve
@@ -1375,16 +1375,52 @@
print(
+%0A Back.RED%0A +
f%22DEBUG:
@@ -1422,31 +1422,34 @@
EBUG: %7Bself.
-current
+last_cycle
_time%7D: evt
@@ -1478,16 +1478,25 @@
t_name%7D%22
+%0A
)%0A%0A d
@@ -2454,24 +2454,65 @@
le time.%22%22%22%0A
+ if time == 0:%0A return%0A
for
@@ -2657,32 +2657,1362 @@
le.arm_trigger()
+%0A for condtable, _ in self._evt_triggers.values():%0A # update consolidated values%0A changed = %5B%5D%0A for cond in condtable.conditions:%0A # pick variables directly for speed%0A var = self.variables%5Bcond.vcd_var%5D%0A if var.last_changed == self.last_cycle_time:%0A _changed, state = condtable.advance(cond, var.value)%0A if _changed:%0A changed.append((cond, state))%0A%0A if changed:%0A print(%0A Fore.CYAN%0A + f%22DEBUG: @%7Btime%7D: table %7Bcondtable.triggerid%7D changes:%22%0A )%0A for cond, state in changed:%0A msg_color = Fore.RED if state is False else Fore.GREEN%0A print(msg_color + f%22DEBUG: cond %7Bcond%7D -%3E %7Bstate%7D%22)%0A%0A # check and fire trigger%0A condtable.check_and_fire()%0A # for var in self.variables.values():%0A # # print(var.value)%0A # if var.last_changed == self.last_cycle_time:%0A # condtable.match_and_advance(var, var.value)%0A%0A def initial_value_handler(self, stmt, fields):%0A %22%22%22Handle initial value assignment.%22%22%22%0A var = self.variables%5Bfields%5B%22var%22%5D%5D%0A var.value = fields%5B%22value%22%5D
%0A%0A def value_
@@ -4167,17 +4167,16 @@
return%0A
-%0A
@@ -4181,27 +4181,35 @@
#
-feed event triggers
+update local variable value
%0A
@@ -4248,32 +4248,115 @@
%5B%22var%22%5D%5D%0A
+ var.value = fields%5B%22value%22%5D%0A var.last_changed = self.current_time%0A #
for trigger, _
@@ -4386,32 +4386,34 @@
alues():%0A
+ #
trigger.mat
|
37fd4ea15564d6a3fb8a2486c17d30984a6675d5
|
Use more generic variable
|
phileo/templatetags/phileo_tags.py
|
phileo/templatetags/phileo_tags.py
|
from django import template
from django.template.loader import render_to_string
from django.contrib.contenttypes.models import ContentType
from phileo.models import Like
from phileo.utils import _allowed, widget_context
from phileo.settings import LIKABLE_MODELS
register = template.Library()
@register.assignment_tag
def who_likes(athlete):
return Like.objects.filter(
receiver_content_type=ContentType.objects.get_for_model(athlete),
receiver_object_id=athlete.pk
)
class LikesNode(template.Node):
def __init__(self, user, model_list, varname):
self.user = template.Variable(user)
# Default to all the registered models
if len(model_list) == 0:
# These need to look like strings, otherwise they will be treated as variables
# when they are `resolve()`d later
model_list = ['"{}"'.format(model) for model in LIKABLE_MODELS]
self.model_list = [template.Variable(m) for m in model_list]
self.varname = varname
def render(self, context):
user = self.user.resolve(context)
content_types = []
for raw_model_name in self.model_list:
try:
model_name = raw_model_name.resolve(context)
except template.VariableDoesNotExist:
continue
if not _allowed(model_name):
continue
app, model = model_name.split(".")
content_type = ContentType.objects.get(app_label=app, model__iexact=model)
content_types.append(content_type)
context[self.varname] = Like.objects.filter(
sender=user,
receiver_content_type__in=content_types
)
return ""
@register.tag
def likes(parser, token):
"""
{% likes user "app.Model" "app.Model" "app.Model" as like_objs %}
"""
tokens = token.split_contents()
user = tokens[1]
varname = tokens[-1]
model_list = tokens[2:-2]
return LikesNode(user, model_list, varname)
class LikeRenderer(template.Node):
def __init__(self, varname):
self.varname = template.Variable(varname)
def render(self, context):
like = self.varname.resolve(context)
instance = like.receiver
content_type = like.receiver_content_type
app_name = content_type.app_label
model_name = content_type.model.lower()
like_context = {
'instance': instance,
'like': like,
}
return render_to_string([
'phileo/{0}/{1}.html'.format(app_name, model_name),
'phileo/{0}/like.html'.format(app_name),
'phileo/_like.html',
], like_context, context)
@register.tag
def render_like(parser, token):
"""
{% likes user as like_list %}
<ul>
{% for like in like_list %}
<li>{% render_like like %}</li>
{% endfor %}
</ul>
"""
tokens = token.split_contents()
var = tokens[1]
return LikeRenderer(var)
@register.filter
def likes_count(obj):
"""
Something like:
<div class="likes_count">{{ obj|likes_count }}</div>
will render:
<div class="likes_count">34</div>
"""
return Like.objects.filter(
receiver_content_type=ContentType.objects.get_for_model(obj),
receiver_object_id=obj.pk
).count()
@register.inclusion_tag("phileo/_widget.html")
def phileo_widget(user, obj):
return widget_context(user, obj)
@register.inclusion_tag("phileo/_widget_brief.html")
def phileo_widget_brief(user, obj):
return widget_context(user, obj)
class ObjectDecorator(object):
def __init__(self, user, objects):
self.user = user
self._objects = objects
self._is_stream = None
def is_stream(self):
if self._is_stream is None and len(self._objects) > 0:
self._is_stream = not hasattr(self._objects[0], "_meta")
return self._is_stream
def get_id(self, obj):
return self.is_stream() and obj.item.id or obj.id
@property
def indexed(self):
if not hasattr(self, "_indexed"):
self._indexed = {}
for obj in self._objects:
if hasattr(obj, "cast") and callable(obj.cast):
obj = obj.cast()
ct = ContentType.objects.get_for_model(self.is_stream() and obj.item or obj)
if ct not in self._indexed.keys():
self._indexed[ct] = []
obj.liked = False
self._indexed[ct].append(obj)
return self._indexed
def objects(self):
for ct in self.indexed.keys():
likes = Like.objects.filter(
sender=self.user,
receiver_content_type=ct,
receiver_object_id__in=[self.get_id(o) for o in self.indexed[ct]]
)
for obj in self.indexed[ct]:
for like in likes:
if like.receiver_object_id == self.get_id(obj):
obj.liked = True
yield obj
class LikedObjectsNode(template.Node):
def __init__(self, objects, user, varname):
self.objects = template.Variable(objects)
self.user = template.Variable(user)
self.varname = varname
def render(self, context):
user = self.user.resolve(context)
objects = self.objects.resolve(context)
context[self.varname] = ObjectDecorator(user, objects).objects()
return ""
@register.tag
def liked(parser, token):
"""
{% liked objects by user as varname %}
"""
tag, objects, _, user, _, varname = token.split_contents()
return LikedObjectsNode(objects, user, varname)
|
Python
| 0.000001
|
@@ -330,23 +330,19 @@
o_likes(
-athlete
+obj
):%0A r
@@ -432,23 +432,19 @@
r_model(
-athlete
+obj
),%0A
@@ -469,15 +469,11 @@
_id=
-athlete
+obj
.pk%0A
|
13c070948b5f8e22d853a85b6b711b0ead2b3ac9
|
Fix pep8 issues in profile_roles (#25458)
|
lib/ansible/plugins/callback/profile_roles.py
|
lib/ansible/plugins/callback/profile_roles.py
|
# (C) 2017, Tennis Smith, http://github.com/gamename
#
# This file is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# File is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# See <http://www.gnu.org/licenses/> for a copy of the
# GNU General Public License
#
# This will track the use of each role during the life of a playbook's
# execution. The total time spent in each role will be printed at the
# end.
#
# Make coding more python3-ish
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
import collections
import time
from ansible.plugins.callback import CallbackBase
from ansible.module_utils.six import reduce
# define start time
t0 = tn = time.time()
def secondsToStr(t):
# http://bytes.com/topic/python/answers/635958-handy-short-cut-formatting-elapsed-time-floating-point-seconds
rediv = lambda ll, b: list(divmod(ll[0], b)) + ll[1:]
return "%d:%02d:%02d.%03d" % tuple(
reduce(rediv, [[t * 1000, ], 1000, 60, 60]))
def filled(msg, fchar="*"):
if len(msg) == 0:
width = 79
else:
msg = "%s " % msg
width = 79 - len(msg)
if width < 3:
width = 3
filler = fchar * width
return "%s%s " % (msg, filler)
def timestamp(self):
if self.current is not None:
self.stats[self.current] = time.time() - self.stats[self.current]
self.totals[self.current] += self.stats[self.current]
def tasktime():
global tn
time_current = time.strftime('%A %d %B %Y %H:%M:%S %z')
time_elapsed = secondsToStr(time.time() - tn)
time_total_elapsed = secondsToStr(time.time() - t0)
tn = time.time()
return filled('%s (%s)%s%s' %
(time_current, time_elapsed, ' ' * 7, time_total_elapsed))
class CallbackModule(CallbackBase):
"""
This callback module provides profiling for ansible roles.
"""
CALLBACK_VERSION = 2.0
CALLBACK_TYPE = 'aggregate'
CALLBACK_NAME = 'profile_roles'
CALLBACK_NEEDS_WHITELIST = True
def __init__(self):
self.stats = collections.Counter()
self.totals = collections.Counter()
self.current = None
super(CallbackModule, self).__init__()
def _record_task(self, task):
"""
Logs the start of each task
"""
self._display.display(tasktime())
timestamp(self)
if task._role:
self.current = task._role._role_name
else:
self.current = task.action
self.stats[self.current] = time.time()
def v2_playbook_on_task_start(self, task, is_conditional):
self._record_task(task)
def v2_playbook_on_handler_task_start(self, task):
self._record_task(task)
def playbook_on_setup(self):
self._display.display(tasktime())
def playbook_on_stats(self, stats):
self._display.display(tasktime())
self._display.display(filled("", fchar="="))
timestamp(self)
total_time = sum(self.totals.values())
# Print the timings starting with the largest one
for result in self.totals.most_common():
msg = u"{0:-<70}{1:->9}".format(result[0] + u' ',
u' {0:.02f}s'.format(result[1]))
self._display.display(msg)
msg_total = u"{0:-<70}{1:->9}".format(u'total ',
u' {0:.02f}s'.format(total_time))
self._display.display(filled("", fchar="~"))
self._display.display(msg_total)
|
Python
| 0
|
@@ -1001,16 +1001,22 @@
tils.six
+.moves
import
@@ -1210,29 +1210,40 @@
+def
rediv
- = lambda ll, b:
+(ll, b):%0A return
lis
@@ -1271,16 +1271,17 @@
ll%5B1:%5D%0A
+%0A
retu
@@ -3879,9 +3879,8 @@
_total)%0A
-%0A
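
The hunk above unrolls the rediv lambda into a def for pep8; behaviour is unchanged, which a hand-worked value confirms. The snippet is restated standalone here for checking, not taken verbatim from the repository:

from functools import reduce

def rediv(ll, b):                     # the pep8 form the hunk introduces
    return list(divmod(ll[0], b)) + ll[1:]

def secondsToStr(t):
    return "%d:%02d:%02d.%03d" % tuple(
        reduce(rediv, [[t * 1000, ], 1000, 60, 60]))

assert secondsToStr(3661.5) == '1:01:01.500'  # 1 h, 1 min, 1.5 s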
|
c3ea0ebccd1f40f38cb9ebcc724986f65e8c7a4a
|
use samehost instead of the host's ip address.
|
helpers/postgresql.py
|
helpers/postgresql.py
|
import os, psycopg2, re, time
import logging
from urlparse import urlparse
logger = logging.getLogger(__name__)
class Postgresql:
def __init__(self, config):
self.name = config["name"]
self.host, self.port = config["listen"].split(":")
self.data_dir = config["data_dir"]
self.replication = config["replication"]
self.config = config
self.cursor_holder = None
self.connection_string = "postgres://%s:%s@%s:%s/postgres" % (self.replication["username"], self.replication["password"], self.host, self.port)
self.conn = None
def cursor(self):
if not self.cursor_holder:
self.conn = psycopg2.connect("postgres://%s:%s/postgres" % (self.host, self.port))
self.conn.autocommit = True
self.cursor_holder = self.conn.cursor()
return self.cursor_holder
def disconnect(self):
try:
self.conn.close()
except Exception as e:
logger.error("Error disconnecting: %s" % e)
def query(self, sql):
max_attempts = 0
while True:
try:
self.cursor().execute(sql)
break
except psycopg2.OperationalError as e:
if self.conn:
self.disconnect()
self.cursor_holder = None
if max_attempts > 4:
raise e
max_attempts += 1
time.sleep(5)
return self.cursor()
def data_directory_empty(self):
return not os.path.exists(self.data_dir) or os.listdir(self.data_dir) == []
def initialize(self):
if os.system("initdb -D %s" % self.data_dir) == 0:
self.write_pg_hba()
return True
return False
def sync_from_leader(self, leader):
leader = urlparse(leader["address"])
f = open("./pgpass", "w")
f.write("%(hostname)s:%(port)s:*:%(username)s:%(password)s\n" %
{"hostname": leader.hostname, "port": leader.port, "username": leader.username, "password": leader.password})
f.close()
os.system("chmod 600 pgpass")
return os.system("PGPASSFILE=pgpass pg_basebackup -R -D %(data_dir)s --host=%(host)s --port=%(port)s -U %(username)s" %
{"data_dir": self.data_dir, "host": leader.hostname, "port": leader.port, "username": leader.username}) == 0
def is_leader(self):
return not self.query("SELECT pg_is_in_recovery();").fetchone()[0]
def is_running(self):
return os.system("pg_ctl status -D %s > /dev/null" % self.data_dir) == 0
def start(self):
if self.is_running():
logger.error("Cannot start PostgreSQL because one is already running.")
return False
pid_path = "%s/postmaster.pid" % self.data_dir
if os.path.exists(pid_path):
os.remove(pid_path)
logger.info("Removed %s" % pid_path)
command_code = os.system("postgres -D %s %s &" % (self.data_dir, self.server_options()))
time.sleep(5)
return command_code != 0
def stop(self):
return os.system("pg_ctl stop -w -D %s -m fast -w" % self.data_dir) != 0
def reload(self):
return os.system("pg_ctl reload -w -D %s" % self.data_dir) == 0
def restart(self):
return os.system("pg_ctl restart -w -D %s -m fast" % self.data_dir) == 0
def server_options(self):
options = "-c listen_addresses=%s -c port=%s" % (self.host, self.port)
for setting, value in self.config["parameters"].iteritems():
options += " -c \"%s=%s\"" % (setting, value)
return options
def is_healthy(self):
if not self.is_running():
logger.warning("Postgresql is not running.")
return False
return True
def is_healthiest_node(self, members):
for member in members:
if member["hostname"] == self.name:
continue
try:
member_conn = psycopg2.connect(member["address"])
member_conn.autocommit = True
member_cursor = member_conn.cursor()
member_cursor.execute("SELECT '%s'::pg_lsn - pg_last_xlog_replay_location() AS bytes;" % self.xlog_position())
xlog_diff = member_cursor.fetchone()[0]
logger.info([self.name, member["hostname"], xlog_diff])
if xlog_diff < 0:
member_cursor.close()
return False
member_cursor.close()
except psycopg2.OperationalError:
continue
return True
def replication_slot_name(self):
member = os.environ.get("MEMBER")
(member, _) = re.subn(r'[^a-z0-9]+', r'_', member)
return member
def write_pg_hba(self):
f = open("%s/pg_hba.conf" % self.data_dir, "a")
f.write("host replication %(username)s %(network)s md5" %
{"username": self.replication["username"], "network": self.replication["network"]})
# allow TCP connections from the host's own address
f.write("\nhost postgres postgres %(network)s/32 trust\n" % {"network": self.host})
f.close()
def write_recovery_conf(self, leader_hash):
leader = urlparse(leader_hash["address"])
f = open("%s/recovery.conf" % self.data_dir, "w")
f.write("""
standby_mode = 'on'
primary_slot_name = '%(recovery_slot)s'
primary_conninfo = 'user=%(user)s password=%(password)s host=%(hostname)s port=%(port)s sslmode=prefer sslcompression=1'
recovery_target_timeline = 'latest'
""" % {"recovery_slot": self.name, "user": leader.username, "password": leader.password, "hostname": leader.hostname, "port": leader.port})
if "recovery_conf" in self.config:
for name, value in self.config["recovery_conf"].iteritems():
f.write("%s = '%s'" % (name, value))
f.close()
def follow_the_leader(self, leader_hash):
leader = urlparse(leader_hash["address"])
if os.system("grep 'host=%(hostname)s port=%(port)s' %(data_dir)s/recovery.conf > /dev/null" % {"hostname": leader.hostname, "port": leader.port, "data_dir": self.data_dir}) != 0:
self.write_recovery_conf(leader_hash)
self.restart()
return True
def promote(self):
return os.system("pg_ctl promote -w -D %s" % self.data_dir) == 0
def demote(self, leader):
self.write_recovery_conf(leader)
self.restart()
def create_replication_user(self):
self.query("CREATE USER \"%s\" WITH REPLICATION ENCRYPTED PASSWORD '%s';" % (self.replication["username"], self.replication["password"]))
def xlog_position(self):
return self.query("SELECT pg_last_xlog_replay_location();").fetchone()[0]
|
Python
| 0
|
@@ -5180,22 +5180,16 @@
res
-%25(network)s/32
+samehost
tru
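
Decoded, the hunk above swaps the interpolated host address for pg_hba's built-in samehost keyword; a reconstruction of the one changed write in write_pg_hba:

# Before: "\nhost postgres postgres %(network)s/32 trust\n" % {"network": self.host}
# After: samehost matches any of the server's own IP addresses, so no
# interpolation of the single listen address is needed.
f.write("\nhost postgres postgres samehost trust\n")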
|
37c65efa1b78abcc75d506554e6fb877678ec2f2
|
Fix a typo
|
editorsnotes/api/views/topics.py
|
editorsnotes/api/views/topics.py
|
from editorsnotes.main.models import Topic
from .. import filters as es_filters
from ..serializers.topics import TopicSerializer
from .base import BaseListAPIView, BaseDetailView, DeleteConfirmAPIView
from .mixins import (ElasticSearchListMixin, EmbeddedMarkupReferencesMixin,
HydraProjectPermissionsMixin)
__all__ = ['TopicList', 'TopicDetail', 'TopicConfirmDelete']
class TopicList(ElasticSearchListMixin, HydraProjectPermissionsMixin,
BaseListAPIView):
queryset = Topic.objects.all()
serializer_class = TopicSerializer
es_filter_backends = (
es_filters.ProjectFilterBackend,
es_filters.QFilterBackend,
es_filters.UpdaterFilterBackend,
)
hydra_project_perms = ('main.add_note',)
class TopicDetail(EmbeddedMarkupReferencesMixin, HydraProjectPermissionsMixin,
BaseDetailView):
queryset = Topic.objects.all()
serializer_class = TopicSerializer
hydra_project_perms = ('main.change_note', 'main.delete_note',)
class TopicConfirmDelete(DeleteConfirmAPIView):
queryset = Topic.objects.all()
permissions = {
'GET': ('main.delete_topic',),
'HEAD': ('main.delete_topic',)
}
|
Python
| 1
|
@@ -752,20 +752,21 @@
ain.add_
-note
+topic
',)%0A%0A%0Acl
@@ -991,20 +991,21 @@
.change_
-note
+topic
', 'main
@@ -1012,20 +1012,21 @@
.delete_
-note
+topic
',)%0A%0A%0Acl
|
0091c41d8dd064b40ccf35d4d24c01ae4438f028
|
Set sender in signal handlers
|
cityhallmonitor/signals/handlers.py
|
cityhallmonitor/signals/handlers.py
|
from django.db.models.signals import pre_save, post_save
from django.dispatch import receiver
from django.utils import timezone
@receiver(pre_save)
def handle_pre_save(sender, instance, *args, **kwargs):
"""
Set updated_at timestamp if model is actually dirty
"""
if hasattr(sender, 'is_dirty'):
if instance.is_dirty():
instance.updated_at = timezone.now()
@receiver(post_save)
def handle_post_save(sender, instance, **kwargs):
"""
Reset dirty state
"""
if hasattr(sender, 'reset_state'):
instance.reset_state()
|
Python
| 0.000001
|
@@ -125,28 +125,105 @@
one%0A
-%0A%0A@receiver(pre_save
+from cityhallmonitor.models import DirtyFieldsModel%0A%0A%0A@receiver(pre_save, sender=DirtyFieldsModel
)%0Ade
@@ -283,21 +283,16 @@
%0A %22%22%22
-%0A
Set upda
@@ -334,21 +334,16 @@
ly dirty
-%0A
%22%22%22%0A
@@ -480,16 +480,41 @@
ost_save
+, sender=DirtyFieldsModel
)%0Adef ha
@@ -564,21 +564,16 @@
%0A %22%22%22
-%0A
Reset di
@@ -581,21 +581,16 @@
ty state
-%0A
%22%22%22%0A
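
A reconstruction of the pre_save handler after the hunks above (approximate, decoded from the diff):

from cityhallmonitor.models import DirtyFieldsModel

@receiver(pre_save, sender=DirtyFieldsModel)
def handle_pre_save(sender, instance, *args, **kwargs):
    """Set updated_at timestamp if model is actually dirty"""
    if hasattr(sender, 'is_dirty'):
        if instance.is_dirty():
            instance.updated_at = timezone.now()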
|
2409bf1377ceaee99e4d4b49d0c8c2a2fef57687
|
Generate a new 'name' if necessary
|
ckanext/ddi/importer/ddiimporter.py
|
ckanext/ddi/importer/ddiimporter.py
|
import requests
import traceback
from pprint import pprint
from ckan.lib.munge import munge_title_to_name
from ckanext.harvest.harvesters import HarvesterBase
from ckanext.ddi.importer import metadata
import ckanapi
import logging
log = logging.getLogger(__name__)
class DdiImporter(HarvesterBase):
def run(self, file_path=None, url=None):
pkg_dict = None
ckan_metadata = metadata.DdiCkanMetadata()
if file_path is not None:
with open(file_path) as xml_file:
pkg_dict = ckan_metadata.load(xml_file.read())
elif url is not None:
log.debug('Fetch file from %s' % url)
r = requests.get(url)
xml_file = r.text
pkg_dict = ckan_metadata.load(xml_file)
if pkg_dict['url'] == '':
pkg_dict['url'] = url
resources = []
resources.append({
'url': url,
'name': pkg_dict['title'],
'format': 'xml'
})
pkg_dict['resources'] = resources
pkg_dict = self.cleanup_pkg_dict(pkg_dict)
self.insert_or_update_pkg(pkg_dict)
def insert_or_update_pkg(self, pkg_dict):
try:
registry = ckanapi.LocalCKAN()
pprint(pkg_dict)
if pkg_dict['id'] and pkg_dict['id'] != '':
try:
registry.call_action('package_update', pkg_dict)
except ckanapi.NotFound:
del pkg_dict['id']
registry.call_action('package_create', pkg_dict)
else:
del pkg_dict['id']
registry.call_action('package_create', pkg_dict)
except:
traceback.print_exc()
def cleanup_pkg_dict(self, pkg_dict):
if pkg_dict['name'] != '':
pkg_dict['name'] = munge_title_to_name(pkg_dict['name'])
else:
pkg_dict['name'] = munge_title_to_name(pkg_dict['title'])
if pkg_dict['url'] == '':
del pkg_dict['url']
return pkg_dict
|
Python
| 1
|
@@ -1502,32 +1502,108 @@
pkg_dict%5B'id'%5D%0A
+ pkg_dict%5B'name'%5D = self._gen_new_name(pkg_dict%5B'name'%5D)%0A
|
fbe9de1d8f019b6f1c263337f04e5866131d0e60
|
drop the chunk size of the kafka feed down
|
corehq/apps/change_feed/pillow.py
|
corehq/apps/change_feed/pillow.py
|
import json
from kafka import KeyedProducer
from kafka.common import KafkaUnavailableError
from casexml.apps.case.models import CommCareCase
from corehq.apps.change_feed import data_sources
from corehq.apps.change_feed.connection import get_kafka_client
from corehq.apps.change_feed.models import ChangeMeta
from corehq.apps.change_feed.topics import get_topic
from couchforms.models import all_known_formlike_doc_types
import logging
from pillowtop.checkpoints.manager import PillowCheckpoint, get_django_checkpoint_store
from pillowtop.couchdb import CachedCouchDB
from pillowtop.listener import PythonPillow
class ChangeFeedPillow(PythonPillow):
def __init__(self, couch_db, kafka, checkpoint):
super(ChangeFeedPillow, self).__init__(couch_db=couch_db, checkpoint=checkpoint)
self._kafka = kafka
self._producer = KeyedProducer(self._kafka)
def get_db_name(self):
return self.get_couch_db().dbname
def process_change(self, change, is_retry_attempt=False):
document_type = _get_document_type(change.document)
if document_type:
assert change.document is not None
change_meta = ChangeMeta(
document_id=change.id,
data_source_type=data_sources.COUCH,
data_source_name=self.get_db_name(),
document_type=document_type,
document_subtype=_get_document_subtype(change.document),
domain=change.document.get('domain', None),
is_deletion=change.deleted,
)
self._producer.send_messages(
bytes(get_topic(document_type)),
bytes(change_meta.domain),
bytes(json.dumps(change_meta.to_json())),
)
def get_default_couch_db_change_feed_pillow():
default_couch_db = CachedCouchDB(CommCareCase.get_db().uri, readonly=False)
try:
kafka_client = get_kafka_client()
except KafkaUnavailableError:
logging.warning('Ignoring missing kafka client during unit testing')
kafka_client = None
return ChangeFeedPillow(
couch_db=default_couch_db,
kafka=kafka_client,
checkpoint=PillowCheckpoint(get_django_checkpoint_store(), 'default-couch-change-feed')
)
def _get_document_type(document_or_none):
return document_or_none.get('doc_type', None) if document_or_none else None
def _get_document_subtype(document_or_none):
type = _get_document_type(document_or_none)
if type in ('CommCareCase', 'CommCareCase-Deleted'):
return document_or_none.get('type', None)
elif type in all_known_formlike_doc_types():
return document_or_none.get('xmlns', None)
return None
|
Python
| 0
|
@@ -785,16 +785,31 @@
eckpoint
+, chunk_size=10
)%0A
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.