commit stringlengths 40 40 | subject stringlengths 1 1.49k | old_file stringlengths 4 311 | new_file stringlengths 4 311 | new_contents stringlengths 1 29.8k | old_contents stringlengths 0 9.9k | lang stringclasses 3 values | proba float64 0 1 |
|---|---|---|---|---|---|---|---|
740cf4e1a25533b4d3279a17e23b1ff9f6c13006 | Update Watchers.py | examples/Watchers.py | examples/Watchers.py | from seleniumbase import BaseCase
class MyTestClass(BaseCase):
def test_basic(self):
self.open('stockstwits.com') # Navigate to the web page
self.assert_element('sentiment-tab') # Assert element on page
self.click('sentiment-tab') # Click element on page
| Import openpyxl
from seleniumbase import BaseCase
los = []
url = 'https://stocktwits.com/symbol/'
workbook = openpyxl.load_workbook('Test.xlsx')
worksheet = workbook.get_sheet_by_name(name = 'Sheet1')
for col in worksheet['A']:
los.append(col.value)
los2 = []
print(los)
class MyTestClass(BaseCase):
#for i in los:
# stocksite = url +i + '?q=' +i
#driver.get(stocksite)
#driver.find_element_by_id('sentiment-tab').click()
#Bullish = driver.find_elements_by_css_selector('span.bullish:nth-child(1)')
#Sentiment = [x.text for x in Bullish]
#los2.append(Sentiment[0])
| Python | 0.000001 |
3462a4755eac0ea74b9c90f867e769c47504c5bd | add license to top of __init__ in examples | examples/__init__.py | examples/__init__.py | # Licensed to the Cloudkick, Inc under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# libcloud.org licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
| Python | 0.000007 | |
87da5bcf5b11762605c60f57b3cb2019d458fcd3 | Set version to v2.1.0a3 | spacy/about.py | spacy/about.py | # inspired from:
# https://python-packaging-user-guide.readthedocs.org/en/latest/single_source_version/
# https://github.com/pypa/warehouse/blob/master/warehouse/__about__.py
__title__ = 'spacy-nightly'
__version__ = '2.1.0a3'
__summary__ = 'Industrial-strength Natural Language Processing (NLP) with Python and Cython'
__uri__ = 'https://spacy.io'
__author__ = 'Explosion AI'
__email__ = 'contact@explosion.ai'
__license__ = 'MIT'
__release__ = False
__download_url__ = 'https://github.com/explosion/spacy-models/releases/download'
__compatibility__ = 'https://raw.githubusercontent.com/explosion/spacy-models/master/compatibility.json'
__shortcuts__ = 'https://raw.githubusercontent.com/explosion/spacy-models/master/shortcuts-v2.json'
| # inspired from:
# https://python-packaging-user-guide.readthedocs.org/en/latest/single_source_version/
# https://github.com/pypa/warehouse/blob/master/warehouse/__about__.py
__title__ = 'spacy-nightly'
__version__ = '2.1.0a3.dev0'
__summary__ = 'Industrial-strength Natural Language Processing (NLP) with Python and Cython'
__uri__ = 'https://spacy.io'
__author__ = 'Explosion AI'
__email__ = 'contact@explosion.ai'
__license__ = 'MIT'
__release__ = False
__download_url__ = 'https://github.com/explosion/spacy-models/releases/download'
__compatibility__ = 'https://raw.githubusercontent.com/explosion/spacy-models/master/compatibility.json'
__shortcuts__ = 'https://raw.githubusercontent.com/explosion/spacy-models/master/shortcuts-v2.json'
| Python | 0.000041 |
939f7a9e91022c8dab5da13e9e3f738f6c25c524 | Update perception_obstacle_sender.py | modules/tools/record_analyzer/tools/perception_obstacle_sender.py | modules/tools/record_analyzer/tools/perception_obstacle_sender.py | #!/usr/bin/env python
###############################################################################
# Copyright 2018 The Apollo Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
###############################################################################
import time
import argparse
import google.protobuf.text_format as text_format
from cyber_py import cyber
from modules.perception.proto import perception_obstacle_pb2
def update(perception_obstacles):
"""update perception obstacles timestamp"""
now = time.time()
perception_obstacles.header.timestamp_sec = now
perception_obstacles.header.lidar_timestamp = \
(long(now) - long(0.5)) * long(1e9)
for perception_obstacle in perception_obstacles.perception_obstacle:
perception_obstacle.timestamp = now - 0.5
for measure in perception_obstacle.measurements:
measure.timestamp = now - 0.5
return perception_obstacles
if __name__ == "__main__":
parser = argparse.ArgumentParser(
description="Recode Analyzer is a tool to analyze record files.",
prog="main.py")
parser.add_argument(
"-f", "--file", action="store", type=str, required=True,
help="Specify the message file for sending.")
args = parser.parse_args()
cyber.init()
node = cyber.Node("perception_obstacle_sender")
perception_pub = node.create_writer(
"/apollo/perception/obstacles",
perception_obstacle_pb2.PerceptionObstacles)
perception_obstacles = perception_obstacle_pb2.PerceptionObstacles()
with open(args.file, 'r') as f:
text_format.Merge(f.read(), perception_obstacles)
while not cyber.is_shutdown():
now = time.time()
perception_obstacles = update(perception_obstacles)
perception_pub.write(perception_obstacles)
sleep_time = 0.1 - (time.time() - now)
if sleep_time > 0:
time.sleep(sleep_time)
cyber.shutdown()
| #!/usr/bin/env python
###############################################################################
# Copyright 2018 The Apollo Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
###############################################################################
import time
import argparse
import google.protobuf.text_format as text_format
from cyber_py import cyber
from modules.perception.proto import perception_obstacle_pb2
def update(perception_obstacles):
"""update perception obstacles timestamp"""
now = time.time()
perception_obstacles.header.timestamp_sec = now
perception_obstacles.header.lidar_timestamp = \
(long(now) - long(0.5)) * long(1e9)
for perception_obstacle in perception_obstacles.perception_obstacle:
perception_obstacle.timestamp = now - 0.5
for measure in perception_obstacle.measurements:
measure.timestamp = now - 0.5
return perception_obstacles
if __name__ == "__main__":
parser = argparse.ArgumentParser(
description="Recode Analyzer is a tool to analyze record files.",
prog="main.py")
parser.add_argument(
"-f", "--file", action="store", type=str, required=True,
help="Specify the message file for sending.")
args = parser.parse_args()
record_file = args.file
cyber.init()
node = cyber.Node("perception_obstacle_sender")
perception_pub = node.create_writer(
"/apollo/perception/obstacles",
perception_obstacle_pb2.PerceptionObstacles)
perception_obstacles = perception_obstacle_pb2.PerceptionObstacles()
with open(args.file, 'r') as f:
text_format.Merge(f.read(), perception_obstacles)
while not cyber.is_shutdown():
now = time.time()
perception_obstacles = update(perception_obstacles)
perception_pub.write(perception_obstacles)
sleep_time = 0.1 - (time.time() - now)
if sleep_time > 0:
time.sleep(sleep_time)
cyber.shutdown()
| Python | 0.000001 |
8fa528696393c18f74ceb5d6bbcf87231e072b21 | update gentle/transcriber.py __main__ | gentle/transcriber.py | gentle/transcriber.py | import math
import logging
import wave
from gentle import transcription
from multiprocessing.pool import ThreadPool as Pool
class MultiThreadedTranscriber:
def __init__(self, kaldi_queue, chunk_len=20, overlap_t=2, nthreads=4):
self.chunk_len = chunk_len
self.overlap_t = overlap_t
self.nthreads = nthreads
self.kaldi_queue = kaldi_queue
def transcribe(self, wavfile, progress_cb=None):
wav_obj = wave.open(wavfile, 'r')
duration = wav_obj.getnframes() / float(wav_obj.getframerate())
n_chunks = int(math.ceil(duration / float(self.chunk_len - self.overlap_t)))
chunks = []
def transcribe_chunk(idx):
wav_obj = wave.open(wavfile, 'r')
start_t = idx * (self.chunk_len - self.overlap_t)
# Seek
wav_obj.setpos(int(start_t * wav_obj.getframerate()))
# Read frames
buf = wav_obj.readframes(int(self.chunk_len * wav_obj.getframerate()))
k = self.kaldi_queue.get()
k.push_chunk(buf)
ret = k.get_final()
k.reset()
self.kaldi_queue.put(k)
chunks.append({"start": start_t, "words": ret})
logging.info('%d/%d' % (len(chunks), n_chunks))
if progress_cb is not None:
progress_cb({"message": ' '.join([X['word'] for X in ret]),
"percent": len(chunks) / float(n_chunks)})
pool = Pool(min(n_chunks, self.nthreads))
pool.map(transcribe_chunk, range(n_chunks))
pool.close()
chunks.sort(key=lambda x: x['start'])
# Combine chunks
words = []
for c in chunks:
chunk_start = c['start']
chunk_end = chunk_start + self.chunk_len
chunk_words = [transcription.Word(**wd).shift(time=chunk_start) for wd in c['words']]
# At chunk boundary cut points the audio often contains part of a
# word, which can get erroneously identified as one or more different
# in-vocabulary words. So discard one or more words near the cut points
# (they'll be covered by the ovlerap anyway).
#
trim = min(0.25 * self.overlap_t, 0.5)
if c is not chunks[0]:
while len(chunk_words) > 1:
chunk_words.pop(0)
if chunk_words[0].end > chunk_start + trim:
break
if c is not chunks[-1]:
while len(chunk_words) > 1:
chunk_words.pop()
if chunk_words[-1].start < chunk_end - trim:
break
words.extend(chunk_words)
# Remove overlap: Sort by time, then filter out any Word entries in
# the list that are adjacent to another entry corresponding to the same
# word in the audio.
words.sort(key=lambda word: word.start)
words.append(transcription.Word(word="__dummy__"))
words = [words[i] for i in range(len(words)-1) if not words[i].corresponds(words[i+1])]
return words
if __name__=='__main__':
# full transcription
from Queue import Queue
import json
import sys
import logging
logging.getLogger().setLevel('INFO')
import gentle
from gentle import standard_kaldi
resources = gentle.Resources()
k_queue = Queue()
for i in range(3):
k_queue.put(standard_kaldi.Kaldi(resources.nnet_gpu_path, resources.full_hclg_path, resources.proto_langdir))
trans = MultiThreadedTranscriber(k_queue)
with gentle.resampled(sys.argv[1]) as filename:
out = trans.transcribe(filename)
open(sys.argv[2], 'w').write(transcription.Transcription(words=out).to_json())
| import math
import logging
import wave
from gentle import transcription
from multiprocessing.pool import ThreadPool as Pool
class MultiThreadedTranscriber:
def __init__(self, kaldi_queue, chunk_len=20, overlap_t=2, nthreads=4):
self.chunk_len = chunk_len
self.overlap_t = overlap_t
self.nthreads = nthreads
self.kaldi_queue = kaldi_queue
def transcribe(self, wavfile, progress_cb=None):
wav_obj = wave.open(wavfile, 'r')
duration = wav_obj.getnframes() / float(wav_obj.getframerate())
n_chunks = int(math.ceil(duration / float(self.chunk_len - self.overlap_t)))
chunks = []
def transcribe_chunk(idx):
wav_obj = wave.open(wavfile, 'r')
start_t = idx * (self.chunk_len - self.overlap_t)
# Seek
wav_obj.setpos(int(start_t * wav_obj.getframerate()))
# Read frames
buf = wav_obj.readframes(int(self.chunk_len * wav_obj.getframerate()))
k = self.kaldi_queue.get()
k.push_chunk(buf)
ret = k.get_final()
k.reset()
self.kaldi_queue.put(k)
chunks.append({"start": start_t, "words": ret})
logging.info('%d/%d' % (len(chunks), n_chunks))
if progress_cb is not None:
progress_cb({"message": ' '.join([X['word'] for X in ret]),
"percent": len(chunks) / float(n_chunks)})
pool = Pool(min(n_chunks, self.nthreads))
pool.map(transcribe_chunk, range(n_chunks))
pool.close()
chunks.sort(key=lambda x: x['start'])
# Combine chunks
words = []
for c in chunks:
chunk_start = c['start']
chunk_end = chunk_start + self.chunk_len
chunk_words = [transcription.Word(**wd).shift(time=chunk_start) for wd in c['words']]
# At chunk boundary cut points the audio often contains part of a
# word, which can get erroneously identified as one or more different
# in-vocabulary words. So discard one or more words near the cut points
# (they'll be covered by the ovlerap anyway).
#
trim = min(0.25 * self.overlap_t, 0.5)
if c is not chunks[0]:
while len(chunk_words) > 1:
chunk_words.pop(0)
if chunk_words[0].end > chunk_start + trim:
break
if c is not chunks[-1]:
while len(chunk_words) > 1:
chunk_words.pop()
if chunk_words[-1].start < chunk_end - trim:
break
words.extend(chunk_words)
# Remove overlap: Sort by time, then filter out any Word entries in
# the list that are adjacent to another entry corresponding to the same
# word in the audio.
words.sort(key=lambda word: word.start)
words.append(transcription.Word(word="__dummy__"))
words = [words[i] for i in range(len(words)-1) if not words[i].corresponds(words[i+1])]
return words
if __name__=='__main__':
# full transcription
from Queue import Queue
from util import ffmpeg
from gentle import standard_kaldi
import sys
import logging
logging.getLogger().setLevel('INFO')
k_queue = Queue()
for i in range(3):
k_queue.put(standard_kaldi.Kaldi())
trans = MultiThreadedTranscriber(k_queue)
with gentle.resampled(sys.argv[1]) as filename:
out = trans.transcribe(filename)
open(sys.argv[2], 'w').write(out.to_json())
| Python | 0 |
203cba83527ed39cc478c4f0530e513c71f2a6ad | format date in title | examples/daynight.py | examples/daynight.py | import numpy as np
from mpl_toolkits.basemap import Basemap
import matplotlib.pyplot as plt
from datetime import datetime
# example showing how to compute the day/night terminator and shade nightime
# areas on a map.
# miller projection
map = Basemap(projection='mill',lon_0=180)
# plot coastlines, draw label meridians and parallels.
map.drawcoastlines()
map.drawparallels(np.arange(-90,90,30),labels=[1,0,0,0])
map.drawmeridians(np.arange(map.lonmin,map.lonmax+30,60),labels=[0,0,0,1])
# fill continents 'coral' (with zorder=0), color wet areas 'aqua'
map.drawmapboundary(fill_color='aqua')
map.fillcontinents(color='coral',lake_color='aqua')
# shade the night areas, with alpha transparency so the
# map shows through. Use current time in UTC.
date = datetime.utcnow()
CS=map.nightshade(date)
plt.title('Day/Night Map for %s (UTC)' % date.strftime("%d %b %Y %H:%M:%S"))
plt.show()
| import numpy as np
from mpl_toolkits.basemap import Basemap
import matplotlib.pyplot as plt
from datetime import datetime
# example showing how to compute the day/night terminator and shade nightime
# areas on a map.
# miller projection
map = Basemap(projection='mill',lon_0=180)
# plot coastlines, draw label meridians and parallels.
map.drawcoastlines()
map.drawparallels(np.arange(-90,90,30),labels=[1,0,0,0])
map.drawmeridians(np.arange(map.lonmin,map.lonmax+30,60),labels=[0,0,0,1])
# fill continents 'coral' (with zorder=0), color wet areas 'aqua'
map.drawmapboundary(fill_color='aqua')
map.fillcontinents(color='coral',lake_color='aqua')
# shade the night areas, with alpha transparency so the
# map shows through. Use current time in UTC.
date = datetime.utcnow()
CS=map.nightshade(date)
plt.title('Day/Night Map for %s (UTC)' % date)
plt.show()
| Python | 0.000172 |
ddb0a5d3b684c96b8fe8c4678cdb5e018f1b3d7b | Revert last change. Just in case... | rbm2m/action/downloader.py | rbm2m/action/downloader.py | # -*- coding: utf-8 -*-
import urllib
import sys
import requests
from .debug import dump_exception
HOST = 'http://www.recordsbymail.com/'
GENRE_LIST_URL = '{host}browse.php'.format(host=HOST)
SEARCH_URL = '{host}search.php?genre={genre_slug}&format=LP&instock=1'
IMAGE_LIST_URL = '{host}php/getImageArray.php?item={rec_id}'
TIMEOUTS = (3.05, 30) # Connect, read
def fetch(url):
"""
Download content from url and return response object.
Raises `DownloadError` if operation fails
"""
resp = None
try:
resp = requests.get(url, timeout=TIMEOUTS)
resp.raise_for_status()
except requests.RequestException as e:
exc_type, exc_val, tb = sys.exc_info()
notes = resp.text if resp else ''
dump_exception('download', exc_type, exc_val, tb, notes)
raise DownloadError(e)
else:
assert resp is not None
return resp
def fetch_text(url):
"""
Download text content from url and return it.
Raises `DownloadError` if operation fails
"""
return fetch(url).text
def genre_list():
"""
Download page with the list of genres
"""
return fetch_text(GENRE_LIST_URL)
def get_results_page(genre_title, page):
"""
Download search result page
"""
url = SEARCH_URL.format(host=HOST,
genre_slug=urllib.quote_plus(genre_title))
if page:
url = url + '&page={}'.format(page)
return fetch_text(url)
def get_image_list(rec_id):
"""
Download list of images for a record
"""
url = IMAGE_LIST_URL.format(host=HOST, rec_id=rec_id)
return fetch_text(url)
def get_content(url):
"""
Downloads content from url
"""
return fetch(url).content
class DownloadError(requests.RequestException):
"""
Raised for all download errors (timeouts, http errors etc)
"""
pass
| # -*- coding: utf-8 -*-
import urllib
import sys
import requests
from .debug import dump_exception
HOST = 'http://www.recordsbymail.com/'
GENRE_LIST_URL = '{host}browse.php'.format(host=HOST)
SEARCH_URL = '{host}search.php?genre={genre_slug}&instock=1'
IMAGE_LIST_URL = '{host}php/getImageArray.php?item={rec_id}'
TIMEOUTS = (3.05, 30) # Connect, read
def fetch(url):
"""
Download content from url and return response object.
Raises `DownloadError` if operation fails
"""
resp = None
try:
resp = requests.get(url, timeout=TIMEOUTS)
resp.raise_for_status()
except requests.RequestException as e:
exc_type, exc_val, tb = sys.exc_info()
notes = resp.text if resp else ''
dump_exception('download', exc_type, exc_val, tb, notes)
raise DownloadError(e)
else:
assert resp is not None
return resp
def fetch_text(url):
"""
Download text content from url and return it.
Raises `DownloadError` if operation fails
"""
return fetch(url).text
def genre_list():
"""
Download page with the list of genres
"""
return fetch_text(GENRE_LIST_URL)
def get_results_page(genre_title, page):
"""
Download search result page
"""
url = SEARCH_URL.format(host=HOST,
genre_slug=urllib.quote_plus(genre_title))
if page:
url = url + '&page={}'.format(page)
return fetch_text(url)
def get_image_list(rec_id):
"""
Download list of images for a record
"""
url = IMAGE_LIST_URL.format(host=HOST, rec_id=rec_id)
return fetch_text(url)
def get_content(url):
"""
Downloads content from url
"""
return fetch(url).content
class DownloadError(requests.RequestException):
"""
Raised for all download errors (timeouts, http errors etc)
"""
pass
| Python | 0 |
480e55794c5f06129b8b2fb7ed02a787f70275e2 | add --silent option to update-toplist | mygpo/directory/management/commands/update-toplist.py | mygpo/directory/management/commands/update-toplist.py | from datetime import datetime
from optparse import make_option
from django.core.management.base import BaseCommand
from mygpo.core.models import Podcast, SubscriberData
from mygpo.users.models import PodcastUserState
from mygpo.utils import progress
from mygpo.decorators import repeat_on_conflict
class Command(BaseCommand):
option_list = BaseCommand.option_list + (
make_option('--silent', action='store_true', dest='silent',
default=False, help="Don't show any output"),
)
def handle(self, *args, **options):
silent = options.get('silent')
# couchdbkit doesn't preserve microseconds
started = datetime.utcnow().replace(microsecond=0)
podcasts = Podcast.all_podcasts()
total = Podcast.view('core/podcasts_by_oldid', limit=0).total_rows
for n, podcast in enumerate(podcasts):
subscriber_count = self.get_subscriber_count(podcast.get_id())
self.update(podcast=podcast, started=started, subscriber_count=subscriber_count)
if not silent:
progress(n, total)
@repeat_on_conflict(['podcast'])
def update(self, podcast, started, subscriber_count):
# We've already updated this podcast
if started in [e.timestamp for e in podcast.subscribers]:
return
data = SubscriberData(
timestamp = started,
subscriber_count = max(0, subscriber_count),
)
podcast.subscribers = sorted(podcast.subscribers + [data], key=lambda e: e.timestamp)
podcast.save()
@staticmethod
def get_subscriber_count(podcast_id):
db = PodcastUserState.get_db()
x = db.view('users/subscriptions_by_podcast',
startkey = [podcast_id, None],
endkey = [podcast_id, {}],
reduce = True,
group = True,
group_level = 2,
)
return x.count()
| from datetime import datetime
from django.core.management.base import BaseCommand
from mygpo.core.models import Podcast, SubscriberData
from mygpo.users.models import PodcastUserState
from mygpo.utils import progress
from mygpo.decorators import repeat_on_conflict
class Command(BaseCommand):
def handle(self, *args, **options):
# couchdbkit doesn't preserve microseconds
started = datetime.utcnow().replace(microsecond=0)
podcasts = Podcast.all_podcasts()
total = Podcast.view('core/podcasts_by_oldid', limit=0).total_rows
for n, podcast in enumerate(podcasts):
subscriber_count = self.get_subscriber_count(podcast.get_id())
self.update(podcast=podcast, started=started, subscriber_count=subscriber_count)
progress(n, total)
@repeat_on_conflict(['podcast'])
def update(self, podcast, started, subscriber_count):
# We've already updated this podcast
if started in [e.timestamp for e in podcast.subscribers]:
return
data = SubscriberData(
timestamp = started,
subscriber_count = max(0, subscriber_count),
)
podcast.subscribers = sorted(podcast.subscribers + [data], key=lambda e: e.timestamp)
podcast.save()
@staticmethod
def get_subscriber_count(podcast_id):
db = PodcastUserState.get_db()
x = db.view('users/subscriptions_by_podcast',
startkey = [podcast_id, None],
endkey = [podcast_id, {}],
reduce = True,
group = True,
group_level = 2,
)
return x.count()
| Python | 0 |
006e6b67af6cfb2cca214666ac48dc9fd2cc0339 | Update test values | scopus/tests/test_CitationOverview.py | scopus/tests/test_CitationOverview.py | #!/usr/bin/env python
# -*- coding: utf-8 -*-
"""Tests for `CitationOverview` module."""
from collections import namedtuple
from nose.tools import assert_equal, assert_true
import scopus
co = scopus.CitationOverview("2-s2.0-84930616647", refresh=True,
start=2015, end=2018)
def test_authors():
Author = namedtuple('Author', 'name surname initials id url')
url = 'https://api.elsevier.com/content/author/author_id/7004212771'
expected = [Author(name='Kitchin J.R.', surname='Kitchin',
initials='J.R.', id='7004212771',
url=url)]
assert_equal(co.authors, expected)
def test_cc():
assert_equal(co.cc, [(2015, '0'), (2016, '4'), (2017, '2'), (2018, '1')])
def test_citationType_long():
assert_equal(co.citationType_long, 'Review')
def test_citationType_short():
assert_equal(co.citationType_short, 're')
def test_doi():
assert_equal(co.doi, '10.1021/acscatal.5b00538')
def test_endingPage():
assert_equal(co.endingPage, '3899')
def test_h_index():
assert_equal(co.h_index, '1')
def test_issn():
assert_equal(co.issn, '2155-5435')
def test_issueIdentifier():
assert_equal(co.issueIdentifier, '6')
def test_lcc():
assert_equal(co.lcc, '0')
def test_pcc():
assert_equal(co.pcc, '0')
def test_pii():
assert_equal(co.pii, None)
def test_publicationName():
assert_equal(co.publicationName, 'ACS Catalysis')
def test_rangeCount():
assert_equal(co.rangeCount, '7')
def test_rowTotal():
assert_equal(co.rowTotal, '7')
def test_scopus_id():
assert_equal(co.scopus_id, '84930616647')
def test_startingPage():
assert_equal(co.startingPage, '3894')
def test_title():
expected = 'Examples of effective data sharing in scientific publishing'
assert_equal(co.title, expected)
def test_url():
expected = 'https://api.elsevier.com/content/abstract/scopus_id/84930616647'
assert_equal(co.url, expected)
def test_volume():
assert_equal(co.volume, '5')
| #!/usr/bin/env python
# -*- coding: utf-8 -*-
"""Tests for `CitationOverview` module."""
from collections import namedtuple
from nose.tools import assert_equal, assert_true
import scopus
co = scopus.CitationOverview("2-s2.0-84930616647", refresh=True,
start=2015, end=2017)
def test_authors():
Author = namedtuple('Author', 'name surname initials id url')
url = 'https://api.elsevier.com/content/author/author_id/7004212771'
expected = [Author(name='Kitchin J.R.', surname='Kitchin',
initials='J.R.', id='7004212771',
url=url)]
assert_equal(co.authors, expected)
def test_cc():
assert_equal(co.cc, [(2015, '0'), (2016, '4'), (2017, '2')])
def test_citationType_long():
assert_equal(co.citationType_long, 'Review')
def test_citationType_short():
assert_equal(co.citationType_short, 're')
def test_doi():
assert_equal(co.doi, '10.1021/acscatal.5b00538')
def test_endingPage():
assert_equal(co.endingPage, '3899')
def test_h_index():
assert_equal(co.h_index, '1')
def test_issn():
assert_equal(co.issn, '2155-5435')
def test_issueIdentifier():
assert_equal(co.issueIdentifier, '6')
def test_lcc():
assert_equal(co.lcc, '0')
def test_pcc():
assert_equal(co.pcc, '0')
def test_pii():
assert_equal(co.pii, None)
def test_publicationName():
assert_equal(co.publicationName, 'ACS Catalysis')
def test_rangeCount():
assert_equal(co.rangeCount, '6')
def test_rowTotal():
assert_equal(co.rowTotal, '6')
def test_scopus_id():
assert_equal(co.scopus_id, '84930616647')
def test_startingPage():
assert_equal(co.startingPage, '3894')
def test_title():
expected = 'Examples of effective data sharing in scientific publishing'
assert_equal(co.title, expected)
def test_url():
expected = 'https://api.elsevier.com/content/abstract/scopus_id/84930616647'
assert_equal(co.url, expected)
def test_volume():
assert_equal(co.volume, '5')
| Python | 0.000001 |
ca8bbd03f57bf6e15abd406533dc7088d449e9ab | add published date to properties | scrapi/consumers/figshare/consumer.py | scrapi/consumers/figshare/consumer.py | """
Figshare harvester of public projects for the SHARE Notification Service
Example API query: http://api.figshare.com/v1/articles/search?search_for=*&from_date=2015-2-1&end_date=2015-2-1
"""
from __future__ import unicode_literals
import time
import json
import logging
from dateutil.parser import parse
from datetime import date, timedelta
import requests
from nameparser import HumanName
from scrapi.linter import lint
from scrapi.linter.document import RawDocument, NormalizedDocument
logging.basicConfig(level=logging.INFO)
logger = logging.getLogger(__name__)
NAME = 'figshare'
URL = 'http://api.figshare.com/v1/articles/search?search_for=*&from_date='
def consume(days_back=0):
start_date = date.today() - timedelta(days_back) - timedelta(1)
end_date = date.today() - timedelta(1)
search_url = '{0}{1}-{2}-{3}&end_date={4}-{5}-{6}'.format(URL,
start_date.year,
start_date.month,
start_date.day,
end_date.year,
end_date.month,
end_date.day)
records = get_records(search_url)
record_list = []
for record in records:
doc_id = record['article_id']
record_list.append(
RawDocument(
{
'doc': json.dumps(record),
'source': NAME,
'docID': unicode(doc_id),
'filetype': 'json'
}
)
)
return record_list
def get_records(search_url):
records = requests.get(search_url)
total_records = records.json()['items_found']
page = 1
all_records = []
while len(all_records) < total_records:
logger.info('Requesting records for url: {}&page={}'.format(search_url, str(page)))
record_list = records.json()['items']
for record in record_list:
if len(all_records) < total_records:
all_records.append(record)
page += 1
records = requests.get(search_url + '&page={}'.format(str(page)))
time.sleep(3)
return all_records
def get_contributors(record):
authors = record['authors']
contributor_list = []
for person in authors:
name = HumanName(person['author_name'])
contributor = {
'prefix': name.title,
'given': name.first,
'middle': name.middle,
'family': name.last,
'suffix': name.suffix,
'email': '',
'ORCID': '',
}
contributor_list.append(contributor)
return contributor_list
def get_ids(record):
# Right now, only take the last DOI - others in properties
doi = record['DOI']
try:
doi = doi.replace('http://dx.doi.org/', '')
except AttributeError:
for item in doi:
item.replace('http://dx.doi.org/', '')
doi = item
return {
'serviceID': unicode(record['article_id']),
'url': record['url'],
'doi': doi
}
def get_properties(record):
return {
'article_id': record['article_id'],
'defined_type': record['defined_type'],
'type': record['type'],
'links': record['links'],
'doi': record['DOI'],
'publishedDate': record['published_date']
}
def normalize(raw_doc):
doc = raw_doc.get('doc')
record = json.loads(doc)
normalized_dict = {
'title': record['title'],
'contributors': get_contributors(record),
'properties': get_properties(record),
'description': record['description'],
'tags': [],
'id': get_ids(record),
'source': NAME,
'dateUpdated': unicode(parse(record['modified_date']).isoformat())
}
return NormalizedDocument(normalized_dict)
if __name__ == '__main__':
print(lint(consume, normalize))
| """
Figshare harvester of public projects for the SHARE Notification Service
Example API query: http://api.figshare.com/v1/articles/search?search_for=*&from_date=2015-2-1&end_date=2015-2-1
"""
from __future__ import unicode_literals
import time
import json
import logging
from dateutil.parser import parse
from datetime import date, timedelta
import requests
from nameparser import HumanName
from scrapi.linter import lint
from scrapi.linter.document import RawDocument, NormalizedDocument
logging.basicConfig(level=logging.INFO)
logger = logging.getLogger(__name__)
NAME = 'figshare'
URL = 'http://api.figshare.com/v1/articles/search?search_for=*&from_date='
def consume(days_back=0):
start_date = date.today() - timedelta(days_back) - timedelta(1)
end_date = date.today() - timedelta(1)
search_url = '{0}{1}-{2}-{3}&end_date={4}-{5}-{6}'.format(URL,
start_date.year,
start_date.month,
start_date.day,
end_date.year,
end_date.month,
end_date.day)
records = get_records(search_url)
record_list = []
for record in records:
doc_id = record['article_id']
record_list.append(
RawDocument(
{
'doc': json.dumps(record),
'source': NAME,
'docID': unicode(doc_id),
'filetype': 'json'
}
)
)
return record_list
def get_records(search_url):
records = requests.get(search_url)
total_records = records.json()['items_found']
page = 1
all_records = []
while len(all_records) < total_records:
logger.info('Requesting records for url: {}&page={}'.format(search_url, str(page)))
record_list = records.json()['items']
for record in record_list:
if len(all_records) < total_records:
all_records.append(record)
page += 1
records = requests.get(search_url + '&page={}'.format(str(page)))
time.sleep(3)
return all_records
def get_contributors(record):
authors = record['authors']
contributor_list = []
for person in authors:
name = HumanName(person['author_name'])
contributor = {
'prefix': name.title,
'given': name.first,
'middle': name.middle,
'family': name.last,
'suffix': name.suffix,
'email': '',
'ORCID': '',
}
contributor_list.append(contributor)
return contributor_list
def get_ids(record):
# Right now, only take the last DOI - others in properties
doi = record['DOI']
try:
doi = doi.replace('http://dx.doi.org/', '')
except AttributeError:
for item in doi:
item.replace('http://dx.doi.org/', '')
doi = item
return {
'serviceID': unicode(record['article_id']),
'url': record['url'],
'doi': doi
}
def get_properties(record):
return {
'article_id': record['article_id'],
'defined_type': record['defined_type'],
'type': record['type'],
'links': record['links'],
'doi': record['DOI']
}
def normalize(raw_doc):
doc = raw_doc.get('doc')
record = json.loads(doc)
normalized_dict = {
'title': record['title'],
'contributors': get_contributors(record),
'properties': get_properties(record),
'description': record['description'],
'tags': [],
'id': get_ids(record),
'source': NAME,
'dateUpdated': unicode(parse(record['modified_date']).isoformat())
}
return NormalizedDocument(normalized_dict)
if __name__ == '__main__':
print(lint(consume, normalize))
| Python | 0 |
4464b72eac2cc995a3276341f066bee30497d621 | Bump version to 1.1.0 for release | globus_sdk/version.py | globus_sdk/version.py | # single source of truth for package version,
# see https://packaging.python.org/en/latest/single_source_version/
__version__ = "1.1.0"
| # single source of truth for package version,
# see https://packaging.python.org/en/latest/single_source_version/
__version__ = "1.0.0"
| Python | 0 |
bb9d1255548b46dc2ba7a85e26606b7dd4c926f3 | Update original "Hello, World!" parser to latest coding, plus runTests | examples/greeting.py | examples/greeting.py | # greeting.py
#
# Demonstration of the pyparsing module, on the prototypical "Hello, World!"
# example
#
# Copyright 2003, 2019 by Paul McGuire
#
import pyparsing as pp
# define grammar
greet = pp.Word(pp.alphas) + "," + pp.Word(pp.alphas) + pp.oneOf("! ? .")
# input string
hello = "Hello, World!"
# parse input string
print(hello, "->", greet.parseString( hello ))
# parse a bunch of input strings
greet.runTests("""\
Hello, World!
Ahoy, Matey!
Howdy, Pardner!
Morning, Neighbor!
""") | # greeting.py
#
# Demonstration of the pyparsing module, on the prototypical "Hello, World!"
# example
#
# Copyright 2003, by Paul McGuire
#
from pyparsing import Word, alphas
# define grammar
greet = Word( alphas ) + "," + Word( alphas ) + "!"
# input string
hello = "Hello, World!"
# parse input string
print(hello, "->", greet.parseString( hello ))
| Python | 0.000077 |
04d0bb1bf71ee3a17efbb4bb15bb808cc832f04b | Update examples.py | examples/examples.py | examples/examples.py | from py_fuzz.generator import *
print random_language(language="russian")
print random_ascii(
seed="this is a test", randomization="byte_jitter",
mutation_rate=0.25
)
print random_regex(
length=20, regex="[a-zA-Z]"
)
print random_utf8(
min_length=10,
max_length=50
)
print random_bytes()
print random_utf8()
print random_regex(regex="[a-zA-Z]")
with open("test.png", "wb") as dump:
dump.write(random_image())
with open("fake.png", 'wb') as dump:
dump.write(random_image(randomization="byte_jitter", height=300, width=500, mutation_rate=0))
with open("randomLenna.png", "wb") as dump:
dump.write("")
random_valid_image(seed="Lenna.png", mutation_rate=0.1)
| from py_fuzz import *
print random_language(language="russian")
print random_ascii(
seed="this is a test", randomization="byte_jitter",
mutation_rate=0.25
)
print random_regex(
length=20, regex="[a-zA-Z]"
)
print random_utf8(
min_length=10,
max_length=50
)
print random_bytes()
print random_utf8()
print random_regex(regex="[a-zA-Z]")
with open("test.png", "wb") as dump:
dump.write(random_image())
with open("fake.png", 'wb') as dump:
dump.write(random_image(randomization="byte_jitter", height=300, width=500, mutation_rate=0))
with open("randomLenna.png", "wb") as dump:
dump.write("")
random_valid_image(seed="Lenna.png", mutation_rate=0.1)
| Python | 0 |
bc6c3834cd8383f7e1f9e109f0413bb6015a92bf | Remove unneeded datetime from view | go/scheduler/views.py | go/scheduler/views.py | from django.views.generic import ListView
from go.scheduler.models import Task
class SchedulerListView(ListView):
paginate_by = 12
context_object_name = 'tasks'
template = 'scheduler/task_list.html'
def get_queryset(self):
return Task.objects.filter(
account_id=self.request.user_api.user_account_key
).order_by('-scheduled_for')
| import datetime
from django.views.generic import ListView
from go.scheduler.models import Task
class SchedulerListView(ListView):
paginate_by = 12
context_object_name = 'tasks'
template = 'scheduler/task_list.html'
def get_queryset(self):
now = datetime.datetime.utcnow()
return Task.objects.filter(
account_id=self.request.user_api.user_account_key
).order_by('-scheduled_for')
| Python | 0 |
654034d3a0c6ec4e023af6118d6e628336bc39dd | Upgrade to Python 3 | rpt2csv.py | rpt2csv.py | import sys
import csv
import codecs
def convert(inputFile,outputFile):
"""
Convert a RPT file to a properly escaped CSV file
RPT files are usually sourced from old versions of Microsoft SQL Server Management Studio
RPT files are fixed width with column names on the first line, a second line with dashes and spaces,
and then on one row per record.
The column widths are calculated from the longest field in a column, so the format varies
depending on the results. Thankfully, we can reliably infer column widths by looking at the indexes
of spaces on the second line.
Here we chop each record at the index of the space on the second line and strip the result.
Note, if the source data has significant whitespace, the striping will remove this, but likely significant
whitespace was destroyed by the RPT field padding anyway.
"""
writer = csv.writer(outputFile)
fieldIndexes = []
headers = ""
for idx, val in enumerate(inputFile):
if(idx == 0):
headers = val
elif(idx == 1):
fieldIndexes = list(getFieldIndexes(val," "))
row = list(getFields(headers,fieldIndexes))
writer.writerow(row)
else:
row = list(getFields(val,fieldIndexes))
writer.writerow(row)
def getFieldIndexes(input, sep):
lastIndex = 0
for idx, c in enumerate(input):
if(c == sep):
yield (lastIndex,idx)
lastIndex = idx+1
yield lastIndex, len(input)
def getFields(input, indexes):
for index in indexes:
yield input[index[0]:index[1]].strip()
if __name__ == '__main__':
if(len(sys.argv) == 3):
with open(sys.argv[1],encoding='utf-8-sig') as inputFile:
with open(sys.argv[2],'w',newline='') as outputFile:
convert(inputFile,outputFile)
else:
print("Usage: rpt2csv.py inputFile outputFile")
| import sys
import csv
def convert(inputFile,outputFile):
"""
Convert a RPT file to a properly escaped CSV file
RPT files are usually sourced from old versions of Microsoft SQL Server Management Studio
RPT files are fixed width with column names on the first line, a second line with dashes and spaces,
and then on one row per record.
The column widths are calculated from the longest field in a column, so the format varies
depending on the results. Thankfully, we can reliably infer column widths by looking at the indexes
of spaces on the second line.
Here we chop each record at the index of the space on the second line and strip the result.
Note, if the source data has significant whitespace, the striping will remove this, but likely significant
whitespace was destroyed by the RPT field padding anyway.
"""
writer = csv.writer(outputFile)
fieldIndexes = []
headers = ""
for idx, val in enumerate(inputFile):
if(idx == 0):
headers = val.decode('utf-8-sig')
elif(idx == 1):
fieldIndexes = list(getFieldIndexes(val," "))
row = list(getFields(headers,fieldIndexes))
writer.writerow(row)
else:
row = list(getFields(val,fieldIndexes))
writer.writerow(row)
def getFieldIndexes(input, sep):
lastIndex = 0
for idx, c in enumerate(input):
if(c == sep):
yield (lastIndex,idx)
lastIndex = idx+1
yield lastIndex, len(input)
def getFields(input, indexes):
for index in indexes:
yield input[index[0]:index[1]].strip()
if __name__ == '__main__':
if(len(sys.argv) == 3):
with open(sys.argv[1]) as inputFile:
with open(sys.argv[2],'wb') as outputFile:
convert(inputFile,outputFile)
else:
print("Usage: rpt2csv.py inputFile outputFile")
| Python | 0.000672 |
7571b4519e54e2e747a21f7f900e486ccee19aa0 | Update job_crud.py | examples/job_crud.py | examples/job_crud.py | # Copyright 2016 The Kubernetes Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Creates, updates, and deletes a job object.
"""
from os import path
import yaml
from time import sleep
from kubernetes import client, config
JOB_NAME = "pi"
def create_job_object():
# Configureate Pod template container
container = client.V1Container(
name="pi",
image="perl",
command=["perl", "-Mbignum=bpi", "-wle", "print bpi(2000)"])
# Create and configurate a spec section
template = client.V1PodTemplateSpec(
metadata=client.V1ObjectMeta(labels={"app": "pi"}),
spec=client.V1PodSpec(restart_policy="Never", containers=[container]))
# Create the specification of deployment
spec = client.V1JobSpec(
template=template,
backoff_limit=4)
# Instantiate the job object
job = client.V1Job(
api_version="batch/v1",
kind="Job",
metadata=client.V1ObjectMeta(name=JOB_NAME),
spec=spec)
return job
def create_job(api_instance, job):
api_response = api_instance.create_namespaced_job(
body=job,
namespace="default")
print("Job created. status='%s'" % str(get_job_status(api_instance)))
def get_job_status(api_instance):
job_completed = False
while not job_completed:
api_response = api_instance.read_namespaced_job_status(
name=JOB_NAME,
namespace="default")
if api_response.status.succeeded is not None or api_response.status.failed is not None:
job_completed = True
return api_response.status
def update_job(api_instance, job):
# Update container image
job.spec.template.spec.containers[0].image = "perl"
api_response = api_instance.patch_namespaced_job(
name=JOB_NAME,
namespace="default",
body=job)
print("Job updated. status='%s'" % str(api_response.status))
def delete_job(api_instance):
api_response = api_instance.delete_namespaced_job(
name=JOB_NAME,
namespace="default",
body=client.V1DeleteOptions(
propagation_policy='Foreground',
grace_period_seconds=5))
print("Job deleted. status='%s'" % str(api_response.status))
def main():
# Configs can be set in Configuration class directly or using helper
# utility. If no argument provided, the config will be loaded from
# default location.
config.load_kube_config()
batch_v1 = client.BatchV1Api()
# Create a job object with client-python API. The job we
# created is same as the `pi-job.yaml` in the /examples folder.
job = create_job_object()
create_job(batch_v1, job)
update_job(batch_v1, job)
delete_job(batch_v1)
if __name__ == '__main__':
main()
| # Copyright 2016 The Kubernetes Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Creates, updates, and deletes a job object.
"""
from os import path
import yaml
from time import sleep
from kubernetes import client, config
JOB_NAME = "pi"
def create_job_object():
# Configureate Pod template container
container = client.V1Container(
name="pi",
image="perl",
command=["perl", "-Mbignum=bpi", "-wle", "print bpi(2000)"])
# Create and configurate a spec section
template = client.V1PodTemplateSpec(
metadata=client.V1ObjectMeta(labels={"app": "pi"}),
spec=client.V1PodSpec(restart_policy="Never", containers=[container]))
# Create the specification of deployment
spec = client.V1JobSpec(
template=template,
backoff_limit=4)
# Instantiate the job object
job = client.V1Job(
api_version="batch/v1",
kind="Job",
metadata=client.V1ObjectMeta(name=JOB_NAME),
spec=spec)
return job
def create_job(api_instance, job):
api_response = api_instance.create_namespaced_job(
body=job,
namespace="default")
# Need to wait for a second for the job status to update
sleep(1)
print("Job created. status='%s'" % str(get_job_status(api_instance)))
def get_job_status(api_instance):
api_response = api_instance.read_namespaced_job_status(
name=JOB_NAME,
namespace="default")
return api_response.status
def update_job(api_instance, job):
# Update container image
job.spec.template.spec.containers[0].image = "perl"
api_response = api_instance.patch_namespaced_job(
name=JOB_NAME,
namespace="default",
body=job)
print("Job updated. status='%s'" % str(api_response.status))
def delete_job(api_instance):
api_response = api_instance.delete_namespaced_job(
name=JOB_NAME,
namespace="default",
body=client.V1DeleteOptions(
propagation_policy='Foreground',
grace_period_seconds=5))
print("Job deleted. status='%s'" % str(api_response.status))
def main():
# Configs can be set in Configuration class directly or using helper
# utility. If no argument provided, the config will be loaded from
# default location.
config.load_kube_config()
batch_v1 = client.BatchV1Api()
# Create a job object with client-python API. The job we
# created is same as the `pi-job.yaml` in the /examples folder.
job = create_job_object()
create_job(batch_v1, job)
update_job(batch_v1, job)
delete_job(batch_v1)
if __name__ == '__main__':
main()
| Python | 0.000001 |
f0df0e081aba7e9eb7a39088613d29c7c1e8a596 | set integrationtime to 80% of sampling interval | examples/liveview.py | examples/liveview.py | #!/usr/bin/env python
""" File: example_liveview.py
Author: Andreas Poehlmann
Last change: 2013/02/27
Liveview example
"""
import oceanoptics
import time
import numpy as np
from gi.repository import Gtk, GLib
class mpl:
from matplotlib.figure import Figure
from matplotlib.backends.backend_gtk3agg import FigureCanvasGTK3Agg as FigureCanvas
class DynamicPlotter(Gtk.Window):
def __init__(self, sample_interval=0.1, smoothing=1, oversampling=1, raw=False, size=(600,350)):
# Gtk stuff
Gtk.Window.__init__(self, title='Ocean Optics Spectrometer')
self.connect("destroy", lambda x : Gtk.main_quit())
self.set_default_size(*size)
# Data stuff
self.sample_interval = int(sample_interval*1000)
self.smoothing = int(smoothing)
self._sample_n = 0
self.raw = bool(raw)
self.spectrometer = oceanoptics.get_a_random_spectrometer()
self.spectrometer.integration_time(time_us=(sample_interval * 0.8 * 1000000))
self.wl = self.spectrometer.wavelengths()
self.sp = self.spectrometer.intensities()
self.sp = np.zeros((len(self.sp), int(oversampling)))
# MPL stuff
self.figure = mpl.Figure()
self.ax = self.figure.add_subplot(1, 1, 1)
self.ax.grid(True)
self.canvas = mpl.FigureCanvas(self.figure)
self.line, = self.ax.plot(self.wl, self.sp[:,0])
# Gtk stuff
self.add(self.canvas)
self.canvas.show()
self.show_all()
def update_plot(self):
# -> redraw on new spectrum
# -> average over self.sample_n spectra
# -> smooth if self.smoothing
# remark:
# > smoothing can be done after averaging
# get spectrum
sp = np.array(self.spectrometer.intensities(raw=self.raw))
self.sp[:,self._sample_n] = sp
self._sample_n += 1
self._sample_n %= self.sp.shape[1]
if self._sample_n != 0: # do not draw or average
return
# average!
sp = np.mean(self.sp, axis=1)
if self.smoothing > 1:
n = self.smoothing
kernel = np.ones((n,)) / n
sp = np.convolve(sp, kernel)[(n-1):]
self.line.set_ydata(sp)
self.ax.relim()
self.ax.autoscale_view(False, False, True)
self.canvas.draw()
return True
def run(self):
GLib.timeout_add(self.sample_interval, self.update_plot)
Gtk.main()
if __name__ == '__main__':
import argparse
parser = argparse.ArgumentParser()
parser.add_argument('-r', '--raw', action='store_true', help='Show raw detector values')
parser.add_argument('-i', '--interval', type=float, default=0.1, metavar='SECONDS',
help='Update interval')
parser.add_argument('-s', '--smooth', type=int, default=1, metavar='N',
help='Number of spectrum points to average over')
parser.add_argument('-O', '--oversample', type=int, default=1, metavar='N',
help='Average together successive spectra')
args = parser.parse_args()
m = DynamicPlotter(sample_interval=args.interval, raw=args.raw, smoothing=args.smooth,
oversampling=args.oversample)
m.run()
| #!/usr/bin/env python
""" File: example_liveview.py
Author: Andreas Poehlmann
Last change: 2013/02/27
Liveview example
"""
import oceanoptics
import time
import numpy as np
from gi.repository import Gtk, GLib
class mpl:
from matplotlib.figure import Figure
from matplotlib.backends.backend_gtk3agg import FigureCanvasGTK3Agg as FigureCanvas
class DynamicPlotter(Gtk.Window):
def __init__(self, sample_interval=0.1, smoothing=1, oversampling=1, raw=False, size=(600,350)):
# Gtk stuff
Gtk.Window.__init__(self, title='Ocean Optics Spectrometer')
self.connect("destroy", lambda x : Gtk.main_quit())
self.set_default_size(*size)
# Data stuff
self.sample_interval = int(sample_interval*1000)
self.smoothing = int(smoothing)
self._sample_n = 0
self.raw = bool(raw)
self.spectrometer = oceanoptics.get_a_random_spectrometer()
self.wl = self.spectrometer.wavelengths()
self.sp = self.spectrometer.intensities()
self.sp = np.zeros((len(self.sp), int(oversampling)))
# MPL stuff
self.figure = mpl.Figure()
self.ax = self.figure.add_subplot(1, 1, 1)
self.ax.grid(True)
self.canvas = mpl.FigureCanvas(self.figure)
self.line, = self.ax.plot(self.wl, self.sp[:,0])
# Gtk stuff
self.add(self.canvas)
self.canvas.show()
self.show_all()
def update_plot(self):
# -> redraw on new spectrum
# -> average over self.sample_n spectra
# -> smooth if self.smoothing
# remark:
# > smoothing can be done after averaging
# get spectrum
sp = np.array(self.spectrometer.intensities(raw=self.raw))
self.sp[:,self._sample_n] = sp
self._sample_n += 1
self._sample_n %= self.sp.shape[1]
if self._sample_n != 0: # do not draw or average
return
# average!
sp = np.mean(self.sp, axis=1)
if self.smoothing > 1:
n = self.smoothing
kernel = np.ones((n,)) / n
sp = np.convolve(sp, kernel)[(n-1):]
self.line.set_ydata(sp)
self.ax.relim()
self.ax.autoscale_view(False, False, True)
self.canvas.draw()
return True
def run(self):
GLib.timeout_add(self.sample_interval, self.update_plot)
Gtk.main()
if __name__ == '__main__':
import argparse
parser = argparse.ArgumentParser()
parser.add_argument('-r', '--raw', action='store_true', help='Show raw detector values')
parser.add_argument('-i', '--interval', type=float, default=0.1, metavar='SECONDS',
help='Update interval')
parser.add_argument('-s', '--smooth', type=int, default=1, metavar='N',
help='Number of spectrum points to average over')
parser.add_argument('-O', '--oversample', type=int, default=1, metavar='N',
help='Average together successive spectra')
args = parser.parse_args()
m = DynamicPlotter(sample_interval=args.interval, raw=args.raw, smoothing=args.smooth,
oversampling=args.oversample)
m.run()
| Python | 0 |
4d5edd17d7382108b90d3f60f2f11317da228603 | Add kafka start/stop script | script/kafkaServer.py | script/kafkaServer.py | #!/bin/python
from __future__ import print_function
import subprocess
import sys
import json
from util import appendline, get_ip_address
if __name__ == "__main__":
# start server one by one
if len(sys.argv) < 2 or sys.argv[1] not in ['start', 'stop']:
sys.stderr.write("Usage: python %s start or stop\n" % (sys.argv[0]))
sys.exit(1)
else:
config = json.load(open('cluster-config.json'))
if sys.argv[1] == 'start':
for node in config['nodes']:
subprocess.call(['ssh', 'cloud-user@'+node['ip'], 'bash /usr/local/kafka/bin/kafka-server-start.sh'])
else:
for node in config['nodes']:
subprocess.call(['ssh', 'cloud-user@'+node['ip'], 'bash /usr/local/kafka/bin/kafka-server-stop.sh'])
| #!/bin/python
from __future__ import print_function
import subprocess
import sys
import json
from util import appendline, get_ip_address
if __name__ == "__main__":
# start server one by one
if len(sys.argv) < 2 or sys.argv[1] not in ['start', 'stop']:
sys.stderr.write("Usage: python %s start or stop\n" % (sys.argv[0]))
sys.exit(1)
else:
config = json.load(open('cluster-config.json'))
if sys.argv[1] == 'start':
for node in config['nodes']:
subprocess.call(['ssh', 'cloud-user@'+node['ip'], 'bash /usr/local/kafka/bin/kafka-server-start.sh ')
else:
for node in config['nodes']:
subprocess.call(['ssh', 'cloud-user@'+node['ip'], 'bash /usr/local/kafka/bin/kafka-server-stop.sh ')
| Python | 0.000001 |
ebfaf30fca157e83ea9e4bf33173221fc9525caf | Fix emplorrs demo salary db error | demo/examples/employees/forms.py | demo/examples/employees/forms.py | from django import forms
from .models import Employee, DeptManager, Title, Salary
class ChangeManagerForm(forms.Form):
manager = forms.ModelChoiceField(queryset=Employee.objects.all()[:100])
def __init__(self, *args, **kwargs):
self.department = kwargs.pop('department')
super(ChangeManagerForm, self).__init__(*args, **kwargs)
def save(self):
new_manager = self.cleaned_data['manager']
DeptManager.objects.filter(
department=self.department
).set(
department=self.department,
employee=new_manager
)
class ChangeTitleForm(forms.Form):
position = forms.CharField()
def __init__(self, *args, **kwargs):
self.employee = kwargs.pop('employee')
super(ChangeTitleForm, self).__init__(*args, **kwargs)
def save(self):
new_title = self.cleaned_data['position']
Title.objects.filter(
employee=self.employee,
).set(
employee=self.employee,
title=new_title
)
class ChangeSalaryForm(forms.Form):
salary = forms.IntegerField(max_value=1000000)
def __init__(self, *args, **kwargs):
self.employee = kwargs.pop('employee')
super(ChangeSalaryForm, self).__init__(*args, **kwargs)
def save(self):
new_salary = self.cleaned_data['salary']
Salary.objects.filter(
employee=self.employee,
).set(
employee=self.employee,
salary=new_salary,
)
| from datetime import date
from django import forms
from django.utils import timezone
from .models import Employee, DeptManager, Title, Salary
class ChangeManagerForm(forms.Form):
manager = forms.ModelChoiceField(queryset=Employee.objects.all()[:100])
def __init__(self, *args, **kwargs):
self.department = kwargs.pop('department')
super(ChangeManagerForm, self).__init__(*args, **kwargs)
def save(self):
new_manager = self.cleaned_data['manager']
DeptManager.objects.filter(
department=self.department
).set(
department=self.department,
employee=new_manager
)
class ChangeTitleForm(forms.Form):
position = forms.CharField()
def __init__(self, *args, **kwargs):
self.employee = kwargs.pop('employee')
super(ChangeTitleForm, self).__init__(*args, **kwargs)
def save(self):
new_title = self.cleaned_data['position']
Title.objects.filter(
employee=self.employee,
).set(
employee=self.employee,
title=new_title
)
class ChangeSalaryForm(forms.Form):
salary = forms.IntegerField()
def __init__(self, *args, **kwargs):
self.employee = kwargs.pop('employee')
super(ChangeSalaryForm, self).__init__(*args, **kwargs)
def save(self):
new_salary = self.cleaned_data['salary']
Salary.objects.filter(
employee=self.employee,
).set(
employee=self.employee,
salary=new_salary,
)
| Python | 0.000001 |
d82d43a32d770498e802b44089637e774c331c13 | test for post and terminals | busineme/core/tests/test_views.py | busineme/core/tests/test_views.py | from django.test import TestCase
from django.test import Client
from ..models import Busline
from ..models import Terminal
from ..models import Post
from authentication.models import BusinemeUser
STATUS_OK = 200
STATUS_NOT_FOUND = 404
GENERIC_NOT_FOUND_ID = 99999999
class TestSearchResultView(TestCase):
def setUp(self):
self.client = Client()
self.busline = Busline()
self.busline.line_number = '001'
self.busline.description = 'route'
self.busline.route_size = 0.1
self.busline.fee = 3.50
self.terminal = Terminal(description="terminal")
self.terminal.save()
self.busline.save()
self.busline.terminals.add(self.terminal)
def test_get(self):
response = self.client.get("/buslines/")
code = response.status_code
self.assertEquals(code, STATUS_OK)
def test_get_busline(self):
bus = Busline.objects.get(description="route")
response = self.client.get(
"/buslines/" + str(bus.id) + "/")
code = response.status_code
self.assertEquals(code, STATUS_OK)
def test_get_busline_not_found(self):
response = self.client.get(
'/buslines/' + str(GENERIC_NOT_FOUND_ID) + "/")
code = response.status_code
self.assertEquals(code, STATUS_OK)
class TestTerminalSearchResultView(TestCase):
def setUp(self):
self.terminal = Terminal()
self.terminal.description = "Terminal Description Test String"
self.terminal.addres = "Terminal Adress Test String "
self.terminal.save()
def test_get(self):
response = self.client.get("/terminals/")
code = response.status_code
self.assertEquals(code, STATUS_OK)
def test_get_terminal(self):
terminal = self.terminal.id
response = self.client.get("/terminals/%s/" % str(terminal))
code = response.status_code
self.assertEquals(code, STATUS_OK)
def test_get_terminal_null(self):
response = self.client.get('\
/terminals/%s/' % (str(GENERIC_NOT_FOUND_ID)))
code = response.status_code
self.assertEquals(code, STATUS_NOT_FOUND)
class TestPostView(TestCase):
def setUp(self):
self.post = Post()
self.busline = Busline()
self.busline.line_number = "001"
self.busline.route_size = 0.1
self.busline.fee = 3.50
self.busline.save()
self.user = BusinemeUser()
self.user.username = "TestUser"
self.user.save()
self.post.busline = self.busline
self.post.traffic = 1
self.post.capacity = 1
self.post.user = self.user
self.post.save()
def test_get(self):
response = self.client.get("/posts/")
code = response.status_code
self.assertEquals(code, STATUS_OK)
def test_get_post(self):
post_id = self.post.id
response = self.client.get("/posts/%s/" % str(post_id))
code = response.status_code
self.assertEquals(code, STATUS_OK)
def test_get_terminal_null(self):
response = self.client.get('\
/posts/%s/' % (str(GENERIC_NOT_FOUND_ID)))
code = response.status_code
self.assertEquals(code, STATUS_NOT_FOUND)
| from django.test import TestCase
from django.test import Client
from ..models import Busline
from ..models import Terminal
STATUS_OK = 200
STATUS_NOT_FOUND = 404
BUSLINE_NOT_FOUND_ID = 99999999
class TestSearchResultView(TestCase):
def setUp(self):
self.client = Client()
self.busline = Busline()
self.busline.line_number = '001'
self.busline.description = 'route'
self.busline.route_size = 0.1
self.busline.fee = 3.50
self.terminal = Terminal(description="terminal")
self.terminal.save()
self.busline.save()
self.busline.terminals.add(self.terminal)
def test_get(self):
response = self.client.get("/buslines/")
code = response.status_code
self.assertEquals(code, STATUS_OK)
def test_get_busline(self):
bus = Busline.objects.get(description="route")
response = self.client.get(
"/buslines/" + str(bus.id) + "/")
code = response.status_code
self.assertEquals(code, STATUS_OK)
def test_get_busline_not_found(self):
response = self.client.get(
'/buslines/' + str(BUSLINE_NOT_FOUND_ID) + "/")
code = response.status_code
self.assertEquals(code, STATUS_OK)
class TestTerminalSearchResultView(TestCase):
def setUp(self):
self.terminal = Terminal()
self.terminal.description = "Terminal Description Test String"
self.terminal.addres = "Terminal Adress Test String "
self.terminal.save()
def test_get(self):
response = self.client.get("/terminals/")
code = response.status_code
self.assertEquals(code, STATUS_OK)
def test_get_terminal(self):
terminal = self.terminal.id
response = self.client.get("/terminals/%s/" % str(terminal))
code = response.status_code
self.assertEquals(code, STATUS_OK)
| Python | 0 |
dd7a857c98975eac7930747e0aee34ebcb9f3178 | Update Evaluation.py | src/LiviaNet/Modules/General/Evaluation.py | src/LiviaNet/Modules/General/Evaluation.py | """
Copyright (c) 2016, Jose Dolz .All rights reserved.
Redistribution and use in source and binary forms, with or without modification,
are permitted provided that the following conditions are met:
1. Redistributions of source code must retain the above copyright notice,
this list of conditions and the following disclaimer.
2. Redistributions in binary form must reproduce the above copyright notice,
this list of conditions and the following disclaimer in the documentation
and/or other materials provided with the distribution.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES
OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT
HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
OTHER DEALINGS IN THE SOFTWARE.
Jose Dolz. Dec, 2016.
email: jose.dolz.upv@gmail.com
LIVA Department, ETS, Montreal.
"""
import pdb
import numpy as np
# ----- Dice Score -----
def computeDice(autoSeg, groundTruth):
""" Returns
-------
DiceArray : floats array
Dice coefficient as a float on range [0,1].
Maximum similarity = 1
No similarity = 0 """
n_classes = int( np.max(groundTruth) + 1)
DiceArray = []
for c_i in xrange(1,n_classes):
idx_Auto = np.where(autoSeg.flatten() == c_i)[0]
idx_GT = np.where(groundTruth.flatten() == c_i)[0]
autoArray = np.zeros(autoSeg.size,dtype=np.bool)
autoArray[idx_Auto] = 1
gtArray = np.zeros(autoSeg.size,dtype=np.bool)
gtArray[idx_GT] = 1
dsc = dice(autoArray, gtArray, empty_score=1.0)
#dice = np.sum(autoSeg[groundTruth==c_i])*2.0 / (np.sum(autoSeg) + np.sum(groundTruth))
DiceArray.append(dsc)
return DiceArray
def dice(im1, im2):
"""
Computes the Dice coefficient
----------
im1 : boolean array
im2 : boolean array
If they are not boolean, they will be converted.
-------
It returns the Dice coefficient as a float on the range [0,1].
1: Perfect overlapping
0: Not overlapping
"""
im1 = np.asarray(im1).astype(np.bool)
im2 = np.asarray(im2).astype(np.bool)
if im1.size != im2.size:
raise ValueError("Size mismatch between input arrays!!!")
im_sum = im1.sum() + im2.sum()
if im_sum == 0:
return 1.0
# Compute Dice
intersection = np.logical_and(im1, im2)
return 2. * intersection.sum() / im_sum
| """
Copyright (c) 2016, Jose Dolz .All rights reserved.
Redistribution and use in source and binary forms, with or without modification,
are permitted provided that the following conditions are met:
1. Redistributions of source code must retain the above copyright notice,
this list of conditions and the following disclaimer.
2. Redistributions in binary form must reproduce the above copyright notice,
this list of conditions and the following disclaimer in the documentation
and/or other materials provided with the distribution.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES
OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT
HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
OTHER DEALINGS IN THE SOFTWARE.
Jose Dolz. Dec, 2016.
email: jose.dolz.upv@gmail.com
LIVA Department, ETS, Montreal.
"""
import pdb
import numpy as np
# ----- Dice Score -----
def computeDice(autoSeg, groundTruth):
""" Returns
-------
DiceArray : floats array
Dice coefficient as a float on range [0,1].
Maximum similarity = 1
No similarity = 0 """
n_classes = int( np.max(groundTruth) + 1)
DiceArray = []
for c_i in xrange(1,n_classes):
idx_Auto = np.where(autoSeg.flatten() == c_i)[0]
idx_GT = np.where(groundTruth.flatten() == c_i)[0]
autoArray = np.zeros(autoSeg.size,dtype=np.bool)
autoArray[idx_Auto] = 1
gtArray = np.zeros(autoSeg.size,dtype=np.bool)
gtArray[idx_GT] = 1
dsc = dice(autoArray, gtArray, empty_score=1.0)
#dice = np.sum(autoSeg[groundTruth==c_i])*2.0 / (np.sum(autoSeg) + np.sum(groundTruth))
DiceArray.append(dsc)
return DiceArray
def dice(im1, im2):
"""
Computes the Dice coefficient
----------
im1 : boolean array
im2 : boolean array
If they are not boolean, they will be converted.
Returns
-------
dice : float
Dice coefficient as a float on range [0,1].
Maximum similarity = 1
No similarity = 0
Both are empty (sum eq to zero) = empty_score
Notes
-----
The order of inputs for `dice` is irrelevant. The result will be
identical if `im1` and `im2` are switched.
"""
im1 = np.asarray(im1).astype(np.bool)
im2 = np.asarray(im2).astype(np.bool)
if im1.size != im2.size:
raise ValueError("Size mismatch between input arrays!!!")
im_sum = im1.sum() + im2.sum()
if im_sum == 0:
return 1.0
# Compute Dice
intersection = np.logical_and(im1, im2)
return 2. * intersection.sum() / im_sum
| Python | 0 |
06f78c21e6b7e3327244e89e90365169f4c32ea1 | Fix style issues raised by pep8. | calaccess_campaign_browser/api.py | calaccess_campaign_browser/api.py | from tastypie.resources import ModelResource, ALL
from .models import Filer, Filing
from .utils.serializer import CIRCustomSerializer
class FilerResource(ModelResource):
class Meta:
queryset = Filer.objects.all()
serializer = CIRCustomSerializer()
filtering = {'filer_id_raw': ALL}
excludes = ['id']
class FilingResource(ModelResource):
class Meta:
queryset = Filing.objects.all()
serializer = CIRCustomSerializer()
filtering = {'filing_id_raw': ALL}
excludes = ['id']
| from tastypie.resources import ModelResource, ALL
from .models import Filer, Filing
from .utils.serializer import CIRCustomSerializer
class FilerResource(ModelResource):
class Meta:
queryset = Filer.objects.all()
serializer = CIRCustomSerializer()
filtering = { 'filer_id_raw': ALL }
excludes = [ 'id' ]
class FilingResource(ModelResource):
class Meta:
queryset = Filing.objects.all()
serializer = CIRCustomSerializer()
filtering = { 'filing_id_raw': ALL }
excludes = [ 'id' ]
| Python | 0 |
a473b2cb9af95c1296ecae4d2138142f2be397ee | Add variant extension in example script | examples/variants.py | examples/variants.py | #!/usr/bin/env python
# -*- coding: utf8 - *-
from __future__ import print_function, unicode_literals
from cihai.bootstrap import bootstrap_unihan
from cihai.core import Cihai
def variant_list(unihan, field):
for char in unihan.with_fields(field):
print("Character: {}".format(char.char))
for var in char.untagged_vars(field):
print(var)
def script(unihan_options={}):
"""Wrapped so we can test in tests/test_examples.py"""
print("This example prints variant character data.")
c = Cihai()
c.add_dataset('cihai.unihan.Unihan', namespace='unihan')
if not c.sql.is_bootstrapped: # download and install Unihan to db
bootstrap_unihan(c.sql.metadata, options=unihan_options)
c.sql.reflect_db() # automap new table created during bootstrap
c.unihan.add_extension('cihai.unihan.UnihanVariants', namespace='variants')
print("## ZVariants")
variant_list(c.unihan, "kZVariant")
print("## kSemanticVariant")
variant_list(c.unihan, "kSemanticVariant")
print("## kSpecializedSemanticVariant")
variant_list(c.unihan, "kSpecializedSemanticVariant")
if __name__ == '__main__':
script()
| #!/usr/bin/env python
# -*- coding: utf8 - *-
from __future__ import print_function, unicode_literals
from cihai.bootstrap import bootstrap_unihan
from cihai.core import Cihai
def variant_list(unihan, field):
for char in unihan.with_fields(field):
print("Character: {}".format(char.char))
for var in char.untagged_vars(field):
print(var)
def script(unihan_options={}):
"""Wrapped so we can test in tests/test_examples.py"""
print("This example prints variant character data.")
c = Cihai()
c.add_dataset('cihai.unihan.Unihan', namespace='unihan')
if not c.sql.is_bootstrapped: # download and install Unihan to db
bootstrap_unihan(c.sql.metadata, options=unihan_options)
c.sql.reflect_db() # automap new table created during bootstrap
print("## ZVariants")
variant_list(c.unihan, "kZVariant")
print("## kSemanticVariant")
variant_list(c.unihan, "kSemanticVariant")
print("## kSpecializedSemanticVariant")
variant_list(c.unihan, "kSpecializedSemanticVariant")
if __name__ == '__main__':
script()
| Python | 0 |
eeebe264c4d873369f3d24b2e7b676e004eb6671 | Fix path bug in update_source. | neuroimaging/externals/pynifti/utils/update_source.py | neuroimaging/externals/pynifti/utils/update_source.py | #!/usr/bin/env python
"""Copy source files from pynifti git directory into nipy source directory.
We only want to copy the files necessary to build pynifti and the nifticlibs,
and use them within nipy. We will not copy docs, tests, etc...
Pynifti should be build before this script is run so swig generates the
wrapper for nifticlib. We do not want swig as a dependency for nipy.
"""
from os import mkdir
from os.path import join, exists, expanduser
from shutil import copy2 as copy
"""
The pynifti source should be in a directory level with nipy-trunk
Ex:
/Users/cburns/src/nipy
/Users/cburns/src/pynifti
"""
src_dir = expanduser('~/src/pynifti')
# Destination directory is the top-level externals/pynifti directory
dst_dir = '..'
assert exists(src_dir)
copy(join(src_dir, 'AUTHOR'), join(dst_dir, 'AUTHOR'))
copy(join(src_dir, 'COPYING'), join(dst_dir, 'COPYING'))
# pynifti source and swig wrappers
nifti_list = ['niftiformat.py', 'niftiimage.py', 'utils.py',
'nifticlib.py', 'nifticlib_wrap.c']
nifti_src = join(src_dir, 'nifti')
nifti_dst = join(dst_dir, 'nifti')
if not exists(nifti_dst):
mkdir(nifti_dst)
def copynifti(filename):
copy(join(nifti_src, filename), join(nifti_dst, filename))
for nf in nifti_list:
copynifti(nf)
# nifticlib sources
nifticlib_list = ['LICENSE', 'README', 'nifti1.h', 'nifti1_io.c',
'nifti1_io.h', 'znzlib.c', 'znzlib.h']
nifticlib_src = join(src_dir, '3rd', 'nifticlibs')
nifticlib_dst = join(nifti_dst, 'nifticlibs')
if not exists(nifticlib_dst):
mkdir(nifticlib_dst)
def copynifticlib(filename):
copy(join(nifticlib_src, filename), join(nifticlib_dst, filename))
for nf in nifticlib_list:
copynifticlib(nf)
| #!/usr/bin/env python
"""Copy source files from pynifti git directory into nipy source directory.
We only want to copy the files necessary to build pynifti and the nifticlibs,
and use them within nipy. We will not copy docs, tests, etc...
Pynifti should be build before this script is run so swig generates the
wrapper for nifticlib. We do not want swig as a dependency for nipy.
"""
from os import mkdir
from os.path import join, exists
from shutil import copy2 as copy
"""
The pynifti source should be in a directory level with nipy-trunk
Ex:
/Users/cburns/src/nipy
/Users/cburns/src/pynifti
"""
src_dir = '../../../../../pynifti'
# Destination directory is the top-level externals/pynifti directory
dst_dir = '..'
assert exists(src_dir)
copy(join(src_dir, 'AUTHOR'), join(dst_dir, 'AUTHOR'))
copy(join(src_dir, 'COPYING'), join(dst_dir, 'COPYING'))
# pynifti source and swig wrappers
nifti_list = ['niftiformat.py', 'niftiimage.py', 'utils.py',
'nifticlib.py', 'nifticlib_wrap.c']
nifti_src = join(src_dir, 'nifti')
nifti_dst = join(dst_dir, 'nifti')
if not exists(nifti_dst):
mkdir(nifti_dst)
def copynifti(filename):
copy(join(nifti_src, filename), join(nifti_dst, filename))
for nf in nifti_list:
copynifti(nf)
# nifticlib sources
nifticlib_list = ['LICENSE', 'README', 'nifti1.h', 'nifti1_io.c',
'nifti1_io.h', 'znzlib.c', 'znzlib.h']
nifticlib_src = join(src_dir, '3rd', 'nifticlibs')
nifticlib_dst = join(nifti_dst, 'nifticlibs')
if not exists(nifticlib_dst):
mkdir(nifticlib_dst)
def copynifticlib(filename):
copy(join(nifticlib_src, filename), join(nifticlib_dst, filename))
for nf in nifticlib_list:
copynifticlib(nf)
| Python | 0 |
ccb6728111a3142830bd4b3fccb8a956002013f0 | Update example to remove upload, not relevant for plotly! | examples/plotly_datalogger.py | examples/plotly_datalogger.py | from pymoku import Moku, MokuException
from pymoku.instruments import *
import pymoku.plotly_support as pmp
import time, logging, traceback
logging.basicConfig(format='%(asctime)s:%(name)s:%(levelname)s::%(message)s')
logging.getLogger('pymoku').setLevel(logging.INFO)
# Use Moku.get_by_serial() or get_by_name() if you don't know the IP
m = Moku.get_by_name('example')
i = m.discover_instrument()
if i is None or i.type != 'oscilloscope':
print "No or wrong instrument deployed"
i = Oscilloscope()
m.attach_instrument(i)
else:
print "Attached to existing Oscilloscope"
linespec = {
'shape' : 'spline',
'width' : '2'
}
try:
i.set_defaults()
i.set_samplerate(10)
i.set_xmode(OSC_ROLL)
i.commit()
if i.datalogger_busy():
i.datalogger_stop()
pmp.stream_init(m, 'benizl.anu', 'na8qic5nqw', 'kdi5h54dhl', 'v7qd9o6bcq', line=linespec)
i.datalogger_start(start=10, duration=600, filetype='plot')
print "Plotly URL is: %s" % pmp.stream_url(m)
while True:
time.sleep(1)
trems, treme = i.datalogger_remaining()
samples = i.datalogger_samples()
print "Captured (%d samples); %d seconds from start, %d from end" % (samples, trems, treme)
# TODO: Symbolic constants
if i.datalogger_completed():
break
e = i.datalogger_error()
if e:
print "Error occured: %s" % e
except Exception:
traceback.print_exc()
finally:
i.datalogger_stop()
m.close()
| from pymoku import Moku, MokuException
from pymoku.instruments import *
import pymoku.plotly_support as pmp
import time, logging, traceback
logging.basicConfig(format='%(asctime)s:%(name)s:%(levelname)s::%(message)s')
logging.getLogger('pymoku').setLevel(logging.DEBUG)
# Use Moku.get_by_serial() or get_by_name() if you don't know the IP
m = Moku.get_by_name('example')
i = m.discover_instrument()
if i is None or i.type != 'oscilloscope':
print "No or wrong instrument deployed"
i = Oscilloscope()
m.attach_instrument(i)
else:
print "Attached to existing Oscilloscope"
linespec = {
'shape' : 'spline',
'width' : '2'
}
try:
i.set_defaults()
i.set_samplerate(10)
i.set_xmode(OSC_ROLL)
i.commit()
if i.datalogger_busy():
i.datalogger_stop()
pmp.stream_init(m, 'benizl.anu', 'na8qic5nqw', 'kdi5h54dhl', 'v7qd9o6bcq', line=linespec)
i.datalogger_start(start=0, duration=60*10, filetype='plot')
print "Plotly URL is: %s" % pmp.stream_url(m)
while True:
time.sleep(1)
trems, treme = i.datalogger_remaining()
samples = i.datalogger_samples()
print "Captured (%d samples); %d seconds from start, %d from end" % (samples, trems, treme)
# TODO: Symbolic constants
if i.datalogger_completed():
break
e = i.datalogger_error()
if e:
print "Error occured: %s" % e
i.datalogger_stop()
i.datalogger_upload()
except Exception as e:
print e
finally:
i.datalogger_stop()
m.close()
| Python | 0 |
6f9cd84e454ee101dab23b74be345060fa4633e1 | rewrote the person.py | examples/postgresql/person.py | examples/postgresql/person.py | #!/usr/bin/env python
# -*- coding: utf-8 -*-
from base import PostgreSQL
class Person(PostgreSQL):
# You need the name of table, of course.
table = 'person'
# Squash some columns?
squash_all = True
# The above is equals to listing all of the columns:
#squashed = set(['person_id', 'name'])
# `arrange` uses `arrange_by,
# and `arrange` is useful for selecting multi-model
arrange_by = ('person_id', )
# It decides the columns which will be use to prepare the conditions.
ident_by = arrange_by
# the other clauses you want to put in queries
clauses = dict(order_by=arrange_by)
if __name__ == '__main__':
# if you want to see the SQLs it generates
#Person.dump_sql = True
print '# The Model of Mosky'
print
mosky = Person.select({'person_id': 'mosky'})
print mosky
print
print '# Access the Model, and Re-Select'
print
print mosky.person_id
print mosky['name']
print
print '# Rename Mosky, and Re-Select'
print
mosky.name = 'Yiyu Lui'
# The previous one has some typo.
mosky.name = 'Yiyu Liu'
# The two changes will be merge into only an update.
mosky.save()
# Re-selecting is not necessary. I just wanna show you the db is really
# changed.
# `where` is a shortcut of `select`
print Person.where(person_id='mosky')
print
print '# Rename Her Back'
print
mosky['name'] = 'Mosky Liu'
mosky.save()
print Person.where(person_id='mosky')
print
print '# Arrange Rows into Models'
print
for person in Person.arrange({'person_id': ('mosky', 'andy')}):
print person
# or use ``Person.find(person_id=('mosky', 'andy'))`` in for-loop
print
print '# Insert a New Person'
print
from psycopg2 import IntegrityError
from mosql.util import star
try:
new_person = Person.insert({'person_id': 'new'}, returning=star)
except IntegrityError:
print '(skip it, because this person is existent.)'
else:
print new_person
print
print '# Delete the New Person'
print
new_person = Person.delete({'person_id': 'new'}, returning=star)
print new_person
print
| #!/usr/bin/env python
# -*- coding: utf-8 -*-
from base import PostgreSQL
class Person(PostgreSQL):
table = 'person'
arrange_by = ('person_id', )
squashed = set(['person_id', 'name'])
ident_by = arrange_by
if __name__ == '__main__':
print '# select all'
person = Person.select()
print 'squashed:', person
print 'actually:', person.cols
print
print '# select with a condition'
person = Person.select({'person_id': 'mosky'})
print 'squashed:', person
print 'actually:', person.cols
print
print '# arrange entire table'
for person in Person.arrange():
print person
print
print '# arrange with a condition'
for person in Person.arrange({'person_id': ('mosky', 'andy')}):
print person
print
print '# rename mosky'
# where is almost same as select
mosky = Person.where(person_id='mosky')
# model expands the change for columns squashed
mosky.name = '<ttypo>'
mosky['name'] = '<renamed>'
# model will merged the updates when save
Person.dump_sql = True
mosky.save()
Person.dump_sql = False
mosky = Person.select({'person_id': 'mosky'})
print mosky.name
print
from mosql.util import star
print '# rename mosky back'
mosky = Person.update({'person_id': 'mosky'}, set={'name': 'Mosky Liu'}, returning=star)
print mosky
print
import mosql.json as json
print '# json'
print json.dumps(mosky)
print
print '# mem test'
def gen_rows():
yield ['andy', 'Andy First']
print 'mock cursor: yielded the first row'
yield ['bob', 'Bob Second']
print 'mock cursor: yielded the second row'
yield ['cindy', 'Cindy Third']
print 'mock cursor: yielded the thrid row'
ps = Person.arrange_rows(['person_id', 'name'], gen_rows())
print next(ps)
| Python | 0.999986 |
09bd40bc8d29fab157630d6411aa8316148a10d6 | Fix indentation bug | src/backend.py | src/backend.py | import os
import logging
import imp
import translation
#from mpi4py import MPI
class Backend(object):
def __init__(self, config_file):
if(config_file is None):
# Try to load an example configuration file
config_file = os.path.abspath(os.path.dirname(__file__)+
"/../examples/cxitut13/conf.py")
logging.warning("No configuration file given! "
"Loading example configuration from %s" % (config_file))
self.backend_conf = imp.load_source('backend_conf', config_file)
self.translator = translation.init_translator(self.backend_conf.state)
print 'Starting backend...'
def mpi_init(self):
comm = MPI.COMM_WORLD
self.rank = comm.Get_rank()
print "MPI rank %d inited" % rank
def start(self):
self.backend_conf.state['_running'] = True
while(self.backend_conf.state['_running']):
evt = self.translator.nextEvent()
self.backend_conf.onEvent(evt)
| import os
import logging
import imp
import translation
#from mpi4py import MPI
class Backend(object):
def __init__(self, config_file):
if(config_file is None):
# Try to load an example configuration file
config_file = os.path.abspath(os.path.dirname(__file__)+
"/../examples/cxitut13/conf.py")
logging.warning("No configuration file given! "
"Loading example configuration from %s" % (config_file))
self.backend_conf = imp.load_source('backend_conf', config_file)
self.translator = translation.init_translator(self.backend_conf.state)
print 'Starting backend...'
def mpi_init(self):
comm = MPI.COMM_WORLD
self.rank = comm.Get_rank()
print "MPI rank %d inited" % rank
def start(self):
self.backend_conf.state['_running'] = True
while(self.backend_conf.state['_running']):
evt = self.translator.nextEvent()
self.backend_conf.onEvent(evt)
| Python | 0.000019 |
a333ca8964132b3f1830c2ceda8cbb805df78999 | Fix locale initialization | product/runtime/src/main/python/java/android/__init__.py | product/runtime/src/main/python/java/android/__init__.py | """Copyright (c) 2018 Chaquo Ltd. All rights reserved."""
from importlib import reload
import os
from os.path import exists, join
import sys
import traceback
from . import stream, importer
def initialize(context, build_json, app_path):
stream.initialize()
importer.initialize(context, build_json, app_path)
initialize_stdlib(context)
def initialize_stdlib(context):
from com.chaquo.python import Common
# These are ordered roughly from low to high level.
initialize_sys(context, Common)
initialize_os(context)
initialize_tempfile(context)
initialize_ssl(context)
initialize_ctypes(context)
initialize_locale(context)
def initialize_sys(context, Common):
if sys.version_info[0] >= 3:
sys.abiflags = Common.PYTHON_SUFFIX[len(Common.PYTHON_VERSION_SHORT):]
# argv defaults to not existing, which may crash some programs.
sys.argv = [""]
# executable defaults to "python" on 2.7, or "" on 3.6. But neither of these values (or
# None, which is mentioned in the documentation) will allow platform.platform() to run
# without crashing.
try:
sys.executable = os.readlink("/proc/{}/exe".format(os.getpid()))
except Exception:
# Can't be certain that /proc will work on all devices, so try to carry on.
traceback.print_exc()
sys.executable = ""
# Remove default paths (#5410).
invalid_paths = [p for p in sys.path
if not (exists(p) or p.startswith(importer.ASSET_PREFIX))]
for p in invalid_paths:
sys.path.remove(p)
def initialize_os(context):
# By default, os.path.expanduser("~") returns "/data", which is an unwritable directory.
# Make it return something more usable.
os.environ.setdefault("HOME", str(context.getFilesDir()))
def initialize_tempfile(context):
tmpdir = join(str(context.getCacheDir()), "chaquopy/tmp")
if not exists(tmpdir):
os.makedirs(tmpdir)
os.environ["TMPDIR"] = tmpdir
def initialize_ssl(context):
# OpenSSL actually does know the location of the system CA store on Android, but
# unfortunately there are multiple incompatible formats of that location, so we can't rely
# on it (https://blog.kylemanna.com/android/android-ca-certificates/).
os.environ["SSL_CERT_FILE"] = join(str(context.getFilesDir()), "chaquopy/cacert.pem")
# hashlib may already have been imported during bootstrap: reload it now that the the
# OpenSSL interface in `_hashlib` is on sys.path.
import hashlib
reload(hashlib)
def initialize_ctypes(context):
import ctypes.util
import sysconfig
# The standard implementation of find_library requires external tools, so will always fail
# on Android. I can't see any easy way of finding the absolute library pathname ourselves
# (there is no LD_LIBRARY_PATH on Android), but we can at least support the case where the
# user passes the return value of find_library to CDLL().
def find_library_override(name):
filename = "lib{}.so".format(name)
try:
ctypes.CDLL(filename)
except OSError:
return None
else:
return filename
ctypes.util.find_library = find_library_override
ctypes.pythonapi = ctypes.PyDLL(sysconfig.get_config_vars()["LDLIBRARY"])
def initialize_locale(context):
import locale
# Of the various encoding functions in test_android.py, this only affects `getlocale`. All
# the others are controlled by the LC_ALL environment variable (set in chaquopy_java.pyx),
# and are not modifiable after Python startup.
locale.setlocale(locale.LC_ALL, "en_US.UTF-8")
| """Copyright (c) 2018 Chaquo Ltd. All rights reserved."""
from importlib import reload
import os
from os.path import exists, join
import sys
import traceback
from . import stream, importer
def initialize(context, build_json, app_path):
stream.initialize()
importer.initialize(context, build_json, app_path)
initialize_stdlib(context)
def initialize_stdlib(context):
from com.chaquo.python import Common
initialize_sys(context, Common)
initialize_os(context)
initialize_tempfile(context)
initialize_ssl(context)
initialize_ctypes(context)
def initialize_sys(context, Common):
if sys.version_info[0] >= 3:
sys.abiflags = Common.PYTHON_SUFFIX[len(Common.PYTHON_VERSION_SHORT):]
# argv defaults to not existing, which may crash some programs.
sys.argv = [""]
# executable defaults to "python" on 2.7, or "" on 3.6. But neither of these values (or
# None, which is mentioned in the documentation) will allow platform.platform() to run
# without crashing.
try:
sys.executable = os.readlink("/proc/{}/exe".format(os.getpid()))
except Exception:
# Can't be certain that /proc will work on all devices, so try to carry on.
traceback.print_exc()
sys.executable = ""
# Remove default paths (#5410).
invalid_paths = [p for p in sys.path
if not (exists(p) or p.startswith(importer.ASSET_PREFIX))]
for p in invalid_paths:
sys.path.remove(p)
def initialize_os(context):
# By default, os.path.expanduser("~") returns "/data", which is an unwritable directory.
# Make it return something more usable.
os.environ.setdefault("HOME", str(context.getFilesDir()))
def initialize_tempfile(context):
tmpdir = join(str(context.getCacheDir()), "chaquopy/tmp")
if not exists(tmpdir):
os.makedirs(tmpdir)
os.environ["TMPDIR"] = tmpdir
def initialize_ssl(context):
# OpenSSL actually does know the location of the system CA store on Android, but
# unfortunately there are multiple incompatible formats of that location, so we can't rely
# on it (https://blog.kylemanna.com/android/android-ca-certificates/).
os.environ["SSL_CERT_FILE"] = join(str(context.getFilesDir()), "chaquopy/cacert.pem")
# hashlib may already have been imported during bootstrap: reload it now that the the
# OpenSSL interface in `_hashlib` is on sys.path.
import hashlib
reload(hashlib)
def initialize_ctypes(context):
import ctypes.util
import sysconfig
# The standard implementation of find_library requires external tools, so will always fail
# on Android. I can't see any easy way of finding the absolute library pathname ourselves
# (there is no LD_LIBRARY_PATH on Android), but we can at least support the case where the
# user passes the return value of find_library to CDLL().
def find_library_override(name):
filename = "lib{}.so".format(name)
try:
ctypes.CDLL(filename)
except OSError:
return None
else:
return filename
ctypes.util.find_library = find_library_override
ctypes.pythonapi = ctypes.PyDLL(sysconfig.get_config_vars()["LDLIBRARY"])
| Python | 0.009032 |
2e5f5fc689ee55f32556be69dcbf0672ea7fdbed | change deprecation warning | district42/json_schema/schema.py | district42/json_schema/schema.py | import warnings
from copy import deepcopy
from ..errors import DeclarationError
from .types import (Any, AnyOf, Array, ArrayOf, Boolean, Enum, Null, Number,
Object, OneOf, SchemaType, String, Timestamp, Undefined)
class Schema:
def ref(self, schema):
return deepcopy(schema)
def from_native(self, value):
if value is None:
return self.null
datatype = type(value)
if datatype is bool:
return self.boolean(value)
elif datatype is int:
return self.integer(value)
elif datatype is float:
return self.float(value)
elif datatype is str:
return self.string(value)
elif datatype is list:
return self.array([self.from_native(elem) for elem in value])
elif datatype is dict:
return self.object({k: self.from_native(v) for k, v in value.items()})
elif datatype is tuple:
return self.enum(*value)
raise DeclarationError('Unknown type "{}"'.format(datatype))
@property
def null(self):
return Null()
@property
def boolean(self):
return Boolean()
@property
def number(self):
return Number()
@property
def integer(self):
return Number().integer
@property
def float(self):
return Number().float
@property
def string(self):
return String()
@property
def timestamp(self):
return Timestamp()
@property
def array(self):
return Array()
@property
def array_of(self):
message = 'schema.array_of is deprecated, use schema.array.of instead'
warnings.warn(message, DeprecationWarning, stacklevel=2)
return ArrayOf()
@property
def object(self):
return Object()
@property
def any(self):
return Any()
@property
def any_of(self):
return AnyOf()
@property
def one_of(self):
return OneOf()
@property
def enum(self):
return Enum()
@property
def undefined(self):
return Undefined()
| import warnings
from copy import deepcopy
from ..errors import DeclarationError
from .types import (Any, AnyOf, Array, ArrayOf, Boolean, Enum, Null, Number,
Object, OneOf, SchemaType, String, Timestamp, Undefined)
class Schema:
def ref(self, schema):
return deepcopy(schema)
def from_native(self, value):
if value is None:
return self.null
datatype = type(value)
if datatype is bool:
return self.boolean(value)
elif datatype is int:
return self.integer(value)
elif datatype is float:
return self.float(value)
elif datatype is str:
return self.string(value)
elif datatype is list:
return self.array([self.from_native(elem) for elem in value])
elif datatype is dict:
return self.object({k: self.from_native(v) for k, v in value.items()})
elif datatype is tuple:
return self.enum(*value)
raise DeclarationError('Unknown type "{}"'.format(datatype))
@property
def null(self):
return Null()
@property
def boolean(self):
return Boolean()
@property
def number(self):
return Number()
@property
def integer(self):
return Number().integer
@property
def float(self):
return Number().float
@property
def string(self):
return String()
@property
def timestamp(self):
return Timestamp()
@property
def array(self):
return Array()
@property
def array_of(self):
warnings.warn('deprecated', DeprecationWarning, stacklevel=2)
return ArrayOf()
@property
def object(self):
return Object()
@property
def any(self):
return Any()
@property
def any_of(self):
return AnyOf()
@property
def one_of(self):
return OneOf()
@property
def enum(self):
return Enum()
@property
def undefined(self):
return Undefined()
| Python | 0.000001 |
a0e1183d9da98dd9f79c496b055cab0bb2638532 | Update h_RNN | h_RNN/Mnist.py | h_RNN/Mnist.py | import os
import sys
root_path = os.path.abspath("../")
if root_path not in sys.path:
sys.path.append(root_path)
import time
import numpy as np
import tensorflow as tf
from h_RNN.RNN import RNNWrapper, Generator
from h_RNN.SpRNN import SparseRNN
from Util.Util import DataUtil
class MnistGenerator(Generator):
def __init__(self, im=None, om=None, one_hot=True):
super(MnistGenerator, self).__init__(im, om)
self._x, self._y = DataUtil.get_dataset("mnist", "../_Data/mnist.txt", quantized=True, one_hot=one_hot)
self._x = self._x.reshape(-1, 28, 28)
self._x_train, self._x_test = self._x[:1800], self._x[1800:]
self._y_train, self._y_test = self._y[:1800], self._y[1800:]
def gen(self, batch, test=False, **kwargs):
if batch == 0:
if test:
return self._x_test, self._y_test
return self._x_train, self._y_train
batch = np.random.choice(len(self._x_train), batch)
return self._x_train[batch], self._y_train[batch]
if __name__ == '__main__':
n_history = 3
print("=" * 60, "\n" + "Normal LSTM", "\n" + "-" * 60)
generator = MnistGenerator()
t = time.time()
tf.reset_default_graph()
rnn = RNNWrapper()
rnn.fit(28, 10, generator, n_history=n_history, epoch=10, squeeze=True)
print("Time Cost: {}".format(time.time() - t))
rnn.draw_err_logs()
print("=" * 60, "\n" + "Sparse LSTM" + "\n" + "-" * 60)
generator = MnistGenerator(one_hot=False)
t = time.time()
tf.reset_default_graph()
rnn = SparseRNN()
rnn.fit(28, 10, generator, n_history=n_history, epoch=10)
print("Time Cost: {}".format(time.time() - t))
rnn.draw_err_logs()
| import time
import tflearn
import numpy as np
import tensorflow as tf
from h_RNN.RNN import RNNWrapper, Generator
from h_RNN.SpRNN import SparseRNN
from Util.Util import DataUtil
class MnistGenerator(Generator):
def __init__(self, im=None, om=None, one_hot=True):
super(MnistGenerator, self).__init__(im, om)
self._x, self._y = DataUtil.get_dataset("mnist", "../_Data/mnist.txt", quantized=True, one_hot=one_hot)
self._x = self._x.reshape(-1, 28, 28)
self._x_train, self._x_test = self._x[:1800], self._x[1800:]
self._y_train, self._y_test = self._y[:1800], self._y[1800:]
def gen(self, batch, test=False, **kwargs):
if batch == 0:
if test:
return self._x_test, self._y_test
return self._x_train, self._y_train
batch = np.random.choice(len(self._x_train), batch)
return self._x_train[batch], self._y_train[batch]
if __name__ == '__main__':
n_history = 3
print("=" * 60, "\n" + "Normal LSTM", "\n" + "-" * 60)
generator = MnistGenerator()
t = time.time()
tf.reset_default_graph()
rnn = RNNWrapper()
rnn.fit(28, 10, generator, n_history=n_history, epoch=10, squeeze=True)
print("Time Cost: {}".format(time.time() - t))
rnn.draw_err_logs()
print("=" * 60, "\n" + "Sparse LSTM" + "\n" + "-" * 60)
generator = MnistGenerator(one_hot=False)
t = time.time()
tf.reset_default_graph()
rnn = SparseRNN()
rnn.fit(28, 10, generator, n_history=n_history, epoch=10)
print("Time Cost: {}".format(time.time() - t))
rnn.draw_err_logs()
print("=" * 60, "\n" + "Tflearn", "\n" + "-" * 60)
generator = MnistGenerator()
t = time.time()
tf.reset_default_graph()
net = tflearn.input_data(shape=[None, 28, 28])
net = tf.concat(tflearn.lstm(net, 128, return_seq=True)[-n_history:], axis=1)
net = tflearn.fully_connected(net, 10, activation='softmax')
net = tflearn.regression(net, optimizer='adam', batch_size=64,
loss='categorical_crossentropy')
model = tflearn.DNN(net, tensorboard_verbose=0)
model.fit(*generator.gen(0), n_epoch=10, validation_set=generator.gen(0, True), show_metric=True)
print("Time Cost: {}".format(time.time() - t))
| Python | 0.000001 |
b9cc76d410ca034918c615402e3fbe82b226859e | Add public address validation test. | path_and_address/tests/test_validation.py | path_and_address/tests/test_validation.py | from itertools import product
from ..validation import valid_address, valid_hostname, valid_port
def _join(host_and_port):
return '%s:%s' % host_and_port
def _join_all(hostnames, ports):
return map(_join, product(hostnames, ports))
hostnames = [
'0.0.0.0',
'127.0.0.1',
'localhost',
'example.com',
'example.org',
]
invalid_hostnames = [
'http://example.com',
'http://example.com:8080',
'example.com/',
'example.com:8080/',
'example.com:0',
'0.0.0.0:0',
]
ports = [1, 80, 5000, 8080, 65535]
invalid_ports = [None, -80, -1, 0, 65536, 75000,
float('nan'), '', 'nan', 'hello', 'a string']
addresses = hostnames + ports + _join_all(hostnames, ports)
invalid_addresses = invalid_hostnames \
+ _join_all(hostnames, invalid_ports) \
+ _join_all(invalid_hostnames, ports) \
+ _join_all(invalid_hostnames, invalid_ports)
def test_valid_address():
for address in addresses:
assert valid_address(address), 'Invalid address, expected to be valid: ' + repr(address)
for address in invalid_addresses:
assert not valid_address(address), 'Valid address, expected to be invalid: ' + repr(address)
def test_valid_hostname():
for hostname in hostnames:
assert valid_hostname(hostname), 'Invalid hostname, expected to be valid: ' + repr(hostname)
for hostname in invalid_hostnames:
assert not valid_hostname(hostname), 'Valid hostname, expected to be invalid: ' + repr(hostname)
def test_valid_port():
for port in ports:
assert valid_port(port), 'Invalid port, expected to be valid: ' + repr(port)
for port in invalid_ports:
assert not valid_port(port), 'Valid port, expected to be invalid: ' + repr(port)
| from itertools import product
from ..validation import valid_address, valid_hostname, valid_port
def _join(host_and_port):
return '%s:%s' % host_and_port
def _join_all(hostnames, ports):
return map(_join, product(hostnames, ports))
hostnames = [
'127.0.0.1',
'localhost',
'example.com',
'example.org',
]
invalid_hostnames = [
'http://example.com',
'http://example.com:8080',
'example.com/',
'example.com:8080/',
'example.com:0',
'localhost:0',
'127.0.0.1:0',
]
ports = [1, 80, 5000, 8080, 65535]
invalid_ports = [None, -80, -1, 0, 65536, 75000,
float('nan'), '', 'nan', 'hello', 'a string']
addresses = hostnames + ports + _join_all(hostnames, ports)
invalid_addresses = invalid_hostnames \
+ _join_all(hostnames, invalid_ports) \
+ _join_all(invalid_hostnames, ports) \
+ _join_all(invalid_hostnames, invalid_ports)
def test_valid_address():
for address in addresses:
assert valid_address(address), 'Invalid address, expected to be valid: ' + repr(address)
for address in invalid_addresses:
assert not valid_address(address), 'Valid address, expected to be invalid: ' + repr(address)
def test_valid_hostname():
for hostname in hostnames:
assert valid_hostname(hostname), 'Invalid hostname, expected to be valid: ' + repr(hostname)
for hostname in invalid_hostnames:
assert not valid_hostname(hostname), 'Valid hostname, expected to be invalid: ' + repr(hostname)
def test_valid_port():
for port in ports:
assert valid_port(port), 'Invalid port, expected to be valid: ' + repr(port)
for port in invalid_ports:
assert not valid_port(port), 'Valid port, expected to be invalid: ' + repr(port)
| Python | 0 |
fd7454610f4cffcfc8c289539b3824f023fe973f | change cruise input dim | modules/tools/prediction/mlp_train/common/configure.py | modules/tools/prediction/mlp_train/common/configure.py | #!/usr/bin/env python
###############################################################################
# Copyright 2018 The Apollo Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
###############################################################################
parameters = {
'mlp': {
'train_data_rate': 0.8,
'size_obstacle_feature': 22,
'size_lane_sequence_feature': 40,
'dim_input': 22 + 40,
'dim_hidden_1': 30,
'dim_hidden_2': 15,
'dim_output': 1
},
'cruise_mlp': {
'dim_input': 23 + 8 + 180,
'dim_hidden_1': 50,
'dim_hidden_2': 18,
'dim_output': 2
},
'junction_mlp': {
'dim_input': 3 + 60,
'dim_hidden_1': 30,
'dim_hidden_2': 15,
'dim_output': 12
},
'feature': {
'threshold_label_time_delta': 1.0,
'prediction_label_timeframe': 3.0,
'maximum_maneuver_finish_time': 6.0,
# Lane change is defined to be finished if the ratio of deviation
# from center-line to the lane width is within this: (must be < 0.5)
'lane_change_finish_condition': 0.1
}
}
labels = {'go_false': 0, 'go_true': 1, 'cutin_false': -1, 'cutin_true': 2}
| #!/usr/bin/env python
###############################################################################
# Copyright 2018 The Apollo Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
###############################################################################
parameters = {
'mlp': {
'train_data_rate': 0.8,
'size_obstacle_feature': 22,
'size_lane_sequence_feature': 40,
'dim_input': 22 + 40,
'dim_hidden_1': 30,
'dim_hidden_2': 15,
'dim_output': 1
},
'cruise_mlp': {
'dim_input': 23 + 180,
'dim_hidden_1': 50,
'dim_hidden_2': 18,
'dim_output': 2
},
'junction_mlp': {
'dim_input': 3 + 60,
'dim_hidden_1': 30,
'dim_hidden_2': 15,
'dim_output': 12
},
'feature': {
'threshold_label_time_delta': 1.0,
'prediction_label_timeframe': 3.0,
'maximum_maneuver_finish_time': 6.0,
# Lane change is defined to be finished if the ratio of deviation
# from center-line to the lane width is within this: (must be < 0.5)
'lane_change_finish_condition': 0.1
}
}
labels = {'go_false': 0, 'go_true': 1, 'cutin_false': -1, 'cutin_true': 2}
| Python | 0.99845 |
8092efdd0bf5f5ca8d5498cf679b019920c00bfd | format with black | plugins/feeds/public/virustotal_apiv3.py | plugins/feeds/public/virustotal_apiv3.py | import logging
import re
import json
from datetime import timedelta, datetime
from core import Feed
from core.config.config import yeti_config
from core.observables import Hash, File
# Variable
VTAPI = yeti_config.get("vt", "key")
headers = {"x-apikey": VTAPI}
limit = 10
params = {"limit": limit}
regex = "[A-Fa-f0-9]{64}" # Find SHA256
class VirusTotalPriv(Feed):
default_values = {
"frequency": timedelta(minutes=5),
"name": "VirusTotalHuntingV3",
"source": "https://www.virustotal.com/api/v3/intelligence/hunting_notifications",
"description": "Feed of hunting for VirusTotal API v3",
}
settings = {
"vt_url_hunting_v3": {
"name": "VT Url Hunting v3",
"description": "Hunting feed for VT API v3",
}
}
def update(self):
if VTAPI:
self.source = (
"https://www.virustotal.com/api/v3/intelligence/hunting_notifications"
)
for index, item in self.update_json(
params=params, headers=headers, key="data"
):
self.analyze(item)
else:
logging.error("Your VT API key is not set in the config file!")
def analyze(self, item):
tags = []
context = {"source": self.name}
# Parse value of interest
subject = item["attributes"]["rule_name"]
date = item["attributes"]["date"]
tags2 = item["attributes"]["tags"]
sha2 = re.search(regex, str(tags2)).group()
date_string = datetime.utcfromtimestamp(date).strftime("%d/%m/%Y %H:%M:%S")
tags2.remove(sha2)
# Update to Yeti DB
f_vt3 = File.get_or_create(value="FILE:{}".format(sha2))
sha256 = Hash.get_or_create(value=sha2)
f_vt3.active_link_to(sha256, "sha256", self.name)
tags.append(tags2)
context["date_added"] = date_string
context["snippet"] = item["attributes"]["snippet"]
# context['source_country'] = item["attributes"]['source_country']
context["raw"] = item
f_vt3.tag(str(tags))
f_vt3.add_context(context)
| import logging
import re
import json
from datetime import timedelta, datetime
from core import Feed
from core.config.config import yeti_config
from core.observables import Hash, File
# Variable
VTAPI = yeti_config.get('vt', 'key')
headers = {"x-apikey": VTAPI}
limit = 10
params = {'limit': limit}
regex = "[A-Fa-f0-9]{64}" # Find SHA256
class VirusTotalPriv(Feed):
default_values = {
"frequency": timedelta(minutes=5),
"name": "VirusTotalHuntingV3",
"source": "https://www.virustotal.com/api/v3/intelligence/hunting_notifications",
"description": "Feed of hunting for VirusTotal API v3",
}
settings = {
'vt_url_hunting_v3': {
'name': 'VT Url Hunting v3',
'description': 'Hunting feed for VT API v3'
}
}
def update(self):
if VTAPI:
self.source = "https://www.virustotal.com/api/v3/intelligence/hunting_notifications"
for index, item in self.update_json(params=params, headers=headers, key="data"):
self.analyze(item)
else:
logging.error("Your VT API key is not set in the config file!")
def analyze(self, item):
tags = []
context = {'source': self.name}
# Parse value of interest
subject = item["attributes"]["rule_name"]
date = item["attributes"]["date"]
tags2 = item["attributes"]["tags"]
sha2 = re.search(regex, str(tags2)).group()
date_string = datetime.utcfromtimestamp(date).strftime('%d/%m/%Y %H:%M:%S')
tags2.remove(sha2)
# Update to Yeti DB
f_vt3 = File.get_or_create(value='FILE:{}'.format(sha2))
sha256 = Hash.get_or_create(value=sha2)
f_vt3.active_link_to(sha256, 'sha256', self.name)
tags.append(tags2)
context['date_added'] = date_string
context['snippet'] = item["attributes"]['snippet']
# context['source_country'] = item["attributes"]['source_country']
context['raw'] = item
f_vt3.tag(str(tags))
f_vt3.add_context(context)
| Python | 0.000001 |
aae5146bd672fdec9a055666c9742acbc1dddd5b | remove obsolete comment | planetstack/core/dashboard/views/shell.py | planetstack/core/dashboard/views/shell.py | import datetime
import os
import sys
import time
import json
from django.http import HttpResponse, HttpResponseServerError, HttpResponseForbidden
from django.views.generic import TemplateView, View
from core.models import *
from django.forms.models import model_to_dict
def ensure_serializable(d):
d2={}
for (k,v) in d.items():
# datetime is not json serializable
if isinstance(v, datetime.datetime):
d2[k] = time.mktime(v.timetuple())
elif v.__class__.__name__ == "Geoposition":
pass
else:
d2[k] = v
return d2
def sliver_to_dict(sliver):
d = model_to_dict(sliver)
d["slice_id"] = sliver.slice.id
d["node_id"] = sliver.node.id
return d
def slice_to_dict(slice):
d = model_to_dict(slice)
d["slivers"] = [sliver_to_dict(x) for x in slice.slivers]
return d
def node_to_dict(node):
d = model_to_dict(node)
d["slivers"] = []
class OpenCloudData:
def __init__(self, user):
self.loadAll()
def loadAll(self):
self.allNodes = list(Node.objects.all())
self.allSlices = list(Slice.objects.all())
self.allSlivers = list(Sliver.objects.all())
self.allSites = list(Site.objects.all())
self.site_id = {}
for site in self.allSites:
d = model_to_dict(site)
d["node_ids"] = []
d["slice_ids"] = []
self.site_id[site.id] = ensure_serializable(d)
self.node_id = {}
for node in self.allNodes:
d = model_to_dict(node)
d["sliver_ids"] = []
self.node_id[node.id] = ensure_serializable(d)
self.site_id[node.site_id]["node_ids"].append(node.id)
self.slice_id = {}
for slice in self.allSlices:
d = model_to_dict(slice)
d["sliver_ids"] = []
self.slice_id[slice.id] = ensure_serializable(d)
self.site_id[slice.site_id]["slice_ids"].append(site.id)
print self.slice_id.keys()
self.sliver_id = {}
for sliver in self.allSlivers:
self.sliver_id[sliver.id] = model_to_dict(sliver)
self.slice_id[sliver.slice_id]["sliver_ids"].append(sliver.id)
self.node_id[sliver.node_id]["sliver_ids"].append(sliver.id)
def get_opencloud_data(self):
return {"slices": self.slice_id.values(),
"slivers": self.sliver_id.values(),
"nodes": self.node_id.values(),
"sites": self.site_id.values()}
class ShellDataView(View):
url = r'^shelldata/'
def get(self, request, **kwargs):
result = OpenCloudData(request.user).get_opencloud_data()
return HttpResponse(json.dumps(result), mimetype='application/json')
| # /opt/planetstack/core/dashboard/views/helloworld.py
import datetime
import os
import sys
import time
import json
from django.http import HttpResponse, HttpResponseServerError, HttpResponseForbidden
from django.views.generic import TemplateView, View
from core.models import *
from django.forms.models import model_to_dict
def ensure_serializable(d):
d2={}
for (k,v) in d.items():
# datetime is not json serializable
if isinstance(v, datetime.datetime):
d2[k] = time.mktime(v.timetuple())
elif v.__class__.__name__ == "Geoposition":
pass
else:
d2[k] = v
return d2
def sliver_to_dict(sliver):
d = model_to_dict(sliver)
d["slice_id"] = sliver.slice.id
d["node_id"] = sliver.node.id
return d
def slice_to_dict(slice):
d = model_to_dict(slice)
d["slivers"] = [sliver_to_dict(x) for x in slice.slivers]
return d
def node_to_dict(node):
d = model_to_dict(node)
d["slivers"] = []
class OpenCloudData:
def __init__(self, user):
self.loadAll()
def loadAll(self):
self.allNodes = list(Node.objects.all())
self.allSlices = list(Slice.objects.all())
self.allSlivers = list(Sliver.objects.all())
self.allSites = list(Site.objects.all())
self.site_id = {}
for site in self.allSites:
d = model_to_dict(site)
d["node_ids"] = []
d["slice_ids"] = []
self.site_id[site.id] = ensure_serializable(d)
self.node_id = {}
for node in self.allNodes:
d = model_to_dict(node)
d["sliver_ids"] = []
self.node_id[node.id] = ensure_serializable(d)
self.site_id[node.site_id]["node_ids"].append(node.id)
self.slice_id = {}
for slice in self.allSlices:
d = model_to_dict(slice)
d["sliver_ids"] = []
self.slice_id[slice.id] = ensure_serializable(d)
self.site_id[slice.site_id]["slice_ids"].append(site.id)
print self.slice_id.keys()
self.sliver_id = {}
for sliver in self.allSlivers:
self.sliver_id[sliver.id] = model_to_dict(sliver)
self.slice_id[sliver.slice_id]["sliver_ids"].append(sliver.id)
self.node_id[sliver.node_id]["sliver_ids"].append(sliver.id)
def get_opencloud_data(self):
return {"slices": self.slice_id.values(),
"slivers": self.sliver_id.values(),
"nodes": self.node_id.values(),
"sites": self.site_id.values()}
class ShellDataView(View):
url = r'^shelldata/'
def get(self, request, **kwargs):
result = OpenCloudData(request.user).get_opencloud_data()
return HttpResponse(json.dumps(result), mimetype='application/json')
| Python | 0 |
d60b460928c55c544b18c57c0eb697ae88fde9e0 | Make masked fill values into nan before further processing to avoid issues with precision leading to different behaviours. (#632) | lib/improver/ensemble_calibration/ensemble_calibration_utilities.py | lib/improver/ensemble_calibration/ensemble_calibration_utilities.py | # -*- coding: utf-8 -*-
# -----------------------------------------------------------------------------
# (C) British Crown Copyright 2017-2018 Met Office.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# * Redistributions of source code must retain the above copyright notice, this
# list of conditions and the following disclaimer.
#
# * Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
#
# * Neither the name of the copyright holder nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
# ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
# SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
"""
This module defines all the utilities used by the "plugins"
specific for ensemble calibration.
"""
import numpy as np
import iris
def convert_cube_data_to_2d(
forecast, coord="realization", transpose=True):
"""
Function to convert data from a N-dimensional cube into a 2d
numpy array. The result can be transposed, if required.
Args:
forecast (iris.cube.Cube):
N-dimensional cube to be reshaped.
coord (string):
The data will be flattened along this coordinate.
transpose (boolean):
If True, the resulting flattened data is transposed.
This will transpose a 2d array of the format [:, coord]
to [coord, :].
If False, the resulting flattened data is not transposed.
This will result in a 2d array of format [:, coord].
Returns:
forecast_data (numpy.array):
Reshaped 2d array.
"""
forecast_data = []
if np.ma.is_masked(forecast.data):
forecast.data = np.ma.filled(forecast.data, np.nan)
for coord_slice in forecast.slices_over(coord):
forecast_data.append(coord_slice.data.flatten())
if transpose:
forecast_data = np.asarray(forecast_data).T
return np.array(forecast_data)
def check_predictor_of_mean_flag(predictor_of_mean_flag):
"""
Check the predictor_of_mean_flag at the start of the
estimate_coefficients_for_ngr method, to avoid having to check
and raise an error later.
Args:
predictor_of_mean_flag (string):
String to specify the input to calculate the calibrated mean.
Currently the ensemble mean ("mean") and the ensemble realizations
("realizations") are supported as the predictors.
"""
if predictor_of_mean_flag.lower() not in ["mean", "realizations"]:
msg = ("The requested value for the predictor_of_mean_flag {}"
"is not an accepted value."
"Accepted values are 'mean' or 'realizations'").format(
predictor_of_mean_flag.lower())
raise ValueError(msg)
| # -*- coding: utf-8 -*-
# -----------------------------------------------------------------------------
# (C) British Crown Copyright 2017-2018 Met Office.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# * Redistributions of source code must retain the above copyright notice, this
# list of conditions and the following disclaimer.
#
# * Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
#
# * Neither the name of the copyright holder nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
# ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
# SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
"""
This module defines all the utilities used by the "plugins"
specific for ensemble calibration.
"""
import numpy as np
import iris
def convert_cube_data_to_2d(
forecast, coord="realization", transpose=True):
"""
Function to convert data from a N-dimensional cube into a 2d
numpy array. The result can be transposed, if required.
Args:
forecast (iris.cube.Cube):
N-dimensional cube to be reshaped.
coord (string):
The data will be flattened along this coordinate.
transpose (boolean):
If True, the resulting flattened data is transposed.
This will transpose a 2d array of the format [:, coord]
to [coord, :].
If False, the resulting flattened data is not transposed.
This will result in a 2d array of format [:, coord].
Returns:
forecast_data (numpy.array):
Reshaped 2d array.
"""
forecast_data = []
for coord_slice in forecast.slices_over(coord):
forecast_data.append(coord_slice.data.flatten())
if transpose:
forecast_data = np.asarray(forecast_data).T
return np.array(forecast_data)
def check_predictor_of_mean_flag(predictor_of_mean_flag):
"""
Check the predictor_of_mean_flag at the start of the
estimate_coefficients_for_ngr method, to avoid having to check
and raise an error later.
Args:
predictor_of_mean_flag (string):
String to specify the input to calculate the calibrated mean.
Currently the ensemble mean ("mean") and the ensemble realizations
("realizations") are supported as the predictors.
"""
if predictor_of_mean_flag.lower() not in ["mean", "realizations"]:
msg = ("The requested value for the predictor_of_mean_flag {}"
"is not an accepted value."
"Accepted values are 'mean' or 'realizations'").format(
predictor_of_mean_flag.lower())
raise ValueError(msg)
| Python | 0 |
51a9a02ccf4a133818f14f3ff6e864c1e041ec37 | Update event_chat.py | ark/events/event_chat.py | ark/events/event_chat.py | from ark.chat_commands import ChatCommands
from ark.cli import *
from ark.database import Db
from ark.rcon import Rcon
class EventChat(object):
@classmethod
def output_chat_from_server(cls,text,line):
out(line)
@classmethod
def parse_chat_command(cls,steam_name,player_name,text,line):
ChatCommands.parse(steam_name,player_name,text)
@classmethod
def update_player_name(cls,steam_name,player_name,text,line):
steam_id = Rcon.find_online_steam_id(steam_name)
if steam_id:
Db.update_player(steam_id, steam_name=steam_name, name=player_name)
@classmethod
def store_chat(cls,steam_name,player_name,text,line):
player = Db.find_player(steam_name=player_name)
player_id = player.id if player is not None else None
Db.create_chat_entry(player_id,player_name,text)
@classmethod
def output_chat(cls,steam_name,player_name,text,line):
out(line)
@classmethod
def filter_chat(cls,steam_name,player_name,text,line):
words=text.split()
res=None
for word in words:
if res is None:
res=Db.check_word(word)
if res:
player=Db.find_player(steam_name=steam_name)
steamid=player.steam_id if player is not None else None
if steamid is not None:
"""Rcon.kick_player(steamid)"""
"""msg=Lang.get('chat_filter_player_kicked').format(player_name,res)"""
msg=Lang.get('chat_filter_forbidden_word').format(player_name,res)
Rcon.broadcast(msg, rcon.response_callback_response_only)
| from ark.chat_commands import ChatCommands
from ark.cli import *
from ark.database import Db
from ark.rcon import Rcon
class EventChat(object):
@classmethod
def output_chat_from_server(cls,text,line):
out(line)
@classmethod
def parse_chat_command(cls,steam_name,player_name,text,line):
ChatCommands.parse(steam_name,player_name,text)
@classmethod
def update_player_name(cls,steam_name,player_name,text,line):
steam_id = Rcon.find_online_steam_id(steam_name)
if steam_id:
Db.update_player(steam_id, steam_name=steam_name, name=player_name)
@classmethod
def store_chat(cls,steam_name,player_name,text,line):
player = Db.find_player(steam_name=player_name)
player_id = player.id if player is not None else None
Db.create_chat_entry(player_id,player_name,text)
@classmethod
def output_chat(cls,steam_name,player_name,text,line):
out(line)
| Python | 0.000002 |
e236b7d34cdf156cc16ba8c95b0526785e717898 | update scenario | enquiry/tests/scenario.py | enquiry/tests/scenario.py | from datetime import datetime
from dateutil.relativedelta import relativedelta
from enquiry.tests.model_maker import make_enquiry
def default_scenario_enquiry():
make_enquiry(
'Rick',
'Can I buy some hay?',
'',
'07840 538 357',
)
make_enquiry(
'Ryan',
(
'Can I see some of the fencing you have done?\n'
"I would like to see some of your standard agricultural "
"fencing on a local dairy farm. "
"I like this fencing: http://en.wikipedia.org/wiki/Fencing"
),
'test@pkimber.net',
'01234 567 890',
email_sent=datetime.now() + relativedelta(days=1),
)
| from enquiry.tests.model_maker import make_enquiry
def default_scenario_enquiry():
make_enquiry(
'Rick',
'Can I buy some hay?',
'',
'07840 538 357',
)
make_enquiry(
'Ryan',
(
'Can I see some of the fencing you have done?\n'
"I would like to see some of your standard agricultural "
"fencing on a local dairy farm. "
"I like this fencing: http://en.wikipedia.org/wiki/Fencing"
),
'test@pkimber.net',
'01234 567 890',
)
| Python | 0.000001 |
ecceb10500a395ce2cb79d913ab43187921468be | move fn towards dictionary comprehension | iatidataquality/dqparsetests.py | iatidataquality/dqparsetests.py | import re
import sys
import itertools
from functools import partial
import iatidataquality.models as models
class TestSyntaxError(Exception): pass
comment = re.compile('#')
blank = re.compile('^$')
def ignore_line(line):
return bool(comment.match(line) or blank.match(line))
def test_functions():
mappings = []
def add(regex):
def append_to_mappings(fn):
mappings.append((re.compile(regex),fn))
return fn
return append_to_mappings
def add_partial(regex):
def append_to_mappings(fn):
def partial_fn(groups):
return partial(fn, groups=groups)
mappings.append((re.compile(regex), partial_fn))
return fn
return append_to_mappings
@add('(\S*) is an? (.*)\?')
def is_an(groups):
if groups[1] == 'iso date':
return None
elif groups[1] == 'integer':
def int_check(x):
try:
int(x)
return True
except ValueError:
return False
def is_an_integer(activity):
return reduce(lambda x,y: x and y,
map(lambda x: int_check(x),
activity.xpath(groups[0])),
False)
return is_an_integer
@add_partial('(\S*) has more than (\S*) characters\?')
def text_chars(activity, groups):
return bool(reduce(lambda x,y: x or y,
map(lambda x: len(x)>int(groups[1]),
activity.xpath(groups[0])),
False))
def rm_blank(alist):
return filter(lambda x: x!='', alist)
@add_partial('(\S*) sum to (\S*)\?')
def sum(activity, groups):
return (reduce(lambda x,y: float(x)+float(y),
rm_blank(activity.xpath(groups[0])),
0)
== float(groups[1]))
@add_partial('(\S*) exists (\S*) times?\?')
def exist_times(activity, groups):
return len(rm_blank(activity.xpath(groups[0]))) == int(groups[1])
@add_partial('(\S*) exists more than (\S*) times?\?')
def exist_times(activity, groups):
return len(rm_blank(activity.xpath(groups[0]))) > int(groups[1])
def exist_check(activity, xpath):
return bool(rm_blank(activity.xpath(xpath)))
@add_partial('only one of (\S*) or (\S*) exists\?')
def exist_xor(activity, groups):
return (exist_check(activity, groups[0]) !=
exist_check(activity, groups[1]))
@add_partial('(\S*) or (\S*) exists\?')
def exist_or(activity, groups):
return (exist_check(activity, groups[0]) or
exist_check(activity, groups[1]))
@add_partial('(\S*) exists\?')
def exist(activity, groups):
return exist_check(activity, groups[0])
@add('(.*)')
def fail(line):
return None
def get_active_tests():
for test in models.Test.query.filter(models.Test.active == True).all():
yield test
def get_mappings(ms, line):
for regex, lam in ms:
yield regex.match(line), lam
first_true = lambda tupl: bool(tupl.__getitem__(0))
test_functions = {}
tests = get_active_tests()
tests = itertools.ifilter(lambda test: test.test_level == 1, tests)
tests = itertools.ifilter(lambda test: not ignore_line(test.name), tests)
def function_for_test(test):
line = test.name
test_id = test.id
match_data = get_mappings(mappings, line)
matching_mappings = itertools.ifilter(first_true, match_data)
try:
m, lam = matching_mappings.next()
except StopIteration:
raise TestSyntaxError(line)
f = lam(m.groups())
return test_id, f
for test in tests:
test_id, f = function_for_test(test)
test_functions[test_id] = f
return test_functions
| import re
import sys
import itertools
from functools import partial
import iatidataquality.models as models
class TestSyntaxError(Exception): pass
comment = re.compile('#')
blank = re.compile('^$')
def ignore_line(line):
return bool(comment.match(line) or blank.match(line))
def test_functions():
mappings = []
def add(regex):
def append_to_mappings(fn):
mappings.append((re.compile(regex),fn))
return fn
return append_to_mappings
def add_partial(regex):
def append_to_mappings(fn):
def partial_fn(groups):
return partial(fn, groups=groups)
mappings.append((re.compile(regex), partial_fn))
return fn
return append_to_mappings
@add('(\S*) is an? (.*)\?')
def is_an(groups):
if groups[1] == 'iso date':
return None
elif groups[1] == 'integer':
def int_check(x):
try:
int(x)
return True
except ValueError:
return False
def is_an_integer(activity):
return reduce(lambda x,y: x and y,
map(lambda x: int_check(x),
activity.xpath(groups[0])),
False)
return is_an_integer
@add_partial('(\S*) has more than (\S*) characters\?')
def text_chars(activity, groups):
return bool(reduce(lambda x,y: x or y,
map(lambda x: len(x)>int(groups[1]),
activity.xpath(groups[0])),
False))
def rm_blank(alist):
return filter(lambda x: x!='', alist)
@add_partial('(\S*) sum to (\S*)\?')
def sum(activity, groups):
return (reduce(lambda x,y: float(x)+float(y),
rm_blank(activity.xpath(groups[0])),
0)
== float(groups[1]))
@add_partial('(\S*) exists (\S*) times?\?')
def exist_times(activity, groups):
return len(rm_blank(activity.xpath(groups[0]))) == int(groups[1])
@add_partial('(\S*) exists more than (\S*) times?\?')
def exist_times(activity, groups):
return len(rm_blank(activity.xpath(groups[0]))) > int(groups[1])
def exist_check(activity, xpath):
return bool(rm_blank(activity.xpath(xpath)))
@add_partial('only one of (\S*) or (\S*) exists\?')
def exist_xor(activity, groups):
return (exist_check(activity, groups[0]) !=
exist_check(activity, groups[1]))
@add_partial('(\S*) or (\S*) exists\?')
def exist_or(activity, groups):
return (exist_check(activity, groups[0]) or
exist_check(activity, groups[1]))
@add_partial('(\S*) exists\?')
def exist(activity, groups):
return exist_check(activity, groups[0])
@add('(.*)')
def fail(line):
return None
def get_active_tests():
for test in models.Test.query.filter(models.Test.active == True).all():
yield test
def get_mappings(ms, line):
for regex, lam in ms:
yield regex.match(line), lam
first_true = lambda tupl: bool(tupl.__getitem__(0))
test_functions = {}
tests = get_active_tests()
tests = itertools.ifilter(lambda test: test.test_level == 1, tests)
tests = itertools.ifilter(lambda test: not ignore_line(test.name), tests)
for test in tests:
line = test.name
test_id = test.id
match_data = get_mappings(mappings, line)
matching_mappings = itertools.ifilter(first_true, match_data)
try:
m, lam = matching_mappings.next()
except StopIteration:
raise TestSyntaxError(line)
f = lam(m.groups())
test_functions[test_id] = f
return test_functions
| Python | 0.000018 |
ddcd166b72ef96296a884f63f626c3ffd236059f | make tests pass without LMS settings | common/djangoapps/status/tests.py | common/djangoapps/status/tests.py | from django.conf import settings
from django.test import TestCase
from mock import Mock
import os
from override_settings import override_settings
from tempfile import NamedTemporaryFile
from status import get_site_status_msg
# Get a name where we can put test files
TMP_FILE = NamedTemporaryFile(delete=False)
TMP_NAME = TMP_FILE.name
# Close it--we just want the path.
TMP_FILE.close()
@override_settings(STATUS_MESSAGE_PATH=TMP_NAME)
class TestStatus(TestCase):
"""Test that the get_site_status_msg function does the right thing"""
no_file = None
invalid_json = """{
"global" : "Hello, Globe",
}"""
global_only = """{
"global" : "Hello, Globe"
}"""
toy_only = """{
"edX/toy/2012_Fall" : "A toy story"
}"""
global_and_toy = """{
"global" : "Hello, Globe",
"edX/toy/2012_Fall" : "A toy story"
}"""
# json to use, expected results for course=None (e.g. homepage),
# for toy course, for full course. Note that get_site_status_msg
# is supposed to return global message even if course=None. The
# template just happens to not display it outside the courseware
# at the moment...
checks = [
(no_file, None, None, None),
(invalid_json, None, None, None),
(global_only, "Hello, Globe", "Hello, Globe", "Hello, Globe"),
(toy_only, None, "A toy story", None),
(global_and_toy, "Hello, Globe", "Hello, Globe<br>A toy story", "Hello, Globe"),
]
def setUp(self):
"""
Mock courses, since we don't have to have full django
settings (common tests run without the lms settings imported)
"""
self.full = Mock()
self.full.id = 'edX/full/2012_Fall'
self.toy = Mock()
self.toy.id = 'edX/toy/2012_Fall'
def create_status_file(self, contents):
"""
Write contents to settings.STATUS_MESSAGE_PATH.
"""
with open(settings.STATUS_MESSAGE_PATH, 'w') as f:
f.write(contents)
def remove_status_file(self):
"""Delete the status file if it exists"""
if os.path.exists(settings.STATUS_MESSAGE_PATH):
os.remove(settings.STATUS_MESSAGE_PATH)
def tearDown(self):
self.remove_status_file()
def test_get_site_status_msg(self):
"""run the tests"""
for (json_str, exp_none, exp_toy, exp_full) in self.checks:
self.remove_status_file()
if json_str:
self.create_status_file(json_str)
print "checking results for {0}".format(json_str)
print "course=None:"
self.assertEqual(get_site_status_msg(None), exp_none)
print "course=toy:"
self.assertEqual(get_site_status_msg(self.toy), exp_toy)
print "course=full:"
self.assertEqual(get_site_status_msg(self.full), exp_full)
| from django.conf import settings
from django.test import TestCase
from tempfile import NamedTemporaryFile
import os
from override_settings import override_settings
from status import get_site_status_msg
import xmodule.modulestore.django
from xmodule.modulestore.django import modulestore
from xmodule.modulestore import Location
from xmodule.modulestore.xml_importer import import_from_xml
class TestStatus(TestCase):
"""Test that the get_site_status_msg function does the right thing"""
no_file = None
invalid_json = """{
"global" : "Hello, Globe",
}"""
global_only = """{
"global" : "Hello, Globe"
}"""
toy_only = """{
"edX/toy/2012_Fall" : "A toy story"
}"""
global_and_toy = """{
"global" : "Hello, Globe",
"edX/toy/2012_Fall" : "A toy story"
}"""
# json to use, expected results for course=None (e.g. homepage),
# for toy course, for full course. Note that get_site_status_msg
# is supposed to return global message even if course=None. The
# template just happens to not display it outside the courseware
# at the moment...
checks = [
(no_file, None, None, None),
(invalid_json, None, None, None),
(global_only, "Hello, Globe", "Hello, Globe", "Hello, Globe"),
(toy_only, None, "A toy story", None),
(global_and_toy, "Hello, Globe", "Hello, Globe<br>A toy story", "Hello, Globe"),
]
def setUp(self):
xmodule.modulestore.django._MODULESTORES = {}
courses = modulestore().get_courses()
def find_course(course_id):
"""Assumes the course is present"""
return [c for c in courses if c.id==course_id][0]
self.full = find_course("edX/full/6.002_Spring_2012")
self.toy = find_course("edX/toy/2012_Fall")
def create_status_file(self, contents):
"""
Write contents to settings.STATUS_MESSAGE_PATH.
"""
with open(settings.STATUS_MESSAGE_PATH, 'w') as f:
f.write(contents)
def remove_status_file(self):
"""Delete the status file if it exists"""
if os.path.exists(settings.STATUS_MESSAGE_PATH):
os.remove(settings.STATUS_MESSAGE_PATH)
def tearDown(self):
self.remove_status_file()
def test_get_site_status_msg(self):
"""run the tests"""
for (json_str, exp_none, exp_toy, exp_full) in self.checks:
self.remove_status_file()
if json_str:
self.create_status_file(json_str)
print "checking results for {0}".format(json_str)
print "course=None:"
self.assertEqual(get_site_status_msg(None), exp_none)
print "course=toy:"
self.assertEqual(get_site_status_msg(self.toy), exp_toy)
print "course=full:"
self.assertEqual(get_site_status_msg(self.full), exp_full)
| Python | 0 |
c2f99fe178ff853e87b3f034394b18956d395e87 | Change credits verbose_name to autorship. | ideascube/mediacenter/models.py | ideascube/mediacenter/models.py | from django.core.urlresolvers import reverse
from django.db import models
from django.utils.translation import ugettext_lazy as _
from taggit.managers import TaggableManager
from ideascube.models import (
LanguageField, SortedTaggableManager, TimeStampedModel)
from ideascube.search.models import SearchableQuerySet, SearchMixin
class DocumentQuerySet(SearchableQuerySet, models.QuerySet):
def image(self):
return self.filter(kind=Document.IMAGE)
def video(self):
return self.filter(kind=Document.VIDEO)
def pdf(self):
return self.filter(kind=Document.PDF)
def text(self):
return self.filter(kind=Document.TEXT)
def audio(self):
return self.filter(kind=Document.AUDIO)
class Document(SearchMixin, TimeStampedModel):
IMAGE = 'image'
VIDEO = 'video'
PDF = 'pdf'
EPUB = 'epub'
TEXT = 'text'
AUDIO = 'audio'
APP = 'app'
OTHER = 'other'
KIND_CHOICES = (
(IMAGE, _('image')),
(AUDIO, _('sound')),
(VIDEO, _('video')),
(PDF, _('pdf')),
(TEXT, _('text')),
(EPUB, _('epub')),
(APP, _('app')),
(OTHER, _('other')),
)
KIND_DICT = dict(KIND_CHOICES)
title = models.CharField(verbose_name=_('title'), max_length=100)
summary = models.TextField(verbose_name=_('summary'))
lang = LanguageField(verbose_name=_('Language'), max_length=10, blank=True)
original = models.FileField(verbose_name=_('original'),
upload_to='mediacenter/document',
max_length=10240)
preview = models.ImageField(verbose_name=_('preview'),
upload_to='mediacenter/preview',
max_length=10240,
blank=True)
credits = models.CharField(verbose_name=_('Authorship'), max_length=300)
kind = models.CharField(verbose_name=_('type'),
max_length=5,
choices=KIND_CHOICES,
default=OTHER)
objects = DocumentQuerySet.as_manager()
tags = TaggableManager(blank=True, manager=SortedTaggableManager)
package_id = models.CharField(verbose_name=_('package'), max_length=100,
blank=True)
class Meta:
ordering = ["-modified_at", ]
def __str__(self):
return self.title
def __repr__(self):
return '<{}: {}>'.format(self.kind, str(self))
def get_absolute_url(self):
return reverse('mediacenter:document_detail', kwargs={'pk': self.pk})
@property
def index_strings(self):
return (self.title, self.summary, self.credits,
u' '.join(self.tags.names()))
@property
def index_lang(self):
return self.lang
@property
def index_kind(self):
return self.kind
@property
def index_source(self):
return self.package_id
@property
def index_tags(self):
return self.tags.slugs()
@property
def slug(self):
return self.get_kind_display()
| from django.core.urlresolvers import reverse
from django.db import models
from django.utils.translation import ugettext_lazy as _
from taggit.managers import TaggableManager
from ideascube.models import (
LanguageField, SortedTaggableManager, TimeStampedModel)
from ideascube.search.models import SearchableQuerySet, SearchMixin
class DocumentQuerySet(SearchableQuerySet, models.QuerySet):
def image(self):
return self.filter(kind=Document.IMAGE)
def video(self):
return self.filter(kind=Document.VIDEO)
def pdf(self):
return self.filter(kind=Document.PDF)
def text(self):
return self.filter(kind=Document.TEXT)
def audio(self):
return self.filter(kind=Document.AUDIO)
class Document(SearchMixin, TimeStampedModel):
IMAGE = 'image'
VIDEO = 'video'
PDF = 'pdf'
EPUB = 'epub'
TEXT = 'text'
AUDIO = 'audio'
APP = 'app'
OTHER = 'other'
KIND_CHOICES = (
(IMAGE, _('image')),
(AUDIO, _('sound')),
(VIDEO, _('video')),
(PDF, _('pdf')),
(TEXT, _('text')),
(EPUB, _('epub')),
(APP, _('app')),
(OTHER, _('other')),
)
KIND_DICT = dict(KIND_CHOICES)
title = models.CharField(verbose_name=_('title'), max_length=100)
summary = models.TextField(verbose_name=_('summary'))
lang = LanguageField(verbose_name=_('Language'), max_length=10, blank=True)
original = models.FileField(verbose_name=_('original'),
upload_to='mediacenter/document',
max_length=10240)
preview = models.ImageField(verbose_name=_('preview'),
upload_to='mediacenter/preview',
max_length=10240,
blank=True)
credits = models.CharField(verbose_name=_('credit'), max_length=300)
kind = models.CharField(verbose_name=_('type'),
max_length=5,
choices=KIND_CHOICES,
default=OTHER)
objects = DocumentQuerySet.as_manager()
tags = TaggableManager(blank=True, manager=SortedTaggableManager)
package_id = models.CharField(verbose_name=_('package'), max_length=100,
blank=True)
class Meta:
ordering = ["-modified_at", ]
def __str__(self):
return self.title
def __repr__(self):
return '<{}: {}>'.format(self.kind, str(self))
def get_absolute_url(self):
return reverse('mediacenter:document_detail', kwargs={'pk': self.pk})
@property
def index_strings(self):
return (self.title, self.summary, self.credits,
u' '.join(self.tags.names()))
@property
def index_lang(self):
return self.lang
@property
def index_kind(self):
return self.kind
@property
def index_source(self):
return self.package_id
@property
def index_tags(self):
return self.tags.slugs()
@property
def slug(self):
return self.get_kind_display()
| Python | 0 |
f113aaae2232d0041e01a6f12ab2ba083df65d44 | Change submit module to use new interface. | autocms/submit.py | autocms/submit.py | """Functions to submit and register new jobs."""
import os
def submit_and_stamp(counter, testname, scheduler, config):
"""Submit a job to the scheduler and produce a newstamp file.
The full path of the newstamp file is returned."""
result = scheduler.submit_job(counter, testname, config)
stamp_filename = ('stamp.' +
str(result.submit_time) +
str(counter))
stamp_path = os.path.join(config['AUTOCMS_BASEDIR'],
testname,
stamp_filename)
with open(stamp_path, 'w') as stampfile:
stampfile.write(result.stamp())
return stamp_path
def get_job_counter(testname, config):
"""Return an integer for the counter to pass to the next job."""
counter_path = os.path.join(config['AUTOCMS_BASEDIR'],
testname,
'counter')
if os.path.exists(counter_path):
with open(counter_path) as handle:
count = handle.read()
else:
count = 1
return int(count)
def set_job_counter(count, testname, config):
"""Write the job counter to file."""
counter_path = os.path.join(config['AUTOCMS_BASEDIR'],
testname,
'counter')
with open(counter_path, 'w') as handle:
handle.write(str(count))
| """Functions to submit and register new jobs."""
import os
import socket
def submit_and_stamp(counter, testname, scheduler, config):
"""Submit a job to the scheduler and produce a newstamp file.
This function should be run from within the test directory.
If the submission fails an output log will be produced with the
standard output of the submitter.
The name of the newstamp file is returned."""
result = scheduler.submit_job(counter, testname, config)
newstamp = str(result.id) + ' ' + str(timestamp) + ' ' + str(returncode)
if returncode != 0:
logfile_name = (testname + '.submission.' + str(counter) +
'.' + str(timestamp) + '.log')
newstamp += ' ' + logfile_name
log = "Job submission failed at {0}\n".format(timestamp)
log += "On node {0}\n".format(socket.gethostname())
log += "Submission command output:\n\n"
for line in output:
log += line + '\n'
with open(logfile_name, 'w') as logfile:
logfile.write(log)
newstamp += "\n"
newstamp_filename = 'newstamp.' + str(timestamp)
with open(newstamp_filename, 'w') as nsfile:
nsfile.write(newstamp)
return newstamp_filename
def get_job_counter():
"""Return an integer for the counter to pass to the next job.
This should be called from within the test directory."""
if os.path.exists('counter'):
with open('counter') as handle:
count = handle.read()
else:
count = 1
return int(count)
def set_job_counter(count):
"""Write the job counter to file.
This should be called from within the test directory."""
with open('counter', 'w') as handle:
handle.write(str(count))
| Python | 0 |
0973acf04fd2fd59db4880d5ba4d994f4c1733db | Add length detection for PNG images. | identifiers/image_identifier.py | identifiers/image_identifier.py |
import io
from struct import unpack
import sys
from identifier import Result
#############
# Constants #
#############
PNG_CHUNK_IEND = b'IEND'
PNG_CHUNK_IHDR = b'IHDR'
#######################
# Identifier Patterns #
#######################
JPEG_PATTERNS = [
'FF D8 FF E0',
'FF D8 FF E1',
'FF D8 FF FE',
]
GIF_PATTERNS = [
'47 49 46 38 39 61',
'47 49 46 38 37 61',
]
PNG_PATTERNS = [
'89 50 4E 47 0D 0A 1A 0A'
]
BMP_PATTERNS = [
'42 4D 62 25',
'42 4D F8 A9',
'42 4D 76 02',
]
ICO_PATTERNS = [
'00 00 01 00'
]
def read4UB(stream):
return unpack('>I', stream.read(4))[0]
class PngResolver:
def next_chunk(self, stream):
"""
Assumes there is a chunk at the current position in the stream.
Returns the name of the current chunk and its length.
Also advances the stream to the start of the next chunk.
"""
chunk_len = read4UB(stream)
chunk_name = stream.read(4)
stream.seek(chunk_len + 4, io.SEEK_CUR)
return (chunk_name, chunk_len)
def identify(self, stream):
try:
origin = stream.tell()
# Skip to the beginning of the first PNG chunk
stream.seek(origin + 8)
# Check to make sure the first chunk is the IHDR chunk
chunk_name, chunk_len = self.next_chunk(stream)
if chunk_name != PNG_CHUNK_IHDR or chunk_len != 0x0D:
return
# Loop through till we find the final chunk
while chunk_name != PNG_CHUNK_IEND:
chunk_name, chunk_len = self.next_chunk(stream)
# Now calculate the actual file length
end = stream.tell()
length = end - origin
return Result('PNG', 'PNG image file', length=length)
except BaseException as e:
print(e, file=sys.stderr)
# Ignore all errors
pass
class JpegResolver:
def identify(self, stream):
return Result('JPEG', 'JPEG image file')
class GifResolver:
def identify(self, stream):
return Result('GIF', 'GIF image file')
class BmpResolver:
def identify(self, stream):
return Result('BMP', 'BMP image file')
class IcoResolver:
def identity(self, stream):
return Result('ICO', 'Windows icon file')
def load(hound):
# Register JPEGs
hound.add_matches(JPEG_PATTERNS, JpegResolver())
# Register PNGs
hound.add_matches(PNG_PATTERNS, PngResolver())
# Register GIFs
hound.add_matches(GIF_PATTERNS, GifResolver())
# Register BMPs
hound.add_matches(BMP_PATTERNS, BmpResolver())
# Register ICOs
hound.add_matches(ICO_PATTERNS, IcoResolver())
|
# Identifier for basic image files
from identifier import Result
JPEG_PATTERNS = [
'FF D8 FF E0',
'FF D8 FF E1',
'FF D8 FF FE',
]
GIF_PATTERNS = [
'47 49 46 38 39 61',
'47 49 46 38 37 61',
]
PNG_PATTERNS = [
'89 50 4E 47'
]
BMP_PATTERNS = [
'42 4D 62 25',
'42 4D F8 A9',
'42 4D 76 02',
]
ICO_PATTERNS = [
'00 00 01 00'
]
class PngResolver:
def identify(self, stream):
return Result('PNG', 'PNG image file')
class JpegResolver:
def identify(self, stream):
return Result('JPEG', 'JPEG image file')
class GifResolver:
def identify(self, stream):
return Result('GIF', 'GIF image file')
class BmpResolver:
def identify(self, stream):
return Result('BMP', 'BMP image file')
class IcoResolver:
def identity(self, stream):
return Result('ICO', 'Windows icon file')
def load(hound):
# Register JPEGs
hound.add_matches(JPEG_PATTERNS, JpegResolver())
# Register PNGs
hound.add_matches(PNG_PATTERNS, PngResolver())
# Register GIFs
hound.add_matches(GIF_PATTERNS, GifResolver())
# Register BMPs
hound.add_matches(BMP_PATTERNS, BmpResolver())
# Register ICOs
hound.add_matches(ICO_PATTERNS, IcoResolver())
| Python | 0 |
d7c5b8784fd747355884e3371f1c85ede9a9bf6f | Disable some packages for now, so that packaging can finish on the buildbots as they are. This should let wrench run the Mono test suite. | profiles/mono-mac-release-64/packages.py | profiles/mono-mac-release-64/packages.py | import os
from bockbuild.darwinprofile import DarwinProfile
class MonoReleasePackages:
def __init__(self):
# Toolchain
#package order is very important.
#autoconf and automake don't depend on CC
#ccache uses a different CC since it's not installed yet
#every thing after ccache needs a working ccache
self.packages.extend ([
'autoconf.py',
'automake.py',
'ccache.py',
'libtool.py',
'xz.py',
'tar.py',
'gettext.py',
'pkg-config.py'
])
#needed to autogen gtk+
self.packages.extend ([
'gtk-osx-docbook.py',
'gtk-doc.py',
])
# # Base Libraries
self.packages.extend([
'libpng.py', #needed by cairo
'libjpeg.py',
'libtiff.py',
'libgif.py',
'libxml2.py',
'freetype.py',
'fontconfig.py',
'pixman.py', #needed by cairo
'cairo.py', #needed by Mono graphics functions (System.Drawing)
'libffi.py', #needed by glib
'glib.py',
'pango.py',
'atk.py',
'intltool.py',
'gdk-pixbuf.py',
'gtk+.py',
'libglade.py',
'sqlite.py',
'expat.py',
'ige-mac-integration.py'
])
# # Theme
self.packages.extend([
'libcroco.py',
'librsvg.py',
'hicolor-icon-theme.py',
'gtk-engines.py',
'murrine.py',
'xamarin-gtk-theme.py',
'gtk-quartz-engine.py'
])
# Mono
self.packages.extend([
'mono-llvm.py',
'mono-master.py',
#'libgdiplus.py',
#'xsp.py',
#'gtk-sharp-2.12-release.py',
#'boo.py',
# 'nant.py',
#'ironlangs.py',
#'fsharp-3.1.py',
#'mono-addins.py',
#'mono-basic.py',
])
self.packages = [os.path.join('..', '..', 'packages', p) for p in self.packages]
| import os
from bockbuild.darwinprofile import DarwinProfile
class MonoReleasePackages:
def __init__(self):
# Toolchain
#package order is very important.
#autoconf and automake don't depend on CC
#ccache uses a different CC since it's not installed yet
#every thing after ccache needs a working ccache
self.packages.extend ([
'autoconf.py',
'automake.py',
'ccache.py',
'libtool.py',
'xz.py',
'tar.py',
'gettext.py',
'pkg-config.py'
])
#needed to autogen gtk+
self.packages.extend ([
'gtk-osx-docbook.py',
'gtk-doc.py',
])
# # Base Libraries
self.packages.extend([
'libpng.py', #needed by cairo
'libjpeg.py',
'libtiff.py',
'libgif.py',
'libxml2.py',
'freetype.py',
'fontconfig.py',
'pixman.py', #needed by cairo
'cairo.py', #needed by Mono graphics functions (System.Drawing)
'libffi.py', #needed by glib
'glib.py',
'pango.py',
'atk.py',
'intltool.py',
'gdk-pixbuf.py',
'gtk+.py',
'libglade.py',
'sqlite.py',
'expat.py',
'ige-mac-integration.py'
])
# # Theme
self.packages.extend([
'libcroco.py',
'librsvg.py',
'hicolor-icon-theme.py',
'gtk-engines.py',
'murrine.py',
'xamarin-gtk-theme.py',
'gtk-quartz-engine.py'
])
# Mono
self.packages.extend([
'mono-llvm.py',
'mono-master.py',
'libgdiplus.py',
'xsp.py',
'gtk-sharp-2.12-release.py',
'boo.py',
# 'nant.py',
'ironlangs.py',
'fsharp-3.1.py',
'mono-addins.py',
'mono-basic.py',
])
self.packages = [os.path.join('..', '..', 'packages', p) for p in self.packages]
| Python | 0 |
fe0d872c69280b5713a4ad6f0a1cd4a5623fdd75 | Add createnapartcommand contents | cadnano/part/createnapartcommand.py | cadnano/part/createnapartcommand.py | from ast import literal_eval
from cadnano.cnproxy import UndoCommand
from cadnano.part.nucleicacidpart import NucleicAcidPart
class CreateNucleicAcidPartCommand(UndoCommand):
def __init__(self, document, grid_type, use_undostack):
# TODO[NF]: Docstring
super(CreateNucleicAcidPartCommand, self).__init__("Create NA Part")
self.document = document
self.grid_type = grid_type
self.use_undostack = use_undostack
def redo(self):
new_part = NucleicAcidPart(document=self.document, grid_type=self.grid_type)
self.document._addPart(new_part, use_undostack=self.use_undostack)
def undo(self):
self.document.deactivateActivePart()
| Python | 0 | |
72117d55715b80df0a01fa519be09bfeec0bc272 | fix generate empty tag bug | ezblog/blog/views.py | ezblog/blog/views.py | from django.core.paginator import Paginator, PageNotAnInteger, EmptyPage
from django.core.urlresolvers import reverse
from django.http import Http404, HttpResponse
from django.shortcuts import render, redirect, get_object_or_404
from .models import Post, Category, Tag
# index
def index(request):
per_page = 2
page = request.GET.get('page', 1)
if request.user.is_authenticated():
pg = Paginator(Post.objects.all(), per_page)
else:
pg = Paginator(Post.objects.filter(status='public'), per_page)
try:
contents = pg.page(page)
except PageNotAnInteger:
contents = pg.page(1)
except EmptyPage:
contents = []
ctx = {
'posts': contents,
}
return render(request, 'list.html', ctx)
# posts
def posts(request, pk):
if request.method == 'GET':
return __get_post(request, pk)
elif request.method == 'PUT':
url = reverse('blog:index')
return redirect(url)
elif request.method == 'DELETE':
return __delete_post(request, pk)
else:
raise Http404
def __get_post(request, pk):
post = get_object_or_404(Post, pk=pk)
ctx = {
'post': post,
}
return render(request, 'detail.html', ctx)
def __delete_post(request, pk):
post = get_object_or_404(Post, pk=pk)
post.delete()
response = HttpResponse()
response.status_code = 200
return response
# create_post
def create_post(request):
if request.method == 'POST':
return __create_post(request)
else:
raise Http404
def __create_post(request):
title = request.POST.get('title')
content = request.POST.get('content')
category_pk = request.POST.get('category')
status = request.POST.get('status')
tags = request.POST.get('tags')
if tags:
tags = request.POST.get('tags').split(',')
new_post = Post()
new_post.title = title
new_post.content = content
if category_pk:
new_post.category = Category.objects.get(pk=category_pk)
new_post.status = status
new_post.save()
if tags:
for name in tags:
name = name.strip()
if name:
try:
tag = Tag.objects.get(name=name)
except Tag.DoesNotExist:
tag = Tag()
tag.name = name
tag.save()
new_post.tags.add(tag)
new_post.save()
url = reverse('blog:posts', kwargs={'pk': new_post.pk})
return redirect(url)
# create_form
def create_form(request):
if request.method == 'GET':
return __create_form(request)
else:
raise Http404
def __create_form(request):
categories = Category.objects.all()
post = Post()
status_choices = post.get_status_choices()
ctx = {
'categories': categories,
'status_choices': status_choices,
}
return render(request, 'edit.html', ctx)
| from django.core.paginator import Paginator, PageNotAnInteger, EmptyPage
from django.core.urlresolvers import reverse
from django.http import Http404, HttpResponse
from django.shortcuts import render, redirect, get_object_or_404
from .models import Post, Category, Tag
# index
def index(request):
per_page = 2
page = request.GET.get('page', 1)
if request.user.is_authenticated():
pg = Paginator(Post.objects.all(), per_page)
else:
pg = Paginator(Post.objects.filter(status='public'), per_page)
try:
contents = pg.page(page)
except PageNotAnInteger:
contents = pg.page(1)
except EmptyPage:
contents = []
ctx = {
'posts': contents,
}
return render(request, 'list.html', ctx)
# posts
def posts(request, pk):
if request.method == 'GET':
return __get_post(request, pk)
elif request.method == 'PUT':
url = reverse('blog:index')
return redirect(url)
elif request.method == 'DELETE':
return __delete_post(request, pk)
else:
raise Http404
def __get_post(request, pk):
post = get_object_or_404(Post, pk=pk)
ctx = {
'post': post,
}
return render(request, 'detail.html', ctx)
def __delete_post(request, pk):
post = get_object_or_404(Post, pk=pk)
post.delete()
response = HttpResponse()
response.status_code = 200
return response
# create_post
def create_post(request):
if request.method == 'POST':
return __create_post(request)
else:
raise Http404
def __create_post(request):
title = request.POST.get('title')
content = request.POST.get('content')
category_pk = request.POST.get('category')
status = request.POST.get('status')
tags = request.POST.get('tags').split(',')
new_post = Post()
new_post.title = title
new_post.content = content
if category_pk:
new_post.category = Category.objects.get(pk=category_pk)
new_post.status = status
new_post.save()
if tags:
for name in tags:
name = name.strip()
print(name)
try:
tag = Tag.objects.get(name=name)
except Tag.DoesNotExist:
tag = Tag()
tag.name = name
tag.save()
new_post.tags.add(tag)
new_post.save()
url = reverse('blog:posts', kwargs={'pk': new_post.pk})
return redirect(url)
# create_form
def create_form(request):
if request.method == 'GET':
return __create_form(request)
else:
raise Http404
def __create_form(request):
categories = Category.objects.all()
post = Post()
status_choices = post.get_status_choices()
ctx = {
'categories': categories,
'status_choices': status_choices,
}
return render(request, 'edit.html', ctx)
| Python | 0.000003 |
899254d3bd064ba8e5653ad9081674b7af1495fa | fix capture=True | fabfile/openstack.py | fabfile/openstack.py | #!/usr/bin/env python
# vim: tabstop=8 expandtab shiftwidth=4 softtabstop=4
import os
import yaml
from fabric.api import task, local, settings, warn_only
from cuisine import file_exists
@task
def up():
""" Boot instances """
# call class OpenStack
op = OpenStack()
# Check if fingerprint exists on the list
op.check_key()
class OpenStack:
def __init__(self):
cfg_dir = os.path.dirname(__file__).replace('fabfile','ymlfile')
cfg_file = '{0}/{1}'.format(cfg_dir, 'openstack.yml')
f = open(cfg_file)
self.cfg = yaml.safe_load(f)
self.cfg['key_file'] = os.path.abspath(os.path.expanduser(self.cfg['key_file']))
f.close()
self.key_fingerprint = \
local('ssh-keygen -l -f {}|awk \'{{print $2}}\''.format(self.cfg['key_file']), capture=True)
def check_key(self):
if not os.path.exists(self.cfg['key_file']):
print "{} doesn't exist".format(self.cfg['key_file'])
exit(1)
with settings(warn_only=True):
output = local('nova keypair-list|grep {}'.format(self.key_fingerprint), capture=True)
if not output.return_code == 0:
print "ERROR: your key is not registered yet."
exit(1)
if not output.split()[1] == self.cfg['key_name']:
print "your key is already registered with a different name."
exit(1)
#def check_image(self):
# with settings(warn_only=True):
| #!/usr/bin/env python
# vim: tabstop=8 expandtab shiftwidth=4 softtabstop=4
import os
import yaml
from fabric.api import task, local, settings, warn_only
from cuisine import file_exists
@task
def up():
""" Boot instances """
# call class OpenStack
op = OpenStack()
# Check if fingerprint exists on the list
op.check_key()
class OpenStack:
def __init__(self):
cfg_dir = os.path.dirname(__file__).replace('fabfile','ymlfile')
cfg_file = '{0}/{1}'.format(cfg_dir, 'openstack.yml')
f = open(cfg_file)
self.cfg = yaml.safe_load(f)
self.cfg['key_file'] = os.path.abspath(os.path.expanduser(self.cfg['key_file']))
f.close()
self.key_fingerprint = \
local('ssh-keygen -l -f {}|awk \'{{print $2}}\''.format(self.cfg['key_file']), capture=True)
def check_key(self):
if not os.path.exists(self.cfg['key_file']):
print "{} doesn't exist".format(self.cfg['key_file'])
exit(1)
with settings(warn_only=True):
output = local('nova keypair-list|grep {}'.format(self.key_fingerprint))
print '#### ', output
if not output.return_code == 0:
print "ERROR: your key is not registered yet."
exit(1)
if not output.split()[1] == self.cfg['key_name']:
print "your key is already registered with a different name."
exit(1)
#def check_image(self):
# with settings(warn_only=True):
| Python | 0.998992 |
0fb32166825d630cc5e87b39588e280737567448 | Fix AWS Athena Sensor object has no attribute 'mode' (#4844) | airflow/contrib/sensors/aws_athena_sensor.py | airflow/contrib/sensors/aws_athena_sensor.py | # -*- coding: utf-8 -*-
#
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
from airflow.exceptions import AirflowException
from airflow.utils.decorators import apply_defaults
from airflow.contrib.hooks.aws_athena_hook import AWSAthenaHook
from airflow.sensors.base_sensor_operator import BaseSensorOperator
class AthenaSensor(BaseSensorOperator):
"""
Asks for the state of the Query until it reaches a failure state or success state.
If it fails, failing the task.
:param query_execution_id: query_execution_id to check the state of
:type query_execution_id: str
:param max_retires: Number of times to poll for query state before
returning the current state, defaults to None
:type max_retires: int
:param aws_conn_id: aws connection to use, defaults to 'aws_default'
:type aws_conn_id: str
:param sleep_time: Time to wait between two consecutive call to
check query status on athena, defaults to 10
:type sleep_time: int
"""
INTERMEDIATE_STATES = ('QUEUED', 'RUNNING',)
FAILURE_STATES = ('FAILED', 'CANCELLED',)
SUCCESS_STATES = ('SUCCEEDED',)
template_fields = ['query_execution_id']
template_ext = ()
ui_color = '#66c3ff'
@apply_defaults
def __init__(self,
query_execution_id,
max_retires=None,
aws_conn_id='aws_default',
sleep_time=10,
*args, **kwargs):
super(AthenaSensor, self).__init__(*args, **kwargs)
self.aws_conn_id = aws_conn_id
self.query_execution_id = query_execution_id
self.hook = None
self.sleep_time = sleep_time
self.max_retires = max_retires
def poke(self, context):
self.hook = self.get_hook()
self.hook.get_conn()
state = self.hook.poll_query_status(self.query_execution_id, self.max_retires)
if state in self.FAILURE_STATES:
raise AirflowException('Athena sensor failed')
if state in self.INTERMEDIATE_STATES:
return False
return True
def get_hook(self):
return AWSAthenaHook(self.aws_conn_id, self.sleep_time)
| # -*- coding: utf-8 -*-
#
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
from airflow.exceptions import AirflowException
from airflow.utils.decorators import apply_defaults
from airflow.contrib.hooks.aws_athena_hook import AWSAthenaHook
from airflow.sensors.base_sensor_operator import BaseSensorOperator
class AthenaSensor(BaseSensorOperator):
"""
Asks for the state of the Query until it reaches a failure state or success state.
If it fails, failing the task.
:param query_execution_id: query_execution_id to check the state of
:type query_execution_id: str
:param max_retires: Number of times to poll for query state before
returning the current state, defaults to None
:type max_retires: int
:param aws_conn_id: aws connection to use, defaults to 'aws_default'
:type aws_conn_id: str
:param sleep_time: Time to wait between two consecutive call to
check query status on athena, defaults to 10
:type sleep_time: int
"""
INTERMEDIATE_STATES = ('QUEUED', 'RUNNING',)
FAILURE_STATES = ('FAILED', 'CANCELLED',)
SUCCESS_STATES = ('SUCCEEDED',)
template_fields = ['query_execution_id']
template_ext = ()
ui_color = '#66c3ff'
@apply_defaults
def __init__(self,
query_execution_id,
max_retires=None,
aws_conn_id='aws_default',
sleep_time=10,
*args, **kwargs):
super(BaseSensorOperator, self).__init__(*args, **kwargs)
self.aws_conn_id = aws_conn_id
self.query_execution_id = query_execution_id
self.hook = None
self.sleep_time = sleep_time
self.max_retires = max_retires
def poke(self, context):
self.hook = self.get_hook()
self.hook.get_conn()
state = self.hook.poll_query_status(self.query_execution_id, self.max_retires)
if state in self.FAILURE_STATES:
raise AirflowException('Athena sensor failed')
if state in self.INTERMEDIATE_STATES:
return False
return True
def get_hook(self):
return AWSAthenaHook(self.aws_conn_id, self.sleep_time)
| Python | 0 |
f2b25679ff906615906552810368092cc5321a3c | Add source and issue tracker link warnings | fdroidserver/lint.py | fdroidserver/lint.py | #!/usr/bin/env python2
# -*- coding: utf-8 -*-
#
# rewritemeta.py - part of the FDroid server tool
# Copyright (C) 2010-12, Ciaran Gultnieks, ciaran@ciarang.com
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See th
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public Licen
# along with this program. If not, see <http://www.gnu.org/licenses/>.
from optparse import OptionParser
import re
import common, metadata
config = None
options = None
appid = None
def warn(message):
global appid
if appid:
print "%s:" % appid
appid = None
print(' %s' % message)
def main():
global config, options, appid
# Parse command line...
parser = OptionParser(usage="Usage: %prog [options] [APPID [APPID ...]]")
parser.add_option("-v", "--verbose", action="store_true", default=False,
help="Spew out even more information than normal")
(options, args) = parser.parse_args()
config = common.read_config(options)
# Get all apps...
allapps = metadata.read_metadata(xref=False)
apps = common.read_app_args(args, allapps, False)
regex_warnings = {
'Source Code': [
(re.compile(r'.*code\.google\.com/p/[^/]+/source/.*'),
"/source is enough on its own"),
(re.compile(r'.*code\.google\.com/p/[^/]+[/]*$'),
"/source is missing")
],
'Issue Tracker': [
(re.compile(r'.*code\.google\.com/p/[^/]+/issues/.*'),
"/issues is enough on its own"),
(re.compile(r'.*code\.google\.com/p/[^/]+[/]*$'),
"/issues is missing"),
(re.compile(r'.*github\.com/[^/]+/[^/]+/issues/.*'),
"/issues is enough on its own"),
(re.compile(r'.*github\.com/[^/]+/[^/]+[/]*$'),
"/issues is missing")
]
}
for app in apps:
appid = app['id']
lastcommit = ''
for build in app['builds']:
if 'commit' in build and 'disable' not in build:
lastcommit = build['commit']
if (app['Update Check Mode'] == 'RepoManifest' and
any(s in lastcommit for s in ('.', ',', '_', '-', '/'))):
warn("Last used commit '%s' looks like a tag, but Update Check Mode is RepoManifest" % lastcommit)
summ_chars = len(app['Summary'])
if summ_chars > config['char_limits']['Summary']:
warn("Summary of length %s is over the %i char limit" % (
summ_chars, config['char_limits']['Summary']))
if app['Summary']:
lastchar = app['Summary'][-1]
if any(lastchar==c for c in ['.', ',', '!', '?']):
warn("Summary should not end with a %s" % lastchar)
for f in ['Source Code', 'Issue Tracker']:
if f not in regex_warnings:
continue
for m, r in regex_warnings[f]:
if m.match(app[f]):
warn("%s url '%s': %s" % (f, app[f], r))
desc_chars = 0
for line in app['Description']:
desc_chars += len(line)
if desc_chars > config['char_limits']['Description']:
warn("Description of length %s is over the %i char limit" % (
desc_chars, config['char_limits']['Description']))
if not appid:
print
print "Finished."
if __name__ == "__main__":
main()
| #!/usr/bin/env python2
# -*- coding: utf-8 -*-
#
# rewritemeta.py - part of the FDroid server tool
# Copyright (C) 2010-12, Ciaran Gultnieks, ciaran@ciarang.com
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See th
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public Licen
# along with this program. If not, see <http://www.gnu.org/licenses/>.
from optparse import OptionParser
import common, metadata
config = None
options = None
appid = None
def warn(message):
global appid
if appid:
print "%s:" % appid
appid = None
print(' %s' % message)
def main():
global config, options, appid
# Parse command line...
parser = OptionParser(usage="Usage: %prog [options] [APPID [APPID ...]]")
parser.add_option("-v", "--verbose", action="store_true", default=False,
help="Spew out even more information than normal")
(options, args) = parser.parse_args()
config = common.read_config(options)
# Get all apps...
allapps = metadata.read_metadata(xref=False)
apps = common.read_app_args(args, allapps, False)
for app in apps:
appid = app['id']
lastcommit = ''
for build in app['builds']:
if 'commit' in build and 'disable' not in build:
lastcommit = build['commit']
if (app['Update Check Mode'] == 'RepoManifest' and
any(s in lastcommit for s in ('.', ',', '_', '-', '/'))):
warn("Last used commit '%s' looks like a tag, but Update Check Mode is RepoManifest" % lastcommit)
summ_chars = len(app['Summary'])
if summ_chars > config['char_limits']['Summary']:
warn("Summary of length %s is over the %i char limit" % (
summ_chars, config['char_limits']['Summary']))
if app['Summary']:
lastchar = app['Summary'][-1]
if any(lastchar==c for c in ['.', ',', '!', '?']):
warn("Summary should not end with a %s" % lastchar)
desc_chars = 0
for line in app['Description']:
desc_chars += len(line)
if desc_chars > config['char_limits']['Description']:
warn("Description of length %s is over the %i char limit" % (
desc_chars, config['char_limits']['Description']))
if not appid:
print
print "Finished."
if __name__ == "__main__":
main()
| Python | 0 |
4d1c465e5c946ac17334e29e0ded7b6134533d12 | Disable save in Crop multi roi and show the image instead | plugins/Scripts/Plugins/Crop_Multi_Roi.py | plugins/Scripts/Plugins/Crop_Multi_Roi.py | from ij import IJ
from ij.plugin.frame import RoiManager
from io.scif.config import SCIFIOConfig
from io.scif.img import ImageRegion
from io.scif.img import ImgOpener
from io.scif.img import ImgSaver
from net.imagej.axis import Axes
from net.imglib2.img.display.imagej import ImageJFunctions
import os
def main():
# Get current image filename
imp = IJ.getImage()
f = imp.getOriginalFileInfo()
if not f:
IJ.showMessage('Source image needs to match a file on the system.')
return
# Iterate over all ROIs from ROI Manager
rois = RoiManager.getInstance()
if not rois:
IJ.showMessage('No ROIs. Please use Analyze > Tools > ROI Manager...')
return
fname = os.path.join(f.directory, f.fileName)
IJ.log('Image filename is %s' % fname)
rois_array = rois.getRoisAsArray()
for i, roi in enumerate(rois_array):
crop_id = i +1
IJ.log("Opening crop %i / %i" % (crop_id, len(rois_array)))
# Get ROI bounds
bounds = roi.getBounds()
x = bounds.x
y = bounds.y
w = bounds.width
h = bounds.height
# Import only cropped region of the image
axes = [Axes.X, Axes.Y]
ranges = ["%i-%i" % (x, x+w), "%i-%i" % (y, y+h)]
config = SCIFIOConfig()
config.imgOpenerSetRegion(ImageRegion(axes, ranges))
opener = ImgOpener()
imps = opener.openImgs(fname, config)
imp = imps[0]
# Get filename and basename of the current cropped image
crop_basename = "crop%i_%s" % (crop_id, f.fileName)
crop_fname = os.path.join(f.directory, crop_basename)
imp.setName(crop_basename)
# Save cropped image
#IJ.log("Saving crop to %s" % crop_fname)
#saver = ImgSaver()
#saver.saveImg(crop_fname, imp)
# Show cropped image
ImageJFunctions.show(imp)
IJ.log('Done')
main()
| from ij import IJ
from ij.plugin.frame import RoiManager
from io.scif.config import SCIFIOConfig
from io.scif.img import ImageRegion
from io.scif.img import ImgOpener
from io.scif.img import ImgSaver
from net.imagej.axis import Axes
import os
def main():
# Get current image filename
imp = IJ.getImage()
f = imp.getOriginalFileInfo()
if not f:
IJ.showMessage('Source image needs to match a file on the system.')
return
# Iterate over all ROIs from ROI Manager
rois = RoiManager.getInstance()
if not rois:
IJ.showMessage('No ROIs. Please use Analyze > Tools > ROI Manager...')
return
fname = os.path.join(f.directory, f.fileName)
IJ.log('Image filename is %s' % fname)
rois_array = rois.getRoisAsArray()
for i, roi in enumerate(rois_array):
crop_id = i +1
IJ.log("Opening crop %i / %i" % (crop_id, len(rois_array)))
# Get ROI bounds
bounds = roi.getBounds()
x = bounds.x
y = bounds.y
w = bounds.width
h = bounds.height
# Import only cropped region of the image
axes = [Axes.X, Axes.Y]
ranges = ["%i-%i" % (x, x+w), "%i-%i" % (y, y+h)]
config = SCIFIOConfig()
config.imgOpenerSetRegion(ImageRegion(axes, ranges))
opener = ImgOpener()
imps = opener.openImgs(fname, config)
imp = imps[0]
# Get filename and basename of the current cropped image
crop_basename = "crop%i_%s" % (crop_id, f.fileName)
crop_fname = os.path.join(f.directory, crop_basename)
IJ.log("Saving crop to %s" % crop_fname)
# Save cropped image
saver = ImgSaver()
saver.saveImg(crop_fname, imp)
IJ.log('Done')
main()
| Python | 0 |
9a19c34a104aabd0c5b34734f587573d5766a4bd | support multi-file results | finishTest/Finish.py | finishTest/Finish.py | from __future__ import print_function
from BaseTask import BaseTask
from Engine import MasterTbl, Error, get_platform
from Dbg import Dbg
import os, json, time, platform
dbg = Dbg()
validA = ("passed", "failed", "diff")
comment_block = """
Test Results:
'notfinished': means that the test has started but not completed.
'failed': means that the test has started but not completed.
'notrun': test has not started running.
'diff' : Test has run but is different from gold copy.
'passed': Test has run and matches gold copy.
"""
class Finish(BaseTask):
def __init__(self,name):
super(Finish, self).__init__(name)
def __parse_input_fn(self, fnA):
result = "passed"
for fn in fnA:
if (not os.path.exists(fn)):
return "failed"
f = open(fn)
lineA = f.readlines()
f.close()
found = False
for line in lineA:
line = line.strip()
if (line[0] == "#" or len(line) < 1):
continue
found = True
idx = line.find(",")
if (idx > 0):
line = line[0:idx]
line = line.lower()
if (line != "passed"):
result = line
break
if (not result in validA or not found):
result = "failed"
break
return result
def execute(self, *args, **kwargs):
masterTbl = MasterTbl()
result_fn = masterTbl['result_fn']
runtime_fn = masterTbl['runtime_fn']
input_fnA = masterTbl['pargs']
result = self.__parse_input_fn(input_fnA)
my_result = { 'testresult' : result, "comment" : comment_block.split('\n') }
f = open(result_fn,"w")
f.write(json.dumps(my_result, sort_keys=True, indent=2, separators=(', ', ': ')))
f.close()
if (not os.path.exists(runtime_fn)):
Error("Unable to open: ", runtime_fn)
f = open(runtime_fn)
runtime = json.loads(f.read())
f.close()
t1 = time.time()
runtime['T1'] = t1
runtime['TT'] = t1 - runtime['T0']
unameT = get_platform()
for k in unameT:
runtime[k] = unameT[k]
f = open(runtime_fn,"w")
f.write(json.dumps(runtime, sort_keys=True, indent=2, separators=(', ', ': ')))
f.close()
| from __future__ import print_function
from BaseTask import BaseTask
from Engine import MasterTbl, Error, get_platform
from Dbg import Dbg
import os, json, time, platform
dbg = Dbg()
validA = ("passed", "failed", "diff")
comment_block = """
Test Results:
'notfinished': means that the test has started but not completed.
'failed': means that the test has started but not completed.
'notrun': test has not started running.
'diff' : Test has run but is different from gold copy.
'passed': Test has run and matches gold copy.
"""
class Finish(BaseTask):
def __init__(self,name):
super(Finish, self).__init__(name)
def __parse_input_fn(self, fn):
if (not os.path.exists(fn)):
return "failed"
f = open(fn)
lineA = f.readlines()
f.close()
found = False
result = "passed"
for line in lineA:
line = line.strip()
if (line[0] == "#" or len(line) < 1):
continue
found = True
idx = line.find(",")
if (idx > 0):
line = line[0:idx]
line = line.lower()
if (line != "passed"):
result = line
break
if (not result in validA or not found):
result = "failed"
return result
def execute(self, *args, **kwargs):
masterTbl = MasterTbl()
result_fn = masterTbl['result_fn']
runtime_fn = masterTbl['runtime_fn']
input_fn = masterTbl['pargs'][0]
result = self.__parse_input_fn(input_fn)
my_result = { 'testresult' : result, "comment" : comment_block.split('\n') }
f = open(result_fn,"w")
f.write(json.dumps(my_result, sort_keys=True, indent=2, separators=(', ', ': ')))
f.close()
if (not os.path.exists(runtime_fn)):
Error("Unable to open: ", runtime_fn)
f = open(runtime_fn)
runtime = json.loads(f.read())
f.close()
t1 = time.time()
runtime['T1'] = t1
runtime['TT'] = t1 - runtime['T0']
unameT = get_platform()
for k in unameT:
runtime[k] = unameT[k]
f = open(runtime_fn,"w")
f.write(json.dumps(runtime, sort_keys=True, indent=2, separators=(', ', ': ')))
f.close()
| Python | 0 |
8481cb40caa896b81386f4a9ddb6fda92e14cc76 | Fix a typo | ironic/tests/unit/db/sqlalchemy/test_types.py | ironic/tests/unit/db/sqlalchemy/test_types.py | # Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""Tests for custom SQLAlchemy types via Ironic DB."""
from oslo_db import exception as db_exc
from oslo_utils import uuidutils
import ironic.db.sqlalchemy.api as sa_api
from ironic.db.sqlalchemy import models
from ironic.tests.unit.db import base
class SqlAlchemyCustomTypesTestCase(base.DbTestCase):
# NOTE(max_lobur): Since it's not straightforward to check this in
# isolation these tests use existing db models.
def test_JSONEncodedDict_default_value(self):
# Create chassis w/o extra specified.
ch1_id = uuidutils.generate_uuid()
self.dbapi.create_chassis({'uuid': ch1_id})
# Get chassis manually to test SA types in isolation from UOM.
ch1 = sa_api.model_query(models.Chassis).filter_by(uuid=ch1_id).one()
self.assertEqual({}, ch1.extra)
# Create chassis with extra specified.
ch2_id = uuidutils.generate_uuid()
extra = {'foo1': 'test', 'foo2': 'other extra'}
self.dbapi.create_chassis({'uuid': ch2_id, 'extra': extra})
# Get chassis manually to test SA types in isolation from UOM.
ch2 = sa_api.model_query(models.Chassis).filter_by(uuid=ch2_id).one()
self.assertEqual(extra, ch2.extra)
def test_JSONEncodedDict_type_check(self):
self.assertRaises(db_exc.DBError,
self.dbapi.create_chassis,
{'extra': ['this is not a dict']})
def test_JSONEncodedList_default_value(self):
# Create conductor w/o extra specified.
cdr1_id = 321321
self.dbapi.register_conductor({'hostname': 'test_host1',
'drivers': None,
'id': cdr1_id})
# Get conductor manually to test SA types in isolation from UOM.
cdr1 = (sa_api
.model_query(models.Conductor)
.filter_by(id=cdr1_id)
.one())
self.assertEqual([], cdr1.drivers)
# Create conductor with drivers specified.
cdr2_id = 623623
drivers = ['foo1', 'other driver']
self.dbapi.register_conductor({'hostname': 'test_host2',
'drivers': drivers,
'id': cdr2_id})
# Get conductor manually to test SA types in isolation from UOM.
cdr2 = (sa_api
.model_query(models.Conductor)
.filter_by(id=cdr2_id)
.one())
self.assertEqual(drivers, cdr2.drivers)
def test_JSONEncodedList_type_check(self):
self.assertRaises(db_exc.DBError,
self.dbapi.register_conductor,
{'hostname': 'test_host3',
'drivers': {'this is not a list': 'test'}})
| # Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""Tests for custom SQLAlchemy types via Ironic DB."""
from oslo_db import exception as db_exc
from oslo_utils import uuidutils
import ironic.db.sqlalchemy.api as sa_api
from ironic.db.sqlalchemy import models
from ironic.tests.unit.db import base
class SqlAlchemyCustomTypesTestCase(base.DbTestCase):
# NOTE(max_lobur): Since it's not straightforward to check this in
# isolation these tests use existing db models.
def test_JSONEncodedDict_default_value(self):
# Create chassis w/o extra specified.
ch1_id = uuidutils.generate_uuid()
self.dbapi.create_chassis({'uuid': ch1_id})
# Get chassis manually to test SA types in isolation from UOM.
ch1 = sa_api.model_query(models.Chassis).filter_by(uuid=ch1_id).one()
self.assertEqual({}, ch1.extra)
# Create chassis with extra specified.
ch2_id = uuidutils.generate_uuid()
extra = {'foo1': 'test', 'foo2': 'other extra'}
self.dbapi.create_chassis({'uuid': ch2_id, 'extra': extra})
# Get chassis manually to test SA types in isolation from UOM.
ch2 = sa_api.model_query(models.Chassis).filter_by(uuid=ch2_id).one()
self.assertEqual(extra, ch2.extra)
def test_JSONEncodedDict_type_check(self):
self.assertRaises(db_exc.DBError,
self.dbapi.create_chassis,
{'extra': ['this is not a dict']})
def test_JSONEncodedLict_default_value(self):
# Create conductor w/o extra specified.
cdr1_id = 321321
self.dbapi.register_conductor({'hostname': 'test_host1',
'drivers': None,
'id': cdr1_id})
# Get conductor manually to test SA types in isolation from UOM.
cdr1 = (sa_api
.model_query(models.Conductor)
.filter_by(id=cdr1_id)
.one())
self.assertEqual([], cdr1.drivers)
# Create conductor with drivers specified.
cdr2_id = 623623
drivers = ['foo1', 'other driver']
self.dbapi.register_conductor({'hostname': 'test_host2',
'drivers': drivers,
'id': cdr2_id})
# Get conductor manually to test SA types in isolation from UOM.
cdr2 = (sa_api
.model_query(models.Conductor)
.filter_by(id=cdr2_id)
.one())
self.assertEqual(drivers, cdr2.drivers)
def test_JSONEncodedList_type_check(self):
self.assertRaises(db_exc.DBError,
self.dbapi.register_conductor,
{'hostname': 'test_host3',
'drivers': {'this is not a list': 'test'}})
| Python | 0.999988 |
38efb136609b645b0076c0aa1481330f9e28ee51 | Add a rule for matching packages by regex. | fmn/rules/generic.py | fmn/rules/generic.py | # Generic rules for FMN
import re
import fedmsg
import fmn.rules.utils
def user_filter(config, message, fasnick=None, *args, **kw):
""" All messages for a certain user
Use this rule to include messages that are associated with a
specific user.
"""
fasnick = kw.get('fasnick', fasnick)
if fasnick:
return fasnick in fedmsg.meta.msg2usernames(message, **config)
def not_user_filter(config, message, fasnick=None, *args, **kw):
""" All messages not concerning one or more users
Use this rule to exclude messages that are associated with one or more
users. Specify several users by separating them with a comma ','.
"""
fasnick = kw.get('fasnick', fasnick)
if not fasnick:
return False
fasnick = fasnick or [] and fasnick.split(',')
valid = True
for nick in fasnick:
if nick.strip() in fedmsg.meta.msg2usernames(message, **config):
valid = False
break
return valid
def user_package_filter(config, message, fasnick=None, *args, **kw):
""" All messages concerning user's packages
This rule includes messages that relate to packages where the
specified user has **commit** ACLs.
"""
fasnick = kw.get('fasnick', fasnick)
if fasnick:
user_packages = fmn.rules.utils.get_packages_of_user(config, fasnick)
msg_packages = fedmsg.meta.msg2packages(message, **config)
return user_packages.intersection(msg_packages)
def package_filter(config, message, package=None, *args, **kw):
""" All messages pertaining to a certain package
Use this rule to include messages that relate to a certain package
(*i.e., nethack*).
"""
package = kw.get('package', package)
if package:
return package in fedmsg.meta.msg2packages(message, **config)
def package_regex_filter(config, message, pattern=None, *args, **kw):
""" All messages pertaining to packages matching a given regex
Use this rule to include messages that relate to packages that match
particular regular expressions
(*i.e., (maven|javapackages-tools|maven-surefire)*).
"""
pattern = kw.get('pattern', pattern)
if pattern:
packages = fedmsg.meta.msg2packages(message, **config)
regex = re.compile(pattern)
return any([regex.match(package) for package in packages])
def trac_hosted_filter(config, message, project=None, *args, **kw):
""" Filter the messages for one or more fedorahosted projects
Adding this rule allows you to get notifications for one or more
`fedorahosted <https://fedorahosted.org>`_ project. Specify multiple
projects by separating them with a comma ','.
"""
project = kw.get('project', project)
link = fedmsg.meta.msg2link(message, **config)
if not link:
return False
project = project or [] and project.split(',')
valid = False
for proj in project:
if '://fedorahosted.org/%s/' % proj.strip() in link:
valid = True
return valid
| # Generic rules for FMN
import fedmsg
import fmn.rules.utils
def user_filter(config, message, fasnick=None, *args, **kw):
""" All messages for a certain user
Use this rule to include messages that are associated with a
specific user.
"""
fasnick = kw.get('fasnick', fasnick)
if fasnick:
return fasnick in fedmsg.meta.msg2usernames(message, **config)
def not_user_filter(config, message, fasnick=None, *args, **kw):
""" All messages not concerning one or more users
Use this rule to exclude messages that are associated with one or more
users. Specify several users by separating them with a comma ','.
"""
fasnick = kw.get('fasnick', fasnick)
if not fasnick:
return False
fasnick = fasnick or [] and fasnick.split(',')
valid = True
for nick in fasnick:
if nick.strip() in fedmsg.meta.msg2usernames(message, **config):
valid = False
break
return valid
def user_package_filter(config, message, fasnick=None, *args, **kw):
""" All messages concerning user's packages
This rule includes messages that relate to packages where the
specified user has **commit** ACLs.
"""
fasnick = kw.get('fasnick', fasnick)
if fasnick:
user_packages = fmn.rules.utils.get_packages_of_user(config, fasnick)
msg_packages = fedmsg.meta.msg2packages(message, **config)
return user_packages.intersection(msg_packages)
def package_filter(config, message, package=None, *args, **kw):
""" All messages pertaining to a certain package
Use this rule to include messages that relate to a certain package
(*i.e., nethack*).
"""
package = kw.get('package', package)
if package:
return package in fedmsg.meta.msg2packages(message, **config)
def trac_hosted_filter(config, message, project=None, *args, **kw):
""" Filter the messages for one or more fedorahosted projects
Adding this rule allows you to get notifications for one or more
`fedorahosted <https://fedorahosted.org>`_ project. Specify multiple
projects by separating them with a comma ','.
"""
project = kw.get('project', project)
link = fedmsg.meta.msg2link(message, **config)
if not link:
return False
project = project or [] and project.split(',')
valid = False
for proj in project:
if '://fedorahosted.org/%s/' % proj.strip() in link:
valid = True
return valid
| Python | 0 |
7f974b87c278ef009535271461b5e49686057a9a | Fix for django >= 1.10 | avatar/management/commands/rebuild_avatars.py | avatar/management/commands/rebuild_avatars.py | from django.core.management.base import BaseCommand
from avatar.conf import settings
from avatar.models import Avatar
class Command(BaseCommand):
help = ("Regenerates avatar thumbnails for the sizes specified in "
"settings.AVATAR_AUTO_GENERATE_SIZES.")
def handle(self, *args, **options):
for avatar in Avatar.objects.all():
for size in settings.AVATAR_AUTO_GENERATE_SIZES:
if options['verbosity'] != 0:
print("Rebuilding Avatar id=%s at size %s." % (avatar.id, size))
avatar.create_thumbnail(size)
| from django.core.management.base import NoArgsCommand
from avatar.conf import settings
from avatar.models import Avatar
class Command(NoArgsCommand):
help = ("Regenerates avatar thumbnails for the sizes specified in "
"settings.AVATAR_AUTO_GENERATE_SIZES.")
def handle_noargs(self, **options):
for avatar in Avatar.objects.all():
for size in settings.AVATAR_AUTO_GENERATE_SIZES:
if options['verbosity'] != 0:
print("Rebuilding Avatar id=%s at size %s." % (avatar.id, size))
avatar.create_thumbnail(size)
| Python | 0 |
f36cad198c45caa40f179e5a9de134610cc3f6fe | fix date filter | skylines/commands/flights/selector.py | skylines/commands/flights/selector.py | from flask.ext.script import Option
from sqlalchemy import func
from datetime import datetime
from skylines.model import Airport, Flight
selector_options = (
Option('--date-from', help='Date from (YYYY-MM-DD)'),
Option('--date-to', help='Date to (YYYY-MM-DD)'),
Option('--uploaded-from', help='Date from (YYYY-MM-DD)'),
Option('--uploaded-to', help='Date to (YYYY-MM-DD)'),
Option('--private', action='store_true',
help='Process private flights, too'),
Option('--country-code', help='Country code of the start airport'),
Option('--airport-name', help='Airport name of the start airport'),
Option('ids', metavar='ID', nargs='*', type=int,
help='Any number of flight IDs.'),
)
def select(q, **kwargs):
if kwargs.get('ids'):
print "ids == " + str(kwargs.get('ids'))
q = q.filter(Flight.id.in_(kwargs.get('ids')))
if kwargs.get('date_from'):
try:
date_from = datetime.strptime(kwargs.get('date_from'), "%Y-%m-%d")
q = q.filter(Flight.takeoff_time >= date_from)
print "takeoff_time >= " + str(date_from)
except:
print "Cannot parse date-from"
return None
if kwargs.get('date_to'):
try:
date_to = datetime.strptime(kwargs.get('date_to'), "%Y-%m-%d")
q = q.filter(Flight.takeoff_time < date_to)
print "takeoff_time < " + str(date_to)
except:
print "Cannot parse date-to"
return None
if kwargs.get('uploaded_from'):
try:
uploaded_from = datetime.strptime(kwargs.get('uploaded_from'), "%Y-%m-%d")
q = q.filter(Flight.time_created >= uploaded_from)
print "time_created >= " + str(uploaded_from)
except:
print "Cannot parse uploaded-from"
return None
if kwargs.get('uploaded_to'):
try:
uploaded_to = datetime.strptime(kwargs.get('uploaded_to'), "%Y-%m-%d")
q = q.filter(Flight.time_created < uploaded_to)
print "time_created < " + str(uploaded_to)
except:
print "Cannot parse uploaded-to"
return None
if not kwargs.get('private'):
print "privacy_level == PUBLIC"
q = q.filter(Flight.privacy_level == Flight.PrivacyLevel.PUBLIC)
if kwargs.get('country_code'):
country_code = kwargs.get('country_code')
q = q.join(Flight.takeoff_airport)
q = q.filter(func.lower(Airport.country_code) == func.lower(country_code))
print "takeoff_airport country code: " + country_code
if kwargs.get('airport_name'):
airport_name = kwargs.get('airport_name')
q = q.join(Flight.takeoff_airport)
q = q.filter(func.lower(Airport.name) == func.lower(airport_name))
print "takeoff_airport name: " + airport_name
return q
| from flask.ext.script import Option
from sqlalchemy import func
from datetime import datetime
from skylines.model import Airport, Flight
selector_options = (
Option('--date-from', help='Date from (YYYY-MM-DD)'),
Option('--date-to', help='Date to (YYYY-MM-DD)'),
Option('--uploaded-from', help='Date from (YYYY-MM-DD)'),
Option('--uploaded-to', help='Date to (YYYY-MM-DD)'),
Option('--private', action='store_true',
help='Process private flights, too'),
Option('--country-code', help='Country code of the start airport'),
Option('--airport-name', help='Airport name of the start airport'),
Option('ids', metavar='ID', nargs='*', type=int,
help='Any number of flight IDs.'),
)
def select(q, **kwargs):
if kwargs.get('ids'):
print "ids == " + str(kwargs.get('ids'))
q = q.filter(Flight.id.in_(kwargs.get('ids')))
if kwargs.get('date_from'):
try:
date_from = datetime.strptime(kwargs.get('date_from'), "%Y-%m-%d")
q = q.filter(Flight.takeoff_time >= date_from)
print "takeoff_time >= " + str(date_from)
except:
print "Cannot parse date-from"
return None
if kwargs.get('date_to'):
try:
date_to = datetime.strptime(kwargs.get('date_to'), "%Y-%m-%d")
q = q.filter(Flight.takeoff_time >= date_to)
print "takeoff_time < " + str(date_to)
except:
print "Cannot parse date-to"
return None
if kwargs.get('uploaded_from'):
try:
uploaded_from = datetime.strptime(kwargs.get('uploaded_from'), "%Y-%m-%d")
q = q.filter(Flight.time_created >= uploaded_from)
print "time_created >= " + str(uploaded_from)
except:
print "Cannot parse uploaded-from"
return None
if kwargs.get('uploaded_to'):
try:
uploaded_to = datetime.strptime(kwargs.get('uploaded_to'), "%Y-%m-%d")
q = q.filter(Flight.time_created < uploaded_to)
print "time_created < " + str(uploaded_to)
except:
print "Cannot parse uploaded-to"
return None
if not kwargs.get('private'):
print "privacy_level == PUBLIC"
q = q.filter(Flight.privacy_level == Flight.PrivacyLevel.PUBLIC)
if kwargs.get('country_code'):
country_code = kwargs.get('country_code')
q = q.join(Flight.takeoff_airport)
q = q.filter(func.lower(Airport.country_code) == func.lower(country_code))
print "takeoff_airport country code: " + country_code
if kwargs.get('airport_name'):
airport_name = kwargs.get('airport_name')
q = q.join(Flight.takeoff_airport)
q = q.filter(func.lower(Airport.name) == func.lower(airport_name))
print "takeoff_airport name: " + airport_name
return q
| Python | 0.000011 |
0da189464703837e212bff06c24cc6eb5b62eeea | Fix name of room | blackbelt/slack.py | blackbelt/slack.py | from slacker import Slacker
from blackbelt.config import config
class Slack(object):
def __init__(self, token=None):
if not token:
token = config['slack']['access_token']
slack = Slacker(token)
self.slack = slack
if not token:
raise ValueError("Can't do things with Slack without access token. Run bb init.")
self.token = token
def get_user_id(self):
return self.slack.auth.test().body['user_id']
def post_message(self, message, room):
return self.slack.chat.post_message(room, message, username = "Black Belt", icon_emoji = ":blackbelt:")
def post_message(message, room='#engine-room'):
client = Slack()
msg = "<@%s> %s" % (client.get_user_id(), message)
client.post_message(msg, room)
| from slacker import Slacker
from blackbelt.config import config
class Slack(object):
def __init__(self, token=None):
if not token:
token = config['slack']['access_token']
slack = Slacker(token)
self.slack = slack
if not token:
raise ValueError("Can't do things with Slack without access token. Run bb init.")
self.token = token
def get_user_id(self):
return self.slack.auth.test().body['user_id']
def post_message(self, message, room):
return self.slack.chat.post_message(room, message, username = "Black Belt", icon_emoji = ":blackbelt:")
def post_message(message, room='#sre'):
client = Slack()
msg = "<@%s> %s" % (client.get_user_id(), message)
client.post_message(msg, room)
| Python | 0.999953 |
eb3a332cf5aeb6b213c333cbfba78b26b776db49 | fix facebook api | social_publisher/backends/facebook.py | social_publisher/backends/facebook.py | # -*- coding: utf-8 -*-
from social_publisher import facebook
from social_publisher.backends import base
class FacebookBackend(base.BaseBackend):
name = 'facebook'
auth_provider = 'facebook'
def get_api(self, social_user):
return facebook.GraphAPI(social_user.extra_data.get('access_token'))
def get_api_publisher(self, social_user):
"""
message: <str>
image: <file> as object_attachment
owner_id: <str>
"""
def _post(owner_id=None, **kwargs):
owner_id = owner_id or 'me'
image = kwargs.get('image')
if image:
res = self.get_api(social_user).post(
'{}/photos'.format(owner_id),
params={'image': image})
kwargs['object_attachment'] = res['id']
return self.get_api(social_user).post(
'{}/feed'.format(owner_id),
params=kwargs
)
return _post
class FacebookPostImageBackend(FacebookBackend):
name = 'facebook_post_image'
auth_provider = 'facebook'
def get_api_publisher(self, social_user):
"""
message: <str>
image: <file>
owner_id: <str>
"""
def _post(owner_id=None, **kwargs):
owner_id = owner_id or 'me'
return self.get_api(social_user).post(
'{}/photos'.format(owner_id),
params=kwargs
)
return _post
| # -*- coding: utf-8 -*-
from social_publisher import facebook
from social_publisher.backends import base
class FacebookBackend(base.BaseBackend):
name = 'facebook'
auth_provider = 'facebook'
def get_api(self, social_user):
return facebook.GraphAPI(social_user.extra_data.get('access_token'))
def get_api_publisher(self, social_user):
"""
message: <str>
image: <file> as object_attachment
owner_id: <str>
"""
def _post(owner_id=None, **kwargs):
owner_id = owner_id or 'me'
image = kwargs.get('image')
if image:
res = self.get_api(social_user).post(
'{}/photos'.format(owner_id), image=image)
kwargs['object_attachment'] = res['id']
return self.get_api(social_user).post(
'{}/feed'.format(owner_id),
params=kwargs
)
return _post
class FacebookPostImageBackend(FacebookBackend):
name = 'facebook_post_image'
auth_provider = 'facebook'
def get_api_publisher(self, social_user):
"""
message: <str>
image: <file>
owner_id: <str>
"""
def _post(owner_id=None, **kwargs):
owner_id = owner_id or 'me'
return self.get_api(social_user).post(
'{}/photos'.format(owner_id),
params=kwargs
)
return _post
| Python | 0.000014 |
07c8888a3623ea40c4f2047e11445726e61e2438 | Fix lint. | packs/csv/tests/test_action_parse.py | packs/csv/tests/test_action_parse.py | import unittest2
from parse_csv import ParseCSVAction
__all__ = [
'ParseCSVActionTestCase'
]
MOCK_DATA = """
first,last,year
name1,surename1,1990
""".strip()
class ParseCSVActionTestCase(unittest2.TestCase):
def test_run(self):
result = ParseCSVAction().run(data=MOCK_DATA, delimiter=',')
expected = [
['first', 'last', 'year'],
['name1', 'surename1', '1990']
]
self.assertEqual(result, expected)
| import unittest2
from parse_csv import ParseCSVAction
__all__ = [
'ParseCSVActionTestCase'
]
MOCK_DATA = """
first,last,year
name1,surename1,1990
""".strip()
class ParseCSVActionTestCase(unittest2.TestCase):
def test_run(self):
result = ParseCSVAction().run(data=MOCK_DATA, delimiter=',')
expected = [
['first', 'last', 'year'],
['name1', 'surename1', '1990']
]
self.assertEqual(result, expected)
| Python | 0.000001 |
7c75a9c01aec6427bef573e69605087e7b30ff33 | test cases for createview | parcellate/apps/winparcel/tests.py | parcellate/apps/winparcel/tests.py | from django.test import TestCase
from django.test.client import (Client,
RequestFactory)
from .models import (RSSObject,
RSSEntry)
from .lib import ReadRSS
from .views import RSSObjectCreateView
class RSSObjectAddViewTests(TestCase):
""" RSS Object Add View tests."""
def test_add_rss_in_the_context(self):
client = Client()
response = client.get('/rss/add')
self.assertEquals(
list(response.context.get('object_list')),[])
RSSObject.objects.create(title='Serious Eats',
url='http://feeds.feedburner.com/seriouseats')
response = client.get('/rss/add')
self.assertEquals(response.context.get('object_list').count(), 1)
def test_add_rss_in_the_context_request_factory(self):
factory = RequestFactory()
request = factory.get('/')
response = RSSObjectCreateView.as_view()(request)
self.assertEquals(
list(response.context_data.get('object_list')),[])
RSSObject.objects.create(title='Serious Eats',
url='http://feeds.feedburner.com/seriouseats')
response = RSSObjectCreateView.as_view()(request)
self.assertEquals(
response.context_data.get('object_list').count(), 1)
class SimpleTest(TestCase):
test_data = dict(
url = 'http://feeds.feedburner.com/seriouseats',
title = 'Serious Eats'
)
testvals = {'title': 'Test Test Test',
'url': 'http://www.google.com',
'summary': 'This is a test save',
'author': 'Viv',
'uri': 'http://vivyly.github.io',
'content': '<div class="blah">TESTING</div>'
}
def test_basic_addition(self):
"""
Tests that 1 + 1 always equals 2.
"""
self.assertEqual(1 + 1, 2)
def create_rss(self):
rss = RSSObject()
for key, val in self.test_data.iteritems():
setattr(rss, key, val)
rss.save()
return rss
def test_add_rss(self):
rss_obj = self.create_rss()
for key, val in self.test_data.iteritems():
self.assertEqual(getattr(rss_obj, key), val)
def test_add_rss_entry(self):
rss_obj = self.create_rss()
rss_entry = RSSEntry()
for key, val in self.testvals.iteritems():
setattr(rss_entry, key, val)
rss_entry.rssatom = rss_obj
rss_entry.save()
for key, val in self.test_data.iteritems():
self.assertEqual(getattr(rss_obj, key), val)
for key, val in self.testvals.iteritems():
self.assertEqual(getattr(rss_entry, key), val)
def test_add_rss_entry_lib(self):
rss_obj = self.create_rss()
read_rss = ReadRSS(rss=rss_obj)
created = read_rss.save_entries()
self.assertEqual(created, 15)
| """
This file demonstrates writing tests using the unittest module. These will pass
when you run "manage.py test".
Replace this with more appropriate tests for your application.
"""
from django.test import TestCase
from .models import (RSSObject,
RSSEntry)
from .lib import ReadRSS
class SimpleTest(TestCase):
test_data = dict(
url = 'http://feeds.feedburner.com/seriouseats',
title = 'Serious Eats'
)
testvals = {'title': 'Test Test Test',
'url': 'http://www.google.com',
'summary': 'This is a test save',
'author': 'Viv',
'uri': 'http://vivyly.github.io',
'content': '<div class="blah">TESTING</div>'
}
def test_basic_addition(self):
"""
Tests that 1 + 1 always equals 2.
"""
self.assertEqual(1 + 1, 2)
def create_rss(self):
rss = RSSObject()
for key, val in self.test_data.iteritems():
setattr(rss, key, val)
rss.save()
return rss
def test_add_rss(self):
rss_obj = self.create_rss()
for key, val in self.test_data.iteritems():
self.assertEqual(getattr(rss_obj, key), val)
def test_add_rss_entry(self):
rss_obj = self.create_rss()
rss_entry = RSSEntry()
for key, val in self.testvals.iteritems():
setattr(rss_entry, key, val)
rss_entry.rssatom = rss_obj
rss_entry.save()
for key, val in self.test_data.iteritems():
self.assertEqual(getattr(rss_obj, key), val)
for key, val in self.testvals.iteritems():
self.assertEqual(getattr(rss_entry, key), val)
def test_add_rss_entry_lib(self):
rss_obj = self.create_rss()
read_rss = ReadRSS(rss=rss_obj)
created = read_rss.save_entries()
self.assertEqual(created, 15)
| Python | 0 |
c68792c50f91445ed733c5e5ed0c226a04b1e173 | Use chromium snapshots for Linux_64 and Mac. | chrome/test/chromedriver/archive.py | chrome/test/chromedriver/archive.py | # Copyright (c) 2013 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""Downloads items from the Chromium continuous archive."""
import os
import platform
import urllib
import util
CHROME_34_REVISION = '251854'
CHROME_35_REVISION = '260135'
CHROME_36_REVISION = '269696'
_SITE = 'http://commondatastorage.googleapis.com'
class Site(object):
CONTINUOUS = _SITE + '/chromium-browser-continuous'
CHROMIUM_SNAPSHOT = _SITE + '/chromium-browser-snapshots'
BLINK_SNAPSHOT = _SITE + '/chromium-webkit-snapshots'
def GetLatestRevision(site=Site.CONTINUOUS):
"""Returns the latest revision (as a string) available for this platform.
Args:
site: the archive site to check against, default to the continuous one.
"""
url = site + '/%s/LAST_CHANGE'
return urllib.urlopen(url % _GetDownloadPlatform()).read()
def DownloadChrome(revision, dest_dir, site=Site.CONTINUOUS):
"""Downloads the packaged Chrome from the archive to the given directory.
Args:
revision: the revision of Chrome to download.
dest_dir: the directory to download Chrome to.
site: the archive site to download from, default to the continuous one.
Returns:
The path to the unzipped Chrome binary.
"""
def GetZipName():
if util.IsWindows():
return 'chrome-win32'
elif util.IsMac():
return 'chrome-mac'
elif util.IsLinux():
return 'chrome-linux'
def GetChromePathFromPackage():
if util.IsWindows():
return 'chrome.exe'
elif util.IsMac():
return 'Chromium.app/Contents/MacOS/Chromium'
elif util.IsLinux():
return 'chrome'
zip_path = os.path.join(dest_dir, 'chrome-%s.zip' % revision)
if not os.path.exists(zip_path):
url = site + '/%s/%s/%s.zip' % (_GetDownloadPlatform(), revision,
GetZipName())
print 'Downloading', url, '...'
urllib.urlretrieve(url, zip_path)
util.Unzip(zip_path, dest_dir)
return os.path.join(dest_dir, GetZipName(), GetChromePathFromPackage())
def _GetDownloadPlatform():
"""Returns the name for this platform on the archive site."""
if util.IsWindows():
return 'Win'
elif util.IsMac():
return 'Mac'
elif util.IsLinux():
if platform.architecture()[0] == '64bit':
return 'Linux_x64'
else:
return 'Linux'
def GetLatestSnapshotVersion():
"""Returns the latest revision of snapshot build."""
return GetLatestRevision(GetSnapshotDownloadSite())
def GetSnapshotDownloadSite():
"""Returns the site to download snapshot build according to the platform.
For Linux 32-bit, it is chromium snapshot build.
For other platform, it is blink snapshot build.
Because there is no linux32 blink snapshot build.
"""
if _GetDownloadPlatform() in ('Linux', 'Linux_x64', 'Mac'):
return Site.CHROMIUM_SNAPSHOT
else:
return Site.BLINK_SNAPSHOT
| # Copyright (c) 2013 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""Downloads items from the Chromium continuous archive."""
import os
import platform
import urllib
import util
CHROME_34_REVISION = '251854'
CHROME_35_REVISION = '260135'
CHROME_36_REVISION = '269696'
_SITE = 'http://commondatastorage.googleapis.com'
class Site(object):
CONTINUOUS = _SITE + '/chromium-browser-continuous'
CHROMIUM_SNAPSHOT = _SITE + '/chromium-browser-snapshots'
BLINK_SNAPSHOT = _SITE + '/chromium-webkit-snapshots'
def GetLatestRevision(site=Site.CONTINUOUS):
"""Returns the latest revision (as a string) available for this platform.
Args:
site: the archive site to check against, default to the continuous one.
"""
url = site + '/%s/LAST_CHANGE'
return urllib.urlopen(url % _GetDownloadPlatform()).read()
def DownloadChrome(revision, dest_dir, site=Site.CONTINUOUS):
"""Downloads the packaged Chrome from the archive to the given directory.
Args:
revision: the revision of Chrome to download.
dest_dir: the directory to download Chrome to.
site: the archive site to download from, default to the continuous one.
Returns:
The path to the unzipped Chrome binary.
"""
def GetZipName():
if util.IsWindows():
return 'chrome-win32'
elif util.IsMac():
return 'chrome-mac'
elif util.IsLinux():
return 'chrome-linux'
def GetChromePathFromPackage():
if util.IsWindows():
return 'chrome.exe'
elif util.IsMac():
return 'Chromium.app/Contents/MacOS/Chromium'
elif util.IsLinux():
return 'chrome'
zip_path = os.path.join(dest_dir, 'chrome-%s.zip' % revision)
if not os.path.exists(zip_path):
url = site + '/%s/%s/%s.zip' % (_GetDownloadPlatform(), revision,
GetZipName())
print 'Downloading', url, '...'
urllib.urlretrieve(url, zip_path)
util.Unzip(zip_path, dest_dir)
return os.path.join(dest_dir, GetZipName(), GetChromePathFromPackage())
def _GetDownloadPlatform():
"""Returns the name for this platform on the archive site."""
if util.IsWindows():
return 'Win'
elif util.IsMac():
return 'Mac'
elif util.IsLinux():
if platform.architecture()[0] == '64bit':
return 'Linux_x64'
else:
return 'Linux'
def GetLatestSnapshotVersion():
"""Returns the latest revision of snapshot build."""
return GetLatestRevision(GetSnapshotDownloadSite())
def GetSnapshotDownloadSite():
"""Returns the site to download snapshot build according to the platform.
For Linux 32-bit, it is chromium snapshot build.
For other platform, it is blink snapshot build.
Because there is no linux32 blink snapshot build.
"""
if _GetDownloadPlatform() == 'Linux':
return Site.CHROMIUM_SNAPSHOT
else:
return Site.BLINK_SNAPSHOT
| Python | 0.000001 |
9c898d7e547b13bb289c0d1cada0bbd4078803dc | Allow passing of size_cutoff to preassembler methods. | indra/db/pre_assemble_script.py | indra/db/pre_assemble_script.py | import indra.tools.assemble_corpus as ac
from indra.db.util import get_statements, insert_pa_stmts
from indra.preassembler import Preassembler
from indra.preassembler.hierarchy_manager import hierarchies
def make_unique_statement_set(preassembler, stmts):
stmt_groups = preassembler.get_stmt_matching_groups(stmts)
unique_stmts = []
for _, duplicates in stmt_groups:
# Get the first statement and add the evidence of all subsequent
# Statements to it
for stmt_ix, stmt in enumerate(duplicates):
if stmt_ix == 0:
first_stmt = stmt.get_new_copy()
first_stmt.evidence.append(stmt.uuid)
# This should never be None or anything else
assert isinstance(first_stmt, type(stmt))
unique_stmts.append(first_stmt)
return unique_stmts
def get_match_key_maps(preassembler, unique_stmts, **generate_id_map_kwargs):
id_maps = preassembler.generate_id_maps(unique_stmts,
**generate_id_map_kwargs)
return {tuple([unique_stmts[idx].matches_key() for idx in idx_pair])
for idx_pair in id_maps}
def process_statements(stmts, **generate_id_map_kwargs):
stmts = ac.map_grounding(stmts)
stmts = ac.map_sequence(stmts)
pa = Preassembler(hierarchies)
unique_stmts = make_unique_statement_set(pa, stmts)
match_key_maps = get_match_key_maps(pa, unique_stmts,
**generate_id_map_kwargs)
return unique_stmts, match_key_maps
def preassemble_db_stmts(db, num_proc, *clauses):
"""Run pre-assembly on a set of statements in the database."""
stmts = get_statements(clauses, db=db, do_stmt_count=False)
unique_stmts, match_key_maps = process_statements(stmts, poolsize=num_proc)
insert_pa_stmts(db, unique_stmts)
return unique_stmts, match_key_maps
| import indra.tools.assemble_corpus as ac
from indra.db.util import get_statements, insert_pa_stmts
from indra.preassembler import Preassembler
from indra.preassembler.hierarchy_manager import hierarchies
def make_unique_statement_set(preassembler, stmts):
stmt_groups = preassembler.get_stmt_matching_groups(stmts)
unique_stmts = []
for _, duplicates in stmt_groups:
# Get the first statement and add the evidence of all subsequent
# Statements to it
for stmt_ix, stmt in enumerate(duplicates):
if stmt_ix == 0:
first_stmt = stmt.get_new_copy()
first_stmt.evidence.append(stmt.uuid)
# This should never be None or anything else
assert isinstance(first_stmt, type(stmt))
unique_stmts.append(first_stmt)
return unique_stmts
def get_match_key_maps(preassembler, unique_stmts, num_procs=1):
id_maps = preassembler.generate_id_maps(unique_stmts, num_procs)
return [[unique_stmts[idx].matches_key() for idx in idx_pair]
for idx_pair in id_maps]
def process_statements(stmts, num_procs=1):
stmts = ac.map_grounding(stmts)
stmts = ac.map_sequence(stmts)
pa = Preassembler(hierarchies)
unique_stmts = make_unique_statement_set(pa, stmts)
match_key_maps = get_match_key_maps(pa, unique_stmts, num_procs)
return unique_stmts, match_key_maps
def preassemble_db_stmts(db, num_procs, *clauses):
"""Run pre-assembly on a set of statements in the database."""
stmts = get_statements(clauses, db=db, do_stmt_count=False)
pa_stmts = process_statements(stmts, num_procs)
insert_pa_stmts(db, pa_stmts)
return pa_stmts
| Python | 0 |
4df8aafb1d4ab12ad795b30f1f75937072216f1b | Implement proper event detection, lots of debugging code | hdltools/vcd/event.py | hdltools/vcd/event.py | """VCD Event tracker."""
from typing import Tuple, Dict
from colorama import Fore, Back, init
from hdltools.vcd.parser import BaseVCDParser, VCDParserError
from hdltools.vcd.trigger import VCDTriggerDescriptor
from hdltools.vcd.mixins.conditions import VCDConditionMixin
from hdltools.vcd.mixins.time import VCDTimeRestrictionMixin
from hdltools.vcd.trigger.condtable import ConditionTableTrigger
init(autoreset=True)
# an event is a VCDTriggerDescriptor
class VCDEventTracker(
BaseVCDParser, VCDConditionMixin, VCDTimeRestrictionMixin
):
"""Event tracker."""
def __init__(
self, events: Dict[str, Tuple[VCDTriggerDescriptor]], **kwargs
):
"""Initialize."""
super().__init__(**kwargs)
self._events = events
self._evt_triggers = {
evt_name: [
ConditionTableTrigger(conditions=conds, evt_name=evt_name),
None,
]
for evt_name, conds in events.items()
}
# arm immediately
for trigger, _ in self._evt_triggers.values():
trigger.trigger_callback = self._evt_trigger_callback
trigger.arm_trigger()
def _evt_trigger_callback(self, trigger_fsm):
"""Event trigger callback."""
# update last triggered time
self._evt_triggers[trigger_fsm.evt_name][1] = self.current_time
print(
Back.RED
+ f"DEBUG: {self.last_cycle_time}: evt fired: {trigger_fsm.evt_name}"
)
def _state_change_handler(self, old_state, new_state):
"""Detect state transition."""
super()._state_change_handler(old_state, new_state)
# when header state finishes, we have list of variables
if old_state == "header":
# add VCD variable identifiers to condition table elements
for _, (condtable, _) in self._evt_triggers.items():
for cond in condtable.conditions:
# post-process now
candidates = self.variable_search(
cond.name, cond.scope, True
)
if not candidates:
raise RuntimeError("cannot locate VCD variable")
# associate with first candidate
cond.vcd_var = list(candidates)[0].identifiers[0]
print("DEBUG: header parsing completed")
def clock_change_handler(self, time):
"""Handle time."""
if time == 0:
return
for condtable, _ in self._evt_triggers.values():
# re-arm
if condtable.trigger_armed is False:
condtable.arm_trigger()
for condtable, _ in self._evt_triggers.values():
# update consolidated values
changed = []
for cond in condtable.conditions:
# pick variables directly for speed
var = self.variables[cond.vcd_var]
if var.last_changed == self.last_cycle_time:
_changed, state = condtable.advance(cond, var.value)
if _changed:
changed.append((cond, state))
if changed:
print(
Fore.CYAN
+ f"DEBUG: @{time}: table {condtable.triggerid} changes:"
)
for cond, state in changed:
msg_color = Fore.RED if state is False else Fore.GREEN
print(msg_color + f"DEBUG: cond {cond} -> {state}")
# check and fire trigger
condtable.check_and_fire()
# for var in self.variables.values():
# # print(var.value)
# if var.last_changed == self.last_cycle_time:
# condtable.match_and_advance(var, var.value)
def initial_value_handler(self, stmt, fields):
"""Handle initial value assignment."""
var = self.variables[fields["var"]]
var.value = fields["value"]
def value_change_handler(self, stmt, fields):
"""Handle value change."""
if self.time_valid is False or self.waiting_precondition:
return
# update local variable value
var = self.variables[fields["var"]]
var.value = fields["value"]
var.last_changed = self.current_time
# for trigger, _ in self._evt_triggers.values():
# trigger.match_and_advance(var, fields["value"])
| """VCD Event tracker."""
from typing import Tuple, Dict
from hdltools.vcd.parser import BaseVCDParser, VCDParserError
from hdltools.vcd.trigger import VCDTriggerDescriptor
from hdltools.vcd.mixins.conditions import VCDConditionMixin
from hdltools.vcd.mixins.time import VCDTimeRestrictionMixin
from hdltools.vcd.trigger.condtable import ConditionTableTrigger
# an event is a VCDTriggerDescriptor
class VCDEventTracker(
BaseVCDParser, VCDConditionMixin, VCDTimeRestrictionMixin
):
"""Event tracker."""
def __init__(
self, events: Dict[str, Tuple[VCDTriggerDescriptor]], **kwargs
):
"""Initialize."""
super().__init__(**kwargs)
self._events = events
self._evt_triggers = {
evt_name: [
ConditionTableTrigger(conditions=conds, evt_name=evt_name),
None,
]
for evt_name, conds in events.items()
}
# arm immediately
for trigger, _ in self._evt_triggers.values():
trigger.trigger_callback = self._evt_trigger_callback
trigger.arm_trigger()
def _evt_trigger_callback(self, trigger_fsm):
"""Event trigger callback."""
# update last triggered time
self._evt_triggers[trigger_fsm.evt_name][1] = self.current_time
print(f"DEBUG: {self.current_time}: evt fired: {trigger_fsm.evt_name}")
def _state_change_handler(self, old_state, new_state):
"""Detect state transition."""
super()._state_change_handler(old_state, new_state)
# when header state finishes, we have list of variables
if old_state == "header":
# add VCD variable identifiers to condition table elements
for _, (condtable, _) in self._evt_triggers.items():
for cond in condtable.conditions:
# post-process now
candidates = self.variable_search(
cond.name, cond.scope, True
)
if not candidates:
raise RuntimeError("cannot locate VCD variable")
# associate with first candidate
cond.vcd_var = list(candidates)[0].identifiers[0]
print("DEBUG: header parsing completed")
def clock_change_handler(self, time):
"""Handle time."""
for condtable, _ in self._evt_triggers.values():
# re-arm
if condtable.trigger_armed is False:
condtable.arm_trigger()
def value_change_handler(self, stmt, fields):
"""Handle value change."""
if self.time_valid is False or self.waiting_precondition:
return
# feed event triggers
var = self.variables[fields["var"]]
for trigger, _ in self._evt_triggers.values():
trigger.match_and_advance(var, fields["value"])
| Python | 0.000001 |
37c65efa1b78abcc75d506554e6fb877678ec2f2 | Fix a typo | editorsnotes/api/views/topics.py | editorsnotes/api/views/topics.py | from editorsnotes.main.models import Topic
from .. import filters as es_filters
from ..serializers.topics import TopicSerializer
from .base import BaseListAPIView, BaseDetailView, DeleteConfirmAPIView
from .mixins import (ElasticSearchListMixin, EmbeddedMarkupReferencesMixin,
HydraProjectPermissionsMixin)
__all__ = ['TopicList', 'TopicDetail', 'TopicConfirmDelete']
class TopicList(ElasticSearchListMixin, HydraProjectPermissionsMixin,
BaseListAPIView):
queryset = Topic.objects.all()
serializer_class = TopicSerializer
es_filter_backends = (
es_filters.ProjectFilterBackend,
es_filters.QFilterBackend,
es_filters.UpdaterFilterBackend,
)
hydra_project_perms = ('main.add_topic',)
class TopicDetail(EmbeddedMarkupReferencesMixin, HydraProjectPermissionsMixin,
BaseDetailView):
queryset = Topic.objects.all()
serializer_class = TopicSerializer
hydra_project_perms = ('main.change_topic', 'main.delete_topic',)
class TopicConfirmDelete(DeleteConfirmAPIView):
queryset = Topic.objects.all()
permissions = {
'GET': ('main.delete_topic',),
'HEAD': ('main.delete_topic',)
}
| from editorsnotes.main.models import Topic
from .. import filters as es_filters
from ..serializers.topics import TopicSerializer
from .base import BaseListAPIView, BaseDetailView, DeleteConfirmAPIView
from .mixins import (ElasticSearchListMixin, EmbeddedMarkupReferencesMixin,
HydraProjectPermissionsMixin)
__all__ = ['TopicList', 'TopicDetail', 'TopicConfirmDelete']
class TopicList(ElasticSearchListMixin, HydraProjectPermissionsMixin,
BaseListAPIView):
queryset = Topic.objects.all()
serializer_class = TopicSerializer
es_filter_backends = (
es_filters.ProjectFilterBackend,
es_filters.QFilterBackend,
es_filters.UpdaterFilterBackend,
)
hydra_project_perms = ('main.add_note',)
class TopicDetail(EmbeddedMarkupReferencesMixin, HydraProjectPermissionsMixin,
BaseDetailView):
queryset = Topic.objects.all()
serializer_class = TopicSerializer
hydra_project_perms = ('main.change_note', 'main.delete_note',)
class TopicConfirmDelete(DeleteConfirmAPIView):
queryset = Topic.objects.all()
permissions = {
'GET': ('main.delete_topic',),
'HEAD': ('main.delete_topic',)
}
| Python | 1 |
0091c41d8dd064b40ccf35d4d24c01ae4438f028 | Set sender in signal handlers | cityhallmonitor/signals/handlers.py | cityhallmonitor/signals/handlers.py | from django.db.models.signals import pre_save, post_save
from django.dispatch import receiver
from django.utils import timezone
from cityhallmonitor.models import DirtyFieldsModel
@receiver(pre_save, sender=DirtyFieldsModel)
def handle_pre_save(sender, instance, *args, **kwargs):
"""Set updated_at timestamp if model is actually dirty"""
if hasattr(sender, 'is_dirty'):
if instance.is_dirty():
instance.updated_at = timezone.now()
@receiver(post_save, sender=DirtyFieldsModel)
def handle_post_save(sender, instance, **kwargs):
"""Reset dirty state"""
if hasattr(sender, 'reset_state'):
instance.reset_state()
| from django.db.models.signals import pre_save, post_save
from django.dispatch import receiver
from django.utils import timezone
@receiver(pre_save)
def handle_pre_save(sender, instance, *args, **kwargs):
"""
Set updated_at timestamp if model is actually dirty
"""
if hasattr(sender, 'is_dirty'):
if instance.is_dirty():
instance.updated_at = timezone.now()
@receiver(post_save)
def handle_post_save(sender, instance, **kwargs):
"""
Reset dirty state
"""
if hasattr(sender, 'reset_state'):
instance.reset_state()
| Python | 0.000001 |
2409bf1377ceaee99e4d4b49d0c8c2a2fef57687 | Generate a new 'name' if necessary | ckanext/ddi/importer/ddiimporter.py | ckanext/ddi/importer/ddiimporter.py | import requests
import traceback
from pprint import pprint
from ckan.lib.munge import munge_title_to_name
from ckanext.harvest.harvesters import HarvesterBase
from ckanext.ddi.importer import metadata
import ckanapi
import logging
log = logging.getLogger(__name__)
class DdiImporter(HarvesterBase):
def run(self, file_path=None, url=None):
pkg_dict = None
ckan_metadata = metadata.DdiCkanMetadata()
if file_path is not None:
with open(file_path) as xml_file:
pkg_dict = ckan_metadata.load(xml_file.read())
elif url is not None:
log.debug('Fetch file from %s' % url)
r = requests.get(url)
xml_file = r.text
pkg_dict = ckan_metadata.load(xml_file)
if pkg_dict['url'] == '':
pkg_dict['url'] = url
resources = []
resources.append({
'url': url,
'name': pkg_dict['title'],
'format': 'xml'
})
pkg_dict['resources'] = resources
pkg_dict = self.cleanup_pkg_dict(pkg_dict)
self.insert_or_update_pkg(pkg_dict)
def insert_or_update_pkg(self, pkg_dict):
try:
registry = ckanapi.LocalCKAN()
pprint(pkg_dict)
if pkg_dict['id'] and pkg_dict['id'] != '':
try:
registry.call_action('package_update', pkg_dict)
except ckanapi.NotFound:
del pkg_dict['id']
pkg_dict['name'] = self._gen_new_name(pkg_dict['name'])
registry.call_action('package_create', pkg_dict)
else:
del pkg_dict['id']
registry.call_action('package_create', pkg_dict)
except:
traceback.print_exc()
def cleanup_pkg_dict(self, pkg_dict):
if pkg_dict['name'] != '':
pkg_dict['name'] = munge_title_to_name(pkg_dict['name'])
else:
pkg_dict['name'] = munge_title_to_name(pkg_dict['title'])
if pkg_dict['url'] == '':
del pkg_dict['url']
return pkg_dict
| import requests
import traceback
from pprint import pprint
from ckan.lib.munge import munge_title_to_name
from ckanext.harvest.harvesters import HarvesterBase
from ckanext.ddi.importer import metadata
import ckanapi
import logging
log = logging.getLogger(__name__)
class DdiImporter(HarvesterBase):
def run(self, file_path=None, url=None):
pkg_dict = None
ckan_metadata = metadata.DdiCkanMetadata()
if file_path is not None:
with open(file_path) as xml_file:
pkg_dict = ckan_metadata.load(xml_file.read())
elif url is not None:
log.debug('Fetch file from %s' % url)
r = requests.get(url)
xml_file = r.text
pkg_dict = ckan_metadata.load(xml_file)
if pkg_dict['url'] == '':
pkg_dict['url'] = url
resources = []
resources.append({
'url': url,
'name': pkg_dict['title'],
'format': 'xml'
})
pkg_dict['resources'] = resources
pkg_dict = self.cleanup_pkg_dict(pkg_dict)
self.insert_or_update_pkg(pkg_dict)
def insert_or_update_pkg(self, pkg_dict):
try:
registry = ckanapi.LocalCKAN()
pprint(pkg_dict)
if pkg_dict['id'] and pkg_dict['id'] != '':
try:
registry.call_action('package_update', pkg_dict)
except ckanapi.NotFound:
del pkg_dict['id']
registry.call_action('package_create', pkg_dict)
else:
del pkg_dict['id']
registry.call_action('package_create', pkg_dict)
except:
traceback.print_exc()
def cleanup_pkg_dict(self, pkg_dict):
if pkg_dict['name'] != '':
pkg_dict['name'] = munge_title_to_name(pkg_dict['name'])
else:
pkg_dict['name'] = munge_title_to_name(pkg_dict['title'])
if pkg_dict['url'] == '':
del pkg_dict['url']
return pkg_dict
| Python | 1 |
fbe9de1d8f019b6f1c263337f04e5866131d0e60 | drop the chunk size of the kafka feed down | corehq/apps/change_feed/pillow.py | corehq/apps/change_feed/pillow.py | import json
from kafka import KeyedProducer
from kafka.common import KafkaUnavailableError
from casexml.apps.case.models import CommCareCase
from corehq.apps.change_feed import data_sources
from corehq.apps.change_feed.connection import get_kafka_client
from corehq.apps.change_feed.models import ChangeMeta
from corehq.apps.change_feed.topics import get_topic
from couchforms.models import all_known_formlike_doc_types
import logging
from pillowtop.checkpoints.manager import PillowCheckpoint, get_django_checkpoint_store
from pillowtop.couchdb import CachedCouchDB
from pillowtop.listener import PythonPillow
class ChangeFeedPillow(PythonPillow):
def __init__(self, couch_db, kafka, checkpoint):
super(ChangeFeedPillow, self).__init__(couch_db=couch_db, checkpoint=checkpoint, chunk_size=10)
self._kafka = kafka
self._producer = KeyedProducer(self._kafka)
def get_db_name(self):
return self.get_couch_db().dbname
def process_change(self, change, is_retry_attempt=False):
document_type = _get_document_type(change.document)
if document_type:
assert change.document is not None
change_meta = ChangeMeta(
document_id=change.id,
data_source_type=data_sources.COUCH,
data_source_name=self.get_db_name(),
document_type=document_type,
document_subtype=_get_document_subtype(change.document),
domain=change.document.get('domain', None),
is_deletion=change.deleted,
)
self._producer.send_messages(
bytes(get_topic(document_type)),
bytes(change_meta.domain),
bytes(json.dumps(change_meta.to_json())),
)
def get_default_couch_db_change_feed_pillow():
default_couch_db = CachedCouchDB(CommCareCase.get_db().uri, readonly=False)
try:
kafka_client = get_kafka_client()
except KafkaUnavailableError:
logging.warning('Ignoring missing kafka client during unit testing')
kafka_client = None
return ChangeFeedPillow(
couch_db=default_couch_db,
kafka=kafka_client,
checkpoint=PillowCheckpoint(get_django_checkpoint_store(), 'default-couch-change-feed')
)
def _get_document_type(document_or_none):
return document_or_none.get('doc_type', None) if document_or_none else None
def _get_document_subtype(document_or_none):
type = _get_document_type(document_or_none)
if type in ('CommCareCase', 'CommCareCase-Deleted'):
return document_or_none.get('type', None)
elif type in all_known_formlike_doc_types():
return document_or_none.get('xmlns', None)
return None
| import json
from kafka import KeyedProducer
from kafka.common import KafkaUnavailableError
from casexml.apps.case.models import CommCareCase
from corehq.apps.change_feed import data_sources
from corehq.apps.change_feed.connection import get_kafka_client
from corehq.apps.change_feed.models import ChangeMeta
from corehq.apps.change_feed.topics import get_topic
from couchforms.models import all_known_formlike_doc_types
import logging
from pillowtop.checkpoints.manager import PillowCheckpoint, get_django_checkpoint_store
from pillowtop.couchdb import CachedCouchDB
from pillowtop.listener import PythonPillow
class ChangeFeedPillow(PythonPillow):
def __init__(self, couch_db, kafka, checkpoint):
super(ChangeFeedPillow, self).__init__(couch_db=couch_db, checkpoint=checkpoint)
self._kafka = kafka
self._producer = KeyedProducer(self._kafka)
def get_db_name(self):
return self.get_couch_db().dbname
def process_change(self, change, is_retry_attempt=False):
document_type = _get_document_type(change.document)
if document_type:
assert change.document is not None
change_meta = ChangeMeta(
document_id=change.id,
data_source_type=data_sources.COUCH,
data_source_name=self.get_db_name(),
document_type=document_type,
document_subtype=_get_document_subtype(change.document),
domain=change.document.get('domain', None),
is_deletion=change.deleted,
)
self._producer.send_messages(
bytes(get_topic(document_type)),
bytes(change_meta.domain),
bytes(json.dumps(change_meta.to_json())),
)
def get_default_couch_db_change_feed_pillow():
default_couch_db = CachedCouchDB(CommCareCase.get_db().uri, readonly=False)
try:
kafka_client = get_kafka_client()
except KafkaUnavailableError:
logging.warning('Ignoring missing kafka client during unit testing')
kafka_client = None
return ChangeFeedPillow(
couch_db=default_couch_db,
kafka=kafka_client,
checkpoint=PillowCheckpoint(get_django_checkpoint_store(), 'default-couch-change-feed')
)
def _get_document_type(document_or_none):
return document_or_none.get('doc_type', None) if document_or_none else None
def _get_document_subtype(document_or_none):
type = _get_document_type(document_or_none)
if type in ('CommCareCase', 'CommCareCase-Deleted'):
return document_or_none.get('type', None)
elif type in all_known_formlike_doc_types():
return document_or_none.get('xmlns', None)
return None
| Python | 0 |
41d6c18aee851c9b2430d74c51ef51b49948b0f4 | raise version | brilws/_version.py | brilws/_version.py | __version__ = "3.5.0"
| __version__ = "3.4.1"
| Python | 0 |
fadab627469d008a2bf39a9544a77a3bd6518b20 | use the local path in the gui to run stuff. | rp-mt-scripts-graphical.py | rp-mt-scripts-graphical.py | #! /usr/bin/env python
"""Main module to create GTK interface to the MT scripts."""
import os.path
import gtk
import gobject
import subprocess
class ScriptsWindow:
"""Class to manage the demo window for the pile manager."""
def __init__(self):
self.builder = gtk.Builder()
self.builder.add_from_file("rp-mt-scripts-interface.glade")
self.builder.connect_signals(self)
self.window = self.builder.get_object("winMain")
self.window.show()
self.filename = self.builder.get_object("filechooserbutton1")
## GTK+ Signal Handlers
def on_winMain_destroy(self, _widget, _callback_data=None):
"""Callback to exit mainloop on window destruction."""
print "destroy signal occurred"
gtk.main_quit()
## Automatic tab
def on_btnConfigInstall_clicked(self, _widget, _callback_data=None):
run(MTROOT+"./configure")
## Manual tab
def on_btnConfigureNoInstall_clicked(self, _widget, _callback_data=None):
run(MTROOT+"scripts/1_mt_scripts_configuration.sh")
def on_btnInstallTbeta_clicked(self, _widget, _callback_data=None):
run(MTROOT+"scripts/install_tbeta_1.1.sh")
def on_btnInstallPocoLocal_clicked(self, _widget, _callback_data=None):
run(MTROOT+"scripts/install_libpoco_workaround_local.sh")
def on_btnInstallPocoGlobal_clicked(self, _widget, _callback_data=None):
run(MTROOT+"scripts/install_libpoco_workaround_local.sh")
def on_btnInstallFlash_clicked(self, _widget, _callback_data=None):
run(MTROOT+"scripts/install_flashplayer_standalone.sh")
def on_btnInstallPyMT_clicked(self, _widget, _callback_data=None):
run(MTROOT+"scripts/install_pymt_hg.sh")
## Update tab
def on_btnUpdatePyMT_clicked(self, _widget, _callback_data=None):
run(MTROOT+"scripts/update_pymt_hg.sh")
## Run tab
def on_btnRunTbeta_clicked(self, _widget, _callback_data=None):
self.tbeta = run(MTROOT+"scripts/install_flashplayer_standalone.sh",
wait=False)
def on_btnRunFlashPlayer_clicked(self, _widget, _callback_data=None):
run(MTROOT+"scripts/run_flashplayer_standalone.sh")
def on_btnRunPyMT_clicked(self, _widget, _callback_data=None):
run(MTROOT+"scripts/run_pymt_hg_examples.sh")
def on_btnRunOtherPy_clicked(self, _widget, _callback_data=None):
run(MTROOT+"python "+self.filename.get_filename())
def main(self):
"""Start the GTK mainloop"""
gtk.main()
def run(cmdline, show_terminal=True, wait=True):
if show_terminal:
cmdline="gnome-terminal -x " + cmdline
process = subprocess.Popen(cmdline)
if wait:
return process.wait()
else:
return process
if __name__ == "__main__":
print __file__
# Assume we're located in MTROOT, and go from there.
MTROOT=os.path.dirname(__file__)
app = ScriptsWindow()
app.main()
| #! /usr/bin/env python
"""Main module to create GTK interface to the MT scripts."""
import gtk
import gobject
import subprocess
class ScriptsWindow:
"""Class to manage the demo window for the pile manager."""
def __init__(self):
self.builder = gtk.Builder()
self.builder.add_from_file("rp-mt-scripts-interface.glade")
self.builder.connect_signals(self)
self.window = self.builder.get_object("winMain")
self.window.show()
self.filename = self.builder.get_object("filechooserbutton1")
## GTK+ Signal Handlers
def on_winMain_destroy(self, _widget, _callback_data=None):
"""Callback to exit mainloop on window destruction."""
print "destroy signal occurred"
gtk.main_quit()
## Automatic tab
def on_btnConfigInstall_clicked(self, _widget, _callback_data=None):
run("./configure")
## Manual tab
def on_btnConfigureNoInstall_clicked(self, _widget, _callback_data=None):
run("scripts/1_mt_scripts_configuration.sh")
def on_btnInstallTbeta_clicked(self, _widget, _callback_data=None):
run("scripts/install_tbeta_1.1.sh")
def on_btnInstallPocoLocal_clicked(self, _widget, _callback_data=None):
run("scripts/install_libpoco_workaround_local.sh")
def on_btnInstallPocoGlobal_clicked(self, _widget, _callback_data=None):
run("scripts/install_libpoco_workaround_local.sh")
def on_btnInstallFlash_clicked(self, _widget, _callback_data=None):
run("scripts/install_flashplayer_standalone.sh")
def on_btnInstallPyMT_clicked(self, _widget, _callback_data=None):
run("scripts/install_pymt_hg.sh")
## Update tab
def on_btnUpdatePyMT_clicked(self, _widget, _callback_data=None):
run("scripts/update_pymt_hg.sh")
## Run tab
def on_btnRunTbeta_clicked(self, _widget, _callback_data=None):
self.tbeta = run("scripts/install_flashplayer_standalone.sh",
wait=False)
def on_btnRunFlashPlayer_clicked(self, _widget, _callback_data=None):
run("scripts/run_flashplayer_standalone.sh")
def on_btnRunPyMT_clicked(self, _widget, _callback_data=None):
run("scripts/run_pymt_hg_examples.sh")
def on_btnRunOtherPy_clicked(self, _widget, _callback_data=None):
run("python "+self.filename.get_filename())
def main(self):
"""Start the GTK mainloop"""
gtk.main()
def run(cmdline, show_terminal=True, wait=True):
if show_terminal:
cmdline="gnome-terminal -x " + cmdline
process = subprocess.Popen(cmdline)
if wait:
return process.wait()
else:
return process
if __name__ == "__main__":
print __file__
app = ScriptsWindow()
app.main()
| Python | 0 |
6e2362351d9ccaa46a5a2bc69c4360e4faff166d | Add encoding spec to comply Python 2 | iclib/qibla.py | iclib/qibla.py | # -*- coding: utf-8 -*-
from . import formula
def direction(lat, lng):
return formula.qibla(lat, lng)
def direction_dms(lat, lng):
return _dms(formula.qibla(lat, lng))
def direction_str(lat, lng, prec=0):
d, m, s = direction_dms(lat, lng)
# negative input might returns wrong result
return '{}° {}\' {:.{}f}"'.format(d, m, s, prec)
def _dms(deg):
seconds = deg * 3600
m, s = divmod(seconds, 60)
d, m = divmod(m, 60)
return (int(d), int(m), s)
| from . import formula
def direction(lat, lng):
return formula.qibla(lat, lng)
def direction_dms(lat, lng):
return _dms(formula.qibla(lat, lng))
def direction_str(lat, lng, prec=0):
d, m, s = direction_dms(lat, lng)
# negative input might returns wrong result
return '{}° {}\' {:.{}f}"'.format(d, m, s, prec)
def _dms(deg):
seconds = deg * 3600
m, s = divmod(seconds, 60)
d, m = divmod(m, 60)
return (int(d), int(m), s)
| Python | 0.000002 |
9f1913ca658228c2c6551b2c8de1d48ddd73c8aa | raise version to 2 | brilws/_version.py | brilws/_version.py | __version__ = "2.0.0"
| __version__ = "1.0.3"
| Python | 0.000001 |
97831652f0d06236d83d0731813ffcdc44a4e190 | Update pypi version | fontdump/__init__.py | fontdump/__init__.py | __version__ = '1.1.0' | __version__ = '0.1.0' | Python | 0 |
22461c6ddc1a6bff0ee8637139146b8531b3e0b4 | improve python error message when tp fails to start | python/perfetto/trace_processor/shell.py | python/perfetto/trace_processor/shell.py | #!/usr/bin/env python3
# Copyright (C) 2020 The Android Open Source Project
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
import subprocess
import sys
import time
from urllib import request, error
from perfetto.trace_processor.platform import PlatformDelegate
# Default port that trace_processor_shell runs on
TP_PORT = 9001
def load_shell(bin_path: str, unique_port: bool, verbose: bool,
ingest_ftrace_in_raw: bool, platform_delegate: PlatformDelegate):
addr, port = platform_delegate.get_bind_addr(
port=0 if unique_port else TP_PORT)
url = f'{addr}:{str(port)}'
shell_path = platform_delegate.get_shell_path(bin_path=bin_path)
if os.name == 'nt' and not shell_path.endswith('.exe'):
tp_exec = [sys.executable, shell_path]
else:
tp_exec = [shell_path]
args = ['-D', '--http-port', str(port)]
if not ingest_ftrace_in_raw:
args.append('--no-ftrace-raw')
p = subprocess.Popen(
tp_exec + args,
stdout=subprocess.DEVNULL,
stderr=None if verbose else subprocess.DEVNULL)
success = False
for i in range(3):
try:
if p.poll() is None:
_ = request.urlretrieve(f'http://{url}/status')
success = True
break
except error.URLError:
time.sleep(1)
if not success:
raise Exception(
"Trace processor failed to start. Try rerunning with "
"verbose=True in TraceProcessorConfig for more detailed "
"information and file a bug at https://goto.google.com/perfetto-bug "
"or https://github.com/google/perfetto/issues if necessary.")
return url, p
| #!/usr/bin/env python3
# Copyright (C) 2020 The Android Open Source Project
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
import subprocess
import sys
import time
from urllib import request, error
from perfetto.trace_processor.platform import PlatformDelegate
# Default port that trace_processor_shell runs on
TP_PORT = 9001
def load_shell(bin_path: str, unique_port: bool, verbose: bool,
ingest_ftrace_in_raw: bool, platform_delegate: PlatformDelegate):
addr, port = platform_delegate.get_bind_addr(
port=0 if unique_port else TP_PORT)
url = f'{addr}:{str(port)}'
shell_path = platform_delegate.get_shell_path(bin_path=bin_path)
if os.name == 'nt' and not shell_path.endswith('.exe'):
tp_exec = [sys.executable, shell_path]
else:
tp_exec = [shell_path]
args = ['-D', '--http-port', str(port)]
if not ingest_ftrace_in_raw:
args.append('--no-ftrace-raw')
p = subprocess.Popen(
tp_exec + args,
stdout=subprocess.DEVNULL,
stderr=None if verbose else subprocess.DEVNULL)
while True:
try:
if p.poll() != None:
if unique_port:
raise Exception(
"Random port allocation failed, please file a bug at https://goto.google.com/perfetto-bug"
)
raise Exception(
"Trace processor failed to start, please file a bug at https://goto.google.com/perfetto-bug"
)
_ = request.urlretrieve(f'http://{url}/status')
time.sleep(1)
break
except error.URLError:
pass
return url, p
| Python | 0.000001 |
c182e4f3d7df431fe5c542988fcef9f05825913c | Update the raw_parameter_script | examples/raw_parameter_script.py | examples/raw_parameter_script.py | """ The main purpose of this file is to demonstrate running SeleniumBase
scripts without the use of Pytest by calling the script directly
with Python or from a Python interactive interpreter. Based on
whether relative imports work or don't, the script can autodetect
how this file was run. With pure Python, it will initialize
all the variables that would've been automatically initialized
by the Pytest plugin. The setUp() and tearDown() methods are also
now called from the script itself.
One big advantage to running tests with Pytest is that most of this
is done for you automatically, with the option to update any of the
parameters through command line parsing. Pytest also provides you
with other plugins, such as ones for generating test reports,
handling multithreading, and parametrized tests. Depending on your
specific needs, you may need to call SeleniumBase commands without
using Pytest, and this example shows you how. """
try:
# Running with Pytest / (Finds test methods to run using autodiscovery)
# Example run command: "pytest raw_parameter_script.py"
from .my_first_test import MyTestClass # (relative imports work: ".~")
except (ImportError, ValueError):
# Running with pure Python OR from a Python interactive interpreter
# Example run command: "python raw_parameter_script.py"
from my_first_test import MyTestClass # (relative imports DON'T work)
sb = MyTestClass("test_basic")
sb.browser = "chrome"
sb.headless = False
sb.headed = False
sb.start_page = None
sb.servername = "localhost"
sb.port = 4444
sb.data = None
sb.environment = "test"
sb.user_agent = None
sb.extension_zip = None
sb.extension_dir = None
sb.database_env = "test"
sb.log_path = "latest_logs/"
sb.archive_logs = False
sb.disable_csp = False
sb.enable_sync = False
sb.visual_baseline = False
sb.maximize_option = False
sb.save_screenshot_after_test = False
sb.timeout_multiplier = None
sb.pytest_html_report = None
sb.report_on = False
sb.with_db_reporting = False
sb.with_s3_logging = False
sb.js_checking_on = False
sb.is_pytest = False
sb.demo_mode = False
sb.demo_sleep = 1
sb.message_duration = 2
sb.settings_file = None
sb.user_data_dir = None
sb.proxy_string = None
sb.ad_block_on = False
sb.highlights = None
sb.check_js = False
sb.cap_file = None
sb.setUp()
try:
sb.test_basic()
finally:
sb.tearDown()
del sb
| """ The main purpose of this file is to demonstrate running SeleniumBase
scripts without the use of Pytest by calling the script directly
with Python or from a Python interactive interpreter. Based on
whether relative imports work or don't, the script can autodetect
how this file was run. With pure Python, it will initialize
all the variables that would've been automatically initialized
by the Pytest plugin. The setUp() and tearDown() methods are also
now called from the script itself.
One big advantage to running tests with Pytest is that most of this
is done for you automatically, with the option to update any of the
parameters through command line parsing. Pytest also provides you
with other plugins, such as ones for generating test reports,
handling multithreading, and parametrized tests. Depending on your
specific needs, you may need to call SeleniumBase commands without
using Pytest, and this example shows you how. """
try:
# Running with Pytest / (Finds test methods to run using autodiscovery)
# Example run command: "pytest raw_parameter_script.py"
from .my_first_test import MyTestClass # (relative imports work: ".~")
except (ImportError, ValueError):
# Running with pure Python OR from a Python interactive interpreter
# Example run command: "python raw_parameter_script.py"
from my_first_test import MyTestClass # (relative imports DON'T work)
b = MyTestClass("test_basic")
b.browser = "chrome"
b.headless = False
b.headed = False
b.start_page = None
b.servername = "localhost"
b.port = 4444
b.data = None
b.environment = "test"
b.user_agent = None
b.extension_zip = None
b.extension_dir = None
b.database_env = "test"
b.log_path = "latest_logs/"
b.archive_logs = False
b.disable_csp = False
b.enable_sync = False
b.visual_baseline = False
b.maximize_option = False
b.save_screenshot_after_test = False
b.timeout_multiplier = None
b.pytest_html_report = None
b.report_on = False
b.with_db_reporting = False
b.with_s3_logging = False
b.js_checking_on = False
b.is_pytest = False
b.demo_mode = False
b.demo_sleep = 1
b.message_duration = 2
b.settings_file = None
b.user_data_dir = None
b.proxy_string = None
b.ad_block_on = False
b.highlights = None
b.check_js = False
b.cap_file = None
b.setUp()
try:
b.test_basic()
finally:
b.tearDown()
del b
| Python | 0.000193 |
a713bbb1226863b4417362019431de0266faa2d9 | Update automateprojectscript.py | automateprojectscript.py | automateprojectscript.py | #!/usr/bin/python
"""
This python file just runs all of the terminal commands needed to run the project. It just saves time not having to manually type in these commands every time you want to run the project.
At the moment it only works for the example project, as the project later develops this script might be updated if the other people in the team decide to use this.
This is a first version, next I might work on getting a seperate terminal open to run each robot in order for it to be easy to see the positions of each robot. At the moment, since only 1 terminal is used, all of the output is put in it (which of course makes it messy)
To run the script, simply open up a terminal and type: python automateprojectscript.py
Author: ttho618
"""
import os
from subprocess import Popen, PIPE, signal
from os.path import join
findRoscorePro = Popen("pgrep roscore", stdout=PIPE, shell=True)
killroscorePro = Popen("kill "+findRoscorePro.communicate()[0], shell=True)
# The world file to look for
lookfor = "myworld.world"
# I assume that the project on your computer is located within the home directory
for root, dirs, files in os.walk('/home', topdown=True):
#print "searching", root
if '.local' in dirs:
dirs.remove('.local')
if 'catkin_ws' in dirs: # If the project is within this directory, then you need to change this to rosbuild_ws
dirs.remove('catkin_ws')
if lookfor in files:
print "found: %s" % join(root, lookfor)
worldfile = join(root, lookfor)
print worldfile
# This would need to be changed if your project is named something different
rosmakePro= Popen('rosmake se306Project',shell=True)
rosmakePro.communicate() # Waits until rosmake has finished
core = Popen('roscore',shell=True)
stagePro = Popen('rosrun stage stageros %s' %worldfile,shell=True)
# These below lines would need to be changed to fit what you are wanting to run.
runNode= Popen('rosrun se306Project R0',shell=True)
runNode= Popen('rosrun se306Project R1',shell=True)
| #!/usr/bin/python
"""
This python file just runs all of the terminal commands needed to run the project. It just saves time not having to manually type in these commands every time you want to run the project.
At the moment it only works for the example project, as the project later develops this script might be updated if the other people in the team decide to use this.
This is a first version, next I might work on getting a seperate terminal open to run each robot in order for it to be easy to see the positions of each robot. At the moment, since only 1 terminal is used, all of the output is put in it (which of course makes it messy)
To run the script, simply open up a terminal and type: python automateprojectscript.py
Author: ttho618
"""
import os
from subprocess import Popen, PIPE, signal
from os.path import join
findRoscorePro = Popen("pgrep roscore", stdout=PIPE, shell=True)
killroscorePro = Popen("kill "+findRoscorePro.communicate()[0], shell=True)
# The world file to look for
lookfor = "myworld.world"
# I assume that the project on your computer is located within the home directory
for root, dirs, files in os.walk('/home', topdown=True):
#print "searching", root
if '.local' in dirs:
dirs.remove('.local')
if 'catkin_ws' in dirs: # If the project is within this directory, then you need to change this to rosbuild_ws
dirs.remove('catkin_ws')
if lookfor in files:
print "found: %s" % join(root, lookfor)
worldfile = join(root, lookfor)
print worldfile
core = Popen('roscore',shell=True)
stagePro = Popen('rosrun stage stageros %s' %worldfile,shell=True)
# These below lines would need to be changed to fit what you are wanting to run.
runNode= Popen('rosrun se306Project R0',shell=True)
runNode= Popen('rosrun se306Project R1',shell=True)
| Python | 0 |
252d4212e7952db3d36e0324ba237cc109d62279 | Replace . by _ in signal and entity names. | src/dynamic_graph/sot/core/feature_position.py | src/dynamic_graph/sot/core/feature_position.py | # -*- coding: utf-8 -*-
# Copyright 2011, Florent Lamiraux, Thomas Moulard, JRL, CNRS/AIST
#
# This file is part of dynamic-graph.
# dynamic-graph is free software: you can redistribute it and/or
# modify it under the terms of the GNU Lesser General Public License
# as published by the Free Software Foundation, either version 3 of
# the License, or (at your option) any later version.
#
# dynamic-graph is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# General Lesser Public License for more details. You should have
# received a copy of the GNU Lesser General Public License along with
# dynamic-graph. If not, see <http://www.gnu.org/licenses/>.
from dynamic_graph.sot.core import FeaturePoint6d
from dynamic_graph import plug
from dynamic_graph.entity import Entity
class FeaturePosition (Entity):
"""
Position of a rigid-body in space as a feature
Input:
a string: name of the feature,
a signal<MatrixHomo>: defining the value of the feature,
a signal<Matrix>: defining the jacobian of the feature with respect
to the robot configuration,
an homogeneous matrix: defining the reference value of the feature.
Members containing a signal:
position: position input signal (MatrixHomo),
reference: reference position input signal (MatrixHomo),
Jq: Jacobian input signal (Matrix),
select: selection flag "RzRyRxTzTyTx" (string).
"""
signalMap = dict()
def __init__(self, name, signalPosition=None, signalJacobian = None,
referencePosition = None):
self._feature = FeaturePoint6d(name)
self.obj = self._feature.obj
self._reference = FeaturePoint6d(name + '_ref')
if referencePosition:
self._reference.signal('position').value = tuple(referencePosition)
if signalPosition:
plug(signalPosition, self._feature.signal('position'))
if signalJacobian:
plug(signalJacobian, self._feature.signal('Jq'))
self._feature.signal('sdes').value = self._reference
self._feature.signal('selec').value = '111111'
self._feature.frame('current')
# Signals stored in members
self.position = self._feature.signal('position')
self.reference = self._reference.signal('position')
self.Jq = self._feature.signal('Jq')
self.error = self._feature.signal('error')
self.select = self._feature.signal('selec')
self.signalMap = {'position':self.position,
'reference':self.reference,
'Jq':self.Jq,
'error':self.error,
'selec':self.select}
@property
def name(self) :
return self._feature.name
def signal (self, name):
"""
Get a signal of the entity from signal name
"""
if name in self.signalMap.keys():
return self.signalMap[name]
else:
raise RunTimeError('No signal with this name')
def signals(self) :
"""
Return the list of signals
"""
return self.signalMap.values()
def commands(self):
"""
Return the list of commands.
"""
return self._feature.commands()
def frame(self, f):
return self._feature.frame(f)
| # -*- coding: utf-8 -*-
# Copyright 2011, Florent Lamiraux, Thomas Moulard, JRL, CNRS/AIST
#
# This file is part of dynamic-graph.
# dynamic-graph is free software: you can redistribute it and/or
# modify it under the terms of the GNU Lesser General Public License
# as published by the Free Software Foundation, either version 3 of
# the License, or (at your option) any later version.
#
# dynamic-graph is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# General Lesser Public License for more details. You should have
# received a copy of the GNU Lesser General Public License along with
# dynamic-graph. If not, see <http://www.gnu.org/licenses/>.
from dynamic_graph.sot.core import FeaturePoint6d
from dynamic_graph import plug
from dynamic_graph.entity import Entity
class FeaturePosition (Entity):
"""
Position of a rigid-body in space as a feature
Input:
a string: name of the feature,
a signal<MatrixHomo>: defining the value of the feature,
a signal<Matrix>: defining the jacobian of the feature with respect
to the robot configuration,
an homogeneous matrix: defining the reference value of the feature.
Members containing a signal:
position: position input signal (MatrixHomo),
reference: reference position input signal (MatrixHomo),
Jq: Jacobian input signal (Matrix),
select: selection flag "RzRyRxTzTyTx" (string).
"""
signalMap = dict()
def __init__(self, name, signalPosition=None, signalJacobian = None,
referencePosition = None):
self._feature = FeaturePoint6d(name)
self.obj = self._feature.obj
self._reference = FeaturePoint6d(name + '.ref')
if referencePosition:
self._reference.signal('position').value = tuple(referencePosition)
if signalPosition:
plug(signalPosition, self._feature.signal('position'))
if signalJacobian:
plug(signalJacobian, self._feature.signal('Jq'))
self._feature.signal('sdes').value = self._reference
self._feature.signal('selec').value = '111111'
self._feature.frame('current')
# Signals stored in members
self.position = self._feature.signal('position')
self.reference = self._reference.signal('position')
self.Jq = self._feature.signal('Jq')
self.error = self._feature.signal('error')
self.select = self._feature.signal('selec')
self.signalMap = {'position':self.position,
'reference':self.reference,
'Jq':self.Jq,
'error':self.error,
'selec':self.select}
@property
def name(self) :
return self._feature.name
def signal (self, name):
"""
Get a signal of the entity from signal name
"""
if name in self.signalMap.keys():
return self.signalMap[name]
else:
raise RunTimeError('No signal with this name')
def signals(self) :
"""
Return the list of signals
"""
return self.signalMap.values()
def commands(self):
"""
Return the list of commands.
"""
return self._feature.commands()
def frame(self, f):
return self._feature.frame(f)
| Python | 0.000027 |
ae9b94f28b3677be2867bfffb9e1dcec8851aaa0 | Fix typo in example usage for extract_variable.py script. | prompt_tuning/scripts/extract_variable.py | prompt_tuning/scripts/extract_variable.py | # Copyright 2022 Google.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
r"""Extract a variable from a t5x checkpoint and save it as a numpy file.
Example usage:
python -m prompt_tuning.scripts.extract_variable \
--checkpoint_dir=/path/to/t5x/checkpoint_step \
--variable_path=target/encoder/prompt/prompt/prompt \
--restore_dtype=float32 \
--output_path=/path/to/save/prompt.npy
"""
import os
import re
from typing import Mapping, Any, Sequence
from absl import app
from absl import flags
from absl import logging
import jax.numpy as jnp
import numpy as np
from t5x import checkpoints
from tensorflow.io import gfile
FLAGS = flags.FLAGS
flags.DEFINE_string(
"checkpoint_dir", None, "The path to the t5x checkpoint directory")
flags.DEFINE_string(
"variable_path",
None,
"The path to the variable in the checkpoint tree, using `/` for scoping. "
"Leading `/` or `/target` is optional.")
flags.DEFINE_enum(
"restore_dtype",
"float32",
["float32", "bfloat16"],
"The data type to use when restoring the variable.")
flags.DEFINE_string(
"output_path",
None,
"The path to where the numpy variable should be saved.")
flags.mark_flag_as_required("checkpoint_dir")
flags.mark_flag_as_required("variable_path")
flags.mark_flag_as_required("output_path")
def normalize_variable_path(path: str, sep: str = "/") -> str:
"""Make sure path starts with `target/`."""
# TODO: enable saving all variables within a scope if the path
# ends in the separator.
path = path.strip(sep)
path = re.sub(r"^target/", "", path)
return f"target/{path}"
def extract_nested_key(
nested_key: str, blob: Mapping[str, Any], sep: str = "/") -> Any:
"""Extract a key nested dicts using a scoping separator."""
# TODO: Add nicer error handling that shows where in the nested
# dicts your key lookup fails.
for key in nested_key.split(sep):
blob = blob[key]
return blob
def save_variable(output_path: str, variable: np.ndarray):
"""Save variable at output path using numpy."""
dir_name = os.path.dirname(output_path)
if not gfile.exists(dir_name):
gfile.makedirs(dir_name)
with gfile.GFile(output_path, "wb") as wf:
np.save(wf, variable)
def main(argv: Sequence[str]):
"""Extract a numpy value from a t5x checkpoint."""
if len(argv) > 1:
raise app.UsageError("Too many command-line-arguments.")
restore_dtype = jnp.dtype(FLAGS.restore_dtype)
checkpoint = checkpoints.load_t5x_checkpoint(
FLAGS.checkpoint_dir,
restore_dtype=restore_dtype,
lazy_parameters=True)
logging.info("Reading variables from %s as dtype=%s",
FLAGS.checkpoint_dir,
restore_dtype)
variable_path = normalize_variable_path(FLAGS.variable_path)
logging.info("Extracting variable found at %s", variable_path)
variable = extract_nested_key(variable_path, checkpoint)
variable = variable.get()
logging.info("Read variable with shape %s", variable.shape)
logging.info("Saving variable to %s", FLAGS.output_path)
save_variable(FLAGS.output_path, variable)
if __name__ == "__main__":
app.run(main)
| # Copyright 2022 Google.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
r"""Extract a variable from a t5x checkpoint and save it as a numpy file.
Example usage:
python -m prompt_tuning.scripts.extract_variable \
--checkpoint_dir=/path/to/t5x/checkpoint_step \
--variable_path=target/encoder/prompt/prompt/prompt \
--restore_type=float32 \
--output_path=/path/to/save/prompt.npy
"""
import os
import re
from typing import Mapping, Any, Sequence
from absl import app
from absl import flags
from absl import logging
import jax.numpy as jnp
import numpy as np
from t5x import checkpoints
from tensorflow.io import gfile
FLAGS = flags.FLAGS
flags.DEFINE_string(
"checkpoint_dir", None, "The path to the t5x checkpoint directory")
flags.DEFINE_string(
"variable_path",
None,
"The path to the variable in the checkpoint tree, using `/` for scoping. "
"Leading `/` or `/target` is optional.")
flags.DEFINE_enum(
"restore_dtype",
"float32",
["float32", "bfloat16"],
"The data type to use when restoring the variable.")
flags.DEFINE_string(
"output_path",
None,
"The path to where the numpy variable should be saved.")
flags.mark_flag_as_required("checkpoint_dir")
flags.mark_flag_as_required("variable_path")
flags.mark_flag_as_required("output_path")
def normalize_variable_path(path: str, sep: str = "/") -> str:
"""Make sure path starts with `target/`."""
# TODO: enable saving all variables within a scope if the path
# ends in the separator.
path = path.strip(sep)
path = re.sub(r"^target/", "", path)
return f"target/{path}"
def extract_nested_key(
nested_key: str, blob: Mapping[str, Any], sep: str = "/") -> Any:
"""Extract a key nested dicts using a scoping separator."""
# TODO: Add nicer error handling that shows where in the nested
# dicts your key lookup fails.
for key in nested_key.split(sep):
blob = blob[key]
return blob
def save_variable(output_path: str, variable: np.ndarray):
"""Save variable at output path using numpy."""
dir_name = os.path.dirname(output_path)
if not gfile.exists(dir_name):
gfile.makedirs(dir_name)
with gfile.GFile(output_path, "wb") as wf:
np.save(wf, variable)
def main(argv: Sequence[str]):
"""Extract a numpy value from a t5x checkpoint."""
if len(argv) > 1:
raise app.UsageError("Too many command-line-arguments.")
restore_dtype = jnp.dtype(FLAGS.restore_dtype)
checkpoint = checkpoints.load_t5x_checkpoint(
FLAGS.checkpoint_dir,
restore_dtype=restore_dtype,
lazy_parameters=True)
logging.info("Reading variables from %s as dtype=%s",
FLAGS.checkpoint_dir,
restore_dtype)
variable_path = normalize_variable_path(FLAGS.variable_path)
logging.info("Extracting variable found at %s", variable_path)
variable = extract_nested_key(variable_path, checkpoint)
variable = variable.get()
logging.info("Read variable with shape %s", variable.shape)
logging.info("Saving variable to %s", FLAGS.output_path)
save_variable(FLAGS.output_path, variable)
if __name__ == "__main__":
app.run(main)
| Python | 0.99978 |
3f1d30c2aeff73bb4863f2d0fd0660a264715739 | Tidy up | src/planner.py | src/planner.py | from collections import deque
class GamePlan(object):
"""
initialise the tournament object with an overall list of players' IDs
input:
a list of players
output:
a list (len = number of rounds) of lists of tuples
with players' names (maybe change to IDs from db) in white, black order
GamePlans with odd number of players have each person sitting out
Created as a tuple with ('_BYE', 'real player')
Template needs to check for '_BYE' in each tuple and
"""
def __init__(self, players):
self.players = list(players)
def berger_robin(self, players):
"""
Input:
array of player names/ids
Returns:
tournament - an array of hashmaps,
each containing matches and bye for the round
taken from
https://en.wikipedia.org/wiki/Round-robin_tournament#Scheduling_algorithm
"""
number_of_players = len(players)
shift = number_of_players / 2
last = players.pop()
pl_deque = deque(players)
tournament = []
for stage in xrange(number_of_players - 1):
round_dict = {'matches': [], 'bye': "__NONE"}
if last == '_BYE':
round_dict['bye'] = pl_deque[0]
else:
if stage % 2 == 0:
round_dict['matches'].append((last, pl_deque[0]))
else:
round_dict['matches'].append((pl_deque[0], last))
other_games = [(pl_deque[idx], pl_deque[idx + 1])
for idx in xrange(1, (len(pl_deque) - 1), 2)]
round_dict['matches'] += other_games
tournament.append(round_dict)
pl_deque.rotate(shift) # for the next for-loop iteration
return tournament
def generate(self):
players = self.players
if len(players) % 2 == 1:
players.append('_BYE')
return self.berger_robin(players)
| from collections import deque
class GamePlan(object):
"""
initialise the tournament object with an overall list of players' IDs
input:
a list of players
output:
a list (len = number of rounds) of lists of tuples
with players' names (maybe change to IDs from db) in white, black order
GamePlans with odd number of players have each person sitting out
Created as a tuple with ('_BYE', 'real player')
Template needs to check for '_BYE' in each tuple and
"""
def __init__(self, players):
self.players = list(players)
def berger_robin(self, players):
"""
Input:
array of player names/ids
Returns:
tournament - an array of hashmaps,
each containing matches and bye for the round
taken from
https://en.wikipedia.org/wiki/Round-robin_tournament#Scheduling_algorithm
"""
number_of_players = len(players)
shift = number_of_players / 2
last = players.pop()
pl_deque = deque(players)
tournament = []
for x in xrange(number_of_players - 1):
round_dict = {'matches': [], 'bye': "__NONE"}
if last == '_BYE':
round_dict['bye'] = pl_deque[0]
else:
if x % 2 == 0:
round_dict['matches'].append((last, pl_deque[0]))
else:
round_dict['matches'].append((pl_deque[0], last))
other_games = [(pl_deque[idx], pl_deque[idx + 1])
for idx in xrange(1, (len(pl_deque) - 1), 2)]
round_dict['matches'] += other_games
tournament.append(round_dict)
pl_deque.rotate(shift) # for the next for-loop iteration
return tournament
def generate(self):
if len(self.players) % 2 == 0:
players = self.players
else:
players = self.players
players.append('_BYE')
return self.berger_robin(players)
| Python | 0.000001 |
856171e4933b872b1537945d3e6033da4313a1cb | enable gzip in django | ses_maker/settings.py | ses_maker/settings.py | """
Django settings for ses_maker project.
Generated by 'django-admin startproject' using Django 1.10.3.
For more information on this file, see
https://docs.djangoproject.com/en/1.10/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/1.10/ref/settings/
"""
import os
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/1.10/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
SECRET_KEY = ')h0rgw)fug4%m70t73cqyxc80v1fv8lc4%20e^vm-m3qua=k@r'
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True
ALLOWED_HOSTS = ['*']
# Application definition
INSTALLED_APPS = [
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
'canteen',
]
MIDDLEWARE = [
'django.middleware.gzip.GZipMiddleware',
'django.middleware.security.SecurityMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
]
ROOT_URLCONF = 'ses_maker.urls'
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': [],
'APP_DIRS': True,
'OPTIONS': {
'context_processors': [
'django.template.context_processors.debug',
'django.template.context_processors.request',
'django.contrib.messages.context_processors.messages',
],
},
},
]
WSGI_APPLICATION = 'ses_maker.wsgi.application'
# Database
# https://docs.djangoproject.com/en/1.10/ref/settings/#databases
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.sqlite3',
'NAME': os.path.join(BASE_DIR, 'db.sqlite3'),
}
}
# Internationalization
# https://docs.djangoproject.com/en/1.10/topics/i18n/
LANGUAGE_CODE = 'zh-hans'
TIME_ZONE = 'Asia/Shanghai'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/1.10/howto/static-files/
STATIC_URL = '/static/'
STATIC_ROOT = os.path.join(BASE_DIR, 'staticfiles')
LOGGING = {
'version': 1,
'disable_existing_loggers': False,
'handlers': {
'console': {
'class': 'logging.StreamHandler',
},
},
'loggers': {
'django': {
'handlers': ['console'],
'level': 'DEBUG',
},
},
}
# For easier local development
if 'POSTGRESQL_PASSWORD' in os.environ:
STATICFILES_STORAGE = 'whitenoise.django.GzipManifestStaticFilesStorage'
DATABASES['default'] = {
'ENGINE': 'django.db.backends.postgresql',
'NAME': os.environ['POSTGRESQL_INSTANCE_NAME'],
'USER': os.environ['POSTGRESQL_USERNAME'],
'PASSWORD': os.environ['POSTGRESQL_PASSWORD'],
'HOST': os.environ['POSTGRESQL_PORT_5432_TCP_ADDR'],
'PORT': os.environ['POSTGRESQL_PORT_5432_TCP_PORT']
}
DEBUG = False
| """
Django settings for ses_maker project.
Generated by 'django-admin startproject' using Django 1.10.3.
For more information on this file, see
https://docs.djangoproject.com/en/1.10/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/1.10/ref/settings/
"""
import os
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/1.10/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
SECRET_KEY = ')h0rgw)fug4%m70t73cqyxc80v1fv8lc4%20e^vm-m3qua=k@r'
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True
ALLOWED_HOSTS = ['*']
# Application definition
INSTALLED_APPS = [
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
'canteen',
]
MIDDLEWARE = [
'django.middleware.security.SecurityMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
]
ROOT_URLCONF = 'ses_maker.urls'
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': [],
'APP_DIRS': True,
'OPTIONS': {
'context_processors': [
'django.template.context_processors.debug',
'django.template.context_processors.request',
'django.contrib.messages.context_processors.messages',
],
},
},
]
WSGI_APPLICATION = 'ses_maker.wsgi.application'
# Database
# https://docs.djangoproject.com/en/1.10/ref/settings/#databases
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.sqlite3',
'NAME': os.path.join(BASE_DIR, 'db.sqlite3'),
}
}
# Internationalization
# https://docs.djangoproject.com/en/1.10/topics/i18n/
LANGUAGE_CODE = 'zh-hans'
TIME_ZONE = 'Asia/Shanghai'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/1.10/howto/static-files/
STATIC_URL = '/static/'
STATIC_ROOT = os.path.join(BASE_DIR, 'staticfiles')
LOGGING = {
'version': 1,
'disable_existing_loggers': False,
'handlers': {
'console': {
'class': 'logging.StreamHandler',
},
},
'loggers': {
'django': {
'handlers': ['console'],
'level': 'DEBUG',
},
},
}
# For easier local development
if 'POSTGRESQL_PASSWORD' in os.environ:
STATICFILES_STORAGE = 'whitenoise.django.GzipManifestStaticFilesStorage'
DATABASES['default'] = {
'ENGINE': 'django.db.backends.postgresql',
'NAME': os.environ['POSTGRESQL_INSTANCE_NAME'],
'USER': os.environ['POSTGRESQL_USERNAME'],
'PASSWORD': os.environ['POSTGRESQL_PASSWORD'],
'HOST': os.environ['POSTGRESQL_PORT_5432_TCP_ADDR'],
'PORT': os.environ['POSTGRESQL_PORT_5432_TCP_PORT']
}
DEBUG = False
| Python | 0.000001 |
65783ec0baac5886232a5334905a748750b3c0c2 | fix NameError | sfa/methods/Update.py | sfa/methods/Update.py | ### $Id: update.py 16477 2010-01-05 16:31:37Z thierry $
### $URL: https://svn.planet-lab.org/svn/sfa/trunk/sfa/methods/update.py $
import time
from sfa.util.faults import *
from sfa.util.method import Method
from sfa.util.parameter import Parameter, Mixed
from sfa.trust.credential import Credential
class Update(Method):
"""
Update an object in the registry. Currently, this only updates the
PLC information associated with the record. The SFA fields (name, type,
GID) are fixed.
@param cred credential string specifying rights of the caller
@param record a record dictionary to be updated
@return 1 if successful, faults otherwise
"""
interfaces = ['registry']
accepts = [
Parameter(dict, "Record dictionary to be updated"),
Parameter(str, "Credential string"),
]
returns = Parameter(int, "1 if successful")
def call(self, record_dict, creds):
# validate the cred
valid_creds = self.api.auth.checkCredentials(creds, "update")
# verify permissions
api.auth.verify_object_permission(record_dict.get('hrn', ''))
# log
origin_hrn = Credential(string=valid_creds[0]).get_gid_caller().get_hrn()
self.api.logger.info("interface: %s\tcaller-hrn: %s\ttarget-hrn: %s\tmethod-name: %s"%(self.api.interface, origin_hrn, None, self.name))
manager = self.api.get_interface_manager()
return manager.update(self.api, record_dict)
| ### $Id: update.py 16477 2010-01-05 16:31:37Z thierry $
### $URL: https://svn.planet-lab.org/svn/sfa/trunk/sfa/methods/update.py $
import time
from sfa.util.faults import *
from sfa.util.method import Method
from sfa.util.parameter import Parameter, Mixed
from sfa.trust.credential import Credential
class Update(Method):
"""
Update an object in the registry. Currently, this only updates the
PLC information associated with the record. The SFA fields (name, type,
GID) are fixed.
@param cred credential string specifying rights of the caller
@param record a record dictionary to be updated
@return 1 if successful, faults otherwise
"""
interfaces = ['registry']
accepts = [
Parameter(dict, "Record dictionary to be updated"),
Parameter(str, "Credential string"),
]
returns = Parameter(int, "1 if successful")
def call(self, record_dict, creds):
# validate the cred
valid_creds = self.api.auth.checkCredentials(creds, "update")
# verify permissions
api.auth.verify_object_permission(record.get('hrn', ''))
# log
origin_hrn = Credential(string=valid_creds[0]).get_gid_caller().get_hrn()
self.api.logger.info("interface: %s\tcaller-hrn: %s\ttarget-hrn: %s\tmethod-name: %s"%(self.api.interface, origin_hrn, None, self.name))
manager = self.api.get_interface_manager()
return manager.update(self.api, record_dict)
| Python | 0.000003 |
6a582b6e2fa852d6a80268c7ddd305d45416c8ef | Fix YUM and DNF usage. | hotness/repository.py | hotness/repository.py | import logging
import subprocess
import os
import ConfigParser
from six import StringIO
from hotness.cache import cache
log = logging.getLogger('fedmsg')
thn_section = 'thn'
class ThnConfigParser(ConfigParser.ConfigParser):
def read(self, filename):
try:
text = open(filename).read()
except IOError:
pass
else:
section = "[%s]\n" % thn_section
file = StringIO(section + text)
self.readfp(file, filename)
def get_pkg_manager():
release_file = '/etc/os-release'
config = ThnConfigParser()
config.read(release_file)
name = config.get(thn_section, 'ID')
if name == 'fedora':
return 'dnf'
else:
return 'yum'
def get_version(package_name, yumconfig):
nvr_dict = build_nvr_dict(yumconfig)
try:
version = nvr_dict[package_name]
except KeyError:
log.warn("Did not find %r in nvr_dict, forcing refresh" % package_name)
force_cache_refresh(yumconfig)
nvr_dict = build_nvr_dict(yumconfig)
# This might still fail.. but we did the best we could.
version = nvr_dict[package_name]
return version
def force_cache_refresh(yumconfig):
# First, invalidate our in-memory cache of the results
cache.invalidate(hard=True)
# But also ask yum/dnf to kill its on-disk cache
cmdline = [os.path.join("/usr/bin", get_pkg_manager()),
"--config", yumconfig,
"clean",
"all"]
log.info("Running %r" % ' '.join(cmdline))
cleanall = subprocess.Popen(cmdline, stdout=subprocess.PIPE)
(stdout, stderr) = cleanall.communicate()
if stderr:
log.warn(stderr)
log.debug("Done with cache cleaning.")
@cache.cache_on_arguments()
def build_nvr_dict(yumconfig):
pkg_manager = get_pkg_manager()
cmdline = []
if pkg_manager == 'yum':
cmdline = ["/usr/bin/repoquery"]
else:
cmdline = [os.path.join("/usr/bin", pkg_manager),
"repoquery"]
cmdline.extend(["--config", yumconfig,
"--quiet",
#"--archlist=src",
"--all",
"--qf",
"%{name}\t%{version}\t%{release}"])
log.info("Running %r" % ' '.join(cmdline))
repoquery = subprocess.Popen(cmdline, stdout=subprocess.PIPE)
(stdout, stderr) = repoquery.communicate()
log.debug("Done with repoquery.")
if stderr:
log.warn(stderr)
new_nvr_dict = {}
for line in stdout.split("\n"):
line = line.strip()
if line:
name, version, release = line.split("\t")
new_nvr_dict[name] = (version, release)
log.info("Rebuilt nvr_dict with %r entries" % len(new_nvr_dict))
return new_nvr_dict
| import logging
import subprocess
import os
import ConfigParser
from six import StringIO
from hotness.cache import cache
log = logging.getLogger('fedmsg')
thn_section = 'thn'
class ThnConfigParser(ConfigParser.ConfigParser):
def read(self, filename):
try:
text = open(filename).read()
except IOError:
pass
else:
section = "[%s]\n" % thn_section
file = StringIO.StringIO(section + text)
self.readfp(file, filename)
def get_pkg_manager():
release_file = '/etc/os-release'
config = ThnConfigParser()
config.read(release_file)
name = config.get(thn_section, 'ID')
if name == 'fedora':
return 'dnf'
else:
return 'yum'
def get_version(package_name, yumconfig):
nvr_dict = build_nvr_dict(yumconfig)
try:
version = nvr_dict[package_name]
except KeyError:
log.warn("Did not find %r in nvr_dict, forcing refresh" % package_name)
force_cache_refresh(yumconfig)
nvr_dict = build_nvr_dict(yumconfig)
# This might still fail.. but we did the best we could.
version = nvr_dict[package_name]
return version
def force_cache_refresh(yumconfig):
# First, invalidate our in-memory cache of the results
cache.invalidate(hard=True)
# But also ask yum/dnf to kill its on-disk cache
pkg_manager = get_pkg_manager()
cmdline = [os.path.join("/usr/bin", pkg_manager),
"--config", yumconfig,
"clean",
"all"]
log.info("Running %r" % ' '.join(cmdline))
cleanall = subprocess.Popen(cmdline, stdout=subprocess.PIPE)
(stdout, stderr) = cleanall.communicate()
if stderr:
log.warn(stderr)
log.debug("Done with cache cleaning.")
@cache.cache_on_arguments()
def build_nvr_dict(yumconfig):
pkg_manager = get_pkg_manager()
cmdline = []
if pkg_manager == 'yum':
cmdline.append("/usr/bin/repoquery")
else:
cmdline.append("/usr/bin/dnf",
"repoquery")
cmdline.append("--config", yumconfig,
"--quiet",
#"--archlist=src",
"--all",
"--qf",
"%{name}\t%{version}\t%{release}")
log.info("Running %r" % ' '.join(cmdline))
repoquery = subprocess.Popen(cmdline, stdout=subprocess.PIPE)
(stdout, stderr) = repoquery.communicate()
log.debug("Done with repoquery.")
if stderr:
log.warn(stderr)
new_nvr_dict = {}
for line in stdout.split("\n"):
line = line.strip()
if line:
name, version, release = line.split("\t")
new_nvr_dict[name] = (version, release)
log.info("Rebuilt nvr_dict with %r entries" % len(new_nvr_dict))
return new_nvr_dict
| Python | 0 |
d307b65f8bf5f9ae8eaaefa071fd2055304a6725 | Remove custom form from admin. | saskatoon/harvest/admin.py | saskatoon/harvest/admin.py | #!/usr/bin/env python
# -*- coding: utf-8 -*-
from django.contrib import admin
from forms import RFPForm, PropertyForm, HarvestForm, HarvestYieldForm, EquipmentForm
from member.models import *
from harvest.models import *
from harvest.forms import *
class PropertyInline(admin.TabularInline):
model = Property
extra = 0
class PersonInline(admin.TabularInline):
model = RequestForParticipation
verbose_name = "Cueilleurs pour cette récolte"
verbose_name_plural = "Cueilleurs pour cette récolte"
form = RFPForm
exclude = ['creation_date', 'confirmation_date']
extra = 3
class OrganizationAdmin(admin.ModelAdmin):
inlines = [
PropertyInline,
]
search_fields = ['name', 'description']
class HarvestYieldInline(admin.TabularInline):
model = HarvestYield
form = HarvestYieldForm
class HarvestAdmin(admin.ModelAdmin):
#form = HarvestForm
inlines = (PersonInline, HarvestYieldInline)
class RequestForParticipationAdmin(admin.ModelAdmin):
form = RFPForm
class EquipmentAdmin(admin.ModelAdmin):
form = EquipmentForm
class PropertyImageInline(admin.TabularInline):
model = PropertyImage
extra = 3
class PropertyAdmin(admin.ModelAdmin):
model = Property
inlines = [ PropertyImageInline, ]
form = PropertyForm
admin.site.register(Property, PropertyAdmin)
admin.site.register(Harvest, HarvestAdmin)
admin.site.register(RequestForParticipation, RequestForParticipationAdmin)
admin.site.register(TreeType)
admin.site.register(Equipment, EquipmentAdmin)
admin.site.register(EquipmentType)
admin.site.register(HarvestYield)
admin.site.register(Comment)
admin.site.register(Actor)
admin.site.register(Language)
admin.site.register(Person)
admin.site.register(Organization)
admin.site.register(Neighborhood)
admin.site.register(City)
admin.site.register(State)
admin.site.register(Country)
admin.site.register(PropertyImage)
| #!/usr/bin/env python
# -*- coding: utf-8 -*-
from django.contrib import admin
from forms import RFPForm, PropertyForm, HarvestForm, HarvestYieldForm, EquipmentForm
from member.models import *
from harvest.models import *
from harvest.forms import *
class PropertyInline(admin.TabularInline):
model = Property
extra = 0
class PersonInline(admin.TabularInline):
model = RequestForParticipation
verbose_name = "Cueilleurs pour cette récolte"
verbose_name_plural = "Cueilleurs pour cette récolte"
form = RFPForm
exclude = ['creation_date', 'confirmation_date']
extra = 3
class OrganizationAdmin(admin.ModelAdmin):
inlines = [
PropertyInline,
]
search_fields = ['name', 'description']
class HarvestYieldInline(admin.TabularInline):
model = HarvestYield
form = HarvestYieldForm
class HarvestAdmin(admin.ModelAdmin):
form = HarvestForm
inlines = (PersonInline, HarvestYieldInline)
class RequestForParticipationAdmin(admin.ModelAdmin):
form = RFPForm
class EquipmentAdmin(admin.ModelAdmin):
form = EquipmentForm
class PropertyImageInline(admin.TabularInline):
model = PropertyImage
extra = 3
class PropertyAdmin(admin.ModelAdmin):
model = Property
inlines = [ PropertyImageInline, ]
form = PropertyForm
admin.site.register(Property, PropertyAdmin)
admin.site.register(Harvest, HarvestAdmin)
admin.site.register(RequestForParticipation, RequestForParticipationAdmin)
admin.site.register(TreeType)
admin.site.register(Equipment, EquipmentAdmin)
admin.site.register(EquipmentType)
admin.site.register(HarvestYield)
admin.site.register(Comment)
admin.site.register(Actor)
admin.site.register(Language)
admin.site.register(Person)
admin.site.register(Organization)
admin.site.register(Neighborhood)
admin.site.register(City)
admin.site.register(State)
admin.site.register(Country)
admin.site.register(PropertyImage)
| Python | 0 |
5bb92bea9d910c788efa3ea5b7ca41499d92be26 | update cuba.py with the autogenerated one | simphony/core/cuba.py | simphony/core/cuba.py | # code auto-generated by the cuba-generate.py script.
from enum import IntEnum, unique
@unique
class CUBA(IntEnum):
NAME = 1
DIRECTION = 3
STATUS = 4
LABEL = 5
MATERIAL_ID = 6
CHEMICAL_SPECIE = 7
MATERIAL_TYPE = 8
SHAPE_CENTER = 9
SHAPE_LENGTH_UC = 10
SHAPE_LENGTH = 11
SHAPE_RADIUS = 12
SHAPE_SIDE = 13
CRYSTAL_STORAGE = 14
NAME_UC = 15
LATTICE_VECTORS = 16
SYMMETRY_LATTICE_VECTORS = 17
OCCUPANCY = 18
BOND_LABEL = 19
BOND_TYPE = 20
VELOCITY = 21
ACCELERATION = 22
NUMBER_OF_POINTS = 23
RADIUS = 24
SIZE = 25
MASS = 26
VOLUME = 27
ANGULAR_VELOCITY = 28
ANGULAR_ACCELERATION = 29
SIMULATION_DOMAIN_DIMENSIONS = 30
SIMULATION_DOMAIN_ORIGIN = 31
DYNAMIC_VISCOSITY = 32
KINEMATIC_VISCOSITY = 33
DIFFUSION_COEFFICIENT = 34
PROBABILITY_COEFFICIENT = 35
FRICTION_COEFFICIENT = 36
SCALING_COEFFICIENT = 37
EQUATION_OF_STATE_COEFFICIENT = 38
CONTANCT_ANGLE = 39
AMPHIPHILICITY = 40
PHASE_INTERACTION_STRENGTH = 41
HAMAKER_CONSTANT = 42
ZETA_POTENTIAL = 43
ION_VALENCE_EFFECT = 44
DEBYE_LENGTH = 45
SMOOTHING_LENGTH = 46
LATTICE_SPACING = 47
TIME_STEP = 48
NUMBER_OF_TIME_STEPS = 49
FORCE = 50
TORQUE = 51
DENSITY = 52
CONCENTRATION = 53
PRESSURE = 54
TEMPERATURE = 55
DISTRIBUTION = 56
ORDER_PARAMETER = 57
ORIGINAL_POSITION = 58
DELTA_DISPLACEMENT = 59
EXTERNAL_APPLIED_FORCE = 60
EULER_ANGLES = 61
SPHERICITY = 62
YOUNG_MODULUS = 63
POISSON_RATIO = 64
LN_OF_RESTITUTION_COEFFICIENT = 65
ROLLING_FRICTION = 66
VOLUME_FRACTION = 67
| from enum import IntEnum, unique
@unique
class CUBA(IntEnum):
NAME = 0
DIRECTION = 1
STATUS = 2
LABEL = 3
MATERIAL_ID = 4
MATERIAL_TYPE = 5
SHAPE_CENTER = 6
SHAPE_LENGTH_UC = 7
SHAPE_LENGTH = 8
SHAPE_RADIUS = 9
SHAPE_SIDE = 10
CRYSTAL_STORAGE = 11
NAME_UC = 12
LATTICE_VECTORS = 13
SYMMETRY_LATTICE_VECTORS = 14
OCCUPANCY = 15
BOND_LABEL = 16
BOND_TYPE = 17
VELOCITY = 18
ACCELERATION = 19
NUMBER_OF_POINTS = 20
RADIUS = 21
SIZE = 22
MASS = 23
VOLUME = 24
ANGULAR_VELOCITY = 25
ANGULAR_ACCELERATION = 26
SIMULATION_DOMAIN_DIMENSIONS = 27
SIMULATION_DOMAIN_ORIGIN = 28
DYNAMIC_VISCOSITY = 29
KINEMATIC_VISCOSITY = 30
DIFFUSION_COEFFICIENT = 31
PROBABILITY_COEFFICIENT = 32
FRICTION_COEFFICIENT = 33
SCALING_COEFFICIENT = 34
EQUATION_OF_STATE_COEFFICIENT = 35
CONTANCT_ANGLE = 36
AMPHIPHILICITY = 37
PHASE_INTERACTION_STRENGTH = 38
HAMAKER_CONSTANT = 39
ZETA_POTENTIAL = 40
ION_VALENCE_EFFECT = 41
DEBYE_LENGTH = 42
SMOOTHING_LENGTH = 43
LATTICE_SPACING = 44
TIME_STEP = 45
NUMBEROF_TIME_STEPS = 46
FORCE = 47
TORQUE = 48
DENSITY = 49
CONCENTRATION = 50
PRESSURE = 51
TEMPERATURE = 52
DISTRIBUTION = 53
ORDER_PARAMETER = 54
ORIGINAL_POSITION = 55
DELTA_DISPLACEMENT = 56
EXTERNAL_APPLIED_FORCE = 57
EULE_RANGLES = 58
SPHERICITY = 59
YOUNG_MODULUS = 60
POISSON_RATIO = 61
LN_OF_RESTITUTION_COEFFICIENT = 62
ROLLING_FRICTION = 63
VOLUME_FRACTION = 64
| Python | 0 |
e28a41e5996651aefdf7966ead73310a5a761040 | fix flake8 violation | simphony/cuds/bond.py | simphony/cuds/bond.py | class Bond(object):
"""
Bond entity
"""
def __init__(self, id, particles, data=None):
self.id = id
self.particles = particles
if data is None:
self.data = {}
else:
self.data = data
def __eq__(self, other):
if isinstance(other, self.__class__):
return (self.id == other.id and
self.particles == other.particles and
self.data == self.data)
else:
return False
def __ne__(self, other):
return not self == other
| class Bond(object):
"""
Bond entity
"""
def __init__(self, id, particles, data=None):
self.id = id
self.particles = particles
if data is None:
self.data = {}
else:
self.data = data
def __eq__(self, other):
if isinstance(other, self.__class__):
return (self.id == other.id and
self.particles == other.particles and
self.data == self.data)
else:
return False
def __ne__(self, other):
return not self == other
| Python | 0 |
2e2f6d2a6480a4ca43c76e6559cfe6aadc434a8b | change to dumps | functions/webhook.py | functions/webhook.py | #!/usr/bin/python
# Written by: Andrew Jackson
# This is used to send a JSON payload to a webhook.
import json
import logging
import os
import time
import uuid
import boto3
import requests
import decimal
#def default(obj):
# if isinstance(obj, decimal.Decimal):
# return int(obj)
# return o.__dict__
def handler(event, context):
print "event.dump = " + json.dumps(event)
data = json.dumps(event)
url = data['webhookurl']
payload = data['payload']
headers = {'content-type': 'application/json'}
r = requests.post(url, data=json.dumps(payload))
#print(r.text)
| #!/usr/bin/python
# Written by: Andrew Jackson
# This is used to send a JSON payload to a webhook.
import json
import logging
import os
import time
import uuid
import boto3
import requests
import decimal
#def default(obj):
# if isinstance(obj, decimal.Decimal):
# return int(obj)
# return o.__dict__
def handler(event, context):
print "event.dump = " + json.dumps(event)
data = json.loads(event)
url = data['webhookurl']
payload = data['payload']
headers = {'content-type': 'application/json'}
r = requests.post(url, data=json.dumps(payload))
print(r.text)
| Python | 0.000004 |
f83369a263fb606a6f92b62a45d72e8faf0f1770 | Add RunGM and RunBench steps for Android Review URL: https://codereview.appspot.com/5987049 | master/skia_master_scripts/android_factory.py | master/skia_master_scripts/android_factory.py | # Copyright (c) 2011 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""Utility class to build the Skia master BuildFactory's for Android buildbots.
Overrides SkiaFactory with any Android-specific steps."""
from skia_master_scripts import factory as skia_factory
class AndroidFactory(skia_factory.SkiaFactory):
"""Overrides for Android builds."""
def Build(self, clobber=None):
"""Build and return the complete BuildFactory.
clobber: boolean indicating whether we should clean before building
"""
if clobber is None:
clobber = self._default_clobber
if clobber:
self._skia_cmd_obj.AddClean()
self._skia_cmd_obj.AddRunCommand(
command='../android/bin/android_make all -d nexus_s %s' % (
self._make_flags),
description='BuildAll')
self.PushBinaryToDeviceAndRun(binary_name='tests', description='RunTests')
self.PushBinaryToDeviceAndRun(binary_name='gm',
arguments='--nopdf --noreplay',
description='RunGM')
self.PushBinaryToDeviceAndRun(binary_name='bench', description='RunBench')
return self._factory
def PushBinaryToDeviceAndRun(self, binary_name, arguments='',
description=None, timeout=None):
"""Adds a build step: push a binary file to the USB-connected Android
device and run it.
binary_name: which binary to run on the device
arguments: additional arguments to pass to the binary when running it
description: text description (e.g., 'RunTests')
timeout: timeout in seconds, or None to use the default timeout
The shell command (running on the buildbot slave) will exit with a nonzero
return code if and only if the command running on the Android device
exits with a nonzero return code... so a nonzero return code from the
command running on the Android device will turn the buildbot red.
"""
if not description:
description = 'Run %s' % binary_name
path_to_adb = self.TargetPathJoin('..', 'android', 'bin', 'linux', 'adb')
command_list = [
'%s root' % path_to_adb,
'%s remount' % path_to_adb,
'%s push out/%s/%s /system/bin/skia_%s' % (
path_to_adb, self._configuration, binary_name, binary_name),
'%s logcat -c' % path_to_adb,
'STDOUT=$(%s shell "skia_%s %s && echo ADB_SHELL_SUCCESS")' % (
path_to_adb, binary_name, arguments),
'echo $STDOUT',
'%s logcat -d' % path_to_adb,
'echo $STDOUT | grep ADB_SHELL_SUCCESS',
]
self._skia_cmd_obj.AddRunCommandList(
command_list=command_list, description=description)
| # Copyright (c) 2011 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""Utility class to build the Skia master BuildFactory's for Android buildbots.
Overrides SkiaFactory with any Android-specific steps."""
from skia_master_scripts import factory as skia_factory
class AndroidFactory(skia_factory.SkiaFactory):
"""Overrides for Android builds."""
def Build(self, clobber=None):
"""Build and return the complete BuildFactory.
clobber: boolean indicating whether we should clean before building
"""
if clobber is None:
clobber = self._default_clobber
if clobber:
self._skia_cmd_obj.AddClean()
self._skia_cmd_obj.AddRunCommand(
command='../android/bin/android_make all -d nexus_s %s' % (
self._make_flags),
description='BuildAll')
self.PushBinaryToDeviceAndRun(binary_name='tests', description='RunTests')
return self._factory
def PushBinaryToDeviceAndRun(self, binary_name, description, timeout=None):
"""Adds a build step: push a binary file to the USB-connected Android
device and run it.
binary_name: which binary to run on the device
description: text description (e.g., 'RunTests')
timeout: timeout in seconds, or None to use the default timeout
The shell command (running on the buildbot slave) will exit with a nonzero
return code if and only if the command running on the Android device
exits with a nonzero return code... so a nonzero return code from the
command running on the Android device will turn the buildbot red.
"""
path_to_adb = self.TargetPathJoin('..', 'android', 'bin', 'linux', 'adb')
command_list = [
'%s root' % path_to_adb,
'%s remount' % path_to_adb,
'%s push out/%s/%s /system/bin/skia_%s' % (
path_to_adb, self._configuration, binary_name, binary_name),
'%s logcat -c' % path_to_adb,
'STDOUT=$(%s shell "skia_%s && echo ADB_SHELL_SUCCESS")' % (
path_to_adb, binary_name),
'echo $STDOUT',
'%s logcat -d' % path_to_adb,
'echo $STDOUT | grep ADB_SHELL_SUCCESS',
]
self._skia_cmd_obj.AddRunCommandList(
command_list=command_list, description=description)
| Python | 0 |
984422fe3fb0b34a17e42910a9c1b98afa572452 | Revert r9607 -- it caused a BuildbotSelfTest failure | master/skia_master_scripts/android_factory.py | master/skia_master_scripts/android_factory.py | # Copyright (c) 2013 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""Utility class to build the Skia master BuildFactory's for Android buildbots.
Overrides SkiaFactory with any Android-specific steps."""
from buildbot.process.properties import WithProperties
from skia_master_scripts import factory as skia_factory
class AndroidFactory(skia_factory.SkiaFactory):
"""Overrides for Android builds."""
def __init__(self, device, **kwargs):
""" Instantiates an AndroidFactory with properties and build steps specific
to Android devices.
device: string indicating which Android device type we are targeting
"""
skia_factory.SkiaFactory.__init__(self, bench_pictures_cfg=device,
deps_target_os='android',
flavor='android',
build_targets=['all'],
**kwargs)
self._device = device
self._common_args += ['--device', self._device,
'--serial', WithProperties('%(serial:-None)s'),
'--has_root', WithProperties('%(has_root:-True)s'),
'--android_sdk_root',
WithProperties('%(android_sdk_root)s')]
self._default_clobber = True
def CompareGMs(self):
""" Run the "skdiff" tool to compare the "actual" GM images we just
generated to the baselines in _gm_image_subdir. """
# We have bypass the Android-flavored compile in order to build SkDiff for
# the host.
self.AddSlaveScript(script='compile.py',
description='BuildSkDiff',
is_rebaseline_step=True,
args=['--target', 'tools',
'--gyp_defines',
' '.join('%s=%s' % (k, v)
for k, v in self._gyp_defines.items())])
skia_factory.SkiaFactory.CompareGMs(self)
| # Copyright (c) 2013 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""Utility class to build the Skia master BuildFactory's for Android buildbots.
Overrides SkiaFactory with any Android-specific steps."""
from buildbot.process.properties import WithProperties
from skia_master_scripts import factory as skia_factory
class AndroidFactory(skia_factory.SkiaFactory):
"""Overrides for Android builds."""
def __init__(self, device, **kwargs):
""" Instantiates an AndroidFactory with properties and build steps specific
to Android devices.
device: string indicating which Android device type we are targeting
"""
skia_factory.SkiaFactory.__init__(self, bench_pictures_cfg=device,
deps_target_os='android',
flavor='android',
build_targets=['all'],
**kwargs)
self._device = device
self._common_args += ['--device', self._device,
'--serial', WithProperties('%(serial:-None)s'),
'--has_root', WithProperties('%(has_root:-True)s'),
'--android_sdk_root',
WithProperties('%(android_sdk_root)s')]
self._default_clobber = True
def PreRender(self):
""" Before chaining to SkiaFactory.PreRender(), build tools (skdiff,
skimage) that we might need on the buildslave host machine. """
# We bypass the Android-flavored compile in order to build tools for
# the host.
self.AddSlaveScript(script='compile.py',
description='BuildHostTools',
is_rebaseline_step=True,
args=['--target', 'tools',
'--gyp_defines',
' '.join('%s=%s' % (k, v)
for k, v in self._gyp_defines.items())])
skia_factory.SkiaFactory.PreRender(self)
| Python | 0 |
4b740ddb11fb5c4b2b29bc6eef0a5569349272f8 | make random_metadata compliant | datalake_common/tests/conftest.py | datalake_common/tests/conftest.py | import pytest
import random
import string
from datetime import datetime, timedelta
@pytest.fixture
def basic_metadata():
return {
'version': 0,
'start': 1426809600000,
'end': 1426895999999,
'where': 'nebraska',
'what': 'apache',
'hash': '12345'
}
def random_word(length):
return ''.join(random.choice(string.lowercase) for i in xrange(length))
def random_hex(length):
return ('%0' + str(length) + 'x') % random.randrange(16**length)
def random_interval():
now = datetime.now()
start = now - timedelta(days=random.randint(0, 365*3))
end = start - timedelta(days=random.randint(1, 10))
return start.isoformat(), end.isoformat()
def random_work_id():
if random.randint(0, 1):
return None
return '{}-{}'.format(random_word(5), random.randint(0,2**15))
@pytest.fixture
def random_metadata():
start, end = random_interval()
return {
'version': 0,
'start': start,
'end': end,
'work_id': random_work_id(),
'where': random_word(10),
'what': random_word(10),
'id': random_hex(40),
'hash': random_hex(40),
}
| import pytest
import random
import string
from datetime import datetime, timedelta
@pytest.fixture
def basic_metadata():
return {
'version': 0,
'start': 1426809600000,
'end': 1426895999999,
'where': 'nebraska',
'what': 'apache',
'hash': '12345'
}
def random_word(length):
return ''.join(random.choice(string.lowercase) for i in xrange(length))
def random_interval():
now = datetime.now()
start = now - timedelta(days=random.randint(0, 365*3))
end = start - timedelta(days=random.randint(1, 10))
return start.isoformat(), end.isoformat()
@pytest.fixture
def random_metadata():
start, end = random_interval()
return {
'version': 0,
'start': start,
'end': end,
'where': random_word(10),
'what': random_word(10),
}
| Python | 0.000004 |
4507c0cb56ed72253d52c92f621ec33600e5e36b | Add version number for future use | sla_bot.py | sla_bot.py | import asyncio
import datetime as dt
import math
import os
import traceback
import discord
from discord.ext import commands
from SLA_bot.config import Config as cf
from SLA_bot.schedule import Schedule
VERSION = 0.10
curr_dir = os.path.dirname(__file__)
configs = [
os.path.join(curr_dir, 'docs', 'default_config.ini'),
os.path.join(curr_dir, 'config.ini')
]
cf.cal_path = os.path.join(curr_dir, cf.cal_path)
cf.chan_path = os.path.join(curr_dir, cf.chan_path)
cf.load_config(configs)
bot = commands.Bot(command_prefix='!', description='test')
event_schedule = Schedule(bot)
bot.add_cog(event_schedule)
async def update_schedule():
while not bot.is_closed:
await event_schedule.update()
await asyncio.sleep(cf.refresh_time)
bot.loop.create_task(update_schedule())
@bot.event
async def on_ready():
print('Logged in as: {}'.format(bot.user.name))
print('------')
#bot.loop.create_task(make_alert())
@bot.command(pass_context=True, no_pm=True)
async def announce(ctx, filters='1,2,3,4,5,6,7,8,9,10'):
perm = ctx.message.channel.permissions_for(ctx.message.author)
id = ctx.message.channel.id
if perm.manage_channels:
cf.set_chan(id, filters)
def alert_text(event, ref_time):
time_left = math.ceil((event.start - ref_time).total_seconds() / 60)
return '[{}min] - {}'.format(time_left, event.duration(cf.tz))
async def alert(id, event, first_resend, resend_time):
channel = bot.get_channel(id)
now = dt.datetime.now(dt.timezone.utc)
resend = first_resend
message = None
while now < event.start:
now = dt.datetime.now(dt.timezone.utc)
alert_msg = alert_text(event, now)
if now >= resend:
try:
await bot.delete_message(message)
resend = resend + resend_time
message = None
except discord.errors.HTTPException:
continue
if message == None:
try:
message = await bot.send_message(channel, alert_msg)
except (discord.errors.HTTPException, discord.errors.Forbidden):
continue
except (discord.errors.NotFound, discord.errors.InvalidArgument):
break
else:
try:
message = await bot.edit_message(message, alert_msg)
#not found should break
except discord.errors.HTTPException:
continue
await asyncio.sleep(60)
if message != None:
try:
alert_msg = '[Started] - {}'.format(event.duration(cf.tz))
message = await bot.edit_message(message, alert_msg)
except discord.errors.HTTPException:
pass
async def make_alert():
await bot.wait_until_ready()
last_alert = dt.datetime.now(dt.timezone.utc)
while not bot.is_closed:
now = dt.datetime.now(dt.timezone.utc)
alert_time = now + cf.alert_before
alertable = event_schedule.from_range(last_alert, alert_time)
for event in alertable:
first_resend = event.start
while first_resend > now:
first_resend -= cf.alert_every
first_resend += cf.alert_every
for chan in cf.channels:
bot.loop.create_task(alert(chan[0], event, first_resend,
cf.alert_every))
if len(alertable) > 0:
last_alert = alert_time
await asyncio.sleep(60)
bot.run(cf.token)
| import asyncio
import datetime as dt
import math
import os
import traceback
import discord
from discord.ext import commands
from SLA_bot.config import Config as cf
from SLA_bot.schedule import Schedule
curr_dir = os.path.dirname(__file__)
configs = [
os.path.join(curr_dir, 'docs', 'default_config.ini'),
os.path.join(curr_dir, 'config.ini')
]
cf.cal_path = os.path.join(curr_dir, cf.cal_path)
cf.chan_path = os.path.join(curr_dir, cf.chan_path)
cf.load_config(configs)
bot = commands.Bot(command_prefix='!', description='test')
event_schedule = Schedule(bot)
bot.add_cog(event_schedule)
async def update_schedule():
while not bot.is_closed:
await event_schedule.update()
await asyncio.sleep(cf.refresh_time)
bot.loop.create_task(update_schedule())
@bot.event
async def on_ready():
print('Logged in as: {}'.format(bot.user.name))
print('------')
#bot.loop.create_task(make_alert())
@bot.command(pass_context=True, no_pm=True)
async def announce(ctx, filters='1,2,3,4,5,6,7,8,9,10'):
perm = ctx.message.channel.permissions_for(ctx.message.author)
id = ctx.message.channel.id
if perm.manage_channels:
cf.set_chan(id, filters)
def alert_text(event, ref_time):
time_left = math.ceil((event.start - ref_time).total_seconds() / 60)
return '[{}min] - {}'.format(time_left, event.duration(cf.tz))
async def alert(id, event, first_resend, resend_time):
channel = bot.get_channel(id)
now = dt.datetime.now(dt.timezone.utc)
resend = first_resend
message = None
while now < event.start:
now = dt.datetime.now(dt.timezone.utc)
alert_msg = alert_text(event, now)
if now >= resend:
try:
await bot.delete_message(message)
resend = resend + resend_time
message = None
except discord.errors.HTTPException:
continue
if message == None:
try:
message = await bot.send_message(channel, alert_msg)
except (discord.errors.HTTPException, discord.errors.Forbidden):
continue
except (discord.errors.NotFound, discord.errors.InvalidArgument):
break
else:
try:
message = await bot.edit_message(message, alert_msg)
#not found should break
except discord.errors.HTTPException:
continue
await asyncio.sleep(60)
if message != None:
try:
alert_msg = '[Started] - {}'.format(event.duration(cf.tz))
message = await bot.edit_message(message, alert_msg)
except discord.errors.HTTPException:
pass
async def make_alert():
await bot.wait_until_ready()
last_alert = dt.datetime.now(dt.timezone.utc)
while not bot.is_closed:
now = dt.datetime.now(dt.timezone.utc)
alert_time = now + cf.alert_before
alertable = event_schedule.from_range(last_alert, alert_time)
for event in alertable:
first_resend = event.start
while first_resend > now:
first_resend -= cf.alert_every
first_resend += cf.alert_every
for chan in cf.channels:
bot.loop.create_task(alert(chan[0], event, first_resend,
cf.alert_every))
if len(alertable) > 0:
last_alert = alert_time
await asyncio.sleep(60)
bot.run(cf.token)
| Python | 0 |
5d82c2d9f6d2874ae4621edb4dc1e6455652666b | Remove Dropout and unnecessary imports | examples/imdb_fasttext.py | examples/imdb_fasttext.py | '''This example demonstrates the use of fasttext for text classification
Based on Joulin et al's paper:
Bags of Tricks for Efficient Text Classification
https://arxiv.org/abs/1607.01759
Can achieve accuracy around 88% after 5 epochs in 70s.
'''
from __future__ import print_function
import numpy as np
np.random.seed(1337) # for reproducibility
from keras.preprocessing import sequence
from keras.models import Sequential
from keras.layers import Dense, Activation, Flatten
from keras.layers import Embedding
from keras.layers import AveragePooling1D
from keras.datasets import imdb
from keras import backend as K
# set parameters:
max_features = 20000
maxlen = 400
batch_size = 32
embedding_dims = 20
nb_epoch = 5
print('Loading data...')
(X_train, y_train), (X_test, y_test) = imdb.load_data(nb_words=max_features)
print(len(X_train), 'train sequences')
print(len(X_test), 'test sequences')
print('Pad sequences (samples x time)')
X_train = sequence.pad_sequences(X_train, maxlen=maxlen)
X_test = sequence.pad_sequences(X_test, maxlen=maxlen)
print('X_train shape:', X_train.shape)
print('X_test shape:', X_test.shape)
print('Build model...')
model = Sequential()
# we start off with an efficient embedding layer which maps
# our vocab indices into embedding_dims dimensions
model.add(Embedding(max_features,
embedding_dims,
input_length=maxlen))
# we add a AveragePooling1D, which will average the embeddings
# of all words in the document
model.add(AveragePooling1D(pool_length=model.output_shape[1]))
# We flatten the output of the conv layer
model.add(Flatten())
# We project onto a single unit output layer, and squash it with a sigmoid:
model.add(Dense(1, activation = 'sigmoid'))
model.compile(loss='binary_crossentropy',
optimizer='adam',
metrics=['accuracy'])
model.fit(X_train, y_train,
batch_size=batch_size,
nb_epoch=nb_epoch,
validation_data=(X_test, y_test))
| '''This example demonstrates the use of fasttext for text classification
Based on Joulin et al's paper:
Bags of Tricks for Efficient Text Classification
https://arxiv.org/abs/1607.01759
Can achieve accuracy around 88% after 5 epochs in 70s.
'''
from __future__ import print_function
import numpy as np
np.random.seed(1337) # for reproducibility
from keras.preprocessing import sequence
from keras.models import Sequential
from keras.layers import Dense, Activation, Flatten
from keras.layers import Embedding
from keras.layers import AveragePooling1D
from keras.datasets import imdb
from keras import backend as K
# set parameters:
max_features = 20000
maxlen = 400
batch_size = 32
embedding_dims = 20
nb_epoch = 5
print('Loading data...')
(X_train, y_train), (X_test, y_test) = imdb.load_data(nb_words=max_features)
print(len(X_train), 'train sequences')
print(len(X_test), 'test sequences')
print('Pad sequences (samples x time)')
X_train = sequence.pad_sequences(X_train, maxlen=maxlen)
X_test = sequence.pad_sequences(X_test, maxlen=maxlen)
print('X_train shape:', X_train.shape)
print('X_test shape:', X_test.shape)
print('Build model...')
model = Sequential()
# we start off with an efficient embedding layer which maps
# our vocab indices into embedding_dims dimensions
model.add(Embedding(max_features,
embedding_dims,
input_length=maxlen))
# we add a AveragePooling1D, which will average the embeddings
# of all words in the document
model.add(AveragePooling1D(pool_length=model.output_shape[1]))
# We flatten the output of the conv layer,
# so that we can add a dense layer:
model.add(Flatten())
# We project onto a single unit output layer, and squash it with a sigmoid:
model.add(Dense(1, activation = 'sigmoid'))
model.compile(loss='binary_crossentropy',
optimizer='adam',
metrics=['accuracy'])
model.fit(X_train, y_train,
batch_size=batch_size,
nb_epoch=nb_epoch,
validation_data=(X_test, y_test))
| Python | 0 |
20ef3aed661d5b77bedf48df9ed6917e24319c01 | Fix typo | factory/glideFactoryLogParser.py | factory/glideFactoryLogParser.py | #
# Description:
# This module implements classes to track
# changes in glidein status logs
#
# Author:
# Igor Sfiligoi (Feb 2nd 2007)
#
import os, os.path
import condorLogParser
# for now it is just a constructor wrapper
# Further on it will need to implement glidein exit code checks
class dirSummaryTimings(condorLogParser.dirSummaryTimings):
def __init__(self,dirname,client_name,inactive_files=None):
condorLogParser.dirSummaryTimings.__init__(self,dirname,log_prefix="condor_activity_",log_suffix="_"+client_name+".log",inactive_files=inactive_files)
| #
# Description:
# This module implements classes to track
# changes in glidein status logs
#
# Author:
# Igor Sfiligoi (Feb 2nd 2007)
#
import os, os.path
import condorLogParser
# for now it is just a constructor wrapper
# Further on it will need to implement glidein exit code checks
class dirSummaryTimings(condorLogParser.dirSummary):
def __init__(self,dirname,client_name,inactive_files=None):
condorLogParser.dirSummaryTimings.__init__(self,dirname,log_prefix="condor_activity_",log_suffix="_"+client_name+".log",inactive_files=inactive_files)
| Python | 0.999999 |
ae38884444be3b3e0f98ca406352fe92037423f1 | making the products model abstract | scofield/product/models.py | scofield/product/models.py | from django.db import models
from datetime import datetime
from scofield.category.models import *
from scofield.manufacturer.models import Manufacturer
class ProductModel(models.Model):
"""
Base class for products
"""
#timestamps
date_added = models.DateTimeField(default=datetime.now)
date_updated = models.DateTimeField(default=datetime.now)
class Meta:
abstract = True
class Product(ProductModel):
"""
Product Model
"""
name = models.CharField(max_length=200, null=False, blank=False, help_text='Product Name')
slug = models.SlugField(max_length=210, null=False, blank=False, help_text='Used for URLs, auto-generated from name if blank')
sku = models.CharField(max_length=100, null=True, blank=True)
category = models.ManyToManyField(Category, blank=False, null=False)
manufacturer = models.ForeignKey(Manufacturer, blank=True, null=True)
msrp = models.DecimalField(max_digits=14, decimal_places=2)
class Price(models.Model):
"""
Base class for product pricing
"""
product = models.ForeignKey(Product)
price = models.DecimalField(max_digits=14, decimal_places=2)
| from django.db import models
from scofield.category.models import *
from scofield.manufacturer.models import Manufacturer
class Product(models.Model):
"""
Base class for products
"""
name = models.CharField(max_length=200, null=False, blank=False, help_text='Product Name')
slug = models.SlugField(max_length=210, null=False, blank=False, help_text='Used for URLs, auto-generated from name if blank')
sku = models.CharField(max_length=100, null=True, blank=True)
category = models.ManyToManyField(Category, blank=False, null=False)
manufacturer = models.ForeignKey(Manufacturer, blank=True, null=True)
msrp = models.DecimalField(max_digits=14, decimal_places=2)
class Meta:
abstract = True
| Python | 0.999999 |
c47a51db4f7ccc514aa687a1859ed592574d1a58 | Change API Endpoint to BzAPI Compatibility Layer | bugzilla/agents.py | bugzilla/agents.py | from bugzilla.models import *
from bugzilla.utils import *
class InvalidAPI_ROOT(Exception):
def __str__(self):
return "Invalid API url specified. " + \
"Please set BZ_API_ROOT in your environment " + \
"or pass it to the agent constructor"
class BugzillaAgent(object):
def __init__(self, api_root=None, username=None, password=None):
if not api_root:
api_root = os.environ.get('BZ_API_ROOT')
if not api_root:
raise InvalidAPI_ROOT
self.API_ROOT = api_root
self.username, self.password = username, password
def get_bug(self, bug, include_fields='_default,token,cc,keywords,whiteboard,comments', exclude_fields=None, params={}):
params['include_fields'] = [include_fields]
params['exclude_fields'] = [exclude_fields]
url = urljoin(self.API_ROOT, 'bug/%s?%s' % (bug, self.qs(**params)))
return Bug.get(url)
def get_bug_list(self, params={}):
url = url = urljoin(self.API_ROOT, 'bug/?%s' % (self.qs(**params)))
return BugSearch.get(url).bugs
def qs(self, **params):
if self.username and self.password:
params['username'] = [self.username]
params['password'] = [self.password]
return qs(**params)
class BMOAgent(BugzillaAgent):
def __init__(self, username=None, password=None):
super(BMOAgent, self).__init__('https://bugzilla.mozilla.org/bzapi/', username, password)
| from bugzilla.models import *
from bugzilla.utils import *
class InvalidAPI_ROOT(Exception):
def __str__(self):
return "Invalid API url specified. " + \
"Please set BZ_API_ROOT in your environment " + \
"or pass it to the agent constructor"
class BugzillaAgent(object):
def __init__(self, api_root=None, username=None, password=None):
if not api_root:
api_root = os.environ.get('BZ_API_ROOT')
if not api_root:
raise InvalidAPI_ROOT
self.API_ROOT = api_root
self.username, self.password = username, password
def get_bug(self, bug, include_fields='_default,token,cc,keywords,whiteboard,comments', exclude_fields=None, params={}):
params['include_fields'] = [include_fields]
params['exclude_fields'] = [exclude_fields]
url = urljoin(self.API_ROOT, 'bug/%s?%s' % (bug, self.qs(**params)))
return Bug.get(url)
def get_bug_list(self, params={}):
url = url = urljoin(self.API_ROOT, 'bug/?%s' % (self.qs(**params)))
return BugSearch.get(url).bugs
def qs(self, **params):
if self.username and self.password:
params['username'] = [self.username]
params['password'] = [self.password]
return qs(**params)
class BMOAgent(BugzillaAgent):
def __init__(self, username=None, password=None):
super(BMOAgent, self).__init__('https://api-dev.bugzilla.mozilla.org/latest/', username, password)
| Python | 0 |
22b91d3f58eb9a6c021645a4aea56c864d151bba | Fix get_favorite_for typo in templatetags | favit/templatetags/favit_tags.py | favit/templatetags/favit_tags.py | # -*- coding: utf-8 -*-
from django import template
from django.db.models import get_model
from django.template.loader import render_to_string
from ..models import Favorite
register = template.Library()
@register.simple_tag(takes_context=True)
def favorite_button(context, target):
user = context['request'].user
# do nothing when user isn't authenticated
if not user.is_authenticated():
return ''
target_model = '.'.join((target._meta.app_label, target._meta.object_name))
undo = False
# prepare button to unfave if the user
# already faved this object
if Favorite.objects.get_favorite(user, target):
undo = True
return render_to_string(
'favit/button.html', {
'target_model': target_model,
'target_object_id': target.id,
'undo': undo,
'fav_count': Favorite.objects.for_object(target).count()
}
)
@register.simple_tag(takes_context=True)
def unfave_button(context, target):
user = context['request'].user
# do nothing when user isn't authenticated
if not user.is_authenticated():
return ''
if Favorite.objects.get_favorite(user, target) is None:
return ''
target_model = '.'.join((target._meta.app_label, target._meta.object_name))
return render_to_string(
'favit/unfave-button.html', {
'target_model': target_model,
'target_object_id': target.id,
}
)
@register.filter
def get_favorite_for(obj, user):
"""
Get Favorite instance for an object (obj) and a user (user)
Usage:
{% with obj|get_favorite_for:user as fav_object %}
...
{% endwith %}
"""
return Favorite.objects.get_favorite(user, obj)
@register.filter
def favorites_count(obj):
"""
Usage:
Given an object `obj` you may show it fav count like this:
<p>Favorite Count {{ obj|favorites_count }}</p>
"""
return Favorite.objects.for_object(obj).count()
@register.assignment_tag
def user_favorites(user, app_model=None):
"""
Usage:
Get all user favorited objects:
{% with user_favorites <user> as favorite_list %}
{% for fav_obj in favorite_list %}
{# do something with fav_obj #}
{% endfor %}
{% endwith %}
or, just favorites from one model:
{% with user_favorites <user> "app_label.model" as favorite_list %}
{% for fav_obj in favorite_list %}
{# do something with fav_obj #}
{%
{% endwith %}
"""
return Favorite.objects.for_user(user, app_model)
@register.assignment_tag
def model_favorites(app_model):
"""
Gets all favorited objects that are instances of a model
given in module notation.
Usage:
{% with model_favorites "app_label.model" as favorite_list %}
{% for fav_obj in favorite_list %}
{# do something with fav_obj #}
{% endfor %}
{% endwith %}
"""
return Favorite.objects.for_model(app_model)
| # -*- coding: utf-8 -*-
from django import template
from django.db.models import get_model
from django.template.loader import render_to_string
from ..models import Favorite
register = template.Library()
@register.simple_tag(takes_context=True)
def favorite_button(context, target):
user = context['request'].user
# do nothing when user isn't authenticated
if not user.is_authenticated():
return ''
target_model = '.'.join((target._meta.app_label, target._meta.object_name))
undo = False
# prepare button to unfave if the user
# already faved this object
if Favorite.objects.get_favorite(user, target):
undo = True
return render_to_string(
'favit/button.html', {
'target_model': target_model,
'target_object_id': target.id,
'undo': undo,
'fav_count': Favorite.objects.for_object(target).count()
}
)
@register.simple_tag(takes_context=True)
def unfave_button(context, target):
user = context['request'].user
# do nothing when user isn't authenticated
if not user.is_authenticated():
return ''
if Favorite.objects.get_favorite(user, target) is None:
return ''
target_model = '.'.join((target._meta.app_label, target._meta.object_name))
return render_to_string(
'favit/unfave-button.html', {
'target_model': target_model,
'target_object_id': target.id,
}
)
@register.filter
def get_favorite_for(obj, user):
"""
Get Favorite instance for an object (obj) and a user (user)
Usage:
{% with obj|get_favorite_for:user as fav_object %}
...
{% endwith %}
"""
return Favorites.objects.get_favorite(user, obj)
@register.filter
def favorites_count(obj):
"""
Usage:
Given an object `obj` you may show it fav count like this:
<p>Favorite Count {{ obj|favorites_count }}</p>
"""
return Favorite.objects.for_object(obj).count()
@register.assignment_tag
def user_favorites(user, app_model=None):
"""
Usage:
Get all user favorited objects:
{% with user_favorites <user> as favorite_list %}
{% for fav_obj in favorite_list %}
{# do something with fav_obj #}
{% endfor %}
{% endwith %}
or, just favorites from one model:
{% with user_favorites <user> "app_label.model" as favorite_list %}
{% for fav_obj in favorite_list %}
{# do something with fav_obj #}
{%
{% endwith %}
"""
return Favorite.objects.for_user(user, app_model)
@register.assignment_tag
def model_favorites(app_model):
"""
Gets all favorited objects that are instances of a model
given in module notation.
Usage:
{% with model_favorites "app_label.model" as favorite_list %}
{% for fav_obj in favorite_list %}
{# do something with fav_obj #}
{% endfor %}
{% endwith %}
"""
return Favorite.objects.for_model(app_model)
| Python | 0 |
66ad5e449b1f28dbde2bc30a37ad3c568ae9166f | Fix bins | examples/plot_dom_hits.py | examples/plot_dom_hits.py | # -*- coding: utf-8 -*-
"""
==================
DOM hits.
==================
Estimate track/DOM distances using the number of hits per DOM.
"""
from __future__ import absolute_import, print_function, division
# Author: Tamas Gal <tgal@km3net.de>
# License: BSD-3
from collections import defaultdict, Counter
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
from matplotlib.colors import LogNorm
import km3pipe as kp
from km3pipe.dataclasses import Table
from km3pipe.math import pld3
from km3modules.common import StatusBar
import km3pipe.style
km3pipe.style.use("km3pipe")
filename = "data/atmospheric_muons_sample.h5"
cal = kp.calib.Calibration(filename="data/KM3NeT_-00000001_20171212.detx")
def filter_muons(blob):
"""Write all muons from McTracks to Muons."""
tracks = blob['McTracks']
muons = tracks[tracks.type == -13] # PDG particle code
blob["Muons"] = Table(muons)
return blob
class DOMHits(kp.Module):
"""Create histogram with n_hits and distance of hit to track."""
def configure(self):
self.hit_statistics = defaultdict(list)
def process(self, blob):
hits = blob['Hits']
muons = blob['Muons']
highest_energetic_muon = Table(muons[np.argmax(muons.energy)])
muon = highest_energetic_muon
triggered_hits = hits.triggered_rows
dom_hits = Counter(triggered_hits.dom_id)
for dom_id, n_hits in dom_hits.items():
try:
distance = pld3(
cal.detector.dom_positions[dom_id], muon.pos, muon.dir
)
except KeyError:
self.log.warning("DOM ID %s not found!" % dom_id)
continue
self.hit_statistics['n_hits'].append(n_hits)
self.hit_statistics['distance'].append(distance)
return blob
def finish(self):
df = pd.DataFrame(self.hit_statistics)
print(df)
sdf = df[(df['distance'] < 200) & (df['n_hits'] < 50)]
bins = (int(max(sdf['distance'])) - 1, int(max(sdf['n_hits']) - 1))
plt.hist2d(
sdf['distance'],
sdf['n_hits'],
cmap='plasma',
bins=bins,
norm=LogNorm()
)
plt.xlabel('Distance between hit and muon track [m]')
plt.ylabel('Number of hits on DOM')
plt.show()
pipe = kp.Pipeline()
pipe.attach(kp.io.HDF5Pump, filename=filename)
pipe.attach(StatusBar, every=100)
pipe.attach(filter_muons)
pipe.attach(DOMHits)
pipe.drain()
| # -*- coding: utf-8 -*-
"""
==================
DOM hits.
==================
Estimate track/DOM distances using the number of hits per DOM.
"""
from __future__ import absolute_import, print_function, division
# Author: Tamas Gal <tgal@km3net.de>
# License: BSD-3
from collections import defaultdict, Counter
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
from matplotlib.colors import LogNorm
import km3pipe as kp
from km3pipe.dataclasses import Table
from km3pipe.math import pld3
from km3modules.common import StatusBar
import km3pipe.style
km3pipe.style.use("km3pipe")
filename = "data/atmospheric_muons_sample.h5"
cal = kp.calib.Calibration(filename="data/KM3NeT_-00000001_20171212.detx")
def filter_muons(blob):
"""Write all muons from McTracks to Muons."""
tracks = blob['McTracks']
muons = tracks[tracks.type == -13] # PDG particle code
blob["Muons"] = Table(muons)
return blob
class DOMHits(kp.Module):
"""Create histogram with n_hits and distance of hit to track."""
def configure(self):
self.hit_statistics = defaultdict(list)
def process(self, blob):
hits = blob['Hits']
muons = blob['Muons']
highest_energetic_muon = Table(muons[np.argmax(muons.energy)])
muon = highest_energetic_muon
triggered_hits = hits.triggered_rows
dom_hits = Counter(triggered_hits.dom_id)
for dom_id, n_hits in dom_hits.items():
try:
distance = pld3(
cal.detector.dom_positions[dom_id], muon.pos, muon.dir
)
except KeyError:
self.log.warning("DOM ID %s not found!" % dom_id)
continue
self.hit_statistics['n_hits'].append(n_hits)
self.hit_statistics['distance'].append(distance)
return blob
def finish(self):
df = pd.DataFrame(self.hit_statistics)
print(df)
sdf = df[(df['distance'] < 200) & (df['n_hits'] < 50)]
bins = (max(sdf['distance']) - 1, max(sdf['n_hits']) - 1)
plt.hist2d(
sdf['distance'],
sdf['n_hits'],
cmap='plasma',
bins=bins,
norm=LogNorm()
)
plt.xlabel('Distance between hit and muon track [m]')
plt.ylabel('Number of hits on DOM')
plt.show()
pipe = kp.Pipeline()
pipe.attach(kp.io.HDF5Pump, filename=filename)
pipe.attach(StatusBar, every=100)
pipe.attach(filter_muons)
pipe.attach(DOMHits)
pipe.drain()
| Python | 0.000001 |
14c31307fd31631ecce0378aedbef95cec8531f2 | Fix autodiscovery | gargoyle/__init__.py | gargoyle/__init__.py | """
gargoyle
~~~~~~~~
:copyright: (c) 2010 DISQUS.
:license: Apache License 2.0, see LICENSE for more details.
"""
from django.utils.module_loading import autodiscover_modules
from gargoyle.manager import gargoyle
__version__ = '1.2.0'
VERSION = __version__ # old version compat
__all__ = ('gargoyle', 'autodiscover', '__version__', 'VERSION')
default_app_config = 'gargoyle.apps.GargoyleAppConfig'
def autodiscover():
"""
Auto-discover INSTALLED_APPS' gargoyle modules and fail silently when
not present. This forces an import on them to register any gargoyle bits they
may want.
"""
import gargoyle.builtins # noqa
autodiscover_modules('gargoyle')
| """
gargoyle
~~~~~~~~
:copyright: (c) 2010 DISQUS.
:license: Apache License 2.0, see LICENSE for more details.
"""
from django.utils.module_loading import autodiscover_modules
from gargoyle.manager import gargoyle
__version__ = '1.2.0'
VERSION = __version__ # old version compat
__all__ = ('gargoyle', 'autodiscover', '__version__', 'VERSION')
default_app_config = 'gargoyle.apps.GargoyleAppConfig'
def autodiscover():
"""
Auto-discover INSTALLED_APPS' gargoyle modules and fail silently when
not present. This forces an import on them to register any gargoyle bits they
may want.
"""
autodiscover_modules('gargoyle')
| Python | 0 |
e0def112fda555307cc9d8249056b92c7f86f29a | Pass the amount of values to softmax | eva/models/wavenet.py | eva/models/wavenet.py | from keras.models import Model
from keras.layers import Input, Convolution1D, Activation, Merge, Lambda
from keras.layers.advanced_activations import PReLU
from keras.optimizers import Nadam
from eva.layers.causal_atrous_convolution1d import CausalAtrousConvolution1D
from eva.layers.wavenet_block import WavenetBlock, WavenetBlocks
def Wavenet(input_shape, filters, depth, stacks, last=0, h=None, build=True):
# TODO: Soft targets? A float to make targets a gaussian with stdev.
# TODO: Train only receptive field. The temporal-first outputs are computed from zero-padding.
# TODO: Global conditioning?
# TODO: Local conditioning?
_, nb_bins = input_shape
input_audio = Input(input_shape, name='audio_input')
model = CausalAtrousConvolution1D(filters, 2, mask_type='A', atrous_rate=1, border_mode='valid')(input_audio)
out, skip_connections = WavenetBlocks(filters, depth, stacks)(model)
out = Merge(mode='sum', name='merging_skips')(skip_connections)
out = PReLU()(out)
out = Convolution1D(nb_bins, 1, border_mode='same')(out)
out = PReLU()(out)
out = Convolution1D(nb_bins, 1, border_mode='same')(out)
# https://storage.googleapis.com/deepmind-live-cms/documents/BlogPost-Fig2-Anim-160908-r01.gif
if last > 0:
out = Lambda(lambda x: x[:, -last:], output_shape=(last, out._keras_shape[2]), name='last_out')(out)
out = Activation('softmax')(out)
if build:
model = Model(input_audio, out)
model.compile(Nadam(), 'sparse_categorical_crossentropy')
return model
def compute_receptive_field(sample_rate, depth, stacks):
receptive_field = stacks * (2 ** depth * 2) - (stacks - 1)
receptive_field_ms = (receptive_field * 1000) / sample_rate
return receptive_field, receptive_field_ms | from keras.models import Model
from keras.layers import Input, Convolution1D, Activation, Merge, Lambda
from keras.layers.advanced_activations import PReLU
from keras.optimizers import Nadam
from eva.layers.causal_atrous_convolution1d import CausalAtrousConvolution1D
from eva.layers.wavenet_block import WavenetBlock, WavenetBlocks
def Wavenet(input_shape, filters, depth, stacks, learn_all=False, h=None, build=True):
# TODO: Soft targets? A float to make targets a gaussian with stdev.
# TODO: Train only receptive field. The temporal-first outputs are computed from zero-padding.
# TODO: Global conditioning?
# TODO: Local conditioning?
_, nb_bins = input_shape
input_audio = Input(input_shape, name='audio_input')
model = CausalAtrousConvolution1D(filters, 2, mask_type='A', atrous_rate=1, border_mode='valid')(input_audio)
out, skip_connections = WavenetBlocks(filters, depth, stacks)(model)
out = Merge(mode='sum', name='merging_skips')(skip_connections)
out = PReLU()(out)
out = Convolution1D(nb_bins, 1, border_mode='same')(out)
out = PReLU()(out)
out = Convolution1D(nb_bins, 1, border_mode='same')(out)
# https://storage.googleapis.com/deepmind-live-cms/documents/BlogPost-Fig2-Anim-160908-r01.gif
if not learn_all:
out = Lambda(lambda x: x[:, -1, :], output_shape=(out._keras_shape[-1],), name='last_out')(out)
out = Activation('softmax')(out)
if build:
model = Model(input_audio, out)
model.compile(Nadam(), 'sparse_categorical_crossentropy')
return model
def compute_receptive_field(sample_rate, depth, stacks):
receptive_field = stacks * (2 ** depth * 2) - (stacks - 1)
receptive_field_ms = (receptive_field * 1000) / sample_rate
return receptive_field, receptive_field_ms | Python | 0.9994 |
f4063d86404adbb5489edefd6c12d855de246dee | test that we can decode all doubly-encoded characters (doesn't pass yet) | ftfy/test_unicode.py | ftfy/test_unicode.py | # -*- coding: utf-8 -*-
from ftfy.fixes import fix_text_encoding
import unicodedata
import sys
from nose.tools import eq_
if sys.hexversion >= 0x03000000:
unichr = chr
# Most single-character strings which have been misencoded should be restored.
def test_all_bmp_characters():
for index in range(0xa0, 0xfffd):
char = unichr(index)
# Exclude code points that are not assigned
if unicodedata.category(char) not in ('Co', 'Cn', 'Cs', 'Mc', 'Mn'):
garble = char.encode('utf-8').decode('latin-1')
garble2 = char.encode('utf-8').decode('latin-1').encode('utf-8').decode('latin-1')
eq_(fix_text_encoding(garble), char)
eq_(fix_text_encoding(garble2), char)
phrases = [
u"\u201CI'm not such a fan of Charlotte Brontë\u2026\u201D",
u"\u201CI'm not such a fan of Charlotte Brontë\u2026\u201D",
u"\u2039ALLÍ ESTÁ\u203A",
u"\u2014ALLÍ ESTÁ\u2014",
u"AHÅ™, the new sofa from IKEA®",
#u"\u2014a radius of 10 Å\u2014",
]
# These phrases should not be erroneously "fixed"
def test_valid_phrases():
for phrase in phrases:
yield check_phrase, phrase
# make it not just confirm based on the opening punctuation
yield check_phrase, phrase[1:]
def check_phrase(text):
eq_(fix_text_encoding(text), text)
| # -*- coding: utf-8 -*-
from ftfy.fixes import fix_text_encoding
import unicodedata
import sys
if sys.hexversion >= 0x03000000:
unichr = chr
# Most single-character strings which have been misencoded should be restored.
def test_all_bmp_characters():
for index in range(0xa0, 0xfffd):
char = unichr(index)
# Exclude code points that are not assigned
if unicodedata.category(char) not in ('Co', 'Cn', 'Cs', 'Mc', 'Mn'):
garble = char.encode('utf-8').decode('latin-1')
assert fix_text_encoding(garble) == char
phrases = [
u"\u201CI'm not such a fan of Charlotte Brontë\u2026\u201D",
u"\u201CI'm not such a fan of Charlotte Brontë\u2026\u201D",
u"\u2039ALLÍ ESTÁ\u203A",
u"\u2014ALLÍ ESTÁ\u2014",
u"AHÅ™, the new sofa from IKEA®",
#u"\u2014a radius of 10 Å\u2014",
]
# These phrases should not be erroneously "fixed"
def test_valid_phrases():
for phrase in phrases:
yield check_phrase, phrase
# make it not just confirm based on the opening punctuation
yield check_phrase, phrase[1:]
def check_phrase(text):
assert fix_text_encoding(text) == text, text
| Python | 0.000001 |
c9e37f9b241c2bef2ffdb4811cec41c951b21ef9 | Update fluid_cat_slim.py | cat_boxing/caged_cat/python/fluid_cat_slim.py | cat_boxing/caged_cat/python/fluid_cat_slim.py | from random import randint
def generate_cat():
cat_size = randint(1,100)
return cat_size
def fill_box():
empty_room = 400
j = 0
while empty_room > 0:
cat = generate_cat()
empty_room = empty_room - cat
j = j + 1
return j
def fill_truck():
truck_size = 40
cat_num = 0
i = 0
while i <= truck_size:
cats_in_box = fill_box()
cat_num = cat_num + cats_in_box
i = i + 1
print("There are ", cat_num, " cats in our truck. Let's move out!")
| from random import randint
def generate_cat():
cat_size = randint(1,100)
return cat_size
def fill_box():
box_size = 400
empty_room = 400
j = 0
while empty_room > 0:
cat = generate_cat()
empty_room = empty_room - cat
j = j + 1
return j
def fill_truck():
truck_size = 40
cat_num = 0
i = 0
while i <= truck_size:
cats_in_box = fill_box()
cat_num = cat_num + cats_in_box
i = i + 1
print("There are ", cat_num, " cats in our truck. Let's move out!")
| Python | 0.000003 |
f9e543f8c84f8a6f9d6ead0d2a1f9979d6a0ab8b | add write timing | humanhive/audio_interface.py | humanhive/audio_interface.py | import pyaudio
import time
class AudioInterface:
"""
Manages the sound interface. This manages the main callback for the audio
interface and delegates behaviour to the Playback and Recording modules.
"""
def __init__(self,
playback,
recording_queue,
n_channels,
sample_rate,
sample_width,
device_id,
frame_count=1024):
self.playback = playback
self.recording_queue = recording_queue
self.n_channels = n_channels
self.sample_rate = sample_rate
self.sample_width = sample_width
self.frame_count = frame_count
print("frame_count: {}".format(frame_count))
# Initialise pyaudio interface
self.p = pyaudio.PyAudio()
print("Device parameters for device with id: {}\n{}".format(
device_id, self.p.get_device_info_by_index(device_id)))
self.stream = self.p.open(
format=self.p.get_format_from_width(2),
channels=self.n_channels,
rate=self.sample_rate,
output_device_index=device_id,
# input=True,
output=True,
#stream_callback=self.audio_callback,
)
print("Finished initialising audio")
def audio_callback(self, in_data, frame_count, time_info, status):
st = time.time()
# Send recording data
if self.recording_queue is not None:
self.recording_queue.put((in_data, frame_count))
print("qsize: {}".format(self.playback.qsize()))
# Get output audio
samples = self.playback.get()
te = time.time() - st
print("Time elapsed: {}".format(te))
return (samples, pyaudio.paContinue)
def start_stream(self):
self.stream.start_stream()
def close_stream(self):
self.stream.stop_stream()
self.stream.close()
self.p.terminate()
def is_active(self):
return self.stream.is_active()
def run(self):
while True:
(data, status) = self.audio_callback(
None, self.frame_count, None, None)
st = time.time()
self.stream.write(data, self.frame_count, exception_on_underflow=False)
print("Write time: {}".format(time.time() - st))
| import pyaudio
import time
class AudioInterface:
"""
Manages the sound interface. This manages the main callback for the audio
interface and delegates behaviour to the Playback and Recording modules.
"""
def __init__(self,
playback,
recording_queue,
n_channels,
sample_rate,
sample_width,
device_id,
frame_count=1024):
self.playback = playback
self.recording_queue = recording_queue
self.n_channels = n_channels
self.sample_rate = sample_rate
self.sample_width = sample_width
self.frame_count = frame_count
print("frame_count: {}".format(frame_count))
# Initialise pyaudio interface
self.p = pyaudio.PyAudio()
print("Device parameters for device with id: {}\n{}".format(
device_id, self.p.get_device_info_by_index(device_id)))
self.stream = self.p.open(
format=self.p.get_format_from_width(2),
channels=self.n_channels,
rate=self.sample_rate,
output_device_index=device_id,
# input=True,
output=True,
#stream_callback=self.audio_callback,
)
print("Finished initialising audio")
def audio_callback(self, in_data, frame_count, time_info, status):
st = time.time()
# Send recording data
if self.recording_queue is not None:
self.recording_queue.put((in_data, frame_count))
# Get output audio
samples = self.playback.get()
te = time.time() - st
print("Time elapsed: {}".format(te))
return (samples, pyaudio.paContinue)
def start_stream(self):
self.stream.start_stream()
def close_stream(self):
self.stream.stop_stream()
self.stream.close()
self.p.terminate()
def is_active(self):
return self.stream.is_active()
def run(self):
while True:
(data, status) = self.audio_callback(
None, self.frame_count, None, None)
self.stream.write(data, self.frame_count, exception_on_underflow=False)
| Python | 0.00001 |
d1e66c414aac60cc7770ddeff091dedc5c0047f6 | Remove debug `print` from feature extraction | feature_extraction/extraction.py | feature_extraction/extraction.py | import numpy as np
import skimage.exposure as exposure
from .util import AttributeDict
def extract_features(image, measurements):
"""
Given an image as a Numpy array and a set of measurement objects
implementing a compute method returning a feature vector, return a combined
feature vector.
"""
# TODO(liam): parallelize multiple measurements on an image by using Celery
return np.hstack([m.compute(image) for m in measurements])
def normalize_features(X):
# recenter features and normalize over the dataset
X -= np.mean(X, axis=0)
X /= np.linalg.norm(X, axis=0)
# normalize for each record
X /= np.vstack(np.linalg.norm(X, axis=1))
return X
def feature_postprocessing(X, options):
_options = AttributeDict({'normalize': True, 'fill_nans': False})
_options.update(options or {}); options = _options
if options.fill_nans:
X = np.nan_to_num(X)
if options.normalize:
X = normalize_features(X)
return X
def image_preprocessing(im, options):
_options = AttributeDict({'normalize': True, 'equalize': None})
_options.update(options or {}); options = _options
if options.normalize:
im = exposure.rescale_intensity(im)
if options.equalize:
if options.equalize['method'] == "histogram":
im = exposure.equalize_hist(im)
elif options.equalize['method'] == "stretch":
pmin, pmax = np.percentile(im,
(options.equalize['saturation'], 100-options.equalize['saturation']))
im = exposure.rescale_intensity(im, in_range=(pmin, pmax))
return im
| import numpy as np
import skimage.exposure as exposure
from .util import AttributeDict
def extract_features(image, measurements):
"""
Given an image as a Numpy array and a set of measurement objects
implementing a compute method returning a feature vector, return a combined
feature vector.
"""
# TODO(liam): parallelize multiple measurements on an image by using Celery
return np.hstack([m.compute(image) for m in measurements])
def normalize_features(X):
# recenter features and normalize over the dataset
X -= np.mean(X, axis=0)
X /= np.linalg.norm(X, axis=0)
# normalize for each record
X /= np.vstack(np.linalg.norm(X, axis=1))
return X
def feature_postprocessing(X, options):
_options = AttributeDict({'normalize': True, 'fill_nans': False})
_options.update(options or {}); options = _options
if options.fill_nans:
X = np.nan_to_num(X)
if options.normalize:
X = normalize_features(X)
return X
def image_preprocessing(im, options):
_options = AttributeDict({'normalize': True, 'equalize': None})
_options.update(options or {}); options = _options
if options.normalize:
im = exposure.rescale_intensity(im)
print options
if options.equalize:
if options.equalize['method'] == "histogram":
im = exposure.equalize_hist(im)
elif options.equalize['method'] == "stretch":
pmin, pmax = np.percentile(im,
(options.equalize['saturation'], 100-options.equalize['saturation']))
im = exposure.rescale_intensity(im, in_range=(pmin, pmax))
return im
| Python | 0.000001 |
15652a0b80b0fa0c87ac9ccd33eaada22859bfa2 | Update the_most_numbers.py | checkio/python/elementary/the_most_numbers.py | checkio/python/elementary/the_most_numbers.py | def distance(*args):
if args:
min = args[0]
max = args[0]
for x in args:
if x < min:
min = x
if x > max:
max = x
else:
min = 0
max = 0
return max - min
| Python | 0.998495 |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.