commit stringlengths 40 40 | subject stringlengths 1 1.49k | old_file stringlengths 4 311 | new_file stringlengths 4 311 | new_contents stringlengths 1 29.8k | old_contents stringlengths 0 9.9k | lang stringclasses 3 values | proba float64 0 1 |
|---|---|---|---|---|---|---|---|
2c2d594b7d8d5d74732e30c46859779b88621baa | Create __init__.py | FireModules/__init__.py | FireModules/__init__.py | Python | 0.000429 | ||
328274b6a938667326dbd435e7020c619cd80d3d | Add EmojiUtils.py | EmojiUtils.py | EmojiUtils.py | # coding=utf-8
# 2015-11-19
# yaoms
# emoji 表情符号转义
"""
emoji 表情 编码/解码 类
encode_string_emoji(string)
decode_string_emoji(string)
"""
import re
def __is_normal_char(c):
    """Return True if *c* is an ordinary character, False for an emoji.

    A character counts as "normal" when its code point is one of the
    XML-valid singletons (NUL, tab, LF, CR) or falls inside the
    XML-valid character ranges; anything else is treated as an emoji.

    :param c: a single character
    :return: True for normal characters, False for emoji characters
    """
    cp = ord(c)
    if cp in (0x0, 0x9, 0xA, 0xD):
        return True
    for lo, hi in ((0x20, 0xD7FF), (0xE000, 0xFFFD), (0x10000, 0x10FFFF)):
        if lo <= cp <= hi:
            return True
    return False
def __emoji_encode(c):
    """Encode one character.

    :param c: a single character
    :return: *c* unchanged when it is a normal character, otherwise the
        bracketed lowercase-hex token "[emj]<hex>[/emj]".
    """
    if not __is_normal_char(c):
        return "[emj]%x[/emj]" % ord(c)
    return c
def __emoji_decode(code):
    """Decode one "[emj]<hex>[/emj]" token back into its character.

    :param code: a single token as produced by __emoji_encode()
    :return: the decoded character, or *code* unchanged when it is not a
        well-formed token.  (The original implicitly returned None in
        that case; callers only ever pass regex-matched tokens, so
        returning the input is a safe, more useful fallback.)
    """
    m = re.match(r"\[emj\]([a-fA-F0-9]+)\[/emj\]", code)
    if m is None:
        return code
    codepoint = int(m.group(1), 16)
    try:
        return unichr(codepoint)  # Python 2
    except NameError:
        return chr(codepoint)  # Python 3: chr() covers the full Unicode range
def encode_string_emoji(string):
    """Encode every emoji character in *string* into an [emj] token.

    :param string: text possibly containing emoji characters
    :return: text with each emoji replaced by "[emj]<hex>[/emj]"
    """
    encoded = []
    for ch in string:
        encoded.append(__emoji_encode(ch))
    return "".join(encoded)
def decode_string_emoji(string):
    """Decode every "[emj]<hex>[/emj]" token in *string* back to a character.

    :param string: text containing emoji tokens
    :return: text with every token replaced by the character it encodes
    """
    token = re.compile(r"\[emj\][a-fA-F0-9]+\[/emj\]")
    pieces = []
    cursor = 0
    for m in token.finditer(string):
        # Copy the plain text before the token, then the decoded token.
        pieces.append(string[cursor:m.start()])
        pieces.append(__emoji_decode(m.group(0)))
        cursor = m.end()
    # Trailing plain text after the last token (empty string if none).
    pieces.append(string[cursor:])
    return "".join(pieces)
if __name__ == '__main__':
    # Manual smoke test (Python 2 syntax): round-trip the contents of a
    # sample file through the encoder and decoder, printing each stage.
    # NOTE(review): requires /tmp/emoji.txt to exist; the file handle is
    # never closed and the bare `print` statements are Python-2-only.
    __string_raw = open("/tmp/emoji.txt").read().decode("utf-8")
    print __string_raw
    __s1 = encode_string_emoji(__string_raw)
    print __s1
    __s2 = decode_string_emoji(__s1)
    print __s2
54a10f78a6c71d88e1c2441bb636e6b636f74613 | add unit test | rstem/led2/test_unittest.py | rstem/led2/test_unittest.py | #!/usr/bin/python3
import unittest
import os
from rstem import led2
# TODO: single setup for all testcases????
# TODO: make hardware versions that you can optionally skip
if __name__ == '__main__':
    # NOTE(review): this guard sits above the TestCase classes, so when
    # the file is run directly unittest.main() inspects the module before
    # any test class exists and discovers nothing; it should be moved to
    # the bottom of the module.
    unittest.main()
def query(question):
    """Ask the operator a yes/no question on stdin.

    :param question: prompt text shown to the user
    :return: True when the user answers 1, False when they answer 0
    :raises ValueError: for any other integer (non-integer input raises
        from int() itself)
    """
    answer = int(input(question + " [yes=1,no=0]: "))
    if answer not in (0, 1):
        raise ValueError("Please only provide 1 or 0. Thank You!")
    return answer == 1
class PrivateFunctions(unittest.TestCase):
    """Unit tests for the private helpers of rstem.led2."""

    def test_valid_color(self):
        """_valid_color accepts ints 0-15, their hex-char forms and '-',
        and rejects everything else."""
        for i in range(16):
            self.assertTrue(led2._valid_color(i))
            self.assertTrue(led2._valid_color(hex(i)[2:]))
        self.assertTrue(led2._valid_color('-'))
        self.assertFalse(led2._valid_color(17))
        self.assertFalse(led2._valid_color(-1))
        # random invalid characters
        self.assertFalse(led2._valid_color('j'))
        self.assertFalse(led2._valid_color('%'))
        self.assertFalse(led2._valid_color('10'))  # TODO: should we allow non-hex versions?

    def test_convert_color(self):
        """_convert_color maps hex chars and ints 0-15 to ints and
        rejects invalid characters."""
        # Pass the callable and its argument separately: the original
        # wrote assertRaises(ValueError, led2._convert_color('J')), which
        # called the function immediately, so assertRaises never saw it.
        self.assertRaises(ValueError, led2._convert_color, 'J')
        for i in range(16):
            # assertEqual replaces the deprecated assertEquals alias.
            self.assertEqual(led2._convert_color(hex(i)[2:]), i)
            self.assertEqual(led2._convert_color(i), i)
class PrimativeFunctions(unittest.TestCase):
    """Interactive hardware tests for the led2 point/line primitives.

    These need the LED matrix hardware attached and a human operator to
    confirm the visual result via query().
    """

    def setUp(self):
        # Assumes a 1x2 grid of LED matrices is attached.
        led2.initGrid(1,2)

    def tearDown(self):
        led2.close()

    def test_point(self):
        """Light every pixel, with and without an explicit color."""
        for y in range(8):
            for x in range(16):
                # test without a color
                self.assertEquals(led2.point(x,y), 1)
                self.assertEquals(led2.point((x,y)), 1)
                # test with all the colors
                # NOTE(review): `color` is never actually passed to
                # led2.point(), so this inner loop just repeats the
                # no-color call 17 times — presumably a bug; confirm.
                for color in range(17):
                    self.assertEquals(led2.point(x,y), 1)
                    self.assertEquals(led2.point((x,y)), 1)
        self.assertEquals(led2.show(), 1)
        self.assertTrue(query("Is the entire matrix at full brightness?"))

    def test_line(self):
        """Draw a full-diagonal line and ask the operator to confirm."""
        # NOTE(review): this calls led2.point() with two tuples although
        # the test name and the prompt below are about a line —
        # presumably led2.line((0,0),(15,7)) was intended; verify against
        # the led2 API before changing.
        self.assertEquals(led2.point((0,0),(15,7)), 1)
        self.assertEquals(led2.show(), 1)
        self.assertTrue(query("Is there a line from (0,0) to (15,7)?"))
class TestingSprites(unittest.TestCase):
    """Interactive hardware tests for sprite and text rendering."""

    def setUp(self):
        led2.initGrid(1,2)

    def tearDown(self):
        led2.close()

    def test_init_sprite(self):
        """Load the bundled test sprite and confirm it is displayed."""
        # The original guarded this with `if font_path is None:` but
        # font_path was never defined anywhere, raising NameError; the
        # sprite path is always resolved relative to this test module.
        this_dir, this_filename = os.path.split(__file__)
        self.assertEqual(led2.sprite(this_dir + "/test_sprite"), 1)
        self.assertEqual(led2.show(), 1)
        self.assertTrue(query("Do you see the test pattern on the left led matrix?"))
        self.assertEqual(led2.fill(0), 1)
        self.assertEqual(led2.show(), 1)
        self.assertFalse(query("What about now?"))

    def test_text(self):
        """Render text and ask the operator to confirm it is visible."""
        self.assertNotEqual(led2.text("jon"), 0)
        self.assertEqual(led2.show(), 1)
        self.assertTrue(query("Is 'jon' displayed with large font?"))
| Python | 0.000001 | |
f0587e44be1c7f85dbbf54e1d6c47458a4960d7c | Create date_time.py | date_time.py | date_time.py | #!/usr/bin/env python
# -*_ coding: utf-8 -*-
import datetime
import sys
def main():
    """Interactive clock: answer 'time', 'day' or 'date' requests on
    stdin until the user types 'quit'."""
    while True:
        user_request = input("\nCurrent [time, day, date]: ")
        if user_request == "quit":
            sys.exit()
        # Sample the clock on every request; the original read it once
        # before the loop, so every answer was frozen at start-up time.
        now = datetime.datetime.now()
        if user_request == "time":
            # Unpadded h:m:s, matching the original str() concatenation.
            print("\nIt is: %d:%d:%d" % (now.hour, now.minute, now.second))
        if user_request == "day":
            print("\nIt is " + now.strftime("%A"))
        if user_request == "date":
            # Unpadded y-m-d, matching the original str() concatenation.
            print("\nIt is: %d-%d-%d" % (now.year, now.month, now.day))
# Run the interactive loop only when executed as a script.
if __name__ == '__main__':
    main()
| Python | 0.000939 | |
c15f8805d3ce5eab9f46dc24a6845ce27b117ac3 | Add TeamTest | dbaas/account/tests/test_team.py | dbaas/account/tests/test_team.py | # -*- coding: utf-8 -*-
from __future__ import absolute_import, unicode_literals
from django.test import TestCase
from django.db import IntegrityError
from . import factory
from ..models import Team
from drivers import base
import logging
LOG = logging.getLogger(__name__)
class TeamTest(TestCase):
    """Tests for the Team model."""

    def setUp(self):
        self.new_team = factory.TeamFactory()

    def test_create_new_team(self):
        """
        Test new Team creation
        """
        # A factory-built Team must be persisted (have a primary key).
        self.assertTrue(self.new_team.pk)

    def test_cant_create_a_new_user(self):
        """Saving a Team missing required fields must fail.

        The original asserted ``assertFalse(self.team.save())``, but
        Django's Model.save() returns None on success, so that assertion
        passed whether or not the save failed.  Expect the database
        IntegrityError (already imported at module level) instead.
        """
        self.team = Team()
        self.team.name = "Team1"
        self.team.role_id = factory.RoleFactory().pk
        self.assertRaises(IntegrityError, self.team.save)
| Python | 0 | |
264863c1a8d60dd35babec22470626d13ebf3e66 | Remove unused import.t | debug_toolbar/panels/__init__.py | debug_toolbar/panels/__init__.py | from __future__ import absolute_import, unicode_literals
import warnings
from django.template.loader import render_to_string
class Panel(object):
    """
    Base class for panels.

    Subclasses set ``template`` (and usually ``has_content = True``) and
    implement the title hooks; the toolbar drives the middleware-style
    ``process_*`` methods and the instrumentation hooks.
    """

    # name = 'Base'
    # template = 'debug_toolbar/panels/base.html'

    # If content returns something, set to True in subclass
    has_content = False

    # Class-level defaults for the template context.  Each instance gets
    # its own copy in __init__: the original updated this shared dict in
    # place through ``self.context.update(...)``, leaking context between
    # every panel instance despite the comment promising a "local"
    # context instance.
    context = {}

    # Panel methods
    def __init__(self, toolbar, context=None):
        self.toolbar = toolbar
        # Copy so per-instance updates cannot touch the class attribute;
        # using None instead of a mutable {} default avoids sharing the
        # default dict between calls.
        self.context = dict(self.context)
        if context:
            self.context.update(context)

    def content(self):
        """Render the panel body from its template and recorded stats,
        or None when the panel declares no content."""
        if self.has_content:
            context = self.context.copy()
            context.update(self.get_stats())
            return render_to_string(self.template, context)

    @property
    def panel_id(self):
        """Identifier used in cookies and the stats store (class name)."""
        return self.__class__.__name__

    @property
    def enabled(self):
        """Whether the user has this panel switched on (cookie, default on)."""
        return self.toolbar.request.COOKIES.get('djdt' + self.panel_id, 'on') == 'on'

    # URLs for panel-specific views
    @classmethod
    def get_urls(cls):
        return []

    # Titles and subtitles
    def nav_title(self):
        """Title showing in sidebar"""
        raise NotImplementedError

    def nav_subtitle(self):
        """Subtitle showing under title in sidebar"""
        return ''

    def title(self):
        """Title showing in panel"""
        raise NotImplementedError

    # Enable and disable (expensive) instrumentation, must be idempotent
    def enable_instrumentation(self):
        pass

    def disable_instrumentation(self):
        pass

    # Store and retrieve stats (shared between panels for no good reason)
    def record_stats(self, stats):
        self.toolbar.stats.setdefault(self.panel_id, {}).update(stats)

    def get_stats(self):
        return self.toolbar.stats.get(self.panel_id, {})

    # Standard middleware methods
    def process_request(self, request):
        pass

    def process_view(self, request, view_func, view_args, view_kwargs):
        pass

    def process_response(self, request, response):
        pass
# Backward-compatibility for 1.0, remove in 2.0.
class DebugPanel(Panel):
    """Deprecated alias of Panel kept for third-party panels written
    against the 1.0 API; emits a DeprecationWarning on instantiation."""

    def __init__(self, *args, **kwargs):
        warnings.warn("DebugPanel was renamed to Panel.", DeprecationWarning)
        super(DebugPanel, self).__init__(*args, **kwargs)
| from __future__ import absolute_import, unicode_literals
import warnings
from django.template.defaultfilters import slugify
from django.template.loader import render_to_string
class Panel(object):
"""
Base class for panels.
"""
# name = 'Base'
# template = 'debug_toolbar/panels/base.html'
# If content returns something, set to True in subclass
has_content = False
# We'll maintain a local context instance so we can expose our template
# context variables to panels which need them:
context = {}
# Panel methods
def __init__(self, toolbar, context={}):
self.toolbar = toolbar
self.context.update(context)
def content(self):
if self.has_content:
context = self.context.copy()
context.update(self.get_stats())
return render_to_string(self.template, context)
@property
def panel_id(self):
return self.__class__.__name__
@property
def enabled(self):
return self.toolbar.request.COOKIES.get('djdt' + self.panel_id, 'on') == 'on'
# URLs for panel-specific views
@classmethod
def get_urls(cls):
return []
# Titles and subtitles
def nav_title(self):
"""Title showing in sidebar"""
raise NotImplementedError
def nav_subtitle(self):
"""Subtitle showing under title in sidebar"""
return ''
def title(self):
"""Title showing in panel"""
raise NotImplementedError
# Enable and disable (expensive) instrumentation, must be idempotent
def enable_instrumentation(self):
pass
def disable_instrumentation(self):
pass
# Store and retrieve stats (shared between panels for no good reason)
def record_stats(self, stats):
self.toolbar.stats.setdefault(self.panel_id, {}).update(stats)
def get_stats(self):
return self.toolbar.stats.get(self.panel_id, {})
# Standard middleware methods
def process_request(self, request):
pass
def process_view(self, request, view_func, view_args, view_kwargs):
pass
def process_response(self, request, response):
pass
# Backward-compatibility for 1.0, remove in 2.0.
class DebugPanel(Panel):
def __init__(self, *args, **kwargs):
warnings.warn("DebugPanel was renamed to Panel.", DeprecationWarning)
super(DebugPanel, self).__init__(*args, **kwargs)
| Python | 0 |
4bfd69bc49b17e7844077949560bd6259ea33e9b | test the root scrubadub api | tests/test_api.py | tests/test_api.py | import unittest
import scrubadub
class APITestCase(unittest.TestCase):
    """Exercise scrubadub's top-level convenience functions."""

    def test_clean(self):
        """Test the top level clean api"""
        self.assertEqual(
            scrubadub.clean("This is a test message for example@exampe.com"),
            "This is a test message for {{EMAIL}}",
        )

    def test_clean_docuemnts(self):
        """Test the top level clean_documents api"""
        # NOTE(review): the method name typo ("docuemnts") is kept so the
        # externally visible test id stays stable; rename separately.
        self.assertEqual(
            scrubadub.clean_documents(
                {
                    "first.txt": "This is a test message for example@exampe.com",
                    "second.txt": "Hello Jane I am Tom.",
                }
            ),
            {
                "first.txt": "This is a test message for {{EMAIL}}",
                "second.txt": "Hello {{NAME}} I am {{NAME}}.",
            }
        )

    def test_list_filth(self):
        """Test the top level list_filth api"""
        filths = scrubadub.list_filth("This is a test message for example@example.com")
        self.assertEqual(
            filths,
            [scrubadub.filth.EmailFilth(text='example@example.com', detector_name='email', beg=27, end=46)],
        )

    def test_list_filth_docuemnts(self):
        """Test the top level list_filth_documents api"""
        filths = scrubadub.list_filth_documents(
            {
                "first.txt": "This is a test message for example@example.com",
                "second.txt": "Hello Jane I am Tom.",
            }
        )
        # (Removed a leftover debug print(filths) that was here.)
        self.assertEqual(
            filths,
            [
                scrubadub.filth.EmailFilth(text='example@example.com', document_name='first.txt', detector_name='email', beg=27, end=46),
                scrubadub.filth.NameFilth(text='Jane', document_name='second.txt', detector_name='name', beg=6, end=10),
                scrubadub.filth.NameFilth(text='Tom', document_name='second.txt', detector_name='name', beg=16, end=19)
            ]
        )
| Python | 0.000007 | |
cd906789b4ed339542722c04dd09f8aca04fd7ff | add missing revision | crowdsourcing/migrations/0170_task_price.py | crowdsourcing/migrations/0170_task_price.py | # -*- coding: utf-8 -*-
# Generated by Django 1.9 on 2017-05-24 00:15
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
    """Add the nullable float ``price`` column to ``crowdsourcing.Task``."""

    dependencies = [
        ('crowdsourcing', '0169_auto_20170524_0002'),
    ]

    operations = [
        migrations.AddField(
            model_name='task',
            name='price',
            field=models.FloatField(null=True),
        ),
    ]
| Python | 0.000004 | |
369dff642883ded68eca98754ce81369634da94d | Add box tests | tests/test_box.py | tests/test_box.py | import pytest
from rich.box import ASCII, DOUBLE, ROUNDED, HEAVY
def test_str():
    # __str__ renders the whole box template for a width-2 column.
    assert str(ASCII) == "+--+\n| ||\n|-+|\n| ||\n|-+|\n|-+|\n| ||\n+--+\n"
def test_repr():
    # Box instances share a fixed placeholder repr.
    assert repr(ASCII) == "Box(...)"
def test_get_top():
    # Top edge for two columns of widths 1 and 2.
    top = HEAVY.get_top(widths=[1, 2])
    assert top == "┏━┳━━┓"
def test_get_row():
    # Separator rows at each supported level, plus the invalid-level
    # error path ("FOO" must raise ValueError).
    head_row = DOUBLE.get_row(widths=[3, 2, 1], level="head")
    assert head_row == "╠═══╬══╬═╣"
    row = ASCII.get_row(widths=[1, 2, 3], level="row")
    assert row == "|-+--+---|"
    foot_row = ROUNDED.get_row(widths=[2, 1, 3], level="foot")
    assert foot_row == "├──┼─┼───┤"
    with pytest.raises(ValueError):
        ROUNDED.get_row(widths=[1, 2, 3], level="FOO")
def test_get_bottom():
    # Bottom edge for three columns of widths 1, 2 and 3.
    bottom = HEAVY.get_bottom(widths=[1, 2, 3])
    assert bottom == "┗━┻━━┻━━━┛"
| Python | 0.000001 | |
dbaad481ab9ddbdccd4430765e3eee0d0433fbd8 | Create doc_check.py | doc_check.py | doc_check.py | import requests,json,ctypes,time
# Watch list of (speciality_id, clinic_id, name_filter, description);
# an empty name_filter matches any doctor name.
tasks = [
    # speciality_id, clinic_id, name '' - for any name, description
    (40,279,'','Невролог'),
    (2122,314,'Гусаров','Дерматолог'),
]

# Request headers mimicking a desktop Chrome session — presumably needed
# so gorzdrav.spb.ru treats the request like its own web UI (TODO confirm).
h = { 'Host': 'gorzdrav.spb.ru',
    'Connection': 'keep-alive',
    'Accept': '*/*',
    'X-Requested-With': 'XMLHttpRequest',
    'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/76.0.3809.100 Safari/537.36',
    'Sec-Fetch-Mode': 'cors',
    'Content-Type': 'application/x-www-form-urlencoded; charset=UTF-8',
    'Origin': 'https://gorzdrav.spb.ru',
    'Sec-Fetch-Site': 'same-origin',
    'Referer': 'https://gorzdrav.spb.ru/signup/free/',
    'Accept-Encoding': 'gzip, deflate, br',
    'Accept-Language': 'ru-RU,ru;q=0.9,en-GB;q=0.8,en;q=0.7,en-US;q=0.6'}
def doc_check():
    """Poll gorzdrav.spb.ru for free tickets for every configured task
    and pop up a Windows message box when one appears."""
    for task in tasks:
        speciality_id, clinic_id, name_filter, description = task
        r = requests.post(
            "https://gorzdrav.spb.ru/api/doctor_list/",
            data="speciality_form-speciality_id=" + str(speciality_id)
                 + "&speciality_form-clinic_id=+" + str(clinic_id)
                 + "&speciality_form-patient_id=&speciality_form-history_id=",
            headers=h)
        resp = json.loads(r.text)
        for doc in resp['response']:
            # Compare by value: the original used `is not 0` / `is ''`,
            # which test object identity and only work by accident of
            # CPython's small-object caching.
            if doc['CountFreeTicket'] != 0 and name_filter in doc['Name']:
                ctypes.windll.user32.MessageBoxW(
                    0,
                    "New ticket avalible!\n" + description + " match: " + name_filter,
                    time.ctime(), 4096)
                if name_filter == '':
                    # Any-name task: one notification per sweep is enough.
                    break
# Poll forever, pausing just over a minute between sweeps.
while True:
    doc_check()
    time.sleep(65)
| Python | 0.000001 | |
8eec6e7596e8a5bd8159753be2aeaaffb53f613b | Add Python version | Python/shorturl.py | Python/shorturl.py | class ShortURL:
"""
ShortURL: Bijective conversion between natural numbers (IDs) and short strings
ShortURL.encode() takes an ID and turns it into a short string
ShortURL.decode() takes a short string and turns it into an ID
Features:
+ large alphabet (51 chars) and thus very short resulting strings
+ proof against offensive words (removed 'a', 'e', 'i', 'o' and 'u')
+ unambiguous (removed 'I', 'l', '1', 'O' and '0')
Example output:
123456789 <=> pgK8p
Source: https://github.com/delight-im/ShortURL (Apache License 2.0)
"""
_alphabet = '23456789bcdfghjkmnpqrstvwxyzBCDFGHJKLMNPQRSTVWXYZ-_'
_base = len(_alphabet)
def encode(self, number):
string = ''
while(number > 0):
string = self._alphabet[number % self._base] + string
number //= self._base
return string
def decode(self, string):
number = 0
for char in string:
number = number * self._base + self._alphabet.index(char)
return number
| Python | 0.000023 | |
7a5ca2f63dab36664ace637b713d7772870a800a | Create make-fingerprint.py | make-fingerprint.py | make-fingerprint.py | Python | 0.000102 | ||
cd7187dc916ebbd49a324f1f43b24fbb44e9c9dc | Create afstand_sensor.py | afstand_sensor.py | afstand_sensor.py | import gpiozero
from time import sleep
sensor = gpiozero.DistanceSensor(echo=18,trigger=17,max_distance=2, threshold_distance=0.5)
led = gpiozero.LED(22)
while True:
afstand = round(sensor.distance*100)
print('obstakel op', afstand, 'cm')
if sensor.in_range:
led.on()
sleep(1)
led.off()
| Python | 0.000011 | |
f1bda6deeb97c50a5606bea59d1684d6d96b10b4 | Create api_call.py | PYTHON/api_call.py | PYTHON/api_call.py | def product_import_tme(request):
# /product/product_import_tme/
token = '<your's token(Anonymous key:)>'
app_secret = '<Application secret>'
params = {
'SymbolList[0]': '1N4007',
'Country': 'PL',
'Currency': 'PLN',
'Language': 'PL',
}
response = api_call('Products/GetPrices', params, token, app_secret, True);
response = json.loads(response)
print response
def api_call(action, params, token, app_secret, show_header=False):
    """POST a signed request to the TME API and return the raw body.

    Python 2 only (urllib/urllib2 APIs, str-based hmac).

    :param action: API action path, e.g. 'Products/GetPrices'
    :param params: dict of request parameters (mutated: Token and
        ApiSignature are added)
    :param token: public API token
    :param app_secret: secret used to HMAC-SHA1 sign the request
    :param show_header: unused  # NOTE(review): never read in the body
    :return: response body as a string

    NOTE(review): this module uses urllib, urllib2, collections, base64,
    hmac and hashlib but no import lines are visible in this file —
    confirm they exist at module top.
    """
    api_url = u'https://api.tme.eu/' + action + '.json';
    params['Token'] = token;
    # Signature base is 'POST&<quoted url>&<quoted sorted params>',
    # HMAC-SHA1 signed with the app secret and base64-encoded.
    params = collections.OrderedDict(sorted(params.items()))
    encoded_params = urllib.urlencode(params, '')
    signature_base = 'POST' + '&' + urllib.quote(api_url, '') + '&' + urllib.quote(encoded_params, '')
    api_signature = base64.encodestring(hmac.new(app_secret, signature_base, hashlib.sha1).digest()).rstrip()
    params['ApiSignature'] = api_signature
    # NOTE(review): `opts` is built but never used anywhere below.
    opts = {'http' :
        {
            'method' : 'POST',
            'header' : 'Content-type: application/x-www-form-urlencoded',
            'content' : urllib.urlencode(params)
        }
    }
    http_header = {
        "Content-type": "application/x-www-form-urlencoded",
    }
    # create your HTTP request
    req = urllib2.Request(api_url, urllib.urlencode(params), http_header)
    # submit your request
    res = urllib2.urlopen(req)
    html = res.read()
    return html
| Python | 0.000002 | |
1166ef7520ee26836402f028cb52ed95db7173e6 | Add CTC_new_refund_limited_all_payroll migration file | webapp/apps/taxbrain/migrations/0058_taxsaveinputs_ctc_new_refund_limited_all_payroll.py | webapp/apps/taxbrain/migrations/0058_taxsaveinputs_ctc_new_refund_limited_all_payroll.py | # -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
class Migration(migrations.Migration):
    """Add the CTC_new_refund_limited_all_payroll flag (stored as a
    'True'/'False' string) to TaxSaveInputs."""

    dependencies = [
        ('taxbrain', '0057_jsonreformtaxcalculator_errors_warnings_text'),
    ]

    operations = [
        migrations.AddField(
            model_name='taxsaveinputs',
            name='CTC_new_refund_limited_all_payroll',
            field=models.CharField(default=b'False', max_length=50, null=True, blank=True),
        ),
    ]
| Python | 0 | |
fc1a9b7870f4d7e789c3968df6ddda698a7c4d62 | update to search all the TEMPLATES configurations | django_extensions/compat.py | django_extensions/compat.py | # -*- coding: utf-8 -*-
from __future__ import unicode_literals
from io import BytesIO
import csv
import six
import codecs
import importlib
import django
from django.conf import settings
#
# Django compatibility
#
def load_tag_library(libname):
    """Load a templatetag library on multiple Django versions.

    Returns None if the library isn't loaded.
    """
    if django.VERSION < (1, 9):
        # Django < 1.9 exposes get_library on the template engine directly.
        from django.template.base import get_library, InvalidTemplateLibrary
        try:
            lib = get_library(libname)
            return lib
        except InvalidTemplateLibrary:
            return None
    else:
        # Django >= 1.9: look up the dotted path of the installed library
        # and import its `register` object.
        from django.template.backends.django import get_installed_libraries
        from django.template.library import InvalidTemplateLibrary
        try:
            lib = get_installed_libraries()[libname]
            lib = importlib.import_module(lib).register
            return lib
        except (InvalidTemplateLibrary, KeyError):
            return None
def get_template_setting(template_key, default=None):
    """Read a template setting across Django versions.

    Searches every configuration in the Django >= 1.8 ``TEMPLATES`` list
    and returns the first value found for *template_key*; for 'DIRS' it
    falls back to the pre-1.8 flat ``TEMPLATES_DIRS`` setting, and
    finally to *default*.
    """
    for config in (getattr(settings, 'TEMPLATES', None) or ()):
        if template_key in config:
            return config[template_key]
    if template_key == 'DIRS':
        # Pre-Django-1.8 projects exposed this as TEMPLATES_DIRS.
        return getattr(settings, 'TEMPLATES_%s' % template_key, default)
    return default
class UnicodeWriter:
    """
    A CSV writer which will write rows to CSV file "f",
    which is encoded in the given encoding.
    We are using this custom UnicodeWriter for python versions 2.x

    Rows are expected to contain unicode strings; each row is first
    serialised as UTF-8 CSV into an in-memory queue, then re-encoded to
    the target encoding and written to the wrapped stream.
    """

    def __init__(self, f, dialect=csv.excel, encoding="utf-8", **kwds):
        # In-memory staging buffer for the raw UTF-8 CSV bytes.
        self.queue = BytesIO()
        self.writer = csv.writer(self.queue, dialect=dialect, **kwds)
        self.stream = f
        # Incremental encoder for the caller's target encoding.
        self.encoder = codecs.getincrementalencoder(encoding)()

    def writerow(self, row):
        # NOTE(review): assumes every cell is a string; non-string cells
        # would need str()/unicode() conversion first.
        self.writer.writerow([s.encode("utf-8") for s in row])
        # Fetch UTF-8 output from the queue ...
        data = self.queue.getvalue()
        data = data.decode("utf-8")
        # ... and reencode it into the target encoding
        data = self.encoder.encode(data)
        # write to the target stream
        self.stream.write(data)
        # empty queue
        self.queue.truncate(0)

    def writerows(self, rows):
        for row in rows:
            self.writerow(row)
from csv import writer  # noqa

# Default csv.writer for PY3 versions
csv_writer = writer

if six.PY2:
    # unicode CSVWriter for PY2
    csv_writer = UnicodeWriter  # noqa
| # -*- coding: utf-8 -*-
from __future__ import unicode_literals
from io import BytesIO
import csv
import six
import codecs
import importlib
import django
from django.conf import settings
#
# Django compatibility
#
def load_tag_library(libname):
"""Load a templatetag library on multiple Django versions.
Returns None if the library isn't loaded.
"""
if django.VERSION < (1, 9):
from django.template.base import get_library, InvalidTemplateLibrary
try:
lib = get_library(libname)
return lib
except InvalidTemplateLibrary:
return None
else:
from django.template.backends.django import get_installed_libraries
from django.template.library import InvalidTemplateLibrary
try:
lib = get_installed_libraries()[libname]
lib = importlib.import_module(lib).register
return lib
except (InvalidTemplateLibrary, KeyError):
return None
def get_template_setting(template_key, default=None):
""" Read template settings pre and post django 1.8 """
templates_var = getattr(settings, 'TEMPLATES', None)
if templates_var is not None and template_key in templates_var[0]:
return templates_var[0][template_key]
if template_key == 'DIRS':
pre18_template_key = 'TEMPLATES_%s' % template_key
value = getattr(settings, pre18_template_key, default)
return value
return default
class UnicodeWriter:
"""
A CSV writer which will write rows to CSV file "f",
which is encoded in the given encoding.
We are using this custom UnicodeWriter for python versions 2.x
"""
def __init__(self, f, dialect=csv.excel, encoding="utf-8", **kwds):
self.queue = BytesIO()
self.writer = csv.writer(self.queue, dialect=dialect, **kwds)
self.stream = f
self.encoder = codecs.getincrementalencoder(encoding)()
def writerow(self, row):
self.writer.writerow([s.encode("utf-8") for s in row])
# Fetch UTF-8 output from the queue ...
data = self.queue.getvalue()
data = data.decode("utf-8")
# ... and reencode it into the target encoding
data = self.encoder.encode(data)
# write to the target stream
self.stream.write(data)
# empty queue
self.queue.truncate(0)
def writerows(self, rows):
for row in rows:
self.writerow(row)
from csv import writer # noqa
# Default csv.writer for PY3 versions
csv_writer = writer
if six.PY2:
# unicode CSVWriter for PY2
csv_writer = UnicodeWriter # noqa
| Python | 0 |
55d9610f519713b889ffb68daa3c72ef6c349d3d | Add ExportPPMs.py script. | TrueType/ExportPPMs.py | TrueType/ExportPPMs.py | #FLM: Export PPMs
"""
This script will write (or overwrite) a 'ppms' file in the same directory
as the opened VFB file. This 'ppms' file contains the TrueType stem values
and the ppm values at which the pixel jumps occur. These values can later
be edited as the 'ppms' file is used as part of the conversion process.
"""
import os
from FL import fl
kPPMsFileName = "ppms"
def collectPPMs():
    """Collect the TrueType horizontal and vertical stem data of the
    currently open FontLab font as tab-separated lines.

    :return: list of lines, starting with a header row, each subsequent
        line holding name, width and the ppm2-ppm6 pixel-jump values.
    """
    rows = ["#Name\tWidth\tppm2\tppm3\tppm4\tppm5\tppm6\n"]
    fmt = '%s\t%d\t%d\t%d\t%d\t%d\t%d\n'
    for stem in fl.font.ttinfo.hstem_data:
        rows.append(fmt % (stem.name, stem.width, stem.ppm2, stem.ppm3,
                           stem.ppm4, stem.ppm5, stem.ppm6))
    for stem in fl.font.ttinfo.vstem_data:
        rows.append(fmt % (stem.name, stem.width, stem.ppm2, stem.ppm3,
                           stem.ppm4, stem.ppm5, stem.ppm6))
    return rows
def writePPMsFile(content):
    """Write (or overwrite) the 'ppms' file next to the open VFB.

    :param content: list of lines as produced by collectPPMs()
    """
    # path to the folder where the font is contained and the font's file name:
    folderPath, fontFileName = os.path.split(fl.font.file_name)
    filePath = os.path.join(folderPath, kPPMsFileName)
    # try/finally guarantees the handle is closed even if writing fails
    # (the original leaked it); preferred over `with` because FontLab
    # ships a very old Python.
    outfile = open(filePath, 'w')
    try:
        outfile.writelines(content)
    finally:
        outfile.close()
def run():
if len(fl):
if (fl.font.file_name is None):
print "ERROR: You must save the VFB first."
return
if len(fl.font.ttinfo.hstem_data):
ppmsList = collectPPMs()
writePPMsFile(ppmsList)
print "Done!"
else:
print "ERROR: The font has no TT stems data."
else:
print "ERROR: No font opened."
# Allow running as a standalone macro/script.
if __name__ == "__main__":
    run()
| Python | 0 | |
5aff575cec6ddb10cba2e52ab841ec2197a0e172 | Add SignalTimeout context manager | Utils/SignalTimeout.py | Utils/SignalTimeout.py | # Taken from https://gist.github.com/ekimekim/b01158dc36c6e2155046684511595d57
import os
import signal
import subprocess
class Timeout(Exception):
    """Raised inside the guarded block when the timeout elapses."""


class SignalTimeout(object):
    """Context manager that raises ``to_raise`` if the inner block takes
    longer than ``timeout`` seconds.

    Works by spawning a helper shell process that sends ``signal`` to
    this process after the timeout, so it can even interrupt hard loops
    in C extension code.  Unix-only (relies on `sleep`/`kill` and
    SIGUSR1) and must run in the main thread (signal handlers).
    """

    def __init__(self, timeout, signal=signal.SIGUSR1, to_raise=Timeout):
        self.timeout = float(timeout)
        self.signal = signal
        self.to_raise = to_raise
        self.old_handler = None
        self.proc = None

    def __enter__(self):
        self.old_handler = signal.signal(self.signal, self._on_signal)
        # Helper process: sleep, then signal us.  int(...) is required on
        # Python 3 < 3.11, where str(Signals.SIGUSR1) formats as
        # 'Signals.SIGUSR1' and would produce an invalid kill command.
        self.proc = subprocess.Popen(
            'sleep {timeout} && kill -{signal} {pid}'.format(
                timeout=self.timeout,
                signal=int(self.signal),
                pid=os.getpid(),
            ),
            shell=True,
        )

    def __exit__(self, *exc_info):
        # Kill the helper if the block finished in time, then restore the
        # previous handler.
        if self.proc.poll() is None:
            self.proc.kill()
        my_handler = signal.signal(self.signal, self.old_handler)
        assert my_handler == self._on_signal, "someone else has been fiddling with our signal handler?"

    def _on_signal(self, signum, frame):
        # Chain to any previous *callable* handler.  The original called
        # self.old_handler whenever it was truthy, which crashes on the
        # non-callable SIG_IGN sentinel (the int 1).
        if callable(self.old_handler):
            self.old_handler(signum, frame)
        raise self.to_raise
| Python | 0.000001 | |
58020c2c207e02525d310a43af39e1282538957b | add new metric classes | mfr/core/metrics.py | mfr/core/metrics.py | import copy
def _merge_dicts(a, b, path=None):
    """Recursively merge *b* into *a* (in place) and return *a*.

    Nested dicts are merged key by key; equal leaf values are kept;
    conflicting leaf values raise with the dotted path of the conflict.
    Taken from: http://stackoverflow.com/a/7205107
    """
    path = path or []
    for key, b_val in b.items():
        if key not in a:
            a[key] = b_val
            continue
        a_val = a[key]
        if isinstance(a_val, dict) and isinstance(b_val, dict):
            _merge_dicts(a_val, b_val, path + [str(key)])
        elif a_val != b_val:
            raise Exception('Conflict at %s' % '.'.join(path + [str(key)]))
    return a
class MetricsBase():
    """Lightweight wrapper around a dict to make keeping track of metrics a little easier.

    Current functionality is limited, but may be extended later. To do:

    * update/override method to indicate expectations of existing key

        self.metrics.add_default('some.flag', True)
        <later>
        self.metrics.override('some.flag', False)  # dies if 'some.flag' doesn't already exist

    * optional type validation?

        self.metrics.add('some.flag', True, bool())
        -or-
        self.metrics.define('some.flag', bool())
        <later>
        self.metrics.add('some.flag', 'foobar')  # dies, 'foobar' isn't a bool
    """

    def __init__(self):
        self._metrics = {}

    @property
    def key(self):
        """ID string for this object.

        Declared as a property to match the subclasses and manifesto(),
        which access it as an attribute; the original defined it as a
        plain method here, so ``self.key`` in manifesto() evaluated to a
        bound-method object for base-class instances instead of raising.
        """
        raise NotImplementedError

    def add(self, key, value):
        """add() stores the given value under the given key. Subkeys can be specified by placing
        a dot between the parent and child keys. e.g. 'foo.bar' will be interpreted as
        ``self._metrics['foo']['bar']``

        :param str key: the key to store ``value`` under
        :param value: the value to store, type unrestricted
        """
        self._set_dotted_key(self._metrics, key, value)

    def merge(self, record):
        """Merges a dict into the current metrics.

        :param dict record: a dict to merge with the current metrics
        """
        _merge_dicts(self._metrics, record)

    def serialize(self):
        """Return a (deep) copy of the metrics"""
        return copy.deepcopy(self._metrics)

    def manifesto(self):
        """'This is who I am and this is what I stand for!'

        Returns a dict with one entry: our key pointing to our metrics
        """
        return {self.key: self.serialize()}

    def _set_dotted_key(self, store, key, value):
        """Naive method to set nested dict values via dot-separated keys, e.g.
        'foo.bar' sets ``store['foo']['bar']``.  Intermediate keys are
        created as dicts; an existing non-dict intermediate will fail.
        """
        parts = key.split('.')
        current = store
        for part in parts[:-1]:
            current = current.setdefault(part, {})
        current[parts[-1]] = value
class MetricsRecord(MetricsBase):
    """A MetricsBase that carries a category and a list of subrecords;
    serialization includes each subrecord's metrics under its key."""

    def __init__(self, category):
        super().__init__()
        self.category = category
        self.subrecords = []

    @property
    def key(self):
        """ID string for this record: '{category}'"""
        return self.category

    def serialize(self):
        """Return this record's metrics with every subrecord's serialized
        metrics included under that subrecord's key."""
        combined = super().serialize()
        combined.update((sub.key, sub.serialize()) for sub in self.subrecords)
        return combined

    def new_subrecord(self, name):
        """Create a MetricsSubRecord with our category, remember it in
        ``subrecords``, and return it."""
        child = MetricsSubRecord(self.category, name)
        self.subrecords.append(child)
        return child
class MetricsSubRecord(MetricsRecord):
    """A MetricsRecord that also carries a name and identifies itself as
    '{category}_{name}'.

    Its own subrecords take this record's ``name`` as their category, so
    keys chain: MetricsRecord('foo') -> 'foo', its child 'bar' ->
    'foo_bar', that child's child 'baz' -> 'bar_baz'.
    """

    def __init__(self, category, name):
        super().__init__(category)
        self.name = name

    @property
    def key(self):
        """ID string for this subrecord: '{category}_{name}'"""
        return '{}_{}'.format(self.category, self.name)

    def new_subrecord(self, name):
        """Create and keep a child subrecord whose category is this
        subrecord's ``name`` (see the chaining example in the class doc)."""
        child = MetricsSubRecord(self.name, name)
        self.subrecords.append(child)
        return child
| Python | 0 | |
e18ee4aacd42ec28b2d54437f61d592b1cfaf594 | Create national_user_data_pull.py | custom/icds_reports/management/commands/national_user_data_pull.py | custom/icds_reports/management/commands/national_user_data_pull.py | import csv
from django.core.management.base import BaseCommand
from corehq.apps.reports.util import get_all_users_by_domain
from custom.icds_reports.const import INDIA_TIMEZONE
from custom.icds_reports.models import ICDSAuditEntryRecord
from django.db.models import Max
class Command(BaseCommand):
    help = "Custom data pull"

    def convert_to_ist(self, date):
        """Format a datetime in IST as 'dd/mm/YYYY, HH:MM AM/PM', or
        'N/A' for None."""
        if date is None:
            return 'N/A'
        date = date.astimezone(INDIA_TIMEZONE)
        # The original called date.strftime(date, fmt) — passing the
        # datetime as an extra positional argument, a TypeError.
        return date.strftime("%d/%m/%Y, %I:%M %p")

    def handle(self, *args, **options):
        """Write a CSV of last-usage time for every icds-cas user with
        the access_all_locations permission ('N/A' if never used)."""
        users = get_all_users_by_domain('icds-cas')
        usernames = [
            user.username
            for user in users
            if user.has_permission('icds-cas', 'access_all_locations')
        ]
        usage_data = (
            ICDSAuditEntryRecord.objects
            .filter(username__in=usernames)
            .values('username')
            .annotate(time=Max('time_of_use'))
        )
        rows = [["S.No", "username", "time"]]
        count = 1
        usernames_usage = []
        for usage in usage_data:
            # .values() yields dicts, so index by key; the original used
            # attribute access (usage.username), an AttributeError.
            rows.append([count, usage['username'], self.convert_to_ist(usage['time'])])
            usernames_usage.append(usage['username'])
            count += 1
        # Users with no audit record at all get an explicit N/A row.
        for user in usernames:
            if user not in usernames_usage:
                rows.append([count, user, "N/A"])
                count += 1
        # Context manager closes the file even on error (the original
        # leaked the handle).
        with open('/home/cchq/National_users_data.csv', 'w') as fout:
            csv.writer(fout).writerows(rows)
| Python | 0.000125 | |
0f983464451e828eff1f99859bc4334536e2d131 | add solarized theme for code snippets | docs/source/_themes/solarized.py | docs/source/_themes/solarized.py | # -*- coding: utf-8 -*-
"""
pygments.styles.solarized.light
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
The Solarized style, inspired by Schoonover.
:copyright: Copyright 2012 by the Shoji KUMAGAI, see AUTHORS.
:license: MIT, see LICENSE for details.
"""
from pygments.style import Style
from pygments.token import Keyword, Name, Comment, String, Error, Text, \
Number, Operator, Generic, Whitespace, Other, Literal, Punctuation
class LightStyle(Style):
"""
The Solarized Light style, inspired by Schoonover.
"""
background_color = '#fdf6e3'
default_style = ""
styles = {
Text: '#657b83', # base00 ; class: ''
Whitespace: '#fdf6e3', # base3 ; class: 'w'
Error: '#dc322f', # red ; class: 'err'
Other: '#657b83', # base00 ; class: 'x'
Comment: 'italic #93a1a1', # base1 ; class: 'c'
Comment.Multiline: 'italic #93a1a1', # base1 ; class: 'cm'
Comment.Preproc: 'italic #93a1a1', # base1 ; class: 'cp'
Comment.Single: 'italic #93a1a1', # base1 ; class: 'c1'
Comment.Special: 'italic #93a1a1', # base1 ; class: 'cs'
Keyword: '#859900', # green ; class: 'k'
Keyword.Constant: '#859900', # green ; class: 'kc'
Keyword.Declaration: '#859900', # green ; class: 'kd'
Keyword.Namespace: '#cb4b16', # orange ; class: 'kn'
Keyword.Pseudo: '#cb4b16', # orange ; class: 'kp'
Keyword.Reserved: '#859900', # green ; class: 'kr'
Keyword.Type: '#859900', # green ; class: 'kt'
Operator: '#657b83', # base00 ; class: 'o'
Operator.Word: '#859900', # green ; class: 'ow'
Name: '#586e75', # base01 ; class: 'n'
Name.Attribute: '#657b83', # base00 ; class: 'na'
Name.Builtin: '#268bd2', # blue ; class: 'nb'
Name.Builtin.Pseudo: 'bold #268bd2', # blue ; class: 'bp'
Name.Class: '#268bd2', # blue ; class: 'nc'
Name.Constant: '#b58900', # yellow ; class: 'no'
Name.Decorator: '#cb4b16', # orange ; class: 'nd'
Name.Entity: '#cb4b16', # orange ; class: 'ni'
Name.Exception: '#cb4b16', # orange ; class: 'ne'
Name.Function: '#268bd2', # blue ; class: 'nf'
Name.Property: '#268bd2', # blue ; class: 'py'
Name.Label: '#657b83', # base00 ; class: 'nc'
Name.Namespace: '#b58900', # yellow ; class: 'nn'
Name.Other: '#657b83', # base00 ; class: 'nx'
Name.Tag: '#859900', # green ; class: 'nt'
Name.Variable: '#cb4b16', # orange ; class: 'nv'
Name.Variable.Class: '#268bd2', # blue ; class: 'vc'
Name.Variable.Global: '#268bd2', # blue ; class: 'vg'
Name.Variable.Instance: '#268bd2', # blue ; class: 'vi'
Number: '#2aa198', # cyan ; class: 'm'
Number.Float: '#2aa198', # cyan ; class: 'mf'
Number.Hex: '#2aa198', # cyan ; class: 'mh'
Number.Integer: '#2aa198', # cyan ; class: 'mi'
Number.Integer.Long: '#2aa198', # cyan ; class: 'il'
Number.Oct: '#2aa198', # cyan ; class: 'mo'
Literal: '#657b83', # base00 ; class: 'l'
Literal.Date: '#657b83', # base00 ; class: 'ld'
Punctuation: '#657b83', # base00 ; class: 'p'
String: '#2aa198', # cyan ; class: 's'
String.Backtick: '#2aa198', # cyan ; class: 'sb'
String.Char: '#2aa198', # cyan ; class: 'sc'
String.Doc: '#2aa198', # cyan ; class: 'sd'
String.Double: '#2aa198', # cyan ; class: 's2'
String.Escape: '#cb4b16', # orange ; class: 'se'
String.Heredoc: '#2aa198', # cyan ; class: 'sh'
String.Interpol: '#cb4b16', # orange ; class: 'si'
String.Other: '#2aa198', # cyan ; class: 'sx'
String.Regex: '#2aa198', # cyan ; class: 'sr'
String.Single: '#2aa198', # cyan ; class: 's1'
String.Symbol: '#2aa198', # cyan ; class: 'ss'
Generic: '#657b83', # base00 ; class: 'g'
Generic.Deleted: '#657b83', # base00 ; class: 'gd'
Generic.Emph: '#657b83', # base00 ; class: 'ge'
Generic.Error: '#657b83', # base00 ; class: 'gr'
Generic.Heading: '#657b83', # base00 ; class: 'gh'
Generic.Inserted: '#657b83', # base00 ; class: 'gi'
Generic.Output: '#657b83', # base00 ; class: 'go'
Generic.Prompt: '#657b83', # base00 ; class: 'gp'
Generic.Strong: '#657b83', # base00 ; class: 'gs'
Generic.Subheading: '#657b83', # base00 ; class: 'gu'
Generic.Traceback: '#657b83', # base00 ; class: 'gt'
}
| Python | 0 | |
ed157602d965be952aadc9fe33b2e517c7f98ccf | Add urls | dumpling/urls.py | dumpling/urls.py | from django.conf.urls import include, url
from . import views
urlpatterns = [
url(r'css/(?P<name>.*)\.css$', views.styles, name='styles'),
url(r'(?P<path>.*)', views.PageView.as_view()),
]
| Python | 0.000013 | |
380178c585d4b9e2689ffdd72c9fa80be94fe3a9 | add more calculation | examples/ingap_gaas_radeta_paper.py | examples/ingap_gaas_radeta_paper.py | __author__ = 'kanhua'
import numpy as np
from scipy.interpolate import interp2d
import matplotlib.pyplot as plt
from scipy.io import savemat
from iii_v_si import calc_2j_si_eta, calc_3j_si_eta
if __name__=="__main__":
algaas_top_ere=np.logspace(-7,0,num=50)
algaas_mid_ere=np.logspace(-7,0,num=50)
eta_array=np.zeros((algaas_top_ere.shape[0],algaas_mid_ere.shape[0]))
for i,teg in enumerate(algaas_top_ere):
for j,meg in enumerate(algaas_mid_ere):
eta,_,_,_= calc_3j_si_eta(teg, meg, 1, top_band_gap=1.87, top_cell_qe=0.75, mid_band_gap=1.42,
mid_cell_qe=0.75,bot_cell_qe=0.75,bot_cell_eta=5e-4)
eta_array[i,j]=eta
np.savez("ingap_gaas_ere_3J_paper.npz",tbg=algaas_top_ere,mbg=algaas_mid_ere,eta=eta_array)
plt.pcolormesh(algaas_mid_ere,algaas_top_ere,eta_array)
plt.colorbar()
plt.xlabel("ERE of middle cell (eV)")
plt.ylabel("ERE of top cell (eV)")
plt.xlim([np.min(algaas_mid_ere),np.max(algaas_mid_ere)])
plt.ylim([np.min(algaas_top_ere),np.max(algaas_top_ere)])
plt.xscale("log")
plt.yscale("log")
plt.savefig("ingap_gaas_ere_3J_paper.pdf")
plt.show() | Python | 0 | |
93b38901b25f6c5db4700343050c5bb2fc6ef7e6 | add utility to make digraph out of router | emit/graphviz.py | emit/graphviz.py | def make_digraph(router, name='router'):
header = 'digraph %s {\n' % name
footer = '\n}'
lines = []
for origin, destinations in router.routes.items():
for destination in destinations:
lines.append('"%s" -> "%s";' % (origin, destination))
return header + '\n'.join(lines) + footer
| Python | 0 | |
c16620dffd2cd6396eb6b7db76a9c29849a16500 | Add support for cheminformatics descriptors | components/lie_structures/lie_structures/cheminfo_descriptors.py | components/lie_structures/lie_structures/cheminfo_descriptors.py | # -*- coding: utf-8 -*-
"""
file: cheminfo_molhandle.py
Cinfony driven cheminformatics fingerprint functions
"""
import logging
from twisted.logger import Logger
from . import toolkits
logging = Logger()
def available_descriptors():
"""
List available molecular descriptors for all active cheminformatics
packages
The webel toolkit has a descriptor service but the supported
descriptors are not listed in Cinfony. The toolkit is available
however.
:rtype: :py:dict
"""
available_descs = {'webel': None}
for toolkit, obj in toolkits.items():
if hasattr(obj, 'descs'):
available_descs[toolkit] = obj.descs
return available_descs | Python | 0 | |
718b8dcb87ae2b78e5ce0aded0504a81d599daf7 | Create envToFish.py | envToFish.py | envToFish.py | #!/usr/bin/env python
import os
import subprocess
badKeys = ['HOME', 'PWD', 'USER', '_', 'OLDPWD']
with open('profile.fish', 'w') as f:
for key, val in os.environ.items():
if key in badKeys:
continue
if key == 'PATH':
f.write("set -e PATH\n")
pathUnique = set(val.split(':'))
for elem in pathUnique:
f.write("set -gx PATH $PATH %s\n" % elem)
else:
f.write("set -gx %s '%s'\n" % (key, val))
| Python | 0 | |
01328db808d3f5f1f9df55117ef70924fb615a6a | Create config reader | escpos/config.py | escpos/config.py | from __future__ import absolute_import
import os
import appdirs
from localconfig import config
from . import printer
from .exceptions import *
class Config(object):
_app_name = 'python-escpos'
_config_file = 'config.ini'
def __init__(self):
self._has_loaded = False
self._printer = None
self._printer_name = None
self._printer_config = None
def load(self, config_path=None):
# If they didn't pass one, load default
if not config_path:
config_path = os.path.join(
appdirs.user_config_dir(self._app_name),
self._config_file
)
# Deal with one config or a list of them
# Configparser does this, but I need it for the list in the error message
if isinstance(config_path, basestring):
config_path = [config_path]
files_read = config.read(config_path)
if not files_read:
raise ConfigNotFoundError('Couldn\'t read config at one or more of {config_path}'.format(
config_path="\n".join(config_path),
))
if 'printer' in config:
# For some reason, dict(config.printer) raises
# TypeError: attribute of type 'NoneType' is not callable
self._printer_config = dict(list(config.printer))
self._printer_name = self._printer_config.pop('type').title()
if not self._printer_name or not hasattr(printer, self._printer_name):
raise ConfigSyntaxError('Printer type "{printer_name}" is invalid'.format(
printer_name=self._printer_name,
))
self._has_loaded = True
def printer(self):
if not self._has_loaded:
self.load()
if not self._printer:
# We could catch init errors and make them a ConfigSyntaxError,
# but I'll just let them pass
self._printer = getattr(printer, self._printer_name)(**self._printer_config)
return self._printer
| Python | 0.000001 | |
0f43f3bdc9b22e84da51e490664aeedc4295c8c9 | Add test for ELB | tests/test_elb.py | tests/test_elb.py | # -*- coding: utf-8 -*-
from jungle import cli
def test_elb_ls(runner, elb):
"""test for elb ls"""
result = runner.invoke(cli.cli, ['elb', 'ls'])
assert result.exit_code == 0
def test_elb_ls_with_l(runner, elb):
"""test for elb ls -l"""
result = runner.invoke(cli.cli, ['elb', 'ls', '-l'])
assert result.exit_code == 0
| Python | 0.000001 | |
c1bf53c5c278cafa3b1c070f8a232d5820dcb7a4 | add elb list test. | tests/test_elb.py | tests/test_elb.py | from __future__ import (absolute_import, print_function, unicode_literals)
from acli.output.elb import (output_elb_info, output_elbs)
from acli.services.elb import (elb_info, elb_list)
from acli.config import Config
from moto import mock_elb
import pytest
from boto3.session import Session
session = Session(region_name="eu-west-1")
@pytest.yield_fixture(scope='function')
def elb_instances():
"""ELB mock service"""
mock = mock_elb()
mock.start()
client = session.client('elb')
zones = ['eu-west-1a', 'eu-west-1b']
listeners = [{
'Protocol': 'string',
'LoadBalancerPort': 123,
'InstanceProtocol': 'string',
'InstancePort': 123,
'SSLCertificateId': 'string'}]
client.create_load_balancer(LoadBalancerName='my-lb', AvailabilityZones=zones, Listeners=listeners)
yield client.describe_load_balancers()
mock.stop()
config = Config(cli_args={'--region': 'eu-west-1',
'--access_key_id': 'AKIAIOSFODNN7EXAMPLE',
'--secret_access_key': 'wJalrXUtnFEMI/K7MDENG/bPxRfiCYEXAMPLEKEY'})
def test_elb_list_service(elb_instances):
with pytest.raises(SystemExit):
assert elb_list(aws_config=config)
# def test_elb_info_service(ec2_instances):
# with pytest.raises(SystemExit):
# assert elb_info(aws_config=config, elb_name=list(ec2_instances)[0].id)
| Python | 0 | |
ee3e04d32e39d6ac7ef4ac7abc2363a1ac9b8917 | Add an example for the music module | example_music.py | example_music.py | from screenfactory import create_screen
from modules.music import Music
import config
import time
import pygame
screen = create_screen()
music = Music(screen)
music.start()
while True:
if config.virtual_hardware:
pygame.time.wait(10)
for event in pygame.event.get():
pass
else:
time.sleep(0.01) | Python | 0.000002 | |
9be3bf6d71c54fe95db08c6bc1cd969dfbb2ebd1 | Add Dia generated .py file | doc/StationMeteo.Diagram.py | doc/StationMeteo.Diagram.py | class Station :
def __init__(self) :
self.ser = SerialInput() #
self.parser = InputParser() #
self.datab = DatabManager() #
self.input = None # str
self.sensor_dict = dict('id': ,'name': ) #
self.last_meterings_list = LastMeteringList() #
pass
def __get_serial_input_content (self, ser) :
# returns
pass
def __parse (self, serial_input_content) :
# returns
pass
def __store_meterings (self) :
# returns
pass
def __connect_serial (self) :
# returns
pass
def setup (self) :
# returns
pass
def loop (self) :
# returns
pass
class DatabManager :
'''
http://docs.sqlalchemy.org/en/rel_0_8/orm/tutorial.html#adding-new-objects'''
def __init__(self) :
self.engine_url = 'sqlite:///:memory:' # str
self.engine = sqlalchemy.create_engine(engine_url, echo = True) #
self.Session = sqlalchemy.orm.sessionmaker(bind=engine) #
self.session = Session() #
self.metering = Metering() #
self.Sensors = Sensors() #
pass
class Sensors :
'''
https://www.google.fr/#q=NVARCHAR+encodage+mysql
https://stackoverflow.com/questions/612430/when-must-we-use-nvarchar-nchar-instead-of-varchar-char-in-sql-servers
Nvarchar ne sert que pour les utilisateurs MS-SQL. '''
def __init__(self) :
pass
class Metering :
'''
http://docs.sqlalchemy.org/en/rel_0_8/orm/tutorial.html#declare-a-mapping
>>> from sqlalchemy.ext.declarative import declarative_base
>>> declarative_base()
<class 'sqlalchemy.ext.declarative.Base'>
>>>
'''
def __init__(self) :
pass
class /dev/tty :
def __init__(self) :
self.name = None # string
pass
class SerialInput :
'''
>>> from serial import Serial
>>> Serial()
Serial<id=0xb767eb6c, open=False>(port=None, baudrate=9600, bytesize=8, parity='N', stopbits=1, timeout=None, xonxoff=False, rtscts=False, dsrdtr=False)
>>> help(Serial())
>>> help(Serial.readline())
>>> help(Serial.readlines())
'''
def __init__(self) :
self.port = '/dev/ttyUSB0' #
self.baudrate = 115200 # int
pass
def readline (self) :
# returns str
pass
class InputParser :
def __init__(self) :
self.serial_input_content = None # str
self.last_meterings_list = LastMeteringList() #
pass
def parse (self, ) :
# returns
pass
class LastMeteringList :
def __init__(self) :
pass
| Python | 0 | |
54718d95c4398d816546b45ed3f6a1faf2cdace8 | add modules/flexins/nsversion.py | modules/flexins/nsversion.py | modules/flexins/nsversion.py | """Analysis and Check the FlexiNS software version.
"""
import re
from libs.checker import ResultInfo,CheckStatus
from libs.log_spliter import LogSpliter,LOG_TYPE_FLEXI_NS
from libs.tools import read_cmdblock_from_log
## Mandatory variables
##--------------------------------------------
module_id = 'fnsbase.01'
tag = ['flexins','base']
priority = 'normal'
name = "Check the FNS software version"
desc = __doc__
criteria = "FNS version is ['N5 1.19-3']."
result = ResultInfo(name)
##--------------------------------------------
## Optional variables
##--------------------------------------------
target_version = ['N5 1.19-3']
version_info = "Packages Info:\n %s"
pat_pkgid= re.compile("\s+(BU|FB|NW)\s+.*?\n\s+(\w\d [\d\.-]+)")
check_commands = [
("ZWQO:CR;","show the NS packages information"),
]
##
def check_version(logtxt):
error = ''
info = ''
status = ''
pkgid = dict(pat_pkgid.findall(str(logtxt)))
try:
if pkgid['BU'] in target_version:
status = CheckStatus.PASSED
else:
status = CheckStatus.FAILED
info = str(pkgid)
except (KeyError,ValueError) as e:
status = CheckStatus.UNKNOWN
info = e
return status,info,error
##--------------------------------------------
## Mandatory function: run
##--------------------------------------------
def run(logfile):
"""The 'run' function is a mandatory fucntion. and it must return a ResultInfo.
"""
ns_command_end_mark = "COMMAND EXECUTED"
logtxt = read_cmdblock_from_log(logfile,'ZWQO:CR;',ns_command_end_mark)
if logtxt:
status,info,error = check_version(logtxt)
result.update(status=status,info=(version_info % info).split('\n'),error=error)
else:
status = CheckStatus.UNKNOWN
error = "Can't find the version info in the log."
result.update(status=status,info='',error=error)
return result
| Python | 0 | |
8cb94efa41e5350fccdc606f4959f958fc309017 | Add lldb debug visualizer | tools/rlm_lldb.py | tools/rlm_lldb.py | ##############################################################################
#
# Copyright 2014 Realm Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http:#www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
##############################################################################
import lldb
# command script import /src/rlm_lldb.py --allow-reload
property_types = {
0: 'int64_t',
10: 'double',
1: 'bool',
9: 'float',
}
def to_str(val):
return val.GetProcess().ReadCStringFromMemory(val.GetValueAsUnsigned(), 1024, lldb.SBError())
cached_schemas = {}
class SyntheticChildrenProvider(object):
def _eval(self, expr):
frame = self.obj.GetThread().GetSelectedFrame()
return frame.EvaluateExpression(expr)
class RLMObject_SyntheticChildrenProvider(SyntheticChildrenProvider):
def __init__(self, obj, _):
self.obj = obj
objectSchema = self._eval("((RLMObject *){})->_objectSchema".format(self.obj.GetAddress())).GetValueAsUnsigned()
self.props = cached_schemas.get(objectSchema, None)
if not self.props:
properties = self._eval("((RLMObjectSchema *){})->_properties".format(objectSchema)).GetValueAsUnsigned()
count = self._eval("(NSUInteger)[((NSArray *){}) count]".format(properties)).GetValueAsUnsigned()
self.props = [self._get_prop(properties, i) for i in range(count)]
cached_schemas[objectSchema] = self.props
def num_children(self):
return len(self.props)
def has_children(self):
return True
def get_child_index(self, name):
return next(i for i, (prop_name, _) in enumerate(self.props) if prop_name == name)
def get_child_at_index(self, index):
name, getter = self.props[index]
value = self._eval(getter)
return self.obj.CreateValueFromData(name, value.GetData(), value.GetType())
def update(self):
pass
def _get_prop(self, props, i):
prop = self._eval("(NSUInteger)[((NSArray *){}) objectAtIndex:{}]".format(props, i)).GetValueAsUnsigned()
name = to_str(self._eval("[((RLMProperty *){})->_name UTF8String]".format(prop)))
type = self._eval("((RLMProperty *){})->_type".format(prop)).GetValueAsUnsigned()
getter = "({})[(id){} {}]".format(property_types.get(type, 'id'), self.obj.GetAddress(), name)
return name, getter
def RLMArray_SummaryProvider(obj, _):
className = to_str(eval_objc(obj, "(const char *)[(NSString *)[(RLMArray *){} objectClassName] UTF8String]"))
count = eval_objc(obj, "(NSUInteger)[(RLMArray *){} count]").GetValueAsUnsigned()
return "({}[{}])".format(className, count)
class RLMArray_SyntheticChildrenProvider(SyntheticChildrenProvider):
def __init__(self, valobj, _):
self.obj = valobj
self.addr = self.obj.GetAddress()
def num_children(self):
return self.count
def has_children(self):
return True
def get_child_index(self, name):
return int(name.lstrip('[').rstrip(']'))
def get_child_at_index(self, index):
value = self._eval('(id)[(id){} objectAtIndex:{}]'.format(self.addr, index))
return self.obj.CreateValueFromData('[' + str(index) + ']', value.GetData(), value.GetType())
def update(self):
self.count = self._eval("(NSUInteger)[(RLMArray *){} count]".format(self.addr)).GetValueAsUnsigned()
def __lldb_init_module(debugger, _):
debugger.HandleCommand('type summary add RLMArray -F rlm_lldb.RLMArray_SummaryProvider')
debugger.HandleCommand('type summary add RLMArrayLinkView -F rlm_lldb.RLMArray_SummaryProvider')
debugger.HandleCommand('type summary add RLMArrayTableView -F rlm_lldb.RLMArray_SummaryProvider')
debugger.HandleCommand('type synthetic add RLMArray --python-class rlm_lldb.RLMArray_SyntheticChildrenProvider')
debugger.HandleCommand('type synthetic add RLMArrayLinkView --python-class rlm_lldb.RLMArray_SyntheticChildrenProvider')
debugger.HandleCommand('type synthetic add RLMArrayTableView --python-class rlm_lldb.RLMArray_SyntheticChildrenProvider')
debugger.HandleCommand('type synthetic add -x RLMAccessor_.* --python-class rlm_lldb.RLMObject_SyntheticChildrenProvider')
| Python | 0 | |
ec41564bb99c8e79bcee1baabd75d2282601415c | add refund | shopify/resources/refund.py | shopify/resources/refund.py | from ..base import ShopifyResource
class Refund(ShopifyResource):
_prefix_source = "/admin/orders/$order_id/"
| Python | 0 | |
e7247c8c70a8cfefaee057e0c731aa5dab41ca9a | Create Contours.py | Contours.py | Contours.py | from PIL import Image
from pylab import *
#read image into an array
im = array(Image.open('s9.jpg').convert('L'))
#create a new figure
figure()
#don't use colors
gray()
#show contours
contour(im,origin = 'image')
axis('equal')
axis('off')
figure()
hist(im.flatten(),128)
show()
| Python | 0 | |
9315c59746e2be9f2f15ff2bae02e1b481e9a946 | Create mr.py | mr.py | mr.py | Mapper:
#!/usr/bin/python
import sys
while 1:
line = sys.stdin.readline()
if line == "":
break
fields = line.split(",")
year = fields[1]
runs = fields[8]
if year == "1956":
print runs
Reducer:
#!/usr/bin/python
import sys
total_count = 0
for line in sys.stdin:
try:
count = int(line)
total_count += count
except ValueError:
# count was not a number, so silently
# ignore/discard this line
continue
print total_count
| Python | 0.000004 | |
c91e231c8d71458a7c347088ad7ec6431df234d7 | add ss.py to update proxy automatically | ss.py | ss.py | # -*- coding:utf8 -*-
import urllib2
response = urllib2.urlopen("http://boafanx.tabboa.com/boafanx-ss/")
html = response.read()
print(html[:20000]) | Python | 0 | |
4015a16ec32660d25646f62772876d53166f46f2 | Add files via upload | PEP.py | PEP.py | #-*- coding: utf-8 -*-
from optparse import OptionParser
import genLabelData,genUnlabelData,mainEdit,genVecs
import os.path
def parse_args():
parser = OptionParser(usage="RNA editing prediction", add_help_option=False)
parser.add_option("-f", "--feature", default="300", help="Set the number of features of Word2Vec model")
parser.add_option("-g","--generate",default="true", help="Generate the Data or Use the ones before")
parser.add_option("-t","--type",default="ep",help="eep data or ep data")
parser.add_option("-c","--cell",default = "GM12878",help="Cell Line")
parser.add_option("-k","--k",default="1",help="k")
parser.add_option("-w","--word",default = "6")
parser.add_option("-i","--integrate",default="false", help="Use integrated features or not")
parser.add_option("-s","--sel",default=50, help="The number of motif feature to be used in the combination mode")
parser.add_option("-e","--thresh_mode",default=1,help="The mode of estimating threshold:0-default;1-simple mode;2-cv mode")
(opts, args) = parser.parse_args()
return opts
def run(word,num_features,generate,type,cell,k,integrate,sel):
if(os.path.exists("./Data/Learning")==False):
os.makedirs("./Data/Learning")
print "parameters are as followed\n" \
"feature=%r\tgenerate=%r\n" \
"type=%r\tcell=%r\n" \
"k=%r\n"\
%(num_features,generate,type,cell,k)
if generate == "true":
if not os.path.isfile("./Data/Learning/supervised_"+str(cell)+"_"+str(type)):
genLabelData.run(type,cell)
if not os.path.isfile("./Data/Learning/unlabeled_train_promoter_"+str(cell)+"_"+str(type)):
genUnlabelData.run(type,cell,word)
if not os.path.isfile("./Datavecs/datavecs_"+str(cell)+"_"+str(type)+".npy"):
genVecs.run(word,num_features,k,type,cell)
if integrate == "false":
mainPEP.run_word(word,num_features,k,type,cell)
else:
mainPEP.run_shuffle(word,num_features,k,type,cell,sel)
def main():
opts = parse_args()
run(opts.word,opts.feature,opts.generate,opts.type,opts.cell,opts.k,opts.integrate,opts.sel)
if __name__ == '__main__':
main()
| Python | 0 | |
8016dbc50238d2baf5f89c191ec3355df63af1a2 | Implement basic flask app to add subscribers | app.py | app.py | import iss
from flask import Flask, request, render_template
app = Flask(__name__)
@app.route('/', methods=['GET'])
def index():
return render_template()
@app.route('/subscribe', methods=['POST'])
def subscribe():
number = request.form['number']
lat = request.form['latitude']
lon = request.form['longitude']
iss.add_to_queue(number, lat, lon)
return render_template()
app.run(host='0.0.0.0')
| Python | 0.000001 | |
6d8da9ec6a0dba1c5b61ea88de6a808f36d4f271 | Add Aruba device tracker | homeassistant/components/device_tracker/aruba.py | homeassistant/components/device_tracker/aruba.py | """
homeassistant.components.device_tracker.aruba
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
Device tracker platform that supports scanning a Aruba Access Point for device
presence.
This device tracker needs telnet to be enabled on the router.
Configuration:
To use the Aruba tracker you will need to add something like the following
to your config/configuration.yaml. You also need to enable Telnet in the
configuration pages.
device_tracker:
platform: aruba
host: YOUR_ACCESS_POINT_IP
username: YOUR_ADMIN_USERNAME
password: YOUR_ADMIN_PASSWORD
Variables:
host
*Required
The IP address of your router, e.g. 192.168.1.1.
username
*Required
The username of an user with administrative privileges, usually 'admin'.
password
*Required
The password for your given admin account.
"""
import logging
from datetime import timedelta
import re
import threading
import telnetlib
from homeassistant.const import CONF_HOST, CONF_USERNAME, CONF_PASSWORD
from homeassistant.helpers import validate_config
from homeassistant.util import Throttle
from homeassistant.components.device_tracker import DOMAIN
# Return cached results if last scan was less then this time ago
MIN_TIME_BETWEEN_SCANS = timedelta(seconds=10)
_LOGGER = logging.getLogger(__name__)
_DEVICES_REGEX = re.compile(
r'(?P<name>([^\s]+))\s+' +
r'(?P<ip>([0-9]{1,3}[\.]){3}[0-9]{1,3})\s+' +
r'(?P<mac>(([0-9a-f]{2}[:-]){5}([0-9a-f]{2})))\s+' +
r'(?P<os>([^\s]+))\s+' +
r'(?P<network>([^\s]+))\s+' +
r'(?P<ap>([^\s]+))\s+' +
r'(?P<channel>([^\s]+))\s+' +
r'(?P<type>([^\s]+))\s+' +
r'(?P<role>([^\s]+))\s+' +
r'(?P<signal>([^\s]+))\s+' +
r'(?P<speed>([^\s]+))')
# pylint: disable=unused-argument
def get_scanner(hass, config):
""" Validates config and returns a Aruba scanner. """
if not validate_config(config,
{DOMAIN: [CONF_HOST, CONF_USERNAME, CONF_PASSWORD]},
_LOGGER):
return None
scanner = ArubaDeviceScanner(config[DOMAIN])
return scanner if scanner.success_init else None
class ArubaDeviceScanner(object):
""" This class queries a Aruba Acces Point for connected devices. """
def __init__(self, config):
self.host = config[CONF_HOST]
self.username = config[CONF_USERNAME]
self.password = config[CONF_PASSWORD]
self.lock = threading.Lock()
self.last_results = {}
# Test the router is accessible
data = self.get_aruba_data()
self.success_init = data is not None
def scan_devices(self):
""" Scans for new devices and return a list containing found device
ids. """
self._update_info()
return [client['mac'] for client in self.last_results]
def get_device_name(self, device):
""" Returns the name of the given device or None if we don't know. """
if not self.last_results:
return None
for client in self.last_results:
if client['mac'] == device:
return client['name']
return None
@Throttle(MIN_TIME_BETWEEN_SCANS)
def _update_info(self):
""" Ensures the information from the Aruba Access Point is up to date.
Returns boolean if scanning successful. """
if not self.success_init:
return False
with self.lock:
data = self.get_aruba_data()
if not data:
return False
self.last_results = data.values()
return True
def get_aruba_data(self):
""" Retrieve data from Aruba Access Point and return parsed
result. """
try:
telnet = telnetlib.Telnet(self.host)
telnet.read_until(b'User: ')
telnet.write((self.username + '\r\n').encode('ascii'))
telnet.read_until(b'Password: ')
telnet.write((self.password + '\r\n').encode('ascii'))
telnet.read_until(b'#')
telnet.write(('show clients\r\n').encode('ascii'))
devices_result = telnet.read_until(b'#').split(b'\r\n')
telnet.write('exit\r\n'.encode('ascii'))
except EOFError:
_LOGGER.exception("Unexpected response from router")
return
except ConnectionRefusedError:
_LOGGER.exception("Connection refused by router," +
" is telnet enabled?")
return
devices = {}
for device in devices_result:
match = _DEVICES_REGEX.search(device.decode('utf-8'))
if match:
devices[match.group('ip')] = {
'ip': match.group('ip'),
'mac': match.group('mac').upper(),
'name': match.group('name')
}
return devices
| Python | 0 | |
341890bfff2d8a831e48ebb659ce7f31d4918773 | Update utils.py | tendrl/commons/central_store/utils.py | tendrl/commons/central_store/utils.py | from tendrl.commons.etcdobj import fields
def to_etcdobj(cls_etcd, obj):
for attr, value in vars(obj).iteritems():
if attr.startswith("_"):
continue
if attr in ["attrs", "enabled", "obj_list", "obj_value", "atoms", "flows"]:
continue
setattr(cls_etcd, attr, to_etcd_field(attr, value))
return cls_etcd
def to_etcd_field(name, value):
type_to_etcd_fields_map = {dict: fields.DictField,
str: fields.StrField,
int: fields.IntField,
bool: fields.StrField}
return type_to_etcd_fields_map[type(value)](name)
| from tendrl.commons.etcdobj import fields
def to_etcdobj(cls_etcd, obj):
for attr, value in vars(obj).iteritems():
if not attr.startswith("_"):
setattr(cls_etcd, attr, to_etcd_field(attr, value))
return cls_etcd
def to_etcd_field(name, value):
type_to_etcd_fields_map = {dict: fields.DictField,
str: fields.StrField,
int: fields.IntField,
bool: fields.StrField}
return type_to_etcd_fields_map[type(value)](name)
| Python | 0.000001 |
9d41eba840f954595a5cebbacaf56846cd52c1f4 | add new file | functions.py | functions.py | def add(a,b):
| Python | 0.000006 | |
3000a9c0b7213a3aeb9faa0c01e5b779b2db36d4 | add a noisy bezier example (todo: should noise be part of the animation, or stay out of it?) | examples/example_bezier_noise.py | examples/example_bezier_noise.py | if __name__ == "__main__":
import gizeh
import moviepy.editor as mpy
from vectortween.BezierCurveAnimation import BezierCurveAnimation
from vectortween.SequentialAnimation import SequentialAnimation
import noise
def random_color():
import random
return (random.uniform(0, 1) for _ in range(3))
W, H = 250, 250 # width, height, in pixels
duration = 5 # duration of the clip, in seconds
fps = 25
controlpoints_collections = [
[(120, 160), (35, 200), (220, 240), (220, 40)],
[(220, 40), (120, 40), (10, 200)]
]
b1 = BezierCurveAnimation(controlpoints=controlpoints_collections[0],tween=["easeOutBounce"])
b2 = BezierCurveAnimation(controlpoints=controlpoints_collections[1],tween=["easeOutBounce"])
b = SequentialAnimation([b1,b2])
colors = ((0,1,1),(1,1,0))
def make_frame(t):
surface = gizeh.Surface(W, H)
xy = b.make_frame(t, 0, 0, duration - 1, duration)
curve_points = b.curve_points(0, t, 0.01, 0, 0, duration - 1, duration)
# print (curve_points)
xnoise = []
ynoise = []
result = []
for i,c in enumerate(curve_points):
xnoise.append(2*noise.snoise3(c[0],c[1],0))
ynoise.append(2*noise.snoise3(c[0],c[1],1))
result.append([(c[0] + xnoise[i]), (c[1] + ynoise[i])])
if xy is not None and None not in xy:
gizeh.circle(5, xy=[xy[0]+xnoise[i],xy[1]+ynoise[i]], fill=(0, 1, 0)).draw(surface)
curve_points = result
gizeh.polyline(curve_points, stroke=(0, 0, 1), stroke_width=2).draw(surface)
for i, controlpoints in enumerate(controlpoints_collections):
gizeh.polyline(controlpoints, stroke=colors[i], stroke_width=2).draw(surface)
for cp in controlpoints:
gizeh.circle(5, xy=cp, fill=(1, 1, 1)).draw(surface)
return surface.get_npimage()
clip = mpy.VideoClip(make_frame, duration=duration)
clip.write_videofile("example_bezier_noise.mp4", fps=fps, codec="libx264")
| Python | 0 | |
05c7d62e0e26000440e72d0700c9806d7a409744 | Add migrations for game change suggestions | games/migrations/0023_auto_20171104_2246.py | games/migrations/0023_auto_20171104_2246.py | # -*- coding: utf-8 -*-
# Generated by Django 1.10.2 on 2017-11-04 21:46
from __future__ import unicode_literals
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
dependencies = [
('games', '0022_installer_reason'),
]
operations = [
migrations.AddField(
model_name='game',
name='change_for',
field=models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.CASCADE, to='games.Game'),
),
migrations.AddField(
model_name='gamesubmission',
name='reason',
field=models.TextField(blank=True, null=True),
),
migrations.AlterField(
model_name='game',
name='slug',
field=models.SlugField(blank=True, null=True, unique=True),
),
]
| Python | 0 | |
ad073b043b2965fb6a1939682aeca8ac90259210 | add daily import to database | daily.py | daily.py | import datetime
import httplib
import urllib
import redis
import json
from datetime import timedelta
#now = datetime.datetime.now();
#today = now.strftime('%Y-%m-%d')
#print today
# Module-level Redis connection used by save2redis() below.
rdb = redis.Redis('localhost')
def isfloat(value):
    """Return True when *value* parses as a float, False otherwise."""
    try:
        float(value)
    except ValueError:
        return False
    return True
def convfloat(value):
    """Parse *value* as a float; yield -1 when it is not a valid number."""
    try:
        result = float(value)
    except ValueError:
        result = -1
    return result
def convint(value):
    """Parse *value* as an int; yield 0 when it is not a valid integer."""
    try:
        result = int(value)
    except ValueError:
        result = 0
    return result
def save2redis(key, value):
    """Append *value* to the JSON-encoded list stored under key "TW"+key.

    NOTE(review): this read-modify-write is not atomic; concurrent writers
    can lose updates — confirm single-writer usage.
    """
    redis_key = "TW" + key
    stored = rdb.get(redis_key)
    if stored is None:
        history = [value]
    else:
        history = json.loads(stored)
        history.append(value)
    rdb.set(redis_key, json.dumps(history))
# Download today's TWSE (Taiwan Stock Exchange) daily quote CSV and store one
# record per stock id in Redis.  (Python 2 code: httplib/urllib, print stmt.)
today = datetime.date.today()
#today = datetime.date(2015, 5, 15)
one_day = timedelta(days=1)
dl_date = today
httpreq = httplib.HTTPConnection('www.twse.com.tw')
headers = {"Content-type": "application/x-www-form-urlencoded", "Accept": "text/plain"}
# year - 1911: the site apparently expects ROC-calendar years — confirm.
date_str = str(dl_date.year - 1911 ) + dl_date.strftime("/%m/%d")
form = urllib.urlencode({'download': 'csv', 'qdate': date_str, 'selectType': 'ALLBUT0999'})
httpreq.request("POST", "/ch/trading/exchange/MI_INDEX/MI_INDEX.php", form, headers);
httpres = httpreq.getresponse()
stock_csv = httpres.read()
lines = stock_csv.split("\n")
for line in lines:
    r = line.split('","')
    # Quote-delimited data rows split into exactly 16 fields.
    if len(r) == 16:
        head = r[0].split("\"")
        sid = head[1].strip(" ")
        obj = {"volume": convint(r[2]), "open": convfloat(r[5]), "high": convfloat(r[6]), "low": convfloat(r[7]), "val": convfloat(r[8]), "date": dl_date.strftime("%Y-%m-%d"), "per": convfloat(r[15]), "buyQuantity": convint(r[12]), "buyPrice": convint(r[11]), "saleQuantity": convint(r[14]), "salePrice": convint(r[13])}
        print sid
        print obj
        save2redis(sid, obj)
| Python | 0 | |
09d51aef1127b55fdc6bf595ca85285d9d7d64d1 | Add wrapwrite utility method | gignore/utils.py | gignore/utils.py | import sys
def wrapwrite(text):
    """
    Write *text* to stdout as UTF-8 bytes, working on both Python 2 and 3.

    :type text: str
    :rtype: None
    """
    text = text.encode('utf-8')
    try: # Python3: write to the underlying binary buffer
        sys.stdout.buffer.write(text)
    except AttributeError: # Python2: sys.stdout has no .buffer
        sys.stdout.write(text)
| Python | 0.000001 | |
eebaea9bb57fa276207775612d669b5af61b3c86 | Add some work | gen.py | gen.py | #! /usr/bin/python3
import xml.etree.ElementTree as ET
from copy import deepcopy
def print_comment(s, multiline=False):
    """Emit *s* to stdout as a C-style comment.

    Single-line form prints ``/* text */``; the multiline form puts each
    line of *s* on its own `` * ``-prefixed row between ``/*`` and `` */``.
    """
    if multiline:
        print('/*')
        for row in s.splitlines():
            print(' *', row.strip())
        print(' */')
    else:
        print('/*', s.strip(), '*/')
def objc_case(name, first_capital=False):
    """Convert a snake_case identifier to camelCase (or PascalCase when
    *first_capital* is true)."""
    pascal = name.replace('_', ' ').title().replace(' ', '')
    if first_capital:
        return pascal
    return pascal[0].lower() + pascal[1:]
class Protocol:
    """Parsed <protocol> element of a Wayland XML specification."""

    def parse(node):
        """Build a Protocol from a <protocol> XML element.

        Called unbound as ``Protocol.parse(node)``.

        Fix: ``copyright`` is now always set (None when the element has no
        <copyright> child); previously ``print`` raised AttributeError for
        protocols without one.
        """
        assert(node.tag == 'protocol')
        res = Protocol()
        res.name = node.attrib['name']
        res.copyright = None
        res.interfaces = []
        for child in node:
            if child.tag == 'copyright':
                res.copyright = child.text
            if child.tag == 'interface':
                interface = Interface.parse(child)
                res.interfaces.append(interface)
        return res

    def print(self):
        """Print the protocol as an Objective-C header skeleton."""
        print_comment(self.name + ' protocol')
        if self.copyright:
            print_comment(self.copyright, multiline=True)
        for interface in self.interfaces:
            interface.print()
            print()
class Interface:
    """Parsed <interface> element: name, version, requests and events."""

    def parse(node):
        """Build an Interface from an <interface> XML element.

        Called unbound as ``Interface.parse(node)``.

        Fix: ``description`` is now always set (None when there is no
        <description> child); previously ``print`` raised AttributeError
        for undocumented interfaces.
        """
        assert(node.tag == 'interface')
        res = Interface()
        res.name = node.attrib['name']
        res.version = node.attrib['version']
        res.description = None
        res.requests = []
        res.events = []
        for child in node:
            if child.tag == 'description':
                res.description = child.text
            if child.tag == 'request':
                request = Request.parse(child)
                res.requests.append(request)
            if child.tag == 'event':
                event = Event.parse(child)
                res.events.append(event)
        return res

    def objc_name(name):
        """Objective-C class name for *name* (an Interface or raw string);
        the known protocol prefixes (wl/xdg/zxdg) are upper-cased."""
        if isinstance(name, Interface):
            name = name.name
        res = objc_case(name, first_capital=True)
        for prefix in 'wl', 'xdg', 'zxdg':
            if res.lower().startswith(prefix):
                l = len(prefix)
                res = prefix.upper() + res[l:]
        return res

    def print(self):
        """Print the @interface declaration with all requests and events."""
        if self.description:
            print_comment(self.description, multiline=True)
        print('@interface', self.objc_name(), '{')
        print('struct', self.name, '*rawHandle;')
        print('}')
        for request in self.requests:
            request.print_decl()
        for event in self.events:
            event.print_decl()
        print('@end')
class Request:
    """Parsed <request> element plus its derived Objective-C projection
    (selector name, argument list and optional constructed interface)."""

    def parse(node):
        """Build a Request from a <request> XML element and derive its
        Objective-C name/arguments.

        Called unbound as ``Request.parse(node)``.

        Fix: ``description`` is now always set (None when the XML has no
        <description> child); previously ``print_decl`` raised
        AttributeError for undocumented requests.
        """
        assert(node.tag == 'request')
        res = Request()
        res.name = node.attrib['name']
        res.description = None
        res.args = []
        for child in node:
            if child.tag == 'description':
                res.description = child.text
            if child.tag == 'arg':
                arg = Arg.parse(child)
                res.args.append(arg)
        # generate objc names and args
        # 'get_foo' requests are exposed simply as 'foo'; the leading '_'
        # left by name[3:] is dropped by objc_case.
        if not res.name.startswith('get'):
            res.objc_name = objc_case(res.name)
        else:
            res.objc_name = objc_case(res.name[3:])
        if not res.args:
            res.new_id = None
            res.objc_args = []
            return res
        # A leading new_id argument is the constructed object: it becomes
        # the Objective-C return type instead of a parameter.
        if res.args[0].type == 'new_id':
            res.new_id = res.args[0]
            res.args = res.args[1:]
        else:
            res.new_id = None
        # Deep copy so the Objective-C renaming below does not clobber the
        # wire-protocol argument descriptions kept in res.args.
        res.objc_args = deepcopy(res.args)
        for arg in res.objc_args:
            arg.name = objc_case(arg.name)
            if arg.type == 'object':
                arg.type = Interface.objc_name(arg.interface) + ' *'
            elif arg.type == 'string':
                arg.type = 'NSString *'
            elif arg.type == 'uint':
                arg.type = 'uint32_t'
            else:
                # TODO: other types
                pass
        # Cocoa convention: suffix the selector with 'With<FirstArg>'
        # unless the name already ends that way.
        if res.args:
            a0n = objc_case(res.args[0].name, first_capital=True)
            if not res.objc_name.endswith(a0n):
                res.objc_name += 'With' + a0n
        return res

    def print_decl(self):
        """Print the Objective-C method declaration for this request."""
        if self.description:
            print_comment(self.description, multiline=True)
        return_type = 'void'
        if self.new_id is not None:
            return_type = Interface.objc_name(self.new_id.interface) + ' *'
        print('- ({})'.format(return_type), end='')
        if not self.objc_args:
            print(' {};'.format(self.objc_name))
        else:
            print(end=' ')
            self.objc_args[0].print(label=self.objc_name)
            for arg in self.objc_args[1:]:
                print(end=' ')
                arg.print()
            print(';')
class Event:
    """Parsed <event> element of an interface."""

    def parse(node):
        """Build an Event from an <event> XML element.

        Called unbound as ``Event.parse(node)``.

        Fix: ``description`` is now always set (None when the XML has no
        <description> child); previously ``print_decl`` raised
        AttributeError for undocumented events.
        """
        assert(node.tag == 'event')
        res = Event()
        res.name = node.attrib['name']
        res.description = None
        res.args = []
        for child in node:
            if child.tag == 'description':
                res.description = child.text
            if child.tag == 'arg':
                arg = Arg.parse(child)
                res.args.append(arg)
        return res

    def print_decl(self):
        """Print a placeholder declaration for this event (the return
        type is emitted literally as '- (TODO)')."""
        if self.description:
            print_comment(self.description, multiline=True)
        print('- (TODO)', objc_case(self.name))
class Arg:
    """Parsed <arg> element of a request or event."""

    def parse(node):
        """Build an Arg from an <arg> XML element.

        Called unbound as ``Arg.parse(node)``.

        Fixes: ``summary`` no longer raises KeyError when the attribute is
        missing, and ``interface`` is always set (None when absent) so
        later reads cannot raise AttributeError.
        """
        assert(node.tag == 'arg')
        res = Arg()
        res.name = node.attrib['name']
        res.type = node.attrib['type']
        res.summary = node.attrib.get('summary')
        res.interface = node.attrib.get('interface')
        # Missing attribute compares unequal to 'true', i.e. False.
        res.allow_null = node.attrib.get('allow-null') == 'true'
        return res

    def print(self, label=None):
        """Print this argument as ``label: (type) name`` with no newline;
        *label* defaults to the argument's own name."""
        if label is None:
            label = self.name
        print('{}: ({}) {}'.format(label, self.type, self.name), end='')
# Entry point: read a Wayland protocol XML from stdin and emit the generated
# Objective-C header on stdout.
print_comment('Automatically generated by wl-objc bindings generator')
tree = ET.parse('/dev/stdin')
root = tree.getroot()
Protocol.parse(root).print()
| Python | 0.000109 | |
617a44bcce3e6e19383065f7fcab5b44ceb82714 | add logger | log.py | log.py | import logging
import sys
# Module-level logger for 'micro-meta', verbose by default.
logger = logging.getLogger('micro-meta')
logger.setLevel(logging.DEBUG)
# Mirror every record (DEBUG and above) to stdout.
fh = logging.StreamHandler(sys.stdout)
fh.setLevel(logging.DEBUG)
logger.addHandler(fh)
# NOTE(review): smoke-test message emitted on import — confirm it should stay.
logger.debug('test')
logger.info('test') | Python | 0.000026 | |
a38b313799b7c0cdc25ff68161c2b2890db8e16d | Create log.py | log.py | log.py | import glob
import pandas as pd
# Collect every AutoDock Vina .log file in the working directory, extract the
# docking-result table rows and write them all to docks.csv.
logs = [log for log in glob.glob("*.log")]
dataset = {"id": [],
           "pos": [],
           "affinity (kcal/mol)": [],
           "rmsd l.b.": [],
           "rmsd u.b.": []}
for log in logs:
    with open(log) as dock:
        for line in dock.readlines():
            # Result rows are indented; skip table-border lines ('|').
            if line.startswith("   ") and line.split()[0] != "|":
                # log[:-4] strips the '.log' extension to form the id.
                dataset["id"].append(log[:-4])
                dataset["pos"].append(line.split()[0])
                dataset["affinity (kcal/mol)"].append(line.split()[1])
                dataset["rmsd l.b."].append(line.split()[2])
                dataset["rmsd u.b."].append(line.split()[3])
dataframe = pd.DataFrame(data=dataset)
dataframe.to_csv("docks.csv")
| Python | 0.000002 | |
7263d7546aec62834fa19f20854522eba4916159 | add simple http server | run.py | run.py | import sys
import BaseHTTPServer
from SimpleHTTPServer import SimpleHTTPRequestHandler
# Python 2 static-file server: serves the current directory on 127.0.0.1,
# port taken from argv[1] (default 8000).
HandlerClass = SimpleHTTPRequestHandler
ServerClass = BaseHTTPServer.HTTPServer
Protocol = "HTTP/1.0"
if sys.argv[1:]:
    port = int(sys.argv[1])
else:
    port = 8000
server_address = ('127.0.0.1', port)
HandlerClass.protocol_version = Protocol
httpd = ServerClass(server_address, HandlerClass)
sa = httpd.socket.getsockname()
print "Serving HTTP on", sa[0], "port", sa[1], "..."
httpd.serve_forever() | Python | 0 | |
e6d420296b3f2234382bdcdf1122abc59af148ed | add plot function for classification | mousestyles/visualization/plot_classification.py | mousestyles/visualization/plot_classification.py | import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
def plot_performance(model):
    """
    Plots the performance of a classification model as a side-by-side
    barplot: for each of the 16 strains it shows precision, recall and
    the F-1 measure loaded from '<model>_result.npy'.

    Parameters
    ----------
    model: string
        The model used to classify the strain: 'SVM', 'RF' or 'GB'.

    Returns
    -------
    An empty tuple (kept for backward compatibility).

    Raises
    ------
    ValueError
        If *model* is not one of the supported names.
    """
    # Bug fix: the original compared strings with `is`, which tests object
    # identity, not equality — it only worked via CPython string interning.
    if model == 'SVM':
        result = pd.DataFrame(np.load('SVM_result.npy'))
    elif model == 'RF':
        result = pd.DataFrame(np.load('RF_result.npy'))
    elif model == 'GB':
        result = pd.DataFrame(np.load('GB_result.npy'))
    else:
        # Previously an unknown model fell through and crashed later with
        # an UnboundLocalError on `result`.
        raise ValueError("model must be one of 'SVM', 'RF' or 'GB'")
    N = 16
    ind = np.arange(N)  # the x locations for the groups
    width = 0.2
    fig = plt.figure()
    ax = fig.add_subplot(111)
    # Columns of the result array: 0 = precision, 1 = recall, 2 = F1.
    precision = result.iloc[:, 0]
    rects1 = ax.bar(ind, precision, width, color='Coral')
    recall = result.iloc[:, 1]
    rects2 = ax.bar(ind + width, recall, width, color='LightSeaGreen')
    f1 = result.iloc[:, 2]
    rects3 = ax.bar(ind + width * 2, f1, width, color='DodgerBlue')
    ax.set_ylabel('Accuracy')
    ax.set_xlabel('Strains')
    ax.set_xticks(ind + width)
    ax.set_xticklabels(range(16))
    ax.legend((rects1[0], rects2[0], rects3[0]), ('precision', 'recall', 'F1'))
    plt.show()
    return ()
| Python | 0.000009 | |
a1ec669f4c494709dc9b8f3e47ff4f84b189b2e9 | add get_workflow_name.py | .circleci/get_workflow_name.py | .circleci/get_workflow_name.py | # Copyright 2018 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Get workflow name for the current build using CircleCI API.
Would be great if this information is available in one of
CircleCI environment variables, but it's not there.
https://circleci.ideas.aha.io/ideas/CCI-I-295
"""
import json
import os
import sys
import urllib2
def main():
try:
username = os.environ['CIRCLE_PROJECT_USERNAME']
reponame = os.environ['CIRCLE_PROJECT_REPONAME']
build_num = os.environ['CIRCLE_BUILD_NUM']
except:
sys.stderr.write(
'Looks like we are not inside CircleCI container. Exiting...\n')
return 1
try:
request = urllib2.Request(
"https://circleci.com/api/v1.1/project/github/%s/%s/%s" %
(username, reponame, build_num),
headers={"Accept": "application/json"})
contents = urllib2.urlopen(request).read()
except:
sys.stderr.write('Cannot query CircleCI API. Exiting...\n')
return 1
try:
build_info = json.loads(contents)
except:
sys.stderr.write(
'Cannot parse JSON received from CircleCI API. Exiting...\n')
return 1
try:
workflow_name = build_info['workflows']['workflow_name']
except:
sys.stderr.write(
'Cannot get workflow name from CircleCI build info. Exiting...\n')
return 1
print workflow_name
return 0
# Script entry point: propagate main()'s status as the process exit code.
retval = main()
exit(retval)
| Python | 0.000002 | |
3abf7e60d3bd028f86cb6aa2e1e1f3d4fff95353 | Create BinaryTreeMaxPathSum_001.py | leetcode/124-Binary-Tree-Maximum-Path-Sum/BinaryTreeMaxPathSum_001.py | leetcode/124-Binary-Tree-Maximum-Path-Sum/BinaryTreeMaxPathSum_001.py | # Definition for a binary tree node.
# class TreeNode(object):
# def __init__(self, x):
# self.val = x
# self.left = None
# self.right = None
class Solution(object):
    def maxPathSum(self, root):
        """
        :type root: TreeNode
        :rtype: int

        Maximum sum over all paths in the tree; a path may bend through a
        node, descending into both of its children.
        """
        best, _ = self.maxBranchandPathSum(root)
        return best

    def maxBranchandPathSum(self, root):
        """Return a pair: (best path sum anywhere in this subtree,
        best sum of a branch that starts at *root* and goes down)."""
        if root is None:
            return 0, 0
        left, right = root.left, root.right
        if left is None and right is None:
            return root.val, root.val
        if left is None or right is None:
            # Single-child node: extend the child's branch only when it
            # helps (negative branches are dropped).
            child = right if left is None else left
            child_best, child_branch = self.maxBranchandPathSum(child)
            branch = root.val + max(child_branch, 0)
            return max(child_best, branch), branch
        left_best, left_branch = self.maxBranchandPathSum(left)
        right_best, right_branch = self.maxBranchandPathSum(right)
        # Best path bending through this node uses both branches if useful.
        through = root.val + max(left_branch, 0) + max(right_branch, 0)
        branch = root.val + max(max(left_branch, 0), max(right_branch, 0))
        return max(left_best, right_best, through), branch
| Python | 0.000023 | |
59fd4bf04c8c89cdc87673de94788c5d34d4e5fe | Create Goldbach.py | Goldbach.py | Goldbach.py | import math
#Funcion para saber si un numero es 3 o no
def es_primo(a):
    """Return True if *a* is a prime number.

    Fixes the original divisor-counting version, which classified 1 (a
    non-prime) as prime and scanned every i in [1, a] on each call; this
    trial division stops at sqrt(a).
    """
    if a < 2:
        return False
    if a % 2 == 0:
        return a == 2
    divisor = 3
    while divisor * divisor <= a:
        if a % divisor == 0:
            return False
        divisor += 2
    return True
#Creacion de un lista con todos los primos del 1 al 10,000
def lista_primos():
    """Return the list of numbers in [1, 10000) accepted by es_primo()."""
    return [n for n in range(1, 10000) if es_primo(n)]
def lista_impares():
    """Return the odd numbers in [1, 10000) rejected by es_primo().

    Bug fix: the original built the list but never returned it, so the
    module-level ``impares = lista_impares()`` received None.
    """
    return [n for n in range(1, 10000, 2) if not es_primo(n)]
primos = lista_primos()
impares = lista_impares()
a = 0
impar = 1
# NOTE(review): n only ranges over 1..9999, so `n > 500000` is never true
# and nothing is ever printed — this Goldbach-style check looks unfinished;
# confirm the intended condition.
for primo in primos:
    for n in range (1,10000):
        a = primo + (2)*(math.pow(n,2))
        if a != impar and n > 500000:
            print (impar)
            impar += 2
| Python | 0 | |
e33774beb2f2b1264f654605294f0ad837fa7e8b | Add message_link function | utils/backends.py | utils/backends.py | """
Handle backend specific implementations.
"""
def message_link(bot, msg):
    """
    Build a permalink for *msg* on the bot's configured backend.

    :param bot: Plugin instance.
    :param msg: Message object.
    :returns: Message link ('' for backends without message links).
    :raises NotImplementedError: for unsupported backends.
    """
    backend = bot.bot_config.BACKEND.lower()
    if backend == 'gitter':
        # Bug fix: the placeholders are named, so format() must receive
        # keyword arguments; positional args raised KeyError('uri').
        return 'https://gitter.im/{uri}?at={idd}'.format(
            uri=msg.frm.room.uri, idd=msg.extras['id'])
    elif backend == 'slack':
        return msg.extras['url']
    elif backend == 'telegram':
        return ''
    elif backend == 'text':
        return ''
    else:
        raise NotImplementedError
| Python | 0.000001 | |
f509d556cc4a20b55be52f505fcee200c5d44ef2 | add rehex util | scripts/rehex.py | scripts/rehex.py | import simplejson
import binascii
import sys
import pdb
from pprint import pprint
import sys, os
sys.path.append( os.path.join( os.path.dirname(__file__), '..' ) )
sys.path.append( os.path.join( os.path.dirname(__file__), '..', 'lib' ) )
import dashlib
# ============================================================================
# CLI: deserialise a hex-encoded Dash object passed as argv[1].
usage = "%s <hex>" % sys.argv[0]
obj = None
if len(sys.argv) < 2:
    print(usage)
    sys.exit(2)
else:
    obj = dashlib.deserialise(sys.argv[1])
# NOTE(review): interactive debugger breakpoint committed in — every run
# drops into pdb here; the trailing bare `1` is a no-op statement.
pdb.set_trace()
1
| Python | 0.000001 | |
ab67a0c86a473c5d30da2c127d661b2f91483d22 | add missing files for quota | nova/tests/quota_unittest.py | nova/tests/quota_unittest.py | # vim: tabstop=4 shiftwidth=4 softtabstop=4
# Copyright 2010 United States Government as represented by the
# Administrator of the National Aeronautics and Space Administration.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import logging
from nova import db
from nova import flags
from nova import quota
from nova import test
from nova import utils
from nova.auth import manager
from nova.endpoint import cloud
from nova.endpoint import api
FLAGS = flags.FLAGS
class QuotaTestCase(test.TrialTestCase):
    """Verify per-project quotas (instances, cores, volumes) are enforced
    and can be overridden, using a fake connection and a throw-away admin
    user/project created per test."""
    def setUp(self): # pylint: disable-msg=C0103
        logging.getLogger().setLevel(logging.DEBUG)
        super(QuotaTestCase, self).setUp()
        # Deliberately tight quotas so the "too many" tests trip quickly.
        self.flags(connection_type='fake',
                   quota_instances=2,
                   quota_cores=4,
                   quota_volumes=2,
                   quota_gigabytes=20,
                   quota_floating_ips=2)
        self.cloud = cloud.CloudController()
        self.manager = manager.AuthManager()
        self.user = self.manager.create_user('admin', 'admin', 'admin', True)
        self.project = self.manager.create_project('admin', 'admin', 'admin')
        self.context = api.APIRequestContext(handler=None,
                                             project=self.project,
                                             user=self.user)
    def tearDown(self): # pylint: disable-msg=C0103
        manager.AuthManager().delete_project(self.project)
        manager.AuthManager().delete_user(self.user)
        super(QuotaTestCase, self).tearDown()
    def _create_instance(self, cores=2):
        """Create a test instance"""
        inst = {}
        inst['image_id'] = 'ami-test'
        inst['reservation_id'] = 'r-fakeres'
        inst['user_id'] = self.user.id
        inst['project_id'] = self.project.id
        inst['instance_type'] = 'm1.large'
        inst['vcpus'] = cores
        inst['mac_address'] = utils.generate_mac()
        return db.instance_create(self.context, inst)
    def _create_volume(self, size=10):
        """Create a test volume"""
        vol = {}
        vol['user_id'] = self.user.id
        vol['project_id'] = self.project.id
        vol['size'] = size
        return db.volume_create(self.context, vol)['id']
    def test_quota_overrides(self):
        """Make sure overriding a projects quotas works"""
        num_instances = quota.allowed_instances(self.context, 100, 'm1.small')
        self.assertEqual(num_instances, 2)
        # Raising the instance quota to 10: the allowed count rises only to
        # 4 — apparently capped by the cores quota set in setUp; confirm.
        db.quota_create(self.context, {'project_id': self.project.id,
                                       'instances': 10})
        num_instances = quota.allowed_instances(self.context, 100, 'm1.small')
        self.assertEqual(num_instances, 4)
        # With cores raised as well, the instance quota (10) is the limit.
        db.quota_update(self.context, self.project.id, {'cores': 100})
        num_instances = quota.allowed_instances(self.context, 100, 'm1.small')
        self.assertEqual(num_instances, 10)
        db.quota_destroy(self.context, self.project.id)
    def test_too_many_instances(self):
        """Filling the instance quota makes the next run_instances fail."""
        instance_ids = []
        for i in range(FLAGS.quota_instances):
            instance_id = self._create_instance()
            instance_ids.append(instance_id)
        self.assertFailure(self.cloud.run_instances(self.context,
                                                    min_count=1,
                                                    max_count=1,
                                                    instance_type='m1.small'),
                           cloud.QuotaError)
        for instance_id in instance_ids:
            db.instance_destroy(self.context, instance_id)
    def test_too_many_cores(self):
        """A single 4-core instance exhausts the core quota."""
        instance_ids = []
        instance_id = self._create_instance(cores=4)
        instance_ids.append(instance_id)
        self.assertFailure(self.cloud.run_instances(self.context,
                                                    min_count=1,
                                                    max_count=1,
                                                    instance_type='m1.small'),
                           cloud.QuotaError)
        for instance_id in instance_ids:
            db.instance_destroy(self.context, instance_id)
    def test_too_many_volumes(self):
        """Filling the volume quota makes the next create_volume raise."""
        volume_ids = []
        for i in range(FLAGS.quota_volumes):
            volume_id = self._create_volume()
            volume_ids.append(volume_id)
        self.assertRaises(cloud.QuotaError,
                          self.cloud.create_volume,
                          self.context,
                          size=10)
        for volume_id in volume_ids:
            db.volume_destroy(self.context, volume_id)
| Python | 0.000001 | |
cf881dd1ba8c98dd116f2269bf0cfd38f14a7b40 | add a reel OVSFtree | OVSFTree.py | OVSFTree.py | import math
numberOfMobile=512
class Node:
    """Binary-tree node holding one OVSF code value in ``v``."""

    def __init__(self, val):
        self.v = val
        self.l = None
        self.r = None
class Tree:
    """Complete binary tree of OVSF (orthogonal variable spreading factor)
    codes.

    Each node's value is an integer whose binary digits are the code's
    chips.  Since the root code is 1, every code starts with a 1 bit, so
    ``int.bit_length()`` equals the code length.  A node's children are
    the two double-length codes derived from it: the code followed by
    itself, and the code followed by its bitwise inverse.
    """

    def __init__(self, num_codes=None):
        """Build the code tree down to ceil(log2(num_codes)) levels.

        :param num_codes: number of leaf codes required; defaults to the
            module-level ``numberOfMobile`` for backward compatibility.
        """
        if num_codes is None:
            num_codes = numberOfMobile
        self.root = Node(1)
        level = [self.root]
        for depth in range(math.ceil(math.log(num_codes, 2))):
            next_level = []
            # Mask of 2**depth one-bits: flips every chip of a code on this
            # level (codes here are 2**depth chips long).
            invert_mask = pow(2, pow(2, depth)) - 1
            for node in level:
                length = node.v.bit_length()
                node.l = Node((node.v << length) + node.v)
                node.r = Node((node.v << length) + (node.v ^ invert_mask))
                next_level.append(node.l)
                next_level.append(node.r)
            level = next_level

    def getRoot(self):
        """Return the root node (the length-1 code ``1``)."""
        return self.root

    def deleteTree(self):
        """Drop the whole tree; the garbage collector reclaims the nodes."""
        self.root = None

    def traverse(self):
        """Print every code in binary, one tree level per line."""
        level = [self.root]
        while level:
            next_level = []
            for node in level:
                print(str(bin(node.v)), end=" ")
                if node.l:
                    next_level.append(node.l)
                if node.r:
                    next_level.append(node.r)
            print(" ")
            level = next_level
# Build the full code tree for `numberOfMobile` users and dump it level by
# level in binary.
tree1=Tree()
tree1.traverse()
| Python | 0.000001 | |
632c4dffe8a217ca07410d0a353455a4c6142d39 | Solve problem 29 | problem029.py | problem029.py | #!/usr/bin/env python3
# Project Euler 29: count the distinct values of a**b for 2 <= a, b <= 100.
print(len({base ** power for base in range(2, 101) for power in range(2, 101)}))
| Python | 0.99998 | |
b1815075ac1a1697c99a6293c8cc7719060ab9b2 | Add cpuspeed sensor | homeassistant/components/sensor/cpuspeed.py | homeassistant/components/sensor/cpuspeed.py | """
homeassistant.components.sensor.cpuspeed
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
Shows the current CPU speed.
For more details about this platform, please refer to the documentation at
https://home-assistant.io/components/sensor.cpuspeed.html
"""
import logging
from homeassistant.helpers.entity import Entity
REQUIREMENTS = ['py-cpuinfo==0.1.6']
_LOGGER = logging.getLogger(__name__)
DEFAULT_NAME = "CPU speed"
ATTR_VENDOR = 'Vendor ID'
ATTR_BRAND = 'Brand'
ATTR_HZ = 'GHz Advertised'
# pylint: disable=unused-variable
def setup_platform(hass, config, add_devices, discovery_info=None):
    """ Sets up the CPU speed sensor. """
    try:
        import cpuinfo  # noqa
    except ImportError:
        _LOGGER.exception(
            "Unable to import cpuinfo. "
            "Did you maybe not install the 'py-cpuinfo' package?")
        return False
    sensor_name = config.get('name', DEFAULT_NAME)
    add_devices([CpuSpeedSensor(sensor_name)])
class CpuSpeedSensor(Entity):
    """ A CPU info sensor. """

    def __init__(self, name):
        self._name = name
        self._state = None
        self._unit_of_measurement = 'GHz'
        self.update()

    @property
    def name(self):
        """ Name of the sensor. """
        return self._name

    @property
    def state(self):
        """ Current CPU frequency in GHz. """
        return self._state

    @property
    def unit_of_measurement(self):
        """ Unit the state is expressed in. """
        return self._unit_of_measurement

    @property
    def state_attributes(self):
        """ Vendor, brand and advertised frequency, once info is loaded. """
        if self.info is None:
            return None
        return {
            ATTR_VENDOR: self.info['vendor_id'],
            ATTR_BRAND: self.info['brand'],
            ATTR_HZ: round(self.info['hz_advertised_raw'][0] / 10 ** 9, 2)
        }

    def update(self):
        """ Re-read the CPU info and refresh the measured frequency. """
        from cpuinfo import cpuinfo
        self.info = cpuinfo.get_cpu_info()
        self._state = round(float(self.info['hz_actual_raw'][0]) / 10 ** 9, 2)
| Python | 0.000001 | |
43605bd5340374a3a62e91cf544b2ba16edb320e | Add a tf-serving example for KerasBERT. | official/nlp/bert/serving.py | official/nlp/bert/serving.py | # Lint as: python3
# Copyright 2020 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Examples of SavedModel export for tf-serving."""
from absl import app
from absl import flags
import tensorflow as tf
from official.nlp.bert import bert_models
from official.nlp.bert import configs
# Command-line flags for the export: tf.Example parsing length, BERT config,
# checkpoint location and SavedModel destination.
flags.DEFINE_integer("sequence_length", None,
                     "Sequence length to parse the tf.Example. If "
                     "sequence_length > 0, add a signature for serialized "
                     "tf.Example and define the parsing specification by the "
                     "sequence_length.")
flags.DEFINE_string("bert_config_file", None,
                    "Bert configuration file to define core bert layers.")
flags.DEFINE_string("model_checkpoint_path", None,
                    "File path to TF model checkpoint.")
flags.DEFINE_string("export_path", None,
                    "Destination folder to export the serving SavedModel.")
FLAGS = flags.FLAGS
class BertServing(tf.keras.Model):
  """Bert transformer encoder model for serving."""
  def __init__(self, bert_config, name_to_features=None, name="serving_model"):
    # name_to_features: optional tf.Example parsing spec; when set, an extra
    # serialized-example serving signature is exported by export().
    super(BertServing, self).__init__(name=name)
    self.encoder = bert_models.get_transformer_encoder(
        bert_config, sequence_length=None)
    self.name_to_features = name_to_features
  def call(self, inputs):
    """Run the encoder on a dict with input_ids/input_mask/segment_ids."""
    input_word_ids = inputs["input_ids"]
    input_mask = inputs["input_mask"]
    input_type_ids = inputs["segment_ids"]
    encoder_outputs, _ = self.encoder(
        [input_word_ids, input_mask, input_type_ids])
    return encoder_outputs
  def serve_body(self, input_ids, input_mask=None, segment_ids=None):
    """Call the encoder, synthesizing mask/segment ids when not provided."""
    if segment_ids is None:
      # Requires CLS token is the first token of inputs.
      segment_ids = tf.zeros_like(input_ids)
    if input_mask is None:
      # The mask has 1 for real tokens and 0 for padding tokens.
      input_mask = tf.where(
          tf.equal(input_ids, 0), tf.zeros_like(input_ids),
          tf.ones_like(input_ids))
    inputs = dict(
        input_ids=input_ids, input_mask=input_mask, segment_ids=segment_ids)
    return self.call(inputs)
  @tf.function
  def serve(self, input_ids, input_mask=None, segment_ids=None):
    """Serving entry point for dense tensor inputs."""
    outputs = self.serve_body(input_ids, input_mask, segment_ids)
    # Returns a dictionary to control SignatureDef output signature.
    return {"outputs": outputs[-1]}
  @tf.function
  def serve_examples(self, inputs):
    """Serving entry point for serialized tf.Example strings."""
    features = tf.io.parse_example(inputs, self.name_to_features)
    for key in list(features.keys()):
      t = features[key]
      if t.dtype == tf.int64:
        # Parsed features come back int64; cast down for the model.
        t = tf.cast(t, tf.int32)
      features[key] = t
    return self.serve(
        features["input_ids"],
        input_mask=features["input_mask"] if "input_mask" in features else None,
        segment_ids=features["segment_ids"]
        if "segment_ids" in features else None)
  @classmethod
  def export(cls, model, export_dir):
    """Save *model* as a SavedModel with one or two serving signatures."""
    if not isinstance(model, cls):
      raise ValueError("Invalid model instance: %s, it should be a %s" %
                       (model, cls))
    signatures = {
        "serving_default":
            model.serve.get_concrete_function(
                input_ids=tf.TensorSpec(
                    shape=[None, None], dtype=tf.int32, name="inputs")),
    }
    if model.name_to_features:
      signatures[
          "serving_examples"] = model.serve_examples.get_concrete_function(
              tf.TensorSpec(shape=[None], dtype=tf.string, name="examples"))
    tf.saved_model.save(model, export_dir=export_dir, signatures=signatures)
def main(_):
  """Build the serving model, restore the checkpoint and export it."""
  sequence_length = FLAGS.sequence_length
  if sequence_length is not None and sequence_length > 0:
    # A fixed-length parsing spec enables the serialized-example signature.
    name_to_features = {
        "input_ids": tf.io.FixedLenFeature([sequence_length], tf.int64),
        "input_mask": tf.io.FixedLenFeature([sequence_length], tf.int64),
        "segment_ids": tf.io.FixedLenFeature([sequence_length], tf.int64),
    }
  else:
    name_to_features = None
  bert_config = configs.BertConfig.from_json_file(FLAGS.bert_config_file)
  serving_model = BertServing(
      bert_config=bert_config, name_to_features=name_to_features)
  # Only the encoder's weights are restored; the restore must match the
  # checkpoint's existing objects exactly.
  checkpoint = tf.train.Checkpoint(model=serving_model.encoder)
  checkpoint.restore(FLAGS.model_checkpoint_path
                     ).assert_existing_objects_matched().run_restore_ops()
  BertServing.export(serving_model, FLAGS.export_path)
if __name__ == "__main__":
  # All three flags are mandatory; abort with a clear error when unset.
  flags.mark_flag_as_required("bert_config_file")
  flags.mark_flag_as_required("model_checkpoint_path")
  flags.mark_flag_as_required("export_path")
  app.run(main)
| Python | 0.999998 | |
abd41ea78f2962f0b8b7166f0540727538d56471 | ajoute la state get_object | sara_flexbe_states/src/sara_flexbe_states/Wonderland_Get_Object.py | sara_flexbe_states/src/sara_flexbe_states/Wonderland_Get_Object.py | #!/usr/bin/env python
# encoding=utf8
from flexbe_core import EventState, Logger
import requests
import json
from geometry_msgs.msg import Pose, Point
from tf.transformations import quaternion_from_euler
class GetObject(EventState):
    '''
    Get an object from the Wonderland database, returning the stored entity
    closest to the expected position.

    ># id int id of the object
    ># name string name of the object
    ># color string color of the object
    ># room string room of the object
    ># type string category of the object
    ># expected_pose pose/point expected position of the object

    #> object_pose pose the pose of the returned object
    #> object_name string name of the object
    #> object_color string color of the object
    #> object_room string room of the object
    #> object_type string category of the object

    <= found object found
    <= unknown the object is unknown
    <= error return when error reading data
    '''

    def __init__(self):
        # See example_state.py for basic explanations.
        # NOTE(review): input_keys declares 'robot_pose' but execute() reads
        # 'expected_pose', and execute() writes object_id/object_type/
        # object_category which are missing from output_keys — confirm the
        # key lists against the behaviors that use this state.
        super(GetObject, self).__init__(outcomes=['found', 'unknown', 'error'],
                                        input_keys=['id', 'name', 'color', 'room', 'type', 'robot_pose'],
                                        output_keys=['id', 'object_pose', 'object_name', 'object_color', 'object_room', 'expected_pose'])
        self._index = 0
        self._header = {'api-key': 'asdf'}

    def execute(self, userdata):
        """Query the database and publish the nearest matching entity."""
        # Build the filter query.  Bug fixes vs. the original:
        #  * the color/room/type filters tested ``userdata.name`` instead of
        #    their own field (copy/paste error);
        #  * every filter appended another '?' mid-query instead of plain
        #    'key=value&' pairs after the initial '?';
        #  * values go through str() so a non-string (e.g. the integer id)
        #    no longer raises TypeError on concatenation.
        url = "http://wonderland:8000/api/object/?"
        if userdata.id is not None:
            url += "id=" + str(userdata.id) + "&"
        if userdata.name is not None:
            url += "name=" + str(userdata.name) + "&"
        if userdata.color is not None:
            url += "color=" + str(userdata.color) + "&"
        if userdata.room is not None:
            url += "room=" + str(userdata.room) + "&"
        if userdata.type is not None:
            url += "type=" + str(userdata.type) + "&"

        if userdata.expected_pose is None:
            Logger.logerr("in "+self.name+", you must give an expected pose or point")
            return 'error'

        if isinstance(userdata.expected_pose, Pose):
            position = userdata.expected_pose.position
            expX = position.x
            expY = position.y
            expZ = position.z
        elif isinstance(userdata.expected_pose, Point):
            # Bug fix: a Point carries x/y/z directly; the original read
            # ``.position.x`` here, which raised AttributeError for Points.
            point = userdata.expected_pose
            expX = point.x
            expY = point.y
            expZ = point.z
        else:
            return 'error'

        # try the request
        try:
            response = requests.get(url, headers=self._header)
        except requests.exceptions.RequestException as e:
            Logger.logerr(str(e))
            return 'error'

        # parse the returned json data
        data = json.loads(response.content)
        if len(data) == 0:
            return 'unknown'

        # keep the entity with the smallest Euclidean distance to the
        # expected position
        bestScore = 1000000
        best = None
        for d in data:
            score = ((expX-d['object']['x_position'])**2+(expY-d['object']['y_position'])**2+(expZ-d['object']['z_position'])**2)**0.5
            if score < bestScore:
                bestScore = score
                best = d

        # generate the output pose
        # NOTE(review): the distance loop reads x_position/y_position/
        # z_position but the pose below reads x/y/z/t — confirm which keys
        # the Wonderland API actually returns.
        pose = Pose()
        pose.position.x = best['object']['x']
        pose.position.y = best['object']['y']
        pose.position.z = best['object']['z']
        quat = quaternion_from_euler(0, 0, best['object']['t'])
        pose.orientation.x = quat[0]
        pose.orientation.y = quat[1]
        pose.orientation.z = quat[2]
        pose.orientation.w = quat[3]

        # send the outputs
        userdata.object_id = best['object']['id']
        userdata.object_pose = pose
        userdata.object_name = best['object']['name']
        userdata.object_color = best['object']['color']
        userdata.object_type = best['object']['type']
        userdata.object_category = best['object']['category']
        return 'found'
| Python | 0.000001 | |
272031cfbef13a5a3edbf3cf3c6fe5f00608d650 | add test for importcuedmembers command | edpcmentoring/cuedmembers/tests/test_managementcommands.py | edpcmentoring/cuedmembers/tests/test_managementcommands.py | import os
import shutil
import tempfile
from django.core.management import call_command
from django.test import TestCase
from ..models import Member
class TemporaryDirectoryTestCase(TestCase):
    """A TestCase which creates a temporary directory for each test whose path
    is available as the "tmpdir" attribute.
    """
    def setUp(self):
        # Fresh scratch directory for every test method.
        self.tmpdir = tempfile.mkdtemp()
    def tearDown(self):
        # Remove the scratch directory and everything written into it.
        shutil.rmtree(self.tmpdir)
class ImportCUEDMembersTestCase(TemporaryDirectoryTestCase):
    """End-to-end tests for the ``importcuedmembers`` management command."""
    def test_csv_import(self):
        # Importing a six-row CSV activates exactly six members.
        self.assertEqual(Member.objects.active().count(), 0)
        inpath = os.path.join(self.tmpdir, 'input.csv')
        with open(inpath, 'w') as f:
            f.write(MEMBERS_CSV_1)
        call_command('importcuedmembers', inpath)
        self.assertEqual(Member.objects.active().count(), 6)
    def test_import_deactivates_members(self):
        # Members absent from a later import are deactivated but their
        # records are kept: 5 active of 7 total after the second import.
        self.assertEqual(Member.objects.active().count(), 0)
        inpath = os.path.join(self.tmpdir, 'input.csv')
        with open(inpath, 'w') as f:
            f.write(MEMBERS_CSV_1)
        call_command('importcuedmembers', inpath)
        self.assertEqual(Member.objects.active().count(), 6)
        inpath = os.path.join(self.tmpdir, 'input.csv')
        with open(inpath, 'w') as f:
            f.write(MEMBERS_CSV_2)
        call_command('importcuedmembers', inpath)
        self.assertEqual(Member.objects.active().count(), 5)
        self.assertEqual(Member.objects.all().count(), 7)
    def test_email_domain(self):
        # The -e option derives each user's email as <crsid>@<domain>.
        self.assertEqual(Member.objects.active().count(), 0)
        inpath = os.path.join(self.tmpdir, 'input.csv')
        with open(inpath, 'w') as f:
            f.write(MEMBERS_CSV_1)
        call_command('importcuedmembers', '-e', 'mailinator.com', inpath)
        self.assertEqual(Member.objects.active().count(), 6)
        u1 = Member.objects.filter(user__username='test0001').first().user
        self.assertEqual(u1.email, 'test0001@mailinator.com')
# Two CSV files with different sets of users.
# Columns follow the importer's expected header; most fields are left blank.
MEMBERS_CSV_1 = '''
crsid,status,surname,fnames,pref_name,room,phone,arrived,start_date,end_date,division,role_course,host_supervisor,research_group
test0001,,Klein,Alexandra Corrina,Alexandra,,,,,,C,,,Materials Engineering
test0002,,Herman,Verna Ibrahim Fletcher,Verna,,,,,,,,,
test0004,,Kihn,Clementine,Clementine,,,,,,C,,,Engineering Design
test0005,,Lindgren,Eric,Eric,,,,,,A,,,Turbomachinery
test0006,,Torphy,Shirleyann Arden Minerva,Minerva,,,,,,,,,
test0008,,Kling,Jorden,Jorden,,,,,,A,,,Turbomachinery
'''.strip()
# Second snapshot: drops test0002/test0005, adds test0003 (see
# test_import_deactivates_members).
MEMBERS_CSV_2 = '''
crsid,status,surname,fnames,pref_name,room,phone,arrived,start_date,end_date,division,role_course,host_supervisor,research_group
test0001,,Klein,Alexandra Corrina,Alexandra,,,,,,C,,,Materials Engineering
test0003,,Emmerich,Pleasant,Pleasant,,,,,,A,,,Turbomachinery
test0004,,Kihn,Clementine,Clementine,,,,,,C,,,Engineering Design
test0006,,Torphy,Shirleyann Arden Minerva,Minerva,,,,,,,,,
test0008,,Kling,Jorden,Jorden,,,,,,A,,,Turbomachinery
'''.strip()
| Python | 0.000001 | |
0ba15652a5624cf8fa42f4caf603d84c09a0698b | Add kata: 6 kyu | 6_kyu/Decode_the_Morse_code.py | 6_kyu/Decode_the_Morse_code.py | # @see: https://www.codewars.com/kata/decode-the-morse-code
def decodeMorse(morseCode):
    """Decode a Morse-code string into plain text.

    Letters within a word are separated by one space, words by three
    spaces; MORSE_CODE maps each symbol to its character.
    """
    decoded_words = []
    for word in morseCode.strip().split('   '):
        letters = [MORSE_CODE[symbol] for symbol in word.split()]
        decoded_words.append(''.join(letters))
    return ' '.join(decoded_words)
| Python | 0.999999 | |
6837bbf2a1816d97b6c517bcb244aa51cf1eb7ba | Create robots_txt.py | robots_txt.py | robots_txt.py | import urlib.request
import io
def ger_robots_txt(url):
if url.endswith('/')
path = url
else:
path - url + '/'
# https://reddit.com/
| Python | 0.000064 | |
b36192eec53664f9178bfc4000d89b8ca9be1544 | Add merge migration | osf/migrations/0030_merge.py | osf/migrations/0030_merge.py | # -*- coding: utf-8 -*-
# Generated by Django 1.9 on 2017-01-24 18:57
from __future__ import unicode_literals
from django.db import migrations
class Migration(migrations.Migration):
dependencies = [
('osf', '0029_merge'),
('osf', '0029_externalaccount_date_last_refreshed'),
]
operations = [
]
| Python | 0 | |
6abf2f993813142ea685bc48a7a5a266d1905f1a | build indices qsub for bowtie or star with rsem | rsem_build.py | rsem_build.py | #/usr/bin/env python
import commands
import os
from subprocess import call
def write_file(filename, contents):
"""Write the given contents to a text file.
ARGUMENTS
filename (string) - name of the file to write to, creating if it doesn't exist
contents (string) - contents of the file to be written
"""
# Open the file for writing
file = open(filename, 'w')
# Write the file contents
file.write(contents)
# Close the file
file.close()
return
def qsub_submit(command_filename, hold_jobid = None, name = None):
"""Submit the given command filename to the queue.
ARGUMENTS
command_filename (string) - the name of the command file to submit
OPTIONAL ARGUMENTS
hold_jobid (int) - job id to hold on as a prerequisite for execution
RETURNS
jobid (integer) - the jobid
"""
# Form command
command = 'qsub'
if name: command += ' -N %s' % name
if hold_jobid: command += ' -hold_jid %d' % hold_jobid
command += ' %s' % command_filename
# Submit the job and capture output.
import subprocess
print "> " + command
process = subprocess.Popen(command, stdout=subprocess.PIPE, shell=True)
out, err = process.communicate()
print(out)
# Match job id
jobid = out.split(' ')[2]
return int(jobid)
out= '${TMPDIR}'
call_files = ''
path_gtf= '/netapp/home/idriver/Mus_musculus.GRCm38.83.gtf'
path_fa='/netapp/home/idriver/Mus_musculus.GRCm38.dna.primary_assembly.fa'
command = 'rsem-prepare-reference --gtf '+path_gtf+' --bowtie2 -p 6 '+path_fa+' GRCm38'
print command
call('mkdir -p /netapp/home/idriver/rsem_m38', shell=True)
contents = """\
#!/bin/sh
#$ -l arch=linux-x64
#$ -S /bin/bash
#$ -o /netapp/home/idriver/rsem_m38
#$ -e /netapp/home/idriver/error_spc
#$ -cwd
#$ -r y
#$ -j y
#$ -l netapp=40G,scratch=100G,mem_total=100G
#$ -pe smp 6
#$ -R yes
#$ -l h_rt=7:59:00
set echo on
date
hostname
pwd
export PATH=$PATH:${HOME}/bin
PATH=$PATH:/netapp/home/idriver/bin/cufflinks-2.2.1.Linux_x86_64
PATH=$PATH:/netapp/home/idriver/bin/bowtie2-2.2.8
PATH=$PATH:/netapp/home/idriver/bin/samtools-1.2
PATH=$PATH:/netapp/home/idriver/bin/tophat-2.1.0.Linux_x86_64
PATH=$PATH:/netapp/home/idriver/bin/RSEM-1.2.28
PATH=$PATH:/netapp/home/idriver/bin/STAR-STAR_2.4.2a/source
PATH=$PATH:/usr/bin/gunzip
export PATH
alias STAR="/netapp/home/idriver/bin/STAR-STAR_2.4.2a/source/STAR"
echo $PATH
export TMPDIR=/scratch
echo $TMPDIR
cd $TMPDIR
mkdir $TMPDIR/rsem_m38
cd rsem_m38
%(command)s
# Copy the results back to the project directory:
cd $TMPDIR
cp -r rsem_m38 /netapp/home/idriver/rsem_m38
rm -r rsem_m38
date
""" % vars()
filename = 'rsem_build.sh'
write_file(filename, contents)
jobid = qsub_submit(filename, name = 'rsem_build')
print "Submitted. jobid = %d" % jobid
# Write jobid to a file.
import subprocess
process = subprocess.Popen('echo %d > jobids' % jobid, stdout=subprocess.PIPE, shell = True)
out, err = process.communicate()
print(out)
| Python | 0 | |
db41bce3d90cfada9916baa8f9267cd9e6160a94 | Add an example for opening a file. | examples/open_file.py | examples/open_file.py | import numpy as np
import pyh5md
f = pyh5md.H5MD_File('poc.h5', 'r')
at = f.trajectory('atoms')
at_pos = at.data('position')
r = at_pos.v.value
print r
f.f.close()
| Python | 0 | |
2cd57876c72d5c941bcb1ae497df48dbbc943ba9 | Create new package. (#6213) | var/spack/repos/builtin/packages/r-forecast/package.py | var/spack/repos/builtin/packages/r-forecast/package.py | ##############################################################################
# Copyright (c) 2013-2017, Lawrence Livermore National Security, LLC.
# Produced at the Lawrence Livermore National Laboratory.
#
# This file is part of Spack.
# Created by Todd Gamblin, tgamblin@llnl.gov, All rights reserved.
# LLNL-CODE-647188
#
# For details, see https://github.com/spack/spack
# Please also see the NOTICE and LICENSE files for our notice and the LGPL.
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU Lesser General Public License (as
# published by the Free Software Foundation) version 2.1, February 1999.
#
# This program is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the IMPLIED WARRANTY OF
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the terms and
# conditions of the GNU Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this program; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
##############################################################################
from spack import *
class RForecast(RPackage):
    """Methods and tools for displaying and analysing univariate time
    series forecasts including exponential smoothing via state space
    models and automatic ARIMA modelling."""
    # CRAN package 'forecast'; list_url lets Spack locate archived releases.
    homepage = "https://cran.r-project.org/package=forecast"
    url      = "https://cran.r-project.org/src/contrib/forecast_8.2.tar.gz"
    list_url = "https://cran.r-project.org/src/contrib/Archive/forecast"
    version('8.2', '3ef095258984364c100b771b3c90d15e')
    # Runtime R package dependencies, mirroring the CRAN DESCRIPTION.
    depends_on('r-magrittr', type=('build', 'run'))
    depends_on('r-ggplot2', type=('build', 'run'))
    depends_on('r-colorspace', type=('build', 'run'))
    depends_on('r-nnet', type=('build', 'run'))
    depends_on('r-rcpp', type=('build', 'run'))
    depends_on('r-fracdiff', type=('build', 'run'))
    depends_on('r-tseries', type=('build', 'run'))
    depends_on('r-lmtest', type=('build', 'run'))
    depends_on('r-zoo', type=('build', 'run'))
    depends_on('r-timedate', type=('build', 'run'))
    depends_on('r-rcpparmadillo', type=('build', 'run'))
| Python | 0 | |
c192042bbaf9060ce3a76f1cbabc5f380fa4bfd6 | Adding uppercase character | src/Character.py | src/Character.py | import pygame
from sprite import *
from constants import *
class Character:
    """Player character: a sprite with hit points and a fixed move speed."""

    def __init__(self):
        self.sprite = Sprite('../resources/char.png')
        self.health = 10
        self.speed = 40

    def move(self, direction):
        """Move by ``direction`` (dx, dy) scaled by speed, clamped on-screen."""
        rect = self.sprite.rect
        target_x = rect.x + (direction[0] * self.speed)
        target_y = rect.y + (direction[1] * self.speed)
        # Keep the sprite inside the window bounds.
        if target_x <= 0:
            target_x = 0
        elif target_x >= WIDTH - rect.width:
            target_x = WIDTH - rect.width
        if target_y <= 0:
            target_y = 0
        elif target_y >= HEIGHT - rect.height:
            target_y = HEIGHT - rect.height
        self.sprite.set_position(target_x, target_y)

    def draw(self, window_surface):
        """Draw the character's sprite onto the given surface."""
        self.sprite.draw(window_surface)
| Python | 0.999723 | |
10eb703867fd10df543a141837c2a57d1052ba2c | Rename file with correct pattern | ideascube/conf/kb_civ_babylab.py | ideascube/conf/kb_civ_babylab.py | # -*- coding: utf-8 -*-
"""KoomBook conf"""
from .kb import * # noqa
from django.utils.translation import ugettext_lazy as _
LANGUAGE_CODE = 'fr'
IDEASCUBE_NAME = 'BabyLab'
HOME_CARDS = STAFF_HOME_CARDS + [
{
'id': 'blog',
},
{
'id': 'mediacenter',
},
{
'id': 'bsfcampus',
},
{
'id': 'khanacademy',
},
{
'id': 'wikistage',
'languages': ['fr']
},
{
'id': 'wikimooc',
'languages': ['fr']
},
{
'id': 'vikidia',
'languages': ['fr']
},
{
'id': 'universcience',
'languages': ['fr']
},
{
'id': 'e-penser',
'languages': ['fr']
},
{
'id': 'deus-ex-silicium',
'languages': ['fr']
},
{
'id': 'cest-pas-sorcier',
},
{
'id': 'wikipedia',
'languages': ['fr']
},
{
'id': 'wikiversity',
'languages': ['fr']
},
{
'id': 'ted',
'sessions': [
('tedxgeneva2014.fr', 'Geneva 2014'),
('tedxlausanne2012.fr', 'Lausanne 2012'),
('tedxlausanne2013.fr', 'Lausanne 2013'),
('tedxlausanne2014.fr', 'Lausanne 2014'),
]
},
]
| Python | 0.000001 | |
f31fcd789254f95b311f4fa4009a04ad919c2027 | add url update migration | accelerator/migrations/0049_update_fluent_redirect_url.py | accelerator/migrations/0049_update_fluent_redirect_url.py | # -*- coding: utf-8 -*-
# Generated by Django 1.11.18 on 2019-04-11 11:35
from __future__ import unicode_literals
from django.db import migrations
from accelerator.sitetree_navigation.sub_navigation import (
create_directory_subnav,
create_events_subnav,
create_home_subnav,
create_judging_subnav,
create_resources_subnav,
create_startup_dashboard_subnav,
delete_directory_subnav,
delete_events_subnav,
delete_home_subnav,
delete_judging_subnav,
delete_resources_subnav,
delete_startup_dashboard_subnav
)
def create_subnav_trees_and_items(apps, schema_editor):
    """Forward step: create each section's sub-navigation tree.

    'apps' and 'schema_editor' are required by RunPython but unused here.
    """
    create_directory_subnav()
    create_events_subnav()
    create_home_subnav()
    create_judging_subnav()
    create_resources_subnav()
    create_startup_dashboard_subnav()
def delete_subnav_trees_and_items(apps, schema_editor):
    """Reverse step: remove the sub-navigation trees created above."""
    delete_directory_subnav()
    delete_events_subnav()
    delete_home_subnav()
    delete_judging_subnav()
    delete_resources_subnav()
    delete_startup_dashboard_subnav()
class Migration(migrations.Migration):
    # Data migration only: builds/removes sitetree sub-navigation objects.
    dependencies = [
        ('accelerator', '0048_create_sub_navigation_objects'),
    ]
    operations = [
        migrations.RunPython(
            create_subnav_trees_and_items,
            delete_subnav_trees_and_items),
    ]
| Python | 0.000001 | |
f21ff91cb3c70a150eca68dc03c51577ff343f18 | Solve challenge 8 | Challenges/chall_8.py | Challenges/chall_8.py | #!/Applications/anaconda/envs/Python3/bin
# Python challenge - 8
# http://www.pythonchallenge.com/pc/def/integrity.html
# http://www.pythonchallenge.com/pc/return/good.html
import bz2
def main():
    """Solve Python Challenge 8 (integrity / good.html).

    The page embeds two bzip2-compressed byte strings; decompressing them
    reveals the username and password for ../return/good.html. Prints both
    and returns 0.
    """
    compressed = [
        # username
        b'BZh91AY&SYA\xaf\x82\r\x00\x00\x01\x01\x80\x02\xc0\x02\x00 \x00!\x9ah3M\x07<]\xc9\x14\xe1BA\x06\xbe\x084',
        # password
        b'BZh91AY&SY\x94$|\x0e\x00\x00\x00\x81\x00\x03$ \x00!\x9ah3M\x13<]\xc9\x14\xe1BBP\x91\xf08',
    ]
    for blob in compressed:
        print(bz2.decompress(blob))
    return 0
# Keywords: huge; file
if __name__ == '__main__':
main()
| Python | 0.004408 | |
96ba88c74a77f3b71ef4a8b51c29013d16e23973 | Create tofu/plugins/MISTRAL/Inputs with empty __init__.py | tofu/plugins/MISTRAL/Inputs/__init__.py | tofu/plugins/MISTRAL/Inputs/__init__.py | Python | 0.000017 | ||
87a717b76ccc4ba36c394fafa4a8bc89ef6dca4b | Add 'series' tests | git_pw/tests/test_series.py | git_pw/tests/test_series.py | import unittest
from click.testing import CliRunner as CLIRunner
import mock
from git_pw import series
@mock.patch('git_pw.api.detail')
@mock.patch('git_pw.api.download')
@mock.patch('git_pw.utils.git_am')
class ApplyTestCase(unittest.TestCase):
    # Decorators apply bottom-up, so test methods receive the mocks in the
    # order (git_am, download, detail).
    def test_apply_without_args(self, mock_git_am, mock_download, mock_detail):
        """Validate calling with no arguments."""
        rsp = {'mbox': 'http://example.com/api/patches/123/mbox/'}
        mock_detail.return_value = rsp
        mock_download.return_value = 'test.patch'
        runner = CLIRunner()
        result = runner.invoke(series.apply_cmd, ['123'])
        assert result.exit_code == 0, result
        mock_detail.assert_called_once_with('series', 123)
        mock_download.assert_called_once_with(rsp['mbox'])
        mock_git_am.assert_called_once_with(mock_download.return_value, ())
    def test_apply_with_args(self, mock_git_am, mock_download, mock_detail):
        """Validate passthrough of arbitrary arguments to git-am."""
        rsp = {'mbox': 'http://example.com/api/patches/123/mbox/'}
        mock_detail.return_value = rsp
        mock_download.return_value = 'test.patch'
        runner = CLIRunner()
        result = runner.invoke(series.apply_cmd, ['123', '--', '-3'])
        assert result.exit_code == 0, result
        mock_detail.assert_called_once_with('series', 123)
        mock_download.assert_called_once_with(rsp['mbox'])
        mock_git_am.assert_called_once_with(mock_download.return_value,
                                            ('-3',))
@mock.patch('git_pw.api.detail')
@mock.patch('git_pw.api.download')
@mock.patch('git_pw.api.get')
class DownloadTestCase(unittest.TestCase):
    # Mocks arrive bottom-up: (get, download, detail).
    def test_download(self, mock_get, mock_download, mock_detail):
        """Validate standard behavior."""
        rsp = {'mbox': 'http://example.com/api/patches/123/mbox/'}
        mock_detail.return_value = rsp
        mock_download.return_value = 'test.patch'
        runner = CLIRunner()
        result = runner.invoke(series.download_cmd, ['123'])
        assert result.exit_code == 0, result
        mock_detail.assert_called_once_with('series', 123)
        mock_download.assert_called_once_with(rsp['mbox'])
        mock_get.assert_not_called()
    def test_download_to_file(self, mock_get, mock_download, mock_detail):
        """Validate downloading to a file."""
        # Stand-in for a requests-style response exposing only .text.
        class MockResponse(object):
            @property
            def text(self):
                return b'alpha-beta'
        rsp = {'mbox': 'http://example.com/api/patches/123/mbox/'}
        mock_detail.return_value = rsp
        mock_get.return_value = MockResponse()
        runner = CLIRunner()
        with runner.isolated_filesystem():
            result = runner.invoke(series.download_cmd, ['123', 'test.patch'])
            assert result.exit_code == 0, result
            with open('test.patch') as output:
                assert ['alpha-beta'] == output.readlines()
        mock_detail.assert_called_once_with('series', 123)
        mock_get.assert_called_once_with(rsp['mbox'])
        mock_download.assert_not_called()
class ShowTestCase(unittest.TestCase):
    @staticmethod
    def _get_series(**kwargs):
        """Return a representative series API response, with overrides."""
        rsp = {
            'id': 123,
            'date': '2017-01-01 00:00:00',
            'name': 'Sample series',
            'submitter': {
                'name': 'foo',
                'email': 'foo@bar.com',
            },
            'project': {
                'name': 'bar',
            },
            'version': '1',
            'total': 2,
            'received_total': 2,
            'received_all': True,
            'cover_letter': None,
            'patches': [],
        }
        rsp.update(**kwargs)
        return rsp
    @mock.patch('git_pw.api.detail')
    def test_show(self, mock_detail):
        """Validate standard behavior."""
        rsp = self._get_series()
        mock_detail.return_value = rsp
        runner = CLIRunner()
        result = runner.invoke(series.show_cmd, ['123'])
        assert result.exit_code == 0, result
        mock_detail.assert_called_once_with('series', 123)
@mock.patch('git_pw.utils.echo_via_pager', new=mock.Mock)
@mock.patch('git_pw.api.version', return_value=(1, 0))
@mock.patch('git_pw.api.index')
class ListTestCase(unittest.TestCase):
    # 'echo_via_pager' is replaced outright (new=...), so methods receive
    # only (index, version) mocks, bottom-up.
    @staticmethod
    def _get_series(**kwargs):
        return ShowTestCase._get_series(**kwargs)
    @staticmethod
    def _get_people(**kwargs):
        """Return a representative people API response, with overrides."""
        rsp = {
            'id': 1,
            'name': 'John Doe',
            'email': 'john@example.com',
        }
        rsp.update(**kwargs)
        return rsp
    def test_list(self, mock_index, mock_version):
        """Validate standard behavior."""
        rsp = [self._get_series()]
        mock_index.return_value = rsp
        runner = CLIRunner()
        result = runner.invoke(series.list_cmd, [])
        assert result.exit_code == 0, result
        mock_index.assert_called_once_with('series', [
            ('q', None), ('page', None), ('per_page', None),
            ('order', '-date')])
    def test_list_with_filters(self, mock_index, mock_version):
        """Validate behavior with filters applied.
        Apply all filters, including those for pagination.
        """
        people_rsp = [self._get_people()]
        series_rsp = [self._get_series()]
        mock_index.side_effect = [people_rsp, series_rsp]
        runner = CLIRunner()
        result = runner.invoke(series.list_cmd, [
            '--submitter', 'john@example.com', '--limit', 1, '--page', 1,
            '--sort', '-name', 'test'])
        assert result.exit_code == 0, result
        calls = [
            mock.call('people', [('q', 'john@example.com')]),
            mock.call('series', [
                ('submitter', 1), ('q', 'test'), ('page', 1), ('per_page', 1),
                ('order', '-name')])]
        mock_index.assert_has_calls(calls)
    @mock.patch('git_pw.series.LOG')
    def test_list_with_invalid_filters(self, mock_log, mock_index,
                                       mock_version):
        """Validate behavior with filters applied.
        Try to filter against a submitter filter that's too broad. This should
        error out saying that too many possible submitters were found.
        """
        people_rsp = [self._get_people(), self._get_people()]
        series_rsp = [self._get_series()]
        mock_index.side_effect = [people_rsp, series_rsp]
        runner = CLIRunner()
        result = runner.invoke(series.list_cmd, ['--submitter',
                                                 'john@example.com'])
        assert result.exit_code == 1, result
        assert mock_log.error.called
35317e778b2fe1d238e21954df1eac0c5380b00b | Add corpus fetch from database | generate_horoscope.py | generate_horoscope.py | #!/usr/bin/env python3
# encoding: utf-8
import argparse
import sqlite3
import sys
"""generate_horoscope.py: Generates horoscopes based provided corpuses"""
__author__ = "Project Zodiacy"
__copyright__ = "Copyright 2015, Project Zodiacy"
_parser = argparse.ArgumentParser(description="Awesome SQLite importer")
_parser.add_argument('-d', '--database', dest='database', required=True, help='sqlite database file')
_parser.add_argument('-s', '--sign', dest='sign', help='zodiac sign to generate', default=None)
_parser.add_argument('-k', '--keyword', dest='keyword', help='keyword for the horoscope', default=None)
_parser.add_argument('-t', '--threshold', dest='threshold', help='minimum count of horoscopes for the given filters', default=10)
def keyword_valid(cursor, keyword, threshold=10):
    """ Checks whether enough horoscopes are present for the keyword.

    Implements the previously-stubbed check (it always returned True):
    counts rows in 'horoscopes' matching the keyword and compares against
    the threshold.
    """
    row = cursor.execute(
        'SELECT count(*) from horoscopes WHERE keyword=?', (keyword,)).fetchone()
    return row[0] >= threshold
def get_corpuses(cursor, with_rating=False, zodiac_sign=None, keyword=None):
    """ Returns a cursor with all horoscopes for the given parameters """
    # Ordinal encoding used by the 'sign' column: 0=general, 1=aries, ...
    zodiac_signs = dict(zip(['general', 'aries', 'taurus', 'gemini', 'cancer', 'leo', 'virgo', 'libra', 'scorpio', 'sagittarius', 'capricorn', 'aquarius', 'pisces'], range(13)))
    if zodiac_sign is not None and zodiac_sign not in zodiac_signs:
        raise ValueError('Invalid zodiac sign')
    base_stmt = 'SELECT interp%s from horoscopes' % (',rating' if with_rating else '')
    # Build the WHERE clause from whichever filters were supplied instead of
    # enumerating the four sign/keyword combinations by hand. Clause order
    # (sign first, then keyword) matches the original statements.
    conditions = []
    params = []
    if zodiac_sign is not None:
        conditions.append('sign=?')
        params.append(str(zodiac_signs[zodiac_sign]))
    if keyword is not None:
        conditions.append('keyword=?')
        params.append(keyword)
    if conditions:
        base_stmt += ' WHERE ' + ' and '.join(conditions)
    return cursor.execute(base_stmt, tuple(params))
if __name__ == '__main__':
    args = _parser.parse_args()
    with sqlite3.connect(args.database) as conn:
        # Bug fixes: (1) 'if not keyword_valid' tested the function object
        # (always truthy) instead of calling it; (2) print() was handed
        # sys.stderr as a value rather than via file=; (3) the CLI arguments
        # were ignored in favour of hard-coded test values.
        if not keyword_valid(conn.cursor(), args.keyword, args.threshold):
            print('Not enough horoscopes for the given keyword', file=sys.stderr)
            sys.exit(1)
        corpuses = get_corpuses(conn.cursor(), zodiac_sign=args.sign,
                                keyword=args.keyword)
        print(corpuses.fetchone())
| Python | 0 | |
0b03dd638dd5ac3358d89a5538c707d5412b84ae | Add basic network broker state machine | broker/network.py | broker/network.py | from hypothesis.stateful import GenericStateMachine
class NetworkBroker(GenericStateMachine):
    """
    Broker to coordinate network traffic

    nodes = A map of node ids to node objects.
    network = An adjacency list of what nodes can talk to each other. If a is
        in network[b] than b -> a communcation is allowed. This is a map of
        id type -> set(id type)
    messages = A queue of messages. messages[0] is the head, where messages are
        sent from. Messages are tuples in the form of (from, to, data).
    """
    def __init__(self, nodes):
        self.nodes = nodes
        # Initially every node may talk to every node (including itself).
        self.network = dict([(i, set(nodes.keys())) for i in nodes.keys()])
        self.messages = []

    def steps(self):
        # Strategy generation is not implemented yet.
        pass

    def execute_step(self, step):
        """
        Actions:
            DeliverMsg
                If next message is deliverable, deliver it. Otherwise, drop it.
            DropMsg
                Drop the next message.
            DestroyEdge (from, to)
                Destroys the edge from -> to, causing any packets sent along it to be dropped.
            HealEdge (from, to)
                Heal the edge from -> to, allowing packets to be sent along it.
            DuplicateMsg
                Create a copy of the message at the front of the queue
            DelayMsg n
                Push the message at the front of the queue back by n slots
        """
        action, value = step
        if action == "DeliverMsg":
            message = self.messages.pop(0)
            self.nodes[message[1]].recv(message[0], message[2])
        if action == "DropMsg":
            self.messages.pop(0)
        if action == "DestroyEdge":
            # BUG FIX: the edge endpoints live in `value`, not `step` —
            # step[0] is the action string and step[1] is the whole tuple.
            self.network[value[0]].remove(value[1])
        if action == "HealEdge":
            self.network[value[0]].add(value[1])
        if action == "DuplicateMsg":
            self.messages.insert(0, self.messages[0])
        if action == "DelayMsg":
            self.messages.insert(value, self.messages.pop(0))
| Python | 0 | |
3ab61b1e9cc155868108e658ad7e87fac9569e10 | add run script for bulk loader. | loader/loader.py | loader/loader.py | #!/usr/bin/python
import os, sys, urllib2, urllib
def cleanup(args):
    """Remove any previous HFile output for this table from HDFS."""
    cmd = "hadoop fs -rm -r /tmp/%s" % args["htable_name"]
    print cmd
    ret = os.system(cmd)
    print cmd, "return", ret
    return ret
def hfile(args):
    """Run the Spark job that converts the input into HBase HFiles.

    Returns the os.system exit status (0 on success).
    """
    cmd = """spark-submit --class "subscriber.TransferToHFile" \
    --name "TransferToHFile@shon" \
    --conf "spark.task.maxFailures=20" \
    --master yarn-cluster \
    --num-executors %s --driver-memory 1g --executor-memory 2g --executor-cores 1 %s \
    %s /tmp/%s %s %s %s %s %s %s""" % (args["num_executors"], JAR, args["input"], args["htable_name"], args["hbase_zk"], args["htable_name"], args["db_url"], args["max_file_per_region"], args["label_mapping"], args["auto_create_edge"])
    print cmd
    ret = os.system(cmd)
    print cmd, "return", ret
    return ret
def distcp(args):
    """Copy the generated HFiles to the HBase cluster's HDFS via distcp."""
    cmd = "hadoop distcp -overwrite -m %s -bandwidth %s /tmp/%s %s/tmp/%s" % (args["-m"], args["-bandwidth"], args["htable_name"], args["hbase_namenode"], args["htable_name"])
    print cmd
    ret = os.system(cmd)
    print cmd, "return", ret
    return ret
def chmod(args):
    """Open up permissions on the copied HFiles so HBase can move them."""
    cmd = "export HADOOP_CONF_DIR=%s; export HADOOP_USER_NAME=hdfs; hadoop fs -chmod -R 777 /tmp/%s" % (args["HADOOP_CONF_DIR"], args["htable_name"])
    print cmd
    ret = os.system(cmd)
    print cmd, "return", ret
    return ret
def load(args):
    """Complete the bulk load via HBase's LoadIncrementalHFiles tool."""
    cmd = "export HADOOP_CONF_DIR=%s; export HBASE_CONF_DIR=%s; hbase %s /tmp/%s %s" % (args["HADOOP_CONF_DIR"], args["HBASE_CONF_DIR"], LOADER_CLASS, args["htable_name"], args["htable_name"])
    print cmd
    ret = os.system(cmd)
    print cmd, "return", ret
    return ret
def send(msg):
    """Report a progress/status message (currently just prints it)."""
    print msg
def run(args):
    """Run the full bulk-load pipeline, stopping at the first failed stage.

    Stages: cleanup -> hfile -> distcp -> chmod -> load. Each stage's
    non-zero exit status aborts the pipeline with a [Failed] message.
    """
    cleanup(args)
    send("[Start]: bulk loader")
    ret = hfile(args)
    if ret != 0: return send("[Failed]: loader build hfile failed %s" % ret)
    else: send("[Success]: loader build hfile")
    ret = distcp(args)
    if ret != 0: return send("[Failed]: loader distcp failed %s" % ret)
    else: send("[Success]: loader distcp")
    ret = chmod(args)
    if ret != 0: return send("[Failed]: loader chmod failed %s" % ret)
    else: send("[Success]: loader chmod")
    ret = load(args)
    if ret != 0: return send("[Failed]: loader complete bulkload failed %s" % ret)
    else: send("[Success]: loader complete bulkload")
# HBase tool class used by load(), and the assembled Spark job jar.
LOADER_CLASS = "org.apache.hadoop.hbase.mapreduce.LoadIncrementalHFiles"
JAR="loader/target/scala-2.10/s2loader-assembly-0.11.0-SNAPSHOT.jar"
# Pipeline configuration consumed by the stage functions above.
args = {
    "HADOOP_CONF_DIR": "hdfs_conf_gasan",
    "HBASE_CONF_DIR": "hbase_conf_gasan",
    "htable_name": "test",
    "hbase_namenode": "hdfs://nameservice:8020",
    "hbase_zk": "localhost",
    "db_url": "jdbc:mysql://localhost:3306/graph_dev",
    "max_file_per_region": 1,
    "label_mapping": "none",
    "auto_create_edge": "false",
    "-m": 1,
    "-bandwidth": 10,
    "num_executors": 2,
    "input": "/user/test.txt"
}
run(args)
c48be39a1f04af887349ef7f19ecea4312425cf9 | initialize for production | shoppley.com/shoppley/apps/offer/management/commands/initialize.py | shoppley.com/shoppley/apps/offer/management/commands/initialize.py | from django.core.management.base import NoArgsCommand
from shoppleyuser.models import Country, Region, City, ZipCode, ShoppleyUser
import os, csv
from googlevoice import Voice
FILE_ROOT = os.path.abspath(os.path.dirname(__file__))
class Command(NoArgsCommand):
def handle_noargs(self, **options):
f = open(FILE_ROOT+"/../../../shoppleyuser/data/US.txt", "r")
zip_reader = csv.reader(f, delimiter="\t")
for row in zip_reader:
country_obj, created = Country.objects.get_or_create(name="United States", code=row[0])
zip_code = row[1]
city = row[2]
region = row[3]
region_code = row[4]
latitude = row[9]
longitude = row[10]
region_obj, created = Region.objects.get_or_create(name=region,
code=region_code, country=country_obj)
city_obj, created = City.objects.get_or_create(name=city, region=region_obj)
zip_obj, created = ZipCode.objects.get_or_create(code=zip_code,
city=city_obj, latitude=latitude, longitude=longitude)
print "done"
| Python | 0.000001 | |
00f3e74387fc7a215af6377cb90555d142b81d74 | Add acoustics module with class AcousticMaterial. | pyfds/acoustics.py | pyfds/acoustics.py | class AcousticMaterial:
"""Class for specification of acoustic material parameters."""
def __init__(self, sound_velocity, density,
shear_viscosity=0, bulk_viscosity=0,
thermal_conductivity=0, isobaric_heat_cap=1, isochoric_heat_cap=1):
"""Default values for optional parameters create lossless medium."""
self.sound_velocity = sound_velocity
self.density = density
self.shear_viscosity = shear_viscosity
self.bulk_viscosity = bulk_viscosity
self.thermal_conductivity = thermal_conductivity
self.isobaric_heat_cap = isobaric_heat_cap
self.isochoric_heat_cap = isochoric_heat_cap
@property
def absorption_coef(self):
"""This is a helper variable that sums up all losses into a single quantity."""
return (4/3 * self.shear_viscosity + self.bulk_viscosity + self.thermal_conductivity *
(self.isobaric_heat_cap - self.isochoric_heat_cap) /
(self.isobaric_heat_cap * self.isochoric_heat_cap))
| Python | 0 | |
75ffc049d021e88fed37dc009376761661452cbe | Add unit tests for heat.scaling.template | heat/tests/test_scaling_template.py | heat/tests/test_scaling_template.py | # Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import itertools
from heat.scaling import template
from heat.common import short_id
from heat.tests.common import HeatTestCase
class ResourceTemplatesTest(HeatTestCase):
def setUp(self):
super(ResourceTemplatesTest, self).setUp()
ids = ('stubbed-id-%s' % (i,) for i in itertools.count())
self.patchobject(short_id, 'generate_id').side_effect = ids.next
def test_create_template(self):
"""
When creating a template from scratch, an empty list is accepted as
the "old" resources and new resources are created up to num_resource.
"""
templates = template.resource_templates([], {'type': 'Foo'}, 2, 0)
expected = [
('stubbed-id-0', {'type': 'Foo'}),
('stubbed-id-1', {'type': 'Foo'})]
self.assertEqual(expected, list(templates))
def test_replace_template(self):
"""
If num_replace is the number of old resources, then all of the
resources will be replaced.
"""
old_resources = [
('old-id-0', {'type': 'Foo'}),
('old-id-1', {'type': 'Foo'})]
templates = template.resource_templates(old_resources, {'type': 'Bar'},
1, 2)
expected = [('old-id-1', {'type': 'Bar'})]
self.assertEqual(expected, list(templates))
def test_replace_some_units(self):
"""
If the resource definition changes, only the number of replacements
specified will be made; beyond that, the original templates are used.
"""
old_resources = [
('old-id-0', {'type': 'Foo'}),
('old-id-1', {'type': 'Foo'})]
new_spec = {'type': 'Bar'}
templates = template.resource_templates(old_resources, new_spec, 2, 1)
expected = [
('old-id-0', {'type': 'Bar'}),
('old-id-1', {'type': 'Foo'})]
self.assertEqual(expected, list(templates))
def test_growth_counts_as_replacement(self):
"""
If we grow the template and replace some elements at the same time, the
number of replacements to perform is reduced by the number of new
resources to be created.
"""
spec = {'type': 'Foo'}
old_resources = [
('old-id-0', spec),
('old-id-1', spec)]
new_spec = {'type': 'Bar'}
templates = template.resource_templates(old_resources, new_spec, 4, 2)
expected = [
('old-id-0', spec),
('old-id-1', spec),
('stubbed-id-0', new_spec),
('stubbed-id-1', new_spec)]
self.assertEqual(expected, list(templates))
def test_replace_units_some_already_up_to_date(self):
"""
If some of the old resources already have the new resource definition,
then they won't be considered for replacement, and the next resource
that is out-of-date will be replaced.
"""
old_resources = [
('old-id-0', {'type': 'Bar'}),
('old-id-1', {'type': 'Foo'})]
new_spec = {'type': 'Bar'}
templates = template.resource_templates(old_resources, new_spec, 2, 1)
second_batch_expected = [
('old-id-0', {'type': 'Bar'}),
('old-id-1', {'type': 'Bar'})]
self.assertEqual(second_batch_expected, list(templates))
| Python | 0.000015 | |
3d8ef3b0f31575354f03583a5f053fad6838084d | add `YouCompleteMe` config file. | .ycm_extra_conf.py | .ycm_extra_conf.py | import os
import ycm_core
# These are the compilation flags that will be used in case there's no
# compilation database set (by default, one is not set).
# CHANGE THIS LIST OF FLAGS. YES, THIS IS THE DROID YOU HAVE BEEN LOOKING FOR.
flags = [
'-Wall',
'-Wextra',
'-Werror',
'-Wno-long-long',
'-Wno-variadic-macros',
'-fexceptions',
'-DNDEBUG',
# Do you use C++11 features?
'-std=c++11',
# Do you want C++98 compatible code?
#'-Wc++98-compat',
# ...and the same thing goes for the magic -x option which specifies the
# language that the files to be compiled are written in. This is mostly
# relevant for c++ headers.
# For a C project, you would set this to 'c' instead of 'c++'.
'-x',
'c++',
# Specific includes.
'-DQT_CORE_LIB',
'-DQT_GUI_LIB',
'-DQT_NETWORK_LIB',
'-DQT_QML_LIB',
'-DQT_QUICK_LIB',
'-DQT_SQL_LIB',
'-DQT_WIDGETS_LIB',
'-DQT_XML_LIB',
'-fPIE',
'-I', '/usr/include/qt5/QtCore',
'-I', '/usr/include/qt5/QtGui',
'-I', '/usr/include/qt5/QtWidgets',
'-I', './third-party/gtest/include',
# Project-specific include.
'-I', './src'
]
# Set this to the absolute path to the folder (NOT the file!) containing the
# compile_commands.json file to use that instead of 'flags'. See here for
# more details: http://clang.llvm.org/docs/JSONCompilationDatabase.html
#
# You can get CMake to generate this file for you by adding:
# set( CMAKE_EXPORT_COMPILE_COMMANDS 1 )
# to your CMakeLists.txt file.
#
# Most projects will NOT need to set this to anything; you can just change the
# 'flags' list of compilation flags. Notice that YCM itself uses that approach.
compilation_database_folder = ''
if os.path.exists( compilation_database_folder ):
  database = ycm_core.CompilationDatabase( compilation_database_folder )
else:
  # No compilation database configured: the static 'flags' above are used.
  database = None
# Extensions tried when looking up flags for a header's sibling source file.
SOURCE_EXTENSIONS = [ '.cpp', '.cxx', '.cc', '.c', '.m', '.mm' ]
def DirectoryOfThisScript():
  """Return the absolute directory that holds this configuration file."""
  here = os.path.abspath( __file__ )
  return os.path.dirname( here )
def MakeRelativePathsInFlagsAbsolute( flags, working_directory ):
  """Return a copy of *flags* with relative include/sysroot paths resolved
  against *working_directory*; a plain copy when no directory is given."""
  if not working_directory:
    return list( flags )
  path_prefixes = [ '-isystem', '-I', '-iquote', '--sysroot=' ]
  absolute_flags = []
  next_is_path = False
  for flag in flags:
    updated = flag
    if next_is_path:
      # Previous flag was a bare '-I'/'-isystem'/...: this one is the path.
      next_is_path = False
      if not flag.startswith( '/' ):
        updated = os.path.join( working_directory, flag )
    for prefix in path_prefixes:
      if flag == prefix:
        next_is_path = True
        break
      if flag.startswith( prefix ):
        # Joined form such as '-Ifoo': rewrite the embedded path.
        relative_part = flag[ len( prefix ): ]
        updated = prefix + os.path.join( working_directory, relative_part )
        break
    if updated:
      absolute_flags.append( updated )
  return absolute_flags
def IsHeaderFile( filename ):
  """True when *filename* carries a C/C++ header extension."""
  return os.path.splitext( filename )[ 1 ] in ( '.h', '.hxx', '.hpp', '.hh' )
def GetCompilationInfoForFile( filename ):
  """Look up compile flags for *filename* in the compilation database."""
  # The compilation_commands.json file generated by CMake does not have entries
  # for header files. So we do our best by asking the db for flags for a
  # corresponding source file, if any. If one exists, the flags for that file
  # should be good enough.
  if IsHeaderFile( filename ):
    basename = os.path.splitext( filename )[ 0 ]
    for extension in SOURCE_EXTENSIONS:
      replacement_file = basename + extension
      if os.path.exists( replacement_file ):
        compilation_info = database.GetCompilationInfoForFile(
          replacement_file )
        if compilation_info.compiler_flags_:
          return compilation_info
    # No sibling source file with usable flags was found for this header.
    return None
  return database.GetCompilationInfoForFile( filename )
def FlagsForFile( filename, **kwargs ):
  """YouCompleteMe entry point: return the compile flags for *filename*."""
  if database:
    # Bear in mind that compilation_info.compiler_flags_ does NOT return a
    # python list, but a "list-like" StringVec object
    compilation_info = GetCompilationInfoForFile( filename )
    if not compilation_info:
      return None
    final_flags = MakeRelativePathsInFlagsAbsolute(
      compilation_info.compiler_flags_,
      compilation_info.compiler_working_dir_ )
  else:
    # No compilation database: fall back to the static module-level 'flags',
    # resolved relative to this script's directory.
    relative_to = DirectoryOfThisScript()
    final_flags = MakeRelativePathsInFlagsAbsolute( flags, relative_to )
  return {
    'flags': final_flags,
    'do_cache': True
  }
| Python | 0 | |
3e723f3419468654c9606b27d2127c94054b4bed | Add YouCompleteMe config for vim autocompletion | .ycm_extra_conf.py | .ycm_extra_conf.py | # This file is NOT licensed under the GPLv3, which is the license for the rest
# of YouCompleteMe.
#
# Here's the license text for this file:
#
# This is free and unencumbered software released into the public domain.
#
# Anyone is free to copy, modify, publish, use, compile, sell, or
# distribute this software, either in source code form or as a compiled
# binary, for any purpose, commercial or non-commercial, and by any
# means.
#
# In jurisdictions that recognize copyright laws, the author or authors
# of this software dedicate any and all copyright interest in the
# software to the public domain. We make this dedication for the benefit
# of the public at large and to the detriment of our heirs and
# successors. We intend this dedication to be an overt act of
# relinquishment in perpetuity of all present and future rights to this
# software under copyright law.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
# EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
# MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
# IN NO EVENT SHALL THE AUTHORS BE LIABLE FOR ANY CLAIM, DAMAGES OR
# OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
# ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
# OTHER DEALINGS IN THE SOFTWARE.
#
# For more information, please refer to <http://unlicense.org/>
import os
import re
import subprocess
import pybind11
def LoadSystemIncludes():
    """Query clang for its default system include directories.

    Runs ``clang -v -E`` on an empty C++ translation unit and scrapes the
    '#include <...> search starts here' section from the verbose output.

    Returns:
        A flat list of alternating '-isystem' / path entries; empty when the
        search list cannot be found in clang's output.
    """
    regex = re.compile(r'(?:\#include \<...\> search starts here\:)'
                       r'(?P<list>.*?)(?:End of search list)', re.DOTALL)
    process = subprocess.Popen(['clang', '-v', '-E', '-x', 'c++', '-'],
                               stdin=subprocess.PIPE, stdout=subprocess.PIPE,
                               stderr=subprocess.PIPE)
    # Feed an empty file on stdin; pass bytes so this works on Python 3 as
    # well as Python 2 (communicate('') raises TypeError on a bytes pipe).
    process_out, process_err = process.communicate(b'')
    output = (process_out + process_err).decode('utf-8')
    includes = []
    match = re.search(regex, output)
    if match is None:
        # Unexpected clang output: better no system includes than a crash.
        return includes
    for p in match.group('list').split('\n'):
        p = p.strip()
        if len(p) > 0 and p.find('(framework directory)') < 0:
            includes.append('-isystem')
            includes.append(p)
    return includes
# These are the compilation flags that will be used in case there's no
# compilation database set (by default, one is not set).
# CHANGE THIS LIST OF FLAGS. YES, THIS IS THE DROID YOU HAVE BEEN LOOKING FOR.
flags = [
    '-Wall',
    '-Wextra',
    '-Werror',
    '-Wno-long-long',
    '-fexceptions',
    '-x',
    'c++',
    '-std=c++14',
    '-stdlib=libc++',
    # pybind11 headers (standard and user-site include locations).
    '-I',
    pybind11.get_include(),
    '-I',
    pybind11.get_include(True),
    '-I',
    'vendor/'
]
# Append clang's default system include directories discovered at load time.
systemIncludes = LoadSystemIncludes()
flags = flags + systemIncludes
# Set this to the absolute path to the folder (NOT the file!) containing the
# compile_commands.json file to use that instead of 'flags'. See here for
# more details: http://clang.llvm.org/docs/JSONCompilationDatabase.html
#
# You can get CMake to generate this file for you by adding:
# set( CMAKE_EXPORT_COMPILE_COMMANDS 1 )
# to your CMakeLists.txt file.
#
# Most projects will NOT need to set this to anything; you can just change the
# 'flags' list of compilation flags. Notice that YCM itself uses that approach.
database = None
SOURCE_EXTENSIONS = [ '.cpp', '.cxx', '.cc', '.c', '.m', '.mm' ]
def DirectoryOfThisScript():
  """Absolute path of the directory containing this config file."""
  return os.path.dirname( os.path.abspath( __file__ ) )
def MakeRelativePathsInFlagsAbsolute( flags, working_directory ):
  """Resolve relative include/sysroot paths in *flags* against
  *working_directory*; returns a new list (plain copy when no directory)."""
  if not working_directory:
    return list( flags )
  new_flags = []
  make_next_absolute = False
  path_flags = [ '-isystem', '-I', '-iquote', '--sysroot=' ]
  for flag in flags:
    new_flag = flag
    if make_next_absolute:
      make_next_absolute = False
      if not flag.startswith( '/' ):
        new_flag = os.path.join( working_directory, flag )
    for path_flag in path_flags:
      if flag == path_flag:
        # Separate form ('-I', 'path'): the next flag is the path.
        make_next_absolute = True
        break
      if flag.startswith( path_flag ):
        # Joined form ('-Ipath'): rewrite the embedded path.
        path = flag[ len( path_flag ): ]
        new_flag = path_flag + os.path.join( working_directory, path )
        break
    if new_flag:
      new_flags.append( new_flag )
  return new_flags
def FlagsForFile( filename, **kwargs ):
  """YouCompleteMe entry point: static flags resolved against this directory."""
  return {
    'flags': MakeRelativePathsInFlagsAbsolute( flags, DirectoryOfThisScript() )
  }
| Python | 0 | |
60e65d31c943f63e646b39350f18e6d177fbb66b | Add okupy.common.test_helpets.set_request | okupy/common/test_helpers.py | okupy/common/test_helpers.py | # vim:fileencoding=utf8:et:ts=4:sts=4:sw=4:ft=python
from django.contrib.auth.models import AnonymousUser
from django.contrib.messages.middleware import MessageMiddleware
from django.contrib.sessions.middleware import SessionMiddleware
from django.contrib.messages.storage.cookie import CookieStorage
from django.test import TestCase, RequestFactory
def set_request(uri, post=False, user=False, messages=False):
    """Build a Django test request for *uri* using RequestFactory.

    Args:
        uri: path the request targets.
        post: falsy for a GET request; ``True`` for a POST with no data,
            or a dict of POST data.
        user: user object to attach; falsy attaches an ``AnonymousUser``.
        messages: when true, run the session and message middleware so the
            messages framework can be exercised on the request.

    Returns:
        The prepared request instance.
    """
    if post:
        # ``post=True`` selects an empty POST body; a dict is used as-is.
        data = {} if isinstance(post, bool) else post
        request = RequestFactory().post(uri, data)
    else:
        request = RequestFactory().get(uri)
    if user:
        request.user = user
    else:
        request.user = AnonymousUser()
    # Presumably satisfies a two-factor ``is_verified`` check (django-otp
    # style) during tests -- TODO confirm against the auth backend in use.
    request.user.is_verified = lambda: True
    request.session = {}
    if messages:
        # The middleware attach a real session/message storage, replacing
        # the bare dict assigned above.
        SessionMiddleware().process_request(request)
        MessageMiddleware().process_request(request)
    return request
class OkupyTestCase(TestCase):
    """TestCase with assertion helpers over Django contrib.messages
    attached to a response (context messages or the messages cookie)."""
    def _get_matches(self, response, text):
        """ Get messages that match the given text """
        messages = self._get_messages(response)
        if messages:
            matches = [m for m in messages if text == m.message]
            return matches
        else:
            self.fail('No messages found')
    def _get_messages(self, response):
        """ Get all messages from the context or the CookieStorage """
        try:
            messages = response.context['messages']
        except (TypeError, KeyError):
            try:
                # Fall back to messages serialized client-side in the cookie.
                messages = CookieStorage(response)._decode(
                    response.cookies['messages'].value)
            except KeyError:
                # No messages cookie either: report "no messages" as None.
                return
        return messages
    def assertMessageCount(self, response, expect_num):
        """
        Asserts that exactly the given number of messages have been sent.
        """
        messages = self._get_messages(response)
        if messages:
            actual_num = len(messages)
        else:
            actual_num = 0
        if actual_num != expect_num:
            self.fail('Message count was %d, expected %d' %
                      (actual_num, expect_num))
    def assertMessage(self, response, text, level=None):
        """
        Asserts that there is exactly one message containing the given text.
        """
        matches = self._get_matches(response, text)
        if len(matches) == 1:
            msg = matches[0]
            if level is not None and msg.level != level:
                self.fail('There was one matching message but with different '
                          'level: %s != %s' % (msg.level, level))
        elif len(matches) == 0:
            messages_str = ", ".join(
                '"%s"' % m for m in self._get_messages(response))
            self.fail('No message contained text "%s", messages were: %s' %
                      (text, messages_str))
        else:
            self.fail('Multiple messages contained text "%s": %s' %
                      (text, ", ".join(('"%s"' % m) for m in matches)))
    def assertNotMessage(self, response, text):
        """ Assert that no message contains the given text. """
        matches = self._get_matches(response, text)
        if len(matches) > 0:
            self.fail('Message(s) contained text "%s": %s' %
                      (text, ", ".join(('"%s"' % m) for m in matches)))
| # vim:fileencoding=utf8:et:ts=4:sts=4:sw=4:ft=python
from django.test import TestCase
from django.contrib.messages.storage.cookie import CookieStorage
class OkupyTestCase(TestCase):
    """TestCase with assertion helpers over Django contrib.messages."""
    def _get_matches(self, response, text):
        """ Get messages that match the given text """
        messages = self._get_messages(response)
        if messages:
            matches = [m for m in messages if text == m.message]
            return matches
        else:
            self.fail('No messages found')
    def _get_messages(self, response):
        """ Get all messages from the context or the CookieStorage """
        try:
            messages = response.context['messages']
        except (TypeError, KeyError):
            try:
                # Fall back to messages serialized in the cookie.
                messages = CookieStorage(response)._decode(
                    response.cookies['messages'].value)
            except KeyError:
                return
        return messages
    def assertMessageCount(self, response, expect_num):
        """
        Asserts that exactly the given number of messages have been sent.
        """
        messages = self._get_messages(response)
        if messages:
            actual_num = len(messages)
        else:
            actual_num = 0
        if actual_num != expect_num:
            self.fail('Message count was %d, expected %d' %
                      (actual_num, expect_num))
    def assertMessage(self, response, text, level=None):
        """
        Asserts that there is exactly one message containing the given text.
        """
        matches = self._get_matches(response, text)
        if len(matches) == 1:
            msg = matches[0]
            if level is not None and msg.level != level:
                self.fail('There was one matching message but with different '
                          'level: %s != %s' % (msg.level, level))
        elif len(matches) == 0:
            messages_str = ", ".join(
                '"%s"' % m for m in self._get_messages(response))
            self.fail('No message contained text "%s", messages were: %s' %
                      (text, messages_str))
        else:
            self.fail('Multiple messages contained text "%s": %s' %
                      (text, ", ".join(('"%s"' % m) for m in matches)))
    def assertNotMessage(self, response, text):
        """ Assert that no message contains the given text. """
        matches = self._get_matches(response, text)
        if len(matches) > 0:
            self.fail('Message(s) contained text "%s": %s' %
                      (text, ", ".join(('"%s"' % m) for m in matches)))
| Python | 0.000007 |
b25a172cd89e8811e5cb38414bdf86ef5a5afaee | fix ABC for py2.7 | rhea/system/cso.py | rhea/system/cso.py |
from __future__ import absolute_import

from abc import ABCMeta, abstractclassmethod, abstractmethod

from myhdl import Signal, SignalType, always_comb
class ControlStatusBase(object):
    """ Base class for control and status classes

    Many complex digital blocks have control and status interfaces.
    This is the base class for the specific control and status
    objects (typically ``ControlStatus``) in a block; the
    control-status-objects (CSO) can be used to dynamically
    interact with the block from other blocks, statically configure,
    or assign to a register-file that can be accessed from a
    memory-mapped bus.
    """
    # Python 2 style metaclass hook ("fix ABC for py2.7"); under Python 3
    # this is a plain attribute, so abstractness is not enforced there.
    __metaclass__ = ABCMeta

    def __init__(self):
        self._isstatic = False

    @property
    def isstatic(self):
        """True when the CSO is statically configured."""
        return self._isstatic

    @isstatic.setter
    def isstatic(self, val):
        self._isstatic = val

    def get_config_bits(self):
        """Collect initial values of the undriven configuration signals.

        Returns:
            dict mapping attribute name to ``initial_value`` for every
            attribute that is a myhdl ``SignalType`` marked ``config``
            and not driven by other logic.
        """
        cfgbits = {}
        for name, sig in vars(self).items():
            if isinstance(sig, SignalType) and sig.config and not sig.driven:
                cfgbits[name] = sig.initial_value
        return cfgbits

    # Bugfix: these are instance methods (they take ``self``), so they must
    # be marked with ``abstractmethod`` -- ``abstractclassmethod`` would turn
    # them into classmethods and break ``self`` in overriding subclasses.
    @abstractmethod
    def default_assign(self):
        """ A myhdl.block that assigns the control-status defaults.
        For certain synthesis tools the static values of the signals
        need to be assigned. This will return generators to keep
        the default signals. If the synthesis tool supports initial
        values, initial values should be used otherwise this can be
        used to assign a static value to a signal. Note, the synthesis
        tool will generate warnings that the signal is stuck at a
        value - this is desired.
        Returns:
            myhdl generators
        """
        # Bugfix: ``raise NotImplemented`` raises a non-exception object
        # (a TypeError on Python 3); NotImplementedError is the exception.
        raise NotImplementedError

    def get_register_file(self):
        """ get the register-file for this control-status object"""
        # @todo: this function currently lives in memmap.regfile
        # @todo: return build_register_file(self)
        return None

    @abstractmethod
    def get_generators(self):
        """ get any hardware logic associated with the cso"""
        return None
def assign_config(sig, val):
    """
    Arguments:
        sig (Signal): The signals to be assigned to a constant value
        val (int): The constant value
    """
    # Dummy signal, flagged as driven so tools don't report it undriven.
    keep = Signal(bool(0))
    keep.driven = 'wire'
    @always_comb
    def beh_assign():
        # NOTE(review): ``val if keep else val`` always yields ``val``; the
        # conditional appears to exist only so ``keep`` lands in the
        # sensitivity list that always_comb infers -- confirm before
        # simplifying this expression.
        sig.next = val if keep else val
    return beh_assign
|
from __future__ import absolute_import
from abc import ABCMeta, abstractclassmethod
from myhdl import Signal, SignalType, always_comb
class ControlStatusBase(metaclass=ABCMeta):
def __init__(self):
self._isstatic = False
@property
def isstatic(self):
return self._isstatic
@isstatic.setter
def isstatic(self, val):
self._isstatic = val
def get_config_bits(self):
attrs = vars(self)
cfgbits = {}
for k, v in attrs.items():
if isinstance(v, SignalType) and v.config and not v.driven:
cfgbits[k] = v.initial_value
return cfgbits
@abstractclassmethod
def default_assign(self):
raise NotImplemented
def get_register_file(self):
""" get the register-file for this control-status object"""
# @todo: this function currently lives in memmap.regfile
# @todo: return build_register_file(self)
return None
@abstractclassmethod
def get_generators(self):
""" get any hardware logic associated with the cso"""
return None
def assign_config(sig, val):
"""
Arguments:
sig (Signal): The signals to be assigned to a constant value
val (int): The constant value
"""
keep = Signal(bool(0))
keep.driven = 'wire'
@always_comb
def beh_assign():
sig.next = val if keep else val
return beh_assign
| Python | 0.99992 |
682b52e3f5b1f1de5009e7fc7fac95f453dbe631 | Enable more content in header/footer | rinohlib/templates/manual.py | rinohlib/templates/manual.py |
from rinoh.document import Document, DocumentPart, Page, PORTRAIT
from rinoh.dimension import PT, CM
from rinoh.layout import Container, FootnoteContainer, Chain, \
UpExpandingContainer, DownExpandingContainer
from rinoh.paper import A4
from rinoh.structure import Section, Heading, TableOfContents, Header, Footer
# page definition
# ----------------------------------------------------------------------------
class SimplePage(Page):
    """Single-column page with optional expanding header/footer bands."""
    topmargin = bottommargin = 3*CM
    leftmargin = rightmargin = 2*CM
    # Vertical gap between the body and the header/footer containers.
    header_footer_distance = 14*PT
    def __init__(self, chain, paper, orientation, header_footer=True):
        super().__init__(chain.document, paper, orientation)
        body_width = self.width - (self.leftmargin + self.rightmargin)
        body_height = self.height - (self.topmargin + self.bottommargin)
        self.body = Container('body', self, self.leftmargin, self.topmargin,
                              body_width, body_height)
        # Footnotes collect at the bottom of the body; the main content
        # container ends where the footnote area begins.
        self.footnote_space = FootnoteContainer('footnotes', self.body, 0*PT,
                                                body_height)
        self.content = Container('content', self.body, 0*PT, 0*PT,
                                 bottom=self.footnote_space.top,
                                 chain=chain)
        self.content._footnote_space = self.footnote_space
        if header_footer:
            # Header grows upward from just above the body, footer grows
            # downward from just below it.
            header_bottom = self.body.top - self.header_footer_distance
            self.header = UpExpandingContainer('header', self,
                                               left=self.leftmargin,
                                               bottom=header_bottom,
                                               width=body_width)
            footer_vpos = self.body.bottom + self.header_footer_distance
            self.footer = DownExpandingContainer('footer', self,
                                                 left=self.leftmargin,
                                                 top=footer_vpos,
                                                 width=body_width)
            header_text = chain.document.options['header_text']
            footer_text = chain.document.options['footer_text']
            self.header.append_flowable(Header(header_text))
            self.footer.append_flowable(Footer(footer_text))
# document parts
# ----------------------------------------------------------------------------
# class TitlePart(DocumentPart)
class ManualPart(DocumentPart):
    """Document part that flows its content through a single chain."""
    def __init__(self, document):
        super().__init__(document)
        self.chain = Chain(document)
    def init(self):
        # Create the part's first page.
        self.new_page([self.chain])
    def new_page(self, chains):
        """Append a new SimplePage fed by the (single) chain and return
        its content container."""
        assert (len(chains) == 1)
        page = SimplePage(next(iter(chains)),
                          self.document.options['page_size'],
                          self.document.options['page_orientation'],
                          header_footer=self.header_footer)
        self.page_count += 1
        self.add_page(page, self.page_count)
        return page.content
class TableOfContentsPart(ManualPart):
    """Front-matter part holding the table of contents (no header/footer)."""
    header_footer = False
    def __init__(self, document):
        super().__init__(document)
        self.chain << Section([Heading('Table of Contents', style='unnumbered'),
                               TableOfContents()])
class ContentsPart(ManualPart):
    """Main part rendering the content tree's children, with header/footer."""
    header_footer = True
    def __init__(self, document, content_tree):
        super().__init__(document)
        for child in content_tree.getchildren():
            self.chain << child.flowable()
# main document
# ----------------------------------------------------------------------------
class Manual(Document):
    """Manual-style document: a table of contents followed by the content."""
    def __init__(self, rinoh_tree, stylesheet, options=None, backend=None,
                 title=None):
        super().__init__(stylesheet, backend=backend, title=title)
        self.options = options or ManualOptions()
        self.add_part(TableOfContentsPart(self))
        self.add_part(ContentsPart(self, rinoh_tree))
class ManualOptions(dict):
    """Option mapping for :class:`Manual`.

    Unknown option names are rejected at construction time; reading a key
    that was never set falls back to the class-level defaults.
    """
    options = {'page_size': A4,
               'page_orientation': PORTRAIT,
               'header_text': None,
               'footer_text': None}

    def __init__(self, **options):
        for name, value in options.items():
            if name in self.options:
                self[name] = value
            else:
                raise ValueError("Unknown option '{}'".format(name))

    def __getitem__(self, key):
        if super().__contains__(key):
            return super().__getitem__(key)
        return self.options[key]
|
from rinoh.document import Document, DocumentPart, Page, PORTRAIT
from rinoh.dimension import PT, CM
from rinoh.layout import Container, FootnoteContainer, Chain
from rinoh.paper import A4
from rinoh.structure import Section, Heading, TableOfContents, Header, Footer
# page definition
# ----------------------------------------------------------------------------
class SimplePage(Page):
    """Single-column page with optional fixed-height header/footer."""
    topmargin = bottommargin = 3*CM
    leftmargin = rightmargin = 2*CM
    def __init__(self, chain, paper, orientation, header_footer=True):
        super().__init__(chain.document, paper, orientation)
        body_width = self.width - (self.leftmargin + self.rightmargin)
        body_height = self.height - (self.topmargin + self.bottommargin)
        self.body = Container('body', self, self.leftmargin, self.topmargin,
                              body_width, body_height)
        self.footnote_space = FootnoteContainer('footnotes', self.body, 0*PT,
                                                body_height)
        self.content = Container('content', self.body, 0*PT, 0*PT,
                                 bottom=self.footnote_space.top,
                                 chain=chain)
        self.content._footnote_space = self.footnote_space
        if header_footer:
            # Header sits inside the top margin, footer inside the bottom
            # margin; both are fixed 12 pt containers.
            self.header = Container('header', self, self.leftmargin,
                                    self.topmargin / 2, body_width, 12*PT)
            footer_vpos = self.topmargin + body_height + self.bottommargin / 2
            self.footer = Container('footer', self, self.leftmargin,
                                    footer_vpos, body_width, 12*PT)
            header_text = chain.document.options['header_text']
            footer_text = chain.document.options['footer_text']
            self.header.append_flowable(Header(header_text))
            self.footer.append_flowable(Footer(footer_text))
# document parts
# ----------------------------------------------------------------------------
# class TitlePart(DocumentPart)
class ManualPart(DocumentPart):
    """Document part flowing its content through a single chain."""
    def __init__(self, document):
        super().__init__(document)
        self.chain = Chain(document)
    def init(self):
        # Create the part's first page.
        self.new_page([self.chain])
    def new_page(self, chains):
        """Append a new SimplePage fed by the single chain; return its
        content container."""
        assert (len(chains) == 1)
        page = SimplePage(next(iter(chains)),
                          self.document.options['page_size'],
                          self.document.options['page_orientation'],
                          header_footer=self.header_footer)
        self.page_count += 1
        self.add_page(page, self.page_count)
        return page.content
class TableOfContentsPart(ManualPart):
    """Front-matter part holding the table of contents (no header/footer)."""
    header_footer = False
    def __init__(self, document):
        super().__init__(document)
        self.chain << Section([Heading('Table of Contents', style='unnumbered'),
                               TableOfContents()])
class ContentsPart(ManualPart):
    """Main part rendering the content tree's children, with header/footer."""
    header_footer = True
    def __init__(self, document, content_tree):
        super().__init__(document)
        for child in content_tree.getchildren():
            self.chain << child.flowable()
# main document
# ----------------------------------------------------------------------------
class Manual(Document):
    """Manual-style document: a table of contents followed by the content."""
    def __init__(self, rinoh_tree, stylesheet, options=None, backend=None,
                 title=None):
        super().__init__(stylesheet, backend=backend, title=title)
        self.options = options or ManualOptions()
        self.add_part(TableOfContentsPart(self))
        self.add_part(ContentsPart(self, rinoh_tree))
class ManualOptions(dict):
    """Options dict for Manual; unknown keys are rejected and missing keys
    fall back to the class-level defaults."""
    options = {'page_size': A4,
               'page_orientation': PORTRAIT,
               'header_text': None,
               'footer_text': None}
    def __init__(self, **options):
        for name, value in options.items():
            if name not in self.options:
                raise ValueError("Unknown option '{}'".format(name))
            self[name] = value
    def __getitem__(self, key):
        try:
            return super().__getitem__(key)
        except KeyError:
            # Fall back to the documented default value.
            return self.options[key]
| Python | 0 |
c0ab9b755b4906129988348b2247452b6dfc157f | Add a module to set the "display name" of a dedicated server | plugins/modules/dedicated_server_display_name.py | plugins/modules/dedicated_server_display_name.py | #!/usr/bin/python
# -*- coding: utf-8 -*-
from __future__ import (absolute_import, division, print_function)
from ansible.module_utils.basic import AnsibleModule
__metaclass__ = type
DOCUMENTATION = '''
---
module: dedicated_server_display_name
short_description: Modify the server display name in ovh manager
description:
- Modify the server display name in ovh manager, to help you find your server with your own naming
author: Synthesio SRE Team
requirements:
- ovh >= 0.5.0
options:
service_name:
required: true
description: The service name
display_name:
required: true
description: The display name to set
'''
EXAMPLES = '''
synthesio.ovh.display_name
service_name: "{{ ovhname }}"
display_name: "{{ ansible_hostname }}"
delegate_to: localhost
'''
RETURN = ''' # '''
from ansible_collections.synthesio.ovh.plugins.module_utils.ovh import ovh_api_connect, ovh_argument_spec
# Import guard: record whether the ovh client library is available instead
# of failing at import time (standard Ansible module pattern).
# NOTE(review): HAS_OVH is set but never checked in this file, while
# run_module references APIError unconditionally -- verify the intent.
try:
    from ovh.exceptions import APIError
    HAS_OVH = True
except ImportError:
    HAS_OVH = False
def run_module():
    """Set the OVH manager display name for a dedicated server.

    Reads ``service_name`` and ``display_name`` from the module arguments,
    resolves the internal service id via the OVH API, then updates the
    service's display name. Exits through AnsibleModule's exit_json /
    fail_json in every path.
    """
    module_args = ovh_argument_spec()
    module_args.update(dict(
        display_name=dict(required=True),
        service_name=dict(required=True)
    ))
    module = AnsibleModule(
        argument_spec=module_args,
        supports_check_mode=True
    )
    client = ovh_api_connect(module)
    display_name = module.params['display_name']
    service_name = module.params['service_name']
    if module.check_mode:
        # Dry run: report as changed without touching the API.
        module.exit_json(msg="display_name has been set to {} ! - (dry run mode)".format(display_name), changed=True)
    try:
        # Look up the internal service id for the dedicated server.
        result = client.get('/dedicated/server/%s/serviceInfos' % service_name)
    except APIError as api_error:
        return module.fail_json(msg="Failed to call OVH API: {0}".format(api_error))
    service_id = result["serviceId"]
    resource = {
        "resource": {
            'displayName': display_name,
            'name': service_name}}
    try:
        client.put(
            '/service/%s' % service_id,
            **resource
        )
        module.exit_json(
            msg="displayName succesfully set to {} for {} !".format(display_name, service_name),
            changed=True)
    except APIError as api_error:
        return module.fail_json(msg="Failed to call OVH API: {0}".format(api_error))
def main():
    """Module entry point."""
    run_module()
if __name__ == '__main__':
    main()
| Python | 0 | |
ffdee2f18d5e32c2d0b4f4eb0cebe8b63ee555f7 | Document tools/mac/dump-static-initializers.py more. | tools/mac/dump-static-initializers.py | tools/mac/dump-static-initializers.py | #!/usr/bin/env python
# Copyright (c) 2013 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""
Dumps a list of files with static initializers. Use with release builds.
Usage:
tools/mac/dump-static-initializers.py out/Release/Chromium\ Framework.framework.dSYM/Contents/Resources/DWARF/Chromium\ Framework
Do NOT use mac_strip_release=0 or component=shared_library if you want to use
this script.
"""
import optparse
import re
import subprocess
import sys
# Regular expressions over `dsymutil -s` symbol-table output lines.
# Matches for example:
# [ 1] 000001ca 64 (N_SO ) 00 0000 0000000000000000 'test.cc'
dsymutil_file_re = re.compile("N_SO.*'([^']*)'")
# Matches for example:
# [ 2] 000001d2 66 (N_OSO ) 00 0001 000000004ed856a0 '/Volumes/MacintoshHD2/src/chrome-git/src/test.o'
dsymutil_o_file_re = re.compile("N_OSO.*'([^']*)'")
# Matches for example:
# [ 8] 00000233 24 (N_FUN ) 01 0000 0000000000001b40 '__GLOBAL__I_s'
# [185989] 00dc69ef 26 (N_STSYM ) 02 0000 00000000022e2290 '__GLOBAL__I_a'
dsymutil_re = re.compile(r"(?:N_FUN|N_STSYM).*\s[0-9a-f]*\s'__GLOBAL__I_")
def ParseDsymutil(binary):
  """Given a binary, prints source and object filenames for files with
  static initializers.
  """
  child = subprocess.Popen(['dsymutil', '-s', binary], stdout=subprocess.PIPE)
  for line in child.stdout:
    file_match = dsymutil_file_re.search(line)
    if file_match:
      # N_SO entry: remember the current source file name.
      current_filename = file_match.group(1)
    else:
      o_file_match = dsymutil_o_file_re.search(line)
      if o_file_match:
        # N_OSO entry: remember the current object file path.
        current_o_filename = o_file_match.group(1)
      else:
        match = dsymutil_re.search(line)
        if match:
          # __GLOBAL__I_* symbol found: this file has a static initializer.
          # NOTE(review): assumes an N_SO/N_OSO entry always precedes the
          # initializer symbol; otherwise these names are unbound.
          print current_filename
          print current_o_filename
          print
def main():
  """Parse the command line and report static initializers in the binary."""
  parser = optparse.OptionParser(usage='%prog filename')
  _opts, arguments = parser.parse_args()
  if len(arguments) != 1:
    parser.error('missing filename argument')
    return 1
  ParseDsymutil(arguments[0])
  return 0
if __name__ == '__main__':
  sys.exit(main())
| #!/usr/bin/env python
# Copyright (c) 2013 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
import optparse
import re
import subprocess
import sys
# Matches for example:
# [ 1] 000001ca 64 (N_SO ) 00 0000 0000000000000000 'test.cc'
dsymutil_file_re = re.compile("N_SO.*'([^']*)'")
# Matches for example:
# [ 2] 000001d2 66 (N_OSO ) 00 0001 000000004ed856a0 '/Volumes/MacintoshHD2/src/chrome-git/src/test.o'
dsymutil_o_file_re = re.compile("N_OSO.*'([^']*)'")
# Matches for example:
# [ 8] 00000233 24 (N_FUN ) 01 0000 0000000000001b40 '__GLOBAL__I_s'
# [185989] 00dc69ef 26 (N_STSYM ) 02 0000 00000000022e2290 '__GLOBAL__I_a'
dsymutil_re = re.compile(r"(?:N_FUN|N_STSYM).*\s[0-9a-f]*\s'__GLOBAL__I_")
def ParseDsymutil(binary):
"""Given a binary, prints source and object filenames for files with
static initializers.
"""
child = subprocess.Popen(['dsymutil', '-s', binary], stdout=subprocess.PIPE)
for line in child.stdout:
file_match = dsymutil_file_re.search(line)
if file_match:
current_filename = file_match.group(1)
else:
o_file_match = dsymutil_o_file_re.search(line)
if o_file_match:
current_o_filename = o_file_match.group(1)
else:
match = dsymutil_re.search(line)
if match:
print current_filename
print current_o_filename
print
def main():
parser = optparse.OptionParser(usage='%prog filename')
opts, args = parser.parse_args()
if len(args) != 1:
parser.error('missing filename argument')
return 1
binary = args[0]
ParseDsymutil(binary)
return 0
if '__main__' == __name__:
sys.exit(main())
| Python | 0.000002 |
b783ddcaad104ac5cfa7b6903852e6f68b736bf3 | Add python implementation of motion propagation. | tools/propagate/motion_propagation.py | tools/propagate/motion_propagation.py | #!/usr/bin/env python
import argparse
import glob
import os
import scipy.io as sio
from vdetlib.utils.protocol import proto_load
from vdetlib.utils.common import imread
import numpy as np
import pdb
def _boxes_average_sum(motionmap, boxes, box_ratio=1.0):
    """Mean of `motionmap` inside each box, via a summed-area table.

    Args:
        motionmap: 2-D array (h, w) of per-pixel values.
        boxes: (N, 4) array of 1-indexed, inclusive [col1, row1, col2, row2].
        box_ratio: shrink each box about its center to this fraction of its
            width/height before averaging (1.0 keeps the full box).

    Returns:
        (N,) array with the mean value inside each (possibly shrunk) box.
    """
    h, w = motionmap.shape
    # Summed-area table: accum[r, c] == motionmap[:r+1, :c+1].sum()
    accum = np.cumsum(np.cumsum(motionmap, axis=0), axis=1)
    boxes = boxes - 1  # 1-indexed -> 0-indexed
    col1 = boxes[:, 0]
    row1 = boxes[:, 1]
    col2 = boxes[:, 2]
    row2 = boxes[:, 3]
    # Shrink each box about its center by (1 - box_ratio).
    n_row = row2 - row1 + 1
    n_col = col2 - col1 + 1
    col1 = np.round(col1 + 0.5 * (1. - box_ratio) * n_col)
    row1 = np.round(row1 + 0.5 * (1. - box_ratio) * n_row)
    col2 = np.round(col2 - 0.5 * (1. - box_ratio) * n_col)
    row2 = np.round(row2 - 0.5 * (1. - box_ratio) * n_row)
    # Clip to the map and convert to integer indices.
    col1 = np.clip(col1, 0, w - 1).astype(int)
    row1 = np.clip(row1, 0, h - 1).astype(int)
    col2 = np.clip(col2, 0, w - 1).astype(int)
    row2 = np.clip(row2, 0, h - 1).astype(int)
    n_row = row2 - row1 + 1
    n_col = col2 - col1 + 1
    # Bugfix: the box sum must use the standard 4-corner identity
    #   S = A[r2, c2] - A[r1-1, c2] - A[r2, c1-1] + A[r1-1, c1-1]
    # (previously the left strip was read at the top-left corner and the
    # -1 offsets were missing, so the returned values were not box means).
    # Terms that fall off the top/left edge contribute zero.
    total = accum[row2, col2].astype(float)
    top = np.where(row1 > 0, accum[np.maximum(row1 - 1, 0), col2], 0.)
    left = np.where(col1 > 0, accum[row2, np.maximum(col1 - 1, 0)], 0.)
    corner = np.where((row1 > 0) & (col1 > 0),
                      accum[np.maximum(row1 - 1, 0), np.maximum(col1 - 1, 0)],
                      0.)
    values = total - top - left + corner
    return values / (n_row * n_col)
def optflow_transform(optflow):
    """Map 8-bit optical-flow values (0..255) back to the range [-15, 15]."""
    bound = 15
    normalized = optflow.astype('single') / 255.
    return normalized * (2 * bound) - bound
if __name__ == '__main__':
    parser = argparse.ArgumentParser()
    parser.add_argument('vid_file')
    parser.add_argument('det_dir')
    parser.add_argument('flow_dir')
    parser.add_argument('save_dir')
    parser.add_argument('--window', default=3, type=int)
    args = parser.parse_args()

    # The temporal window must be odd so each frame's detections propagate
    # symmetrically half_tws frames backwards and forwards.
    window = args.window
    assert window > 0 and window % 2 == 1
    half_tws = (window - 1) / 2
    vid_proto = proto_load(args.vid_file)
    det_files = glob.glob(os.path.join(args.det_dir, '*.mat'))
    flow_files = glob.glob(os.path.join(args.flow_dir, '*.png'))
    n_frames = len(vid_proto['frames'])
    # One detection .mat and one optical-flow .png expected per video frame.
    assert len(vid_proto['frames']) == len(det_files)
    assert len(vid_proto['frames']) == len(flow_files)

    # Load per-frame detections and count how many boxes propagation should
    # yield: each frame's boxes are copied to up to half_tws neighbors on
    # each side (fewer at the sequence boundaries).
    all_boxes = []
    all_scores = []
    num_boxes_before = 0
    num_expected = 0
    for idx, det_file in enumerate(det_files):
        det = sio.loadmat(det_file)
        all_boxes.append(det['boxes'])
        all_scores.append(det['zs'])
        num_cur_boxes = det['boxes'].shape[0]
        num_boxes_before += num_cur_boxes
        num_expected += num_cur_boxes
        num_expected += min(idx, half_tws) * num_cur_boxes
        num_expected += min(n_frames - idx - 1, half_tws) * num_cur_boxes

    # propagation
    for local_idx, (frame, det_file, flow_file) in \
            enumerate(zip(vid_proto['frames'], det_files, flow_files)):
        print "Propagating frame {}".format(local_idx+1)
        det = sio.loadmat(det_file)

        # read optical flows
        # rgb is reversed to bgr when using opencv
        optflow = imread(flow_file)[:,:,::-1]
        x_map = optflow_transform(optflow[:,:,0])
        y_map = optflow_transform(optflow[:,:,1])
        n_row, n_col = x_map.shape

        # read detections
        num_boxes = det['boxes'].shape[0]
        boxes = det['boxes'].reshape((-1, 4))
        # Mean flow inside each box serves as a per-box translation estimate,
        # applied identically to both box corners.
        box_avg_x = _boxes_average_sum(x_map, boxes)
        box_avg_x = box_avg_x.reshape((num_boxes, -1, 1))
        box_avg_y = _boxes_average_sum(y_map, boxes)
        box_avg_y = box_avg_y.reshape((num_boxes, -1, 1))
        motion_shift = np.concatenate(
            (box_avg_x, box_avg_y, box_avg_x, box_avg_y), axis=2)

        # motion propagation
        for offset in range(-half_tws, half_tws+1):
            if offset == 0: continue
            neighbor_frame_idx = local_idx + offset
            if neighbor_frame_idx < 0 or neighbor_frame_idx >= n_frames:
                continue
            cur_boxes = det['boxes']
            cur_scores = det['zs']
            # NOTE(review): boxes are shifted linearly by offset * mean flow,
            # which assumes roughly constant motion across the window.
            cur_boxes = cur_boxes + motion_shift * offset
            # clipping
            cur_boxes = np.clip(cur_boxes, 1,
                np.array([n_col,n_row,n_col,n_row]).reshape((1, 1, 4)))
            all_boxes[neighbor_frame_idx] = \
                np.concatenate((all_boxes[neighbor_frame_idx], cur_boxes), axis=0)
            all_scores[neighbor_frame_idx] = \
                np.concatenate((all_scores[neighbor_frame_idx], cur_scores), axis=0)

    # Sanity report: actual box count should equal the expected count.
    num_boxes_after = 0
    for box in all_boxes:
        num_boxes_after += box.shape[0]
    print "Originally {} boxes, expected {} boxes, now {} boxes.".format(
        num_boxes_before, num_expected, num_boxes_after)

    # save results
    if not os.path.isdir(args.save_dir):
        os.makedirs(args.save_dir)
    print "Saving..."
    for frame, boxes, scores in zip(vid_proto['frames'], all_boxes, all_scores):
        frame_name = os.path.splitext(frame['path'])[0]
        sio.savemat(os.path.join(args.save_dir, frame_name+'.mat'),
            {'boxes': boxes, 'zs': scores})
| Python | 0 | |
def to_camel_case(text):
    """Convert dash/underscore-delimited text to camel case, keeping the
    case of the first character as written."""
    if len(text) < 2:
        return text
    words = text.replace('-', '_').split('_')
    joined = "".join(w.title() for w in words)
    if text[0].isupper():
        return joined
    return joined[0].lower() + joined[1:]
| Python | 0.999999 | |
6dd1545ae9ff3ac10586144494f763bcc1bea1d8 | Add script to verify that image files exist for every actual_result checksum | tools/verify_images_for_gm_results.py | tools/verify_images_for_gm_results.py | #!/usr/bin/python
# Copyright (c) 2013 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
""" Look through skia-autogen, searching for all checksums which should have
corresponding files in Google Storage, and verify that those files exist. """
import json
import posixpath
import re
import subprocess
import sys
AUTOGEN_URL = 'http://skia-autogen.googlecode.com/svn/gm-actual'
GS_URL = 'gs://chromium-skia-gm/gm'
TEST_NAME_PATTERN = re.compile('(\S+)_(\S+).png')
def FileNameToGSURL(filename, hash_type, hash_value):
  """ Convert a file name given in a checksum file to the URL of the
  corresponding image file in Google Storage.

  filename: string; the file name to convert. Takes the form specified by
      TEST_NAME_PATTERN.
  hash_type: string; the type of the checksum.
  hash_value: string; the checksum itself.

  Raises an Exception if filename does not match TEST_NAME_PATTERN.
  """
  match = TEST_NAME_PATTERN.match(filename)
  # Check for a match before dereferencing it; the old code called
  # .group(1) first, so a bad filename raised AttributeError instead of
  # this meaningful error.
  if not match:
    raise Exception('Invalid test name for file: %s' % filename)
  return '%s/%s/%s/%s.png' % (GS_URL, hash_type, match.group(1), hash_value)
def FindURLSInJSON(json_file, gs_urls):
  """ Extract Google Storage URLs from a JSON file in svn, adding them to
  the gs_urls dictionary.

  json_file: string; URL of the JSON file.
  gs_urls: dict; maps each Google Storage URL to the list of JSON files
      which reference it.
  """
  raw = subprocess.check_output(['svn', 'cat', json_file])
  json_content = json.loads(raw)
  # Only the 'actual-results' section is scanned for image references.
  for dict_type in ['actual-results']:
    by_result_type = json_content[dict_type]
    for result_type in by_result_type:
      results = by_result_type[result_type]
      if not results:
        continue
      for result in results.keys():
        hash_type, hash_value = results[result]
        gs_url = FileNameToGSURL(result, hash_type, str(hash_value))
        # Record which JSON file referenced this image.
        gs_urls.setdefault(gs_url, []).append(json_file)
def _FindJSONFiles(url, json_files):
""" Helper function for FindJsonFiles. Recursively explore the repository,
adding JSON files to a list.
url: string; URL of the repository (or subdirectory thereof) to explore.
json_files: list to which JSON file urls will be added.
"""
proc = subprocess.Popen(['svn', 'ls', url], stdout=subprocess.PIPE,
stderr=subprocess.STDOUT)
if proc.wait() != 0:
raise Exception('Failed to list svn directory.')
output = proc.communicate()[0].splitlines()
subdirs = []
for item in output:
if item.endswith(posixpath.sep):
subdirs.append(item)
elif item.endswith('.json'):
json_files.append(posixpath.join(url, item))
else:
print 'Warning: ignoring %s' % posixpath.join(url, item)
for subdir in subdirs:
_FindJSONFiles(posixpath.join(url, subdir), json_files)
def FindJSONFiles(url):
""" Recursively explore the given repository and return a list of the JSON
files it contains.
url: string; URL of the repository to explore.
"""
print 'Searching for JSON files in %s' % url
json_files = []
_FindJSONFiles(url, json_files)
return json_files
def FindURLs(url):
""" Find Google Storage URLs inside of JSON files in the given repository.
Returns a dictionary whose keys are Google Storage URLs and values are lists
of the JSON files which reference them.
url: string; URL of the repository to explore.
Example output:
{ 'gs://chromium-skia-gm/gm/sometest/12345.png': [
'http://skia-autogen.googlecode.com/svn/gm-actual/base-macmini/Test-Mac10.6-MacMini4.1-GeForce320M-x86-Debug/base-macmini/actual-results.json',
'http://skia-autogen.googlecode.com/svn/gm-actual/base-macmini-10_8/Test-Mac10.8-MacMini4.1-GeForce320M-x86-Debug/base-macmini-10_8/actual-results.json',
]
}
"""
gs_urls = {}
for json_file in FindJSONFiles(url):
print 'Looking for checksums in %s' % json_file
FindURLSInJSON(json_file, gs_urls)
return gs_urls
def VerifyURL(url):
  """ Return True if the given Google Storage URL exists, False otherwise.

  url: string; the Google Storage URL of the image file in question.
  """
  ls_proc = subprocess.Popen(['gsutil', 'ls', url], stdout=subprocess.PIPE,
                             stderr=subprocess.STDOUT)
  return ls_proc.wait() == 0
def VerifyURLs(urls):
""" Verify that each of the given URLs exists. Return a list of which URLs do
not exist.
urls: dictionary; URLs of the image files in question.
"""
print 'Verifying that images exist for URLs...'
missing = []
for url in urls.iterkeys():
if not VerifyURL(url):
print 'Missing: %s, referenced by: \n %s' % (url, '\n '.join(urls[url]))
missing.append(url)
return missing
def Main():
urls = FindURLs(AUTOGEN_URL)
missing = VerifyURLs(urls)
if missing:
print 'Found %d Missing files.' % len(missing)
return 1
if __name__ == '__main__':
sys.exit(Main())
| Python | 0.006359 | |
98abb69d2c5cd41e9cdf9decc1180fe35112bc28 | Add initial base for the feed handler | backend/feed_daemon.py | backend/feed_daemon.py | import feedparser
import psycopg2
import sys
import configparser
import logging
class FeedHandler():
def __init__(self):
self.config = configparser.ConfigParser(interpolation=None)
self.config.read(('config.ini',))
logging.basicConfig(format='%(asctime)s %(levelname)s %(message)s', level=logging.INFO)
self.con = None
try:
self.con = psycopg2.connect(
database=self.config.get('database', 'database'),
user=self.config.get('database', 'user'),
password=self.config.get('database', 'password'),
host=self.config.get('database', 'host'),
async=False)
except psycopg2.OperationalError as e:
logging.error('Database: {}'.format(str(e).split('\n')[0]))
def update_feed(self, feed_id, feed_url=None):
if feed_url == None:
cur = self.con.cursor()
cur.execute('SELECT url FROM lysr_feed WHERE id=%s', (feed_id,))
self.con.commit()
feed_url = cur.fetchone()[0]
logging.info('Updating feed {}: {}'.format(feed_id, feed_url))
feed = feedparser.parse(feed_url)
new_entries = 0
if feed.status is 200:
try:
cur = self.con.cursor()
for entry in feed.entries:
# Bad HTML is removed by default :D
cur.execute('SELECT id FROM lysr_feed_entry WHERE feed = %s AND guid = %s', (feed_id, entry.link))
self.con.commit()
if cur.rowcount is 0:
new_entries += 1
cur.execute('INSERT INTO lysr_feed_entry (feed, guid, content, title) VALUES (%s, %s, %s, %s)',
(feed_id, entry.link, entry.description, entry.title))
self.con.commit()
logging.info('Fetched feed {}, {} new entries found'.format(feed_id, new_entries))
except Exception as e:
logging.error('Database: {}'.format(str(e).split('\n')[0]))
else:
logging.info('Failed to fetch feed {}, status {}'.format(feed_id, feed.status))
cur = self.con.cursor()
cur.execute('UPDATE lysr_feed SET last_check=NOW() WHERE id=%s', (feed_id,))
self.con.commit()
if new_entries:
cur.execute('UPDATE lysr_feed SET last_update=NOW() WHERE id=%s', (feed_id,))
else:
cur.execute('UPDATE lysr_feed SET update_interval=2*update_interval WHERE id=%s', (feed_id,))
self.con.commit()
def parse_feeds(self):
cur = self.con.cursor()
cur.execute('SELECT id, url FROM lysr_feed WHERE NOW() > last_check + update_interval')
self.con.commit()
for feed in cur:
self.update_feed(*feed)
def main(args):
    """Entry point: run a single pass over all feeds that are due."""
    handler = FeedHandler()
    handler.parse_feeds()


if __name__ == '__main__':
    main(sys.argv)
| Python | 0 | |
c684ab17fc83242ee32db4b4c4bf57a7798acae4 | Add ordering prefix | examples/00_empty_window.py | examples/00_empty_window.py | import ModernGL
from ModernGL.ext.examples import run_example
class Example:
    """Minimal example: create a GL context and clear the window each frame."""

    def __init__(self, wnd):
        # wnd is the window wrapper supplied by ModernGL.ext.examples.
        self.wnd = wnd
        self.ctx = ModernGL.create_context()

    def render(self):
        # Match the GL viewport to the current window size, then clear to a
        # solid steel-blue color (RGB 0.2, 0.4, 0.7).
        self.ctx.viewport = self.wnd.viewport
        self.ctx.clear(0.2, 0.4, 0.7)


run_example(Example)
| Python | 0.000114 | |
c660de10b58b985273675396a214a3f4bf968a20 | Fix credentials initialization. | tools/telemetry/telemetry/core/backends/chrome/cros_test_case.py | tools/telemetry/telemetry/core/backends/chrome/cros_test_case.py | # Copyright 2014 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
import os
import unittest
from telemetry.core import browser_finder
from telemetry.core import extension_to_load
from telemetry.core import util
from telemetry.core.backends.chrome import cros_interface
from telemetry.unittest import options_for_unittests
class CrOSTestCase(unittest.TestCase):
  """Base class for Chrome OS browser tests.

  Reads the shared unittest options once per test and provides helpers for
  creating a browser, talking to the autotest extension, and querying login
  state on the device.
  """

  def setUp(self):
    options = options_for_unittests.GetCopy()
    # Remote connection to the CrOS device under test.
    self._cri = cros_interface.CrOSInterface(options.cros_remote,
                                             options.cros_ssh_identity)
    self._is_guest = options.browser_type == 'cros-chrome-guest'
    self._username = options.browser_options.username
    self._password = options.browser_options.password
    # Set by _CreateBrowser(autotest_ext=True); needed later to look the
    # extension up again in _GetAutotestExtension().
    self._load_extension = None

  def _CreateBrowser(self, autotest_ext=False, auto_login=True,
                     gaia_login=False, username=None, password=None):
    """Finds and creates a browser for tests. if autotest_ext is True,
    also loads the autotest extension"""
    options = options_for_unittests.GetCopy()

    if autotest_ext:
      extension_path = os.path.join(util.GetUnittestDataDir(), 'autotest_ext')
      assert os.path.isdir(extension_path)
      self._load_extension = extension_to_load.ExtensionToLoad(
          path=extension_path,
          browser_type=options.browser_type,
          is_component=True)
      options.extensions_to_load = [self._load_extension]

    browser_to_create = browser_finder.FindBrowser(options)
    self.assertTrue(browser_to_create)
    options.browser_options.create_browser_with_oobe = True
    options.browser_options.auto_login = auto_login
    options.browser_options.gaia_login = gaia_login
    # Only override the credentials captured in setUp when explicitly given.
    if username is not None:
      options.browser_options.username = username
    if password is not None:
      options.browser_options.password = password

    return browser_to_create.Create()

  def _GetAutotestExtension(self, browser):
    """Returns the autotest extension instance"""
    extension = browser.extensions[self._load_extension]
    self.assertTrue(extension)
    return extension

  def _IsCryptohomeMounted(self):
    """Returns True if cryptohome is mounted. as determined by the cmd
    cryptohome --action=is_mounted"""
    return self._cri.RunCmdOnDevice(
        ['/usr/sbin/cryptohome', '--action=is_mounted'])[0].strip() == 'true'

  def _GetLoginStatus(self, browser):
    """Returns the login status object reported by the autotest extension."""
    extension = self._GetAutotestExtension(browser)
    self.assertTrue(extension.EvaluateJavaScript(
        "typeof('chrome.autotestPrivate') != 'undefined'"))
    extension.ExecuteJavaScript('''
        window.__login_status = null;
        chrome.autotestPrivate.loginStatus(function(s) {
          window.__login_status = s;
        });
    ''')
    # loginStatus is asynchronous; poll for up to 10 seconds.
    return util.WaitFor(
        lambda: extension.EvaluateJavaScript('window.__login_status'), 10)

  def _Credentials(self, credentials_path):
    """Returns (username, password) read from the given file (relative to
    this module), or (None, None) if the file does not exist."""
    credentials_path = os.path.join(os.path.dirname(__file__),
                                    credentials_path)
    if os.path.isfile(credentials_path):
      with open(credentials_path) as f:
        username, password = f.read().rstrip().split(':')
        return (username, password)
    return (None, None)
| # Copyright 2014 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
import os
import unittest
from telemetry.core import browser_finder
from telemetry.core import extension_to_load
from telemetry.core import util
from telemetry.core.backends.chrome import cros_interface
from telemetry.unittest import options_for_unittests
class CrOSTestCase(unittest.TestCase):
  """Base class for Chrome OS browser tests.

  Reads the shared unittest options once per test and provides helpers for
  creating a browser, talking to the autotest extension, and querying login
  state on the device.
  """

  def setUp(self):
    options = options_for_unittests.GetCopy()
    # Remote connection to the CrOS device under test.
    self._cri = cros_interface.CrOSInterface(options.cros_remote,
                                             options.cros_ssh_identity)
    self._is_guest = options.browser_type == 'cros-chrome-guest'
    self._username = options.browser_options.username
    self._password = options.browser_options.password
    # Set by _CreateBrowser(autotest_ext=True); needed later to look the
    # extension up again in _GetAutotestExtension().
    self._load_extension = None

  def _CreateBrowser(self, autotest_ext=False, auto_login=True,
                     gaia_login=False, username=None, password=None):
    """Finds and creates a browser for tests. if autotest_ext is True,
    also loads the autotest extension"""
    options = options_for_unittests.GetCopy()

    if autotest_ext:
      extension_path = os.path.join(util.GetUnittestDataDir(), 'autotest_ext')
      assert os.path.isdir(extension_path)
      self._load_extension = extension_to_load.ExtensionToLoad(
          path=extension_path,
          browser_type=options.browser_type,
          is_component=True)
      options.extensions_to_load = [self._load_extension]

    browser_to_create = browser_finder.FindBrowser(options)
    self.assertTrue(browser_to_create)
    options.browser_options.create_browser_with_oobe = True
    options.browser_options.auto_login = auto_login
    options.browser_options.gaia_login = gaia_login
    # Only override the credentials captured in setUp when explicitly given.
    if username is not None:
      options.browser_options.username = username
    if password is not None:
      options.browser_options.password = password

    return browser_to_create.Create()

  def _GetAutotestExtension(self, browser):
    """Returns the autotest extension instance"""
    extension = browser.extensions[self._load_extension]
    self.assertTrue(extension)
    return extension

  def _IsCryptohomeMounted(self):
    """Returns True if cryptohome is mounted. as determined by the cmd
    cryptohome --action=is_mounted"""
    return self._cri.RunCmdOnDevice(
        ['/usr/sbin/cryptohome', '--action=is_mounted'])[0].strip() == 'true'

  def _GetLoginStatus(self, browser):
    """Returns the login status object reported by the autotest extension."""
    extension = self._GetAutotestExtension(browser)
    self.assertTrue(extension.EvaluateJavaScript(
        "typeof('chrome.autotestPrivate') != 'undefined'"))
    extension.ExecuteJavaScript('''
        window.__login_status = null;
        chrome.autotestPrivate.loginStatus(function(s) {
          window.__login_status = s;
        });
    ''')
    # loginStatus is asynchronous; poll for up to 10 seconds.
    return util.WaitFor(
        lambda: extension.EvaluateJavaScript('window.__login_status'), 10)

  def _Credentials(self, credentials_path):
    """Returns [username, password] read from the given file (relative to
    this module), or an empty list if the file does not exist."""
    credentials_path = os.path.join(os.path.dirname(__file__),
                                    credentials_path)
    credentials = []
    if os.path.isfile(credentials_path):
      with open(credentials_path) as f:
        credentials = f.read().rstrip().split(':')
    return credentials
| Python | 0.999998 |
35a683738f00a67b88f26fdc2453a29777fe7f82 | Add raw outputter | salt/output/raw.py | salt/output/raw.py | '''
Print out the raw python data, the original outputter
'''
def output(data):
    '''
    Rather basic: print the raw Python data structure.

    Salt's outputter loader looks this function up by the name ``output``;
    the original definition was misspelled ``ouput`` and therefore never
    discovered.
    '''
    print(data)


# Backward-compatible alias for the original (misspelled) entry point.
ouput = output
| Python | 0.002716 | |
ad284dfe63b827aaa1ca8d7353e1bf1a54ea4fdf | Change arduino board from first example from mega to nano | src/arduino_sourcecodes/src/arduino_serial_nodes/connect_arduino_nano1.py | src/arduino_sourcecodes/src/arduino_serial_nodes/connect_arduino_nano1.py | #!/usr/bin/env python
#####################################################################
# Software License Agreement (BSD License)
#
# Copyright (c) 2011, Willow Garage, Inc.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions
# are met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following
# disclaimer in the documentation and/or other materials provided
# with the distribution.
# * Neither the name of Willow Garage, Inc. nor the names of its
# contributors may be used to endorse or promote products derived
# from this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
# FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
# COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
# INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
# BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
# LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN
# ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
__author__ = "mferguson@willowgarage.com (Michael Ferguson)"
import rospy
from rosserial_python import SerialClient, RosSerialServer
import multiprocessing
import sys
if __name__=="__main__":
    rospy.init_node("serial_node_arduinoNano1")
    rospy.loginfo("ROS Serial Python Node")

    # Serial-connection defaults; overridable via private ROS parameters.
    port_name = rospy.get_param('~port','/dev/ttyUSB0')
    baud = int(rospy.get_param('~baud','57600'))

    # TODO: should these really be global?
    tcp_portnum = int(rospy.get_param('/rosserial_embeddedlinux/tcp_port', '11411'))
    fork_server = rospy.get_param('/rosserial_embeddedlinux/fork_server', False)

    # TODO: do we really want command line params in addition to parameter server params?
    # Command-line arguments (after ROS remapping) override the parameters.
    sys.argv = rospy.myargv(argv=sys.argv)
    if len(sys.argv) == 2 :
        port_name = sys.argv[1]
    if len(sys.argv) == 3 :
        tcp_portnum = int(sys.argv[2])

    if port_name == "tcp" :
        # TCP mode: accept rosserial clients over a socket instead of a
        # serial device; shut down any forked child processes on exit.
        server = RosSerialServer(tcp_portnum, fork_server)
        rospy.loginfo("Waiting for socket connections on port %d" % tcp_portnum)
        try:
            server.listen()
        except KeyboardInterrupt:
            rospy.loginfo("got keyboard interrupt")
        finally:
            rospy.loginfo("Shutting down")
            for process in multiprocessing.active_children():
                rospy.loginfo("Shutting down process %r", process)
                process.terminate()
                process.join()
            rospy.loginfo("All done")
    else : # Use serial port
        rospy.loginfo("Connecting to %s at %d baud" % (port_name,baud) )
        client = SerialClient(port_name, baud)
        try:
            client.run()
        except KeyboardInterrupt:
            pass
| Python | 0.000005 | |
07528bd828c28a18f3118481d1cdb9cf1287fd0b | Revert "don't track django.wsgi". It is part of the documentation. | railroad/sample/django.wsgi | railroad/sample/django.wsgi | # Copyright 2010 ITA Software, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
import sys
# Describes the location of our Django configuration file. Unless you move the
# settings file this default should be fine
os.environ['DJANGO_SETTINGS_MODULE'] = 'railroad.settings'

# These should correspond to the paths of your railroad and nagcat
# installation
sys.path.append('/var/lib/nagcat/railroad')
sys.path.append('/var/lib/nagcat/python')

# Imported here, after sys.path is extended, so Django can be found.
import django.core.handlers.wsgi

# The WSGI callable that mod_wsgi looks up by the name "application".
application = django.core.handlers.wsgi.WSGIHandler()
| Python | 0 | |
e7dd12377a5f3a46019c5244de08a5cfc00f44db | add Anscombe's Quartet example | examples/glyphs/anscombe.py | examples/glyphs/anscombe.py |
import os
import numpy as np
import pandas as pd
from bokeh.objects import (
ColumnDataSource, GlyphRenderer, Grid, GridPlot, LinearAxis, Plot, Range1d
)
from bokeh.glyphs import Circle, Line
from bokeh import session
from StringIO import StringIO
# Anscombe's quartet: four x/y datasets with near-identical summary
# statistics but very different distributions.
data = """
I I II II III III IV IV 5
x y x y x y x y 5
10.0 8.04 10.0 9.14 10.0 7.46 8.0 6.58
8.0 6.95 8.0 8.14 8.0 6.77 8.0 5.76
13.0 7.58 13.0 8.74 13.0 12.74 8.0 7.71
9.0 8.81 9.0 8.77 9.0 7.11 8.0 8.84
11.0 8.33 11.0 9.26 11.0 7.81 8.0 8.47
14.0 9.96 14.0 8.10 14.0 8.84 8.0 7.04
6.0 7.24 6.0 6.13 6.0 6.08 8.0 5.25
4.0 4.26 4.0 3.10 4.0 5.39 19.0 12.50
12.0 10.84 12.0 9.13 12.0 8.15 8.0 5.56
7.0 4.82 7.0 7.26 7.0 6.42 8.0 7.91
5.0 5.68 5.0 4.74 5.0 5.73 8.0 6.89
"""

# Parse the fixed-width table; the two header rows form a (dataset, axis)
# column MultiIndex.
quartet = pd.read_fwf(StringIO(data), widths=[8]*8, header=[1,2], tupleize_cols=False)

# One shared data source holding all four datasets' scatter points.
circles_source = ColumnDataSource(
    data = dict(
        xi = quartet['I']['x'],
        yi = quartet['I']['y'],
        xii = quartet['II']['x'],
        yii = quartet['II']['y'],
        xiii = quartet['III']['x'],
        yiii = quartet['III']['y'],
        xiv = quartet['IV']['x'],
        yiv = quartet['IV']['y'],
    )
)

# The regression line y = 3 + 0.5*x, shared by all four subplots.
x = np.linspace(0,10, 10)
y = 3 + 0.5 * x
lines_source = ColumnDataSource(data=dict(x=x, y=y))

# Every subplot uses the same axis ranges.
xdr = Range1d(start=0, end=10)
ydr = Range1d(start=0, end=10)
def make_plot(title, xname, yname):
    """Build one 400x400 subplot: the (xname, yname) scatter from
    circles_source plus the shared regression line."""
    plot = Plot(x_range=xdr, y_range=ydr, data_sources=[lines_source, circles_source], title=title, width=400, height=400)
    xaxis = LinearAxis(plot=plot, dimension=0, location="bottom", axis_line_alpha=0)
    yaxis = LinearAxis(plot=plot, dimension=1, location="left", axis_line_alpha=0)
    xgrid = Grid(plot=plot, dimension=0)
    ygrid = Grid(plot=plot, dimension=1)
    # The regression line (identical in every subplot).
    line_renderer = GlyphRenderer(
        data_source = lines_source,
        xdata_range = xdr,
        ydata_range = ydr,
        glyph = Line(x='x', y='y', line_color="navy", line_width=2, line_alpha=0.5),
    )
    plot.renderers.append(line_renderer)
    # The dataset-specific scatter points, selected via xname/yname columns.
    circle_renderer = GlyphRenderer(
        data_source = circles_source,
        xdata_range = xdr,
        ydata_range = ydr,
        glyph = Circle(x=xname, y=yname, radius=6, fill_color="orange", line_color="orange", fill_alpha=0.5),
    )
    plot.renderers.append(circle_renderer)
    # Also return the supporting objects so the caller can register them
    # with the session.
    return plot, (line_renderer, circle_renderer, xaxis, yaxis, xgrid, ygrid)
# One subplot per dataset, arranged in a 2x2 grid.
I, objsI = make_plot('I', 'xi', 'yi')
II, objsII = make_plot('II', 'xii', 'yii')
III, objsIII = make_plot('III', 'xiii', 'yiii')
IV, objsIV = make_plot('IV', 'xiv', 'yiv')

grid = GridPlot(children=[[I, II], [III, IV]])

# Register every object with the session and write a standalone HTML file.
sess = session.HTMLFileSession("anscombe.html")
sess.add(lines_source, circles_source, xdr, ydr)
sess.add(*(objsI + objsII + objsIII + objsIV))
sess.add(grid, I, II, III, IV)
sess.plotcontext.children.append(grid)
sess.save(js="relative", css="relative", rootdir=os.path.abspath("."))

# Best-effort: open the result in the default browser.  NOTE(review): the
# bare except deliberately ignores any failure to do so.
try:
    import webbrowser
    webbrowser.open("file://" + os.path.abspath("anscombe.html"))
except:
    pass
| Python | 0.000024 | |
2d65862d77338dc503e34f389de1dc3bc553b6cd | Add DomainCaseRuleRun to admin site | corehq/apps/data_interfaces/admin.py | corehq/apps/data_interfaces/admin.py | from django.contrib import admin
from corehq.apps.data_interfaces.models import DomainCaseRuleRun
class DomainCaseRuleRunAdmin(admin.ModelAdmin):
    """Admin listing for DomainCaseRuleRun: per-run case-rule statistics."""
    # Columns shown in the change list.
    list_display = [
        'domain',
        'started_on',
        'finished_on',
        'status',
        'cases_checked',
        'num_updates',
        'num_closes',
        'num_related_updates',
        'num_related_closes',
    ]
    # Free-text search matches on domain only.
    search_fields = [
        'domain',
    ]
    # Newest runs first.
    ordering = ['-started_on']


admin.site.register(DomainCaseRuleRun, DomainCaseRuleRunAdmin)
| Python | 0 | |
017f276bb9544578417444c34ce2c04d87bb5852 | Fix zds #323 | markdown/extensions/emoticons.py | markdown/extensions/emoticons.py | # Emoticon extension for python-markdown
# Original version :
# https://gist.github.com/insin/815656/raw/a68516f1ffc03df465730b3ddef6de0a11b7e9a5/mdx_emoticons.py
#
# Patched by cgabard for supporting newer python-markdown version and extend for support multi-extensions
import re
import markdown
from markdown.inlinepatterns import Pattern
from markdown.util import etree
class EmoticonExtension(markdown.Extension):
    """Markdown extension that replaces emoticon symbols with <img> tags."""

    def __init__(self, configs):
        # Default config; each entry is [value, description] as required by
        # markdown.Extension.
        self.config = {
            'EMOTICONS': [{
                ":)" : "test.png",
            }, 'A mapping from emoticon symbols to image names.'],
        }
        # Guard against configs=None (makeExtension's default argument); the
        # old code crashed on None.iteritems().
        if configs:
            for key, value in configs.iteritems():
                self.config[key][0] = value

    def extendMarkdown(self, md, md_globals):
        self.md = md
        # Alternation of all escaped emoticon symbols, with lookarounds so a
        # symbol only matches at word boundaries.
        EMOTICON_RE = r'(?=(^|\W))(?P<emoticon>%s)(?=(\W|$))' % '|'.join(
            [re.escape(emoticon) for emoticon in self.getConfig('EMOTICONS').keys()])
        md.inlinePatterns.add('emoticons', EmoticonPattern(EMOTICON_RE, self),">not_strong")
class EmoticonPattern(Pattern):
    """Inline pattern that replaces a matched emoticon with an <img> element."""

    def __init__ (self, pattern, emoticons):
        Pattern.__init__(self, pattern)
        # The extension instance; used to look up the emoticon -> image map.
        self.emoticons = emoticons

    def handleMatch(self, m):
        # Build <img src="<image name>" alt="<symbol>"/> for the match.
        emoticon = m.group('emoticon')
        el = etree.Element('img')
        el.set('src', '%s' % (self.emoticons.getConfig('EMOTICONS')[emoticon],))
        el.set('alt', emoticon)
        return el
def makeExtension(configs=None) :
    """Factory hook called by python-markdown to instantiate the extension.

    NOTE(review): EmoticonExtension iterates configs.iteritems(), so calling
    this with the default configs=None will raise -- confirm callers always
    pass a dict.
    """
    return EmoticonExtension(configs=configs)
| # Emoticon extension for python-markdown
# Original version :
# https://gist.github.com/insin/815656/raw/a68516f1ffc03df465730b3ddef6de0a11b7e9a5/mdx_emoticons.py
#
# Patched by cgabard for supporting newer python-markdown version and extend for support multi-extensions
import re
import markdown
from markdown.inlinepatterns import Pattern
from markdown.util import etree
class EmoticonExtension(markdown.Extension):
    """Markdown extension that replaces emoticon symbols with <img> tags."""

    def __init__ (self, configs):
        # Default config; each entry is [value, description] as required by
        # markdown.Extension.
        self.config = {
            'EMOTICONS': [{
                ":)" : "test.png",
            }, 'A mapping from emoticon symbols to image names.'],
        }
        # NOTE(review): raises if configs is None (makeExtension's default
        # argument) -- confirm callers always pass a dict.
        for key, value in configs.iteritems() :
            self.config[key][0] = value

    def extendMarkdown(self, md, md_globals):
        self.md = md
        # Plain alternation of all escaped emoticon symbols (no word-boundary
        # guards, so symbols can match inside words).
        EMOTICON_RE = '(?P<emoticon>%s)' % '|'.join(
            [re.escape(emoticon) for emoticon in self.getConfig('EMOTICONS').keys()])
        md.inlinePatterns.add('emoticons', EmoticonPattern(EMOTICON_RE, self),">not_strong")
class EmoticonPattern(Pattern):
    """Inline pattern that replaces a matched emoticon with an <img> element."""

    def __init__ (self, pattern, emoticons):
        Pattern.__init__(self, pattern)
        # The extension instance; used to look up the emoticon -> image map.
        self.emoticons = emoticons

    def handleMatch(self, m):
        # Build <img src="<image name>" alt="<symbol>"/> for the match.
        emoticon = m.group('emoticon')
        el = etree.Element('img')
        el.set('src', '%s' % (self.emoticons.getConfig('EMOTICONS')[emoticon],))
        el.set('alt', emoticon)
        return el
def makeExtension(configs=None) :
    """Factory hook called by python-markdown to instantiate the extension.

    NOTE(review): EmoticonExtension iterates configs.iteritems(), so calling
    this with the default configs=None will raise -- confirm callers always
    pass a dict.
    """
    return EmoticonExtension(configs=configs)
| Python | 0 |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.