commit stringlengths 40 40 | subject stringlengths 4 1.73k | repos stringlengths 5 127k | old_file stringlengths 2 751 | new_file stringlengths 2 751 | new_contents stringlengths 1 8.98k | old_contents stringlengths 0 6.59k | license stringclasses 13
values | lang stringclasses 23
values |
|---|---|---|---|---|---|---|---|---|
d1a93e356cc410a46495e239f7646d9171c1961e | Update config.py | fadhiilrachman/line-py | linepy/config.py | linepy/config.py | # -*- coding: utf-8 -*-
from akad.ttypes import ApplicationType
import re
class Config(object):
LINE_HOST_DOMAIN = 'https://legy-jp.line.naver.jp'
LINE_OBS_DOMAIN = 'https://obs-sg.line-apps.com'
LINE_TIMELINE_API = 'https://legy-jp.line.naver.jp/mh/api'
LINE_TIMELINE_MH = 'https://legy-jp.line.naver.jp/mh'
LINE_LOGIN_QUERY_PATH = '/api/v4p/rs'
LINE_AUTH_QUERY_PATH = '/api/v4/TalkService.do'
LINE_API_QUERY_PATH_FIR = '/S4'
LINE_POLL_QUERY_PATH_FIR = '/P4'
LINE_CALL_QUERY_PATH = '/V4'
LINE_CERTIFICATE_PATH = '/Q'
LINE_CHAN_QUERY_PATH = '/CH4'
LINE_SQUARE_QUERY_PATH = '/SQS1'
LINE_SHOP_QUERY_PATH = '/SHOP4'
CHANNEL_ID = {
'LINE_TIMELINE': '1341209850',
'LINE_WEBTOON': '1401600689',
'LINE_TODAY': '1518712866',
'LINE_STORE': '1376922440',
'LINE_MUSIC': '1381425814',
'LINE_SERVICES': '1459630796'
}
APP_TYPE = ApplicationType._VALUES_TO_NAMES[96]
APP_VER = '7.18.1'
CARRIER = '51089, 1-0'
SYSTEM_NAME = 'FDLRCN'
SYSTEM_VER = '11.2.5'
IP_ADDR = '8.8.8.8'
EMAIL_REGEX = re.compile(r"[^@]+@[^@]+\.[^@]+")
def __init__(self):
self.APP_NAME = '%s\t%s\t%s\t%s' % (self.APP_TYPE, self.APP_VER, self.SYSTEM_NAME, self.SYSTEM_VER)
self.USER_AGENT = 'Line/%s' % self.APP_VER
| # -*- coding: utf-8 -*-
from akad.ttypes import ApplicationType
import re
class Config(object):
LINE_HOST_DOMAIN = 'https://gd2.line.naver.jp'
LINE_OBS_DOMAIN = 'https://obs-sg.line-apps.com'
LINE_TIMELINE_API = 'https://gd2.line.naver.jp/mh/api'
LINE_TIMELINE_MH = 'https://gd2.line.naver.jp/mh'
LINE_LOGIN_QUERY_PATH = '/api/v4p/rs'
LINE_AUTH_QUERY_PATH = '/api/v4/TalkService.do'
LINE_API_QUERY_PATH_FIR = '/S4'
LINE_POLL_QUERY_PATH_FIR = '/P4'
LINE_CALL_QUERY_PATH = '/V4'
LINE_CERTIFICATE_PATH = '/Q'
LINE_CHAN_QUERY_PATH = '/CH4'
LINE_SQUARE_QUERY_PATH = '/SQS1'
LINE_SHOP_QUERY_PATH = '/SHOP4'
CHANNEL_ID = {
'LINE_TIMELINE': '1341209850',
'LINE_WEBTOON': '1401600689',
'LINE_TODAY': '1518712866',
'LINE_STORE': '1376922440',
'LINE_MUSIC': '1381425814',
'LINE_SERVICES': '1459630796'
}
APP_TYPE = ApplicationType._VALUES_TO_NAMES[96]
APP_VER = '7.18.1'
CARRIER = '51089, 1-0'
SYSTEM_NAME = 'FDLRCN'
SYSTEM_VER = '11.2.5'
IP_ADDR = '8.8.8.8'
EMAIL_REGEX = re.compile(r"[^@]+@[^@]+\.[^@]+")
def __init__(self):
self.APP_NAME = '%s\t%s\t%s\t%s' % (self.APP_TYPE, self.APP_VER, self.SYSTEM_NAME, self.SYSTEM_VER)
self.USER_AGENT = 'Line/%s' % self.APP_VER
| bsd-3-clause | Python |
8b2efb3e77b2a034db29767439f1530eeebe93e1 | Add onload and DOM content-load time to loading benchmark. | timopulkkinen/BubbleFish,bright-sparks/chromium-spacewalk,hgl888/chromium-crosswalk,jaruba/chromium.src,Just-D/chromium-1,Just-D/chromium-1,ChromiumWebApps/chromium,ltilve/chromium,Chilledheart/chromium,mohamed--abdel-maksoud/chromium.src,markYoungH/chromium.src,timopulkkinen/BubbleFish,jaruba/chromium.src,dushu1203/chromium.src,pozdnyakov/chromium-crosswalk,Pluto-tv/chromium-crosswalk,markYoungH/chromium.src,mohamed--abdel-maksoud/chromium.src,ChromiumWebApps/chromium,krieger-od/nwjs_chromium.src,zcbenz/cefode-chromium,hujiajie/pa-chromium,zcbenz/cefode-chromium,Pluto-tv/chromium-crosswalk,hgl888/chromium-crosswalk-efl,littlstar/chromium.src,Jonekee/chromium.src,nacl-webkit/chrome_deps,Pluto-tv/chromium-crosswalk,chuan9/chromium-crosswalk,anirudhSK/chromium,ondra-novak/chromium.src,Pluto-tv/chromium-crosswalk,ltilve/chromium,Jonekee/chromium.src,axinging/chromium-crosswalk,mohamed--abdel-maksoud/chromium.src,ChromiumWebApps/chromium,zcbenz/cefode-chromium,littlstar/chromium.src,Jonekee/chromium.src,dednal/chromium.src,timopulkkinen/BubbleFish,PeterWangIntel/chromium-crosswalk,Just-D/chromium-1,fujunwei/chromium-crosswalk,dednal/chromium.src,Just-D/chromium-1,patrickm/chromium.src,markYoungH/chromium.src,anirudhSK/chromium,Jonekee/chromium.src,jaruba/chromium.src,Jonekee/chromium.src,ltilve/chromium,ondra-novak/chromium.src,Jonekee/chromium.src,ChromiumWebApps/chromium,patrickm/chromium.src,Jonekee/chromium.src,krieger-od/nwjs_chromium.src,hgl888/chromium-crosswalk-efl,Pluto-tv/chromium-crosswalk,Fireblend/chromium-crosswalk,hujiajie/pa-chromium,timopulkkinen/BubbleFish,pozdnyakov/chromium-crosswalk,axinging/chromium-crosswalk,timopulkkinen/BubbleFish,jaruba/chromium.src,Chilledheart/chromium,PeterWangIntel/chromium-crosswalk,M4sse/chromium.src,pozdnyakov/chromium-crosswalk,krieger-od/nwjs_chromium.src,anirudhSK/chromium,Pluto-tv/chromium-crosswalk,timopulkkinen/B
ubbleFish,chuan9/chromium-crosswalk,M4sse/chromium.src,hujiajie/pa-chromium,axinging/chromium-crosswalk,hgl888/chromium-crosswalk-efl,dednal/chromium.src,hujiajie/pa-chromium,zcbenz/cefode-chromium,axinging/chromium-crosswalk,anirudhSK/chromium,dushu1203/chromium.src,Fireblend/chromium-crosswalk,zcbenz/cefode-chromium,ChromiumWebApps/chromium,pozdnyakov/chromium-crosswalk,mohamed--abdel-maksoud/chromium.src,mogoweb/chromium-crosswalk,hgl888/chromium-crosswalk,anirudhSK/chromium,crosswalk-project/chromium-crosswalk-efl,anirudhSK/chromium,M4sse/chromium.src,ondra-novak/chromium.src,mogoweb/chromium-crosswalk,markYoungH/chromium.src,nacl-webkit/chrome_deps,crosswalk-project/chromium-crosswalk-efl,krieger-od/nwjs_chromium.src,bright-sparks/chromium-spacewalk,M4sse/chromium.src,markYoungH/chromium.src,ltilve/chromium,littlstar/chromium.src,TheTypoMaster/chromium-crosswalk,hgl888/chromium-crosswalk-efl,hujiajie/pa-chromium,PeterWangIntel/chromium-crosswalk,TheTypoMaster/chromium-crosswalk,axinging/chromium-crosswalk,pozdnyakov/chromium-crosswalk,mohamed--abdel-maksoud/chromium.src,bright-sparks/chromium-spacewalk,ltilve/chromium,nacl-webkit/chrome_deps,crosswalk-project/chromium-crosswalk-efl,hgl888/chromium-crosswalk-efl,hujiajie/pa-chromium,hujiajie/pa-chromium,nacl-webkit/chrome_deps,littlstar/chromium.src,zcbenz/cefode-chromium,nacl-webkit/chrome_deps,fujunwei/chromium-crosswalk,hgl888/chromium-crosswalk-efl,timopulkkinen/BubbleFish,dushu1203/chromium.src,timopulkkinen/BubbleFish,Jonekee/chromium.src,Pluto-tv/chromium-crosswalk,TheTypoMaster/chromium-crosswalk,ChromiumWebApps/chromium,ltilve/chromium,ondra-novak/chromium.src,pozdnyakov/chromium-crosswalk,M4sse/chromium.src,M4sse/chromium.src,crosswalk-project/chromium-crosswalk-efl,littlstar/chromium.src,Jonekee/chromium.src,TheTypoMaster/chromium-crosswalk,hgl888/chromium-crosswalk-efl,ChromiumWebApps/chromium,nacl-webkit/chrome_deps,mohamed--abdel-maksoud/chromium.src,ondra-novak/chromium.src,pozdnyakov/chromium-cro
sswalk,dushu1203/chromium.src,chuan9/chromium-crosswalk,timopulkkinen/BubbleFish,fujunwei/chromium-crosswalk,chuan9/chromium-crosswalk,krieger-od/nwjs_chromium.src,ChromiumWebApps/chromium,PeterWangIntel/chromium-crosswalk,dednal/chromium.src,mogoweb/chromium-crosswalk,axinging/chromium-crosswalk,nacl-webkit/chrome_deps,ondra-novak/chromium.src,markYoungH/chromium.src,TheTypoMaster/chromium-crosswalk,littlstar/chromium.src,dednal/chromium.src,Fireblend/chromium-crosswalk,mogoweb/chromium-crosswalk,pozdnyakov/chromium-crosswalk,axinging/chromium-crosswalk,krieger-od/nwjs_chromium.src,PeterWangIntel/chromium-crosswalk,fujunwei/chromium-crosswalk,hgl888/chromium-crosswalk,krieger-od/nwjs_chromium.src,chuan9/chromium-crosswalk,Chilledheart/chromium,Fireblend/chromium-crosswalk,Pluto-tv/chromium-crosswalk,PeterWangIntel/chromium-crosswalk,axinging/chromium-crosswalk,patrickm/chromium.src,markYoungH/chromium.src,Chilledheart/chromium,M4sse/chromium.src,M4sse/chromium.src,patrickm/chromium.src,zcbenz/cefode-chromium,dednal/chromium.src,dednal/chromium.src,anirudhSK/chromium,anirudhSK/chromium,zcbenz/cefode-chromium,krieger-od/nwjs_chromium.src,mogoweb/chromium-crosswalk,hgl888/chromium-crosswalk-efl,dushu1203/chromium.src,littlstar/chromium.src,Jonekee/chromium.src,bright-sparks/chromium-spacewalk,mohamed--abdel-maksoud/chromium.src,bright-sparks/chromium-spacewalk,chuan9/chromium-crosswalk,krieger-od/nwjs_chromium.src,hgl888/chromium-crosswalk,jaruba/chromium.src,Fireblend/chromium-crosswalk,Fireblend/chromium-crosswalk,mogoweb/chromium-crosswalk,bright-sparks/chromium-spacewalk,dushu1203/chromium.src,ChromiumWebApps/chromium,hujiajie/pa-chromium,fujunwei/chromium-crosswalk,hujiajie/pa-chromium,dednal/chromium.src,dushu1203/chromium.src,axinging/chromium-crosswalk,mogoweb/chromium-crosswalk,anirudhSK/chromium,nacl-webkit/chrome_deps,zcbenz/cefode-chromium,mohamed--abdel-maksoud/chromium.src,mohamed--abdel-maksoud/chromium.src,Chilledheart/chromium,bright-sparks/chromium-s
pacewalk,mogoweb/chromium-crosswalk,Fireblend/chromium-crosswalk,Just-D/chromium-1,mohamed--abdel-maksoud/chromium.src,chuan9/chromium-crosswalk,Just-D/chromium-1,anirudhSK/chromium,hgl888/chromium-crosswalk-efl,jaruba/chromium.src,ltilve/chromium,PeterWangIntel/chromium-crosswalk,hujiajie/pa-chromium,markYoungH/chromium.src,ltilve/chromium,TheTypoMaster/chromium-crosswalk,crosswalk-project/chromium-crosswalk-efl,chuan9/chromium-crosswalk,hgl888/chromium-crosswalk,pozdnyakov/chromium-crosswalk,dednal/chromium.src,ChromiumWebApps/chromium,krieger-od/nwjs_chromium.src,hgl888/chromium-crosswalk-efl,TheTypoMaster/chromium-crosswalk,Chilledheart/chromium,fujunwei/chromium-crosswalk,Chilledheart/chromium,Just-D/chromium-1,markYoungH/chromium.src,crosswalk-project/chromium-crosswalk-efl,ondra-novak/chromium.src,chuan9/chromium-crosswalk,bright-sparks/chromium-spacewalk,patrickm/chromium.src,axinging/chromium-crosswalk,zcbenz/cefode-chromium,hgl888/chromium-crosswalk,markYoungH/chromium.src,anirudhSK/chromium,jaruba/chromium.src,markYoungH/chromium.src,jaruba/chromium.src,crosswalk-project/chromium-crosswalk-efl,ChromiumWebApps/chromium,M4sse/chromium.src,jaruba/chromium.src,Fireblend/chromium-crosswalk,krieger-od/nwjs_chromium.src,dednal/chromium.src,dushu1203/chromium.src,ltilve/chromium,nacl-webkit/chrome_deps,Just-D/chromium-1,PeterWangIntel/chromium-crosswalk,mogoweb/chromium-crosswalk,pozdnyakov/chromium-crosswalk,littlstar/chromium.src,patrickm/chromium.src,hgl888/chromium-crosswalk,fujunwei/chromium-crosswalk,axinging/chromium-crosswalk,Jonekee/chromium.src,mogoweb/chromium-crosswalk,M4sse/chromium.src,PeterWangIntel/chromium-crosswalk,crosswalk-project/chromium-crosswalk-efl,Pluto-tv/chromium-crosswalk,hgl888/chromium-crosswalk,zcbenz/cefode-chromium,jaruba/chromium.src,M4sse/chromium.src,mohamed--abdel-maksoud/chromium.src,Chilledheart/chromium,TheTypoMaster/chromium-crosswalk,patrickm/chromium.src,patrickm/chromium.src,hgl888/chromium-crosswalk,fujunwei/chromium-
crosswalk,ondra-novak/chromium.src,Fireblend/chromium-crosswalk,nacl-webkit/chrome_deps,crosswalk-project/chromium-crosswalk-efl,TheTypoMaster/chromium-crosswalk,timopulkkinen/BubbleFish,timopulkkinen/BubbleFish,ondra-novak/chromium.src,anirudhSK/chromium,dednal/chromium.src,dushu1203/chromium.src,jaruba/chromium.src,hujiajie/pa-chromium,pozdnyakov/chromium-crosswalk,Just-D/chromium-1,dushu1203/chromium.src,patrickm/chromium.src,ChromiumWebApps/chromium,nacl-webkit/chrome_deps,Chilledheart/chromium,fujunwei/chromium-crosswalk,bright-sparks/chromium-spacewalk,dushu1203/chromium.src | tools/perf/perf_tools/loading_benchmark.py | tools/perf/perf_tools/loading_benchmark.py | # Copyright (c) 2012 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
import collections
from telemetry import multi_page_benchmark
class LoadingBenchmark(multi_page_benchmark.MultiPageBenchmark):
@property
def results_are_the_same_on_every_page(self):
return False
def WillNavigateToPage(self, page, tab):
tab.StartTimelineRecording()
def MeasurePage(self, page, tab, results):
# In current telemetry tests, all tests wait for DocumentComplete state.
#
# TODO(nduca): when crbug.com/168431 is fixed, modify the page sets to
# recognize loading as a toplevel action.
tab.StopTimelineRecording()
load_timings = tab.EvaluateJavaScript("window.performance.timing")
load_time_ms = (
float(load_timings['loadEventStart']) -
load_timings['navigationStart'])
dom_content_loaded_time_ms = (
float(load_timings['domContentLoadedEventStart']) -
load_timings['navigationStart'])
results.Add('load_time', 'ms', load_time_ms)
results.Add('dom_content_loaded_time', 'ms',
dom_content_loaded_time_ms)
events = tab.timeline_model.GetAllEvents()
events_by_name = collections.defaultdict(list)
for e in events:
events_by_name[e.name].append(e)
for key, group in events_by_name.items():
times = [e.self_time_ms for e in group]
total = sum(times)
biggest_jank = max(times)
results.Add(key, 'ms', total)
results.Add(key + '_max', 'ms', biggest_jank)
results.Add(key + '_avg', 'ms', total / len(times))
| # Copyright (c) 2012 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
import collections
from telemetry import multi_page_benchmark
class LoadingBenchmark(multi_page_benchmark.MultiPageBenchmark):
@property
def results_are_the_same_on_every_page(self):
return False
def WillNavigateToPage(self, page, tab):
tab.StartTimelineRecording()
def MeasurePage(self, page, tab, results):
# In current telemetry tests, all tests wait for DocumentComplete state.
#
# TODO(nduca): when crbug.com/168431 is fixed, modify the page sets to
# recognize loading as a toplevel action.
tab.StopTimelineRecording()
events = tab.timeline_model.GetAllEvents()
events_by_name = collections.defaultdict(list)
for e in events:
events_by_name[e.name].append(e)
for key, group in events_by_name.items():
times = [e.self_time_ms for e in group]
total = sum(times)
biggest_jank = max(times)
results.Add(key, 'ms', total)
results.Add(key + '_max', 'ms', biggest_jank)
results.Add(key + '_avg', 'ms', total / len(times))
| bsd-3-clause | Python |
0d81d4308ddd6255d2bc4ec83a00c458e24a3ae1 | format is not always best to use | geometalab/osmaxx,geometalab/drf-utm-zone-info,geometalab/osmaxx,geometalab/osmaxx-frontend,geometalab/osmaxx,geometalab/drf-utm-zone-info,geometalab/osmaxx-frontend,geometalab/osmaxx-frontend,geometalab/osmaxx-frontend,geometalab/osmaxx | conversion_service/converters/gis_converter/extract/db_to_format/extract.py | conversion_service/converters/gis_converter/extract/db_to_format/extract.py | import subprocess
import os
from converters.converter_settings import OSMAXX_CONVERSION_SERVICE
FORMATS = {
'fgdb': {
'ogr_name': 'FileGDB',
'extension': '.gdb',
'extraction_options': [],
},
'gpkg': {
'ogr_name': 'GPKG',
'extension': '.gpkg',
'extraction_options': [],
},
'shp': {
'ogr_name': 'ESRI Shapefile',
'extension': '.shp',
'extraction_options': [],
},
'spatialite': {
'ogr_name': 'SQLite',
'extension': '.sqlite',
'extraction_options': ['-dsco', 'SPATIALITE=YES', '-nlt', 'GEOMETRY'] # FIXME: Remove or change -nlt because of geometry reading problems
},
}
def extract_to(to_format, output_dir, base_filename):
conversion_service_settings = OSMAXX_CONVERSION_SERVICE
db_name = conversion_service_settings['GIS_CONVERSION_DB_NAME']
db_user = conversion_service_settings['GIS_CONVERSION_DB_USER']
db_pass = conversion_service_settings['GIS_CONVERSION_DB_PASSWORD']
to_format_options = FORMATS[to_format]
extraction_options = to_format_options['extraction_options']
ogr_name = to_format_options['ogr_name']
extension = to_format_options['extension']
output_path = os.path.join(output_dir, base_filename + extension)
ogr2ogr_command = [
'ogr2ogr', '-f', str(ogr_name), output_path,
'PG:dbname={dbname} user={user} password={password} schemas=view_osmaxx'.format(
dbname=db_name,
user=db_user,
password=db_pass,
),
]
ogr2ogr_command += extraction_options
print(subprocess.check_output(ogr2ogr_command))
return output_path
| import subprocess
import os
from converters.converter_settings import OSMAXX_CONVERSION_SERVICE
FORMATS = {
'fgdb': {
'ogr_name': 'FileGDB',
'extension': '.gdb',
'extraction_options': [],
},
'gpkg': {
'ogr_name': 'GPKG',
'extension': '.gpkg',
'extraction_options': [],
},
'shp': {
'ogr_name': 'ESRI Shapefile',
'extension': '.shp',
'extraction_options': [],
},
'spatialite': {
'ogr_name': 'SQLite',
'extension': '.sqlite',
'extraction_options': ['-dsco', 'SPATIALITE=YES', '-nlt', 'GEOMETRY'] # FIXME: Remove or change -nlt because of geometry reading problems
},
}
def extract_to(to_format, output_dir, base_filename):
conversion_service_settings = OSMAXX_CONVERSION_SERVICE
db_name = conversion_service_settings['GIS_CONVERSION_DB_NAME']
db_user = conversion_service_settings['GIS_CONVERSION_DB_USER']
db_pass = conversion_service_settings['GIS_CONVERSION_DB_PASSWORD']
to_format_options = FORMATS[to_format]
extraction_options = to_format_options['extraction_options']
ogr_name = to_format_options['ogr_name']
extension = to_format_options['extension']
output_path = os.path.join(output_dir, base_filename + extension)
ogr2ogr_command = [
'ogr2ogr', '-f', '{0}'.format(ogr_name), output_path,
'PG:dbname={dbname} user={user} password={password} schemas=view_osmaxx'.format(
dbname=db_name,
user=db_user,
password=db_pass,
),
]
ogr2ogr_command += extraction_options
print(subprocess.check_output(ogr2ogr_command))
return output_path
| mit | Python |
8aa1bfeb8182b5c4d2cb07c090a93264d343b413 | Bump to version 0.42.4 | reubano/tabutils,reubano/tabutils,reubano/meza,reubano/tabutils,reubano/meza,reubano/meza | meza/__init__.py | meza/__init__.py | #!/usr/bin/env python
# -*- coding: utf-8 -*-
# vim: sw=4:ts=4:expandtab
"""
meza
~~~~
Provides methods for reading and processing data from tabular formatted files
Attributes:
CURRENCIES [tuple(unicode)]: Currency symbols to remove from decimal
strings.
ENCODING (str): Default file encoding.
DEFAULT_DATETIME (obj): Default datetime object
"""
from datetime import datetime as dt
from os import path as p
__version__ = '0.42.4'
__title__ = 'meza'
__package_name__ = 'meza'
__author__ = 'Reuben Cummings'
__description__ = 'A Python toolkit for processing tabular data'
__email__ = 'reubano@gmail.com'
__license__ = 'MIT'
__copyright__ = 'Copyright 2015 Reuben Cummings'
CURRENCIES = ('$', '£', '€')
ENCODING = 'utf-8'
DEFAULT_DATETIME = dt(9999, 12, 31, 0, 0, 0)
BOM = '\ufeff'
PARENT_DIR = p.abspath(p.dirname(p.dirname(__file__)))
DATA_DIR = p.join(PARENT_DIR, 'data', 'test')
| #!/usr/bin/env python
# -*- coding: utf-8 -*-
# vim: sw=4:ts=4:expandtab
"""
meza
~~~~
Provides methods for reading and processing data from tabular formatted files
Attributes:
CURRENCIES [tuple(unicode)]: Currency symbols to remove from decimal
strings.
ENCODING (str): Default file encoding.
DEFAULT_DATETIME (obj): Default datetime object
"""
from datetime import datetime as dt
from os import path as p
__version__ = '0.42.3'
__title__ = 'meza'
__package_name__ = 'meza'
__author__ = 'Reuben Cummings'
__description__ = 'A Python toolkit for processing tabular data'
__email__ = 'reubano@gmail.com'
__license__ = 'MIT'
__copyright__ = 'Copyright 2015 Reuben Cummings'
CURRENCIES = ('$', '£', '€')
ENCODING = 'utf-8'
DEFAULT_DATETIME = dt(9999, 12, 31, 0, 0, 0)
BOM = '\ufeff'
PARENT_DIR = p.abspath(p.dirname(p.dirname(__file__)))
DATA_DIR = p.join(PARENT_DIR, 'data', 'test')
| mit | Python |
38cfbf4a98300d5489e4d03584a44e77610e483f | Bump version | l04m33/moses,l04m33/moses | moses/version.py | moses/version.py | __version__ = '0.10.0'
__all__ = ['__version__']
| __version__ = '0.9.0'
__all__ = ['__version__']
| mit | Python |
92c864bd43277ab4290b2d143eb7dc8708f6d878 | Add width parameter to figure and figsize utilities. | tonysyu/mpltools,matteoicardi/mpltools | mpltools/util.py | mpltools/util.py | import matplotlib.pyplot as plt
__all__ = ['figure', 'figsize']
def figure(aspect_ratio=1.3, scale=1, width=None, *args, **kwargs):
"""Return matplotlib figure window.
Parameters
----------
aspect_ratio : float
Aspect ratio, width / height, of figure.
scale : float
Scale default size of the figure.
width : float
Figure width in inches. If None, default to rc parameters.
See Also
--------
`figsize`
"""
assert 'figsize' not in kwargs
size = figsize(aspect_ratio=aspect_ratio, scale=scale, width=width)
return plt.figure(figsize=size, *args, **kwargs)
def figsize(aspect_ratio=1.3, scale=1, width=None):
"""Return figure size (width, height) in inches.
Parameters
----------
aspect_ratio : float
Aspect ratio, width / height, of figure.
scale : float
Scale default size of the figure.
width : float
Figure width in inches. If None, default to rc parameters.
"""
if width is None:
width, h = plt.rcParams['figure.figsize']
height = width / aspect_ratio
size = (width * scale, height * scale)
return size
| import matplotlib.pyplot as plt
__all__ = ['figure', 'figsize']
def figure(aspect_ratio=1.3, scale=1, *args, **kwargs):
"""Return matplotlib figure window.
Parameters
----------
aspect_ratio : float
Aspect ratio, width / height, of figure.
scale : float
Scale default size of the figure.
See Also
--------
`figsize`
"""
assert 'figsize' not in kwargs
size = figsize(aspect_ratio=aspect_ratio, scale=scale)
return plt.figure(figsize=size, *args, **kwargs)
def figsize(aspect_ratio=1.3, scale=1):
"""Return figure size (width, height) in inches.
Parameters
----------
aspect_ratio : float
Aspect ratio, width / height, of figure.
scale : float
Scale default size of the figure.
"""
width, h = plt.rcParams['figure.figsize']
height = width / aspect_ratio
size = (width * scale, height * scale)
return size
| bsd-3-clause | Python |
10e3ff7c77e80f8680d44713332bfcf9745f7320 | fix typo in _compat | jmgc/myhdl-numeric,jmgc/myhdl-numeric,jmgc/myhdl-numeric | myhdl/_compat.py | myhdl/_compat.py | import sys
PY2 = sys.version_info[0] == 2
if not PY2:
string_types = (str, unicode)
integer_types = (int,)
long = int
import builtins
else:
string_types = (str,)
integer_types = (int, long)
long = long
import __builtin__ as builtins
| import sys
PY2 = sys.version_info[0] == 2
if not PY2:
string_types = (str, unicode)
integer_types = (int,)
long = int
import builtins
else:
str_types = (str,)
integer_types = (int, long)
long = long
import __builtin__ as builtins
| lgpl-2.1 | Python |
f604979e94fab59eb1b422d4e62ad62d3360c2ac | Use admin interface by default | on-server/on-server-api,on-server/on-server-api | onserver/urls.py | onserver/urls.py | # -*- coding: utf-8 -*-
"""onserver URL Configuration
The `urlpatterns` list routes URLs to views. For more information please see:
https://docs.djangoproject.com/en/1.10/topics/http/urls/
Examples:
Function views
1. Add an import: from my_app import views
2. Add a URL to urlpatterns: url(r'^$', views.home, name='home')
Class-based views
1. Add an import: from other_app.views import Home
2. Add a URL to urlpatterns: url(r'^$', Home.as_view(), name='home')
Including another URLconf
1. Import the include() function: from django.conf.urls import url, include
2. Add a URL to urlpatterns: url(r'^blog/', include('blog.urls'))
"""
from django.conf.urls import url
from django.contrib import admin
urlpatterns = [
url(r'^', admin.site.urls),
]
| # -*- coding: utf-8 -*-
"""onserver URL Configuration
The `urlpatterns` list routes URLs to views. For more information please see:
https://docs.djangoproject.com/en/1.10/topics/http/urls/
Examples:
Function views
1. Add an import: from my_app import views
2. Add a URL to urlpatterns: url(r'^$', views.home, name='home')
Class-based views
1. Add an import: from other_app.views import Home
2. Add a URL to urlpatterns: url(r'^$', Home.as_view(), name='home')
Including another URLconf
1. Import the include() function: from django.conf.urls import url, include
2. Add a URL to urlpatterns: url(r'^blog/', include('blog.urls'))
"""
from django.conf.urls import url
from django.contrib import admin
urlpatterns = [
url(r'^admin/', admin.site.urls),
]
| mit | Python |
11843011c8cacc4295c944ff98054d32b12edf08 | Print out only ball data | vickenty/ookoobah,vickenty/ookoobah,vickenty/ookoobah,vickenty/ookoobah,vickenty/ookoobah | ookoobah/core.py | ookoobah/core.py | class Grid(dict):
pass
class Block(object):
def act(self, ball):
raise NotImplemented
class Wall(Block):
def act(self, ball):
ball.direction = (
-ball.direction[0],
-ball.direction[1],
)
class Mirror(Block):
SLOPE_BACKWARD = 1
SLOPE_FORWARD = -1
def __init__(self, slope=SLOPE_BACKWARD):
self.slope = slope
def act(self, ball):
ball.direction = (
ball.direction[1] * self.slope,
ball.direction[0] * self.slope,
)
class Ball(object):
DIR_RIGHT = (1, 0)
DIR_DOWN = (0, 1)
DIR_LEFT = (-1, 0)
DIR_UP = (0, -1)
def __init__(self, direction=DIR_RIGHT, pos=(0, 0)):
self.direction = direction
self.pos = pos
pass
def __str__(self):
return "<Ball: pos=%s, direction=%s>" % (self.pos, self.direction)
def move(self):
self.pos = (
self.pos[0] + self.direction[0],
self.pos[1] + self.direction[1],
)
class Game(object):
def __init__(self):
self.step_n = 0
self.grid = Grid()
self.ball = Ball()
def __str__(self):
return "<Game: step_n=%s, grid=%s, ball=%s>" % (self.step_n, self.grid, self.ball)
def step(self):
self.ball.move()
block = self.grid.get(self.ball.pos)
if block:
block.act(self.ball)
self.step_n += 1
if __name__ == "__main__":
game = Game()
game.grid[2, 0] = Mirror()
game.grid[3, 0] = Wall()
for n in range(10):
print game.ball
game.step()
| class Grid(dict):
pass
class Block(object):
def act(self, ball):
raise NotImplemented
class Wall(Block):
def act(self, ball):
ball.direction = (
-ball.direction[0],
-ball.direction[1],
)
class Mirror(Block):
SLOPE_BACKWARD = 1
SLOPE_FORWARD = -1
def __init__(self, slope=SLOPE_BACKWARD):
self.slope = slope
def act(self, ball):
ball.direction = (
ball.direction[1] * self.slope,
ball.direction[0] * self.slope,
)
class Ball(object):
DIR_RIGHT = (1, 0)
DIR_DOWN = (0, 1)
DIR_LEFT = (-1, 0)
DIR_UP = (0, -1)
def __init__(self, direction=DIR_RIGHT, pos=(0, 0)):
self.direction = direction
self.pos = pos
pass
def __str__(self):
return "<Ball: pos=%s, direction=%s>" % (self.pos, self.direction)
def move(self):
self.pos = (
self.pos[0] + self.direction[0],
self.pos[1] + self.direction[1],
)
class Game(object):
def __init__(self):
self.step_n = 0
self.grid = Grid()
self.ball = Ball()
def __str__(self):
return "<Game: step_n=%s, grid=%s, ball=%s>" % (self.step_n, self.grid, self.ball)
def step(self):
self.ball.move()
block = self.grid.get(self.ball.pos)
if block:
block.act(self.ball)
self.step_n += 1
if __name__ == "__main__":
game = Game()
game.grid[2, 0] = Mirror()
game.grid[3, 0] = Wall()
for n in range(10):
print game
game.step()
| mit | Python |
77c1d778856874b09b087b4d90fd9ac35163340c | include aliased internal_council_id in station object | DemocracyClub/UK-Polling-Stations,DemocracyClub/UK-Polling-Stations,chris48s/UK-Polling-Stations,chris48s/UK-Polling-Stations,chris48s/UK-Polling-Stations,DemocracyClub/UK-Polling-Stations | polling_stations/api/pollingstations.py | polling_stations/api/pollingstations.py | from rest_framework.decorators import list_route
from rest_framework.mixins import ListModelMixin
from rest_framework.serializers import CharField
from rest_framework.viewsets import GenericViewSet
from rest_framework_gis.serializers import GeoFeatureModelSerializer
from pollingstations.models import PollingStation
class PollingStationSerializer(GeoFeatureModelSerializer):
station_id = CharField(source='internal_council_id', read_only=True)
class Meta:
model = PollingStation
geo_field = 'location'
fields = ('council', 'station_id', 'postcode', 'address', 'location')
class PollingStationViewSet(GenericViewSet, ListModelMixin):
queryset = PollingStation.objects.all()
serializer_class = PollingStationSerializer
@list_route(url_path='geo')
def geo(self, request, format=None):
return self.list(request, format=None)
| from rest_framework.decorators import list_route
from rest_framework.mixins import ListModelMixin
from rest_framework.viewsets import GenericViewSet
from rest_framework_gis.serializers import GeoFeatureModelSerializer
from pollingstations.models import PollingStation
class PollingStationSerializer(GeoFeatureModelSerializer):
class Meta:
model = PollingStation
geo_field = 'location'
fields = ('council', 'postcode', 'address', 'location')
class PollingStationViewSet(GenericViewSet, ListModelMixin):
queryset = PollingStation.objects.all()
serializer_class = PollingStationSerializer
@list_route(url_path='geo')
def geo(self, request, format=None):
return self.list(request, format=None)
| bsd-3-clause | Python |
97650cec66dc5779fac38d127b8c9449f1fc5eaf | Add a script for cleaning crawled price data. | eliangcs/pystock-crawler,hsd315/pystock-crawler | scripts/cleanup.py | scripts/cleanup.py | #!/usr/bin/env python
import argparse
import csv
def parse_args():
parser = argparse.ArgumentParser(description='Clean up the crawled CSV file.')
parser.add_argument('data_type', metavar='DATA_TYPE', type=unicode,
choices=('reports', 'prices'),
help="what's in the input file, 'reports' or 'prices'?")
parser.add_argument('input_file', metavar='INPUT_FILE', type=unicode,
help='input CSV file')
parser.add_argument('-o', metavar='OUTPUT_FILE', type=unicode,
help='output CSV file, overwrite INPUT_FILE if not specified')
return parser.parse_args()
def parse_csv(file_path):
with open(file_path, 'rb') as f:
reader = csv.reader(f)
headers = reader.next()
for row in reader:
item = {}
for i, value in enumerate(row):
header = headers[i]
item[header] = value
yield item
def item_cmp_report(a, b):
cmp_sym = cmp(a['symbol'], b['symbol'])
if cmp_sym == 0:
return cmp(a['end_date'], b['end_date'])
return cmp_sym
def item_cmp_price(a, b):
cmp_sym = cmp(a['symbol'], b['symbol'])
if cmp_sym == 0:
return cmp(a['date'], b['date'])
return cmp_sym
def dict_to_list(a, keys):
result = []
for k in keys:
result.append(a[k])
return result
def write_csv(items, file_path, headers):
with open(file_path, 'wb') as f:
writer = csv.writer(f)
writer.writerow(headers)
for item in items:
row = dict_to_list(item, headers)
writer.writerow(row)
CMPS = {
'reports': item_cmp_report,
'prices': item_cmp_price
}
HEADERS = {
'reports': (
'symbol', 'doc_type', 'amend', 'end_date', 'period_focus',
'revenues', 'net_income', 'eps_basic', 'eps_diluted', 'dividend',
'assets', 'cash', 'equity'
),
'prices': (
'symbol', 'date', 'open', 'high', 'low', 'close', 'volume', 'adj_close'
)
}
def main():
args = parse_args()
headers = HEADERS[args.data_type]
items = parse_csv(args.input_file)
items = sorted(items, cmp=CMPS[args.data_type])
write_csv(items, args.o or args.input_file, headers)
if __name__ == '__main__':
main()
| #!/usr/bin/env python
import argparse
import csv
def parse_args():
parser = argparse.ArgumentParser(description='Clean up the crawled CSV file.')
parser.add_argument('input_file', metavar='INPUT_FILE', type=unicode,
help='input CSV file')
parser.add_argument('-o', metavar='OUTPUT_FILE', type=unicode,
help='output CSV file, overwrite INPUT_FILE if not specified')
return parser.parse_args()
def parse_csv(file_path):
with open(file_path, 'rb') as f:
reader = csv.reader(f)
headers = reader.next()
for row in reader:
item = {}
for i, value in enumerate(row):
header = headers[i]
item[header] = value
yield item
def item_cmp(a, b):
cmp_sym = cmp(a['symbol'], b['symbol'])
if cmp_sym == 0:
return cmp(a['end_date'], b['end_date'])
return cmp_sym
def sort_items(items):
return sorted(items, cmp=item_cmp)
def dict_to_list(a, keys):
result = []
for k in keys:
result.append(a[k])
return result
def write_csv(items, file_path):
headers = ['symbol', 'doc_type', 'amend', 'end_date', 'period_focus',
'revenues', 'net_income', 'eps_basic', 'eps_diluted', 'dividend',
'assets', 'cash', 'equity']
with open(file_path, 'wb') as f:
writer = csv.writer(f)
writer.writerow(headers)
for item in items:
row = dict_to_list(item, headers)
writer.writerow(row)
def main():
args = parse_args()
items = parse_csv(args.input_file)
items = sort_items(items)
write_csv(items, args.o or args.input_file)
if __name__ == '__main__':
main()
| mit | Python |
1268477f607adee2da29f74a07c3420d64b47d92 | print calendars | ndd365/showup,ndd365/showup,ndd365/showup | scrapers/print_calendars.py | scrapers/print_calendars.py | from apiclient.discovery import build
from httplib2 import Http
from oauth2client import file, client, tools
from oauth2client.service_account import ServiceAccountCredentials
import pprint
scopes = 'https://www.googleapis.com/auth/calendar'
credentials = ServiceAccountCredentials.from_json_keyfile_name(
'client_secret.json', scopes)
http_auth = credentials.authorize(Http())
CAL = build('calendar', 'v3', http=credentials.authorize(Http()))
def print_calendars():
page_token = None
while True:
calendar_list = CAL.calendarList().list(pageToken=page_token).execute()
for calendar_list_entry in calendar_list['items']:
pprint.pprint(calendar_list_entry)
page_token = calendar_list.get('nextPageToken')
if not page_token:
break
print_calendars() | mit | Python | |
de844267e778df896bc536b98b123e27d9934feb | load key from env | piccolbo/rightload,piccolbo/rightload | feature_extraction.py | feature_extraction.py | from content_extraction import get_text
from basilica import Connection
from joblib import Memory
from nltk.data import load as nltk_load
from numpy import array
from os import environ
_sent_detector = nltk_load("tokenizers/punkt/english.pickle")
_memory = Memory(cachedir="feature-cache-basilica", verbose=1, bytes_limit=10 ** 9)
_memory.reduce_size()
@_memory.cache(ignore=["entry"])
def entry2mat(entry, url):
return _text2mat(get_text(entry=entry, url=url))
def url2mat(url):
return entry2mat(None, url)
def text2sentences(text, max_sentences=300): # limit to cap latency
return _sent_detector.tokenize(text.strip())[:max_sentences]
def _text2mat(text):
sentences = text2sentences(text)
if len(sentences) == 0:
raise FailedExtraction
bkey = environ["BASILICA_KEY"]
assert bkey
if sentences:
with Connection(bkey) as conn:
return array(list(conn.embed_sentences(sentences)))
else:
raise FailedExtraction
class FailedExtraction(Exception):
pass
| from content_extraction import get_text
from basilica import Connection
from joblib import Memory
from nltk.data import load as nltk_load
from numpy import array
_sent_detector = nltk_load("tokenizers/punkt/english.pickle")
_memory = Memory(cachedir="feature-cache-basilica", verbose=1, bytes_limit=10 ** 9)
_memory.reduce_size()
@_memory.cache(ignore=["entry"])
def entry2mat(entry, url):
return _text2mat(get_text(entry=entry, url=url))
def url2mat(url):
return entry2mat(None, url)
def text2sentences(text, max_sentences=300): # limit to cap latency
return _sent_detector.tokenize(text.strip())[:max_sentences]
def _text2mat(text):
sentences = text2sentences(text)
if len(sentences) == 0:
raise FailedExtraction
bkey = "" # your basilica key here
if sentences:
with Connection(bkey) as conn:
return array(list(conn.embed_sentences(sentences)))
else:
raise FailedExtraction
class FailedExtraction(Exception):
pass
| agpl-3.0 | Python |
0db58f77e2c14eadc66403f666b39d3402461a06 | Add utils module | fedora-infra/python-fedora | fedora/tg/__init__.py | fedora/tg/__init__.py | '''
Functions and classes to help build a Fedora Service.
'''
__all__ = ('client', 'json', 'tg1utils', 'tg2utils', 'widgets',
'identity', 'utils', 'visit')
| '''
Functions and classes to help build a Fedora Service.
'''
__all__ = ('client', 'json', 'tg1utils', 'tg2utils', 'widgets',
'identity', 'visit')
| lgpl-2.1 | Python |
ab58a28f9b56a90df0d623c4381ea4e0e55373ef | extend installation script to create an empty histogram in there is not already a histogram present. | benjiyo/computer_usage_statistics,benjiyo/computer_usage_statistics | installation.py | installation.py | # installation.py
# Replaces all occurences of "PATH_TO_REPOSITORY/" with the path to the current working folder.
import os
cwd = os.getcwd() # Current Working Directory
# Set up the paths
for filename in ["bash_script", "python_script.py", "print_to_terminal.py"]:
print filename
with open(filename, 'r') as myfile:
text = myfile.read()
print text
new_text = text.replace("PATH_TO_REPOSITORY", cwd)
print new_text
with open(filename, 'w') as myfile:
myfile.write(new_text)
# Initialize an empty histogram if there is none
if not os.path.isfile(cwd + '/histogram.txt'):
empty_histogram = "0\n"*24
with open('histogram.txt', 'w') as myfile:
myfile.write(empty_histogram)
| # installation.py
# Replaces all occurences of "PATH_TO_REPOSITORY/" with the path to the current working folder.
import os
cwd = os.getcwd()
for filename in ["bash_script", "python_script.py", "print_to_terminal.py"]:
print filename
with open(filename, 'r') as myfile:
text = myfile.read()
print text
new_text = text.replace("PATH_TO_REPOSITORY", cwd)
print new_text
with open(filename, 'w') as myfile:
myfile.write(new_text)
# Todo: initialize an empty histogram
| mit | Python |
e5c6b9ccd6969828c8d69c7655df71aed9cea2eb | Fix demo models. | novafloss/django-mail-factory,novafloss/django-mail-factory | demo/demo/demo_app/models.py | demo/demo/demo_app/models.py | from django.contrib.auth.models import User
from django.db import models
# Create your models here.
class Article(models.Model):
user = models.ForeignKey(User)
content = models.CharField('text', max_length=100)
| from django.contrib.auth.models import User
from django.db import models
# Create your models here.
class Article(models.Model):
user = models.ForeignKey(User)
content = models.CharField('text')
| bsd-3-clause | Python |
0241e253c68ca6862a3da26d29a649f65c27ae36 | Use compat for unicode import | Dallinger/Dallinger,jcpeterson/Dallinger,jcpeterson/Dallinger,jcpeterson/Dallinger,Dallinger/Dallinger,Dallinger/Dallinger,jcpeterson/Dallinger,Dallinger/Dallinger,Dallinger/Dallinger,jcpeterson/Dallinger | demos/chatroom/experiment.py | demos/chatroom/experiment.py | """Coordination chatroom game."""
import dallinger as dlgr
from dallinger.compat import unicode
from dallinger.config import get_config
config = get_config()
def extra_settings():
config.register('network', unicode)
config.register('n', int)
class CoordinationChatroom(dlgr.experiments.Experiment):
"""Define the structure of the experiment."""
def __init__(self, session):
"""Initialize the experiment."""
super(CoordinationChatroom, self).__init__(session)
self.experiment_repeats = 1
self.num_participants = config.get('n')
self.initial_recruitment_size = self.num_participants
self.quorum = self.num_participants
self.config = config
if not self.config.ready:
self.config.load_config()
self.setup()
def create_network(self):
"""Create a new network by reading the configuration file."""
class_ = getattr(
dlgr.networks,
self.config.get('network')
)
return class_(max_size=self.num_participants)
def info_post_request(self, node, info):
"""Run when a request to create an info is complete."""
for agent in node.neighbors():
node.transmit(what=info, to_whom=agent)
def create_node(self, participant, network):
"""Create a node for a participant."""
return dlgr.nodes.Agent(network=network, participant=participant)
| """Coordination chatroom game."""
import dallinger as dlgr
from dallinger.config import get_config
try:
unicode = unicode
except NameError: # Python 3
unicode = str
config = get_config()
def extra_settings():
config.register('network', unicode)
config.register('n', int)
class CoordinationChatroom(dlgr.experiments.Experiment):
"""Define the structure of the experiment."""
def __init__(self, session):
"""Initialize the experiment."""
super(CoordinationChatroom, self).__init__(session)
self.experiment_repeats = 1
self.num_participants = config.get('n')
self.initial_recruitment_size = self.num_participants
self.quorum = self.num_participants
self.config = config
if not self.config.ready:
self.config.load_config()
self.setup()
def create_network(self):
"""Create a new network by reading the configuration file."""
class_ = getattr(
dlgr.networks,
self.config.get('network')
)
return class_(max_size=self.num_participants)
def info_post_request(self, node, info):
"""Run when a request to create an info is complete."""
for agent in node.neighbors():
node.transmit(what=info, to_whom=agent)
def create_node(self, participant, network):
"""Create a node for a participant."""
return dlgr.nodes.Agent(network=network, participant=participant)
| mit | Python |
bfea2c419e01daffa79d8c267f09a464c88942bf | remove call to deprecated tostring method. | EichlerLab/pacbio_variant_caller,EichlerLab/pacbio_variant_caller,EichlerLab/pacbio_variant_caller,EichlerLab/pacbio_variant_caller,EichlerLab/pacbio_variant_caller | scripts/AnnotateGapBed.py | scripts/AnnotateGapBed.py | #!/usr/bin/env python
import sys
import argparse
from Bio import SeqIO
from Bio import Seq
from Bio import SeqRecord
if (len(sys.argv) < 3):
print "usage: AnnotateGapBed.py bedIn bedOut annotation.out"
sys.exit(0)
ap = argparse.ArgumentParser(description="Print gap sequences to fasta files.")
ap.add_argument("bedin", help="Input bed file.")
ap.add_argument("bedout", help="Output bed file.")
ap.add_argument("dotout", help="RepeatMasker file.out annotation file.")
ap.add_argument("maskedout", help="Masked output file.", default=None)
ap.add_argument("--seqidx", help="Index of gap sequence (6)", default=6, type=int)
args = ap.parse_args()
bedFileIn = open(args.bedin, 'r')
bedFileOut = open(args.bedout, 'w')
annotations = {}
dotoutFile = open(args.dotout, 'r')
maskedDict = {}
if (args.maskedout is not None):
maskedSequences = open(args.maskedout)
maskedDict = SeqIO.to_dict(SeqIO.parse(maskedSequences, "fasta"))
for i in range(3):
dotoutFile.readline()
for line in dotoutFile:
vals = line.split()
name = vals[4]
rep = vals[9]
pre = vals[11]
post = vals[13]
pre = int(pre.replace("(","").replace(")",""))
post = int(post.replace("(","").replace(")",""))
if (pre+post < 30):
rep = rep + ":FULL"
else:
rep = rep + ":INC"
if (name not in annotations):
annotations[name] = []
annotations[name].append(rep)
for line in bedFileIn:
vals = line.split()
name = '/'.join(vals[0:3])
if (name in annotations):
annotation = ';'.join(annotations[name])
else:
annotation = "NONE"
repeatContent = ""
if (name in maskedDict):
vals[5] = str(maskedDict[name].seq)
repeatContent = "\t{:2.2f}".format(float(vals[5].count("a") + vals[5].count("c") + vals[5].count("g") + vals[5].count("t"))/len(vals[5]))
line = '\t'.join(vals[0:args.seqidx]) + '\t' + annotation + '\t' + '\t'.join(vals[args.seqidx:]) + repeatContent + '\n'
bedFileOut.write(line)
bedFileOut.close()
| #!/usr/bin/env python
import sys
import argparse
from Bio import SeqIO
from Bio import Seq
from Bio import SeqRecord
if (len(sys.argv) < 3):
print "usage: AnnotateGapBed.py bedIn bedOut annotation.out"
sys.exit(0)
ap = argparse.ArgumentParser(description="Print gap sequences to fasta files.")
ap.add_argument("bedin", help="Input bed file.")
ap.add_argument("bedout", help="Output bed file.")
ap.add_argument("dotout", help="RepeatMasker file.out annotation file.")
ap.add_argument("maskedout", help="Masked output file.", default=None)
ap.add_argument("--seqidx", help="Index of gap sequence (6)", default=6, type=int)
args = ap.parse_args()
bedFileIn = open(args.bedin, 'r')
bedFileOut = open(args.bedout, 'w')
annotations = {}
dotoutFile = open(args.dotout, 'r')
maskedDict = {}
if (args.maskedout is not None):
maskedSequences = open(args.maskedout)
maskedDict = SeqIO.to_dict(SeqIO.parse(maskedSequences, "fasta"))
for i in range(3):
dotoutFile.readline()
for line in dotoutFile:
vals = line.split()
name = vals[4]
rep = vals[9]
pre = vals[11]
post = vals[13]
pre = int(pre.replace("(","").replace(")",""))
post = int(post.replace("(","").replace(")",""))
if (pre+post < 30):
rep = rep + ":FULL"
else:
rep = rep + ":INC"
if (name not in annotations):
annotations[name] = []
annotations[name].append(rep)
for line in bedFileIn:
vals = line.split()
name = '/'.join(vals[0:3])
if (name in annotations):
annotation = ';'.join(annotations[name])
else:
annotation = "NONE"
repeatContent = ""
if (name in maskedDict):
vals[5] = maskedDict[name].seq.tostring()
repeatContent = "\t{:2.2f}".format(float(vals[5].count("a") + vals[5].count("c") + vals[5].count("g") + vals[5].count("t"))/len(vals[5]))
line = '\t'.join(vals[0:args.seqidx]) + '\t' + annotation + '\t' + '\t'.join(vals[args.seqidx:]) + repeatContent + '\n'
bedFileOut.write(line)
bedFileOut.close()
| mit | Python |
8033b00ebbcb8e294f47ee558e76ee260ec18d2b | Remove libfreetype2, which should have been omitted and was breaking the scripts | servo/servo-org-stats,servo/servo-org-stats,servo/servo-org-stats | orglog-config.py | orglog-config.py | org = "servo"
ignore_repos = ["skia", "skia-snapshots", "cairo", "libpng", "libcss",
"libhubbub", "libparserutils", "libwapcaplet", "pixman",
"libfreetype2"]
count_forks = ["glutin","rust-openssl"]
# Path to where we'll dump the bare checkouts. Must end in /
clones_dir = "repos/"
# Path to the concatenated log
log_path = "log.txt"
# Nuke the clones_dir afterwards?
destroy_clones = True
| org = "servo"
ignore_repos = ["skia", "skia-snapshots", "cairo", "libpng", "libcss",
"libhubbub", "libparserutils", "libwapcaplet", "pixman"]
count_forks = ["glutin","rust-openssl"]
# Path to where we'll dump the bare checkouts. Must end in /
clones_dir = "repos/"
# Path to the concatenated log
log_path = "log.txt"
# Nuke the clones_dir afterwards?
destroy_clones = True
| mit | Python |
8f29d63b782e7a4c9b53210b0518af0ee95beea8 | Fix thanks to Joel for spotting it | bliksemlabs/rrrr | web-uwsgi.py | web-uwsgi.py | import uwsgi
import zmq
import struct
COMMON_HEADERS = [('Content-Type', 'text/plain'), ('Access-Control-Allow-Origin', '*'), ('Access-Control-Allow-Headers', 'Requested-With,Content-Type')]
context = zmq.Context()
def light(environ, start_response):
if environ['PATH_INFO'] in ['/favicon.ico']:
start_response('404 NOK', COMMON_HEADERS)
return ''
qstring = environ['QUERY_STRING']
if qstring == '':
start_response('406 NOK', COMMON_HEADERS)
return ''
request_bliksem = context.socket(zmq.REQ)
request_bliksem.connect("tcp://127.0.0.1:9292")
poller = zmq.Poller()
poller.register(request_bliksem, zmq.POLLIN)
request_bliksem.send(qstring)
socks = dict(poller.poll(5000))
if socks.get(request_bliksem) == zmq.POLLIN:
reply = request_bliksem.recv()
start_response('200 OK', COMMON_HEADERS + [('Content-length', str(len(reply)))])
return reply
else:
start_response('500 NOK', COMMON_HEADERS)
return ''
uwsgi.applications = {'': light}
| import uwsgi
import zmq
import struct
COMMON_HEADERS = [('Content-Type', 'text/plain'), ('Access-Control-Allow-Origin', '*'), ('Access-Control-Allow-Headers', 'Requested-With,Content-Type')]
context = zmq.Context()
def light(environ, start_response):
if environ['PATH_INFO'] in ['/favicon.ico']:
start_response('404 NOK', COMMON_HEADERS)
return ''
qstring = environ['QUERY_STRING']
if qstring == '':
start_response('406 NOK', COMMON_HEADERS)
return ''
request_bliksem = context.socket(zmq.REQ)
request_bliksem.connect("tcp://127.0.0.1:9292")
poller = zmq.Poller()
poller.register(request_bliksem, zmq.POLLIN)
request_bliksem.send(qstring)
socks = dict(poller.poll(5000))
if socks.get(request_bliksem) == zmq.POLLIN:
reply = request_bliksem.recv()
start_response('500 NOK', COMMON_HEADERS + [('Content-length', str(len(reply)))])
return reply
else:
start_response('500 NOK', COMMON_HEADERS)
return ''
uwsgi.applications = {'': light}
| bsd-2-clause | Python |
1dfff48a5ddb910b4abbcf8e477b3dda9d606a49 | Allow splitting by a particular component (by index) | bxlab/bx-python,bxlab/bx-python,bxlab/bx-python | scripts/maf_split_by_src.py | scripts/maf_split_by_src.py | #!/usr/bin/env python2.3
"""
Read a MAF from stdin and break into a set of mafs containing
no more than a certain number of columns
"""
usage = "usage: %prog"
import sys, string
import bx.align.maf
from optparse import OptionParser
import psyco_full
INF="inf"
def __main__():
# Parse command line arguments
parser = OptionParser( usage=usage )
parser.add_option( "-o", "--outprefix", action="store", default="" )
parser.add_option( "-c", "--component", action="store", default=None )
( options, args ) = parser.parse_args()
out_prefix = options.outprefix
comp = options.component
if comp is not None:
comp = int( comp )
maf_reader = bx.align.maf.Reader( sys.stdin )
writers = {}
for m in maf_reader:
if comp is None:
writer_key = string.join( [ c.src for c in m.components ], '_' )
else:
writer_key = m.components[ comp ].src
if not writers.has_key( writer_key ):
writer = bx.align.maf.Writer( file( "%s%s.maf" % ( out_prefix, writer_key ), "w" ) )
writers[ writer_key ] = writer
else:
writer = writers[ writer_key ]
writer.write( m )
for key in writers:
writers[ key ].close()
if __name__ == "__main__": __main__()
| #!/usr/bin/env python2.3
"""
Read a MAF from stdin and break into a set of mafs containing
no more than a certain number of columns
"""
usage = "usage: %prog"
import sys, string
import bx.align.maf
from optparse import OptionParser
import psyco_full
INF="inf"
def __main__():
# Parse command line arguments
parser = OptionParser( usage=usage )
parser.add_option( "-o", "--outprefix", action="store", default="" )
( options, args ) = parser.parse_args()
out_prefix = options.outprefix
maf_reader = bx.align.maf.Reader( sys.stdin )
writers = {}
for m in maf_reader:
writer_key = string.join( [ c.src for c in m.components ], '_' )
if not writers.has_key( writer_key ):
writer = bx.align.maf.Writer( file( "%s%s.maf" % ( out_prefix, writer_key ), "w" ) )
writers[ writer_key ] = writer
else:
writer = writers[ writer_key ]
writer.write( m )
for key in writers:
writers[ key ].close()
if __name__ == "__main__": __main__()
| mit | Python |
ead9192b4c2acb21df917dfe116785343e9a59a6 | Fix spec issue with Transfer::Server ProtocolDetails | cloudtools/troposphere,cloudtools/troposphere | scripts/patches/transfer.py | scripts/patches/transfer.py | patches = [
{
"op": "move",
"from": "/ResourceTypes/AWS::Transfer::Server/Properties/Protocols/ItemType",
"path": "/ResourceTypes/AWS::Transfer::Server/Properties/Protocols/PrimitiveItemType",
},
{
"op": "replace",
"path": "/ResourceTypes/AWS::Transfer::Server/Properties/Protocols/PrimitiveItemType",
"value": "String",
},
{
"op": "move",
"from": "/ResourceTypes/AWS::Transfer::User/Properties/SshPublicKeys/ItemType",
"path": "/ResourceTypes/AWS::Transfer::User/Properties/SshPublicKeys/PrimitiveItemType",
},
{
"op": "replace",
"path": "/ResourceTypes/AWS::Transfer::User/Properties/SshPublicKeys/PrimitiveItemType",
"value": "String",
},
{
"op": "move",
"from": "/PropertyTypes/AWS::Transfer::Server.ProtocolDetails/Properties/As2Transports/ItemType",
"path": "/PropertyTypes/AWS::Transfer::Server.ProtocolDetails/Properties/As2Transports/PrimitiveItemType",
},
{
"op": "replace",
"path": "/PropertyTypes/AWS::Transfer::Server.ProtocolDetails/Properties/As2Transports/PrimitiveItemType",
"value": "String",
},
]
| patches = [
{
"op": "move",
"from": "/ResourceTypes/AWS::Transfer::Server/Properties/Protocols/ItemType",
"path": "/ResourceTypes/AWS::Transfer::Server/Properties/Protocols/PrimitiveItemType",
},
{
"op": "replace",
"path": "/ResourceTypes/AWS::Transfer::Server/Properties/Protocols/PrimitiveItemType",
"value": "String",
},
{
"op": "move",
"from": "/ResourceTypes/AWS::Transfer::User/Properties/SshPublicKeys/ItemType",
"path": "/ResourceTypes/AWS::Transfer::User/Properties/SshPublicKeys/PrimitiveItemType",
},
{
"op": "replace",
"path": "/ResourceTypes/AWS::Transfer::User/Properties/SshPublicKeys/PrimitiveItemType",
"value": "String",
},
]
| bsd-2-clause | Python |
4fe19797ba2fb12239ae73da60bb3e726b23ffe9 | Fix bug in admin user editing | uppercounty/uppercounty,uppercounty/uppercounty,uppercounty/uppercounty | web/forms.py | web/forms.py | from django.contrib.auth.forms import UserCreationForm, UserChangeForm
from .models import UniqueEmailUser
class UniqueEmailUserCreationForm(UserCreationForm):
"""
A form that creates a UniqueEmailUser.
"""
class Meta:
model = UniqueEmailUser
fields = ("email",)
class UniqueEmailUserChangeForm(UserChangeForm):
"""
A form for updating a UniqueEmailUser.
"""
class Meta:
model = UniqueEmailUser
fields = ("email",)
| from django.contrib.auth.forms import UserCreationForm, UserChangeForm
from .models import UniqueEmailUser
class UniqueEmailUserCreationForm(UserCreationForm):
"""
A form that creates a UniqueEmailUser.
"""
def __init__(self, *args, **kargs):
super(UniqueEmailUserCreationForm, self).__init__(*args, **kargs)
del self.fields['username']
class Meta:
model = UniqueEmailUser
fields = ("email",)
class UniqueEmailUserChangeForm(UserChangeForm):
"""
A form for updating a UniqueEmailUser.
"""
def __init__(self, *args, **kargs):
super(UniqueEmailUserChangeForm, self).__init__(*args, **kargs)
del self.fields['username']
class Meta:
model = UniqueEmailUser
fields = ("email",)
| mit | Python |
735c39128f42220bfd5fd6a5d4320530e561c08f | increase version | flux3dp/fluxghost,flux3dp/fluxghost,flux3dp/fluxghost,flux3dp/fluxghost | fluxghost/__init__.py | fluxghost/__init__.py |
__version__ = "0.5b7"
DEBUG = False
|
__version__ = "0.5b6"
DEBUG = False
| agpl-3.0 | Python |
1c14cc322e7b972f95e1b4ff181f934388bf0e41 | Fix debug output for SSLMiddleware X-Forwarded-Proto | openstack/murano,openstack/murano | murano/api/middleware/ssl.py | murano/api/middleware/ssl.py | # Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from oslo_config import cfg
from oslo_log import log as logging
from murano.common import wsgi
ssl_middleware_opts = [
cfg.StrOpt('secure_proxy_ssl_header',
default='X-Forwarded-Proto',
help="The HTTP Header that will be used to determine which "
"the original request protocol scheme was, even if it was "
"removed by an SSL terminator proxy.")
]
cfg.CONF.register_opts(ssl_middleware_opts)
LOG = logging.getLogger(__name__)
class SSLMiddleware(wsgi.Middleware):
"""Replaces the request wsgi.url_scheme environment
A middleware that replaces the request wsgi.url_scheme environment
variable with the value of HTTP header configured in
secure_proxy_ssl_header if exists in the incoming request.
This is useful if the server is behind a SSL termination proxy.
"""
@classmethod
def factory(cls, global_conf, **local_conf):
def filter(app):
return cls(app)
return filter
def __init__(self, application):
super(SSLMiddleware, self).__init__(application)
self.secure_proxy_ssl_header = 'HTTP_{0}'.format(
cfg.CONF.secure_proxy_ssl_header.upper().replace('-', '_'))
def process_request(self, req):
url_scheme = req.environ['wsgi.url_scheme']
req.environ['wsgi.url_scheme'] = req.environ.get(
self.secure_proxy_ssl_header, req.environ['wsgi.url_scheme'])
if url_scheme != req.environ['wsgi.url_scheme']:
LOG.debug('Changed url_scheme from {0} to {1}'.format(
url_scheme, req.environ['wsgi.url_scheme']))
| # Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from oslo_config import cfg
from oslo_log import log as logging
from murano.common import wsgi
ssl_middleware_opts = [
cfg.StrOpt('secure_proxy_ssl_header',
default='X-Forwarded-Proto',
help="The HTTP Header that will be used to determine which "
"the original request protocol scheme was, even if it was "
"removed by an SSL terminator proxy.")
]
cfg.CONF.register_opts(ssl_middleware_opts)
LOG = logging.getLogger(__name__)
class SSLMiddleware(wsgi.Middleware):
"""Replaces the request wsgi.url_scheme environment
A middleware that replaces the request wsgi.url_scheme environment
variable with the value of HTTP header configured in
secure_proxy_ssl_header if exists in the incoming request.
This is useful if the server is behind a SSL termination proxy.
"""
@classmethod
def factory(cls, global_conf, **local_conf):
def filter(app):
return cls(app)
return filter
def __init__(self, application):
super(SSLMiddleware, self).__init__(application)
self.secure_proxy_ssl_header = 'HTTP_{0}'.format(
cfg.CONF.secure_proxy_ssl_header.upper().replace('-', '_'))
def process_request(self, req):
LOG.debug('Default url_scheme: {0}. {1}: {2}'.format(
req.environ['wsgi.url_scheme'], self.secure_proxy_ssl_header,
req.environ.get(self.secure_proxy_ssl_header)))
req.environ['wsgi.url_scheme'] = req.environ.get(
self.secure_proxy_ssl_header, req.environ['wsgi.url_scheme'])
| apache-2.0 | Python |
5cfd242d7f67f920830f0b525cd058804f15467d | Add error CanNotDetermineEndPointIP | Fiware/ops.Fuel-main-dev,stackforge/fuel-main,eayunstack/fuel-main,ddepaoli3/fuel-main-dev,zhaochao/fuel-web,AnselZhangGit/fuel-main,SmartInfrastructures/fuel-web-dev,prmtl/fuel-web,SmartInfrastructures/fuel-main-dev,SergK/fuel-main,koder-ua/nailgun-fcert,nebril/fuel-web,huntxu/fuel-web,AnselZhangGit/fuel-main,prmtl/fuel-web,stackforge/fuel-main,nebril/fuel-web,koder-ua/nailgun-fcert,SergK/fuel-main,dancn/fuel-main-dev,Fiware/ops.Fuel-main-dev,prmtl/fuel-web,stackforge/fuel-web,koder-ua/nailgun-fcert,huntxu/fuel-main,ddepaoli3/fuel-main-dev,stackforge/fuel-main,Fiware/ops.Fuel-main-dev,huntxu/fuel-web,zhaochao/fuel-main,AnselZhangGit/fuel-main,zhaochao/fuel-web,prmtl/fuel-web,koder-ua/nailgun-fcert,huntxu/fuel-main,SmartInfrastructures/fuel-main-dev,huntxu/fuel-main,eayunstack/fuel-web,teselkin/fuel-main,nebril/fuel-web,SergK/fuel-main,zhaochao/fuel-main,teselkin/fuel-main,stackforge/fuel-web,stackforge/fuel-web,zhaochao/fuel-main,zhaochao/fuel-web,SmartInfrastructures/fuel-web-dev,dancn/fuel-main-dev,eayunstack/fuel-web,dancn/fuel-main-dev,dancn/fuel-main-dev,eayunstack/fuel-web,nebril/fuel-web,eayunstack/fuel-main,huntxu/fuel-web,nebril/fuel-web,huntxu/fuel-web,zhaochao/fuel-main,prmtl/fuel-web,eayunstack/fuel-web,Fiware/ops.Fuel-main-dev,SmartInfrastructures/fuel-main-dev,SmartInfrastructures/fuel-web-dev,eayunstack/fuel-web,teselkin/fuel-main,SmartInfrastructures/fuel-web-dev,teselkin/fuel-main,eayunstack/fuel-main,zhaochao/fuel-web,ddepaoli3/fuel-main-dev,ddepaoli3/fuel-main-dev,AnselZhangGit/fuel-main,huntxu/fuel-web,zhaochao/fuel-web,SmartInfrastructures/fuel-main-dev,zhaochao/fuel-main,SmartInfrastructures/fuel-web-dev | nailgun/nailgun/errors/__init__.py | nailgun/nailgun/errors/__init__.py | # -*- coding: utf-8 -*-
# Copyright 2013 Mirantis, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from nailgun.logger import logger
from nailgun.errors.base import NailgunException
default_messages = {
# common errors
"InvalidData": "Invalid data received",
"AlreadyExists": "Object already exists",
# node discovering errors
"InvalidInterfacesInfo": "Invalid interfaces info",
"InvalidMetadata": "Invalid metadata specified for node",
# deployment errors
"DeploymentAlreadyStarted": "Deployment already started",
"DeletionAlreadyStarted": "Environment removal already started",
"FailedProvisioning": "Failed to start provisioning",
"WrongNodeStatus": "Wrong node status",
"NodeOffline": "Node is offline",
"NotEnoughControllers": "Not enough controllers",
# disk errors
"NotEnoughFreeSpace": "Not enough free space",
# network errors
"AdminNetworkNotFound": "Admin network info not found",
"InvalidNetworkAccess": "Invalid network access",
"AssignIPError": "Failed to assign IP to node",
"NetworkCheckError": "Network checking failed",
"OutOfVLANs": "Not enough available VLAN IDs",
"OutOfIPs": "Not enough free IP addresses in pool",
"NoSuitableCIDR": "Cannot find suitable CIDR",
"CanNotFindInterface": "Cannot find interface",
"CanNotDetermineEndPointIP": "Cannot determine end point IP",
# plugin errors
"PluginDownloading": "Cannot install plugin",
"PluginInitialization": "Cannot initialize plugin",
# unknown
"UnknownError": "Unknown error"
}
class ErrorFactory(object):
def __init__(self):
for name, msg in default_messages.iteritems():
setattr(self, name, self._build_exc(name, msg))
def _build_exc(self, name, msg):
return type(
name,
(NailgunException,),
{
"message": msg
}
)
def __getattr__(self, name):
return self._build_exc(name, default_messages["UnknownError"])
errors = ErrorFactory()
| # -*- coding: utf-8 -*-
# Copyright 2013 Mirantis, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from nailgun.logger import logger
from nailgun.errors.base import NailgunException
default_messages = {
# common errors
"InvalidData": "Invalid data received",
"AlreadyExists": "Object already exists",
# node discovering errors
"InvalidInterfacesInfo": "Invalid interfaces info",
"InvalidMetadata": "Invalid metadata specified for node",
# deployment errors
"DeploymentAlreadyStarted": "Deployment already started",
"DeletionAlreadyStarted": "Environment removal already started",
"FailedProvisioning": "Failed to start provisioning",
"WrongNodeStatus": "Wrong node status",
"NodeOffline": "Node is offline",
"NotEnoughControllers": "Not enough controllers",
# disk errors
"NotEnoughFreeSpace": "Not enough free space",
# network errors
"AdminNetworkNotFound": "Admin network info not found",
"InvalidNetworkAccess": "Invalid network access",
"AssignIPError": "Failed to assign IP to node",
"NetworkCheckError": "Network checking failed",
"OutOfVLANs": "Not enough available VLAN IDs",
"OutOfIPs": "Not enough free IP addresses in pool",
"NoSuitableCIDR": "Cannot find suitable CIDR",
"CanNotFindInterface": "Cannot find interface",
# plugin errors
"PluginDownloading": "Cannot install plugin",
"PluginInitialization": "Cannot initialize plugin",
# unknown
"UnknownError": "Unknown error"
}
class ErrorFactory(object):
    """Expose each ``default_messages`` entry as an exception class.

    Each attribute (e.g. ``errors.InvalidData``) is a dynamically created
    subclass of :class:`NailgunException` whose ``message`` is the default
    message for that name.  Unknown attribute names still yield a usable
    exception class carrying the "UnknownError" message.
    """

    def __init__(self):
        # .items() works on both Python 2 and 3 (iteritems() is py2-only).
        for name, msg in default_messages.items():
            setattr(self, name, self._build_exc(name, msg))

    def _build_exc(self, name, msg):
        """Create a NailgunException subclass called *name* with *msg*."""
        return type(
            name,
            (NailgunException,),
            {
                "message": msg
            }
        )

    def __getattr__(self, name):
        # Fallback for names absent from default_messages; only invoked
        # when normal attribute lookup fails.
        return self._build_exc(name, default_messages["UnknownError"])
errors = ErrorFactory()
| apache-2.0 | Python |
f1e015555f26b083238551e87f14b988b6a25083 | bump version 2.16.9 | saydulk/newfies-dialer,romonzaman/newfies-dialer,Star2Billing/newfies-dialer,Star2Billing/newfies-dialer,newfies-dialer/newfies-dialer,romonzaman/newfies-dialer,saydulk/newfies-dialer,newfies-dialer/newfies-dialer,saydulk/newfies-dialer,saydulk/newfies-dialer,newfies-dialer/newfies-dialer,Star2Billing/newfies-dialer,romonzaman/newfies-dialer,Star2Billing/newfies-dialer,romonzaman/newfies-dialer,newfies-dialer/newfies-dialer | newfies/newfies_dialer/__init__.py | newfies/newfies_dialer/__init__.py | # -*- coding: utf-8 -*-
#
# Newfies-Dialer License
# http://www.newfies-dialer.org
#
# This Source Code Form is subject to the terms of the Mozilla Public
# License, v. 2.0. If a copy of the MPL was not distributed with this file,
# You can obtain one at http://mozilla.org/MPL/2.0/.
#
# Copyright (C) 2011-2015 Star2Billing S.L.
#
# The primary maintainer of this project is
# Arezqui Belaid <info@star2billing.com>
#
# :copyright: (c) 2011-2015 by Arezqui Belaid.
# :license: MPL 2.0, see COPYING for more details.
# Package metadata (PEP 396 style dunder attributes).
__version__ = "2.16.9"
__author__ = "Arezqui Belaid"
__contact__ = "info@star2billing.com"
__homepage__ = "http://www.newfies-dialer.org"
__docformat__ = "restructuredtext"
| # -*- coding: utf-8 -*-
#
# Newfies-Dialer License
# http://www.newfies-dialer.org
#
# This Source Code Form is subject to the terms of the Mozilla Public
# License, v. 2.0. If a copy of the MPL was not distributed with this file,
# You can obtain one at http://mozilla.org/MPL/2.0/.
#
# Copyright (C) 2011-2015 Star2Billing S.L.
#
# The primary maintainer of this project is
# Arezqui Belaid <info@star2billing.com>
#
# :copyright: (c) 2011-2015 by Arezqui Belaid.
# :license: MPL 2.0, see COPYING for more details.
# Package metadata (PEP 396 style dunder attributes).
__version__ = "2.16.8"
__author__ = "Arezqui Belaid"
__contact__ = "info@star2billing.com"
__homepage__ = "http://www.newfies-dialer.org"
__docformat__ = "restructuredtext"
| mpl-2.0 | Python |
871dd9e2b17bf45a30f04767555620f9dfd0f511 | Allow measurement channels to be specified for tomography | rmcgurrin/PyQLab,Plourde-Research-Lab/PyQLab,BBN-Q/PyQLab,calebjordan/PyQLab | QGL/Tomography.py | QGL/Tomography.py | '''
Helper functions for adding tomography routines.
Copyright 2013 Raytheon BBN Technologies
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
'''
from itertools import product
import operator
from PulsePrimitives import *
def create_tomo_blocks(qubits, numPulses, alignment='parallel'):
    '''
    Helper function to create the tomography pulse block in either parallel or serial.

    Returns one pulse block per element of the cartesian product of the
    tomography pulse set over all qubits (numPulses ** len(qubits) blocks).
    NOTE(review): the `alignment` parameter is currently unused in this body.
    '''
    #Tomography pulse sets
    if numPulses == 4:
        tomoSet = [Id, X90, Y90, X]
    elif numPulses == 6:
        tomoSet = [Id, X90, X90m, Y90, Y90m, X]
    else:
        raise ValueError("Only able to handle numPulses=4 or 6")
    #Create all combinations of pulses for the number of qubits
    return [reduce(operator.mul, [p(q) for p,q in zip(pulseSet, qubits)]) for pulseSet in product(tomoSet, repeat=len(qubits))]
def state_tomo(seq, qubits, numPulses=4, measChans=None):
    '''
    Apply state tomography readout pulses and measurement.

    Parameters
    -----------
    seq : a single entry list sequence to perform tomography on
    qubits : which qubits to act on
    numPulses : number of readout pulses
    measChans : tuple of measurement channels to readout (defaults to individual qubit channels)
    '''
    if measChans is None:
        measChans = qubits
    # One output sequence per tomography readout block, each ending in a
    # simultaneous measurement of all requested channels.
    return [seq + [tomoBlock, MEAS(*measChans)]
            for tomoBlock in create_tomo_blocks(qubits, numPulses)]
def process_tomo(seq, qubits, numPulses=4, measChans=None):
    '''
    Apply process tomography state prep. and readout pulses and measurement.

    Parameters
    -----------
    seq : a single entry list sequence to perform tomography on
    qubits : which qubits to act on
    numPulses : number of prep/readout pulses
    measChans : tuple of measurement channels to readout (defaults to individual qubit channels)
    '''
    if measChans is None:
        measChans = qubits
    # Cartesian product: every preparation block paired with every readout block.
    return [[prepBlock] + seq + [readoutBlock, MEAS(*measChans)]
            for prepBlock, readoutBlock in product(create_tomo_blocks(qubits, numPulses), repeat=2)]
| '''
Helper functions for adding tomography routines.
Copyright 2013 Raytheon BBN Technologies
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
'''
from itertools import product
import operator
from PulsePrimitives import *
def create_tomo_blocks(qubits, numPulses, alignment='parallel'):
    '''
    Helper function to create the tomography pulse block in either parallel or serial.

    Returns one pulse block per element of the cartesian product of the
    tomography pulse set over all qubits (numPulses ** len(qubits) blocks).
    NOTE(review): the `alignment` parameter is currently unused in this body.
    '''
    #Tomography pulse sets
    if numPulses == 4:
        tomoSet = [Id, X90, Y90, X]
    elif numPulses == 6:
        tomoSet = [Id, X90, X90m, Y90, Y90m, X]
    else:
        raise ValueError("Only able to handle numPulses=4 or 6")
    #Create all combinations of pulses for the number of qubits
    return [reduce(operator.mul, [p(q) for p,q in zip(pulseSet, qubits)]) for pulseSet in product(tomoSet, repeat=len(qubits))]
def state_tomo(seq, qubits, numPulses=4):
    '''
    Apply state tomography readout pulses and measurement.

    Parameters
    -----------
    seq : a single entry list sequence to perform tomography on
    qubits: which qubits to act on
    numPulses: number of readout pulses
    '''
    # One output sequence per tomography readout block, ending in a
    # simultaneous measurement of all qubits.
    return [seq + [tomoBlock, MEAS(*qubits)]
            for tomoBlock in create_tomo_blocks(qubits, numPulses)]
def process_tomo(seq, qubits, numPulses=4):
    '''
    Apply process tomography state prep. and readout pulses and measurement.

    Parameters
    -----------
    seq : a single entry list sequence to perform tomography on
    qubits: which qubits to act on
    numPulses: number of prep/readout pulses
    '''
    # Cartesian product: every preparation block paired with every readout block.
    return [[prepBlock] + seq + [readoutBlock, MEAS(*qubits)]
            for prepBlock, readoutBlock in product(create_tomo_blocks(qubits, numPulses), repeat=2)]
| apache-2.0 | Python |
ef537ff146661ce16775f6c2abe97a010af24bc7 | return response on send | conbus/fbmq,conbus/fbmq | fbmq/__init__.py | fbmq/__init__.py | __version__ = '2.0.1'
from .fbmq import *
from . import attachment as Attachment
from . import template as Template | __version__ = '2.0.0'
from .fbmq import *
from . import attachment as Attachment
from . import template as Template | mit | Python |
7b941ece0edd086f8bca21fd76ddcb882f7ae028 | Add missing import | scriptotek/otsrdflib | otsrdflib/ots.py | otsrdflib/ots.py | from rdflib.plugins.serializers.turtle import TurtleSerializer
from rdflib.namespace import Namespace, FOAF, SKOS, RDF
from rdflib import BNode
import re
class OrderedTurtleSerializer(TurtleSerializer):
    """Turtle serializer that emits subjects in a stable order:
    grouped by RDF class (``topClasses``), then sorted by the integer
    formed from the digits embedded in each subject's URI."""

    short_name = "ots"

    def __init__(self, store):
        super(OrderedTurtleSerializer, self).__init__(store)
        SD = Namespace('http://www.w3.org/ns/sparql-service-description#')
        ISOTHES = Namespace('http://purl.org/iso25964/skos-thes#')
        # Order of classes:
        self.topClasses = [SKOS.ConceptScheme,
                           FOAF.Organization,
                           SD.Service,
                           SD.Dataset,
                           SD.Graph,
                           SD.NamedGraph,
                           ISOTHES.ThesaurusArray,
                           SKOS.Concept]
        # Order of instances: compare the integers built from each URI's
        # digits; fall back to plain string comparison when either key is 0.
        # (cmp() and list.sort(cmpfunc) are Python 2 only.)
        # NOTE(review): a subject URI containing no digits at all would make
        # int('') raise ValueError here -- confirm inputs always have digits.
        def compare(x, y):
            x2 = int(re.sub(r'[^0-9]', '', x))
            y2 = int(re.sub(r'[^0-9]', '', y))
            if x2 == 0 or y2 == 0:
                return cmp(x, y)
            else:
                return cmp(x2, y2)
        self.sortFunction = compare

    def orderSubjects(self):
        """Return all subjects: members of topClasses first (in class
        order), then remaining subjects collected by the base class."""
        seen = {}
        subjects = []
        for classURI in self.topClasses:
            members = list(self.store.subjects(RDF.type, classURI))
            members.sort(self.sortFunction)
            for member in members:
                subjects.append(member)
                self._topLevels[member] = True
                seen[member] = True
        # Remaining subjects: blank nodes sort after URI refs via the tuple key.
        recursable = [
            (isinstance(subject, BNode),
             self._references[subject], subject)
            for subject in self._subjects if subject not in seen]
        recursable.sort()
        subjects.extend([subject for (isbnode, refs, subject) in recursable])
        return subjects
| from rdflib.plugins.serializers.turtle import TurtleSerializer
from rdflib.namespace import Namespace, FOAF, SKOS, RDF
from rdflib import BNode
class OrderedTurtleSerializer(TurtleSerializer):
    """Turtle serializer that emits subjects in a stable order:
    grouped by RDF class (``topClasses``), then sorted by the integer
    formed from the digits embedded in each subject's URI."""

    short_name = "ots"

    def __init__(self, store):
        super(OrderedTurtleSerializer, self).__init__(store)
        SD = Namespace('http://www.w3.org/ns/sparql-service-description#')
        ISOTHES = Namespace('http://purl.org/iso25964/skos-thes#')
        # Order of classes:
        self.topClasses = [SKOS.ConceptScheme,
                           FOAF.Organization,
                           SD.Service,
                           SD.Dataset,
                           SD.Graph,
                           SD.NamedGraph,
                           ISOTHES.ThesaurusArray,
                           SKOS.Concept]
        # Order of instances: compare the integers built from each URI's
        # digits; fall back to plain string comparison when either key is 0.
        # NOTE(review): `re` is used below but is not imported at the top of
        # this version of the module -- confirm the import exists.
        def compare(x, y):
            x2 = int(re.sub(r'[^0-9]', '', x))
            y2 = int(re.sub(r'[^0-9]', '', y))
            if x2 == 0 or y2 == 0:
                return cmp(x, y)
            else:
                return cmp(x2, y2)
        self.sortFunction = compare

    def orderSubjects(self):
        """Return all subjects: members of topClasses first (in class
        order), then remaining subjects collected by the base class."""
        seen = {}
        subjects = []
        for classURI in self.topClasses:
            members = list(self.store.subjects(RDF.type, classURI))
            members.sort(self.sortFunction)
            for member in members:
                subjects.append(member)
                self._topLevels[member] = True
                seen[member] = True
        # Remaining subjects: blank nodes sort after URI refs via the tuple key.
        recursable = [
            (isinstance(subject, BNode),
             self._references[subject], subject)
            for subject in self._subjects if subject not in seen]
        recursable.sort()
        subjects.extend([subject for (isbnode, refs, subject) in recursable])
        return subjects
| mit | Python |
53972cb11ae4825b2ae7b59b2a31d2d89d253cb5 | Add test cases | derekmpham/interview-prep,derekmpham/interview-prep | fib-seq-recur.py | fib-seq-recur.py | # Implement fibonacci sequence function using recursion
def get_fib(position):
    """Return the Fibonacci number at index `position`, computed recursively."""
    # fib(0) == 0 and fib(1) == 1, so indices below 2 are their own answer;
    # every other value is the sum of the two preceding Fibonacci numbers.
    return position if position < 2 else get_fib(position - 1) + get_fib(position - 2)
# test cases (Python 2 print statements); expected values in trailing comments
print get_fib(9) # returns 34
print get_fib(11) # returns 89
print get_fib(0) # returns 0
| # Implement fibonacci sequence function using recursion
def get_fib(position):
    """Recursively compute the Fibonacci number at the given position."""
    if position < 2:
        # Base cases: fib(0) == 0 and fib(1) == 1.
        return position
    # General case: sum of the two preceding Fibonacci numbers.
    return get_fib(position - 1) + get_fib(position - 2)
| mit | Python |
8448a83aba82379be96b135a835572ac2853665a | Update example so it works under Python 3. | Kami/python-yubico-client | demo/example.py | demo/example.py | import sys
from yubico_client import Yubico
from yubico_client import yubico_exceptions
from yubico_client.py3 import PY3
# Python 3 removed raw_input(); alias it so the rest of the script is 2/3 agnostic.
if PY3:
    raw_input = input
# Collect the verification parameters interactively.
client_id = raw_input('Enter your client id: ')
secret_key = raw_input('Enter your secret key (optional): ')
use_https = raw_input('Use secure connection (https)? [y/n]: ')
token = raw_input('Enter OTP token: ')
# An empty secret key means "no signature verification".
if not secret_key:
    secret_key = None
if use_https == 'n':
    https = False
else:
    https = True
client = Yubico(client_id, secret_key, https)
try:
    status = client.verify(token)
# sys.exc_info() is used instead of "except ... as e" to keep a single
# code path working under both Python 2 and 3.
except yubico_exceptions.InvalidClientIdError:
    e = sys.exc_info()[1]
    print('Client with id %s does not exist' % (e.client_id))
    sys.exit(1)
except yubico_exceptions.SignatureVerificationError:
    print('Signature verification failed')
    sys.exit(1)
except yubico_exceptions.StatusCodeError:
    e = sys.exc_info()[1]
    print('Negative status code was returned: %s' % (e.status_code))
    sys.exit(1)
if status:
    print('Success, the provided OTP is valid')
else:
    print('No response from the servers or received other negative '
          'status code')
| import sys
from yubico_client import Yubico
from yubico_client import yubico_exceptions
# Python 2 demo script: uses raw_input, print statements and the
# "except ExcType, e" syntax.
client_id = raw_input('Enter your client id: ')
secret_key = raw_input('Enter your secret key (optional): ')
use_https = raw_input('Use secure connection (https)? [y/n]: ')
token = raw_input('Enter OTP token: ')
# An empty secret key means "no signature verification".
if not secret_key:
    secret_key = None
if use_https == 'n':
    https = False
else:
    https = True
client = Yubico(client_id, secret_key, https)
try:
    status = client.verify(token)
except yubico_exceptions.InvalidClientIdError, e:
    print 'Client with id %s does not exist' % (e.client_id)
    sys.exit(1)
except yubico_exceptions.SignatureVerificationError:
    print 'Signature verification failed'
    sys.exit(1)
except yubico_exceptions.StatusCodeError, e:
    print 'Negative status code was returned: %s' % (e.status_code)
    sys.exit(1)
if status:
    print 'Success, the provided OTP is valid'
else:
    print 'No response from the servers or received other negative status code'
| bsd-3-clause | Python |
89225ed0c7ec627ee32fd973d5f1fb95da173be2 | Remove pointless `_cache` attribute on MemcacheLock class. | potatolondon/djangae,potatolondon/djangae | djangae/contrib/locking/memcache.py | djangae/contrib/locking/memcache.py | import random
import time
from datetime import datetime
from django.core.cache import cache
class MemcacheLock(object):
    """Advisory lock built on the cache backend's atomic add()."""

    def __init__(self, identifier, unique_value):
        # identifier: cache key naming the lock.
        # unique_value: random token proving our ownership on release().
        self.identifier = identifier
        self.unique_value = unique_value

    @classmethod
    def acquire(cls, identifier, wait=True, steal_after_ms=None):
        """Try to take the lock.

        Returns a MemcacheLock on success, or None when `wait` is False and
        the lock is already held.  With `steal_after_ms` set, the lock is
        forcibly taken after waiting that many milliseconds.
        """
        start_time = datetime.utcnow()
        unique_value = random.randint(1, 100000)
        while True:
            # cache.add() is atomic: it only succeeds if the key is unset.
            acquired = cache.add(identifier, unique_value)
            if acquired:
                return cls(identifier, unique_value)
            elif not wait:
                return None
            else:
                # We are waiting for the lock
                if steal_after_ms and (datetime.utcnow() - start_time).total_seconds() * 1000 > steal_after_ms:
                    # Steal anyway
                    cache.set(identifier, unique_value)
                    return cls(identifier, unique_value)
                time.sleep(0)

    def release(self):
        # Delete the key if it was ours. There is a race condition here
        # if something steals the lock between the if and the delete...
        if cache.get(self.identifier) == self.unique_value:
            cache.delete(self.identifier)
| import random
import time
from datetime import datetime
from django.core.cache import cache
class MemcacheLock(object):
    """Advisory lock built on the cache backend's atomic add()."""

    def __init__(self, identifier, cache, unique_value):
        # NOTE(review): the `cache` parameter shadows the module-level
        # django.core.cache.cache import; the call sites below always pass
        # that same module-level object, so storing it adds nothing.
        self.identifier = identifier
        self._cache = cache
        self.unique_value = unique_value

    @classmethod
    def acquire(cls, identifier, wait=True, steal_after_ms=None):
        """Try to take the lock; returns a MemcacheLock, or None when
        `wait` is False and the lock is already held."""
        start_time = datetime.utcnow()
        unique_value = random.randint(1, 100000)
        while True:
            # cache.add() is atomic: it only succeeds if the key is unset.
            acquired = cache.add(identifier, unique_value)
            if acquired:
                return cls(identifier, cache, unique_value)
            elif not wait:
                return None
            else:
                # We are waiting for the lock
                if steal_after_ms and (datetime.utcnow() - start_time).total_seconds() * 1000 > steal_after_ms:
                    # Steal anyway
                    cache.set(identifier, unique_value)
                    return cls(identifier, cache, unique_value)
                time.sleep(0)

    def release(self):
        cache = self._cache
        # Delete the key if it was ours. There is a race condition here
        # if something steals the lock between the if and the delete...
        if cache.get(self.identifier) == self.unique_value:
            cache.delete(self.identifier)
| bsd-3-clause | Python |
8709d89e78d224beb6b86c689492ab303602b8ed | Handle authentication of non-existant users, see ticket #384. | galaxor/Nodewatcher,galaxor/Nodewatcher,galaxor/Nodewatcher,galaxor/Nodewatcher | nodewatcher/wlanlj/account/auth.py | nodewatcher/wlanlj/account/auth.py | from django.contrib.auth.models import User
from django.contrib.auth.models import check_password
from crypt import crypt
if crypt('', '$1$DIF16...$Xzh7aN9GPHrZPK9DgggUK/') != '$1$DIF16...$Xzh7aN9GPHrZPK9DgggUK/':
# crypt does not support MD5 hashed passwords, we will use Python implementation
from md5crypt import unix_md5_crypt
crypt = unix_md5_crypt
class CryptBackend:
    """Django auth backend checking crypt()-hashed passwords.

    Passwords still stored in Django's own hash format are transparently
    re-hashed with crypt() on the first successful login.
    """

    def authenticate(self, username = None, password = None):
        """
        Authenticate against the database using OpenBSD's blowfish crypt
        function.

        Returns the User on success; None for unknown users, wrong
        passwords, malformed hashes or inactive accounts.
        """
        try:
            user = User.objects.get(username = username)
            if check_password(password, user.password):
                # Successfully checked password in Django password format, so we can change it to crypt format
                salt = '$1$' + User.objects.make_random_password(8)
                user.password = crypt(password, salt)
                user.save()
        except ValueError:
            # Malformed hash during migration; fall through to the crypt
            # comparison below.
            pass
        except User.DoesNotExist:
            # Unknown username: plain authentication failure.
            return None
        try:
            if crypt(password, user.password) == user.password and user.is_active:
                return user
            else:
                return None
        except User.DoesNotExist:
            return None
        except ValueError:
            # Malformed stored hash.
            return None

    def get_user(self, user_id):
        """
        Translates the user id into a User object.
        """
        try:
            return User.objects.get(pk = user_id)
        except User.DoesNotExist:
            return None
| from django.contrib.auth.models import User
from django.contrib.auth.models import check_password
from crypt import crypt
if crypt('', '$1$DIF16...$Xzh7aN9GPHrZPK9DgggUK/') != '$1$DIF16...$Xzh7aN9GPHrZPK9DgggUK/':
# crypt does not support MD5 hashed passwords, we will use Python implementation
from md5crypt import unix_md5_crypt
crypt = unix_md5_crypt
class CryptBackend:
    """Django auth backend checking crypt()-hashed passwords.

    Passwords still stored in Django's own hash format are transparently
    re-hashed with crypt() on the first successful login.
    """

    def authenticate(self, username = None, password = None):
        """
        Authenticate against the database using OpenBSD's blowfish crypt
        function.

        Returns the User on success; None for unknown users, wrong
        passwords, malformed hashes or inactive accounts.
        """
        try:
            user = User.objects.get(username = username)
            if check_password(password, user.password):
                # Successfully checked password in Django password format, so we can change it to crypt format
                salt = '$1$' + User.objects.make_random_password(8)
                user.password = crypt(password, salt)
                user.save()
        except ValueError:
            pass
        except User.DoesNotExist:
            # BUGFIX: User.objects.get() was previously unguarded here, so a
            # non-existent username raised DoesNotExist out of authenticate()
            # instead of being treated as a normal authentication failure.
            return None
        try:
            if crypt(password, user.password) == user.password and user.is_active:
                return user
            else:
                return None
        except User.DoesNotExist:
            return None
        except ValueError:
            return None

    def get_user(self, user_id):
        """
        Translates the user id into a User object.
        """
        try:
            return User.objects.get(pk = user_id)
        except User.DoesNotExist:
            return None
| agpl-3.0 | Python |
13de116871c24e0a462299c7466305d1aff9772b | Fix tab/space mess | johnner/mercurial-jira-commit-message-hook | jirakeycheck.py | jirakeycheck.py | #coding: utf-8
import re
# Mercurial hook return-value convention: True makes the hook fail
# (transaction rejected), False lets it succeed.
BAD_COMMIT = True
OK = False
def checkCommitMessage(ui, repo, **kwargs):
    """
    Checks commit message for matching commit rule:
    Every commit message must include JIRA issue key
    Example:
    PRJ-42 - added meaning of life
    Include this hook in .hg/hgrc
    [hooks]
    pretxncommit.jirakeycheck = python:/path/to/jirakeycheck.py:checkCommitMessage

    Returns True (BAD_COMMIT) so Mercurial rejects the transaction, or
    False (OK) to accept it.
    """
    # In a pretxncommit hook, 'tip' is the commit being checked.
    hg_commit_message = repo['tip'].description()
    if(checkMessage(hg_commit_message) == False):
        printUsage(ui)
        #reject commit transaction
        return BAD_COMMIT
    else:
        return OK
def checkAllCommitMessage(ui, repo, node, **kwargs):
    """
    For push: checks commit messages for all incoming commits
    [hooks]
    pretxnchangegroup.jirakeycheckall = python:/path/to/jirakeycheck.py:checkAllCommitMessage

    `node` is the first changeset of the incoming group; every revision
    from it through tip is checked.  (xrange: Python 2 module.)
    """
    for rev in xrange(repo[node].rev(), len(repo)):
        message = repo[rev].description()
        if(checkMessage(message) == False):
            ui.warn("Revision "+str(rev)+" commit message:["+message+"] | JIRA issue key is not set\n")
            printUsage(ui)
            #reject
            return BAD_COMMIT
    return OK
def checkMessage(msg):
    """Return True when `msg` starts with a configured JIRA issue-key
    prefix ("JIRAPROJ-<n> - " or "JIRAPROJ2-<n> - "), else False."""
    #HERE you can set your JIRA Project Names
    pattern = re.compile('^(JIRAPROJ-\d+|JIRAPROJ2-\d+) - ')
    return pattern.search(msg) is not None
def printUsage(ui):
    """Write the required commit-message format to the Mercurial UI."""
    for line in (
        '=====\n',
        'Commit message must have JIRA issue key\n',
        'Example:\n',
        'JIRAPRO-42 - the answer to life, universe and everything \n',
        '=====\n',
    ):
        ui.warn(line)
| #coding: utf-8
import re
# Mercurial hook return-value convention: True makes the hook fail
# (transaction rejected), False lets it succeed.
BAD_COMMIT = True
OK = False
def checkCommitMessage(ui, repo, **kwargs):
    """
    Checks commit message for matching commit rule:
    Every commit message must include JIRA issue key
    Example:
    PRJ-42 - added meaning of life
    Include this hook in .hg/hgrc
    [hooks]
    pretxncommit.jirakeycheck = python:/path/to/jirakeycheck.py:checkCommitMessage

    Returns True (BAD_COMMIT) so Mercurial rejects the transaction, or
    False (OK) to accept it.
    """
    # In a pretxncommit hook, 'tip' is the commit being checked.
    hg_commit_message = repo['tip'].description()
    if(checkMessage(hg_commit_message) == False):
        printUsage(ui)
        #reject commit transaction
        return BAD_COMMIT
    else:
        return OK
def checkAllCommitMessage(ui, repo, node, **kwargs):
    """
    For push: checks commit messages for all incoming commits
    [hooks]
    pretxnchangegroup.jirakeycheckall = python:/path/to/jirakeycheck.py:checkAllCommitMessage

    `node` is the first changeset of the incoming group; every revision
    from it through tip is checked.  (xrange: Python 2 module.)
    """
    for rev in xrange(repo[node].rev(), len(repo)):
        message = repo[rev].description()
        if(checkMessage(message) == False):
            ui.warn("Revision "+str(rev)+" commit message:["+message+"] | JIRA issue key is not set\n")
            printUsage(ui)
            #reject
            return BAD_COMMIT
    return OK
def checkMessage(msg):
    """True if `msg` begins with a recognised JIRA key prefix such as
    "JIRAPROJ-123 - " or "JIRAPROJ2-45 - "; False otherwise."""
    #HERE you can set your JIRA Project Names
    key_prefix = re.compile('^(JIRAPROJ-\d+|JIRAPROJ2-\d+) - ')
    return bool(key_prefix.search(msg))
def printUsage(ui):
    # Print the expected commit-message format to the Mercurial UI.
    ui.warn('=====\n')
    ui.warn('Commit message must have JIRA issue key\n')
    ui.warn('Example:\n')
    ui.warn('JIRAPRO-42 - the answer to life, universe and everything \n')
    ui.warn('=====\n')
| mit | Python |
73d96935e04ef3c75536cf8ba273ab00e951b1a8 | Make the simple demo jump around less. | jhartford/pybo,mwhoffman/pybo | demos/simple.py | demos/simple.py | import numpy as np
import matplotlib.pyplot as pl
import pygp as pg
import pybo.models as pbm
import pybo.policies as pbp
def run_model(Model, sn, ell, sf, T):
    """Run Thompson-sampling Bayesian optimization on `Model` for T steps,
    plotting the GP posterior and acquisition index each iteration.

    sn/ell/sf are the BasicGP hyperparameters (noise, lengthscale, signal).
    """
    model = Model(0.2)
    gp = pg.BasicGP(sn, ell, sf)
    policy = pbp.Thompson(gp, model.bounds)
    xmin = model.bounds[0,0]
    xmax = model.bounds[0,1]
    # Dense 1d grid (as a column vector) used only for plotting the index.
    X = np.linspace(xmin, xmax, 200)[:, None]
    # Start at the middle of the search interval.
    x = (xmax-xmin) / 2 + xmin
    for i in xrange(T):
        # draw=False: defer rendering until the index curve is added below.
        pg.gpplot(policy.gp, xmin=xmin, xmax=xmax, draw=False)
        pl.plot(X, policy.get_index(X), lw=2)
        pl.axvline(x, color='r')
        pl.axis('tight')
        pl.axis(xmin=xmin, xmax=xmax)
        pl.draw()
        # Observe the model at x, update the policy, pick the next point.
        y = model.get_data(x)
        policy.add_data(x, y)
        x = policy.get_next()
if __name__ == '__main__':
    # Alternative demo model: run_model(pbm.Sinusoidal, 0.2, 0.70, 1.25, 100)
    run_model(pbm.Gramacy, 0.2, 0.05, 1.25, 100)
| import numpy as np
import matplotlib.pyplot as pl
import pygp as pg
import pybo.models as pbm
import pybo.policies as pbp
def run_model(Model, sn, ell, sf, T):
    """Run Thompson-sampling Bayesian optimization on `Model` for T steps,
    plotting the GP posterior and acquisition index each iteration.

    sn/ell/sf are the BasicGP hyperparameters (noise, lengthscale, signal).
    """
    model = Model(0.2)
    gp = pg.BasicGP(sn, ell, sf)
    policy = pbp.Thompson(gp, model.bounds)
    xmin = model.bounds[0,0]
    xmax = model.bounds[0,1]
    # Dense 1d grid (as a column vector) used only for plotting the index.
    X = np.linspace(xmin, xmax, 200)[:, None]
    # Start at the middle of the search interval.
    x = (xmax-xmin) / 2 + xmin
    for i in xrange(T):
        pg.gpplot(policy.gp, xmin=xmin, xmax=xmax)
        pl.plot(X, policy.get_index(X), lw=2)
        pl.axvline(x, color='r')
        pl.axis('tight')
        pl.axis(xmin=xmin, xmax=xmax)
        pl.draw()
        # Observe the model at x, update the policy, pick the next point.
        y = model.get_data(x)
        policy.add_data(x, y)
        x = policy.get_next()
if __name__ == '__main__':
    # Alternative demo model: run_model(pbm.Sinusoidal, 0.2, 0.70, 1.25, 100)
    run_model(pbm.Gramacy, 0.2, 0.05, 1.25, 100)
| bsd-2-clause | Python |
9cb668e41fc3a240dd6e1c0d625dc7f6e38e14d2 | Change deploy to project path | timsavage/denim | denim/system.py | denim/system.py | # -*- encoding:utf8 -*-
from fabric.api import env, settings, hide
from fabric.contrib import files
from denim import paths, utils
def user_exists(user=None):
    """
    Check if a user exists.
    :param user: name of the user to check; defaults to the deploy_user.
    :return: True when `id -u <user>` exits 0 on the remote host.
    """
    if not user:
        user = env.deploy_user
    # warn_only: a missing user makes `id` exit non-zero; that exit status
    # is the answer we want, not a fabric error.
    with settings(
        hide('warnings', 'running', 'stdout', 'stderr'),
        warn_only=True
    ):
        result = utils.run_as('id -u %s' % user)
    return result.return_code == 0
def create_system_user(user=None, home=None):
    """
    Create a system user.
    If user already exists will ignore the operation.
    :param user: name of the user to create; defaults to the deploy_user.
    :param home: path to home directory of user; defaults to the project
    path (paths.project_path()).
    :return: True if user is created; else False to indicate user already
    exists.
    """
    if user is None:
        user = env.deploy_user
    if not user_exists(user):
        # --system: no password / login shell; home is set explicitly.
        utils.run_as('adduser --system --quiet --home %(home)s %(user)s' % {
            'home': home if home else paths.project_path(),
            'user': user
        }, use_sudo=True)
        return True
    else:
        return False
def change_owner(path, recursive=False, user=None):
    """
    Change the owner of a path.
    :param path: to change owner of.
    :param recursive: if the path references a folder recurs through all sub
    folders.
    :param user: name of the user to make owner; defaults to the deploy_user.
    """
    if user is None:
        user = env.deploy_user
    # "user." (trailing dot): chown also sets the group to user's login group.
    utils.run_as('chown %s %s. %s' % ('-R' if recursive else '', user, path), use_sudo=True)
def create_symlink(target_path, link_path, replace_existing=True, *args, **kwargs):
    """
    Create a symlink on remote server.
    :param target_path: target of symlink.
    :param link_path: location of symlink.
    :param replace_existing: overwrite an existing symlink; else fail if link
    already exists.
    Extra arguments are forwarded to utils.run_as (and to remove_file when
    replacing an existing link).
    """
    if replace_existing:
        remove_file(link_path, *args, **kwargs)
    utils.run_as('ln -s "%s" "%s"' % (target_path, link_path), *args, **kwargs)
def remove_file(path, *args, **kwargs):
    """
    Remove a file from remote server.
    :param path: to file.
    :return: True if file existed and was removed else False
    """
    # Check existence first so removing a missing file is not an error.
    if not files.exists(path):
        return False
    utils.run_as('rm "%s"' % path, *args, **kwargs)
    return True
| # -*- encoding:utf8 -*-
from fabric.api import env, settings, hide
from fabric.contrib import files
from denim import paths, utils
def user_exists(user=None):
    """
    Check if a user exists.
    :param user: name of the user to check; defaults to the deploy_user.
    :return: True when `id -u <user>` exits 0 on the remote host.
    """
    if not user:
        user = env.deploy_user
    # warn_only: a missing user makes `id` exit non-zero; that exit status
    # is the answer we want, not a fabric error.
    with settings(
        hide('warnings', 'running', 'stdout', 'stderr'),
        warn_only=True
    ):
        result = utils.run_as('id -u %s' % user)
    return result.return_code == 0
def create_system_user(user=None, home=None):
    """
    Create a system user.
    If user already exists will ignore the operation.
    :param user: name of the user to create; defaults to the deploy_user.
    :param home: path to home directory of user; defaults to deploy_path.
    :return: True if user is created; else False to indicate user already
    exists.
    """
    if user is None:
        user = env.deploy_user
    if not user_exists(user):
        # --system: no password / login shell; home is set explicitly.
        utils.run_as('adduser --system --quiet --home %(home)s %(user)s' % {
            'home': home if home else paths.deploy_path(),
            'user': user
        }, use_sudo=True)
        return True
    else:
        return False
def change_owner(path, recursive=False, user=None):
    """
    Change the owner of a path.
    :param path: to change owner of.
    :param recursive: if the path references a folder recurs through all sub
    folders.
    :param user: name of the user to make owner; defaults to the deploy_user.
    """
    if user is None:
        user = env.deploy_user
    # "user." (trailing dot): chown also sets the group to user's login group.
    utils.run_as('chown %s %s. %s' % ('-R' if recursive else '', user, path), use_sudo=True)
def create_symlink(target_path, link_path, replace_existing=True, *args, **kwargs):
    """
    Create a symlink on remote server.
    :param target_path: target of symlink.
    :param link_path: location of symlink.
    :param replace_existing: overwrite an existing symlink; else fail if link
    already exists.
    Extra arguments are forwarded to utils.run_as (and to remove_file when
    replacing an existing link).
    """
    if replace_existing:
        remove_file(link_path, *args, **kwargs)
    utils.run_as('ln -s "%s" "%s"' % (target_path, link_path), *args, **kwargs)
def remove_file(path, *args, **kwargs):
    """
    Remove a file from remote server.
    :param path: to file.
    :return: True if file existed and was removed else False
    """
    # Check existence first so removing a missing file is not an error.
    if not files.exists(path):
        return False
    utils.run_as('rm "%s"' % path, *args, **kwargs)
    return True
| bsd-2-clause | Python |
b2431315ed3fefc8a57cb9b3c6571116024beb00 | bump version | czpython/djangocms-text-ckeditor,czpython/djangocms-text-ckeditor,yakky/djangocms-text-ckeditor,czpython/djangocms-text-ckeditor,vxsx/djangocms-text-ckeditor,vxsx/djangocms-text-ckeditor,yakky/djangocms-text-ckeditor,vxsx/djangocms-text-ckeditor,divio/djangocms-text-ckeditor,divio/djangocms-text-ckeditor,yakky/djangocms-text-ckeditor,vxsx/djangocms-text-ckeditor,divio/djangocms-text-ckeditor,divio/djangocms-text-ckeditor,czpython/djangocms-text-ckeditor | djangocms_text_ckeditor/__init__.py | djangocms_text_ckeditor/__init__.py | # -*- coding: utf-8 -*-
__version__ = "2.8.0"  # package version
default_app_config = 'djangocms_text_ckeditor.apps.TextCkeditorConfig'
| # -*- coding: utf-8 -*-
__version__ = "2.7.0"  # package version
default_app_config = 'djangocms_text_ckeditor.apps.TextCkeditorConfig'
| bsd-3-clause | Python |
93d12d4e8adad57d679386342c07ff3936eb114d | Bump version to 0.2.1 | xrmx/django-skebby | django_skebby/__init__.py | django_skebby/__init__.py | __version__ = '0.2.1'
| __version__ = '0.2.0'
| bsd-3-clause | Python |
a715821c75521e25172805c98d204fc4e24a4641 | Solve Code Fights circle of numbers problem | HKuz/Test_Code | CodeFights/circleOfNumbers.py | CodeFights/circleOfNumbers.py | #!/usr/local/bin/python
# Code Fights Circle of Numbers Problem
def circleOfNumbers(n, firstNumber):
    """Return the number diametrically opposite `firstNumber` on a circle
    of the integers 0..n-1 (n is even).

    The opposite point is half a revolution (n // 2) away, wrapped modulo n.
    BUGFIX: the previous `n / 2` was float division under Python 3, so the
    result came back as a float (e.g. 7.0 instead of 7).
    """
    return (firstNumber + n // 2) % n
def main():
    """Run circleOfNumbers over known cases and report PASSED/FAILED."""
    # Each entry is [n, firstNumber, expected].
    tests = [
        [10, 2, 7],
        [10, 7, 2],
        [4, 1, 3],
        [6, 3, 0]
    ]
    for t in tests:
        res = circleOfNumbers(t[0], t[1])
        if t[2] == res:
            print("PASSED: circleOfNumbers({}, {}) returned {}"
                  .format(t[0], t[1], res))
        else:
            print("FAILED: circleOfNumbers({}, {}) returned {}, answer: {}"
                  .format(t[0], t[1], res, t[2]))
if __name__ == '__main__':
    main()  # run the smoke tests when executed as a script
| #!/usr/local/bin/python
# Code Fights Circle of Numbers Problem
def circleOfNumbers(n, firstNumber):
    """Return the number diametrically opposite `firstNumber` on a circle
    of the integers 0..n-1 (n is even).

    BUGFIX: this was an unimplemented `pass` stub.  The opposite point is
    half a revolution (n // 2) away, wrapped modulo n.
    """
    return (firstNumber + n // 2) % n
def main():
    """Run circleOfNumbers over known cases and report PASSED/FAILED."""
    # BUGFIX: the previous test data (["crazy", "dsbaz"], ["z", "a"]) was
    # left over from a string-shift problem: each entry had no t[2]
    # element (the comparison below raised IndexError) and passed strings
    # where numeric inputs are required.  Each entry is now
    # [n, firstNumber, expected].
    tests = [
        [10, 2, 7],
        [10, 7, 2],
        [4, 1, 3],
        [6, 3, 0]
    ]
    for t in tests:
        res = circleOfNumbers(t[0], t[1])
        if t[2] == res:
            print("PASSED: circleOfNumbers({}, {}) returned {}"
                  .format(t[0], t[1], res))
        else:
            print("FAILED: circleOfNumbers({}, {}) returned {}, answer: {}"
                  .format(t[0], t[1], res, t[2]))
if __name__ == '__main__':
    main()  # run the smoke tests when executed as a script
| mit | Python |
94c5e5c6531a0f000377ee18f66ffc4c25f59bc1 | extend to 100 the number of tested hdus | desihub/desispec,desihub/desispec | py/desispec/io/fiberflat_vs_humidity.py | py/desispec/io/fiberflat_vs_humidity.py | import numpy as np
import fitsio
import astropy.io.fits as fits
from desiutil.log import get_logger
from .meta import findfile
from .util import native_endian
def get_humidity(night,expid,camera) :
    """Return the humidity reading recorded for an exposure's spectrograph.

    Reads the SPECTCONS table of the raw data file and looks up the
    {B,R,Z}HUMID column for the spectrograph unit encoded in `camera`
    (e.g. "b3" -> column "BHUMID", unit 3).  Returns np.nan (with a
    warning) when the unit is absent from the table.
    """
    log=get_logger()
    raw_filename=findfile("raw",night=night,expid=expid)
    table=fitsio.read(raw_filename,"SPECTCONS")
    keyword="{}HUMID".format(camera[0].upper())
    unit=int(camera[1])
    selection=(table["unit"]==unit)
    if np.sum(selection)==0 :
        log.warning("no unit '{}' in '{}'".format(unit,raw_filename))
        return np.nan
    humidity=float(table[keyword][selection][0])
    log.debug(f"NIGHT={night} EXPID={expid} CAM={camera} HUMIDITY={humidity}")
    return humidity
def read_fiberflat_vs_humidity(filename):
    """Read fiberflat vs humidity from filename

    Args:
        filename (str): path to fiberflat_vs_humidity file

    Returns: fiberflat , humidity , wave , header
        fiberflat is 3D [nhumid, nspec, nwave]
        humidity is 1D [nhumid] (and in percent)
        wave is 1D [nwave] (and in Angstrom)
        header (fits header)
    """
    with fits.open(filename, uint=True, memmap=False) as fx:
        header = fx[0].header
        wave = native_endian(fx["WAVELENGTH"].data.astype('f8'))
        fiberflat = list()
        humidity = list()
        # Scan HDUs HUM00..HUM99; indices missing from the file are skipped.
        for index in range(100) :
            hdu="HUM{:02d}".format(index)
            if hdu not in fx : continue
            fiberflat.append(native_endian(fx[hdu].data.astype('f8')))
            humidity.append(fx[hdu].header["MEDHUM"])  # median humidity of the bin
        humidity = np.array(humidity)
        fiberflat = np.array(fiberflat)
        # Sanity-check the array shapes agree with the docstring contract.
        assert(fiberflat.shape[0] == humidity.size)
        assert(fiberflat.shape[2] == wave.size)
        return fiberflat , humidity , wave, header
| import numpy as np
import fitsio
import astropy.io.fits as fits
from desiutil.log import get_logger
from .meta import findfile
from .util import native_endian
def get_humidity(night,expid,camera) :
    """Return the humidity reading recorded for an exposure's spectrograph.

    Reads the SPECTCONS table of the raw data file and looks up the
    {B,R,Z}HUMID column for the spectrograph unit encoded in `camera`.
    Returns np.nan (with a warning) when the unit is absent.
    """
    log=get_logger()
    raw_filename=findfile("raw",night=night,expid=expid)
    table=fitsio.read(raw_filename,"SPECTCONS")
    keyword="{}HUMID".format(camera[0].upper())
    unit=int(camera[1])
    selection=(table["unit"]==unit)
    if np.sum(selection)==0 :
        log.warning("no unit '{}' in '{}'".format(unit,raw_filename))
        return np.nan
    humidity=float(table[keyword][selection][0])
    log.debug(f"NIGHT={night} EXPID={expid} CAM={camera} HUMIDITY={humidity}")
    return humidity
def read_fiberflat_vs_humidity(filename):
"""Read fiberflat vs humidity from filename
Args:
filename (str): path to fiberflat_vs_humidity file
Returns: fiberflat , humidity , wave
fiberflat is 3D [nhumid, nspec, nwave]
humidity is 1D [nhumid] (and in percent)
wave is 1D [nwave] (and in Angstrom)
header (fits header)
"""
with fits.open(filename, uint=True, memmap=False) as fx:
header = fx[0].header
wave = native_endian(fx["WAVELENGTH"].data.astype('f8'))
fiberflat = list()
humidity = list()
for index in range(20) :
hdu="HUM{:02d}".format(index)
if hdu not in fx : continue
fiberflat.append(native_endian(fx[hdu].data.astype('f8')))
humidity.append(fx[hdu].header["MEDHUM"])
humidity = np.array(humidity)
fiberflat = np.array(fiberflat)
assert(fiberflat.shape[0] == humidity.size)
assert(fiberflat.shape[2] == wave.size)
return fiberflat , humidity , wave, header
| bsd-3-clause | Python |
c906188a42125785d4de4a341eed436d659e83f1 | Bump version. | amigrave/pudb,albfan/pudb,amigrave/pudb,albfan/pudb | pudb/__init__.py | pudb/__init__.py | VERSION = "0.91.5"
CURRENT_DEBUGGER = [None]
def set_trace():
    """Start the pudb debugger at the caller's frame.

    Lazily creates the singleton Debugger on first use and stores it in
    CURRENT_DEBUGGER so later calls see the same instance.
    """
    if CURRENT_DEBUGGER[0] is None:
        from pudb.debugger import Debugger
        dbg = Debugger()
        CURRENT_DEBUGGER[0] = dbg
        import sys
        # f_back: debug the caller of set_trace(), not set_trace() itself.
        dbg.set_trace(sys._getframe().f_back)
def post_mortem(t):
    """Run an interactive post-mortem session on traceback *t*.

    Walks to the innermost frame of the traceback chain and opens the
    debugger there.
    """
    # Bug fix: Debugger was only imported locally inside set_trace(), so
    # calling post_mortem() without a prior set_trace() raised NameError.
    from pudb.debugger import Debugger
    p = Debugger()
    p.reset()
    # Step down to the frame where the exception was actually raised.
    while t.tb_next is not None:
        t = t.tb_next
    p.interaction(t.tb_frame, t)
def pm():
    """Post-mortem the most recent unhandled exception (sys.last_traceback)."""
    import sys
    post_mortem(sys.last_traceback)
if __name__ == "__main__":
print "To keep Python 2.6 happy, you now need to type 'python -m pudb.run'."
print "Sorry for the inconvenience."
| VERSION = "0.91.4"
CURRENT_DEBUGGER = [None]
def set_trace():
if CURRENT_DEBUGGER[0] is None:
from pudb.debugger import Debugger
dbg = Debugger()
CURRENT_DEBUGGER[0] = dbg
import sys
dbg.set_trace(sys._getframe().f_back)
def post_mortem(t):
p = Debugger()
p.reset()
while t.tb_next is not None:
t = t.tb_next
p.interaction(t.tb_frame, t)
def pm():
import sys
post_mortem(sys.last_traceback)
if __name__ == "__main__":
print "To keep Python 2.6 happy, you now need to type 'python -m pudb.run'."
print "Sorry for the inconvenience."
| mit | Python |
7dcc8b0946d08f0ab491311b15454d8bcd6e51e9 | fix problem when using south's orm freezer. | anentropic/django-denorm,simas/django-denorm,PetrDlouhy/django-denorm,Eksmo/django-denorm,kennknowles/django-denorm,heinrich5991/django-denorm,incuna/django-denorm,miracle2k/django-denorm,lechup/django-denorm,Kronuz/django-denorm,mjtamlyn/django-denorm,victorvde/django-denorm,gerdemb/django-denorm,larsbijl/django-denorm,initcrash/django-denorm,idahogray/django-denorm,catalanojuan/django-denorm,alex-mcleod/django-denorm,Chive/django-denorm | denorm/helpers.py | denorm/helpers.py | from django.db import models
def find_fk(from_model, to_model, foreign_key=None):
    """Return the attribute name of the ForeignKey on *from_model* that
    points at *to_model*.

    Args:
        from_model: model class holding the foreign key.
        to_model: target model class.
        foreign_key: optional field (or field name) used to disambiguate
            when several foreign keys point at the target.

    Returns:
        The FK attribute name with any trailing ``_id`` stripped, or
        ``None`` if no matching foreign key exists.

    Raises:
        ValueError: if more than one candidate foreign key is found.
    """
    if foreign_key:
        if not isinstance(foreign_key, (str, unicode)):
            foreign_key = foreign_key.attname
        # Compare by db_table rather than model identity so this also
        # works with re-created model classes (e.g. South's frozen ORM).
        fkeys = filter(lambda x: isinstance(x, models.ForeignKey)
                and x.rel.to._meta.db_table == to_model._meta.db_table
                and x.attname in [foreign_key,foreign_key+'_id'],
                from_model._meta.fields)
    else:
        fkeys = filter(lambda x: isinstance(x, models.ForeignKey)
                and x.rel.to == to_model,
                from_model._meta.fields)
    if not fkeys:
        return None
    if len(fkeys) > 1:
        raise ValueError("%s has more than one ForeignKey to %s;"
                " please specify which one to use."
                % (from_model, to_model))
    # Django stores the column attribute as "<name>_id"; return the
    # plain field name.
    if fkeys[0].attname.endswith("_id"):
        return fkeys[0].attname[:-3]
    else:
        return fkeys[0].attname
| from django.db import models
def find_fk(from_model, to_model, foreign_key=None):
if foreign_key:
if not isinstance(foreign_key, (str, unicode)):
foreign_key = foreign_key.attname
fkeys = filter(lambda x: isinstance(x, models.ForeignKey)
and x.rel.to == to_model
and x.attname in [foreign_key,foreign_key+'_id'],
from_model._meta.fields)
else:
fkeys = filter(lambda x: isinstance(x, models.ForeignKey)
and x.rel.to == to_model,
from_model._meta.fields)
if not fkeys:
return None
if len(fkeys) > 1:
raise ValueError("%s has more than one ForeignKey to %s;"
" please specify which one to use."
% (from_model, to_model))
if fkeys[0].attname.endswith("_id"):
return fkeys[0].attname[:-3]
else:
return fkeys[0].attname
| bsd-3-clause | Python |
167afdb96c4cc89109b26cf9793938b143578b47 | fix problem | h2rd/ppxml | pxml/__init__.py | pxml/__init__.py | #!/usr/bin/env python
# unicode: utf-8
import sys
from pygments import highlight
from pygments.formatters import TerminalFormatter
from pygments.lexers import XmlLexer
from xml.dom.minidom import parseString
# Two-space indentation used when pretty-printing XML.
INDENT = ' ' * 2

def format_code(data):
    """Pretty-print an XML document.

    *data* may be a bare XML string or a raw HTTP response; in the
    latter case the header block is kept verbatim and only the body is
    reformatted. Returns ``"<headers>\\n\\n<pretty xml>"`` (headers are
    empty for plain XML input).
    """
    header = ''
    if data.startswith('HTTP'):
        # HTTP response: split the header block from the XML body at the
        # blank line separating them.
        split_at = data.find("\r\n\r\n")
        header = data[0:split_at]
        data = data[split_at:].strip()
    pretty = parseString(data).toprettyxml(indent=INDENT)
    non_empty = [ln for ln in pretty.split('\n') if ln.strip()]
    return "%s\n\n%s" % (header, '\n'.join(non_empty))
def color_code(code):
    """Return *code* highlighted as XML with ANSI terminal colors."""
    return highlight(code, XmlLexer(), TerminalFormatter())
def main():
    """Read XML (optionally a raw HTTP response) from stdin and return
    it pretty-printed, colorized when stdout is a terminal.
    """
    data = sys.stdin.read()
    if sys.stdout.isatty():
        try:
            data = color_code(format_code(data))
        except ValueError as e:
            # Unparseable input: report the error; the raw data is still
            # returned below. (Python 2 print statement.)
            print e
    return data
if __name__ == '__main__':
print main()
| #!/usr/bin/env python
# unicode: utf-8
import sys
from pygments import highlight
from pygments.formatters import TerminalFormatter
from pygments.lexers import XmlLexer
from xml.dom.minidom import parseString
INDENT=' '*2
def format_code(data):
body = ''
if data.startswith('HTTP'):
end = data.find("\r\n\r\n")
body = data[0:end]
data = data[end:].strip()
lines = [line for line in parseString(data).toprettyxml(indent=INDENT).split('\n')
if line.strip()]
return "%s\n\n%s" % (body, '\n'.join(lines),)
def color_code(code):
return highlight(code, XmlLexer(), TerminalFormatter())
def main():
data = sys.stdin.read()
data = color_code(format_code(data))
return data
if __name__ == '__main__':
print main()
| mit | Python |
111ed982ed2182448ea5e80c2065514c51217be1 | update restfulAPI | EtienneChuang/etiennechuang.github.io,EtienneChuang/etiennechuang.github.io | py/restfulAPI.py | py/restfulAPI.py | from flask import Flask
from flask_restful import Resource, Api
import json
from flask_jsonpify import jsonify
from flask_cors import CORS
import urllib
import ssl
import requests
import csv
import sys
app = Flask(__name__)
CORS(app)
api = Api(app)
encoding = "utf-8"
def fetchCsvData(url):
    """Download a CSV resource and return its rows as a list of strings.

    Splits the UTF-8 payload on CRLF and drops the empty trailing
    element. On failure the error is only printed and the function
    implicitly returns None -- callers must guard against that.
    """
    try:
        # NOTE(review): verify=False disables TLS certificate checking;
        # confirm this is intentional for this endpoint.
        response = requests.get(url, verify=False)
        decoded_content = response.content.decode('utf-8')
        csvData = decoded_content.split('\r\n')
        # Drop the empty string produced by the trailing CRLF.
        csvData = csvData[:-1]
        #print(csvData)
        return csvData
    except Exception as e:
        print(e)
class Maskdata(Resource):
    """REST resource exposing Taiwan NHI pharmacy mask-stock data.

    Fetches the official maskdata CSV and repackages it as a JSON string
    of the form ``{"updatetime": <str>, "data": [[row fields...], ...]}``.
    """
    def get(self):
        try:
            urlPath = "http://data.nhi.gov.tw/Datasets/Download.ashx?rid=A21030000I-D50001-001&l=https://data.nhi.gov.tw/resource/mask/maskdata.csv"
            lines = fetchCsvData(urlPath)
            lines = [line.strip() for line in lines]
            keys = lines[0].split(',')  # CSV header row (currently unused)
            line_num = 1
            total_lines = len(lines)
            datas = []
            jsondata = []
            jsonkeys = ["updatetime", "data"]
            updatetime = ""
            # Walk the data rows (header skipped). Naive comma split --
            # NOTE(review): breaks if any field itself contains a comma.
            while line_num < total_lines:
                values = lines[line_num].split(",")
                # The update timestamp is the last column; capture it once
                # from the first data row.
                if(updatetime == ""):
                    updatetime = values[-1]
                    jsondata.append(updatetime)
                values = values[1:-1] #remove first and last element
                datas.append(values)
                line_num = line_num + 1
            jsondata.append(datas)
            json_str = json.dumps(dict(zip(jsonkeys, jsondata)), ensure_ascii=False, indent=0)
            #print(type(json_str))
            # Strip literal "\N" null markers and newlines from the payload.
            result_data = json_str.replace('\\N','').replace('\n','')
            print('size')
            print(result_data)
            print(sys.getsizeof(result_data))
            return result_data
        except Exception as e:
            # NOTE(review): returning the exception object itself from a
            # REST handler is unusual -- confirm this is intended.
            return e
class Hello_world(Resource):
    """Trivial health-check style endpoint."""
    def get(self):
        """Handle GET by returning a fixed greeting string."""
        return "HELLO!"
api.add_resource(Hello_world, '/')
api.add_resource(Maskdata, '/Maskdata') # Route_1
if __name__ == '__main__':
print('app run')
app.run() | from flask import Flask
from flask_restful import Resource, Api
import json
from flask_jsonpify import jsonify
from flask_cors import CORS
import urllib
import ssl
import requests
import csv
import sys
app = Flask(__name__)
CORS(app)
api = Api(app)
encoding = "utf-8"
def fetchCsvData(url):
try:
response = requests.get(url, verify=False)
decoded_content = response.content.decode('utf-8')
csvData = decoded_content.split('\r\n')
csvData = csvData[:-1]
#print(csvData)
return csvData
except Exception as e:
print(e)
class Maskdata(Resource):
def get(self):
try:
urlPath = "http://data.nhi.gov.tw/Datasets/Download.ashx?rid=A21030000I-D50001-001&l=https://data.nhi.gov.tw/resource/mask/maskdata.csv"
lines = fetchCsvData(urlPath)
lines = [line.strip() for line in lines]
keys = lines[0].split(',')
line_num = 1
total_lines = len(lines)
datas = []
jsondata = []
jsonkeys = ["updatetime", "data"]
updatetime = ""
while line_num < total_lines:
values = lines[line_num].split(",")
if(updatetime == ""):
updatetime = values[-1]
jsondata.append(updatetime)
values = values[1:-1] #remove first and last element
datas.append(values)
line_num = line_num + 1
jsondata.append(datas)
json_str = json.dumps(dict(zip(jsonkeys, jsondata)), ensure_ascii=False, indent=0)
#print(type(json_str))
result_data = json_str.replace(r'\"','').replace(r'\\N','').replace(r'\n','')
print('size')
print(result_data)
print(sys.getsizeof(result_data))
return result_data
except Exception as e:
return e
class Hello_world(Resource):
def get(self):
result = "HELLO!"
return result
api.add_resource(Hello_world, '/')
api.add_resource(Maskdata, '/Maskdata') # Route_1
if __name__ == '__main__':
print('app run')
app.run() | mit | Python |
9ac662557d6313190621c0c84a2c6923e0e9fa72 | Update event context instead of replace (NC-529) | opennode/nodeconductor,opennode/nodeconductor,opennode/nodeconductor | nodeconductor/logging/middleware.py | nodeconductor/logging/middleware.py | from __future__ import unicode_literals
import threading
_locals = threading.local()
def get_event_context():
    """Return the current thread's event context dict, or None if unset."""
    return getattr(_locals, 'context', None)
def set_event_context(context):
    """Store *context* as the current thread's event context."""
    _locals.context = context
def reset_event_context():
    """Drop the current thread's event context, if any."""
    if hasattr(_locals, 'context'):
        del _locals.context
def set_current_user(user):
    """Merge *user*'s logging details into the current event context.

    Updates (rather than replaces) the existing context so previously
    captured request metadata such as the IP address is preserved.
    """
    context = get_event_context() or {}
    context.update(user._get_log_context('user'))
    set_event_context(context)
def get_ip_address(request):
    """Return the client IP address for *request*.

    Proxied requests carry the originating address as the first entry of
    the ``X-Forwarded-For`` header; direct connections fall back to
    ``REMOTE_ADDR``.
    """
    meta = request.META
    if 'HTTP_X_FORWARDED_FOR' in meta:
        return meta['HTTP_X_FORWARDED_FOR'].split(',')[0].strip()
    return meta['REMOTE_ADDR']
class CaptureEventContextMiddleware(object):
    """Django middleware capturing per-request event logging context.

    On each request it records the client IP (plus user details for
    authenticated users) in thread-local storage, and clears it again
    when the response is returned.
    """
    def process_request(self, request):
        # Seed the context with the request origin.
        context = {'ip_address': get_ip_address(request)}
        user = getattr(request, 'user', None)
        if user and not user.is_anonymous():
            context.update(user._get_log_context('user'))
        set_event_context(context)
    def process_response(self, request, response):
        # Thread-locals outlive the request; always reset so context does
        # not leak into the next request served by this thread.
        reset_event_context()
        return response
| from __future__ import unicode_literals
import threading
_locals = threading.local()
def get_event_context():
return getattr(_locals, 'context', None)
def set_event_context(context):
_locals.context = context
def reset_event_context():
if hasattr(_locals, 'context'):
del _locals.context
def set_current_user(user):
set_event_context(user._get_log_context('user'))
def get_ip_address(request):
"""
Correct IP address is expected as first element of HTTP_X_FORWARDED_FOR or REMOTE_ADDR
"""
if 'HTTP_X_FORWARDED_FOR' in request.META:
return request.META['HTTP_X_FORWARDED_FOR'].split(',')[0].strip()
else:
return request.META['REMOTE_ADDR']
class CaptureEventContextMiddleware(object):
def process_request(self, request):
context = {'ip_address': get_ip_address(request)}
user = getattr(request, 'user', None)
if user and not user.is_anonymous():
context.update(user._get_log_context('user'))
set_event_context(context)
def process_response(self, request, response):
reset_event_context()
return response
| mit | Python |
c5be1820f87de3b6b80faa7296354b0391a73240 | Update template-argparse_v2.py | csiu/tokens,csiu/tokens,csiu/tokens,csiu/tokens,csiu/tokens,csiu/tokens | python/template/template-argparse_v2.py | python/template/template-argparse_v2.py | #!/usr/bin/env python
# Author: Celia
# Created:
import argparse
import sys
import os
usage = """
"""
def main():
    # Template placeholder; replace with the script's real logic.
    # (Python 2 print statement.)
    print "Hello world"
if __name__ == '__main__':
parser = argparse.ArgumentParser(description=usage, formatter_class=argparse.RawTextHelpFormatter)
parser.add_argument('-i', '--infile', dest='infile',
action='store',
default=None,
type=str,
#choices=['','',''],
required=True,
help='path to input file')
##get at the arguments
args = parser.parse_args()
## do something..
main()
| #!/usr/bin/env python
# Author: Celia
# Created:
import argparse
import sys
import os
usage = """ %s [options] -i INFILE
""" % (__file__)
def main():
print "Hello world"
if __name__ == '__main__':
parser = argparse.ArgumentParser(description=usage, formatter_class=argparse.RawTextHelpFormatter)
parser.add_argument('-i', '--infile', dest='infile',
action='store',
default=None,
type=str,
#choices=['','',''],
required=True,
help='path to input file')
##get at the arguments
args = parser.parse_args()
## do something..
main()
| mit | Python |
11921dd2d3e305a2ec014bf8fd13e5d57774fb95 | Add proper docstring to get_real_id.py, add example usage | kastden/nanagogo | bin/get_real_id.py | bin/get_real_id.py | #!/usr/bin/env python3
"""Get a 755 user's real talkId from their public ID on the website.
As an example, the ID for Furuhata Nao's page is
Xe8jJ0D40_aWkVIvojdMdG== (http://7gogo.jp/lp/Xe8jJ0D40_aWkVIvojdMdG==), but her
real talkId (for use with the API) is MqsG1FLTi-_9GtN76wEuUm==.
Example usage and output:
$ python3 get_real_id.py http://7gogo.jp/lp/Xe8jJ0D40_aWkVIvojdMdG== 9YFoGjThCxeWkVIvojdMdG==
http://7gogo.jp/lp/Xe8jJ0D40_aWkVIvojdMdG== : MqsG1FLTi-_9GtN76wEuUm==
http://7gogo.jp/lp/9YFoGjThCxeWkVIvojdMdG== : Kx09K9lOsMF9GtN76wEuUm==
"""
import re
import yaml
import sys
import requests
def get_real_id(url):
    """ Get a user's talkId from their nanangogo page.
    This function requests the HTML page on the given URL, extracts the
    the settings dictionary from the embedded javascript, parses it as YAML
    and returns their talkId.
    """
    r = requests.get(url)
    r.raise_for_status()
    # Capture the javascript "setting = {...};" object literal; DOTALL
    # lets the braces span multiple lines.
    match = re.compile('setting = ({.*?});', re.DOTALL)
    setting = match.search(r.text).group(1)
    # YAML parses the JS object syntax used here.
    # NOTE(review): yaml.load on fetched web content is unsafe -- prefer
    # yaml.safe_load.
    data = yaml.load(setting)
    return data['talkId']
if __name__ == "__main__":
args = sys.argv[1:]
if not args:
sys.exit(__doc__)
for id in args:
if id.startswith('http'):
url = id
else:
url = 'http://7gogo.jp/lp/{}'.format(id)
real_id = get_real_id(url)
print('{} : {}'.format(url, real_id))
| #!/usr/bin/env python3
import re
import yaml
import sys
import requests
''' Get a 755 user's real talkId.
As an example, the ID for Furuhata Nao's page is
Xe8jJ0D40_aWkVIvojdMdG== (http://7gogo.jp/lp/Xe8jJ0D40_aWkVIvojdMdG==),
but her real talkId (for use with the API) is MqsG1FLTi-_9GtN76wEuUm==.'''
def get_real_id(url):
r = requests.get(url)
r.raise_for_status()
match = re.compile('setting = ({.*?});', re.DOTALL)
setting = match.search(r.text).group(1)
data = yaml.load(setting)
return data['talkId']
if __name__ == "__main__":
args = sys.argv[1:]
for id in args:
if id.startswith('http'):
url = id
else:
url = 'http://7gogo.jp/lp/{}'.format(id)
real_id = get_real_id(url)
print('{} : {}'.format(url, real_id))
| mit | Python |
5b62eae4e2a295e6b167f4e035a9e663278c22b5 | Update uploadFTP.py | sniemi/SamPy,sniemi/SamPy,sniemi/SamPy,sniemi/SamPy,sniemi/SamPy,sniemi/SamPy,sniemi/SamPy,sniemi/SamPy,sniemi/SamPy,sniemi/SamPy | smnIO/uploadFTP.py | smnIO/uploadFTP.py | """
This module contains functions related to FTP file transfer protocol.
:Author: Sami-Matias Niemi
:contact: smn2@mssl.ucl.ac.uk
:version: 0.2
"""
import ftplib, os, glob
def upload(ftp, file):
    """
    Upload files to a server using FTP protocol.

    :param ftp: instance to the FTP server
    :type ftp: ftplib.FTP instance
    :param file: name of the file to be uploaded
    :type file: str
    """
    ext = os.path.splitext(file)[1]
    # Text-like files are sent in ASCII mode (line-ending translation);
    # everything else goes up as binary in 1 KiB blocks.
    if ext in (".txt", ".htm", ".html"):
        ftp.storlines("STOR " + file, open(file))
    else:
        # NOTE(review): the file handles opened here are never closed.
        ftp.storbinary("STOR " + file, open(file, "rb"), 1024)
if __name__ == '__main__':
#find all gzipped tar balls and sort
uploads = glob.glob('*.fits.tar.gz')
uploads.sort()
#open connection to the FTP server
ftp = ftplib.FTP('ftpix.iap.fr')
ftp.login('VISSIM','set_your_own_passwd')
#get existing files, note however, that this does
#not check the file size, so files that were
#uploaded only partially will not be uploaded
#again.
existing = ftp.nlst()
#print some output
print 'Found %i files from the server, these will not be uploaded again...' % len(existing)
#upload not existing files
i = 0
for file in uploads:
if not file in existing:
print 'Uploading %s' % file
upload(ftp, file)
i += 1
print '%i files were uploaded...' %i
| """
This module contains functions related to FTP file transfer protocol.
:Author: Sami-Matias Niemi
:contact: smn2@mssl.ucl.ac.uk
:version: 0.2
"""
import ftplib, os, glob
def upload(ftp, file):
"""
Upload files to a server using FTP protocol.
:param ftp: instance to the FTP server
:type ftp: ftplib.FTP instance
:param file: name of the file to be uploaded
:type file: str
"""
ext = os.path.splitext(file)[1]
if ext in (".txt", ".htm", ".html"):
ftp.storlines("STOR " + file, open(file))
else:
ftp.storbinary("STOR " + file, open(file, "rb"), 1024)
if __name__ == '__main__':
#find all gzipped tar balls and sort
uploads = glob.glob('*.fits.tar.gz')
uploads.sort()
#open connection to the FTP server
ftp = ftplib.FTP('ftpix.iap.fr')
ftp.login('VISSIM','!!!VISsim')
#get existing files, note however, that this does
#not check the file size, so files that were
#uploaded only partially will not be uploaded
#again.
existing = ftp.nlst()
#print some output
print 'Found %i files from the server, these will not be uploaded again...' % len(existing)
#upload not existing files
i = 0
for file in uploads:
if not file in existing:
print 'Uploading %s' % file
upload(ftp, file)
i += 1
print '%i files were uploaded...' %i | bsd-2-clause | Python |
93a95afe231910d9f683909994692fadaf107057 | Make md.render have the same API as rst.render | pypa/readme,pypa/readme_renderer | readme_renderer/markdown.py | readme_renderer/markdown.py | # Copyright 2014 Donald Stufft
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import absolute_import, division, print_function
import markdown
from .clean import clean
def render(raw):
    """Render Markdown *raw* to sanitized HTML.

    Returns the cleaned HTML string, or None when the Markdown renderer
    produced no output.
    """
    rendered = markdown.markdown(
        raw,
        extensions=[
            'markdown.extensions.codehilite',
            'markdown.extensions.fenced_code',
            'markdown.extensions.smart_strong',
        ])
    if rendered:
        return clean(rendered)
    else:
        return None
| # Copyright 2014 Donald Stufft
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import absolute_import, division, print_function
import markdown
from .clean import clean
def render(raw):
rendered = markdown.markdown(
raw,
extensions=[
'markdown.extensions.codehilite',
'markdown.extensions.fenced_code',
'markdown.extensions.smart_strong',
])
return clean(rendered or raw), bool(rendered)
| apache-2.0 | Python |
0dcd5d1b5de5aeeeb653ea6cb6fdc04a931518b6 | SPeed up donate admin | wanghaven/readthedocs.org,raven47git/readthedocs.org,jerel/readthedocs.org,sils1297/readthedocs.org,emawind84/readthedocs.org,michaelmcandrew/readthedocs.org,GovReady/readthedocs.org,dirn/readthedocs.org,tddv/readthedocs.org,kenshinthebattosai/readthedocs.org,atsuyim/readthedocs.org,davidfischer/readthedocs.org,attakei/readthedocs-oauth,asampat3090/readthedocs.org,sils1297/readthedocs.org,SteveViss/readthedocs.org,kdkeyser/readthedocs.org,asampat3090/readthedocs.org,gjtorikian/readthedocs.org,clarkperkins/readthedocs.org,stevepiercy/readthedocs.org,emawind84/readthedocs.org,SteveViss/readthedocs.org,wijerasa/readthedocs.org,kdkeyser/readthedocs.org,Carreau/readthedocs.org,gjtorikian/readthedocs.org,atsuyim/readthedocs.org,agjohnson/readthedocs.org,singingwolfboy/readthedocs.org,sid-kap/readthedocs.org,wijerasa/readthedocs.org,istresearch/readthedocs.org,wanghaven/readthedocs.org,kenwang76/readthedocs.org,Tazer/readthedocs.org,mhils/readthedocs.org,gjtorikian/readthedocs.org,jerel/readthedocs.org,nikolas/readthedocs.org,soulshake/readthedocs.org,wanghaven/readthedocs.org,d0ugal/readthedocs.org,VishvajitP/readthedocs.org,laplaceliu/readthedocs.org,istresearch/readthedocs.org,agjohnson/readthedocs.org,kenwang76/readthedocs.org,dirn/readthedocs.org,dirn/readthedocs.org,titiushko/readthedocs.org,LukasBoersma/readthedocs.org,dirn/readthedocs.org,espdev/readthedocs.org,d0ugal/readthedocs.org,rtfd/readthedocs.org,LukasBoersma/readthedocs.org,royalwang/readthedocs.org,titiushko/readthedocs.org,wijerasa/readthedocs.org,nikolas/readthedocs.org,pombredanne/readthedocs.org,GovReady/readthedocs.org,tddv/readthedocs.org,techtonik/readthedocs.org,LukasBoersma/readthedocs.org,espdev/readthedocs.org,singingwolfboy/readthedocs.org,michaelmcandrew/readthedocs.org,VishvajitP/readthedocs.org,attakei/readthedocs-oauth,cgourlay/readthedocs.org,jerel/readthedocs.org,kdkeyser/readthedocs.org,kenshinthebattosai/readthedocs.org
,sunnyzwh/readthedocs.org,titiushko/readthedocs.org,titiushko/readthedocs.org,sid-kap/readthedocs.org,michaelmcandrew/readthedocs.org,laplaceliu/readthedocs.org,LukasBoersma/readthedocs.org,cgourlay/readthedocs.org,safwanrahman/readthedocs.org,Tazer/readthedocs.org,soulshake/readthedocs.org,agjohnson/readthedocs.org,rtfd/readthedocs.org,royalwang/readthedocs.org,michaelmcandrew/readthedocs.org,singingwolfboy/readthedocs.org,attakei/readthedocs-oauth,attakei/readthedocs-oauth,hach-que/readthedocs.org,d0ugal/readthedocs.org,soulshake/readthedocs.org,Tazer/readthedocs.org,istresearch/readthedocs.org,hach-que/readthedocs.org,rtfd/readthedocs.org,kenwang76/readthedocs.org,CedarLogic/readthedocs.org,d0ugal/readthedocs.org,rtfd/readthedocs.org,agjohnson/readthedocs.org,sunnyzwh/readthedocs.org,mhils/readthedocs.org,wijerasa/readthedocs.org,clarkperkins/readthedocs.org,CedarLogic/readthedocs.org,clarkperkins/readthedocs.org,jerel/readthedocs.org,fujita-shintaro/readthedocs.org,safwanrahman/readthedocs.org,sunnyzwh/readthedocs.org,SteveViss/readthedocs.org,tddv/readthedocs.org,pombredanne/readthedocs.org,atsuyim/readthedocs.org,laplaceliu/readthedocs.org,takluyver/readthedocs.org,Tazer/readthedocs.org,hach-que/readthedocs.org,royalwang/readthedocs.org,safwanrahman/readthedocs.org,emawind84/readthedocs.org,espdev/readthedocs.org,stevepiercy/readthedocs.org,kenwang76/readthedocs.org,espdev/readthedocs.org,raven47git/readthedocs.org,singingwolfboy/readthedocs.org,cgourlay/readthedocs.org,GovReady/readthedocs.org,istresearch/readthedocs.org,safwanrahman/readthedocs.org,mhils/readthedocs.org,kenshinthebattosai/readthedocs.org,VishvajitP/readthedocs.org,nikolas/readthedocs.org,kenshinthebattosai/readthedocs.org,kdkeyser/readthedocs.org,clarkperkins/readthedocs.org,sils1297/readthedocs.org,Carreau/readthedocs.org,stevepiercy/readthedocs.org,laplaceliu/readthedocs.org,atsuyim/readthedocs.org,sid-kap/readthedocs.org,takluyver/readthedocs.org,Carreau/readthedocs.org,stevepiercy/readth
edocs.org,asampat3090/readthedocs.org,fujita-shintaro/readthedocs.org,fujita-shintaro/readthedocs.org,techtonik/readthedocs.org,sils1297/readthedocs.org,emawind84/readthedocs.org,SteveViss/readthedocs.org,fujita-shintaro/readthedocs.org,asampat3090/readthedocs.org,Carreau/readthedocs.org,pombredanne/readthedocs.org,davidfischer/readthedocs.org,hach-que/readthedocs.org,raven47git/readthedocs.org,VishvajitP/readthedocs.org,mhils/readthedocs.org,takluyver/readthedocs.org,davidfischer/readthedocs.org,CedarLogic/readthedocs.org,cgourlay/readthedocs.org,nikolas/readthedocs.org,CedarLogic/readthedocs.org,techtonik/readthedocs.org,davidfischer/readthedocs.org,takluyver/readthedocs.org,wanghaven/readthedocs.org,gjtorikian/readthedocs.org,royalwang/readthedocs.org,espdev/readthedocs.org,soulshake/readthedocs.org,sid-kap/readthedocs.org,GovReady/readthedocs.org,techtonik/readthedocs.org,sunnyzwh/readthedocs.org,raven47git/readthedocs.org | readthedocs/donate/admin.py | readthedocs/donate/admin.py | from django.contrib import admin
from .models import Supporter
class SupporterAdmin(admin.ModelAdmin):
    """Admin configuration for Supporter.

    ``raw_id_fields`` renders the user relation as a raw id input
    instead of a select box over every user, which keeps the change
    form fast.
    """
    model = Supporter
    raw_id_fields = ('user',)
    list_display = ('name', 'email', 'dollars', 'public')
    # NOTE(review): list_filter on free-text fields (name, email) renders
    # one sidebar link per distinct value -- confirm this is intended.
    list_filter = ('name', 'email', 'dollars', 'public')

admin.site.register(Supporter, SupporterAdmin)
| from django.contrib import admin
from .models import Supporter
admin.site.register(Supporter)
| mit | Python |
d209e5318eb148176edf8b63b0a02731b80d1ff7 | replace stop_area_id by destination_stop_area_id | is06/navitia,lrocheWB/navitia,francois-vincent/navitia,CanalTP/navitia,xlqian/navitia,antoine-de/navitia,kinnou02/navitia,CanalTP/navitia,prhod/navitia,Tisseo/navitia,pbougue/navitia,prhod/navitia,ballouche/navitia,patochectp/navitia,VincentCATILLON/navitia,xlqian/navitia,pbougue/navitia,fueghan/navitia,francois-vincent/navitia,lrocheWB/navitia,xlqian/navitia,ballouche/navitia,djludo/navitia,VincentCATILLON/navitia,lrocheWB/navitia,kinnou02/navitia,CanalTP/navitia,frodrigo/navitia,TeXitoi/navitia,is06/navitia,xlqian/navitia,kadhikari/navitia,patochectp/navitia,antoine-de/navitia,is06/navitia,lrocheWB/navitia,thiphariel/navitia,kadhikari/navitia,frodrigo/navitia,CanalTP/navitia,thiphariel/navitia,CanalTP/navitia,kinnou02/navitia,pbougue/navitia,francois-vincent/navitia,Tisseo/navitia,frodrigo/navitia,kinnou02/navitia,djludo/navitia,TeXitoi/navitia,patochectp/navitia,Tisseo/navitia,prhod/navitia,is06/navitia,frodrigo/navitia,ballouche/navitia,djludo/navitia,VincentCATILLON/navitia,Tisseo/navitia,Tisseo/navitia,fueghan/navitia,djludo/navitia,VincentCATILLON/navitia,fueghan/navitia,patochectp/navitia,fueghan/navitia,antoine-de/navitia,xlqian/navitia,antoine-de/navitia,kadhikari/navitia,ballouche/navitia,TeXitoi/navitia,thiphariel/navitia,kadhikari/navitia,thiphariel/navitia,prhod/navitia,pbougue/navitia,TeXitoi/navitia,francois-vincent/navitia | source/sql/alembic/versions/12660cd87568_main_destination_for_route.py | source/sql/alembic/versions/12660cd87568_main_destination_for_route.py | """main destination for route
Revision ID: 12660cd87568
Revises: 29fc422c56cb
Create Date: 2015-05-05 13:47:06.507810
"""
# revision identifiers, used by Alembic.
revision = '12660cd87568'
down_revision = '13673746db16'
from alembic import op
import sqlalchemy as sa
import geoalchemy2 as ga
from sqlalchemy.dialects import postgresql
def upgrade():
    """Add the nullable route.destination_stop_area_id column (navitia schema)."""
    ### commands auto generated by Alembic - please adjust! ###
    op.add_column('route', sa.Column('destination_stop_area_id', sa.BIGINT(), primary_key=False, nullable=True), schema='navitia')
    ### end Alembic commands ###
def downgrade():
    """Drop the route.destination_stop_area_id column added by upgrade()."""
    ### commands auto generated by Alembic - please adjust! ###
    op.drop_column('route', 'destination_stop_area_id', schema='navitia')
    ### end Alembic commands ###
| """main destination for route
Revision ID: 12660cd87568
Revises: 29fc422c56cb
Create Date: 2015-05-05 13:47:06.507810
"""
# revision identifiers, used by Alembic.
revision = '12660cd87568'
down_revision = '29fc422c56cb'
from alembic import op
import sqlalchemy as sa
import geoalchemy2 as ga
from sqlalchemy.dialects import postgresql
def upgrade():
### commands auto generated by Alembic - please adjust! ###
op.add_column('route', sa.Column('stop_area_id', sa.BIGINT(), primary_key=False, nullable=True), schema='navitia')
### end Alembic commands ###
def downgrade():
### commands auto generated by Alembic - please adjust! ###
op.drop_column('route', 'stop_area_id', schema='navitia')
### end Alembic commands ###
| agpl-3.0 | Python |
06ae20b428bc92790bcaae9636200e427d99abd9 | Bump version number for Python 3.2-matching release | harlowja/pythonfutures,plucury/pythonfutures,startover/pythonfutures | python2/setup.py | python2/setup.py | #!/usr/bin/env python
from distutils.core import setup
setup(name='futures',
version='2.0',
description='Java-style futures implementation in Python 2.x',
author='Brian Quinlan',
author_email='brian@sweetapp.com',
url='http://code.google.com/p/pythonfutures',
download_url='http://pypi.python.org/pypi/futures3/',
packages=['futures'],
license='BSD',
classifiers=['License :: OSI Approved :: BSD License',
'Development Status :: 5 - Production/Stable',
'Intended Audience :: Developers',
'Programming Language :: Python :: 2']
)
| #!/usr/bin/env python
from distutils.core import setup
setup(name='futures',
version='1.0',
description='Java-style futures implementation in Python 2.x',
author='Brian Quinlan',
author_email='brian@sweetapp.com',
url='http://code.google.com/p/pythonfutures',
download_url='http://pypi.python.org/pypi/futures3/',
packages=['futures'],
license='BSD',
classifiers=['License :: OSI Approved :: BSD License',
'Development Status :: 5 - Production/Stable',
'Intended Audience :: Developers',
'Programming Language :: Python :: 2']
)
| bsd-2-clause | Python |
2e30d22ad8b6e35cfffed6a883bf959009707d73 | Make more readable error msg on quantum client authentication failure | badock/nova,CloudServer/nova,mandeepdhami/nova,apporc/nova,dawnpower/nova,felixma/nova,gspilio/nova,LoHChina/nova,shail2810/nova,mgagne/nova,sebrandon1/nova,affo/nova,BeyondTheClouds/nova,cloudbase/nova-virtualbox,CCI-MOC/nova,isyippee/nova,takeshineshiro/nova,orbitfp7/nova,citrix-openstack-build/nova,mahak/nova,luogangyi/bcec-nova,barnsnake351/nova,thomasem/nova,adelina-t/nova,devoid/nova,TwinkleChawla/nova,hanlind/nova,cloudbase/nova-virtualbox,plumgrid/plumgrid-nova,petrutlucian94/nova_dev,saleemjaveds/https-github.com-openstack-nova,MountainWei/nova,mahak/nova,tanglei528/nova,hanlind/nova,double12gzh/nova,viggates/nova,joker946/nova,silenceli/nova,gooddata/openstack-nova,scripnichenko/nova,JioCloud/nova,iuliat/nova,mahak/nova,badock/nova,eonpatapon/nova,openstack/nova,maelnor/nova,tianweizhang/nova,rrader/nova-docker-plugin,alvarolopez/nova,rajalokan/nova,CEG-FYP-OpenStack/scheduler,TwinkleChawla/nova,JioCloud/nova,JioCloud/nova_test_latest,NeCTAR-RC/nova,thomasem/nova,Juniper/nova,Yuriy-Leonov/nova,ted-gould/nova,tangfeixiong/nova,bgxavier/nova,berrange/nova,CloudServer/nova,dims/nova,devendermishrajio/nova_test_latest,shail2810/nova,vmturbo/nova,Stavitsky/nova,gspilio/nova,shootstar/novatest,berrange/nova,NeCTAR-RC/nova,mmnelemane/nova,watonyweng/nova,noironetworks/nova,petrutlucian94/nova_dev,jianghuaw/nova,jianghuaw/nova,imsplitbit/nova,akash1808/nova_test_latest,openstack/nova,tudorvio/nova,kimjaejoong/nova,citrix-openstack-build/nova,affo/nova,shootstar/novatest,jianghuaw/nova,yosshy/nova,blueboxgroup/nova,fnordahl/nova,bigswitch/nova,MountainWei/nova,eayunstack/nova,eharney/nova,varunarya10/nova_test_latest,JianyuWang/nova,Tehsmash/nova,zaina/nova,Francis-Liu/animated-broccoli,viggates/nova,felixma/nova,yatinkumbhare/openstack-nova,mandeepdhami/nova,rajalokan/nova,sebrandon1/nova,tudorvio/nova,cloudbau/nova,rahulunair/nova,virtualopensystems/
nova,kimjaejoong/nova,klmitch/nova,petrutlucian94/nova,sridevikoushik31/nova,yatinkumbhare/openstack-nova,orbitfp7/nova,rrader/nova-docker-plugin,rahulunair/nova,fnordahl/nova,bgxavier/nova,LoHChina/nova,BeyondTheClouds/nova,bigswitch/nova,double12gzh/nova,gooddata/openstack-nova,sridevikoushik31/nova,cernops/nova,petrutlucian94/nova,takeshineshiro/nova,SUSE-Cloud/nova,Yusuke1987/openstack_template,Tehsmash/nova,cloudbase/nova,projectcalico/calico-nova,belmiromoreira/nova,phenoxim/nova,hanlind/nova,ruslanloman/nova,Stavitsky/nova,whitepages/nova,DirectXMan12/nova-hacking,tealover/nova,ntt-sic/nova,eonpatapon/nova,ewindisch/nova,sacharya/nova,Juniper/nova,alaski/nova,vladikr/nova_drafts,noironetworks/nova,CiscoSystems/nova,OpenAcademy-OpenStack/nova-scheduler,apporc/nova,projectcalico/calico-nova,jeffrey4l/nova,raildo/nova,iuliat/nova,silenceli/nova,sridevikoushik31/nova,akash1808/nova,zzicewind/nova,OpenAcademy-OpenStack/nova-scheduler,leilihh/novaha,devendermishrajio/nova,edulramirez/nova,rajalokan/nova,DirectXMan12/nova-hacking,saleemjaveds/https-github.com-openstack-nova,mmnelemane/nova,Juniper/nova,raildo/nova,whitepages/nova,mikalstill/nova,cernops/nova,qwefi/nova,sebrandon1/nova,angdraug/nova,cloudbase/nova,Juniper/nova,leilihh/novaha,tealover/nova,klmitch/nova,TieWei/nova,Brocade-OpenSource/OpenStack-DNRM-Nova,vmturbo/nova,SUSE-Cloud/nova,bclau/nova,vmturbo/nova,devendermishrajio/nova,bclau/nova,vmturbo/nova,blueboxgroup/nova,akash1808/nova,sridevikoushik31/nova,klmitch/nova,redhat-openstack/nova,plumgrid/plumgrid-nova,nikesh-mahalka/nova,shahar-stratoscale/nova,scripnichenko/nova,nikesh-mahalka/nova,rahulunair/nova,leilihh/nova,angdraug/nova,BeyondTheClouds/nova,ntt-sic/nova,gspilio/nova,devendermishrajio/nova_test_latest,jeffrey4l/nova,adelina-t/nova,maelnor/nova,gooddata/openstack-nova,alexandrucoman/vbox-nova-driver,yosshy/nova,joker946/nova,Brocade-OpenSource/OpenStack-DNRM-Nova,rajalokan/nova,watonyweng/nova,tangfeixiong/nova,dawnpower/nova,cernops/nova
,alaski/nova,openstack/nova,virtualopensystems/nova,sacharya/nova,qwefi/nova,akash1808/nova_test_latest,vladikr/nova_drafts,imsplitbit/nova,mikalstill/nova,Francis-Liu/animated-broccoli,eharney/nova,Yuriy-Leonov/nova,devoid/nova,j-carpentier/nova,luogangyi/bcec-nova,alexandrucoman/vbox-nova-driver,ruslanloman/nova,zaina/nova,gooddata/openstack-nova,rickerc/nova_audit,eayunstack/nova,varunarya10/nova_test_latest,spring-week-topos/nova-week,spring-week-topos/nova-week,CCI-MOC/nova,cloudbau/nova,tanglei528/nova,barnsnake351/nova,belmiromoreira/nova,Yusuke1987/openstack_template,Metaswitch/calico-nova,tianweizhang/nova,klmitch/nova,CEG-FYP-OpenStack/scheduler,rickerc/nova_audit,mgagne/nova,phenoxim/nova,ewindisch/nova,TieWei/nova,cloudbase/nova,shahar-stratoscale/nova,zhimin711/nova,cyx1231st/nova,zzicewind/nova,CiscoSystems/nova,alvarolopez/nova,JianyuWang/nova,DirectXMan12/nova-hacking,dims/nova,mikalstill/nova,isyippee/nova,jianghuaw/nova,JioCloud/nova_test_latest,ted-gould/nova,leilihh/nova,cyx1231st/nova,redhat-openstack/nova,j-carpentier/nova,Metaswitch/calico-nova,zhimin711/nova,edulramirez/nova | nova/network/quantumv2/__init__.py | nova/network/quantumv2/__init__.py | # vim: tabstop=4 shiftwidth=4 softtabstop=4
#
# Copyright 2012 OpenStack Foundation
# All Rights Reserved
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from oslo.config import cfg
from quantumclient import client
from quantumclient.common import exceptions
from quantumclient.v2_0 import client as clientv20
from nova.openstack.common import excutils
from nova.openstack.common import log as logging
CONF = cfg.CONF
LOG = logging.getLogger(__name__)
def _get_auth_token():
try:
httpclient = client.HTTPClient(
username=CONF.quantum_admin_username,
tenant_name=CONF.quantum_admin_tenant_name,
region_name=CONF.quantum_region_name,
password=CONF.quantum_admin_password,
auth_url=CONF.quantum_admin_auth_url,
timeout=CONF.quantum_url_timeout,
auth_strategy=CONF.quantum_auth_strategy,
insecure=CONF.quantum_api_insecure)
httpclient.authenticate()
return httpclient.auth_token
except exceptions.QuantumClientException as e:
with excutils.save_and_reraise_exception():
LOG.error(_('Quantum client authentication failed: %s'), e)
def _get_client(token=None):
if not token and CONF.quantum_auth_strategy:
token = _get_auth_token()
params = {
'endpoint_url': CONF.quantum_url,
'timeout': CONF.quantum_url_timeout,
'insecure': CONF.quantum_api_insecure,
}
if token:
params['token'] = token
else:
params['auth_strategy'] = None
return clientv20.Client(**params)
def get_client(context, admin=False):
if admin:
token = None
else:
token = context.auth_token
return _get_client(token=token)
| # vim: tabstop=4 shiftwidth=4 softtabstop=4
#
# Copyright 2012 OpenStack Foundation
# All Rights Reserved
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from oslo.config import cfg
from quantumclient import client
from quantumclient.v2_0 import client as clientv20
from nova.openstack.common import excutils
from nova.openstack.common import log as logging
CONF = cfg.CONF
LOG = logging.getLogger(__name__)
def _get_auth_token():
try:
httpclient = client.HTTPClient(
username=CONF.quantum_admin_username,
tenant_name=CONF.quantum_admin_tenant_name,
region_name=CONF.quantum_region_name,
password=CONF.quantum_admin_password,
auth_url=CONF.quantum_admin_auth_url,
timeout=CONF.quantum_url_timeout,
auth_strategy=CONF.quantum_auth_strategy,
insecure=CONF.quantum_api_insecure)
httpclient.authenticate()
except Exception:
with excutils.save_and_reraise_exception():
LOG.exception(_("_get_auth_token() failed"))
return httpclient.auth_token
def _get_client(token=None):
if not token and CONF.quantum_auth_strategy:
token = _get_auth_token()
params = {
'endpoint_url': CONF.quantum_url,
'timeout': CONF.quantum_url_timeout,
'insecure': CONF.quantum_api_insecure,
}
if token:
params['token'] = token
else:
params['auth_strategy'] = None
return clientv20.Client(**params)
def get_client(context, admin=False):
if admin:
token = None
else:
token = context.auth_token
return _get_client(token=token)
| apache-2.0 | Python |
20e846e849e53e6f3b5d82b2a3e9f9db310a68a4 | Bump version number | timxx/gitc,timxx/gitc | qgitc/version.py | qgitc/version.py | # -*- coding: utf-8 -*-
VERSION_MAJOR = 3
VERSION_MINOR = 0
VERSION_PATCH = 0
VERSION = "{}.{}.{}".format(VERSION_MAJOR,
VERSION_MINOR,
VERSION_PATCH)
| # -*- coding: utf-8 -*-
VERSION_MAJOR = 2
VERSION_MINOR = 1
VERSION_PATCH = 0
VERSION = "{}.{}.{}".format(VERSION_MAJOR,
VERSION_MINOR,
VERSION_PATCH)
| apache-2.0 | Python |
188fa155b9f421b980c6db048c0e1a235d8967ab | fix 10 files at a time | crateio/crate.web,crateio/crate.web | crate_project/apps/crate/management/commands/fix_missing_files.py | crate_project/apps/crate/management/commands/fix_missing_files.py | from django.core.management.base import BaseCommand
from packages.models import ReleaseFile
from pypi.processor import PyPIPackage
class Command(BaseCommand):
def handle(self, *args, **options):
i = 0
for rf in ReleaseFile.objects.filter(digest="").distinct("release")[:10]:
print rf.release.package.name, rf.release.version
p = PyPIPackage(rf.release.package.name, version=rf.release.version)
p.process(skip_modified=False)
i += 1
print "Fixed %d releases" % i
| from django.core.management.base import BaseCommand
from packages.models import ReleaseFile
from pypi.processor import PyPIPackage
class Command(BaseCommand):
def handle(self, *args, **options):
i = 0
for rf in ReleaseFile.objects.filter(digest="").distinct("release")[:1]:
print rf.release.package.name, rf.release.version
p = PyPIPackage(rf.release.package.name, version=rf.release.version)
p.process(skip_modified=False)
i += 1
print "Fixed %d releases" % i
| bsd-2-clause | Python |
9d1f0bb4b332cbf3775408336fd09cd56c478554 | Fix formatting | amolenaar/gaphor,amolenaar/gaphor | gaphor/ui/__init__.py | gaphor/ui/__init__.py | """
This module contains user interface related code, such as the
main screen and diagram windows.
"""
from gi.repository import Gtk, Gdk
import pkg_resources
import os.path
icon_theme = Gtk.IconTheme.get_default()
icon_theme.append_search_path(
os.path.abspath(pkg_resources.resource_filename("gaphor.ui", "pixmaps"))
)
import re
def _repl(m):
v = m.group(1).lower()
return len(v) == 1 and v or "%c-%c" % tuple(v)
_repl.expr = "(.?[A-Z])"
def icon_for_element(element):
return re.sub(_repl.expr, _repl, type(element).__name__)
# Set style for model canvas
css_provider = Gtk.CssProvider.new()
screen = Gdk.Display.get_default().get_default_screen()
Gtk.StyleContext.add_provider_for_screen(
screen, css_provider, Gtk.STYLE_PROVIDER_PRIORITY_APPLICATION
)
css_provider.load_from_data("#diagram-tab { background: white }".encode("utf-8"))
| """
This module contains user interface related code, such as the
main screen and diagram windows.
"""
from gi.repository import Gtk, Gdk
import pkg_resources
import os.path
icon_theme = Gtk.IconTheme.get_default()
icon_theme.append_search_path(
os.path.abspath(pkg_resources.resource_filename("gaphor.ui", "pixmaps"))
)
import re
def _repl(m):
v = m.group(1).lower()
return len(v) == 1 and v or "%c-%c" % tuple(v)
_repl.expr = "(.?[A-Z])"
def icon_for_element(element):
return re.sub(_repl.expr, _repl, type(element).__name__)
# Set style for model canvas
css_provider = Gtk.CssProvider.new()
screen = Gdk.Display.get_default().get_default_screen()
Gtk.StyleContext.add_provider_for_screen(screen, css_provider, Gtk.STYLE_PROVIDER_PRIORITY_APPLICATION)
css_provider.load_from_data("#diagram-tab { background: white }".encode("utf-8"))
| lgpl-2.1 | Python |
7d3f69d2ce8a55480573c5ff044a2d0565661b89 | improve user lookup and docs (#33) | lock8/django-rest-framework-jwt-refresh-token | refreshtoken/permissions.py | refreshtoken/permissions.py | from rest_framework import permissions
class IsOwnerOrAdmin(permissions.BasePermission):
"""
Only admins or owners are allowed.
"""
def has_permission(self, request, view):
user = request.user
return user and user.is_authenticated
def has_object_permission(self, request, view, obj):
"""
Allow staff or superusers, and the owner of the object itself.
"""
user = request.user
if not user.is_authenticated:
return False
elif user.is_staff or user.is_superuser:
return True
return user == obj.user
| from rest_framework import permissions
class IsOwnerOrAdmin(permissions.BasePermission):
"""
Only admins or owners can have permission
"""
def has_permission(self, request, view):
return request.user and request.user.is_authenticated
def has_object_permission(self, request, view, obj):
"""
If user is staff or superuser or 'owner' of object return True
Else return false.
"""
if not request.user.is_authenticated:
return False
elif request.user.is_staff or request.user.is_superuser:
return True
else:
return request.user == obj.user
| mit | Python |
2b9566814e085023a9b5ef0f8e5b15dcd932f6c0 | Load pep8 package lazily | tk0miya/flake8-coding | flake8_coding.py | flake8_coding.py | # -*- coding: utf-8 -*-
import re
__version__ = '1.1.1'
class CodingChecker(object):
name = 'flake8_coding'
version = __version__
def __init__(self, tree, filename):
self.filename = filename
@classmethod
def add_options(cls, parser):
parser.add_option(
'--accept-encodings', default='latin-1, utf-8', action='store',
help="Acceptable source code encodings for `coding:` magic comment"
)
parser.config_options.append('accept-encodings')
@classmethod
def parse_options(cls, options):
cls.encodings = [e.strip().lower() for e in options.accept_encodings.split(',')]
def read_headers(self):
import pep8
if self.filename in ('stdin', '-', None):
return pep8.stdin_get_value().splitlines(True)[:2]
else:
return pep8.readlines(self.filename)[:2]
def run(self):
try:
# PEP-263 says: a magic comment must be placed into the source
# files either as first or second line in the file
lines = self.read_headers()
if len(lines) == 0:
raise StopIteration()
for lineno, line in enumerate(lines, start=1):
matched = re.search('coding[:=]\s*([-\w.]+)', line, re.IGNORECASE)
if matched:
if matched.group(1).lower() not in self.encodings:
yield lineno, 0, "C102 Unknown encoding found in coding magic comment", type(self)
break
else:
yield 0, 0, "C101 Coding magic comment not found", type(self)
except IOError:
pass
| # -*- coding: utf-8 -*-
import re
import pep8
__version__ = '1.1.1'
class CodingChecker(object):
name = 'flake8_coding'
version = __version__
def __init__(self, tree, filename):
self.filename = filename
@classmethod
def add_options(cls, parser):
parser.add_option(
'--accept-encodings', default='latin-1, utf-8', action='store',
help="Acceptable source code encodings for `coding:` magic comment"
)
parser.config_options.append('accept-encodings')
@classmethod
def parse_options(cls, options):
cls.encodings = [e.strip().lower() for e in options.accept_encodings.split(',')]
def read_headers(self):
if self.filename in ('stdin', '-', None):
return pep8.stdin_get_value().splitlines(True)[:2]
else:
return pep8.readlines(self.filename)[:2]
def run(self):
try:
# PEP-263 says: a magic comment must be placed into the source
# files either as first or second line in the file
lines = self.read_headers()
if len(lines) == 0:
raise StopIteration()
for lineno, line in enumerate(lines, start=1):
matched = re.search('coding[:=]\s*([-\w.]+)', line, re.IGNORECASE)
if matched:
if matched.group(1).lower() not in self.encodings:
yield lineno, 0, "C102 Unknown encoding found in coding magic comment", type(self)
break
else:
yield 0, 0, "C101 Coding magic comment not found", type(self)
except IOError:
pass
| apache-2.0 | Python |
2fe2ca16f2074a8674ceef43f4731e164156a9b2 | refactor and comments | cgoldberg/pageloadtimer | pageloadtimer.py | pageloadtimer.py | #!/usr/bin/env python
#
# Copyright (c) 2015 Corey Goldberg
# License: MIT
import collections
import textwrap
from selenium import webdriver
class PageLoadTimer:
def __init__(self, driver):
"""
takes:
driver: webdriver instance from selenium package.
this should be instantiated with an active driver.
"""
self.driver = driver
self.jscript = textwrap.dedent("""
var performance = window.performance || {};
var timings = performance.timing || {};
return timings;
""")
def inject_timing_js(self):
timings = self.driver.execute_script(self.jscript)
return timings
def get_event_times(self):
timings = self.inject_timing_js()
# the W3C Navigation Timing spec guarantees a monotonic clock:
# "The difference between any two chronologically recorded timing
# attributes must never be negative. For all navigations, including
# subdocument navigations, the user agent must record the system
# clock at the beginning of the root document navigation and define
# subsequent timing attributes in terms of a monotonic clock
# measuring time elapsed from the beginning of the navigation."
# However, some navigation events produce a value of 0 when unable to
# retrieve a timestamp. We filter those out here:
good_values = [epoch for epoch in timings.values() if epoch != 0]
# rather than time since epoch, we care about elapsed time since first
# sample was reported until event time. Since the dict we received was
# inherently unordered, we order things here, according to W3C spec
# fields.
ordered_events = ('navigationStart', 'fetchStart', 'domainLookupStart',
'domainLookupEnd', 'connectStart', 'connectEnd',
'secureConnectionStart', 'requestStart',
'responseStart', 'responseEnd', 'domLoading',
'domInteractive', 'domContentLoadedEventStart',
'domContentLoadedEventEnd', 'domComplete',
'loadEventStart', 'loadEventEnd'
)
event_times = ((event, timings[event] - min(good_values)) for event
in ordered_events if event in timings)
return collections.OrderedDict(event_times)
if __name__ == '__main__':
url = 'http://www.example.com'
driver = webdriver.Firefox()
driver.get(url)
timer = PageLoadTimer(driver)
print timer.get_event_times()
driver.quit()
| #!/usr/bin/env python
#
# Copyright (c) 2015 Corey Goldberg
# License: MIT
import collections
import logging
import textwrap
from pyvirtualdisplay import Display
from selenium import webdriver
class PageLoadTimer:
def __init__(self, driver):
self.driver = driver
self.jscript = textwrap.dedent("""
var performance = window.performance || {};
var timings = performance.timing || {};
return timings;
""")
def inject_timing_js(self):
timings = self.driver.execute_script(self.jscript)
return timings
def get_event_times(self):
ordered_events = ('navigationStart', 'fetchStart', 'domainLookupStart',
'domainLookupEnd', 'connectStart', 'connectEnd',
'secureConnectionStart', 'requestStart', 'responseStart',
'responseEnd', 'domLoading', 'domInteractive',
'domContentLoadedEventStart', 'domContentLoadedEventEnd',
'domComplete', 'loadEventStart', 'loadEventEnd'
)
timings = self.inject_timing_js()
min_time = min((epoch for epoch in timings.values() if epoch != 0))
event_times = ([(event, timings[event] - min_time) for event
in ordered_events if event in timings])
event_times_ordered = collections.OrderedDict(event_times)
return event_times_ordered
if __name__ == '__main__':
url = 'http://www.example.com'
driver = webdriver.Firefox()
driver.get(url)
timer = PageLoadTimer(driver)
print timer.get_event_times()
driver.quit()
| mit | Python |
e88f1694d3c7e01f701ae91d472b75731799acdf | Fix ckan PR'ing - Caused by plague006's 12e44142 commit | EIREXE/SpaceDock,EIREXE/SpaceDock,EIREXE/SpaceDock,EIREXE/SpaceDock | KerbalStuff/ckan.py | KerbalStuff/ckan.py | from KerbalStuff.config import _cfg
from github import Github
from flask import url_for
import subprocess
import json
import os
import re
# TODO(Thomas): Make this modular
def send_to_ckan(mod):
if not _cfg("netkan_repo_path"):
return
if not mod.ckan:
return
json_blob = {
'spec_version': 'v1.4',
'identifier': re.sub(r'\W+', '', mod.name),
'$kref': '#/ckan/spacedock/' + str(mod.id),
'license': mod.license,
'x_via': 'Automated ' + _cfg('site-name') + ' CKAN submission'
}
wd = _cfg("netkan_repo_path")
path = os.path.join(wd, 'NetKAN', json_blob['identifier'] + '.netkan')
if os.path.exists(path):
# If the file is already there, then chances are this mod has already been indexed
return
with open(path, 'w') as f:
f.write(json.dumps(json_blob, indent=4))
subprocess.call(['git', 'fetch', 'upstream'], cwd=wd)
subprocess.call(['git', 'checkout', '-b', 'add-' + json_blob['identifier'], 'upstream/master'], cwd=wd)
subprocess.call(['git', 'add', '-A'], cwd=wd)
subprocess.call(['git', 'commit', '-m', 'Add {0} from '.format(mod.name) + _cfg('site-name') + '\n\nThis is an automated commit on behalf of {1}'\
.format(mod.name, mod.user.username), '--author={0} <{1}>'.format(mod.user.username, mod.user.email)], cwd=wd)
subprocess.call(['git', 'push', '-u', 'origin', 'add-' + json_blob['identifier']], cwd=wd)
g = Github(_cfg('github_user'), _cfg('github_pass'))
r = g.get_repo("KSP-CKAN/NetKAN")
r.create_pull(title="Add {0} from ".format(mod.name) + _cfg('site-name'), base=r.default_branch, head=_cfg('github_user') + ":add-" + json_blob['identifier'], body=\
"""\
This pull request was automatically generated by """ + _cfg('site-name') + """ on behalf of {0}, to add [{1}]({4}{2}) to CKAN.
Mod details:
name = {2}
author = {0}
description = {5}
abstract = {6}
license = {7}
Homepage = {8}
Please direct questions about this pull request to [{0}]({4}{3}).
""".format(mod.user.username, mod.name,\
url_for('mods.mod', mod_name=mod.name, id=mod.id),\
url_for("profile.view_profile", username=mod.user.username),\
_cfg("protocol") + "://" + _cfg("domain"),\
mod.description, mod.short_description,\
mod.license, mod.external_link))
| from KerbalStuff.config import _cfg
from github import Github
from flask import url_for
import subprocess
import json
import os
import re
# TODO(Thomas): Make this modular
def send_to_ckan(mod):
if not _cfg("netkan_repo_path"):
return
if not mod.ckan:
return
json_blob = {
'spec_version': 'v1.4',
'identifier': re.sub(r'\W+', '', mod.name),
'$kref': '#/ckan/spacedock/' + str(mod.id),
'license': mod.license,
'x_via': 'Automated ' + _cfg('site-name') + ' CKAN submission'
}
wd = _cfg("netkan_repo_path")
path = os.path.join(wd, 'NetKAN', json_blob['identifier'] + '.netkan')
if os.path.exists(path):
# If the file is already there, then chances are this mod has already been indexed
return
with open(path, 'w') as f:
f.write(json.dumps(json_blob, indent=4))
subprocess.call(['git', 'fetch', 'upstream'], cwd=wd)
subprocess.call(['git', 'checkout', '-b', 'add-' + json_blob['identifier'], 'upstream/master'], cwd=wd)
subprocess.call(['git', 'add', '-A'], cwd=wd)
subprocess.call(['git', 'commit', '-m', 'Add {0} from '.format(mod.name) + _cfg('site-name') + '\n\nThis is an automated commit on behalf of {1}'\
.format(mod.name, mod.user.username), '--author={0} <{1}>'.format(mod.user.username, mod.user.email)], cwd=wd)
subprocess.call(['git', 'push', '-u', 'origin', 'add-' + json_blob['identifier']], cwd=wd)
g = Github(_cfg('github_user'), _cfg('github_pass'))
r = g.get_repo("KSP-CKAN/NetKAN")
r.create_pull(title="Add {0} from ".format(mod.name) + _cfg('site-name'), base=r.default_branch, head=_cfg('github_user') + ":add-" + json_blob['identifier'], body=\
"""\
This pull request was automatically generated by """ + _cfg('site-name') + """ on behalf of {0}, to add [{1}]({4}{2}) to CKAN.
Mod details:
name = {2}
author = {0}
description = {5}
abstract = {6}
license = {7}
Homepage = {8}
Please direct questions about this pull request to [{0}]({4}{3}).
""".format(mod.user.username, mod.name,\
url_for('mods.mod', mod_name=mod.name, id=mod.id),\
url_for("profile.view_profile", username=mod.user.username),\
_cfg("protocol") + "://" + _cfg("domain"),\
mod.description, short_description,\
license, external_link))
| mit | Python |
aa164b92e0a3d00c18dd7ad4aecb067ad2bc9bb0 | bump to 1.3.0 | fireeye/flare-floss,fireeye/flare-floss | floss/version.py | floss/version.py | __version__ = '1.3.0'
| __version__ = '1.2.0'
| apache-2.0 | Python |
345ccc9d503e6e55fe46d7813958c0081cc1cffe | Fix issues with importing the Login form | Mirantis/mos-horizon,endorphinl/horizon,RudoCris/horizon,davidcusatis/horizon,Daniex/horizon,watonyweng/horizon,tqtran7/horizon,sandvine/horizon,openstack/horizon,mdavid/horizon,davidcusatis/horizon,maestro-hybrid-cloud/horizon,VaneCloud/horizon,CiscoSystems/avos,Dark-Hacker/horizon,luhanhan/horizon,RudoCris/horizon,froyobin/horizon,xme1226/horizon,Mirantis/mos-horizon,CiscoSystems/horizon,tellesnobrega/horizon,Daniex/horizon,redhat-cip/horizon,Daniex/horizon,karthik-suresh/horizon,nvoron23/avos,davidcusatis/horizon,liyitest/rr,wangxiangyu/horizon,JioCloud/horizon,wangxiangyu/horizon,vladryk/horizon,yeming233/horizon,kfox1111/horizon,wangxiangyu/horizon,Metaswitch/horizon,JioCloud/horizon,dan1/horizon-x509,gerrive/horizon,Metaswitch/horizon,saydulk/horizon,aaronorosen/horizon-congress,j4/horizon,promptworks/horizon,watonyweng/horizon,Dark-Hacker/horizon,FNST-OpenStack/horizon,blueboxgroup/horizon,blueboxgroup/horizon,Hodorable/0602,yjxtogo/horizon,ging/horizon,damien-dg/horizon,endorphinl/horizon-fork,agileblaze/OpenStackTwoFactorAuthentication,tellesnobrega/horizon,VaneCloud/horizon,yjxtogo/horizon,xinwu/horizon,ging/horizon,kfox1111/horizon,liyitest/rr,eayunstack/horizon,takeshineshiro/horizon,newrocknj/horizon,promptworks/horizon,yjxtogo/horizon,nvoron23/avos,doug-fish/horizon,promptworks/horizon,bac/horizon,noironetworks/horizon,CiscoSystems/avos,ging/horizon,tqtran7/horizon,coreycb/horizon,mrunge/horizon_lib,karthik-suresh/horizon,zouyapeng/horizon,yeming233/horizon,orbitfp7/horizon,endorphinl/horizon-fork,mdavid/horizon,karthik-suresh/horizon,wolverineav/horizon,redhat-cip/horizon,dan1/horizon-proto,newrocknj/horizon,BiznetGIO/horizon,idjaw/horizon,RudoCris/horizon,redhat-cip/horizon,Tesora/tesora-horizon,izadorozhna/dashboard_integration_tests,mdavid/horizon,icloudrnd/automation_tools,agileblaze/OpenStackTwoFactorAuthentication,yeming233/horizon,NCI-Cloud/horizon,Tesora/teso
ra-horizon,ChameleonCloud/horizon,Solinea/horizon,tsufiev/horizon,doug-fish/horizon,NeCTAR-RC/horizon,Dark-Hacker/horizon,gerrive/horizon,django-leonardo/horizon,philoniare/horizon,flochaz/horizon,flochaz/horizon,idjaw/horizon,nvoron23/avos,openstack/horizon,Tesora/tesora-horizon,maestro-hybrid-cloud/horizon,liyitest/rr,NCI-Cloud/horizon,agileblaze/OpenStackTwoFactorAuthentication,mrunge/horizon_lib,yjxtogo/horizon,NCI-Cloud/horizon,bigswitch/horizon,redhat-openstack/horizon,saydulk/horizon,takeshineshiro/horizon,dan1/horizon-proto,redhat-openstack/horizon,dan1/horizon-proto,tsufiev/horizon,mrunge/horizon_lib,froyobin/horizon,anthonydillon/horizon,pranavtendolkr/horizon,bigswitch/horizon,mandeepdhami/horizon,doug-fish/horizon,kfox1111/horizon,BiznetGIO/horizon,takeshineshiro/horizon,Hodorable/0602,NeCTAR-RC/horizon,sandvine/horizon,NCI-Cloud/horizon,wolverineav/horizon,Solinea/horizon,sandvine/horizon,django-leonardo/horizon,NeCTAR-RC/horizon,mrunge/horizon,mandeepdhami/horizon,bigswitch/horizon,gerrive/horizon,blueboxgroup/horizon,Dark-Hacker/horizon,idjaw/horizon,icloudrnd/automation_tools,xme1226/horizon,RudoCris/horizon,noironetworks/horizon,mrunge/horizon,ging/horizon,JioCloud/horizon,takeshineshiro/horizon,blueboxgroup/horizon,Mirantis/mos-horizon,FNST-OpenStack/horizon,newrocknj/horizon,j4/horizon,mrunge/horizon,henaras/horizon,mrunge/openstack_horizon,Metaswitch/horizon,aaronorosen/horizon-congress,bac/horizon,kfox1111/horizon,FNST-OpenStack/horizon,flochaz/horizon,wangxiangyu/horizon,luhanhan/horizon,orbitfp7/horizon,endorphinl/horizon,aaronorosen/horizon-congress,django-leonardo/horizon,doug-fish/horizon,openstack/horizon,zouyapeng/horizon,damien-dg/horizon,endorphinl/horizon-fork,Tesora/tesora-horizon,anthonydillon/horizon,endorphinl/horizon,Solinea/horizon,FNST-OpenStack/horizon,BiznetGIO/horizon,watonyweng/horizon,noironetworks/horizon,henaras/horizon,watonyweng/horizon,promptworks/horizon,dan1/horizon-x509,wolverineav/horizon,karthik-suresh/horizon,luh
anhan/horizon,Hodorable/0602,noironetworks/horizon,Daniex/horizon,maestro-hybrid-cloud/horizon,froyobin/horizon,VaneCloud/horizon,zouyapeng/horizon,philoniare/horizon,eayunstack/horizon,bac/horizon,gerrive/horizon,ChameleonCloud/horizon,django-leonardo/horizon,CiscoSystems/horizon,mandeepdhami/horizon,redhat-cip/horizon,anthonydillon/horizon,vladryk/horizon,CiscoSystems/horizon,xinwu/horizon,BiznetGIO/horizon,endorphinl/horizon-fork,tsufiev/horizon,eayunstack/horizon,redhat-openstack/horizon,dan1/horizon-x509,wolverineav/horizon,ChameleonCloud/horizon,agileblaze/OpenStackTwoFactorAuthentication,redhat-openstack/horizon,pranavtendolkr/horizon,sandvine/horizon,tqtran7/horizon,maestro-hybrid-cloud/horizon,henaras/horizon,CiscoSystems/horizon,mrunge/openstack_horizon,saydulk/horizon,tsufiev/horizon,Solinea/horizon,bac/horizon,pranavtendolkr/horizon,icloudrnd/automation_tools,tqtran7/horizon,luhanhan/horizon,Metaswitch/horizon,mandeepdhami/horizon,saydulk/horizon,nvoron23/avos,damien-dg/horizon,tellesnobrega/horizon,CiscoSystems/avos,zouyapeng/horizon,mrunge/openstack_horizon,xinwu/horizon,NeCTAR-RC/horizon,liyitest/rr,icloudrnd/automation_tools,vladryk/horizon,newrocknj/horizon,j4/horizon,coreycb/horizon,anthonydillon/horizon,j4/horizon,idjaw/horizon,coreycb/horizon,ChameleonCloud/horizon,vladryk/horizon,orbitfp7/horizon,endorphinl/horizon,damien-dg/horizon,orbitfp7/horizon,xinwu/horizon,dan1/horizon-x509,Hodorable/0602,openstack/horizon,VaneCloud/horizon,Mirantis/mos-horizon,xme1226/horizon,henaras/horizon,CiscoSystems/avos,philoniare/horizon,yeming233/horizon,mdavid/horizon,davidcusatis/horizon,pranavtendolkr/horizon,tellesnobrega/horizon,coreycb/horizon,philoniare/horizon,izadorozhna/dashboard_integration_tests,flochaz/horizon,dan1/horizon-proto,bigswitch/horizon | openstack_dashboard/views.py | openstack_dashboard/views.py | # Copyright 2012 Nebula, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from django import shortcuts
from django.views.decorators import vary
import horizon
from horizon import base
from openstack_auth import forms
def get_user_home(user):
dashboard = None
if user.is_superuser:
try:
dashboard = horizon.get_dashboard('admin')
except base.NotRegistered:
pass
if dashboard is None:
dashboard = horizon.get_default_dashboard()
return dashboard.get_absolute_url()
@vary.vary_on_cookie
def splash(request):
if request.user.is_authenticated():
return shortcuts.redirect(horizon.get_user_home(request.user))
form = forms.Login(request)
request.session.clear()
request.session.set_test_cookie()
return shortcuts.render(request, 'splash.html', {'form': form})
| # Copyright 2012 Nebula, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from django import shortcuts
from django.views.decorators import vary
import horizon
from horizon import base
from openstack_auth import views
def get_user_home(user):
dashboard = None
if user.is_superuser:
try:
dashboard = horizon.get_dashboard('admin')
except base.NotRegistered:
pass
if dashboard is None:
dashboard = horizon.get_default_dashboard()
return dashboard.get_absolute_url()
@vary.vary_on_cookie
def splash(request):
if request.user.is_authenticated():
return shortcuts.redirect(horizon.get_user_home(request.user))
form = views.Login(request)
request.session.clear()
request.session.set_test_cookie()
return shortcuts.render(request, 'splash.html', {'form': form})
| apache-2.0 | Python |
1e1f84d8b9303ee88a901f9440187b77cd06e464 | Update to Support SoftwareUpdater | Salandora/octoprint-customControl,Salandora/octoprint-customControl,Salandora/octoprint-customControl | octoprint_customControl/__init__.py | octoprint_customControl/__init__.py | # coding=utf-8
from __future__ import absolute_import
__author__ = "Marc Hannappel <salandora@gmail.com>"
__license__ = 'GNU Affero General Public License http://www.gnu.org/licenses/agpl.html'
from octoprint.settings import settings
import octoprint.plugin
class CustomControlPlugin(octoprint.plugin.SettingsPlugin,
octoprint.plugin.TemplatePlugin,
octoprint.plugin.AssetPlugin):
def get_settings_defaults(self):
return dict(
controls = []
)
def get_template_configs(self):
if "editorcollection" in self._plugin_manager.enabled_plugins:
return [
dict(type="plugin_editorcollection_EditorCollection", template="customControl_hookedsettings.jinja2", custom_bindings=True)
]
else:
return [
dict(type="settings", template="customControl_hookedsettings.jinja2", custom_bindings=True)
]
def on_settings_save(self, data):
s = settings()
s.set(["controls"], data["controls"])
def get_assets(self):
return dict(
js=[
"js/customControl.js",
"js/customControlDialog.js",
],
css=["css/customControls.css"],
less=["less/customControls.less"]
)
def get_update_information(self):
return dict(
systemcommandeditor=dict(
displayName="Custom Control Editor Plugin",
displayVersion=self._plugin_version,
# version check: github repository
type="github_release",
user="Salanddora",
repo="octoprint-customControl",
current=self._plugin_version,
# update method: pip
pip="https://github.com/Salandora/octoprint-customControl/archive/{target_version}.zip"
)
)
__plugin_name__ = "Custom Control Editor"
def __plugin_load__():
global __plugin_implementation__
__plugin_implementation__ = CustomControlPlugin()
global __plugin_hooks__
__plugin_hooks__ = {
"octoprint.plugin.softwareupdate.check_config": __plugin_implementation__.get_update_information
}
global __plugin_license__
__plugin_license__ = "AGPLv3"
| # coding=utf-8
from __future__ import absolute_import
__author__ = "Marc Hannappel <sunpack@web.de>"
__license__ = 'GNU Affero General Public License http://www.gnu.org/licenses/agpl.html'
__copyright__ = "Copyright (C) 2014 The OctoPrint Project - Released under terms of the AGPLv3 License"
from octoprint.settings import settings
import octoprint.plugin
class CustomControlPlugin(octoprint.plugin.SettingsPlugin,
octoprint.plugin.TemplatePlugin,
octoprint.plugin.AssetPlugin):
def get_settings_defaults(self):
return dict(
controls = []
)
def get_template_configs(self):
if "editorcollection" in self._plugin_manager.enabled_plugins:
return [
dict(type="plugin_editorcollection_EditorCollection", template="customControl_hookedsettings.jinja2", custom_bindings=True)
]
else:
return [
dict(type="settings", template="customControl_hookedsettings.jinja2", custom_bindings=True)
]
def on_settings_save(self, data):
s = settings()
s.set(["controls"], data["controls"])
def get_assets(self):
return dict(
js=[
"js/customControl.js",
"js/customControlDialog.js",
],
css=["css/customControls.css"],
less=["less/customControls.less"]
)
# If you want your plugin to be registered within OctoPrint under a different name than what you defined in setup.py
# ("OctoPrint-PluginSkeleton"), you may define that here. Same goes for the other metadata derived from setup.py that
# can be overwritten via __plugin_xyz__ control properties. See the documentation for that.
__plugin_name__ = "CustomControl"
__plugin_license__ = "AGPLv3"
__plugin_implementation__ = CustomControlPlugin()
| agpl-3.0 | Python |
33abd340a824b16f084472987a93dd34f9af359d | Add missing USER_LIMIT field validations | philanthropy-u/edx-platform,philanthropy-u/edx-platform,philanthropy-u/edx-platform,philanthropy-u/edx-platform | openedx/features/partners/models.py | openedx/features/partners/models.py | from django.contrib.auth.models import User
from django.core.exceptions import ValidationError
from django.db import models
from jsonfield.fields import JSONField
from model_utils import Choices
from model_utils.models import TimeStampedModel
from .constants import PARTNER_USER_STATUS_WAITING, PARTNER_USER_STATUS_APPROVED
class Partner(TimeStampedModel):
    """
    A white-labelled partner site.

    ``configuration`` is free-form JSON; the only key validated here is
    ``USER_LIMIT``, which must be a digit-only string or a blank string.
    """
    performance_url = models.URLField(blank=True, default=None)
    label = models.CharField(max_length=100)
    # Partner branding images.
    main_logo = models.CharField(max_length=255)
    small_logo = models.CharField(max_length=255)
    slug = models.CharField(max_length=100, unique=True)
    configuration = JSONField(null=False, blank=True, default={"USER_LIMIT": ""})

    def __unicode__(self):
        return '{}'.format(self.label)

    class Meta:
        verbose_name = "Partner"
        verbose_name_plural = "Partners"

    def clean(self, *args, **kwargs):
        # USER_LIMIT may be absent, None or '' (no limit); anything else
        # must be a digit-only string such as '100'.
        user_limit = self.configuration.get("USER_LIMIT")
        if user_limit is None or user_limit == "":
            pass
        elif not isinstance(user_limit, basestring) or not user_limit.isdigit():
            raise ValidationError({
                "configuration": ValidationError("USER_LIMIT can only be an integer string or blank string"),
            })
        super(Partner, self).clean(*args, **kwargs)
class PartnerUser(TimeStampedModel):
    """
    Membership link between a Django user and a Partner.

    ``status`` defaults to the approved state; the waiting state is
    available for associations that are not yet approved.
    """
    USER_STATUS = Choices(PARTNER_USER_STATUS_WAITING, PARTNER_USER_STATUS_APPROVED)
    user = models.ForeignKey(User, db_index=True, on_delete=models.CASCADE, related_name="partner_user")
    partner = models.ForeignKey(Partner, db_index=True, on_delete=models.CASCADE, related_name="partner")
    status = models.CharField(max_length=32, choices=USER_STATUS, default=PARTNER_USER_STATUS_APPROVED)

    def __unicode__(self):
        return '{partner}-{user}'.format(partner=self.partner.label, user=self.user.username)

    class Meta:
        # A user can be associated with a given partner at most once.
        unique_together = ('user', 'partner')
class PartnerCommunity(models.Model):
    """Associates a community (by numeric id) with a Partner."""
    community_id = models.IntegerField()
    partner = models.ForeignKey(Partner, db_index=True, on_delete=models.CASCADE, related_name='communities')

    class Meta:
        # Each community can be linked to a given partner only once.
        unique_together = ('community_id', 'partner')
| from django.contrib.auth.models import User
from django.core.exceptions import ValidationError
from django.db import models
from jsonfield.fields import JSONField
from model_utils import Choices
from model_utils.models import TimeStampedModel
from .constants import PARTNER_USER_STATUS_WAITING, PARTNER_USER_STATUS_APPROVED
class Partner(TimeStampedModel):
    """
    A white-labelled partner site.

    ``configuration`` is free-form JSON; the only key validated here is
    ``USER_LIMIT``, which must be a digit-only string or blank/absent.
    """
    performance_url = models.URLField(blank=True, default=None)
    label = models.CharField(max_length=100)
    main_logo = models.CharField(max_length=255)
    small_logo = models.CharField(max_length=255)
    slug = models.CharField(max_length=100, unique=True)
    configuration = JSONField(null=False, blank=True, default={"USER_LIMIT": ""})

    def __unicode__(self):
        return '{}'.format(self.label)

    class Meta:
        verbose_name = "Partner"
        verbose_name_plural = "Partners"

    def clean(self, *args, **kwargs):
        """Validate USER_LIMIT: allow blank/missing, else require digits.

        The previous ``int(user_limit)`` probe raised an uncaught
        TypeError when USER_LIMIT was None (missing key) and wrongly
        rejected the blank-string default, which is meant to be valid.
        """
        user_limit = self.configuration.get("USER_LIMIT")
        if user_limit not in (None, ""):
            if not isinstance(user_limit, basestring) or not user_limit.isdigit():
                raise ValidationError({
                    'configuration': ValidationError(
                        'USER_LIMIT can only be an integer string or blank string'),
                })
        super(Partner, self).clean(*args, **kwargs)
class PartnerUser(TimeStampedModel):
"""
This model represents all the users that are associated to a partner.
"""
USER_STATUS = Choices(PARTNER_USER_STATUS_WAITING, PARTNER_USER_STATUS_APPROVED)
user = models.ForeignKey(User, db_index=True, on_delete=models.CASCADE, related_name="partner_user")
partner = models.ForeignKey(Partner, db_index=True, on_delete=models.CASCADE, related_name="partner")
status = models.CharField(max_length=32, choices=USER_STATUS, default=PARTNER_USER_STATUS_APPROVED)
def __unicode__(self):
return '{partner}-{user}'.format(partner=self.partner.label, user=self.user.username)
class Meta:
unique_together = ('user', 'partner')
class PartnerCommunity(models.Model):
community_id = models.IntegerField()
partner = models.ForeignKey(Partner, db_index=True, on_delete=models.CASCADE, related_name='communities')
class Meta:
unique_together = ('community_id', 'partner')
| agpl-3.0 | Python |
a74df5851ea211be5bb29caeb03179be70f488cc | fix bug: conf to redis_conf | alone-walker/BlogSpider,hack4code/BlogSpider,hack4code/BlogSpider,wartalker/BlogSpider,wartalker/BlogSpider,hack4code/BlogSpider,wartalker/BlogSpider,hack4code/BlogSpider,alone-walker/BlogSpider,alone-walker/BlogSpider,alone-walker/BlogSpider,wartalker/BlogSpider | spider/mydm/extensions/stats.py | spider/mydm/extensions/stats.py | # -*- coding: utf-8 -*-
import logging
from redis.exceptions import ConnectionError
import redis
from scrapy import signals
from ..util import parse_redis_url
logger = logging.getLogger(__name__)
class ExtensionStats:
    """Scrapy extension that counts scraped items per spider and
    publishes the final count to Redis when the spider closes."""

    def __init__(self, stats, settings):
        self.stats = stats
        # Redis connection settings parsed from SPIDER_STATS_URL.
        self.redis_conf = parse_redis_url(settings['SPIDER_STATS_URL'])

    @classmethod
    def from_crawler(cls, crawler):
        """Standard Scrapy extension hook: build the extension and
        subscribe it to the spider lifecycle signals."""
        ext = cls(crawler.stats,
                  crawler.settings)
        crawler.signals.connect(ext.spider_opened,
                                signal=signals.spider_opened)
        crawler.signals.connect(ext.spider_closed,
                                signal=signals.spider_closed)
        crawler.signals.connect(ext.item_scraped,
                                signal=signals.item_scraped)
        return ext

    def spider_opened(self, spider):
        # Start the per-spider item counter at zero, keyed by spider._id.
        self.stats.set_value(spider._id,
                             0)

    def spider_closed(self, spider):
        """Persist the final item count to Redis; a connection failure
        is logged rather than raised so shutdown is not interrupted."""
        try:
            r = redis.Redis(host=self.redis_conf.host,
                            port=self.redis_conf.port,
                            db=self.redis_conf.database)
            r.set(spider._id,
                  self.stats.get_value(spider._id))
        except ConnectionError:
            logger.error('Error in ExtensionStats connect redis server failed')

    def item_scraped(self, item, spider):
        # One more item successfully scraped by this spider.
        self.stats.inc_value(spider._id)
| # -*- coding: utf-8 -*-
import logging
from redis.exceptions import ConnectionError
import redis
from scrapy import signals
from ..util import parse_redis_url
logger = logging.getLogger(__name__)
class ExtensionStats:
    """Scrapy extension that counts scraped items per spider and
    publishes the final count to Redis when the spider closes."""

    def __init__(self, stats, settings):
        self.stats = stats
        # Redis connection settings parsed from SPIDER_STATS_URL.
        self.redis_conf = parse_redis_url(settings['SPIDER_STATS_URL'])

    @classmethod
    def from_crawler(cls, crawler):
        """Standard Scrapy extension hook: build the extension and
        subscribe it to the spider lifecycle signals."""
        ext = cls(crawler.stats,
                  crawler.settings)
        crawler.signals.connect(ext.spider_opened,
                                signal=signals.spider_opened)
        crawler.signals.connect(ext.spider_closed,
                                signal=signals.spider_closed)
        crawler.signals.connect(ext.item_scraped,
                                signal=signals.item_scraped)
        return ext

    def spider_opened(self, spider):
        # Start the per-spider item counter at zero, keyed by spider._id.
        self.stats.set_value(spider._id,
                             0)

    def spider_closed(self, spider):
        """Persist the final item count to Redis.

        Fix: __init__ stores the parsed settings as ``self.redis_conf``,
        but this method read ``self.conf`` -- an attribute that never
        exists -- so every spider close raised AttributeError instead of
        recording the stats.
        """
        try:
            r = redis.Redis(host=self.redis_conf.host,
                            port=self.redis_conf.port,
                            db=self.redis_conf.database)
            r.set(spider._id,
                  self.stats.get_value(spider._id))
        except ConnectionError:
            logger.error('Error in ExtensionStats connect redis server failed')

    def item_scraped(self, item, spider):
        # One more item successfully scraped by this spider.
        self.stats.inc_value(spider._id)
| mit | Python |
d520b36f88099a9bc0986824d919cd854b6ff5e1 | Add start and end layer info to layer parser | ulikoehler/PCBCheck,ulikoehler/PCBCheck | ODB/Layers.py | ODB/Layers.py | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Parser for the ODB++ PCB matrix file
"""
import os.path
from collections import namedtuple
from .StructuredTextParser import *
from .Structures import polarity_map
from enum import Enum
__all__ = ["Layer", "LayerSet", "LayerType", "parse_layers", "read_layers"]
# start,end = start and end layer name
Layer = namedtuple("Layer", ["name", "type", "polarity", "row", "start", "end"])
class LayerSet(list):
    """A list of Layer records with a type-based lookup helper."""

    def find(self, layer_type):
        """Return a new LayerSet holding only layers of *layer_type*."""
        matching = [layer for layer in self if layer.type == layer_type]
        return LayerSet(matching)
class LayerType(Enum):
Component = 1
SilkScreen = 2
SolderPaste = 3
SolderMask = 4
Signal = 5
Drill = 6
Route = 7
Document = 8
Mixed = 9 # Mixed plane & signal
Mask = 10 # GenFlex additional information
_layer_type_map = { # See ODB++ 7.0 spec page 38
"COMPONENT": LayerType.Component,
"SILK_SCREEN": LayerType.SilkScreen,
"SOLDER_PASTE": LayerType.SolderPaste,
"SOLDER_MASK": LayerType.SolderMask,
"SIGNAL": LayerType.Signal,
"DRILL": LayerType.Drill,
"ROUT": LayerType.Route,
"DOCUMENT": LayerType.Document,
"MIXED": LayerType.Mixed,
"MASK": LayerType.Mask
}
def parse_layers(matrix):
    """Extract all LAYER arrays from a parsed matrix file.

    Returns a LayerSet of Layer tuples.  Names are lower-cased (DipTrace
    directories appear to be lowercase); blank START_NAME / END_NAME
    values are mapped to None.
    """
    layers = LayerSet()
    for array in matrix.arrays:
        if array.name != "LAYER":
            continue
        layers.append(Layer(
            array.attributes["NAME"].lower(), # DipTrace seems to use lowercase for directories
            _layer_type_map[array.attributes["TYPE"]],
            polarity_map[array.attributes["POLARITY"]],
            int(array.attributes["ROW"]),
            # NOTE(review): raises KeyError if START_NAME/END_NAME are
            # absent from the array attributes -- confirm they always exist.
            array.attributes["START_NAME"].lower() or None,
            array.attributes["END_NAME"].lower() or None
        ))
    return layers
def read_layers(directory):
matrix = read_structured_text(os.path.join(directory, "matrix/matrix"))
return parse_layers(matrix)
if __name__ == "__main__":
#Parse commandline arguments
import argparse
parser = argparse.ArgumentParser()
parser.add_argument("directory", help="The ODB++ directory")
args = parser.parse_args()
#Perform check
for layer in read_layers(args.directory):
print(layer)
| #!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Parser for the ODB++ PCB matrix file
"""
import os.path
from collections import namedtuple
from .StructuredTextParser import *
from .Structures import polarity_map
from enum import Enum
__all__ = ["Layer", "LayerSet", "LayerType", "parse_layers", "read_layers"]
Layer = namedtuple("Layer", ["name", "type", "polarity", "row"])
class LayerSet(list):
    """List subclass for Layer records, with filtering by layer type."""

    def find(self, layer_type):
        """Return a new LayerSet of the layers whose type matches."""
        return LayerSet(layer for layer in self if layer.type == layer_type)
class LayerType(Enum):
Component = 1
SilkScreen = 2
SolderPaste = 3
SolderMask = 4
Signal = 5
Drill = 6
Route = 7
Document = 8
Mixed = 9 # Mixed plane & signal
Mask = 10 # GenFlex additional information
_layer_type_map = { # See ODB++ 7.0 spec page 38
"COMPONENT": LayerType.Component,
"SILK_SCREEN": LayerType.SilkScreen,
"SOLDER_PASTE": LayerType.SolderPaste,
"SOLDER_MASK": LayerType.SolderMask,
"SIGNAL": LayerType.Signal,
"DRILL": LayerType.Drill,
"ROUT": LayerType.Route,
"DOCUMENT": LayerType.Document,
"MIXED": LayerType.Mixed,
"MASK": LayerType.Mask
}
def parse_layers(matrix):
layers = LayerSet()
for array in matrix.arrays:
if array.name != "LAYER":
continue
layers.append(Layer(
array.attributes["NAME"].lower(), # DipTrace seems to use lowercase for directories
_layer_type_map[array.attributes["TYPE"]],
polarity_map[array.attributes["POLARITY"]],
int(array.attributes["ROW"])
))
return layers
def read_layers(directory):
matrix = read_structured_text(os.path.join(directory, "matrix/matrix"))
return parse_layers(matrix)
if __name__ == "__main__":
#Parse commandline arguments
import argparse
parser = argparse.ArgumentParser()
parser.add_argument("directory", help="The ODB++ directory")
args = parser.parse_args()
#Perform check
for layer in read_layers(args.directory):
print(layer)
| apache-2.0 | Python |
aa2006626743e0c1add50aae36e462d151034259 | Create list of 3-tuples `(label, length, hashname)` | ryuslash/DisPass | dispass/labelfile.py | dispass/labelfile.py | # Copyright (c) 2011-2012 Benjamin Althues <benjamin@babab.nl>
#
# Permission to use, copy, modify, and distribute this software for any
# purpose with or without fee is hereby granted, provided that the above
# copyright notice and this permission notice appear in all copies.
#
# THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
# WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
# MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
# ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
# WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
# ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
# OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
from os.path import expanduser
class Parse:
    """Labelfile parser.

    Reads a dispass labelfile and exposes ``labels`` as a list of
    3-tuples ``(label, length, hashname)``.
    """

    # Defaults applied when a label line carries no explicit option.
    default_length = 30
    default_hashname = 'dispass1'

    def __init__(self, file_location='~/.dispass'):
        """Open the labelfile and strip empty lines and '#' comments.

        Fix: ``file_stripped`` and ``labels`` used to be *class-level*
        lists shared by every Parse instance, so repeated instantiation
        accumulated stale entries; they are now per-instance state.
        """
        self.file_stripped = []
        self.labels = []
        try:
            filehandle = open(expanduser(file_location), 'r')
        except IOError:
            # Best effort: report and leave the instance empty.
            print('error: could not load labelfile')
            return
        for line in filehandle:
            if line[0] != '\n' and line[0] != '#':
                self.file_stripped.append(line)
        filehandle.close()  # was leaked before

    def parse(self):
        """Build ``self.labels`` as ``(label, length, hashname)`` tuples."""
        for line in self.file_stripped:
            words = [w.strip('\n') for w in line.split(' ') if w != '']
            if not words:
                continue
            labelname = words.pop(0)
            length = self.default_length
            hashname = self.default_hashname
            for option in words:
                # str.partition instead of str.strip: strip('hash=')
                # removes *characters* from both ends and mangled values
                # such as 'sha512' -> '512'.
                if option.startswith('length='):
                    length = int(option.partition('=')[2])
                elif option.startswith('hash='):
                    hashname = option.partition('=')[2]
            self.labels.append((labelname, length, hashname))
        return self
class Write:
'''Labelfile editor'''
pass
if __name__ == '__main__':
p = Parse()
for i in p.parse().labels:
print i
| # Copyright (c) 2011-2012 Benjamin Althues <benjamin@babab.nl>
#
# Permission to use, copy, modify, and distribute this software for any
# purpose with or without fee is hereby granted, provided that the above
# copyright notice and this permission notice appear in all copies.
#
# THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
# WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
# MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
# ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
# WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
# ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
# OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
from os.path import expanduser
class Parse:
    '''Labelfile parser'''

    # NOTE(review): these are *class-level* mutable lists shared by every
    # Parse instance; repeated instantiation/parse() calls accumulate into
    # the same lists.  Consider moving them into __init__.
    file_stripped = []
    '''Labelfile stripped for parsing'''
    labels = []
    '''List of labels (+options)'''

    def __init__(self, file_location='~/.dispass'):
        '''Open file and strip empty lines and comments'''
        try:
            filehandle = open(expanduser(file_location), 'r')
        except IOError:
            # Best effort: report and leave file_stripped untouched.
            print 'error: could not load labelfile'
            return
        for i in filehandle:
            # Keep only lines that are neither blank nor '#' comments.
            if i[0] != '\n' and i[0] != '#':
                self.file_stripped.append(i)

    def parse(self):
        '''Strip spaces and create lists of labels with their options'''
        # Each kept line becomes a list of whitespace-separated words
        # (label first, then raw option tokens) appended to self.labels.
        # NOTE(review): returns None, so callers must read .labels after
        # calling parse() -- confirm that is the intended usage.
        for i in self.file_stripped:
            wordlist = []
            line = i.rsplit(' ')
            for word in line:
                if word != '':
                    wordlist.append(word.strip('\n'))
            self.labels.append(wordlist)
class Write:
'''Labelfile editor'''
pass
if __name__ == '__main__':
p = Parse()
p.parse()
for i in p.labels:
print i
| isc | Python |
2279aa0c450d53b04f774d9441e4fc0647466581 | Send platform name to defaul template context | rougeth/bottery | bottery/message.py | bottery/message.py | import os
from datetime import datetime
import attr
from jinja2 import Environment, FileSystemLoader, select_autoescape
@attr.s
class Message:
    """A normalized chat message received from a messaging platform.

    ``raw`` keeps the untouched platform payload; ``timestamp`` is a
    Unix epoch value in seconds.
    """
    id = attr.ib()
    platform = attr.ib()
    user = attr.ib()
    text = attr.ib()
    timestamp = attr.ib()
    raw = attr.ib()

    @property
    def datetime(self):
        # Naive UTC datetime derived from the epoch timestamp.
        return datetime.utcfromtimestamp(self.timestamp)
def render(message, template_name, context=None):
    """Render *template_name* with a default context built from *message*.

    The template always receives ``user`` and ``platform`` from the
    message; entries in *context* extend or override those defaults.

    Fix: the default was a mutable ``context={}`` argument -- a shared
    object across calls and a classic Python pitfall -- replaced with
    the None sentinel idiom.
    """
    base_dir = os.path.join(os.getcwd(), 'templates')
    paths = [base_dir]
    # Include paths on settings
    # paths.extend(settings.TEMPLATES)
    env = Environment(
        loader=FileSystemLoader(paths),
        autoescape=select_autoescape(['html']))
    template = env.get_template(template_name)
    default_context = {
        'user': message.user,
        'platform': message.platform,
    }
    default_context.update(context or {})
    return template.render(**default_context)
| import os
from datetime import datetime
import attr
from jinja2 import Environment, FileSystemLoader, select_autoescape
@attr.s
class Message:
id = attr.ib()
platform = attr.ib()
user = attr.ib()
text = attr.ib()
timestamp = attr.ib()
raw = attr.ib()
@property
def datetime(self):
return datetime.utcfromtimestamp(self.timestamp)
def render(message, template_name, context=None):
    """Render *template_name* with a default context built from *message*.

    Fixes: (1) the mutable default argument ``context={}`` is replaced
    with the None sentinel idiom; (2) the message's originating
    ``platform`` is now exposed to templates alongside ``user`` so they
    can branch on it.  Entries in *context* override the defaults.
    """
    base_dir = os.path.join(os.getcwd(), 'templates')
    paths = [base_dir]
    # Include paths on settings
    # paths.extend(settings.TEMPLATES)
    env = Environment(
        loader=FileSystemLoader(paths),
        autoescape=select_autoescape(['html']))
    template = env.get_template(template_name)
    default_context = {
        'user': message.user,
        'platform': message.platform,
    }
    default_context.update(context or {})
    return template.render(**default_context)
| mit | Python |
119c3101d17627b359382738afe059e6be204636 | disable Notice of Election scraper | DemocracyClub/EveryElection,DemocracyClub/EveryElection,DemocracyClub/EveryElection | every_election/apps/election_snooper/management/commands/snoop.py | every_election/apps/election_snooper/management/commands/snoop.py | from django.core.management.base import BaseCommand
from election_snooper.snoopers.aldc import ALDCScraper
# from election_snooper.snoopers.customsearch import CustomSearchScraper
from election_snooper.snoopers.lib_dem_newbies import LibDemNewbiesScraper
class Command(BaseCommand):
    """Management command that runs the enabled election scrapers.

    The CustomSearchScraper call is deliberately left commented out
    (disabled), as is the unused add_arguments hook.
    """
    # def add_arguments(self, parser):
    #     parser.add_argument('sample', nargs='+')

    def handle(self, *args, **options):
        ALDCScraper().get_all()
        # CustomSearchScraper().get_all()
        LibDemNewbiesScraper().get_all()
| from django.core.management.base import BaseCommand
from election_snooper.snoopers.aldc import ALDCScraper
from election_snooper.snoopers.customsearch import CustomSearchScraper
from election_snooper.snoopers.lib_dem_newbies import LibDemNewbiesScraper
class Command(BaseCommand):
# def add_arguments(self, parser):
# parser.add_argument('sample', nargs='+')
def handle(self, *args, **options):
ALDCScraper().get_all()
CustomSearchScraper().get_all()
LibDemNewbiesScraper().get_all()
| bsd-3-clause | Python |
6afad2e2b6b0e2fe9ce1ba55c6ed7dd000c9d4ca | correct handling of input flag | shuggiefisher/brain4k,wkal/brain4k | brain4k/brain4k.py | brain4k/brain4k.py | import os
import logging
from argparse import ArgumentParser
from pipeline import execute_pipeline
logging.basicConfig(level=logging.DEBUG)
class Brain4kArgumentParser(ArgumentParser):
    """Command-line parser for the brain4k entry point.

    Accepts an optional repository path (defaulting to the current
    working directory) and a flag to force re-rendering of metrics.
    """

    def __init__(self, *args, **kwargs):
        super(Brain4kArgumentParser, self).__init__(*args, **kwargs)
        repo_opts = dict(
            nargs='?',
            default=os.getcwd(),
            help='Path to the brain4k repository',
        )
        self.add_argument('repo path', **repo_opts)
        metrics_opts = dict(
            dest='force_render_metrics',
            action='store_true',
            help='Re-render the metrics and README.md',
        )
        self.add_argument('--force-render-metrics', **metrics_opts)
def run():
    """Console entry point: parse CLI arguments and run the pipeline.

    A relative repository path is resolved against the current working
    directory before being handed to execute_pipeline.
    """
    parser = Brain4kArgumentParser()
    brain4k_args = parser.parse_args()
    # The positional argument was registered as 'repo path' (with a
    # space), so it must be read via getattr.
    repo_path = getattr(brain4k_args, 'repo path')
    if not os.path.isabs(repo_path):
        repo_path = os.path.join(os.getcwd(), repo_path)
    execute_pipeline(
        repo_path,
        force_render_metrics=brain4k_args.force_render_metrics
    )
| import os
import logging
from argparse import ArgumentParser
from pipeline import execute_pipeline
logging.basicConfig(level=logging.DEBUG)
class Brain4kArgumentParser(ArgumentParser):
    """Command-line parser for the brain4k entry point."""

    def __init__(self, *args, **kwargs):
        super(Brain4kArgumentParser, self).__init__(*args, **kwargs)
        self.add_argument(
            'repo path',
            nargs='?',
            default=os.getcwd(),
            help='Path to the brain4k repository'
        )
        # Fix: 'store_false' inverted the flag (True by default, False
        # when passed -- the opposite of the help text).  'store_true'
        # makes the flag False by default and True only when
        # --force-render-metrics is given.
        self.add_argument(
            '--force-render-metrics',
            dest='force_render_metrics',
            action='store_true',
            help='Re-render the metrics and README.md'
        )
def run():
parser = Brain4kArgumentParser()
brain4k_args = parser.parse_args()
repo_path = getattr(brain4k_args, 'repo path')
if not os.path.isabs(repo_path):
repo_path = os.path.join(os.getcwd(), repo_path)
execute_pipeline(
repo_path,
force_render_metrics=brain4k_args.force_render_metrics
)
| apache-2.0 | Python |
22b697729d1ee43d322aa1187b3a5f6101f836a5 | Remove Python 2.6 backwards compatibility | python-odin/odin | odin/__init__.py | odin/__init__.py | # Disable logging if an explicit handler is not added
import logging
logging.getLogger('odin.registration').addHandler(logging.NullHandler())
__authors__ = "Tim Savage"
__author_email__ = "tim@savage.company"
__copyright__ = "Copyright (C) 2014 Tim Savage"
__version__ = "1.0"
from odin.fields import * # noqa
from odin.fields.composite import * # noqa
from odin.fields.virtual import * # noqa
from odin.mapping import * # noqa
from odin.resources import Resource # noqa
from odin.adapters import ResourceAdapter # noqa
| __authors__ = "Tim Savage"
__author_email__ = "tim@savage.company"
__copyright__ = "Copyright (C) 2014 Tim Savage"
__version__ = "1.0"
# Disable logging if an explicit handler is not added
try:
import logging
logging.getLogger('odin').addHandler(logging.NullHandler())
except AttributeError:
pass # Fallback for python 2.6
from odin.fields import * # noqa
from odin.fields.composite import * # noqa
from odin.fields.virtual import * # noqa
from odin.mapping import * # noqa
from odin.resources import Resource # noqa
from odin.adapters import ResourceAdapter # noqa
| bsd-3-clause | Python |
99d82aac2ed1a4ff8b87a30e18beea2731de1d4a | Remove project from modules and steps in admin. | patrickbeeson/diy-trainer | diytrainer/projects/admin.py | diytrainer/projects/admin.py | from django.contrib import admin
from sorl.thumbnail.admin import AdminImageMixin
from .models import Project, Feedback, DetailLevel, Step, Module
class StepInline(AdminImageMixin, admin.StackedInline):
model = Step
class StepAdmin(admin.ModelAdmin):
    """Admin list view for Steps, showing the owning module for steps
    that belong to a level-3 detail level."""
    list_display = ('sanitized_title', 'detail_level', 'rank', 'module')
    readonly_fields = ('module',)

    def module(self, obj):
        # Only level-3 steps are organised into modules; look the title
        # up through the Module<->Step M2M relation, else a placeholder.
        # NOTE(review): values_list(...)[0] raises IndexError if a
        # level-3 step has no module -- confirm that cannot happen.
        if obj.detail_level.level == 3:
            module = Module.objects.filter(
                steps__id=obj.id).values_list('title', flat=True)[0]
            return module
        else:
            return 'N/A'
    module.short_description = 'Module'

    #def project(self, obj):
    #    project = Project.objects.filter(
    #        detaillevel__level=obj.detail_level.level).values_list(
    #        'name', flat=True)#[0]
    #    return project
    #project.short_description = 'Project'
class ModuleAdmin(admin.ModelAdmin):
fields = ('detail_level', 'project', 'title', 'rank', 'steps')
list_display = ('title', 'rank', 'detail_level')
filter_horizontal = ('steps',)
#readonly_fields = ('project',)
#def project(self, obj):
# project = Project.objects.filter(
# detaillevel__level=obj.detail_level.level).values_list(
# 'name', flat=True)[0]
# return project
#project.short_description = 'Project'
class ModuleInline(admin.StackedInline):
model = Module
filter_horizontal = ('steps',)
extra = 1
class ProjectAdmin(AdminImageMixin, admin.ModelAdmin):
prepopulated_fields = {'slug': ('name',)}
class DetailLevelAdmin(admin.ModelAdmin):
save_on_top = True
list_display = ('level', 'project',)
inlines = [
StepInline
]
class FeedbackAdmin(admin.ModelAdmin):
list_display = (
'submission_date', 'project', 'detail_level', 'was_satisifed')
list_filter = ('was_satisifed', 'submission_date')
readonly_fields = (
'project',
'detail_level',
'project_progress',
'project_confidence',
'project_recommendation',
'submission_date',
'was_satisifed')
admin.site.register(DetailLevel, DetailLevelAdmin)
admin.site.register(Project, ProjectAdmin)
admin.site.register(Feedback, FeedbackAdmin)
admin.site.register(Module, ModuleAdmin)
admin.site.register(Step, StepAdmin)
| from django.contrib import admin
from sorl.thumbnail.admin import AdminImageMixin
from .models import Project, Feedback, DetailLevel, Step, Module
class StepInline(AdminImageMixin, admin.StackedInline):
model = Step
class StepAdmin(admin.ModelAdmin):
list_display = ('sanitized_title', 'detail_level', 'rank', 'module')
readonly_fields = ('module',)
def module(self, obj):
if obj.detail_level.level == 3:
module = Module.objects.filter(
steps__id=obj.id).values_list('title', flat=True)[0]
return module
else:
return 'N/A'
module.short_description = 'Module'
#def project(self, obj):
# project = Project.objects.filter(
# detaillevel__level=obj.detail_level.level).values_list(
# 'name', flat=True)#[0]
# return project
#project.short_description = 'Project'
class ModuleAdmin(admin.ModelAdmin):
    """Admin form and list view for Modules."""
    fields = ('detail_level', 'project', 'title', 'rank', 'steps')
    list_display = ('title', 'rank', 'project', 'detail_level')
    filter_horizontal = ('steps',)
    # 'project' is derived below, so it is not directly editable.
    readonly_fields = ('project',)

    def project(self, obj):
        # Derive the project from a detail level sharing this module's
        # level number.  NOTE(review): values_list(...)[0] raises
        # IndexError when no matching project exists -- confirm a
        # DetailLevel is always attached to a Project.
        project = Project.objects.filter(
            detaillevel__level=obj.detail_level.level).values_list(
            'name', flat=True)[0]
        return project
    project.short_description = 'Project'
class ModuleInline(admin.StackedInline):
model = Module
filter_horizontal = ('steps',)
extra = 1
class ProjectAdmin(AdminImageMixin, admin.ModelAdmin):
prepopulated_fields = {'slug': ('name',)}
class DetailLevelAdmin(admin.ModelAdmin):
save_on_top = True
list_display = ('level', 'project',)
inlines = [
StepInline
]
class FeedbackAdmin(admin.ModelAdmin):
list_display = (
'submission_date', 'project', 'detail_level', 'was_satisifed')
list_filter = ('was_satisifed', 'submission_date')
readonly_fields = (
'project',
'detail_level',
'project_progress',
'project_confidence',
'project_recommendation',
'submission_date',
'was_satisifed')
admin.site.register(DetailLevel, DetailLevelAdmin)
admin.site.register(Project, ProjectAdmin)
admin.site.register(Feedback, FeedbackAdmin)
admin.site.register(Module, ModuleAdmin)
admin.site.register(Step, StepAdmin)
| mit | Python |
c452d12add3eb71215d449019282d2e30552d5be | Fix bug in HUME test. | johnbachman/belpy,johnbachman/indra,pvtodorov/indra,pvtodorov/indra,johnbachman/belpy,pvtodorov/indra,sorgerlab/indra,pvtodorov/indra,bgyori/indra,sorgerlab/belpy,bgyori/indra,johnbachman/belpy,sorgerlab/indra,sorgerlab/belpy,sorgerlab/belpy,sorgerlab/indra,johnbachman/indra,johnbachman/indra,bgyori/indra | indra/tests/test_hume.py | indra/tests/test_hume.py | from __future__ import absolute_import, print_function, unicode_literals
from builtins import dict, str
import os
import unittest
from indra.statements import *
from indra.sources.hume.api import *
# Path to the HUME test files
path_this = os.path.dirname(os.path.abspath(__file__))
test_file_simple = os.path.join(path_this, 'bbn_test_simple.json-ld')
test_file_negatedCause = os.path.join(path_this,
'bbn_test_negatedCause.json-ld')
test_file_negatedEffect = os.path.join(path_this,
'bbn_test_negatedEffect.json-ld')
def test_simple_extraction():
    """Verify that processor extracts a simple causal assertion correctly from
    a JSON-LD file."""
    bp = process_json_file_old(test_file_simple)
    statements = bp.statements
    # Exactly one Influence statement with grounded subject and object.
    assert(len(statements) == 1)
    s0 = statements[0]
    assert(isinstance(s0, Influence))
    assert(s0.subj.name == 'cow')
    assert(s0.subj.db_refs['HUME'] == 'Bovine')
    assert(s0.obj.name == 'moo')
    assert(s0.obj.db_refs['HUME'] == 'MooSound')
    # A single evidence entry carrying the source sentence.
    assert(len(s0.evidence) == 1)
    ev0 = s0.evidence[0]
    assert(ev0.source_api == 'hume')
    assert(ev0.text == 'Cow causes moo.')
def test_negated_cause():
    """We only want to extract causal relations between two positive events.
    The processor should give no statements for a negated cause."""
    bp = process_json_file_old(test_file_negatedCause)
    # A negated cause must yield no Influence statements at all.
    assert(len(bp.statements) == 0)
def test_negated_effect():
    """We only want to extract causal relations between two positive events.
    The processor should give no statements for a negated effect."""
    bp = process_json_file_old(test_file_negatedEffect)
    # A negated effect must yield no Influence statements at all.
    assert(len(bp.statements) == 0)
@unittest.skip('Need updated JSON-LD file')
def test_bbn_on_ben_paragraph():
bp = process_jsonld_file(os.path.join(path_this,
'hackathon_test_paragraph.json-ld'))
assert bp is not None
print(bp.statements)
stmt_dict = {s.get_hash(shallow=False): s for s in bp.statements}
assert len(stmt_dict) == 3, len(stmt_dict)
| from __future__ import absolute_import, print_function, unicode_literals
from builtins import dict, str
import os
import unittest
from indra.statements import *
from indra.sources.hume.api import *
# Path to the HUME test files
path_this = os.path.dirname(os.path.abspath(__file__))
test_file_simple = os.path.join(path_this, 'bbn_test_simple.json-ld')
test_file_negatedCause = os.path.join(path_this,
'bbn_test_negatedCause.json-ld')
test_file_negatedEffect = os.path.join(path_this,
'bbn_test_negatedEffect.json-ld')
def test_simple_extraction():
"""Verify that processor extracts a simple causal assertion correctly from
a JSON-LD file."""
bp = process_json_file_old(test_file_simple)
statements = bp.statements
assert(len(statements) == 1)
s0 = statements[0]
assert(isinstance(s0, Influence))
assert(s0.subj.name == 'cow')
assert(s0.subj.db_refs['HUME'] == 'Bovine')
assert(s0.obj.name == 'moo')
assert(s0.obj.db_refs['HUME'] == 'MooSound')
assert(len(s0.evidence) == 1)
ev0 = s0.evidence[0]
assert(ev0.source_api == 'hume')
assert(ev0.text == 'Cow causes moo.')
def test_negated_cause():
"""We only want to extract causal relations between two positive events.
The processor should give no statements for a negated cause."""
bp = process_json_file_old(test_file_negatedCause)
assert(len(bp.statements) == 0)
def test_negated_effect():
"""We only want to extract causal relations between two positive events.
The processor should give no statements for a negated effect."""
bp = process_json_file_old(test_file_negatedEffect)
assert(len(bp.statements) == 0)
@unittest.skip('Need updated JSON-LD file')
def test_bbn_on_ben_paragraph():
    """Check statement extraction from the hackathon test paragraph.

    Fix: ``os.join`` does not exist (AttributeError); the intended call
    is ``os.path.join``.
    """
    bp = process_jsonld_file(os.path.join(path_this,
                                          'hackathon_test_paragraph.json-ld'))
    assert bp is not None
    print(bp.statements)
    stmt_dict = {s.get_hash(shallow=False): s for s in bp.statements}
    assert len(stmt_dict) == 3, len(stmt_dict)
| bsd-2-clause | Python |
7a593b9f4e6b05a276b58d14143e9df3fb7a603e | Update int test function | ForestPride/rail-problem | request_integer_in_range.py | request_integer_in_range.py | """ Prompts user to provide integer within a range """
def request_integer_in_range(prompt, lowest, highest):
    """Prompt until the user supplies an integer within [lowest, highest].

    Args:
        prompt: request shown to the user on the first attempt.
        lowest: smallest acceptable value.
        highest: largest acceptable value.

    Returns:
        int: the accepted value.
    """
    # Re-prompt text used after any invalid answer.
    retry_prompt = ("Please enter an integer between " + str(lowest)
                    + " and " + str(highest) + ": ")
    reply = input(prompt)
    while True:
        # First hurdle: the answer must convert to an int at all.
        try:
            number = int(reply)
        except ValueError:
            print("ValueError: You did not enter an integer")
            reply = input(retry_prompt)
            continue
        # Second hurdle: it must fall inside the requested range.
        if lowest <= number <= highest:
            break
        print("Catchall recovery: Number out of range")
        reply = input(retry_prompt)
    print(number, " is acceptable.")
    return number
def main():
""" Test harness """
answer = request_integer_in_range("Enter integer between 0 and 5: ", 0, 5)
print("main received ", answer)
main()
| """ Prompts user to provide integer within a range """
def request_integer_in_range(prompt, lowest, highest):
    """
    Purpose: prompts user for an integer, tests that an integer was
    provided, and verifies the integer is within an acceptable range.
    Inputs:
        prompt (str): request to present to user.
        lowest (int): lowest acceptable value.
        highest (int): highest acceptable value.
    Return (int): the accepted response from user.
    """
    # Re-prompt text used after any invalid answer.
    error_prompt = "Please enter an integer between " + str(lowest)
    error_prompt = error_prompt + " and " + str(highest) + ": "
    response = input(prompt)
    # Loop until an acceptable response is received.
    while True:
        try:
            response = int(response)
            if lowest <= response <= highest:
                break
        except ValueError:
            print("Example of a int() conversion ValueError.")
            # Fix: the except branch never asked again, so a single
            # non-integer answer looped forever converting the same
            # string.  Re-prompt before retrying.
            response = input(error_prompt)
        else:
            # No conversion error, but the number was out of range.
            print("Example of a catchall recovery.")
            response = input(error_prompt)
    print(response, " is acceptable.")
    return response
def main():
    """Test harness exercising request_integer_in_range interactively."""
    # Prompt for an in-range integer, then report what came back.
    accepted = request_integer_in_range("Enter integer between 0 and 5: ", 0, 5)
    print("main received ", accepted)


main()
| mit | Python |
fe5db566be15f50813b6e76fe727e94488249bd3 | bump version | gipit/gippy,gipit/gippy | gippy/version.py | gippy/version.py | #!/usr/bin/env python
################################################################################
# GIPPY: Geospatial Image Processing library for Python
#
# AUTHOR: Matthew Hanson
# EMAIL: matt.a.hanson@gmail.com
#
# Copyright (C) 2015 Applied Geosolutions
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
################################################################################
__version__ = '0.4.0a1'
| #!/usr/bin/env python
################################################################################
# GIPPY: Geospatial Image Processing library for Python
#
# AUTHOR: Matthew Hanson
# EMAIL: matt.a.hanson@gmail.com
#
# Copyright (C) 2015 Applied Geosolutions
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
################################################################################
__version__ = '0.3.5'
| apache-2.0 | Python |
fcdd05392ba5d2bd19d05abdd944021f385edfc1 | Update configuration parser, write commented default configuration file | manuelbua/gitver,manuelbua/gitver,manuelbua/gitver | gitver/config.py | gitver/config.py | #!/usr/bin/env python2
# coding=utf-8
"""
The default per-repository configuration
"""
import json
import string
from os.path import exists, dirname
from gitver.defines import CFGFILE
default_config_text = """{
# automatically generated configuration file
#
# These defaults implements Semantic Versioning as described in the latest
# available documentation at http://semver.org/spec/v2.0.0.html
# default pre-release metadata when commit count > 0 AND
# no NEXT has been defined
"default_meta_pr_in_next_no_next": "NEXT",
# default pre-release metadata when commit count > 0
"default_meta_pr_in_next": "SNAPSHOT",
# default pre-release metadata prefix
"meta_pr_prefix": "-",
# default commit count prefix
"commit_count_prefix": "-",
# Python-based format string variable names are:
# maj, min, patch, meta_pr_prefix, meta_pr, commit_count_prefix,
# commit_count, build_id, build_id_full
# Note that prefixes will be empty strings if their valued counterpart doesn't
# have a meaningful value (i.e., 0 for commit count, no meta pre-release, ..)
# format string used to build the current version string when the
# commit count is 0
"format": "%(maj)s.%(min)s.%(patch)s%(meta_pr_prefix)s%(meta_pr)s",
# format string used to build the current version string when the
# commit count is > 0
"format_next": "%(maj)s.%(min)s.%(patch)s%(meta_pr_prefix)s%(meta_pr)s%(commit_count_prefix)s%(commit_count)s+%(build_id)s"
}"""
def remove_comments(text):
    """Strip comment lines (those whose stripped form starts with '#').

    The surviving lines are joined WITHOUT newlines, matching the
    original behavior (the result is fed straight to json.loads, which
    tolerates the missing separators for this config's layout).
    """
    # str.split replaces the deprecated string.split() module function,
    # which no longer exists in Python 3.
    return ''.join(
        line for line in text.split('\n')
        if not line.strip().startswith('#')
    )
default_config = json.loads(remove_comments(default_config_text))
def init_or_load_user_config():
    """Load the user's JSON config from CFGFILE, merged over the defaults.

    Lines starting with '#' in the file are skipped before parsing.
    Writes the commented default configuration as an example when no
    file exists yet. Returns default_config overridden by user keys.
    """
    # try load user configuration
    try:
        with open(CFGFILE, 'r') as f:
            data = ''
            for line in f:
                l = line.strip()
                if not l.startswith('#'):
                    data += l
            user = json.loads(data)
    except (IOError, ValueError):
        # Missing or malformed file simply means "no user overrides".
        user = dict()
    # save to file as an example
    if not exists(CFGFILE):
        if exists(dirname(CFGFILE)):
            with open(CFGFILE, 'w') as f:
                f.writelines(default_config_text)
            print "(wrote default configuration file \"" + CFGFILE + \
                  "\""
    # merge user with defaults
    return dict(default_config, **user)
cfg = init_or_load_user_config()
| #!/usr/bin/env python2
# coding=utf-8
"""
The default per-repository configuration
"""
import json
from os.path import exists, dirname
from gitver.defines import CFGFILE
default_config = {
'next_suffix': 'NEXT',
'next_custom_suffix': 'SNAPSHOT'
}
def init_or_load_user_config():
    """Load the user's JSON configuration, merged over the defaults.

    Writes ``default_config`` to CFGFILE as an example when the file
    does not exist yet (and its directory does). Returns a dict of
    ``default_config`` overridden by any user-provided keys.
    """
    # try load user configuration
    try:
        with open(CFGFILE, 'r') as f:
            user = json.load(f)
    except (IOError, ValueError):
        # Unused ``as v`` binding removed; a missing or malformed file
        # simply means "no user overrides".
        user = dict()
    # save to file as an example
    if not exists(CFGFILE):
        if exists(dirname(CFGFILE)):
            with open(CFGFILE, 'w') as f:
                json.dump(default_config, f)
    # merge user with defaults
    return dict(default_config, **user)
cfg = init_or_load_user_config()
| apache-2.0 | Python |
c440b460090410400971e8377b6d4ec564fc5215 | fix for Py3.6: override cls in json_kwargs of Django’s JSON serializer | nimbis/django-shop,nimbis/django-shop,awesto/django-shop,nimbis/django-shop,divio/django-shop,divio/django-shop,awesto/django-shop,nimbis/django-shop,divio/django-shop,awesto/django-shop | shop/money/serializers.py | shop/money/serializers.py | # -*- coding: utf-8 -*-
"""
Override django.core.serializers.json.Serializer which renders our MoneyType as float.
"""
from __future__ import unicode_literals
import json
from django.core.serializers.json import DjangoJSONEncoder, Serializer as DjangoSerializer
from django.core.serializers.json import Deserializer
from .money_maker import AbstractMoney
__all__ = ['JSONEncoder', 'Serializer', 'Deserializer']
class JSONEncoder(DjangoJSONEncoder):
    """
    Money type aware JSON encoder for reciprocal usage, such as
    import/export/dumpdata/loaddata.
    """
    def default(self, obj):
        # Render Money amounts as plain floats; defer everything else
        # (dates, decimals, UUIDs, ...) to Django's encoder.
        if not isinstance(obj, AbstractMoney):
            return super(JSONEncoder, self).default(obj)
        return float(obj)
class Serializer(DjangoSerializer):
    """
    Money type aware JSON serializer.
    """
    def end_object(self, obj):
        # self._current holds the field data accumulated for ``obj``.
        indent = self.options.get("indent")
        if not self.first:
            # Object separator: ",\n" when indenting, ", " when compact.
            self.stream.write("," if indent else ", ")
        if indent:
            self.stream.write("\n")
        # Force the Money-aware encoder while keeping any other
        # serializer options the caller supplied.
        dump_kwargs = dict(self.json_kwargs)
        dump_kwargs["cls"] = JSONEncoder
        json.dump(self.get_dump_object(obj), self.stream, **dump_kwargs)
        self._current = None
| # -*- coding: utf-8 -*-
"""
Override django.core.serializers.json.Serializer which renders our MoneyType as float.
"""
from __future__ import unicode_literals
import json
from django.core.serializers.json import DjangoJSONEncoder, Serializer as DjangoSerializer
from django.core.serializers.json import Deserializer
from .money_maker import AbstractMoney
__all__ = ['JSONEncoder', 'Serializer', 'Deserializer']
class JSONEncoder(DjangoJSONEncoder):
"""
Money type aware JSON encoder for reciprocal usage, such as import/export/dumpdata/loaddata.
"""
def default(self, obj):
if isinstance(obj, AbstractMoney):
return float(obj)
return super(JSONEncoder, self).default(obj)
class Serializer(DjangoSerializer):
    """
    Money type aware JSON serializer.
    """
    def end_object(self, obj):
        # self._current has the field data
        indent = self.options.get("indent")
        if not self.first:
            self.stream.write(",")
            if not indent:
                self.stream.write(" ")
        if indent:
            self.stream.write("\n")
        # BUG FIX: passing ``cls=JSONEncoder`` alongside
        # ``**self.json_kwargs`` raises "got multiple values for keyword
        # argument 'cls'" once Django itself places a 'cls' entry in
        # json_kwargs. Merge instead, letting our Money-aware encoder
        # take precedence.
        kwargs = dict(self.json_kwargs, cls=JSONEncoder)
        json.dump(self.get_dump_object(obj), self.stream, **kwargs)
        self._current = None
| bsd-3-clause | Python |
129a4d6dbade887ad4d21629b2909a9ce41124b8 | Update static db auto mkdir of db | galileo-project/Galileo-gpm | gpm/utils/sdb.py | gpm/utils/sdb.py | from gpm.utils.operation import LocalOperation
from gpm.const import GPM_DB, DB_SF
import os
try:
import cPickle as pickle
except:
import pickle
class StaticDB(object):
    """Pickle-backed key/value store named after the concrete subclass.

    Attribute access (``obj.key``) and item access (``obj[key]``) both
    proxy into an internal dict, which is persisted under
    ``GPM_DB/<ClassName>.<DB_SF>``.
    """

    # Attribute names that live on the instance itself and must NOT be
    # proxied into the data dict.
    _INTERNAL = ('_StaticDB__path', '_StaticDB__data')

    def __init__(self):
        # BUG FIX: the original assigned self.__path/self.__data through
        # its own __setattr__ proxy, which recursed forever
        # (__setattr__ -> __setitem__ -> self._data -> __getattr__ ->
        # __getitem__ -> self.__data -> __getattr__ -> ...). Bypass the
        # proxy for internal state.
        object.__setattr__(
            self, '_StaticDB__path',
            os.path.join(GPM_DB, "%s.%s" % (self.__class__.__name__, DB_SF)))
        object.__setattr__(self, '_StaticDB__data', {})
        self.__load()

    def __getattr__(self, item):
        # Only called for names not found normally; proxy to the data.
        if item in StaticDB._INTERNAL:
            # A genuinely missing internal attribute must not be proxied
            # (avoids unbounded recursion on half-constructed instances).
            raise AttributeError(item)
        return self.__getitem__(item)

    def __setattr__(self, key, value):
        if key in StaticDB._INTERNAL:
            object.__setattr__(self, key, value)
        else:
            self.__setitem__(key, value)

    def __delattr__(self, item):
        self.__delitem__(item)

    def __getitem__(self, item):
        # Missing keys yield None rather than raising.
        return self.__data.get(item)

    def __setitem__(self, key, value):
        # BUG FIX: the original wrote to the undefined alias self._data.
        self.__data[key] = value

    def __delitem__(self, key):
        del self.__data[key]

    def __del__(self):
        try:
            # Persist on garbage collection.
            self.__save()
        except Exception:
            # Never propagate from a destructor (interpreter teardown,
            # half-constructed instance, ...).
            pass

    def __len__(self):
        return len(self.__data)

    def update(self, data):
        self.add(data)

    def add(self, data):
        """Merge ``data`` into the store and persist immediately."""
        if self.__file_exist:
            # Refresh from disk first so concurrent additions survive.
            self.__load()
        self.__data.update(data)
        self.__save()

    def __save(self, data=None):
        with open(self.__path, "wb") as stream:
            pickle.dump(data or self.__data, stream)
        if data is not None:
            # Keep the in-memory copy in sync. BUG FIX: the original
            # unconditionally reset it, wiping the dict to None on every
            # default save.
            object.__setattr__(self, '_StaticDB__data', data)

    def __load(self):
        if self.__file_exist:
            with open(self.__path, "rb") as stream:
                data = pickle.load(stream)
            object.__setattr__(self, '_StaticDB__data', data or {})

    @property
    def __file_exist(self):
        # Ensure the DB directory exists before checking for the file.
        db_dir = os.path.dirname(self.__path)
        if not LocalOperation.exist(db_dir):
            LocalOperation.mkdir(db_dir)
        return LocalOperation.exist(self.__path)
from gpm.const import GPM_DB, DB_SF
import os
try:
import cPickle as pickle
except:
import pickle
class StaticDB(object):
    """Pickle-backed key/value store named after the concrete subclass.

    Attribute and item access are both proxied to an internal dict
    persisted under GPM_DB/<ClassName>.<DB_SF>.
    """
    def __init__(self):
        # NOTE(review): these assignments go through the overridden
        # __setattr__ below, which proxies into __setitem__ ->
        # self._data -> __getattr__ -> __getitem__ -> self.__data ->
        # __getattr__ ... — this looks like unbounded recursion on
        # instantiation; confirm.
        self.__path = os.path.join(GPM_DB, "%s.%s" % (self.__class__.__name__, DB_SF))
        self.__data = {}
        self.__load()
    def __getattr__(self, item):
        # Proxy unknown attribute reads into the data dict.
        return self.__getitem__(item)
    def __setattr__(self, key, value):
        # Proxy ALL attribute writes into the data dict (including the
        # internal ones set in __init__ — see note above).
        self.__setitem__(key, value)
    def __delattr__(self, item):
        self.__delitem__(item)
    def __getitem__(self, item):
        # Missing keys yield None rather than raising.
        return self.__data.get(item)
    def __setitem__(self, key, value):
        # NOTE(review): writes to self._data while reads use the
        # name-mangled self.__data — these are different attributes.
        self._data[key] = value
    def __delitem__(self, key):
        del self.__data[key]
    def __del__(self):
        # Persist on garbage collection.
        self.__save()
    def __len__(self):
        return len(self.__data)
    def update(self, data):
        self.add(data)
    def add(self, data):
        # Merge ``data`` into the store and persist immediately,
        # refreshing from disk first when a file already exists.
        if self.__file_exist:
            self.__load()
        self.__data.update(data)
        self.__save()
    def __save(self, data = None):
        with open(self.__path, "wb") as stream:
            pickle.dump(data or self._data, stream)
        # NOTE(review): this resets _data to the ``data`` argument,
        # which is None on the default call path — likely wipes the
        # in-memory copy after every save.
        self._data = data
    def __load(self):
        if self.__file_exist:
            with open(self.__path, "rb") as stream:
                data = pickle.load(stream)
            self._data = data or {}
    @property
    def __file_exist(self):
        # True when the backing pickle file already exists.
        return LocalOperation.exist(self.__path)
19fa0b053cf9a7dfd54312dfef409c64d6642b5c | update version | CoolerVoid/Vision | Vision-cpe.py | Vision-cpe.py | #!/usr/bin/python
import sys, os.path
from parse import parsers
def banner_vision():
    # Print the CLI usage banner (Python 2 print statement); the bare
    # ``return ;`` is a no-op kept as-is.
    print """ ..::: VISION v0.2 :::...
Nmap\'s XML result parser and NVD's CPE correlation to search CVE
Example:
python vision.py result_scan.xml 3 txt > log_result.txt
argv 1 = Nmap scanner results in XML
argv 2 = Limit CVEs per CPE to get
argv 3 = Type of output (xml or txt)
Coded by CoolerVoid
"""
    return ;
def main(argv):
    # CLI entry point: expects exactly three user arguments
    # (XML scan file, CVE limit per CPE, output type) and dispatches
    # to the Nmap XML parser.
    if len(sys.argv)==4:
        file_input=sys.argv[1]
        if os.path.exists(file_input):
            limit=int(sys.argv[2])
            type_output=str(sys.argv[3])
            parsers.nmap_xml_parse(file_input,limit,type_output)
        else:
            print "Either file is missing or is not readable"
            # NOTE(review): exits with status 0 on every path, including
            # the error branches, so callers cannot detect failure.
            sys.exit(0)
        sys.exit(0)
    else:
        print "\nError needs nmap's XML scan result by passed by first argument\n"
        banner_vision()
        sys.exit(0)
if __name__ == "__main__":
main(sys.argv)
| #!/usr/bin/python
import sys, os.path
from parse import parsers
def banner_vision():
print """ ..::: VISION v0.1 :::...
Nmap\'s XML result parser and NVD's CPE correlation to search CVE
Example:
python vision.py result_scan.xml 3 txt > log_result.txt
argv 1 = Nmap scanner results in XML
argv 2 = Limit CVEs per CPE to get
argv 3 = Type of output (xml or txt)
Coded by CoolerVoid
"""
return ;
def main(argv):
if len(sys.argv)==4:
file_input=sys.argv[1]
if os.path.exists(file_input):
limit=int(sys.argv[2])
type_output=str(sys.argv[3])
parsers.nmap_xml_parse(file_input,limit,type_output)
else:
print "Either file is missing or is not readable"
sys.exit(0)
sys.exit(0)
else:
print "\nError needs nmap's XML scan result by passed by first argument\n"
banner_vision()
sys.exit(0)
if __name__ == "__main__":
main(sys.argv)
| bsd-3-clause | Python |
8e1b85e1b38c96d9798eb378f224f0dfbb5cdb45 | Update dev settings | praekelt/nurseconnect,praekelt/nurseconnect,praekelt/nurseconnect | nurseconnect/settings/dev.py | nurseconnect/settings/dev.py | from .base import * # noqa
DEBUG = True
EMAIL_BACKEND = "django.core.mail.backends.console.EmailBackend"
# WAGTAILSEARCH_BACKENDS = {
# "default": {
# "BACKEND": ("molo.core.wagtailsearch.backends.elasticsearch"),
# "INDEX": "base",
# "URLS": ["http://localhost:9200"],
# "TIMEOUT": 5,
# },
# }
CLINIC_CODE_API = environ.get("CLINIC_CODE_API")
# JEMBI configuration
JEMBI_URL = environ.get("JEMBI_URL")
JEMBI_USERNAME = environ.get("JEMBI_USERNAME")
JEMBI_PASSWORD = environ.get("JEMBI_PASSWORD")
try:
from .local import * # noqa
except ImportError:
pass
| from .base import * # noqa
DEBUG = True
EMAIL_BACKEND = "django.core.mail.backends.console.EmailBackend"
# WAGTAILSEARCH_BACKENDS = {
# "default": {
# "BACKEND": ("molo.core.wagtailsearch.backends.elasticsearch"),
# "INDEX": "base",
# "URLS": ["http://localhost:9200"],
# "TIMEOUT": 5,
# },
# }
CLINIC_CODE_API = "http://example.com"
JEMBI_URL = "http://example.com"
JEMBI_USERNAME = "user"
JEMBI_PASSWORD = "password"
try:
from .local import * # noqa
except ImportError:
pass
| bsd-2-clause | Python |
9fc5c310ce1430097d1efd781c9951405373e001 | Make User's __eq__ case-insensitive. | ayust/kitnirc | kitnirc/user.py | kitnirc/user.py | def split_hostmask(hostmask):
"""Splits a nick@host string into nick and host."""
nick, _, host = hostmask.partition('@')
nick, _, user = nick.partition('!')
return nick, user or None, host or None
class User(object):
"""A user on an IRC network."""
def __init__(self, hostmask):
self._nick = None
self.update_from_hostmask(hostmask)
self.realname = None
self.modes = set()
def update_from_hostmask(self, hostmask):
self.nick, self.username, self.host = split_hostmask(hostmask)
def _get_nick(self):
return self._nick
def _set_nick(self, value):
if value.startswith("~"):
self.ident = True
self._nick = value[1:]
else:
self.ident = False
self._nick = value
nick = property(_get_nick, _set_nick)
def __eq__(self, value):
if isinstance(value, User):
if value.host is None or self.host is None:
return value.nick.lower() == self.nick.lower()
return (value.nick.lower() == self.nick.lower() and
value.host.lower() == self.host.lower())
elif isinstance(value, str):
user = User(value)
if "@" not in value:
return user.nick.lower() == self.nick.lower()
return (user.nick.lower() == self.nick.lower() and
user.host.lower() == self.host.lower())
else:
raise TypeError("Cannot compare User and %s" % type(value))
def __str__(self):
if not self.host:
return self.nick
if not self.username:
return "%s@%s" % (self.nick, self.host)
return "%s!%s@%s" % (self.nick, self.username, self.host)
def __repr__(self):
return "kitnirc.user.User(%s)" % str(self)
# vim: set ts=4 sts=4 sw=4 et:
| def split_hostmask(hostmask):
"""Splits a nick@host string into nick and host."""
nick, _, host = hostmask.partition('@')
nick, _, user = nick.partition('!')
return nick, user or None, host or None
class User(object):
    """A user on an IRC network."""
    def __init__(self, hostmask):
        # _nick must exist before the ``nick`` property setter runs.
        self._nick = None
        self.update_from_hostmask(hostmask)
        self.realname = None
        self.modes = set()
    def update_from_hostmask(self, hostmask):
        # Refresh nick/username/host from a (possibly partial) hostmask.
        self.nick, self.username, self.host = split_hostmask(hostmask)
    def _get_nick(self):
        return self._nick
    def _set_nick(self, value):
        # A leading "~" is stripped off the nick and recorded in
        # self.ident. NOTE(review): by IRC convention "~" marks an
        # *unverified* ident — confirm the intended polarity.
        if value.startswith("~"):
            self.ident = True
            self._nick = value[1:]
        else:
            self.ident = False
            self._nick = value
    nick = property(_get_nick, _set_nick)
    def __eq__(self, value):
        # Compare against another User or a hostmask string.
        # NOTE(review): this comparison is case-sensitive, although IRC
        # nicks are conventionally case-insensitive.
        # NOTE(review): the string branch dereferences self.host even
        # when it is None (e.g. User("bob") == "bob@host" raises
        # AttributeError).
        if isinstance(value, User):
            if value.host is None or self.host is None:
                return value.nick == self.nick
            return value.nick == self.nick and value.host == self.host
        elif isinstance(value, str):
            user = User(value)
            if "@" not in value:
                return user.nick == self.nick
            return user.nick == self.nick and user.host == self.host
        else:
            raise TypeError("Cannot compare User and %s" % type(value))
    def __str__(self):
        # nick / nick@host / nick!user@host, depending on what is known.
        if not self.host:
            return self.nick
        if not self.username:
            return "%s@%s" % (self.nick, self.host)
        return "%s!%s@%s" % (self.nick, self.username, self.host)
    def __repr__(self):
        return "kitnirc.user.User(%s)" % str(self)
# vim: set ts=4 sts=4 sw=4 et:
| mit | Python |
77ea926c854d89768e992fdce628663f21fa7dab | Add __repr__ to TSS | konrad/kufpybio | kufpybio/tss.py | kufpybio/tss.py | class TSS(object):
def __init__(self, seq_id, pos, strand, extra=None):
"""A transcription start site
seq_id - identifier of the harboring chromosome, plasmid, etc.
pos - position of the
strand - the strand (+ or -)
extra - any other information that should be associated
There is no assumptions / restrictions if the TSS position is
in a 0-based or 1-based coordinate system.
"""
self.seq_id = seq_id
self.pos = int(pos)
self.strand = strand
if extra:
self.extra = extra
def __repr__(self):
extra_str = ""
if not self.extra is None:
extra_str = self.extra
return "TSS: replicon: %s pos: %s strand: %s extra: %s" % (
self.seq_id, self.pos, self.strand, extra_str)
| class TSS(object):
def __init__(self, seq_id, pos, strand, extra=None):
"""A transcription start site
seq_id - identifier of the harboring chromosome, plasmid, etc.
pos - position of the
strand - the strand (+ or -)
extra - any other information that should be associated
There is no assumptions / restrictions if the TSS position is
in a 0-based or 1-based coordinate system.
"""
self.seq_id = seq_id
self.pos = int(pos)
self.strand = strand
if extra:
self.extra = extra
| isc | Python |
ddd23068a84b1bdf7e3284b1e6bebea3c2b362e9 | format literal tests | metasmile/transync | strsync/strsync_playground.py | strsync/strsync_playground.py | # -*- coding: utf-8 -*-
import googletrans
from googletrans import Translator
from googletrans.constants import DEFAULT_USER_AGENT, LANGCODES, LANGUAGES, SPECIAL_CASES
translator = Translator()
# print googletrans.LANGCODES
# print googletrans.constants
# print [l.text for l in translator.translate(['hi','you'], src='en', dest='ko')]
import strlocale
test_l1 = 'zh-cn'
test_l2 = 'zh-tw'
for l in ['zh-Hans', 'zh-CN', 'zh-SG'] + ['zh-Hant', 'zh-MO', 'zh-HK', 'zh-TW']:
print strlocale.lang(l)
print strlocale.region(l)
print strlocale.script(l)
print strlocale.is_equal_lang_and_script(test_l1,l)
print strlocale.is_equal_lang_and_script(test_l2,l)
print '---'
import re
lines='''\
Worker name is %s and id is %d
That is %i%%
%c
Decimal: %d Justified: %.6d
%10c%5hc%5C%5lc
The temp is %.*f
%ss%lii
%*.*s | %.3d | %lC | %s%%%02d'''
cfmt='''\
( # start of capture group 1
% # literal "%"
(?: # first option
(?:[-+0 #]{0,5}) # optional flags
(?:\d+|\*)? # width
(?:\.(?:\d+|\*))? # precision
(?:h|l|ll|w|I|I32|I64)? # size
[cCdiouxXeEfgGaAnpsSZ] # type
) | # OR
%%) # literal "%%"
'''
for line in lines.splitlines():
print '"{}"\n\t{}\n'.format(line,
tuple((m.start(1), m.group(1)) for m in re.finditer(cfmt, line, flags=re.X)))
| # -*- coding: utf-8 -*-
import googletrans
from googletrans import Translator
from googletrans.constants import DEFAULT_USER_AGENT, LANGCODES, LANGUAGES, SPECIAL_CASES
translator = Translator()
# print googletrans.LANGCODES
# print googletrans.constants
# print [l.text for l in translator.translate(['hi','you'], src='en', dest='ko')]
import strlocale
test_l1 = 'zh-cn'
test_l2 = 'zh-tw'
for l in ['zh-Hans', 'zh-CN', 'zh-SG'] + ['zh-Hant', 'zh-MO', 'zh-HK', 'zh-TW']:
print strlocale.lang(l)
print strlocale.region(l)
print strlocale.script(l)
print strlocale.is_equal_lang_and_script(test_l1,l)
print strlocale.is_equal_lang_and_script(test_l2,l)
print '---'
| mit | Python |
e8c999cfbe88907a2a90d6b49b2d771cb3c94f54 | sort in ES | dimagi/commcare-hq,dimagi/commcare-hq,dimagi/commcare-hq,dimagi/commcare-hq,dimagi/commcare-hq | custom/enikshay/data_store.py | custom/enikshay/data_store.py | import pytz
from django.utils.dateparse import parse_datetime
from corehq.apps.es import filters
from corehq.apps.userreports.models import StaticDataSourceConfiguration
from corehq.apps.userreports.util import get_indicator_adapter
from custom.enikshay.const import DOSE_KNOWN_INDICATORS
from dimagi.utils.decorators.memoized import memoized
class AdherenceDatastore(object):
# collection of adherence-data lookup queries that can be run on adherence UCR
def __init__(self, domain):
self.datasource = StaticDataSourceConfiguration.by_id("static-{}-adherence".format(domain))
self.adapter = get_indicator_adapter(self.datasource)
self.es = self.adapter.get_query_object().es
def _base_filters(self, episode_id):
return filters.AND(
filters.term('episode_id', episode_id),
filters.term('adherence_value', DOSE_KNOWN_INDICATORS)
)
@memoized
def dose_known_adherences(self, episode_id):
# return sorted adherences, so self.latest_adherence_date can reuse the result of this query
return self.es.filter(self._base_filters(episode_id)).sort('adherence_date', desc=True).run().hits
def latest_adherence_date(self, episode_id):
result = self.dose_known_adherences(episode_id)
if result:
return pytz.UTC.localize(parse_datetime(result[0].get('adherence_date')))
| import pytz
from django.utils.dateparse import parse_datetime
from corehq.apps.es import filters
from corehq.apps.userreports.models import StaticDataSourceConfiguration
from corehq.apps.userreports.util import get_indicator_adapter
from custom.enikshay.const import DOSE_KNOWN_INDICATORS
from dimagi.utils.decorators.memoized import memoized
class AdherenceDatastore(object):
    # Collection of adherence-data lookup queries that can be run on the
    # adherence UCR (user-configurable report) data source.
    def __init__(self, domain):
        self.datasource = StaticDataSourceConfiguration.by_id("static-{}-adherence".format(domain))
        self.adapter = get_indicator_adapter(self.datasource)
        # Elasticsearch query object scoped to the adherence UCR index.
        self.es = self.adapter.get_query_object().es

    def _base_filters(self, episode_id):
        # Restrict hits to this episode and to dose-known adherence values.
        return filters.AND(
            filters.term('episode_id', episode_id),
            filters.term('adherence_value', DOSE_KNOWN_INDICATORS)
        )

    @memoized
    def dose_known_adherences(self, episode_id):
        return self.es.filter(self._base_filters(episode_id)).run().hits

    def latest_adherence_date(self, episode_id):
        """Return the TZ-aware (UTC) datetime of the most recent
        dose-known adherence, or None when there are none."""
        result = self.dose_known_adherences(episode_id)
        if not result:
            return None
        # max() picks the latest hit in O(n) instead of sorting the
        # whole hit list (O(n log n)) just to take the last element.
        latest = max(result, key=lambda x: x['adherence_date'])
        return pytz.UTC.localize(parse_datetime(latest.get('adherence_date')))
| bsd-3-clause | Python |
2a2d3f8ffcbc709e94e054bd426ccd7452b8f029 | Remove I as symbol for imaginary unit | mph-/lcapy | lcapy/config.py | lcapy/config.py | # SymPy symbols to exclude.
exclude = ('I', 'C', 'O', 'S', 'N', 'E', 'E1', 'Q', 'beta', 'gamma', 'zeta')
# Aliases for SymPy symbols
aliases = {'delta': 'DiracDelta', 'step': 'Heaviside', 'u': 'Heaviside',
'j': 'I'}
# String replacements when printing as LaTeX. For example, SymPy uses
# theta for Heaviside's step.
latex_string_map = {r'\theta\left': r'u\left'}
import sympy as sym
print_expr_map = {sym.I: 'j'}
# Hack to pretty print i as j
junicode = '\u2149'
from sympy.printing.pretty.pretty_symbology import atoms_table
atoms_table['ImaginaryUnit'] = junicode
# Words to format in Roman font for LaTeX expressions.
words = ('in', 'out', 'ref', 'rms', 'load', 'source', 'avg',
'mean', 'peak', 'pk', 'pk-pk', 'pp', 'min', 'max', 'src', 'bat',
'cc', 'ee', 'dd', 'ss', 'ih', 'il', 'oh', 'ol',
'typ', 'pkg', 'comp', 'step', 'heaviside', 'diracdelta',
'alpha', 'beta', 'gamma', 'delta', 'eta', 'zeta', 'theta',
'iota', 'kappa', 'mu', 'nu', 'omicron', 'pi', 'rho', 'sigma', 'tau',
'upsilon', 'omega')
| # SymPy symbols to exclude.
exclude = ('C', 'O', 'S', 'N', 'E', 'E1', 'Q', 'beta', 'gamma', 'zeta')
# Aliases for SymPy symbols
aliases = {'delta': 'DiracDelta', 'step': 'Heaviside', 'u': 'Heaviside',
'j': 'I'}
# String replacements when printing as LaTeX. For example, SymPy uses
# theta for Heaviside's step.
latex_string_map = {r'\theta\left': r'u\left'}
import sympy as sym
print_expr_map = {sym.I: 'j'}
# Hack to pretty print i as j
junicode = '\u2149'
from sympy.printing.pretty.pretty_symbology import atoms_table
atoms_table['ImaginaryUnit'] = junicode
# Words to format in Roman font for LaTeX expressions.
words = ('in', 'out', 'ref', 'rms', 'load', 'source', 'avg',
'mean', 'peak', 'pk', 'pk-pk', 'pp', 'min', 'max', 'src', 'bat',
'cc', 'ee', 'dd', 'ss', 'ih', 'il', 'oh', 'ol',
'typ', 'pkg', 'comp', 'step', 'heaviside', 'diracdelta',
'alpha', 'beta', 'gamma', 'delta', 'eta', 'zeta', 'theta',
'iota', 'kappa', 'mu', 'nu', 'omicron', 'pi', 'rho', 'sigma', 'tau',
'upsilon', 'omega')
| lgpl-2.1 | Python |
bd47f378e0a02aeed88793eeec182e7b280dc2d2 | Bump Zen version | zepheira/zenpub,zepheira/zenpub,zepheira/zenpub,zepheira/zenpub | lib/__init__.py | lib/__init__.py | #freemixlib
__version__ = '0.9.3.3'
#Mapping from service ID URI too URL template and/or callable
SERVICES = {}
def register_service(s):
'''
info - either a callable, which has its URL as the serviceid attribute
or a tuple of (serviceid, callable)
Note: rgistration of remote services is done in the Zen section of Akara config, for now
'''
if callable(s):
SERVICES[s.serviceid] = s
else:
SERVICES[s[0]] = s[1]
#Bootstrap in the built-in ("local") services
try:
from zenlib import local
from httpmodel import *
except (KeyboardInterrupt, SystemExit):
raise
except ImportError:
#There will be ImportError during install
pass
#Convenience decorator for registering services
def zservice(service_id):
"""Add the function as an Zen service
This affect how the resource is registered in Zen:
service_id - a string which identifies this service; should be a URL
"""
def zregister(func):
func.serviceid = service_id
register_service(func)
return func
return zregister
def service_proxy(url):
'''
Returns a proxy callable corresponding to a service
e.g. service(u'http://example.org/your-service')
'''
return SERVICES[url]
| #freemixlib
__version__ = '0.9.3.2'
#Mapping from service ID URI too URL template and/or callable
SERVICES = {}
def register_service(s):
'''
info - either a callable, which has its URL as the serviceid attribute
or a tuple of (serviceid, callable)
Note: rgistration of remote services is done in the Zen section of Akara config, for now
'''
if callable(s):
SERVICES[s.serviceid] = s
else:
SERVICES[s[0]] = s[1]
#Bootstrap in the built-in ("local") services
try:
from zenlib import local
from httpmodel import *
except (KeyboardInterrupt, SystemExit):
raise
except ImportError:
#There will be ImportError during install
pass
#Convenience decorator for registering services
def zservice(service_id):
"""Add the function as an Zen service
This affect how the resource is registered in Zen:
service_id - a string which identifies this service; should be a URL
"""
def zregister(func):
func.serviceid = service_id
register_service(func)
return func
return zregister
def service_proxy(url):
'''
Returns a proxy callable corresponding to a service
e.g. service(u'http://example.org/your-service')
'''
return SERVICES[url]
| apache-2.0 | Python |
21b56cea2ffa8ba74b2be1903e786cb2619905f1 | Convert Slack @ mentions to usernames | laneshetron/monopoly | monopoly/Bank/Slack.py | monopoly/Bank/Slack.py | from Bank.Base import Base
import re
class Bank(Base):
    """Slack-flavoured Bank.

    Indexes the team's users and channels/groups by Slack ID so that
    incoming RTM messages can be translated to usernames before being
    handed to the base implementation.
    """

    def __init__(self, team):
        # ``team`` is Slack's rtm.start-style payload.
        self.users = {}
        self.channels = {}
        if 'users' in team:
            for user in team['users']:
                self.users[user['id']] = user
        if 'channels' in team:
            for channel in team['channels']:
                self.channels[channel['id']] = channel
        if 'groups' in team:
            # Private groups are indexed alongside public channels.
            for group in team['groups']:
                self.channels[group['id']] = group
        super().__init__()

    def set_channel(self, channel):
        self.channels[channel['id']] = channel

    def id_to_name(self, id):
        """Map a Slack user ID to its username; None when unknown."""
        if id in self.users:
            return self.users[id]['name']

    def members(self, id):
        """Usernames of the channel's members ([] when unknown)."""
        if id in self.channels and 'members' in self.channels[id]:
            return [self.id_to_name(uid) for uid in self.channels[id]['members']]
        return []

    def receive(self, message):
        # Handle members joining & leaving.
        if 'subtype' in message:
            if (message['subtype'] in ['channel_join', 'group_join'] and
                    message['user'] not in self.channels[message['channel']]['members']):
                self.channels[message['channel']]['members'].append(message['user'])
            if (message['subtype'] in ['channel_leave', 'group_leave'] and
                    message['user'] in self.channels[message['channel']]['members']):
                self.channels[message['channel']]['members'].remove(message['user'])
        # Replace Slack @ mentions with usernames before proceeding.
        # BUG FIX: re.findall() on a pattern with a single capture group
        # yields plain strings, so the original ``mention[0]`` was only
        # the FIRST CHARACTER of the user ID; id_to_name() then returned
        # None and re.sub(..., None, ...) raised TypeError. The global
        # re.sub also rewrote every mention with one user's name.
        re_mentions = re.compile(r"<@([0-9a-zA-Z]+)\|?(?:[0-9a-zA-Z]+)?>")

        def _expand(match):
            name = self.id_to_name(match.group(1))
            # Leave unknown mentions untouched rather than crashing.
            return name if name is not None else match.group(0)

        text = re_mentions.sub(_expand, message['text'])
        sender = self.id_to_name(message['user'])
        clients = self.members(message['channel'])
        return super().receive(text, sender, clients)
| from Bank.Base import Base
class Bank(Base):
    # Slack-specific Bank: indexes the team's users and channels so
    # incoming RTM messages can be translated to names before dispatch.
    def __init__(self, team):
        # ``team`` is Slack's rtm.start-style payload; index users,
        # channels and private groups by their Slack IDs.
        self.users = {}
        self.channels = {}
        if 'users' in team:
            for user in team['users']:
                self.users[user['id']] = user
        if 'channels' in team:
            for channel in team['channels']:
                self.channels[channel['id']] = channel
        if 'groups' in team:
            # Private groups are indexed alongside public channels.
            for group in team['groups']:
                self.channels[group['id']] = group
        super().__init__()
    def set_channel(self, channel):
        # Add or replace a channel record.
        self.channels[channel['id']] = channel
    def id_to_name(self, id):
        # Map a Slack user ID to its username; returns None if unknown.
        if id in self.users:
            return self.users[id]['name']
    def members(self, id):
        # Usernames of the channel's members; [] when the channel is
        # unknown or carries no member list.
        if id in self.channels and 'members' in self.channels[id]:
            return [self.id_to_name(uid) for uid in self.channels[id]['members']]
        return []
    def receive(self, message):
        # Keep the channel membership index in sync with join/leave
        # events, then forward the plain text to the base handler.
        # Handle members joining & leaving
        if 'subtype' in message:
            if (message['subtype'] in ['channel_join', 'group_join'] and
                message['user'] not in self.channels[message['channel']]['members']):
                self.channels[message['channel']]['members'].append(message['user'])
            if (message['subtype'] in ['channel_leave', 'group_leave'] and
                message['user'] in self.channels[message['channel']]['members']):
                self.channels[message['channel']]['members'].remove(message['user'])
        text = message['text']
        sender = self.id_to_name(message['user'])
        clients = self.members(message['channel'])
        return super().receive(text, sender, clients)
| mit | Python |
1c6e81052927dd2e9d5ef1fc31432f8bf1fff7dc | update durations for tablature notation | msbmsb/wordstrument,msbmsb/wordstrument | lib/duration.py | lib/duration.py | """
duration.py:
Given a string, calculate the duration of the corresponding note.
The average English word is roughly 5 characters long, based on that:
# of characters note duration
0-1 1/16th
2-3 1/8th
4-6 1/4
7-8 1/2
9-10 1
11+ 2
* Author: Mitchell Bowden <mitchellbowden AT gmail DOT com>
* License: MIT License: http://creativecommons.org/licenses/MIT/
"""
# given a string, calculate the duration of the corresponding note
def calculate_duration(t):
    """Map a token's length to a note duration.

    The average English word is ~5 characters; shorter tokens get
    shorter notes. Returns one of 1/16, 1/8, 1/4, 1/2, 1 or 2.
    """
    l = len(t)
    if l < 2:
        return 0.0625
    if l < 4:
        return 0.125
    if l < 7:
        return 0.25
    if l < 9:
        return 0.5
    if l < 11:
        return 1
    return 2


# VexFlow notation (moved down since vexflow handles 1/32 to whole)
vexFlowNotation = {
    0.0625: '32',
    0.125: '16',
    0.25: '8',
    0.5: 'q',
    1: 'h',
    2: 'w',
}


def toVexFlowNotation(d):
    """Return the VexFlow duration code for ``d``, or None if unmapped."""
    # dict.get replaces the needless ``d not in dict.keys()`` membership
    # test plus double lookup.
    return vexFlowNotation.get(d)
| """
duration.py:
Given a string, calculate the duration of the corresponding note.
The average English word is roughly 5 characters long, based on that:
# of characters note duration
0-1 1/16th
2-3 1/8th
4-6 1/4
7-8 1/2
9-10 1
11+ 2
* Author: Mitchell Bowden <mitchellbowden AT gmail DOT com>
* License: MIT License: http://creativecommons.org/licenses/MIT/
"""
# given a string, calculate the duration of the corresponding note
def calculate_duration(t):
    """Map token length to a note duration; avg English word is ~5 chars."""
    n = len(t)
    # (exclusive upper bound, duration) pairs, checked shortest first.
    for limit, duration in ((2, 0.0625), (4, 0.125), (7, 0.25),
                            (9, 0.5), (11, 1)):
        if n < limit:
            return duration
    return 2
| mit | Python |
381c75be231cdc3338596df50d81970ffe8fb322 | Add `filters` to the list of imports. | nanshe-org/nanshe,DudLab/nanshe,DudLab/nanshe,nanshe-org/nanshe,jakirkham/nanshe,jakirkham/nanshe | nanshe/imp/__init__.py | nanshe/imp/__init__.py | __author__ = "John Kirkham <kirkhamj@janelia.hhmi.org>"
__date__ = "$Apr 14, 2014 20:37:08 EDT$"
__all__ = [
"advanced_image_processing", "binary_image_processing", "denoising",
"filters", # "neuron_matplotlib_viewer",
"registration", "simple_image_processing", "wavelet_transform"
]
import advanced_image_processing
import binary_image_processing
import denoising
import filters
# import neuron_matplotlib_viewer
import registration
import simple_image_processing
import wavelet_transform
| __author__ = "John Kirkham <kirkhamj@janelia.hhmi.org>"
__date__ = "$Apr 14, 2014 20:37:08 EDT$"
__all__ = [
"advanced_image_processing", "binary_image_processing", "denoising",
# "neuron_matplotlib_viewer",
"registration", "simple_image_processing", "wavelet_transform"
]
import advanced_image_processing
import binary_image_processing
import denoising
# import neuron_matplotlib_viewer
import registration
import simple_image_processing
import wavelet_transform
| bsd-3-clause | Python |
59daf205869c42b3797aa9dbaaa97930cbca2417 | Add function to check if nbserverproxy is running | nanshe-org/nanshe_workflow,DudLab/nanshe_workflow | nanshe_workflow/ipy.py | nanshe_workflow/ipy.py | __author__ = "John Kirkham <kirkhamj@janelia.hhmi.org>"
__date__ = "$Nov 10, 2015 17:09$"
import json
import re
try:
from IPython.utils.shimmodule import ShimWarning
except ImportError:
class ShimWarning(Warning):
"""Warning issued by IPython 4.x regarding deprecated API."""
pass
import warnings
with warnings.catch_warnings():
warnings.filterwarnings('error', '', ShimWarning)
try:
# IPython 3
from IPython.html.widgets import FloatProgress
from IPython.parallel import Client
except ShimWarning:
# IPython 4
from ipywidgets import FloatProgress
from ipyparallel import Client
from IPython.display import display
import ipykernel
import notebook.notebookapp
import requests
def check_nbserverproxy():
"""
Return the url of the current jupyter notebook server.
"""
kernel_id = re.search(
"kernel-(.*).json",
ipykernel.connect.get_connection_file()
).group(1)
servers = notebook.notebookapp.list_running_servers()
for s in servers:
response = requests.get(
requests.compat.urljoin(s["url"], "api/sessions"),
params={"token": s.get("token", "")}
)
for n in json.loads(response.text):
if n["kernel"]["id"] == kernel_id:
# Found server that is running this Jupyter Notebook.
# Try to requests this servers port through nbserverproxy.
url = requests.compat.urljoin(
s["url"], "proxy/%i" % s["port"]
)
# If the proxy is running, it will redirect.
# If not, it will error out.
try:
requests.get(url).raise_for_status()
except requests.HTTPError:
return False
else:
return True
| __author__ = "John Kirkham <kirkhamj@janelia.hhmi.org>"
__date__ = "$Nov 10, 2015 17:09$"
try:
from IPython.utils.shimmodule import ShimWarning
except ImportError:
class ShimWarning(Warning):
"""Warning issued by IPython 4.x regarding deprecated API."""
pass
import warnings
with warnings.catch_warnings():
warnings.filterwarnings('error', '', ShimWarning)
try:
# IPython 3
from IPython.html.widgets import FloatProgress
from IPython.parallel import Client
except ShimWarning:
# IPython 4
from ipywidgets import FloatProgress
from ipyparallel import Client
from IPython.display import display
| apache-2.0 | Python |
2fc8b7e98594c0a3a8c2e4c285dcf3923f247b92 | allow reuse address | cenkalti/kuyruk,cenkalti/kuyruk | kuyruk/manager/server.py | kuyruk/manager/server.py | import Queue
from pprint import pformat
from functools import total_ordering
from SocketServer import ThreadingTCPServer, BaseRequestHandler
from kuyruk.manager.messaging import message_loop
class ManagerServer(ThreadingTCPServer):
daemon_threads = True
allow_reuse_address = True
def __init__(self, host, port):
self.clients = {}
ThreadingTCPServer.__init__(self, (host, port), RequestHandler)
def get_request(self):
client_sock, client_addr = ThreadingTCPServer.get_request(self)
self.clients[client_addr] = ClientStruct(client_sock)
print 'self.clients', pformat(self.clients)
return client_sock, client_addr
def process_request_thread(self, request, client_address):
ThreadingTCPServer.process_request_thread(self, request,
client_address)
self._remove_socket(client_address)
def _remove_socket(self, client_address):
del self.clients[client_address]
print 'self.clients', pformat(self.clients)
class RequestHandler(BaseRequestHandler):
def handle(self):
try:
message_loop(self.request, self._generate_action, self._on_stats)
except EOFError:
print 'Client disconnected'
def _generate_action(self):
try:
return self.struct.actions.get_nowait()
except Queue.Empty:
pass
def _on_stats(self, sock, stats):
print self.client_address, pformat(stats)
self.struct.stats = stats
@property
def struct(self):
return self.server.clients[self.client_address]
@total_ordering
class ClientStruct(dict):
def __init__(self, socket):
super(ClientStruct, self).__init__()
self.socket = socket
self.stats = {}
self.actions = Queue.Queue()
def __lt__(self, other):
return self.sort_key < other.sort_key
@property
def sort_key(self):
order = ('hostname', 'queue', 'uptime', 'pid')
return tuple(self.get_stat(attr) for attr in order)
def get_stat(self, name):
return self.stats.get(name, None)
| import Queue
from pprint import pformat
from functools import total_ordering
from SocketServer import ThreadingTCPServer, BaseRequestHandler
from kuyruk.manager.messaging import message_loop
class ManagerServer(ThreadingTCPServer):
daemon_threads = True
def __init__(self, host, port):
self.clients = {}
ThreadingTCPServer.__init__(self, (host, port), RequestHandler)
def get_request(self):
client_sock, client_addr = ThreadingTCPServer.get_request(self)
self.clients[client_addr] = ClientStruct(client_sock)
print 'self.clients', pformat(self.clients)
return client_sock, client_addr
def process_request_thread(self, request, client_address):
ThreadingTCPServer.process_request_thread(self, request,
client_address)
self._remove_socket(client_address)
def _remove_socket(self, client_address):
del self.clients[client_address]
print 'self.clients', pformat(self.clients)
class RequestHandler(BaseRequestHandler):
def handle(self):
try:
message_loop(self.request, self._generate_action, self._on_stats)
except EOFError:
print 'Client disconnected'
def _generate_action(self):
try:
return self.struct.actions.get_nowait()
except Queue.Empty:
pass
def _on_stats(self, sock, stats):
print self.client_address, pformat(stats)
self.struct.stats = stats
@property
def struct(self):
return self.server.clients[self.client_address]
@total_ordering
class ClientStruct(dict):
def __init__(self, socket):
super(ClientStruct, self).__init__()
self.socket = socket
self.stats = {}
self.actions = Queue.Queue()
def __lt__(self, other):
return self.sort_key < other.sort_key
@property
def sort_key(self):
order = ('hostname', 'queue', 'uptime', 'pid')
return tuple(self.get_stat(attr) for attr in order)
def get_stat(self, name):
return self.stats.get(name, None)
| mit | Python |
97744ad54911e25210a89bb9fc92fab23dbfde2a | add test. what happens when company name does not match anything | kern3020/opportunity,kern3020/opportunity | opportunity/tracker/tests.py | opportunity/tracker/tests.py | from django.utils import unittest
from models import Company
from views import populateCompany
class FetchFromCrunch(unittest.TestCase):
def test_normal(self):
'''
The simpliest case is a single token with no special characters
which matches a specific company in crunchbase.
'''
co = Company()
co.name = "Solum"
populateCompany(co)
self.assertEqual(co.city, "Mountain View")
def test_encoding(self):
'''
If there is a space in the given name, you need to encode the
string. If you fail to so, you'll likely
'''
co = Company()
co.name = "Red Hat"
populateCompany(co)
self.assertEqual(co.city, "Raleigh")
def test_matches_multiple(self):
'''
What happens if a company has multiple offices?
'''
co = Company()
co.name = "IBM"
populateCompany(co)
self.assertEqual(co.city, "Armonk")
def test_no_match(self):
'''
What happens if there is no match with crunchbase?
'''
co = Company()
name = "pirulito"
co.name = "pirulito"
populateCompany(co)
self.assertEqual(co.name, name)
self.assertEqual(co.city, "")
| from django.utils import unittest
from models import Company
from views import populateCompany
class FetchFromCrunch(unittest.TestCase):
def test_normal(self):
'''
The simpliest case is a single token with no special characters
which matches a specific company in crunchbase.
'''
co = Company()
co.name = "Solum"
populateCompany(co)
self.assertEqual(co.city, "Mountain View")
def test_encoding(self):
'''
If there is a space in the given name, you need to encode the
string. If you fail to so, you'll likely
'''
co = Company()
co.name = "Red Hat"
populateCompany(co)
self.assertEqual(co.city, "Raleigh")
def test_matches_multiple(self):
'''
What happens if a company has multiple offices?
'''
co = Company()
co.name = "IBM"
populateCompany(co)
self.assertEqual(co.city, "Armonk")
| mit | Python |
47ca06c56096225b0cb92ead9459186dcb3a182f | add blank=true at manager | bungoume/labobooks,bungoume/labobooks | labobooks/core/models.py | labobooks/core/models.py | from django.db import models
# class Library(models.Model):
# ...
# class BookShelf(models.Model):
# name = models.CharField("研究室名", max_length=191)
class MyBook(models.Model):
book_info = models.ForeignKey('BookInfo')
buy_date = models.DateField("購入日", null=True)
buy_user = models.CharField("購入希望者", max_length=191, blank=True)
manager = models.CharField("管理責任者", max_length=191, blank=True)
buy_at = models.CharField("購入場所", max_length=191, blank=True)
purpose = models.TextField("購入目的", blank=True)
money_source = models.CharField("資金源", max_length=191, blank=True)
book_expire_at = models.DateField("本の賞味期限", null=True)
created_at = models.DateTimeField(auto_now_add=True)
updated_at = models.DateTimeField(auto_now=True)
class BookInfo(models.Model):
isbn = models.CharField("ISBN", primary_key=True, max_length=20)
title = models.CharField("タイトル", max_length=191)
title_kana = models.CharField("タイトル(カナ)", max_length=191, blank=True)
sub_title = models.CharField("サブタイトル", max_length=191, blank=True)
sub_title_kana = models.CharField("サブタイトル(カナ)", max_length=191, blank=True)
series_name = models.CharField("シリーズ名", max_length=191, blank=True)
series_name_kana = models.CharField("シリーズ名(カナ)", max_length=191, blank=True)
author = models.CharField("著者", max_length=191, blank=True)
author_kana = models.CharField("著者(カナ)", max_length=191, blank=True)
publisher_name = models.CharField("出版社", max_length=191, blank=True)
book_size = models.CharField("本サイズ", max_length=191, blank=True)
item_caption = models.TextField("キャプション", blank=True)
sales_date = models.DateField("発売日", null=True)
item_price = models.IntegerField("価格", blank=True)
image_url = models.URLField("画像URL", blank=True)
genre_id = models.CharField("書籍ジャンル", max_length=191, blank=True)
| from django.db import models
# class Library(models.Model):
# ...
# class BookShelf(models.Model):
# name = models.CharField("研究室名", max_length=191)
class MyBook(models.Model):
book_info = models.ForeignKey('BookInfo')
buy_date = models.DateField("購入日", null=True)
buy_user = models.CharField("購入希望者", max_length=191, blank=True)
manager = models.CharField("管理責任者", max_length=191)
buy_at = models.CharField("購入場所", max_length=191, blank=True)
purpose = models.TextField("購入目的", blank=True)
money_source = models.CharField("資金源", max_length=191, blank=True)
book_expire_at = models.DateField("本の賞味期限", null=True)
created_at = models.DateTimeField(auto_now_add=True)
updated_at = models.DateTimeField(auto_now=True)
class BookInfo(models.Model):
isbn = models.CharField("ISBN", primary_key=True, max_length=20)
title = models.CharField("タイトル", max_length=191)
title_kana = models.CharField("タイトル(カナ)", max_length=191, blank=True)
sub_title = models.CharField("サブタイトル", max_length=191, blank=True)
sub_title_kana = models.CharField("サブタイトル(カナ)", max_length=191, blank=True)
series_name = models.CharField("シリーズ名", max_length=191, blank=True)
series_name_kana = models.CharField("シリーズ名(カナ)", max_length=191, blank=True)
author = models.CharField("著者", max_length=191, blank=True)
author_kana = models.CharField("著者(カナ)", max_length=191, blank=True)
publisher_name = models.CharField("出版社", max_length=191, blank=True)
book_size = models.CharField("本サイズ", max_length=191, blank=True)
item_caption = models.TextField("キャプション", blank=True)
sales_date = models.DateField("発売日", null=True)
item_price = models.IntegerField("価格", blank=True)
image_url = models.URLField("画像URL", blank=True)
genre_id = models.CharField("書籍ジャンル", max_length=191, blank=True)
| mit | Python |
39904e39f1ac163ee17c293372ab97af48a1b37e | Add time/space complexity | bowen0701/algorithms_data_structures | lc118_pascal_triangle.py | lc118_pascal_triangle.py | """Leetcode 118. Pascal's Triangle
Easy
Given a non-negative integer numRows, generate the first numRows of Pascal's triangle.
In Pascal's triangle, each number is the sum of the two numbers directly above it.
Example:
Input: 5
Output:
[
[1],
[1,1],
[1,2,1],
[1,3,3,1],
[1,4,6,4,1]
]
"""
class Solution(object):
def get_num(self, last_row, i):
if i < 0 or i >= len(last_row):
return 0
return last_row[i]
def generate(self, numRows):
"""
:type numRows: int
:rtype: List[List[int]]
Time complexity: O(n^2).
Space complexity: O(n^2).
"""
if numRows == 0:
return []
triangle = [[1] * (r + 1) for r in range(numRows)]
if numRows <= 2:
return triangle
for r in range(2, numRows):
last_row = triangle[r - 1]
current_row = triangle[r]
for i in range(1, r):
current_row[i] = last_row[i - 1] + last_row[i]
return triangle
def main():
numRows = 5
print('Pascal\'s triangle:\n{}'.format(
Solution().generate(numRows)))
if __name__ == '__main__':
main()
| """Leetcode 118. Pascal's Triangle
Easy
Given a non-negative integer numRows, generate the first numRows of Pascal's triangle.
In Pascal's triangle, each number is the sum of the two numbers directly above it.
Example:
Input: 5
Output:
[
[1],
[1,1],
[1,2,1],
[1,3,3,1],
[1,4,6,4,1]
]
"""
class Solution(object):
def get_num(self, last_row, i):
if i < 0 or i >= len(last_row):
return 0
return last_row[i]
def generate(self, numRows):
"""
:type numRows: int
:rtype: List[List[int]]
"""
if numRows == 0:
return []
triangle = [[1] * (r + 1) for r in range(numRows)]
if numRows <= 2:
return triangle
for r in range(2, numRows):
last_row = triangle[r - 1]
current_row = triangle[r]
for i in range(1, r):
current_row[i] = last_row[i - 1] + last_row[i]
return triangle
def main():
numRows = 5
print('Pascal\'s triangle:\n{}'.format(
Solution().generate(numRows)))
if __name__ == '__main__':
main()
| bsd-2-clause | Python |
2be92936ca87dbacb5c1c2b4e4b0103b741f005c | Add comments on checking left pos is target | bowen0701/algorithms_data_structures | lc0034_find_first_and_last_position_of_element_in_sorted_array.py | lc0034_find_first_and_last_position_of_element_in_sorted_array.py | """Leetcode 34. Find left and right Position of Element in Sorted Array
Medium
URL: https://leetcode.com/problems/find-left-and-right-position-of-element-in-sorted-array
Given an array of integers nums sorted in ascending order,
find the starting and ending position of a given target value.
Your algorithm's runtime complexity must be in the order of O(log n).
If the target is not found in the array, return [-1, -1].
Example 1:
Input: nums = [5,7,7,8,8,10], target = 8
Output: [3,4]
Example 2:
Input: nums = [5,7,7,8,8,10], target = 6
Output: [-1,-1]
"""
class SolutionBinarySearchTwice(object):
def searchRange(self, nums, target):
"""
:type nums: List[int]
:type target: int
:rtype: List[int]
Time complexity: O(logn), where n is the length of nums.
Space complexity: O(1).
"""
# Apply to 2 binary searches to update result [-1, -1].
res = [-1, -1]
if not nums:
return res
# Apply the 1st binary search to search target's left position.
left, right = 0, len(nums) - 1
while left < right:
mid = left + (right - left) // 2
if nums[mid] < target:
left = mid + 1
else:
right = mid
# If left pos is not target, return not found.
if nums[left] != target:
return res
else:
res[0] = left
# Apply the 2nd binary search to search target's right position.
right = len(nums) - 1
while left < right:
# Make mid biased to the right.
mid = left + (right - left) // 2 + 1
if nums[mid] > target:
right = mid - 1
else:
left = mid
res[1] = right
return res
def main():
# Ans: [3,4]
nums = [5,7,7,8,8,10]
target = 8
print SolutionBinarySearchTwice().searchRange(nums, target)
# Ans: [-1,-1]
nums = [5,7,7,8,8,10]
target = 6
print SolutionBinarySearchTwice().searchRange(nums, target)
if __name__ == '__main__':
main()
| """Leetcode 34. Find left and right Position of Element in Sorted Array
Medium
URL: https://leetcode.com/problems/find-left-and-right-position-of-element-in-sorted-array
Given an array of integers nums sorted in ascending order,
find the starting and ending position of a given target value.
Your algorithm's runtime complexity must be in the order of O(log n).
If the target is not found in the array, return [-1, -1].
Example 1:
Input: nums = [5,7,7,8,8,10], target = 8
Output: [3,4]
Example 2:
Input: nums = [5,7,7,8,8,10], target = 6
Output: [-1,-1]
"""
class SolutionBinarySearchTwice(object):
def searchRange(self, nums, target):
"""
:type nums: List[int]
:type target: int
:rtype: List[int]
Time complexity: O(logn), where n is the length of nums.
Space complexity: O(1).
"""
# Apply to 2 binary searches to update result [-1, -1].
res = [-1, -1]
if not nums:
return res
# Apply the 1st binary search to search target's left position.
left, right = 0, len(nums) - 1
while left < right:
mid = left + (right - left) // 2
if nums[mid] < target:
left = mid + 1
else:
right = mid
if nums[left] != target:
return res
else:
res[0] = left
# Apply the 2nd binary search to search target's right position.
right = len(nums) - 1
while left < right:
# Make mid biased to the right.
mid = left + (right - left) // 2 + 1
if nums[mid] > target:
right = mid - 1
else:
left = mid
res[1] = right
return res
def main():
# Ans: [3,4]
nums = [5,7,7,8,8,10]
target = 8
print SolutionBinarySearchTwice().searchRange(nums, target)
# Ans: [-1,-1]
nums = [5,7,7,8,8,10]
target = 6
print SolutionBinarySearchTwice().searchRange(nums, target)
if __name__ == '__main__':
main()
| bsd-2-clause | Python |
04302fd46ec1fc2b5e8846983642f0663bc3d73c | Add failed upload status | TissueMAPS/TmLibrary,TissueMAPS/TmLibrary,TissueMAPS/TmLibrary,TissueMAPS/TmLibrary,TissueMAPS/TmLibrary | src/tmlib/models/status.py | src/tmlib/models/status.py | class FileUploadStatus(object):
'''Upload status of a file.'''
#: The file is registered, but upload not yet started
WAITING = 'WAITING'
#: Upload is ongoing
UPLOADING = 'UPLOADING'
#: Upload is complete
COMPLETE = 'COMPLETE'
#: Upload has failed
FAILED = 'FAILED'
| class FileUploadStatus(object):
'''Upload status of a file.'''
#: The file is registered, but upload not yet started
WAITING = 'WAITING'
#: Upload is ongoing
UPLOADING = 'UPLOADING'
#: Upload is complete
COMPLETE = 'COMPLETE'
| agpl-3.0 | Python |
cbb62604b0cd495ec9d0fd64ca96d1d50e48df2f | use sure for signals tests | Amoki/Amoki-Music,Amoki/Amoki-Music,Amoki/Amoki-Music | player/tests/test_signals.py | player/tests/test_signals.py | from utils.testcase import TestCase
from player.models import Events, Room
import sure
class TestSignals(TestCase):
def test_update_token_on_password_change(self):
first_token = self.r.token
self.r.password = 'b'
self.r.save()
self.r.token.should_not.eql(first_token)
def test_create_room_event(self):
Room(name='b', password='b').save()
Events.get_all().should.have.key('b')
| from utils.testcase import TestCase
from player.models import Events, Room
class TestSignals(TestCase):
def test_update_token_on_password_change(self):
first_token = self.r.token
self.r.password = 'b'
self.r.save()
self.assertNotEqual(self.r.token, first_token)
def test_create_room_event(self):
Room(name='b', password='b').save()
Events.get_all().should.have.key('b')
| mit | Python |
bca2c3eed387125296f14d7544ba9887065bf1d8 | use 4-space indentation | zmwangx/you-get,xyuanmu/you-get,zmwangx/you-get,smart-techs/you-get,cnbeining/you-get,qzane/you-get,xyuanmu/you-get,cnbeining/you-get,qzane/you-get,smart-techs/you-get | src/you_get/util/strings.py | src/you_get/util/strings.py | try:
# py 3.4
from html import unescape as unescape_html
except ImportError:
import re
from html.entities import entitydefs
def unescape_html(string):
'''HTML entity decode'''
string = re.sub(r'&#[^;]+;', _sharp2uni, string)
string = re.sub(r'&[^;]+;', lambda m: entitydefs[m.group(0)[1:-1]], string)
return string
def _sharp2uni(m):
'''&#...; ==> unicode'''
s = m.group(0)[2:].rstrip(';;')
if s.startswith('x'):
return chr(int('0'+s, 16))
else:
return chr(int(s))
from .fs import legitimize
def get_filename(htmlstring):
return legitimize(unescape_html(htmlstring))
| try:
# py 3.4
from html import unescape as unescape_html
except ImportError:
import re
from html.entities import entitydefs
def unescape_html(string):
'''HTML entity decode'''
string = re.sub(r'&#[^;]+;', _sharp2uni, string)
string = re.sub(r'&[^;]+;', lambda m: entitydefs[m.group(0)[1:-1]], string)
return string
def _sharp2uni(m):
'''&#...; ==> unicode'''
s = m.group(0)[2:].rstrip(';;')
if s.startswith('x'):
return chr(int('0'+s, 16))
else:
return chr(int(s))
from .fs import legitimize
def get_filename(htmlstring):
return legitimize(unescape_html(htmlstring))
| mit | Python |
98db64a4b505572c5b1db8a0d3f4623752daaefa | update East Herts import script for parl.2017-06-08 (closes #945) | chris48s/UK-Polling-Stations,chris48s/UK-Polling-Stations,chris48s/UK-Polling-Stations,DemocracyClub/UK-Polling-Stations,DemocracyClub/UK-Polling-Stations,DemocracyClub/UK-Polling-Stations | polling_stations/apps/data_collection/management/commands/import_east_hertfordshire.py | polling_stations/apps/data_collection/management/commands/import_east_hertfordshire.py | from data_collection.management.commands import BaseXpressDemocracyClubCsvImporter
class Command(BaseXpressDemocracyClubCsvImporter):
council_id = 'E07000242'
addresses_name = 'parl.2017-06-08/Version 1/East Herts Democracy_Club__08June2017.tsv'
stations_name = 'parl.2017-06-08/Version 1/East Herts Democracy_Club__08June2017.tsv'
elections = ['parl.2017-06-08']
csv_delimiter = '\t'
| from data_collection.management.commands import BaseXpressDemocracyClubCsvImporter
class Command(BaseXpressDemocracyClubCsvImporter):
council_id = 'E07000242'
addresses_name = 'Democracy_Club__04May2017 (5).CSV'
stations_name = 'Democracy_Club__04May2017 (5).CSV'
elections = ['parl.2017-06-08']
def station_record_to_dict(self, record):
"""
East Herts Council contacted us to say...
Change of polling station for the General Election:
Ware Drill Hall is being replaced by
3rd Ware Scout Hut, Broadmeads, Ware, SG12 9HY
"""
if record.polling_place_id == '819':
record = record._replace(polling_place_name = '3rd Ware Scout Hut')
record = record._replace(polling_place_address_1 = 'Broadmeads')
record = record._replace(polling_place_address_2 = 'Ware')
record = record._replace(polling_place_address_3 = '')
record = record._replace(polling_place_address_4 = '')
record = record._replace(polling_place_postcode = 'SG12 9HY')
record = record._replace(polling_place_easting = '0')
record = record._replace(polling_place_northing = '0')
return super().station_record_to_dict(record)
| bsd-3-clause | Python |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.