| commit (string, 40 chars) | subject (string, 1-3.25k chars) | old_file (string, 4-311 chars) | new_file (string, 4-311 chars) | old_contents (string, 0-26.3k chars) | lang (string, 3 classes) | proba (float64, 0-1) | diff (string, 0-7.82k chars) |
|---|---|---|---|---|---|---|---|
60b705ac031aa885306c31c96743b0fbc4c963a3
|
Fix dbcursor issue
|
sso/services/__init__.py
|
sso/services/__init__.py
|
import settings
from django.db import load_backend, transaction, IntegrityError
def get_api(api):
if settings.DISABLE_SERVICES:
return BaseService()
try:
mod = __import__(api)
except ImportError:
raise Exception('Error creating service')
for i in api.split(".")[1:]:
mod = getattr(mod, i)
return getattr(mod, mod.ServiceClass)()
def list_apis():
import os.path, pkgutil
pkgpath = os.path.dirname(__file__)
return [name for _, name, _ in pkgutil.iter_modules([pkgpath])]
class BaseService():
"""
Base Service class, all service classes should inherit from this
"""
settings = { 'require_user': True,
'require_password': True,
'provide_login': False }
def add_user(self, username, password, **kwargs):
""" Add a user, returns a UID for that user """
return username
def check_user(self, username):
""" Check if the username exists """
return False
def delete_user(self, uid):
""" Delete a user by uid """
return True
def disable_user(self, uid):
""" Disable a user by uid """
return True
def enable_user(self, uid, password):
""" Enable a user by uid """
return True
def reset_password(self, uid, password):
""" Reset the user's password """
return True
    def login(self, uid):
""" Login the user and provide cookies back """
pass
class BaseDBService(BaseService):
@property
def _db(self):
if not hasattr(self, '_db'):
# Use the master DB settings, bar the database name
backend = load_backend(settings.DATABASE_ENGINE)
self._db = backend.DatabaseWrapper({
'DATABASE_HOST': settings.DATABASE_HOST,
'DATABASE_NAME': self.settings['database_name'],
'DATABASE_OPTIONS': {},
'DATABASE_PASSWORD': settings.DATABASE_PASSWORD,
'DATABASE_PORT': settings.DATABASE_PORT,
'DATABASE_USER': settings.DATABASE_USER,
'TIME_ZONE': settings.TIME_ZONE,})
return self._db
@property
def _dbcursor(self):
return self._db.dbcursor()
def __del__(self):
if hasattr(self, '_db'):
self._db.connection.commit()
self._db.close()
self._db = None
|
Python
| 0
|
@@ -2254,18 +2254,16 @@
elf._db.
-db
cursor()
|
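Decoded (the diffs in this dump are URL-escaped: `%0A` is a newline, `%22` a double quote, `%5B`/`%5D` square brackets), this record's single hunk drops the stray `db` prefix from the cursor call. A sketch of the patched property, assuming the surrounding `BaseDBService` is otherwise unchanged:

```python
@property
def _dbcursor(self):
    # After the fix: Django's DatabaseWrapper exposes cursor(), not dbcursor()
    return self._db.cursor()
```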
aacace8b3215a3303325b5ad848b4b69b92261f3
|
update keypair hook to match route53 update
|
stacker/hooks/keypair.py
|
stacker/hooks/keypair.py
|
import logging
import os
import boto3
from . import utils
logger = logging.getLogger(__name__)
def find(lst, key, value):
for i, dic in enumerate(lst):
if dic[key] == value:
return lst[i]
return False
def ensure_keypair_exists(provider, context, **kwargs):
"""Ensure a specific keypair exists within AWS.
If the key doesn't exist, upload it.
Args:
provider (:class:`stacker.providers.base.BaseProvider`): provider
instance
context (:class:`stacker.context.Context`): context instance
Returns: boolean for whether or not the hook succeeded.
"""
client = boto3.client("ec2", region_name=provider.region)
keypair_name = kwargs.get("keypair", context.parameters.get("SshKeyName"))
resp = client.describe_key_pairs()
keypair = find(resp["KeyPairs"], "KeyName", keypair_name)
message = "keypair: %s (%s) %s"
if keypair:
logger.info(message,
keypair["KeyName"],
keypair["KeyFingerprint"],
"exists")
return True
logger.info("keypair: \"%s\" not found", keypair_name)
create_or_upload = raw_input(
"import or create keypair \"%s\"? (import/create/Cancel) " % (
keypair_name,
),
)
if create_or_upload == "import":
path = raw_input("path to keypair file: ")
full_path = utils.full_path(path)
if not os.path.exists(full_path):
logger.error("Failed to find keypair at path: %s", full_path)
return False
with open(full_path) as read_file:
contents = read_file.read()
keypair = client.import_key_pair(KeyName=keypair_name,
PublicKeyMaterial=contents)
logger.info(message,
keypair["KeyName"],
keypair["KeyFingerprint"],
"imported")
return True
elif create_or_upload == "create":
path = raw_input("directory to save keyfile: ")
full_path = utils.full_path(path)
if not os.path.exists(full_path) and not os.path.isdir(full_path):
logger.error("\"%s\" is not a valid directory", full_path)
return False
file_name = "{0}.pem".format(keypair_name)
if os.path.isfile(os.path.join(full_path, file_name)):
# This mimics the old boto2 keypair.save error
logger.error("\"%s\" already exists in \"%s\" directory",
file_name,
full_path)
return False
keypair = client.create_key_pair(KeyName=keypair_name)
logger.info(message,
keypair["KeyName"],
keypair["KeyFingerprint"],
"created")
f = open(os.path.join(full_path, file_name), "w")
f.write(keypair["KeyMaterial"])
f.close()
return True
else:
logger.warning("no action to find keypair, failing")
return False
|
Python
| 0
|
@@ -739,17 +739,16 @@
ext.
-parameter
+variable
s.ge
|
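Decoded, the hunk renames the lookup from `context.parameters` to `context.variables`, matching the route53 hook's move to stacker variables. The patched line would read:

```python
keypair_name = kwargs.get("keypair", context.variables.get("SshKeyName"))
```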
dc007104ad760c8e07e7444a8d620d8b2d90fbef
|
Fix preferences API authentication
|
smart/accesscontrol/rules/accesstoken.py
|
smart/accesscontrol/rules/accesstoken.py
|
"""
Rules for Accounts
"""
from smart.views import *
from smart.models.rdf_rest_operations import *
from smart.models.record_object import *
try:
from smart.plugins import *
except ImportError: pass
def check_token_for_record_wrapper(token):
def check_token_for_record(request, view_func, view_args, view_kwargs):
return token.share.record.id == view_kwargs['record_id']
return check_token_for_record
def check_frame_mode_wrapper(token):
pha = PHA.objects.get(id=token.share.with_app.id)
def r(request, view_func, view_args, view_kwargs):
return pha.mode == "frame_ui"
return r
def check_token_for_account_app_wrapper(token):
def check_token_for_account_app(request, view_func, view_args, view_kwargs):
pha = PHA.objects.get(id=token.share.with_app.id)
acc = Account.objects.get(id=token.share.authorized_by.id)
return pha.email == view_kwargs['pha_email'] and acc.email == view_kwargs['account_email']
return check_token_for_account_app
def grant(accesstoken, permset):
"""
grant the permissions of an account to this permset
"""
check_token_for_record = check_token_for_record_wrapper(accesstoken)
permset.grant(home)
permset.grant(record_by_token)
permset.grant(do_webhook)
permset.grant(record_delete_all_objects, [check_token_for_record])
permset.grant(record_delete_object, [check_token_for_record])
permset.grant(record_put_object, [check_token_for_record])
permset.grant(record_post_objects, [check_token_for_record])
permset.grant(record_get_all_objects, [check_token_for_record])
permset.grant(record_get_object, [check_token_for_record])
permset.grant(record_get_allergies, [check_token_for_record])
try:
permset.grant(record_proxy_backend.proxy_get, [check_token_for_record])
except:
pass
permset.grant(put_demographics, [check_token_for_record])
permset.grant(record_post_alert, [check_token_for_record])
permset.grant(user_search)
permset.grant(user_get)
check_frame_mode = check_frame_mode_wrapper(accesstoken)
permset.grant(resolve_activity_with_app, [])
permset.grant(resolve_manifest, [])
permset.grant(all_manifests, [])
permset.grant(record_search, [check_frame_mode])
check_token_for_account_app = check_token_for_account_app_wrapper(accesstoken)
permset.grant(preferences_get, [check_token_for_account_app])
permset.grant(preferences_put, [check_token_for_account_app])
permset.grant(preferences_delete, [check_token_for_account_app])
|
Python
| 0.000013
|
@@ -996,21 +996,15 @@
gs%5B'
-account_email
+user_id
'%5D%0A
|
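The hunk changes which view kwarg the account check reads. A sketch of the patched inner function, assuming the rest of the wrapper is unchanged:

```python
def check_token_for_account_app(request, view_func, view_args, view_kwargs):
    pha = PHA.objects.get(id=token.share.with_app.id)
    acc = Account.objects.get(id=token.share.authorized_by.id)
    # the URL kwarg is now user_id rather than account_email
    return pha.email == view_kwargs['pha_email'] and acc.email == view_kwargs['user_id']
```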
b16d5c08b5766e40fbf1773e3490dce461b20098
|
Exit code as int.
|
ci/github-pullrequest.py
|
ci/github-pullrequest.py
|
import json
import sys
import os
import requests
token = os.environ['GITHUB_ACCESS_TOKEN']
with open('version/version') as version_file:
version = version_file.read()
print("Making PR request to this release {}".format(version))
response = requests.post(
url='https://api.github.com/repos/aws-quickstart/quickstart-pivotal-cloudfoundry/pulls',
data=json.dumps({
"title": "PR via CI updating to release {}".format(version),
"body": "Please pull this in!",
"head": "cf-platform-eng:develop",
"base": "develop"
}),
headers={
'Authorization': 'Token {}'.format(token),
'Content-Type': 'application/json',
}
)
print(response.status_code)
print(response)
sys.exit(response.status_code < 300)
|
Python
| 0
|
@@ -707,58 +707,74 @@
de)%0A
-print(response)%0A%0Asys.exit(response.status_code %3C 300
+%0Aif response.status_code %3C 300:%0A sys.exit(0)%0Aelse:%0A sys.exit(1
)%0A
|
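The original `sys.exit(response.status_code < 300)` exits with status 1 exactly when the request succeeded, because `sys.exit(True)` maps to exit code 1. Decoded, the patch also drops `print(response)` and ends the script like this:

```python
print(response.status_code)

if response.status_code < 300:
    sys.exit(0)  # success
else:
    sys.exit(1)  # failure
```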
b1bc019320eaa8ceb7a70eca21b20fcda9cf609e
|
Add a timeout to the nginx cache clearing request
|
ckanext/nhm/lib/cache.py
|
ckanext/nhm/lib/cache.py
|
# !/usr/bin/env python
# encoding: utf-8
#
# This file is part of ckanext-nhm
# Created by the Natural History Museum in London, UK
import logging
import requests
from ckan.plugins import toolkit
log = logging.getLogger(__name__)
def cache_clear_nginx_proxy():
'''Clear NGINX Proxy Cache - issue PURGE request to load balancer.'''
url = toolkit.config.get(u'ckan.site_url')
# Prepare a PURGE request to send to front end proxy
req = requests.Request(u'PURGE', url)
s = requests.Session()
try:
r = s.send(req.prepare())
r.raise_for_status()
except:
log.critical(u'Error clearing NGINX Cache')
|
Python
| 0
|
@@ -548,16 +548,29 @@
repare()
+, timeout=0.5
)%0A
|
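Decoded, the whole change is one keyword argument on the send call, so a stalled PURGE request fails fast instead of blocking:

```python
r = s.send(req.prepare(), timeout=0.5)
```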
5c9278226ca74eb614b940faa56b6c063d7e0198
|
Update clashcallerbot_search.py added send_message()
|
clashcallerbot_search.py
|
clashcallerbot_search.py
|
#! python3
# -*- coding: utf-8 -*-
"""Searches recent reddit comments for ClashCaller! string and saves to database.
This module uses the Python Reddit API Wrapper (PRAW) to search recent reddit comments
for the ClashCaller! string. If found, the username, comment time, message, and
expiration time (if any) are parsed. The default, or provided, expiration time is
applied, then all the comment data is saved to a MySQL-compatible database."""
import praw
import logging.config
import re
import datetime
import clashcallerbot_database as db
# Logger
logging.config.fileConfig('logging.conf', disable_existing_loggers=False)
logging.raiseExceptions = True # Production mode if False (no console sys.stderr output)
logger = logging.getLogger('search')
# Generate reddit instance
reddit = praw.Reddit('clashcaller') # Section name in praw.ini
subreddit = reddit.subreddit('ClashCallerBot') # Limit scope for testing purposes
def main():
# Search recent comments for ClashCaller! string
clashcaller_re = re.compile(r'''
[!|\s]? # prefix ! or space (optional)
[C|c]lash[C|c]aller # upper or lowercase ClashCaller
[!|\s] # suffix ! or space (required)
''', re.VERBOSE)
for comment in subreddit.stream.comments():
match = clashcaller_re.search(comment.body)
if match and comment.author.name != 'ClashCallerBot' and not db.find_comment_id(comment.id):
logger.info(f'In from {comment.author.name}: {comment}')
# TODO: If found, parse username, comment date, message, permalink, and expiration time (if any)
# Strip everything before and including ClashCaller! string
comment.body = comment.body[match.end():].strip()
logger.debug(f'Stripped comment body: {comment.body}')
# Check for expiration time
expiration_re = re.compile(r'''
(?P<exp_digit>(\d){1,2}) # single or double digit
(\s)? # optional space
(?P<exp_unit>minute(s)?\s| # minute(s) (space after required)
min\s| # minute abbr. (space after required)
hour(s)?\s| # hour(s) (space after required)
hr\s # hour abbr. (space after required)
)+''', re.VERBOSE | re.IGNORECASE) # case-insensitive
minute_tokens = ('min', 'minute', 'minutes')
match = expiration_re.search(comment.body)
if not match:
timedelta = datetime.timedelta(hours=1) # Default to 1 hour
else:
exp_digit = int(match.group('exp_digit').strip())
if exp_digit == 0: # ignore zeros
logging.error('Expiration time is zero.')
# TODO: Send message and ignore comment
continue
exp_unit = match.group('exp_unit').strip().lower()
if exp_unit in minute_tokens:
timedelta = datetime.timedelta(minutes=exp_digit)
else:
if exp_digit >= 24: # ignore days
logging.error('Expiration time is >= 1 day.')
# TODO: Send message and ignore comment
continue
timedelta = datetime.timedelta(hours=exp_digit)
# Strip expiration time
comment.body = comment.body[match.end():].strip()
logger.debug(f'timedelta = {timedelta.seconds} seconds')
# Evaluate message
if len(comment.body) > 100:
logger.error('Message length > 100 characters.')
# TODO: send message and ignore comment
continue
message_re = re.compile(r'''
(\s)* # optional space
base # required string: base
[\W|\s]* # optional non-word character or space
(\d){1,2} # required single or double digit
''', re.VERBOSE | re.IGNORECASE) # case-insensitive
match = message_re.search(comment.body)
if not match:
logger.error('Message not properly formatted.')
# TODO: send message and ignore comment
continue
message = comment.body
logger.debug(f'message = {message}')
# Apply expiration time to comment date
comment_datetime = datetime.datetime.fromtimestamp(comment.created_utc, datetime.timezone.utc)
logger.info(f'comment_datetime = {comment_datetime}')
expiration_datetime = comment_datetime + timedelta
logger.info(f'expiration_datetime = {expiration_datetime}')
# Save message data to MySQL-compatible database
db.save_message(comment.permalink, message, expiration_datetime, comment.author.id)
# TODO: Compose message for comment and PM
# TODO: If not already commented, comment and send PM
# TODO: Add comment.id to database
# If run directly, instead of imported as a module, run main():
if __name__ == '__main__':
main()
|
Python
| 0
|
@@ -451,16 +451,32 @@
ort praw
+%0Aimport prawcore
%0A%0Aimport
@@ -5484,16 +5484,555 @@
abase%0A%0A%0A
+def send_message(uid: str, subj: str, msg: str) -%3E bool:%0A %22%22%22Send message to reddit user.%0A%0A Function sends given message with given subject line to given user.%0A%0A Args:%0A uid: userID of user.%0A subj: Subject line of message.%0A msg: Message to send to user.%0A%0A Returns:%0A True if successful, False otherwise.%0A %22%22%22%0A try:%0A reddit.redditor(uid).message(subj, msg)%0A%0A except prawcore.exceptions as err:%0A logger.error(f'send_message: %7Berr%7D')%0A return False%0A return True%0A%0A%0A
# If run
|
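Decoded verbatim, the commit adds `import prawcore` and the helper below. Note that `except prawcore.exceptions` names a module rather than an exception class, so the clause would itself raise `TypeError` if the message call failed; that quirk is in the committed code, not a transcription artifact.

```python
def send_message(uid: str, subj: str, msg: str) -> bool:
    """Send message to reddit user.

    Function sends given message with given subject line to given user.

    Args:
        uid: userID of user.
        subj: Subject line of message.
        msg: Message to send to user.

    Returns:
        True if successful, False otherwise.
    """
    try:
        reddit.redditor(uid).message(subj, msg)

    except prawcore.exceptions as err:
        logger.error(f'send_message: {err}')
        return False
    return True
```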
46f2ff677372299c690da947356d814a1b588b5d
|
Correct AuditLog._format_value for selection field
|
smile_audit/models/audit_log.py
|
smile_audit/models/audit_log.py
|
# -*- encoding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2010 Smile (<http://www.smile.fr>). All Rights Reserved
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
from openerp import api, fields, models, _
from openerp.exceptions import Warning
from openerp.tools.safe_eval import safe_eval as eval
class AuditLog(models.Model):
_name = 'audit.log'
_description = 'Audit Log'
_order = 'create_date desc'
name = fields.Char('Resource Name', size=256, compute='_get_name')
create_date = fields.Datetime('Date', readonly=True)
user_id = fields.Many2one('res.users', 'User', required=True, readonly=True)
model_id = fields.Many2one('ir.model', 'Object', required=True, readonly=True)
res_id = fields.Integer('Resource Id', readonly=True)
method = fields.Char('Method', size=64, readonly=True)
data = fields.Text('Data', readonly=True)
data_html = fields.Html('HTML Data', readonly=True, compute='_render_html')
@api.one
def _get_name(self):
if self.model_id and self.res_id:
record = self.env[self.model_id.model].browse(self.res_id).exists()
if record:
self.name = record.display_name
else:
data = eval(self.data or '{}')
rec_name = self.env[self.model_id.model]._rec_name
if rec_name in data['new']:
self.name = data['new'][rec_name]
elif rec_name in data['old']:
self.name = data['old'][rec_name]
else:
self.name = 'id=%s' % self.res_id
else:
self.name = ''
@api.multi
def _format_value(self, field, value):
self.ensure_one()
if not value and field.type not in ('boolean', 'integer', 'float'):
return ''
if field.type == 'selection':
selection = field.selection
if callable(selection):
selection = selection(self.env[self.model_id.model])
return dict(selection).get(value, value)
if field.type == 'many2one' and value:
return self.env[field.comodel_name].browse(value).exists().display_name or value
if field.type == 'reference' and value:
res_model, res_id = value.split(',')
return self.env[res_model].browse(int(res_id)).exists().display_name or value
if field.type in ('one2many', 'many2many') and value:
return ', '.join([self.env[field.comodel_name].browse(rec_id).exists().display_name or str(rec_id)
for rec_id in value])
if field.type == 'binary' and value:
return '<binary data>'
return value
@api.multi
def _get_label(self, field):
label = field.string
lang = self.env.user.lang
translated_label = ''
if lang != 'en_US':
params = ('%s,%s' % (field.model, field.name), 'field', lang, label)
translated_label = self.env['ir.translation'].sudo()._get_source(*params)
return translated_label or label
@api.multi
def _get_content(self):
self.ensure_one()
content = []
data = eval(self.data or '{}')
model_obj = self.env[self.model_id.model]
for fname in set(data['new'].keys() + data['old'].keys()):
field = model_obj._fields.get(fname) or model_obj._inherit_fields.get(fname)
old_value = self._format_value(field, data['old'].get(fname, ''))
new_value = self._format_value(field, data['new'].get(fname, ''))
label = self._get_label(field)
content.append((label, old_value, new_value))
return content
@api.one
def _render_html(self):
thead = ''
for head in (_('Field'), _('Old value'), _('New value')):
thead += '<th>%s</th>' % head
thead = '<thead><tr class="oe_list_header_columns">%s</tr></thead>' % thead
tbody = ''
for line in self._get_content():
row = ''
for item in line:
row += '<td>%s</td>' % item
tbody += '<tr>%s</tr>' % row
tbody = '<tbody>%s</tbody>' % tbody
self.data_html = '<table class="oe_list_content">%s%s</table>' % (thead, tbody)
@api.multi
def unlink(self):
raise Warning(_('You cannot remove audit logs!'))
|
Python
| 0.000001
|
@@ -2719,15 +2719,17 @@
if
-callabl
+isinstanc
e(se
@@ -2735,16 +2735,28 @@
election
+, basestring
):%0A
@@ -2778,25 +2778,23 @@
ction =
-selection
+getattr
(self.en
@@ -2815,16 +2815,29 @@
d.model%5D
+, selection)(
)%0A
|
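Stitched together, the four hunks handle the case where `field.selection` is the name of a model method (a string) rather than a callable, resolving it with `getattr` before building the lookup dict. The patched branch (Python 2, hence `basestring`):

```python
if field.type == 'selection':
    selection = field.selection
    if isinstance(selection, basestring):
        # selection may name a method on the model
        selection = getattr(self.env[self.model_id.model], selection)()
    return dict(selection).get(value, value)
```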
c7990836b51cc5e05c5ae3bd49a316c418bac44e
|
Revert "redirect www.storycheck.co.za to storycheck.co.za"
|
code4sa/middleware.py
|
code4sa/middleware.py
|
from django.http import HttpResponse, HttpResponsePermanentRedirect, Http404
import newrelic.agent
class RedirectsMiddleware(object):
redirects = {
# domain (without www.) -> full new URL
'living-wage.code4sa.org': 'http://livingwage.code4sa.org/',
'livingwagestory.code4sa.org': 'http://livingwage.code4sa.org/',
'maps.code4sa.org': 'http://mapit.code4sa.org/',
'compliancetracker.org.za': 'http://muni.compliancetracker.org.za/',
'wazimap.org': 'http://wazimap.co.za',
'wazimap.com': 'http://wazimap.co.za',
'wazimap.info': 'http://wazimap.co.za',
'info.speakupmzansi.org.za': 'http://speakupmzansi.org.za',
'speakupmzansi.co.za': 'http://speakupmzansi.org.za',
'speakupmzansi.org': 'http://speakupmzansi.org.za',
'speakup.org.za': 'http://speakupmzansi.org.za',
'hackforwater.org.za': 'http://www.hack4water.org.za',
# this redirects www -> non-www
'vote4thebudget.org': 'http://vote4thebudget.org',
'storycheck.co.za': 'http://storycheck.co.za',
# this redirects non-www -> www
'hack4water.org.za': 'http://www.hack4water.org.za',
}
def process_request(self, request):
host = request.get_host()
if host.startswith("www."):
host = host[4:]
if host in self.redirects:
return HttpResponsePermanentRedirect(self.redirects[host])
if request.path == '/ping':
newrelic.agent.ignore_transaction()
return HttpResponse('pong')
raise Http404()
|
Python
| 0
|
@@ -1031,63 +1031,8 @@
rg',
-%0A 'storycheck.co.za': 'http://storycheck.co.za',
%0A%0A
|
7e6dc283dbecf4bf9674559198b4a2c06e9f4c2e
|
Fix unicode import in test
|
spacy/tests/regression/test_issue1799.py
|
spacy/tests/regression/test_issue1799.py
|
'''Test sentence boundaries are deserialized correctly,
even for non-projective sentences.'''
import pytest
import numpy
from ... tokens import Doc
from ... vocab import Vocab
from ... attrs import HEAD, DEP
def test_issue1799():
problem_sentence = 'Just what I was looking for.'
heads_deps = numpy.asarray([[1, 397], [4, 436], [2, 426], [1, 402],
[0, 8206900633647566924], [18446744073709551615, 440],
[18446744073709551614, 442]], dtype='uint64')
doc = Doc(Vocab(), words='Just what I was looking for .'.split())
doc.vocab.strings.add('ROOT')
doc = doc.from_array([HEAD, DEP], heads_deps)
assert len(list(doc.sents)) == 1
|
Python
| 0.00034
|
@@ -86,16 +86,56 @@
nces.'''
+%0Afrom __future__ import unicode_literals
%0A%0Aimport
|
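Decoded, the fix slots the future import directly after the module docstring, before the existing blank line, so the test's string literals are unicode under Python 2:

```python
'''Test sentence boundaries are deserialized correctly,
even for non-projective sentences.'''
from __future__ import unicode_literals

import pytest
```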
5feeabbc562e238a27143c376c772b5aa413610c
|
tweak motor control numbers and controls
|
combined_robot/app.py
|
combined_robot/app.py
|
#!/usr/bin/env python
from __future__ import division
import cv
import cv2
import video
from common import RectSelector
from mosse import MOSSE
# Constants
LAUNCH = 0
CARRIAGE = 1
class App:
def __init__(self, video_src, robotq, appq):
self.cap = video.create_capture(video_src)
_, self.frame = self.cap.read()
cv2.namedWindow('frame')
self.row = 0
self.bounceshot = 0
cv2.createTrackbar('row', 'frame', 0, 2, self.onrow)
cv2.createTrackbar('speed', 'frame', 3920, 5000, self.onspeed)
cv2.createTrackbar('bounceshot', 'frame', 0, 1, self.onbounceshot)
cv2.imshow('frame', self.frame)
self.rect_sel = RectSelector('frame', self.onrect)
self.trackers = []
self.robotq = robotq
self.appq = appq
def nothing(*arg):
pass
def onrow(self, row):
'''When the row is changed, update the speed.'''
self.row = row
if self.bounceshot:
if row == 0: speed = 1920
elif row == 1: speed = 1930
elif row == 2: speed = 1940
else:
if row == 0: speed = 3920
elif row == 1: speed = 3930
elif row == 2: speed = 3940
cv2.setTrackbarPos('speed', 'frame', speed)
def onspeed(self, speed):
'''When the speed is changed, send it to the robot.'''
self.robotq.put((0, speed))
def onbounceshot(self, bounceshot):
'''When the speed is changed, send it to the robot.'''
self.bounceshot = bounceshot
self.onrow(self.row)
def onrect(self, rect):
frame_gray = cv2.cvtColor(self.frame, cv2.COLOR_BGR2GRAY)
tracker = MOSSE(frame_gray, rect)
self.trackers = [tracker]
def drawcrosshairs(self, img, width, height, color=(0, 255, 255), thickness=1):
p0 = int(width // 2), int(height // 2) - int(height // 10)
p1 = int(width // 2), int(height // 2) + int(height // 10)
cv2.line(img, p0, p1, color, thickness)
p0 = int(width// 2) - int(width // 10), int(height // 2)
p1 = int(width// 2) + int(width // 10), int(height // 2)
cv2.line(img, p0, p1, color, thickness)
def run(self):
direction = 0
while True:
ret, self.frame = self.cap.read()
self.frame = cv2.flip(self.frame, -1)
if not ret:
break
frame_gray = cv2.cvtColor(self.frame, cv2.COLOR_BGR2GRAY)
for tracker in self.trackers:
tracker.update(frame_gray)
vis = self.frame.copy()
width = self.cap.get(cv.CV_CAP_PROP_FRAME_WIDTH)
height = self.cap.get(cv.CV_CAP_PROP_FRAME_HEIGHT)
if len(self.trackers) > 0:
x, _ = self.trackers[0].draw_state(vis)
x = int(x)
# Make the robot move toward the object
if x < width // 2:
if direction >= 0:
print "Going right"
self.robotq.put((1, 100000, 0))
direction = -1
elif x > width // 2:
if direction <= 0:
print "Going left"
self.robotq.put((1, 100000, 1))
direction = 1
else:
print "Cup targeting complete"
self.robotq.put((1, 0, 0))
direction = 0
elif direction != 0:
self.robotq.put((1, 0, 0))
direction = 0
self.drawcrosshairs(vis, width, height)
self.rect_sel.draw(vis)
cv2.imshow('frame', vis)
ch = cv2.waitKey(10)
if ch == 27:
break
if ch == ord(' '):
print "Shooting"
self.robotq.put('shoot')
if ch == ord('c'):
self.trackers = []
cv2.destroyAllWindows()
self.robotq.put('exit')
|
Python
| 0
|
@@ -523,17 +523,18 @@
, 3920,
-5
+10
000, sel
@@ -1468,50 +1468,48 @@
hen
-the speed is changed, send it to the robot
+we toggle bounce shots, update the speed
.'''
@@ -3005,12 +3005,11 @@
ing
-righ
+lef
t%22%0A
@@ -3059,17 +3059,17 @@
100000,
-0
+1
))%0A
@@ -3207,35 +3207,36 @@
print %22Going
-lef
+righ
t%22%0A
@@ -3266,33 +3266,33 @@
put((1, 100000,
-1
+0
))%0A
@@ -3761,16 +3761,16 @@
== 27:%0A
-
@@ -3775,32 +3775,271 @@
break%0A
+ if ch == ord('d'):%0A print %22Manually going right%22%0A self.robotq.put((1, 10, 1))%0A if ch == ord('a'):%0A print %22Manually going left%22%0A self.robotq.put((1, 10, 0))%0A
if c
|
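Besides raising the speed trackbar maximum from 5000 to 10000, rewording the bounce-shot docstring, and swapping the "Going left"/"Going right" labels together with their direction codes, the diff adds manual jog keys. Decoded (Python 2 print statements, matching the file):

```python
if ch == ord('d'):
    print "Manually going right"
    self.robotq.put((1, 10, 1))
if ch == ord('a'):
    print "Manually going left"
    self.robotq.put((1, 10, 0))
```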
6b121711d4f905ef1795031f142cf3b0ac5063f6
|
Fix user_is_authenticated() for < 1.10
|
src/oscar/core/compat.py
|
src/oscar/core/compat.py
|
import csv
import sys
import django
from django.conf import settings
from django.contrib.auth.models import User
from django.core.exceptions import ImproperlyConfigured
from django.utils import six
from oscar.core.loading import get_model
# A setting that can be used in foreign key declarations
AUTH_USER_MODEL = getattr(settings, 'AUTH_USER_MODEL', 'auth.User')
try:
AUTH_USER_APP_LABEL, AUTH_USER_MODEL_NAME = AUTH_USER_MODEL.rsplit('.', 1)
except ValueError:
raise ImproperlyConfigured("AUTH_USER_MODEL must be of the form"
" 'app_label.model_name'")
def get_user_model():
"""
Return the User model. Doesn't require the app cache to be fully
initialised.
This used to live in compat to support both Django 1.4's fixed User model
and custom user models introduced thereafter.
Support for Django 1.4 has since been dropped in Oscar, but our
get_user_model remains because code relies on us annotating the _meta class
with the additional fields, and other code might rely on it as well.
"""
try:
model = get_model(AUTH_USER_APP_LABEL, AUTH_USER_MODEL_NAME)
except LookupError:
# Convert exception to an ImproperlyConfigured exception for
# backwards compatibility with previous Oscar versions and the
# original get_user_model method in Django.
raise ImproperlyConfigured(
"AUTH_USER_MODEL refers to model '%s' that has not been installed"
% settings.AUTH_USER_MODEL)
# Test if user model has any custom fields and add attributes to the _meta
# class
core_fields = set([f.name for f in User._meta.fields])
model_fields = set([f.name for f in model._meta.fields])
new_fields = model_fields.difference(core_fields)
model._meta.has_additional_fields = len(new_fields) > 0
model._meta.additional_fields = new_fields
return model
def existing_user_fields(fields):
"""
Starting with Django 1.6, the User model can be overridden and it is no
longer safe to assume the User model has certain fields. This helper
function assists in writing portable forms Meta.fields definitions
when those contain fields on the User model
Usage:
class UserForm(forms.Form):
...
class Meta:
# won't break if first_name is not defined on User model
fields = existing_user_fields(['first_name', 'last_name'])
"""
user_fields = get_user_model()._meta.fields
user_field_names = [field.name for field in user_fields]
return [field for field in fields if field in user_field_names]
# Support new Django 1.10 middleware
if django.VERSION >= (1, 10):
from django.utils.deprecation import MiddlewareMixin
else:
MiddlewareMixin = object
def user_is_authenticated(user):
if django.VERSION >= (1, 10):
return user.is_authenticated
else:
return user_is_authenticated(user)
def user_is_anonymous(user):
if django.VERSION >= (1, 10):
return user.is_anonymous
else:
return user.is_anonymous()
def assignment_tag(register):
if django.VERSION >= (1, 9):
return register.simple_tag
else:
return register.assignment_tag
# Python3 compatibility layer
"""
Unicode compatible wrapper for CSV reader and writer that abstracts away
differences between Python 2 and 3. A package like unicodecsv would be
preferable, but it's not Python 3 compatible yet.
Code from http://python3porting.com/problems.html
Changes:
- Classes renamed to include CSV.
- Unused 'codecs' import is dropped.
- Added possibility to specify an open file to the writer to send as response
of a view
"""
PY3 = sys.version > '3'
class UnicodeCSVReader:
def __init__(self, filename, dialect=csv.excel,
encoding="utf-8", **kw):
self.filename = filename
self.dialect = dialect
self.encoding = encoding
self.kw = kw
def __enter__(self):
if PY3:
self.f = open(self.filename, 'rt',
encoding=self.encoding, newline='')
else:
self.f = open(self.filename, 'rbU')
self.reader = csv.reader(self.f, dialect=self.dialect,
**self.kw)
return self
def __exit__(self, type, value, traceback):
self.f.close()
def next(self):
row = next(self.reader)
if PY3:
return row
return [s.decode("utf-8") for s in row]
__next__ = next
def __iter__(self):
return self
class UnicodeCSVWriter:
"""
Python 2 and 3 compatible CSV writer. Supports two modes:
* Writing to an open file or file-like object:
writer = UnicodeCSVWriter(open_file=your_file)
...
your_file.close()
* Writing to a new file:
with UnicodeCSVWriter(filename=filename) as writer:
...
"""
def __init__(self, filename=None, open_file=None, dialect=csv.excel,
encoding="utf-8", **kw):
if filename is open_file is None:
raise ImproperlyConfigured(
"You need to specify either a filename or an open file")
self.filename = filename
self.f = open_file
self.dialect = dialect
self.encoding = encoding
self.kw = kw
self.writer = None
def __enter__(self):
assert self.filename is not None
if PY3:
self.f = open(self.filename, 'wt',
encoding=self.encoding, newline='')
else:
self.f = open(self.filename, 'wb')
return self
def __exit__(self, type, value, traceback):
assert self.filename is not None
if self.filename is not None:
self.f.close()
def writerow(self, row):
if self.writer is None:
self.writer = csv.writer(self.f, dialect=self.dialect, **self.kw)
if not PY3:
row = [six.text_type(s).encode(self.encoding) for s in row]
self.writer.writerow(list(row))
def writerows(self, rows):
for row in rows:
self.writerow(row)
|
Python
| 0.000024
|
@@ -2912,25 +2912,25 @@
return user
-_
+.
is_authentic
@@ -2934,20 +2934,16 @@
ticated(
-user
)%0A%0A%0Adef
|
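The pre-patch `else` branch calls `user_is_authenticated(user)` again, so any Django < 1.10 caller recurses until the stack blows. The fix calls the user's own method instead:

```python
def user_is_authenticated(user):
    if django.VERSION >= (1, 10):
        return user.is_authenticated    # property on Django >= 1.10
    else:
        return user.is_authenticated()  # method on older Django
```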
b4214ec8cbacfe383b0f436df5cf49653a609e12
|
Fix formatting of print in run function.
|
scripts/lib/zulip_tools.py
|
scripts/lib/zulip_tools.py
|
#!/usr/bin/env python
from __future__ import print_function
import datetime
import errno
import os
import pwd
import shutil
import subprocess
import sys
import time
if False:
from typing import Sequence, Any
DEPLOYMENTS_DIR = "/home/zulip/deployments"
LOCK_DIR = os.path.join(DEPLOYMENTS_DIR, "lock")
TIMESTAMP_FORMAT = '%Y-%m-%d-%H-%M-%S'
# Color codes
OKBLUE = '\033[94m'
OKGREEN = '\033[92m'
WARNING = '\033[93m'
FAIL = '\033[91m'
ENDC = '\033[0m'
def subprocess_text_output(args):
# type: (Sequence[str]) -> str
return subprocess.check_output(args, universal_newlines=True).strip()
def su_to_zulip():
# type: () -> None
pwent = pwd.getpwnam("zulip")
os.setgid(pwent.pw_gid)
os.setuid(pwent.pw_uid)
os.environ['HOME'] = os.path.abspath(os.path.join(DEPLOYMENTS_DIR, '..'))
def make_deploy_path():
# type: () -> str
timestamp = datetime.datetime.now().strftime(TIMESTAMP_FORMAT)
return os.path.join(DEPLOYMENTS_DIR, timestamp)
if __name__ == '__main__':
cmd = sys.argv[1]
if cmd == 'make_deploy_path':
print(make_deploy_path())
def mkdir_p(path):
# type: (str) -> None
# Python doesn't have an analog to `mkdir -p` < Python 3.2.
try:
os.makedirs(path)
except OSError as e:
if e.errno == errno.EEXIST and os.path.isdir(path):
pass
else:
raise
def get_deployment_lock(error_rerun_script):
# type: (str) -> None
start_time = time.time()
got_lock = False
while time.time() - start_time < 300:
try:
os.mkdir(LOCK_DIR)
got_lock = True
break
except OSError:
print(WARNING + "Another deployment in progress; waiting for lock... "
+ "(If no deployment is running, rmdir %s)" % (LOCK_DIR,) + ENDC)
sys.stdout.flush()
time.sleep(3)
if not got_lock:
print(FAIL + "Deployment already in progress. Please run\n"
+ " %s\n" % (error_rerun_script,)
+ "manually when the previous deployment finishes, or run\n"
+ " rmdir %s\n" % (LOCK_DIR,)
+ "if the previous deployment crashed."
+ ENDC)
sys.exit(1)
def release_deployment_lock():
# type: () -> None
shutil.rmtree(LOCK_DIR)
def run(args, **kwargs):
# type: (Sequence[str], **Any) -> int
# Output what we're doing in the `set -x` style
print("+ %s" % (" ".join(args)))
process = subprocess.Popen(args, **kwargs)
rc = process.wait()
if rc:
raise subprocess.CalledProcessError(rc, args) # type: ignore # https://github.com/python/typeshed/pull/329
return 0
|
Python
| 0.000003
|
@@ -2468,16 +2468,135 @@
args)))%0A
+%0A if kwargs.get('shell'):%0A # With shell=True we can only pass string to Popen%0A args = %22 %22.join(args)%0A%0A
proc
|
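Decoded, the patch inserts a shell-mode guard between the `set -x` style echo and the `Popen` call, since `shell=True` expects a single command string rather than a list:

```python
def run(args, **kwargs):
    # type: (Sequence[str], **Any) -> int
    # Output what we're doing in the `set -x` style
    print("+ %s" % (" ".join(args)))

    if kwargs.get('shell'):
        # With shell=True we can only pass string to Popen
        args = " ".join(args)

    process = subprocess.Popen(args, **kwargs)
    # remainder of the function unchanged
```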
780963217b05efa4ff3937efa3101972e3fc9901
|
Add timeouts
|
commands/wikipedia.py
|
commands/wikipedia.py
|
import json
import requests
from CommandTemplate import CommandTemplate
import Constants
import GlobalStore
import SharedFunctions
class Command(CommandTemplate):
triggers = ['wikipedia', 'wiki', 'wikirandom']
helptext = "Searches for the provided text on Wikipedia, and returns the start of the article, if it's found. " \
"{commandPrefix}wiki only returns the first sentence, {commandPrefix}wikipedia returns the first paragraph. " \
"{commandPrefix}wikirandom returns a random wikipedia page"
def onLoad(self):
GlobalStore.commandhandler.addCommandFunctions(__file__, 'searchWikipedia', self.searchWikipedia,
'getWikipediaArticle', self.getArticleText, 'getRandomWikipediaArticle', self.getRandomWikipediaArticle)
def getRandomWikipediaArticle(self, addExtendedText=False):
page = requests.get('http://en.m.wikipedia.org/wiki/Special:Random/#/random')
self.logDebug("[wiki] Random page url: {}".format(page.url))
articleName = page.url.split('/wiki/', 1)[1] #Get the part of the URL that is the article title
return self.getArticleText(articleName, addExtendedText)
def searchWikipedia(self, searchterm, addExtendedText=False):
url = u'https://en.wikipedia.org/w/api.php?format=json&utf8=1&action=query&list=search&srwhat=nearmatch&srlimit=1&srsearch={}&srprop='.format(searchterm)
result = requests.get(url)
result = json.loads(result.text)
if 'error' in result:
self.logError("[wiki] An error occurred while searching. Search term: '{}'; Search url: '{}'; error: '{}'".format(searchterm, url, result['error']['info']))
return (False, "Sorry, an error occurred while searching. Please tell my owner(s) to check my logs ({})".format(result['error']['code']))
#Check if any results were found
elif 'search' not in result['query'] or len(result['query']['search']) == 0:
return (False, "No search results for '{}'".format(searchterm))
else:
return self.getArticleText(result['query']['search'][0]['title'], addExtendedText)
def getArticleText(self, pagename, addExtendedText=False, limitLength=True):
replyLengthLimit = 310
url = u'https://en.wikipedia.org/w/api.php'
params = {'format': 'json', 'utf8': '1', 'action': 'query', 'prop': 'extracts', 'redirects': '1',
'exintro': '1', 'explaintext': '1', 'exsectionformat': 'plain', 'titles': pagename}
#If we need to be verbose, get as many characters as we can
if addExtendedText:
params['exchars'] = replyLengthLimit
#Otherwise just get the first sentence
else:
params['exsentences'] = '1'
apireply = requests.get(url, params=params)
result = json.loads(apireply.text)
if 'error' in result:
self.logError("[wiki] An error occurred while retrieving an article. Page name: '{}'; url: '{}'; error: '{}'".format(pagename, url, result['error']['info']))
return (False, "Sorry, an error occurred while retrieving the page. Please tell my owner(s) to check my logs ({})".format(result['error']['info']))
result = result['query']
if 'pages' not in result or '-1' in result['pages']:
return (False, "No page about '{}' found, sorry".format(pagename))
else:
#The 'pages' dictionary contains a single key-value pair. The key is the (unknown) revision number. So just get the single entry
pagedata = result['pages'].popitem()[1]
replytext = pagedata['extract']
#Check if it's not a disambiguation page (rstrip('.') because sometimes it ends with dots and we want to catch that too)
if replytext.split('\n', 1)[0].rstrip('.').endswith("may refer to:"):
replytext = "'{}' has multiple meanings".format(pagename)
else:
replytext = replytext.replace('\n', ' ').replace(' ', ' ')
#Make sure the text isn't too long
if limitLength and len(replytext) > replyLengthLimit:
replytext = replytext[:replyLengthLimit]
#Try not to chop up words
lastSpaceIndex = replytext.rfind(' ')
if lastSpaceIndex > -1:
replytext = replytext[:lastSpaceIndex]
replytext += ' [...]'
#Add the URL
replytext += u'{}http://en.wikipedia.org/wiki/{}'.format(Constants.GREY_SEPARATOR, pagedata['title'].replace(u' ', u'_'))
return (True, replytext)
def execute(self, message):
if message.trigger == 'wikirandom':
replytext = self.getRandomWikipediaArticle()[1]
elif message.messagePartsLength == 0:
replytext = "Please provide a term to search for"
else:
replytext = self.searchWikipedia(message.message, message.trigger=='wikipedia')[1]
message.reply(replytext, "say")
|
Python
| 0.000001
|
@@ -801,16 +801,24 @@
False):%0A
+%09%09try:%0A%09
%09%09page =
@@ -887,16 +887,202 @@
/random'
+, timeout=10.0)%0A%09%09except requests.exceptions.Timeout:%0A%09%09%09return (False, %22Apparently Wikipedia couldn't pick between all of its interesting articles, so it took too long to reply. Sorry!%22
)%0A%09%09self
@@ -1516,16 +1516,24 @@
chterm)%0A
+%09%09try:%0A%09
%09%09result
@@ -1551,16 +1551,197 @@
.get(url
+, timeout=10.0)%0A%09%09except requests.exceptions.Timeout:%0A%09%09%09return (False, %22Either that's a difficult search query, or Wikipedia is tired. Either way, that search took too long, sorry%22
)%0A%09%09resu
@@ -2914,16 +2914,24 @@
%5D = '1'%0A
+%09%09try:%0A%09
%09%09apirep
@@ -2966,16 +2966,128 @@
s=params
+, timeout=10.0)%0A%09%09except requests.exceptions.Timeout:%0A%09%09%09return (False, %22Article retrieval took too long, sorry%22
)%0A%09%09resu
|
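All three hunks follow one pattern: wrap the `requests.get` call in a try block, pass `timeout=10.0`, and return a themed apology on `requests.exceptions.Timeout`. Decoded, the first site looks like this (the file indents with tabs, `%09`; spaces and line wrapping are used here for display):

```python
try:
    page = requests.get('http://en.m.wikipedia.org/wiki/Special:Random/#/random',
                        timeout=10.0)
except requests.exceptions.Timeout:
    return (False, "Apparently Wikipedia couldn't pick between all of its interesting "
                   "articles, so it took too long to reply. Sorry!")
```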
ebdf2f9eb47ac52adb02ad87f40dcc8874910c21
|
Change witness heading information; sort reference texts to the top.
|
scripts/proteus-cluster.py
|
scripts/proteus-cluster.py
|
from __future__ import print_function
import sys
from re import sub
from pyspark.sql import SparkSession, Row
from pyspark.sql.functions import col, udf, regexp_replace
def formatPassage(r):
text = ""
if r.url:
text += "<h2><a href=\"%s\">%s</a></h2>" % (r.url, (r.title or r.id))
else:
text += "<h2>%s</h2>" % (r.title or r.id)
cluster = "cl" + str(r.cluster)
dateline = '<date tokenizetagcontent="false">%s</date>' % r.date
if r.placeOfPublication:
dateline += ' · <place>%s</place>' % r.placeOfPublication
text += "<h4>%s</h4>" % dateline
text += sub('(?<!\\\\)\\n', '<br/>\\n', r.text)
text += ' <archiveid tokenizetagcontent="false">%s</archiveid>' % cluster
text += ' <series tokenizetagcontent="false">%s</series>' % r.series
text += ' <id tokenizetagcontent="false">%s</id>' % r.id
if r.subject:
text += ' <subject>%s</subject>' % r.subject
image = None
thumb = None
if r.corpus == 'ca' and r.page_access != None:
image = r.page_access.replace('/print/', '/').rstrip('/') + '.jpg'
thumb = image.replace('_600x600_', '_80x100_')
return Row(archiveid=cluster, id=r.id, imagecount=r.size, title=r.title, date=r.date, placeOfPublication=r.placeOfPublication,
text=text, page_access=r.page_access, page_image=image, page_thumb=thumb)
def formatPassages(x):
(cluster, riter) = x
rows = list(riter)
rows.sort(key=lambda z: z.date)
res = list()
for i in range(len(rows)):
r = rows[i].asDict()
name = "%s_%d" % (cluster, i)
r['name'] = name
r['seq'] = i
r['identifier'] = cluster
r['pageNumber'] = i
res.append(Row(**r))
return res
def formatCluster(x):
(cluster, riter) = x
rows = list(riter)
rows.sort(key=lambda z: z.date)
text = ""
for i in range(len(rows)):
text += "<div class=\"page-break\" page=\"%d\">%s</div>\n" % (i, rows[i].text)
return Row(name=cluster, identifier=cluster, imagecount=rows[0].imagecount, date=rows[0].date,
title=("%d reprints from %s to %s [%s]" % (len(rows), rows[0].date, rows[len(rows)-1].date, cluster)), text=text, pageNumber=0, seq=0, id=cluster+'_0')
if __name__ == "__main__":
if len(sys.argv) < 4:
print("Usage: proteus-cluster.py <input> <page-out> <cluster-out>", file=sys.stderr)
exit(-1)
spark = SparkSession.builder.appName('Proteus Formatting').getOrCreate()
df = spark.read.load(sys.argv[1])
clusters = df \
.drop("locs")\
.drop("pages")\
.drop("regions")\
.withColumn('page_access', col('url')) \
.withColumn('text', regexp_replace(col('text'), '</?[A-Za-z][^>]*>', ''))\
.rdd \
.map(formatPassage)\
.groupBy(lambda r: r.archiveid)
clusters.flatMap(formatPassages).toDF().write.option('compression', 'gzip').json(sys.argv[2])
clusters.map(formatCluster).toDF().write.option('compression', 'gzip').json(sys.argv[3])
spark.stop()
|
Python
| 0
|
@@ -197,24 +197,130 @@
text = %22%22%0A
+ source = 'From ' + (('%3Ccite%3E%25s%3C/cite%3E' %25 r.source) or 'unknown source')%0A title = r.title or source%0A
if r.url
@@ -380,33 +380,21 @@
(r.url,
-(r.
title
- or r.id)
)%0A el
@@ -433,25 +433,123 @@
%22 %25
-(r.title or r.id)
+title%0A if r.creator: text += '%3Ch4%3Eby %25s%3C/h4%3E' %25 r.creator%0A if title != source: text += '%3Ch4%3E%25s%3C/h4%3E' %25 source
%0A
@@ -1128,223 +1128,8 @@
ect%0A
- image = None%0A thumb = None%0A if r.corpus == 'ca' and r.page_access != None:%0A image = r.page_access.replace('/print/', '/').rstrip('/') + '.jpg'%0A thumb = image.replace('_600x600_', '_80x100_')%0A
@@ -1259,16 +1259,27 @@
ication,
+ ref=r.ref,
%0A
@@ -1335,16 +1335,23 @@
e_image=
+r.page_
image, p
@@ -1360,16 +1360,23 @@
e_thumb=
+r.page_
thumb)%0A
@@ -1475,30 +1475,46 @@
y=lambda z:
-z.date
+(-z.ref, z.date, z.id)
)%0A res =
@@ -1869,22 +1869,38 @@
mbda z:
-z.date
+(-z.ref, z.date, z.id)
)%0A te
|
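Two changes do what the subject describes: each witness heading now leads with a `<cite>`-wrapped source line (plus an optional creator byline), and both sort keys become `(-z.ref, z.date, z.id)` so reference texts float to the top; the diff also drops the ca-corpus image/thumb computation in favour of `r.page_image`/`r.page_thumb` passed straight through. A decoded sketch of the new pieces (the `or 'unknown source'` arm can never fire as written, since the formatted string is always truthy; that quirk is the commit's own):

```python
source = 'From ' + (('<cite>%s</cite>' % r.source) or 'unknown source')
title = r.title or source

# and in both formatPassages and formatCluster:
rows.sort(key=lambda z: (-z.ref, z.date, z.id))  # reference texts sort first
```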
a2f66370843658090ccca3fbdbb2ff9d12c7605a
|
Update to current spotpy code stlye
|
spotpy/examples/tutorial_own_database.py
|
spotpy/examples/tutorial_own_database.py
|
'''
Copyright 2015 by Tobias Houska
This file is part of Statistical Parameter Estimation Tool (SPOTPY).
:author: Tobias Houska
This example implements the Rosenbrock function into SPOT.
'''
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
import numpy as np
import spotpy
from spotpy.objectivefunctions import rmse
class spot_setup(object):
slow = 1000
def __init__(self):
self.params = [spotpy.parameter.List('x', [1, 2, 3, 4, 6, 7, 8, 9, 0]), #Give possible x values as a List
spotpy.parameter.List('y', [0, 1, 2, 5, 7, 8, 9, 0, 1])] #Give possible y values as a List
self.db_headers = ["obj_functions", "parameters", "simulations"]
self.database = open('MyOwnDatabase.txt', 'w')
self.database.write("\t".join(self.db_headers) + "\n")
def parameters(self):
return spotpy.parameter.generate(self.params)
def simulation(self, vector):
x = np.array(vector)
for i in range(self.slow):
_ = np.sin(i)
simulations = [sum(100.0*(x[1:] - x[:-1]**2.0)**2.0 + (1 - x[:-1])**2.0)]
return simulations
def evaluation(self):
observations = [0]
return observations
def objectivefunction(self, simulation, evaluation):
objectivefunction = -rmse(evaluation=evaluation, simulation=simulation)
return objectivefunction
def save(self, objectivefunctions, parameter, simulations, *args, **kwargs):
param_str = "|".join((str(p) for p in parameter))
sim_str = "|".join((str(s) for s in simulations))
line = "\t".join([str(objectivefunctions), param_str, sim_str]) + '\n'
self.database.write(line)
spot_setup = spot_setup()
# Leave out dbformat and dbname and spotpy will return results in spot_setup.save function
sampler = spotpy.algorithms.mc(spot_setup)
sampler.sample(9) # Choose equal or less repetitions as you have parameters in your List
spot_setup.database.close() # Close the created txt file
|
Python
| 0
|
@@ -447,67 +447,12 @@
-slow = 1000%0A def __init__(self):%0A self.params
+a
=
-%5B
spot
@@ -468,197 +468,108 @@
ter.
-List('x', %5B1, 2, 3, 4, 6, 7, 8, 9, 0%5D), #Give possible x values as a List%0A spotpy.parameter.List('y', %5B0, 1, 2, 5, 7, 8, 9, 0, 1%5D)%5D #Give possible y values as a List%0A
+Uniform(low=0, high=1)%0A b = spotpy.parameter.Uniform(low=0, high=1)%0A %0A def __init__(self):%0A
%0A
@@ -762,89 +762,8 @@
%22)%0A%0A
- def parameters(self):%0A return spotpy.parameter.generate(self.params)%0A%0A
@@ -825,69 +825,8 @@
or)%0A
- for i in range(self.slow):%0A _ = np.sin(i)%0A
@@ -1278,33 +1278,34 @@
param_str = %22
-%7C
+%5Ct
%22.join((str(p) f
@@ -1347,9 +1347,10 @@
= %22
-%7C
+%5Ct
%22.jo
@@ -1497,16 +1497,47 @@
(line)%0A%0A
+if __name__ == %22__main__%22:%0A
spot_set
@@ -1558,19 +1558,21 @@
p()%0A
-%0A# Leave ou
+ %0A # se
t db
@@ -1582,18 +1582,17 @@
mat
-and dbname
+to custom
and
@@ -1647,16 +1647,20 @@
unction%0A
+
sampler
@@ -1692,18 +1692,41 @@
ot_setup
-)%0A
+, dbformat='custom')%0A
sampler.
@@ -1736,9 +1736,11 @@
ple(
-9
+100
) #
@@ -1808,16 +1808,20 @@
ur List%0A
+
spot_set
|
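The diff trades the `List` parameters for class-level `Uniform` ones, drops the artificial `slow` loop and the `parameters()` method, switches the custom database columns from `|` to tab separators, and moves the driver under a main guard with an explicit `dbformat='custom'` and 100 repetitions. A decoded skeleton, with the untouched methods elided:

```python
class spot_setup(object):
    a = spotpy.parameter.Uniform(low=0, high=1)
    b = spotpy.parameter.Uniform(low=0, high=1)

    # __init__, simulation, evaluation, objectivefunction and save()
    # remain as above, except save() now joins columns with "\t"

if __name__ == "__main__":
    spot_setup = spot_setup()
    # set dbformat to custom and spotpy will return results in spot_setup.save function
    sampler = spotpy.algorithms.mc(spot_setup, dbformat='custom')
    sampler.sample(100)
    spot_setup.database.close()  # Close the created txt file
```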
2ccdaf4ac5397a7fb1795cf3f3e52348e775dbbb
|
Bump version.
|
settingsjs/__init__.py
|
settingsjs/__init__.py
|
__version__ = (0, 1, 2)
def get_version():
return '.'.join(map(str, __version__))
|
Python
| 0
|
@@ -18,9 +18,9 @@
1,
-2
+3
)%0A%0A%0A
|
2b299ab6b26ab5f0cfdbe08d3bff57a03483ca93
|
update import script for Epsom & Ewell (closes #2134)
|
polling_stations/apps/data_collection/management/commands/import_epsom_and_ewell.py
|
polling_stations/apps/data_collection/management/commands/import_epsom_and_ewell.py
|
from addressbase.models import Address
from uk_geo_utils.helpers import Postcode
from data_collection.github_importer import BaseGitHubImporter
class Command(BaseGitHubImporter):
srid = 27700
districts_srid = 27700
council_id = "E07000208"
elections = ["europarl.2019-05-23"]
scraper_name = "wdiv-scrapers/DC-PollingStations-EpsomAndEwell"
geom_type = "gml"
# districts file has station address and UPRN for district
# parse the districts file twice
stations_query = "districts"
def geocode_from_uprn(self, uprn, station_postcode):
uprn = uprn.lstrip("0").strip()
ab_rec = Address.objects.get(uprn=uprn)
ab_postcode = Postcode(ab_rec.postcode)
station_postcode = Postcode(station_postcode)
if ab_postcode != station_postcode:
print(
"Using UPRN {uprn} for station ID but '{pc1}' != '{pc2}'".format(
uprn=uprn,
pc1=ab_postcode.with_space,
pc2=station_postcode.with_space,
)
)
return ab_rec.location
def district_record_to_dict(self, record):
poly = self.extract_geometry(record, self.geom_type, self.get_srid("districts"))
return {
"internal_council_id": record["wardcode"],
"name": record["wardcode"],
"area": poly,
"polling_station_id": record["wardcode"],
}
def station_record_to_dict(self, record):
postcode = " ".join(record["address"].split(" ")[-2:])
try:
location = self.geocode_from_uprn(record["uprn"].strip(), postcode)
except Address.DoesNotExist:
location = None
return {
"internal_council_id": record["wardcode"],
"address": record["address"],
"postcode": "",
"location": location,
}
|
Python
| 0
|
@@ -270,12 +270,8 @@
= %5B%22
-euro
parl
@@ -280,13 +280,13 @@
019-
-05-23
+12-12
%22%5D%0A
|
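The two hunks split a single token, which makes the raw diff hard to read: `europarl.2019-05-23` loses its `euro` prefix and gets a new date, leaving:

```python
elections = ["parl.2019-12-12"]
```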
ce3e9044ed915450d44056a360a8908c02f08d0b
|
Remove iterm2 from system startup
|
conf/mac.py
|
conf/mac.py
|
# type: ignore
# silence linter errors
defaults = defaults
run = run
### trackpad settings ###
for key in (
'com.apple.AppleMultitouchTrackpad',
'com.apple.driver.AppleBluetoothMultitouch.trackpad'
):
trackpad = defaults[key]
trackpad['Clicking'] = True # touch to click
# enable *both* methods of right clicking
trackpad['TrackpadRightClick'] = True # two finger tap
trackpad['TrackpadCornerSecondaryClick'] = 2 # pushing to click in right corner
# disable "smart zoom" because it puts a delay on two-finger-tap right click
trackpad['TrackpadTwoFingerDoubleTapGesture'] = False
trackpad['TrackpadThreeFingerDrag'] = True
# disable dashboard
defaults['com.apple.dashboard']['mcx-disabled'] = True
dock = defaults['com.apple.dock']
dock['autohide'] = False
dock['autohide-delay'] = .05
dock['autohide-time-modifier'] = 0.4
dock['show-recents'] = False
# http://www.defaults-write.com/enable-highlight-hover-effect-for-grid-view-stacks/
dock['mouse-over-hilite-stack'] = True
# Spaces
dock['mru-spaces'] = False # don't reorder spaces based on use
defaults.g['AppleSpacesSwitchOnActivate'] = False # don't switch to another space when alt tabbing
# hot corners
# Possible values:
# 0: no-op
# 2: Mission Control
# 3: Show application windows
# 4: Desktop
# 5: Start screen saver
# 6: Disable screen saver
# 7: Dashboard
# 10: Put display to sleep
# 11: Launchpad
# 12: Notification Center
dock['wvous-bl-corner'] = 10 # bottom left: sleep
dock['wvous-bl-modifier'] = 0
dock['wvous-br-corner'] = 3 # bottom right: application windows
dock['wvous-br-modifier'] = 0
dock['wvous-tl-corner'] = 2 # top left: mission control
dock['wvous-tl-modifier'] = 0
dock['wvous-tr-corner'] = 4 # top right: desktop
dock['wvous-tr-modifier'] = 0
finder = defaults['com.apple.finder']
finder['ShowPathbar'] = True
finder['ShowStatusBar'] = True
# show battery % in menubar
defaults['com.apple.menuextra.battery']['ShowPercent'] = True
# key repeat rate and delay
defaults.g['InitialKeyRepeat'] = 10
defaults.g['KeyRepeat'] = 2
# turn on "shake mouse pointer to locate"
defaults.g['CGDisableCursorLocationMagnification'] = False
# set file-type associations
associations = {
'com.microsoft.vscode': [
# plain-text association also sets default text editor (open -t)
'public.plain-text',
'public.python-script',
'public.yaml',
],
'org.videolan.vlc': [
'public.mp3',
'public.mpeg-4',
'org.matroska.mkv',
'org.videolan.webm',
],
'org.libreoffice.script': [
'public.comma-separated-values-text',
],
}
for program, types in associations.items():
for type in types:
run(['duti', '-s', program, type, 'all'])
# make tab move between "All Controls" (System Prefs -> Keyboard -> Shortcuts)
defaults.g['AppleKeyboardUIMode'] = 3
# show the date in the clock
defaults['com.apple.menuextra.clock']['DateFormat'] = "EEE MMM d h:mm a"
# use function keys as function keys
defaults.g['com.apple.keyboard.fnState'] = True
# don't close windows when quitting program (required for iterm2 to restore windows)
defaults.g['NSQuitAlwaysKeepsWindows'] = True
# zoom with ctrl+mouse wheel (System Prefs -> Accessibility -> Zoom)
defaults['com.apple.universalaccess']['closeViewScrollWheelToggle'] = True
flycut = defaults['com.generalarcade.flycut']
# shortcut to ctrl+cmd v
flycut["ShortcutRecorder mainHotkey"] = {'keyCode': 47, 'modifierFlags': 1310720}
flycut['loadOnStartup'] = 1
flycut['pasteMovesToTop'] = 1
flycut['removeDuplicates'] = 1
flycut['savePreference'] = 2 # "after each clip"
iterm = defaults['com.googlecode.iterm2']
iterm['PrefsCustomFolder'] = '~/.config/iterm2'
iterm['LoadPrefsFromCustomFolder'] = True
iterm['HotkeyTermAnimationDuration'] = 0
dash = defaults['com.kapeli.dashdoc']
dash['syncFolderPath'] = "~/Documents/Dash"
dash['snippetSQLPath'] = "~/Documents/Dash/snippets.dash"
caffeine = defaults['com.intelliscapesolutions.caffeine']
caffeine['ActivateOnLaunch'] = False
caffeine['SuppressLaunchMessage'] = True
# startup items - https://apple.stackexchange.com/a/310502/
required_login_apps = {'Flycut', 'SpotMenu', 'Flux', 'iTerm', 'Alfred 4', 'Horo', 'Caffeine'}
current_login_apps = set(
filter(None,
run(['osascript', '-e' 'tell application "System Events" to get the name of every login item'], cap='stdout').strip().split(', ')
)
)
script = 'tell application "System Events" to make login item at end with properties {{path:"/Applications/{app}.app", hidden:false}}'
print(f"Current login apps: {current_login_apps}. Required login apps: {required_login_apps}")
for app in required_login_apps - current_login_apps:
print(f"Setting '{app}' to run on login")
run(['osascript', '-e', script.format(app=app)])
# menubar items
menus = [
'/System/Library/CoreServices/Menu Extras/{}.menu'.format(m)
for m in ['Bluetooth', 'Volume', 'AirPort', 'TextInput', 'Battery', 'Clock', 'Displays', 'User']
]
current_menus = defaults['com.apple.systemuiserver']['menuExtras'].read()
menu_items_to_remove = set(current_menus) - set(menus)
if menu_items_to_remove:
print("Removing:", menu_items_to_remove)
defaults['com.apple.systemuiserver']['menuExtras'] = menus
# screenshots
screenshot_dir = '~/Desktop/Screenshots'
run(f"mkdir -p {screenshot_dir}")
screenshots = defaults['com.apple.screencapture']
screenshots['location'] = screenshot_dir
screenshots['show-thumbnail'] = False
screenshots['disable-shadow'] = True
# turn off "hey Siri" (on Mac, triggers more by accident than on purpose)
defaults['com.apple.Siri']['VoiceTriggerUserEnabled'] = False
|
Python
| 0
|
@@ -4198,17 +4198,8 @@
ux',
- 'iTerm',
'Al
|
f3cff666d72d41c8e65cd52bdb438748175d3ee8
|
move import to top of file
|
communityalmanac/controllers/almanac.py
|
communityalmanac/controllers/almanac.py
|
# Community Almanac - A place for your stories.
# Copyright (C) 2009 Douglas Mayle, Robert Marianski,
# Andy Cochran, Chris Patterson
# This file is part of Community Almanac.
# Community Almanac is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
# Community Almanac is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
# You should have received a copy of the GNU Affero General Public License
# along with Community Almanac. If not, see <http://www.gnu.org/licenses/>.

import logging

from communityalmanac.model import Almanac
from communityalmanac.model import Page
from communityalmanac.model import IndexLine
from communityalmanac.model import meta
from communityalmanac.model.meta import Session as s
from formencode import Invalid
from formencode import Schema
from formencode import validators
from pylons import request, response, session, tmpl_context as c
from pylons.controllers.util import abort, redirect_to
from pylons.decorators import jsonify
from pylons.decorators import validate
from pylons.decorators.rest import dispatch_on
from shapely.geometry.geo import asShape
from sqlalchemy.orm import exc
from sqlalchemy.sql import func
from sqlalchemy import desc
import communityalmanac.lib.helpers as h
import simplejson

from communityalmanac.lib.base import BaseController, render

log = logging.getLogger(__name__)


class AlmanacCreateForm(Schema):
    name = validators.String(not_empty=True)
    almanac_center = validators.String(not_empty=True)


class AlmanacController(BaseController):

    @dispatch_on(POST='_do_create')
    def create(self):
        redirect_to(h.url_for('home'))

    @validate(schema=AlmanacCreateForm(), form='create')
    def _do_create(self):
        name = self.form_result['name']
        # Prevent creation of duplicates
        try:
            almanac = meta.Session.query(Almanac).filter(Almanac.name==name).one()
            return redirect_to(h.url_for('page_create', almanac_slug=almanac.slug))
        except exc.NoResultFound:
            pass
        json = self.form_result['almanac_center']
        shape = simplejson.loads(json)
        # We've requested a LonLat from OpenLayers, so it gives us a point in
        # Plate Carree (4326)
        point = asShape(shape)
        point.srid = 4326
        slug = Almanac.name_almanac(name)
        almanac = Almanac(name, slug)
        almanac.location = point
        meta.Session.save(almanac)
        meta.Session.commit()
        redirect_to(h.url_for('page_create', almanac_slug=slug))

    def view(self, almanac_slug):
        c.almanac = h.get_almanac_by_slug(almanac_slug)
        loc = c.almanac.location_4326
        c.lng, c.lat = loc.x, loc.y
        page_idx = request.GET.get('page', 1)
        try:
            page_idx = int(page_idx)
        except ValueError:
            page_idx = 1
        pages_query = meta.Session.query(Page).filter(Page.almanac_id==c.almanac.id).filter(Page.published == True).order_by(Page.modified.desc())
        try:
            c.next_page = pages_query[:1][0]
        except IndexError:
            pass
        else:
            c.next_page_url = h.url_for('page_view', almanac=c.almanac, page=c.next_page)
            c.next_page_text = c.next_page.name
        from webhelpers.paginate import Page as PaginationPage
        per_page = 10
        pagination = PaginationPage(pages_query, page=page_idx, items_per_page=per_page)
        c.toc_pagination_data = h.pagination_data(pagination)
        c.pages = pagination.items
        c.npages = pagination.item_count
        cur_page = pagination.page
        return render('/almanac/view.mako')

    @dispatch_on(POST='_search')
    def search(self, almanac_slug, query):
        c.almanac = h.get_almanac_by_slug(almanac_slug)
        loc = c.almanac.location_4326
        c.lng, c.lat = loc.x, loc.y
        page_idx = request.GET.get('page', 1)
        try:
            page_idx = int(page_idx)
        except ValueError:
            page_idx = 1
        c.pagination = h.setup_pagination(c.almanac.search(query), page_idx)
        c.pages = c.pagination.items
        c.npages = c.pagination.item_count
        c.query = query
        return render('/almanac/search.mako')

    def _search(self, almanac_slug, query=''):
        redirect_to(h.url_for('almanac_search', almanac_slug=almanac_slug, query=request.params.get('query','')))

    @jsonify
    def center(self, almanac_slug):
        c.almanac = h.get_almanac_by_slug(almanac_slug)
        loc = c.almanac.location
        return dict(lat=loc.x, lng=loc.y)

    def pages_kml(self, almanac_slug, query):
        c.almanac = h.get_almanac_by_slug(almanac_slug)
        if query:
            c.pages = c.almanac.search(query).all()
        else:
            c.pages = c.almanac.pages
        response.content_type = 'application/vnd.google-earth.kml+xml kml'
        return render('/page/kml.mako')
|
Python
| 0
|
@@ -1549,16 +1549,71 @@
rt desc%0A
+from webhelpers.paginate import Page as PaginationPage%0A
import c
@@ -3652,71 +3652,8 @@
me%0A%0A
- from webhelpers.paginate import Page as PaginationPage%0A
|
55d7749ca06dda5c636aced8aed3f0bd9a3d6acc
|
Print tracebacks on exception
|
commands.py
|
commands.py
|
import asyncio
import sys

client = None


class Event: #Class to store event data
    def __init__(self, f, triggerType, name, **kwargs):
        self.handler = f
        self.name=name
        self.triggerType = triggerType


triggerHandlers = {
    "\\message" : {},
    "\\messageNoBot" : {},
    "\\command" : {},
    "\\commandNotFound" : {},
    "\\channelUpdate" : {},
    "\\botException" : {},
    "\\set_root_context_on_load" : {},
    "\\timeTick" : {},
    "\\reactionChanged" : {},
    "\\messageEdit": {}
}

helpString = ""


def messageHandlerFilter(triggerFilter, filterType="eq"):
    def decorator(func):
        if filterType=="eq":
            async def handler(triggerMessage):
                if triggerMessage.content == triggerFilter:
                    await func(triggerMessage)
            return handler
        elif filterType=="contains":
            async def handler(triggerMessage):
                if triggerFilter in triggerMessage.content:
                    await func(triggerMessage)
            return handler
        elif filterType=="cqc":
            async def handler(triggerMessage):
                if triggerFilter in triggerMessage.content.lower():
                    await func(triggerMessage)
            return handler
    return decorator


commandMutexes = []


def registerEventHandler(triggerType="\\command", name=None, helpText=None, exclusivity=None, **kwargs):
    """Decorator that registers event handlers

    Stay tuned for kwargs
    """
    def decorator(f):
        if exclusivity == "global" and name is not None:
            print("Global exclusive command " + name)
            async def excluder(triggerMessage):
                global commandMutexes
                print(commandMutexes)
                if name not in commandMutexes:
                    commandMutexes.append(name)
                    print("Locked " + name)
                    try:
                        await f(triggerMessage)
                    except:
                        raise
                    finally:
                        commandMutexes.remove(name)
                        print("Unlocked " + name)
                else:
                    await triggerMessage.channel.send( "One at a time please")
            event = Event(excluder, triggerType, name, **kwargs)
        else:
            event = Event(f, triggerType, name, **kwargs)
        if triggerType in triggerHandlers:
            if helpText is not None:
                global helpString
                helpString += helpText + "\n"
            if name is not None:
                if name in triggerHandlers[triggerType]:
                    print("Duplicate command")
                triggerHandlers[triggerType].update({name : event})
            else:
                # note: a colon (not a comma) is needed here, otherwise this
                # builds a set instead of the intended name->event mapping
                triggerHandlers[triggerType].update({f.__name__: event})
        else:
            print("Invalid trigger type registered")
        return f
    return decorator


async def executeEvent(triggerType="\\command", name=None, **kwargs):
    if not triggerType in triggerHandlers:
        print("Called with invalid event type " + triggerType)
        return
    if name is not None:
        #print(triggerType + " " + name)
        try:
            if triggerType == "\\command" and not name in triggerHandlers[triggerType]:
                await executeEvent(triggerType="\\commandNotFound", name=None, **kwargs)
            else:
                await triggerHandlers[triggerType][name].handler(**kwargs)
        except:
            if sys.exc_info()[0].__name__ != "RestartException":
                print("Unexpected error:", sys.exc_info())
            else:
                raise
    else:
        for k, v in triggerHandlers[triggerType].items():
            #print("Running " + k)
            await v.handler(**kwargs)
|
Python
| 0.000001
|
@@ -20,16 +20,34 @@
ort sys%0D
+%0Aimport traceback%0D
%0A%0D%0Aclien
@@ -3799,16 +3799,74 @@
c_info()
+%5B1%5D)%0D%0A traceback.print_tb(sys.exc_info()%5B2%5D
)%0D%0A
|
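The diff above replaces the opaque exc_info tuple dump with a readable stack trace; a minimal stdlib-only sketch of the same pattern (the helper name is illustrative):

import sys
import traceback

def report_unexpected_error():
    # sys.exc_info() returns (type, value, traceback) for the active exception
    exc_type, exc_value, exc_tb = sys.exc_info()
    print("Unexpected error:", exc_value)
    # print_tb renders the traceback object as the familiar stack listing
    traceback.print_tb(exc_tb)

try:
    1 / 0
except ZeroDivisionError:
    report_unexpected_error()
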
f4f600a82d652d25cb7fe8fcc3bbea8efa49c0b8
|
update help
|
commands.py
|
commands.py
|
import requests
import random

import config


def help():
    msg = """
*Commands:*

`/js <library>`: checks if library is cool or not
`/help`: return this
"""
    return 'message', {'text': msg, 'parse_mode': 'Markdown'}


def js(string):
    if string:
        return 'message', {'text': '{} is gay'.format(string)}
    else:
        return None, None


def sadness():
    url = 'https://api.tumblr.com/v2/blog/{}/posts/photo?api_key={}'\
        .format('vaporwave.tumblr.com', config.TUMBLR_KEY)
    r = requests.get(url)
    if r.ok:
        data = r.json()
        post = random.choice(data['response']['posts'])
        text = post['summary']
        url = post['photos'][0]['original_size']['url']
        return 'photo', {'photo': url, 'caption': text}
    else:
        return None, None
|
Python
| 0
|
@@ -73,18 +73,21 @@
*Command
-s:
+ list
*%0A%0A%60/js
@@ -96,17 +96,18 @@
ibrary%3E%60
-:
+ -
checks
@@ -139,17 +139,18 @@
%0A%60/help%60
-:
+ -
return
@@ -154,16 +154,33 @@
rn this%0A
+%60/sadness%60 - cry%0A
%22%22%22%0A
|
cf1728cd472b6675ce3e253ec350972c09e7c5f6
|
fix update-ordinals.py for posterity to reflect best scripting practices
|
scripts/update-ordinals.py
|
scripts/update-ordinals.py
|
import sys
import logging

from pylons import c
from ming.orm import session
from ming.orm.ormsession import ThreadLocalORMSession

from allura import model as M

log = logging.getLogger(__name__)
handler = logging.StreamHandler(sys.stdout)
log.addHandler(handler)


def main():
    test = sys.argv[-1] == 'test'
    num_projects_examined = 0
    log.info('Examining all projects for mount order.')
    for some_projects in chunked_project_iterator({}):
        for project in some_projects:
            c.project = project
            mounts = project.ordered_mounts(include_search=True)
            # ordered_mounts() means duplicate ordinals (if any) will be next to each other
            duplicates_found = False
            prev_ordinal = None
            for mount in mounts:
                if mount['ordinal'] == prev_ordinal:
                    duplicates_found = True
                    break
                prev_ordinal = mount['ordinal']
            if duplicates_found:
                if test:
                    log.info('Would renumber mounts for project "%s".' % project.shortname)
                else:
                    log.info('Renumbering mounts for project "%s".' % project.shortname)
                    for i, mount in enumerate(mounts):
                        if 'ac' in mount:
                            mount['ac'].options['ordinal'] = i
                        elif 'sub' in mount:
                            mount['sub'].ordinal = i
                    ThreadLocalORMSession.flush_all()
            num_projects_examined += 1
            session(project).clear()
    log.info('%s projects examined.' % num_projects_examined)


PAGESIZE=1024


def chunked_project_iterator(q_project):
    '''shamelessly copied from refresh-all-repos.py'''
    page = 0
    while True:
        results = (M.Project.query
                   .find(q_project)
                   .skip(PAGESIZE*page)
                   .limit(PAGESIZE)
                   .all())
        if not results: break
        yield results
        page += 1


if __name__ == '__main__':
    main()
if __name__ == '__main__':
main()
|
Python
| 0
|
@@ -184,28 +184,42 @@
ger(
-__name__)%0Ah
+'update-ordinals')%0Alog.addH
andler
- =
+(
logg
@@ -251,31 +251,8 @@
out)
-%0Alog.addHandler(handler
)%0A%0Ad
|
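The diff renames the logger and attaches the stdout handler directly; a minimal sketch of that script-logging pattern (the logger name here is illustrative):

import logging
import sys

# Name the logger after the script rather than __name__, and attach the
# handler in one expression instead of through a temporary variable.
log = logging.getLogger('my-script')
log.addHandler(logging.StreamHandler(sys.stdout))
log.setLevel(logging.INFO)

log.info('hello from the script')
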
a9b7115f2dadf568317dfa26d5835c669133308e
|
implement builder + operator
|
simpleplotly/figure.py
|
simpleplotly/figure.py
|
# -*- coding: utf-8 -*-
import plotly.graph_objs as go
from plotly import tools

from .layout import ElementBuilder
from .plot import AtomBuilder
from .subplot import SubPlotSpec, PlotCanvas
import itertools


class FigureHolder(object):
    def __init__(self, figure):
        self.figure = figure

    def plot(self, mode='offline'):
        if mode == 'offline':
            import plotly.offline as py
            py.iplot(self.figure)
        else:
            raise ValueError('online not supported yet, please check later!')

    def update_layout(self, **kwargs):
        self.figure.layout.update(**kwargs)
        return self

    def drop_key_layout(self, key):
        if key in self.figure.layout:
            del self.figure.layout[key]
        return self


class FigureBuilder(object):
    def __init__(self, *builders):
        self.builders = list(builders)
        self.specs = [None for _ in range(len(builders))]
        self.layout = {}
        self.canvas = PlotCanvas()

    def __add__(self,fig_builder,default_layout='blank'):
        new_fig_builder = FigureBuilder()
        new_fig_builder.builders.extend(self.builders)
        new_fig_builder.builders.extend(fig_builder.builders)
        if default_layout=='left':
            new_fig_builder.layout=self.layout
        elif default_layout=='right':
            new_fig_builder.layout=fig_builder.layout
        else:
            new_fig_builder.layout={}
        return new_fig_builder

    def add(self, builder, row=None, col=None, row_span=1, col_span=1):
        if isinstance(builder, AtomBuilder):
            self.builders.append(builder)
            spec = self._validated_spec(row, col, row_span, col_span)
            if spec is not None:
                self.canvas.occupy_area(spec)
            self.specs.append(spec)
        elif isinstance(builder, ElementBuilder):
            builder(self.layout)
        else:
            raise ValueError('The type of builder is {} which is not known'.format(type(builder)))
        return self

    @staticmethod
    def _validated_spec(row, col, row_span, col_span):
        if row is None or col is None:
            return None
        else:
            return SubPlotSpec(row, col, row_span, col_span)

    def update_layout(self, **kwargs):
        self.layout.update(kwargs)
        return self

    def build(self):
        data = [b.data for b in self.builders]
        layout = {}
        for b in self.builders:
            layout.update(b.layout)
        layout.update(self.layout)
        return FigureHolder(go.Figure(data=data, layout=layout))

    def plot(self):
        self.build().plot()

    def build_subplot(self, print_grid=True, **kwargs):
        fig = tools.make_subplots(
            rows=self.canvas.max_rows, cols=self.canvas.max_cols,
            specs=self.canvas.make_plotly_spec(),
            print_grid=print_grid, **kwargs)
        for idx, builder in enumerate(self.builders):
            spec = self.specs[idx]
            fig.append_trace(builder.data, spec.r, spec.c)
        holder = FigureHolder(go.Figure(data=fig.data, layout=fig.layout))
        holder.update_layout(**self.layout)
        holder.drop_key_layout('xaxis').drop_key_layout('yaxis')
        return holder

    def subplot(self, row=None, col=None, print_grid=True, **kwargs):
        if col is not None and row is not None:
            self.specs = []
            self.canvas = PlotCanvas()
            for row, col in itertools.product(range(1, row + 1), range(1, col + 1)):
                spec = self._validated_spec(row, col, row_span=1, col_span=1)
                if spec is not None:
                    self.canvas.occupy_area(spec)
                self.specs.append(spec)
        self.build_subplot(print_grid=print_grid, **kwargs).plot()
|
Python
| 0.000004
|
@@ -1043,24 +1043,46 @@
t='blank'):%0A
+ # new builder%0A
new_
|
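The new __add__ concatenates the two builders' lists; since the a + b syntax always passes exactly one operand, the extra default_layout parameter is reachable only through an explicit __add__ call. A toy sketch of that shape:

class Box:
    def __init__(self, items=None):
        self.items = list(items or [])

    def __add__(self, other):
        # The a + b syntax supplies exactly one operand; extra parameters on
        # __add__ (like default_layout above) can only be set by calling
        # __add__ explicitly, e.g. a.__add__(b, default_layout='left').
        return Box(self.items + other.items)

print((Box([1]) + Box([2])).items)  # [1, 2]
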
0c5daf77caaa6674a011b94ac06c308dbb430cc5
|
add PDF and TXT mimetypes
|
composer.py
|
composer.py
|
#!/usr/bin/python

from types import *

import db
import json


class Composer():
    def __init__(self, output_format, db):
        self.output_format = output_format
        self.db = db

    def compose(self, content, path):
        if self.output_format == "JSON":
            if type(content) is ListType: # directory list, DIR
                return ("application/json; charset=UTF-8", json.dumps(content))
            else:
                return ("application/json; charset=UTF-8", json.dumps({
                    "content": content,
                    "last_modified": self.db.get_last_modified(path.split("/")),
                }))
        else:
            if type(content) is ListType: # directory list, DIR
                ret = "<HTML><BODY><UL>"
                for entry in content:
                    if entry["type"] == db.Db.DIR:
                        name = entry["name"] + "/"
                    else:
                        name = entry["name"]
                    if path[-1] != "/":
                        path = path + "/"
                    link = "%s%s" % (path, entry["name"])
                    ret = ret + "<LI><A HREF='%s'>%s</A>" % (link, name)
                ret = ret + "</UL></BODY></HTML>"
                content_type = 'text/html'
            # TODO: Use magic lib to tell the MIME type.
            elif content.startswith('\xff\xd8\xff\xe0\x00\x10\x4a\x46\x49\x46'):
                content_type = 'image/jpeg'
                ret = content
            elif content.startswith('\x00\x00\x00\x20\x66\x74\x79\x70\x69'):
                content_type = 'video/mp4'
                ret = content
            elif content.startswith('\x52\x49\x46\x46'):
                content_type = 'video/avi'
                ret = content
            elif content.startswith('\x89\x50\x4e\x47\x0d\x0a'):
                content_type = 'image/png'
                ret = content
            elif path.endswith('.css'):
                content_type = 'text/css'
                ret = content
            else:
                content_type = 'text/html'
                ret = content
        return (content_type, ret)
|
Python
| 0
|
@@ -1643,47 +1643,238 @@
h('.
-css'):%0A content_type = 'text/css
+txt'):%0A content_type = 'text/plain'%0A ret = content%0A elif path.endswith('.css'):%0A content_type = 'text/css'%0A ret = content%0A elif path.endswith('.pdf'):%0A content_type = 'application/x-pdf
'%0A
|
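The hand-written extension table this diff extends can also be served by the standard-library mimetypes module; a sketch (note it reports application/pdf, not the application/x-pdf chosen above):

import mimetypes

# guess_type maps common extensions to MIME types without hand-written tables;
# it returns a (content_type, encoding) pair.
for path in ('notes.txt', 'style.css', 'report.pdf'):
    content_type, _encoding = mimetypes.guess_type(path)
    print(path, '->', content_type)
# notes.txt -> text/plain, style.css -> text/css, report.pdf -> application/pdf
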
e9cb40d1c044aa48beb221686df3cbfb47524a72
|
Remove unused import
|
statirator/blog/views.py
|
statirator/blog/views.py
|
from __future__ import absolute_import

from django.db.models import Count
from django.views.generic.detail import DetailView
from django.views.generic.list import ListView
from django.views.generic.base import TemplateView

from .models import Post, I18NTag


class PostView(DetailView):
    models = Post

    def get_queryset(self):
        qs = Post.objects.filter(language=self.request.LANGUAGE_CODE)
        return qs


class ArchiveView(ListView):
    models = Post

    def get_queryset(self):
        qs = Post.objects.filter(language=self.request.LANGUAGE_CODE,
                                 is_published=True).order_by('-pubdate')
        return qs


class TagView(DetailView):
    model = I18NTag

    def get_object(self):
        return I18NTag.objects.get(language=self.request.LANGUAGE_CODE,
                                   slug_no_locale=self.kwargs['slug'])

    def get_context_data(self, **kwargs):
        ctx = super(TagView, self).get_context_data(**kwargs)
        tag = ctx['object']
        ctx['posts'] = Post.objects.filter(
            language=self.request.LANGUAGE_CODE,
            is_published=True,
            tags__slug__in=[tag.slug]).order_by('-pubdate')
        return ctx


class TagsView(TemplateView):
    template_name = 'blog/i18ntag_list.html'
|
Python
| 0.000001
|
@@ -37,43 +37,8 @@
rt%0A%0A
-from django.db.models import Count%0A
from
|
c767ec0308d61c73b41bf20f9c5bf069238a04e4
|
Change the iteration state to make things slightly more readable.
|
src/python/Problem066.py
|
src/python/Problem066.py
|
'''
Created on Feb 21, 2012

@author: mchrzanowski
'''

from math import sqrt
from Problem064 import getQuotients
from Problem065 import findRationalApproximation
from time import time

LIMIT = 10 ** 3


def getConvergentPair(i, difference=1):
    ''' a generator that will return iterations of increasingly-precise numerator,denominator tuples
    of sqrt(i) '''

    quotientList = getQuotients(i)  # get the quotient list. this method will only return up to one full period.
    # we might need more to get convergence, though.

    convergentState = 0  # expansion of the quotient list causes some state to be reset.
    # save state because we don't want to send convergents that we've already sent

    while True:
        quotientList.extend(quotientList[1:])  # here, we just double the number of periods in the list. The first value is the integer portion.
        numerators, denominators = findRationalApproximation(quotientList)  # indexed numerator, denominator dicts
        for j in xrange(convergentState, len(numerators) - 1):  # these dicts are weirdly indexed; they begin at -1.
            if numerators[j] ** 2 - i * denominators[j] ** 2 == difference:
                yield numerators[j], denominators[j]
            convergentState = j + 1


def main():
    '''
    http://en.wikipedia.org/wiki/Pell's_equation
    here we use two methods from previous problems:
    Problem064 dealt with getting the list of quotients for use in the continued fraction
    Problem065 then dealt with constructing the continued fraction from these quotients
    '''
    start = time()
    maxD = 0
    maxX = 0
    for i in xrange(1, LIMIT + 1):
        if sqrt(i).is_integer(): continue  # skip perfect squares.
        numerator, denominator = getConvergentPair(i).next()  # first pair will do as we want minimal solutions.
        if numerator > maxX:
            maxX = numerator
            maxD = i
    print "D <=", LIMIT, "that maximizes x:", maxD
    end = time()
    print "Runtime: ", end - start, " seconds."


if __name__ == '__main__':
    main()
|
Python
| 0.000002
|
@@ -591,18 +591,18 @@
State =
-0
+-1
@@ -1140,16 +1140,20 @@
entState
+ + 1
, len(nu
@@ -1400,20 +1400,16 @@
tate = j
- + 1
%0A%0Adef ma
@@ -1719,27 +1719,8 @@
'''%0A
- start = time()%0A
@@ -2165,24 +2165,81 @@
, maxD%0A %0A
+if __name__ == '__main__':%0A start = time()%0A main()%0A
end = ti
@@ -2295,42 +2295,4 @@
s.%22%0A
-%0Aif __name__ == '__main__':%0A main()
|
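For reference, a self-contained Python 3 sketch of the same idea: walk the continued-fraction convergents h/k of sqrt(n) until h^2 - n*k^2 == 1 (standard recurrences, no external modules):

from math import isqrt

def pell_fundamental_solution(n):
    # Continued-fraction expansion of sqrt(n); convergents h/k follow the
    # recurrences h_m = a_m*h_{m-1} + h_{m-2} and k_m = a_m*k_{m-1} + k_{m-2}.
    a0 = isqrt(n)
    if a0 * a0 == n:
        raise ValueError("n must not be a perfect square")
    m, d, a = 0, 1, a0
    h_prev, h = 1, a0
    k_prev, k = 0, 1
    while h * h - n * k * k != 1:
        m = d * a - m
        d = (n - m * m) // d
        a = (a0 + m) // d
        h, h_prev = a * h + h_prev, h
        k, k_prev = a * k + k_prev, k
    return h, k

print(pell_fundamental_solution(61))  # (1766319049, 226153980)
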
ae95e372fd625c5fb78750675ff2eca2c8b41a82
|
Fix due date bug
|
library.py
|
library.py
|
# coding=utf-8
"""
GOAL: Use object-oriented Python to model a public library (w/ three classes:
Library, Shelf, & Book). The library should be aware of a number of shelves.
Each shelf should know what books it contains. Make the book object have
"enshelf" and "unshelf" methods that control what shelf the book is sitting on.
The library should have a method to report all books it contains. Note:
this should *not* be a Django (or any other) app - just a single file with
three classes (pls commands at the bottom showing it works) is all that is
needed. In addition to pushing this Python file to your Github account, please
also setup a repl.it account and enter the saved URL.
"""
import copy
import datetime


def set_due_date(days=0, weeks=2):
    """Set a due date based on time delta in days or weeks from today."""
    today = datetime.date.today()
    due_date = today + datetime.timedelta(days, weeks)
    return due_date


class Library(object):
    """A class for a library."""

    def __init__(self, name):
        self.name = name
        self.shelves = {}
        # self.unshelved = {}
        # self.checked_out = {}

    def add_shelf(self, name):
        shelf = Shelf(name, self)
        self.shelves[name] = shelf
        return shelf

    def remove_shelf(self, shelf):
        del self.shelves[shelf]

    def report_books(self):
        """Report all books in library."""
        for shelf in self.shelves.itervalues():
            print("\n{shelf}".format(shelf=shelf.name))
            for book in shelf.books.itervalues():
                print(
                    "{:^20} | {:^20} | {:^10} | {}"
                    .format(book.book_id, book.author, book.status, book.due)
                )


class Shelf(object):
    """A class for shelves in a library."""

    def __init__(self, name, library=None):
        self.name = name
        self.library = library
        self.books = {}


class Book(object):
    """A class for books."""

    def __init__(self, title="", author="", copy=1, due=None, shelf=None,
                 **kwargs):
        self.title = title
        self.author = author
        self.copy = copy
        self.status = "Checked In"
        self.due = due
        self.shelf = shelf
        self.last_shelf = shelf
        self.details = kwargs

    @property
    def book_id(self):
        return (
            "{title} c.{copy}".format(title=self.title, copy=self.copy)
        )

    @property
    def status(self):
        today = datetime.date.today()
        if self.due and self.due < today:
            return "Overdue"
        else:
            return self._status

    @status.setter
    def status(self, status):
        self._status = status

    def enshelf(self, shelf):
        """Add a book to a shelf."""
        if self.shelf == shelf:
            print("Book already in shelf.")
        elif self.book_id in shelf.books:
            print("The same copy is already on the shelf!")
        else:
            self.unshelf()
            self.shelf = shelf
            shelf.books[self.book_id] = self

    def unshelf(self):
        """Remove a book from its shelf."""
        if self.shelf:
            self.shelf.books.pop(self.book_id)
            self.last_shelf = self.shelf
            self.shelf = None

    def reshelf(self):
        """Add a book to its last visited shelf."""
        if self.last_shelf:
            self.enshelf(self.last_shelf)
        else:
            print("Cannot reshelf: no previous shelf on record.")

    def check_out(self, days=0, weeks=2):
        """Check out book from library."""
        self.enshelf(checked_out)
        self.status = "Checked Out"
        self.due = set_due_date(days=days, weeks=weeks)

    def check_in(self):
        """Check in book to library."""
        self.reshelf()
        self.status = "Checked In"
        self.due = None

    def latest_copy(self, search_lib=None):
        """Find the latest copy of a book."""
        library_to_search = search_lib or self.shelf.library
        latest_copy = 1
        for shelf in library_to_search.shelves.values():
            for book in shelf.books.values():
                if book.title == self.title and book.copy > latest_copy:
                    latest_copy = book.copy
        return latest_copy

    def add_copies(self, copy_num=1):
        """Add a given number of book copies to same location."""
        for x in range(copy_num):
            new_book_copy = copy.deepcopy(self)
            new_book_copy.shelf = None
            new_book_copy.copy = self.latest_copy() + 1
            new_book_copy.enshelf(self.shelf)
        return new_book_copy


library = Library("Lake City Public Library")
checked_out = library.add_shelf("Checked Out")
shelf1 = library.add_shelf("Shelf 1")
shelf2 = library.add_shelf("Shelf 2")
shelf3 = library.add_shelf("Shelf 3")

book1 = Book(
    title="The Scar",
    author="Virginia Wolfe",
    call_num="332.024 R3903o 2015",
    ISBN=9781591847557,
    page_count=211
)
book2 = Book(
    title="Winning",
    author="Charlie Sheen",
    call_num="364.989 R3903o 2010",
    ISBN=9783434147557,
    page_count=150
)
book3 = Book(
    title="Wounded",
    author="Jim Bush",
    call_num="334.053 R3903o 2014",
    ISBN=9781593434427,
    page_count=100
)

book1.enshelf(shelf1)
book2.enshelf(shelf2)
book3.enshelf(shelf3)
|
Python
| 0.000024
|
@@ -904,26 +904,37 @@
medelta(days
-,
+=days, weeks=
weeks)%0A r
|
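The bug being fixed is positional-argument confusion: timedelta's second positional parameter is seconds, so timedelta(days, weeks) silently treats weeks as seconds. A quick demonstration:

import datetime

# timedelta's positional order is (days, seconds, microseconds, ...),
# so timedelta(0, 2) is 2 *seconds*, not 2 weeks.
print(datetime.timedelta(0, 2))             # 0:00:02
print(datetime.timedelta(days=0, weeks=2))  # 14 days, 0:00:00
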
4532880091643d48bfb9df2a0a028d80c7db6864
|
Use same metavar for --scripts in up and provision
|
cloudenvy/commands/up.py
|
cloudenvy/commands/up.py
|
import logging

from cloudenvy import exceptions
import cloudenvy.envy


class Up(cloudenvy.envy.Command):

    def _build_subparser(self, subparsers):
        help_str = 'Create and optionally provision an ENVy.'
        subparser = subparsers.add_parser('up', help=help_str,
                                          description=help_str)
        subparser.set_defaults(func=self.run)
        subparser.add_argument('-n', '--name', action='store', default='',
                               help='Specify custom name for an ENVy.')
        subparser.add_argument('-s', '--scripts', default=None, nargs='*',
                               help='Override provision_script_paths option '
                                    'in project config.')
        subparser.add_argument('--no-files', action='store_true',
                               help='Prevent files from being uploaded')
        subparser.add_argument('--no-provision', action='store_true',
                               help='Prevent provision scripts from running.')
        return subparser

    def run(self, config, args):
        envy = cloudenvy.envy.Envy(config)

        if not envy.server():
            logging.info('Triggering ENVy boot.')
            try:
                envy.build_server()
            except exceptions.ImageNotFound:
                logging.error('Could not find image.')
                return
            except exceptions.NoIPsAvailable:
                logging.error('Could not find available IP.')
                return

        if not args.no_files:
            self.commands['files'].run(config, args)

        if not args.no_provision \
                and (envy.project_config.get("auto_provision", True) \
                and 'provision_scripts' in envy.project_config):
            try:
                self.commands['provision'].run(config, args)
            except SystemExit:
                raise SystemExit('You have not specified any provision '
                                 'scripts in your Envyfile. '
                                 'If you would like to run your ENVy '
                                 'without a provision script; use the '
                                 '`--no-provision` command line flag.')

        if envy.ip():
            print envy.ip()
        else:
            logging.error('Could not determine IP.')
|
Python
| 0
|
@@ -577,16 +577,74 @@
cripts',
+ nargs='*', metavar='PATH',%0A
default
@@ -649,27 +649,16 @@
lt=None,
- nargs='*',
%0A
|
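For reference, a minimal argparse sketch of the flag as the diff leaves it, with nargs='*' and the shared PATH metavar:

import argparse

parser = argparse.ArgumentParser(prog='envy')
parser.add_argument('-s', '--scripts', nargs='*', metavar='PATH', default=None,
                    help='Override provision_script_paths option in project config.')
args = parser.parse_args(['--scripts', 'a.sh', 'b.sh'])
print(args.scripts)  # ['a.sh', 'b.sh']
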
d6b51c610ac6002217785107858eb4f749e6367b
|
Fix bug in project get_yaml method
|
clowder/model/project.py
|
clowder/model/project.py
|
"""Model representation of clowder.yaml project"""
import os
from clowder.utility.print_utilities import (
print_project_status,
print_validation,
print_verbose_status
)
from clowder.utility.git_utilities import (
git_groom,
git_stash,
git_is_dirty
)
from clowder.utility.git_utilities import (
git_clone_url_at_path,
git_current_sha,
git_herd,
git_herd_version,
git_validate_repo_state
)
class Project(object):
"""Model class for clowder.yaml project"""
def __init__(self, root_directory, project, defaults, sources):
self.root_directory = root_directory
self.name = project['name']
self.path = project['path']
if 'ref' in project:
self.ref = project['ref']
else:
self.ref = defaults['ref']
if 'remote' in project:
self.remote_name = project['remote']
else:
self.remote_name = defaults['remote']
if 'source' in project:
source_name = project['source']
else:
source_name = defaults['source']
for source in sources:
if source.name == source_name:
self.source = source
self.url = self.source.get_url_prefix() + self.name + ".git"
def full_path(self):
"""Return full path to project"""
return os.path.join(self.root_directory, self.path)
def get_yaml(self):
"""Return python object representation for saving yaml"""
return {'name': self.name,
'path': self.path,
'ref': git_current_sha(self.full_path()),
'remote': self.source.name}
def groom(self):
"""Discard changes for project"""
if self.is_dirty():
self._print_status()
git_groom(self.full_path())
def herd(self):
"""Clone project or update latest from upstream"""
self._print_status()
git_herd(self.full_path(), self.ref, self.remote_name, self.url)
def herd_version(self, version):
"""Check out fixed version of project"""
self._print_status()
if not os.path.isdir(os.path.join(self.full_path(), '.git')):
git_clone_url_at_path(self.url, self.full_path(), self.ref, self.remote_name)
git_herd_version(self.full_path(), version, self.ref)
def is_dirty(self):
"""Check if project is dirty"""
return git_is_dirty(self.full_path())
def meow(self):
"""Print status for project"""
self._print_status()
def meow_verbose(self):
"""Print verbose status for project"""
self._print_status()
print_verbose_status(self.full_path())
def stash(self):
"""Stash changes for project if dirty"""
if self.is_dirty():
self._print_status()
git_stash(self.full_path())
def is_valid(self):
"""Validate status of project"""
return git_validate_repo_state(self.full_path())
def _print_status(self):
"""Print formatted project status"""
print_project_status(self.root_directory, self.path, self.name)
def print_validation(self):
"""Print validation message for project"""
if not self.is_valid():
self._print_status()
print_validation(self.full_path())
|
Python
| 0
|
@@ -1641,16 +1641,60 @@
remote':
+ self.remote_name,%0A 'source':
self.so
|
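Per the diff, the fix adds the missing source entry (and keeps remote pointing at remote_name); the corrected method body, assuming the Project class above:

    def get_yaml(self):
        """Return python object representation for saving yaml"""
        return {'name': self.name,
                'path': self.path,
                'ref': git_current_sha(self.full_path()),
                'remote': self.remote_name,
                'source': self.source.name}
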
e8055b821cc534b10964f670d3b41af594bd19cf
|
Fix for makefile with coverage
|
setupext/build_makefile.py
|
setupext/build_makefile.py
|
# -*- coding: utf-8 -*-
from __future__ import print_function

import os
from setuptools.command.build_ext import build_ext

# This setup target constructs a prototype Makefile suitable for compiling
# the _jpype extension module. It is intended to help with development
# of the extension library on unix systems.
#
# To create a Makefile use
#   python setup.py build_makefile
#
# Then edit with the desired options


class FeatureNotice(Warning):
    """ indicate notices about features """


class Makefile(object):
    def __init__(self, actual):
        self.actual = actual
        self.compile_command = None
        self.compile_pre = None
        self.compile_post = None
        self.objects = []
        self.sources = []

    def captureCompile(self, x):
        command = x[0]
        x = x[1:]
        includes = [i for i in x if i.startswith("-I")]
        x = [i for i in x if not i.startswith("-I")]
        i0 = None
        i1 = None
        for i, v in enumerate(x):
            if v == '-c':
                i1 = i
            elif v == '-o':
                i0 = i
        pre = set(x[:i1])
        post = x[i0+2:]
        self.compile_command = command
        self.compile_pre = pre
        self.compile_post = post
        self.includes = includes
        self.sources.append(x[i1+1])

    def captureLink(self, x):
        self.link_command = x[0]
        x = x[1:]
        self.library = x[-1]
        print(x[-3:])
        x = x[:-3]
        self.objects = [i for i in x if i.endswith(".o")]
        self.link_options = [i for i in x if not i.endswith(".o")]
        u = self.objects[0].split("/")
        self.build_dir = "/".join(u[:2])

    def compile(self, *args, **kwargs):
        self.actual.spawn = self.captureCompile
        return self.actual.compile(*args, **kwargs)

    def link_shared_object(self, *args, **kwargs):
        self.actual.spawn = self.captureLink
        return self.actual.link_shared_object(*args, **kwargs)

    def detect_language(self, x):
        return self.actual.detect_language(x)

    def write(self):
        library = os.path.basename(self.library)
        link_command = self.link_command
        compile_command = self.compile_command
        compile_pre = " ".join(list(self.compile_pre))
        compile_post = " ".join(list(self.compile_post))
        build = self.build_dir
        link_flags = " ".join(self.link_options)
        includes = " ".join(self.includes)
        sources = " \\\n ".join(self.sources)
        with open("Makefile", "w") as fd:
            print("LIB = %s" % library, file=fd)
            print("CC = %s" % compile_command, file=fd)
            print("LINK = %s" % link_command, file=fd)
            print("CFLAGS = %s %s" % (compile_pre, compile_post), file=fd)
            print("INCLUDES = %s" % includes, file=fd)
            print("BUILD = %s" % build, file=fd)
            print("LINKFLAGS = %s" % link_flags, file=fd)
            print("SRCS = %s" % sources, file=fd)
            print("""
all: $(LIB)

rwildcard=$(foreach d,$(wildcard $(1:=/*)),$(call rwildcard,$d,$2) $(filter $(subst *,%,$2),$d))

build/src/jp_thunk.cpp: $(call rwildcard,native/java,*.java)
	python setup.py build_thunk

DEPDIR = build/deps
$(DEPDIR): ; @mkdir -p $@

DEPFILES := $(SRCS:%.cpp=$(DEPDIR)/%.d)

deps: $(DEPFILES)

%/:
	echo $@

$(DEPDIR)/%.d: %.cpp
	mkdir -p $(dir $@)
	$(CC) $(INCLUDES) -MT $(patsubst $(DEPDIR)%,'$$(BUILD)%',$(patsubst %.d,%.o,$@)) -MM $< -o $@

OBJS = $(addprefix $(BUILD)/, $(SRCS:.cpp=.o))

$(BUILD)/%.o: %.cpp
	mkdir -p $(dir $@)
	$(CC) $(CFLAGS) $(INCLUDES) -c $< -o $@

$(LIB): $(OBJS)
	$(LINK) $(LINKFLAGS) $(OBJS) -ldl -o $@

-include $(DEPFILES)
""", file=fd)


# Customization of the build_ext
class BuildMakefileCommand(build_ext):
    """
    Override some behavior in extension building:

    1. handle compiler flags for different compilers via a dictionary.
    2. try to disable warning -Wstrict-prototypes is valid for C/ObjC but not for C++
    """

    # extra compile args
    copt = {'msvc': [],
            'unix': ['-ggdb', ],
            'mingw32': [],
            }

    # extra link args
    lopt = {
        'msvc': [],
        'unix': [],
        'mingw32': [],
    }

    def initialize_options(self, *args):
        """omit -Wstrict-prototypes from CFLAGS since it's only valid for C code."""
        import distutils.sysconfig
        cfg_vars = distutils.sysconfig.get_config_vars()
        replacement = {
            '-Wstrict-prototypes': '',
            '-Wimplicit-function-declaration': '',
        }
        replacement['-O3'] = '-O0'

        for k, v in cfg_vars.items():
            if not isinstance(v, str):
                continue
            if not k == "OPT" and not "FLAGS" in k:
                continue
            for r, t in replacement.items():
                if v.find(r) != -1:
                    v = v.replace(r, t)
            cfg_vars[k] = v
        build_ext.initialize_options(self)

    def _set_cflags(self):
        # set compiler flags
        c = self.compiler.compiler_type
        if c == 'unix' and self.distribution.enable_coverage:
            self.extensions[0].extra_compile_args.extend(
                ['-O0', '--coverage', '-ftest-coverage'])
            self.extensions[0].extra_link_args.extend(['--coverage'])
        if c in self.copt:
            for e in self.extensions:
                e.extra_compile_args.extend(self.copt[c])
        if c in self.lopt:
            for e in self.extensions:
                e.extra_link_args.extend(self.lopt[c])

    def build_extensions(self):
        # We need to create the thunk code
        self.run_command("build_java")
        self.run_command("build_thunk")

        jpypeLib = self.extensions[0]
        tracing = self.distribution.enable_tracing
        self._set_cflags()
        if tracing:
            jpypeLib.define_macros.append(('JP_TRACING_ENABLE', 1))
        coverage = self.distribution.enable_coverage
        if coverage:
            jpypeLib.define_macros.append(('JP_INSTRUMENTATION', 1))

        self.compiler = Makefile(self.compiler)
        # has to be last call
        build_ext.build_extensions(self)
        self.compiler.write()

    def __init__(self, *args):
        build_ext.__init__(self, *args)
|
Python
| 0
|
@@ -1380,32 +1380,58 @@
x = x%5B1:%5D%0A
+ i = x.index(%22-o%22)%0A
self.lib
@@ -1439,17 +1439,18 @@
ary = x%5B
--
+i+
1%5D%0A
@@ -1456,39 +1456,32 @@
-print(x%5B-3:%5D)%0A x = x%5B:-3
+del x%5Bi%5D%0A del x%5Bi
%5D%0A
|
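The diff replaces captureLink's fragile x[-3:] slicing with an explicit search for the -o flag; a stand-alone sketch of that technique:

def extract_output(argv):
    # Find '-o <path>' anywhere in a link command line instead of assuming
    # the output path sits at a fixed position from the end.
    args = list(argv)
    i = args.index('-o')
    output = args[i + 1]
    del args[i]  # drop '-o'
    del args[i]  # drop the path, which shifted into slot i
    return output, args

out, rest = extract_output(['cc', '-shared', 'a.o', 'b.o', '-o', 'lib.so'])
print(out, rest)  # lib.so ['cc', '-shared', 'a.o', 'b.o']
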
da3c550bfd935fcdf18c04c021b3ff0cd33e13b3
|
Fix show_launcher logic
|
sugar/activity/bundle.py
|
sugar/activity/bundle.py
|
import logging
import os
from ConfigParser import ConfigParser


class Bundle:
	"""Info about an activity bundle. Wraps the activity.info file."""
	def __init__(self, path):
		self._name = None
		self._icon = None
		self._service_name = None
		self._show_launcher = False
		self._valid = True
		self._path = path
		self._activity_version = 0

		info_path = os.path.join(path, 'activity', 'activity.info')
		if os.path.isfile(info_path):
			self._parse_info(info_path)
		else:
			self._valid = False

	def _parse_info(self, info_path):
		cp = ConfigParser()
		cp.read([info_path])

		section = 'Activity'

		if cp.has_option(section, 'service_name'):
			self._service_name = cp.get(section, 'service_name')
		else:
			self._valid = False
			logging.error('%s must specify a service name' % self._path)

		if cp.has_option(section, 'name'):
			self._name = cp.get(section, 'name')
		else:
			self._valid = False
			logging.error('%s must specify a name' % self._path)

		if cp.has_option(section, 'exec'):
			self._exec = cp.get(section, 'exec')
		else:
			self._valid = False
			logging.error('%s must specify an exec' % self._path)

		if cp.has_option(section, 'show_launcher'):
			if cp.get(section, 'show_launcher') == 'yes':
				self._show_launcher = True

		if cp.has_option(section, 'icon'):
			self._icon = cp.get(section, 'icon')

		if cp.has_option(section, 'activity_version'):
			self._activity_version = int(cp.get(section, 'activity_version'))

	def is_valid(self):
		return self._valid

	def get_path(self):
		"""Get the activity bundle path."""
		return self._path

	def get_name(self):
		"""Get the activity user visible name."""
		return self._name

	def get_service_name(self):
		"""Get the activity service name"""
		return self._service_name

	def get_icon(self):
		"""Get the activity icon name"""
		return self._icon

	def get_activity_version(self):
		"""Get the activity version"""
		return self._activity_version

	def get_exec(self):
		"""Get the command to execute to launch the activity factory"""
		return self._exec

	def get_show_launcher(self):
		"""Get whether there should be a visible launcher for the activity"""
		return self._show_launcher

	# Compatibility with the old activity registry, remove after BTest-1
	def get_id(self):
		return self._service_name
|
Python
| 0.000002
|
@@ -1219,11 +1219,10 @@
== '
-yes
+no
':%0A%09
@@ -1246,19 +1246,20 @@
ncher =
-Tru
+Fals
e%0A%0A%09%09if
|
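Hand-comparing 'yes'/'no' strings is what this commit adjusts; in Python 3's configparser the coercion can be delegated to getboolean, a sketch (the record itself targets the Python 2 ConfigParser):

from configparser import ConfigParser  # Python 3 spelling of ConfigParser

cp = ConfigParser()
cp.read_string("[Activity]\nshow_launcher = no\n")

# getboolean accepts yes/no, on/off, true/false, 1/0 and falls back
# to a default when the option is missing.
show_launcher = cp.getboolean('Activity', 'show_launcher', fallback=True)
print(show_launcher)  # False
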
a21c0ec3ee4518f9072fbc5181fb419b1973afd3
|
Fix style #373
|
shinken/reactionnerlink.py
|
shinken/reactionnerlink.py
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-

# Copyright (C) 2009-2012 :
#     Gabes Jean, naparuba@gmail.com
#     Gerhard Lausser, Gerhard.Lausser@consol.de
#     Gregory Starck, g.starck@gmail.com
#     Hartmut Goebel, h.goebel@goebel-consult.de
#
# This file is part of Shinken.
#
# Shinken is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Shinken is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with Shinken.  If not, see <http://www.gnu.org/licenses/>.

from shinken.satellitelink import SatelliteLink, SatelliteLinks
from shinken.property import BoolProp, IntegerProp, StringProp, ListProp


class ReactionnerLink(SatelliteLink):
    """Please Add a Docstring to describe the class here"""

    id = 0
    my_type = 'reactionner'
    properties = SatelliteLink.properties.copy()
    properties.update({
        'reactionner_name': StringProp(fill_brok=['full_status'], to_send=True),
        'port': IntegerProp(default='7769', fill_brok=['full_status']),
        'passive' : BoolProp(default='0', fill_brok=['full_status'], to_send=True),
        'min_workers': IntegerProp(default='1', fill_brok=['full_status'], to_send=True),
        'max_workers': IntegerProp(default='30', fill_brok=['full_status'], to_send=True),
        'processes_by_worker': IntegerProp(default='256', fill_brok=['full_status'], to_send=True),
        'reactionner_tags': ListProp(default='None', to_send=True),
    })

    def get_name(self):
        return self.reactionner_name

    def register_to_my_realm(self):
        self.realm.reactionners.append(self)


class ReactionnerLinks(SatelliteLinks):#(Items):
    """Please Add a Docstring to describe the class here"""

    name_property = "reactionner_name"
    inner_class = ReactionnerLink
|
Python
| 0
|
@@ -1164,18 +1164,9 @@
%22%22%22%0A
- %0A
%0A
+
@@ -1353,17 +1353,16 @@
d=True),
-
%0A
@@ -1455,18 +1455,18 @@
passive'
-
:
+
@@ -1901,21 +1901,17 @@
%0A %7D)%0A
-
%0A
+
def
@@ -1964,17 +1964,16 @@
r_name%0A%0A
-%0A
def
@@ -2047,17 +2047,16 @@
self)%0A%0A%0A
-%0A
class Re
@@ -2156,20 +2156,16 @@
here%22%22%22%0A
-
%0A nam
|
8d59abab151cbdecb623b7b184dade6193144497
|
Store Group enums as strings in Database (internally still as enums)
|
sigma_core/models/group.py
|
sigma_core/models/group.py
|
from django.db import models


class Group(models.Model):
    class Meta:
        pass

    VIS_PUBLIC = 0
    VIS_PRIVATE = 1
    VISIBILITY_CHOICES = (
        (VIS_PUBLIC, 'PUBLIC'),
        (VIS_PRIVATE, 'PRIVATE')
    )

    MEMBER_ANYONE = 0
    MEMBER_REQUEST = 1
    MEMBER_INVITATION = 2
    MEMBERSHIP_CHOICES = (
        (MEMBER_ANYONE, 'ANYONE'),
        (MEMBER_REQUEST, 'REQUEST'),
        (MEMBER_INVITATION, 'INVITATION')
    )

    VALID_ADMINS = 0
    VALID_MEMBERS = 1
    VALIDATION_CHOICES = (
        (VALID_ADMINS, 'ADMINS'),
        (VALID_MEMBERS, 'MEMBERS')
    )

    TYPE_BASIC = 0
    TYPE_CURSUS = 1
    TYPE_ASSO = 2
    TYPE_PROMO = 3
    TYPE_SCHOOL = 4
    TYPE_CHOICES = (
        (TYPE_BASIC, 'BASIC'),
        (TYPE_CURSUS, 'CURSUS/DEPARTMENT'),
        (TYPE_ASSO, 'ASSOCIATION'),
        (TYPE_PROMO, 'PROMOTION'),
        (TYPE_SCHOOL, 'SCHOOL')
    )

    name = models.CharField(max_length=254)
    visibility = models.SmallIntegerField(choices=VISIBILITY_CHOICES, default=VIS_PRIVATE)
    membership_policy = models.SmallIntegerField(choices=MEMBERSHIP_CHOICES, default=MEMBER_INVITATION)
    validation_policy = models.SmallIntegerField(choices=VALIDATION_CHOICES, default=VALID_ADMINS)
    type = models.SmallIntegerField(choices=TYPE_CHOICES, default=TYPE_BASIC)

    def __str__(self):
        return "%s (%s)" % (self.name, self.get_type_display())
|
Python
| 0
|
@@ -100,31 +100,63 @@
LIC
-= 0%0A VIS_PRIVATE = 1
+ = 'public'%0A VIS_PRIVATE = 'private'
%0A
@@ -201,22 +201,40 @@
UBLIC, '
-PUBLIC
+Anyone can see the group
'),%0A
@@ -252,23 +252,36 @@
IVATE, '
-PRIVATE
+Group is not visible
')%0A )
@@ -300,19 +300,32 @@
_ANYONE
-= 0
+ = 'anyone'
%0A MEM
@@ -336,19 +336,32 @@
REQUEST
-= 1
+ = 'request'
%0A MEM
@@ -375,19 +375,37 @@
ITATION
-= 2
+ = 'upon_invitation'
%0A MEM
@@ -450,21 +450,40 @@
YONE, 'A
-NYONE
+nyone can join the group
'),%0A
@@ -508,58 +508,116 @@
T, '
-REQUEST'),%0A (MEMBER_INVITATION, 'INVITATION
+Anyone can request to join the group'),%0A (MEMBER_INVITATION, 'Can join the group only upon invitation
')%0A
@@ -639,19 +639,33 @@
_ADMINS
-= 0
+ = 'admins'
%0A VAL
@@ -675,19 +675,33 @@
MEMBERS
-= 1
+ = 'members'
%0A VAL
@@ -748,155 +748,344 @@
S, '
-ADMINS'),%0A (VALID_MEMBERS, 'MEMBERS')%0A )%0A%0A TYPE_BASIC = 0%0A TYPE_CURSUS = 1%0A TYPE_ASSO = 2%0A TYPE_PROMO = 3%0A TYPE_SCHOOL = 4
+Only admins can accept join requests or invite members'),%0A (VALID_MEMBERS, 'Every member can accept join requests or invite members')%0A )%0A%0A TYPE_BASIC = 'basic'%0A TYPE_CURSUS = 'cursus'%0A TYPE_ASSO = 'association'%0A TYPE_PROMO = 'school_promotion'%0A TYPE_SCHOOL = 'school'
%0A
@@ -1124,21 +1124,28 @@
BASIC, '
-BASIC
+Simple group
'),%0A
@@ -1168,24 +1168,27 @@
, 'C
-URSUS/DEPARTMENT
+ursus or department
'),%0A
@@ -1213,18 +1213,18 @@
, 'A
-SSOCIATION
+ssociation
'),%0A
@@ -1249,17 +1249,24 @@
O, '
-PROMOTION
+School promotion
'),%0A
@@ -1289,21 +1289,21 @@
HOOL, 'S
-CHOOL
+chool
')%0A )
|
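On Django 3.0+ the string-valued choices this diff introduces map naturally onto models.TextChoices; a hedged sketch of just the visibility enum (the record itself predates that API):

from django.db import models

class Visibility(models.TextChoices):
    # Stored in the database as the string value, exposed via a readable label.
    PUBLIC = 'public', 'Anyone can see the group'
    PRIVATE = 'private', 'Group is not visible'

class Group(models.Model):
    visibility = models.CharField(max_length=16, choices=Visibility.choices,
                                  default=Visibility.PRIVATE)
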
5e66929d051047385cce9d7e910ce02b61fa1afe
|
Use joined loading for follower lists
|
skylines/model/follower.py
|
skylines/model/follower.py
|
# -*- coding: utf-8 -*-

from datetime import datetime

from sqlalchemy import ForeignKey, Column
from sqlalchemy.types import Integer, DateTime
from sqlalchemy.orm import relationship

from .base import DeclarativeBase
from .session import DBSession


class Follower(DeclarativeBase):
    __tablename__ = 'followers'

    id = Column(Integer, autoincrement=True, primary_key=True)

    source_id = Column(
        Integer, ForeignKey('tg_user.id', ondelete='CASCADE'), index=True)
    source = relationship(
        'User', foreign_keys=[source_id], backref='following')

    destination_id = Column(
        Integer, ForeignKey('tg_user.id', ondelete='CASCADE'), index=True)
    destination = relationship(
        'User', foreign_keys=[destination_id], backref='followers')

    time = Column(DateTime, nullable=False, default=datetime.utcnow)

    @classmethod
    def follows(cls, source, destination):
        return cls.query(source=source, destination=destination).count() > 0

    @classmethod
    def follow(cls, source, destination):
        f = cls.query(source=source, destination=destination).first()
        if not f:
            f = Follower(source=source, destination=destination)
            DBSession.add(f)

    @classmethod
    def unfollow(cls, source, destination):
        cls.query(source=source, destination=destination).delete()
|
Python
| 0
|
@@ -539,24 +539,47 @@
%5Bsource_id%5D,
+%0A lazy='joined',
backref='fo
@@ -771,16 +771,39 @@
ion_id%5D,
+%0A lazy='joined',
backref
|
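lazy='joined' makes SQLAlchemy pull the related User in the same SELECT via a LEFT OUTER JOIN instead of issuing one query per accessed row; a self-contained sketch assuming SQLAlchemy 1.4+:

from sqlalchemy import Column, ForeignKey, Integer, String, create_engine
from sqlalchemy.orm import declarative_base, relationship, Session

Base = declarative_base()

class User(Base):
    __tablename__ = 'users'
    id = Column(Integer, primary_key=True)
    name = Column(String)

class Follower(Base):
    __tablename__ = 'followers'
    id = Column(Integer, primary_key=True)
    source_id = Column(Integer, ForeignKey('users.id'))
    # lazy='joined' folds the User load into the Follower SELECT,
    # avoiding the N+1 query pattern when follower lists are rendered.
    source = relationship('User', lazy='joined')

engine = create_engine('sqlite://')
Base.metadata.create_all(engine)
with Session(engine) as session:
    session.add(Follower(source=User(name='alice')))
    session.commit()
    f = session.query(Follower).first()
    print(f.source.name)  # no second SELECT needed
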
244d46b9cfb3d3420979dd3d747b20ab384707d3
|
Fix ImportError in test
|
smbackend/test/conftest.py
|
smbackend/test/conftest.py
|
import os
import json

import pytest
import haystack
from django.utils import timezone
from django.conf import settings
from django.core.management import call_command

from services.models import Unit, UnitConnection, Organization, Service


def read_config(name):
    return json.load(open(
        os.path.join(
            settings.BASE_DIR,
            'smbackend',
            'elasticsearch/{}.json'.format(name))))


TEST_INDEX = {
    'default': {
        'ENGINE': 'multilingual_haystack.backends.MultilingualSearchEngine',
    },
    'default-fi': {
        'ENGINE': 'multilingual_haystack.backends.LanguageSearchEngine',
        'BASE_ENGINE': 'multilingual_haystack.custom_elasticsearch_search_backend.CustomEsSearchEngine',
        'URL': 'http://localhost:9200/',
        'INDEX_NAME': 'servicemap-fi-test',
        'MAPPINGS': read_config('mappings_finnish')['modelresult']['properties'],
        'SETTINGS': read_config('settings_finnish')
    },
    'default-sv': {
        'ENGINE': 'multilingual_haystack.backends.LanguageSearchEngine',
        'BASE_ENGINE': 'multilingual_haystack.custom_elasticsearch_search_backend.CustomEsSearchEngine',
        'URL': 'http://localhost:9200/',
        'INDEX_NAME': 'servicemap-sv-test',
    },
    'default-en': {
        'ENGINE': 'multilingual_haystack.backends.LanguageSearchEngine',
        'BASE_ENGINE': 'multilingual_haystack.custom_elasticsearch_search_backend.CustomEsSearchEngine',
        'URL': 'http://localhost:9200/',
        'INDEX_NAME': 'servicemap-en-test',
    },
}


@pytest.fixture
def haystack_test():
    """ Set up testing haystack. """
    settings.HAYSTACK_CONNECTIONS = TEST_INDEX
    haystack.connections.reload('default')
    yield haystack
    call_command('clear_index', interactive=False, verbosity=0)


@pytest.fixture
def db_content():
    """ Generate some content to test against """
    s = Service(id=1, name='Kirjasto', unit_count=0, last_modified_time=timezone.now())
    s.save()
    o = Organization(id=1, name="Helsingin kaupunki")
    o.save()
    u = Unit(id=27586,
             provider_type=1,
             organization=o,
             origin_last_modified_time=timezone.now(),
             name='Kallion kirjasto',
             description='Kirjasto kallion keskustassa',
             street_address='Arentikuja 3')
    u.save()
    u.services.add(s)
    uc = UnitConnection(unit=u, name='John Doe', phone='040 123 1234', type=999)
    uc.save()
    call_command('update_index', interactive=False, verbosity=0)
    return {'service': s, 'unit': u}
|
Python
| 0.000003
|
@@ -224,23 +224,28 @@
zation,
-Service
+OntologyWord
%0A%0A%0Adef r
@@ -346,33 +346,8 @@
IR,%0A
- 'smbackend',%0A
@@ -1865,15 +1865,20 @@
s =
-Service
+OntologyWord
(id=
@@ -2534,9 +2534,8 @@
it': u%7D%0A
-%0A
|
40f4f721b59108a929ca0f8a8f9df6619ebccea3
|
Fix persona backend
|
social/backends/persona.py
|
social/backends/persona.py
|
"""
BrowserID support
"""
from social.backends.base import BaseAuth
from social.exceptions import AuthFailed, AuthMissingParameter
class PersonaAuth(BaseAuth):
"""BrowserID authentication backend"""
name = 'persona'
def get_user_id(self, details, response):
"""Use BrowserID email as ID"""
return details['email']
def get_user_details(self, response):
"""Return user details, BrowserID only provides Email."""
# {'status': 'okay',
# 'audience': 'localhost:8000',
# 'expires': 1328983575529,
# 'email': 'name@server.com',
# 'issuer': 'browserid.org'}
email = response['email']
return {'username': email.split('@', 1)[0],
'email': email,
'fullname': '',
'first_name': '',
'last_name': ''}
def extra_data(self, user, uid, response, details):
"""Return users extra data"""
return {'audience': response['audience'],
'issuer': response['issuer']}
def auth_complete(self, *args, **kwargs):
"""Completes loging process, must return user instance"""
if not 'assertion' in self.data:
raise AuthMissingParameter(self, 'assertion')
response = self.get_json('https://browserid.org/verify', params={
'assertion': self.data['assertion'],
'audience': self.strategy.request_host()
})
if response.get('status') == 'failure':
raise AuthFailed(self)
kwargs.update({'response': response, 'backend': self})
return self.strategy.authenticate(*args, **kwargs)
|
Python
| 0.000002
|
@@ -1321,14 +1321,12 @@
y',
-params
+data
=%7B%0A
@@ -1435,16 +1435,31 @@
%7D
+, method='POST'
)%0A
|
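The fix moves the assertion out of the URL query string and into the request body: with requests, params= feeds the query string while data= form-encodes the POST body. A sketch against a placeholder verifier URL:

import requests

payload = {'assertion': '<assertion>', 'audience': 'localhost:8000'}

# Before: the payload rides in the URL query string (GET-style)
# requests.get('https://verifier.example/verify', params=payload)

# After: the payload is form-encoded into the POST body
resp = requests.post('https://verifier.example/verify', data=payload)
print(resp.status_code)
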
538bae320ba46764fbe8cce3aef19f22ddd1b1ec
|
Add simple Lede to ClustersInJSON
|
articulate/clustering/clusterformats.py
|
articulate/clustering/clusterformats.py
|
"""Clusters for Humans"""
import itertools
import simplejson as json
from articulate.pymotherlode.api import *
def clustersToJSON(articles, assignments, insertContent):
tag = "kmeans"
clusters = list(set(assignments))
clustersForHumans = []
if insertContent:
print "Inserting content into ClusterInJSON"
else:
print "Not inserting content into ClusterInJSON"
for i in clusters:
articlesInCluster = []
for j, cluster in enumerate(assignments):
if cluster == i:
if insertContent:
#With Content
articlesInCluster.append({'title':articles[j].title, 'feed_title':articles[j].feed_title, 'link':articles[j].link, 'author':articles[j].author, 'content':articles[j].content, 'updated_at':articles[j].updated_at})
else:
#And Without
articlesInCluster.append({'title':articles[j].title, 'feed_title':articles[j].feed_title, 'link':articles[j].link, 'author':articles[j].author, 'updated_at':articles[j].updated_at})
clustersForHumans.append({'cluster': i,'articles':articlesInCluster})
storeCluster(json.dumps(clustersForHumans),tag)
#if __name__ == "__main__":
# hello()
# articles = []
# assignments = []
# clustersToJSON(articles,assignments)
|
Python
| 0.000002
|
@@ -106,16 +106,201 @@
port *%0A%0A
+def getLede(content):%0A%09#ledeRE = re.compile('%5E(.*?(?%3C!%5Cb%5Cw)%5B.?!%5D)%5Cs+%5BA-Z0-9%5D')%0A%09#ledes = ledeRE.match(content)%0A%09#return ledes.group(0)%0A%09lede = content%5B:50%5D%0A%09lede += %22...%22%0A%09return lede%0A%0A
def clus
@@ -660,16 +660,139 @@
r == i:%0A
+%09%09%09%09#try:%0A%09%09%09%09#%09lede = getLede(articles%5Bj%5D.content)%0A%09%09%09%09#except:%0A%09%09%09%09#%09lede = ''%0A%0A%09%09%09%09lede = getLede(articles%5Bj%5D.content)%0A%0A
%09%09%09%09if i
@@ -972,16 +972,29 @@
.author,
+ 'lede':lede,
'conten
@@ -1231,16 +1231,29 @@
.author,
+ 'lede':lede,
'update
|
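The diff's getLede truncates at a flat 50 characters; a Python 3 sketch of the same teaser idea that additionally avoids cutting mid-word:

def get_lede(content, limit=50):
    # Take the first `limit` characters, then back up to the last space
    # so the teaser does not end in the middle of a word.
    if len(content) <= limit:
        return content
    cut = content[:limit]
    head, _, _ = cut.rpartition(' ')
    return (head or cut) + "..."

print(get_lede("The quick brown fox jumps over the lazy dog near the riverbank"))
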
223ccc18239d7417a19fb0ec29da6bc5579949fa
|
Add muse to the list of tested returned instruments
|
astroquery/eso/tests/test_eso_remote.py
|
astroquery/eso/tests/test_eso_remote.py
|
# Licensed under a 3-clause BSD style license - see LICENSE.rst
import os
import tempfile
import shutil

from astropy.tests.helper import pytest, remote_data

try:
    import keyring
    HAS_KEYRING = True
except ImportError:
    HAS_KEYRING = False
try:
    from ...eso import Eso
    ESO_IMPORTED = True
except ImportError:
    ESO_IMPORTED = False
from ...exceptions import LoginError

SKIP_TESTS = not(HAS_KEYRING and ESO_IMPORTED)


@pytest.mark.skipif('SKIP_TESTS')
@remote_data
class TestEso:
    @pytest.fixture()
    def temp_dir(self, request):
        my_temp_dir = tempfile.mkdtemp()
        def fin():
            shutil.rmtree(my_temp_dir)
        request.addfinalizer(fin)
        return my_temp_dir

    def test_SgrAstar(self, temp_dir):
        eso = Eso()
        eso.cache_location = temp_dir

        instruments = eso.list_instruments()
        # in principle, we should run both of these tests
        # result_i = eso.query_instrument('midi', target='Sgr A*')
        # Equivalent, does not depend on SESAME:
        result_i = eso.query_instrument('midi', coord1=266.41681662, coord2=-29.00782497)

        surveys = eso.list_surveys()
        # result_s = eso.query_survey('VVV', target='Sgr A*')
        # Equivalent, does not depend on SESAME:
        result_s = eso.query_survey('VVV', coord1=266.41681662, coord2=-29.00782497)

        assert 'midi' in instruments
        assert result_i is not None
        assert 'VVV' in surveys
        assert result_s is not None
        assert 'Object' in result_s.colnames
        assert 'b333' in result_s['Object']

    def test_nologin(self):
        # WARNING: this test will fail if you haven't cleared your cache and
        # you have downloaded this file!
        eso = Eso()
        with pytest.raises(LoginError) as exc:
            eso.retrieve_data('AMBER.2006-03-14T07:40:19.830')
        assert exc.value.args[0] == "If you do not pass a username to login(), you should configure a default one!"

    def test_empty_return(self):
        # test for empty return with an object from the North
        eso = Eso()
        surveys = eso.list_surveys()
        # result_s = eso.query_survey(surveys[0], target='M51')
        # Avoid SESAME
        result_s = eso.query_survey(surveys[0], coord1=202.469575, coord2=47.195258)
        assert result_s is None

    def test_SgrAstar_remotevslocal(self, temp_dir):
        eso = Eso()
        # Remote version
        instruments = eso.list_instruments()
        # result1 = eso.query_instrument(instruments[0], target='Sgr A*')
        result1 = eso.query_instrument(instruments[0], coord1=266.41681662, coord2=-29.00782497)
        # Local version
        eso.cache_location = temp_dir
        instruments = eso.list_instruments()
        # result2 = eso.query_instrument(instruments[0], target='Sgr A*')
        result2 = eso.query_instrument(instruments[0], coord1=266.41681662, coord2=-29.00782497)
        assert result1 == result2

    def test_list_instruments(self):
        # If this test fails, we may simply need to update it
        inst = Eso.list_instruments()
        assert inst == [u'fors1', u'fors2', u'vimos', u'omegacam', u'hawki', u'isaac',
                        u'naco', u'visir', u'vircam', u'apex', u'uves', u'giraffe',
                        u'xshooter', u'crires', u'kmos', u'sinfoni', u'amber',
                        u'midi', u'harps']

    # REQUIRES LOGIN!
    # Can we get a special login specifically for astroquery testing?
    # def test_data_retrieval():
    #
    #     data_product_id = 'AMBER.2006-03-14T07:40:03.741'
    #     data_files = eso.retrieve_data([data_product_id])
    #     # How do we know if we're going to get .fits or .fits.Z?
    #     assert 'AMBER.2006-03-14T07:40:03.741.fits' in data_files[0]

    @pytest.mark.xfail
    def test_retrieve_data(self):
        eso = Eso()
        eso.login()
        result = eso.retrieve_data("MIDI.2014-07-25T02:03:11.561")
        assert len(result)>0
        assert "MIDI.2014-07-25T02:03:11.561" in result[0]
|
Python
| 0
|
@@ -3308,16 +3308,25 @@
hooter',
+ u'muse',
u'crire
|
273a0b264955780dbc0341a46936e2cc8941f5cd
|
Simplify get_name
|
binstar_client/utils/projects/models.py
|
binstar_client/utils/projects/models.py
|
import inspect
import os
import tarfile
from tempfile import SpooledTemporaryFile

from binstar_client.errors import BinstarError


class CondaProject(object):
    # TODO: This class will be moved into Anaconda-Project
    def __init__(self, project_path, *args, **kwargs):
        self.project_path = project_path
        self._name = None
        self._tar = None
        self._size = None
        self.pfiles = []
        self.metadata = {
            'summary': kwargs.get('summary', None),
            'description': kwargs.get('description', None),
            'version': kwargs.get('version', None)
        }
        self.metadata = dict((k, v) for k, v in self.metadata.items() if v)

    def tar_it(self, fd=SpooledTemporaryFile()):
        with tarfile.open(mode='w', fileobj=fd) as tar:
            for pfile in self.pfiles:
                tar.add(pfile.fullpath, arcname=pfile.relativepath)
        fd.seek(0)
        self._tar = fd
        return fd

    def to_project_creation(self):
        return {
            'name': self.name,
            'access': 'public',
            'profile': {
                'description': self.metadata.get('description', ''),
                'summary': self.metadata.get('summary', ''),
            }
        }

    def to_stage(self):
        return {
            'basename': self.basename,
            'configuration': self.configuration,
            'size': self.size
        }

    @property
    def tar(self):
        if self._tar is None:
            self.tar_it()
        return self._tar

    @property
    def configuration(self):
        output = self.metadata.get('configuration', {})
        output.update({
            'size': self.size,
            'num_of_files': len(self.pfiles)
        })
        return output

    @property
    def basename(self):
        return "{}.tar".format(self.name)

    @property
    def size(self):
        if self._size is None:
            spos = self._tar.tell()
            self._tar.seek(0, os.SEEK_END)
            self._size = self._tar.tell() - spos
            self._tar.seek(spos)
        return self._size

    @property
    def name(self):
        if self._name is None:
            self._name = self._get_project_name()
        return self._name

    def _get_project_name(self):
        if self.project_path == ".":
            return os.path.basename(os.path.abspath("."))

        if os.path.isdir(self.project_path):
            return os.path.basename(self.project_path)
        else:
            return os.path.splitext(os.path.basename(self.project_path))[0]


class PFile(object):
    def __init__(self, **kwargs):
        self.fullpath = kwargs.get('fullpath', None)
        self.basename = kwargs.get('basename', None)
        self.relativepath = kwargs.get('relativepath', None)
        self.size = kwargs.get('size', None)
        self.populate()

    def __str__(self):
        if self.is_dir():
            return self.relativepath
        else:
            return "[{}] {}".format(self.size, self.relativepath)

    def __repr__(self):
        return self.__str__()

    def __eq__(self, other):
        return self.fullpath == other.fullpath

    def is_dir(self):
        return os.path.isdir(self.fullpath)

    def validate(self, validator):
        if inspect.isfunction(validator):
            return validator(basename=self.basename,
                             relativepath=self.relativepath,
                             fullpath=self.fullpath)
        elif inspect.isclass(validator):
            return validator(self)()
        raise BinstarError("Invalid validator {}".format(validator))

    def populate(self):
        if self.size is None:
            self.size = os.stat(self.fullpath).st_size
        if self.basename is None:
            self.basename = os.path.basename(self.fullpath)

    def to_dict(self):
        return {
            'basename': self.basename,
            'size': self.size,
            'relativepath': self.relativepath
        }
|
Python
| 0.011797
|
@@ -2286,24 +2286,38 @@
%0A if
+os.path.isdir(
self.project
@@ -2325,15 +2325,9 @@
path
- == %22.%22
+)
:%0A
@@ -2343,17 +2343,16 @@
return
-
os.path.
@@ -2380,95 +2380,8 @@
ath(
-%22.%22))%0A if os.path.isdir(self.project_path):%0A return os.path.basename(
self
@@ -2389,24 +2389,25 @@
project_path
+)
)%0A el
|
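After the diff, the '.' special case disappears because os.path.isdir('.') is true and abspath normalizes it; the resulting logic as a free function:

import os

def get_project_name(project_path):
    # os.path.isdir('.') is True, and abspath normalizes '.' (and trailing
    # slashes), so basename returns the directory name in every dir case.
    if os.path.isdir(project_path):
        return os.path.basename(os.path.abspath(project_path))
    # For files, drop the extension: 'pkg/demo.tar' -> 'demo'
    return os.path.splitext(os.path.basename(project_path))[0]

print(get_project_name('.'))             # current directory's name
print(get_project_name('pkg/demo.tar'))  # demo
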
f15ebf385bfc6ac706b2344db12a7be9967540ef
|
Test for symm.symmetrize_space
|
symm/test/test_addons.py
|
symm/test/test_addons.py
|
#
# Author: Qiming Sun <osirpt.sun@gmail.com>
#

import unittest
import numpy
from pyscf import gto
from pyscf import scf
from pyscf.symm import addons

mol = gto.Mole()
mol.build(
    verbose = 0,
    atom = [
        ["O" , (0. , 0. , 0.)],
        [1 , (0. , -0.757 , 0.587)],
        [1 , (0. , 0.757 , 0.587)] ],
    basis = 'cc-pvdz',
    symmetry = 1,
)

mf = scf.RHF(mol)
mf.scf()


class KnowValues(unittest.TestCase):
    def test_label_orb_symm(self):
        l = addons.label_orb_symm(mol, mol.irrep_name, mol.symm_orb, mf.mo_coeff)
        lab0 = ['A1', 'A1', 'B1', 'A1', 'B2', 'A1', 'B1', 'B1',
                'A1', 'A1', 'B2', 'B1', 'A1', 'A2', 'B2', 'A1',
                'B1', 'B1', 'A1', 'B2', 'A2', 'A1', 'A1', 'B1']
        self.assertEqual(l, lab0)

    def test_symmetrize_orb(self):
        c = addons.symmetrize_orb(mol, mf.mo_coeff)
        self.assertTrue(numpy.allclose(c, mf.mo_coeff))
        numpy.random.seed(1)
        c = addons.symmetrize_orb(mol,
                                  numpy.random.random((mf.mo_coeff.shape)))
        self.assertAlmostEqual(numpy.linalg.norm(c), 10.148003411042838)

    def test_route(self):
        orbsym = [0, 3, 0, 2, 5, 6]
        res = addons.route(7, 3, orbsym)
        self.assertEqual(res, [0, 3, 4])


if __name__ == "__main__":
    print("Full Tests for symm.addons")
    unittest.main()
|
Python
| 0
|
@@ -1123,16 +1123,510 @@
42838)%0A%0A
+ def test_symmetrize_space(self):%0A from pyscf import gto, symm, scf%0A mol = gto.M(atom = 'C 0 0 0; H 1 1 1; H -1 -1 1; H 1 -1 -1; H -1 1 -1',%0A basis = 'sto3g', verbose=0)%0A mf = scf.RHF(mol).run()%0A mol.build(0, 0, symmetry='D2')%0A mo = symm.symmetrize_space(mol, mf.mo_coeff)%0A irreps = symm.label_orb_symm(mol, mol.irrep_name, mol.symm_orb, mo)%0A self.assertEqual(irreps, %5B'A','A','A','B1','B1','B2','B2','B3','B3'%5D)%0A%0A
def
|
cd0eb0f8dc0f9cceb370407ba1fa876238951af5
|
Fix Windows unittest build on VC2012 R=mark at https://breakpad.appspot.com/539003/
|
src/client/windows/unittests/testing.gyp
|
src/client/windows/unittests/testing.gyp
|
# Copyright (c) 2010, Google Inc.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following disclaimer
# in the documentation and/or other materials provided with the
# distribution.
# * Neither the name of Google Inc. nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
{
'includes': [
'../build/common.gypi',
],
'target_defaults': {
},
'targets': [
{
'target_name': 'gtest',
'type': 'static_library',
'include_dirs': [
'<(DEPTH)/testing/include',
'<(DEPTH)/testing/gtest',
'<(DEPTH)/testing/gtest/include',
],
'sources': [
'<(DEPTH)/testing/gtest/src/gtest-all.cc',
],
'direct_dependent_settings': {
'include_dirs': [
'<(DEPTH)/testing/include',
'<(DEPTH)/testing/gtest/include',
]
},
},
{
'target_name': 'gmock',
'type': 'static_library',
'include_dirs': [
'<(DEPTH)/testing/include',
'<(DEPTH)/testing/',
'<(DEPTH)/testing/gtest',
'<(DEPTH)/testing/gtest/include',
],
'sources': [
'<(DEPTH)/testing/src/gmock-all.cc',
'<(DEPTH)/testing/src/gmock_main.cc',
],
'direct_dependent_settings': {
'include_dirs': [
'<(DEPTH)/testing/include',
'<(DEPTH)/testing/gtest/include',
]
},
},
],
}
|
Python
| 0.000003
|
@@ -2062,32 +2062,332 @@
',%0A %5D
-%0A %7D
+,%0A # Visual C++ implements variadic templates strangely, and%0A # VC++2012 broke Google Test by lowering this value. See%0A # http://stackoverflow.com/questions/12558327/google-test-in-visual-studio-2012%0A 'defines': %5B'_VARIADIC_MAX=10'%5D,%0A %7D,%0A 'defines': %5B'_VARIADIC_MAX=10'%5D
,%0A %7D,%0A
@@ -2898,24 +2898,105 @@
%5D
-%0A %7D
+,%0A 'defines': %5B'_VARIADIC_MAX=10'%5D,%0A %7D,%0A 'defines': %5B'_VARIADIC_MAX=10'%5D
,%0A %7D,
|
57d07fa351795dff22c7d706534b047c6720f829
|
Default to 'text' rather than 'python'
|
splunk_handler/__init__.py
|
splunk_handler/__init__.py
|
import logging
import socket
import traceback
from threading import Thread
import requests
class SplunkHandler(logging.Handler):
"""
A logging handler to send events to a Splunk Enterprise instance
"""
def __init__(self, host, port, username, password, index, hostname=None, source=None, sourcetype='python', verify=True):
logging.Handler.__init__(self)
self.host = host
self.port = port
self.username = username
self.password = password
self.index = index
self.source = source
self.sourcetype = sourcetype
self.verify = verify
if hostname is None:
self.hostname = socket.gethostname()
else:
self.hostname = hostname
# prevent infinite recursion by silencing requests logger
requests_log = logging.getLogger('requests')
requests_log.propagate = False
def emit(self, record):
thread = Thread(target=self._async_emit, args=(record, ))
thread.start()
def _async_emit(self, record):
try:
if self.source is None:
source = record.pathname
else:
source = self.source
params = {
'host': self.hostname,
'index': self.index,
'source': source,
'sourcetype': self.sourcetype
}
url = 'https://%s:%s/services/receivers/simple' % (self.host, self.port)
payload = self.format(record)
auth = (self.username, self.password)
r = requests.post(
url,
auth=auth,
data=payload,
params=params,
verify=self.verify
)
r.close()
except Exception, e:
print "Traceback:\n" + traceback.format_exc()
print "Exception in Splunk logging handler: %s" % str(e)
|
Python
| 0.999589
|
@@ -318,14 +318,12 @@
pe='
-python
+text
', v
|
30f9348dc93d1cec8466b0663cb522c9843b11f0
|
rename .cli.utils.show_psrs to make its purpose clear
|
src/anyconfig/cli/utils.py
|
src/anyconfig/cli/utils.py
|
#
# Copyright (C) 2011 - 2021 Satoru SATOH <satoru.satoh gmail.com>
# SPDX-License-Identifier: MIT
#
"""Utilities for anyconfig.cli.*.
"""
import functools
import os
import sys
import warnings
from .. import api, parser, utils
from . import parse_args
@functools.lru_cache(None)
def list_parser_types():
"""An wrapper to api.list_types() to memoize its result.
"""
return api.list_types()
def exit_with_output(content, exit_code=0):
"""
Exit the program with printing out messages.
:param content: content to print out
:param exit_code: Exit code
"""
(sys.stdout if exit_code == 0 else sys.stderr).write(content + os.linesep)
sys.exit(exit_code)
def show_psrs():
"""Show list of info of parsers available
"""
sep = os.linesep
types = "Supported types: " + ", ".join(api.list_types())
cids = "IDs: " + ", ".join(c for c, _ps in api.list_by_cid())
x_vs_ps = [" %s: %s" % (x, ", ".join(p.cid() for p in ps))
for x, ps in api.list_by_extension()]
exts = "File extensions:" + sep + sep.join(x_vs_ps)
exit_with_output(sep.join([types, exts, cids]))
def exit_if_load_failure(cnf, msg):
"""
:param cnf: Loaded configuration object or None indicates load failure
:param msg: Message to print out if failure
"""
if cnf is None:
exit_with_output(msg, 1)
def try_parse_args(argv):
"""
Show supported config format types or usage.
:param argv: Argument list to parse or None (sys.argv will be set).
:return: argparse.Namespace object or None (exit before return)
"""
apsr = parse_args.make_parser()
args = apsr.parse_args(argv)
if args.loglevel:
warnings.simplefilter("always")
if args.inputs:
if '-' in args.inputs:
args.inputs = sys.stdin
else:
if args.list:
show_psrs()
elif args.env:
cnf = os.environ.copy()
output_result(cnf, args)
sys.exit(0)
else:
apsr.print_usage()
sys.exit(1)
if args.validate and args.schema is None:
exit_with_output("--validate option requires --scheme option", 1)
return args
def do_get(cnf, get_path):
"""
:param cnf: Configuration object to print out
:param get_path: key path given in --get option
:return: updated Configuration object if no error
"""
(cnf, err) = api.get(cnf, get_path)
if cnf is None: # Failed to get the result.
exit_with_output("Failed to get result: err=%s" % err, 1)
return cnf
def output_type_by_input_path(inpaths, itype, fmsg):
"""
:param inpaths: List of input file paths
:param itype: Input type or None
:param fmsg: message if it cannot detect otype by 'inpath'
:return: Output type :: str
"""
msg = ("Specify inpath and/or outpath type[s] with -I/--itype "
"or -O/--otype option explicitly")
if itype is None:
try:
otype = api.find(inpaths[0]).type()
except api.UnknownFileTypeError:
exit_with_output((fmsg % inpaths[0]) + msg, 1)
except (ValueError, IndexError):
exit_with_output(msg, 1)
else:
otype = itype
return otype
def try_dump(cnf, outpath, otype, fmsg, extra_opts=None):
"""
:param cnf: Configuration object to print out
:param outpath: Output file path or None
:param otype: Output type or None
:param fmsg: message if it cannot detect otype by 'inpath'
:param extra_opts: Map object will be given to api.dump as extra options
"""
if extra_opts is None:
extra_opts = {}
try:
api.dump(cnf, outpath, otype, **extra_opts)
except api.UnknownFileTypeError:
exit_with_output(fmsg % outpath, 1)
except api.UnknownProcessorTypeError:
exit_with_output("Invalid output type '%s'" % otype, 1)
def output_result(cnf, args, inpaths=None, extra_opts=None):
"""
:param cnf: Configuration object to print out
:param args: :class:`argparse.Namespace` object
:param inpaths: List of input file paths
:param extra_opts: Map object will be given to api.dump as extra options
"""
fmsg = ("Uknown file type and cannot detect appropriate backend "
"from its extension, '%s'")
(outpath, otype) = (args.output, args.otype or "json")
if not utils.is_dict_like(cnf):
exit_with_output(str(cnf)) # Print primitive types as it is.
if not outpath or outpath == "-":
outpath = sys.stdout
if otype is None:
otype = output_type_by_input_path(inpaths, args.itype, fmsg)
try_dump(cnf, outpath, otype, fmsg, extra_opts=extra_opts)
def load_diff(args, extra_opts):
"""
:param args: :class:`argparse.Namespace` object
:param extra_opts: Map object given to api.load as extra options
"""
try:
diff = api.load(args.inputs, args.itype,
ac_ignore_missing=args.ignore_missing,
ac_merge=args.merge,
ac_template=args.template,
ac_schema=args.schema,
**extra_opts)
except api.UnknownProcessorTypeError:
exit_with_output("Wrong input type '%s'" % args.itype, 1)
except api.UnknownFileTypeError:
exit_with_output("No appropriate backend was found for given file "
"type='%s', inputs=%s" % (args.itype,
", ".join(args.inputs)),
1)
exit_if_load_failure(diff,
"Failed to load: args=%s" % ", ".join(args.inputs))
return diff
def do_filter(cnf, args):
"""
:param cnf: Mapping object represents configuration data
:param args: :class:`argparse.Namespace` object
:return: 'cnf' may be updated
"""
if args.query:
cnf = api.try_query(cnf, args.query)
elif args.get:
cnf = do_get(cnf, args.get)
elif args.set:
(key, val) = args.set.split('=')
api.set_(cnf, key, parser.parse(val))
return cnf
# vim:sw=4:ts=4:et:
|
Python
| 0.000013
|
@@ -693,25 +693,28 @@
%0A%0Adef show_p
-s
+arse
rs():%0A %22%22
@@ -1871,17 +1871,20 @@
show_p
-s
+arse
rs()%0A
|
86a34765774a5ac909cad769d711a41a2429a8ee
|
Prepare for Python 3.
|
minipkg.py
|
minipkg.py
|
#!/usr/bin/env python
"""minipkg.py - install pkgsrc
Usage: python minipkg.py [-h | --help] [-v | --version]
"""
from __future__ import print_function
import hashlib
import os
import subprocess
import sys
import urllib2
__author__ = 'Mansour Moufid'
__copyright__ = 'Copyright 2015, Mansour Moufid'
__email__ = 'mansourmoufid@gmail.com'
__license__ = 'ISC'
__status__ = 'Development'
__version__ = '1.0'
supported_sys = ('Linux', 'Darwin')
supported_mach = {
'i386': '32',
'x86_64': '64',
}
default_compiler = {
'Linux': 'gcc',
'Darwin': 'clang',
}
archives = [
'http://minipkg.eliteraspberries.com/pkgsrc-2015Q3.tar.gz',
'http://minipkg.eliteraspberries.com/pkgsrc-eliteraspberries-0.3.tar.gz',
]
hash_algorithm = hashlib.sha256
archive_hashes = [
'f56599dece253113f64d92c528989b7fcb899f3888c7c9fc40f70f08ac91fea6',
'db8ebcd1c12229e9d2da92f888cdb8505f9c486381f3f9b2cd8948a14a04b671',
]
def uname():
p = subprocess.Popen(['uname', '-sm'], stdout=subprocess.PIPE)
p.wait()
assert p.returncode == 0, 'uname'
(sys, mach) = p.stdout.read().split()
return (sys, mach)
def fetch(url, hash):
filename = os.path.basename(url)
if not os.path.exists(filename):
req = urllib2.Request(url)
res = urllib2.urlopen(req)
dat = res.read()
with open(filename, 'wb') as f:
f.write(dat)
with open(filename, 'r') as f:
dat = f.read()
h = hash_algorithm(dat)
assert h.hexdigest() == hash
def extract(tgz, path):
if not os.path.exists(path):
os.mkdir(path)
tar = tgz.rstrip('.gz')
if not os.path.exists(tar):
err = subprocess.call(['gunzip', tgz])
assert err == 0, 'gunzip'
err = subprocess.call(['tar', '-xf', tar, '-C', path])
assert err == 0, 'tar'
if __name__ == '__main__':
assert len(sys.argv) in (1, 2)
if len(sys.argv) == 1:
pass
elif len(sys.argv) == 2:
if sys.argv[1] in ('-h', '--help'):
print(__doc__)
print('Supported systems:', supported_sys)
print('Supported architectures:', supported_mach.keys())
sys.exit(os.EX_OK)
elif sys.argv[1] in ('-v', '--version'):
print('minipkg version', __version__)
sys.exit(os.EX_OK)
else:
print(__doc__)
sys.exit(os.EX_USAGE)
print('minipkg: version', __version__)
# Step 1:
# Determine some information about the machine.
HOME = os.environ['HOME']
OPSYS, mach = uname()
assert OPSYS in supported_sys, 'unsupported system'
assert mach in supported_mach, 'unsupported architecture'
ABI = supported_mach[mach]
CC = os.environ.get('CC', None) or default_compiler[OPSYS]
print('minipkg: HOME:', HOME)
print('minipkg: OPSYS:', OPSYS)
print('minipkg: ABI:', ABI)
print('minipkg: CC:', CC)
# Step 2:
# Fetch the pkgsrc archive.
for (archive, hash) in zip(archives, archive_hashes):
print('minipkg: fetching', archive, '...')
fetch(archive, hash)
# Step 3:
# Extract the pkgsrc archive.
home_usr = os.path.join(HOME, 'usr')
for tgz in map(os.path.basename, archives):
print('minipkg: extracting', tgz, '...')
extract(tgz, home_usr)
# Step 4:
# Bootstrap pkgsrc.
print('minipkg: bootstrapping ...')
sh = os.environ.get('SH', '/bin/bash')
sh = sh.split(os.pathsep)[0]
assert os.path.exists(sh), sh
os.putenv('SH', sh)
bootstrap_path = os.path.join(HOME, 'usr', 'pkgsrc', 'bootstrap')
if not os.path.exists(os.path.join(bootstrap_path, 'work')):
os.chdir(bootstrap_path)
p = subprocess.Popen(
[
'./bootstrap',
'--unprivileged',
'--abi', ABI,
'--compiler', CC,
'--make-jobs', '4',
'--prefer-pkgsrc', 'no',
],
stdout=subprocess.PIPE,
stderr=subprocess.PIPE,
)
out, err = p.communicate()
log = os.path.join(HOME, 'pkgsrc-bootstrap-log.txt')
with open(log, 'w') as f:
f.write(out)
f.write(err)
assert p.returncode == 0, 'bootstrap'
# Step 5:
# Set environment variables.
print('minipkg: setting environment variables ...')
vars = [
('PATH', '$HOME/pkg/bin'),
('PATH', '$HOME/pkg/sbin'),
('MANPATH', '$HOME/pkg/man'),
]
for (key, val) in vars:
os.putenv(key, val + os.pathsep + os.environ.get(key, ''))
script = [
'export %s="%s:$%s"' % (key, val, key)
for (key, val) in vars
]
dotfile = os.path.join(HOME, '.bash_profile')
try:
with open(dotfile, 'r') as f:
profile = f.read()
except IOError:
profile = ''
with open(dotfile, 'w') as f:
print('# generated by minipkg', file=f)
for line in script:
print(line, file=f)
print('export SH=%s' % sh, file=f)
print(profile, file=f)
print('minipkg: done!')
|
Python
| 0
|
@@ -206,22 +206,220 @@
sys%0A
-import urllib2
+try:%0A from urllib2 import (%0A Request as url_request,%0A urlopen as url_open,%0A )%0Aexcept ImportError:%0A import urllib.request.Request as url_request%0A import urllib.request.urlopen as url_open
%0A%0A%0A_
@@ -1440,14 +1440,10 @@
url
-lib2.R
+_r
eque
@@ -1471,16 +1471,9 @@
url
-lib2.url
+_
open
@@ -2298,24 +2298,29 @@
tectures:',
+list(
supported_ma
@@ -2329,16 +2329,17 @@
.keys())
+)
%0A
|
abebb922de7bfbaed75e14b2a53c9cf45534b72f
|
check loader
|
src/apps/loader/handler.py
|
src/apps/loader/handler.py
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
from children.models import Child
from history.models import ParamHistory, Param
from common.utils import get_dictionary_item
def values_is_in_dictionary(values_list, type_name):
for value in values_list:
if get_dictionary_item(value, type_name):
return True
return False
def set_children_address(child, args):
locality = args[5]
street = args[6]
house = args[7]
flat = args[8]
child.locality = get_dictionary_item(locality, 'locality')
child.street = get_dictionary_item(street, 'streets')
child.house = house
child.flat = flat
child.save()
def get_child(args):
last_name = args[1]
first_name = args[2]
middle_name = args[3]
birthday = args[4]
child, created = Child.objects.get_or_create(
last_name=last_name,
first_name=first_name,
middle_name=middle_name,
birthday=birthday)
return child
def date_is_free(child, parameter, date):
return not ParamHistory.objects.filter(child=child, parameter__slug=parameter, first_date=date)
def set_children_education(child, date, args):
institution = args[9]
group = args[10]
grade = args[11]
try:
parameter = Param.objects.get(slug='education')
if date_is_free(child, 'education', date):
ParamHistory.objects.create(
first_date=date,
parameter=parameter,
child=child,
institution=get_dictionary_item(institution, 'institutions'),
group=get_dictionary_item(group, 'groups'),
grade=get_dictionary_item(grade, 'grades'),
risk_group=0
)
except Param.DoesNotExist:
pass
def set_children_m2m_param(param_type, child, date, values):
try:
parameter = Param.objects.get(slug=param_type)
values = values.replace(' ', '').split(',')
if date_is_free(child, param_type, date) and values_is_in_dictionary(values, param_type):
history = ParamHistory.objects.create(
first_date=date,
parameter=parameter,
child=child,
risk_group=0
)
for value in values:
dict_value = get_dictionary_item(value, param_type)
if dict_value:
if param_type == 'health':
history.health_states.add(dict_value)
if param_type == 'parents':
history.parents_status.add(dict_value)
except Param.DoesNotExist:
pass
def set_children_simple_param(param_type, child, date, value):
try:
parameter = Param.objects.get(slug=param_type)
if param_type == 'risk':
value = list(filter(lambda val_list: val_list[1] == value, ParamHistory.BOOL_CHOICES))
if date_is_free(child, param_type, date) and value:
if param_type == 'risk':
ParamHistory.objects.create(
first_date=date,
parameter=parameter,
child=child,
risk_group=value[0]
)
else:
ParamHistory.objects.create(
first_date=date,
parameter=parameter,
child=child,
risk_group=0,
note=value
)
except Param.DoesNotExist:
pass
def loader(data, on_date):
for item in data:
child = get_child(item)
set_children_address(child, item)
set_children_education(child, on_date, item)
set_children_m2m_param('health', child, on_date, item[12])
set_children_m2m_param('parents', child, on_date, item[13])
set_children_simple_param('risk', child, on_date, item[14])
set_children_simple_param('note', child, on_date, item[15])
|
Python
| 0.000002
|
@@ -776,16 +776,74 @@
rgs%5B4%5D%0A%0A
+ print(last_name, first_name , middle_name, birthday)%0A%0A
chil
@@ -1001,24 +1001,42 @@
=birthday)%0A%0A
+ print(child)%0A%0A
return c
|
9839aa026986a35c1c13363169116193f4602355
|
Coding is hard
|
conftest.py
|
conftest.py
|
#
# See https://github.com/dials/dials/wiki/pytest for documentation on how to
# write and run pytest tests, and an overview of the available features.
#
from __future__ import absolute_import, division, print_function
import os
import pytest
@pytest.fixture(scope="session")
def dials_regression():
'''Return the absolute path to the dials_regression module as a string.
Skip the test if dials_regression is not installed.'''
try:
import dials_regression as dr
except ImportError:
pytest.skip("dials_regression required for this test")
return os.path.dirname(dr.__file__)
@pytest.fixture(scope="session")
def xia2_regression():
'''Return the absolute path to the xia2_regression module as a string.
Skip the test if dials_regression is not installed.'''
try:
import xia2_regression as xr
except ImportError:
pytest.skip("xia2_regression required for this test")
return os.path.dirname(xr.__file__)
@pytest.fixture(scope="session")
def xia2_regression_build():
'''Return the absolute path to the xia2_regression directory within the build
path as a string. Skip the test if xia2_regression is not installed.'''
try:
x2rpath = os.path.join(os.environ.get('LIBTBX_BUILD'), 'xia2_regression')
except AttributeError:
x2rpath = ''
if not os.path.exists(x2rpath):
pytest.skip("xia2_regression required for this test")
if 'test_data' not in os.listdir(x2rpath)
pytest.skip("xia2_regression files need to be downloaded for this test. Run xia2_regression.fetch_test_data")
return x2rpath
from libtbx.test_utils.pytest import libtbx_collector
pytest_collect_file = libtbx_collector()
|
Python
| 0.999986
|
@@ -1420,16 +1420,17 @@
x2rpath)
+:
%0A pyt
|
b79d3dc3af2f2b3871bbf9cf1a8d7cab909c152c
|
Fix case when files missing and download disabled
|
conftest.py
|
conftest.py
|
#
# See https://github.com/dials/dials/wiki/pytest for documentation on how to
# write and run pytest tests, and an overview of the available features.
#
from __future__ import absolute_import, division, print_function
import os
import re
import procrunner
import py.path
import pytest
from dials.conftest import (dials_regression, xia2_regression,
xia2_regression_build, run_in_tmpdir)
def pytest_addoption(parser):
'''Add '--runslow' and '--regression' options to pytest.'''
parser.addoption("--runslow", action="store_true", default=False,
help="run slow tests")
parser.addoption("--regression", action="store_true", default=False,
help="run regression tests")
parser.addoption("--regression-only", action="store_true", default=False,
help="run only regression tests")
def pytest_collection_modifyitems(config, items):
'''Tests marked as slow will not be run unless slow tests are enabled with
the '--runslow' parameter or the test is selected specifically. The
latter allows running slow tests via the libtbx compatibility layer.
Tests marked as regression are only run with --regression.
'''
if not config.getoption("--runslow") and len(items) > 1 and not config.getoption("--regression"):
skip_slow = pytest.mark.skip(reason="need --runslow option to run")
for item in items:
if "slow" in item.keywords:
item.add_marker(skip_slow)
if config.getoption("--regression-only"):
skip_regression = pytest.mark.skip(reason="Test only runs without --regression-only")
for item in items:
if "regression" not in item.keywords:
item.add_marker(skip_regression)
elif not config.getoption("--regression"):
skip_regression = pytest.mark.skip(reason="Test only runs with --regression")
for item in items:
if "regression" in item.keywords:
item.add_marker(skip_regression)
@pytest.fixture(scope="session")
def ccp4():
'''Return information about the CCP4 installation.
Skip the test if CCP4 is not installed.'''
if not os.getenv('CCP4'):
pytest.skip("CCP4 installation required for this test")
try:
result = procrunner.run(['refmac5', '-i'], print_stdout=False)
except OSError:
pytest.skip("CCP4 installation required for this test - Could not find CCP4 executable")
if result['exitcode'] or result['timeout']:
pytest.skip("CCP4 installation required for this test - Could not run CCP4 executable")
version = re.search('patch level *([0-9]+)\.([0-9]+)\.([0-9]+)', result['stdout'])
if not version:
pytest.skip("CCP4 installation required for this test - Could not determine CCP4 version")
return {
'path': os.getenv('CCP4'),
'version': [int(v) for v in version.groups()],
}
@pytest.fixture(scope="session")
def xds():
'''Return information about the XDS installation.
Skip the test if XDS is not installed.'''
try:
result = procrunner.run(['xds'], print_stdout=False)
except OSError:
pytest.skip("XDS installation required for this test")
if result['exitcode'] or result['timeout']:
pytest.skip("XDS installation required for this test - Could not run XDS")
if 'license expired' in result['stdout']:
pytest.skip("XDS installation required for this test - XDS license is expired")
version = re.search('BUILT=([0-9]+)\)', result['stdout'])
if not version:
pytest.skip("XDS installation required for this test - Could not determine XDS version")
return {
'version': int(version.groups()[0])
}
@pytest.fixture(scope="session")
def regression_data():
'''Return the location of a regression data set as py.path object.
Download the files if they are not on disk already.
Skip the test if the data can not be downloaded.
'''
dls_dir = '/dls/science/groups/scisoft/DIALS/repositories/current/xia2_regression_data'
read_only = False
if os.getenv('REGRESSIONDATA'):
target_dir = os.getenv('REGRESSIONDATA')
elif os.path.exists(os.path.join(dls_dir, 'filelist.json')):
target_dir = dls_dir
read_only = True
elif os.getenv('LIBTBX_BUILD'):
target_dir = os.path.join(os.getenv('LIBTBX_BUILD'), 'xia2_regression')
else:
pytest.skip('Can not determine regression data location. Use environment variable REGRESSIONDATA')
from xia2.Test.fetch_test_data import download_lock, fetch_test_data
class DataFetcher():
_cache = {}
def __call__(self, test_data):
if test_data not in self._cache:
with download_lock(target_dir):
self._cache[test_data] = fetch_test_data(target_dir, pre_scan=True, file_group=test_data, read_only=read_only)
self._cache[test_data] = str(self._cache[test_data]) # https://github.com/cctbx/cctbx_project/issues/234
if not self._cache[test_data]:
pytest.skip('Automated download of test data failed. Run xia2.fetch_test_data')
return py.path.local(self._cache[test_data])
def __repr__(self):
return "<%sDataFetcher: %s>" % ('R/O ' if read_only else '', target_dir)
return DataFetcher()
|
Python
| 0.000001
|
@@ -4676,16 +4676,55 @@
d_only)%0A
+ if self._cache%5Btest_data%5D:%0A
@@ -4859,32 +4859,191 @@
che%5Btest_data%5D:%0A
+ if read_only:%0A pytest.skip('Regression data is required to run this test. Run with --regression or run xia2.fetch_test_data')%0A else:%0A
pytest.s
|
509deaf89a5c6dbe8844319469998c0958947656
|
test send_mail
|
src/bccf/util/emailutil.py
|
src/bccf/util/emailutil.py
|
import logging
log = logging.getLogger(__name__)
from django.db.models.loading import get_model
from django.core.mail import send_mail, BadHeaderError, EmailMultiAlternatives
from django.template import Context
from django.template.loader import render_to_string
from mezzanine.utils.email import send_mail_template
from bccf.models import Event, EventRegistration, Settings
from cartridge.shop.models import Order
TEMPLATE_DIR = "email/%s"
NO_EMAIL = Settings.get_setting('NO_REPLY_EMAIL')
MOD_EMAIL = Settings.get_setting('MODERATOR_EMAIL')
def send_welcome(request, subject="Welcome to BCCF", fr=NO_EMAIL, template="email_welcome.txt", template_html="email_welcome.html"):
"""
Helper function that sends a welcome email to users upon registration.
"""
to = request.user.email
c = Context({'user': request.user})
plain_content = render_to_string(TEMPLATE_DIR % template, {}, context_instance=c)
html_content = render_to_string(TEMPLATE_DIR % template_html, {}, context_instance=c)
msg = EmailMultiAlternatives(subject, plain_content, fr, [to])
msg.attach_alternative(html_content, "text/html")
msg.send()
def send_moderate(request, subject, app_name, model_name, id, to=MOD_EMAIL, fr=NO_EMAIL,
template="email_moderate.txt", template_html="email_moderate.html"):
"""
Helper function that sends an email when something needs moderation.
Things that need moderation include
- campaign creation,
- program request,
- forum post,
- membership cancellation,
- new member sign up,
- and shop orders.
"""
model = get_model(app_name, model_name)
object = model.objects.get(id=id)
c = Context({'obj': object})
plain_content = render_to_string(TEMPLATE_DIR % template, {}, context_instance=c)
html_content = render_to_string(TEMPLATE_DIR % template_html, {}, context_instance=c)
msg = EmailMultiAlternatives(subject, plain_content, fr, [to])
msg.attach_alternative(html_content, "text/html")
msg.send()
def send_reminder(subject, user, app_name, model_name, id, fr=NO_EMAIL):
"""
Helper function that sends an email when something needs reminding.
Things that need reminding include
- expiring membership,
- membership expired,
- event payment,
- event seat released,
"""
to = 'khcastillo@hotmail.com' #user.email
model = get_model(app_name, model_name)
object = model.objects.get(id=id)
c = Context({'obj': object, 'user': user})
plain_content = render_to_string(TEMPLATE_DIR % template, {}, context_instance=c)
html_content = render_to_string(TEMPLATE_DIR % template_html, {}, context_instance=c)
msg = EmailMultiAlternatives(subject, plain_content, fr, [to])
msg.attach_alternative(html_content, "text/html")
# msg.send()
try:
print "Sending Email"
send_mail_template('Test Email', TEMPLATE_DIR % template_html, fr, [to], context=c, fail_silently=settings.DEBUG)
except Exception, e:
print e
def send_receipt(request, user, order, fr=NO_EMAIL):
"""
Helper function that sends an receipt to the user who bought a product
from the BCCF Shop
"""
to = user.email
c = Context({'order': order, 'user': user})
plain_content = render_to_string(TEMPLATE_DIR % template, {}, context_instance=c)
html_content = render_to_string(TEMPLATE_DIR % template, {}, context_instance=c)
msg = EmailMultiAlternatives(subject, plain_content, fr, [to])
msg.attach_alternative(html_content, "text/html")
msg.send()
def send_after_survey(request, id, fr=NO_EMAIL):
"""
Helper function that sends a link to the after survey after the event
has finished.
"""
to = []
event = Event.objects.get(id=id)
registrations = EventRegistration.objects.filter(event=event)
# Grab all the user's emails
for regs in registrations:
to.append(regs.user.email)
c = Context({'event': event, 'user': user})
plain_content = render_to_string(TEMPLATE_DIR % template, {}, context_instance=c)
html_content = render_to_string(TEMPLATE_DIR % template, {}, context_instance=c)
msg = EmailMultiAlternatives(subject, plain_content, fr, to)
msg.attach_alternative(html_content, "text/html")
msg.send()
|
Python
| 0.000001
|
@@ -2854,18 +2854,16 @@
ml%22)%0A
- #
msg.sen
@@ -2906,16 +2906,120 @@
Email%22%0A
+ send_mail('Test Email', 'Test Email', fr, %5Bto%5D, fail_silently=False, html_message=html_content)%0A
|
f07cbc4a36c857468869c075bc695e977bea19b8
|
fix CATKIN_IGNORE to affect the folder where it is instead of only ignoring subfolders
|
src/catkin_pkg/packages.py
|
src/catkin_pkg/packages.py
|
# Software License Agreement (BSD License)
#
# Copyright (c) 2012, Willow Garage, Inc.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions
# are met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following
# disclaimer in the documentation and/or other materials provided
# with the distribution.
# * Neither the name of Willow Garage, Inc. nor the names of its
# contributors may be used to endorse or promote products derived
# from this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
# FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
# COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
# INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
# BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
# LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN
# ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
"""
Library to find packages in the filesystem.
"""
import os
from .package import parse_package, PACKAGE_MANIFEST_FILENAME
def find_package_paths(basepath):
"""
Crawls the filesystem to find package manifest files.
When a subfolder contains a file ``CATKIN_IGNORE`` its
subdirectories are ignored.
:param basepath: The path to search in, ``str``
:returns: A list of relative paths containing package manifest files
``list``
"""
paths = []
for dirpath, dirnames, filenames in os.walk(basepath, followlinks=True):
if PACKAGE_MANIFEST_FILENAME in filenames:
basename = os.path.basename(dirpath)
if basename not in paths:
paths.append(os.path.relpath(dirpath, basepath))
del dirnames[:]
continue
elif 'CATKIN_IGNORE' in filenames:
del dirnames[:]
continue
for dirname in dirnames:
if dirname.startswith('.'):
dirnames.remove(dirname)
return paths
def find_packages(basepath):
"""
Crawls the filesystem to find package manifest files and parses them.
:param basepath: The path to search in, ``str``
:returns: A dict mapping relative paths to ``Package`` objects
``dict``
:raises: :exc:RuntimeError` If multiple packages have the same
name
"""
packages = {}
paths = find_package_paths(basepath)
for path in paths:
package = parse_package(os.path.join(basepath, path))
paths_with_same_name = [p for p, pkg in packages.iteritems() if pkg.name == package.name]
if paths_with_same_name:
raise RuntimeError('Two packages found with the same name "%s":\n- %s\n- %s' % (package.name, path, paths_with_same_name[0]))
packages[path] = package
return packages
def verify_equal_package_versions(packages):
"""
Verifies that all packages have the same version number.
:param packages: The list of ``Package`` objects, ``list``
:returns: The version number
:raises: :exc:RuntimeError` If the version is not equal in all
packages
"""
version = None
for package in packages:
if version is None:
version = package.version
elif package.version != version:
raise RuntimeError('Two packages have different version numbers (%s != %s):\n- %s\n- %s' % (package.version, version, package.filename, packages[0].filename))
return version
|
Python
| 0
|
@@ -1887,32 +1887,11 @@
%60 it
-s%0A subdirectories are
+ is
ign
@@ -2146,16 +2146,108 @@
if
+'CATKIN_IGNORE' in filenames:%0A del dirnames%5B:%5D%0A continue%0A elif
PACKAGE_
@@ -2483,100 +2483,8 @@
nue%0A
- elif 'CATKIN_IGNORE' in filenames:%0A del dirnames%5B:%5D%0A continue%0A
|
a8b50df748115e41cc705be1a0fc6c76fe1b98cc
|
disable cache
|
src/cid/utils/fileUtils.py
|
src/cid/utils/fileUtils.py
|
# -*- encoding: utf-8 -*-
"""
@authors: Sebastián Ortiz V. neoecos@gmail.com
@license: GNU AFFERO GENERAL PUBLIC LICENSE
Caliope Server is the web server of Caliope's Framework
Copyright (C) 2013 Infometrika
This program is free software: you can redistribute it and/or modify
it under the terms of the GNU Affero General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU Affero General Public License for more details.
You should have received a copy of the GNU Affero General Public License
along with this program. If not, see <http://www.gnu.org/licenses/>.
"""
#system, and standard library
import os
import json
import re
import mimetypes
import gzip
import StringIO
#flask
from flask import request, current_app
#werkezug
from werkzeug.datastructures import Headers
from werkzeug.wsgi import wrap_file
from werkzeug.exceptions import NotFound
from cid.utils.jsOptimizer import jsOptimizer
def loadJSONFromFile(filename, root_path):
if filename is not None:
if not os.path.isabs(filename):
filename = os.path.join(root_path, filename)
if not os.path.isfile(filename):
print "Error : JSON file " + filename + " not found"
raise NotFound("JSON file " + filename + " not found")
try:
json_data = re.sub("(?:/\\*(?:[^*]|(?:\\*+[^*/]))*\\*+/)",
'', open(filename).read(), re.MULTILINE)
json_data = json.loads(json_data)
except IOError:
json_data = {}
print "Error: can\'t find file or read data"
except ValueError:
json_data = {}
print "Error, is not a JSON" + filename
else:
return json_data
def loadJSONFromFileNoPath(filename):
if not os.path.isfile(filename):
print "Error : JSON file " + filename + " not found"
raise NotFound("JSON file " + filename + " not found")
try:
json_data = re.sub("(?:/\\*(?:[^*]|(?:\\*+[^*/]))*\\*+/)",
'', open(filename).read(), re.MULTILINE)
json_data = json.loads(json_data)
except IOError:
json_data = {}
print "Error: can\'t find file or read data"
except ValueError:
json_data = {}
print "Error, is not a JSON" + filename
else:
return json_data
def send_from_memory(filename):
"""
:param filename: Name of the file to be loaded.
"""
if not os.path.isfile(filename):
raise NotFound()
#if filename is not None:
#if not os.path.isabs(filename):
#filename = os.path.join(current_app.root_path, filename)
mimetype = mimetypes.guess_type(filename)[0]
if mimetype is None:
mimetype = 'application/octet-stream'
file = open(filename, 'rb')
data = jsOptimizer().get_file(os.path.abspath(filename), current_app.storekv)
if data:
headers = Headers()
headers['Content-Encoding'] = 'gzip'
headers['Content-Length'] = len(data)
headers['Cache-Control'] = "max-age=172800, public, must-revalidate"
rv = current_app.response_class(data, mimetype=mimetype, headers=headers,
direct_passthrough=True)
else:
data = wrap_file(request.environ, file)
headers = Headers()
rv = current_app.response_class(data, mimetype=mimetype, headers=headers,
direct_passthrough=False)
return rv
#From
#https://github.com/elasticsales/Flask-gzip/blob/master/flask_gzip.py
class Gzip(object):
def __init__(self, compress_level=6, minimum_size=500):
self.compress_level = compress_level
self.minimum_size = minimum_size
def after_request(self, response):
accept_encoding = request.headers.get('Accept-Encoding', '')
if 'gzip' not in accept_encoding.lower():
return response
if response.direct_passthrough:
return response
if (200 > response.status_code >= 300) or len(
response.data) < self.minimum_size or 'Content-Encoding' in response.headers:
return response
gzip_buffer = StringIO.StringIO()
gzip_file = gzip.GzipFile(mode='wb', compresslevel=self.compress_level, fileobj=gzip_buffer)
gzip_file.write(response.data)
gzip_file.close()
response.data = gzip_buffer.getvalue()
response.headers['Content-Encoding'] = 'gzip'
response.headers['Content-Length'] = len(response.data)
response.headers['Cache-Control'] = "max-age=172800, public, must-revalidate"
return response
|
Python
| 0.000002
|
@@ -3030,24 +3030,25 @@
'rb')%0A%0A
+#
data = jsOpt
|
2cd19b395f4320330b66dff1ef98d149f3a40a31
|
Add test for notify dataset/update
|
ckanext/syndicate/tests/test_plugin.py
|
ckanext/syndicate/tests/test_plugin.py
|
from mock import patch
import unittest
import ckan.model as model
from ckan.model.domain_object import DomainObjectOperation
from ckanext.syndicate.plugin import SyndicatePlugin
class TestPlugin(unittest.TestCase):
def test_notify_syndicates_task(self):
entity = model.Package()
entity.extras = {'syndicate': 'true'}
with patch('ckanext.syndicate.plugin.syndicate_task') as mock_syndicate:
plugin = SyndicatePlugin()
plugin.notify(entity, DomainObjectOperation.new)
mock_syndicate.assert_called_with(entity.id, 'dataset/create')
|
Python
| 0
|
@@ -186,22 +186,22 @@
ass Test
-Plugin
+Notify
(unittes
@@ -225,42 +225,60 @@
def
-test_n
+setUp(self):%0A super(TestN
otify
-_syndicates_task(self):
+, self).setUp()
%0A
@@ -278,24 +278,29 @@
p()%0A
+self.
entity = mod
@@ -320,16 +320,21 @@
+self.
entity.e
@@ -363,25 +363,24 @@
'true'%7D%0A
-%0A
with pat
@@ -371,20 +371,38 @@
-with
+self.syndicate_patch =
patch('
@@ -442,16 +442,146 @@
e_task')
+%0A self.plugin = SyndicatePlugin()%0A%0A def test_syndicates_task_for_dataset_create(self):%0A with self.syndicate_patch
as mock
@@ -608,35 +608,301 @@
+self.
plugin
- = SyndicatePlugin()%0A
+.notify(self.entity, DomainObjectOperation.new)%0A mock_syndicate.assert_called_with(self.entity.id,%0A 'dataset/create')%0A%0A def test_syndicates_task_for_dataset_update(self):%0A with self.syndicate_patch as mock_syndicate:
%0A
@@ -910,16 +910,21 @@
+self.
plugin.n
@@ -929,16 +929,21 @@
.notify(
+self.
entity,
@@ -956,35 +956,39 @@
ObjectOperation.
-new
+changed
)%0A mo
@@ -1019,16 +1019,21 @@
ed_with(
+self.
entity.i
@@ -1034,16 +1034,62 @@
tity.id,
+%0A
'datase
@@ -1078,29 +1078,29 @@
'dataset/
-cre
+upd
ate')%0A
|
a5b673864a25c5fc4f60281e7397d2afa965431a
|
reorder imports
|
biff/tests/test_against_fortran.py
|
biff/tests/test_against_fortran.py
|
# coding: utf-8
from __future__ import division, print_function
__author__ = "adrn <adrn@astro.columbia.edu>"
# Standard library
import os
# Third-party
import astropy.units as u
from astropy.constants import G as _G
G = _G.decompose([u.kpc,u.Myr,u.Msun]).value
import numpy as np
# Project
from .._bfe import density, potential, gradient
from astropy.utils.data import get_pkg_data_filename
from astropy.tests.helper import pytest
@pytest.mark.parametrize("basename", [
'simple-hernquist', 'random', 'wang-zhao',
])
@pytest.mark.skipif('True')
def test_density(basename):
pos_path = os.path.abspath(get_pkg_data_filename('../data/positions.dat.gz'))
coeff_path = os.path.abspath(get_pkg_data_filename('../data/{0}.coeff'.format(basename)))
accp_path = os.path.abspath(get_pkg_data_filename('../data/{0}-accp.dat.gz'.format(basename)))
xyz = np.loadtxt(pos_path, skiprows=1)
coeff = np.atleast_2d(np.loadtxt(coeff_path, skiprows=1))
nmax = coeff[:,0].astype(int).max()
lmax = coeff[:,1].astype(int).max()
cos_coeff = np.zeros((nmax+1,lmax+1,lmax+1))
sin_coeff = np.zeros((nmax+1,lmax+1,lmax+1))
for row in coeff:
n,l,m,cc,sc = row
cos_coeff[int(n),int(l),int(m)] = cc
sin_coeff[int(n),int(l),int(m)] = sc
dens = density(xyz, M=1., r_s=1.,
Snlm=cos_coeff, Tnlm=sin_coeff,
nmax=nmax, lmax=lmax)
# TODO: nothing to compare this to....
@pytest.mark.parametrize("basename", [
'simple-hernquist', 'random', 'wang-zhao',
])
def test_potential(basename):
pos_path = os.path.abspath(get_pkg_data_filename('../data/positions.dat.gz'))
coeff_path = os.path.abspath(get_pkg_data_filename('../data/{0}.coeff'.format(basename)))
accp_path = os.path.abspath(get_pkg_data_filename('../data/{0}-accp.dat.gz'.format(basename)))
xyz = np.loadtxt(pos_path, skiprows=1)
coeff = np.atleast_2d(np.loadtxt(coeff_path, skiprows=1))
accp = np.loadtxt(accp_path)
nmax = coeff[:,0].astype(int).max()
lmax = coeff[:,1].astype(int).max()
cos_coeff = np.zeros((nmax+1,lmax+1,lmax+1))
sin_coeff = np.zeros((nmax+1,lmax+1,lmax+1))
for row in coeff:
n,l,m,cc,sc = row
cos_coeff[int(n),int(l),int(m)] = cc
sin_coeff[int(n),int(l),int(m)] = sc
potv = potential(xyz, G=1., M=1., r_s=1.,
Snlm=cos_coeff, Tnlm=sin_coeff,
nmax=nmax, lmax=lmax)
# for some reason, SCF potential is -potential
scf_potv = -accp[:,-1]
np.testing.assert_allclose(potv, scf_potv, rtol=1E-6)
@pytest.mark.parametrize("basename", [
'simple-hernquist', 'random', 'wang-zhao',
])
def test_gradient(basename):
pos_path = os.path.abspath(get_pkg_data_filename('../data/positions.dat.gz'))
coeff_path = os.path.abspath(get_pkg_data_filename('../data/{0}.coeff'.format(basename)))
accp_path = os.path.abspath(get_pkg_data_filename('../data/{0}-accp.dat.gz'.format(basename)))
xyz = np.loadtxt(pos_path, skiprows=1)
coeff = np.atleast_2d(np.loadtxt(coeff_path, skiprows=1))
accp = np.loadtxt(accp_path)
nmax = coeff[:,0].astype(int).max()
lmax = coeff[:,1].astype(int).max()
cos_coeff = np.zeros((nmax+1,lmax+1,lmax+1))
sin_coeff = np.zeros((nmax+1,lmax+1,lmax+1))
for row in coeff:
n,l,m,cc,sc = row
cos_coeff[int(n),int(l),int(m)] = cc
sin_coeff[int(n),int(l),int(m)] = sc
grad = gradient(xyz, G=1., M=1., r_s=1.,
Snlm=cos_coeff, Tnlm=sin_coeff,
nmax=nmax, lmax=lmax)
# I output the acceleration from SCF when I make the files
# so I have no idea why I don't need a minus sign here...
scf_grad = accp[:,:3]
np.testing.assert_allclose(grad, scf_grad, rtol=1E-6)
|
Python
| 0.000009
|
@@ -214,16 +214,109 @@
G as _G%0A
+from astropy.utils.data import get_pkg_data_filename%0Afrom astropy.tests.helper import pytest%0A
G = _G.d
@@ -435,102 +435,8 @@
nt%0A%0A
-from astropy.utils.data import get_pkg_data_filename%0Afrom astropy.tests.helper import pytest%0A%0A
@pyt
|
ef02a2c4fb2c0cd2d0b0d1a243169df87515532e
|
remove default 'to' value for request
|
bin/addons/base/res/res_request.py
|
bin/addons/base/res/res_request.py
|
##############################################################################
#
# Copyright (c) 2004-2006 TINY SPRL. (http://tiny.be) All Rights Reserved.
#
# WARNING: This program as such is intended to be used by professional
# programmers who take the whole responsability of assessing all potential
# consequences resulting from its eventual inadequacies and bugs
# End users who are looking for a ready-to-use solution with commercial
# garantees and support are strongly adviced to contract a Free Software
# Service Company
#
# This program is Free Software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License
# as published by the Free Software Foundation; either version 2
# of the License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
#
##############################################################################
from osv import osv, fields
import time
def _links_get(self, cr, uid, context={}):
obj = self.pool.get('res.request.link')
ids = obj.search(cr, uid, [])
res = obj.read(cr, uid, ids, ['object', 'name'], context)
return [(r['object'], r['name']) for r in res]
class res_request(osv.osv):
_name = 'res.request'
def request_send(self, cr, uid, ids, *args):
for id in ids:
cr.execute('update res_request set state=%s,date_sent=%s where id=%d', ('waiting', time.strftime('%Y-%m-%d %H:%M:%S'), id))
cr.execute('select act_from,act_to,body,date_sent from res_request where id=%d', (id,))
values = cr.dictfetchone()
# this will be truncated automatically at creation
values['name'] = values['body'] or '/'
values['req_id'] = id
self.pool.get('res.request.history').create(cr, uid, values)
return True
def request_reply(self, cr, uid, ids, *args):
for id in ids:
cr.execute("update res_request set state='active', act_from=%d, act_to=act_from, trigger_date=NULL, body='' where id=%d", (uid,id))
return True
def request_close(self, cr, uid, ids, *args):
self.write(cr, uid, ids, {'state':'closed', 'active':False})
return True
def request_get(self, cr, uid):
cr.execute('select id from res_request where act_to=%d and (trigger_date<=%s or trigger_date is null) and active=True', (uid,time.strftime('%Y-%m-%d')))
ids = map(lambda x:x[0], cr.fetchall())
cr.execute('select id from res_request where act_from=%d and (act_to<>%d) and (trigger_date<=%s or trigger_date is null) and active=True', (uid,uid,time.strftime('%Y-%m-%d')))
ids2 = map(lambda x:x[0], cr.fetchall())
return (ids, ids2)
_columns = {
'create_date': fields.datetime('Created date', readonly=True),
'name': fields.char('Subject', states={'waiting':[('readonly',True)],'active':[('readonly',True)],'closed':[('readonly',True)]}, required=True, size=128),
'active': fields.boolean('Active'),
'priority': fields.selection([('0','Low'),('1','Normal'),('2','High')], 'Priority', states={'waiting':[('readonly',True)],'closed':[('readonly',True)]}, required=True),
'act_from': fields.many2one('res.users', 'From', required=True, readonly=True, states={'closed':[('readonly',True)]}),
'act_to': fields.many2one('res.users', 'To', required=True, states={'waiting':[('readonly',True)],'closed':[('readonly',True)]}),
'body': fields.text('Request', states={'waiting':[('readonly',True)],'closed':[('readonly',True)]}),
'date_sent': fields.datetime('Date', readonly=True),
'trigger_date': fields.datetime('Trigger Date', states={'waiting':[('readonly',True)],'closed':[('readonly',True)]}),
'ref_partner_id':fields.many2one('res.partner', 'Partner Ref.', states={'closed':[('readonly',True)]}),
'ref_doc1':fields.reference('Document Ref 1', selection=_links_get, size=128, states={'closed':[('readonly',True)]}),
'ref_doc2':fields.reference('Document Ref 2', selection=_links_get, size=128, states={'closed':[('readonly',True)]}),
'state': fields.selection([('draft','draft'),('waiting','waiting'),('active','active'),('closed','closed')], 'State', required=True, readonly=True),
'history': fields.one2many('res.request.history','req_id', 'History')
}
_defaults = {
'act_from': lambda obj,cr,uid,context={}: uid,
'act_to': lambda obj,cr,uid,context={}: uid,
'state': lambda obj,cr,uid,context={}: 'draft',
'active': lambda obj,cr,uid,context={}: True,
'priority': lambda obj,cr,uid,context={}: '1',
}
_order = 'priority desc, trigger_date, create_date desc'
_table = 'res_request'
res_request()
class res_request_link(osv.osv):
_name = 'res.request.link'
_columns = {
'name': fields.char('Name', size=64, required=True, translate=True),
'object': fields.char('Object', size=64, required=True),
'priority': fields.integer('Priority'),
}
_defaults = {
'priority': lambda *a: 5,
}
_order = 'priority'
res_request_link()
class res_request_history(osv.osv):
_name = 'res.request.history'
_columns = {
'name': fields.char('Summary', size=128, states={'active':[('readonly',True)],'waiting':[('readonly',True)]}, required=True),
'req_id': fields.many2one('res.request', 'Request', required=True, ondelete='cascade', select=True),
'act_from': fields.many2one('res.users', 'From', required=True, readonly=True),
'act_to': fields.many2one('res.users', 'To', required=True, states={'waiting':[('readonly',True)]}),
'body': fields.text('Body', states={'waiting':[('readonly',True)]}),
'date_sent': fields.datetime('Date sent', states={'waiting':[('readonly',True)]}, required=True)
}
_defaults = {
'name': lambda *a: 'NoName',
'act_from': lambda obj,cr,uid,context={}: uid,
'act_to': lambda obj,cr,uid,context={}: uid,
'date_sent': lambda *a: time.strftime('%Y-%m-%d %H:%M:%S'),
}
res_request_history()
|
Python
| 0
|
@@ -4566,55 +4566,8 @@
id,%0A
-%09%09'act_to': lambda obj,cr,uid,context=%7B%7D: uid,%0A
%09%09's
|
4339393275a347044cd656b84b05a4fe33a91914
|
should have been part of 6ca53737b4a38fe
|
bin/assess-vulnerabilities-risk.py
|
bin/assess-vulnerabilities-risk.py
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""This script analyzes Clair generated vulnerabilities.
The script is intended to be incorporated into a CI process
to generate a non-zero exit status when vulnerabilities
exceed an acceptable threshold.
"""
import logging
import optparse
import re
import sys
import time
import clair_cicd
from clair_cicd import io
from clair_cicd.assessor import VulnerabilitiesRiskAssessor
_logger = logging.getLogger(__name__)
def _check_logging_level(option, opt, value):
"""Type checking function for command line parser's 'logginglevel' type."""
reg_ex_pattern = "^(DEBUG|INFO|WARNING|ERROR|CRITICAL)$"
reg_ex = re.compile(reg_ex_pattern, re.IGNORECASE)
if reg_ex.match(value):
return getattr(logging, value.upper())
fmt = (
"option %s: should be one of "
"DEBUG, INFO, WARNING, ERROR, CRITICAL or FATAL"
)
raise optparse.OptionValueError(fmt % opt)
class CommandLineOption(optparse.Option):
"""Adds new option types to the command line parser's base option types."""
new_types = (
'logginglevel',
)
TYPES = optparse.Option.TYPES + new_types
TYPE_CHECKER = optparse.Option.TYPE_CHECKER.copy()
TYPE_CHECKER['logginglevel'] = _check_logging_level
class CommandLineParser(optparse.OptionParser):
def __init__(self):
optparse.OptionParser.__init__(
self,
'usage: %prog [options] <vulnerabilities directory>',
description='cli to analyze results of Clair identified vulnerabilities',
version='%%prog %s' % clair_cicd.__version__,
option_class=CommandLineOption)
default = None
help = 'whitelist - default = %s' % default
self.add_option(
'--whitelist',
'--wl',
action='store',
dest='whitelist',
default=default,
type='string',
help=help)
default = logging.ERROR
fmt = (
"logging level [DEBUG,INFO,WARNING,ERROR,CRITICAL] - "
"default = %s"
)
help = fmt % logging.getLevelName(default)
self.add_option(
"--log",
action="store",
dest="logging_level",
default=default,
type="logginglevel",
help=help)
def parse_args(self, *args, **kwargs):
(clo, cla) = optparse.OptionParser.parse_args(self, *args, **kwargs)
if len(cla) != 1:
sys.stderr.write(self.get_usage())
sys.exit(1)
return (clo, cla)
if __name__ == '__main__':
#
# parse command line
#
clp = CommandLineParser()
(clo, cla) = clp.parse_args()
#
# configure logging ... remember gmt = utc
#
logging.Formatter.converter = time.gmtime
logging.basicConfig(
level=clo.logging_level,
datefmt='%Y-%m-%dT%H:%M:%S',
format='%(asctime)s.%(msecs)03d+00:00 %(levelname)s %(module)s:%(lineno)d %(message)s')
#
# read all the various bits we need into memory
#
whitelist = io.read_whitelist(clo.whitelist)
if whitelist is None:
sys.exit(1)
vulnerabilities_directory = cla[0]
vulnerabilities = io.read_vulnerabilities(vulnerabilities_directory)
if vulnerabilities is None:
sys.exit(2)
#
# this is what it's all been leading up to:-)
#
vra = VulnerabilitiesRiskAssessor(whitelist, vulnerabilities)
sys.exit(0 if vra.assess() else 1)
|
Python
| 0.999972
|
@@ -864,17 +864,19 @@
G, ERROR
-,
+ or
CRITICA
@@ -880,17 +880,8 @@
ICAL
- or FATAL
%22%0A
|
6d6fc2ed77db220ddfeeaa8709a3518724bb278a
|
Correct paths in extraction of shape per msa
|
bin/data_prep/extract_shape_msa.py
|
bin/data_prep/extract_shape_msa.py
|
"""extract_shape_msa.py
Output one shapefile per MSA containing all the blockgroups it contains
"""
import os
import csv
import fiona
#
# Import MSA to blockgroup crosswalk
#
msa_to_bg = {}
with open('data/2000/crosswalks/msa_blockgroup.csv', 'r') as source:
reader = csv.reader(source, delimiter='\t')
reader.next()
for rows in reader:
msa = rows[0]
bg = rows[1]
if msa not in msa_to_bg:
msa_to_bg[msa] = []
msa_to_bg[msa].append(bg)
#
# Perform the extraction
#
for msa in msa_to_bg:
states = list(set([b[:2] for b in msa_to_bg[msa]]))
## Get all blockgroups
all_bg = {}
for st in states:
with fiona.open('data/2000/shp/state/%s/blockgroups.shp'%st, 'r',
'ESRI Shapefile') as source:
source_crs = source.crs
for f in source:
all_bg[f['properties']['BKGPIDFP00']] = f['geometry']
## blockgroups within cbsa
msa_bg = {bg: all_bg[bg] for bg in msa_to_bg[msa]}
## Save
if not os.path.isdir('data/2000/shp/msa/%s'%msa):
os.mkdir('data/2000/shp/msa/%s'%msa)
schema = {'geometry': 'Polygon',
'properties': {'BKGPIDFP00': 'str'}}
with fiona.open('data/2000/shp/msa/%s/blockgroups.shp'%msa, 'w',
'ESRI Shapefile',
crs = source_crs,
schema = schema) as output:
for bg in msa_bg:
rec = {'geometry':msa_bg[bg], 'properties':{'BKGPIDFP00':bg}}
output.write(rec)
|
Python
| 0.00001
|
@@ -203,21 +203,16 @@
n('data/
-2000/
crosswal
@@ -247,24 +247,24 @@
as source:%0A
+
reader =
@@ -684,29 +684,24 @@
.open('data/
-2000/
shp/state/%25s
@@ -1030,37 +1030,32 @@
ath.isdir('data/
-2000/
shp/msa/%25s'%25msa)
@@ -1075,29 +1075,24 @@
mkdir('data/
-2000/
shp/msa/%25s'%25
@@ -1185,16 +1185,16 @@
'str'%7D%7D%0A
+
with
@@ -1215,13 +1215,8 @@
ata/
-2000/
shp/
|
e0a4024a79f65c1b35648603bad2662ccb9a3305
|
Update cloudstack_common.py
|
cloudstack_plugin/cloudstack_common.py
|
cloudstack_plugin/cloudstack_common.py
|
########
# Copyright (c) 2014 GigaSpaces Technologies Ltd. All rights reserved
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# * See the License for the specific language governing permissions and
# * limitations under the License.
import os
import json
from cloudify import context
from cloudify.exceptions import NonRecoverableError
from libcloud.compute.types import Provider
from libcloud.compute.providers import get_driver
import libcloud.security
__author__ = 'uri1803, boul'
# properties
USE_EXTERNAL_RESOURCE_PROPERTY = 'use_external_resource'
# runtime properties
CLOUDSTACK_ID_PROPERTY = 'external_id' # resource's cloudstack id
CLOUDSTACK_TYPE_PROPERTY = 'external_type' # resource's cloudstack type
CLOUDSTACK_NAME_PROPERTY = 'external_name' # resource's cloudstack name
# runtime properties which all types use
COMMON_RUNTIME_PROPERTIES_KEYS = [CLOUDSTACK_ID_PROPERTY,
CLOUDSTACK_TYPE_PROPERTY,
CLOUDSTACK_NAME_PROPERTY]
class ProviderContext(object):
def __init__(self, provider_context):
self._provider_context = provider_context or {}
self._resources = self._provider_context.get('resources', {})
@property
def agents_keypair(self):
return self._resources.get('agents_keypair')
@property
def agents_security_group(self):
return self._resources.get('agents_security_group')
@property
def ext_network(self):
return self._resources.get('ext_network')
@property
def floating_ip(self):
return self._resources.get('floating_ip')
@property
def int_network(self):
return self._resources.get('int_network')
@property
def management_keypair(self):
return self._resources.get('management_keypair')
@property
def management_security_group(self):
return self._resources.get('management_security_group')
@property
def management_server(self):
return self._resources.get('management_server')
@property
def router(self):
return self._resources.get('router')
@property
def subnet(self):
return self._resources.get('subnet')
def __repr__(self):
info = json.dumps(self._provider_context)
return '<' + self.__class__.__name__ + ' ' + info + '>'
def provider(ctx):
return ProviderContext(ctx.provider_context)
def _get_auth_from_context(ctx):
config = Config().get()
alt_config = config = ctx.node.properties.get('cloudstack_config')
secret_probe = alt_config.get(['cs_api_secret'][0], None)
if secret_probe is None:
if ctx.type == context.NODE_INSTANCE:
config = ctx.node.properties.get('cloudstack_config')
elif ctx.type == context.RELATIONSHIP_INSTANCE:
config = ctx.source.node.properties.get('cloudstack_config')
if not config:
config = ctx.target.node.properties.get('cloudstack_config')
else:
config = Config().get()
return config
def get_cloud_driver(ctx):
#auth_config = Config().get()
auth_config = _get_auth_from_context(ctx)
api_key = auth_config['cs_api_key']
api_secret_key = auth_config['cs_api_secret']
api_url = auth_config['cs_api_url']
driver = get_driver(Provider.CLOUDSTACK)
libcloud.security.VERIFY_SSL_CERT = False
return driver(key=api_key, secret=api_secret_key, url=api_url)
class Config(object):
CLOUDSTACK_CONFIG_PATH_ENV_VAR = 'CLOUDSTACK_CONFIG_PATH'
CLOUDSTACK_CONFIG_PATH_DEFAULT_PATH = '~/cloudstack_config.json'
def get(self):
static_config = self._build_config_from_env_variables()
env_name = self.CLOUDSTACK_CONFIG_PATH_ENV_VAR
default_location_tpl = self.CLOUDSTACK_CONFIG_PATH_DEFAULT_PATH
default_location = os.path.expanduser(default_location_tpl)
config_path = os.getenv(env_name, default_location)
try:
with open(config_path) as f:
Config.update_config(static_config, json.loads(f.read()))
except IOError:
pass
return static_config
@staticmethod
def _build_config_from_env_variables():
cfg = dict()
def take_env_var_if_exists(cfg_key, env_var):
if env_var in os.environ:
cfg[cfg_key] = os.environ[env_var]
take_env_var_if_exists('cs_api_key', 'CS_API_KEY')
take_env_var_if_exists('cs_api_secret', 'CS_API_SECRET')
take_env_var_if_exists('cs_api_url', 'CS_API_URL')
return cfg
@staticmethod
def update_config(overridden_cfg, overriding_cfg):
""" this method is like dict.update() only that it doesn't override
with (or set new) empty values (e.g. empty string) """
for k, v in overriding_cfg.iteritems():
if v:
overridden_cfg[k] = v
def get_nic_by_node_and_network_id(ctx, cloud_driver, node, network_id):
#node = _get_node_by_id(cloud_driver, node_id)
#network = _get_network_by_id(cloud_driver, network_id)
nics = [nic for nic in cloud_driver.ex_list_nics(node) if
network_id == nic.network_id]
if not nics:
ctx.logger.debug('could not find nic by node_id {0} and network_id {1}'
.format(node.id, network_id))
return None
return nics[0]
def get_resource_id(ctx, type_name):
if ctx.node.properties['resource_id']:
return ctx.node.properties['resource_id']
return "{0}_{1}_{2}".format(type_name, ctx.deployment.id, ctx.instance.id)
def get_location(cloud_driver, location_name):
locations = [location for location in cloud_driver
.list_locations() if location.name == location_name]
if locations.__len__() == 0:
raise NonRecoverableError("Zone/Location: {0} cannot be found!"
.format(location_name))
return locations[0]
def get_cloudstack_ids_of_connected_nodes_by_cloudstack_type(ctx, type_name):
type_caps = [caps for caps in ctx.capabilities.get_all().values() if
caps.get(CLOUDSTACK_TYPE_PROPERTY) == type_name]
return [cap[CLOUDSTACK_ID_PROPERTY] for cap in type_caps]
def delete_runtime_properties(ctx, runtime_properties_keys):
for runtime_prop_key in runtime_properties_keys:
if runtime_prop_key in ctx.instance.runtime_properties:
del ctx.instance.runtime_properties[runtime_prop_key]
|
Python
| 0.000009
|
@@ -2997,28 +2997,22 @@
%0A if
-secret_probe
+config
is None
|
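Decoded, the percent-encoded diff above changes only the guard in _get_auth_from_context: the None check moves from the throwaway secret_probe to config itself. A sketch of the patched helper (Config, context and ctx exactly as defined in this record; the surrounding lines are untouched):

def _get_auth_from_context(ctx):
    config = Config().get()
    alt_config = config = ctx.node.properties.get('cloudstack_config')
    secret_probe = alt_config.get(['cs_api_secret'][0], None)

    # patched condition: fall back to node/relationship properties only
    # when the node properties yielded no config at all
    if config is None:
        if ctx.type == context.NODE_INSTANCE:
            config = ctx.node.properties.get('cloudstack_config')
        elif ctx.type == context.RELATIONSHIP_INSTANCE:
            config = ctx.source.node.properties.get('cloudstack_config')
            if not config:
                config = ctx.target.node.properties.get('cloudstack_config')
        else:
            config = Config().get()
    return config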
80a1912ce69fd356d6c54bb00f946fbc7874a9ce
|
Allow multiple alarms for same metric type
|
bluecanary/set_cloudwatch_alarm.py
|
bluecanary/set_cloudwatch_alarm.py
|
import boto3
from bluecanary.exceptions import NamespaceError
from bluecanary.utilities import throttle
@throttle()
def set_cloudwatch_alarm(identifier, **kwargs):
if not kwargs.get('Dimensions'):
kwargs['Dimensions'] = _get_dimensions(identifier, **kwargs)
if not kwargs.get('AlarmName'):
kwargs['AlarmName'] = '{}_{}'.format(identifier, kwargs.get('MetricName'))
cloudwatch_client = boto3.client('cloudwatch')
return cloudwatch_client.put_metric_alarm(**kwargs)
def _get_dimensions(identifier, **kwargs):
base_dimensions = {
'AWS/ELB': [{u'Name': 'LoadBalancerName', u'Value': identifier}],
'AWS/EC2': [{u'Name': 'InstanceId', u'Value': identifier}],
}
try:
return base_dimensions[kwargs.get('Namespace')]
except KeyError:
message = ('Namespace "{}" is not supported by Blue Canary. '
'If you are using a plugin that supports this Namespace '
'please ensure that the plugin alarm class does not return '
'None when calling the "get_dimensions" method.'
.format(kwargs.get('Namespace')))
raise NamespaceError(message)
|
Python
| 0.00001
|
@@ -363,33 +363,308 @@
ier,
- kwargs.get('MetricName')
+%0A kwargs.get('MetricName'))%0A%0A if kwargs.get('AlarmNameModifier'):%0A kwargs%5B'AlarmName'%5D = '%7B%7D_%7B%7D'.format(kwargs.get('AlarmName'),%0A kwargs.get('AlarmNameModifier'))%0A del(kwargs%5B'AlarmNameModifier'%5D
)%0A%0A
|
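Decoded, the diff above rewraps the default AlarmName assignment and inserts an optional AlarmNameModifier suffix, which is what lets several alarms coexist on one metric; the modifier key is deleted before the kwargs reach boto3. A sketch of the patched function (the @throttle() decorator and _get_dimensions are as in the record):

import boto3

def set_cloudwatch_alarm(identifier, **kwargs):
    if not kwargs.get('Dimensions'):
        kwargs['Dimensions'] = _get_dimensions(identifier, **kwargs)

    if not kwargs.get('AlarmName'):
        kwargs['AlarmName'] = '{}_{}'.format(identifier,
                                             kwargs.get('MetricName'))

    # new: optional disambiguating suffix so the same MetricName can back
    # more than one alarm; stripped before calling the CloudWatch API
    if kwargs.get('AlarmNameModifier'):
        kwargs['AlarmName'] = '{}_{}'.format(kwargs.get('AlarmName'),
                                             kwargs.get('AlarmNameModifier'))
        del(kwargs['AlarmNameModifier'])

    cloudwatch_client = boto3.client('cloudwatch')
    return cloudwatch_client.put_metric_alarm(**kwargs)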
ecc5f2a1e1638bf2a97b1a41a22c67802d077fc0
|
fix mongo-insert: cannot get collection if it does not exist.
|
boliau/plugins/mongo/missionlib.py
|
boliau/plugins/mongo/missionlib.py
|
#!/usr/bin/env python
# -*- encoding=utf8 -*-
#
# File: missionlib.py
#
# Copyright (C) 2013 Hsin-Yi Chen (hychen)
# Author(s): Hsin-Yi Chen (hychen) <ossug.hychen@gmail.com>
# Permission is hereby granted, free of charge, to any person obtaining a
# copy of this software and associated documentation files (the "Software"),
# to deal in the Software without restriction, including without limitation
# the rights to use, copy, modify, merge, publish, distribute, sublicense,
# and/or sell copies of the Software, and to permit persons to whom the
# Software is furnished to do so, subject to the following conditions:
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
# FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
# DEALINGS IN THE SOFTWARE.
import pymongo
from boliau import missionlib
class MongoDatabase(object):
conn = None
def connect(self):
if not self.conn:
self.conn = pymongo.Connection()
def get(self, dbname, collectionname):
self.connect()
return self.conn[dbname][collectionname]
class StartMongoMission(object):
def __init__(self):
self.client = MongoDatabase()
self.acc = missionlib.Mission(self.client)
class MongoStreamMissoin(missionlib.StreamMission):
def __init__(self):
self.client = MongoDatabase()
super(MongoStreamMissoin, self).__init__()
class Find(StartMongoMission):
desc = """Querying for More Than One Document
"""
epilog = """
Type: None -> pymongo.cursor.Cursor
"""
def __call__(self, **opts):
dbname = opts.pop('db')
collectionname = opts.pop('collection')
self.acc.add_task('mongo find',
self.maintask,
dbname,
collectionname,
**opts)
return self.acc
def maintask(client, dbname, collectionname, **opts):
collection = client.get(dbname, collectionname)
return collection.find(opts)
class Insert(MongoStreamMissoin):
desc = """Insert a doucment to Mongo DB. """
epilog = """
Type: dict -> None
"""
def __call__(self, acc, **opts):
dbname = opts['db']
collection_name = opts['collection']
self.client.connect()
collection = self.client[dbname][collection_name]
return collection.insert(acc())
|
Python
| 0
|
@@ -1483,30 +1483,28 @@
t()%0A
-return
+db =
self.conn%5Bd
@@ -1513,24 +1513,74 @@
ame%5D
-%5Bcollectionname%5D
+%0A collection = db%5Bcollectionname%5D%0A return collection
%0A%0Acl
@@ -2823,17 +2823,21 @@
ient
-%5B
+.get(
dbname
-%5D%5B
+,
coll
@@ -2843,34 +2843,73 @@
lection_name
-%5D%0A
+)%0A data = acc()%0A if data:%0A
return colle
@@ -2888,32 +2888,35 @@
data:%0A
+
return collectio
@@ -2931,8 +2931,55 @@
(acc())%0A
+ return %22Can not insert. data is None.%22%0A
|
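Decoded, the hunks above fix the actual crash: the old Insert.__call__ indexed the MongoDatabase wrapper directly (self.client[dbname][collection_name]), and that wrapper defines no __getitem__, so the collection could never be fetched. The patch routes the lookup through MongoDatabase.get, which is also split into explicit steps (pymongo creates missing databases and collections lazily on first access), and skips the insert when the accumulator yields nothing. A sketch of the two patched methods:

    # MongoDatabase, patched
    def get(self, dbname, collectionname):
        self.connect()
        db = self.conn[dbname]
        collection = db[collectionname]
        return collection

    # Insert, patched
    def __call__(self, acc, **opts):
        dbname = opts['db']
        collection_name = opts['collection']
        self.client.connect()
        collection = self.client.get(dbname, collection_name)
        data = acc()
        if data:
            return collection.insert(acc())
        return "Can not insert. data is None."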
0bef41a9df057fe37b58ac68f77f4cff7702b360
|
remove dead code
|
cmsplugin_cascade/bootstrap4/mixins.py
|
cmsplugin_cascade/bootstrap4/mixins.py
|
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.forms import MediaDefiningClass, widgets
from django.utils import six
from django.utils.encoding import python_2_unicode_compatible
from django.utils.translation import ugettext_lazy as _
from cmsplugin_cascade.fields import GlossaryField
from cmsplugin_cascade.bootstrap4.grid import Breakpoint
@python_2_unicode_compatible
class BootstrapUtilitiesMixin(six.with_metaclass(MediaDefiningClass)):
"""
If a Cascade plugin is listed in ``settings.CMSPLUGIN_CASCADE['plugins_with_extra_mixins']``,
then this ``BootstrapUtilsMixin`` class is added automatically to its plugin class in order to
enrich it with utility classes offered by Bootstrap-4.
"""
def __str__(self):
return self.plugin_class.get_identifier(self)
def Xget_form(self, request, obj=None, **kwargs):
glossary_fields = list(kwargs.pop('glossary_fields', self.glossary_fields))
choices = [(None, _("Select CSS"))]
choices.extend([(clsname, clsname) for clsname in ['a', 'b', 'c']])
widget = widgets.Select(choices=choices)
glossary_fields.append(GlossaryField(
widget,
label=_("Customized CSS Classes"),
name='extra_css_classes',
help_text=_("Customized CSS classes to be added to this element.")
))
kwargs.update(glossary_fields=glossary_fields)
return super(BootstrapUtilitiesMixin, self).get_form(request, obj, **kwargs)
@classmethod
def get_css_classes(cls, obj):
"""Enrich list of CSS classes with customized ones"""
css_classes = super(BootstrapUtilitiesMixin, cls).get_css_classes(obj)
for utility_field_name in cls.utility_field_names:
css_classes.append(obj.glossary.get(utility_field_name))
return css_classes
class BootstrapUtilities(type):
"""
Factory for building a class ``BootstrapUtilitiesMixin``. This class then is used as a mixin to
all sorts of Bootstrap-4 plugins. Various Bootstrap-4 plugins are shipped using this mixin class
in different configurations. These configurations can be overridden through the project's
settings using:
```
CMSPLUGIN_CASCADE['plugins_with_extra_mixins'] = {'Bootstrap<ANY>Plugin': BootstrapUtilities(
BootstrapUtilities.background_and_color,
BootstrapUtilities.margins,
BootstrapUtilities.paddings,
…
)
```
or similar.
The class ``BootstrapUtilities`` offers a bunch of property methods which return a list of
input fields and/or select boxes. They then can be added to the plugin's editor. This is
specially useful to add CSS classes from the utilities section of Bootstrap-4, such as
margins, borders, colors, etc.
"""
def __new__(cls, *args):
glossary_fields = []
for arg in args:
if isinstance(arg, property):
arg = arg.fget(cls)
if isinstance(arg, (list, tuple)):
glossary_fields.extend([gf for gf in arg if isinstance(gf, GlossaryField)])
elif isinstance(arg, GlossaryField):
glossary_fields.append(arg)
attrs = {'glossary_fields': glossary_fields, 'utility_field_names': [gf.name for gf in glossary_fields]}
return type(BootstrapUtilitiesMixin.__name__, (BootstrapUtilitiesMixin,), attrs)
@property
def background_and_color(cls):
choices = [
('', _("Default")),
('bg-primary text-white', _("Primary with white text")),
('bg-secondary text-white', _("Secondary with white text")),
('bg-success text-white', _("Success with white text")),
('bg-danger text-white', _("Danger with white text")),
('bg-warning text-white', _("Warning with white text")),
('bg-info text-white', _("Info with white text")),
('bg-light text-dark', _("Light with dark text")),
('bg-dark text-white', _("Dark with white text")),
('bg-white text-dark', _("White with dark text")),
('bg-transparent text-dark', _("Transparent with dark text")),
('bg-transparent text-white', _("Transparent with white text")),
]
return GlossaryField(
widgets.Select(choices=choices),
label=_("Background and color"),
name='background_and_color',
initial=''
)
@property
def margins(cls):
glossary_fields = []
choices_format = [
('m-{}{}', _("4 sided margins ({})")),
('mx-{}{}', _("Horizontal margins ({})")),
('my-{}{}', _("Vertical margins ({})")),
('mt-{}{}', _("Top margin ({})")),
('mr-{}{}', _("Right margin ({})")),
('mb-{}{}', _("Bottom margin ({})")),
('ml-{}{}', _("Left margin ({})")),
]
sizes = list(range(0, 6)) + ['auto']
for bp in Breakpoint.range(Breakpoint.xs, Breakpoint.xl):
if bp == Breakpoint.xs:
choices = [(c.format('', s), l.format('{}'.format(s))) for c, l in choices_format for s in sizes]
choices.insert(0, ('', _("No Margins")))
else:
choices = [(c.format(bp.name + '-', s), l.format('{}'.format(s)))
for c, l in choices_format for s in sizes]
choices.insert(0, ('', _("Inherit from above")))
glossary_fields.append(GlossaryField(
widgets.Select(choices=choices),
label=_("Margins for {breakpoint}").format(breakpoint=bp.label),
name='margins_{}'.format(bp.name),
initial=''
))
return glossary_fields
@property
def paddings(cls):
glossary_fields = []
choices_format = [
('p-{}{}', _("4 sided padding ({})")),
('px-{}{}', _("Horizontal padding ({})")),
('py-{}{}', _("Vertical padding ({})")),
('pt-{}{}', _("Top padding ({})")),
('pr-{}{}', _("Right padding ({})")),
('pb-{}{}', _("Bottom padding ({})")),
('pl-{}{}', _("Left padding ({})")),
]
sizes = range(0, 6)
for bp in Breakpoint.range(Breakpoint.xs, Breakpoint.xl):
if bp == Breakpoint.xs:
choices = [(c.format('', s), l.format('{}'.format(s))) for c, l in choices_format for s in sizes]
choices.insert(0, ('', _("No Padding")))
else:
choices = [(c.format(bp.name + '-', s), l.format('{}rem'.format(s)))
for c, l in choices_format for s in sizes]
choices.insert(0, ('', _("Inherit from above")))
glossary_fields.append(GlossaryField(
widgets.Select(choices=choices),
label=_("Padding for {breakpoint}").format(breakpoint=bp.label),
name='padding_{}'.format(bp.name),
initial=''
))
return glossary_fields
|
Python
| 0.999454
|
@@ -822,698 +822,8 @@
f)%0A%0A
- def Xget_form(self, request, obj=None, **kwargs):%0A glossary_fields = list(kwargs.pop('glossary_fields', self.glossary_fields))%0A choices = %5B(None, _(%22Select CSS%22))%5D%0A choices.extend(%5B(clsname, clsname) for clsname in %5B'a', 'b', 'c'%5D%5D)%0A widget = widgets.Select(choices=choices)%0A glossary_fields.append(GlossaryField(%0A widget,%0A label=_(%22Customized CSS Classes%22),%0A name='extra_css_classes',%0A help_text=_(%22Customized CSS classes to be added to this element.%22)%0A ))%0A%0A kwargs.update(glossary_fields=glossary_fields)%0A return super(BootstrapUtilitiesMixin, self).get_form(request, obj, **kwargs)%0A%0A
|
b514f1c20e479732a0e636bb5fb484abb9f34662
|
fix test that fails certain times of day
|
springboard_iogt/tests/test_filters.py
|
springboard_iogt/tests/test_filters.py
|
from operator import attrgetter
from datetime import datetime, timedelta
from mock import Mock
from pyramid import testing
from springboard.tests import SpringboardTestCase
from springboard.views.base import SpringboardViews
from springboard_iogt import filters
class TestFilters(SpringboardTestCase):
maxDiff = None
def setUp(self):
self.workspace = self.mk_workspace()
self.config = testing.setUp(settings={
'unicore.repos_dir': self.working_dir,
'unicore.content_repo_urls': self.workspace.working_dir,
})
def tearDown(self):
testing.tearDown()
def test_category_dict(self):
categories = self.mk_categories(self.workspace, count=3)
uuids = [category.uuid for category in categories]
views = SpringboardViews(self.mk_request())
cat_dict = filters.category_dict(views.all_categories, uuids + [None])
self.assertEqual(set(cat_dict.keys()), set(uuids))
self.assertEqual(sorted(cat_dict.values(), key=attrgetter('uuid')),
sorted(categories, key=attrgetter('uuid')))
def test_recent_pages(self):
workspaces = [self.workspace,
self.mk_workspace(name='test_recent_pages-2')]
testing.setUp(settings={
'unicore.repos_dir': self.working_dir,
'unicore.content_repo_urls':
'\n'.join(ws.working_dir for ws in workspaces)
})
views = SpringboardViews(self.mk_request())
now = datetime.utcnow()
def set_created_at(workspace, page, i):
page = page.update({'created_at': (
datetime.utcnow() - timedelta(hours=i)).isoformat()
})
workspace.save(page, 'Updated page')
return page
pages_ws1 = self.mk_pages(workspaces[0], count=3, featured=True)
pages_ws2 = self.mk_pages(workspaces[1], count=3, featured=True)
for i, h in enumerate((0, 1, 5)):
pages_ws1[i] = set_created_at(workspaces[0], pages_ws1[i], i=h)
for i, h in enumerate((2, 3, 4)):
pages_ws2[i] = set_created_at(workspaces[1], pages_ws2[i], i=h)
workspaces[0].refresh_index()
workspaces[1].refresh_index()
result = filters.recent_pages(views.all_pages, 'eng_GB', dt=now)
result2 = filters.recent_pages(views.all_pages, 'eng_GB', dt=now)
self.assertEqual(result, result2)
self.assertEqual(
sorted(result, key=attrgetter('uuid')),
sorted(pages_ws1 + [pages_ws2[0]], key=attrgetter('uuid')))
result2 = filters.recent_pages(views.all_pages, 'eng_GB',
dt=now + timedelta(hours=1))
self.assertNotEqual(result, result2)
self.assertEqual(sorted(result, key=attrgetter('uuid')),
sorted(result2, key=attrgetter('uuid')))
def test_content_section(self):
[page] = self.mk_pages(self.workspace, count=1)
section_obj = filters.content_section(page)
self.assertIs(section_obj, None)
page.es_meta = Mock(index='unicore-cms-content-ffl-za-qa')
section_obj = filters.content_section(page)
self.assertEqual(section_obj.slug, 'ffl')
self.assertEqual(section_obj.title, 'Facts for Life')
|
Python
| 0.000026
|
@@ -1534,16 +1534,32 @@
utcnow()
+.replace(hour=2)
%0A%0A
|
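Decoded, the fix is a single call chained onto the reference timestamp in test_recent_pages; pinning the hour (02:00 looks like an arbitrary fixed anchor) keeps the relative-time assertions from depending on the wall-clock hour at which the suite happens to run:

        # patched line in test_recent_pages
        now = datetime.utcnow().replace(hour=2)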
5c1576ae50f18d5ff96a3d564df120465b060e04
|
add tab loop to bundle readme view (#255)
|
conjure/ui/views/bundle_readme_view.py
|
conjure/ui/views/bundle_readme_view.py
|
""" Service Walkthrough view
List out the updated bundle in a cleaner view showing what
charms and their relations will be done.
"""
from glob import glob
import os
from urwid import BoxAdapter, Filler, ListBox, Pile, Text, WidgetWrap
from conjure import utils
from conjure.app_config import app
from ubuntui.ev import EventLoop
from ubuntui.widgets.buttons import PlainButton
from ubuntui.widgets.hr import HR
from ubuntui.utils import Color, Padding
import logging
log = logging.getLogger('conjure')
class BundleReadmeView(WidgetWrap):
def __init__(self, metadata_controller, done_callback, initial_height):
self.metadata_controller = metadata_controller
self.done_callback = done_callback
self.initial_height = initial_height
w = self.build_widgets()
super().__init__(w)
self.pile.focus_position = 1
def selectable(self):
return True
def build_widgets(self):
readme_files = glob(os.path.join(app.config['spell-dir'], 'README.*'))
if len(readme_files) == 0:
self.readme_w = Text("No README found for bundle.")
else:
readme_file = readme_files[0]
if len(readme_files) != 1:
utils.warning("Unexpected: {} files matching README.*"
"- using {}".format(len(readme_files),
readme_file))
with open(readme_file) as rf:
rlines = [Text(l) for l in rf.readlines()]
self.readme_w = BoxAdapter(ListBox(rlines),
self.initial_height)
ws = [Text("About {}:".format(app.config['spell'])),
Padding.right_50(Color.button_primary(
PlainButton("Continue",
self.do_continue),
focus_map='button_primary focus')),
Padding.center(HR()),
Padding.center(self.readme_w, left=2),
Padding.center(HR()),
Padding.center(Text("Use arrow keys to scroll text."))]
self.pile = Pile(ws)
return Padding.center_90(Filler(self.pile, valign="top"))
def handle_readme_updated(self, readme_text_f):
EventLoop.loop.event_loop._loop.call_soon_threadsafe(
self._update_readme_on_main_thread,
readme_text_f)
def _update_readme_on_main_thread(self, readme_text_f):
self.readme_w.set_text(readme_text_f.result().splitlines())
self._invalidate()
def do_continue(self, arg):
self.done_callback()
|
Python
| 0
|
@@ -905,16 +905,263 @@
n True%0A%0A
+ def keypress(self, size, key):%0A if key == 'tab':%0A cur = self.pile.focus_position%0A self.pile.focus_position = 3 if cur == 1 else 1%0A else:%0A return super(BundleReadmeView, self).keypress(size, key)%0A%0A
def
@@ -2332,16 +2332,82 @@
oll text
+ %22%0A %22and TAB to select the button
.%22))%5D%0A%0A
|
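Decoded, the diff adds a keypress handler to BundleReadmeView and extends the on-screen hint. In the Pile built above, position 1 is the Continue button and position 3 is the README box, so TAB toggles focus between the two; every other key is delegated to urwid's default handling. The added method, as it would sit in the class:

    def keypress(self, size, key):
        if key == 'tab':
            # toggle focus between the Continue button (1) and README (3)
            cur = self.pile.focus_position
            self.pile.focus_position = 3 if cur == 1 else 1
        else:
            return super(BundleReadmeView, self).keypress(size, key)

The hint text becomes "Use arrow keys to scroll text. and TAB to select the button." (two adjacent string literals, concatenated exactly as committed).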
5f1e539f5c7a49670c7dee6737595027fba22ad1
|
Remove unused variable
|
cinder/db/migration.py
|
cinder/db/migration.py
|
# Copyright 2010 United States Government as represented by the
# Administrator of the National Aeronautics and Space Administration.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""Database setup and migration commands."""
import os
from alembic import command as alembic_api
from alembic import config as alembic_config
from alembic import migration as alembic_migration
from migrate import exceptions as migrate_exceptions
from migrate.versioning import api as migrate_api
from migrate.versioning import repository as migrate_repo
from oslo_config import cfg
from oslo_db import options
from oslo_log import log as logging
from cinder.db.sqlalchemy import api as db_api
options.set_defaults(cfg.CONF)
LOG = logging.getLogger(__name__)
MIGRATE_INIT_VERSION = 134
MIGRATE_MIGRATIONS_PATH = ALEMBIC_INIT_VERSION = '921e1a36b076'
def _find_migrate_repo():
"""Get the project's change script repository
:returns: An instance of ``migrate.versioning.repository.Repository``
"""
path = os.path.join(
os.path.abspath(os.path.dirname(__file__)), 'legacy_migrations',
)
return migrate_repo.Repository(path)
def _find_alembic_conf():
"""Get the project's alembic configuration
:returns: An instance of ``alembic.config.Config``
"""
path = os.path.join(
os.path.abspath(os.path.dirname(__file__)), 'alembic.ini')
config = alembic_config.Config(os.path.abspath(path))
# we don't want to use the logger configuration from the file, which is
# only really intended for the CLI
# https://stackoverflow.com/a/42691781/613428
config.attributes['configure_logger'] = False
return config
def _is_database_under_migrate_control(engine, repository):
try:
migrate_api.db_version(engine, repository)
return True
except migrate_exceptions.DatabaseNotControlledError:
return False
def _is_database_under_alembic_control(engine):
with engine.connect() as conn:
context = alembic_migration.MigrationContext.configure(conn)
return bool(context.get_current_revision())
def _init_alembic_on_legacy_database(engine, repository, config):
"""Init alembic in an existing environment with sqlalchemy-migrate."""
LOG.info(
'The database is still under sqlalchemy-migrate control; '
'applying any remaining sqlalchemy-migrate-based migrations '
'and fake applying the initial alembic migration'
)
migrate_api.upgrade(engine, repository)
# re-use the connection rather than creating a new one
with engine.begin() as connection:
config.attributes['connection'] = connection
alembic_api.stamp(config, ALEMBIC_INIT_VERSION)
def _upgrade_alembic(engine, config, version):
# re-use the connection rather than creating a new one
with engine.begin() as connection:
config.attributes['connection'] = connection
alembic_api.upgrade(config, version or 'head')
def db_version():
"""Get database version."""
repository = _find_migrate_repo()
engine = db_api.get_engine()
migrate_version = None
if _is_database_under_migrate_control(engine, repository):
migrate_version = migrate_api.db_version(engine, repository)
alembic_version = None
if _is_database_under_alembic_control(engine):
with engine.connect() as conn:
m_context = alembic_migration.MigrationContext.configure(conn)
alembic_version = m_context.get_current_revision()
return alembic_version or migrate_version
def db_sync(version=None, engine=None):
"""Migrate the database to `version` or the most recent version.
We're currently straddling two migration systems, sqlalchemy-migrate and
alembic. This handles both by ensuring we switch from one to the other at
the appropriate moment.
"""
# if the user requested a specific version, check if it's an integer: if
# so, we're almost certainly in sqlalchemy-migrate land and won't support
# that
if version is not None and version.isdigit():
raise ValueError(
'You requested an sqlalchemy-migrate database version; this is '
'no longer supported'
)
if engine is None:
engine = db_api.get_engine()
repository = _find_migrate_repo()
config = _find_alembic_conf()
# discard the URL encoded in alembic.ini in favour of the URL configured
# for the engine by the database fixtures, casting from
# 'sqlalchemy.engine.url.URL' to str in the process. This returns a
# RFC-1738 quoted URL, which means that a password like "foo@" will be
# turned into "foo%40". This in turns causes a problem for
# set_main_option() because that uses ConfigParser.set, which (by design)
# uses *python* interpolation to write the string out ... where "%" is the
# special python interpolation character! Avoid this mismatch by quoting
# all %'s for the set below.
engine_url = str(engine.url).replace('%', '%%')
config.set_main_option('sqlalchemy.url', str(engine_url))
# if we're in a deployment where sqlalchemy-migrate is already present,
# then apply all the updates for that and fake apply the initial alembic
# migration; if we're not then 'upgrade' will take care of everything
# this should be a one-time operation
if (
_is_database_under_migrate_control(engine, repository) and
not _is_database_under_alembic_control(engine)
):
_init_alembic_on_legacy_database(engine, repository, config)
# apply anything later
LOG.info('Applying migration(s)')
_upgrade_alembic(engine, config, version)
LOG.info('Migration(s) applied')
|
Python
| 0.000002
|
@@ -1321,34 +1321,8 @@
134%0A
-MIGRATE_MIGRATIONS_PATH =
ALEM
|
67d0388102c2bbf7abff17a23979bbfd02940ee1
|
fix gmock/gtest installation
|
build/fbcode_builder/specs/gmock.py
|
build/fbcode_builder/specs/gmock.py
|
#!/usr/bin/env python
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
def fbcode_builder_spec(builder):
return {
'steps': [
# google mock also provides the gtest libraries
builder.github_project_workdir('google/googletest', 'googlemock/build'),
builder.cmake_install('google/googletest'),
],
}
|
Python
| 0.000005
|
@@ -208,95 +208,138 @@
-return %7B%0A 'steps': %5B%0A # google mock also provides the gtest libraries
+builder.add_option(%0A 'google/googletest:cmake_defines',%0A %7B'BUILD_GTEST': 'ON'%7D%0A )%0A return %7B%0A 'steps': %5B
%0A
@@ -404,19 +404,8 @@
', '
-googlemock/
buil
|
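Decoded, the spec now configures googletest from the repository root instead of googlemock/build and passes BUILD_GTEST=ON through cmake_defines, presumably so the standalone gtest libraries get built and installed alongside gmock. The patched function in full:

def fbcode_builder_spec(builder):
    # force the gtest targets on; they are not built by default when
    # configuring the unified googletest repository
    builder.add_option(
        'google/googletest:cmake_defines',
        {'BUILD_GTEST': 'ON'}
    )
    return {
        'steps': [
            builder.github_project_workdir('google/googletest', 'build'),
            builder.cmake_install('google/googletest'),
        ],
    }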
e65dd84adb878d073383bb681e4a38c8b94444e4
|
Fix from_torch paths
|
src/models/from_torch.py
|
src/models/from_torch.py
|
# coding=utf-8
from __future__ import absolute_import, division, print_function
import numpy as np
import os
import pickle as pkl
import torchfile
CONV_TRANSPOSE = (2, 3, 1, 0)
def from_torch(torch_model):
def expand_module(module):
if 'weight' in module._obj or b'weight' in module._obj:
return [module]
if 'modules' in module._obj or b'modules' in module._obj:
# return module._obj[b'modules']
lst = [expand_module(submodule) for submodule in module._obj[b'modules']]
return [sublist for item in lst for sublist in item]
return [None]
enet = torchfile.load(filename=torch_model)
all_enet_modules = [module for module in expand_module(enet) if module is not None]
all_enet_modules = [module for module in all_enet_modules if b'weight' in module._obj]
weights = []
# for module in all_enet_modules:
for module in all_enet_modules:
item = {}
if module.torch_typename() == b'cudnn.SpatialConvolution':
item['weight'] = module[b'weight']
if b'bias' in module._obj:
item['bias'] = module[b'bias']
elif module.torch_typename() == b'nn.SpatialBatchNormalization':
item = {
'gamma': module[b'weight'],
'beta': module[b'bias'],
'moving_mean': module[b'running_mean'],
'moving_variance': module[b'running_var'],
}
elif module.torch_typename() == b'nn.PReLU':
weight = np.expand_dims(np.expand_dims(module[b'weight'], 0), 0)
item['weight'] = weight
elif module.torch_typename() == b'nn.SpatialDilatedConvolution':
item['weight'] = module[b'weight']
if b'bias' in module._obj:
item['bias'] = module[b'bias']
elif module.torch_typename() == b'nn.SpatialFullConvolution':
item['weight'] = module[b'weight']
if b'bias' in module._obj:
item['bias'] = module[b'bias']
else:
print('Unhandled torch layer: {}'.format(module.torch_typename()))
item['torch_typename'] = module.torch_typename().decode()
if 'Convolution' in item['torch_typename']:
item['weight'] = np.transpose(item['weight'], CONV_TRANSPOSE)
weights.append(item)
return weights
if __name__ == "__main__":
DIR_PATH = os.path.dirname(os.path.realpath(__file__))
torch_model = os.path.join(DIR_PATH, os.pardir, os.pardir, 'models', 'pretrained', 'model-best.net')
weights = from_torch(torch_model=torch_model)
# weights = [module['weight'] for module in all_enet_modules]
with open('../models/pretrained/torch_enet.pkl', 'wb') as fout:
pkl.dump(obj=weights, file=fout)
|
Python
| 0.000007
|
@@ -2520,18 +2520,8 @@
dir,
- 'models',
'pr
@@ -2683,18 +2683,69 @@
pen(
-'../models
+os.path.join(DIR_PATH, os.pardir, os.pardir, 'pretrained', '.
/pre
@@ -2767,16 +2767,17 @@
net.pkl'
+)
, 'wb')
|
03b2337c9786975de242a2fad4f400a4fc99de1c
|
Add aspect ratio setting
|
notes-processor.py
|
notes-processor.py
|
#!/usr/bin/env python
# Python script to automatically convert poor-quality
# photos of paper with writing on them into duotone images.
import sys
import cv2
import numpy as np
import os.path
import multiprocessing
from itertools import starmap
NBHD_SIZE = 19
UNSHARP_T = 48
ADAPT_T = 24
INVERT = False
def processImage(fname):
print "Processing %s" % fname
source = cv2.imread(fname,cv2.CV_LOAD_IMAGE_GRAYSCALE)
if INVERT:
source = 255 - source
return bitone(warpSheet(source))
def bitone(image):
'''
Convert a greyscale image to a bitone image,
in such a way that we preserve as much detail as possible,
and have the least amount of speckles.
'''
# First, sharpen the image: unsharp mask w/ threshold.
blur = cv2.blur(image,(NBHD_SIZE,NBHD_SIZE))
diff = cv2.absdiff(image,blur)
# Apparently OpenCV doesn't have a way to
# apply a mask to a weighted sum, so we do it ourselves.
_,mask = cv2.threshold(blur,UNSHARP_T,1,cv2.THRESH_BINARY)
blur = cv2.multiply(blur,mask)
sharpened = cv2.addWeighted(image,2,blur,-1,0)
cv2.imwrite('sharp.png',sharpened)
# Now threshold the sharpened image.
thresh = cv2.adaptiveThreshold(sharpened, 255,
cv2.ADAPTIVE_THRESH_MEAN_C,
cv2.THRESH_BINARY,
NBHD_SIZE, ADAPT_T)
return thresh
def findPaper(image):
'''
Try to find a sheet of paper contained in the image.
Return the contour or raise ValueError if none found.
'''
squares = []
# Blur image to emphasize bigger features.
blur = cv2.blur(image,(2,2))
retval, edges = cv2.threshold(blur,0,255,
cv2.THRESH_BINARY | cv2.THRESH_OTSU)
contours, hierarchy = cv2.findContours(edges,
cv2.RETR_LIST,
cv2.CHAIN_APPROX_SIMPLE)
for c in contours:
clen = cv2.arcLength(c,True)
c = cv2.approxPolyDP(c,0.02*clen,True)
area = abs(cv2.contourArea(c))
if len(c) == 4 and \
0.1*edges.size <= area <= 0.9*edges.size and \
cv2.isContourConvex(c):
squares.append(c)
return max(squares,key=lambda s: cv2.arcLength(s,True))
def warpSheet(image):
'''
Automatically crops an image to paper size if possible.
'''
try:
sheet = findPaper(image)
except ValueError:
return image
h, w = image.shape
src = sheet[::,0,::].astype('float32')
# Compute distances from topleft corner (0,0)
# to find topleft and bottomright
d = np.sum(np.abs(src)**2,axis=-1)**0.5
t_l = np.argmin(d)
b_r = np.argmax(d)
# Compute distances from topright corner (w,0)
# to find topright and bottomleft
y = np.array([[w,0],]*4)
d = np.sum(np.abs(src-y)**2,axis=-1)**0.5
t_r = np.argmin(d)
b_l = np.argmax(d)
#Now assemble these together
if h >= w:
destH, destW = h, int(h*8.5/11.0)
else:
destW, destH = h, int(h*8.5/11.0)
dest = np.zeros(src.shape,dtype='float32')
dest[t_l] = np.array([0,0])
dest[t_r] = np.array([destW,0])
dest[b_l] = np.array([0,destH])
dest[b_r] = np.array([destW,destH])
transform = cv2.getPerspectiveTransform(src,dest)
return cv2.warpPerspective(image,transform,(destW,destH))
def rename(originalName):
d,f = os.path.split(originalName)
f,ext = os.path.splitext(f)
return os.path.join(d,'p_%s.png' %f)
if __name__ == "__main__":
if len(sys.argv) == 1:
print "Usage: notes-processor.py [-i] files"
print "-i inverts images."
else:
if sys.argv[1] == "-i":
INVERT = True
files = sys.argv[2:]
else:
files = sys.argv[1:]
pool = multiprocessing.Pool()
processed = pool.map(processImage,files)
newnames = map(rename,files)
for n,i in zip(newnames,processed):
cv2.imwrite(n,i)
|
Python
| 0
|
@@ -301,16 +301,34 @@
= False
+%0AASPECT = 8.5/11.0
%0A%0Adef pr
@@ -3022,32 +3022,30 @@
= h, int(h*
-8.5/11.0
+ASPECT
)%0A else:%0A
@@ -3076,24 +3076,22 @@
, int(h*
-8.5/11.0
+ASPECT
)%0A de
|
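Decoded, the hard-coded US-letter ratio 8.5/11.0 moves into a module-level constant next to the other tunables, and both branches of warpSheet read it:

INVERT = False
ASPECT = 8.5/11.0   # new: target paper aspect ratio (US letter)

    # in warpSheet:
    if h >= w:
        destH, destW = h, int(h*ASPECT)
    else:
        destW, destH = h, int(h*ASPECT)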
f5d419c95ea86719afff9924dbd837f574c216ae
|
Revert "Restore proper default color for Toolbar"
|
kivymd/toolbar.py
|
kivymd/toolbar.py
|
# -*- coding: utf-8 -*-
from kivy.clock import Clock
from kivy.lang import Builder
from kivy.metrics import dp
from kivy.properties import ListProperty, StringProperty, OptionProperty
from kivy.uix.boxlayout import BoxLayout
from kivymd.backgroundcolorbehavior import BackgroundColorBehavior
from kivymd.button import MDIconButton
from kivymd.theming import ThemableBehavior
from kivymd.elevationbehavior import ElevationBehavior
Builder.load_string('''
#:import m_res kivymd.material_resources
<Toolbar>
size_hint_y: None
height: root.theme_cls.standard_increment
background_color: root.background_color
padding: [root.theme_cls.horizontal_margins - dp(12), 0]
opposite_colors: True
elevation: 6
BoxLayout:
id: left_actions
orientation: 'horizontal'
size_hint_x: None
padding: [0, (self.height - dp(48))/2]
BoxLayout:
padding: dp(12), 0
MDLabel:
font_style: 'Title'
opposite_colors: root.opposite_colors
theme_text_color: root.title_theme_color
text_color: root.title_color
text: root.title
shorten: True
shorten_from: 'right'
BoxLayout:
id: right_actions
orientation: 'horizontal'
size_hint_x: None
padding: [0, (self.height - dp(48))/2]
''')
class Toolbar(ThemableBehavior, ElevationBehavior, BackgroundColorBehavior,
BoxLayout):
left_action_items = ListProperty()
"""The icons on the left of the Toolbar.
To add one, append a list like the following:
['icon_name', callback]
where 'icon_name' is a string that corresponds to an icon definition and
callback is the function called on a touch release event.
"""
right_action_items = ListProperty()
"""The icons on the left of the Toolbar.
Works the same way as :attr:`left_action_items`
"""
title = StringProperty()
"""The text displayed on the Toolbar."""
title_theme_color = OptionProperty(None, allownone=True,
options=['Primary', 'Secondary', 'Hint',
'Error', 'Custom'])
title_color = ListProperty(None, allownone=True)
background_color = ListProperty(None, allownone=True)
def __init__(self, **kwargs):
super(Toolbar, self).__init__(**kwargs)
if self.background_color == []:
self.background_color = self.theme_cls.primary_color
Clock.schedule_once(
lambda x: self.on_left_action_items(0, self.left_action_items))
Clock.schedule_once(
lambda x: self.on_right_action_items(0,
self.right_action_items))
def on_left_action_items(self, instance, value):
self.update_action_bar(self.ids['left_actions'], value)
def on_right_action_items(self, instance, value):
self.update_action_bar(self.ids['right_actions'], value)
def update_action_bar(self, action_bar, action_bar_items):
action_bar.clear_widgets()
new_width = 0
for item in action_bar_items:
new_width += dp(48)
action_bar.add_widget(MDIconButton(icon=item[0],
on_release=item[1],
opposite_colors=True,
text_color=self.title_color,
theme_text_color=self.title_theme_color))
action_bar.width = new_width
|
Python
| 0
|
@@ -2283,36 +2283,28 @@
roperty(
-None, allownone=True
+%5B0, 0, 0, 1%5D
)%0A%0A d
@@ -2384,113 +2384,8 @@
gs)%0A
- if self.background_color == %5B%5D:%0A self.background_color = self.theme_cls.primary_color%0A
|
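Decoded, the revert swaps the allow-none property back to a plain opaque-black default and drops the theme-derived fallback from __init__:

    # Toolbar, patched
    background_color = ListProperty([0, 0, 0, 1])

    def __init__(self, **kwargs):
        super(Toolbar, self).__init__(**kwargs)
        # the theme_cls.primary_color fallback is gone
        Clock.schedule_once(
            lambda x: self.on_left_action_items(0, self.left_action_items))
        Clock.schedule_once(
            lambda x: self.on_right_action_items(0,
                                                 self.right_action_items))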
a84ccc871b5f85f80844d1cd413ddbf44194da17
|
Add version information to documentation
|
doc/conf.py
|
doc/conf.py
|
# Configuration file for the Sphinx documentation builder.
#
# This file only contains a selection of the most common options. For a full
# list see the documentation:
# https://www.sphinx-doc.org/en/master/usage/configuration.html
# -- Path setup --------------------------------------------------------------
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
#
# import os
# import sys
# sys.path.insert(0, os.path.abspath('.'))
# -- Project information -----------------------------------------------------
project = 'ibei'
copyright = '2022, Joshua Ryan Smith'
author = 'Joshua Ryan Smith'
# -- General configuration ---------------------------------------------------
# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom
# ones.
extensions = [
'sphinx.ext.autodoc',
'sphinx.ext.mathjax',
'sphinx.ext.napoleon',
'sphinxcontrib.bibtex'
]
# Configuration for `autodoc`.
autodoc_member_order = "bysource"
# Configuration for `sphinxcontrib-bibtex`.
bibtex_bibfiles = ['bib.bib']
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
# This pattern also affects html_static_path and html_extra_path.
exclude_patterns = []
# -- Options for HTML output -------------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
#
html_theme = 'sphinx_rtd_theme'
html_sidebars = {
'**': [
'about.html',
'navigation.html',
'searchbox.html',
]
}
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['_static']
|
Python
| 0
|
@@ -1,24 +1,53 @@
+# coding=utf-8%0Aimport ibei%0A%0A%0A
# Configuration file for
@@ -809,16 +809,43 @@
Smith'%0A
+version = ibei.__version__%0A
%0A%0A# -- G
|
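Decoded, conf.py now imports the package being documented and derives the version from it, so the docs cannot drift from the released number. The top of the patched file (header comments elided):

# coding=utf-8
import ibei


# -- Project information -----------------------------------------------------
project = 'ibei'
copyright = '2022, Joshua Ryan Smith'
author = 'Joshua Ryan Smith'
version = ibei.__version__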
6ffd01cd9578d008d757da86755ca1c3927bf910
|
remove default JSONEncoder
|
backend/geonature/app.py
|
backend/geonature/app.py
|
"""
Démarrage de l'application
"""
import logging
from json import JSONEncoder
from flask import Flask
from flask_mail import Message
from flask_cors import CORS
from sqlalchemy import exc as sa_exc
from flask_sqlalchemy import before_models_committed
from pkg_resources import iter_entry_points
from geonature.utils.config import config
from geonature.utils.env import MAIL, DB, MA, migrate
from geonature.utils.logs import config_loggers
from geonature.utils.module import import_backend_enabled_modules
@migrate.configure
def configure_alembic(alembic_config):
"""
This function add to the 'version_locations' parameter of the alembic config the
'migrations' entry point value of the 'gn_module' group for all modules having such entry point.
Thus, alembic will find migrations of all installed geonature modules.
"""
version_locations = alembic_config.get_main_option('version_locations', default='').split()
for entry_point in iter_entry_points('gn_module', 'migrations'):
# TODO: define enabled module in configuration (skip disabled module, raise error on missing module)
_, migrations = str(entry_point).split('=', 1)
version_locations += [ migrations.strip() ]
alembic_config.set_main_option('version_locations', ' '.join(version_locations))
return alembic_config
if config.get('SENTRY_DSN'):
import sentry_sdk
from sentry_sdk.integrations.flask import FlaskIntegration
sentry_sdk.init(
config['SENTRY_DSN'],
integrations=[FlaskIntegration()],
traces_sample_rate=1.0,
)
def create_app(with_external_mods=True, with_flask_admin=True):
app = Flask(__name__, static_folder="../static")
app.config.update(config)
# change default flask JSONEncoder for perf reaseon and because "as_dict" already manage non JSON Types
app.json_encoder = JSONEncoder
# Bind app to DB
DB.init_app(app)
migrate.init_app(app, DB)
MAIL.init_app(app)
# For deleting files on "delete" media
@before_models_committed.connect_via(app)
def on_before_models_committed(sender, changes):
for obj, change in changes:
if change == "delete" and hasattr(obj, "__before_commit_delete__"):
obj.__before_commit_delete__()
# Bind app to MA
MA.init_app(app)
# Pass parameters to the usershub authenfication sub-module, DONT CHANGE THIS
app.config["DB"] = DB
# Pass parameters to the submodules
app.config["MA"] = MA
# Pass the ID_APP to the submodule to avoid token conflict between app on the same server
app.config["ID_APP"] = app.config["ID_APPLICATION_GEONATURE"]
# set logging config
config_loggers(app.config)
if with_flask_admin:
from geonature.core.admin.admin import admin
admin.init_app(app)
with app.app_context():
from pypnusershub.routes import routes
app.register_blueprint(routes, url_prefix="/auth")
from pypn_habref_api.routes import routes
app.register_blueprint(routes, url_prefix="/habref")
from pypnusershub import routes_register
app.register_blueprint(routes_register.bp, url_prefix="/pypn/register")
from pypnnomenclature.routes import routes
app.register_blueprint(routes, url_prefix="/nomenclatures")
from geonature.core.gn_commons.routes import routes
app.register_blueprint(routes, url_prefix="/gn_commons")
from geonature.core.gn_permissions.routes import routes
app.register_blueprint(routes, url_prefix="/permissions")
from geonature.core.gn_permissions.backoffice.views import routes
app.register_blueprint(routes, url_prefix="/permissions_backoffice")
from geonature.core.routes import routes
app.register_blueprint(routes, url_prefix="")
from geonature.core.users.routes import routes
app.register_blueprint(routes, url_prefix="/users")
from geonature.core.gn_synthese.routes import routes
app.register_blueprint(routes, url_prefix="/synthese")
from geonature.core.gn_meta.routes import routes
app.register_blueprint(routes, url_prefix="/meta")
from geonature.core.ref_geo.routes import routes
app.register_blueprint(routes, url_prefix="/geo")
from geonature.core.gn_exports.routes import routes
app.register_blueprint(routes, url_prefix="/exports")
from geonature.core.auth.routes import routes
app.register_blueprint(routes, url_prefix="/gn_auth")
from geonature.core.gn_monitoring.routes import routes
app.register_blueprint(routes, url_prefix="/gn_monitoring")
# Errors
from geonature.core.errors import routes
CORS(app, supports_credentials=True)
# Emails configuration
if app.config["MAIL_CONFIG"]:
conf = app.config.copy()
conf.update(app.config["MAIL_CONFIG"])
app.config = conf
MAIL.init_app(app)
app.config['TEMPLATES_AUTO_RELOAD'] = True
# disable cache for downloaded files (PDF file stat for ex)
app.config['SEND_FILE_MAX_AGE_DEFAULT'] = 0
# Loading third-party modules
if with_external_mods:
for module_object, module_config, module_blueprint in import_backend_enabled_modules():
app.config[module_config['MODULE_CODE']] = module_config
app.register_blueprint(module_blueprint, url_prefix=module_config['MODULE_URL'])
_app = app
return app
|
Python
| 0.000005
|
@@ -49,38 +49,8 @@
ng%0A%0A
-from json import JSONEncoder%0A%0A
from
@@ -1702,151 +1702,8 @@
fig)
-%0A # change default flask JSONEncoder for perf reaseon and because %22as_dict%22 already manage non JSON Types%0A app.json_encoder = JSONEncoder
%0A%0A
|
e3f5900d62f702bc543ada6991648cd98e7ce917
|
add libmagic rule for debian jessie's `file`
|
snoop/content_types.py
|
snoop/content_types.py
|
import mimetypes
import magic
mimetypes.add_type('message/x-emlx', '.emlx')
mimetypes.add_type('message/x-emlxpart', '.emlxpart')
mimetypes.add_type('application/vnd.ms-outlook', '.msg')
mimetypes.add_type('application/x-hoover-pst', '.pst')
mimetypes.add_type('application/x-hoover-pst', '.ost')
mimetypes.add_type('application/x-pgp-encrypted-ascii', '.asc')
mimetypes.add_type('application/x-pgp-encrypted-binary', '.pgp')
def guess_content_type(filename):
return mimetypes.guess_type(filename, strict=False)[0] or ''
FILE_TYPES = {
'application/x-directory': 'folder',
'application/pdf': 'pdf',
'text/plain': 'text',
'text/html': 'html',
'application/x-hush-pgp-encrypted-html-body': 'html',
'message/x-emlx': 'email',
'message/rfc822': 'email',
'application/vnd.ms-outlook': 'email',
'application/x-hoover-pst': 'email-archive',
'application/msword': 'doc',
'application/vnd.openxmlformats-officedocument.wordprocessingml.document': 'doc',
'application/vnd.openxmlformats-officedocument.wordprocessingml.template': 'doc',
'application/vnd.ms-word.document.macroEnabled.12': 'doc',
'application/vnd.ms-word.template.macroEnabled.12': 'doc',
'application/vnd.oasis.opendocument.text': 'doc',
'application/vnd.oasis.opendocument.text-template': 'doc',
'application/rtf': 'doc',
'application/vnd.ms-excel': 'xls',
'application/vnd.openxmlformats-officedocument.spreadsheetml.sheet': 'xls',
'application/vnd.openxmlformats-officedocument.spreadsheetml.template': 'xls',
'application/vnd.ms-excel.sheet.macroEnabled.12': 'xls',
'application/vnd.ms-excel.template.macroEnabled.12': 'xls',
'application/vnd.ms-excel.addin.macroEnabled.12': 'xls',
'application/vnd.ms-excel.sheet.binary.macroEnabled.12': 'xls',
'application/vnd.oasis.opendocument.spreadsheet-template': 'xls',
'application/vnd.oasis.opendocument.spreadsheet': 'xls',
'application/vnd.openxmlformats-officedocument.presentationml.presentation': 'ppt',
'application/vnd.openxmlformats-officedocument.presentationml.template': 'ppt',
'application/vnd.openxmlformats-officedocument.presentationml.slideshow': 'ppt',
'application/vnd.ms-powerpoint': 'ppt',
'application/vnd.ms-powerpoint.addin.macroEnabled.12': 'ppt',
'application/vnd.ms-powerpoint.presentation.macroEnabled.12': 'ppt',
'application/vnd.ms-powerpoint.template.macroEnabled.12': 'ppt',
'application/vnd.ms-powerpoint.slideshow.macroEnabled.12': 'ppt',
'application/vnd.oasis.opendocument.presentation': 'ppt',
'application/vnd.oasis.opendocument.presentation-template': 'ppt',
'application/zip': 'archive',
'application/rar': 'archive',
'application/x-7z-compressed': 'archive',
'application/x-tar': 'archive',
'application/x-bzip2': 'archive',
'application/x-zip': 'archive',
'application/x-gzip': 'archive',
'application/x-zip-compressed': 'archive',
'application/x-rar-compressed': 'archive',
}
MAGIC_DESCRIPTION_TYPES = {
"Microsoft Outlook email folder (>=2003)": "application/x-hoover-pst",
}
MAGIC_READ_LIMIT = 24 * 1024 * 1024
def libmagic_guess_content_type(file, filesize):
buffer = file.read(min(MAGIC_READ_LIMIT, filesize))
content_type = magic.from_buffer(buffer, mime=True)
if content_type in FILE_TYPES:
return content_type
magic_description = magic.from_buffer(buffer, mime=False)
return MAGIC_DESCRIPTION_TYPES.get(magic_description, content_type or '')
def guess_filetype(doc):
content_type = doc.content_type.split(';')[0]
if content_type in FILE_TYPES:
return FILE_TYPES[content_type]
else:
supertype = content_type.split('/')[0]
if supertype in ['audio', 'video', 'image']:
return supertype
return None
|
Python
| 0
|
@@ -3115,16 +3115,89 @@
r-pst%22,%0A
+ %22Composite Document File V2 Document%22: %22application/vnd.ms-outlook%22,%0A
%7D%0A%0AMAGIC
|
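Decoded, the description table gains one entry. On Debian jessie, `file` labels OLE2 containers (which include Outlook .msg files) only as "Composite Document File V2 Document", so that description is now mapped to the Outlook MIME type as well:

MAGIC_DESCRIPTION_TYPES = {
    "Microsoft Outlook email folder (>=2003)": "application/x-hoover-pst",
    "Composite Document File V2 Document": "application/vnd.ms-outlook",
}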
c2d622e1d800c7d89c20ba224550a1692d5e497f
|
update intersphinx for sqlalchemy
|
doc/conf.py
|
doc/conf.py
|
# -*- coding: utf-8 -*-
import os
import sys
import alagitpull
# Get the project root dir, which is the parent dir of this
cwd = os.getcwd()
project_root = os.path.dirname(cwd)
sys.path.insert(0, project_root)
sys.path.append(os.path.abspath(
os.path.join(os.path.dirname(__file__), "_ext")))
# package data
about = {}
with open("../unihan_db/__about__.py") as fp:
exec(fp.read(), about)
extensions = [
'sphinx.ext.autodoc',
'sphinx.ext.intersphinx',
'sphinx.ext.todo',
'releases',
'alagitpull',
]
releases_unstable_prehistory = True
releases_document_name = "history"
releases_issue_uri = "https://github.com/cihai/unihan-db/issues/%s"
releases_release_uri = "https://github.com/cihai/unihan-db/tree/v%s"
templates_path = ['_templates']
source_suffix = '.rst'
master_doc = 'index'
project = about['__title__']
copyright = about['__copyright__']
version = '%s' % ('.'.join(about['__version__'].split('.'))[:2])
release = '%s' % (about['__version__'])
exclude_patterns = ['_build']
pygments_style = 'sphinx'
html_theme_path = [alagitpull.get_path()]
html_favicon = 'favicon.ico'
html_theme = 'alagitpull'
html_sidebars = {
'**': [
'about.html',
'navigation.html',
'relations.html',
'more.html',
'searchbox.html',
]
}
html_theme_options = {
'logo': 'img/cihai.svg',
'github_user': 'cihai',
'github_repo': 'unihan-db',
'github_type': 'star',
'github_banner': True,
'projects': alagitpull.projects,
'project_name': 'db',
}
html_theme_path = ['_themes']
html_static_path = ['_static']
htmlhelp_basename = '%sdoc' % about['__title__']
latex_documents = [
('index', '{0}.tex'.format(about['__package_name__']),
'{0} Documentation'.format(about['__title__']),
about['__author__'], 'manual'),
]
man_pages = [
('index', about['__package_name__'],
'{0} Documentation'.format(about['__title__']),
about['__author__'], 1),
]
texinfo_documents = [
('index', '{0}'.format(about['__package_name__']),
'{0} Documentation'.format(about['__title__']),
about['__author__'], about['__package_name__'],
about['__description__'], 'Miscellaneous'),
]
intersphinx_mapping = {
'python': ('http://docs.python.org/', None),
'sphinx': ('http://sphinx.readthedocs.org/en/latest/', None),
'sqlalchemy': ('http://sqlalchemy.readthedocs.org/en/latest/', None),
}
|
Python
| 0
|
@@ -2360,16 +2360,21 @@
'http://
+docs.
sqlalche
@@ -2376,28 +2376,16 @@
alchemy.
-readthedocs.
org/en/l
|
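Decoded, only the SQLAlchemy inventory URL changes, from the readthedocs mirror to the project's own docs host:

intersphinx_mapping = {
    'python': ('http://docs.python.org/', None),
    'sphinx': ('http://sphinx.readthedocs.org/en/latest/', None),
    'sqlalchemy': ('http://docs.sqlalchemy.org/en/latest/', None),
}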
94c09cb1442f064496a80be7f49781f640bd3a70
|
Merge [14531] from 1.0-stable
|
sample-plugins/workflow/StatusFixer.py
|
sample-plugins/workflow/StatusFixer.py
|
# -*- coding: utf-8 -*-
#
# Copyright (C) 2007-2013 Edgewall Software
# Copyright (C) 2007 Eli Carter <retracile@gmail.com>
# All rights reserved.
#
# This software is licensed as described in the file COPYING, which
# you should have received as part of this distribution. The terms
# are also available at http://trac.edgewall.com/license.html.
#
# This software consists of voluntary contributions made by many
# individuals. For the exact contribution history, see the revision
# history and logs, available at http://trac.edgewall.org/.
from genshi.builder import tag
from trac.core import Component, implements
from trac.ticket.api import ITicketActionController, TicketSystem
from trac.perm import IPermissionRequestor
revision = "$Rev$"
url = "$URL$"
class StatusFixerActionController(Component):
"""Provides the admin with a way to correct a ticket's status.
This plugin is especially useful when you made changes to your workflow,
and some ticket status are no longer valid. The tickets that are in those
status can then be set to some valid state.
Don't forget to add `StatusFixerActionController` to the workflow
option in the `[ticket]` section in TracIni.
If there is no other workflow option, the line will look like this:
{{{
workflow = ConfigurableTicketWorkflow,StatusFixerActionController
}}}
"""
implements(ITicketActionController, IPermissionRequestor)
# IPermissionRequestor methods
def get_permission_actions(self):
return ['TICKET_STATUSFIX']
# ITicketActionController methods
def get_ticket_actions(self, req, ticket):
actions = []
if 'TICKET_STATUSFIX' in req.perm(ticket.resource):
actions.append((0, 'force_status'))
return actions
def get_all_status(self):
"""Return all the status that are present in the database,
so that queries for status no longer in use can be made.
"""
return [status for status, in
self.env.db_query("SELECT DISTINCT status FROM ticket")]
def render_ticket_action_control(self, req, ticket, action):
# Need to use the list of all status so you can't manually set
# something to an invalid state.
selected_value = req.args.get('force_status_value', 'new')
all_status = TicketSystem(self.env).get_all_status()
render_control = tag.select(
[tag.option(x, selected=(x == selected_value and 'selected' or
None)) for x in all_status],
id='force_status_value', name='force_status_value')
return ("force status to:", render_control,
"The next status will be the selected one")
def get_ticket_changes(self, req, ticket, action):
return {'status': req.args.get('force_status_value')}
def apply_action_side_effects(self, req, ticket, action):
pass
|
Python
| 0.000001
|
@@ -2638,17 +2638,16 @@
tatus to
-:
%22, rende
|
ba89dc2008616d1fb11a187484596cb7af0ec29f
|
Add autosectionlabel extension
|
doc/conf.py
|
doc/conf.py
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
#
# remoteStorage.js documentation build configuration file, created by
# sphinx-quickstart on Fri Aug 25 14:11:51 2017. # # This file is execfile()d with the current directory set to its
# containing dir.
#
# Note that not all possible configuration values are present in this
# autogenerated file.
#
# All configuration values have a default; values that are commented out
# serve to show the default.
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
#
# import os
# import sys
# sys.path.insert(0, os.path.abspath('.'))
# -- General configuration ------------------------------------------------
# If your documentation needs a minimal Sphinx version, state it here.
#
# needs_sphinx = '1.0'
# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom
# ones.
extensions = ['sphinx_js', 'sphinx_issues']
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
# Used to extract JSDoc function/class docs from source
js_source_path = '../src'
primary_domain = 'js'
# GitHub issues config
issues_github_path = 'remotestorage/remotestorage.js'
# The suffix(es) of source filenames.
# You can specify multiple suffix as a list of string:
#
# source_suffix = ['.rst', '.md']
# source_suffix = '.rst'
# The master toctree document.
master_doc = 'index'
# General information about the project.
project = 'remoteStorage.js'
copyright = '2017, RS Contributors'
author = 'RS Contributors'
# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
#
# The short X.Y version.
version = '1.0.0-alpha7'
# The full version, including alpha/beta/rc tags.
release = '1.0.0-alpha7'
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#
# This is also used if you do content translation via gettext catalogs.
# Usually you set "language" from the command line for these cases.
language = None
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
# This patterns also effect to html_static_path and html_extra_path
exclude_patterns = ['_build', 'Thumbs.db', '.DS_Store']
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'
# Default language for code highlighting
highlight_language = 'javascript'
# If true, `todo` and `todoList` produce output, else they produce nothing.
todo_include_todos = False
# -- Options for HTML output ----------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
#
html_theme = "sphinx_rtd_theme"
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
#
# html_theme_options = {}
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['_static']
# Custom sidebar templates, must be a dictionary that maps document names
# to template names.
#
# This is required for the alabaster theme
# refs: http://alabaster.readthedocs.io/en/latest/installation.html#sidebars
html_sidebars = {
'**': [
'about.html',
'navigation.html',
'relations.html', # needs 'show_related': True theme option to display
'searchbox.html',
'donate.html',
]
}
# -- Options for HTMLHelp output ------------------------------------------
# Output file base name for HTML help builder.
htmlhelp_basename = 'remoteStoragejsdoc'
# -- Options for LaTeX output ---------------------------------------------
latex_elements = {
# The paper size ('letterpaper' or 'a4paper').
#
# 'papersize': 'letterpaper',
# The font size ('10pt', '11pt' or '12pt').
#
# 'pointsize': '10pt',
# Additional stuff for the LaTeX preamble.
#
# 'preamble': '',
# Latex figure (float) alignment
#
# 'figure_align': 'htbp',
}
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title,
# author, documentclass [howto, manual, or own class]).
latex_documents = [
(master_doc, 'remoteStoragejs.tex', 'remoteStorage.js Documentation',
'RS Community', 'manual'),
]
# -- Options for manual page output ---------------------------------------
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [
(master_doc, 'remotestoragejs', 'remoteStorage.js Documentation',
[author], 1)
]
# -- Options for Texinfo output -------------------------------------------
# Grouping the document tree into Texinfo files. List of tuples
# (source start file, target name, title, author,
# dir menu entry, description, category)
texinfo_documents = [
(master_doc, 'remoteStoragejs', 'remoteStorage.js Documentation',
author, 'remoteStoragejs', 'One line description of project.',
'Miscellaneous'),
]
|
Python
| 0
|
@@ -1092,16 +1092,52 @@
ions = %5B
+%0A 'sphinx.ext.autosectionlabel',%0A
'sphinx_
@@ -1140,16 +1140,18 @@
inx_js',
+%0A
'sphinx
@@ -1158,17 +1158,59 @@
_issues'
-%5D
+%0A%5D%0A%0Aautosectionlabel_prefix_document = True
%0A%0A# Add
|
7611a4b3e064868c37b9f52778c8fe9f721e86c5
|
Update namespace monitor with exception handling
|
polyaxon/events/management/commands/monitor_namespace.py
|
polyaxon/events/management/commands/monitor_namespace.py
|
import time
from kubernetes.client.rest import ApiException
from django.conf import settings
from clusters.models import Cluster
from events.management.commands._base_monitor import BaseMonitorCommand
from events.monitors import namespace
from polyaxon_k8s.manager import K8SManager
class Command(BaseMonitorCommand):
help = 'Watch namespace warning and errors events.'
def handle(self, *args, **options):
log_sleep_interval = options['log_sleep_interval']
self.stdout.write(
"Started a new namespace monitor with, "
"log sleep interval: `{}`.".format(log_sleep_interval),
ending='\n')
k8s_manager = K8SManager(namespace=settings.K8S_NAMESPACE, in_cluster=True)
cluster = Cluster.load()
while True:
try:
namespace.run(k8s_manager, cluster)
except ApiException as e:
namespace.logger.error(
"Exception when calling CoreV1Api->list_event_for_all_namespaces: %s\n", e)
time.sleep(log_sleep_interval)
except Exception as e:
namespace.logger.exception("Unhandled exception occurred: %s\n", e)
|
Python
| 0
|
@@ -87,16 +87,89 @@
settings
+%0Afrom django.db import InterfaceError, ProgrammingError, OperationalError
%0A%0Afrom c
@@ -446,16 +446,463 @@
ents.'%0A%0A
+ def get_cluster_or_wait(self, log_sleep_interval):%0A max_trials = 10%0A trials = 0%0A while trials %3C max_trials:%0A try:%0A return Cluster.load()%0A except (InterfaceError, ProgrammingError, OperationalError) as e:%0A namespace.logger.exception(%22Database is not synced yet %25s%5Cn%22, e)%0A trials += 1%0A time.sleep(log_sleep_interval * 2)%0A return None%0A%0A
def
@@ -1267,30 +1267,130 @@
uster =
-Cluster.load()
+self.get_cluster_or_wait(log_sleep_interval)%0A if not cluster:%0A # End process%0A return%0A
%0A
|
ad26a38263655ddfb3421f7cb748ce7782a91aeb
|
Fix tests_require
|
setup.py
|
setup.py
|
#
# Copyright 2014 Infoxchange Australia
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Setup script.
"""
from setuptools import setup, find_packages
with open('requirements.txt') as requirements, \
open('test_requirements.txt') as test_requirements:
setup(
name='supervisor-logging',
version='0.0.1',
description='Stream supervisord logs to a syslog instance',
author='Infoxchange development team',
author_email='devs@infoxchange.net.au',
url='https://github.com/infoxchange/supervisor-logging',
license='Apache 2.0',
long_description=open('README.md').read(),
packages=find_packages(exclude=['tests']),
package_data={
'forklift': [
'README.md',
'requirements.txt',
'test_requirements.txt',
],
},
entry_points={
'console_scripts': [
'supervisor_logging = supervisor_logging:main',
],
},
install_requires=requirements.read().splitlines(),
test_suite='tests',
test_requires=test_requirements.read().splitlines(),
)
|
Python
| 0.000003
|
@@ -1619,24 +1619,25 @@
test
+s
_require
s=test_r
@@ -1628,17 +1628,16 @@
_require
-s
=test_re
|
05cd3098f55acc1745bd43d20e1db2c3fe3f8d01
|
Version update.
|
setup.py
|
setup.py
|
# -*- coding: utf-8 -*-
# MIT License
#
# Copyright (c) 2017 Tijme Gommers
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
from setuptools import find_packages, setup
with open('requirements.txt') as file:
requirements = file.read().splitlines()
setup(
version ="2.0.4",
name = "acstis",
description = "Automated client-side template injection (CSTI, sandbox escape/bypass) detection for AngularJS!",
long_description = "",
keywords = "angularjs xss xss-scanner exploit angularjs-sandbox-escape vulnerability-scanner",
classifiers = [
"Development Status :: 5 - Production/Stable",
"Environment :: Console",
"Intended Audience :: Developers",
"Intended Audience :: Education",
"Intended Audience :: Information Technology",
"Intended Audience :: System Administrators",
"License :: OSI Approved :: MIT License",
"Natural Language :: English",
"Operating System :: MacOS",
"Operating System :: Microsoft :: Windows",
"Operating System :: POSIX :: Linux",
"Programming Language :: Python :: 3.6",
"Programming Language :: Python :: 3.5",
"Programming Language :: Python :: 3.4",
"Programming Language :: Python :: 3.3",
"Topic :: Security"
],
packages = find_packages(),
platforms = ["any"],
author = "Tijme Gommers",
author_email ="tijme@finnwea.com",
license = "MIT",
url = "https://github.com/tijme/angularjs-csti-scanner",
install_requires = requirements,
entry_points = {
'console_scripts': [
'acstis = scripts.acstis_cli:main'
]
},
package_data={
'acstis': [
'chrome_drivers/chromedriver_linux32',
'chrome_drivers/chromedriver_linux64',
'chrome_drivers/chromedriver_mac64',
'chrome_drivers/chromedriver_win32.exe'
]
}
)
|
Python
| 0
|
@@ -1285,17 +1285,17 @@
n =%222.0.
-4
+5
%22, %0A
|
969acffd6562c27a53973a1fd7551bcf5c6c6cbc
|
tweak setup to include new version and reqs
|
setup.py
|
setup.py
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import os
import sys
from setuptools import setup, find_packages
APP_NAME = 'tarbell'
VERSION = '0.9b2'
settings = dict()
# Publish Helper.
if sys.argv[-1] == 'publish':
os.system('python setup.py sdist upload')
sys.exit()
settings.update(
name=APP_NAME,
version=VERSION,
author=u'Chicago Tribune News Applications Team',
author_email='newsapps@tribune.com',
url='http://github.com/newsapps/flask-tarbell',
license='MIT',
description='A very simple content management system',
long_description='',
zip_safe=False,
packages=find_packages(),
include_package_data=True,
install_requires=[
"Flask==0.10.1",
"GitPython==0.3.2.RC1",
"Jinja2==2.7.1",
"MarkupSafe==0.18",
"PyYAML==3.10",
"Werkzeug==0.9.4",
"async==0.6.1",
"boto==2.18.0",
"clint==0.3.1",
"gitdb==0.5.4",
"itsdangerous==0.23",
"requests==1.2.3",
"smmap==0.8.2",
"unicodecsv==0.9.4",
"wsgiref==0.1.2",
"google-api-python-client==1.2",
"keyring>=3.2.1",
"xlrd==0.9.2",
"python-dateutil>=2.2",
"docutils==0.11",
"sh==1.09",
"Markdown==2.3.1"],
entry_points={
'console_scripts': [
'tarbell = tarbell.cli:main',
],
},
keywords=['Development Status :: 3 - alpha',
'License :: OSI Approved :: MIT License',
'Operating System :: OS Independent',
'Programming Language :: Python',
'Topic :: Internet',
],
)
setup(**settings)
|
Python
| 0
|
@@ -143,17 +143,17 @@
= '0.9b
-2
+3
'%0A%0Asetti
@@ -716,40 +716,8 @@
1%22,%0A
- %22GitPython==0.3.2.RC1%22,%0A
@@ -733,24 +733,24 @@
a2==2.7.1%22,%0A
+
%22Mar
@@ -820,32 +820,8 @@
4%22,%0A
- %22async==0.6.1%22,%0A
@@ -834,17 +834,17 @@
oto==2.1
-8
+9
.0%22,%0A
@@ -864,143 +864,36 @@
0.3.
-1%22,%0A %22gitdb==0.5.4%22,%0A %22itsdangerous==0.23%22,%0A %22requests==1.2.3%22,%0A %22smmap==0.8.2%22,%0A %22unicodecsv==0.9.4
+2%22,%0A %22requests==2.1.0
%22,%0A
@@ -1061,24 +1061,24 @@
ils==0.11%22,%0A
-
%22sh=
@@ -1085,16 +1085,51 @@
=1.09%22,%0A
+ %22sphinx_rtd_theme==0.1.5%22,%0A
|
317e4126331b9889433d634993039ef6848e75ef
|
version bump
|
setup.py
|
setup.py
|
from distutils.core import setup
from distutils.extension import Extension
cmdclass = { }
try:
from Cython.Distutils import build_ext
cmdclass.update({ 'build_ext': build_ext })
EXT = ".pyx"
except:
EXT = ".c"
NAME = "cypyserialize"
VERSION = "1.0.2"
DESCR = "Really easy, really quick, binary parser framework for Python"
try:
import pypandoc
LONG_DESC = pypandoc.convert('README.md', 'rst')
except (IOError, ImportError):
LONG_DESC = open('README.md').read()
URL = "https://github.com/wrenoud/cypyserialize"
DOWNLOAD_URL = "https://github.com/wrenoud/cypyserialize/tarball/" + VERSION
AUTHOR = "Weston Renoud"
EMAIL = "wrenoud@gmail.com"
LICENSE = "Apache 2.0"
SRC_DIR = NAME
PACKAGES = [SRC_DIR]
EXTENSIONS = [
Extension(
SRC_DIR + ".variant",
[SRC_DIR + '/variant' + EXT],
libraries=[]
),
Extension(
SRC_DIR + ".serializers",
[SRC_DIR + '/serializers' + EXT],
libraries=[]
),
Extension(
SRC_DIR + ".serializable",
[SRC_DIR + '/serializable' + EXT],
libraries=[]
)
]
REQUIRES = []
setup(
name=NAME,
packages=PACKAGES,
version=VERSION,
description=DESCR,
long_description=LONG_DESC,
author=AUTHOR,
author_email=EMAIL,
url=URL,
download_url=DOWNLOAD_URL,
keywords=['testing', 'logging', 'example'], # arbitrary keywords
classifiers=[],
license=LICENSE,
cmdclass=cmdclass,
ext_modules=EXTENSIONS,
install_requires=REQUIRES,
)
|
Python
| 0.000001
|
@@ -260,17 +260,17 @@
= %221.0.
-2
+3
%22%0ADESCR
|
5abbc9c3b56b97fd6365719a903121604e1d4539
|
Version 1.1.62
|
setup.py
|
setup.py
|
"""
The setup package to install the SeleniumBase Test Framework plugins
"""
from setuptools import setup, find_packages # noqa
setup(
name='seleniumbase',
version='1.1.61',
url='http://seleniumbase.com',
author='Michael Mintz',
author_email='@mintzworld',
maintainer='Michael Mintz',
description='Reliable Browser Automation - http://seleniumbase.com',
license='The MIT License',
install_requires=[
'pip>=8.1.2',
'setuptools>=18.5',
'selenium>=2.53.6',
'nose==1.3.7',
'pytest==2.9.1',
'flake8==2.5.4',
'requests==2.10.0',
'urllib3==1.15.1',
'BeautifulSoup==3.2.1',
'unittest2==1.1.0',
'chardet==2.3.0',
'simplejson==3.8.2',
'boto==2.40.0',
'ipdb==0.10.0',
'pyvirtualdisplay==0.2',
],
packages=['seleniumbase',
'seleniumbase.core',
'seleniumbase.plugins',
'seleniumbase.fixtures',
'seleniumbase.common',
'seleniumbase.config'],
entry_points={
'nose.plugins': [
'base_plugin = seleniumbase.plugins.base_plugin:Base',
'selenium = seleniumbase.plugins.selenium_plugin:SeleniumBrowser',
'page_source = seleniumbase.plugins.page_source:PageSource',
'screen_shots = seleniumbase.plugins.screen_shots:ScreenShots',
'test_info = seleniumbase.plugins.basic_test_info:BasicTestInfo',
('db_reporting = '
'seleniumbase.plugins.db_reporting_plugin:DBReporting'),
's3_logging = seleniumbase.plugins.s3_logging_plugin:S3Logging',
('hipchat_reporting = seleniumbase.plugins'
'.hipchat_reporting_plugin:HipchatReporting'),
]
}
)
|
Python
| 0
|
@@ -174,17 +174,17 @@
n='1.1.6
-1
+2
',%0A u
|
5506ecd2bd86dbd052b07247fa0b21c985df2b5c
|
Remove unnecessary directories in setup.py
|
setup.py
|
setup.py
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# Copyright (c) 2015:
# Frederic Mohier, frederic.mohier@gmail.com
#
# This file is part of (WebUI).
#
# (WebUI) is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# (WebUI) is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with (WebUI). If not, see <http://www.gnu.org/licenses/>.
import os
import sys
import re
del os.link
from importlib import import_module
try:
from setuptools import setup, find_packages
except:
sys.exit("Error: missing python-setuptools library")
try:
python_version = sys.version_info
except:
python_version = (1, 5)
if python_version < (2, 7):
sys.exit("This application requires a minimum Python 2.7.x, sorry!")
elif python_version >= (3,):
sys.exit("This application is not yet compatible with Python 3.x, sorry!")
from alignak_webui import __application__, __version__, __copyright__
from alignak_webui import __releasenotes__, __license__, __doc_url__
from alignak_webui import __name__ as __pkg_name__
package = import_module('alignak_webui')
install_requires = [
'future',
'configparser',
'docopt',
'bottle>=0.12.9,<0.13',
'Beaker==1.8.0',
'CherryPy',
'pymongo>=3.2',
'requests>=2.9.1',
'python-gettext',
'termcolor',
'python-dateutil==2.4.2',
'pytz',
'alignak_backend_client'
]
# Define paths
if 'linux' in sys.platform or 'sunos5' in sys.platform:
paths = {
'bin': "/usr/bin",
'var': "/var/lib/alignak_webui/",
'share': "/var/lib/alignak_webui/share",
'etc': "/etc/alignak_webui",
'run': "/var/run/alignak_webui",
'log': "/var/log/alignak_webui",
'libexec': "/var/lib/alignak_webui/libexec",
}
elif 'bsd' in sys.platform or 'dragonfly' in sys.platform:
paths = {
'bin': "/usr/local/bin",
'var': "/usr/local/libexec/alignak_webui",
'share': "/usr/local/share/alignak_webui",
'etc': "/usr/local/etc/alignak_webui",
'run': "/var/run/alignak_webui",
'log': "/var/log/alignak_webui",
'libexec': "/usr/local/libexec/alignak_webui/plugins",
}
else:
print "Unsupported platform, sorry!"
exit(1)
data_files = [
(paths['etc'], ['etc/settings.cfg'])
]
# Specific for Read the docs build process
on_rtd = os.environ.get('READTHEDOCS') == 'True'
if on_rtd:
print "RTD build, no data_files"
data_files = []
setup(
name=__pkg_name__,
version=__version__,
license=__license__,
# metadata for upload to PyPI
author="Frédéric MOHIER",
author_email="frederic.mohier@gmail.com",
keywords="alignak web ui",
url="https://github.com/Alignak-monitoring-contrib/alignak-webui",
description=package.__doc__.strip(),
long_description=open('README.rst').read(),
zip_safe=False,
packages=find_packages(),
include_package_data=True,
# package_data={
# 'sample': ['package_data.dat'],
# },
data_files=data_files,
install_requires=install_requires,
entry_points={
'console_scripts': [
'alignak_webui = alignak_webui.app:main',
],
},
classifiers = [
'Development Status :: 4 - Beta',
'Environment :: Web Environment',
'Framework :: Bottle',
'Intended Audience :: Developers',
'Intended Audience :: Customer Service',
'Intended Audience :: System Administrators',
'License :: OSI Approved :: GNU General Public License v3 (GPLv3)',
'Natural Language :: English',
'Programming Language :: Python',
'Topic :: System :: Monitoring',
'Topic :: System :: Systems Administration'
]
)
|
Python
| 0.000001
|
@@ -2144,62 +2144,8 @@
bui%22
-,%0A 'libexec': %22/var/lib/alignak_webui/libexec%22,
%0A
@@ -2429,32 +2429,42 @@
'run': %22/
+usr/local/
var/run/alignak_
@@ -2493,53 +2493,8 @@
- %22/var/log/alignak_webui%22,%0A 'libexec':
%22/u
@@ -2494,39 +2494,39 @@
%22/usr/local/
-libexec
+var/log
/alignak_webui/p
@@ -2527,18 +2527,9 @@
ebui
-/plugins%22,
+%22
%0A
|
bf465d2a00d2d1fd77dc17dd4e90e2cf013273dd
|
version bump
|
setup.py
|
setup.py
|
#!/usr/bin/env python
try:
from setuptools import setup, find_packages
except ImportError:
from distutils.core import setup, find_packages
VERSION = '0.0.2'
LONG_DESC = """\
"""
setup(name='django-dockit',
version=VERSION,
description="",
long_description=LONG_DESC,
classifiers=[
'Programming Language :: Python',
'Operating System :: OS Independent',
'Natural Language :: English',
'Development Status :: 3 - Alpha',
'Intended Audience :: Developers',
'Topic :: Internet :: WWW/HTTP :: Dynamic Content',
],
keywords='django',
maintainer = 'Jason Kraus',
maintainer_email = 'zbyte64@gmail.com',
url='http://github.com/zbyte64/django-dockit',
license='New BSD License',
packages=find_packages(exclude=['test_environment']),
#test_suite='tests.runtests.runtests',
include_package_data = True,
)
|
Python
| 0.000001
|
@@ -161,9 +161,9 @@
0.0.
-2
+3
'%0ALO
|
0e178c8bcb0bad2146fede1e6361bc57bdbf8102
|
Bump version for 0.3.0
|
setup.py
|
setup.py
|
#!/usr/bin/env python
try:
from Cython.Build import cythonize
import Cython
except ImportError:
raise RuntimeError('No cython installed. Please run `pip install cython`')
if Cython.__version__ < '0.19.1':
raise RuntimeError('Old cython installed. Please run `pip install -U cython`')
from distutils.core import setup
import os
MAJOR = 0
MINOR = 2
MICRO = 1
VERSION = '%d.%d.%d' % (MAJOR, MINOR, MICRO)
def write_version_py(filename=None):
cnt = """\
version = '%s'
short_version = '%s'
"""
if not filename:
filename = os.path.join(
os.path.dirname(__file__), 'capnp', 'version.py')
a = open(filename, 'w')
try:
a.write(cnt % (VERSION, VERSION))
finally:
a.close()
write_version_py()
try:
import pypandoc
long_description = pypandoc.convert('README.md', 'rst')
except (IOError, ImportError):
long_description = ''
setup(
name="capnp",
packages=["capnp"],
version=VERSION,
package_data={'capnp': ['*.pxd', '*.pyx', '*.h']},
ext_modules=cythonize('capnp/*.pyx', language="c++"),
install_requires=[
'cython > 0.19',
'setuptools >= 0.8'],
# PyPi info
description='A cython wrapping of the C++ capnproto library',
long_description=long_description,
license='BSD',
author="Jason Paryani",
author_email="pypi-contact@jparyani.com",
url = 'https://github.com/jparyani/capnpc-python-cpp',
download_url = 'https://github.com/jparyani/capnpc-python-cpp/archive/v%s.zip' % VERSION,
keywords = ['capnp', 'capnproto'],
classifiers = [
'Development Status :: 3 - Alpha',
'Intended Audience :: Developers',
'License :: OSI Approved :: BSD License',
'Operating System :: MacOS :: MacOS X',
'Operating System :: POSIX',
'Programming Language :: C++',
'Programming Language :: Cython',
'Programming Language :: Python :: 2',
'Programming Language :: Python :: 2.6',
'Programming Language :: Python :: 2.7',
'Programming Language :: Python :: 3',
'Programming Language :: Python :: 3.2',
'Programming Language :: Python :: 3.3',
'Programming Language :: Python :: Implementation :: PyPy',
'Topic :: Communications'],
)
|
Python
| 0
|
@@ -357,17 +357,17 @@
MINOR =
-2
+3
%0AMICRO =
@@ -367,17 +367,17 @@
MICRO =
-1
+0
%0AVERSION
|
8b95eac9d92856b521265b1e2139b73805baf94d
|
Version bump
|
setup.py
|
setup.py
|
#
# Copyright 2014 Infoxchange Australia
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Setup script.
"""
from setuptools import setup, find_packages
from sys import version_info
assert version_info >= (3,), "Python 3 is required."
setup(
name='docker-forklift',
version='0.2.15',
description='Utility for running a container',
author='Infoxchange Australia development team',
author_email='devs@infoxchange.net.au',
url='https://github.com/infoxchange/docker-forklift',
license='Apache 2.0',
long_description=open('README.md').read(),
packages=find_packages(exclude=['tests']),
package_data={
'forklift': ['README.md'],
},
entry_points={
'console_scripts': [
'forklift = forklift:main',
],
},
install_requires=[
'pyxdg',
'pyyaml',
],
test_suite='tests',
tests_require=[
'pep8',
'pylint',
'pylint-mccabe',
],
)
|
Python
| 0.000001
|
@@ -790,9 +790,9 @@
.2.1
-5
+6
',%0A
|
1f14b95b26e84336e4c1e8a11ea2fd06fa1a802d
|
Bump version to make PyPI happy
|
setup.py
|
setup.py
|
import os
import sys
from setuptools import setup, Extension, find_packages
ext_modules = []
# C speedups are no good for PyPy
if '__pypy__' not in sys.builtin_module_names:
ext_modules.append(
Extension('gevent_fastcgi.speedups', ['gevent_fastcgi/speedups.c']))
setup(
name='gevent-fastcgi',
version='1.0.2',
description='''FastCGI/WSGI client and server implemented using gevent
library''',
long_description='''
FastCGI/WSGI server implementation using gevent library. No need to
monkeypatch and slow down your favourite FastCGI server in order to make
it "green".
Supports connection multiplexing. Out-of-the-box support for Django and
frameworks that use PasteDeploy including Pylons and Pyramid.
''',
keywords='fastcgi gevent wsgi',
author='Alexander Kulakov',
author_email='a.kulakov@mail.ru',
url='http://github.com/momyc/gevent-fastcgi',
packages=find_packages(exclude=('gevent_fastcgi.tests.*',)),
zip_safe=True,
license='MIT',
install_requires=[
"zope.interface",
"gevent>=0.13.6",
],
entry_points={
'paste.server_runner': [
'fastcgi = gevent_fastcgi.adapters.paste_deploy:fastcgi_server_runner',
'wsgi = gevent_fastcgi.adapters.paste_deploy:wsgi_server_runner',
'wsgiref = gevent_fastcgi.adapters.paste_deploy:wsgiref_server_runner',
],
},
test_suite="tests",
tests_require=['mock'],
ext_modules=ext_modules
)
|
Python
| 0
|
@@ -323,16 +323,18 @@
n='1.0.2
+.1
',%0A d
|
c4ec856f26e4d83eb11296480ab8180f14588934
|
Update python versions
|
setup.py
|
setup.py
|
#!/usr/bin/env python3
from os import path
from setuptools import setup, find_packages
here = path.abspath(path.dirname(__file__))
# Get the long description from the README file
with open(path.join(here, 'README.md'), encoding='utf-8') as f:
long_description = f.read()
setup(
name='ogn-python',
version='0.5.0',
description='A database backend for the Open Glider Network',
long_description=long_description,
url='https://github.com/glidernet/ogn-python',
author='Konstantin Gründger aka Meisterschueler, Fabian P. Schmidt aka kerel, Dominic Spreitz',
author_email='kerel-fs@gmx.de',
license='AGPLv3',
classifiers=[
'Development Status :: 3 - Alpha',
'Intended Audience :: Developers',
'Intended Audience :: Science/Research',
'Topic :: Scientific/Engineering :: GIS',
'License :: OSI Approved :: GNU Affero General Public License v3',
'Programming Language :: Python :: 3',
'Programming Language :: Python :: 3.4',
'Programming Language :: Python :: 3.5',
],
keywords='gliding ogn',
packages=find_packages(exclude=['tests', 'tests.*']),
install_requires=[
'Flask==1.1.2',
'Flask-SQLAlchemy==2.4.4',
'Flask-Migrate==2.5.3',
'Flask-Bootstrap==3.3.7.1',
'Flask-WTF==0.14.3',
'Flask-Caching==1.9.0',
'geopy==2.0.0',
'celery==5.0.2',
'redis==3.5.3',
'aerofiles==1.0.0',
'geoalchemy2==0.8.4',
'shapely==1.7.1',
'ogn-client==1.0.1',
'mgrs==1.4.0',
'psycopg2-binary==2.8.6',
'xmlunittest==0.5.0',
'flower==0.9.5',
'tqdm==4.51.0',
'requests==2.25.0',
],
test_require=[
'pytest==5.0.1',
'flake8==1.1.1',
'xmlunittest==0.4.0',
],
zip_safe=False
)
|
Python
| 0
|
@@ -1008,17 +1008,66 @@
on :: 3.
-4
+5',%0A 'Programming Language :: Python :: 3.6
',%0A
@@ -1102,28 +1102,129 @@
Python :: 3.
-5
+7
',%0A
+ 'Programming Language :: Python :: 3.8',%0A 'Programming Language :: Python :: 3.9-dev'%0A
%5D,%0A k
|
a7717abd4c9fe436eb657c376e5d5eef44323d42
|
Version 1.10.0
|
setup.py
|
setup.py
|
from setuptools import setup, find_packages
import sys, os
version = '1.9.4'
install_requires = [
# -*- Extra requirements: -*-
]
setup(name='twitter',
version=version,
description="An API and command-line toolset for Twitter (twitter.com)",
long_description=open("./README", "r").read(),
# Get strings from http://pypi.python.org/pypi?%3Aaction=list_classifiers
classifiers=[
"Development Status :: 5 - Production/Stable",
"Environment :: Console",
"Intended Audience :: End Users/Desktop",
"Natural Language :: English",
"Operating System :: OS Independent",
"Programming Language :: Python :: 3",
"Programming Language :: Python :: 2.6",
"Programming Language :: Python :: 2.7",
"Topic :: Communications :: Chat :: Internet Relay Chat",
"Topic :: Internet :: WWW/HTTP :: Dynamic Content :: CGI Tools/Libraries",
"Topic :: Utilities",
"License :: OSI Approved :: MIT License",
],
keywords='twitter, IRC, command-line tools, web 2.0',
author='Mike Verdone',
author_email='mike.verdone+twitterapi@gmail.com',
url='http://mike.verdone.ca/twitter/',
license='MIT License',
packages=find_packages(exclude=['ez_setup', 'examples', 'tests']),
include_package_data=True,
zip_safe=True,
install_requires=install_requires,
entry_points="""
# -*- Entry points: -*-
[console_scripts]
twitter=twitter.cmdline:main
twitterbot=twitter.ircbot:main
twitter-log=twitter.logger:main
twitter-archiver=twitter.archiver:main
twitter-follow=twitter.follow:main
twitter-stream-example=twitter.stream_example:main
""",
)
|
Python
| 0
|
@@ -70,11 +70,12 @@
'1.
-9.4
+10.0
'%0A%0Ai
|
e2455de9a8a9a4d1147b75307d73f513cc866e11
|
Bump setup.py version to 0.3.0.
|
setup.py
|
setup.py
|
from setuptools import setup
requirements = ["dataclasses==0.8;python_version<'3.7'"]
setup(
name="clr",
version="0.2.0",
description="A command line tool for executing custom python scripts.",
author="Color",
author_email="dev@getcolor.com",
url="https://github.com/color/clr",
packages=["clr"],
entry_points={"console_scripts": ["clr = clr:main"],},
install_requires=requirements,
setup_requires=["pytest-runner"],
tests_require=requirements + ["pytest==6.2.4"],
license="MIT",
include_package_data=True,
package_data={"": ["completion.*"],},
)
|
Python
| 0
|
@@ -119,17 +119,17 @@
sion=%220.
-2
+3
.0%22,%0A
|
0404c2d0b25d8d8e3bb542b4872361b24ea568e9
|
add long description
|
setup.py
|
setup.py
|
# No shebang line, this module is meant to be imported
#
# Copyright 2013 Oliver Palmer
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import sys
assert sys.version_info[0:2] >= (2, 5), "Python 2.5 or higher is required"
from textwrap import dedent
from setuptools import find_packages
from distutils.core import setup
PACKAGE = "pyfarm.core"
VERSION = (0, 0, 0, "alpha0")
NAMESPACE = PACKAGE.split(".")[0]
prefixpkg = lambda name: "%s.%s" % (NAMESPACE, name)
install_requires = ["statsd"]
if sys.version_info[0:2] < (2, 7):
install_requires.append("simplejson")
setup(
name=PACKAGE,
version=".".join(map(str, VERSION)),
packages=map(prefixpkg, find_packages(NAMESPACE)),
namespace_packages=[NAMESPACE],
install_requires=install_requires,
url="https://github.com/pyfarm/pyfarm-core",
license="Apache v2.0",
author="Oliver Palmer",
author_email="development@pyfarm.net",
description=dedent("""This sub-library contains core modules, classes,
and data types which are used by other parts of PyFarm."""),
classifiers=[
"Development Status :: 2 - Pre-Alpha",
"Environment :: Other Environment",
"Intended Audience :: Developers",
"License :: OSI Approved :: Apache Software License",
"Natural Language :: English",
"Operating System :: OS Independent",
"Programming Language :: Python :: 2 :: Only", # (for now)
"Programming Language :: Python :: 2.5",
"Programming Language :: Python :: 2.6",
"Programming Language :: Python :: 2.7",
"Topic :: System :: Distributed Computing"])
|
Python
| 0.999999
|
@@ -1547,16 +1547,69 @@
m.%22%22%22),%0A
+ long_description=open(%22README.rst%22, %22r%22).read(),%0A
clas
|
b79e11ccc4a07a5a257d2135aa9c7d3c2ff75546
|
Fix released
|
setup.py
|
setup.py
|
from setuptools import setup
setup(
name='edn',
version='0.0.1',
packages=['edn'],
package_data={'edn': ['edn.parsley']},
install_requires=[
# iso8601 0.1.5 introduces a timezone parsing bug.
# https://bitbucket.org/micktwomey/pyiso8601/issue/8/015-parses-negative-timezones-incorrectly
'iso8601==0.1.4',
'parsley>=1.2',
'perfidy',
],
)
|
Python
| 0
|
@@ -160,170 +160,8 @@
s=%5B%0A
- # iso8601 0.1.5 introduces a timezone parsing bug.%0A # https://bitbucket.org/micktwomey/pyiso8601/issue/8/015-parses-negative-timezones-incorrectly%0A
@@ -176,15 +176,15 @@
8601
-=
+%3E
=0.1.
-4
+6
',%0A
|
3f6aaf630658e42862aa40037def28e16541f930
|
Fix title parsing in markdown
|
setup.py
|
setup.py
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
import re
from os.path import join, dirname
from setuptools import setup, find_packages
ROOT = dirname(__file__)
RE_REQUIREMENT = re.compile(r'^\s*-r\s*(?P<filename>.*)$')
RE_MD_CODE_BLOCK = re.compile(r'```(?P<language>\w+)?\n(?P<lines>.*?)```', re.S)
RE_SELF_LINK = re.compile(r'\[(.*?)\]\[\]')
RE_LINK_TO_URL = re.compile(r'\[(?P<text>.*?)\]\((?P<url>.*?)\)')
RE_LINK_TO_REF = re.compile(r'\[(?P<text>.*?)\]\[(?P<ref>.*?)\]')
RE_LINK_REF = re.compile(r'^\[(?P<key>[^!].*?)\]:\s*(?P<url>.*)$', re.M)
RE_BADGE = re.compile(r'^\[\!\[(?P<text>.*?)\]\[(?P<badge>.*?)\]\]\[(?P<target>.*?)\]$', re.M)
RE_TITLE = re.compile(r'^(?P<level>#+)\s*(?P<title>.*)$', re.M)
BADGES_TO_KEEP = ['gitter-badge', 'readthedocs-badge']
RST_TITLE_LEVELS = ['=', '-', '*']
RST_BADGE = '''\
.. image:: {badge}
:target: {target}
:alt: {text}
'''
def md2pypi(filename):
'''
Load .md (markdown) file and sanitize it for PyPI.
Remove unsupported github tags:
- code-block directive
- travis ci build badges
'''
content = open(filename).read()
for match in RE_MD_CODE_BLOCK.finditer(content):
rst_block = '\n'.join(
['.. code-block:: {language}'.format(**match.groupdict()), ''] +
[' {0}'.format(l) for l in match.group('lines').split('\n')] +
['']
)
content = content.replace(match.group(0), rst_block)
refs = dict(RE_LINK_REF.findall(content))
content = RE_LINK_REF.sub('.. _\g<key>: \g<url>', content)
content = RE_SELF_LINK.sub('`\g<1>`_', content)
content = RE_LINK_TO_URL.sub('`\g<text> <\g<url>>`_', content)
for match in RE_BADGE.finditer(content):
if match.group('badge') not in BADGES_TO_KEEP:
content = content.replace(match.group(0), '')
else:
params = match.groupdict()
params['badge'] = refs[match.group('badge')]
params['target'] = refs[match.group('target')]
content = content.replace(match.group(0),
RST_BADGE.format(**params))
# Must occur after badges
for match in RE_LINK_TO_REF.finditer(content):
content = content.replace(match.group(0), '`{text} <{url}>`_'.format(
text=match.group('text'),
url=refs[match.group('ref')]
))
for match in RE_TITLE.finditer(content):
underchar = RST_TITLE_LEVELS[len(match.group('level'))]
title = match.group('title')
underline = underchar * len(title)
full_title = '\n'.join((title, underline))
content = content.replace(match.group(0), full_title)
return content
long_description = '\n'.join((
md2pypi('README.md'),
md2pypi('CHANGELOG.md'),
''
))
def pip(filename):
"""Parse pip reqs file and transform it to setuptools requirements."""
requirements = []
for line in open(join(ROOT, 'requirements', filename)):
line = line.strip()
if not line or '://' in line:
continue
match = RE_REQUIREMENT.match(line)
if match:
requirements.extend(pip(match.group('filename')))
else:
requirements.append(line)
return requirements
install_requires = pip('install.pip')
tests_require = pip('test.pip')
setup(
name='udata',
version=__import__('udata').__version__,
description=__import__('udata').__description__,
long_description=long_description,
url='https://github.com/opendatateam/udata',
author='Opendata Team',
author_email='opendatateam@data.gouv.fr',
packages=find_packages(),
include_package_data=True,
install_requires=install_requires,
tests_require=tests_require,
extras_require={
'test': tests_require,
'sentry': ['raven[flask]>=5.3.0'],
},
entry_points={
'console_scripts': [
'udata = udata.commands:console_script',
]
},
license='GNU AGPLv3+',
# use_2to3=True,
keywords='',
classifiers=[
'Development Status :: 3 - Alpha',
'Programming Language :: Python',
'Environment :: Web Environment',
'Operating System :: OS Independent',
'Intended Audience :: Developers',
'Topic :: System :: Software Distribution',
'Programming Language :: Python',
'Programming Language :: Python :: 2',
'Programming Language :: Python :: 2.7',
'Topic :: Software Development :: Libraries :: Python Modules',
('License :: OSI Approved :: GNU Affero General Public License v3'
' or later (AGPLv3+)'),
],
)
|
Python
| 0.004923
|
@@ -918,16 +918,17 @@
t%7D%0A'''%0A%0A
+%0A
def md2p
@@ -2505,16 +2505,20 @@
level'))
+ - 1
%5D%0A
|
763d59db4d6369434c55af25caa69cf57f8d712f
|
Fix user statement rendering issues.
|
libapol/policyrep/user.py
|
libapol/policyrep/user.py
|
# Copyright 2014, Tresys Technology, LLC
#
# This file is part of SETools.
#
# SETools is free software: you can redistribute it and/or modify
# it under the terms of the GNU Lesser General Public License as
# published by the Free Software Foundation, either version 2.1 of
# the License, or (at your option) any later version.
#
# SETools is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with SETools. If not, see
# <http://www.gnu.org/licenses/>.
#
import string
import setools.qpol as qpol
import role
import mls
import symbol
class User(symbol.PolicySymbol):
"""A user."""
@property
def roles(self):
"""The user's set of roles."""
r = set()
aiter = self.qpol_symbol.get_role_iter(self.policy)
while not aiter.end():
item = role.Role(
self.policy, qpol.qpol_role_from_void(aiter.get_item()))
# object_r is implicitly added to all roles by the compiler.
# technically it is incorrect to skip it, but policy writers
# and analysts don't expect to see it in results, and it
# will confuse, especially for set equality user queries.
if item != "object_r":
r.add(item)
aiter.next()
return r
@property
def mls_default(self):
"""The user's default MLS level."""
return mls.MLSRange(self.policy, self.qpol_symbol.get_range(self.policy))
@property
def mls_range(self):
"""The user's MLS range."""
return mls.MLSLevel(self.policy, self.qpol_symbol.get_dfltlevel(self.policy))
def statement(self):
roles = list(self.roles)
stmt = "user {0} ".format(self)
if (len(roles) > 1):
stmt += "{{ {0} }}".format(string.join(str(r) for r in roles))
else:
stmt += str(roles[0])
try:
stmt += " level {0.mls_default} range {0.mls_range};".format(self)
except AttributeError:
stmt += ";"
return stmt
|
Python
| 0
|
@@ -1561,23 +1561,21 @@
def mls_
-default
+level
(self):%0A
@@ -1644,13 +1644,13 @@
.MLS
-Range
+Level
(sel
@@ -1680,21 +1680,25 @@
bol.get_
-range
+dfltlevel
(self.po
@@ -1794,37 +1794,37 @@
return mls.MLS
-Level
+Range
(self.policy, se
@@ -1838,33 +1838,29 @@
_symbol.get_
-dfltlevel
+range
(self.policy
@@ -1859,24 +1859,25 @@
f.policy))%0A%0A
+%0A
def stat
@@ -1910,16 +1910,32 @@
= list(
+str(r) for r in
self.rol
@@ -1962,16 +1962,22 @@
user %7B0%7D
+ roles
%22.forma
@@ -2064,32 +2064,16 @@
ng.join(
-str(r) for r in
roles))%0A
@@ -2110,20 +2110,16 @@
+=
-str(
roles%5B0%5D
)%0A%0A
@@ -2114,17 +2114,16 @@
roles%5B0%5D
-)
%0A%0A
@@ -2164,23 +2164,21 @@
%7B0.mls_
-default
+level
%7D range
|
af3a7217b94254f2ce533deefd4d9e636b9937f9
|
Bump version to 0.2-dev
|
setup.py
|
setup.py
|
from distutils.core import setup
setup(
name='Zuice',
version='0.1',
description='A dependency injection framework for Python',
author='Michael Williamson',
author_email='mike@zwobble.org',
url='http://gitorious.org/zuice',
packages=['zuice'],
)
|
Python
| 0
|
@@ -71,9 +71,13 @@
='0.
-1
+2-dev
',%0A
|
099a5b3f0300f0a37d4aea8c3bc27a9fb00aff6a
|
Update python classifiers in setup.py
|
setup.py
|
setup.py
|
import codecs
import os
import shutil
import sys
import warnings
import setuptools
from setuptools import find_packages, setup, Command
from setuptools.command.egg_info import egg_info
PACKAGE_NAME = 'certvalidator'
PACKAGE_VERSION = '0.12.0.dev1'
PACKAGE_ROOT = os.path.dirname(os.path.abspath(__file__))
# setuptools 38.6.0 and newer know about long_description_content_type, but
# distutils still complains about it, so silence the warning
sv = setuptools.__version__
svi = tuple(int(o) if o.isdigit() else o for o in sv.split('.'))
if svi >= (38, 6):
warnings.filterwarnings(
'ignore',
"Unknown distribution option: 'long_description_content_type'",
module='distutils.dist'
)
# This allows us to send the LICENSE and docs when creating a sdist. Wheels
# automatically include the LICENSE, and don't need the docs. For these
# to be included, the command must be "python setup.py sdist".
package_data = {}
if sys.argv[1:] == ['sdist'] or sorted(sys.argv[1:]) == ['-q', 'sdist']:
package_data[PACKAGE_NAME] = [
'../LICENSE',
'../*.md',
'../docs/*.md',
]
# Ensures a copy of the LICENSE is included with the egg-info for
# install and bdist_egg commands
class EggInfoCommand(egg_info):
def run(self):
egg_info_path = os.path.join(
PACKAGE_ROOT,
'%s.egg-info' % PACKAGE_NAME
)
if not os.path.exists(egg_info_path):
os.mkdir(egg_info_path)
shutil.copy2(
os.path.join(PACKAGE_ROOT, 'LICENSE'),
os.path.join(egg_info_path, 'LICENSE')
)
egg_info.run(self)
class CleanCommand(Command):
user_options = [
('all', 'a', '(Compatibility with original clean command)'),
]
def initialize_options(self):
self.all = False
def finalize_options(self):
pass
def run(self):
sub_folders = ['build', 'temp', '%s.egg-info' % PACKAGE_NAME]
if self.all:
sub_folders.append('dist')
for sub_folder in sub_folders:
full_path = os.path.join(PACKAGE_ROOT, sub_folder)
if os.path.exists(full_path):
shutil.rmtree(full_path)
for root, dirs, files in os.walk(os.path.join(PACKAGE_ROOT, PACKAGE_NAME)):
for filename in files:
if filename[-4:] == '.pyc':
os.unlink(os.path.join(root, filename))
for dirname in list(dirs):
if dirname == '__pycache__':
shutil.rmtree(os.path.join(root, dirname))
readme = ''
with codecs.open(os.path.join(PACKAGE_ROOT, 'readme.md'), 'r', 'utf-8') as f:
readme = f.read()
setup(
name=PACKAGE_NAME,
version=PACKAGE_VERSION,
description='Validates X.509 certificates and paths',
long_description=readme,
long_description_content_type='text/markdown',
url='https://github.com/wbond/certvalidator',
author='wbond',
author_email='will@wbond.net',
license='MIT',
classifiers=[
'Development Status :: 4 - Beta',
'Intended Audience :: Developers',
'License :: OSI Approved :: MIT License',
'Programming Language :: Python :: 2.6',
'Programming Language :: Python :: 2.7',
'Programming Language :: Python :: 3.2',
'Programming Language :: Python :: 3.3',
'Programming Language :: Python :: 3.4',
'Programming Language :: Python :: 3.5',
'Programming Language :: Python :: 3.6',
'Programming Language :: Python :: 3.7',
'Programming Language :: Python :: 3.8',
'Programming Language :: Python :: Implementation :: PyPy',
'Topic :: Security :: Cryptography',
],
keywords='crypto pki x509 certificate crl ocsp',
install_requires=[
'asn1crypto>=1.2.0',
'oscrypto>=1.1.0'
],
packages=[PACKAGE_NAME],
package_data=package_data,
test_suite='tests.make_suite',
cmdclass={
'clean': CleanCommand,
'egg_info': EggInfoCommand,
}
)
|
Python
| 0.000002
|
@@ -3162,24 +3162,71 @@
License',%0A%0A
+ 'Programming Language :: Python :: 2',%0A
'Pro
@@ -3307,24 +3307,71 @@
on :: 2.7',%0A
+ 'Programming Language :: Python :: 3',%0A
'Pro
@@ -3701,16 +3701,65 @@
: 3.8',%0A
+ 'Programming Language :: Python :: 3.9',%0A
|
cfec28aca4a4da5ec89b831b1ad13e551d8d73fa
|
Modify setup.py
|
setup.py
|
setup.py
|
#!/usr/bin/env python
from setuptools import setup, find_packages
__version__ = "0.1.1"
setup(
name='RSK Mind',
version=__version__,
description='Framework for machine learning platform',
keywords='machine learning deep learning',
url='git@bitbucket.org:rasarmy/framework.git',
author='RSK Project',
author_email='admin@rsk-project.com',
license='MIT',
scripts=['rsk_mind/bin/rskmind-admin.py'],
entry_points={'console_scripts': [
'rskmind-admin = rskmind.core.management:execute_from_command_line',
]},
include_package_data=True,
packages=find_packages(exclude=('tests', 'tests.*')),
install_requires=[
'xgboost==0.4a30',
'geoip2',
'scikit-learn',
'scipy',
'Jinja2',
'numpy'
],
extras_require={
'docs': ['sphinx'],
'tests': ['nose']
},
zip_safe=False
)
|
Python
| 0.000001
|
@@ -626,16 +626,34 @@
'tests',
+ 'tests.*', 'doc',
'tests.
|
c7124022925c71bc5b89e02cac059f7f9a03625d
|
Fix license filename in setup.py
|
setup.py
|
setup.py
|
import pwnedcheck
from distutils.core import setup
setup(
name="PwnedCheck",
packages=["pwnedcheck"],
package_dir={"pwnedcheck": "pwnedcheck"},
version=pwnedcheck.__version__,
description="Python package to interact with http://haveibeenpwned.com",
long_description=open("README.rst").read() + "\n\n" + open("CHANGES.rst").read(),
author="Casey Dunham",
author_email="casey.dunham@gmail.com",
url="https://github.com/caseydunham/PwnedCheck",
license=open("LICENSE.rst.rst").read(),
classifiers=(
"Intended Audience :: Developers",
"Intended Audience :: End Users/Desktop",
"Intended Audience :: Financial and Insurance Industry",
"Intended Audience :: Healthcare Industry",
"Intended Audience :: Information Technology",
"Intended Audience :: System Administrators",
"Natural Language :: English",
"License :: OSI Approved :: MIT License",
"Operating System :: OS Independent",
"Programming Language :: Python",
"Programming Language :: Python :: 2.6",
"Programming Language :: Python :: 2.7",
"Topic :: Security",
)
)
|
Python
| 0.000003
|
@@ -502,20 +502,16 @@
ENSE.rst
-.rst
%22).read(
|
353e13a622e6a51670ce205da3d16a8b119d72fd
|
Version bump
|
setup.py
|
setup.py
|
# Copyright (c) 2017 David Preece - davep@polymath.tech, All rights reserved.
#
# Permission to use, copy, modify, and/or distribute this software for any
# purpose with or without fee is hereby granted.
#
# THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
# WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
# MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
# ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
# WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
# ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
# OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
from setuptools import setup
setup(
name='messidge',
version='1.0.11',
author='David Preece',
author_email='davep@polymath.tech',
url='https://polymath.tech',
license='BSD',
packages=['messidge', 'messidge.broker', 'messidge.client'],
install_requires=['pyzmq', 'libnacl', 'shortuuid', 'psutil', 'lru-dict', 'cbor', 'bottle', 'litecache'],
description='A message passing library for gateways onto the public Internet.',
long_description="There are some users, and some nodes (on a private LAN). Messidge brokers between the two. "
"It authenticates, encrypts, and can validate the provenance of an rpc call.",
keywords='message messages message-passing authenticated encrypted gateway',
classifiers=[
'Development Status :: 5 - Production/Stable',
'Framework :: Bottle',
'Intended Audience :: Developers',
'Intended Audience :: Information Technology',
'Intended Audience :: System Administrators',
'License :: OSI Approved :: BSD License',
'Topic :: Security',
'Topic :: Security :: Cryptography',
'Topic :: Software Development :: Object Brokering',
'Topic :: System :: Distributed Computing',
'Topic :: System :: Networking',
'Topic :: Software Development :: Libraries :: Python Modules',
'Programming Language :: Python :: 3.4',
'Programming Language :: Python :: 3.5',
'Programming Language :: Python :: 3.6'
]
)
|
Python
| 0.000001
|
@@ -777,17 +777,17 @@
n='1.0.1
-1
+2
',%0A a
|
913ce9a6346a226672f264b11fcf15ce408830f6
|
Version bump.
|
setup.py
|
setup.py
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
###############################################################################
# Name: arpeggio.py
# Purpose: PEG parser interpreter
# Author: Igor R. Dejanović <igor DOT dejanovic AT gmail DOT com>
# Copyright: (c) 2009 Igor R. Dejanović <igor DOT dejanovic AT gmail DOT com>
# License: MIT License
#
# Arpeggio is implementation of packrat parser interpreter based on PEG grammars.
# Parsers are defined using python language construction or PEG language.
###############################################################################
__author__ = "Igor R. Dejanović <igor DOT dejanovic AT gmail DOT com>"
__version__ = "0.5"
from setuptools import setup
NAME = 'Arpeggio'
VERSION = __version__
DESC = 'Packrat parser interpreter'
AUTHOR = 'Igor R. Dejanovic'
AUTHOR_EMAIL = 'igor DOT dejanovic AT gmail DOT com'
LICENCE = 'MIT'
URL = 'https://github.com/igordejanovic/arpeggio'
setup(
name = NAME,
version = VERSION,
description = DESC,
author = AUTHOR,
author_email = AUTHOR_EMAIL,
maintainer = AUTHOR,
maintainer_email = AUTHOR_EMAIL,
license = LICENCE,
url = URL,
packages = ["arpeggio"],
keywords = "parser packrat peg",
classifiers=[
'Development Status :: 4 - Beta',
'Intended Audience :: Developers',
'Intended Audience :: Information Technology',
'Intended Audience :: Science/Research',
'Topic :: Software Development :: Interpreters',
'Topic :: Software Development :: Compilers',
'Topic :: Software Development :: Libraries :: Python Modules'
'License :: OSI Approved :: MIT License',
'Operating System :: OS Independent',
'Programming Language :: Python',
]
)
|
Python
| 0
|
@@ -671,9 +671,9 @@
%220.
-5
+6
%22%0A%0Af
|
528d43ffdf1ae3182f20aab30d73183a6210d3be
|
include pxd file in depends
|
setup.py
|
setup.py
|
import os
from distutils.util import get_platform
import numpy as np
from Cython.Distutils import build_ext
from setuptools import Extension, find_packages, setup
platform = get_platform()
if platform.startswith("win"):
extra_compile_args = []
extra_link_args = []
else:
extra_compile_args = [
"-fPIC",
"-m64",
"-fopenmp",
"-march=native",
"-O3",
"-ftree-vectorizer-verbose=2",
"-Wl,--no-as-needed",
]
extra_link_args = ["-shared"]
ext_modules = []
for shape in ["cylinder", "slit"]:
pkg = f"pymwm.{shape}.utils.{shape}_utils"
pyx = os.path.join("pymwm", shape, "utils", f"{shape}_utils.pyx")
ext_modules.append(
Extension(
pkg,
sources=[pyx],
depends=[],
include_dirs=[np.get_include(), "."],
extra_compile_args=extra_compile_args,
extra_link_args=extra_link_args,
libraries=[],
language="c++",
)
)
setup(
name="pymwm",
version="0.1.0",
url="https://github.com/mnishida/RII_Pandas",
license="MIT",
author="Munehiro Nishida",
author_email="mnishida@hiroshima-u.ac.jp",
description="A metallic waveguide mode solver",
long_description=open("README.md").read(),
long_description_content_type="text/markdown",
zip_safe=False,
packages=find_packages(),
setup_requires=["cython", "numpy", "scipy"],
install_requires=[line.strip() for line in open("requirements.txt").readlines()],
python_requires=">=3.7",
classifiers=[
"Development Status :: 2 - Pre-Alpha",
"License :: OSI Approved :: MIT License",
"Programming Language :: Python",
"Programming Language :: Python :: 3",
"Topic :: Scientific/Engineering",
],
keywords="metallic waveguide mode, electromagnetism",
ext_modules=ext_modules,
cmdclass={"build_ext": build_ext},
)
|
Python
| 0
|
@@ -606,19 +606,24 @@
ls%22%0A
-pyx
+basename
= os.pa
@@ -674,12 +674,8 @@
tils
-.pyx
%22)%0A
@@ -758,11 +758,25 @@
es=%5B
+basename + %22.
pyx
+%22
%5D,%0A
@@ -795,16 +795,33 @@
epends=%5B
+basename + %22.pxd%22
%5D,%0A
|
f68053b7d6bb1e8459095c11600cc53979f39ed0
|
Bump version
|
setup.py
|
setup.py
|
# coding: utf-8
# Copyright 2013 The Font Bakery Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# See AUTHORS.txt for the list of Authors and LICENSE.txt for the License.
import glob
import os
# require python fontforge module
msg = ('Python module `fontforge` is required. Install it with'
' `apt-get install python-fontforge`'
' or `brew install python; brew install fontforge --HEAD`')
try:
import fontforge
except ImportError:
raise Exception(msg)
# require ttfautohint
msg = ('Command line tool `ttfautohint` is required. Install it with'
' `apt-get install ttfautohint` or `brew install ttfautohint`')
assert [os.path.join(p, 'ttfautohint')
for p in os.environ.get('PATH').split(':')
if os.path.exists(os.path.join(p, 'ttfautohint'))], msg
# require libmagic
import ctypes
import ctypes.util
libmagic = None
# Let's try to find magic or magic1
dll = ctypes.util.find_library('magic') or ctypes.util.find_library('magic1')
# This is necessary because find_library returns None if it doesn't find the library
if dll:
libmagic = ctypes.CDLL(dll)
if not libmagic or not libmagic._name:
import sys
platform_to_lib = {'darwin': ['/opt/local/lib/libmagic.dylib',
'/usr/local/lib/libmagic.dylib'] +
# Assumes there will only be one version installed
glob.glob('/usr/local/Cellar/libmagic/*/lib/libmagic.dylib'),
'win32': ['magic1.dll']}
for dll in platform_to_lib.get(sys.platform, []):
try:
libmagic = ctypes.CDLL(dll)
break
except OSError:
pass
if not libmagic or not libmagic._name:
# It is better to raise an ImportError since we are importing magic module
raise ImportError('failed to find libmagic. Check your installation')
# now installation can begin!
from setuptools import setup
setup(
name="fontbakery",
version='0.0.10',
url='https://github.com/googlefonts/fontbakery/',
description='Font Bakery is a set of command-line tools for building'
' and testing font projects',
author='Vitaly Volkov',
author_email='hash3g@gmail.com',
packages=["bakery_cli",
"bakery_cli.pipe",
"bakery_cli.scripts",
"bakery_lint",
"bakery_lint.fonttests",
"bakery_cli.report"],
scripts=['tools/collection-management/fontbakery-travis-secure.sh',
'tools/fontbakery-build-font2ttf.py',
'tools/fontbakery-build-metadata.py',
'tools/fontbakery-build.py',
'tools/fontbakery-check-ufo.py',
'tools/fontbakery-check-description.py',
'tools/fontbakery-check-ttf.py',
'tools/fontbakery-check-otf.py',
'tools/fontbakery-check-upstream.py',
'tools/fontbakery-crawl.py',
'tools/fontbakery-fix-ascii-fontmetadata.py',
'tools/fontbakery-fix-dsig.py',
'tools/fontbakery-fix-fstype.py',
'tools/fontbakery-fix-gasp.py',
'tools/fontbakery-fix-glyph-private-encoding.py',
'tools/fontbakery-fix-nbsp.py',
'tools/fontbakery-fix-opentype-names.py',
'tools/fontbakery-fix-vertical-metrics.py',
'tools/fontbakery-remove-platformid1.py',
'tools/fontbakery-report.py',
'tools/fontbakery-setup.py',
'tools/fontbakery-travis-deploy.py',
'tools/fontbakery-travis-init.py'],
zip_safe=False,
classifiers=[
'Environment :: Console',
'Intended Audience :: Developers',
'License :: OSI Approved :: Apache Software License',
'Operating System :: OS Independent',
'Programming Language :: Python :: 2',
'Programming Language :: Python :: 2.7'
],
include_package_data=True,
install_requires=[
'lxml',
'requests',
'pyyaml',
'robofab',
'fontaine',
'html5lib',
'python-magic',
'markdown',
'scrapy',
'urwid',
'GitPython==0.3.2.RC1',
'defusedxml',
'unidecode'
],
setup_requires=['nose', 'mock', 'coverage'],
test_suite='nose.collector'
)
|
Python
| 0
|
@@ -2493,17 +2493,17 @@
n='0.0.1
-0
+1
',%0A u
|
176107d2de6b559b0d7045b006941e0da5927336
|
Bump SoS version requirement (INFO field fix)
|
setup.py
|
setup.py
|
#!/usr/bin/env python
__author__ = "Gao Wang"
__copyright__ = "Copyright 2016, Stephens lab"
__email__ = "gaow@uchicago.edu"
__license__ = "MIT"
import os, sys
sys.path.append('src')
_py_ver = sys.version_info
if _py_ver.major == 2 or (_py_ver.major == 3 and (_py_ver.minor, _py_ver.micro) < (6, 0)):
raise SystemError('Python 3.6 or higher is required. Please upgrade your Python {}.{}.{}.'
.format(_py_ver.major, _py_ver.minor, _py_ver.micro))
from setuptools import setup
from setuptools.command.bdist_egg import bdist_egg
from version import __version__
try:
with open(os.path.join(os.path.abspath(os.path.dirname(__file__)), 'README.rst'), encoding='utf-8') as f:
long_description = f.read()
except FileNotFoundError:
long_description = ''
class bdist_egg_disabled(bdist_egg):
"""Disabled version of bdist_egg
Prevents setup.py install performing setuptools' default easy_install,
which it should never ever do.
"""
def run(self):
sys.exit("ERROR: aborting implicit building of eggs. Use \"pip install .\" to install from source.")
cmdclass = {'bdist_egg': bdist_egg if 'bdist_egg' in sys.argv else bdist_egg_disabled}
setup(name = "dsc",
description = "Implementation of Dynamic Statistical Comparisons",
long_description = long_description,
author = __author__,
author_email = __email__,
url = 'https://github.com/stephenslab/dsc',
download_url= f'https://pypi.python.org/pypi/dsc/{__version__}#downloads',
version = __version__,
entry_points = {'console_scripts': ['dsc = dsc.__main__:main', 'dsc-query = dsc.__query__:main', 'dsc-io = dsc.dsc_io:main']},
license = __license__,
classifiers = [
'Development Status :: 3 - Alpha',
'Environment :: Console',
'License :: OSI Approved :: MIT License',
'Natural Language :: English',
'Operating System :: POSIX :: Linux',
'Operating System :: MacOS :: MacOS X',
'Intended Audience :: Science/Research',
'Programming Language :: Python :: 3 :: Only',
],
packages = ['dsc', 'dsc.parser'],
cmdclass = cmdclass,
package_dir = {'dsc': 'src'},
install_requires = ['numpy', 'pandas>=0.24.1', 'sympy', 'numexpr',
'sos>=0.19.11', 'sos-pbs>=0.19.6', 'h5py', 'PTable',
'pyarrow>=0.5.0', 'sqlalchemy', 'tzlocal',
'msgpack-python']
)
|
Python
| 0
|
@@ -2340,17 +2340,17 @@
%3E=0.19.1
-1
+3
', 'sos-
|
ad15cc955413575008d6fe242d253c9fce5d744a
|
update setup.py
|
setup.py
|
setup.py
|
import os
from setuptools import setup
README = open(os.path.join(os.path.dirname(__file__), 'README.md')).read()
# allow setup.py to be run from any path
os.chdir(os.path.normpath(os.path.join(os.path.abspath(__file__), os.pardir)))
setup(
name='django-facebox',
version='0.2',
packages=['facebox'],
include_package_data=True,
license='BSD License',
description='Simple facebox modal for Django',
long_description=README,
url='http://github.com/bashu/django-facebox',
author='Basil Shubin',
author_email='basil.shubin@gmail.com',
classifiers=[
'Environment :: Web Environment',
'Framework :: Django',
'Intended Audience :: Developers',
'License :: OSI Approved :: BSD License',
'Operating System :: OS Independent',
'Programming Language :: Python',
'Topic :: Internet :: WWW/HTTP',
'Topic :: Internet :: WWW/HTTP :: Dynamic Content',
],
zip_safe=False,
)
|
Python
| 0.000001
|
@@ -31,16 +31,31 @@
rt setup
+, find_packages
%0A%0AREADME
@@ -315,19 +315,42 @@
ges=
-%5B'facebox
+find_packages(exclude=%5B'example
'%5D
+)
,%0A
|