commit stringlengths 40 40 | subject stringlengths 4 1.73k | repos stringlengths 5 127k | old_file stringlengths 2 751 | new_file stringlengths 2 751 | new_contents stringlengths 1 8.98k | old_contents stringlengths 0 6.59k | license stringclasses 13
values | lang stringclasses 23
values |
|---|---|---|---|---|---|---|---|---|
e2228be1026bbf6b1b7f17791630c1ed9d365f0c | remove the debug print | rajpushkar83/cloudmesh,rajpushkar83/cloudmesh,rajpushkar83/cloudmesh,rajpushkar83/cloudmesh,rajpushkar83/cloudmesh,rajpushkar83/cloudmesh,rajpushkar83/cloudmesh | cloudmesh_web/modules/git.py | cloudmesh_web/modules/git.py | from cloudmesh.util.gitinfo import GitInfo
from cloudmesh_common.logger import LOGGER
from flask import Blueprint, render_template
from flask.ext.login import login_required
from pprint import pprint, pprint
from sh import git
import requests
log = LOGGER(__file__)
git_module = Blueprint('git_module', __name__)
@git_module.route('/git')
def display_git_authors():
result = git("shortlog", "-s", "-n",
_tty_in=True, _tty_out=False).split("\n")
authors = {}
for line in result:
try:
(commits, name) = line.split("\t")
authors[name] = {"name": name, "commits": commits}
except:
print "error:", line
"""
gitinfo = GitInfo()
# print gitinfo.version()
print "A"
print gitinfo.authors()
print "b"
pprint(gitinfo.authors("dict"))
print "c"
pprint(gitinfo.emails())
print "d"
pprint(gitinfo.emails("dict"))
print "e"
pprint(gitinfo.info())
print "f"
print gitinfo.stat("laszewski@gmail.com")
print "g"
stats = gitinfo.compute()
print stats
print "h"
for email in stats:
p = stats[email]["percentage"]
print "{0} {1:.3f}% {2:.3f}% {3:.3f}% {4:.3f}%".format(email, p[0], p[1], p[2], p[3])
"""
return render_template('general/git.html',
authors=authors)
@git_module.route('/bugs')
def display_git_bugs():
issues_open = requests.get('https://api.github.com/repos/cloudmesh/cloudmesh/issues?state=closed').json()
issues_closed = requests.get('https://api.github.com/repos/cloudmesh/cloudmesh/issues?state=open').json()
issues = issues_closed + issues_open
return render_template('general/bugs.html',
issues=issues)
| from cloudmesh.util.gitinfo import GitInfo
from cloudmesh_common.logger import LOGGER
from flask import Blueprint, render_template
from flask.ext.login import login_required
from pprint import pprint, pprint
from sh import git
import requests
log = LOGGER(__file__)
git_module = Blueprint('git_module', __name__)
@git_module.route('/git')
def display_git_authors():
result = git("shortlog", "-s", "-n",
_tty_in=True, _tty_out=False).split("\n")
authors = {}
for line in result:
print line
try:
(commits, name) = line.split("\t")
authors[name] = {"name": name, "commits": commits}
except:
print "error:", line
"""
gitinfo = GitInfo()
# print gitinfo.version()
print "A"
print gitinfo.authors()
print "b"
pprint(gitinfo.authors("dict"))
print "c"
pprint(gitinfo.emails())
print "d"
pprint(gitinfo.emails("dict"))
print "e"
pprint(gitinfo.info())
print "f"
print gitinfo.stat("laszewski@gmail.com")
print "g"
stats = gitinfo.compute()
print stats
print "h"
for email in stats:
p = stats[email]["percentage"]
print "{0} {1:.3f}% {2:.3f}% {3:.3f}% {4:.3f}%".format(email, p[0], p[1], p[2], p[3])
"""
return render_template('general/git.html',
authors=authors)
@git_module.route('/bugs')
def display_git_bugs():
issues_open = requests.get('https://api.github.com/repos/cloudmesh/cloudmesh/issues?state=closed').json()
issues_closed = requests.get('https://api.github.com/repos/cloudmesh/cloudmesh/issues?state=open').json()
issues = issues_closed + issues_open
return render_template('general/bugs.html',
issues=issues)
| apache-2.0 | Python |
f5e1b6482d36b92309ad06fee0be279371ca6a32 | change queue for dump | sunlightlabs/read_FEC,sunlightlabs/read_FEC,sunlightlabs/read_FEC,sunlightlabs/read_FEC | fecreader/downloads/views.py | fecreader/downloads/views.py | import json
from celeryproj.tasks import dump_filing_sked_celery, dump_committee_sked_celery
from celery.result import AsyncResult
from django.shortcuts import redirect
from django.shortcuts import get_object_or_404, render_to_response
from reconciliation.utils.json_helpers import render_to_json, render_to_json_via_template
def get_filing(request, filing_number, sked):
celery_request = dump_filing_sked_celery.apply_async([sked,filing_number], queue='fast',routing_key="fast")
task_id = celery_request.id
return redirect('/download/build_file/%s/' % task_id)
def get_committee(request, committee_id, sked):
celery_request = dump_committee_sked_celery.apply_async([sked,committee_id], queue='fast',routing_key="fast")
task_id = celery_request.id
return redirect('/download/build_file/%s/' % task_id)
def build_file(request, task_id):
# this is the page that gets shown while the file is downloading. It polls the status until it's done.
return render_to_response('downloads/build_file.html',
{
'task_id':task_id,
})
def get_task_status(request, task_id):
result = AsyncResult(task_id)
return_obj = {}
if result.state == 'SUCCESS':
return_obj = {'done':True, 'result':result.result}
else:
return_obj = {'done':False, 'result':None}
return render_to_json(json.dumps(return_obj))
| import json
from celeryproj.tasks import dump_filing_sked_celery, dump_committee_sked_celery
from celery.result import AsyncResult
from django.shortcuts import redirect
from django.shortcuts import get_object_or_404, render_to_response
from reconciliation.utils.json_helpers import render_to_json, render_to_json_via_template
def get_filing(request, filing_number, sked):
# should eventually have a home page, or straighten out urls
celery_request = dump_filing_sked_celery.apply_async([sked,filing_number], queue='slow',routing_key="slow")
task_id = celery_request.id
return redirect('/download/build_file/%s/' % task_id)
def get_committee(request, committee_id, sked):
# should eventually have a home page, or straighten out urls
celery_request = dump_committee_sked_celery.apply_async([sked,committee_id], queue='slow',routing_key="slow")
task_id = celery_request.id
return redirect('/download/build_file/%s/' % task_id)
def build_file(request, task_id):
# this is the page that gets shown while the file is downloading. It polls the status until it's done.
return render_to_response('downloads/build_file.html',
{
'task_id':task_id,
})
def get_task_status(request, task_id):
result = AsyncResult(task_id)
return_obj = {}
if result.state == 'SUCCESS':
return_obj = {'done':True, 'result':result.result}
else:
return_obj = {'done':False, 'result':None}
return render_to_json(json.dumps(return_obj))
| bsd-3-clause | Python |
eefa4ccdac979d58d3cf2511d7f6d4d19a6de0ca | remove ENGINE from conf before creating backend | dimagi/rapidsms-threadless-router,dimagi/rapidsms-threadless-router,caktus/rapidsms-threadless-router | threadless_router/router.py | threadless_router/router.py | import copy
from rapidsms.conf import settings
from rapidsms.router import Router as LegacyRouter
class Router(LegacyRouter):
""" RapidSMS router with the threading and Queue parts removed """
def __init__(self):
super(Router, self).__init__()
self.start()
def start(self):
self.info("starting router")
for name in settings.INSTALLED_APPS:
try:
self.add_app(name)
except Exception as e:
self.exception(e)
for name, conf in settings.INSTALLED_BACKENDS.items():
parsed_conf = copy.copy(conf)
engine = parsed_conf.pop('ENGINE')
self.add_backend(name, engine, parsed_conf)
self._start_all_apps()
self.running = True
def stop(self, graceful=False):
self.info("stopping router")
self._stop_all_apps()
self.running = False
def incoming(self, msg):
# disable IncomingMessage.flush_responses
msg.flush_responses = lambda: None
# process incoming phases as usual
super(Router, self).incoming(msg)
# handle message responses from within router
for response in msg.responses:
self.outgoing(response)
def outgoing(self, msg):
# disable OutgoingMessage.send_now
msg.send_now = lambda: None
# process outgoing phase as usual
super(Router, self).outgoing(msg)
# send message from within router
self.backends[msg.connection.backend.name].send(msg)
| from rapidsms.conf import settings
from rapidsms.router import Router as LegacyRouter
class Router(LegacyRouter):
""" RapidSMS router with the threading and Queue parts removed """
def __init__(self):
super(Router, self).__init__()
self.start()
def start(self):
self.info("starting router")
for name in settings.INSTALLED_APPS:
try:
self.add_app(name)
except Exception as e:
self.exception(e)
for name, conf in settings.INSTALLED_BACKENDS.items():
self.add_backend(name, conf.get("ENGINE"), conf)
self._start_all_apps()
self.running = True
def stop(self, graceful=False):
self.info("stopping router")
self._stop_all_apps()
self.running = False
def incoming(self, msg):
# disable IncomingMessage.flush_responses
msg.flush_responses = lambda: None
# process incoming phases as usual
super(Router, self).incoming(msg)
# handle message responses from within router
for response in msg.responses:
self.outgoing(response)
def outgoing(self, msg):
# disable OutgoingMessage.send_now
msg.send_now = lambda: None
# process outgoing phase as usual
super(Router, self).outgoing(msg)
# send message from within router
self.backends[msg.connection.backend.name].send(msg)
| bsd-3-clause | Python |
54b7d45d25d311f184276b7d7ba701c2fa1ae2db | fix bp bug (closes #758) | laurent-george/weboob,Konubinix/weboob,frankrousseau/weboob,frankrousseau/weboob,sputnick-dev/weboob,eirmag/weboob,willprice/weboob,yannrouillard/weboob,Boussadia/weboob,franek/weboob,Boussadia/weboob,yannrouillard/weboob,Boussadia/weboob,nojhan/weboob-devel,RouxRC/weboob,RouxRC/weboob,Konubinix/weboob,laurent-george/weboob,laurent-george/weboob,sputnick-dev/weboob,Konubinix/weboob,eirmag/weboob,frankrousseau/weboob,nojhan/weboob-devel,franek/weboob,nojhan/weboob-devel,eirmag/weboob,RouxRC/weboob,willprice/weboob,sputnick-dev/weboob,Boussadia/weboob,franek/weboob,willprice/weboob,yannrouillard/weboob | modules/bp/pages/accountlist.py | modules/bp/pages/accountlist.py | # -*- coding: utf-8 -*-
# Copyright(C) 2010-2011 Nicolas Duhamel
#
# This file is part of weboob.
#
# weboob is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# weboob is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with weboob. If not, see <http://www.gnu.org/licenses/>.
from weboob.capabilities.bank import Account, AccountNotFound
from weboob.tools.browser import BasePage
__all__ = ['AccountList']
class AccountList(BasePage):
def on_loaded(self):
self.account_list = []
self.parse_table('comptes')
self.parse_table('comptesEpargne')
self.parse_table('comptesTitres')
self.parse_table('comptesVie')
self.parse_table('comptesRetraireEuros')
def get_accounts_list(self):
return self.account_list
def parse_table(self, what):
tables = self.document.xpath("//table[@id='%s']" % what, smart_strings=False)
if len(tables) < 1:
return
lines = tables[0].xpath(".//tbody/tr")
for line in lines:
account = Account()
tmp = line.xpath("./td//a")[0]
account.label = tmp.text
account.link_id = tmp.get("href")
tmp = line.xpath("./td/span/strong")
tmp_id = tmp[0].text
tmp_balance = tmp[1].text
account.id = tmp_id
account.balance = float(''.join(tmp_balance.replace('.','').replace(',','.').split()))
self.account_list.append(account)
def get_account(self, id):
for account in self.account_list:
if account.id == id:
return account
raise AccountNotFound('Unable to find account: %s' % id)
| # -*- coding: utf-8 -*-
# Copyright(C) 2010-2011 Nicolas Duhamel
#
# This file is part of weboob.
#
# weboob is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# weboob is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with weboob. If not, see <http://www.gnu.org/licenses/>.
from weboob.capabilities.bank import Account, AccountNotFound
from weboob.tools.browser import BasePage
__all__ = ['AccountList']
class AccountList(BasePage):
def on_loaded(self):
self.account_list = []
self.parse_table('comptes')
self.parse_table('comptesEpargne')
self.parse_table('comptesTitres')
self.parse_table('comptesVie')
self.parse_table('comptesRetraireEuros')
def get_accounts_list(self):
return self.account_list
def parse_table(self, what):
tables = self.document.xpath("//table[@id='%s']" % what, smart_strings=False)
if len(tables) < 1:
return
lines = tables[0].xpath(".//tbody/tr")
for line in lines:
account = Account()
tmp = line.xpath("./td//a")[0]
account.label = tmp.text
account.link_id = tmp.get("href")
tmp = line.xpath("./td//strong")
if len(tmp) != 2:
tmp_id = line.xpath("./td//span")[1].text
tmp_balance = tmp[0].text
else:
tmp_id = tmp[0].text
tmp_balance = tmp[1].text
account.id = tmp_id
account.balance = float(''.join(tmp_balance.replace('.','').replace(',','.').split()))
self.account_list.append(account)
def get_account(self, id):
for account in self.account_list:
if account.id == id:
return account
raise AccountNotFound('Unable to find account: %s' % id)
| agpl-3.0 | Python |
1f28c855a11d658ac312191d57b37253cf69a2b1 | fix modify_corpus to know own directory | benjaminwilson/word2vec-norm-experiments,benjaminwilson/word2vec-norm-experiments | modify_corpus.py | modify_corpus.py | """
Modifies an input text for the experiments according to the parameters defined in parameters.py
Assumes the filenames from filenames.sh
Writes out files listing the words chosen.
Requires sufficient diskspace to write out the modified text at intermediate steps.
"""
from __future__ import print_function
import os
from parameters import *
from functions import *
directory = os.path.dirname(os.path.realpath(__file__))
filenames = dict()
execfile(os.path.join(directory, 'filenames.sh'), filenames)
intermediate_file = 'delete.me'
with file(filenames['corpus_unmodified']) as f:
counts = count_words(f)
total_words = sum(counts.values())
print('Total words in corpus : %i' % total_words)
frequent_words = [word for word in counts if counts[word] > experiment_word_occurrence_min]
random.seed(random_seed)
words_experiment_1, words_experiment_2 = [random.sample(frequent_words, number_of_experiment_words) for _ in range(2)]
with file('word_freq_experiment_words', 'w') as f:
for word in words_experiment_1:
print('%s,%i' % (word, counts[word]), file=f)
with file('coocc_noise_experiment_words', 'w') as f:
for word in words_experiment_2:
print('%s,%i' % (word, counts[word]), file=f)
# intersperse the meaningless token throughout the corpus
with open(filenames['corpus_unmodified']) as f_in, open(intermediate_file, 'w') as f_out:
intersperse_words({meaningless_token: meaningless_token_frequency}, f_in, f_out)
words_experiment_1.append(meaningless_token)
# perform the replacement procedures for the word frequency and the noise cooccurrence experiments
word_samplers = {}
for word in words_experiment_1:
word_samplers[word] = truncated_geometric_sampling(word, word_freq_experiment_ratio, word_freq_experiment_power_max)
for word in words_experiment_2:
word_samplers[word] = truncated_geometric_sampling(word, coocc_noise_experiment_ratio, coocc_noise_experiment_power_max)
tmp_file = 'delete.me.2'
with open(intermediate_file) as f_in, open(tmp_file, 'w') as f_out:
replace_words(word_samplers, f_in, f_out)
intermediate_file = tmp_file
# add noise to the cooccurrence distributions of experiment 2 words
token_freq_dict = dict()
for word in words_experiment_2:
target_freq = counts[word] * 1. / total_words
for i in range(1, coocc_noise_experiment_power_max + 1):
current_freq = target_freq * truncated_geometric_proba(coocc_noise_experiment_ratio, i, coocc_noise_experiment_power_max)
token_freq_dict[build_experiment_token(word, i)] = target_freq - current_freq
with open(intermediate_file) as f_in, open(filenames['corpus_modified'], 'w') as f_out:
intersperse_words(token_freq_dict, f_in, f_out)
| """
Modifies an input text for the experiments according to the parameters defined in parameters.py
Assumes the filenames from filenames.sh
Writes out files listing the words chosen.
Requires sufficient diskspace to write out the modified text at intermediate steps.
"""
from __future__ import print_function
from parameters import *
from functions import *
filenames = dict()
execfile('filenames.sh', filenames)
intermediate_file = 'delete.me'
with file(filenames['corpus_unmodified']) as f:
counts = count_words(f)
total_words = sum(counts.values())
print('Total words in corpus : %i' % total_words)
frequent_words = [word for word in counts if counts[word] > experiment_word_occurrence_min]
random.seed(random_seed)
words_experiment_1, words_experiment_2 = [random.sample(frequent_words, number_of_experiment_words) for _ in range(2)]
with file('word_freq_experiment_words', 'w') as f:
for word in words_experiment_1:
print('%s,%i' % (word, counts[word]), file=f)
with file('coocc_noise_experiment_words', 'w') as f:
for word in words_experiment_2:
print('%s,%i' % (word, counts[word]), file=f)
# intersperse the meaningless token throughout the corpus
with open(filenames['corpus_unmodified']) as f_in, open(intermediate_file, 'w') as f_out:
intersperse_words({meaningless_token: meaningless_token_frequency}, f_in, f_out)
words_experiment_1.append(meaningless_token)
# perform the replacement procedures for the word frequency and the noise cooccurrence experiments
word_samplers = {}
for word in words_experiment_1:
word_samplers[word] = truncated_geometric_sampling(word, word_freq_experiment_ratio, word_freq_experiment_power_max)
for word in words_experiment_2:
word_samplers[word] = truncated_geometric_sampling(word, coocc_noise_experiment_ratio, coocc_noise_experiment_power_max)
tmp_file = 'delete.me.2'
with open(intermediate_file) as f_in, open(tmp_file, 'w') as f_out:
replace_words(word_samplers, f_in, f_out)
intermediate_file = tmp_file
# add noise to the cooccurrence distributions of experiment 2 words
token_freq_dict = dict()
for word in words_experiment_2:
target_freq = counts[word] * 1. / total_words
for i in range(1, coocc_noise_experiment_power_max + 1):
current_freq = target_freq * truncated_geometric_proba(coocc_noise_experiment_ratio, i, coocc_noise_experiment_power_max)
token_freq_dict[build_experiment_token(word, i)] = target_freq - current_freq
with open(intermediate_file) as f_in, open(filenames['corpus_modified'], 'w') as f_out:
intersperse_words(token_freq_dict, f_in, f_out)
| apache-2.0 | Python |
3fbfe633899b9e95534ff612a3f509ba0c1a1a65 | Add comments | suclearnub/scubot | modules/roles.py | modules/roles.py | import discord
import shlex
rolesTriggerString = '!role' # String to listen for as trigger
async def parse_roles_command(message, client):
server_roles = message.server.roles # Grab a list of all roles as Role objects
server_roles_str = [x.name for x in server_roles] # String-ify it into their names
msg = shlex.split(message.content)
role = [i for i,x in enumerate(server_roles_str) if x == msg[1]] # Check where in the list the role is
if len(msg) != 1:
try:
await client.add_roles(message.author,message.server.roles[role[0]])
except discord.DiscordException:
msg = "I'm sorry " + message.author.name + " ,I'm afraid I can't do that."
client.send_message(message.channel, msg)
else:
pass
| import discord
import shlex
rolesTriggerString = '!role'
async def parse_roles_command(message, client):
server_roles = message.server.roles
server_roles_str = [x.name for x in server_roles]
msg = shlex.split(message.content)
role = [i for i,x in enumerate(server_roles_str) if x == msg[1]]
if len(msg) != 1:
try:
await client.add_roles(message.author,message.server.roles[role[0]])
except discord.DiscordException:
msg = "I'm sorry " + message.author.name + " ,I'm afraid I can't do that."
client.send_message(message.channel, msg)
else:
pass
| mit | Python |
b55bdbc192d2a28987b42a8e495f070958eb592d | Remove slicing. | pbs/cmsplugin-filer,pbs/cmsplugin-filer,pbs/cmsplugin-filer,pbs/cmsplugin-filer | cmsplugin_filer_image/migrations/0002_delete_duplicate_thumbnailoptions.py | cmsplugin_filer_image/migrations/0002_delete_duplicate_thumbnailoptions.py | from django.db import migrations
from django.db.models import Count
def forward(apps, schema_editor):
"""
Get all the duplicates for ThumbnailOption.
Point each FilerImage where the duplicates are used to the correct
ThumbnailOption.
Delete remaining ThumbnailOption duplicates.
"""
ThumbnailOption = apps.get_model("cmsplugin_filer_image", "ThumbnailOption")
unique_fields = ["name", "width", "height", "crop", "upscale"]
duplicates = ThumbnailOption.objects.values(*unique_fields) \
.annotate(count_id=Count('id')) \
.filter(count_id__gt=1)
for duplicate in duplicates:
qs = ThumbnailOption.objects.filter(
**{x: duplicate[x] for x in unique_fields})
id_to_keep = qs[0].id
for thumbnail_option in qs:
for filer_image in thumbnail_option.filerimage_set.all():
filer_image.thumbnail_option_id = id_to_keep
filer_image.save()
thumbnail_option.delete()
class Migration(migrations.Migration):
dependencies = [('cmsplugin_filer_image', '0001_initial'),]
operations = [migrations.RunPython(forward)]
| from django.db import migrations
from django.db.models import Count
def forward(apps, schema_editor):
"""
Get all the duplicates for ThumbnailOption.
Point each FilerImage where the duplicates are used to the correct
ThumbnailOption.
Delete remaining ThumbnailOption duplicates.
"""
ThumbnailOption = apps.get_model("cmsplugin_filer_image", "ThumbnailOption")
unique_fields = ["name", "width", "height", "crop", "upscale"]
duplicates = ThumbnailOption.objects.values(*unique_fields) \
.annotate(count_id=Count('id')) \
.filter(count_id__gt=1)
for duplicate in duplicates:
qs = ThumbnailOption.objects.filter(
**{x: duplicate[x] for x in unique_fields})
id_to_keep = qs[:1][0].id
for thumbnail_option in qs[1:]:
for filer_image in thumbnail_option.filerimage_set.all():
filer_image.thumbnail_option_id = id_to_keep
filer_image.save()
thumbnail_option.delete()
class Migration(migrations.Migration):
dependencies = [('cmsplugin_filer_image', '0001_initial'),]
operations = [migrations.RunPython(forward)]
| bsd-3-clause | Python |
e06416aca47adf82942fe31b416cec830c8b46f2 | Add operation to delete an Event | pferreir/indico,mvidalgarcia/indico,mic4ael/indico,pferreir/indico,indico/indico,DirkHoffmann/indico,OmeGak/indico,mic4ael/indico,indico/indico,ThiefMaster/indico,DirkHoffmann/indico,DirkHoffmann/indico,OmeGak/indico,mic4ael/indico,mvidalgarcia/indico,indico/indico,indico/indico,mvidalgarcia/indico,pferreir/indico,OmeGak/indico,ThiefMaster/indico,mic4ael/indico,ThiefMaster/indico,mvidalgarcia/indico,DirkHoffmann/indico,ThiefMaster/indico,OmeGak/indico,pferreir/indico | indico/modules/events/operations.py | indico/modules/events/operations.py | # This file is part of Indico.
# Copyright (C) 2002 - 2016 European Organization for Nuclear Research (CERN).
#
# Indico is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License as
# published by the Free Software Foundation; either version 3 of the
# License, or (at your option) any later version.
#
# Indico is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Indico; if not, see <http://www.gnu.org/licenses/>.
from __future__ import unicode_literals
from flask import session
from indico.core.db import db
from indico.modules.events.models.references import ReferenceType
from indico.modules.events import logger
def create_reference_type(data):
reference_type = ReferenceType()
reference_type.populate_from_dict(data)
db.session.add(reference_type)
db.session.flush()
logger.info('Reference type "%s" created by %s', reference_type, session.user)
return reference_type
def update_reference_type(reference_type, data):
reference_type.populate_from_dict(data)
db.session.flush()
logger.info('Reference type "%s" updated by %s', reference_type, session.user)
def delete_reference_type(reference_type):
db.session.delete(reference_type)
db.session.flush()
logger.info('Reference type "%s" deleted by %s', reference_type, session.user)
def create_event_references(event, data):
event.references = data['references']
db.session.flush()
for reference in event.references:
logger.info('Reference "%s" created by %s', reference, session.user)
def update_event(event, data):
event.populate_from_dict(data)
db.session.flush()
logger.info('Event %r updated with %r', event, data)
def delete_event(event):
event.as_legacy.delete(session.user)
db.session.flush()
logger.info('Event %r deleted by %r', event, session.user)
| # This file is part of Indico.
# Copyright (C) 2002 - 2016 European Organization for Nuclear Research (CERN).
#
# Indico is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License as
# published by the Free Software Foundation; either version 3 of the
# License, or (at your option) any later version.
#
# Indico is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Indico; if not, see <http://www.gnu.org/licenses/>.
from __future__ import unicode_literals
from flask import session
from indico.core.db import db
from indico.modules.events.models.references import ReferenceType
from indico.modules.events import logger
def create_reference_type(data):
reference_type = ReferenceType()
reference_type.populate_from_dict(data)
db.session.add(reference_type)
db.session.flush()
logger.info('Reference type "%s" created by %s', reference_type, session.user)
return reference_type
def update_reference_type(reference_type, data):
reference_type.populate_from_dict(data)
db.session.flush()
logger.info('Reference type "%s" updated by %s', reference_type, session.user)
def delete_reference_type(reference_type):
db.session.delete(reference_type)
db.session.flush()
logger.info('Reference type "%s" deleted by %s', reference_type, session.user)
def create_event_references(event, data):
event.references = data['references']
db.session.flush()
for reference in event.references:
logger.info('Reference "%s" created by %s', reference, session.user)
def update_event(event, data):
event.populate_from_dict(data)
db.session.flush()
logger.info('Event %r updated with %r', event, data)
| mit | Python |
2689f26434a23cf81e72c2ca6320f46d092bc22b | Add missing import to server/application_context.py | justacec/bokeh,percyfal/bokeh,ptitjano/bokeh,quasiben/bokeh,aavanian/bokeh,bokeh/bokeh,ptitjano/bokeh,draperjames/bokeh,stonebig/bokeh,aavanian/bokeh,schoolie/bokeh,stonebig/bokeh,maxalbert/bokeh,bokeh/bokeh,DuCorey/bokeh,KasperPRasmussen/bokeh,clairetang6/bokeh,KasperPRasmussen/bokeh,bokeh/bokeh,ericmjl/bokeh,schoolie/bokeh,justacec/bokeh,draperjames/bokeh,clairetang6/bokeh,ericmjl/bokeh,Karel-van-de-Plassche/bokeh,maxalbert/bokeh,phobson/bokeh,mindriot101/bokeh,philippjfr/bokeh,Karel-van-de-Plassche/bokeh,aavanian/bokeh,DuCorey/bokeh,ericmjl/bokeh,jakirkham/bokeh,jakirkham/bokeh,rs2/bokeh,percyfal/bokeh,schoolie/bokeh,azjps/bokeh,msarahan/bokeh,stonebig/bokeh,ericmjl/bokeh,aavanian/bokeh,rs2/bokeh,timsnyder/bokeh,msarahan/bokeh,Karel-van-de-Plassche/bokeh,jakirkham/bokeh,aiguofer/bokeh,aiguofer/bokeh,percyfal/bokeh,gpfreitas/bokeh,philippjfr/bokeh,aiguofer/bokeh,quasiben/bokeh,aiguofer/bokeh,dennisobrien/bokeh,stonebig/bokeh,rs2/bokeh,azjps/bokeh,KasperPRasmussen/bokeh,ericmjl/bokeh,dennisobrien/bokeh,dennisobrien/bokeh,timsnyder/bokeh,percyfal/bokeh,clairetang6/bokeh,htygithub/bokeh,ptitjano/bokeh,azjps/bokeh,phobson/bokeh,philippjfr/bokeh,mindriot101/bokeh,draperjames/bokeh,philippjfr/bokeh,timsnyder/bokeh,schoolie/bokeh,KasperPRasmussen/bokeh,htygithub/bokeh,dennisobrien/bokeh,rs2/bokeh,bokeh/bokeh,gpfreitas/bokeh,DuCorey/bokeh,gpfreitas/bokeh,draperjames/bokeh,Karel-van-de-Plassche/bokeh,quasiben/bokeh,DuCorey/bokeh,percyfal/bokeh,aiguofer/bokeh,mindriot101/bokeh,htygithub/bokeh,DuCorey/bokeh,jakirkham/bokeh,schoolie/bokeh,msarahan/bokeh,maxalbert/bokeh,maxalbert/bokeh,phobson/bokeh,bokeh/bokeh,Karel-van-de-Plassche/bokeh,justacec/bokeh,dennisobrien/bokeh,ptitjano/bokeh,azjps/bokeh,aavanian/bokeh,justacec/bokeh,azjps/bokeh,clairetang6/bokeh,timsnyder/bokeh,msarahan/bokeh,phobson/bokeh,draperjames/bokeh,htygithub/bokeh,ptitjano/bokeh,phobson/bokeh,gpfreitas/bokeh,rs
2/bokeh,philippjfr/bokeh,timsnyder/bokeh,mindriot101/bokeh,KasperPRasmussen/bokeh,jakirkham/bokeh | bokeh/server/application_context.py | bokeh/server/application_context.py | ''' Provides the ``ApplicationContext`` class.
'''
from __future__ import absolute_import
import logging
log = logging.getLogger(__name__)
from .session import ServerSession
from .exceptions import ProtocolError
class ApplicationContext(object):
''' Server-side holder for bokeh.application.Application plus any associated data.
This holds data that's global to all sessions, while ServerSession holds
data specific to an "instance" of the application.
'''
def __init__(self, application):
self._application = application
self._sessions = dict()
@property
def application(self):
return self._application
def create_session_if_needed(self, session_id):
# this is because empty session_ids would be "falsey" and
# potentially open up a way for clients to confuse us
if len(session_id) == 0:
raise ProtocolError("Session ID must not be empty")
if session_id not in self._sessions:
doc = self._application.create_document()
session = ServerSession(session_id, doc)
self._sessions[session_id] = session
def get_session(self, session_id):
if session_id in self._sessions:
session = self._sessions[session_id]
return session
else:
raise ProtocolError("No such session " + session_id)
def discard_session(self, session):
if session.connection_count > 0:
raise RuntimeError("Should not be discarding a session with open connections")
log.debug("Discarding session %r last in use %r seconds ago", session.id, session.seconds_since_last_unsubscribe)
del self._sessions[session.id]
def cleanup_sessions(self, unused_session_linger_seconds):
to_discard = []
for session in self._sessions.values():
if session.connection_count == 0 and \
session.seconds_since_last_unsubscribe > unused_session_linger_seconds:
to_discard.append(session)
for session in to_discard:
self.discard_session(session)
| ''' Provides the ``ApplicationContext`` class.
'''
from __future__ import absolute_import
import logging
log = logging.getLogger(__name__)
from .session import ServerSession
class ApplicationContext(object):
''' Server-side holder for bokeh.application.Application plus any associated data.
This holds data that's global to all sessions, while ServerSession holds
data specific to an "instance" of the application.
'''
def __init__(self, application):
self._application = application
self._sessions = dict()
@property
def application(self):
return self._application
def create_session_if_needed(self, session_id):
# this is because empty session_ids would be "falsey" and
# potentially open up a way for clients to confuse us
if len(session_id) == 0:
raise ProtocolError("Session ID must not be empty")
if session_id not in self._sessions:
doc = self._application.create_document()
session = ServerSession(session_id, doc)
self._sessions[session_id] = session
def get_session(self, session_id):
if session_id in self._sessions:
session = self._sessions[session_id]
return session
else:
raise ProtocolError("No such session " + session_id)
def discard_session(self, session):
if session.connection_count > 0:
raise RuntimeError("Should not be discarding a session with open connections")
log.debug("Discarding session %r last in use %r seconds ago", session.id, session.seconds_since_last_unsubscribe)
del self._sessions[session.id]
def cleanup_sessions(self, unused_session_linger_seconds):
to_discard = []
for session in self._sessions.values():
if session.connection_count == 0 and \
session.seconds_since_last_unsubscribe > unused_session_linger_seconds:
to_discard.append(session)
for session in to_discard:
self.discard_session(session)
| bsd-3-clause | Python |
9c3e34a6e8793ad79e605fa956f0ac60a4bb6fcf | Use transactions on server side also (probably worth performance penalty) | altaurog/django-caspy,altaurog/django-caspy,altaurog/django-caspy | caspy/api/urls.py | caspy/api/urls.py | import re
from django.conf.urls import patterns, url
from django.db import transaction
from rest_framework.decorators import api_view
from rest_framework.response import Response
from . import views
urlre_p1 = re.compile(r'\(\?P<\w+>.*\(\?#(:\w+)\)\)')
urlre_p2 = re.compile(r'^\^|\$$')
def rev(viewname):
for urlp in urlpatterns:
if urlp.name == viewname:
return urlre_p2.sub('', urlre_p1.sub(r'\g<1>', urlp._regex))
raise RuntimeError("No reverse match for %s" % viewname)
def response(path, endpoints):
return Response({name: path + p for name, p in endpoints.items()})
@api_view(('GET',))
def api_root(request):
return response(request.path, {
'currency': rev('api-currency-detail'),
'book': rev('api-book-detail'),
})
urlpatterns = patterns('', # noqa
url(r'^$',
api_root,
name='api-root'),
url(r'^currency/$',
transaction.atomic(views.CurrencyList.as_view()),
name='api-currency-list'),
url(r'^currency/(?P<pk>[A-Z]+(?#:cur_code))/$',
transaction.atomic(views.CurrencyDetail.as_view()),
name='api-currency-detail'),
url(r'^book/$',
views.BookList.as_view(),
name='api-book-list'),
url(r'^book/(?P<pk>\d+(?#:book_id))/$',
views.BookDetail.as_view(),
name='api-book-detail'),
)
| import re
from django.conf.urls import patterns, url
from rest_framework.decorators import api_view
from rest_framework.response import Response
from . import views
urlre_p1 = re.compile(r'\(\?P<\w+>.*\(\?#(:\w+)\)\)')
urlre_p2 = re.compile(r'^\^|\$$')
def rev(viewname):
for urlp in urlpatterns:
if urlp.name == viewname:
return urlre_p2.sub('', urlre_p1.sub(r'\g<1>', urlp._regex))
raise RuntimeError("No reverse match for %s" % viewname)
def response(path, endpoints):
return Response({name: path + p for name, p in endpoints.items()})
@api_view(('GET',))
def api_root(request):
return response(request.path, {
'currency': rev('api-currency-detail'),
'book': rev('api-book-detail'),
})
urlpatterns = patterns('', # noqa
url(r'^$',
api_root,
name='api-root'),
url(r'^currency/$',
views.CurrencyList.as_view(),
name='api-currency-list'),
url(r'^currency/(?P<pk>[A-Z]+(?#:cur_code))/$',
views.CurrencyDetail.as_view(),
name='api-currency-detail'),
url(r'^book/$',
views.BookList.as_view(),
name='api-book-list'),
url(r'^book/(?P<pk>\d+(?#:book_id))/$',
views.BookDetail.as_view(),
name='api-book-detail'),
)
| bsd-3-clause | Python |
3703d9fd3cb211c52a9d2beddc2d9b2ce99cb1a0 | Remove double import | cjh1/tomviz,cjh1/tomviz,cryos/tomviz,thewtex/tomviz,OpenChemistry/tomviz,OpenChemistry/tomviz,mathturtle/tomviz,thewtex/tomviz,mathturtle/tomviz,cryos/tomviz,thewtex/tomviz,cjh1/tomviz,OpenChemistry/tomviz,OpenChemistry/tomviz,cryos/tomviz,mathturtle/tomviz | tomviz/python/STEM_probe.py | tomviz/python/STEM_probe.py | def generate_dataset(array):
"""Generate STEM probe function"""
import numpy as np
#----USER SPECIFIED VARIABLES-----#
###voltage###
###alpha_max###
###Nxy###
###Nz###
###dxy###
###df_min###
###df_max###
###c3###
###f_a2###
###phi_a2###
###f_a3###
###phi_a3###
###f_c3###
###phi_c3###
#---------------------------------#
#Convert all units to angstrom
c3 = c3 * 1e7
f_a2 = f_a2 * 10
f_a3 = f_a3 * 10
f_c3 = f_c3 * 10
df_min = df_min * 10
df_max = df_max * 10
wavelength = 12.398 / np.sqrt((2 * 511.0 + voltage) * voltage) #angstrom
k_max = alpha_max * 1e-3 / wavelength
k_min = 0.0
dk = 1.0 / (dxy * Nxy)
kx = np.linspace(-np.floor(Nxy / 2.0), np.ceil(Nxy / 2.0) - 1, Nxy)
[kY, kX] = np.meshgrid(kx, kx)
kX = kX * dk
kY = kY * dk
kR = np.sqrt(kX**2 + kY**2)
phi = np.arctan2(kY, kX)
df = np.linspace(df_min, df_max, Nz)
for i in range(0, Nz):
defocus = df[i]
chi = -np.pi * wavelength * kR**2 * defocus + np.pi / 2 * c3 * wavelength**3 * kR**4 + np.pi * f_a2 * wavelength * kR**2 * \
np.sin(2 * (phi - phi_a2)) + f_a3 * wavelength**2 * kR**3 * np.sin(3 * (phi - \
phi_a3)) + 2 * np.pi / 3 * f_c3 * wavelength**2 * kR**3 * np.sin(phi - phi_c3)
probe = np.exp(-1j * chi)
probe[kR > k_max] = 0
probe[kR < k_min] = 0
probe = np.fft.fftshift(np.fft.ifft2(np.fft.ifftshift(probe)))
probe = probe / np.sqrt(np.sum(np.abs(probe)**2) * dxy * dxy)
np.copyto(array[:, :, i], np.abs(probe))
| def generate_dataset(array):
import numpy as np
"""Generate STEM probe function"""
import numpy as np
#----USER SPECIFIED VARIABLES-----#
###voltage###
###alpha_max###
###Nxy###
###Nz###
###dxy###
###df_min###
###df_max###
###c3###
###f_a2###
###phi_a2###
###f_a3###
###phi_a3###
###f_c3###
###phi_c3###
#---------------------------------#
#Convert all units to angstrom
c3 = c3 * 1e7
f_a2 = f_a2 * 10
f_a3 = f_a3 * 10
f_c3 = f_c3 * 10
df_min = df_min * 10
df_max = df_max * 10
wavelength = 12.398 / np.sqrt((2 * 511.0 + voltage) * voltage) #angstrom
k_max = alpha_max * 1e-3 / wavelength
k_min = 0.0
dk = 1.0 / (dxy * Nxy)
kx = np.linspace(-np.floor(Nxy / 2.0), np.ceil(Nxy / 2.0) - 1, Nxy)
[kY, kX] = np.meshgrid(kx, kx)
kX = kX * dk
kY = kY * dk
kR = np.sqrt(kX**2 + kY**2)
phi = np.arctan2(kY, kX)
df = np.linspace(df_min, df_max, Nz)
for i in range(0, Nz):
defocus = df[i]
chi = -np.pi * wavelength * kR**2 * defocus + np.pi / 2 * c3 * wavelength**3 * kR**4 + np.pi * f_a2 * wavelength * kR**2 * \
np.sin(2 * (phi - phi_a2)) + f_a3 * wavelength**2 * kR**3 * np.sin(3 * (phi - \
phi_a3)) + 2 * np.pi / 3 * f_c3 * wavelength**2 * kR**3 * np.sin(phi - phi_c3)
probe = np.exp(-1j * chi)
probe[kR > k_max] = 0
probe[kR < k_min] = 0
probe = np.fft.fftshift(np.fft.ifft2(np.fft.ifftshift(probe)))
probe = probe / np.sqrt(np.sum(np.abs(probe)**2) * dxy * dxy)
np.copyto(array[:, :, i], np.abs(probe))
| bsd-3-clause | Python |
a7b95dada6098dc2837c4072a7820818c6efc538 | Change URLs to format used in templates (consistent with news app) | mollyproject/mollyproject,mollyproject/mollyproject,mollyproject/mollyproject | molly/apps/feeds/events/urls.py | molly/apps/feeds/events/urls.py | from django.conf.urls.defaults import *
from .views import IndexView, ItemListView, ItemDetailView
urlpatterns = patterns('',
(r'^$',
IndexView, {},
'index'),
(r'^(?P<slug>[a-z\-]+)/$',
ItemListView, {},
'item-list'),
(r'^(?P<slug>[a-z\-]+)/(?P<id>\d+)/$',
ItemDetailView, {},
'item-detail'),
) | from django.conf.urls.defaults import *
from .views import IndexView, ItemListView, ItemDetailView
urlpatterns = patterns('',
(r'^$',
IndexView, {},
'index'),
(r'^(?P<slug>[a-z\-]+)/$',
ItemListView, {},
'item_list'),
(r'^(?P<slug>[a-z\-]+)/(?P<id>\d+)/$',
ItemDetailView, {},
'item_detail'),
) | apache-2.0 | Python |
2955f9d5bbf08c7abd01de83faea8292d309b7cd | use proper url for post-action | ScorpionResponse/freelancefinder,ScorpionResponse/freelancefinder,ScorpionResponse/freelancefinder | freelancefinder/jobs/urls.py | freelancefinder/jobs/urls.py | """
Configure urls for the jobs app.
The default base for these urls is /jobs/
"""
from django.conf.urls import url
from .views import (JobListView, JobDetailView, PostListView, PostActionView,
FreelancerListView, FreelancerDetailView)
urlpatterns = [
url(r'^job-list/$', JobListView.as_view(), name="job-list"),
url(r'^job/(?P<pk>\d+)/$', JobDetailView.as_view(), name="job-detail"),
url(r'^freelancer-list/$', FreelancerListView.as_view(), name="freelancer-list"),
url(r'^freelancer/(?P<pk>\d+)/$', FreelancerDetailView.as_view(), name="freelancer-detail"),
url(r'^post-list/$', PostListView.as_view(), name="post-list"),
url(r'^post-action/$', PostActionView.as_view(), name="post-action"),
]
| """
Configure urls for the jobs app.
The default base for these urls is /jobs/
"""
from django.conf.urls import url
from .views import (JobListView, JobDetailView, PostListView, PostActionView,
FreelancerListView, FreelancerDetailView)
urlpatterns = [
url(r'^job-list/$', JobListView.as_view(), name="job-list"),
url(r'^job/(?P<pk>\d+)/$', JobDetailView.as_view(), name="job-detail"),
url(r'^freelancer-list/$', FreelancerListView.as_view(), name="freelancer-list"),
url(r'^freelancer/(?P<pk>\d+)/$', FreelancerDetailView.as_view(), name="freelancer-detail"),
url(r'^post-list/$', PostListView.as_view(), name="post-list"),
url(r'^post-list/$', PostActionView.as_view(), name="post-action"),
]
| bsd-3-clause | Python |
f6ac29be0d81786cea3e803f59a8861b020034b7 | Fix revision numbers. | Ademan/NumPy-GSoC,teoliphant/numpy-refactor,efiring/numpy-work,teoliphant/numpy-refactor,jasonmccampbell/numpy-refactor-sprint,teoliphant/numpy-refactor,teoliphant/numpy-refactor,jasonmccampbell/numpy-refactor-sprint,chadnetzer/numpy-gaurdro,Ademan/NumPy-GSoC,illume/numpy3k,chadnetzer/numpy-gaurdro,jasonmccampbell/numpy-refactor-sprint,efiring/numpy-work,Ademan/NumPy-GSoC,chadnetzer/numpy-gaurdro,teoliphant/numpy-refactor,jasonmccampbell/numpy-refactor-sprint,Ademan/NumPy-GSoC,illume/numpy3k,efiring/numpy-work,efiring/numpy-work,chadnetzer/numpy-gaurdro,illume/numpy3k,illume/numpy3k | numpy/version.py | numpy/version.py | version='1.0b2'
import os
svn_version_file = os.path.join(os.path.dirname(__file__),
'core','__svn_version__.py')
if os.path.isfile(svn_version_file):
import imp
svn = imp.load_module('numpy.core.__svn_version__',
open(svn_version_file),
svn_version_file,
('.py','U',1))
version += '.dev-r'+svn.version
| version='0.9.9'
import os
svn_version_file = os.path.join(os.path.dirname(__file__),
'core','__svn_version__.py')
if os.path.isfile(svn_version_file):
import imp
svn = imp.load_module('numpy.core.__svn_version__',
open(svn_version_file),
svn_version_file,
('.py','U',1))
version += '.'+svn.version
| bsd-3-clause | Python |
d667566c6ea49d2730e667ecb625f7a5c09e64ac | fix to json | NCSSM-CS/CSAssess,NCSSM-CS/CSAssess,NCSSM-CS/CSAssess,NCSSM-CS/CSAssess | controller/getQuestion.py | controller/getQuestion.py | #!/usr/local/bin/python3
"""
created_by: Ebube Chuba
created_date: 3/4/2015
last_modified_by: Ebube Chuba
last_modified date: 3/6/2015
"""
# imports
import json
import utils
from sql.user import User
from sql.question import Question
from sql.topic import Topic
# Format of JSON - EC
# requestType: getQuestion
# session: "string"
# topics: "string"
# difficulty: integer
def iChooseU(form):
thisUser = utils.findUser(form)
topic = form.getlist("topic")
difficulty = form.getlist("difficulty")
complete = []
count = 0
if len(topic) != 0:
complete += Question.get(0, Topic.get(topic[0])[0])
count += 1
if len(difficulty) != 0:
complete += Question.get(0, int(difficulty[0]))
count += 1
collect = []
intersect = []
for response in complete:
if collect.count(response) < count and count > 1:
collect.append(response)
else:
intersect.add(response.toJson())
out = {}
out["questionList"] = intersect
out["sessionID"] = form.getlist("session")[0]
return json.dumps(out)
| #!/usr/local/bin/python3
"""
created_by: Ebube Chuba
created_date: 3/4/2015
last_modified_by: Ebube Chuba
last_modified date: 3/6/2015
"""
# imports
import json
import utils
from sql.user import User
from sql.question import Question
from sql.topic import Topic
# Format of JSON - EC
# requestType: getQuestion
# session: "string"
# topics: "string"
# difficulty: integer
def iChooseU(form):
thisUser = utils.findUser(form)
topic = form.getlist("topic")
difficulty = form.getlist("difficulty")
print(topic)
print(difficulty)
complete = []
count = 0
if len(topic) != 0:
complete += Question.get(0, Topic.get(topic[0])[0])
count += 1
if len(difficulty) != 0:
complete += Question.get(0, int(difficulty[0]))
count += 1
print(complete)
collect = []
intersect = []
for response in complete:
if collect.count(response) < count:
collect.append(response)
else:
intersect.add(response)
out = {}
out["questionList"] = intersect
out["sessionID"] = form.getlist("session")[0]
return json.dumps(out)
| mit | Python |
603836378728b3152af20cc73b269206d02fc5dc | Add indentation fix to safeprint | gappleto97/Senior-Project | common/safeprint.py | common/safeprint.py | import multiprocessing, sys
from datetime import datetime
from common import settings
print_lock = multiprocessing.RLock()
max_digits = multiprocessing.Value('i', 0)
def safeprint(msg, verbosity=0):
"""Prints in a thread-lock, taking a single object as an argument"""
pid = str(multiprocessing.current_process().pid)
max_digits.value = max(max_digits.value, len(pid))
pid = pid.zfill(max_digits.value)
string = ("[" + pid + "] " + datetime.now().strftime('%H:%M:%S: ') +
str(msg).replace('\n', '\n' + ' ' * len(pid)) + '\n')
with print_lock:
with open("output.txt", "a") as log:
log.write(string)
if settings.config.get('verbose') >= verbosity:
sys.stdout.write(string)
| import multiprocessing, sys
from datetime import datetime
from common import settings
print_lock = multiprocessing.RLock()
max_digits = multiprocessing.Value('i', 0)
def safeprint(msg, verbosity=0):
"""Prints in a thread-lock, taking a single object as an argument"""
pid = str(multiprocessing.current_process().pid)
max_digits.value = max(max_digits.value, len(pid))
pid = pid.zfill(max_digits.value)
string = ("[" + pid + "] " + datetime.now().strftime('%H:%M:%S: ') +
str(msg) + '\n')
with print_lock:
with open("output.txt", "a") as log:
log.write(string)
if settings.config.get('verbose') >= verbosity:
sys.stdout.write(string)
| mit | Python |
2b1acef4c204ba2913a8afef5607f6b628e117b7 | Fix spurious rebuilds of apks for Android build. | littlstar/chromium.src,mohamed--abdel-maksoud/chromium.src,junmin-zhu/chromium-rivertrail,zcbenz/cefode-chromium,M4sse/chromium.src,nacl-webkit/chrome_deps,anirudhSK/chromium,jaruba/chromium.src,hgl888/chromium-crosswalk,pozdnyakov/chromium-crosswalk,jaruba/chromium.src,Pluto-tv/chromium-crosswalk,mogoweb/chromium-crosswalk,crosswalk-project/chromium-crosswalk-efl,ltilve/chromium,hgl888/chromium-crosswalk-efl,PeterWangIntel/chromium-crosswalk,dednal/chromium.src,Jonekee/chromium.src,hgl888/chromium-crosswalk-efl,timopulkkinen/BubbleFish,fujunwei/chromium-crosswalk,hujiajie/pa-chromium,dushu1203/chromium.src,ltilve/chromium,mohamed--abdel-maksoud/chromium.src,zcbenz/cefode-chromium,hujiajie/pa-chromium,Chilledheart/chromium,dednal/chromium.src,Pluto-tv/chromium-crosswalk,PeterWangIntel/chromium-crosswalk,timopulkkinen/BubbleFish,axinging/chromium-crosswalk,Pluto-tv/chromium-crosswalk,TheTypoMaster/chromium-crosswalk,dushu1203/chromium.src,PeterWangIntel/chromium-crosswalk,dednal/chromium.src,zcbenz/cefode-chromium,littlstar/chromium.src,fujunwei/chromium-crosswalk,mogoweb/chromium-crosswalk,hgl888/chromium-crosswalk,TheTypoMaster/chromium-crosswalk,ChromiumWebApps/chromium,zcbenz/cefode-chromium,dushu1203/chromium.src,bright-sparks/chromium-spacewalk,jaruba/chromium.src,Fireblend/chromium-crosswalk,pozdnyakov/chromium-crosswalk,dednal/chromium.src,patrickm/chromium.src,anirudhSK/chromium,mohamed--abdel-maksoud/chromium.src,hgl888/chromium-crosswalk-efl,Just-D/chromium-1,Fireblend/chromium-crosswalk,ondra-novak/chromium.src,Jonekee/chromium.src,pozdnyakov/chromium-crosswalk,hujiajie/pa-chromium,TheTypoMaster/chromium-crosswalk,mogoweb/chromium-crosswalk,junmin-zhu/chromium-rivertrail,ltilve/chromium,axinging/chromium-crosswalk,mohamed--abdel-maksoud/chromium.src,M4sse/chromium.src,krieger-od/nwjs_chromium.src,timopulkkinen/BubbleFish,markYoungH/chromium.src,jaruba/chromium.sr
c,junmin-zhu/chromium-rivertrail,Jonekee/chromium.src,M4sse/chromium.src,Pluto-tv/chromium-crosswalk,ltilve/chromium,hujiajie/pa-chromium,jaruba/chromium.src,anirudhSK/chromium,Jonekee/chromium.src,nacl-webkit/chrome_deps,chuan9/chromium-crosswalk,anirudhSK/chromium,mogoweb/chromium-crosswalk,Pluto-tv/chromium-crosswalk,PeterWangIntel/chromium-crosswalk,patrickm/chromium.src,timopulkkinen/BubbleFish,ChromiumWebApps/chromium,jaruba/chromium.src,M4sse/chromium.src,hujiajie/pa-chromium,jaruba/chromium.src,Chilledheart/chromium,M4sse/chromium.src,axinging/chromium-crosswalk,hgl888/chromium-crosswalk-efl,junmin-zhu/chromium-rivertrail,ChromiumWebApps/chromium,axinging/chromium-crosswalk,Fireblend/chromium-crosswalk,ondra-novak/chromium.src,ChromiumWebApps/chromium,markYoungH/chromium.src,krieger-od/nwjs_chromium.src,dushu1203/chromium.src,dednal/chromium.src,crosswalk-project/chromium-crosswalk-efl,nacl-webkit/chrome_deps,ondra-novak/chromium.src,littlstar/chromium.src,junmin-zhu/chromium-rivertrail,axinging/chromium-crosswalk,M4sse/chromium.src,ondra-novak/chromium.src,hujiajie/pa-chromium,mohamed--abdel-maksoud/chromium.src,hujiajie/pa-chromium,hgl888/chromium-crosswalk,zcbenz/cefode-chromium,pozdnyakov/chromium-crosswalk,ondra-novak/chromium.src,M4sse/chromium.src,pozdnyakov/chromium-crosswalk,Fireblend/chromium-crosswalk,junmin-zhu/chromium-rivertrail,Just-D/chromium-1,Pluto-tv/chromium-crosswalk,hujiajie/pa-chromium,mogoweb/chromium-crosswalk,zcbenz/cefode-chromium,mohamed--abdel-maksoud/chromium.src,TheTypoMaster/chromium-crosswalk,littlstar/chromium.src,Just-D/chromium-1,anirudhSK/chromium,dushu1203/chromium.src,Just-D/chromium-1,hgl888/chromium-crosswalk-efl,patrickm/chromium.src,hgl888/chromium-crosswalk-efl,Fireblend/chromium-crosswalk,markYoungH/chromium.src,pozdnyakov/chromium-crosswalk,fujunwei/chromium-crosswalk,zcbenz/cefode-chromium,hgl888/chromium-crosswalk,hgl888/chromium-crosswalk,krieger-od/nwjs_chromium.src,dednal/chromium.src,timopulkkinen/BubbleFis
h,patrickm/chromium.src,mohamed--abdel-maksoud/chromium.src,ChromiumWebApps/chromium,junmin-zhu/chromium-rivertrail,markYoungH/chromium.src,junmin-zhu/chromium-rivertrail,ondra-novak/chromium.src,dushu1203/chromium.src,hgl888/chromium-crosswalk,Fireblend/chromium-crosswalk,PeterWangIntel/chromium-crosswalk,hgl888/chromium-crosswalk-efl,ltilve/chromium,mohamed--abdel-maksoud/chromium.src,jaruba/chromium.src,crosswalk-project/chromium-crosswalk-efl,ondra-novak/chromium.src,Chilledheart/chromium,patrickm/chromium.src,chuan9/chromium-crosswalk,dushu1203/chromium.src,markYoungH/chromium.src,crosswalk-project/chromium-crosswalk-efl,ChromiumWebApps/chromium,TheTypoMaster/chromium-crosswalk,ChromiumWebApps/chromium,mohamed--abdel-maksoud/chromium.src,PeterWangIntel/chromium-crosswalk,fujunwei/chromium-crosswalk,chuan9/chromium-crosswalk,mohamed--abdel-maksoud/chromium.src,nacl-webkit/chrome_deps,mogoweb/chromium-crosswalk,ondra-novak/chromium.src,Chilledheart/chromium,hgl888/chromium-crosswalk,pozdnyakov/chromium-crosswalk,junmin-zhu/chromium-rivertrail,anirudhSK/chromium,Jonekee/chromium.src,patrickm/chromium.src,zcbenz/cefode-chromium,hgl888/chromium-crosswalk-efl,TheTypoMaster/chromium-crosswalk,bright-sparks/chromium-spacewalk,TheTypoMaster/chromium-crosswalk,M4sse/chromium.src,chuan9/chromium-crosswalk,ChromiumWebApps/chromium,mogoweb/chromium-crosswalk,axinging/chromium-crosswalk,mogoweb/chromium-crosswalk,axinging/chromium-crosswalk,nacl-webkit/chrome_deps,nacl-webkit/chrome_deps,bright-sparks/chromium-spacewalk,Chilledheart/chromium,bright-sparks/chromium-spacewalk,krieger-od/nwjs_chromium.src,crosswalk-project/chromium-crosswalk-efl,pozdnyakov/chromium-crosswalk,krieger-od/nwjs_chromium.src,pozdnyakov/chromium-crosswalk,nacl-webkit/chrome_deps,markYoungH/chromium.src,patrickm/chromium.src,timopulkkinen/BubbleFish,fujunwei/chromium-crosswalk,bright-sparks/chromium-spacewalk,krieger-od/nwjs_chromium.src,bright-sparks/chromium-spacewalk,Chilledheart/chromium,hgl888/ch
romium-crosswalk,krieger-od/nwjs_chromium.src,Pluto-tv/chromium-crosswalk,Jonekee/chromium.src,markYoungH/chromium.src,junmin-zhu/chromium-rivertrail,jaruba/chromium.src,mohamed--abdel-maksoud/chromium.src,anirudhSK/chromium,ChromiumWebApps/chromium,crosswalk-project/chromium-crosswalk-efl,littlstar/chromium.src,ltilve/chromium,zcbenz/cefode-chromium,hgl888/chromium-crosswalk-efl,anirudhSK/chromium,hgl888/chromium-crosswalk,Pluto-tv/chromium-crosswalk,hujiajie/pa-chromium,patrickm/chromium.src,dednal/chromium.src,dednal/chromium.src,bright-sparks/chromium-spacewalk,Jonekee/chromium.src,nacl-webkit/chrome_deps,patrickm/chromium.src,ondra-novak/chromium.src,Just-D/chromium-1,Just-D/chromium-1,pozdnyakov/chromium-crosswalk,PeterWangIntel/chromium-crosswalk,zcbenz/cefode-chromium,hujiajie/pa-chromium,Just-D/chromium-1,dushu1203/chromium.src,krieger-od/nwjs_chromium.src,dushu1203/chromium.src,ChromiumWebApps/chromium,littlstar/chromium.src,PeterWangIntel/chromium-crosswalk,timopulkkinen/BubbleFish,axinging/chromium-crosswalk,nacl-webkit/chrome_deps,dednal/chromium.src,Fireblend/chromium-crosswalk,bright-sparks/chromium-spacewalk,markYoungH/chromium.src,pozdnyakov/chromium-crosswalk,krieger-od/nwjs_chromium.src,M4sse/chromium.src,M4sse/chromium.src,chuan9/chromium-crosswalk,chuan9/chromium-crosswalk,littlstar/chromium.src,Pluto-tv/chromium-crosswalk,crosswalk-project/chromium-crosswalk-efl,PeterWangIntel/chromium-crosswalk,littlstar/chromium.src,timopulkkinen/BubbleFish,ChromiumWebApps/chromium,TheTypoMaster/chromium-crosswalk,axinging/chromium-crosswalk,dushu1203/chromium.src,Jonekee/chromium.src,timopulkkinen/BubbleFish,ltilve/chromium,timopulkkinen/BubbleFish,bright-sparks/chromium-spacewalk,fujunwei/chromium-crosswalk,dushu1203/chromium.src,junmin-zhu/chromium-rivertrail,Jonekee/chromium.src,markYoungH/chromium.src,nacl-webkit/chrome_deps,crosswalk-project/chromium-crosswalk-efl,anirudhSK/chromium,fujunwei/chromium-crosswalk,mogoweb/chromium-crosswalk,Just-D/chromium-
1,zcbenz/cefode-chromium,hgl888/chromium-crosswalk-efl,Fireblend/chromium-crosswalk,jaruba/chromium.src,hujiajie/pa-chromium,mogoweb/chromium-crosswalk,axinging/chromium-crosswalk,Chilledheart/chromium,fujunwei/chromium-crosswalk,jaruba/chromium.src,M4sse/chromium.src,chuan9/chromium-crosswalk,ltilve/chromium,crosswalk-project/chromium-crosswalk-efl,anirudhSK/chromium,ltilve/chromium,Chilledheart/chromium,timopulkkinen/BubbleFish,axinging/chromium-crosswalk,ChromiumWebApps/chromium,krieger-od/nwjs_chromium.src,dednal/chromium.src,chuan9/chromium-crosswalk,Jonekee/chromium.src,Chilledheart/chromium,Just-D/chromium-1,markYoungH/chromium.src,Jonekee/chromium.src,nacl-webkit/chrome_deps,fujunwei/chromium-crosswalk,TheTypoMaster/chromium-crosswalk,markYoungH/chromium.src,anirudhSK/chromium,Fireblend/chromium-crosswalk,anirudhSK/chromium,krieger-od/nwjs_chromium.src,chuan9/chromium-crosswalk,dednal/chromium.src | build/build_output_dirs_android.gyp | build/build_output_dirs_android.gyp | # Copyright (c) 2012 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
{
'targets': [
{
# Target for creating common output build directories. Creating output
# dirs beforehand ensures that build scripts can assume these folders to
# exist and there are no race conditions resulting from build scripts
# trying to create these directories.
# The build/java.gypi target depends on this target.
'target_name': 'build_output_dirs',
'type': 'none',
'actions': [
{
'action_name': 'create_java_output_dirs',
'variables' : {
'output_dirs' : [
'<(PRODUCT_DIR)/apks',
'<(PRODUCT_DIR)/lib.java',
'<(PRODUCT_DIR)/test.lib.java',
]
},
'inputs' : [],
# By not specifying any outputs, we ensure that this command isn't
# re-run when the output directories are touched (i.e. apks are
# written to them).
'outputs': [''],
'action': [
'mkdir',
'-p',
'<@(output_dirs)',
],
},
],
}, # build_output_dirs
], # targets
}
| # Copyright (c) 2012 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
{
'targets': [
{
# Target for creating common output build directories. Creating output
# dirs beforehand ensures that build scripts can assume these folders to
# exist and there are no race conditions resulting from build scripts
# trying to create these directories.
# The build/java.gypi target depends on this target.
'target_name': 'build_output_dirs',
'type': 'none',
'actions': [
{
'action_name': 'create_java_output_dirs',
'variables' : {
'output_dirs' : [
'<(PRODUCT_DIR)/apks',
'<(PRODUCT_DIR)/lib.java',
'<(PRODUCT_DIR)/test.lib.java',
]
},
'inputs' : [],
'outputs': [
'<@(output_dirs)'
],
'action': [
'mkdir',
'-p',
'<@(output_dirs)',
],
},
],
}, # build_output_dirs
], # targets
}
| bsd-3-clause | Python |
1077c627302448a38ec3a6bd059e70f5e4cbfb86 | add handshake events handling | facebook/wangle,facebook/wangle,facebook/wangle | build/fbcode_builder/specs/fbzmq.py | build/fbcode_builder/specs/fbzmq.py | #!/usr/bin/env python
# Copyright (c) Facebook, Inc. and its affiliates.
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
import specs.fbthrift as fbthrift
import specs.folly as folly
import specs.gmock as gmock
import specs.sodium as sodium
import specs.sigar as sigar
from shell_quoting import ShellQuoted
def fbcode_builder_spec(builder):
builder.add_option('zeromq/libzmq:git_hash', 'v4.2.2')
return {
'depends_on': [folly, fbthrift, gmock, sodium, sigar],
'steps': [
builder.github_project_workdir('zeromq/libzmq', '.'),
builder.step('Build and install zeromq/libzmq', [
builder.run(ShellQuoted('./autogen.sh')),
builder.configure(),
builder.make_and_install(),
]),
builder.fb_github_project_workdir('fbzmq/fbzmq/build', 'facebook'),
builder.step('Build and install fbzmq/fbzmq/build', [
builder.cmake_configure('fbzmq/fbzmq/build'),
# we need the pythonpath to find the thrift compiler
builder.run(ShellQuoted(
'PYTHONPATH="$PYTHONPATH:"{p}/lib/python2.7/site-packages '
'make -j {n}'
).format(p=builder.option('prefix'), n=builder.option('make_parallelism'))),
builder.run(ShellQuoted('make install')),
]),
],
}
| #!/usr/bin/env python
# Copyright (c) Facebook, Inc. and its affiliates.
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
import specs.fbthrift as fbthrift
import specs.folly as folly
import specs.gmock as gmock
import specs.sodium as sodium
import specs.sigar as sigar
from shell_quoting import ShellQuoted
def fbcode_builder_spec(builder):
builder.add_option('zeromq/libzmq:git_hash', 'v4.2.5')
return {
'depends_on': [folly, fbthrift, gmock, sodium, sigar],
'steps': [
builder.github_project_workdir('zeromq/libzmq', '.'),
builder.step('Build and install zeromq/libzmq', [
builder.run(ShellQuoted('./autogen.sh')),
builder.configure(),
builder.make_and_install(),
]),
builder.fb_github_project_workdir('fbzmq/fbzmq/build', 'facebook'),
builder.step('Build and install fbzmq/fbzmq/build', [
builder.cmake_configure('fbzmq/fbzmq/build'),
# we need the pythonpath to find the thrift compiler
builder.run(ShellQuoted(
'PYTHONPATH="$PYTHONPATH:"{p}/lib/python2.7/site-packages '
'make -j {n}'
).format(p=builder.option('prefix'), n=builder.option('make_parallelism'))),
builder.run(ShellQuoted('make install')),
]),
],
}
| apache-2.0 | Python |
536716d095b152355dfb00cff713552a96b95857 | Comment out lines accidentally left in the last commit. Oops. | rdaland/PhoMEnt | calc_weights.py | calc_weights.py | import sys
import megatableau, data_prob
import scipy, scipy.optimize
# Argument parsing
assert len(sys.argv)==2
tableau_file_name = sys.argv[1]
# Read in data
mt = megatableau.MegaTableau(tableau_file_name)
w_0 = -scipy.rand(len(mt.weights))
nonpos_reals = [(-25,0) for wt in mt.weights]
def one_minus_probability(weights, tableau):
return 1.0-data_prob.probability(weights, tableau)
def negative_probability(weights, tableau):
return -data_prob.probability(weights, tableau)
learned_weights = scipy.optimize.fmin_tnc(data_prob.probability, w_0, args = (mt.tableau,), bounds=nonpos_reals, approx_grad=True)
print(learned_weights)
# print("Probability given weights found by the original MEGT:")
# print(data_prob.probability([-2.19,-0.43], mt.tableau)) | import sys
import megatableau, data_prob
import scipy, scipy.optimize
# Argument parsing
assert len(sys.argv)==2
tableau_file_name = sys.argv[1]
# Read in data
mt = megatableau.MegaTableau(tableau_file_name)
w_0 = -scipy.rand(len(mt.weights))
nonpos_reals = [(-25,0) for wt in mt.weights]
def one_minus_probability(weights, tableau):
return 1.0-data_prob.probability(weights, tableau)
def negative_probability(weights, tableau):
return -data_prob.probability(weights, tableau)
learned_weights = scipy.optimize.fmin_tnc(data_prob.probability, w_0, args = (mt.tableau,), bounds=nonpos_reals, approx_grad=True)
print(learned_weights)
print("Probability given weights found by the original MEGT:")
print(data_prob.probability([-2.19,-0.43], mt.tableau)) | bsd-3-clause | Python |
8d663651c672f3fb1f53cd07dbd1ec20c5daaa90 | Update comment in migration | kdeloach/model-my-watershed,WikiWatershed/model-my-watershed,kdeloach/model-my-watershed,kdeloach/model-my-watershed,kdeloach/model-my-watershed,project-icp/bee-pollinator-app,lliss/model-my-watershed,project-icp/bee-pollinator-app,kdeloach/model-my-watershed,lliss/model-my-watershed,WikiWatershed/model-my-watershed,WikiWatershed/model-my-watershed,lliss/model-my-watershed,lliss/model-my-watershed,WikiWatershed/model-my-watershed,WikiWatershed/model-my-watershed,lliss/model-my-watershed,project-icp/bee-pollinator-app,project-icp/bee-pollinator-app | src/mmw/apps/modeling/migrations/0010_scenario_inputmod_hash.py | src/mmw/apps/modeling/migrations/0010_scenario_inputmod_hash.py | # -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
class Migration(migrations.Migration):
dependencies = [
('modeling', '0009_scenario_results_to_json'),
]
operations = [
migrations.AddField(
model_name='scenario',
name='inputmod_hash',
field=models.CharField(help_text='A hash of the values for inputs & modifications to compare to the existing model results, to determine if the persisted result apply to the current values', max_length=255, null=True),
),
migrations.AlterField(
model_name='scenario',
name='modification_hash',
field=models.CharField(help_text='A hash of the values for modifications to compare to the existing model results, to determine if the persisted result apply to the current values', max_length=255, null=True),
),
]
| # -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
class Migration(migrations.Migration):
dependencies = [
('modeling', '0009_scenario_results_to_json'),
]
operations = [
migrations.AddField(
model_name='scenario',
name='inputmod_hash',
field=models.CharField(help_text='A hash of the values for inputs to compare to the existing model results, to determine if the persisted result apply to the current values', max_length=255, null=True),
),
migrations.AlterField(
model_name='scenario',
name='modification_hash',
field=models.CharField(help_text='A hash of the values for modifications to compare to the existing model results, to determine if the persisted result apply to the current values', max_length=255, null=True),
),
]
| apache-2.0 | Python |
0e53893527f63e16e9398d18f1d19578bc14334a | add trailing comma | texastribune/tribwire,texastribune/tribwire,texastribune/tribwire,texastribune/tribwire | tribwire/urls.py | tribwire/urls.py | from django.conf.urls import patterns, url
from . import views
urlpatterns = patterns('',
url(r'^$', views.index, name='index'),
url(r'^link_form.html$', views.CreateLink.as_view(), name='link_form'),
)
| from django.conf.urls import patterns, url
from . import views
urlpatterns = patterns('',
url(r'^$', views.index, name='index'),
url(r'^link_form.html$', views.CreateLink.as_view(), name='link_form'))
| apache-2.0 | Python |
ecead583151cb7af4e0cfcf0054423954d0eac3b | Update files.py | scotthaleen/python-secret-sauce | src/files.py | src/files.py | # -*- coding: utf-8 -*-
import os
from generators import numbers
'''
file IO functions
'''
def slurp(filePath):
# read contents of file to string
with open(filePath) as x: data = x.read()
return data
def slurpA(filePath):
# same as slurp but return Array of lines instead of string
with open(filePath) as x: data = x.read().splitlines()
return data
def spit(filePath, data, overwrite=False):
# write all contents to a file
mode= 'w' if overwrite else 'a'
with open(filePath, mode) as x: x.write(data)
def touch(filePath, times=None):
# touch a file
with open(filePath, 'a'):
os.utime(filePath, times)
def rm(filePath):
# delete a file if exists
if os.path.isfile(filePath):
os.remove(filePath)
def rmrf(directory):
ignore_errors = True
shutil.rmtree(directory, ignore_errors)
def mv(src, dest):
shutil.move(src, dest)
def cp(src, dest):
shutil.copyfile(src, dest)
def mkdir(path):
os.makedirs(path)
class rollin(object):
'''
rolling file writer
this is a poor mans rolling file appender
TODO maybe accept a generator function for incrementing file names
must be infinite or a cycle
TODO add optional event function to be triggered on file change
(useful if you would like to automatically move the files when
done being written too)
'''
def __init__(self, directory, filename, extension, limit_megabytes=10):
self.directory = directory
self.filename = filename
self.limit_bytes = limit_megabytes*1024*1024
self.gen = numbers()
self.rotate = False
def open(self):
sz = "%s/%s_%s.%s" % (self.directory, self.filename, self.gen.next(), self.extension)
self.f = open(sz, 'a+b')
def rotate():
self.rotate = True
def __enter__(self):
self.open()
return self
def __exit__(self, type, value, traceback):
self.close()
def close(self):
self.f.close()
def write(self,data):
if self.rotate or (self.f.tell() > self.limit_bytes):
self.rotate = False
self.close()
self.open()
self.f.write(data)
| # -*- coding: utf-8 -*-
import os
from generators import numbers
'''
file IO functions
'''
def slurp(filePath):
# read contents of file to string
with open(filePath) as x: data = x.read()
return data
def slurpA(filePath):
# same as slurp but return Array of lines instead of string
with open(filePath) as x: data = x.read().splitlines()
return data
def spit(filePath, data, overwrite=False):
# write all contents to a file
mode= 'w' if overwrite else 'a'
with open(filePath, mode) as x: x.write(data)
def touch(filePath, times=None):
# touch a file
with open(filePath, 'a'):
os.utime(filePath, times)
def rm(filePath):
# delete a file if exists
if os.path.isfile(filePath):
os.remove(filePath)
class rollin(object):
'''
rolling file writer
this is a poor mans rolling file appender
TODO maybe accept a generator function for incrementing file names
must be infinite or a cycle
TODO add optional event function to be triggered on file change
(useful if you would like to automatically move the files when
done being written too)
'''
def __init__(self, directory, filename, extension, limit_megabytes=10):
self.directory = directory
self.filename = filename
self.limit_bytes = limit_megabytes*1024*1024
self.gen = numbers()
self.rotate = False
def open(self):
sz = "%s/%s_%s.%s" % (self.directory, self.filename, self.gen.next(), self.extension)
self.f = open(sz, 'a+b')
def rotate():
self.rotate = True
def __enter__(self):
self.open()
return self
def __exit__(self, type, value, traceback):
self.close()
def close(self):
self.f.close()
def write(self,data):
if self.rotate or (self.f.tell() > self.limit_bytes):
self.rotate = False
self.close()
self.open()
self.f.write(data)
| mit | Python |
00cea9f8e51f53f338e19adf0165031d2f9cad77 | Enable markdown extensions for TOC and linebreaks | Courgetteandratatouille/v6_ui,Courgetteandratatouille/v6_ui,olaurendeau/v6_ui,c2corg/v6_ui,c2corg/v6_ui,c2corg/v6_ui,Courgetteandratatouille/v6_ui,olaurendeau/v6_ui,olaurendeau/v6_ui,c2corg/v6_ui,Courgetteandratatouille/v6_ui,olaurendeau/v6_ui | c2corg_ui/templates/utils/format.py | c2corg_ui/templates/utils/format.py | import bbcode
import markdown
import html
from c2corg_ui.format.wikilinks import C2CWikiLinkExtension
from markdown.extensions.nl2br import Nl2BrExtension
from markdown.extensions.toc import TocExtension
_markdown_parser = None
_bbcode_parser = None
def _get_markdown_parser():
global _markdown_parser
if not _markdown_parser:
extensions = [
C2CWikiLinkExtension(),
Nl2BrExtension(),
TocExtension(marker='[toc]', baselevel=2),
]
_markdown_parser = markdown.Markdown(output_format='xhtml5',
extensions=extensions)
return _markdown_parser
def _get_bbcode_parser():
global _bbcode_parser
if not _bbcode_parser:
_bbcode_parser = bbcode.Parser(escape_html=False, newline='\n')
return _bbcode_parser
def parse_code(text, md=True, bb=True):
if md:
text = _get_markdown_parser().convert(text)
if bb:
text = _get_bbcode_parser().format(text)
return text
def sanitize(text):
return html.escape(text)
| import bbcode
import markdown
import html
from c2corg_ui.format.wikilinks import C2CWikiLinkExtension
_markdown_parser = None
_bbcode_parser = None
def _get_markdown_parser():
global _markdown_parser
if not _markdown_parser:
extensions = [
C2CWikiLinkExtension(),
]
_markdown_parser = markdown.Markdown(output_format='xhtml5',
extensions=extensions)
return _markdown_parser
def _get_bbcode_parser():
global _bbcode_parser
if not _bbcode_parser:
_bbcode_parser = bbcode.Parser(escape_html=False, newline='\n')
return _bbcode_parser
def parse_code(text, md=True, bb=True):
if md:
text = _get_markdown_parser().convert(text)
if bb:
text = _get_bbcode_parser().format(text)
return text
def sanitize(text):
return html.escape(text)
| agpl-3.0 | Python |
70a1a4b22a49b9330810faf2819d6e380d69822a | remove whitespace | berendkleinhaneveld/Registrationshop,berendkleinhaneveld/Registrationshop | ui/interactor.py | ui/interactor.py | """
Interactor
This class can be used to simply managing callback resources.
Callbacks are often used by interactors with vtk and callbacks
are hard to keep track of.
Use multiple inheritance to inherit from this class to get access
to the convenience methods.
Observers for vtk events can be added through
AddObserver(obj, eventName, callbackFunction) and when it is time
to clean up, just call cleanUpCallbacks().
:Authors:
Berend Klein Haneveld
"""
class Interactor(object):
"""
Interactor
"""
def __init__(self):
super(Interactor, self).__init__()
def AddObserver(self, obj, eventName, callbackFunction):
"""
Creates a callback and stores the callback so that later
on the callbacks can be properly cleaned up.
"""
if not hasattr(self, "_callbacks"):
self._callbacks = []
callback = obj.AddObserver(eventName, callbackFunction)
self._callbacks.append((obj, callback))
def cleanUpCallbacks(self):
"""
Cleans up the vtkCallBacks
"""
if not hasattr(self, "_callbacks"):
return
for obj, callback in self._callbacks:
obj.RemoveObserver(callback)
self._callbacks = []
| """
Interactor
This class can be used to simply managing callback resources.
Callbacks are often used by interactors with vtk and callbacks
are hard to keep track of.
Use multiple inheritance to inherit from this class to get access
to the convenience methods.
Observers for vtk events can be added through
AddObserver(obj, eventName, callbackFunction) and when it is time
to clean up, just call cleanUpCallbacks().
:Authors:
Berend Klein Haneveld
"""
class Interactor(object):
"""
Interactor
"""
def __init__(self):
super(Interactor, self).__init__()
def AddObserver(self, obj, eventName, callbackFunction):
"""
Creates a callback and stores the callback so that later
on the callbacks can be properly cleaned up.
"""
if not hasattr(self, "_callbacks"):
self._callbacks = []
callback = obj.AddObserver(eventName, callbackFunction)
self._callbacks.append((obj, callback))
def cleanUpCallbacks(self):
"""
Cleans up the vtkCallBacks
"""
if not hasattr(self, "_callbacks"):
return
for obj, callback in self._callbacks:
obj.RemoveObserver(callback)
self._callbacks = []
| mit | Python |
7a78140e1c870ba0a1c3d1659a48c615967b5f0b | change R to f in NB forms | dgasmith/EEX_scratch | eex/metadata/nb_terms.py | eex/metadata/nb_terms.py | """
Contains all of the metadata for non-bonded terms.
"""
_nb_functional_forms = {
"LJ": {
"AB": {
"form": "A/(r ** 12) - B/(r ** 6)",
"parameters": ["A", "B"],
"units": {
"A": "[energy] * [length] ** 12",
"B": "[energy] * [length] ** 6",
},
},
"epsilon/sigma": {
"form": "4.0 * epsilon * ((sigma / R) ** 12 - (sigma / R) ** 6)",
"parameters": ["epsilon", "sigma"],
"units": {
"epsilon": "[energy]",
"sigma": "[length]",
},
},
# Continue this out
"description": "This is the classic LJ non-bonded"
},
"YANNB": {}
}
nb_metadata = {}
# Valid variables used in all two-body terms
nb_metadata["variables"] = {"r": {"units": "[length]", "description": "Distance between the two indexed atoms."}}
# Add store data
nb_metadata["store_name"] = "nb"
nb_metadata["store_indices"] = {
"atom_type1": "Index of the first atom.",
"atom_type2": "Index of the second atom.",
# "term_index": "Index of nb_type stored in the DataLayer.",
}
# nb_metadata["index_columns"] = ["atom1", "atom2"]
nb_metadata["forms"] = _nb_functional_forms | """
Contains all of the metadata for non-bonded terms.
"""
_nb_functional_forms = {
"LJ": {
"AB": {
"form": "A/(R ** 12) - B/(R ** 6)",
"parameters": ["A", "B"],
"units": {
"A": "[energy] * [length] ** 12",
"B": "[energy] * [length] ** 6",
},
},
"epsilon/sigma": {
"form": "4.0 * epsilon * ((sigma / R) ** 12 - (sigma / R) ** 6)",
"parameters": ["epsilon", "sigma"],
"units": {
"epsilon": "[energy]",
"sigma": "[length]",
},
},
# Continue this out
"description": "This is the classic LJ non-bonded"
},
"YANNB": {}
}
nb_metadata = {}
# Valid variables used in all two-body terms
nb_metadata["variables"] = {"r": {"units": "[length]", "description": "Distance between the two indexed atoms."}}
# Add store data
nb_metadata["store_name"] = "nb"
nb_metadata["store_indices"] = {
"atom_type1": "Index of the first atom.",
"atom_type2": "Index of the second atom.",
# "term_index": "Index of nb_type stored in the DataLayer.",
}
# nb_metadata["index_columns"] = ["atom1", "atom2"]
nb_metadata["forms"] = _nb_functional_forms | bsd-3-clause | Python |
38ddf9c51bc87ad48bd0776dac6332983b3ab2f9 | Fix error on fragmenting sequences smaller than the window size | mchelem/cref2,mchelem/cref2,mchelem/cref2 | cref/sequence/fragment.py | cref/sequence/fragment.py |
def fragment(sequence, size=5):
"""
Fragment a string sequence using a sliding window given by size
:param sequence: String containing the sequence
:param size: Size of the window
:return: a fragment of the sequence with the given size
"""
if size > 0 and len(sequence) > size:
for i in range(len(sequence) - size + 1):
yield sequence[i: i + size]
else:
yield sequence
|
def fragment(sequence, size=5):
"""
Fragment a string sequence using a sliding window given by size
:param sequence: String containing the sequence
:param size: Size of the window
:return: a fragment of the sequence with the given size
"""
if size > 0:
for i in range(len(sequence) - size + 1):
yield sequence[i: i + size]
| mit | Python |
39f6a4a5039287cd80d1efe3741d562e1102256b | add hot load phase to view query tests | mikewied/perfrunner,couchbase/perfrunner,couchbase/perfrunner,PaintScratcher/perfrunner,pavel-paulau/perfrunner,vmx/perfrunner,PaintScratcher/perfrunner,mikewied/perfrunner,pavel-paulau/perfrunner,couchbase/perfrunner,couchbase/perfrunner,thomas-couchbase/perfrunner,EricACooper/perfrunner,hsharsha/perfrunner,couchbase/perfrunner,dkao-cb/perfrunner,thomas-couchbase/perfrunner,pavel-paulau/perfrunner,couchbase/perfrunner,dkao-cb/perfrunner,hsharsha/perfrunner,vmx/perfrunner,EricACooper/perfrunner,EricACooper/perfrunner,EricACooper/perfrunner,pavel-paulau/perfrunner,pavel-paulau/perfrunner | perfrunner/tests/query.py | perfrunner/tests/query.py | from perfrunner.helpers.cbmonitor import with_stats
from perfrunner.tests.index import IndexTest
class QueryTest(IndexTest):
COLLECTORS = {'latency': True, 'query_latency': True}
@with_stats
def access(self):
super(QueryTest, self).timer()
def run(self):
self.load()
self.wait_for_persistence()
self.hot_load()
self.wait_for_persistence()
self.compact_bucket()
self.define_ddocs()
self.build_index()
self.workload = self.test_config.access_settings
self.access_bg_with_ddocs()
self.access()
class QueryThroughputTest(QueryTest):
def run(self):
super(QueryThroughputTest, self).run()
self.reporter.post_to_sf(
self.metric_helper.calc_avg_couch_views_ops()
)
class QueryLatencyTest(QueryTest):
def run(self):
super(QueryLatencyTest, self).run()
self.reporter.post_to_sf(
*self.metric_helper.calc_query_latency(percentile=80)
)
if self.remote.os != 'Cygwin' and \
self.test_config.name == 'query_lat_20M':
self.reporter.post_to_sf(*self.metric_helper.calc_max_beam_rss())
| from perfrunner.helpers.cbmonitor import with_stats
from perfrunner.tests.index import IndexTest
class QueryTest(IndexTest):
COLLECTORS = {'latency': True, 'query_latency': True}
@with_stats
def access(self):
super(QueryTest, self).timer()
def run(self):
self.load()
self.wait_for_persistence()
self.compact_bucket()
self.define_ddocs()
self.build_index()
self.workload = self.test_config.access_settings
self.access_bg_with_ddocs()
self.access()
class QueryThroughputTest(QueryTest):
def run(self):
super(QueryThroughputTest, self).run()
self.reporter.post_to_sf(
self.metric_helper.calc_avg_couch_views_ops()
)
class QueryLatencyTest(QueryTest):
def run(self):
super(QueryLatencyTest, self).run()
self.reporter.post_to_sf(
*self.metric_helper.calc_query_latency(percentile=80)
)
if self.remote.os != 'Cygwin' and \
self.test_config.name == 'query_lat_20M':
self.reporter.post_to_sf(*self.metric_helper.calc_max_beam_rss())
| apache-2.0 | Python |
10054e8d810820603246483a093961b2c4574032 | Update models.py | Ksynko/django-contactinfo,Ksynko/django-contactinfo | contactinfo/models.py | contactinfo/models.py | from django.db import models
from django.conf import settings
from __future__ import unicode_literals
from django_countries.fields import CountryField
class LocationType(models.Model):
name = models.CharField(max_length=30)
slug = models.CharField(max_length=30)
def __unicode__(self):
return self.name
def get_default_locationtype():
default_slug = getattr(settings, 'DEFAULT_LOCATIONTYPE_SLUG', 'office')
return LocationType.objects.get(slug=default_slug).pk
# def get_default_country():
# default_iso = getattr(settings, 'DEFAULT_COUNTRY_ISO', 'US')
# return countries.Country.objects.get(iso=default_iso).pk
class Location(models.Model):
type = models.ForeignKey(LocationType, blank=True, null=True)
country = CountryField()
def __unicode__(self):
return '%s (%s)' % (self.country, self.type)
class Address(models.Model):
location = models.ForeignKey(Location, related_name='addresses')
street = models.TextField(blank=True)
city = models.CharField(max_length=255, blank=True)
state_province = models.CharField(max_length=255, blank=True)
postal_code = models.CharField(max_length=255, blank=True)
class Meta:
verbose_name_plural = 'addresses'
def __unicode__(self):
return "%s\n%s, %s %s" % \
(self.street, self.city, self.state_province, self.postal_code)
class Phone(models.Model):
PHONE_TYPES = (
('landline', 'Land Line'),
('mobile', 'Mobile'),
('fax', 'Fax')
)
location = models.ForeignKey(Location, related_name='phones')
number = models.CharField(max_length=30)
type = models.CharField(
max_length=15,
choices=PHONE_TYPES,
default='landline',
)
def __unicode__(self):
return self.number
| from django.db import models
from django.conf import settings
from django_countries.fields import CountryField
class LocationType(models.Model):
name = models.CharField(max_length=30)
slug = models.CharField(max_length=30)
def __unicode__(self):
return self.name
def get_default_locationtype():
default_slug = getattr(settings, 'DEFAULT_LOCATIONTYPE_SLUG', 'office')
return LocationType.objects.get(slug=default_slug).pk
# def get_default_country():
# default_iso = getattr(settings, 'DEFAULT_COUNTRY_ISO', 'US')
# return countries.Country.objects.get(iso=default_iso).pk
class Location(models.Model):
type = models.ForeignKey(LocationType, blank=True, null=True)
country = CountryField()
def __unicode__(self):
return '%s (%s)' % (self.country, self.type)
class Address(models.Model):
location = models.ForeignKey(Location, related_name='addresses')
street = models.TextField(blank=True)
city = models.CharField(max_length=255, blank=True)
state_province = models.CharField(max_length=255, blank=True)
postal_code = models.CharField(max_length=255, blank=True)
class Meta:
verbose_name_plural = 'addresses'
def __unicode__(self):
return "%s\n%s, %s %s" % \
(self.street, self.city, self.state_province, self.postal_code)
class Phone(models.Model):
PHONE_TYPES = (
('landline', 'Land Line'),
('mobile', 'Mobile'),
('fax', 'Fax')
)
location = models.ForeignKey(Location, related_name='phones')
number = models.CharField(max_length=30)
type = models.CharField(
max_length=15,
choices=PHONE_TYPES,
default='landline',
)
def __unicode__(self):
return self.number
| bsd-3-clause | Python |
9df117e49b35f1bcd478fc9c89d69a32592b2518 | Remove OpenID in tests | hasgeek/funnel,hasgeek/funnel,hasgeek/lastuser,hasgeek/lastuser,hasgeek/lastuser,hasgeek/funnel,hasgeek/lastuser,hasgeek/lastuser,hasgeek/funnel,hasgeek/funnel | tests/unit/lastuser_core/test_registry_LoginProviderRegistry.py | tests/unit/lastuser_core/test_registry_LoginProviderRegistry.py | # -*- coding: utf-8 -*-
from lastuserapp import app
from lastuser_core import login_registry
from lastuser_core.registry import LoginProviderRegistry
import unittest
class TestLoginProviderRegistry(unittest.TestCase):
def test_LoginProviderRegistry(self):
"""
Test for verifying creation of LoginProviderRegistry
instance.
"""
# A LoginProviderRegistry instance is created (based on
# configuration provided) when init_for is called during
# creation of app instance. To test and verify this correctly
# we temporarily do not use the app instance available globally
# and construct app instance separately
expected_login_providers = []
if app.config.get('OAUTH_TWITTER_KEY') and app.config.get('OAUTH_TWITTER_SECRET'):
expected_login_providers.append('twitter')
if app.config.get('OAUTH_GOOGLE_KEY') and app.config.get('OAUTH_GOOGLE_SECRET'):
expected_login_providers.append('google')
if app.config.get('OAUTH_LINKEDIN_KEY') and app.config.get('OAUTH_LINKEDIN_SECRET'):
expected_login_providers.append('linkedin')
if app.config.get('OAUTH_GITHUB_KEY') and app.config.get('OAUTH_GITHUB_SECRET'):
expected_login_providers.append('github')
self.assertIsInstance(login_registry, LoginProviderRegistry)
self.assertItemsEqual(expected_login_providers, login_registry.keys())
| # -*- coding: utf-8 -*-
from lastuserapp import app
from lastuser_core import login_registry
from lastuser_core.registry import LoginProviderRegistry
import unittest
class TestLoginProviderRegistry(unittest.TestCase):
def test_LoginProviderRegistry(self):
"""
Test for verifying creation of LoginProviderRegistry
instance.
"""
# A LoginProviderRegistry instance is created (based on
# configuration provided) when init_for is called during
# creation of app instance. To test and verify this correctly
# we temporarily do not use the app instance available globally
# and construct app instance separately
expected_login_providers = []
if app.config.get('OAUTH_TWITTER_KEY') and app.config.get('OAUTH_TWITTER_SECRET'):
expected_login_providers.append('twitter')
if app.config.get('OAUTH_GOOGLE_KEY') and app.config.get('OAUTH_GOOGLE_SECRET'):
expected_login_providers.append('google')
if app.config.get('OAUTH_LINKEDIN_KEY') and app.config.get('OAUTH_LINKEDIN_SECRET'):
expected_login_providers.append('linkedin')
if app.config.get('OAUTH_GITHUB_KEY') and app.config.get('OAUTH_GITHUB_SECRET'):
expected_login_providers.append('github')
expected_login_providers.append('openid')
self.assertIsInstance(login_registry, LoginProviderRegistry)
self.assertItemsEqual(expected_login_providers, login_registry.keys())
| agpl-3.0 | Python |
7e0a04621c6762bf846495d9f201d4cf757935c9 | Update phy.io | kwikteam/phy,rossant/phy,kwikteam/phy,kwikteam/phy,rossant/phy,rossant/phy | phy/io/tests/test_mock.py | phy/io/tests/test_mock.py | # -*- coding: utf-8 -*-
"""Tests of mock datasets."""
#------------------------------------------------------------------------------
# Imports
#------------------------------------------------------------------------------
import numpy as np
from numpy.testing import assert_array_equal as ae
from pytest import raises
from ...electrode.mea import MEA
from ..mock import (artificial_waveforms,
artificial_traces,
artificial_spike_clusters,
artificial_features,
artificial_masks,
)
#------------------------------------------------------------------------------
# Tests
#------------------------------------------------------------------------------
def _test_artificial(n_spikes=None, n_clusters=None):
n_samples_waveforms = 32
n_samples_traces = 50
n_channels = 35
n_features = n_channels * 2
# Waveforms.
waveforms = artificial_waveforms(n_spikes=n_spikes,
n_samples=n_samples_waveforms,
n_channels=n_channels)
assert waveforms.shape == (n_spikes, n_samples_waveforms, n_channels)
# Traces.
traces = artificial_traces(n_samples=n_samples_traces,
n_channels=n_channels)
assert traces.shape == (n_samples_traces, n_channels)
# Spike clusters.
spike_clusters = artificial_spike_clusters(n_spikes=n_spikes,
n_clusters=n_clusters)
assert spike_clusters.shape == (n_spikes,)
if n_clusters >= 1:
assert spike_clusters.min() in (0, 1)
assert spike_clusters.max() in (n_clusters - 1, n_clusters - 2)
ae(np.unique(spike_clusters), np.arange(n_clusters))
# Features.
features = artificial_features(n_spikes, n_features)
assert features.shape == (n_spikes, n_features)
# Masks.
masks = artificial_masks(n_spikes, n_channels)
assert masks.shape == (n_spikes, n_channels)
def test_artificial():
_test_artificial(n_spikes=100, n_clusters=10)
_test_artificial(n_spikes=0, n_clusters=0)
| # -*- coding: utf-8 -*-
"""Tests of mock datasets."""
#------------------------------------------------------------------------------
# Imports
#------------------------------------------------------------------------------
import numpy as np
from numpy.testing import assert_array_equal as ae
from pytest import raises
from ...electrode.mea import MEA
from ..mock import (artificial_waveforms,
artificial_traces,
artificial_spike_clusters,
artificial_features,
artificial_masks,
MockModel)
#------------------------------------------------------------------------------
# Tests
#------------------------------------------------------------------------------
def _test_artificial(n_spikes=None, n_clusters=None):
n_samples_waveforms = 32
n_samples_traces = 50
n_channels = 35
n_features = n_channels * 2
# Waveforms.
waveforms = artificial_waveforms(n_spikes=n_spikes,
n_samples=n_samples_waveforms,
n_channels=n_channels)
assert waveforms.shape == (n_spikes, n_samples_waveforms, n_channels)
# Traces.
traces = artificial_traces(n_samples=n_samples_traces,
n_channels=n_channels)
assert traces.shape == (n_samples_traces, n_channels)
# Spike clusters.
spike_clusters = artificial_spike_clusters(n_spikes=n_spikes,
n_clusters=n_clusters)
assert spike_clusters.shape == (n_spikes,)
if n_clusters >= 1:
assert spike_clusters.min() in (0, 1)
assert spike_clusters.max() in (n_clusters - 1, n_clusters - 2)
ae(np.unique(spike_clusters), np.arange(n_clusters))
# Features.
features = artificial_features(n_spikes, n_features)
assert features.shape == (n_spikes, n_features)
# Masks.
masks = artificial_masks(n_spikes, n_channels)
assert masks.shape == (n_spikes, n_channels)
def test_artificial():
_test_artificial(n_spikes=100, n_clusters=10)
_test_artificial(n_spikes=0, n_clusters=0)
def test_mock_model():
model = MockModel()
assert model.metadata['description'] == 'A mock model.'
assert model.traces.ndim == 2
assert model.spike_samples.ndim == 1
assert model.spike_clusters.ndim == 1
assert model.features.ndim == 2
assert model.masks.ndim == 2
assert model.waveforms.ndim == 3
assert isinstance(model.probe, MEA)
with raises(NotImplementedError):
model.save()
| bsd-3-clause | Python |
2f2f29e6ceeb51bd58aa635ad91415e3f720c0e8 | Add option to generator runner to run it vs all events | synw/django-chartflo,synw/django-chartflo,synw/django-chartflo | chartflo/management/commands/gen.py | chartflo/management/commands/gen.py | # -*- coding: utf-8 -*-
from __future__ import print_function
from django.core.management.base import BaseCommand
from chartflo.apps import GENERATORS
from .gencharts import get_changes_events, get_last_run_q, get_events_q
from mqueue.models import MEvent
class Command(BaseCommand):
help = ""
def add_arguments(self, parser):
parser.add_argument('app', type=str)
parser.add_argument('-q',
dest="quiet",
default=0,
help='Quiet mode: ex: -q=1',
)
parser.add_argument('-all',
dest="all",
default=0,
help='Update for all instances: ex: -all=1',
)
def handle(self, *args, **options):
"""
Run a generator
"""
app = options["app"]
quiet = int(options["quiet"])
runall = int(options["all"])
try:
generator = GENERATORS[app]
except:
if quiet > 0:
print("Generator not found")
return
if quiet > 0:
print("Running generator", app)
last_run_q = get_last_run_q()
if runall == 0:
events_q = get_events_q()
events_q = get_changes_events(events_q, last_run_q)
else:
events_q = MEvent.objects.all()
generator(events_q)
| # -*- coding: utf-8 -*-
from __future__ import print_function
from django.core.management.base import BaseCommand
from chartflo.apps import GENERATORS
from .gencharts import get_changes_events, get_last_run_q, get_events_q
class Command(BaseCommand):
help = ""
def add_arguments(self, parser):
parser.add_argument('app', type=str)
parser.add_argument('-q',
dest="quiet",
default=0,
help='Quiet mode: ex: -q=1.',
)
def handle(self, *args, **options):
"""
Run a generator
"""
app = options["app"]
quiet = options["quiet"]
try:
generator = GENERATORS[app]
except:
if quiet > 0:
print("Generator not found")
return
if quiet > 0:
print("Running generator", app)
last_run_q = get_last_run_q()
events_q = get_events_q()
events = get_changes_events(events_q, last_run_q)
generator(events)
| mit | Python |
20e5246d59bcd5d89842096d5698bac1ceeb9996 | fix CUDA init of RMM-DIIS | mlouhivu/gpaw-accelerator-benchmarks,mlouhivu/gpaw-accelerator-benchmarks | copper-sheet/input.py | copper-sheet/input.py | ###
### GPAW benchmark: Copper Sheet
###
from __future__ import print_function
from ase.lattice.cubic import FaceCenteredCubic
from gpaw import GPAW, Mixer, ConvergenceError
from gpaw.mpi import size, rank
try:
from gpaw.eigensolvers.rmm_diis import RMM_DIIS
except ImportError:
from gpaw.eigensolvers.rmmdiis import RMMDIIS as RMM_DIIS
try:
from gpaw import use_mic
except ImportError:
use_mic = False
try:
from gpaw import use_cuda
use_cuda = True
except ImportError:
use_cuda = False
use_cpu = not (use_mic or use_cuda)
# no. of replicates in each dimension (increase to scale up the system)
x = 4
y = 2
z = 3
# other parameters
h = 0.22
kpts = (8,4,1)
txt = 'output.txt'
maxiter = 6
# output benchmark parameters
if rank == 0:
print("#"*60)
print("GPAW benchmark: Copper Sheet")
print(" dimensions: x=%d, y=%d, z=%d" % (x, y, z))
print(" grid spacing: h=%f" % h)
print(" Brillouin-zone sampling: kpts=" + str(kpts))
print(" MPI tasks: %d" % size)
print(" using CUDA (GPGPU): " + str(use_cuda))
print(" using pyMIC (KNC) : " + str(use_mic))
print(" using CPU (or KNL): " + str(use_cpu))
print("#"*60)
print("")
# compatibility hack for the eigensolver
if use_cuda:
rmm = RMM_DIIS(cuda=True)
else:
rmm = RMM_DIIS()
rmm.niter = 2
# setup parameters
args = {'h': h,
'nbands': -20,
'width': 0.2,
'kpts': kpts,
'xc': 'PBE',
'mixer': Mixer(0.1, 5, 100),
'eigensolver': rmm,
'maxiter': maxiter,
'parallel': {'sl_auto': True},
'txt': txt}
if use_cuda:
args['cuda'] = True
# setup the system
atoms = FaceCenteredCubic(directions=[[1,-1,0], [1,1,-2], [1,1,1]],
size=(x,y,z), symbol='Cu', pbc=(1,1,0))
atoms.center(vacuum=6.0, axis=2)
calc = GPAW(**args)
atoms.set_calculator(calc)
# execute the run
try:
atoms.get_potential_energy()
except ConvergenceError:
pass
| ###
### GPAW benchmark: Copper Sheet
###
from __future__ import print_function
from ase.lattice.cubic import FaceCenteredCubic
from gpaw import GPAW, Mixer, ConvergenceError
from gpaw.mpi import size, rank
try:
from gpaw.eigensolvers.rmm_diis import RMM_DIIS
except ImportError:
from gpaw.eigensolvers.rmmdiis import RMMDIIS as RMM_DIIS
try:
from gpaw import use_mic
except ImportError:
use_mic = False
try:
from gpaw import use_cuda
use_cuda = True
except ImportError:
use_cuda = False
use_cpu = not (use_mic or use_cuda)
# no. of replicates in each dimension (increase to scale up the system)
x = 4
y = 2
z = 3
# other parameters
h = 0.22
kpts = (8,4,1)
txt = 'output.txt'
maxiter = 6
# output benchmark parameters
if rank == 0:
print("#"*60)
print("GPAW benchmark: Copper Sheet")
print(" dimensions: x=%d, y=%d, z=%d" % (x, y, z))
print(" grid spacing: h=%f" % h)
print(" Brillouin-zone sampling: kpts=" + str(kpts))
print(" MPI tasks: %d" % size)
print(" using CUDA (GPGPU): " + str(use_cuda))
print(" using pyMIC (KNC) : " + str(use_mic))
print(" using CPU (or KNL): " + str(use_cpu))
print("#"*60)
print("")
# compatibility hack for the eigensolver
rmm = RMM_DIIS()
rmm.niter = 2
# setup parameters
args = {'h': h,
'nbands': -20,
'width': 0.2,
'kpts': kpts,
'xc': 'PBE',
'mixer': Mixer(0.1, 5, 100),
'eigensolver': rmm,
'maxiter': maxiter,
'parallel': {'sl_auto': True},
'txt': txt}
if use_cuda:
args['cuda'] = True
# setup the system
atoms = FaceCenteredCubic(directions=[[1,-1,0], [1,1,-2], [1,1,1]],
size=(x,y,z), symbol='Cu', pbc=(1,1,0))
atoms.center(vacuum=6.0, axis=2)
calc = GPAW(**args)
atoms.set_calculator(calc)
# execute the run
try:
atoms.get_potential_energy()
except ConvergenceError:
pass
| mit | Python |
e9ab24f6871af83fc8f3dc2e69b48700dafef90c | update path for video | selepo/svtplay-dl,selepo/svtplay-dl,spaam/svtplay-dl,iwconfig/svtplay-dl,olof/svtplay-dl,iwconfig/svtplay-dl,olof/svtplay-dl,qnorsten/svtplay-dl,spaam/svtplay-dl,dalgr/svtplay-dl,dalgr/svtplay-dl,qnorsten/svtplay-dl | lib/svtplay_dl/service/facebook.py | lib/svtplay_dl/service/facebook.py | from __future__ import absolute_import
import re
import json
import copy
from svtplay_dl.service import Service, OpenGraphThumbMixin
from svtplay_dl.utils.urllib import unquote_plus
from svtplay_dl.fetcher.http import HTTP
from svtplay_dl.error import ServiceError
class Facebook(Service, OpenGraphThumbMixin):
supported_domains_re = ["www.facebook.com"]
def get(self, options):
data = self.get_urldata()
match = re.search('params","([^"]+)"', data)
if not match:
yield ServiceError("Cant find params info. video need to be public.")
return
data2 = json.loads('["%s"]' % match.group(1))
data2 = json.loads(unquote_plus(data2[0]))
if "sd_src_no_ratelimit" in data2["video_data"]["progressive"][0]:
yield HTTP(copy.copy(options), data2["video_data"]["progressive"][0]["sd_src_no_ratelimit"], "240")
else:
yield HTTP(copy.copy(options), data2["video_data"]["progressive"][0]["sd_src"], "240")
if "hd_src_no_ratelimit" in data2["video_data"]["progressive"][0]:
yield HTTP(copy.copy(options), data2["video_data"]["progressive"][0]["hd_src_no_ratelimit"], "720")
else:
if data2["video_data"]["progressive"][0]["hd_src"]:
yield HTTP(copy.copy(options), data2["video_data"]["progressive"][0]["hd_src"], "720")
| from __future__ import absolute_import
import re
import json
import copy
from svtplay_dl.service import Service, OpenGraphThumbMixin
from svtplay_dl.utils.urllib import unquote_plus
from svtplay_dl.fetcher.http import HTTP
from svtplay_dl.error import ServiceError
class Facebook(Service, OpenGraphThumbMixin):
supported_domains_re = ["www.facebook.com"]
def get(self, options):
data = self.get_urldata()
match = re.search('params","([^"]+)"', data)
if not match:
yield ServiceError("Cant find params info. video need to be public.")
return
data2 = json.loads('["%s"]' % match.group(1))
data2 = json.loads(unquote_plus(data2[0]))
if "sd_src_no_ratelimit" in data2["video_data"][0]:
yield HTTP(copy.copy(options), data2["video_data"][0]["sd_src_no_ratelimit"], "240")
else:
yield HTTP(copy.copy(options), data2["video_data"][0]["sd_src"], "240")
if "hd_src_no_ratelimit" in data2["video_data"][0]:
yield HTTP(copy.copy(options), data2["video_data"][0]["hd_src_no_ratelimit"], "720")
else:
if data2["video_data"][0]["hd_src"]:
yield HTTP(copy.copy(options), data2["video_data"][0]["hd_src"], "720")
| mit | Python |
72cd061d8f4bed462d9e217a916708cd7cdd3aa5 | fix exception catching | cheral/orange3-text,cheral/orange3-text,cheral/orange3-text | orangecontrib/text/wikipedia.py | orangecontrib/text/wikipedia.py | import numpy as np
import wikipedia
from Orange import data
from orangecontrib.text.corpus import Corpus
class WikipediaAPI:
attributes = ('pageid', 'revision_id')
metas = ('title', 'content', 'summary', 'url')
@classmethod
def search(cls, lang, queries, attributes, progress_callback=None):
wikipedia.set_lang(lang)
attributes = [attr for attr in attributes if attr in cls.attributes]
metas = [attr for attr in attributes if attr in cls.metas] + ['content']
X, meta_values = [], []
for i, query in enumerate(queries):
articles = wikipedia.search(query)
for j, article in enumerate(articles):
cls._get(article, attributes, X, metas, meta_values)
if progress_callback:
progress_callback(100 * (i * len(articles) + j + 1) / (len(queries) * len(articles)))
metas = [data.StringVariable(attr) for attr in metas]
domain = data.Domain(attributes=[], metas=metas)
corpus = Corpus(None, metas=np.array(meta_values), domain=domain, text_features=metas[-1:])
corpus.extend_attributes(np.array(X), attributes)
return corpus
@classmethod
def _get(cls, article, attributes, X, metas, meta_values, recursive=True):
try:
article = wikipedia.page(article)
X.append(
[int(getattr(article, attr)) for attr in attributes]
# [getattr(article, attr) for attr in attributes]
)
meta_values.append(
[getattr(article, attr) for attr in metas]
)
except wikipedia.exceptions.DisambiguationError:
if recursive:
for article in wikipedia.search(article):
cls._get(article, attributes, X, metas, meta_values, recursive=False)
except wikipedia.exceptions.PageError:
pass
| import numpy as np
import wikipedia
from Orange import data
from orangecontrib.text.corpus import Corpus
class WikipediaAPI:
attributes = ('pageid', 'revision_id')
metas = ('title', 'content', 'summary', 'url')
@classmethod
def search(cls, lang, queries, attributes, progress_callback=None):
wikipedia.set_lang(lang)
attributes = [attr for attr in attributes if attr in cls.attributes]
metas = [attr for attr in attributes if attr in cls.metas] + ['content']
X, meta_values = [], []
for i, query in enumerate(queries):
articles = wikipedia.search(query)
for j, article in enumerate(articles):
cls._get(article, attributes, X, metas, meta_values)
if progress_callback:
progress_callback(100 * (i * len(articles) + j + 1) / (len(queries) * len(articles)))
metas = [data.StringVariable(attr) for attr in metas]
domain = data.Domain(attributes=[], metas=metas)
corpus = Corpus(None, metas=np.array(meta_values), domain=domain, text_features=metas[-1:])
corpus.extend_attributes(np.array(X), attributes)
return corpus
@classmethod
def _get(cls, article, attributes, X, metas, meta_values, recursive=True):
try:
article = wikipedia.page(article)
X.append(
[int(getattr(article, attr)) for attr in attributes]
# [getattr(article, attr) for attr in attributes]
)
meta_values.append(
[getattr(article, attr) for attr in metas]
)
except wikipedia.DisambiguationError:
if recursive:
for article in wikipedia.search(article):
cls._get(article, attributes, X, metas, meta_values, recursive=False)
| bsd-2-clause | Python |
741e26cd731e1e31b299b6f623c89a32b7531b6f | Update 'bugcomics' name | jodal/comics,datagutten/comics,jodal/comics,jodal/comics,jodal/comics,datagutten/comics,datagutten/comics,datagutten/comics | comics/comics/bugcomic.py | comics/comics/bugcomic.py | from comics.aggregator.crawler import CrawlerBase, CrawlerImage
from comics.core.comic_data import ComicDataBase
class ComicData(ComicDataBase):
name = 'Bug Martini'
language = 'en'
url = 'http://www.bugmartini.com/'
start_date = '2009-10-19'
rights = 'Adam Huber'
class Crawler(CrawlerBase):
history_capable_days = 15
schedule = 'Mo,Tu,We,Th,Fr'
time_zone = 'US/Mountain'
def crawl(self, pub_date):
feed = self.parse_feed('http://www.bugmartini.com/feed/')
for entry in feed.for_date(pub_date):
title = entry.title
url = entry.summary.src('img[src*="/wp-content/uploads/"]')
url = url.replace('?resize=520%2C280', '')
return CrawlerImage(url, title)
| from comics.aggregator.crawler import CrawlerBase, CrawlerImage
from comics.core.comic_data import ComicDataBase
class ComicData(ComicDataBase):
name = 'Bug'
language = 'en'
url = 'http://www.bugmartini.com/'
start_date = '2009-10-19'
rights = 'Adam Huber'
class Crawler(CrawlerBase):
history_capable_days = 15
schedule = 'Mo,Tu,We,Th,Fr'
time_zone = 'US/Mountain'
def crawl(self, pub_date):
feed = self.parse_feed('http://www.bugmartini.com/feed/')
for entry in feed.for_date(pub_date):
title = entry.title
url = entry.summary.src('img[src*="/wp-content/uploads/"]')
url = url.replace('?resize=520%2C280', '')
return CrawlerImage(url, title)
| agpl-3.0 | Python |
fee245628d492f64f3fe02563d3059317d456ed6 | Use raw string for Windows paths | mikedh/trimesh,mikedh/trimesh,mikedh/trimesh,dajusc/trimesh,dajusc/trimesh,mikedh/trimesh | trimesh/interfaces/vhacd.py | trimesh/interfaces/vhacd.py | import os
import platform
from .generic import MeshScript
from ..constants import log
from distutils.spawn import find_executable
_search_path = os.environ['PATH']
if platform.system() == 'Windows':
# split existing path by delimiter
_search_path = [i for i in _search_path.split(';') if len(i) > 0]
_search_path.append(r'C:\Program Files')
_search_path.append(r'C:\Program Files (x86)')
_search_path = ';'.join(_search_path)
log.debug('searching for vhacd in: %s', _search_path)
_vhacd_executable = None
for _name in ['vhacd', 'testVHACD']:
_vhacd_executable = find_executable(_name, path=_search_path)
if _vhacd_executable is not None:
break
exists = _vhacd_executable is not None
def convex_decomposition(mesh, **kwargs):
if not exists:
raise ValueError('No vhacd available!')
argstring = ' --input $mesh_0 --output $mesh_post --log $script'
# pass through extra arguments from the input dictionary
for key, value in kwargs.items():
argstring += ' --{} {}'.format(str(key),
str(value))
with MeshScript(meshes=[mesh],
script='',
tmpfile_ext='obj') as vhacd:
result = vhacd.run(_vhacd_executable + argstring)
return result
| import os
import platform
from .generic import MeshScript
from ..constants import log
from distutils.spawn import find_executable
_search_path = os.environ['PATH']
if platform.system() == 'Windows':
# split existing path by delimiter
_search_path = [i for i in _search_path.split(';') if len(i) > 0]
_search_path.append('C:\Program Files')
_search_path.append('C:\Program Files (x86)')
_search_path = ';'.join(_search_path)
log.debug('searching for vhacd in: %s', _search_path)
_vhacd_executable = None
for _name in ['vhacd', 'testVHACD']:
_vhacd_executable = find_executable(_name, path=_search_path)
if _vhacd_executable is not None:
break
exists = _vhacd_executable is not None
def convex_decomposition(mesh, **kwargs):
if not exists:
raise ValueError('No vhacd available!')
argstring = ' --input $mesh_0 --output $mesh_post --log $script'
# pass through extra arguments from the input dictionary
for key, value in kwargs.items():
argstring += ' --{} {}'.format(str(key),
str(value))
with MeshScript(meshes=[mesh],
script='',
tmpfile_ext='obj') as vhacd:
result = vhacd.run(_vhacd_executable + argstring)
return result
| mit | Python |
4b4683453db0ddc42cb32a72e3992b8bfa31a602 | add missing 'import posixpath'. | MIT-LCP/wfdb-python | wfdb/io/_coreio.py | wfdb/io/_coreio.py | import posixpath
from wfdb.io import _url
def _open_file(
pn_dir,
file_name,
mode="r",
*,
buffering=-1,
encoding=None,
errors=None,
newline=None,
check_access=False,
):
"""
Open a data file as a random-access file object.
See the documentation of `open` and `wfdb.io._url.openurl` for details
about the `mode`, `buffering`, `encoding`, `errors`, and `newline`
parameters.
Parameters
----------
pn_dir : str or None
The PhysioNet database directory where the file is stored, or None
if file_name is a local path.
file_name : str
The name of the file, either as a local filesystem path (if
`pn_dir` is None) or a URL path (if `pn_dir` is a string.)
mode : str, optional
The standard I/O mode for the file ("r" by default). If `pn_dir`
is not None, this must be "r", "rt", or "rb".
buffering : int, optional
Buffering policy.
encoding : str, optional
Name of character encoding used in text mode.
errors : str, optional
Error handling strategy used in text mode.
newline : str, optional
Newline translation mode used in text mode.
check_access : bool, optional
If true, raise an exception immediately if the file does not
exist or is not accessible.
"""
if pn_dir is None:
return open(
file_name,
mode,
buffering=buffering,
encoding=encoding,
errors=errors,
newline=newline,
)
else:
url = posixpath.join(config.db_index_url, pn_dir, file_name)
return _url.openurl(
url,
mode,
buffering=buffering,
encoding=encoding,
errors=errors,
newline=newline,
check_access=check_access,
)
| from wfdb.io import _url
def _open_file(
pn_dir,
file_name,
mode="r",
*,
buffering=-1,
encoding=None,
errors=None,
newline=None,
check_access=False,
):
"""
Open a data file as a random-access file object.
See the documentation of `open` and `wfdb.io._url.openurl` for details
about the `mode`, `buffering`, `encoding`, `errors`, and `newline`
parameters.
Parameters
----------
pn_dir : str or None
The PhysioNet database directory where the file is stored, or None
if file_name is a local path.
file_name : str
The name of the file, either as a local filesystem path (if
`pn_dir` is None) or a URL path (if `pn_dir` is a string.)
mode : str, optional
The standard I/O mode for the file ("r" by default). If `pn_dir`
is not None, this must be "r", "rt", or "rb".
buffering : int, optional
Buffering policy.
encoding : str, optional
Name of character encoding used in text mode.
errors : str, optional
Error handling strategy used in text mode.
newline : str, optional
Newline translation mode used in text mode.
check_access : bool, optional
If true, raise an exception immediately if the file does not
exist or is not accessible.
"""
if pn_dir is None:
return open(
file_name,
mode,
buffering=buffering,
encoding=encoding,
errors=errors,
newline=newline,
)
else:
url = posixpath.join(config.db_index_url, pn_dir, file_name)
return _url.openurl(
url,
mode,
buffering=buffering,
encoding=encoding,
errors=errors,
newline=newline,
check_access=check_access,
)
| mit | Python |
53f7acf5fc04ca6f86456fda95504ba41046d860 | Add Django Custom Tag SSO | philanthropy-u/edx-platform,philanthropy-u/edx-platform,philanthropy-u/edx-platform,philanthropy-u/edx-platform | openedx/features/specializations/templatetags/sso_meta_tag.py | openedx/features/specializations/templatetags/sso_meta_tag.py | from django import template
from django.template.loader import get_template
register = template.Library()
@register.simple_tag(takes_context=True)
def sso_meta(context):
return get_template('features/specializations/sso_meta_template.html').render(context.flatten())
| from django import template
from django.template import Template
register = template.Library()
@register.simple_tag(takes_context=True)
def sso_meta(context):
return Template('<meta name="title" content="${ title }">' + ' ' +
'<meta name="description" content="${ subtitle }">' + ' ' +
## OG (Open Graph) title and description added below to give social media info to display
## (https://developers.facebook.com/docs/opengraph/howtos/maximizing-distribution-media-content#tags)
'<meta property="og:title" content="${ title }">' + ' ' +
'<meta property="og:description" content="${ subtitle }">' + ' ' +
'<meta prefix="og: http://ogp.me/ns#" name="image" property="og:image" content="${ banner_image[\'large\'][\'url\'] }">' + ' ' +
'<meta property="og:image:width" content="512">' + ' ' +
'<meta property="og:image:height" content="512">' + ' ' +
'<meta name="twitter:image" content="${ banner_image[\'large\'][\'url\'] }">' + ' ' +
'<meta name="twitter:card" content="${ banner_image[\'large\'][\'url\'] }">' + ' ' +
'<meta name="twitter:site" content="@PhilanthropyUni">' + ' ' +
'<meta name="twitter:title" content="${ title }">' + ' ' +
'<meta name="twitter:description" content="${ subtitle }">').render(context);
| agpl-3.0 | Python |
d6dc1638f11736b6cbb4063a260d824becb277bc | Add urlunparse. | chhe/livestreamer,Saturn/livestreamer,intact/livestreamer,lyhiving/livestreamer,blxd/livestreamer,chhe/livestreamer,javiercantero/streamlink,Dobatymo/livestreamer,Klaudit/livestreamer,chhe/streamlink,jtsymon/livestreamer,Feverqwe/livestreamer,mmetak/streamlink,gravyboat/streamlink,derrod/livestreamer,okaywit/livestreamer,Masaz-/livestreamer,wlerin/streamlink,streamlink/streamlink,chrippa/livestreamer,melmorabity/streamlink,Feverqwe/livestreamer,hmit/livestreamer,fishscene/streamlink,gtmanfred/livestreamer,hmit/livestreamer,chrippa/livestreamer,back-to/streamlink,beardypig/streamlink,gtmanfred/livestreamer,sbstp/streamlink,Saturn/livestreamer,gravyboat/streamlink,jtsymon/livestreamer,programming086/livestreamer,intact/livestreamer,blxd/livestreamer,derrod/livestreamer,beardypig/streamlink,wolftankk/livestreamer,Dobatymo/livestreamer,melmorabity/streamlink,flijloku/livestreamer,caorong/livestreamer,ethanhlc/streamlink,wlerin/streamlink,fishscene/streamlink,mmetak/streamlink,bastimeyer/streamlink,charmander/livestreamer,flijloku/livestreamer,wolftankk/livestreamer,lyhiving/livestreamer,caorong/livestreamer,back-to/streamlink,streamlink/streamlink,okaywit/livestreamer,Klaudit/livestreamer,ethanhlc/streamlink,charmander/livestreamer,sbstp/streamlink,bastimeyer/streamlink,javiercantero/streamlink,chhe/streamlink,Masaz-/livestreamer,programming086/livestreamer | src/livestreamer/compat.py | src/livestreamer/compat.py | import os
import sys
is_py2 = (sys.version_info[0] == 2)
is_py3 = (sys.version_info[0] == 3)
is_py33 = (sys.version_info[0] == 3 and sys.version_info[1] == 3)
is_win32 = os.name == "nt"
if is_py2:
_str = str
str = unicode
range = xrange
def bytes(b, enc="ascii"):
return _str(b)
elif is_py3:
bytes = bytes
str = str
range = range
try:
from urllib.parse import (
urlparse, urlunparse, urljoin, quote, unquote, parse_qsl
)
import queue
except ImportError:
from urlparse import urlparse, urlunparse, urljoin, parse_qsl
from urllib import quote, unquote
import Queue as queue
__all__ = ["is_py2", "is_py3", "is_py33", "is_win32", "str", "bytes",
"urlparse", "urlunparse", "urljoin", "parse_qsl", "quote",
"unquote", "queue", "range"]
| import os
import sys
is_py2 = (sys.version_info[0] == 2)
is_py3 = (sys.version_info[0] == 3)
is_py33 = (sys.version_info[0] == 3 and sys.version_info[1] == 3)
is_win32 = os.name == "nt"
if is_py2:
_str = str
str = unicode
range = xrange
def bytes(b, enc="ascii"):
return _str(b)
elif is_py3:
bytes = bytes
str = str
range = range
try:
from urllib.parse import urlparse, urljoin, quote, unquote, parse_qsl
import queue
except ImportError:
from urlparse import urlparse, urljoin, parse_qsl
from urllib import quote, unquote
import Queue as queue
__all__ = ["is_py2", "is_py3", "is_py33", "is_win32", "str", "bytes",
"urlparse", "urljoin", "parse_qsl", "quote", "unquote", "queue",
"range"]
| bsd-2-clause | Python |
1800118e416f68e10cea8094951b915bedea7067 | Fix doc strings, method names on OCDCandidateContest | california-civic-data-coalition/django-calaccess-processed-data,california-civic-data-coalition/django-calaccess-processed-data | calaccess_processed/models/proxies/opencivicdata/candidatecontests.py | calaccess_processed/models/proxies/opencivicdata/candidatecontests.py | #!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
Proxy models for augmenting our source data tables with methods useful for processing.
"""
from __future__ import unicode_literals
from django.db import models
from opencivicdata.elections.models import CandidateContest
from .base import OCDProxyModelMixin
class OCDCandidateContestManager(models.Manager):
"""
Custom helpers for the OCD CandidateContest model that limit it to runoffs.
"""
def runoffs(self):
"""
Filter down to runoff CandidateContest instances.
"""
return self.get_queryset().filter(name__contains='RUNOFF')
def set_parents(self):
"""
Connect and save parent contests for all runoffs.
"""
for obj in self.runoffs():
# Carve out for the duplicate 2010 Assembly 43 runoffs until
# I can figure out what I broke.
obj.runoff_for_contest = obj.get_parent()
obj.save()
class OCDCandidateContestProxy(CandidateContest, OCDProxyModelMixin):
"""
A proxy on the OCD CandidateContest model with helper methods.
"""
objects = OCDCandidateContestManager()
class Meta:
"""
Make this a proxy model.
"""
proxy = True
def get_parent(self):
"""
Returns the undecided contest that preceeded runoff_contest.
Returns None if it can't be found.
"""
# Get the contest's post (should only ever be one per contest)
post = self.posts.all()[0].post
# Then try getting the most recent contest for the same post
# that preceeds the runoff contest
try:
return CandidateContest.objects.filter(
posts__post=post,
election__date__lt=self.election.date,
).latest('election__date')
except CandidateContest.DoesNotExist:
return None
| #!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
Proxy models for augmenting our source data tables with methods useful for processing.
"""
from __future__ import unicode_literals
from django.db import models
from opencivicdata.elections.models import CandidateContest
from .base import OCDProxyModelMixin
class OCDCandidateContestManager(models.Manager):
"""
Custom helpers for the OCD CandidateContest model that limit it to runoffs.
"""
def get_queryset(self):
"""
Filters down to state senate divisions.
"""
return super(OCDCandidateContestManager, self).get_queryset().filter(name__contains='RUNOFF')
def set_parents(self):
"""
Connect and save parent contests for all runoffs.
"""
for obj in self.get_queryset().all():
# Carve out for the duplicate 2010 Assembly 43 runoffs until
# I can figure out what I broke.
obj.runoff_for_contest = obj.get_parent()
obj.save()
class OCDCandidateContestProxy(CandidateContest, OCDProxyModelMixin):
"""
A proxy on the OCD CandidateContest model with helper methods and limited to runoffs.
"""
objects = OCDCandidateContestManager()
class Meta:
"""
Make this a proxy model.
"""
proxy = True
def get_parent(self):
"""
Returns the undecided contest that preceeded runoff_contest.
Returns None if it can't be found.
"""
# Get the contest's post (should only ever be one per contest)
post = self.posts.all()[0].post
# Then try getting the most recent contest for the same post
# that preceeds the runoff contest
try:
return CandidateContest.objects.filter(
posts__post=post,
election__date__lt=self.election.date,
).latest('election__date')
except CandidateContest.DoesNotExist:
return None
| mit | Python |
d5ac585da2aef7feea107250cdf81652aa5f32ce | Fix to mongo metadata | samuel/kokki | kokki/cookbooks/mongodb/metadata.py | kokki/cookbooks/mongodb/metadata.py |
__description__ = "MongoDB database"
__config__ = {
"mongodb.dbpath": dict(
description = "Path where to store the MongoDB database",
default = "/var/lib/mongodb",
),
"mongodb.logpath": dict(
description = "Path where to store the MongoDB log",
default = "/var/log/mongodb/mongodb.log",
),
"mongodb.port": dict(
description = "Specifies the port number on which Mongo will listen for client connections.",
default = None,
),
"mongodb.verbose": dict(
description = "Verbose logging output",
default = False,
),
"mongodb.rest": dict(
description = "Allow extended operations at the HTTP Interface",
default = False,
),
"mongodb.oplog_size": dict(
description = "Custom size for replication operation log.",
default = None,
),
"mongodb.op_id_mem": dict(
description = "Size limit for in-memory storage of op ids.",
default = None,
),
"mongodb.replica_set": dict(
description = "<setname>[/<seedlist>] Use replica sets with the specified logical set name. Typically the optional seed host list need not be specified.",
default = None,
),
}
|
__description__ = "MongoDB database"
__config__ = {
"mongodb.dbpath": dict(
description = "Path where to store the MongoDB database",
default = "/var/lib/mongodb",
),
"mongodb.logpath": dict(
description = "Path where to store the MongoDB log",
default = "/var/log/mongodb/mongodb.log",
),
"mongodb.port": dict(
description = "Specifies the port number on which Mongo will listen for client connections.",
default = None,
),
"mongodb.verbose": dict(
description = "Verbose logging output",
default = False,
),
"mongodb.rest": dict(
description = "Allow extended operations at the HTTP Interface",
default = False,
),
"mongodb.oplog_size": dict(
description = "Custom size for replication operation log.",
default = None,
),
"mongodb.op_id_mem": dict(
description = "Size limit for in-memory storage of op ids.",
default = None,
),
"mongodb.replica_set": dict(
description = "<setname>[/<seedlist>] Use replica sets with the specified logical set name. Typically the optional seed host list need not be specified."
default = None,
),
}
| bsd-3-clause | Python |
27bf030df4c2f46eef8cdcd9441bd5d21a22e5cc | Fix public API root view links | tuomas777/parkkihubi | parkings/api/public/urls.py | parkings/api/public/urls.py | from django.conf.urls import include, url
from rest_framework.routers import DefaultRouter
from .parking_area import PublicAPIParkingAreaViewSet
from .parking_area_statistics import PublicAPIParkingAreaStatisticsViewSet
router = DefaultRouter()
router.register(r'parking_area', PublicAPIParkingAreaViewSet, base_name='parkingarea')
router.register(r'parking_area_statistics', PublicAPIParkingAreaStatisticsViewSet, base_name='parkingareastatistics')
urlpatterns = [
url(r'^', include(router.urls, namespace='v1')),
]
| from django.conf.urls import include, url
from rest_framework.routers import DefaultRouter
from .parking_area import PublicAPIParkingAreaViewSet
from .parking_area_statistics import PublicAPIParkingAreaStatisticsViewSet
router = DefaultRouter()
router.register(r'parking_area', PublicAPIParkingAreaViewSet)
router.register(r'parking_area_statistics', PublicAPIParkingAreaStatisticsViewSet)
urlpatterns = [
url(r'^', include(router.urls, namespace='v1')),
]
| mit | Python |
dcc5208f091ad11a853afecc3a9260cc14bc08c7 | Tweak to doc string | 4dn-dcic/fourfront,hms-dbmi/fourfront,hms-dbmi/fourfront,hms-dbmi/fourfront,hms-dbmi/fourfront,4dn-dcic/fourfront,4dn-dcic/fourfront,4dn-dcic/fourfront,hms-dbmi/fourfront | src/encoded/commands/load_ontology_terms.py | src/encoded/commands/load_ontology_terms.py | #!/usr/bin/env python3
import argparse
import logging
from pyramid.path import DottedNameResolver
from pyramid.paster import get_app
from encoded import configure_dbsession
import sys
import os
from datetime import datetime
logger = logging.getLogger(__name__)
EPILOG = __doc__
def main():
start = datetime.now()
print(str(start))
logging.basicConfig()
# Loading app will have configured from config file. Reconfigure here:
logging.getLogger('encoded').setLevel(logging.DEBUG)
parser = argparse.ArgumentParser(
description="Load Ontology Term Data", epilog=EPILOG,
formatter_class=argparse.RawDescriptionHelpFormatter,
)
parser.add_argument('--app-name', help="Pyramid app name in configfile")
parser.add_argument('config_uri', help="path to configfile")
parser.add_argument('--access-key', help="store local or copy to s3, will generate and store access key for admin user")
args = parser.parse_args()
# get the pyramids app
app = get_app(args.config_uri, args.app_name)
# create db schema
configure_dbsession(app)
load_term_data = 'encoded.loadxl:load_ontology_terms'
print("****** load test data is %s" % (load_term_data))
load_test_data = DottedNameResolver().resolve(load_term_data)
load_term_data(app)
end = datetime.now()
print("FINISHED - START: ", str(start), "\tEND: ", str(end))
if __name__ == "__main__":
main()
| #!/usr/bin/env python3
import argparse
import logging
from pyramid.path import DottedNameResolver
from pyramid.paster import get_app
from encoded import configure_dbsession
import sys
import os
from datetime import datetime
logger = logging.getLogger(__name__)
EPILOG = __doc__
def main():
start = datetime.now()
print(str(start))
logging.basicConfig()
# Loading app will have configured from config file. Reconfigure here:
logging.getLogger('encoded').setLevel(logging.DEBUG)
parser = argparse.ArgumentParser(
description="Load Test Data", epilog=EPILOG,
formatter_class=argparse.RawDescriptionHelpFormatter,
)
parser.add_argument('--app-name', help="Pyramid app name in configfile")
parser.add_argument('config_uri', help="path to configfile")
parser.add_argument('--access-key', help="store local or copy to s3, will generate and store access key for admin user")
args = parser.parse_args()
# get the pyramids app
app = get_app(args.config_uri, args.app_name)
# create db schema
configure_dbsession(app)
load_term_data = 'encoded.loadxl:load_ontology_terms'
print("****** load test data is %s" % (load_term_data))
load_test_data = DottedNameResolver().resolve(load_term_data)
load_term_data(app)
end = datetime.now()
print("FINISHED - START: ", str(start), "\tEND: ", str(end))
if __name__ == "__main__":
main()
| mit | Python |
f1a4220ecd5f6e24d79fbde9e62e861c814ae5c0 | increase version to 0.2.0 | XiaonuoGantan/pywebsocket,XiaonuoGantan/pywebsocket | src/setup.py | src/setup.py | #!/usr/bin/env python
#
# Copyright 2009 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Set up script for mod_pywebsocket.
"""
from distutils.core import setup
import sys
_PACKAGE_NAME = 'mod_pywebsocket'
if sys.version < '2.3':
print >>sys.stderr, '%s requires Python 2.3 or later.' % _PACKAGE_NAME
sys.exit(1)
setup(author='Yuzo Fujishima',
author_email='yuzo@chromium.org',
description='Web Socket extension for Apache HTTP Server.',
long_description=(
'mod_pywebsocket is an Apache HTTP Server extension for '
'Web Socket (http://tools.ietf.org/html/'
'draft-hixie-thewebsocketprotocol). '
'See mod_pywebsocket/__init__.py for more detail.'),
license='http://www.apache.org/licenses/LICENSE-2.0',
name=_PACKAGE_NAME,
packages=[_PACKAGE_NAME],
url='http://code.google.com/p/pywebsocket/',
version='0.2.0',
)
# vi:sts=4 sw=4 et
| #!/usr/bin/env python
#
# Copyright 2009 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Set up script for mod_pywebsocket.
"""
from distutils.core import setup
import sys
_PACKAGE_NAME = 'mod_pywebsocket'
if sys.version < '2.3':
print >>sys.stderr, '%s requires Python 2.3 or later.' % _PACKAGE_NAME
sys.exit(1)
setup(author='Yuzo Fujishima',
author_email='yuzo@chromium.org',
description='Web Socket extension for Apache HTTP Server.',
long_description=(
'mod_pywebsocket is an Apache HTTP Server extension for '
'Web Socket (http://tools.ietf.org/html/'
'draft-hixie-thewebsocketprotocol). '
'See mod_pywebsocket/__init__.py for more detail.'),
license='http://www.apache.org/licenses/LICENSE-2.0',
name=_PACKAGE_NAME,
packages=[_PACKAGE_NAME],
url='http://code.google.com/p/pywebsocket/',
version='0.1.0',
)
# vi:sts=4 sw=4 et
| bsd-3-clause | Python |
ce843bd14c78b180f9961836625893c6e9d08a19 | Update version number to 0.7.1. Review URL: https://codereview.appspot.com/5651087 | XiaonuoGantan/pywebsocket,XiaonuoGantan/pywebsocket | src/setup.py | src/setup.py | #!/usr/bin/env python
#
# Copyright 2011, Google Inc.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following disclaimer
# in the documentation and/or other materials provided with the
# distribution.
# * Neither the name of Google Inc. nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
"""Set up script for mod_pywebsocket.
"""
from distutils.core import setup
import sys
_PACKAGE_NAME = 'mod_pywebsocket'
if sys.version < '2.3':
print >> sys.stderr, '%s requires Python 2.3 or later.' % _PACKAGE_NAME
sys.exit(1)
setup(author='Yuzo Fujishima',
author_email='yuzo@chromium.org',
description='WebSocket extension for Apache HTTP Server.',
long_description=(
'mod_pywebsocket is an Apache HTTP Server extension for '
'WebSocket (http://tools.ietf.org/html/'
'draft-ietf-hybi-thewebsocketprotocol). '
'See mod_pywebsocket/__init__.py for more detail.'),
license='See COPYING',
name=_PACKAGE_NAME,
packages=[_PACKAGE_NAME, _PACKAGE_NAME + '.handshake'],
url='http://code.google.com/p/pywebsocket/',
# See the source of distutils.version, distutils.versionpredicate and
# distutils.dist to understand how to name version numbers.
version='0.7.1',
)
# vi:sts=4 sw=4 et
| #!/usr/bin/env python
#
# Copyright 2011, Google Inc.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following disclaimer
# in the documentation and/or other materials provided with the
# distribution.
# * Neither the name of Google Inc. nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
"""Set up script for mod_pywebsocket.
"""
from distutils.core import setup
import sys
_PACKAGE_NAME = 'mod_pywebsocket'
if sys.version < '2.3':
print >> sys.stderr, '%s requires Python 2.3 or later.' % _PACKAGE_NAME
sys.exit(1)
setup(author='Yuzo Fujishima',
author_email='yuzo@chromium.org',
description='WebSocket extension for Apache HTTP Server.',
long_description=(
'mod_pywebsocket is an Apache HTTP Server extension for '
'WebSocket (http://tools.ietf.org/html/'
'draft-ietf-hybi-thewebsocketprotocol). '
'See mod_pywebsocket/__init__.py for more detail.'),
license='See COPYING',
name=_PACKAGE_NAME,
packages=[_PACKAGE_NAME, _PACKAGE_NAME + '.handshake'],
url='http://code.google.com/p/pywebsocket/',
# See the source of distutils.version, distutils.versionpredicate and
# distutils.dist to understand how to name version numbers.
version='0.6b1',
)
# vi:sts=4 sw=4 et
| bsd-3-clause | Python |
eb12be375904430f26e7faa6b92624504e348446 | Improve script structure | franzpl/sweep,spatialaudio/sweep | log_sweep_rect_window/log_sweep_rect_window.py | log_sweep_rect_window/log_sweep_rect_window.py | #!/usr/bin/env python3
"""The influence of windowing of sweep signals when using a
Rect Window.
"""
import sys
sys.path.append('..')
import measurement_chain
import plotting
import calculation
import generation
import matplotlib.pyplot as plt
import windows
from scipy.signal import lfilter
import numpy as np
# Parameters of the measuring system
fs = 44100
fstart = 1
fstop = 22050
duration = 1
pad = 4
# Generate excitation signal
excitation = generation.log_sweep(fstart, fstop, duration, fs)
N = len(excitation)
# Noise in measurement chain
noise_level_db = -20.
noise = measurement_chain.additive_noise(noise_level_db)
# FIR-Filter-System
dirac_system = measurement_chain.convolution([1.0])
# Combinate system elements
system = measurement_chain.chained(dirac_system, noise)
def get_results():
akf = np.correlate(excitation, excitation, 'full')
plotting.plot_time(akf,
scale='db',
title='AKF log. Sweep with Rect Window')
plt.savefig('akf_log_sweep_rect.png')
plt.close()
excitation_zeropadded = generation.zero_padding(
excitation, pad, fs)
system_response = system(excitation_zeropadded)
ir = calculation.deconv_process(excitation_zeropadded,
system_response,
fs)
return calculation.snr_db(ir[0], ir[1:4 * fs]), akf.max()
with open("log_sweep_rect_window.txt", "w") as f:
snr, akf_max = get_results()
f.write("AKF_max(dB): " +
str(plotting._db_calculation(akf_max)) +
" SNR(dB): " + str(snr) + " \n")
| #!/usr/bin/env python3
"""On the influence of windowing of sweep signals.
"""
# Parameters of the measuring system
fs = 44100
fstart = 1
fstop = 22050
duration = 1
pad = 4
import sys
sys.path.append('..')
import measurement_chain
import plotting
import calculation
import generation
import numpy as np
import matplotlib.pyplot as plt
import windows
from scipy.signal import lfilter
excitation = generation.log_sweep(fstart, fstop, duration, fs)
N = len(excitation)
excitation_zeropadded = generation.zero_padding(excitation, pad, fs)
# Noise in measurement chain
noise_level_db = -20.
noise = measurement_chain.additive_noise(noise_level_db)
# FIR-Filter
ir = measurement_chain.convolution([1.0])
def get_results(system):
print("Processing {}".format(system.name))
system_response = system(excitation_zeropadded)
ir = calculation.deconv_process(excitation_zeropadded, system_response, fs)
return calculation.snr_db(ir[0], ir[1:4 * fs])
f = open("log_sweep_rect_window.txt", "w")
plotting.plot_time(
np.correlate(excitation,
excitation,
'full'),
scale='dB',
title="Rect-windowed log_sweep")
plt.savefig("akf_fig.png")
plt.close()
# plt.show()
snr = get_results(measurement_chain.chained(ir, noise))
f.write("SNR: " + str(snr) + " dB \n")
| mit | Python |
f19f8ed67546da5cb9d67f0793d036d64fe916eb | Update doc.conf.py | JuliaFEM/JuliaFEM.jl | docs/doc.conf.py | docs/doc.conf.py | import os
import sys
import re
import juliadoc
extensions = ['sphinx.ext.mathjax',
'juliadoc.julia',
'juliadoc.jldoctest',
'juliadoc.jlhelp']
master_doc = 'index'
html_theme_path = [juliadoc.get_theme_dir()]
html_sidebars = juliadoc.default_sidebars()
| import juliadoc
extensions = ['juliadoc.julia', 'juliadoc.jlhelp']
html_theme_path = [juliadoc.get_theme_dir()]
html_sidebars = juliadoc.default_sidebars()
| mit | Python |
867815fecc204c03bc49fa121630a75491336278 | allow space in toolkit version, #2353 | IBMStreams/streamsx.topology,IBMStreams/streamsx.topology,IBMStreams/streamsx.topology,IBMStreams/streamsx.topology,IBMStreams/streamsx.topology,IBMStreams/streamsx.topology,IBMStreams/streamsx.topology | com.ibm.streamsx.topology/opt/python/packages/streamsx/spl/toolkit.py | com.ibm.streamsx.topology/opt/python/packages/streamsx/spl/toolkit.py | # coding=utf-8
# Licensed Materials - Property of IBM
# Copyright IBM Corp. 2017,2019
"""
SPL toolkit integration.
********
Overview
********
SPL operators are defined by an SPL toolkit. When a ``Topology``
contains invocations of SPL operators, their defining toolkit must
be made known using :py:func:`add_toolkit`.
Toolkits shipped with the IBM Streams product under
``$STREAMS_INSTALL/toolkits`` are implictly known and
must not be added through ``add_toolkit``.
"""
__all__ = ['add_toolkit', 'add_toolkit_dependency']
import os
def add_toolkit(topology, location):
"""Add an SPL toolkit to a topology.
Args:
topology(Topology): Topology to include toolkit in.
location(str): Location of the toolkit directory.
"""
import streamsx.topology.topology
assert isinstance(topology, streamsx.topology.topology.Topology)
tkinfo = dict()
tkinfo['root'] = os.path.abspath(location)
topology.graph._spl_toolkits.append(tkinfo)
def add_toolkit_dependency(topology, name, version):
"""Add a version dependency on an SPL toolkit to a topology.
To specify a range of versions for the dependent toolkits,
use brackets (``[]``) or parentheses. Use brackets to represent an
inclusive range and parentheses to represent an exclusive range.
The following examples describe how to specify a dependency on a range of toolkit versions:
* ``[1.0.0, 2.0.0]`` represents a dependency on toolkit versions 1.0.0 - 2.0.0, both inclusive.
* ``[1.0.0, 2.0.0)`` represents a dependency on toolkit versions 1.0.0 or later, but not including 2.0.0.
* ``(1.0.0, 2.0.0]`` represents a dependency on toolkits versions later than 1.0.0 and less than or equal to 2.0.0.
* ``(1.0.0, 2.0.0)`` represents a dependency on toolkit versions 1.0.0 - 2.0.0, both exclusive.
Args:
topology(Topology): Topology to include toolkit in.
name(str): Toolkit name.
version(str): Toolkit version dependency.
.. seealso::
`Toolkit information model file <https://www.ibm.com/support/knowledgecenter/SSCRJU_4.3.0/com.ibm.streams.dev.doc/doc/toolkitinformationmodelfile.html>`_
.. versionadded:: 1.12
"""
import streamsx.topology.topology
assert isinstance(topology, streamsx.topology.topology.Topology)
tkinfo = dict()
tkinfo['name'] = name
tkinfo['version'] = version.replace(' ', '')
topology.graph._spl_toolkits.append(tkinfo)
| # coding=utf-8
# Licensed Materials - Property of IBM
# Copyright IBM Corp. 2017,2019
"""
SPL toolkit integration.
********
Overview
********
SPL operators are defined by an SPL toolkit. When a ``Topology``
contains invocations of SPL operators, their defining toolkit must
be made known using :py:func:`add_toolkit`.
Toolkits shipped with the IBM Streams product under
``$STREAMS_INSTALL/toolkits`` are implictly known and
must not be added through ``add_toolkit``.
"""
__all__ = ['add_toolkit', 'add_toolkit_dependency']
import os
def add_toolkit(topology, location):
"""Add an SPL toolkit to a topology.
Args:
topology(Topology): Topology to include toolkit in.
location(str): Location of the toolkit directory.
"""
import streamsx.topology.topology
assert isinstance(topology, streamsx.topology.topology.Topology)
tkinfo = dict()
tkinfo['root'] = os.path.abspath(location)
topology.graph._spl_toolkits.append(tkinfo)
def add_toolkit_dependency(topology, name, version):
"""Add a version dependency on an SPL toolkit to a topology.
To specify a range of versions for the dependent toolkits,
use brackets (``[]``) or parentheses. Use brackets to represent an
inclusive range and parentheses to represent an exclusive range.
The following examples describe how to specify a dependency on a range of toolkit versions:
* ``[1.0.0, 2.0.0]`` represents a dependency on toolkit versions 1.0.0 - 2.0.0, both inclusive.
* ``[1.0.0, 2.0.0)`` represents a dependency on toolkit versions 1.0.0 or later, but not including 2.0.0.
* ``(1.0.0, 2.0.0]`` represents a dependency on toolkits versions later than 1.0.0 and less than or equal to 2.0.0.
* ``(1.0.0, 2.0.0)`` represents a dependency on toolkit versions 1.0.0 - 2.0.0, both exclusive.
Args:
topology(Topology): Topology to include toolkit in.
name(str): Toolkit name.
version(str): Toolkit version dependency.
.. seealso::
`Toolkit information model file <https://www.ibm.com/support/knowledgecenter/SSCRJU_4.3.0/com.ibm.streams.dev.doc/doc/toolkitinformationmodelfile.html>`_
.. versionadded:: 1.12
"""
import streamsx.topology.topology
assert isinstance(topology, streamsx.topology.topology.Topology)
tkinfo = dict()
tkinfo['name'] = name
tkinfo['version'] = version
topology.graph._spl_toolkits.append(tkinfo)
| apache-2.0 | Python |
26dbff0a13207674250cb0019cdac07ee49e2530 | Bump version string on setup.py up to 0.2.2 | pipoket/booru-grabber | src/setup.py | src/setup.py | # -*- coding: cp949 -*-
import os
import sys
import glob
import py2exe
import subprocess
from distutils.core import setup
from cx_Freeze import setup, Executable
if sys.platform != "win32":
print "This script is only for Win32 build as of now!"
sys.exit(1)
base = "Win32GUI"
setup(
name = "booru-grabber",
version = "0.2.2",
description = "Booru Grabber",
options = {
"build_exe": {
"includes": ["os"],
"excludes": ["doctest", "pdb", "unittest", "difflib",
"optparse", "_gtkagg", "_tkagg",
"bsddb", "curses", "email", "pywin.debugger", "tcl"],
"compressed": True,
"optimize": 1,
"create_shared_zip": True,
"include_msvcr": True,
},
},
executables = [Executable("grabber.py", base=base)],
)
# Compress the files with UPX
output_path = os.path.join("build", "exe.win32-2.7")
upx_base_args = ["upx", '--best', '--no-progress']
for filename in os.listdir(output_path):
if (filename == "grabber.exe" or
os.path.splitext(filename)[1].lower() in ('.exe','.dll','.pyd', '.so')):
filepath = os.path.join(output_path, filename)
args = ["upx", "--best", filepath]
subprocess.call(args)
| # -*- coding: cp949 -*-
import os
import sys
import glob
import py2exe
import subprocess
from distutils.core import setup
from cx_Freeze import setup, Executable
if sys.platform != "win32":
print "This script is only for Win32 build as of now!"
sys.exit(1)
base = "Win32GUI"
setup(
name = "booru-grabber",
version = "0.2.0",
description = "Booru Grabber",
options = {
"build_exe": {
"includes": ["os"],
"excludes": ["doctest", "pdb", "unittest", "difflib",
"optparse", "_gtkagg", "_tkagg",
"bsddb", "curses", "email", "pywin.debugger", "tcl"],
"compressed": True,
"optimize": 1,
"create_shared_zip": True,
"include_msvcr": True,
},
},
executables = [Executable("grabber.py", base=base)],
)
# Compress the files with UPX
output_path = os.path.join("build", "exe.win32-2.7")
upx_base_args = ["upx", '--best', '--no-progress']
for filename in os.listdir(output_path):
if (filename == "grabber.exe" or
os.path.splitext(filename)[1].lower() in ('.exe','.dll','.pyd', '.so')):
filepath = os.path.join(output_path, filename)
args = ["upx", "--best", filepath]
subprocess.call(args)
| mit | Python |
054a45fa7a4005ab6eb08bfb6a33e4e984526c05 | Change version to 0.4.2 | googlearchive/pywebsocket,GoogleChromeLabs/pywebsocket3,GoogleChromeLabs/pywebsocket3,google/pywebsocket,googlearchive/pywebsocket,googlearchive/pywebsocket,google/pywebsocket,GoogleChromeLabs/pywebsocket3,google/pywebsocket | src/setup.py | src/setup.py | #!/usr/bin/env python
#
# Copyright 2009, Google Inc.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following disclaimer
# in the documentation and/or other materials provided with the
# distribution.
# * Neither the name of Google Inc. nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
"""Set up script for mod_pywebsocket.
"""
from distutils.core import setup
import sys
_PACKAGE_NAME = 'mod_pywebsocket'
if sys.version < '2.3':
print >>sys.stderr, '%s requires Python 2.3 or later.' % _PACKAGE_NAME
sys.exit(1)
setup(author='Yuzo Fujishima',
author_email='yuzo@chromium.org',
description='Web Socket extension for Apache HTTP Server.',
long_description=(
'mod_pywebsocket is an Apache HTTP Server extension for '
'Web Socket (http://tools.ietf.org/html/'
'draft-hixie-thewebsocketprotocol). '
'See mod_pywebsocket/__init__.py for more detail.'),
license='See COPYING',
name=_PACKAGE_NAME,
packages=[_PACKAGE_NAME],
url='http://code.google.com/p/pywebsocket/',
version='0.4.2',
)
# vi:sts=4 sw=4 et
| #!/usr/bin/env python
#
# Copyright 2009, Google Inc.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following disclaimer
# in the documentation and/or other materials provided with the
# distribution.
# * Neither the name of Google Inc. nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
"""Set up script for mod_pywebsocket.
"""
from distutils.core import setup
import sys
_PACKAGE_NAME = 'mod_pywebsocket'
if sys.version < '2.3':
print >>sys.stderr, '%s requires Python 2.3 or later.' % _PACKAGE_NAME
sys.exit(1)
setup(author='Yuzo Fujishima',
author_email='yuzo@chromium.org',
description='Web Socket extension for Apache HTTP Server.',
long_description=(
'mod_pywebsocket is an Apache HTTP Server extension for '
'Web Socket (http://tools.ietf.org/html/'
'draft-hixie-thewebsocketprotocol). '
'See mod_pywebsocket/__init__.py for more detail.'),
license='See COPYING',
name=_PACKAGE_NAME,
packages=[_PACKAGE_NAME],
url='http://code.google.com/p/pywebsocket/',
version='0.4.1',
)
# vi:sts=4 sw=4 et
| bsd-3-clause | Python |
ae4148f205cdb01a30016c3e2b46ecb05a2dd4fb | Fix topic | bit-bots/bitbots_behaviour | bitbots_body_behavior/src/bitbots_body_behavior/body_behavior.py | bitbots_body_behavior/src/bitbots_body_behavior/body_behavior.py | #!/usr/bin/env python3
"""
BehaviorModule
^^^^^^^^^^^^^^
.. moduleauthor:: Martin Poppinga <1popping@informatik.uni-hamburg.de>
Starts the body behavior
"""
import actionlib
import os
import rospy
from geometry_msgs.msg import PoseWithCovarianceStamped
from tf2_geometry_msgs import PoseStamped
from humanoid_league_msgs.msg import GameState, HeadMode, Strategy, TeamData,\
PlayAnimationAction, GoalPartsRelative, RobotControlState, PoseWithCertainty, PoseWithCertaintyArray
from move_base_msgs.msg import MoveBaseActionFeedback
from actionlib_msgs.msg import GoalID
from bitbots_blackboard.blackboard import BodyBlackboard
from dynamic_stack_decider import dsd
if __name__ == "__main__":
rospy.init_node("Bodybehavior")
D = dsd.DSD(BodyBlackboard(), '/debug/dsd/body_behavior')
D.blackboard.team_data.strategy_sender = rospy.Publisher("strategy", Strategy, queue_size=2)
D.blackboard.blackboard.head_pub = rospy.Publisher("head_mode", HeadMode, queue_size=10)
D.blackboard.pathfinding.pathfinding_pub = rospy.Publisher('move_base_simple/goal', PoseStamped, queue_size=1)
D.blackboard.pathfinding.pathfinding_cancel_pub = rospy.Publisher('move_base/cancel', GoalID, queue_size=1)
dirname = os.path.dirname(os.path.realpath(__file__))
D.register_actions(os.path.join(dirname, "actions"))
D.register_decisions(os.path.join(dirname, "decisions"))
D.load_behavior(os.path.join(dirname, "main.dsd"))
# TODO: callbacks away from the blackboard!
rospy.Subscriber("balls_relative", PoseWithCertaintyArray, D.blackboard.world_model.balls_callback)
rospy.Subscriber("goal_posts_relative", GoalPartsRelative, D.blackboard.world_model.goal_parts_callback)
rospy.Subscriber("gamestate", GameState, D.blackboard.gamestate.gamestate_callback)
rospy.Subscriber("team_data", TeamData, D.blackboard.team_data.team_data_callback)
rospy.Subscriber("amcl_pose", PoseWithCovarianceStamped, D.blackboard.world_model.position_callback)
rospy.Subscriber("robot_state", RobotControlState, D.blackboard.blackboard.robot_state_callback)
rospy.Subscriber("move_base/feedback", MoveBaseActionFeedback, D.blackboard.pathfinding.feedback_callback)
D.blackboard.animation.server = actionlib.SimpleActionClient("bitbots_animation", PlayAnimationAction)
rate = rospy.Rate(5)
while not rospy.is_shutdown():
D.update()
rate.sleep()
| #!/usr/bin/env python3
"""
BehaviorModule
^^^^^^^^^^^^^^
.. moduleauthor:: Martin Poppinga <1popping@informatik.uni-hamburg.de>
Starts the body behavior
"""
import actionlib
import os
import rospy
from geometry_msgs.msg import PoseWithCovarianceStamped
from tf2_geometry_msgs import PoseStamped
from humanoid_league_msgs.msg import GameState, HeadMode, Strategy, TeamData,\
PlayAnimationAction, GoalPartsRelative, RobotControlState, PoseWithCertainty, PoseWithCertaintyArray
from move_base_msgs.msg import MoveBaseActionFeedback
from actionlib_msgs.msg import GoalID
from bitbots_blackboard.blackboard import BodyBlackboard
from dynamic_stack_decider import dsd
if __name__ == "__main__":
rospy.init_node("Bodybehavior")
D = dsd.DSD(BodyBlackboard(), '/debug/dsd/body_behavior')
D.blackboard.team_data.strategy_sender = rospy.Publisher("strategy", Strategy, queue_size=2)
D.blackboard.blackboard.head_pub = rospy.Publisher("head_mode", HeadMode, queue_size=10)
D.blackboard.pathfinding.pathfinding_pub = rospy.Publisher('move_base_simple/goal', PoseStamped, queue_size=1)
D.blackboard.pathfinding.pathfinding_cancel_pub = rospy.Publisher('move_base/cancel', GoalID, queue_size=1)
dirname = os.path.dirname(os.path.realpath(__file__))
D.register_actions(os.path.join(dirname, "actions"))
D.register_decisions(os.path.join(dirname, "decisions"))
D.load_behavior(os.path.join(dirname, "main.dsd"))
# TODO: callbacks away from the blackboard!
rospy.Subscriber("balls_relative", PoseWithCertaintyArray, D.blackboard.world_model.balls_callback)
rospy.Subscriber("goal_parts_relative", GoalPartsRelative, D.blackboard.world_model.goal_parts_callback)
rospy.Subscriber("gamestate", GameState, D.blackboard.gamestate.gamestate_callback)
rospy.Subscriber("team_data", TeamData, D.blackboard.team_data.team_data_callback)
rospy.Subscriber("amcl_pose", PoseWithCovarianceStamped, D.blackboard.world_model.position_callback)
rospy.Subscriber("robot_state", RobotControlState, D.blackboard.blackboard.robot_state_callback)
rospy.Subscriber("move_base/feedback", MoveBaseActionFeedback, D.blackboard.pathfinding.feedback_callback)
D.blackboard.animation.server = actionlib.SimpleActionClient("bitbots_animation", PlayAnimationAction)
rate = rospy.Rate(5)
while not rospy.is_shutdown():
D.update()
rate.sleep()
| bsd-3-clause | Python |
abdd39654a67ea66dde9fbd5cdd253b14551e1ec | Simplify output message | elifesciences/builder,elifesciences/builder | src/tasks.py | src/tasks.py | """Miscellanious admin tasks.
If you find certain 'types' of tasks accumulating, they might be
better off in their own module. This module really is for stuff
that has no home."""
from buildercore import core, bootstrap
from fabric.api import local, task
from utils import confirm, errcho
from decorators import requires_aws_stack, debugtask
from buildercore import bakery
from buildercore.core import stack_conn
from buildercore.context_handler import load_context
@task
@requires_aws_stack
def create_ami(stackname, name=None):
pname = core.project_name_from_stackname(stackname)
msg = "this will create a new AMI for the project %r" % pname
confirm(msg)
amiid = bakery.create_ami(stackname, name)
print(amiid)
errcho('update project file with new ami %s. these changes must be merged and committed manually' % amiid)
#
#
#
@debugtask
def diff_builder_config():
"helps keep three"
file_sets = [
[
"./builder-private-example/pillar/elife.sls",
"./cloned-projects/builder-base-formula/pillar/elife.sls",
"./builder-private/pillar/elife.sls"
],
[
"./projects/elife.yaml",
"./builder-private/projects/elife-private.yaml",
]
]
for paths in file_sets:
local("meld " + " ".join(paths))
@task
@requires_aws_stack
def repair_cfn_info(stackname):
with stack_conn(stackname):
bootstrap.write_environment_info(stackname, overwrite=True)
@task
@requires_aws_stack
def repair_context(stackname):
# triggers the workaround of downloading it from EC2 and persisting it
load_context(stackname)
@task
@requires_aws_stack
def remove_minion_key(stackname):
bootstrap.remove_minion_key(stackname)
| """Miscellanious admin tasks.
If you find certain 'types' of tasks accumulating, they might be
better off in their own module. This module really is for stuff
that has no home."""
from buildercore import core, bootstrap
from fabric.api import local, task
from utils import confirm, errcho
from decorators import requires_aws_stack, debugtask
from buildercore import bakery
from buildercore.core import stack_conn
from buildercore.context_handler import load_context
@task
@requires_aws_stack
def create_ami(stackname, name=None):
pname = core.project_name_from_stackname(stackname)
msg = "this will create a new AMI for the project %r" % pname
confirm(msg)
amiid = bakery.create_ami(stackname, name)
errcho('AWS has created AMI with id:')
print(amiid)
errcho('update project file with new ami %s. these changes must be merged and committed manually' % amiid)
#
#
#
@debugtask
def diff_builder_config():
"helps keep three"
file_sets = [
[
"./builder-private-example/pillar/elife.sls",
"./cloned-projects/builder-base-formula/pillar/elife.sls",
"./builder-private/pillar/elife.sls"
],
[
"./projects/elife.yaml",
"./builder-private/projects/elife-private.yaml",
]
]
for paths in file_sets:
local("meld " + " ".join(paths))
@task
@requires_aws_stack
def repair_cfn_info(stackname):
with stack_conn(stackname):
bootstrap.write_environment_info(stackname, overwrite=True)
@task
@requires_aws_stack
def repair_context(stackname):
# triggers the workaround of downloading it from EC2 and persisting it
load_context(stackname)
@task
@requires_aws_stack
def remove_minion_key(stackname):
bootstrap.remove_minion_key(stackname)
| mit | Python |
db8aa8a85069452a01303776e6213eefeaf43b22 | Update P04_readWord moved spacing to separate properties | JoseALermaIII/python-tutorials,JoseALermaIII/python-tutorials | books/AutomateTheBoringStuffWithPython/Chapter13/P04_readWord.py | books/AutomateTheBoringStuffWithPython/Chapter13/P04_readWord.py | # This program uses the python-docx module to manipulate Word documents
import docx
doc = docx.Document("demo.docx")
print(len(doc.paragraphs))
print(doc.paragraphs[0].text)
print(doc.paragraphs[1].text)
print(len(doc.paragraphs[1].runs))
print(doc.paragraphs[1].runs[0].text)
print(doc.paragraphs[1].runs[1].text)
print(doc.paragraphs[1].runs[2].text)
print(doc.paragraphs[1].runs[3].text)
print(doc.paragraphs[1].runs[4].text)
| # This program uses the python-docx module to manipulate Word documents
import docx
doc = docx.Document("demo.docx")
print(len(doc.paragraphs))
print(doc.paragraphs[0].text)
print(doc.paragraphs[1].text)
print(len(doc.paragraphs[1].runs))
print(doc.paragraphs[1].runs[0].text)
print(doc.paragraphs[1].runs[1].text)
print(doc.paragraphs[1].runs[2].text)
print(doc.paragraphs[1].runs[3].text)
print(doc.paragraphs[1].runs[4].text)
| mit | Python |
7e55437db6685f0e8c494159fa9cf4cedbb45810 | Allow retry_failed_photo_verifications to take a list of receipt_ids to retry | mitocw/edx-platform,CourseTalk/edx-platform,naresh21/synergetics-edx-platform,miptliot/edx-platform,JioEducation/edx-platform,msegado/edx-platform,lduarte1991/edx-platform,simbs/edx-platform,chrisndodge/edx-platform,hamzehd/edx-platform,iivic/BoiseStateX,cecep-edu/edx-platform,devs1991/test_edx_docmode,synergeticsedx/deployment-wipro,UOMx/edx-platform,analyseuc3m/ANALYSE-v1,bigdatauniversity/edx-platform,prarthitm/edxplatform,jolyonb/edx-platform,romain-li/edx-platform,msegado/edx-platform,ZLLab-Mooc/edx-platform,tanmaykm/edx-platform,UOMx/edx-platform,simbs/edx-platform,bigdatauniversity/edx-platform,philanthropy-u/edx-platform,jbzdak/edx-platform,inares/edx-platform,Stanford-Online/edx-platform,hamzehd/edx-platform,teltek/edx-platform,JioEducation/edx-platform,iivic/BoiseStateX,zubair-arbi/edx-platform,xingyepei/edx-platform,ahmadiga/min_edx,prarthitm/edxplatform,shurihell/testasia,fintech-circle/edx-platform,cecep-edu/edx-platform,doganov/edx-platform,Livit/Livit.Learn.EdX,raccoongang/edx-platform,gymnasium/edx-platform,Edraak/edx-platform,jzoldak/edx-platform,EDUlib/edx-platform,lduarte1991/edx-platform,stvstnfrd/edx-platform,CourseTalk/edx-platform,marcore/edx-platform,arbrandes/edx-platform,ahmadiga/min_edx,deepsrijit1105/edx-platform,gymnasium/edx-platform,simbs/edx-platform,devs1991/test_edx_docmode,Livit/Livit.Learn.EdX,jbzdak/edx-platform,teltek/edx-platform,Endika/edx-platform,adoosii/edx-platform,wwj718/edx-platform,Edraak/circleci-edx-platform,antoviaque/edx-platform,marcore/edx-platform,cognitiveclass/edx-platform,mcgachey/edx-platform,TeachAtTUM/edx-platform,msegado/edx-platform,jzoldak/edx-platform,devs1991/test_edx_docmode,devs1991/test_edx_docmode,naresh21/synergetics-edx-platform,amir-qayyum-khan/edx-platform,stvstnfrd/edx-platform,cecep-edu/edx-platform,JCBarahona/edX,adoosii/edx-platform,RPI-OPENEDX/edx-platform,Ayub-Khan/ed
x-platform,IndonesiaX/edx-platform,raccoongang/edx-platform,devs1991/test_edx_docmode,ahmadiga/min_edx,inares/edx-platform,BehavioralInsightsTeam/edx-platform,ZLLab-Mooc/edx-platform,xingyepei/edx-platform,eduNEXT/edx-platform,TeachAtTUM/edx-platform,EDUlib/edx-platform,solashirai/edx-platform,iivic/BoiseStateX,Edraak/edraak-platform,Edraak/circleci-edx-platform,a-parhom/edx-platform,MakeHer/edx-platform,mbareta/edx-platform-ft,edx/edx-platform,JCBarahona/edX,appsembler/edx-platform,synergeticsedx/deployment-wipro,waheedahmed/edx-platform,kursitet/edx-platform,cpennington/edx-platform,tanmaykm/edx-platform,Endika/edx-platform,cognitiveclass/edx-platform,RPI-OPENEDX/edx-platform,arbrandes/edx-platform,CredoReference/edx-platform,shurihell/testasia,10clouds/edx-platform,jjmiranda/edx-platform,IndonesiaX/edx-platform,Ayub-Khan/edx-platform,vikas1885/test1,IndonesiaX/edx-platform,edx-solutions/edx-platform,amir-qayyum-khan/edx-platform,kursitet/edx-platform,synergeticsedx/deployment-wipro,jolyonb/edx-platform,IndonesiaX/edx-platform,gsehub/edx-platform,CourseTalk/edx-platform,cpennington/edx-platform,JCBarahona/edX,CredoReference/edx-platform,proversity-org/edx-platform,longmen21/edx-platform,shurihell/testasia,defance/edx-platform,ovnicraft/edx-platform,amir-qayyum-khan/edx-platform,10clouds/edx-platform,kmoocdev2/edx-platform,bigdatauniversity/edx-platform,BehavioralInsightsTeam/edx-platform,mitocw/edx-platform,raccoongang/edx-platform,ovnicraft/edx-platform,caesar2164/edx-platform,mbareta/edx-platform-ft,Edraak/edx-platform,longmen21/edx-platform,inares/edx-platform,pepeportela/edx-platform,zhenzhai/edx-platform,wwj718/edx-platform,ampax/edx-platform,romain-li/edx-platform,itsjeyd/edx-platform,philanthropy-u/edx-platform,EDUlib/edx-platform,alu042/edx-platform,procangroup/edx-platform,ahmedaljazzar/edx-platform,teltek/edx-platform,marcore/edx-platform,zhenzhai/edx-platform,TeachAtTUM/edx-platform,iivic/BoiseStateX,defance/edx-platform,halvertoluke/edx-platform,hastex
o/edx-platform,solashirai/edx-platform,arbrandes/edx-platform,Lektorium-LLC/edx-platform,jzoldak/edx-platform,gsehub/edx-platform,zhenzhai/edx-platform,franosincic/edx-platform,inares/edx-platform,waheedahmed/edx-platform,10clouds/edx-platform,kursitet/edx-platform,jjmiranda/edx-platform,analyseuc3m/ANALYSE-v1,Stanford-Online/edx-platform,Edraak/circleci-edx-platform,Ayub-Khan/edx-platform,romain-li/edx-platform,kmoocdev2/edx-platform,pepeportela/edx-platform,xingyepei/edx-platform,a-parhom/edx-platform,antoviaque/edx-platform,fintech-circle/edx-platform,nttks/edx-platform,cognitiveclass/edx-platform,edx/edx-platform,eduNEXT/edunext-platform,IONISx/edx-platform,pabloborrego93/edx-platform,franosincic/edx-platform,waheedahmed/edx-platform,zhenzhai/edx-platform,louyihua/edx-platform,a-parhom/edx-platform,alu042/edx-platform,miptliot/edx-platform,halvertoluke/edx-platform,ZLLab-Mooc/edx-platform,UOMx/edx-platform,MakeHer/edx-platform,ESOedX/edx-platform,romain-li/edx-platform,gsehub/edx-platform,bigdatauniversity/edx-platform,cpennington/edx-platform,BehavioralInsightsTeam/edx-platform,vikas1885/test1,gymnasium/edx-platform,mitocw/edx-platform,lduarte1991/edx-platform,MakeHer/edx-platform,CredoReference/edx-platform,pomegranited/edx-platform,RPI-OPENEDX/edx-platform,zubair-arbi/edx-platform,itsjeyd/edx-platform,proversity-org/edx-platform,IONISx/edx-platform,philanthropy-u/edx-platform,edx/edx-platform,jjmiranda/edx-platform,procangroup/edx-platform,ZLLab-Mooc/edx-platform,cognitiveclass/edx-platform,kursitet/edx-platform,jzoldak/edx-platform,Edraak/edx-platform,eduNEXT/edx-platform,stvstnfrd/edx-platform,cpennington/edx-platform,zubair-arbi/edx-platform,UOMx/edx-platform,simbs/edx-platform,eduNEXT/edunext-platform,doganov/edx-platform,longmen21/edx-platform,pepeportela/edx-platform,10clouds/edx-platform,ampax/edx-platform,itsjeyd/edx-platform,inares/edx-platform,MakeHer/edx-platform,devs1991/test_edx_docmode,tanmaykm/edx-platform,Edraak/edraak-platform,nttks/edx-platf
orm,IONISx/edx-platform,jolyonb/edx-platform,miptliot/edx-platform,Lektorium-LLC/edx-platform,proversity-org/edx-platform,franosincic/edx-platform,chrisndodge/edx-platform,amir-qayyum-khan/edx-platform,longmen21/edx-platform,lduarte1991/edx-platform,Edraak/edraak-platform,pomegranited/edx-platform,Edraak/edx-platform,hastexo/edx-platform,devs1991/test_edx_docmode,edx-solutions/edx-platform,CredoReference/edx-platform,Lektorium-LLC/edx-platform,hastexo/edx-platform,IndonesiaX/edx-platform,cecep-edu/edx-platform,tanmaykm/edx-platform,waheedahmed/edx-platform,devs1991/test_edx_docmode,shabab12/edx-platform,simbs/edx-platform,appsembler/edx-platform,ahmadiga/min_edx,vikas1885/test1,Ayub-Khan/edx-platform,JioEducation/edx-platform,angelapper/edx-platform,pabloborrego93/edx-platform,xingyepei/edx-platform,procangroup/edx-platform,angelapper/edx-platform,IONISx/edx-platform,zhenzhai/edx-platform,louyihua/edx-platform,cecep-edu/edx-platform,mcgachey/edx-platform,mbareta/edx-platform-ft,ampax/edx-platform,marcore/edx-platform,ahmedaljazzar/edx-platform,deepsrijit1105/edx-platform,RPI-OPENEDX/edx-platform,mcgachey/edx-platform,pomegranited/edx-platform,ahmedaljazzar/edx-platform,Ayub-Khan/edx-platform,halvertoluke/edx-platform,edx-solutions/edx-platform,shurihell/testasia,adoosii/edx-platform,ZLLab-Mooc/edx-platform,analyseuc3m/ANALYSE-v1,EDUlib/edx-platform,doganov/edx-platform,hamzehd/edx-platform,waheedahmed/edx-platform,louyihua/edx-platform,deepsrijit1105/edx-platform,pomegranited/edx-platform,eduNEXT/edx-platform,edx-solutions/edx-platform,JCBarahona/edX,louyihua/edx-platform,synergeticsedx/deployment-wipro,prarthitm/edxplatform,antoviaque/edx-platform,doganov/edx-platform,shabab12/edx-platform,jbzdak/edx-platform,ovnicraft/edx-platform,alexthered/kienhoc-platform,franosincic/edx-platform,Endika/edx-platform,Livit/Livit.Learn.EdX,gymnasium/edx-platform,Stanford-Online/edx-platform,stvstnfrd/edx-platform,CourseTalk/edx-platform,xingyepei/edx-platform,bigdatauniversity/ed
x-platform,appsembler/edx-platform,miptliot/edx-platform,franosincic/edx-platform,mbareta/edx-platform-ft,zubair-arbi/edx-platform,adoosii/edx-platform,proversity-org/edx-platform,mitocw/edx-platform,mcgachey/edx-platform,chrisndodge/edx-platform,cognitiveclass/edx-platform,alexthered/kienhoc-platform,ESOedX/edx-platform,caesar2164/edx-platform,alexthered/kienhoc-platform,caesar2164/edx-platform,jbzdak/edx-platform,fintech-circle/edx-platform,Edraak/circleci-edx-platform,Edraak/edx-platform,IONISx/edx-platform,ESOedX/edx-platform,procangroup/edx-platform,zubair-arbi/edx-platform,solashirai/edx-platform,alu042/edx-platform,ovnicraft/edx-platform,caesar2164/edx-platform,hastexo/edx-platform,shabab12/edx-platform,iivic/BoiseStateX,TeachAtTUM/edx-platform,halvertoluke/edx-platform,kmoocdev2/edx-platform,solashirai/edx-platform,angelapper/edx-platform,teltek/edx-platform,defance/edx-platform,jolyonb/edx-platform,wwj718/edx-platform,ahmedaljazzar/edx-platform,appsembler/edx-platform,shabab12/edx-platform,romain-li/edx-platform,msegado/edx-platform,naresh21/synergetics-edx-platform,JCBarahona/edX,pomegranited/edx-platform,alexthered/kienhoc-platform,alexthered/kienhoc-platform,BehavioralInsightsTeam/edx-platform,Edraak/circleci-edx-platform,ESOedX/edx-platform,analyseuc3m/ANALYSE-v1,arbrandes/edx-platform,philanthropy-u/edx-platform,eduNEXT/edx-platform,gsehub/edx-platform,MakeHer/edx-platform,wwj718/edx-platform,Edraak/edraak-platform,defance/edx-platform,prarthitm/edxplatform,shurihell/testasia,naresh21/synergetics-edx-platform,Stanford-Online/edx-platform,chrisndodge/edx-platform,vikas1885/test1,kmoocdev2/edx-platform,kursitet/edx-platform,alu042/edx-platform,angelapper/edx-platform,pabloborrego93/edx-platform,fintech-circle/edx-platform,antoviaque/edx-platform,jbzdak/edx-platform,pabloborrego93/edx-platform,ovnicraft/edx-platform,longmen21/edx-platform,nttks/edx-platform,doganov/edx-platform,raccoongang/edx-platform,solashirai/edx-platform,kmoocdev2/edx-platform,pepepo
rtela/edx-platform,edx/edx-platform,jjmiranda/edx-platform,a-parhom/edx-platform,adoosii/edx-platform,nttks/edx-platform,ampax/edx-platform,Livit/Livit.Learn.EdX,JioEducation/edx-platform,mcgachey/edx-platform,itsjeyd/edx-platform,nttks/edx-platform,deepsrijit1105/edx-platform,eduNEXT/edunext-platform,hamzehd/edx-platform,RPI-OPENEDX/edx-platform,ahmadiga/min_edx,Endika/edx-platform,Lektorium-LLC/edx-platform,eduNEXT/edunext-platform,msegado/edx-platform,hamzehd/edx-platform,halvertoluke/edx-platform,wwj718/edx-platform,vikas1885/test1 | lms/djangoapps/verify_student/management/commands/retry_failed_photo_verifications.py | lms/djangoapps/verify_student/management/commands/retry_failed_photo_verifications.py | """
Django admin commands related to verify_student
"""
from verify_student.models import SoftwareSecurePhotoVerification
from django.core.management.base import BaseCommand
class Command(BaseCommand):
"""
This method finds those PhotoVerifications with a status of
MUST_RETRY and attempts to verify them.
"""
args = "<SoftwareSecurePhotoVerification id, SoftwareSecurePhotoVerification id, ...>"
help = (
"Retries SoftwareSecurePhotoVerifications passed as "
"arguments, or if no arguments are supplied, all that "
"are in a state of 'must_retry'"
)
def handle(self, *args, **options):
if args:
attempts_to_retry = SoftwareSecurePhotoVerification.objects.filter(
receipt_id__in=args
)
force_must_retry = True
else:
attempts_to_retry = SoftwareSecurePhotoVerification.objects.filter(status='must_retry')
force_must_retry = False
print("Attempting to retry {0} failed PhotoVerification submissions".format(len(attempts_to_retry)))
for index, attempt in enumerate(attempts_to_retry):
print("Retrying submission #{0} (ID: {1}, User: {2})".format(index, attempt.id, attempt.user))
# Set the attempts status to 'must_retry' so that we can re-submit it
if force_must_retry:
attempt.status = 'must_retry'
attempt.submit(copy_id_photo_from=attempt.copy_id_photo_from)
print("Retry result: {0}".format(attempt.status))
print("Done resubmitting failed photo verifications")
| """
Django admin commands related to verify_student
"""
from verify_student.models import SoftwareSecurePhotoVerification
from django.core.management.base import BaseCommand
class Command(BaseCommand):
"""
This method finds those PhotoVerifications with a status of
MUST_RETRY and attempts to verify them.
"""
help = 'Retries SoftwareSecurePhotoVerifications that are in a state of \'must_retry\''
def handle(self, *args, **options):
attempts_to_retry = SoftwareSecurePhotoVerification.objects.filter(status='must_retry')
print("Attempting to retry {0} failed PhotoVerification submissions".format(len(attempts_to_retry)))
for index, attempt in enumerate(attempts_to_retry):
print("Retrying submission #{0} (ID: {1}, User: {2})".format(index, attempt.id, attempt.user))
attempt.submit(copy_id_photo_from=attempt.copy_id_photo_from)
print("Retry result: {0}".format(attempt.status))
print("Done resubmitting failed photo verifications")
| agpl-3.0 | Python |
7011fb0031da0f1ed2b65e3db5f6768ced74c36e | Add date field to unbalanced move line. Otherwise the following might happen: File "/path/to/odoo/addons/account_fiscal_year_closing/models/account_fiscalyear_closing.py", line 328, in button_calculate res = self.calculate() File "/path/to/odoo/addons/account_fiscal_year_closing/models/account_fiscalyear_closing.py", line 315, in calculate return self._show_unbalanced_move_wizard(data) File "/path/to/odoo/addons/account_fiscal_year_closing/models/account_fiscalyear_closing.py", line 295, in _show_unbalanced_move_wizard wizard = self.env["account.fiscalyear.closing.unbalanced.move"].create(data) [...] ValueError: Invalid field 'date' on model 'account.fiscalyear.closing.unbalanced.move.line' | OCA/account-closing,OCA/account-closing | account_fiscal_year_closing/wizards/account_fiscal_year_closing_unbalanced_move.py | account_fiscal_year_closing/wizards/account_fiscal_year_closing_unbalanced_move.py | # License AGPL-3.0 or later (http://www.gnu.org/licenses/agpl).
from odoo import fields, models
class AccountFiscalYearClosingUnbalancedMove(models.TransientModel):
_name = "account.fiscalyear.closing.unbalanced.move"
_description = "Account fiscalyear closing unbalanced move"
journal_id = fields.Many2one(
comodel_name="account.journal",
string="Journal",
readonly=True,
)
ref = fields.Char(
string="Reference",
readonly=True,
)
date = fields.Date(
string="Date",
readonly=True,
)
line_ids = fields.One2many(
comodel_name="account.fiscalyear.closing.unbalanced.move.line",
inverse_name="move_id",
string="Unbalanced move lines",
readonly=True,
)
class AccountFiscalYearClosingUnbalancedMoveLine(models.TransientModel):
_name = "account.fiscalyear.closing.unbalanced.move.line"
_description = "Account fiscalyear closing unbalanced move line"
move_id = fields.Many2one(
comodel_name="account.fiscalyear.closing.unbalanced.move",
string="Unbalanced move",
)
account_id = fields.Many2one(
comodel_name="account.account",
string="Account",
)
credit = fields.Float()
debit = fields.Float()
partner_id = fields.Many2one(
comodel_name="res.partner",
string="Partner",
)
name = fields.Char()
date = fields.Date(
string="Date",
)
| # License AGPL-3.0 or later (http://www.gnu.org/licenses/agpl).
from odoo import fields, models
class AccountFiscalYearClosingUnbalancedMove(models.TransientModel):
_name = "account.fiscalyear.closing.unbalanced.move"
_description = "Account fiscalyear closing unbalanced move"
journal_id = fields.Many2one(
comodel_name="account.journal",
string="Journal",
readonly=True,
)
ref = fields.Char(
string="Reference",
readonly=True,
)
date = fields.Date(
string="Date",
readonly=True,
)
line_ids = fields.One2many(
comodel_name="account.fiscalyear.closing.unbalanced.move.line",
inverse_name="move_id",
string="Unbalanced move lines",
readonly=True,
)
class AccountFiscalYearClosingUnbalancedMoveLine(models.TransientModel):
_name = "account.fiscalyear.closing.unbalanced.move.line"
_description = "Account fiscalyear closing unbalanced move line"
move_id = fields.Many2one(
comodel_name="account.fiscalyear.closing.unbalanced.move",
string="Unbalanced move",
)
account_id = fields.Many2one(
comodel_name="account.account",
string="Account",
)
credit = fields.Float()
debit = fields.Float()
partner_id = fields.Many2one(
comodel_name="res.partner",
string="Partner",
)
name = fields.Char()
| agpl-3.0 | Python |
cb32cf0f0160d1f582787119d0480de3ba8b9b53 | change the size of input to remedy OOM issue. | tensorflow/tensorflow,davidzchen/tensorflow,yongtang/tensorflow,sarvex/tensorflow,freedomtan/tensorflow,aam-at/tensorflow,tensorflow/tensorflow,paolodedios/tensorflow,annarev/tensorflow,karllessard/tensorflow,tensorflow/tensorflow-experimental_link_static_libraries_once,petewarden/tensorflow,aam-at/tensorflow,cxxgtxy/tensorflow,annarev/tensorflow,freedomtan/tensorflow,tensorflow/tensorflow-experimental_link_static_libraries_once,petewarden/tensorflow,annarev/tensorflow,tensorflow/tensorflow-experimental_link_static_libraries_once,Intel-tensorflow/tensorflow,gautam1858/tensorflow,aldian/tensorflow,tensorflow/tensorflow-pywrap_saved_model,gautam1858/tensorflow,annarev/tensorflow,aam-at/tensorflow,freedomtan/tensorflow,paolodedios/tensorflow,aam-at/tensorflow,frreiss/tensorflow-fred,karllessard/tensorflow,frreiss/tensorflow-fred,aldian/tensorflow,tensorflow/tensorflow-experimental_link_static_libraries_once,paolodedios/tensorflow,yongtang/tensorflow,gautam1858/tensorflow,tensorflow/tensorflow-pywrap_saved_model,cxxgtxy/tensorflow,tensorflow/tensorflow,yongtang/tensorflow,frreiss/tensorflow-fred,davidzchen/tensorflow,davidzchen/tensorflow,yongtang/tensorflow,gautam1858/tensorflow,freedomtan/tensorflow,karllessard/tensorflow,davidzchen/tensorflow,sarvex/tensorflow,tensorflow/tensorflow,davidzchen/tensorflow,cxxgtxy/tensorflow,petewarden/tensorflow,paolodedios/tensorflow,tensorflow/tensorflow-pywrap_saved_model,sarvex/tensorflow,cxxgtxy/tensorflow,annarev/tensorflow,frreiss/tensorflow-fred,tensorflow/tensorflow-pywrap_saved_model,freedomtan/tensorflow,yongtang/tensorflow,tensorflow/tensorflow,gautam1858/tensorflow,petewarden/tensorflow,aldian/tensorflow,petewarden/tensorflow,freedomtan/tensorflow,cxxgtxy/tensorflow,tensorflow/tensorflow-pywrap_tf_optimizer,tensorflow/tensorflow-pywrap_tf_optimizer,tensorflow/tensorflow-experimental_link_static_libraries_once,Intel-tensorflow/tensorf
low,tensorflow/tensorflow-experimental_link_static_libraries_once,aam-at/tensorflow,petewarden/tensorflow,davidzchen/tensorflow,aam-at/tensorflow,tensorflow/tensorflow-experimental_link_static_libraries_once,petewarden/tensorflow,karllessard/tensorflow,freedomtan/tensorflow,Intel-tensorflow/tensorflow,tensorflow/tensorflow,Intel-Corporation/tensorflow,paolodedios/tensorflow,tensorflow/tensorflow-pywrap_tf_optimizer,frreiss/tensorflow-fred,freedomtan/tensorflow,tensorflow/tensorflow-pywrap_tf_optimizer,yongtang/tensorflow,Intel-tensorflow/tensorflow,tensorflow/tensorflow-pywrap_tf_optimizer,gautam1858/tensorflow,karllessard/tensorflow,karllessard/tensorflow,petewarden/tensorflow,tensorflow/tensorflow-pywrap_saved_model,frreiss/tensorflow-fred,frreiss/tensorflow-fred,gautam1858/tensorflow,gautam1858/tensorflow,Intel-Corporation/tensorflow,tensorflow/tensorflow-pywrap_saved_model,tensorflow/tensorflow,yongtang/tensorflow,paolodedios/tensorflow,davidzchen/tensorflow,tensorflow/tensorflow-experimental_link_static_libraries_once,aam-at/tensorflow,aam-at/tensorflow,karllessard/tensorflow,gautam1858/tensorflow,tensorflow/tensorflow-experimental_link_static_libraries_once,Intel-tensorflow/tensorflow,petewarden/tensorflow,tensorflow/tensorflow-pywrap_saved_model,frreiss/tensorflow-fred,tensorflow/tensorflow-experimental_link_static_libraries_once,tensorflow/tensorflow-pywrap_tf_optimizer,tensorflow/tensorflow,petewarden/tensorflow,karllessard/tensorflow,tensorflow/tensorflow-pywrap_tf_optimizer,tensorflow/tensorflow-pywrap_tf_optimizer,tensorflow/tensorflow-pywrap_tf_optimizer,davidzchen/tensorflow,karllessard/tensorflow,davidzchen/tensorflow,annarev/tensorflow,gautam1858/tensorflow,paolodedios/tensorflow,aldian/tensorflow,sarvex/tensorflow,annarev/tensorflow,aldian/tensorflow,cxxgtxy/tensorflow,freedomtan/tensorflow,annarev/tensorflow,sarvex/tensorflow,Intel-tensorflow/tensorflow,freedomtan/tensorflow,Intel-tensorflow/tensorflow,freedomtan/tensorflow,yongtang/tensorflow,tens
orflow/tensorflow,aldian/tensorflow,karllessard/tensorflow,cxxgtxy/tensorflow,Intel-tensorflow/tensorflow,tensorflow/tensorflow-pywrap_tf_optimizer,annarev/tensorflow,tensorflow/tensorflow-pywrap_saved_model,petewarden/tensorflow,sarvex/tensorflow,aam-at/tensorflow,yongtang/tensorflow,davidzchen/tensorflow,Intel-tensorflow/tensorflow,yongtang/tensorflow,Intel-Corporation/tensorflow,paolodedios/tensorflow,Intel-Corporation/tensorflow,davidzchen/tensorflow,tensorflow/tensorflow,Intel-Corporation/tensorflow,aam-at/tensorflow,tensorflow/tensorflow-experimental_link_static_libraries_once,gautam1858/tensorflow,tensorflow/tensorflow-pywrap_saved_model,paolodedios/tensorflow,Intel-tensorflow/tensorflow,freedomtan/tensorflow,Intel-Corporation/tensorflow,davidzchen/tensorflow,yongtang/tensorflow,karllessard/tensorflow,aam-at/tensorflow,sarvex/tensorflow,paolodedios/tensorflow,aam-at/tensorflow,aldian/tensorflow,Intel-Corporation/tensorflow,frreiss/tensorflow-fred,tensorflow/tensorflow-pywrap_saved_model,frreiss/tensorflow-fred,frreiss/tensorflow-fred,tensorflow/tensorflow,cxxgtxy/tensorflow,petewarden/tensorflow,aldian/tensorflow,annarev/tensorflow,Intel-tensorflow/tensorflow,Intel-Corporation/tensorflow,frreiss/tensorflow-fred,tensorflow/tensorflow-pywrap_tf_optimizer,sarvex/tensorflow,gautam1858/tensorflow,tensorflow/tensorflow-pywrap_saved_model,annarev/tensorflow,paolodedios/tensorflow | tensorflow/python/keras/layers/preprocessing/image_preprocessing_distribution_test.py | tensorflow/python/keras/layers/preprocessing/image_preprocessing_distribution_test.py | # Copyright 2019 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for keras.layers.preprocessing.normalization."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
from tensorflow.python import keras
from tensorflow.python.data.ops import dataset_ops
from tensorflow.python.distribute import combinations
from tensorflow.python.distribute import strategy_combinations
from tensorflow.python.framework import dtypes
from tensorflow.python.keras import keras_parameterized
from tensorflow.python.keras.layers.preprocessing import image_preprocessing
from tensorflow.python.keras.layers.preprocessing import preprocessing_test_utils
from tensorflow.python.platform import test
@combinations.generate(
combinations.combine(
distribution=strategy_combinations.all_strategies,
mode=["eager", "graph"]))
class ImagePreprocessingDistributionTest(
keras_parameterized.TestCase,
preprocessing_test_utils.PreprocessingLayerTest):
def test_distribution(self, distribution):
# TODO(b/159738418): large image input causes OOM in ubuntu multi gpu.
np_images = np.random.random((32, 32, 32, 3)).astype(np.float32)
image_dataset = dataset_ops.Dataset.from_tensor_slices(np_images).batch(
16, drop_remainder=True)
with distribution.scope():
input_data = keras.Input(shape=(32, 32, 3), dtype=dtypes.float32)
image_preprocessor = keras.Sequential([
image_preprocessing.Resizing(height=256, width=256),
image_preprocessing.RandomCrop(height=224, width=224),
image_preprocessing.RandomTranslation(.1, .1),
image_preprocessing.RandomRotation(.2),
image_preprocessing.RandomFlip(),
image_preprocessing.RandomZoom(.2, .2)])
preprocessed_image = image_preprocessor(input_data)
flatten_layer = keras.layers.Flatten(data_format="channels_last")
output = flatten_layer(preprocessed_image)
cls_layer = keras.layers.Dense(units=1, activation="sigmoid")
output = cls_layer(output)
model = keras.Model(inputs=input_data, outputs=output)
model.compile(loss="binary_crossentropy")
_ = model.predict(image_dataset)
if __name__ == "__main__":
test.main()
| # Copyright 2019 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for keras.layers.preprocessing.normalization."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
from tensorflow.python import keras
from tensorflow.python.data.ops import dataset_ops
from tensorflow.python.distribute import combinations
from tensorflow.python.distribute import strategy_combinations
from tensorflow.python.framework import dtypes
from tensorflow.python.keras import keras_parameterized
from tensorflow.python.keras.layers.preprocessing import image_preprocessing
from tensorflow.python.keras.layers.preprocessing import preprocessing_test_utils
from tensorflow.python.platform import test
@combinations.generate(
combinations.combine(
distribution=strategy_combinations.all_strategies,
mode=["eager", "graph"]))
class ImagePreprocessingDistributionTest(
keras_parameterized.TestCase,
preprocessing_test_utils.PreprocessingLayerTest):
def test_distribution(self, distribution):
np_images = np.random.random((1000, 32, 32, 3)).astype(np.float32)
image_dataset = dataset_ops.Dataset.from_tensor_slices(np_images).batch(
32, drop_remainder=True)
with distribution.scope():
input_data = keras.Input(shape=(32, 32, 3), dtype=dtypes.float32)
image_preprocessor = keras.Sequential([
image_preprocessing.Resizing(height=256, width=256),
image_preprocessing.RandomCrop(height=224, width=224),
image_preprocessing.RandomTranslation(.1, .1),
image_preprocessing.RandomRotation(.2),
image_preprocessing.RandomFlip(),
image_preprocessing.RandomZoom(.2, .2)])
preprocessed_image = image_preprocessor(input_data)
flatten_layer = keras.layers.Flatten(data_format="channels_last")
output = flatten_layer(preprocessed_image)
cls_layer = keras.layers.Dense(units=1, activation="sigmoid")
output = cls_layer(output)
model = keras.Model(inputs=input_data, outputs=preprocessed_image)
model.compile(loss="binary_crossentropy")
_ = model.predict(image_dataset)
if __name__ == "__main__":
test.main()
| apache-2.0 | Python |
c5666f554b455e6ee3857aeeb415f9ac28dd5332 | Improve Vale of Glamorgan import script | DemocracyClub/UK-Polling-Stations,chris48s/UK-Polling-Stations,chris48s/UK-Polling-Stations,andylolz/UK-Polling-Stations,chris48s/UK-Polling-Stations,andylolz/UK-Polling-Stations,andylolz/UK-Polling-Stations,DemocracyClub/UK-Polling-Stations,DemocracyClub/UK-Polling-Stations | polling_stations/apps/data_collection/management/commands/import_vale_of_glamorgan.py | polling_stations/apps/data_collection/management/commands/import_vale_of_glamorgan.py | """
Import Vale of Glamorgan
"""
from data_collection.management.commands import BaseShpShpImporter
class Command(BaseShpShpImporter):
"""
Imports the Polling Station data from Vale of Glamorgan
"""
council_id = 'W06000014'
districts_name = 'Polling Districts'
stations_name = 'Polling Stations.shp'
def district_record_to_dict(self, record):
# this address is missing from the stations file
# so put it in an object property where we can
# pick it up in station_record_to_dict()
if record[0] == 'FD1':
self.fd1_address = record[4]
return {
'internal_council_id': record[0],
'name' : record[1],
'polling_station_id' : record[0]
}
def station_record_to_dict(self, record):
address = record[2]
if record[0] == 'FD1':
address = self.fd1_address
# extract postcode
try:
address_parts = address.split(', ')
postcode = address_parts[-1]
if postcode == 'Penarth' or\
postcode == 'The Knap' or\
postcode == 'Leckwith' or\
postcode == 'Barry' or\
postcode == 'Llantwit Major,':
postcode = ''
else:
del(address_parts[-1])
except TypeError:
postcode = ''
# format address
address = "\n".join(address_parts)
return {
'internal_council_id': record[0],
'postcode' : postcode,
'address' : address,
'polling_district_id': record[0]
}
| """
Import Vale of Glamorgan
"""
from data_collection.management.commands import BaseShpShpImporter
class Command(BaseShpShpImporter):
"""
Imports the Polling Station data from Vale of Glamorgan
"""
council_id = 'W06000014'
districts_name = 'Polling Districts'
stations_name = 'Polling Stations.shp'
def district_record_to_dict(self, record):
# this address is missing from the stations file
# so put it in an object property where we can
# pick it up in station_record_to_dict()
if record[0] == 'FD1':
self.fd1_address = record[4]
return {
'internal_council_id': record[0],
'name': record[1],
}
def station_record_to_dict(self, record):
address = record[2]
if record[0] == 'FD1':
address = self.fd1_address
# extract postcode
try:
address_parts = address.split(', ')
postcode = address_parts[-1]
if postcode == 'Penarth' or postcode == 'The Knap' or postcode == 'Leckwith':
postcode = ''
except TypeError:
postcode = ''
# format address
del(address_parts[-1])
address = "\n".join(address_parts)
return {
'internal_council_id': record[0],
'postcode' : postcode,
'address' : address
}
| bsd-3-clause | Python |
00b5edfc2087687b535428729a6ce566a9642a8b | test added to check if a container is found | rrpg/engine,rrpg/engine | core/commands/open.py | core/commands/open.py | # -*- coding: utf-8 -*-
from models import item_container, item
import core.command
from core.localisation import _
class open(core.command.command):
def run(self):
"""
c.run()
Open an item container in the area where the player is.
The result of the command is a list of the items of the container
"""
if len(self._args) == 0:
raise core.command.exception(_('ERROR_OPEN_NO_CONTAINER_PROVIDED'))
containers = item_container.factory.getAllFromIdAreaAndType(
self._player.getAreaId(),
self._args[0]
)
if len(containers) == 0:
raise core.command.exception(_('ERROR_OPEN_CONTAINER_NOT_AVAILABLE'))
elif len(containers) > 1:
raise core.command.exception(_('ERROR_OPEN_MULTIPLE_CONTAINERS_AVAILABLE'))
items = item.inventory.fromStr(containers[0]['items'])
result = {'container_type': self._args[0], 'items': list()}
for i in items:
it = item.model.loadById(i)
result['items'].append({
'name': it['name'],
'quantity': items[i]['quantity']
})
return result
def render(self, data):
output = list()
output.append(_('ITEMS_IN_CONTAINER_%s') % data['container_type'])
for i in data['items']:
output.append(str(i['quantity']).rjust(3) + ' ' + i['name'])
return '\n'.join(output)
| # -*- coding: utf-8 -*-
from models import item_container, item
import core.command
from core.localisation import _
class open(core.command.command):
def run(self):
"""
c.run()
Open an item container in the area where the player is.
The result of the command is a list of the items of the container
"""
if len(self._args) == 0:
raise core.command.exception(_('ERROR_OPEN_NO_CONTAINER_PROVIDED'))
containers = item_container.factory.getAllFromIdAreaAndType(
self._player.getAreaId(),
self._args[0]
)
if len(containers) > 1:
raise core.command.exception(_('ERROR_OPEN_MULTIPLE_CONTAINERS_AVAILABLE'))
items = item.inventory.fromStr(containers[0]['items'])
result = {'container_type': self._args[0], 'items': list()}
for i in items:
it = item.model.loadById(i)
result['items'].append({
'name': it['name'],
'quantity': items[i]['quantity']
})
return result
def render(self, data):
output = list()
output.append(_('ITEMS_IN_CONTAINER_%s') % data['container_type'])
for i in data['items']:
output.append(str(i['quantity']).rjust(3) + ' ' + i['name'])
return '\n'.join(output)
| mit | Python |
4dc0381b7ed7c6d5105a17710d667deca4d418f3 | disable msi login (#4322) | samedder/azure-cli,yugangw-msft/azure-cli,QingChenmsft/azure-cli,samedder/azure-cli,samedder/azure-cli,samedder/azure-cli,QingChenmsft/azure-cli,yugangw-msft/azure-cli,yugangw-msft/azure-cli,QingChenmsft/azure-cli,yugangw-msft/azure-cli,QingChenmsft/azure-cli,yugangw-msft/azure-cli,yugangw-msft/azure-cli | src/command_modules/azure-cli-profile/azure/cli/command_modules/profile/_params.py | src/command_modules/azure-cli-profile/azure/cli/command_modules/profile/_params.py | # --------------------------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# --------------------------------------------------------------------------------------------
# pylint: disable=line-too-long
from azure.cli.core.commands import register_cli_argument
from azure.cli.core.commands.parameters import ignore_type
from .custom import load_subscriptions
def get_subscription_id_list(prefix, **kwargs): # pylint: disable=unused-argument
subscriptions = load_subscriptions()
result = []
for subscription in subscriptions:
result.append(subscription['id'])
result.append(subscription['name'])
return result
register_cli_argument('login', 'password', options_list=('--password', '-p'), help="Credentials like user password, or for a service principal, provide client secret or a pem file with key and public certificate. Will prompt if not given.")
register_cli_argument('login', 'service_principal', action='store_true', help='The credential representing a service principal.')
register_cli_argument('login', 'username', options_list=('--username', '-u'), help='Organization id or service principal')
register_cli_argument('login', 'tenant', options_list=('--tenant', '-t'), help='The AAD tenant, must provide when using service principals.')
register_cli_argument('login', 'allow_no_subscriptions', action='store_true', help="Support access tenants without subscriptions. It's uncommon but useful to run tenant level commands, such as 'az ad'")
register_cli_argument('login', 'msi', ignore_type)
register_cli_argument('login', 'msi_port', ignore_type)
# register_cli_argument('login', 'msi', action='store_true', help="Log in using the Virtual Machine's identity", arg_group='Managed Service Identity')
# register_cli_argument('login', 'msi_port', help="the port to retrieve tokens for login", arg_group='Managed Service Identity')
register_cli_argument('logout', 'username', help='account user, if missing, logout the current active account')
register_cli_argument('account', 'subscription', options_list=('--subscription', '-s'), help='Name or ID of subscription.', completer=get_subscription_id_list)
register_cli_argument('account list', 'all', help="List all subscriptions, rather just 'Enabled' ones", action='store_true')
register_cli_argument('account list', 'refresh', help="retrieve up to date subscriptions from server", action='store_true')
register_cli_argument('account show', 'show_auth_for_sdk', options_list=('--sdk-auth',), action='store_true', help='output result in compatible with Azure SDK auth file')
| # --------------------------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# --------------------------------------------------------------------------------------------
# pylint: disable=line-too-long
from azure.cli.core.commands import register_cli_argument
from .custom import load_subscriptions
def get_subscription_id_list(prefix, **kwargs): # pylint: disable=unused-argument
subscriptions = load_subscriptions()
result = []
for subscription in subscriptions:
result.append(subscription['id'])
result.append(subscription['name'])
return result
register_cli_argument('login', 'password', options_list=('--password', '-p'), help="Credentials like user password, or for a service principal, provide client secret or a pem file with key and public certificate. Will prompt if not given.")
register_cli_argument('login', 'service_principal', action='store_true', help='The credential representing a service principal.')
register_cli_argument('login', 'username', options_list=('--username', '-u'), help='Organization id or service principal')
register_cli_argument('login', 'tenant', options_list=('--tenant', '-t'), help='The AAD tenant, must provide when using service principals.')
register_cli_argument('login', 'allow_no_subscriptions', action='store_true', help="Support access tenants without subscriptions. It's uncommon but useful to run tenant level commands, such as 'az ad'")
register_cli_argument('login', 'msi', action='store_true', help="Log in using the Virtual Machine's identity", arg_group='Managed Service Identity')
register_cli_argument('login', 'msi_port', help="the port to retrieve tokens for login", arg_group='Managed Service Identity')
register_cli_argument('logout', 'username', help='account user, if missing, logout the current active account')
register_cli_argument('account', 'subscription', options_list=('--subscription', '-s'), help='Name or ID of subscription.', completer=get_subscription_id_list)
register_cli_argument('account list', 'all', help="List all subscriptions, rather just 'Enabled' ones", action='store_true')
register_cli_argument('account list', 'refresh', help="retrieve up to date subscriptions from server", action='store_true')
register_cli_argument('account show', 'show_auth_for_sdk', options_list=('--sdk-auth',), action='store_true', help='output result in compatible with Azure SDK auth file')
| mit | Python |
3a62f69c7ee1a453fe5cd5cee77b39df23b2d5ff | Convert RepositorySearchResult to ShortRepository | sigmavirus24/github3.py | github3/search/repository.py | github3/search/repository.py | # -*- coding: utf-8 -*-
from __future__ import unicode_literals
from .. import models
from .. import repos
class RepositorySearchResult(models.GitHubCore):
def _update_attributes(self, data):
result = data.copy()
#: Score of the result
self.score = self._get_attribute(result, 'score')
if 'score' in result:
del result['score']
#: Text matches
self.text_matches = self._get_attribute(result, 'text_matches', [])
if 'text_matches' in result:
del result['text_matches']
#: Repository object
self.repository = repos.ShortRepository(result, self)
def _repr(self):
return '<RepositorySearchResult [{0}]>'.format(self.repository)
| # -*- coding: utf-8 -*-
from __future__ import unicode_literals
from ..models import GitHubCore
from ..repos import Repository
class RepositorySearchResult(GitHubCore):
def _update_attributes(self, data):
result = data.copy()
#: Score of the result
self.score = self._get_attribute(result, 'score')
if 'score' in result:
del result['score']
#: Text matches
self.text_matches = self._get_attribute(result, 'text_matches', [])
if 'text_matches' in result:
del result['text_matches']
#: Repository object
self.repository = Repository(result, self)
def _repr(self):
return '<RepositorySearchResult [{0}]>'.format(self.repository)
| bsd-3-clause | Python |
1eb3df5ca3c86effa85ba76a8bdf549f3560f3a5 | Add reporting unit URL to region serializer. | consbio/landscapesim,consbio/landscapesim,consbio/landscapesim | landscapesim/serializers/regions.py | landscapesim/serializers/regions.py | import json
from rest_framework import serializers
from django.core.urlresolvers import reverse
from landscapesim.models import Region
class ReportingUnitSerializer(serializers.Serializer):
    """Serialize a reporting unit as a GeoJSON "Feature" object."""

    type = serializers.SerializerMethodField()
    properties = serializers.SerializerMethodField()
    geometry = serializers.SerializerMethodField()

    class Meta:
        fields = ('type', 'geometry', 'properties',)

    def get_type(self, obj):
        # Every serialized unit is emitted as a GeoJSON "Feature".
        return 'Feature'

    def get_geometry(self, obj):
        # obj.polygon.json is GeoJSON text; decode it so the geometry nests
        # as an object rather than an escaped string.
        geojson_text = obj.polygon.json
        return json.loads(geojson_text)

    def get_properties(self, obj):
        return dict(id=obj.id, unit_id=obj.unit_id, name=obj.name)
class RegionSerializer(serializers.ModelSerializer):
    # URL pointing at this region's reporting-unit listing.
    url = serializers.SerializerMethodField()

    class Meta:
        model = Region
        fields = ('id', 'name', 'url')

    def get_url(self, obj):
        # Reverse the named route so clients never hard-code the path.
        return reverse('region-reporting-units', args=[obj.id])
| import json
from rest_framework import serializers
from landscapesim.models import Region
class ReportingUnitSerializer(serializers.Serializer):
    # Serializes a reporting unit as a GeoJSON "Feature" object.
    type = serializers.SerializerMethodField()
    properties = serializers.SerializerMethodField()
    geometry = serializers.SerializerMethodField()

    class Meta:
        fields = ('type', 'geometry', 'properties',)

    def get_type(self, obj):
        # Constant GeoJSON object type.
        return 'Feature'

    def get_geometry(self, obj):
        # Decode the polygon's GeoJSON text so it nests as an object.
        return json.loads(obj.polygon.json)

    def get_properties(self, obj):
        return {
            'id': obj.id,
            'unit_id': obj.unit_id,
            'name': obj.name
        }
class RegionSerializer(serializers.ModelSerializer):
    # Minimal region representation: id and name only.

    class Meta:
        model = Region
        fields = ('id', 'name')
| bsd-3-clause | Python |
2cc829d468b9c5efefcb3707e55f71d6a81a64b7 | add structured mesh test; not working | amaxwell/datatank_py | test/datagraph_mesh_test.py | test/datagraph_mesh_test.py | #!/usr/bin/env python
# -*- coding: utf-8 -*-
# This software is under a BSD license. See LICENSE.txt for details.
import numpy as np
from datatank_py.DTDataFile import DTDataFile
from datatank_py.DTMesh2D import DTMesh2D
from datatank_py.DTStructuredMesh2D import DTStructuredMesh2D
from datatank_py.DTStructuredGrid2D import DTStructuredGrid2D
if __name__ == '__main__':
    output_file = DTDataFile("dg_mesh_test.dtbin", truncate=True)
    output_file.DEBUG = True

    # Create and save a single 2D Mesh. The mesh_function is kind of
    # unnecessary since you can just multiply xx and yy directly,
    # but it fits well with using a 2D function + grid in DataTank.
    def mesh_function(x, y, t):
        # Time-dependent test surface used for every mesh written below.
        return np.cos(x / float(t+1) * 10) + np.cos(y + t)

    # return the step to avoid getting fouled up in computing it
    (x, dx) = np.linspace(-10, 10, 50, retstep=True)
    (y, dy) = np.linspace(-10, 10, 100, retstep=True)
    xx, yy = np.meshgrid(x, y)
    grid = (np.min(x), np.min(y), dx, dy)

    # time indexes must start at 0
    for idx, time in enumerate(np.arange(0, 20, 2, dtype=np.double)):
        mesh = mesh_function(xx, yy, time)
        dtmesh = DTMesh2D(mesh, grid=grid)
        output_file.write(dtmesh, "Test Mesh_%d" % (idx), time=time)

    # Structured-grid experiment (work in progress): the 10x10 grid below
    # does not match the 100x50 `mesh` array; the Python-2 prints are
    # debugging that mismatch.
    sgrid = DTStructuredGrid2D(np.sin(range(10)), np.cos(range(10)))
    xx, yy = np.meshgrid(range(10), range(10))
    mesh_values = mesh_function(xx, yy, 0)
    print sgrid.shape()
    print mesh_values.shape
    output_file["sgrid"] = sgrid
    # NOTE(review): `mesh` (the last 100x50 loop mesh) is passed here rather
    # than the 10x10 `mesh_values` computed just above — looks unintended;
    # confirm.
    output_file["smesh"] = DTStructuredMesh2D(mesh, grid=sgrid)
    output_file.close()
| #!/usr/bin/env python
# -*- coding: utf-8 -*-
# This software is under a BSD license. See LICENSE.txt for details.
import numpy as np
from datatank_py.DTDataFile import DTDataFile
from datatank_py.DTMesh2D import DTMesh2D
if __name__ == '__main__':
    output_file = DTDataFile("dg_mesh_test.dtbin", truncate=True)
    output_file.DEBUG = True

    # Create and save a single 2D Mesh. The mesh_function is kind of
    # unnecessary since you can just multiply xx and yy directly,
    # but it fits well with using a 2D function + grid in DataTank.
    def mesh_function(x, y, t):
        # Time-dependent test surface used for every mesh written below.
        return np.cos(x / float(t+1) * 10) + np.cos(y + t)

    # return the step to avoid getting fouled up in computing it
    (x, dx) = np.linspace(-10, 10, 50, retstep=True)
    (y, dy) = np.linspace(-10, 10, 100, retstep=True)
    xx, yy = np.meshgrid(x, y)
    grid = (np.min(x), np.min(y), dx, dy)

    # time indexes must start at 0
    for idx, time in enumerate(np.arange(0, 20, 2, dtype=np.double)):
        mesh = mesh_function(xx, yy, time)
        dtmesh = DTMesh2D(mesh, grid=grid)
        output_file.write(dtmesh, "Test Mesh_%d" % (idx), time=time)

    output_file.close()
| bsd-3-clause | Python |
34c20fb80a22e816e1c3e8ef887b415e32949d64 | Update check_all_managed_nodes_status.py | r0h4n/node-agent,Tendrl/node_agent,r0h4n/node-agent,Tendrl/node-agent,r0h4n/node-agent,Tendrl/node_agent,Tendrl/node-agent,Tendrl/node-agent | tendrl/node_agent/node_sync/check_all_managed_nodes_status.py | tendrl/node_agent/node_sync/check_all_managed_nodes_status.py | import etcd
import uuid
from tendrl.commons.event import Event
from tendrl.commons.objects.job import Job
from tendrl.commons.message import Message
from tendrl.commons.utils import etcd_utils
def run():
    """Mark nodes without a live status as DOWN; clean stale provisioner tags.

    The status key is written with prevExist=False, so the write only
    succeeds when the key is absent; EtcdAlreadyExist is treated as
    "node already has a status" and skipped (presumably the node's agent
    owns that key — confirm against the agent code).
    """
    try:
        nodes = NS._int.client.read("/nodes")
    except etcd.EtcdKeyNotFound:
        # Nothing registered yet; nothing to sync.
        return

    for node in nodes.leaves:
        node_id = node.key.split('/')[-1]
        try:
            NS._int.client.write(
                "/nodes/{0}/NodeContext/status".format(node_id),
                "DOWN",
                prevExist=False
            )
            _node_context = NS.tendrl.objects.NodeContext(node_id=node_id).load()
            _tc = NS.tendrl.objects.TendrlContext(node_id=node_id).load()
            _cluster = NS.tendrl.objects.Cluster(integration_id=_tc.integration_id).load()
            # Remove stale provisioner tag
            if _cluster.is_managed == "yes":
                _tag = "provisioner/%s" % _cluster.integration_id
                if _tag in _node_context.tags:
                    _index_key = "/indexes/tags/%s" % _tag
                    _node_context.tags.remove(_tag)
                    _node_context.save()
                    etcd_utils.delete(_index_key)
                    # Queue an async job, tagged for this node, to re-run
                    # monitoring configuration after the role change.
                    _msg = "node_sync, STALE provisioner node found! re-configuring monitoring (job-id: %s) on this node"
                    payload = {
                        "tags": ["tendrl/node_%s" % node_id],
                        "run": "tendrl.flows.ConfigureMonitoring",
                        "status": "new",
                        "parameters": {'TendrlContext.integration_id': _tc.integration_id},
                        "type": "node"
                    }
                    _job_id = str(uuid.uuid4())
                    Job(job_id=_job_id,
                        status="new",
                        payload=payload).save()
                    Event(
                        Message(
                            priority="debug",
                            publisher=NS.publisher_id,
                            payload={"message": _msg % _job_id
                                     }
                        )
                    )
        except etcd.EtcdAlreadyExist:
            pass
    return
| import etcd
from tendrl.commons.utils import etcd_utils
def run():
    """Mark nodes without a live status as DOWN; drop stale provisioner tags."""
    try:
        nodes = NS._int.client.read("/nodes")
    except etcd.EtcdKeyNotFound:
        return
    for node in nodes.leaves:
        node_id = node.key.split('/')[-1]
        try:
            # prevExist=False: only create the status key when it is missing.
            NS._int.client.write(
                "/nodes/{0}/NodeContext/status".format(node_id),
                "DOWN",
                prevExist=False
            )
            _node_context = NS.tendrl.objects.NodeContext(node_id=node_id).load()
            _tc = NS.tendrl.objects.TendrlContext(node_id=node_id).load()
            _cluster = NS.tendrl.objects.Cluster(integration_id=_tc.integration_id).load()
            # Remove stale provisioner tag
            if _cluster.is_managed == "yes":
                _tag = "provisioner/%s" % _cluster.integration_id
                if _tag in _node_context.tags:
                    _index_key = "/indexes/tags/%s" % _tag
                    _node_context.tags.remove(_tag)
                    _node_context.save()
                    etcd_utils.delete(_index_key)
        except etcd.EtcdAlreadyExist:
            # Status key already present — node considered alive elsewhere.
            pass
    return
| lgpl-2.1 | Python |
cc5a9719ff4c91f385c973a759f63fd650f0388e | Remove old style urls | eldarion/kaleo,pinax/pinax-invitations | pinax/invitations/tests/urls.py | pinax/invitations/tests/urls.py | from django.conf.urls import url, include
# Mount the invitations app at the site root under a URL namespace so the
# tests can reverse "pinax_invitations:..." names.
urlpatterns = [
    url(r"^", include("pinax.invitations.urls", namespace="pinax_invitations")),
]
try:
    # Newer Django module path for the url helpers.
    from django.conf.urls import patterns, include
except ImportError:
    # Older Django releases kept them in conf.urls.defaults.
    from django.conf.urls.defaults import patterns, include


# Mount the invitations app at the site root under a URL namespace.
urlpatterns = patterns(
    "",
    (r"^", include("pinax.invitations.urls", namespace="pinax_invitations")),
)
| unknown | Python |
acff6f07daca9e6703439a4d7fc7a287a42c4bcc | Bump version | markstory/lint-review,markstory/lint-review,markstory/lint-review | lintreview/__init__.py | lintreview/__init__.py | __version__ = '2.26.3'
| __version__ = '2.36.2'
| mit | Python |
e26d57cc18fcd7ce9fde05fe7fce50d4cd9e7949 | increase xmatch timeout to 5min | imbasimba/astroquery,imbasimba/astroquery,ceb8/astroquery,ceb8/astroquery | astroquery/xmatch/__init__.py | astroquery/xmatch/__init__.py | # Licensed under a 3-clause BSD style license - see LICENSE.rst
from astropy import config as _config
class Conf(_config.ConfigNamespace):
    """
    Configuration parameters for `astroquery.xmatch`.
    """
    # Synchronous CDS xMatch endpoint queried by the service.
    url = _config.ConfigItem(
        'http://cdsxmatch.u-strasbg.fr/xmatch/api/v1/sync',
        'xMatch URL')
    # Request time limit in seconds (raised to 5 minutes for slow matches).
    timeout = _config.ConfigItem(
        300,
        'time limit for connecting to xMatch server')


# Module-level configuration instance.
conf = Conf()

# Public API re-exports (imported after conf is created).
from .core import XMatch, XMatchClass

__all__ = ['XMatch', 'XMatchClass',
           'Conf', 'conf',
           ]
| # Licensed under a 3-clause BSD style license - see LICENSE.rst
from astropy import config as _config
class Conf(_config.ConfigNamespace):
    """
    Configuration parameters for `astroquery.xmatch`.
    """
    # Synchronous CDS xMatch endpoint queried by the service.
    url = _config.ConfigItem(
        'http://cdsxmatch.u-strasbg.fr/xmatch/api/v1/sync',
        'xMatch URL')
    # Request time limit in seconds.
    timeout = _config.ConfigItem(
        60,
        'time limit for connecting to xMatch server')


# Module-level configuration instance.
conf = Conf()

# Public API re-exports (imported after conf is created).
from .core import XMatch, XMatchClass

__all__ = ['XMatch', 'XMatchClass',
           'Conf', 'conf',
           ]
| bsd-3-clause | Python |
0a05eff834fea8b122af58db3404af26b3ddb125 | Fix default field datetime format for html5 datetime-local | hasadna/OpenCommunity,yaniv14/OpenCommunity,hasadna/OpenCommunity,hasadna/OpenCommunity,yaniv14/OpenCommunity,yaniv14/OpenCommunity,nonZero/OpenCommunity,nonZero/OpenCommunity,nonZero/OpenCommunity,yaniv14/OpenCommunity,hasadna/OpenCommunity,nonZero/OpenCommunity | src/ocd/formats/he/formats.py | src/ocd/formats/he/formats.py | # -*- encoding: utf-8 -*-
# This file is distributed under the same license as the Django package.
#
from __future__ import unicode_literals
# The *_FORMAT strings use the Django date format syntax,
# see http://docs.djangoproject.com/en/dev/ref/templates/builtins/#date
# Django (template-style) output formats for the Hebrew locale.
DATE_FORMAT = 'j בF Y'
TIME_FORMAT = 'H:i:s'
DATETIME_FORMAT = 'j בF Y, H:i'
YEAR_MONTH_FORMAT = 'F Y'
MONTH_DAY_FORMAT = 'j בF'
SHORT_DATE_FORMAT = 'd/m/Y'
SHORT_DATETIME_FORMAT = 'd/m/Y H:i'
# FIRST_DAY_OF_WEEK =

# The *_INPUT_FORMATS strings use the Python strftime format syntax,
# see http://docs.python.org/library/datetime.html#strftime-strptime-behavior
# DATE_INPUT_FORMATS =
# TIME_INPUT_FORMATS =
# The ISO 'T'-separated entries come first so values posted by HTML5
# <input type="datetime-local"> widgets parse with the default field.
DATETIME_INPUT_FORMATS = (
    '%Y-%m-%dT%H:%M',
    '%Y-%m-%dT%H:%M:%S',
    '%Y-%m-%d %H:%M:%S', # '2006-10-25 14:30:59'
    '%Y-%m-%d %H:%M:%S.%f', # '2006-10-25 14:30:59.000200'
    '%Y-%m-%d %H:%M', # '2006-10-25 14:30'
    '%Y-%m-%d', # '2006-10-25'
    '%m/%d/%Y %H:%M:%S', # '10/25/2006 14:30:59'
    '%m/%d/%Y %H:%M:%S.%f', # '10/25/2006 14:30:59.000200'
    '%m/%d/%Y %H:%M', # '10/25/2006 14:30'
    '%m/%d/%Y', # '10/25/2006'
    '%m/%d/%y %H:%M:%S', # '10/25/06 14:30:59'
    '%m/%d/%y %H:%M:%S.%f', # '10/25/06 14:30:59.000200'
    '%m/%d/%y %H:%M', # '10/25/06 14:30'
    '%m/%d/%y', # '10/25/06'
)
DECIMAL_SEPARATOR = '.'
THOUSAND_SEPARATOR = ','
# NUMBER_GROUPING =
| # -*- encoding: utf-8 -*-
# This file is distributed under the same license as the Django package.
#
from __future__ import unicode_literals
# The *_FORMAT strings use the Django date format syntax,
# see http://docs.djangoproject.com/en/dev/ref/templates/builtins/#date
# Django (template-style) output formats for the Hebrew locale.
DATE_FORMAT = 'j בF Y'
TIME_FORMAT = 'H:i:s'
DATETIME_FORMAT = 'j בF Y, H:i'
YEAR_MONTH_FORMAT = 'F Y'
MONTH_DAY_FORMAT = 'j בF'
SHORT_DATE_FORMAT = 'd/m/Y'
SHORT_DATETIME_FORMAT = 'd/m/Y H:i'
# FIRST_DAY_OF_WEEK =

# The *_INPUT_FORMATS strings use the Python strftime format syntax,
# see http://docs.python.org/library/datetime.html#strftime-strptime-behavior
# No input-format overrides here: parsing falls back to Django's defaults.
# DATE_INPUT_FORMATS =
# TIME_INPUT_FORMATS =
# DATETIME_INPUT_FORMATS =
DECIMAL_SEPARATOR = '.'
THOUSAND_SEPARATOR = ','
# NUMBER_GROUPING =
| bsd-3-clause | Python |
f72bc39e42288a51246900b7e4e4f7ef87ba2369 | Fix typo | giumas/python-acoustics,antiface/python-acoustics,FRidh/python-acoustics,python-acoustics/python-acoustics,felipeacsi/python-acoustics | unittest/test_atmosphere.py | unittest/test_atmosphere.py | import unittest
from acoustics.atmosphere import Atmosphere
class AtmosphereCase(unittest.TestCase):
    """Checks the constants of a default-constructed Atmosphere."""

    def test_standard_atmosphere(self):
        a = Atmosphere()
        self.assertEqual(a.temperature, 293.15)       # presumably kelvin (20 °C) — confirm
        self.assertEqual(a.pressure, 101.325)
        self.assertEqual(a.relative_humidity, 0.0)
        self.assertEqual(a.soundspeed, 343.2)


if __name__ == '__main__':
    unittest.main()
| import unittest
# BUG FIX: the module imported the lowercase name `atmosphere`, but the test
# instantiates `Atmosphere`, so running the test raised NameError. Import the
# class under its real (capitalized) name.
from acoustics.atmosphere import Atmosphere


class AtmosphereCase(unittest.TestCase):
    """Checks the constants of a default-constructed Atmosphere."""

    def test_standard_atmosphere(self):
        a = Atmosphere()
        self.assertEqual(a.temperature, 293.15)
        self.assertEqual(a.pressure, 101.325)
        self.assertEqual(a.relative_humidity, 0.0)
        self.assertEqual(a.soundspeed, 343.2)
if __name__ == '__main__':
unittest.main() | bsd-3-clause | Python |
42d9d9a6663347ff4bb9ad3c649140e09396ec98 | Add refresh_token method | devicehive/devicehive-python | devicehive/handler.py | devicehive/handler.py | class Handler(object):
"""Handler class."""
    def __init__(self, transport, token, options):
        # Transport and token are kept private; `options` is the public
        # handler configuration bag.
        self._transport = transport
        self._token = token
        self.options = options
def refresh_token(self):
self._token.refresh()
return self._token.access_token()
    def handle_connected(self):
        # Abstract hook — subclasses must override. Name suggests it fires
        # once the transport is connected; confirm against the transport code.
        raise NotImplementedError
    def handle_event(self, event):
        # Abstract hook — subclasses must override to process incoming events.
        raise NotImplementedError
| class Handler(object):
"""Handler class."""
    def __init__(self, transport, token, options):
        # Transport and token are kept private; `options` is the public
        # handler configuration bag.
        self._transport = transport
        self._token = token
        self.options = options
    def handle_connected(self):
        # Abstract hook — subclasses must override. Name suggests it fires
        # once the transport is connected; confirm against the transport code.
        raise NotImplementedError
    def handle_event(self, event):
        # Abstract hook — subclasses must override to process incoming events.
        raise NotImplementedError
| apache-2.0 | Python |
582e3ce5a7b306b4a65ffd8ec19f016e0ccbc83d | Update ipc_lista1.6.py | any1m1c/ipc20161 | lista1/ipc_lista1.6.py | lista1/ipc_lista1.6.py | #ipc_lista1.6
#Professor: Jucimar Junior
#Any Mendes Carvalho -
#
#
#
#
#Faça um programa que peça o raio de um círculo, calcule
| #ipc_lista1.6
#Professor: Jucimar Junior
#Any Mendes Carvalho -
#
#
#
#
#Faça um programa que peça o raio de um círculo,
| apache-2.0 | Python |
3ab2520a23e59dfa52e6b027b6ec15c8b93d3bae | Update ipc_lista1.8.py | any1m1c/ipc20161 | lista1/ipc_lista1.8.py | lista1/ipc_lista1.8.py | #ipc_lista1.8
#Professor: Jucimar Junior
#Any Mendes Carvalho - 1615310044
#
#
#
#
#Faça um programa que pergunte quanto você ganha por hora e o número de horas trabalhadas
| #ipc_lista1.8
#Professor: Jucimar Junior
#Any Mendes Carvalho - 1615310044
#
#
#
#
#Faça um programa que pergunte quanto você ganha por hora e o número de horas
| apache-2.0 | Python |
18a027b97bffd83d5202a2f069e79a559b5ac73a | Update ipc_lista1.8.py | any1m1c/ipc20161 | lista1/ipc_lista1.8.py | lista1/ipc_lista1.8.py | #ipc_lista1.8
#Professor: Jucimar Junior
#Any Mendes Carvalho - 1615310044
#
#
#
#
#Faça um programa que pergunte quanto você ganha por hora e o número de horas trabalhadas no mês.
#Calcule e mostre o total do seu salário no referido mês.
QntHora = input("Entre com o valor de seu rendimento por hora:
| #ipc_lista1.8
#Professor: Jucimar Junior
#Any Mendes Carvalho - 1615310044
#
#
#
#
#Faça um programa que pergunte quanto você ganha por hora e o número de horas trabalhadas no mês.
#Calcule e mostre o total do seu salário no referido mês.
QntHora = input("Entre com o valor de seu rendimento por hora
| apache-2.0 | Python |
5f2e53af756b9759a76c46f3ff0fe003a268133d | Bump base package requirement (#8572) | DataDog/integrations-core,DataDog/integrations-core,DataDog/integrations-core,DataDog/integrations-core,DataDog/integrations-core,DataDog/integrations-core,DataDog/integrations-core,DataDog/integrations-core,DataDog/integrations-core,DataDog/integrations-core | linkerd/setup.py | linkerd/setup.py | # (C) Datadog, Inc. 2018-present
# All rights reserved
# Licensed under a 3-clause BSD style license (see LICENSE)
from codecs import open # To use a consistent encoding
from os import path
from setuptools import setup
HERE = path.abspath(path.dirname(__file__))

# Get version info
ABOUT = {}
with open(path.join(HERE, "datadog_checks", "linkerd", "__about__.py")) as f:
    exec(f.read(), ABOUT)

# Get the long description from the README file
with open(path.join(HERE, 'README.md'), encoding='utf-8') as f:
    long_description = f.read()


def get_dependencies():
    # Optional pin list shipped next to setup.py; a missing file means the
    # 'deps' extra is empty.
    dep_file = path.join(HERE, 'requirements.in')
    if not path.isfile(dep_file):
        return []
    with open(dep_file, encoding='utf-8') as f:
        return f.readlines()


# Minimum base-check package required by this integration.
CHECKS_BASE_REQ = 'datadog-checks-base>=16.4.0'

setup(
    name='datadog-linkerd',
    version=ABOUT["__version__"],
    description='The Linkerd check',
    long_description=long_description,
    long_description_content_type='text/markdown',
    keywords='datadog agent linkerd check',
    # The project's main homepage.
    url='https://github.com/DataDog/integrations-core',
    # Author details
    author='Datadog',
    author_email='packages@datadoghq.com',
    # License
    license='BSD',
    # See https://pypi.python.org/pypi?%3Aaction=list_classifiers
    classifiers=[
        'Development Status :: 5 - Production/Stable',
        'Intended Audience :: Developers',
        'Intended Audience :: System Administrators',
        'Topic :: System :: Monitoring',
        'License :: OSI Approved :: BSD License',
        'Programming Language :: Python :: 2',
        'Programming Language :: Python :: 2.7',
    ],
    # The package we're going to ship
    packages=['datadog_checks.linkerd'],
    # Run-time dependencies
    install_requires=[CHECKS_BASE_REQ],
    extras_require={'deps': get_dependencies()},
    # Extra files to ship with the wheel package
    include_package_data=True,
)
| # (C) Datadog, Inc. 2018-present
# All rights reserved
# Licensed under a 3-clause BSD style license (see LICENSE)
from codecs import open # To use a consistent encoding
from os import path
from setuptools import setup
HERE = path.abspath(path.dirname(__file__))

# Get version info
ABOUT = {}
with open(path.join(HERE, "datadog_checks", "linkerd", "__about__.py")) as f:
    exec(f.read(), ABOUT)

# Get the long description from the README file
with open(path.join(HERE, 'README.md'), encoding='utf-8') as f:
    long_description = f.read()


def get_dependencies():
    # Optional pin list shipped next to setup.py; a missing file means the
    # 'deps' extra is empty.
    dep_file = path.join(HERE, 'requirements.in')
    if not path.isfile(dep_file):
        return []
    with open(dep_file, encoding='utf-8') as f:
        return f.readlines()


# Minimum base-check package required by this integration.
CHECKS_BASE_REQ = 'datadog-checks-base>=16.2.0'

setup(
    name='datadog-linkerd',
    version=ABOUT["__version__"],
    description='The Linkerd check',
    long_description=long_description,
    long_description_content_type='text/markdown',
    keywords='datadog agent linkerd check',
    # The project's main homepage.
    url='https://github.com/DataDog/integrations-core',
    # Author details
    author='Datadog',
    author_email='packages@datadoghq.com',
    # License
    license='BSD',
    # See https://pypi.python.org/pypi?%3Aaction=list_classifiers
    classifiers=[
        'Development Status :: 5 - Production/Stable',
        'Intended Audience :: Developers',
        'Intended Audience :: System Administrators',
        'Topic :: System :: Monitoring',
        'License :: OSI Approved :: BSD License',
        'Programming Language :: Python :: 2',
        'Programming Language :: Python :: 2.7',
    ],
    # The package we're going to ship
    packages=['datadog_checks.linkerd'],
    # Run-time dependencies
    install_requires=[CHECKS_BASE_REQ],
    extras_require={'deps': get_dependencies()},
    # Extra files to ship with the wheel package
    include_package_data=True,
)
| bsd-3-clause | Python |
338a6edd46c1bab0065b19f4c82b07ecb6c75ccc | change list of files, into json | dave-lab41/pelops,d-grossman/pelops,d-grossman/pelops,dave-lab41/pelops,Lab41/pelops,Lab41/pelops | etl/veriFileList2Json.py | etl/veriFileList2Json.py | import json
import sys
# turn the list of files into json for working with
def main():
    """Convert a VeRi image file list into newline-delimited JSON records.

    Filenames are assumed to look like <vehicleID>_<cameraID>_... ; color
    and type are unknown at this stage and recorded as "-1".
    """
    inFileName = sys.argv[1]
    outFileName = '{0}.json'.format(inFileName)
    inFile = open(inFileName, 'r')
    outFile = open(outFileName, 'w')
    for line in inFile:
        d = dict()
        line = line.strip()
        attrs = line.split('_')
        d['imageName'] = line
        d['vehicleID'] = attrs[0]
        d['cameraID'] = attrs[1]
        d['colorID'] = str(-1)
        d['typeID'] = str(-1)
        # One JSON object per line (JSON-lines output).
        outFile.write(json.dumps(d)+'\n')
    inFile.close()
    # NOTE(review): outFile is never closed explicitly — confirm intended.


if __name__ == '__main__':
    main()
| import json
import sys
# turn the list of files into json for working with
def main():
    """Convert a VeRi image file list into newline-delimited JSON records.

    Each output line is a JSON object with imageName, vehicleID and cameraID
    (parsed from the <vehicleID>_<cameraID>_... filename) plus placeholder
    colorID/typeID of "-1".
    """
    inFileName = sys.argv[1]
    outFileName = '{0}.json'.format(inFileName)
    # with-blocks close both files even on error; the old code also never
    # closed outFile at all.
    with open(inFileName, 'r') as inFile, open(outFileName, 'w') as outFile:
        for line in inFile:
            d = dict()
            line = line.strip()
            attrs = line.split('_')
            d['imageName'] = line
            d['vehicleID'] = attrs[0]
            d['cameraID'] = attrs[1]
            d['colorID'] = str(-1)
            d['typeID'] = str(-1)
            # BUG FIX: the dict must be serialized before writing;
            # `d + '\n'` raised TypeError on the first row.
            outFile.write(json.dumps(d) + '\n')


if __name__ == '__main__':
    main()
| apache-2.0 | Python |
93ed84050b9fbbe14f3d0d61f5dcdc4c22eedc3c | Update logserver.py | bengjerstad/multiuselogserver,bengjerstad/multiuselogserver,bengjerstad/multiuselogserver | logserver/logserver.py | logserver/logserver.py | import hug
import json
import sqlite3
import pandas as pd
# Module-wide SQLite handle shared by every endpoint below.
conn = sqlite3.connect('check.db')
c = conn.cursor()


@hug.directive()
def cors(support='*', response=None, **kwargs):
    '''Attach an Access-Control-Allow-Origin header to the response.'''
    response and response.set_header('Access-Control-Allow-Origin', support)
@hug.get(examples='')
@hug.local()
#just to Test exporting of the dbfile
def load(hug_cors):
    """Return every ConnectLog row, keyed by row index.

    NOTE(review): assumes the table's column order matches
    (title, time, stat, hash) — confirm the schema.
    """
    logs = {}
    dbkeys = ['title','time','stat','hash']
    sqlcmd = "SELECT * FROM ConnectLog WHERE 1"
    dbout = c.execute(sqlcmd)
    dbout = dbout.fetchall()
    for idx,row in enumerate(dbout):
        logs[idx] = dict(zip(dbkeys,row))
    return logs
@hug.get(examples='')
@hug.local()
def connection(hug_cors):
    """Classify each logged title from its stat history.

    "Good"  - stat never changed away from "200"
    "Yield" - the most recent change brought stat back to "200"
    "Bad"   - the most recent change was to a non-200 stat

    NOTE(review): rows are assumed to come back in insertion (chronological)
    order — there is no ORDER BY; confirm. `dbkeys` is unused here.
    """
    logs = {}
    dbkeys = ['title','stat']
    sqlcmd = "SELECT `title`,`stat` FROM ConnectLog WHERE 1"
    dbout = c.execute(sqlcmd)
    dbout = dbout.fetchall()
    titles = {x[0] for x in dbout}
    for thistitle in titles:
        laststat = "200"
        thisstat = "Good"
        for idx,row in enumerate(dbout):
            if(thistitle == row[0]):
                if(row[1] != laststat):
                    #print(thistitle,laststat,row[1])
                    laststat = row[1]
                    # stat is compared as text, matching the "200" baseline.
                    if(row[1] == str(200)):
                        thisstat = "Yield"
                    if(row[1] != str(200)):
                        thisstat = "Bad"
        logs[thistitle] = thisstat
    return logs
| import hug
import json
import sqlite3
import pandas as pd
# Module-wide SQLite handle shared by every endpoint below.
conn = sqlite3.connect('check.db')
c = conn.cursor()


@hug.directive()
def cors(support='*', response=None, **kwargs):
    '''Attach an Access-Control-Allow-Origin header to the response.'''
    response and response.set_header('Access-Control-Allow-Origin', support)
@hug.get(examples='')
@hug.local()
#just to Test exporting of the dbfile
def load(hug_cors):
    """Return every ConnectLog row, keyed by row index.

    NOTE(review): assumes the table's column order matches
    (title, time, stat, hash) — confirm the schema.
    """
    logs = {}
    dbkeys = ['title','time','stat','hash']
    sqlcmd = "SELECT * FROM ConnectLog WHERE 1"
    dbout = c.execute(sqlcmd)
    dbout = dbout.fetchall()
    for idx,row in enumerate(dbout):
        logs[idx] = dict(zip(dbkeys,row))
    return logs
@hug.get(examples='')
@hug.local()
def connection(hug_cors):
    """Classify each logged title from its stat history.

    "Good"  - stat never changed away from "200"
    "Yield" - the most recent change brought stat back to "200"
    "Bad"   - the most recent change was to a non-200 stat
    """
    logs = {}
    dbkeys = ['title', 'stat']
    sqlcmd = "SELECT `title`,`stat` FROM ConnectLog WHERE 1"
    dbout = c.execute(sqlcmd)
    dbout = dbout.fetchall()
    titles = {x[0] for x in dbout}
    for thistitle in titles:
        laststat = "200"
        thisstat = "Good"
        for idx, row in enumerate(dbout):
            if thistitle == row[0]:
                if row[1] != laststat:
                    # (debug print removed — it polluted stdout per request)
                    laststat = row[1]
                    # BUG FIX: stat is text (note the "200" baseline above);
                    # the previous int comparison (row[1] == 200) never
                    # matched, so every changed title was marked "Bad".
                    if row[1] == str(200):
                        thisstat = "Yield"
                    if row[1] != str(200):
                        thisstat = "Bad"
        logs[thistitle] = thisstat
    return logs
| mit | Python |
b87d811f3e48928b5a41e5b2e0e52fceee2c75b4 | Update version 0.9.2 -> 0.9.3 | dwavesystems/dimod,dwavesystems/dimod | dimod/package_info.py | dimod/package_info.py | # Copyright 2018 D-Wave Systems Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# ================================================================================================
# Distribution metadata for the dimod package (version string presumably
# consumed by setup.py — confirm).
__version__ = '0.9.3'
__author__ = 'D-Wave Systems Inc.'
__authoremail__ = 'acondello@dwavesys.com'
__description__ = 'A shared API for binary quadratic model samplers.'
| # Copyright 2018 D-Wave Systems Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# ================================================================================================
# Distribution metadata for the dimod package (version string presumably
# consumed by setup.py — confirm).
__version__ = '0.9.2'
__author__ = 'D-Wave Systems Inc.'
__authoremail__ = 'acondello@dwavesys.com'
__description__ = 'A shared API for binary quadratic model samplers.'
| apache-2.0 | Python |
f86ae2d132d6c6cb83e0e8afd65abed474cc5dea | Update version 0.7.8 -> 0.7.9 | dwavesystems/dimod,dwavesystems/dimod | dimod/package_info.py | dimod/package_info.py | # Copyright 2018 D-Wave Systems Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# ================================================================================================
# Distribution metadata for the dimod package (version string presumably
# consumed by setup.py — confirm).
__version__ = '0.7.9'
__author__ = 'D-Wave Systems Inc.'
__authoremail__ = 'acondello@dwavesys.com'
__description__ = 'A shared API for binary quadratic model samplers.'
| # Copyright 2018 D-Wave Systems Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# ================================================================================================
# Distribution metadata for the dimod package (version string presumably
# consumed by setup.py — confirm).
__version__ = '0.7.8'
__author__ = 'D-Wave Systems Inc.'
__authoremail__ = 'acondello@dwavesys.com'
__description__ = 'A shared API for binary quadratic model samplers.'
| apache-2.0 | Python |
521b4fbec142306fad2347a5dd3a56aeec2f9498 | Remove deleted places from place index | aapris/linkedevents,aapris/linkedevents,tuomas777/linkedevents,City-of-Helsinki/linkedevents,City-of-Helsinki/linkedevents,tuomas777/linkedevents,City-of-Helsinki/linkedevents,tuomas777/linkedevents,aapris/linkedevents | events/search_indexes.py | events/search_indexes.py | from haystack import indexes
from .models import Event, Place, PublicationStatus
from django.utils.html import strip_tags
class EventIndex(indexes.SearchIndex, indexes.Indexable):
    # Primary document field; content rendered from a search template
    # (use_template=True).
    text = indexes.CharField(document=True, use_template=True)
    # Edge n-grams of the event name power typeahead/autosuggest queries.
    autosuggest = indexes.EdgeNgramField(model_attr='name')
    start_time = indexes.DateTimeField(model_attr='start_time')
    end_time = indexes.DateTimeField(model_attr='end_time')

    def get_updated_field(self):
        # Enables haystack's incremental (age-based) reindexing.
        return 'last_modified_time'

    def get_model(self):
        return Event

    def prepare(self, obj):
        #obj.lang_keywords = obj.keywords.filter(language=get_language())
        # Index plain text only; descriptions may carry HTML markup.
        if obj.description:
            obj.description = strip_tags(obj.description)
        return super(EventIndex, self).prepare(obj)

    def index_queryset(self, using=None):
        # Only publicly visible events are searchable.
        return self.get_model().objects.filter(publication_status=PublicationStatus.PUBLIC)
class PlaceIndex(indexes.SearchIndex, indexes.Indexable):
    # Primary document field; content rendered from a search template.
    text = indexes.CharField(document=True, use_template=True)
    # Edge n-grams of the place name power typeahead/autosuggest queries.
    autosuggest = indexes.EdgeNgramField(model_attr='name')

    def get_updated_field(self):
        # Enables haystack's incremental (age-based) reindexing.
        return 'last_modified_time'

    def get_model(self):
        return Place

    def index_queryset(self, using=None):
        # Soft-deleted places are excluded from the search index.
        return self.get_model().objects.filter(deleted=False)
| from haystack import indexes
from .models import Event, Place, PublicationStatus
from django.utils.html import strip_tags
class EventIndex(indexes.SearchIndex, indexes.Indexable):
    # Primary document field; content rendered from a search template
    # (use_template=True).
    text = indexes.CharField(document=True, use_template=True)
    # Edge n-grams of the event name power typeahead/autosuggest queries.
    autosuggest = indexes.EdgeNgramField(model_attr='name')
    start_time = indexes.DateTimeField(model_attr='start_time')
    end_time = indexes.DateTimeField(model_attr='end_time')

    def get_updated_field(self):
        # Enables haystack's incremental (age-based) reindexing.
        return 'last_modified_time'

    def get_model(self):
        return Event

    def prepare(self, obj):
        #obj.lang_keywords = obj.keywords.filter(language=get_language())
        # Index plain text only; descriptions may carry HTML markup.
        if obj.description:
            obj.description = strip_tags(obj.description)
        return super(EventIndex, self).prepare(obj)

    def index_queryset(self, using=None):
        # Only publicly visible events are searchable.
        return self.get_model().objects.filter(publication_status=PublicationStatus.PUBLIC)
class PlaceIndex(indexes.SearchIndex, indexes.Indexable):
    # Primary document field; content rendered from a search template.
    text = indexes.CharField(document=True, use_template=True)
    # Edge n-grams of the place name power typeahead/autosuggest queries.
    autosuggest = indexes.EdgeNgramField(model_attr='name')

    def get_updated_field(self):
        # Enables haystack's incremental (age-based) reindexing.
        return 'last_modified_time'

    def get_model(self):
        # NOTE(review): no index_queryset override here, so soft-deleted
        # places also land in the index — confirm whether intended.
        return Place
| mit | Python |
9beab775c299117a7b1b68904c437b0b9a52c17a | Add module level docstring explaining when _pmf functions are useful. | dit/dit,dit/dit,dit/dit,chebee7i/dit,Autoplectic/dit,dit/dit,chebee7i/dit,Autoplectic/dit,chebee7i/dit,Autoplectic/dit,dit/dit,Autoplectic/dit,chebee7i/dit,Autoplectic/dit | dit/divergences/kl.py | dit/divergences/kl.py | """
These are special interest implementations that should be used only in
very particular situations.
cross_entropy_pmf
relative_entropy_pmf
DKL_pmf
These functions should be used only if the pmfs that are passed in have
the same exact length and correspond to the same outcome probabilities.
They also assume linear distributed probabilities. The expected use case
is when one is working with a family of distributions (or pmfs) all of
which have the same exact sample space. For example, normally distributed
pmfs can be generated as: dit.math.norm([.5, .5], size=5). You can
pass those distributions to sklearn.metrics.pairwise_distance with
metric=DKL_pmf.
cross_entropy
relative_entropy
DKL
These functions should be used only if the sample spaces of the passed in
distributions are identical (so both the same size and the same order).
The two distributions can have pmfs in different bases.
"""
import dit
import numpy as np
def cross_entropy_pmf(p, q=None):
    """
    Return the cross entropy of pmfs `p` against `q`, in bits.

    When `q` is None it defaults to `p`, which reduces the cross entropy
    to the (Shannon) entropy of `p`.

    Assumption: Linearly distributed probabilities.
    """
    p_arr = np.asarray(p)
    q_arr = p_arr if q is None else np.asarray(q)
    # nansum drops the nan produced by 0 * log2(0), treating it as zero.
    return -np.nansum(p_arr * np.log2(q_arr))


entropy_pmf = cross_entropy_pmf
def relative_entropy_pmf(p, q):
    """
    Return the relative entropy (Kullback-Leibler divergence) of `p` from `q`.

    Computed as the cross entropy H(p, q) minus the entropy H(p).

    Assumption: Linearly distributed probabilities.

    .. math::
        D_{KL}(p || q)
    """
    cross = cross_entropy_pmf(p, q)
    self_entropy = cross_entropy_pmf(p, p)
    return cross - self_entropy


DKL_pmf = relative_entropy_pmf
def cross_entropy(d1, d2, pmf_only=True):
    """
    Return the cross entropy H(d1, d2) of two dit distributions.

    `pmf_only` selects how dit.copypmf extracts the pmfs: 'asis' when true,
    'dense' otherwise.
    """
    mode = 'asis' if pmf_only else 'dense'
    first_pmf = dit.copypmf(d1, base='linear', mode=mode)
    second_pmf = dit.copypmf(d2, base='linear', mode=mode)
    return -np.nansum(first_pmf * np.log2(second_pmf))
def relative_entropy(d1, d2):
    # D_KL(d1 || d2): cross entropy over dense pmfs (pmf_only=False) minus
    # the entropy of d1.
    ce = cross_entropy(d1, d2, pmf_only=False)
    return ce - dit.shannon.entropy(d1)


DKL = relative_entropy
| import dit
import numpy as np
def cross_entropy_pmf(p, q=None):
    """
    Calculates the cross entropy from probability mass functions `p` and `q`.

    If `q` is None, then it is set to be `p`.
    Then the entropy of `p` is calculated.

    Assumption: Linearly distributed probabilities.
    """
    if q is None:
        q = p

    p = np.asarray(p)
    q = np.asarray(q)

    # nansum drops the nan produced by 0 * log2(0), treating it as zero.
    return -np.nansum(p * np.log2(q))


# Entropy is the special case q == p.
entropy_pmf = cross_entropy_pmf
def relative_entropy_pmf(p, q):
    """
    Calculates the relative entropy (or Kullback-Leibler divergence).

    Computed as the cross entropy H(p, q) minus the entropy H(p).

    Assumption: Linearly distributed probabilities.

    .. math::
        D_{KL}(p || q)
    """
    return cross_entropy_pmf(p, q) - cross_entropy_pmf(p, p)


DKL_pmf = relative_entropy_pmf
def cross_entropy(d1, d2, pmf_only=True):
"""
Returns H(d1, d2)
"""
if pmf_only:
mode = 'asis'
else:
mode = 'dense'
pmf1 = dit.copypmf(d1, base='linear', mode=mode)
pmf2 = dit.copypmf(d2, base='linear', mode=mode)
return -np.nansum(pmf1 * np.log2(pmf2))
def relative_entropy(d1, d2):
ce = cross_entropy(d1, d2, pmf_only=False)
return ce - dit.shannon.entropy(d1)
DKL = relative_entropy
| bsd-3-clause | Python |
c65b6adafcdf791030090a72f4490171012ce4fd | Use a buggy pox module | ucb-sts/sts,jmiserez/sts,jmiserez/sts,ucb-sts/sts | config/fuzz_pox_simple.py | config/fuzz_pox_simple.py |
from config.experiment_config_lib import ControllerConfig
from sts.topology import MeshTopology
from sts.control_flow import Fuzzer
from sts.input_traces.input_logger import InputLogger
from sts.simulation_state import SimulationConfig
# Use POX as our controller
start_cmd = ('''./pox.py samples.buggy '''
'''openflow.of_01 --address=__address__ --port=__port__''')
controllers = [ControllerConfig(start_cmd, cwd="pox/")]
topology_class = MeshTopology
topology_params = "num_switches=2"
simulation_config = SimulationConfig(controller_configs=controllers,
topology_class=topology_class,
topology_params=topology_params)
control_flow = Fuzzer(simulation_config,
input_logger=InputLogger(),
invariant_check_name="InvariantChecker.check_liveness",
check_interval=5,
halt_on_violation=True)
|
from config.experiment_config_lib import ControllerConfig
from sts.topology import MeshTopology
from sts.control_flow import Fuzzer
from sts.input_traces.input_logger import InputLogger
from sts.simulation_state import SimulationConfig
# Use POX as our controller
start_cmd = ('''./pox.py openflow.discovery forwarding.l2_multi '''
'''openflow.of_01 --address=__address__ --port=__port__''')
controllers = [ControllerConfig(start_cmd, cwd="pox/")]
topology_class = MeshTopology
topology_params = "num_switches=2"
simulation_config = SimulationConfig(controller_configs=controllers,
topology_class=topology_class,
topology_params=topology_params)
control_flow = Fuzzer(simulation_config,
input_logger=InputLogger(),
invariant_check_name="InvariantChecker.check_liveness",
halt_on_violation=True)
| apache-2.0 | Python |
23bba70700add35015aeeb3ff0bcee425e9cfd3f | Update Curso.py | AEDA-Solutions/matweb,AEDA-Solutions/matweb,AEDA-Solutions/matweb,AEDA-Solutions/matweb,AEDA-Solutions/matweb | backend/Models/Curso/Curso.py | backend/Models/Curso/Curso.py | class Curso(object):
def __init__(self,curso):
self.id = curso.getId()
self.nome = curso.getNome()
self.id_campus = curso.getId_campus()
self.id_grau = curso.getId_grau()
self.codigo = curso.getCodigo()
self.permanencia_minima = curso.getPermanencia_minima()
self.permanencia_maxima = curso.getPermanencia_maxima()
self.creditos_formatura = curso.getCreditos_formatura()
self.creditos_optativos_concentracao = curso.getCreditos_optativos_concentracao()
self.creditos_optativos_conexa = curso.getCreditos_optativos_conexa()
self.creditos_livres_maximo = curso.getCreditos_livres_maximo()
self.mec = curso.getMec()
| class Curso(object):
def __init__(self,curso):
self.id = curso.getId()
self.nome = curso.getNome()
self.id_campus = curso.getId_campus()
self.id_grau = curso.getId_grau()
self.codigo = curso.getCodigo()
self.permanencia_minima = curso.getPermanencia_minima()
self.permanencia_maxima = curso.getPermanencia_maxima()
self.creditos_formatura = curso.getCreditos_formatura()
self.creditos_optativos_concentracao = curso.getCreditos_optativos_concentracao()
self.creditos_optativos_conexa = curso.getCreditos_optativos_conexa()
self.creditos_livres_maximo = curso.getCreditos_livres_maximo()
| mit | Python |
84f4626a623283c3c4d98d9be0ccd69fe837f772 | Update download URL and add more output to downloader. | lucasb-eyer/BiternionNet | download_data.py | download_data.py | #!/usr/bin/env python
from lbtoolbox.download import download
import os
import inspect
import tarfile
def here(f):
me = inspect.getsourcefile(here)
return os.path.join(os.path.dirname(os.path.abspath(me)), f)
def download_extract(urlbase, name, into):
print("Downloading " + name)
fname = download(os.path.join(urlbase, name), into)
print("Extracting...")
with tarfile.open(fname) as f:
f.extractall(path=into)
if __name__ == '__main__':
baseurl = 'https://omnomnom.vision.rwth-aachen.de/data/BiternionNets/'
datadir = here('data')
# First, download the Tosato datasets.
download_extract(baseurl, 'CAVIARShoppingCenterFullOccl.tar.bz2', into=datadir)
download_extract(baseurl, 'CAVIARShoppingCenterFull.tar.bz2', into=datadir)
download_extract(baseurl, 'HIIT6HeadPose.tar.bz2', into=datadir)
download_extract(baseurl, 'HOC.tar.bz2', into=datadir)
download_extract(baseurl, 'HOCoffee.tar.bz2', into=datadir)
download_extract(baseurl, 'IHDPHeadPose.tar.bz2', into=datadir)
download_extract(baseurl, 'QMULPoseHeads.tar.bz2', into=datadir)
print("Done.")
| #!/usr/bin/env python
from lbtoolbox.download import download
import os
import inspect
import tarfile
def here(f):
me = inspect.getsourcefile(here)
return os.path.join(os.path.dirname(os.path.abspath(me)), f)
def download_extract(url, into):
fname = download(url, into)
print("Extracting...")
with tarfile.open(fname) as f:
f.extractall(path=into)
if __name__ == '__main__':
baseurl = 'https://omnomnom.vision.rwth-aachen.de/data/tosato/'
datadir = here('data')
# First, download the Tosato datasets.
download_extract(baseurl + 'CAVIARShoppingCenterFullOccl.tar.bz2', into=datadir)
download_extract(baseurl + 'CAVIARShoppingCenterFull.tar.bz2', into=datadir)
download_extract(baseurl + 'HIIT6HeadPose.tar.bz2', into=datadir)
download_extract(baseurl + 'HOC.tar.bz2', into=datadir)
download_extract(baseurl + 'HOCoffee.tar.bz2', into=datadir)
download_extract(baseurl + 'IHDPHeadPose.tar.bz2', into=datadir)
download_extract(baseurl + 'QMULPoseHeads.tar.bz2', into=datadir)
| mit | Python |
c938a280f9b976031635cc0e96371960640acdc5 | Update version to reflect changes | whowutwut/confluent,xcat2/confluent,jjohnson42/confluent,xcat2/confluent,michaelfardu/thinkconfluent,jufm/confluent,jjohnson42/confluent,michaelfardu/thinkconfluent,xcat2/confluent,chenglch/confluent,chenglch/confluent,jjohnson42/confluent,michaelfardu/thinkconfluent,chenglch/confluent,xcat2/confluent,jufm/confluent,xcat2/confluent,michaelfardu/thinkconfluent,chenglch/confluent,whowutwut/confluent,jjohnson42/confluent,whowutwut/confluent,jjohnson42/confluent,chenglch/confluent,jufm/confluent,michaelfardu/thinkconfluent,jufm/confluent,whowutwut/confluent,jufm/confluent | confluent_server/setup.py | confluent_server/setup.py | from setuptools import setup
setup(
name='confluent_server',
version='0.1.9',
author='Jarrod Johnson',
author_email='jbjohnso@us.ibm.com',
url='http://xcat.sf.net/',
description='confluent systems management server',
packages=['confluent', 'confluent/config', 'confluent/interface',
'confluent/plugins/hardwaremanagement/',
'confluent/plugins/configuration/'],
install_requires=['pycrypto>=2.6', 'confluent_client>=0.1.0', 'eventlet',
'pyghmi>=0.6.5'],
scripts=['bin/confluent'],
)
| from setuptools import setup
setup(
name='confluent_server',
version='0.1.8',
author='Jarrod Johnson',
author_email='jbjohnso@us.ibm.com',
url='http://xcat.sf.net/',
description='confluent systems management server',
packages=['confluent', 'confluent/config', 'confluent/interface',
'confluent/plugins/hardwaremanagement/',
'confluent/plugins/configuration/'],
install_requires=['pycrypto>=2.6', 'confluent_client>=0.1.0', 'eventlet',
'pyghmi>=0.6.5'],
scripts=['bin/confluent'],
)
| apache-2.0 | Python |
0d47c8ffb4182c1a2455d3e2e834626fcb7e727b | Fix flake8 errors | emencia/dr-dump | drdump/drdump.py | drdump/drdump.py | # -*- coding: utf-8 -*-
from __future__ import absolute_import
import pkg_resources
class ApplicationsList(object):
def __init__(self, apps):
self._apps = apps
def __iter__(self):
return iter(self._apps)
@classmethod
def from_packages(cls, extra_apps=()):
packages = [d for d in pkg_resources.working_set]
apps = {p.key for p in packages}
apps.update(extra_apps)
return cls(apps)
class Drdump(object):
def __init__(self, dependencies, names, exclude_apps=None, dump_other_apps=True):
self.dependencies = dependencies
self.names = names
self.exclude_apps = list(exclude_apps) if exclude_apps else []
self.dump_other_apps = dump_other_apps
def __call__(self, output_codec):
with output_codec as output:
for name, context in self:
output(name, context)
def __iter__(self):
"""
Build source from global and item templates
"""
exclude_models = set(self.exclude_apps)
for name, item in self.dependencies.get_dump_order(self.names):
yield name, item
exclude_models.update(item['models'])
if self.dump_other_apps:
yield 'other_apps', {
'exclude_models': list(exclude_models),
'use_natural_key': True
}
| # -*- coding: utf-8 -*-
from __future__ import absolute_import
import pkg_resources
class ApplicationsList(object):
def __init__(self, apps):
self._apps = apps
def __iter__(self):
return iter(self._apps)
@classmethod
def from_packages(cls, extra_apps=()):
packages = [d for d in pkg_resources.working_set]
apps = {p.key for p in packages}
apps.update(extra_apps)
return cls(apps)
class Drdump(object):
def __init__(self, dependencies, names, exclude_apps=None, dump_other_apps=True):
self.dependencies = dependencies
self.names = names
self.exclude_apps = list(exclude_apps) if exclude_apps else []
self.dump_other_apps = dump_other_apps
def __call__(self, output_codec):
with output_codec as output:
for name, context in self:
output(name, context)
def __iter__(self):
"""
Build source from global and item templates
"""
exclude_models = set(self.exclude_apps)
for name, item in self.dependencies.get_dump_order(self.names):
yield name, item
exclude_models.update(item['models'])
if self.dump_other_apps:
yield 'other_apps', {
'exclude_models': list(exclude_models),
'use_natural_key': True
}
| mit | Python |
3878ebcd4c5584fa205bd9984e90db416e6c2ada | Add default for type field | bashu/django-airports | airports/models.py | airports/models.py | # -*- coding: utf-8 -*-
from django.contrib.gis.db import models
try:
from django.utils.encoding import force_unicode as force_text
except (NameError, ImportError):
from django.utils.encoding import force_text
from django.utils.encoding import python_2_unicode_compatible
from django.core.validators import MinLengthValidator
from django.utils.translation import gettext_lazy as _
from .conf import DJANGO_VERSION
if DJANGO_VERSION < 2:
from django.contrib.gis.db.models import GeoManager
else:
from django.db.models import Manager as GeoManager
@python_2_unicode_compatible
class Airport(models.Model):
airport_id = models.PositiveIntegerField(primary_key=True, editable=False)
name = models.CharField(_("name"), max_length=100)
city_name = models.CharField(_("name"), null=True, blank=True, max_length=100)
iata = models.CharField(_("IATA/FAA code"), null=True, blank=True, max_length=3,
validators=[MinLengthValidator(3)])
icao = models.CharField(_("ICAO code"), null=True, blank=True, max_length=4,
validators=[MinLengthValidator(4)])
local = models.CharField(_("Local code"), null=True, blank=True, max_length=12)
ident = models.CharField(_("Ident code"), null=True, blank=True, max_length=12)
altitude = models.FloatField(_("altitude"), default=0)
location = models.PointField(_("location"))
country = models.ForeignKey('cities.Country', on_delete=models.DO_NOTHING, null=True)
region = models.ForeignKey('cities.Region', on_delete=models.DO_NOTHING, null=True)
city = models.ForeignKey('cities.City', on_delete=models.DO_NOTHING, null=True)
type = models.CharField(max_length=16, default='')
objects = GeoManager()
class Meta: # pylint: disable=C1001
ordering = ['airport_id']
def __str__(self):
return force_text(self.name)
| # -*- coding: utf-8 -*-
from django.contrib.gis.db import models
try:
from django.utils.encoding import force_unicode as force_text
except (NameError, ImportError):
from django.utils.encoding import force_text
from django.utils.encoding import python_2_unicode_compatible
from django.core.validators import MinLengthValidator
from django.utils.translation import gettext_lazy as _
from .conf import DJANGO_VERSION
if DJANGO_VERSION < 2:
from django.contrib.gis.db.models import GeoManager
else:
from django.db.models import Manager as GeoManager
@python_2_unicode_compatible
class Airport(models.Model):
airport_id = models.PositiveIntegerField(primary_key=True, editable=False)
name = models.CharField(_("name"), max_length=100)
city_name = models.CharField(_("name"), null=True, blank=True, max_length=100)
iata = models.CharField(_("IATA/FAA code"), null=True, blank=True, max_length=3,
validators=[MinLengthValidator(3)])
icao = models.CharField(_("ICAO code"), null=True, blank=True, max_length=4,
validators=[MinLengthValidator(4)])
local = models.CharField(_("Local code"), null=True, blank=True, max_length=12)
ident = models.CharField(_("Ident code"), null=True, blank=True, max_length=12)
altitude = models.FloatField(_("altitude"), default=0)
location = models.PointField(_("location"))
country = models.ForeignKey('cities.Country', on_delete=models.DO_NOTHING, null=True)
region = models.ForeignKey('cities.Region', on_delete=models.DO_NOTHING, null=True)
city = models.ForeignKey('cities.City', on_delete=models.DO_NOTHING, null=True)
type = models.CharField(max_length=16)
objects = GeoManager()
class Meta: # pylint: disable=C1001
ordering = ['airport_id']
def __str__(self):
return force_text(self.name)
| mit | Python |
c94c86df52184af6b07dcf58951688cea178b8e6 | Make lua autoconfig work better. | DMOJ/judge,DMOJ/judge,DMOJ/judge | dmoj/executors/LUA.py | dmoj/executors/LUA.py | from .base_executor import ScriptExecutor
class Executor(ScriptExecutor):
ext = '.lua'
name = 'LUA'
command = 'lua'
command_paths = ['lua', 'lua5.3', 'lua5.2', 'lua5.1']
address_grace = 131072
test_program = "io.write(io.read('*all'))"
@classmethod
def get_version_flags(cls, command):
return ['-v']
| from .base_executor import ScriptExecutor
class Executor(ScriptExecutor):
ext = '.lua'
name = 'LUA'
command = 'lua'
address_grace = 131072
test_program = "io.write(io.read('*all'))"
@classmethod
def get_version_flags(cls, command):
return ['-v']
| agpl-3.0 | Python |
3c35369a5a7b67e934d59c321439e3d3e5495970 | Fix dependency | snare/scruffy | example/duckman/setup.py | example/duckman/setup.py | import sys
from setuptools import setup
setup(
name = "duckman",
version = "0.1",
author = "snare",
author_email = "snare@ho.ax",
description = ("Ya thrust yer pelvis HUAGHH"),
license = "Buy snare a beer",
keywords = "duckman",
url = "https://github.com/snare/scruffy",
packages=['duckman'],
entry_points = {
'console_scripts': ['duckman = duckman:main']
},
install_requires = ['scruffington']
)
| import sys
from setuptools import setup
setup(
name = "duckman",
version = "0.1",
author = "snare",
author_email = "snare@ho.ax",
description = ("Ya thrust yer pelvis HUAGHH"),
license = "Buy snare a beer",
keywords = "duckman",
url = "https://github.com/snare/scruffy",
packages=['duckman'],
entry_points = {
'console_scripts': ['duckman = duckman:main']
},
install_requires = ['scruffy'],
dependency_links = ["https://github.com/snarez/scruffy/tarball/master#egg=scruffy"]
)
| mit | Python |
47e0a3e3822253c5dfbd55d98c8235e28b8c5419 | Update __init__.py | stuaxo/vext | vext/__init__.py | vext/__init__.py | import logging
from os import environ
from os.path import join
from distutils.sysconfig import get_python_lib
if environ.get("VEXT_DEBUG_LOG", "0") == "1":
logging.basicConfig(level=logging.DEBUG)
else:
logging.basicConfig(level=logging.ERROR)
vext_pth = join(get_python_lib(), 'vext_importer.pth')
logger = logging.getLogger("vext")
def install_importer():
logger.debug("install_importer has been moved to gatekeeper module")
from vext import gatekeeper
gatekeeper.install_importer()
| import logging
from os import environ
from os.path import join
from distutils.sysconfig import get_python_lib
if "VEXT_DEBUG_LOG" in environ:
logging.basicConfig(level=logging.DEBUG)
vext_pth = join(get_python_lib(), 'vext_importer.pth')
logger = logging.getLogger("vext")
def install_importer():
logger.debug("install_importer has been moved to gatekeeper module")
from vext import gatekeeper
gatekeeper.install_importer() | mit | Python |
83429a5abcecc9a750a9b376b2ee86922a3861e4 | refactor is_conda placement | efiop/dvc,dmpetrov/dataversioncontrol,dmpetrov/dataversioncontrol,efiop/dvc | dvc/utils/pkg.py | dvc/utils/pkg.py | from dvc.utils import is_binary
def is_conda():
try:
from .build import PKG # patched during conda package build
return PKG == "conda"
except ImportError:
return False
def get_linux():
import distro
if not is_binary():
return "pip"
package_managers = {
"rhel": "yum",
"centos": "yum",
"fedora": "yum",
"amazon": "yum",
"opensuse": "yum",
"ubuntu": "apt",
"debian": "apt",
}
return package_managers.get(distro.id())
def get_darwin():
if not is_binary():
if __file__.startswith("/usr/local/Cellar"):
return "formula"
else:
return "pip"
return None
def get_windows():
return None if is_binary() else "pip"
def get_package_manager():
import platform
from dvc.exceptions import DvcException
if is_conda():
return "conda"
m = {
"Windows": get_windows(),
"Darwin": get_darwin(),
"Linux": get_linux(),
}
system = platform.system()
func = m.get(system)
if func is None:
raise DvcException("not supported system '{}'".format(system))
return func
| from dvc.utils import is_binary
def is_conda():
try:
from .build import PKG # patched during conda package build
return PKG == "conda"
except ImportError:
return False
def get_linux():
import distro
if is_conda():
return "conda"
if not is_binary():
return "pip"
package_managers = {
"rhel": "yum",
"centos": "yum",
"fedora": "yum",
"amazon": "yum",
"opensuse": "yum",
"ubuntu": "apt",
"debian": "apt",
}
return package_managers.get(distro.id())
def get_darwin():
if is_conda():
return "conda"
if not is_binary():
if __file__.startswith("/usr/local/Cellar"):
return "formula"
else:
return "pip"
return None
def get_windows():
if is_conda():
return "conda"
return None if is_binary() else "pip"
def get_package_manager():
import platform
from dvc.exceptions import DvcException
m = {
"Windows": get_windows(),
"Darwin": get_darwin(),
"Linux": get_linux(),
}
system = platform.system()
func = m.get(system)
if func is None:
raise DvcException("not supported system '{}'".format(system))
return func
| apache-2.0 | Python |
e9f2cda8cb0019a3b41226c746e51b2dff5d1518 | bump up the version to 0.1.4 | tyrchen/vint | vint/__init__.py | vint/__init__.py | #!/usr/bin/env python
# -*- coding: utf-8 -*-
__title__ = 'vint'
__version__ = '0.1.4'
__author__ = 'Tyr Chen'
__email__ = 'tyr.chen@gmail.com'
__license__ = 'MIT'
__copyright__ = 'Copyright 2013 Tyr Chen'
| #!/usr/bin/env python
# -*- coding: utf-8 -*-
__title__ = 'vint'
__version__ = '0.1.3'
__author__ = 'Tyr Chen'
__email__ = 'tyr.chen@gmail.com'
__license__ = 'MIT'
__copyright__ = 'Copyright 2013 Tyr Chen'
| mit | Python |
7cef87a81278c227db0cb07329d1b659dbd175b3 | Use standard library instead of django.utils.importlib | novafloss/django-mail-factory,novafloss/django-mail-factory | mail_factory/models.py | mail_factory/models.py | # -*- coding: utf-8 -*-
import django
from django.conf import settings
from django.utils.module_loading import module_has_submodule
try:
from importlib import import_module
except ImportError:
# Compatibility for python-2.6
from django.utils.importlib import import_module
def autodiscover():
"""Auto-discover INSTALLED_APPS mails.py modules."""
for app in settings.INSTALLED_APPS:
module = '%s.mails' % app # Attempt to import the app's 'mails' module
try:
import_module(module)
except:
# Decide whether to bubble up this error. If the app just
# doesn't have a mails module, we can ignore the error
# attempting to import it, otherwise we want it to bubble up.
app_module = import_module(app)
if module_has_submodule(app_module, 'mails'):
raise
# If we're using Django >= 1.7, use the new app-loading mecanism which is way
# better.
if django.VERSION < (1, 7):
autodiscover()
| # -*- coding: utf-8 -*-
import django
from django.conf import settings
from django.utils.importlib import import_module
from django.utils.module_loading import module_has_submodule
def autodiscover():
"""Auto-discover INSTALLED_APPS mails.py modules."""
for app in settings.INSTALLED_APPS:
module = '%s.mails' % app # Attempt to import the app's 'mails' module
try:
import_module(module)
except:
# Decide whether to bubble up this error. If the app just
# doesn't have a mails module, we can ignore the error
# attempting to import it, otherwise we want it to bubble up.
app_module = import_module(app)
if module_has_submodule(app_module, 'mails'):
raise
# If we're using Django >= 1.7, use the new app-loading mecanism which is way
# better.
if django.VERSION < (1, 7):
autodiscover()
| bsd-3-clause | Python |
3aaca776a2b3292168594e4734c23336f28ddb88 | fix tagged_object_list in middleware | wlanslovenija/cmsplugin-blog,divio/cmsplugin-blog,divio/cmsplugin-blog,divio/cmsplugin-blog,wlanslovenija/cmsplugin-blog,wlanslovenija/cmsplugin-blog,wlanslovenija/cmsplugin-blog | cmsplugin_blog/middleware.py | cmsplugin_blog/middleware.py | from simple_translation.middleware import MultilingualGenericsMiddleware, filter_queryset_language
from cmsplugin_blog.models import Entry
class MultilingualBlogEntriesMiddleware(MultilingualGenericsMiddleware):
language_fallback_middlewares = [
'django.middleware.locale.LocaleMiddleware',
'cms.middleware.multilingual.MultilingualURLMiddleware'
]
def process_view(self, request, view_func, view_args, view_kwargs):
super(MultilingualGenericsMiddleware, self).process_response(request, view_func, view_args, view_kwargs)
if 'queryset_or_model' in view_kwargs and hasattr(view_kwargs['queryset_or_model'], 'model'):
view_kwargs['queryset_or_model'] = filter_queryset_language(request, view_kwargs['queryset_or_model']) | from simple_translation.middleware import MultilingualGenericsMiddleware
from cmsplugin_blog.models import Entry
class MultilingualBlogEntriesMiddleware(MultilingualGenericsMiddleware):
language_fallback_middlewares = [
'django.middleware.locale.LocaleMiddleware',
'cms.middleware.multilingual.MultilingualURLMiddleware'
] | bsd-3-clause | Python |
bd8698e733e9cfc99040f3c9ecc217525303432d | make middleware work with cbv | divio/cmsplugin-blog,wlanslovenija/cmsplugin-blog,wlanslovenija/cmsplugin-blog,divio/cmsplugin-blog,wlanslovenija/cmsplugin-blog,wlanslovenija/cmsplugin-blog,divio/cmsplugin-blog | cmsplugin_blog/middleware.py | cmsplugin_blog/middleware.py | from simple_translation.middleware import MultilingualGenericsMiddleware
from cmsplugin_blog.models import Entry
class MultilingualBlogEntriesMiddleware(MultilingualGenericsMiddleware):
language_fallback_middlewares = [
'django.middleware.locale.LocaleMiddleware',
'cms.middleware.multilingual.MultilingualURLMiddleware'
]
def process_view(self, request, view_func, view_args, view_kwargs):
if 'queryset' in view_kwargs or hasattr(view_func, 'queryset'):
queryset = getattr(view_func, 'queryset', view_kwargs['queryset'])
model = queryset.model
if model == Entry:
super(MultilingualBlogEntriesMiddleware, self).process_view(
request, view_func, view_args, view_kwargs)
if 'queryset_or_model' in view_kwargs and getattr(view_kwargs['queryset_or_model'], 'model', None) == Entry:
view_kwargs['queryset'] = view_kwargs['queryset_or_model']
super(MultilingualBlogEntriesMiddleware, self).process_view(
request, view_func, view_args, view_kwargs)
view_kwargs['queryset_or_model'] = view_kwargs['queryset']
del view_kwargs['queryset']
| from simple_translation.middleware import MultilingualGenericsMiddleware
from cmsplugin_blog.models import Entry
class MultilingualBlogEntriesMiddleware(MultilingualGenericsMiddleware):
language_fallback_middlewares = [
'django.middleware.locale.LocaleMiddleware',
'cms.middleware.multilingual.MultilingualURLMiddleware'
]
def process_view(self, request, view_func, view_args, view_kwargs):
if 'queryset' in view_kwargs and view_kwargs['queryset'].model == Entry:
super(MultilingualBlogEntriesMiddleware, self).process_view(
request, view_func, view_args, view_kwargs)
if 'queryset_or_model' in view_kwargs and getattr(view_kwargs['queryset_or_model'], 'model', None) == Entry:
view_kwargs['queryset'] = view_kwargs['queryset_or_model']
super(MultilingualBlogEntriesMiddleware, self).process_view(
request, view_func, view_args, view_kwargs)
view_kwargs['queryset_or_model'] = view_kwargs['queryset']
del view_kwargs['queryset']
| bsd-3-clause | Python |
48008e92751f21ef210f3df18a9e239c38767689 | Use the new name for `raw_input` in Python 3. Close #82. | erikrose/nose-progressive | noseprogressive/wrapping.py | noseprogressive/wrapping.py | """Facilities for wrapping stderr and stdout and dealing with the fallout"""
from __future__ import with_statement
import __builtin__
import cmd
import pdb
import sys
def cmdloop(self, *args, **kwargs):
"""Call pdb's cmdloop, making readline work.
Patch raw_input so it sees the original stdin and stdout, lest
readline refuse to work.
The C implementation of raw_input uses readline functionality only if
both stdin and stdout are from a terminal AND are FILE*s (not
PyObject*s): http://bugs.python.org/issue5727 and
https://bugzilla.redhat.com/show_bug.cgi?id=448864
"""
def unwrapping_raw_input(*args, **kwargs):
"""Call raw_input(), making sure it finds an unwrapped stdout."""
wrapped_stdout = sys.stdout
sys.stdout = wrapped_stdout.stream
ret = orig_raw_input(*args, **kwargs)
sys.stdout = wrapped_stdout
return ret
try:
orig_raw_input = raw_input
except NameError:
orig_raw_input = input
if hasattr(sys.stdout, 'stream'):
__builtin__.raw_input = unwrapping_raw_input
# else if capture plugin has replaced it with a StringIO, don't bother.
try:
# Interesting things happen when you try to not reference the
# superclass explicitly.
ret = cmd.Cmd.cmdloop(self, *args, **kwargs)
finally:
__builtin__.raw_input = orig_raw_input
return ret
def set_trace(*args, **kwargs):
"""Call pdb.set_trace, making sure it receives the unwrapped stdout.
This is so we don't keep drawing progress bars over debugger output.
"""
# There's no stream attr if capture plugin is enabled:
out = sys.stdout.stream if hasattr(sys.stdout, 'stream') else None
# Python 2.5 can't put an explicit kwarg and **kwargs in the same function
# call.
kwargs['stdout'] = out
debugger = pdb.Pdb(*args, **kwargs)
# Ordinarily (and in a silly fashion), pdb refuses to use raw_input() if
# you pass it a stream on instantiation. Fix that:
debugger.use_rawinput = True
debugger.set_trace(sys._getframe().f_back)
class StreamWrapper(object):
"""Wrapper for stdout/stderr to do progress bar dodging"""
# An outer class so isinstance() works in begin()
def __init__(self, stream, plugin):
self.stream = stream
self._plugin = plugin
def __getattr__(self, name):
return getattr(self.stream, name)
def write(self, data):
if hasattr(self._plugin, 'bar'):
with self._plugin.bar.dodging():
self.stream.write(data)
else:
# Some things write to stderr before the bar is inited.
self.stream.write(data)
| """Facilities for wrapping stderr and stdout and dealing with the fallout"""
from __future__ import with_statement
import __builtin__
import cmd
import pdb
import sys
def cmdloop(self, *args, **kwargs):
"""Call pdb's cmdloop, making readline work.
Patch raw_input so it sees the original stdin and stdout, lest
readline refuse to work.
The C implementation of raw_input uses readline functionality only if
both stdin and stdout are from a terminal AND are FILE*s (not
PyObject*s): http://bugs.python.org/issue5727 and
https://bugzilla.redhat.com/show_bug.cgi?id=448864
"""
def unwrapping_raw_input(*args, **kwargs):
"""Call raw_input(), making sure it finds an unwrapped stdout."""
wrapped_stdout = sys.stdout
sys.stdout = wrapped_stdout.stream
ret = orig_raw_input(*args, **kwargs)
sys.stdout = wrapped_stdout
return ret
orig_raw_input = raw_input
if hasattr(sys.stdout, 'stream'):
__builtin__.raw_input = unwrapping_raw_input
# else if capture plugin has replaced it with a StringIO, don't bother.
try:
# Interesting things happen when you try to not reference the
# superclass explicitly.
ret = cmd.Cmd.cmdloop(self, *args, **kwargs)
finally:
__builtin__.raw_input = orig_raw_input
return ret
def set_trace(*args, **kwargs):
"""Call pdb.set_trace, making sure it receives the unwrapped stdout.
This is so we don't keep drawing progress bars over debugger output.
"""
# There's no stream attr if capture plugin is enabled:
out = sys.stdout.stream if hasattr(sys.stdout, 'stream') else None
# Python 2.5 can't put an explicit kwarg and **kwargs in the same function
# call.
kwargs['stdout'] = out
debugger = pdb.Pdb(*args, **kwargs)
# Ordinarily (and in a silly fashion), pdb refuses to use raw_input() if
# you pass it a stream on instantiation. Fix that:
debugger.use_rawinput = True
debugger.set_trace(sys._getframe().f_back)
class StreamWrapper(object):
"""Wrapper for stdout/stderr to do progress bar dodging"""
# An outer class so isinstance() works in begin()
def __init__(self, stream, plugin):
self.stream = stream
self._plugin = plugin
def __getattr__(self, name):
return getattr(self.stream, name)
def write(self, data):
if hasattr(self._plugin, 'bar'):
with self._plugin.bar.dodging():
self.stream.write(data)
else:
# Some things write to stderr before the bar is inited.
self.stream.write(data)
| mit | Python |
b1dc3196ee12cb5a271fec17d92c2c4b6e977032 | use Glen's icons | ellson/graphviz-web-dynamic,ellson/graphviz-web-dynamic,ellson/graphviz-web-dynamic | ht2php.py | ht2php.py | #!/usr/bin/python
import sys
if len(sys.argv) < 2:
exit
pageset = sys.argv[1].split()
source = sys.argv[2]
basename = source.split('.')[0]
if len(basename.split('_')) > 1:
baseparent = basename.split('_')[0]
else:
baseparent = ''
if basename == 'Download':
fout = open(basename + '.php', 'w')
fin = open('Agree.ht', 'r')
fout.write(fin.read())
fin.close
fout = open(basename + '..php', 'w')
fout.write('<?php setcookie(clickthroughlicense); ?>\n')
else:
fout = open(basename + '.php', 'w')
fout.write('''<!DOCTYPE HTML PUBLIC "-//W3C//DTD HTML 4.01 Transitional//EN">
<!--
This is a generated document. Please edit "''' + basename + '''.ht" instead
and then type "make".
-->
<html>
<head>
<title>Graphviz</title>
</head>
<body bgcolor="white">
<table cellspacing="20">
<tr><td>
<!-- icon -->
<img src="icons/doc-''' + basename.lower() + '''.png" alt="">
</td><td>
<!-- header -->
<h2>Graphviz - Graph Visualization Software</h2>
<p>
<h1>''' + basename + '''</h1>
</td></tr>
<tr><td valign="top">
<!-- menu -->
\t<table bgcolor="#c0c0ff">\n''')
for page in pageset:
menuitem = page.split('.')[0]
if len(menuitem.split('_')) > 1:
menuparent = menuitem.split('_')[0]
menuname = menuitem.split('_')[1]
indent = ' '
else:
menuparent = ''
menuname = menuitem
indent = ''
if basename == menuparent or baseparent == menuparent or menuparent == '':
if basename == menuitem:
fout.write('\t<tr><td bgcolor="#c0ffc0">' + indent + menuname + '</td></tr>\n')
else:
fout.write('\t<tr><td>' + indent + '<a href="' + menuitem + '.php">' + menuname + '</a></td></tr>\n')
fout.write('''\t</table>
</td><td valign="top">
<!-- body -->\n''')
fin = open(source, 'r')
fout.write(fin.read())
fin.close
fout.write('''</td></tr>
</table>
</body>
</html>\n''')
fout.close
| #!/usr/bin/python
import sys
# Usage: ht2php.py "<page1.ht page2.ht ...>" <source.ht>
# Wraps a .ht HTML fragment in the site header, navigation menu and footer,
# writing <basename>.php next to it.
# NOTE(review): `exit` below is referenced but never called, so this guard is
# a no-op and a missing argument surfaces as an IndexError on sys.argv[2];
# sys.exit() was presumably intended -- confirm before changing.
if len(sys.argv) < 2:
exit
# argv[1] is the whitespace-separated set of all pages (used for the menu);
# argv[2] is the fragment to convert.
pageset = sys.argv[1].split()
source = sys.argv[2]
basename = source.split('.')[0]
# A page named "Parent_Child.ht" is a child entry of the "Parent" section.
if len(basename.split('_')) > 1:
baseparent = basename.split('_')[0]
else:
baseparent = ''
if basename == 'Download':
# Special case: Download.php shows the click-through license (Agree.ht),
# and Download..php records the acceptance cookie.
fout = open(basename + '.php', 'w')
fin = open('Agree.ht', 'r')
fout.write(fin.read())
# NOTE(review): `fin.close` (and `fout.close` at the bottom) lack
# parentheses, so these files are never explicitly closed.
fin.close
fout = open(basename + '..php', 'w')
fout.write('<?php setcookie(clickthroughlicense); ?>\n')
else:
# Normal pages: generated-file banner, page header, then the menu table.
fout = open(basename + '.php', 'w')
fout.write('''<!DOCTYPE HTML PUBLIC "-//W3C//DTD HTML 4.01 Transitional//EN">
<!--
This is a generated document. Please edit "''' + basename + '''.ht" instead
and then type "make".
-->
<html>
<head>
<title>Graphviz</title>
</head>
<body bgcolor="white">
<table cellspacing="20">
<tr><td>
<!-- icon -->
<img src="doc.png" alt="">
</td><td>
<!-- header -->
<h2>Graphviz - Graph Visualization Software</h2>
<p>
<h1>''' + basename + '''</h1>
</td></tr>
<tr><td valign="top">
<!-- menu -->
\t<table bgcolor="#c0c0ff">\n''')
# One menu row per page.  Child entries are indented and only emitted when
# their parent section matches the current page; top-level entries always
# appear.  The current page is highlighted instead of linked.
for page in pageset:
menuitem = page.split('.')[0]
if len(menuitem.split('_')) > 1:
menuparent = menuitem.split('_')[0]
menuname = menuitem.split('_')[1]
indent = ' '
else:
menuparent = ''
menuname = menuitem
indent = ''
if basename == menuparent or baseparent == menuparent or menuparent == '':
if basename == menuitem:
fout.write('\t<tr><td bgcolor="#c0ffc0">' + indent + menuname + '</td></tr>\n')
else:
fout.write('\t<tr><td>' + indent + '<a href="' + menuitem + '.php">' + menuname + '</a></td></tr>\n')
# Close the menu, splice in the page body from the .ht source, then emit
# the footer.
# NOTE(review): indentation was lost in transit, so whether this tail is
# inside the else-branch or shared with the Download branch cannot be told
# from this copy -- verify against the original file.
fout.write('''\t</table>
</td><td valign="top">
<!-- body -->\n''')
fin = open(source, 'r')
fout.write(fin.read())
fin.close
fout.write('''</td></tr>
</table>
</body>
</html>\n''')
fout.close
| epl-1.0 | Python |
84f38202cfcbcd3306662905fa8ea81e9b095904 | Bump version to 3.0.10.post0 | webu/django-cms,irudayarajisawa/django-cms,SofiaReis/django-cms,isotoma/django-cms,isotoma/django-cms,rryan/django-cms,benzkji/django-cms,datakortet/django-cms,saintbird/django-cms,owers19856/django-cms,jsma/django-cms,SachaMPS/django-cms,kk9599/django-cms,divio/django-cms,rsalmaso/django-cms,vad/django-cms,chmberl/django-cms,Livefyre/django-cms,divio/django-cms,webu/django-cms,DylannCordel/django-cms,petecummings/django-cms,rsalmaso/django-cms,evildmp/django-cms,petecummings/django-cms,andyzsf/django-cms,liuyisiyisi/django-cms,robmagee/django-cms,evildmp/django-cms,benzkji/django-cms,SmithsonianEnterprises/django-cms,kk9599/django-cms,czpython/django-cms,donce/django-cms,Jaccorot/django-cms,nimbis/django-cms,irudayarajisawa/django-cms,rryan/django-cms,wuzhihui1123/django-cms,josjevv/django-cms,sephii/django-cms,josjevv/django-cms,frnhr/django-cms,divio/django-cms,takeshineshiro/django-cms,farhaadila/django-cms,Livefyre/django-cms,cyberintruder/django-cms,philippze/django-cms,SofiaReis/django-cms,intip/django-cms,bittner/django-cms,isotoma/django-cms,saintbird/django-cms,jeffreylu9/django-cms,jeffreylu9/django-cms,Jaccorot/django-cms,memnonila/django-cms,netzkolchose/django-cms,FinalAngel/django-cms,timgraham/django-cms,stefanw/django-cms,takeshineshiro/django-cms,nimbis/django-cms,mkoistinen/django-cms,robmagee/django-cms,jsma/django-cms,iddqd1/django-cms,rryan/django-cms,jproffitt/django-cms,qnub/django-cms,FinalAngel/django-cms,andyzsf/django-cms,rscnt/django-cms,qnub/django-cms,jeffreylu9/django-cms,Jaccorot/django-cms,takeshineshiro/django-cms,bittner/django-cms,keimlink/django-cms,wyg3958/django-cms,youprofit/django-cms,frnhr/django-cms,isotoma/django-cms,philippze/django-cms,petecummings/django-cms,AlexProfi/django-cms,memnonila/django-cms,keimlink/django-cms,evildmp/django-cms,josjevv/django-cms,vxsx/django-cms,Vegasvikk/django-cms,frnhr/django-cms,yakky/django-cms,jproffitt/django-cms
,webu/django-cms,divio/django-cms,iddqd1/django-cms,wuzhihui1123/django-cms,SmithsonianEnterprises/django-cms,donce/django-cms,dhorelik/django-cms,youprofit/django-cms,farhaadila/django-cms,intip/django-cms,benzkji/django-cms,irudayarajisawa/django-cms,leture/django-cms,farhaadila/django-cms,timgraham/django-cms,mkoistinen/django-cms,dhorelik/django-cms,jsma/django-cms,SofiaReis/django-cms,stefanfoulis/django-cms,intip/django-cms,bittner/django-cms,vad/django-cms,czpython/django-cms,bittner/django-cms,leture/django-cms,AlexProfi/django-cms,cyberintruder/django-cms,datakortet/django-cms,chmberl/django-cms,chkir/django-cms,vxsx/django-cms,stefanfoulis/django-cms,netzkolchose/django-cms,liuyisiyisi/django-cms,DylannCordel/django-cms,sephii/django-cms,cyberintruder/django-cms,wyg3958/django-cms,wuzhihui1123/django-cms,dhorelik/django-cms,netzkolchose/django-cms,Vegasvikk/django-cms,stefanw/django-cms,saintbird/django-cms,vad/django-cms,nimbis/django-cms,vxsx/django-cms,rsalmaso/django-cms,leture/django-cms,netzkolchose/django-cms,intip/django-cms,jsma/django-cms,benzkji/django-cms,mkoistinen/django-cms,sznekol/django-cms,nimbis/django-cms,sznekol/django-cms,owers19856/django-cms,youprofit/django-cms,SachaMPS/django-cms,vxsx/django-cms,vad/django-cms,yakky/django-cms,mkoistinen/django-cms,stefanfoulis/django-cms,evildmp/django-cms,robmagee/django-cms,datakortet/django-cms,jproffitt/django-cms,wuzhihui1123/django-cms,rryan/django-cms,iddqd1/django-cms,yakky/django-cms,philippze/django-cms,Livefyre/django-cms,DylannCordel/django-cms,czpython/django-cms,rscnt/django-cms,donce/django-cms,qnub/django-cms,Vegasvikk/django-cms,kk9599/django-cms,rsalmaso/django-cms,stefanw/django-cms,andyzsf/django-cms,SachaMPS/django-cms,yakky/django-cms,chmberl/django-cms,chkir/django-cms,SmithsonianEnterprises/django-cms,chkir/django-cms,wyg3958/django-cms,memnonila/django-cms,sznekol/django-cms,stefanfoulis/django-cms,jeffreylu9/django-cms,liuyisiyisi/django-cms,AlexProfi/django-cms,datakort
et/django-cms,rscnt/django-cms,frnhr/django-cms,czpython/django-cms,owers19856/django-cms,sephii/django-cms,stefanw/django-cms,andyzsf/django-cms,FinalAngel/django-cms,FinalAngel/django-cms,sephii/django-cms,Livefyre/django-cms,timgraham/django-cms,jproffitt/django-cms,keimlink/django-cms | cms/__init__.py | cms/__init__.py | # -*- coding: utf-8 -*-
# Distribution version string (PEP 440); ".post0" marks a post-release of
# 3.0.10.
__version__ = '3.0.10.post0'
# Dotted path to the AppConfig Django loads when "cms" is in INSTALLED_APPS.
default_app_config = 'cms.apps.CMSConfig'
| # -*- coding: utf-8 -*-
# Distribution version string (PEP 440).
__version__ = '3.0.10'
# Dotted path to the AppConfig Django loads when "cms" is in INSTALLED_APPS.
default_app_config = 'cms.apps.CMSConfig'
| bsd-3-clause | Python |
d1b9dece3282aea0464c37484be6aab086a7e082 | use sys.executable instead of 'python' for running regression tests | pypa/setuptools_scm,pypa/setuptools_scm,RonnyPfannschmidt/setuptools_scm,RonnyPfannschmidt/setuptools_scm | testing/test_regressions.py | testing/test_regressions.py | import sys
from setuptools_scm import get_version
from setuptools_scm.git import parse
from setuptools_scm.utils import do_ex, do
import pytest
def test_pkginfo_noscmroot(tmpdir, monkeypatch):
    """if we are indeed a sdist, the root does not apply"""
    monkeypatch.delenv("SETUPTOOLS_SCM_DEBUG")

    # With a broken git layout the version must come from PKG-INFO.
    pkg_dir = tmpdir.ensure('sub/package', dir=1)
    tmpdir.mkdir('.git')
    pkg_dir.join('setup.py').write(
        'from setuptools import setup;'
        'setup(use_scm_version={"root": ".."})')

    get_version_cmd = sys.executable + ' setup.py --version'

    # No metadata at all: setup.py must fail loudly.
    _, stderr, ret = do_ex(get_version_cmd, pkg_dir)
    assert 'setuptools-scm was unable to detect version for' in stderr
    assert ret == 1

    # A PKG-INFO file supplies the fallback version.
    pkg_dir.join("PKG-INFO").write('Version: 1.0')
    assert do(get_version_cmd, pkg_dir) == '1.0'

    # Even with a (useless) git repo above, PKG-INFO still wins.
    do('git init', pkg_dir.dirpath())
    assert do(get_version_cmd, pkg_dir) == '1.0'
def test_pip_egg_info(tmpdir, monkeypatch):
    """if we are indeed a sdist, the root does not apply"""
    # Without any usable metadata, version detection must fail outright.
    pkg_dir = tmpdir.ensure('sub/package', dir=1)
    tmpdir.mkdir('.git')
    pkg_dir.join('setup.py').write(
        'from setuptools import setup;'
        'setup(use_scm_version={"root": ".."})')
    with pytest.raises(LookupError):
        get_version(root=pkg_dir.strpath)

    # pip's temporary egg-info metadata is an acceptable fallback source.
    pkg_dir.ensure('pip-egg-info/random.egg-info/PKG-INFO').write('Version: 1.0')
    assert get_version(root=pkg_dir.strpath) == '1.0'
def test_use_scm_version_callable(tmpdir, monkeypatch):
"""use of callable as use_scm_version argument"""
monkeypatch.delenv("SETUPTOOLS_SCM_DEBUG")
p = tmpdir.ensure('sub/package', dir=1)
# setup.py defers the use_scm_version configuration to a callable that
# returns the config dict, exercising the callable code path.
# NOTE(review): the indentation inside the embedded snippet below looks
# mangled in this copy (the nested defs need indenting to be valid
# Python) -- verify against the original file before relying on it.
p.join('setup.py').write(
'''from setuptools import setup
def vcfg():
from setuptools_scm.version import guess_next_dev_version
def vs(v):
return guess_next_dev_version(v)
return {"version_scheme": vs}
setup(use_scm_version=vcfg)
''')
# PKG-INFO supplies the version, so --version must report it.
p.join("PKG-INFO").write('Version: 1.0')
res = do(sys.executable + ' setup.py --version', p)
assert res == '1.0'
@pytest.mark.skipif(sys.platform != 'win32',
                    reason="this bug is only valid on windows")
def test_case_mismatch_on_windows_git(tmpdir):
    """Case insensitive path checks on Windows"""
    # The repo lives in "CapitalizedDir", but parse() receives the
    # lower-cased spelling; on Windows both must resolve to the repo.
    repo = tmpdir.ensure("CapitalizedDir", dir=1)
    do('git init', repo)
    assert parse(str(repo).lower()) is not None
| import sys
from setuptools_scm import get_version
from setuptools_scm.git import parse
from setuptools_scm.utils import do_ex, do
import pytest
def test_pkginfo_noscmroot(tmpdir, monkeypatch):
    """if we are indeed a sdist, the root does not apply"""
    monkeypatch.delenv("SETUPTOOLS_SCM_DEBUG")
    # we should get the version from PKG-INFO if git is broken
    p = tmpdir.ensure('sub/package', dir=1)
    tmpdir.mkdir('.git')
    p.join('setup.py').write(
        'from setuptools import setup;'
        'setup(use_scm_version={"root": ".."})')
    # Run setup.py with the interpreter executing this test suite: a bare
    # 'python' may resolve to a different (or missing) interpreter on PATH,
    # e.g. under tox/virtualenvs or on Windows.
    _, stderr, ret = do_ex(sys.executable + ' setup.py --version', p)
    assert 'setuptools-scm was unable to detect version for' in stderr
    assert ret == 1
    p.join("PKG-INFO").write('Version: 1.0')
    res = do(sys.executable + ' setup.py --version', p)
    assert res == '1.0'
    do('git init', p.dirpath())
    res = do(sys.executable + ' setup.py --version', p)
    assert res == '1.0'
def test_pip_egg_info(tmpdir, monkeypatch):
    """if we are indeed a sdist, the root does not apply"""
    # Without any usable metadata, version detection must fail outright.
    pkg_dir = tmpdir.ensure('sub/package', dir=1)
    tmpdir.mkdir('.git')
    pkg_dir.join('setup.py').write(
        'from setuptools import setup;'
        'setup(use_scm_version={"root": ".."})')
    with pytest.raises(LookupError):
        get_version(root=pkg_dir.strpath)

    # pip's temporary egg-info metadata is an acceptable fallback source.
    pkg_dir.ensure('pip-egg-info/random.egg-info/PKG-INFO').write('Version: 1.0')
    assert get_version(root=pkg_dir.strpath) == '1.0'
def test_use_scm_version_callable(tmpdir, monkeypatch):
    """use of callable as use_scm_version argument"""
    monkeypatch.delenv("SETUPTOOLS_SCM_DEBUG")
    p = tmpdir.ensure('sub/package', dir=1)
    # setup.py defers the use_scm_version configuration to a callable that
    # returns the config dict, exercising the callable code path.
    # NOTE(review): the indentation inside the embedded snippet looks
    # mangled in this copy (the nested defs need indenting) -- verify
    # against the original file; the bytes are kept as found.
    p.join('setup.py').write(
        '''from setuptools import setup
def vcfg():
from setuptools_scm.version import guess_next_dev_version
def vs(v):
return guess_next_dev_version(v)
return {"version_scheme": vs}
setup(use_scm_version=vcfg)
''')
    p.join("PKG-INFO").write('Version: 1.0')
    # Use the interpreter running the tests instead of a bare 'python',
    # which may resolve to a different interpreter on PATH.
    res = do(sys.executable + ' setup.py --version', p)
    assert res == '1.0'
@pytest.mark.skipif(sys.platform != 'win32',
                    reason="this bug is only valid on windows")
def test_case_mismatch_on_windows_git(tmpdir):
    """Case insensitive path checks on Windows"""
    # The repo lives in "CapitalizedDir", but parse() receives the
    # lower-cased spelling; on Windows both must resolve to the repo.
    repo = tmpdir.ensure("CapitalizedDir", dir=1)
    do('git init', repo)
    assert parse(str(repo).lower()) is not None
| mit | Python |
93ff82b816ffb70748a797777a3fd7060dd2c6de | Use ufo2ft, use loadFilterFromString | googlei18n/ufo2ft,googlefonts/ufo2ft,jamesgk/ufo2fdk | Lib/ufo2ft/filters/__main__.py | Lib/ufo2ft/filters/__main__.py | import argparse
import importlib
import logging

from fontTools.misc.cliTools import makeOutputFileName

from ufo2ft.filters import getFilterClass, logger, loadFilterFromString

# Prefer ufoLib2 for loading UFOs; fall back to defcon when unavailable.
try:
    import ufoLib2

    loader = ufoLib2.Font
except ImportError:
    import defcon

    loader = defcon.Font

logging.basicConfig(level=logging.INFO)

parser = argparse.ArgumentParser(description="Filter a UFO file")
parser.add_argument("--output", "-o", metavar="OUTPUT", help="output file name")
parser.add_argument(
    "--include", metavar="GLYPHS", help="comma-separated list of glyphs to filter"
)
parser.add_argument(
    "--exclude", metavar="GLYPHS", help="comma-separated list of glyphs to not filter"
)
parser.add_argument("ufo", metavar="UFO", help="UFO file")
parser.add_argument("filters", metavar="FILTER", nargs="+", help="filter name")
args = parser.parse_args()

if not args.output:
    args.output = makeOutputFileName(args.ufo)
ufo = loader(args.ufo)

# Build the "(include=..., exclude=...)" suffix appended to every filter
# spec.  The previous code *assigned* filterargs in both branches, so a
# later --exclude silently discarded an earlier --include; collect the
# parts instead so both options can be combined.
# NOTE(review): assumes loadFilterFromString accepts several
# comma-separated keyword arguments in one spec -- confirm.
argparts = []
if args.include:
    argparts.append(
        "include=%s" % ",".join('"%s"' % g for g in args.include.split(","))
    )
if args.exclude:
    argparts.append(
        "exclude=%s" % ",".join('"%s"' % g for g in args.exclude.split(","))
    )
filterargs = "(%s)" % ", ".join(argparts) if argparts else ""

for filtername in args.filters:
    f = loadFilterFromString(filtername + filterargs)
    f(ufo)

logger.info("Written on %s" % args.output)
ufo.save(args.output)
| import argparse
import importlib
import logging

import defcon
from fontTools.misc.cliTools import makeOutputFileName

from ufo2ft.filters import getFilterClass, logger

logging.basicConfig(level=logging.INFO)

# Command line: python -m ufo2ft.filters [options] UFO FILTER [FILTER ...]
parser = argparse.ArgumentParser(description="Filter a UFO file")
parser.add_argument("--output", "-o", metavar="OUTPUT", help="output file name")
parser.add_argument(
    "--include", metavar="GLYPHS", help="comma-separated list of glyphs to filter"
)
parser.add_argument(
    "--exclude", metavar="GLYPHS", help="comma-separated list of glyphs to not filter"
)
parser.add_argument("ufo", metavar="UFO", help="UFO file")
parser.add_argument("filters", metavar="FILTER", nargs="+", help="filter name")
args = parser.parse_args()

if not args.output:
    args.output = makeOutputFileName(args.ufo)
ufo = defcon.Font(args.ufo)

# Keyword arguments forwarded to every filter's constructor.
filterargs = {}
if args.include:
    filterargs["include"] = args.include.split(",")
if args.exclude:
    filterargs["exclude"] = args.exclude.split(",")

for name in args.filters:
    try:
        if "." not in name:
            # Bare name: look it up among the built-in ufo2ft filters.
            instance = getFilterClass(name)(**filterargs)
        else:
            # Dotted path: import the module and derive the class name from
            # the last path component ("pkg.foo" -> "FooFilter").
            module = importlib.import_module(name)
            tail = name.split(".")[-1]
            instance = getattr(module, tail[0].upper() + tail[1:] + "Filter")(
                **filterargs)
    except Exception as e:
        raise ValueError("Couldn't find filter %s: %s" % (name, e))
    instance(ufo)

logger.info("Written on %s" % args.output)
ufo.save(args.output)
| mit | Python |
fac976dabac7a3bf9d8103269519b15319f12c40 | fix broken functional tests | mark-in/securedrop-app-code,mark-in/securedrop-app-code,mark-in/securedrop-app-code,mark-in/securedrop-app-code | tests/functional/source_navigation_steps.py | tests/functional/source_navigation_steps.py | import tempfile
class SourceNavigationSteps():
    """Selenium step helpers that drive the source-facing web interface.

    Mixed into functional test cases that provide ``self.driver``,
    ``self.source_location`` and ``self.secret_message``.
    """

    def _source_visits_source_homepage(self):
        self.driver.get(self.source_location)
        self.assertEqual("SecureDrop | Protecting Journalists and Sources", self.driver.title)

    def _source_chooses_to_submit_documents(self):
        self.driver.find_element_by_id('submit-documents-button').click()
        # A fresh codename is generated and shown; remember it so later
        # steps can log back in as the same source.
        codename_el = self.driver.find_element_by_css_selector('#codename')
        self.assertTrue(len(codename_el.text) > 0)
        self.source_name = codename_el.text

    def _source_continues_to_submit_page(self):
        self.driver.find_element_by_id('continue-button').click()
        headline = self.driver.find_element_by_class_name('headline')
        self.assertEqual('Submit documents and messages', headline.text)

    def _source_submits_a_file(self):
        # The temp file must outlive the upload, so everything happens
        # inside the with-block.
        with tempfile.NamedTemporaryFile() as fp:
            fp.write(self.secret_message)
            fp.seek(0)
            doc_path = fp.name
            doc_name = doc_path.split('/')[-1]

            self.driver.find_element_by_css_selector('[name=fh]').send_keys(doc_path)
            self.driver.find_element_by_css_selector(
                'button[type=submit]').click()

            notification = self.driver.find_element_by_css_selector('p.notification')
            expected_notification = "Thanks! We received your document '%s'." % doc_name
            self.assertIn(expected_notification, notification.text)

    def _source_submits_a_message(self):
        # send_keys types the secret message into the text box.
        message_box = self.driver.find_element_by_css_selector('[name=msg]')
        message_box.send_keys(self.secret_message)
        self.driver.find_element_by_css_selector(
            'button[type=submit]').click()

        notification = self.driver.find_element_by_css_selector('p.notification')
        self.assertIn('Thanks! We received your message.', notification.text)
| import tempfile
class SourceNavigationSteps():
    """Selenium step helpers that drive the source-facing web interface.

    Mixed into functional test cases that provide ``self.driver``,
    ``self.source_location`` and ``self.secret_message``.
    """

    def _source_visits_source_homepage(self):
        self.driver.get(self.source_location)
        self.assertEqual("SecureDrop | Protecting Journalists and Sources", self.driver.title)

    def _source_chooses_to_submit_documents(self):
        self.driver.find_element_by_id('submit-documents-button').click()
        # A fresh codename is generated and shown; remember it so later
        # steps can log back in as the same source.
        codename = self.driver.find_element_by_css_selector('#codename')
        self.assertTrue(len(codename.text) > 0)
        self.source_name = codename.text

    def _source_continues_to_submit_page(self):
        continue_button = self.driver.find_element_by_id('continue-button')
        continue_button.click()
        headline = self.driver.find_element_by_class_name('headline')
        self.assertEqual('Submit documents and messages', headline.text)

    def _source_submits_a_file(self):
        # The temp file must outlive the upload, so everything happens
        # inside the with-block.
        with tempfile.NamedTemporaryFile() as file:
            file.write(self.secret_message)
            file.seek(0)
            filename = file.name
            filebasename = filename.split('/')[-1]

            file_upload_box = self.driver.find_element_by_css_selector('[name=fh]')
            file_upload_box.send_keys(filename)

            submit_button = self.driver.find_element_by_css_selector(
                'button[type=submit]')
            submit_button.click()

            notification = self.driver.find_element_by_css_selector('p.notification')
            expected_notification = "Thanks! We received your document '%s'." % filebasename
            # assertEquals is a deprecated alias of assertEqual (removed in
            # Python 3.12); the comparison itself is unchanged.
            # NOTE(review): an exact match is brittle if the page renders
            # any extra text around the message; assertIn would be more
            # robust -- confirm intent.
            self.assertEqual(expected_notification, notification.text)

    def _source_submits_a_message(self):
        text_box = self.driver.find_element_by_css_selector('[name=msg]')
        text_box.send_keys(self.secret_message)  # send_keys = type into text box
        submit_button = self.driver.find_element_by_css_selector(
            'button[type=submit]')
        submit_button.click()

        notification = self.driver.find_element_by_css_selector('p.notification')
        self.assertEqual('Thanks! We received your message.', notification.text)
| agpl-3.0 | Python |
432163e49fbe6bd40f53082138dc50de6a71b6da | Fix test_update_client_order_comments | alexandriagroup/fnapy,alexandriagroup/fnapy | tests/offline/test_client_order_comments.py | tests/offline/test_client_order_comments.py | #! /usr/bin/env python
# -*- coding: utf-8 -*-
# vim:fenc=utf-8
#
# Copyright © 2016 <>
#
# Distributed under terms of the MIT license.
# Python modules
from __future__ import unicode_literals
# Project modules
from tests import make_requests_get_mock, fake_manager
from tests.offline import ContextualTest
def test_query_client_order_comments(monkeypatch, fake_manager):
    # Replay the canned 'client_order_comments_query' response and make
    # sure the query call runs against it.
    with ContextualTest(monkeypatch, fake_manager,
                        'query_client_order_comments',
                        'client_order_comments_query'):
        fake_manager.query_client_order_comments(paging=1)
def test_update_client_order_comments(monkeypatch, fake_manager):
    # Replay the canned 'client_order_comments_update' response for a
    # seller-comment update on a specific order.
    with ContextualTest(monkeypatch, fake_manager,
                        'update_client_order_comments',
                        'client_order_comments_update'):
        fake_manager.update_client_order_comments(
            seller_comment='Hello',
            order_fnac_id='8D7472DB-7EAF-CE05-A960-FC12B812FA14')
| #! /usr/bin/env python
# -*- coding: utf-8 -*-
# vim:fenc=utf-8
#
# Copyright © 2016 <>
#
# Distributed under terms of the MIT license.
# Python modules
from __future__ import unicode_literals
# Project modules
from tests import make_requests_get_mock, fake_manager
from tests.offline import ContextualTest
def test_query_client_order_comments(monkeypatch, fake_manager):
    # Replay the canned 'client_order_comments_query' response and make
    # sure the query call runs against it.
    with ContextualTest(monkeypatch, fake_manager,
                        'query_client_order_comments',
                        'client_order_comments_query'):
        fake_manager.query_client_order_comments(paging=1)
def test_update_client_order_comments(monkeypatch, fake_manager):
    # Replay the canned 'client_order_comments_update' response for a
    # seller-comment update on a specific order.
    context = ContextualTest(monkeypatch, fake_manager,
                             'update_client_order_comments',
                             'client_order_comments_update')
    with context:
        # The comment is attached to an *order*, so the keyword is
        # order_fnac_id; offer_fnac_id was a copy/paste slip from the
        # offer-related tests and made this test fail.
        fake_manager.update_client_order_comments(
            seller_comment='Hello',
            order_fnac_id='8D7472DB-7EAF-CE05-A960-FC12B812FA14')
| mit | Python |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.