commit stringlengths 40 40 | subject stringlengths 1 1.49k | old_file stringlengths 4 311 | new_file stringlengths 4 311 | new_contents stringlengths 1 29.8k | old_contents stringlengths 0 9.9k | lang stringclasses 3 values | proba float64 0 1 |
|---|---|---|---|---|---|---|---|
16c0f9fee7ee9f67a64645f41584083373ebb2cd | fix indexing in the donation db model | blueprints/donations/donation_model.py | blueprints/donations/donation_model.py | __author__ = 'HansiHE'
from mongoengine import *
from datetime import datetime
from blueprints.auth.user_model import User
class TransactionLog(Document):
username = StringField()
date = DateTimeField(required=True, default=datetime.utcnow)
data = DictField()
class DonationTransactionStatus(EmbeddedDocument):
date = DateTimeField(required=True, default=datetime.utcnow)
status = StringField(required=True) # = payment_status
reason = StringField() # = pending_reason if exists or reason_code if exists
valid = BooleanField(required=True)
gross = FloatField(default=0)
fee = FloatField(default=0)
complete_data = DictField()
class Transaction(Document):
amount = FloatField(required=True) # = the actual calculated amount
created = DateTimeField(required=True, default=datetime.utcnow)
# https://developer.paypal.com/webapps/developer/docs/classic/ipn/integration-guide/IPNandPDTVariables/#id091EB04C0HS__id0913D0E0UQU
# Canceled_Reversal: valid=true
# Completed: valid=true
# Created: valid=false
# Denied: valid=false
# Expired: valid=false
# Failed: valid=false
# Pending: valid=true, reason=pending_reason
# Refunded: valid=false
# Reversed: valid=false, reason=ReasonCode
# Processed: valid=true
# Voided: valid=false
meta = {
'collection': 'financial_transactions',
'allow_inheritance': True,
'indexes': [
'amount'
]
}
class DonationTransaction(Transaction):
username = StringField()
email = StringField()
gross = FloatField(required=True) # = the total amount donated
fee = FloatField(required=True) # = the amount paypal has robbed us for
payment_type = StringField() # Should be either echeck or instant
transaction_id = StringField(unique=True) # = parent_txn_id or txn_id, unique id
valid = BooleanField() #Could be used for easy querying, should be set when payment_status is Pending or Completed. Changed to false if shit happens.
payment_status_events = ListField(EmbeddedDocumentField(DonationTransactionStatus)) # list of states received for this transaction
type = "donation"
meta = {
'allow_inheritance': True,
'indexes': [
'username',
# 'amount',
{
'fields': ['transaction_id'],
'unique': True,
'sparse': True
}
]
} # TODO: Make indexes work for inheritance
class PaymentTransaction(Transaction):
note = StringField()
period_begin = DateTimeField(required=True)
period_end = DateTimeField(required=True)
user = ReferenceField(User, dbref=False, required=True)
type = "payment" | __author__ = 'HansiHE'
from mongoengine import *
from datetime import datetime
from blueprints.auth.user_model import User
class TransactionLog(Document):
username = StringField()
date = DateTimeField(required=True, default=datetime.utcnow)
data = DictField()
class DonationTransactionStatus(EmbeddedDocument):
date = DateTimeField(required=True, default=datetime.utcnow)
status = StringField(required=True) # = payment_status
reason = StringField() # = pending_reason if exists or reason_code if exists
valid = BooleanField(required=True)
gross = FloatField(default=0)
fee = FloatField(default=0)
complete_data = DictField()
class Transaction(Document):
amount = FloatField(required=True) # = the actual calculated amount
created = DateTimeField(required=True, default=datetime.utcnow)
# https://developer.paypal.com/webapps/developer/docs/classic/ipn/integration-guide/IPNandPDTVariables/#id091EB04C0HS__id0913D0E0UQU
# Canceled_Reversal: valid=true
# Completed: valid=true
# Created: valid=false
# Denied: valid=false
# Expired: valid=false
# Failed: valid=false
# Pending: valid=true, reason=pending_reason
# Refunded: valid=false
# Reversed: valid=false, reason=ReasonCode
# Processed: valid=true
# Voided: valid=false
meta = {
'collection': 'financial_transactions',
'allow_inheritance': True,
#'indexes': [
# 'username',
# 'amount',
# {
# 'fields': ['transaction_id'],
# 'unique': True,
# 'sparse': True
# }
#]
} # TODO: Make indexes work for inheritance
class DonationTransaction(Transaction):
username = StringField()
email = StringField()
gross = FloatField(required=True) # = the total amount donated
fee = FloatField(required=True) # = the amount paypal has robbed us for
payment_type = StringField() # Should be either echeck or instant
transaction_id = StringField() # = parent_txn_id or txn_id, unique id # TODO: Add unique=true when indexes are working
valid = BooleanField() #Could be used for easy querying, should be set when payment_status is Pending or Completed. Changed to false if shit happens.
payment_status_events = ListField(EmbeddedDocumentField(DonationTransactionStatus)) # list of states received for this transaction
type = "donation"
class PaymentTransaction(Transaction):
note = StringField()
period_begin = DateTimeField(required=True)
period_end = DateTimeField(required=True)
user = ReferenceField(User, dbref=False, required=True)
type = "payment" | Python | 0.000001 |
b9c076865f4e0ff9b4ab007472cbab735ccf01ab | Bump version to 3.1.2 | osg_configure/version.py | osg_configure/version.py | __version__ = "3.1.2"
| __version__ = "3.1.1"
| Python | 0.000001 |
214f4094b6b5c2f4a43ff96567a7bbe87ba63d28 | Update bob.py | Python_sessions/session-2/practice_codes/bob.py | Python_sessions/session-2/practice_codes/bob.py | hello = "Hi Human, I am B.O.B. "
question1 = "What is your name? "
response1 = "Thats a lovely name! "
input(hello+question1)
print response1
answer_type = "Please answer in 'yes' or 'no'. "
question2 = "Can I help you? "
response2 = "I am a computer, not a human. "
input(question2+answer_type)
print response2
question3 = "Did you like that information? "
goodbye = "Great. Goodbye! "
input(question3+answer_type)
print goodbye
| hello = "Hi Human, I am B.O.B. "
question1 = "What is your name? "
response1 = "Thats a lovely name! "
input(hello+question1)
print response1
answer_type = "Please answer in 'yes' of 'no'. "
question2 = "Can I help you? "
response2 = "I am a computer, not a human. "
input(question2+answer_type)
print response2
question3 = "Did you like that information? "
goodbye = "Great. Goodbye! "
input(question3+answer_type)
print goodbye
| Python | 0.000002 |
0845eddc933e439fba77083c0668a3bcf74f975e | add index for format for python 2.6 | encrypted_fields/tests.py | encrypted_fields/tests.py |
import re
from datetime import datetime
from django.db import models, connection
from django.test import TestCase
from .fields import (
EncryptedCharField,
EncryptedTextField,
EncryptedDateTimeField,
EncryptedIntegerField,
)
class TestModel(models.Model):
char = EncryptedCharField(max_length=255, null=True)
text = EncryptedTextField(null=True)
datetime = EncryptedDateTimeField(null=True)
integer = EncryptedIntegerField(null=True)
class FieldTest(TestCase):
def get_db_value(self, field, model_id):
cursor = connection.cursor()
cursor.execute(
'select {0} '
'from encrypted_fields_testmodel '
'where id = {1};'.format(field, model_id)
)
return cursor.fetchone()[0]
def test_char_field_encrypted(self):
plaintext = 'Oh hi, test reader!'
model = TestModel()
model.char = plaintext
model.save()
ciphertext = self.get_db_value('char', model.id)
self.assertNotEqual(plaintext, ciphertext)
self.assertTrue('test' not in ciphertext)
fresh_model = TestModel.objects.get(id=model.id)
self.assertEqual(fresh_model.char, plaintext)
def test_text_field_encrypted(self):
plaintext = 'Oh hi, test reader!' * 10
model = TestModel()
model.text = plaintext
model.save()
ciphertext = self.get_db_value('text', model.id)
self.assertNotEqual(plaintext, ciphertext)
self.assertTrue('test' not in ciphertext)
fresh_model = TestModel.objects.get(id=model.id)
self.assertEqual(fresh_model.text, plaintext)
def test_datetime_field_encrypted(self):
plaintext = datetime.now()
model = TestModel()
model.datetime = plaintext
model.save()
ciphertext = self.get_db_value('datetime', model.id)
# Django's normal date serialization format
self.assertTrue(re.search('^\d\d\d\d-\d\d-\d\d', ciphertext) is None)
fresh_model = TestModel.objects.get(id=model.id)
self.assertEqual(fresh_model.datetime, plaintext)
def test_integer_field_encrypted(self):
plaintext = 42
model = TestModel()
model.integer = plaintext
model.save()
ciphertext = self.get_db_value('integer', model.id)
self.assertNotEqual(plaintext, ciphertext)
self.assertNotEqual(plaintext, str(ciphertext))
fresh_model = TestModel.objects.get(id=model.id)
self.assertEqual(fresh_model.integer, plaintext)
|
import re
from datetime import datetime
from django.db import models, connection
from django.test import TestCase
from .fields import (
EncryptedCharField,
EncryptedTextField,
EncryptedDateTimeField,
EncryptedIntegerField,
)
class TestModel(models.Model):
char = EncryptedCharField(max_length=255, null=True)
text = EncryptedTextField(null=True)
datetime = EncryptedDateTimeField(null=True)
integer = EncryptedIntegerField(null=True)
class FieldTest(TestCase):
def get_db_value(self, field, model_id):
cursor = connection.cursor()
cursor.execute(
'select {} '
'from encrypted_fields_testmodel '
'where id = {};'.format(field, model_id)
)
return cursor.fetchone()[0]
def test_char_field_encrypted(self):
plaintext = 'Oh hi, test reader!'
model = TestModel()
model.char = plaintext
model.save()
ciphertext = self.get_db_value('char', model.id)
self.assertNotEqual(plaintext, ciphertext)
self.assertTrue('test' not in ciphertext)
fresh_model = TestModel.objects.get(id=model.id)
self.assertEqual(fresh_model.char, plaintext)
def test_text_field_encrypted(self):
plaintext = 'Oh hi, test reader!' * 10
model = TestModel()
model.text = plaintext
model.save()
ciphertext = self.get_db_value('text', model.id)
self.assertNotEqual(plaintext, ciphertext)
self.assertTrue('test' not in ciphertext)
fresh_model = TestModel.objects.get(id=model.id)
self.assertEqual(fresh_model.text, plaintext)
def test_datetime_field_encrypted(self):
plaintext = datetime.now()
model = TestModel()
model.datetime = plaintext
model.save()
ciphertext = self.get_db_value('datetime', model.id)
# Django's normal date serialization format
self.assertTrue(re.search('^\d\d\d\d-\d\d-\d\d', ciphertext) is None)
fresh_model = TestModel.objects.get(id=model.id)
self.assertEqual(fresh_model.datetime, plaintext)
def test_integer_field_encrypted(self):
plaintext = 42
model = TestModel()
model.integer = plaintext
model.save()
ciphertext = self.get_db_value('integer', model.id)
self.assertNotEqual(plaintext, ciphertext)
self.assertNotEqual(plaintext, str(ciphertext))
fresh_model = TestModel.objects.get(id=model.id)
self.assertEqual(fresh_model.integer, plaintext)
| Python | 0.000002 |
f44c7670ee06d0ff3976c11b921cc3f288b0259b | add TestMPEventLoopRunner.test_ProgressMonitor | tests/EventReader/test_MPEventLoopRunner.py | tests/EventReader/test_MPEventLoopRunner.py | from AlphaTwirl.EventReader import MPEventLoopRunner
import unittest
import os
##____________________________________________________________________________||
class MockReader(object):
def __init__(self):
self._results = None
def setResults(self, results):
self._results = results
def results(self):
return self._results
##____________________________________________________________________________||
class MockEventLoop(object):
def __init__(self, readers):
self.readers = readers
def __call__(self, progressReporter):
for reader in self.readers:
reader._results = 3456
return self.readers
##____________________________________________________________________________||
class MockEventLoopForProgressReporterTest(object):
def __init__(self, readers):
self.readers = readers
def __call__(self, progressReporter):
for reader in self.readers:
reader._results = [3456, progressReporter]
return self.readers
##____________________________________________________________________________||
class MockProgressReporter(object):
def report(self, event, component): pass
##____________________________________________________________________________||
class MockProgressMonitor(object):
def createReporter(self): return MockProgressReporter()
def addWorker(self, worker): pass
def monitor(self): pass
def last(self): pass
##____________________________________________________________________________||
class TestMPEventLoopRunner(unittest.TestCase):
def test_begin_end(self):
runner = MPEventLoopRunner()
runner.begin()
runner.end()
def test_run(self):
runner = MPEventLoopRunner()
runner.begin()
reader1 = MockReader()
reader2 = MockReader()
eventLoop = MockEventLoop([reader1, reader2])
runner.run(eventLoop)
self.assertIsNone(reader1._results)
self.assertIsNone(reader2._results)
runner.end()
self.assertEqual(3456, reader1._results)
self.assertEqual(3456, reader2._results)
def test_ProgressMonitor(self):
progressMonitor = MockProgressMonitor()
runner = MPEventLoopRunner(nprocesses = 3, progressMonitor = progressMonitor)
runner.begin()
reader1 = MockReader()
reader2 = MockReader()
eventLoop = MockEventLoopForProgressReporterTest([reader1, reader2])
runner.run(eventLoop)
self.assertIsNone(reader1._results)
self.assertIsNone(reader2._results)
runner.end()
self.assertEqual(3456, reader1._results[0])
self.assertEqual(3456, reader2._results[0])
# assert that the EventLoop received a ProgressReporter
self.assertIsInstance(reader1._results[1], MockProgressReporter)
self.assertIsInstance(reader2._results[1], MockProgressReporter)
##____________________________________________________________________________||
| from AlphaTwirl.EventReader import MPEventLoopRunner
import unittest
##____________________________________________________________________________||
class MockReader(object):
def __init__(self):
self._results = None
def setResults(self, results):
self._results = results
def results(self):
return self._results
##____________________________________________________________________________||
class MockEventLoop(object):
def __init__(self, readers):
self.readers = readers
def __call__(self, progressReporter):
for reader in self.readers:
reader._results = 3456
return self.readers
##____________________________________________________________________________||
class TestMPEventLoopRunner(unittest.TestCase):
def test_begin_end(self):
runner = MPEventLoopRunner()
runner.begin()
runner.end()
def test_run(self):
runner = MPEventLoopRunner()
runner.begin()
reader1 = MockReader()
reader2 = MockReader()
eventLoop = MockEventLoop([reader1, reader2])
runner.run(eventLoop)
self.assertIsNone(reader1._results)
self.assertIsNone(reader2._results)
runner.end()
self.assertEqual(3456, reader1._results)
self.assertEqual(3456, reader2._results)
##____________________________________________________________________________||
| Python | 0.000001 |
e6e0d96790d71caccb3f00487bfeeddccdc78139 | Fix variable and return value | app/raw/tasks.py | app/raw/tasks.py | from __future__ import absolute_import
from celery import shared_task
from twisted.internet import reactor
from scrapy.crawler import Crawler
from scrapy import log, signals
from scrapy.utils.project import get_project_settings
import os
from raw.scraper.spiders.legco_library import LibraryAgendaSpider
from raw.scraper.spiders.members import LibraryMemberSpider
@shared_task
def run_scraper():
output_name = 'foo.jl'
spider = LibraryAgendaSpider()
settings = get_project_settings()
output_path = os.path.join(settings.get('DATA_DIR_BASE'), 'scrapes', output_name)
settings.overrides['FEED_URI'] = output_path
crawler = Crawler(settings)
crawler.signals.connect(reactor.stop, signal=signals.spider_closed)
crawler.configure()
crawler.crawl(spider)
crawler.start()
log.start(loglevel=log.INFO, logstdout=True)
reactor.run()
return output_path
| from __future__ import absolute_import
from celery import shared_task
from twisted.internet import reactor
from scrapy.crawler import Crawler
from scrapy import log, signals
from scrapy.utils.project import get_project_settings
import os
from raw.scraper.spiders.legco_library import LibraryAgendaSpider
from raw.scraper.spiders.members import LibraryMemberSpider
@shared_task
def run_scraper():
output_name = 'foo.jl'
spider = LibraryAgendaSpider()
settings = get_project_settings()
url_path = os.path.join(settings.get('DATA_DIR_BASE'), 'scrapes', output_name)
settings.overrides['FEED_URI'] = url_path
crawler = Crawler(settings)
crawler.signals.connect(reactor.stop, signal=signals.spider_closed)
crawler.configure()
crawler.crawl(spider)
crawler.start()
log.start(loglevel=log.INFO, logstdout=True)
reactor.run()
return output_name
| Python | 0.000005 |
8c1e0e5a7aef661152fc76757fb8f1403af56133 | fix tests | tests/dataset/test_highly_variable_genes.py | tests/dataset/test_highly_variable_genes.py | from unittest import TestCase
import numpy as np
from scvi.dataset import GeneExpressionDataset, BrainLargeDataset
class TestHighlyVariableGenes(TestCase):
def test_sparse_no_batch_correction(self):
for flavor in ["seurat", "cell_ranger", "seurat_v3"]:
dataset = BrainLargeDataset(
save_path="tests/data",
sample_size_gene_var=10,
nb_genes_to_keep=128,
max_cells_to_keep=256,
)
n_genes = dataset.nb_genes
n_top = n_genes // 2
dataset.subsample_genes(mode=flavor, new_n_genes=n_top, n_bins=3)
assert dataset.nb_genes < n_genes
# For some reason the new number of genes can be slightly different than n_top
dataset._highly_variable_genes(flavor=flavor, n_bins=3)
dataset = BrainLargeDataset(
save_path="tests/data",
sample_size_gene_var=10,
nb_genes_to_keep=128,
max_cells_to_keep=256,
)
n_genes = dataset.nb_genes
dataset.subsample_genes()
assert dataset.nb_genes < n_genes, "subsample_genes did not filter out genes"
def test_batch_correction(self):
data = [
np.random.randint(1, 5, size=(50, 25)),
np.random.randint(1, 5, size=(50, 25)),
np.random.randint(1, 5, size=(50, 25)),
]
dataset = GeneExpressionDataset()
dataset.populate_from_per_batch_list(data)
n_genes = dataset.nb_genes
n_top = n_genes // 2
dataset._highly_variable_genes(n_bins=3, flavor="seurat")
df = dataset._highly_variable_genes(
n_bins=3, n_top_genes=n_top, flavor="seurat"
)
assert df["highly_variable"].sum() >= n_top
dataset.subsample_genes(n_top_genes=n_top)
new_genes = dataset.nb_genes
assert n_genes > new_genes, "subsample_genes did not filter out genes"
pass
def test_dense_subsample_genes(self):
data = [
np.random.randint(1, 5, size=(50, 26)),
np.random.randint(1, 5, size=(50, 26)),
np.random.randint(1, 5, size=(50, 26)),
]
# With default
dataset = GeneExpressionDataset()
dataset.populate_from_per_batch_list(data)
n_genes = dataset.nb_genes
n_top = n_genes // 2
dataset.subsample_genes(new_n_genes=n_top, mode="cell_ranger")
assert dataset.nb_genes == n_top
# With Seurat
dataset = GeneExpressionDataset()
dataset.populate_from_per_batch_list(data)
dataset.subsample_genes(new_n_genes=n_top, mode="seurat")
assert dataset.nb_genes == n_top
# With Seurat v3
dataset = GeneExpressionDataset()
dataset.populate_from_per_batch_list(data)
dataset.subsample_genes(new_n_genes=n_top, mode="seurat_v3")
assert dataset.nb_genes == n_top
| from unittest import TestCase
import numpy as np
from scvi.dataset import GeneExpressionDataset, BrainLargeDataset
class TestHighlyVariableGenes(TestCase):
def test_sparse_no_batch_correction(self):
for flavor in ["seurat", "cell_ranger", "seurat_v3"]:
dataset = BrainLargeDataset(
save_path="tests/data",
sample_size_gene_var=10,
nb_genes_to_keep=128,
max_cells_to_keep=256,
)
n_genes = dataset.nb_genes
n_top = n_genes // 2
dataset.subsample_genes(mode=flavor, new_n_genes=n_top, n_bins=3)
assert dataset.nb_genes < n_genes
# For some reason the new number of genes can be slightly different than n_top
dataset._highly_variable_genes(flavor=flavor, n_bins=3)
dataset = BrainLargeDataset(
save_path="tests/data",
sample_size_gene_var=10,
nb_genes_to_keep=128,
max_cells_to_keep=256,
)
n_genes = dataset.nb_genes
dataset.subsample_genes()
assert dataset.nb_genes < n_genes, "subsample_genes did not filter out genes"
def test_batch_correction(self):
data = [
np.random.randint(1, 5, size=(50, 25)),
np.random.randint(1, 5, size=(50, 25)),
np.random.randint(1, 5, size=(50, 25)),
]
dataset = GeneExpressionDataset()
dataset.populate_from_per_batch_list(data)
n_genes = dataset.nb_genes
n_top = n_genes // 2
dataset.highly_variable_genes(n_bins=3, flavor="seurat")
dataset.highly_variable_genes(n_bins=3, flavor="seurat")
df = dataset.highly_variable_genes(n_bins=3, n_top_genes=n_top, flavor="seurat")
assert df["highly_variable"].sum() >= n_top
dataset.subsample_genes()
new_genes = dataset.nb_genes
assert n_genes > new_genes, "subsample_genes did not filter out genes"
pass
def test_dense_subsample_genes(self):
data = [
np.random.randint(1, 5, size=(50, 26)),
np.random.randint(1, 5, size=(50, 26)),
np.random.randint(1, 5, size=(50, 26)),
]
# With default
dataset = GeneExpressionDataset()
dataset.populate_from_per_batch_list(data)
n_genes = dataset.nb_genes
n_top = n_genes // 2
dataset.subsample_genes(new_n_genes=n_top, mode="cell_ranger")
assert dataset.nb_genes == n_top
# With Seurat
dataset = GeneExpressionDataset()
dataset.populate_from_per_batch_list(data)
dataset.subsample_genes(new_n_genes=n_top, mode="seurat")
assert dataset.nb_genes == n_top
# With Seurat v3
dataset = GeneExpressionDataset()
dataset.populate_from_per_batch_list(data)
dataset.subsample_genes(new_n_genes=n_top, mode="seurat_v3")
assert dataset.nb_genes == n_top
| Python | 0.000001 |
600e68fc3e4b708090f5c3349d002ea9c3d2fbf8 | improve examples group | tests/examples/user_code/publisher_group.py | tests/examples/user_code/publisher_group.py | import time
from celery import chord, group
from .worker import function_aggregate, function_test
chord(
group(function_test.s(0, value=i) for i in range(1000)),
function_aggregate.s(from_chord=True)
)()
time.sleep(5)
| import time
from celery import chord, group
from .tasks import *
chord(
group(function_value.s(0, value=i) for i in range(1000)),
function_any.s(from_chord=True)
)()
time.sleep(5)
| Python | 0.000002 |
902cbd511f2f42948991713cdf0a98c4473c66c0 | add tqdm to hagrid setup.py | packages/hagrid/setup.py | packages/hagrid/setup.py | # stdlib
import platform
# third party
from setuptools import find_packages
from setuptools import setup
__version__ = "0.2.89"
DATA_FILES = {
"img": ["hagrid/img/*.png"],
}
packages = [
"ascii_magic",
"click",
"cryptography>=37.0.2",
"gitpython",
"jinja2",
"names",
"packaging>=21.3",
"paramiko",
"pyOpenSSL>=22.0.0",
"requests",
"rich",
"setuptools",
"virtualenv-api",
"virtualenv",
"PyYAML",
"tqdm",
]
if platform.system().lower() != "windows":
packages.extend(["ansible", "ansible-core"])
# Pillow binary wheels for Apple Silicon on Python 3.8 don't seem to work well
# try using Python 3.9+ for HAGrid on Apple Silicon
setup(
name="hagrid",
description="Happy Automation for Grid",
long_description="HAGrid is the swiss army knife of OpenMined's PySyft and PyGrid.",
long_description_content_type="text/plain",
version=__version__,
author="Andrew Trask <andrew@openmined.org>",
packages=find_packages(),
package_data=DATA_FILES,
install_requires=packages,
include_package_data=True,
entry_points={"console_scripts": ["hagrid = hagrid.cli:cli"]},
)
| # stdlib
import platform
# third party
from setuptools import find_packages
from setuptools import setup
__version__ = "0.2.89"
DATA_FILES = {
"img": ["hagrid/img/*.png"],
}
packages = [
"ascii_magic",
"click",
"cryptography>=37.0.2",
"gitpython",
"jinja2",
"names",
"packaging>=21.3",
"paramiko",
"pyOpenSSL>=22.0.0",
"requests",
"rich",
"setuptools",
"virtualenv-api",
"virtualenv",
"PyYAML",
]
if platform.system().lower() != "windows":
packages.extend(["ansible", "ansible-core"])
# Pillow binary wheels for Apple Silicon on Python 3.8 don't seem to work well
# try using Python 3.9+ for HAGrid on Apple Silicon
setup(
name="hagrid",
description="Happy Automation for Grid",
long_description="HAGrid is the swiss army knife of OpenMined's PySyft and PyGrid.",
long_description_content_type="text/plain",
version=__version__,
author="Andrew Trask <andrew@openmined.org>",
packages=find_packages(),
package_data=DATA_FILES,
install_requires=packages,
include_package_data=True,
entry_points={"console_scripts": ["hagrid = hagrid.cli:cli"]},
)
| Python | 0 |
f520d71d75dea757794b33f2d0e8a7c8c6204717 | Add legacy_url for accepted orgs | app/soc/modules/gsoc/views/accepted_orgs.py | app/soc/modules/gsoc/views/accepted_orgs.py | #!/usr/bin/env python2.5
#
# Copyright 2011 the Melange authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Module containing the views for GSoC accepted orgs.
"""
__authors__ = [
'"Sverre Rabbelier" <sverre@rabbelier.nl>',
]
from django.conf.urls.defaults import url
from django.core.urlresolvers import reverse
from soc.logic.exceptions import AccessViolation
from soc.views.template import Template
from soc.modules.gsoc.views.base import RequestHandler
from soc.modules.gsoc.views.helper import lists
from soc.modules.gsoc.views.helper import url_patterns
from soc.modules.gsoc.logic.models.organization import logic as org_logic
class AcceptedOrgsList(Template):
"""Template for list of accepted organizations.
"""
def __init__(self, request, data):
self.request = request
self.data = data
r = data.redirect
list_config = lists.ListConfiguration()
list_config.addSimpleColumn('name', 'Name')
list_config.addSimpleColumn('link_id', 'Link ID', hidden=True)
list_config.setRowAction(
lambda e, *args, **kwargs: r.organization(e).urlOf('gsoc_org_home'))
list_config.addColumn('tags', 'Tags',
lambda e, *args, **kwargs: e.tags_string(e.org_tag))
self._list_config = list_config
def context(self):
description = 'List of organizations accepted into %s' % (
self.data.program.name)
list = lists.ListConfigurationResponse(self._list_config, 0, description)
return {
'lists': [list],
}
def getListData(self):
idx = lists.getListIndex(self.request)
if idx == 0:
fields = {'scope': self.data.program,
'status': ['active', 'inactive']}
response_builder = lists.QueryContentResponseBuilder(
self.request, self._list_config, org_logic, fields)
return response_builder.build()
else:
return None
def templatePath(self):
return "v2/modules/gsoc/accepted_orgs/_project_list.html"
class AcceptedOrgsPage(RequestHandler):
"""View for the accepted organizations page.
"""
def templatePath(self):
return 'v2/modules/gsoc/accepted_orgs/base.html'
def djangoURLPatterns(self):
return [
url(r'^gsoc/accepted_orgs/%s$' % url_patterns.PROGRAM, self,
name='gsoc_accepted_orgs'),
url(r'gsoc/program/accepted_orgs/%s$' % url_patterns.PROGRAM, self),
]
def checkAccess(self):
self.check.acceptedOrgsAnnounced()
def jsonContext(self):
list_content = AcceptedOrgsList(self.request, self.data).getListData()
if not list_content:
raise AccessViolation(
'You do not have access to this data')
return list_content.content()
def context(self):
return {
'page_name': "Accepted organizations for %s" % self.data.program.name,
'accepted_orgs_list': AcceptedOrgsList(self.request, self.data),
}
| #!/usr/bin/env python2.5
#
# Copyright 2011 the Melange authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Module containing the views for GSoC accepted orgs.
"""
__authors__ = [
'"Sverre Rabbelier" <sverre@rabbelier.nl>',
]
from django.conf.urls.defaults import url
from django.core.urlresolvers import reverse
from soc.logic.exceptions import AccessViolation
from soc.views.template import Template
from soc.modules.gsoc.views.base import RequestHandler
from soc.modules.gsoc.views.helper import lists
from soc.modules.gsoc.views.helper import url_patterns
from soc.modules.gsoc.logic.models.organization import logic as org_logic
class AcceptedOrgsList(Template):
"""Template for list of accepted organizations.
"""
def __init__(self, request, data):
self.request = request
self.data = data
r = data.redirect
list_config = lists.ListConfiguration()
list_config.addSimpleColumn('name', 'Name')
list_config.addSimpleColumn('link_id', 'Link ID', hidden=True)
list_config.setRowAction(
lambda e, *args, **kwargs: r.organization(e).urlOf('gsoc_org_home'))
list_config.addColumn('tags', 'Tags',
lambda e, *args, **kwargs: e.tags_string(e.org_tag))
self._list_config = list_config
def context(self):
description = 'List of organizations accepted into %s' % (
self.data.program.name)
list = lists.ListConfigurationResponse(self._list_config, 0, description)
return {
'lists': [list],
}
def getListData(self):
idx = lists.getListIndex(self.request)
if idx == 0:
fields = {'scope': self.data.program,
'status': ['active', 'inactive']}
response_builder = lists.QueryContentResponseBuilder(
self.request, self._list_config, org_logic, fields)
return response_builder.build()
else:
return None
def templatePath(self):
return "v2/modules/gsoc/accepted_orgs/_project_list.html"
class AcceptedOrgsPage(RequestHandler):
"""View for the accepted organizations page.
"""
def templatePath(self):
return 'v2/modules/gsoc/accepted_orgs/base.html'
def djangoURLPatterns(self):
return [
url(r'^gsoc/accepted_orgs/%s$' % url_patterns.PROGRAM, self,
name='gsoc_accepted_orgs')
]
def checkAccess(self):
self.check.acceptedOrgsAnnounced()
def jsonContext(self):
list_content = AcceptedOrgsList(self.request, self.data).getListData()
if not list_content:
raise AccessViolation(
'You do not have access to this data')
return list_content.content()
def context(self):
return {
'page_name': "Accepted organizations for %s" % self.data.program.name,
'accepted_orgs_list': AcceptedOrgsList(self.request, self.data),
}
| Python | 0 |
e04b71d4fed675e3d8333e59ecf1df5a67ce42ac | remove martor app from django | oeplatform/settings.py | oeplatform/settings.py | """
Django settings for oeplatform project.
Generated by 'django-admin startproject' using Django 1.8.5.
For more information on this file, see
https://docs.djangoproject.com/en/1.8/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/1.8/ref/settings/
"""
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
try:
from .securitysettings import *
except:
raise Exception("No securitysettings found")
try:
from .martor_settings import *
except:
raise Exception("No martor_settings found")
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/1.8/howto/deployment/checklist/
# Application definition
INSTALLED_APPS = (
"django.contrib.auth",
"django.contrib.contenttypes",
"django.contrib.sessions",
"django.contrib.messages",
"django.contrib.staticfiles",
"django.contrib.sessions.backends.signed_cookies",
"bootstrap4",
"rest_framework",
"rest_framework.authtoken",
"modelview",
"modelview.templatetags.modelview_extras",
"login",
"base",
"base.templatetags.base_tags",
"widget_tweaks",
"dataedit",
"colorfield",
"literature",
"api",
"ontology",
"axes",
"captcha",
"django.contrib.postgres",
"fontawesome_5",
"tutorials",
)
MIDDLEWARE = (
"django.contrib.sessions.middleware.SessionMiddleware",
"django.middleware.common.CommonMiddleware",
"django.middleware.csrf.CsrfViewMiddleware",
"django.contrib.auth.middleware.AuthenticationMiddleware",
"django.contrib.messages.middleware.MessageMiddleware",
"django.middleware.clickjacking.XFrameOptionsMiddleware",
"django.middleware.security.SecurityMiddleware",
"login.middleware.DetachMiddleware",
"axes.middleware.AxesMiddleware",
)
ROOT_URLCONF = "oeplatform.urls"
TEMPLATES = [
{
"BACKEND": "django.template.backends.django.DjangoTemplates",
"DIRS": [],
"APP_DIRS": True,
"OPTIONS": {
"context_processors": [
"django.template.context_processors.debug",
"django.template.context_processors.request",
"django.contrib.auth.context_processors.auth",
"django.contrib.messages.context_processors.messages",
]
},
}
]
WSGI_APPLICATION = "oeplatform.wsgi.application"
ONTOLOGY_FOLDER = "/tmp"
# Internationalization
# https://docs.djangoproject.com/en/1.8/topics/i18n/
LANGUAGE_CODE = "en-us"
TIME_ZONE = "Europe/Berlin"
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/1.8/howto/static-files/
AUTH_USER_MODEL = "login.myuser"
LOGIN_URL = "/user/login"
LOGIN_REDIRECT_URL = "/"
REST_FRAMEWORK = {
"DEFAULT_AUTHENTICATION_CLASSES": (
"rest_framework.authentication.BasicAuthentication",
"rest_framework.authentication.SessionAuthentication",
"rest_framework.authentication.TokenAuthentication",
)
}
| """
Django settings for oeplatform project.
Generated by 'django-admin startproject' using Django 1.8.5.
For more information on this file, see
https://docs.djangoproject.com/en/1.8/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/1.8/ref/settings/
"""
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
try:
from .securitysettings import *
except:
raise Exception("No securitysettings found")
try:
from .martor_settings import *
except:
raise Exception("No martor_settings found")
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/1.8/howto/deployment/checklist/
# Application definition
INSTALLED_APPS = (
"django.contrib.auth",
"django.contrib.contenttypes",
"django.contrib.sessions",
"django.contrib.messages",
"django.contrib.staticfiles",
"django.contrib.sessions.backends.signed_cookies",
"bootstrap4",
"rest_framework",
"rest_framework.authtoken",
"modelview",
"modelview.templatetags.modelview_extras",
"login",
"base",
"base.templatetags.base_tags",
"widget_tweaks",
"dataedit",
"colorfield",
"literature",
"api",
"ontology",
"axes",
"captcha",
"django.contrib.postgres",
"fontawesome_5",
"tutorials",
"martor"
)
MIDDLEWARE = (
"django.contrib.sessions.middleware.SessionMiddleware",
"django.middleware.common.CommonMiddleware",
"django.middleware.csrf.CsrfViewMiddleware",
"django.contrib.auth.middleware.AuthenticationMiddleware",
"django.contrib.messages.middleware.MessageMiddleware",
"django.middleware.clickjacking.XFrameOptionsMiddleware",
"django.middleware.security.SecurityMiddleware",
"login.middleware.DetachMiddleware",
"axes.middleware.AxesMiddleware",
)
ROOT_URLCONF = "oeplatform.urls"
TEMPLATES = [
{
"BACKEND": "django.template.backends.django.DjangoTemplates",
"DIRS": [],
"APP_DIRS": True,
"OPTIONS": {
"context_processors": [
"django.template.context_processors.debug",
"django.template.context_processors.request",
"django.contrib.auth.context_processors.auth",
"django.contrib.messages.context_processors.messages",
]
},
}
]
WSGI_APPLICATION = "oeplatform.wsgi.application"
# Internationalization
# https://docs.djangoproject.com/en/1.8/topics/i18n/
LANGUAGE_CODE = "en-us"
TIME_ZONE = "Europe/Berlin"
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/1.8/howto/static-files/
AUTH_USER_MODEL = "login.myuser"
LOGIN_URL = "/user/login"
LOGIN_REDIRECT_URL = "/"
REST_FRAMEWORK = {
"DEFAULT_AUTHENTICATION_CLASSES": (
"rest_framework.authentication.BasicAuthentication",
"rest_framework.authentication.SessionAuthentication",
"rest_framework.authentication.TokenAuthentication",
)
}
| Python | 0.000001 |
b6c78bc88b53e2cfbda4ef4d337ff1971f805051 | Change enum ordering | paperwork_parser/base.py | paperwork_parser/base.py | import inspect
from enum import IntEnum
from pdfquery import PDFQuery
class DocFieldType(IntEnum):
TEXT = 1
NUMBER = 2
CUSTOM = 3 # TODO: Forget this and have 'type' take a callable instead?
class DocField(object):
def __init__(self, bbox, type=DocFieldType.TEXT, required=False,
description=None):
self.bbox = bbox
self.type = type
self.required = required
self.description = description
class DocSchema(object):
@classmethod
def as_pdf_selectors(cls, field_name=None):
"""Return pdfminer selector for specified field. If no field is
specified, then selectors for all fields are returned.
"""
if field_name is not None:
field = getattr(cls, field_name, None)
if (field is None) or (not isinstance(field, DocField)):
raise ValueError(
'{field} is not a DocField attribute on {klass}'.format(
field=field_name, klass=cls.__name__
)
)
pdf_fields = [('assessment_year', field)]
else:
pdf_fields = inspect.getmembers(
cls, lambda f: isinstance(f, DocField)
)
selectors = [('with_formatter', 'text')]
selectors.extend(
(key, 'LTTextLineHorizontal:in_bbox("{bbox}")'.format(
bbox=', '.join(str(coord) for coord in field.bbox)
))
for key, field in pdf_fields
)
return selectors
class Document(object):
variants = []
def __init__(self, file):
# TODO: Check for str or actual file inst?
self._file = PDFQuery(file)
self._data = {}
self._check_configuration()
@property
def data(self):
"""Read only property that is loaded with document data once
`extract()` is called.
"""
return self._data
def detect_variant(self):
raise NotImplementedError('Subclass Document and override this method')
def extract(self):
self._file.load()
variant = self.detect_variant()
selectors = variant.as_pdf_selectors()
extracted = self._file.extract(selectors)
self._data = extracted
def _check_configuration(self):
if not self.variants:
raise ValueError(
"The class '{name}' hasn't been configured with any variants."
" Set {name}.variants to a list of DocSchema types.".format(
name=self.__class__.__name__
)
)
| import inspect
from enum import IntEnum
from pdfquery import PDFQuery
class DocFieldType(IntEnum):
NUMBER = 1
TEXT = 2
CUSTOM = 3 # TODO: Forget this and have 'type' take a callable instead?
class DocField(object):
def __init__(self, bbox, type=DocFieldType.TEXT, required=False,
description=None):
self.bbox = bbox
self.type = type
self.required = required
self.description = description
class DocSchema(object):
@classmethod
def as_pdf_selectors(cls, field_name=None):
"""Return pdfminer selector for specified field. If no field is
specified, then selectors for all fields are returned.
"""
if field_name is not None:
field = getattr(cls, field_name, None)
if (field is None) or (not isinstance(field, DocField)):
raise ValueError(
'{field} is not a DocField attribute on {klass}'.format(
field=field_name, klass=cls.__name__
)
)
pdf_fields = [('assessment_year', field)]
else:
pdf_fields = inspect.getmembers(
cls, lambda f: isinstance(f, DocField)
)
selectors = [('with_formatter', 'text')]
selectors.extend(
(key, 'LTTextLineHorizontal:in_bbox("{bbox}")'.format(
bbox=', '.join(str(coord) for coord in field.bbox)
))
for key, field in pdf_fields
)
return selectors
class Document(object):
variants = []
def __init__(self, file):
# TODO: Check for str or actual file inst?
self._file = PDFQuery(file)
self._data = {}
self._check_configuration()
@property
def data(self):
"""Read only property that is loaded with document data once
`extract()` is called.
"""
return self._data
def detect_variant(self):
raise NotImplementedError('Subclass Document and override this method')
def extract(self):
self._file.load()
variant = self.detect_variant()
selectors = variant.as_pdf_selectors()
extracted = self._file.extract(selectors)
self._data = extracted
def _check_configuration(self):
if not self.variants:
raise ValueError(
"The class '{name}' hasn't been configured with any variants."
" Set {name}.variants to a list of DocSchema types.".format(
name=self.__class__.__name__
)
)
| Python | 0.000001 |
35bc179c6e6c7c8d9230de8da0672a106a372954 | Install plugins using package. | testcases/cloud_admin/run_sos_report.py | testcases/cloud_admin/run_sos_report.py | #!/usr/bin/python
import os
import time
from eucaops import Eucaops
from eutester.eutestcase import EutesterTestCase
from eutester.machine import Machine
class SampleTest(EutesterTestCase):
def __init__(self):
self.setuptestcase()
self.setup_parser()
self.start_time = self.ticket_number = int(time.time())
self.parser.add_argument("--remote-dir", default="/root/euca-sosreport-" + str(self.start_time) + "/")
self.parser.add_argument("--local-dir", default=os.getcwd())
self.parser.add_argument("--package-url", default="http://mongo.beldurnik.com/RPMS/eucalyptus-sos-plugins-0.1-0.el6.noarch.rpm")
self.get_args()
# Setup basic eutester object
self.tester = Eucaops( config_file=self.args.config,password=self.args.password)
def clean_method(self):
pass
def Install(self):
"""
This is where the test description goes
"""
for machine in self.tester.get_component_machines():
assert isinstance(machine, Machine)
machine.install("sos")
machine.sys("yum install -y " + self.args.package_url)
def Run(self):
for machine in self.tester.get_component_machines():
assert isinstance(machine, Machine)
machine.sys("mkdir -p " + self.args.remote_dir)
machine.sys("sosreport --batch --tmp-dir " + self.args.remote_dir + " --ticket-number " + str(self.ticket_number),code=0)
def Download(self):
for machine in self.tester.get_component_machines():
assert isinstance(machine, Machine)
remote_tarball_path = machine.sys("ls -1 " + self.args.remote_dir + "*" + str(self.ticket_number) + "*.xz", code=0)[0]
tarball = remote_tarball_path.split("/")[-1]
local_tarball_path = self.args.local_dir + '/' + tarball
self.tester.debug("Downloading file to: " + local_tarball_path)
machine.sftp.get(remote_tarball_path, local_tarball_path)
def RunAll(self):
self.Install()
self.Run()
self.Download()
if __name__ == "__main__":
testcase = SampleTest()
### Use the list of tests passed from config/command line to determine what subset of tests to run
### or use a predefined list
list = testcase.args.tests or ["RunAll"]
### Convert test suite methods to EutesterUnitTest objects
unit_list = [ ]
for test in list:
unit_list.append( testcase.create_testunit_by_name(test) )
### Run the EutesterUnitTest objects
result = testcase.run_test_case_list(unit_list,clean_on_exit=True)
exit(result) | #!/usr/bin/python
import os
import time
from eucaops import Eucaops
from eutester.eutestcase import EutesterTestCase
from eutester.machine import Machine
class SampleTest(EutesterTestCase):
def __init__(self):
self.setuptestcase()
self.setup_parser()
self.start_time = self.ticket_number = int(time.time())
self.parser.add_argument("--remote-dir", default="/root/euca-sosreport-" + str(self.start_time) + "/")
self.parser.add_argument("--local-dir", default=os.getcwd())
self.parser.add_argument("--git-repo", default="https://github.com/risaacson/eucalyptus-sosreport-plugins.git")
self.get_args()
# Setup basic eutester object
self.tester = Eucaops( config_file=self.args.config,password=self.args.password)
def clean_method(self):
pass
def Install(self):
"""
This is where the test description goes
"""
for machine in self.tester.get_component_machines():
assert isinstance(machine, Machine)
machine.install("sos")
machine.install("git")
machine.sys("git clone " + self.args.git_repo)
machine.sys("cp /root/eucalyptus-sosreport-plugins/sos/plugins/euca*.py /usr/lib/python2.6/site-packages/sos/plugins/")
def Run(self):
for machine in self.tester.get_component_machines():
assert isinstance(machine, Machine)
machine.sys("mkdir -p " + self.args.remote_dir)
machine.sys("sosreport --batch --tmp-dir " + self.args.remote_dir + " --ticket-number " + str(self.ticket_number),code=0)
def Download(self):
for machine in self.tester.get_component_machines():
assert isinstance(machine, Machine)
remote_tarball_path = machine.sys("ls -1 " + self.args.remote_dir + "*" + str(self.ticket_number) + "*.xz", code=0)[0]
tarball = remote_tarball_path.split("/")[-1]
local_tarball_path = self.args.local_dir + '/' + tarball
self.tester.debug("Downloading file to: " + local_tarball_path)
machine.sftp.get(remote_tarball_path, local_tarball_path)
def RunAll(self):
self.Install()
self.Run()
self.Download()
if __name__ == "__main__":
testcase = SampleTest()
### Use the list of tests passed from config/command line to determine what subset of tests to run
### or use a predefined list
list = testcase.args.tests or ["RunAll"]
### Convert test suite methods to EutesterUnitTest objects
unit_list = [ ]
for test in list:
unit_list.append( testcase.create_testunit_by_name(test) )
### Run the EutesterUnitTest objects
result = testcase.run_test_case_list(unit_list,clean_on_exit=True)
exit(result) | Python | 0 |
10ba0ea095e4765a2d60751371f7dca8e36e2d18 | Fix infinite loop in grit headers clobbering script. | build/win/clobber_generated_headers.py | build/win/clobber_generated_headers.py | #!/usr/bin/python
# Copyright (c) 2009 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
# This script helps workaround IncrediBuild problem on Windows.
# See http://crbug.com/17706.
import os
import sys
_SRC_PATH = os.path.join(os.path.dirname(__file__), '..', '..')
sys.path.append(os.path.join(_SRC_PATH, 'tools', 'grit'))
import grit.grd_reader
# We need to apply the workaround only on Windows.
if os.name != 'nt':
sys.exit(0)
def total_split(path):
components = []
while path:
head, tail = os.path.split(path)
if not tail:
break
components.append(tail)
path = head
return list(reversed(components))
for path in sys.argv[1:]:
path = os.path.join('src', path)
path_components = total_split(path)
root = grit.grd_reader.Parse(path)
output_files = [node.GetOutputFilename() for node in root.GetOutputFiles()]
output_headers = [file for file in output_files if file.endswith('.h')]
for build_type in ('Debug', 'Release'):
build_path = os.path.join(_SRC_PATH, 'chrome', build_type)
# We guess target file output based on path of the grd file (the first
# path component after 'src').
intermediate_path = os.path.join(build_path, 'obj',
'global_intermediate', path_components[1])
for header in output_headers:
full_path = os.path.join(intermediate_path, header)
try:
os.remove(full_path)
print 'Clobbered ' + full_path
except OSError:
print 'Could not remove ' + full_path + '. Continuing.'
| #!/usr/bin/python
# Copyright (c) 2009 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
# This script helps workaround IncrediBuild problem on Windows.
# See http://crbug.com/17706.
import os
import sys
_SRC_PATH = os.path.join(os.path.dirname(__file__), '..', '..')
sys.path.append(os.path.join(_SRC_PATH, 'tools', 'grit'))
import grit.grd_reader
# We need to apply the workaround only on Windows.
if os.name != 'nt':
sys.exit(0)
def total_split(path):
components = []
while path:
head, tail = os.path.split(path)
components.append(tail)
path = head
return list(reversed(components))
for path in sys.argv[1:]:
path = os.path.join('src', path)
path_components = total_split(path)
root = grit.grd_reader.Parse(path)
output_files = [node.GetOutputFilename() for node in root.GetOutputFiles()]
output_headers = [file for file in output_files if file.endswith('.h')]
for build_type in ('Debug', 'Release'):
build_path = os.path.join(_SRC_PATH, 'chrome', build_type)
# We guess target file output based on path of the grd file (the first
# path component after 'src').
intermediate_path = os.path.join(build_path, 'obj',
'global_intermediate', path_components[1])
for header in output_headers:
full_path = os.path.join(intermediate_path, header)
try:
os.remove(full_path)
print 'Clobbered ' + full_path
except OSError:
print 'Could not remove ' + full_path + '. Continuing.' | Python | 0.000029 |
e71870736959efcde2188bdcbd89838b67ca8582 | Add AbstractSanitizer/AbstractValidator class to import path | pathvalidate/__init__.py | pathvalidate/__init__.py | """
.. codeauthor:: Tsuyoshi Hombashi <tsuyoshi.hombashi@gmail.com>
"""
from .__version__ import __author__, __copyright__, __email__, __license__, __version__
from ._base import AbstractSanitizer, AbstractValidator
from ._common import (
Platform,
ascii_symbols,
normalize_platform,
replace_ansi_escape,
replace_unprintable_char,
unprintable_ascii_chars,
validate_null_string,
validate_pathtype,
)
from ._filename import FileNameSanitizer, is_valid_filename, sanitize_filename, validate_filename
from ._filepath import (
FilePathSanitizer,
is_valid_filepath,
sanitize_file_path,
sanitize_filepath,
validate_file_path,
validate_filepath,
)
from ._ltsv import sanitize_ltsv_label, validate_ltsv_label
from ._symbol import replace_symbol, validate_symbol
from .error import (
ErrorReason,
InvalidCharError,
InvalidLengthError,
InvalidReservedNameError,
NullNameError,
ReservedNameError,
ValidationError,
ValidReservedNameError,
)
| """
.. codeauthor:: Tsuyoshi Hombashi <tsuyoshi.hombashi@gmail.com>
"""
from .__version__ import __author__, __copyright__, __email__, __license__, __version__
from ._common import (
Platform,
ascii_symbols,
normalize_platform,
replace_ansi_escape,
replace_unprintable_char,
unprintable_ascii_chars,
validate_null_string,
validate_pathtype,
)
from ._filename import FileNameSanitizer, is_valid_filename, sanitize_filename, validate_filename
from ._filepath import (
FilePathSanitizer,
is_valid_filepath,
sanitize_file_path,
sanitize_filepath,
validate_file_path,
validate_filepath,
)
from ._ltsv import sanitize_ltsv_label, validate_ltsv_label
from ._symbol import replace_symbol, validate_symbol
from .error import (
ErrorReason,
InvalidCharError,
InvalidLengthError,
InvalidReservedNameError,
NullNameError,
ReservedNameError,
ValidationError,
ValidReservedNameError,
)
| Python | 0 |
42609dfaf39c09fa591ff1b40e23ab1795a6d7a5 | test fix | vitrage/tests/unit/datasources/test_alarm_transformer_base.py | vitrage/tests/unit/datasources/test_alarm_transformer_base.py | # Copyright 2017 - Nokia
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import abc
from oslo_log import log as logging
from vitrage.common.constants import EdgeLabel
from vitrage.common.constants import EntityCategory
from vitrage.common.constants import GraphAction
from vitrage.common.constants import VertexProperties as VProps
from vitrage.datasources.alarm_properties import AlarmProperties as AlarmProps
from vitrage.datasources.nova.host import NOVA_HOST_DATASOURCE
from vitrage.tests.unit.datasources.test_transformer_base import \
BaseTransformerTest
LOG = logging.getLogger(__name__)
# noinspection PyProtectedMember
class BaseAlarmTransformerTest(BaseTransformerTest):
def _validate_alarm_vertex_props(self,
vertex,
expected_name,
expected_datasource_name,
expected_sample_time):
self._validate_base_vertex_props(vertex,
expected_name,
expected_datasource_name)
self.assertEqual(EntityCategory.ALARM, vertex[VProps.CATEGORY])
self.assertEqual(expected_sample_time, vertex[VProps.SAMPLE_TIMESTAMP])
if self._is_erroneous(vertex):
self.assertEqual(AlarmProps.ACTIVE_STATE, vertex[VProps.STATE])
else:
self.assertEqual(AlarmProps.INACTIVE_STATE, vertex[VProps.STATE])
def _validate_host_neighbor(self,
wrapper,
alarm_id,
host_name):
self.assertEqual(1, len(wrapper.neighbors))
host_neighbor = wrapper.neighbors[0]
host_transformer = self.transformers[NOVA_HOST_DATASOURCE]
properties = {
VProps.ID: host_name,
VProps.TYPE: NOVA_HOST_DATASOURCE,
VProps.CATEGORY: EntityCategory.RESOURCE,
VProps.SAMPLE_TIMESTAMP: wrapper.vertex[VProps.SAMPLE_TIMESTAMP],
}
expected_neighbor = host_transformer. \
create_neighbor_placeholder_vertex(**properties)
self.assertEqual(expected_neighbor, host_neighbor.vertex)
# Validate neighbor edge
edge = host_neighbor.edge
self.assertEqual(edge.source_id, alarm_id)
self.assertEqual(edge.target_id, host_neighbor.vertex.vertex_id)
self.assertEqual(edge.label, EdgeLabel.ON)
def _validate_graph_action(self, wrapper):
if self._is_erroneous(wrapper.vertex):
self.assertEqual(GraphAction.UPDATE_ENTITY, wrapper.action)
else:
self.assertEqual(GraphAction.DELETE_ENTITY, wrapper.action)
@abc.abstractmethod
def _is_erroneous(self, vertex):
pass
| # Copyright 2017 - Nokia
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import abc
from oslo_log import log as logging
from vitrage.common.constants import EdgeLabel
from vitrage.common.constants import EntityCategory
from vitrage.common.constants import GraphAction
from vitrage.common.constants import VertexProperties as VProps
from vitrage.datasources.alarm_properties import AlarmProperties as AlarmProps
from vitrage.datasources.nova.host import NOVA_HOST_DATASOURCE
from vitrage.tests.unit.datasources.test_transformer_base import \
BaseTransformerTest
LOG = logging.getLogger(__name__)
# noinspection PyProtectedMember
class BaseAlarmTransformerTest(BaseTransformerTest):
def _validate_alarm_vertex_props(self,
vertex,
expected_name,
expected_datasource_name,
expected_sample_time):
self._validate_base_vertex_props(vertex,
expected_name,
expected_datasource_name)
self.assertEqual(EntityCategory.ALARM, vertex[VProps.CATEGORY])
self.assertEqual(expected_sample_time, vertex[VProps.SAMPLE_TIMESTAMP])
if self._is_erroneous(vertex):
self.assertEqual(AlarmProps.ACTIVE_STATE, vertex[VProps.STATE])
else:
self.assertEqual(AlarmProps.INACTIVE_STATE, vertex[VProps.STATE])
def _validate_host_neighbor(self,
wrapper,
alarm_id,
host_name):
self.assertEqual(1, len(wrapper.neighbors))
host_neighbor = wrapper.neighbors[0]
host_transformer = self.transformers[NOVA_HOST_DATASOURCE]
properties = {
VProps.ID: host_name,
VProps.TYPE: NOVA_HOST_DATASOURCE,
VProps.SAMPLE_TIMESTAMP: wrapper.vertex[VProps.SAMPLE_TIMESTAMP],
}
expected_neighbor = host_transformer.\
create_placeholder_vertex(**properties)
self.assertEqual(expected_neighbor, host_neighbor.vertex)
# Validate neighbor edge
edge = host_neighbor.edge
self.assertEqual(edge.source_id, alarm_id)
self.assertEqual(edge.target_id, host_neighbor.vertex.vertex_id)
self.assertEqual(edge.label, EdgeLabel.ON)
def _validate_graph_action(self, wrapper):
if self._is_erroneous(wrapper.vertex):
self.assertEqual(GraphAction.UPDATE_ENTITY, wrapper.action)
else:
self.assertEqual(GraphAction.DELETE_ENTITY, wrapper.action)
@abc.abstractmethod
def _is_erroneous(self, vertex):
pass
| Python | 0.000006 |
8e1f573edb01aac1df45030182ab73d423914f8f | check if REDIS_URL exists before connect to redis brain | robot.py | robot.py | # coding: utf-8
from __future__ import unicode_literals
from gevent.monkey import patch_all
patch_all()
import gevent
import logging
from gevent.pool import Pool
from redis import StrictRedis
from importlib import import_module
from slackclient import SlackClient
from settings import APPS, SLACK_TOKEN, REDIS_URL
pool = Pool(20)
CMD_PREFIX = '!'
logger = logging.getLogger()
class RedisBrain(object):
def __init__(self):
self.redis = None
if REDIS_URL:
try:
self.redis = StrictRedis(host=REDIS_URL)
except Exception as e:
logger.error(e)
def set(self, key, value):
if self.redis:
self.redis.set(key, value)
return True
else:
return False
def get(self, key):
if self.redis:
return self.redis.get(key)
return None
def lpush(self, key, value):
if self.redis:
self.redis.lpush(key, value)
return True
else:
return False
def lpop(self, key):
if self.redis:
return self.redis.lpop(key)
return None
def lindex(self, key):
if self.redis:
return self.redis.lindex(key)
return None
class Robot(object):
def __init__(self):
self.client = SlackClient(SLACK_TOKEN)
self.brain = RedisBrain()
self.apps, self.docs = self.load_apps()
def load_apps(self):
docs = ['='*14, 'Usage', '='*14]
apps = {}
for name in APPS:
app = import_module('apps.%s' % name)
docs.append(
'!%s: %s' % (', '.join(app.run.commands), app.run.__doc__)
)
for command in app.run.commands:
apps[command] = app
return apps, docs
def handle_messages(self, messages):
for channel, user, text in messages:
command, payloads = self.extract_command(text)
if not command:
continue
app = self.apps.get(command, None)
if not app:
continue
pool.apply_async(
func=app.run, args=(self, channel, user, payloads)
)
def extract_messages(self, events):
messages = []
for event in events:
channel = event.get('channel', '')
user = event.get('user', '')
text = event.get('text', '')
if channel and user and text:
messages.append((channel, user, text))
return messages
def extract_command(self, text):
if CMD_PREFIX != text[0]:
return (None, None)
tokens = text.split(' ', 1)
if 1 < len(tokens):
return tokens[0][1:], tokens[1]
else:
return (text[1:], '')
def rtm_connect(self):
conn = None
try:
conn = self.client.rtm_connect()
except Exception as e:
logger.error(e)
return conn
def read_message(self):
events = None
try:
events = self.client.rtm_read()
except Exception as e:
logger.error(e)
return events
def run(self):
if not self.rtm_connect():
raise RuntimeError(
'Can not connect to slack client. Check your settings.'
)
while True:
events = self.read_message()
if events:
messages = self.extract_messages(events)
self.handle_messages(messages)
gevent.sleep(0.3)
if '__main__' == __name__:
robot = Robot()
robot.run()
| # coding: utf-8
from __future__ import unicode_literals
from gevent.monkey import patch_all
patch_all()
import gevent
import logging
from gevent.pool import Pool
from redis import StrictRedis
from importlib import import_module
from slackclient import SlackClient
from settings import APPS, SLACK_TOKEN, REDIS_URL
pool = Pool(20)
CMD_PREFIX = '!'
logger = logging.getLogger()
class RedisBrain(object):
def __init__(self):
try:
self.redis = StrictRedis(host=REDIS_URL)
except Exception as e:
logger.error(e)
self.redis = None
def set(self, key, value):
if self.redis:
self.redis.set(key, value)
return True
else:
return False
def get(self, key):
if self.redis:
return self.redis.get(key)
return None
def lpush(self, key, value):
if self.redis:
self.redis.lpush(key, value)
return True
else:
return False
def lpop(self, key):
if self.redis:
return self.redis.lpop(key)
return None
def lindex(self, key):
if self.redis:
return self.redis.lindex(key)
return None
class Robot(object):
def __init__(self):
self.client = SlackClient(SLACK_TOKEN)
self.brain = RedisBrain()
self.apps, self.docs = self.load_apps()
def load_apps(self):
docs = ['='*14, 'Usage', '='*14]
apps = {}
for name in APPS:
app = import_module('apps.%s' % name)
docs.append(
'!%s: %s' % (', '.join(app.run.commands), app.run.__doc__)
)
for command in app.run.commands:
apps[command] = app
return apps, docs
def handle_messages(self, messages):
for channel, user, text in messages:
command, payloads = self.extract_command(text)
if not command:
continue
app = self.apps.get(command, None)
if not app:
continue
pool.apply_async(
func=app.run, args=(self, channel, user, payloads)
)
def extract_messages(self, events):
messages = []
for event in events:
channel = event.get('channel', '')
user = event.get('user', '')
text = event.get('text', '')
if channel and user and text:
messages.append((channel, user, text))
return messages
def extract_command(self, text):
if CMD_PREFIX != text[0]:
return (None, None)
tokens = text.split(' ', 1)
if 1 < len(tokens):
return tokens[0][1:], tokens[1]
else:
return (text[1:], '')
def rtm_connect(self):
conn = None
try:
conn = self.client.rtm_connect()
except Exception as e:
logger.error(e)
return conn
def read_message(self):
events = None
try:
events = self.client.rtm_read()
except Exception as e:
logger.error(e)
return events
def run(self):
if not self.rtm_connect():
raise RuntimeError(
'Can not connect to slack client. Check your settings.'
)
while True:
events = self.read_message()
if events:
messages = self.extract_messages(events)
self.handle_messages(messages)
gevent.sleep(0.3)
if '__main__' == __name__:
robot = Robot()
robot.run()
| Python | 0 |
395617afca4d242de12e2a75a3ae7d2a258f75a7 | use template string | paystackapi/constants.py | paystackapi/constants.py | """Script used to define constants used across codebase."""
PAYSTACK_SECRET_KEY = 'sk_test_0a246ef179dc841f42d20959bebdd790f69605d8'
HEADERS = {'Authorization': 'Bearer {}'}
API_URL = 'https://api.paystack.co/'
| """Script used to define constants used across codebase."""
PAYSTACK_SECRET_KEY = 'sk_test_0a246ef179dc841f42d20959bebdd790f69605d8'
HEADERS = {'Authorization': 'Bearer ' + PAYSTACK_SECRET_KEY}
API_URL = 'https://api.paystack.co/'
| Python | 0.000001 |
39beb9cbb3d0158dab58787cbe95651c8ec66db9 | Bump up minor version. | patroni/version.py | patroni/version.py | __version__ = '0.76'
| __version__ = '0.75'
| Python | 0 |
d0568b2c132ebe2cdf1f656ee96442a0888257cd | add NSecurity class | CorpFin/Security.py | CorpFin/Security.py | from HelpyFuncs.SymPy import sympy_theanify
class Security:
def __init__(self, label='', bs_val=0., val=0.):
self.label = label
self.bs_val_expr = bs_val
self.bs_val = sympy_theanify(bs_val)
self.val_expr = val
self.val = sympy_theanify(val)
def __call__(self, **kwargs):
if self.label:
s = ' "%s"' % self.label
else:
s = ''
return 'Security' + s + ': BS Val = %.3g, Val = %.3g' % (self.bs_val(**kwargs), self.val(**kwargs))
DOLLAR = Security(label='$', bs_val=1., val=1.)
class NSecurity:
def __init__(self, n=1, security=DOLLAR):
self.n = n
self.security = security
| from HelpyFuncs.SymPy import sympy_theanify
class Security:
def __init__(self, label='', bs_val=0., val=0.):
self.label = label
self.bs_val_expr = bs_val
self.bs_val = sympy_theanify(bs_val)
self.val_expr = val
self.val = sympy_theanify(val)
def __call__(self, **kwargs):
if self.label:
s = ' "%s"' % self.label
else:
s = ''
return 'Security' + s + ': BS Val = %.3g, Val = %.3g' % (self.bs_val(**kwargs), self.val(**kwargs))
DOLLAR = Security(label='$', bs_val=1., val=1.)
| Python | 0 |
99818f02ebc46debe349a6c1b6bba70be6e04968 | Update error message for no plugins | skimage/io/_plugins/null_plugin.py | skimage/io/_plugins/null_plugin.py | __all__ = ['imshow', 'imread', 'imsave', '_app_show']
import warnings
message = '''\
No plugin has been loaded. Please refer to the docstring for ``skimage.io``
for a list of available plugins. You may specify a plugin explicitly as
an argument to ``imread``, e.g. ``imread("image.jpg", plugin='pil')``.
'''
def imshow(*args, **kwargs):
warnings.warn(RuntimeWarning(message))
def imread(*args, **kwargs):
warnings.warn(RuntimeWarning(message))
def imsave(*args, **kwargs):
warnings.warn(RuntimeWarning(message))
_app_show = imshow
| __all__ = ['imshow', 'imread', 'imsave', '_app_show']
import warnings
message = '''\
No plugin has been loaded. Please refer to
skimage.io.plugins()
for a list of available plugins.'''
def imshow(*args, **kwargs):
warnings.warn(RuntimeWarning(message))
def imread(*args, **kwargs):
warnings.warn(RuntimeWarning(message))
def imsave(*args, **kwargs):
warnings.warn(RuntimeWarning(message))
_app_show = imshow
| Python | 0 |
265052b981e04afe4815e9dceafbb7f2b06d2b0c | disable script host key checking | king/name-server.py | king/name-server.py | from twisted.internet import reactor
from twisted.names import dns, client, server
from rpyc.utils.factory import ssh_connect
from plumbum import SshMachine
from threading import Thread
import argparse
parser = argparse.ArgumentParser(description='Central Name Server')
parser.add_argument('--full', default=False, action='store_true', help='This instance will act as the endpoint for integration testing')
arguments = parser.parse_args()
class DNSServerFactory(server.DNSServerFactory):
def handleQuery(self, message, protocol, address):
try:
query = message.queries[0]
target = query.name.name
print 'Target:', target
query_type = target.split('.')[0]
if query_type == 'ns1':
A = dns.RRHeader(name=target, type=dns.A, cls=dns.IN, ttl=0,
payload=dns.Record_A(address='54.244.114.147', ttl=None))
args = (self, ([A], [], []), protocol, message, address)
return server.DNSServerFactory.gotResolverResponse(*args)
elif query_type == 'ns2':
A = dns.RRHeader(name=target, type=dns.A, cls=dns.IN, ttl=0,
payload=dns.Record_A(address='54.244.114.167', ttl=None))
args = (self, ([A], [], []), protocol, message, address)
return server.DNSServerFactory.gotResolverResponse(*args)
else:
query_id = int(target.split('.')[1])
origin = target.split('.')[2].split('---')
origin_ns_name = '.'.join(origin[4:])
origin_ip = '.'.join(origin[:4])
target = '.'.join(target.split('.')[2:])
print query_type, origin_ip, origin_ns_name
if query_type == 'full' and arguments.full:
Thread(target=full_rpc, args=(origin_ip, query_id)).start()
NS = dns.RRHeader(name=target, type=dns.NS, cls=dns.IN, ttl=0, auth=True,
payload=dns.Record_NS(name=origin_ns_name, ttl=0))
A = dns.RRHeader(name=origin_ns_name, type=dns.A, cls=dns.IN, ttl=0,
payload=dns.Record_A(address=origin_ip, ttl=None))
ans = []
auth = [NS]
add = [A]
args = (self, (ans, auth, add), protocol, message, address)
return server.DNSServerFactory.gotResolverResponse(*args)
except Exception, e:
print "Bad Request", e
def full_rpc(origin_ip, query_id):
try:
rem = SshMachine(origin_ip, user='ucb_268_measure', keyfile='~/.ssh/id_rsa', ssh_opts=["StrictHostKeyChecking no", "-o UserKnownHostsFile=/dev/null"])
conn = ssh_connect(rem, 18861)
conn.root.exposed_full_response(query_id, 'End Point Reached')
except Exception, e:
print "Could not perform RPC"
factory = DNSServerFactory()
protocol = dns.DNSDatagramProtocol(factory)
reactor.listenUDP(53, protocol)
reactor.listenTCP(53, factory)
reactor.run()
| from twisted.internet import reactor
from twisted.names import dns, client, server
from rpyc.utils.factory import ssh_connect
from plumbum import SshMachine
from threading import Thread
import argparse
parser = argparse.ArgumentParser(description='Central Name Server')
parser.add_argument('--full', default=False, action='store_true', help='This instance will act as the endpoint for integration testing')
arguments = parser.parse_args()
class DNSServerFactory(server.DNSServerFactory):
def handleQuery(self, message, protocol, address):
try:
query = message.queries[0]
target = query.name.name
print 'Target:', target
query_type = target.split('.')[0]
if query_type == 'ns1':
A = dns.RRHeader(name=target, type=dns.A, cls=dns.IN, ttl=0,
payload=dns.Record_A(address='54.244.114.147', ttl=None))
args = (self, ([A], [], []), protocol, message, address)
return server.DNSServerFactory.gotResolverResponse(*args)
elif query_type == 'ns2':
A = dns.RRHeader(name=target, type=dns.A, cls=dns.IN, ttl=0,
payload=dns.Record_A(address='54.244.114.167', ttl=None))
args = (self, ([A], [], []), protocol, message, address)
return server.DNSServerFactory.gotResolverResponse(*args)
else:
query_id = int(target.split('.')[1])
origin = target.split('.')[2].split('---')
origin_ns_name = '.'.join(origin[4:])
origin_ip = '.'.join(origin[:4])
target = '.'.join(target.split('.')[2:])
print query_type, origin_ip, origin_ns_name
if query_type == 'full' and arguments.full:
Thread(target=full_rpc, args=(origin_ip, query_id)).start()
NS = dns.RRHeader(name=target, type=dns.NS, cls=dns.IN, ttl=0, auth=True,
payload=dns.Record_NS(name=origin_ns_name, ttl=0))
A = dns.RRHeader(name=origin_ns_name, type=dns.A, cls=dns.IN, ttl=0,
payload=dns.Record_A(address=origin_ip, ttl=None))
ans = []
auth = [NS]
add = [A]
args = (self, (ans, auth, add), protocol, message, address)
return server.DNSServerFactory.gotResolverResponse(*args)
except Exception, e:
print "Bad Request", e
def full_rpc(origin_ip, query_id):
try:
rem = SshMachine(origin_ip, user='ucb_268_measure', keyfile='~/.ssh/id_rsa')
conn = ssh_connect(rem, 18861)
conn.root.exposed_full_response(query_id, 'End Point Reached')
except Exception, e:
print "Could not perform RPC"
factory = DNSServerFactory()
protocol = dns.DNSDatagramProtocol(factory)
reactor.listenUDP(53, protocol)
reactor.listenTCP(53, factory)
reactor.run()
| Python | 0 |
c9170cb4c0d63a6dc75f0fa7ca76faa688a1678a | Make tags optional | ppb/forms.py | ppb/forms.py | from pinax.blog.forms import FIELDS, AdminPostForm
from pinax.blog.models import Post
from taggit.forms import TagField
FIELDS.append("tags")
class AdminPostTagsForm(AdminPostForm):
tags = TagField(required=False)
class Meta:
model = Post
fields = FIELDS
| from pinax.blog.forms import FIELDS, AdminPostForm
from pinax.blog.models import Post
from taggit.forms import TagField
FIELDS.append("tags")
class AdminPostTagsForm(AdminPostForm):
tags = TagField()
class Meta:
model = Post
fields = FIELDS
| Python | 0.000001 |
9ee9ba34e447e99c868fcb43d40ce905cebf5fb9 | Add list and define functions. | noah/noah.py | noah/noah.py | import json
class Noah(object):
def __init__(self, dictionary_file):
self.dictionary = json.load(dictionary_file)
def list(self):
return '\n'.join([entry['word'] for entry in self.dictionary])
def define(self, word):
entry = next((x for x in self.dictionary if x['word'] == word), None)
if not entry is None:
return '%s (%s)' % (entry['word'], entry['part_of_speech'])
def main():
with open('../dictionaries/english.json') as dictionary:
n = Noah(dictionary)
print n.list()
print n.define('aardvark')
if __name__ == '__main__':
main() | import json
class Noah(object):
pass | Python | 0 |
48e15ea8494d72ee2a4cb7d05b5ee5d626d581c5 | Add groups to serf inventory plugin | plugins/inventory/serf.py | plugins/inventory/serf.py | #!/usr/bin/env python
# (c) 2015, Marc Abramowitz <marca@surveymonkey.com>
#
# This file is part of Ansible.
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
# Dynamic inventory script which lets you use nodes discovered by Serf
# (https://serfdom.io/).
#
# Requires the `serfclient` Python module from
# https://pypi.python.org/pypi/serfclient
#
# Environment variables
# ---------------------
# - `SERF_RPC_ADDR`
# - `SERF_RPC_AUTH`
#
# These variables are described at https://www.serfdom.io/docs/commands/members.html#_rpc_addr
import argparse
import collections
import os
import sys
# https://pypi.python.org/pypi/serfclient
from serfclient import SerfClient, EnvironmentConfig
try:
import json
except ImportError:
import simplejson as json
_key = 'serf'
def _serf_client():
env = EnvironmentConfig()
return SerfClient(host=env.host, port=env.port, rpc_auth=env.auth_key)
def get_serf_members_data():
return _serf_client().members().body['Members']
def get_nodes(data):
return [node['Name'] for node in data]
def get_groups(data):
groups = collections.defaultdict(list)
for node in data:
for key, value in node['Tags'].items():
groups[value].append(node['Name'])
return groups
def get_meta(data):
meta = {'hostvars': {}}
for node in data:
meta['hostvars'][node['Name']] = node['Tags']
return meta
def print_list():
data = get_serf_members_data()
nodes = get_nodes(data)
groups = get_groups(data)
meta = get_meta(data)
inventory_data = {_key: nodes, '_meta': meta}
inventory_data.update(groups)
print(json.dumps(inventory_data))
def print_host(host):
data = get_serf_members_data()
meta = get_meta(data)
print(json.dumps(meta['hostvars'][host]))
def get_args(args_list):
parser = argparse.ArgumentParser(
description='ansible inventory script reading from serf cluster')
mutex_group = parser.add_mutually_exclusive_group(required=True)
help_list = 'list all hosts from serf cluster'
mutex_group.add_argument('--list', action='store_true', help=help_list)
help_host = 'display variables for a host'
mutex_group.add_argument('--host', help=help_host)
return parser.parse_args(args_list)
def main(args_list):
args = get_args(args_list)
if args.list:
print_list()
if args.host:
print_host(args.host)
if __name__ == '__main__':
main(sys.argv[1:])
| #!/usr/bin/env python
# (c) 2015, Marc Abramowitz <marca@surveymonkey.com>
#
# This file is part of Ansible.
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
# Dynamic inventory script which lets you use nodes discovered by Serf
# (https://serfdom.io/).
#
# Requires the `serfclient` Python module from
# https://pypi.python.org/pypi/serfclient
#
# Environment variables
# ---------------------
# - `SERF_RPC_ADDR`
# - `SERF_RPC_AUTH`
#
# These variables are described at https://www.serfdom.io/docs/commands/members.html#_rpc_addr
import argparse
import os
import sys
# https://pypi.python.org/pypi/serfclient
from serfclient import SerfClient, EnvironmentConfig
try:
import json
except ImportError:
import simplejson as json
_key = 'serf'
def _serf_client():
env = EnvironmentConfig()
return SerfClient(host=env.host, port=env.port, rpc_auth=env.auth_key)
def get_serf_members_data():
return _serf_client().members().body['Members']
def get_nodes(data):
return [node['Name'] for node in data]
def get_meta(data):
meta = {'hostvars': {}}
for node in data:
meta['hostvars'][node['Name']] = node['Tags']
return meta
def print_list():
data = get_serf_members_data()
nodes = get_nodes(data)
meta = get_meta(data)
print(json.dumps({_key: nodes, '_meta': meta}))
def print_host(host):
data = get_serf_members_data()
meta = get_meta(data)
print(json.dumps(meta['hostvars'][host]))
def get_args(args_list):
parser = argparse.ArgumentParser(
description='ansible inventory script reading from serf cluster')
mutex_group = parser.add_mutually_exclusive_group(required=True)
help_list = 'list all hosts from serf cluster'
mutex_group.add_argument('--list', action='store_true', help=help_list)
help_host = 'display variables for a host'
mutex_group.add_argument('--host', help=help_host)
return parser.parse_args(args_list)
def main(args_list):
args = get_args(args_list)
if args.list:
print_list()
if args.host:
print_host(args.host)
if __name__ == '__main__':
main(sys.argv[1:])
| Python | 0 |
0dd2bd0a8d2b041672afdf66666df63e2dd1a044 | Add author friends url. | rest/urls.py | rest/urls.py | # Author: Braedy Kuzma
from django.conf.urls import url
from . import views
urlpatterns = [
url(r'^posts/$', views.PostsView.as_view(), name='posts'),
url(r'^posts/(?P<pid>[0-9a-fA-F\-]+)/$', views.PostView.as_view(),
name='post'),
url(r'^posts/(?P<pid>[0-9a-fA-F\-]+)/comments/$',
views.CommentView.as_view(), name='comments'),
url(r'^author/(?P<aid>[0-9a-fA-F\-]+)/$', views.AuthorView.as_view(),
name='author'),
url(r'^author/(?P<aid>[0-9a-fA-F\-]+)/friends/$',
views.AuthorFriendsView.as_view(), name='friends')
]
| # Author: Braedy Kuzma
from django.conf.urls import url
from . import views
urlpatterns = [
url(r'^posts/(?P<pid>[0-9a-fA-F\-]+)/$', views.PostView.as_view(),
name='post'),
url(r'^posts/$', views.PostsView.as_view(), name='posts'),
url(r'^author/(?P<aid>[0-9a-fA-F\-]+)/$', views.AuthorView.as_view(),
name='author'),
url(r'^posts/(?P<pid>[0-9a-fA-F\-]+)/comments/$',
views.CommentView.as_view(), name='comments')
]
| Python | 0 |
0cf7fda731a71524651de95821b444b5c554260e | Move initial_fetch() up | inferno-cli.py | inferno-cli.py | #!/usr/bin/env python3
import argparse
import collections
import datetime
import logging
import os
import re
import sys
import time
import bs4
import requests
import setproctitle
import util
class Shoutbox:
base_url = ""
inferno_url = ""
s = None
lines = []
read = collections.deque(maxlen=21)
def __init__(self, base_url, cookie={}, inferno_path="/infernoshout.php", base_path="/index.php"):
self.base_url = base_url
self.inferno_url = self.base_url + inferno_path
self.s = requests.Session()
self.s.headers.update({
"User-Agent": "Mozilla/5.0 (Windows NT 6.1; rv:45.0) Gecko/20100101 Firefox/45.0",
"X-Requested-With": "XMLHttpRequest",
"Referer": self.base_url + base_path,
})
if cookie:
self.s.cookies.update(cookie)
def _parse(self, html):
MAGIC = "<<~!PARSE_SHOUT!~>>"
try:
active_users = util.atoi(html)
logging.info("%d active users" % active_users)
html = html[len(str(active_users)):]
except ValueError:
pass
if not html.startswith(MAGIC):
logging.warning("ignoring bogus html: %s" % html)
return
html = html.lstrip(MAGIC)
h = bs4.BeautifulSoup(html)
# put the full URL after the text inside the "a" tag
for a in h.find_all("a"):
if "href" not in a.attrs:
continue
if a['href'] == "#":
continue
a.string.replace_with("%s (%s)" % (a.string, a['href']))
for br in h.find_all("br"):
br.string = "\n"
chat = h.get_text()
# remove timestamps - they're relative, and thus they make read lines appear as unread when the day changes
chat = re.sub("^\[[^\]]*\] ", "", chat, flags=re.MULTILINE)
return chat
def _get(self):
params = {
"action": "getshouts",
"timestamp": datetime.datetime.now().strftime("%s200"),
}
try:
r = self.s.get(self.inferno_url, params=params)
return r.text
except requests.exceptions.ConnectionError as e:
logging.warn("connection error: %s" % e)
return ""
def initial_fetch(self):
self.update()
for i in self.lines:
self.read.append(i)
self.lines = []
def update(self):
l = self._parse(self._get()).rstrip("\n").split('\n')
self.lines.extend(l)
def print_new(self):
for i in self.lines:
if i not in self.read:
print(i, flush=True)
self.read.append(i)
else:
logging.debug("skipping line " + i)
self.lines = []
def main():
logging.basicConfig(level=logging.INFO)
os.environ["SPT_NOENV"] = "true"
setproctitle.setproctitle(sys.argv[0])
parser = argparse.ArgumentParser(description="Command line feed for Inferno Shoutbox")
parser.add_argument("-b", "--backlog", action="store_true", help="Display the backlog after connecting")
parser.add_argument("url", help="Base URL of the forum")
parser.add_argument("cookies", help="Cookies in the standard Cookie header format (RFC 6265, section 4.1.1)")
args = parser.parse_args()
s = Shoutbox(args.url, util.dict_from_cookie_str(args.cookies))
if args.backlog:
s.update()
s.print_new()
else:
s.initial_fetch()
while True:
time.sleep(5)
s.update()
s.print_new()
if __name__ == "__main__":
main()
| #!/usr/bin/env python3
import argparse
import collections
import datetime
import logging
import os
import re
import sys
import time
import bs4
import requests
import setproctitle
import util
class Shoutbox:
base_url = ""
inferno_url = ""
s = None
lines = []
read = collections.deque(maxlen=21)
def __init__(self, base_url, cookie={}, inferno_path="/infernoshout.php", base_path="/index.php"):
self.base_url = base_url
self.inferno_url = self.base_url + inferno_path
self.s = requests.Session()
self.s.headers.update({
"User-Agent": "Mozilla/5.0 (Windows NT 6.1; rv:45.0) Gecko/20100101 Firefox/45.0",
"X-Requested-With": "XMLHttpRequest",
"Referer": self.base_url + base_path,
})
if cookie:
self.s.cookies.update(cookie)
def _parse(self, html):
MAGIC = "<<~!PARSE_SHOUT!~>>"
try:
active_users = util.atoi(html)
logging.info("%d active users" % active_users)
html = html[len(str(active_users)):]
except ValueError:
pass
if not html.startswith(MAGIC):
logging.warning("ignoring bogus html: %s" % html)
return
html = html.lstrip(MAGIC)
h = bs4.BeautifulSoup(html)
# put the full URL after the text inside the "a" tag
for a in h.find_all("a"):
if "href" not in a.attrs:
continue
if a['href'] == "#":
continue
a.string.replace_with("%s (%s)" % (a.string, a['href']))
for br in h.find_all("br"):
br.string = "\n"
chat = h.get_text()
# remove timestamps - they're relative, and thus they make read lines appear as unread when the day changes
chat = re.sub("^\[[^\]]*\] ", "", chat, flags=re.MULTILINE)
return chat
def _get(self):
params = {
"action": "getshouts",
"timestamp": datetime.datetime.now().strftime("%s200"),
}
try:
r = self.s.get(self.inferno_url, params=params)
return r.text
except requests.exceptions.ConnectionError as e:
logging.warn("connection error: %s" % e)
return ""
def update(self):
l = self._parse(self._get()).rstrip("\n").split('\n')
self.lines.extend(l)
def print_new(self):
for i in self.lines:
if i not in self.read:
print(i, flush=True)
self.read.append(i)
else:
logging.debug("skipping line " + i)
self.lines = []
def initial_fetch(self):
self.update()
for i in self.lines:
self.read.append(i)
self.lines = []
def main():
logging.basicConfig(level=logging.INFO)
os.environ["SPT_NOENV"] = "true"
setproctitle.setproctitle(sys.argv[0])
parser = argparse.ArgumentParser(description="Command line feed for Inferno Shoutbox")
parser.add_argument("-b", "--backlog", action="store_true", help="Display the backlog after connecting")
parser.add_argument("url", help="Base URL of the forum")
parser.add_argument("cookies", help="Cookies in the standard Cookie header format (RFC 6265, section 4.1.1)")
args = parser.parse_args()
s = Shoutbox(args.url, util.dict_from_cookie_str(args.cookies))
if args.backlog:
s.update()
s.print_new()
else:
s.initial_fetch()
while True:
time.sleep(5)
s.update()
s.print_new()
if __name__ == "__main__":
main()
| Python | 0.000005 |
6fce2e52715f1a77edb19eca8b1133875fff3d34 | Set HearingViewSet read Only | kk/views/hearing.py | kk/views/hearing.py | import django_filters
from rest_framework import viewsets
from rest_framework import serializers
from rest_framework import filters
from rest_framework.decorators import detail_route
from rest_framework.response import Response
from kk.models import Hearing
from .image import ImageFieldSerializer, ImageSerializer
class HearingFilter(django_filters.FilterSet):
next_closing = django_filters.DateTimeFilter(name='close_at', lookup_type='gt')
class Meta:
model = Hearing
fields = ['next_closing', ]
# Serializer for labels. Get label names instead of IDs.
class LabelSerializer(serializers.RelatedField):
def to_representation(self, value):
return value.label
class HearingSerializer(serializers.ModelSerializer):
labels = LabelSerializer(many=True, read_only=True)
images = ImageFieldSerializer(many=True, read_only=True)
class Meta:
model = Hearing
fields = ['abstract', 'heading', 'borough', 'n_comments', 'labels', 'close_at', 'created_at',
'latitude', 'longitude', 'servicemap_url', 'images']
class HearingViewSet(viewsets.ReadOnlyModelViewSet):
"""
API endpoint for hearings.
"""
queryset = Hearing.objects.all()
serializer_class = HearingSerializer
filter_backends = (filters.DjangoFilterBackend, filters.OrderingFilter)
#ordering_fields = ('created_at',)
#ordering = ('-created_at',)
#filter_class = HearingFilter
def get_queryset(self):
next_closing = self.request.query_params.get('next_closing', None)
if next_closing is not None:
return self.queryset.filter(close_at__gt=next_closing).order_by('close_at')[:1]
return self.queryset.order_by('-created_at')
@detail_route(methods=['get'])
def images(self, request, pk=None):
hearing = self.get_object()
images = hearing.images.all()
page = self.paginate_queryset(images)
if page is not None:
serializer = ImageSerializer(page, many=True)
return self.get_paginated_response(serializer.data)
serializer = ImageSerializer(images, many=True)
return Response(serializer.data)
# temporary for query debug purpose
def _list(self, request, *args, **kwargs):
queryset = self.filter_queryset(self.get_queryset())
print(queryset.query)
page = self.paginate_queryset(queryset)
if page is not None:
serializer = self.get_serializer(page, many=True)
return self.get_paginated_response(serializer.data)
serializer = self.get_serializer(queryset, many=True)
return Response(serializer.data)
| import django_filters
from rest_framework import viewsets
from rest_framework import serializers
from rest_framework import filters
from rest_framework.decorators import detail_route
from rest_framework.response import Response
from kk.models import Hearing
from .image import ImageFieldSerializer, ImageSerializer
class HearingFilter(django_filters.FilterSet):
next_closing = django_filters.DateTimeFilter(name='close_at', lookup_type='gt')
class Meta:
model = Hearing
fields = ['next_closing', ]
# Serializer for labels. Get label names instead of IDs.
class LabelSerializer(serializers.RelatedField):
def to_representation(self, value):
return value.label
class HearingSerializer(serializers.ModelSerializer):
labels = LabelSerializer(many=True, read_only=True)
images = ImageFieldSerializer(many=True, read_only=True)
class Meta:
model = Hearing
fields = ['abstract', 'heading', 'borough', 'n_comments', 'labels', 'close_at', 'created_at',
'latitude', 'longitude', 'servicemap_url', 'images']
class HearingViewSet(viewsets.ModelViewSet):
"""
API endpoint for hearings.
"""
queryset = Hearing.objects.all()
serializer_class = HearingSerializer
filter_backends = (filters.DjangoFilterBackend, filters.OrderingFilter)
#ordering_fields = ('created_at',)
#ordering = ('-created_at',)
#filter_class = HearingFilter
def get_queryset(self):
next_closing = self.request.query_params.get('next_closing', None)
if next_closing is not None:
return self.queryset.filter(close_at__gt=next_closing).order_by('close_at')[:1]
return self.queryset.order_by('-created_at')
@detail_route(methods=['get'])
def images(self, request, pk=None):
hearing = self.get_object()
images = hearing.images.all()
page = self.paginate_queryset(images)
if page is not None:
serializer = ImageSerializer(page, many=True)
return self.get_paginated_response(serializer.data)
serializer = ImageSerializer(images, many=True)
return Response(serializer.data)
# temporary for query debug purpose
def _list(self, request, *args, **kwargs):
queryset = self.filter_queryset(self.get_queryset())
print(queryset.query)
page = self.paginate_queryset(queryset)
if page is not None:
serializer = self.get_serializer(page, many=True)
return self.get_paginated_response(serializer.data)
serializer = self.get_serializer(queryset, many=True)
return Response(serializer.data)
| Python | 0 |
b0fa9031b4eabd33a6c6f8f27e22351b14e1eeee | Set a new primary avatar when deleting the primary avatar. | avatar/views.py | avatar/views.py | import os.path
from avatar.models import Avatar, avatar_file_path
from avatar.forms import PrimaryAvatarForm, DeleteAvatarForm
from django.http import HttpResponseRedirect
from django.shortcuts import render_to_response
from django.template import RequestContext
from django.contrib.auth.decorators import login_required
from django.utils.translation import ugettext as _
def _get_next(request):
"""
The part that's the least straightforward about views in this module is how they
determine their redirects after they have finished computation.
In short, they will try and determine the next place to go in the following order:
1. If there is a variable named ``next`` in the *POST* parameters, the view will
redirect to that variable's value.
2. If there is a variable named ``next`` in the *GET* parameters, the view will
redirect to that variable's value.
3. If Django can determine the previous page from the HTTP headers, the view will
redirect to that previous page.
"""
next = request.POST.get('next', request.GET.get('next', request.META.get('HTTP_REFERER', None)))
if not next:
next = request.path
return next
def change(request, extra_context={}, next_override=None):
avatars = Avatar.objects.filter(user=request.user).order_by('-primary')
if avatars.count() > 0:
avatar = avatars[0]
kwargs = {'initial': {'choice': avatar.id}}
else:
avatar = None
kwargs = {}
primary_avatar_form = PrimaryAvatarForm(request.POST or None, user=request.user, **kwargs)
if request.method == "POST":
if 'avatar' in request.FILES:
path = avatar_file_path(user=request.user,
filename=request.FILES['avatar'].name)
avatar = Avatar(
user = request.user,
primary = True,
avatar = path,
)
new_file = avatar.avatar.storage.save(path, request.FILES['avatar'])
avatar.save()
request.user.message_set.create(
message=_("Successfully uploaded a new avatar."))
if 'choice' in request.POST and primary_avatar_form.is_valid():
avatar = Avatar.objects.get(id=
primary_avatar_form.cleaned_data['choice'])
avatar.primary = True
avatar.save()
request.user.message_set.create(
message=_("Successfully updated your avatar."))
return HttpResponseRedirect(next_override or _get_next(request))
return render_to_response(
'avatar/change.html',
extra_context,
context_instance = RequestContext(
request,
{ 'avatar': avatar,
'avatars': avatars,
'primary_avatar_form': primary_avatar_form,
'next': next_override or _get_next(request), }
)
)
change = login_required(change)
def delete(request, extra_context={}, next_override=None):
avatars = Avatar.objects.filter(user=request.user).order_by('-primary')
if avatars.count() > 0:
avatar = avatars[0]
else:
avatar = None
delete_avatar_form = DeleteAvatarForm(request.POST or None, user=request.user)
if request.method == 'POST':
if delete_avatar_form.is_valid():
ids = delete_avatar_form.cleaned_data['choices']
if unicode(avatar.id) in ids and avatars.count() > len(ids):
for a in avatars:
if unicode(a.id) not in ids:
a.primary = True
a.save()
break
Avatar.objects.filter(id__in=ids).delete()
request.user.message_set.create(
message=_("Successfully deleted the requested avatars."))
return HttpResponseRedirect(next_override or _get_next(request))
return render_to_response(
'avatar/confirm_delete.html',
extra_context,
context_instance = RequestContext(
request,
{ 'avatar': avatar,
'avatars': avatars,
'delete_avatar_form': delete_avatar_form,
'next': next_override or _get_next(request), }
)
)
change = login_required(change)
| import os.path
from avatar.models import Avatar, avatar_file_path
from avatar.forms import PrimaryAvatarForm, DeleteAvatarForm
from django.http import HttpResponseRedirect
from django.shortcuts import render_to_response
from django.template import RequestContext
from django.contrib.auth.decorators import login_required
from django.utils.translation import ugettext as _
def _get_next(request):
"""
The part that's the least straightforward about views in this module is how they
determine their redirects after they have finished computation.
In short, they will try and determine the next place to go in the following order:
1. If there is a variable named ``next`` in the *POST* parameters, the view will
redirect to that variable's value.
2. If there is a variable named ``next`` in the *GET* parameters, the view will
redirect to that variable's value.
3. If Django can determine the previous page from the HTTP headers, the view will
redirect to that previous page.
"""
next = request.POST.get('next', request.GET.get('next', request.META.get('HTTP_REFERER', None)))
if not next:
next = request.path
return next
def change(request, extra_context={}, next_override=None):
avatars = Avatar.objects.filter(user=request.user).order_by('-primary')
if avatars.count() > 0:
avatar = avatars[0]
kwargs = {'initial': {'choice': avatar.id}}
else:
avatar = None
kwargs = {}
primary_avatar_form = PrimaryAvatarForm(request.POST or None, user=request.user, **kwargs)
if request.method == "POST":
if 'avatar' in request.FILES:
path = avatar_file_path(user=request.user,
filename=request.FILES['avatar'].name)
avatar = Avatar(
user = request.user,
primary = True,
avatar = path,
)
new_file = avatar.avatar.storage.save(path, request.FILES['avatar'])
avatar.save()
request.user.message_set.create(
message=_("Successfully uploaded a new avatar."))
if 'choice' in request.POST and primary_avatar_form.is_valid():
avatar = Avatar.objects.get(id=
primary_avatar_form.cleaned_data['choice'])
avatar.primary = True
avatar.save()
request.user.message_set.create(
message=_("Successfully updated your avatar."))
return HttpResponseRedirect(next_override or _get_next(request))
return render_to_response(
'avatar/change.html',
extra_context,
context_instance = RequestContext(
request,
{ 'avatar': avatar,
'avatars': avatars,
'primary_avatar_form': primary_avatar_form,
'next': next_override or _get_next(request), }
)
)
change = login_required(change)
def delete(request, extra_context={}, next_override=None):
avatars = Avatar.objects.filter(user=request.user).order_by('-primary')
if avatars.count() > 0:
avatar = avatars[0]
else:
avatar = None
delete_avatar_form = DeleteAvatarForm(request.POST or None, user=request.user)
if request.method == 'POST':
if delete_avatar_form.is_valid():
ids = delete_avatar_form.cleaned_data['choices']
Avatar.objects.filter(id__in=ids).delete()
request.user.message_set.create(
message=_("Successfully deleted the requested avatars."))
return HttpResponseRedirect(next_override or _get_next(request))
return render_to_response(
'avatar/confirm_delete.html',
extra_context,
context_instance = RequestContext(
request,
{ 'avatar': avatar,
'avatars': avatars,
'delete_avatar_form': delete_avatar_form,
'next': next_override or _get_next(request), }
)
)
change = login_required(change)
| Python | 0 |
829ddcdf0ceff4f43cf871b7438170d4e4971a70 | Fix cyclomatic complexity problem in exception handling | surveymonkey/exceptions.py | surveymonkey/exceptions.py | # -*- coding: utf-8 -*-
class SurveyMonkeyException(Exception):
def __init__(self, response):
data = response.json()
super(SurveyMonkeyException, self).__init__(data["error"]["message"])
self.status_code = response.status_code
self.error_code = data["error"]["id"]
class SurveyMonkeyBadRequest(SurveyMonkeyException):
pass
class SurveyMonkeyAuthorizationError(SurveyMonkeyException):
pass
class SurveyMonkeyPermissionError(SurveyMonkeyException):
pass
class SurveyMonkeyResourceNotFound(SurveyMonkeyException):
pass
class SurveyMonkeyResourceConflict(SurveyMonkeyException):
pass
class SurveyMonkeyRequestEntityTooLarge(SurveyMonkeyException):
pass
class SurveyMonkeyInternalServerError(SurveyMonkeyException):
pass
class SurveyMonkeyUserSoftDeleted(SurveyMonkeyException):
pass
class SurveyMonkeyUserDeleted(SurveyMonkeyException):
pass
def response_raises(response):
def _not_found(response):
if response.json()["error"]["id"] == "1052":
return SurveyMonkeyUserSoftDeleted
else:
return SurveyMonkeyResourceNotFound
def _client_error(code):
return {
400: SurveyMonkeyBadRequest,
401: SurveyMonkeyAuthorizationError,
403: SurveyMonkeyPermissionError,
409: SurveyMonkeyResourceConflict,
413: SurveyMonkeyRequestEntityTooLarge,
410: SurveyMonkeyUserDeleted
}.get(code)
def _server_error(code):
return {
500: SurveyMonkeyInternalServerError,
503: SurveyMonkeyInternalServerError
}.get(code)
code = response.status_code
if code == 200:
return
elif code == 404:
exception = _not_found(response)
elif 400 <= code <= 499:
exception = _client_error(code)
elif 500 <= code <= 599:
exception = _server_error(code)
raise exception(response)
| # -*- coding: utf-8 -*-
class SurveyMonkeyException(Exception):
def __init__(self, response):
data = response.json()
super(SurveyMonkeyException, self).__init__(data["error"]["message"])
self.status_code = response.status_code
self.error_code = data["error"]["id"]
class SurveyMonkeyBadRequest(SurveyMonkeyException):
pass
class SurveyMonkeyAuthorizationError(SurveyMonkeyException):
pass
class SurveyMonkeyPermissionError(SurveyMonkeyException):
pass
class SurveyMonkeyResourceNotFound(SurveyMonkeyException):
pass
class SurveyMonkeyResourceConflict(SurveyMonkeyException):
pass
class SurveyMonkeyRequestEntityTooLarge(SurveyMonkeyException):
pass
class SurveyMonkeyInternalServerError(SurveyMonkeyException):
pass
class SurveyMonkeyUserSoftDeleted(SurveyMonkeyException):
pass
class SurveyMonkeyUserDeleted(SurveyMonkeyException):
pass
def response_raises(response):
if response.status_code == 200:
return
elif response.status_code == 400:
raise SurveyMonkeyBadRequest(response)
elif response.status_code == 401:
raise SurveyMonkeyAuthorizationError(response)
elif response.status_code == 403:
raise SurveyMonkeyPermissionError(response)
elif response.status_code == 404:
if response.json()["error"]["id"] == "1052":
raise SurveyMonkeyUserSoftDeleted(response)
else:
raise SurveyMonkeyResourceNotFound(response)
elif response.status_code == 409:
raise SurveyMonkeyResourceConflict(response)
elif response.status_code == 413:
raise SurveyMonkeyRequestEntityTooLarge(response)
elif response.status_code in [500, 503]:
raise SurveyMonkeyInternalServerError(response)
elif response.status_code == 410:
raise SurveyMonkeyUserDeleted(response)
| Python | 0.000041 |
954c06d2715090e15dbe9a76dffb0eeabda06a48 | make flake8 happy | bids/grabbids/__init__.py | bids/grabbids/__init__.py | from .bids_layout import BIDSLayout
__all__ = ["BIDSLayout"]
| __all__ = ["bids_layout"]
| Python | 0 |
31c5071203fa234521cb8d3270f0c0f75488934d | Add test for IX prefixes. | peeringdb/tests.py | peeringdb/tests.py | from __future__ import unicode_literals
from django.test import TestCase
from django.utils import timezone
from .api import PeeringDB
from .models import Network, NetworkIXLAN
class PeeringDBTestCase(TestCase):
def test_time_last_sync(self):
api = PeeringDB()
# Test when no sync has been done
self.assertEqual(api.get_last_sync_time(), 0)
# Test of sync record with no objects
time_of_sync = timezone.now()
api.record_last_sync(
time_of_sync, {'added': 0, 'updated': 0, 'deleted': 0})
self.assertEqual(api.get_last_sync_time(), 0)
# Test of sync record with one object
time_of_sync = timezone.now()
api.record_last_sync(
time_of_sync, {'added': 1, 'updated': 0, 'deleted': 0})
self.assertEqual(api.get_last_sync_time(),
int(time_of_sync.timestamp()))
def test_get_autonomous_system(self):
api = PeeringDB()
asn = 15169
# Using an API call (no cached data)
autonomous_system = api.get_autonomous_system(asn)
self.assertEqual(autonomous_system.asn, asn)
# Save the data inside the cache
details = {
'id': autonomous_system.id,
'asn': autonomous_system.asn,
'name': autonomous_system.name,
}
network = Network(**details)
network.save()
# Using no API calls (cached data)
autonomous_system = api.get_autonomous_system(asn)
self.assertEqual(autonomous_system.asn, asn)
def test_get_ix_network(self):
api = PeeringDB()
ix_network_id = 29146
# Using an API call (no cached data)
ix_network = api.get_ix_network(ix_network_id)
self.assertEqual(ix_network.id, ix_network_id)
# Save the data inside the cache
details = {
'id': ix_network.id,
'asn': ix_network.asn,
'name': ix_network.name,
'ix_id': ix_network.ix_id,
'ixlan_id': ix_network.ixlan_id,
}
network_ixlan = NetworkIXLAN(**details)
network_ixlan.save()
# Using no API calls (cached data)
ix_network = api.get_ix_network(ix_network_id)
self.assertEqual(ix_network.id, ix_network_id)
def test_get_ix_networks_for_asn(self):
api = PeeringDB()
asn = 29467
known_ix_networks = [29146, 15321, 24292, 14658,
15210, 16774, 14657, 23162, 14659, 17707, 27863]
found_ix_networks = []
ix_networks = api.get_ix_networks_for_asn(asn)
for ix_network in ix_networks:
found_ix_networks.append(ix_network.id)
self.assertEqual(sorted(found_ix_networks), sorted(known_ix_networks))
def test_get_prefixes_for_ix_network(self):
api = PeeringDB()
ix_network_id = 29146
known_prefixes = ['2001:7f8:1::/64', '80.249.208.0/21']
found_prefixes = []
ix_prefixes = api.get_prefixes_for_ix_network(ix_network_id)
for ix_prefix in ix_prefixes:
found_prefixes.append(ix_prefix['prefix'])
self.assertEqual(sorted(found_prefixes), sorted(known_prefixes))
| from __future__ import unicode_literals
from django.test import TestCase
from django.utils import timezone
from .api import PeeringDB
from .models import Network, NetworkIXLAN
class PeeringDBTestCase(TestCase):
def test_time_last_sync(self):
api = PeeringDB()
# Test when no sync has been done
self.assertEqual(api.get_last_sync_time(), 0)
# Test of sync record with no objects
time_of_sync = timezone.now()
api.record_last_sync(
time_of_sync, {'added': 0, 'updated': 0, 'deleted': 0})
self.assertEqual(api.get_last_sync_time(), 0)
# Test of sync record with one object
time_of_sync = timezone.now()
api.record_last_sync(
time_of_sync, {'added': 1, 'updated': 0, 'deleted': 0})
self.assertEqual(api.get_last_sync_time(),
int(time_of_sync.timestamp()))
def test_get_autonomous_system(self):
api = PeeringDB()
asn = 15169
# Using an API call (no cached data)
autonomous_system = api.get_autonomous_system(asn)
self.assertEqual(autonomous_system.asn, asn)
# Save the data inside the cache
details = {
'id': autonomous_system.id,
'asn': autonomous_system.asn,
'name': autonomous_system.name,
}
network = Network(**details)
network.save()
# Using no API calls (cached data)
autonomous_system = api.get_autonomous_system(asn)
self.assertEqual(autonomous_system.asn, asn)
def test_get_ix_network(self):
api = PeeringDB()
ix_network_id = 29146
# Using an API call (no cached data)
ix_network = api.get_ix_network(ix_network_id)
self.assertEqual(ix_network.id, ix_network_id)
# Save the data inside the cache
details = {
'id': ix_network.id,
'asn': ix_network.asn,
'name': ix_network.name,
'ix_id': ix_network.ix_id,
'ixlan_id': ix_network.ixlan_id,
}
network_ixlan = NetworkIXLAN(**details)
network_ixlan.save()
# Using no API calls (cached data)
ix_network = api.get_ix_network(ix_network_id)
self.assertEqual(ix_network.id, ix_network_id)
def test_get_ix_networks_for_asn(self):
api = PeeringDB()
asn = 29467
known_ix_networks = [29146, 15321, 24292, 14658,
15210, 16774, 14657, 23162, 14659, 17707, 27863]
found_ix_networks = []
ix_networks = api.get_ix_networks_for_asn(asn)
for ix_network in ix_networks:
found_ix_networks.append(ix_network.id)
self.assertEqual(sorted(found_ix_networks), sorted(known_ix_networks))
| Python | 0 |
93eb1fb058629f25f919a9c5f3647702c2767b22 | test parsing nested rules and toplevel imports | peru/test/test_parser.py | peru/test/test_parser.py | from textwrap import dedent
import unittest
from peru.parser import parse_string
from peru.remote_module import RemoteModule
from peru.rule import Rule
class ParserTest(unittest.TestCase):
def test_parse_empty_file(self):
scope, local_module = parse_string("")
self.assertDictEqual(scope, {})
self.assertDictEqual(local_module.imports, {})
def test_parse_rule(self):
input = dedent("""\
rule foo:
build: echo hi
export: out/
""")
scope, local_module = parse_string(input)
self.assertIn("foo", scope)
rule = scope["foo"]
self.assertIsInstance(rule, Rule)
self.assertEqual(rule.name, "foo")
self.assertEqual(rule.build_command, "echo hi")
self.assertEqual(rule.export, "out/")
def test_parse_module(self):
input = dedent("""\
git module foo:
url: http://www.example.com/
rev: abcdefg
imports:
wham: bam/
thank: you/maam
""")
scope, local_module = parse_string(input)
self.assertIn("foo", scope)
module = scope["foo"]
self.assertIsInstance(module, RemoteModule)
self.assertEqual(module.name, "foo")
self.assertDictEqual(module.imports,
{"wham": "bam/",
"thank": "you/maam"})
self.assertDictEqual(module.plugin_fields,
{"url": "http://www.example.com/",
"rev": "abcdefg"})
def test_parse_nested_rule(self):
input = dedent("""\
git module bar:
rule baz:
""")
scope, local_module = parse_string(input)
self.assertIn("bar", scope)
module = scope["bar"]
self.assertIsInstance(module, RemoteModule)
self.assertIn("bar.baz", scope)
rule = scope["bar.baz"]
self.assertIsInstance(rule, Rule)
def test_parse_toplevel_imports(self):
input = dedent("""\
imports:
foo: bar/
""")
scope, local_module = parse_string(input)
self.assertDictEqual(scope, {})
self.assertDictEqual(local_module.imports, {"foo": "bar/"})
| from textwrap import dedent
import unittest
from peru.parser import parse_string
from peru.remote_module import RemoteModule
from peru.rule import Rule
class ParserTest(unittest.TestCase):
def test_parse_empty_file(self):
scope, local_module = parse_string("")
self.assertDictEqual(scope, {})
self.assertDictEqual(local_module.imports, {})
def test_parse_rule(self):
input = dedent("""\
rule foo:
build: echo hi
export: out/
""")
scope, local_module = parse_string(input)
self.assertIn("foo", scope)
rule = scope["foo"]
self.assertIsInstance(rule, Rule)
self.assertEqual(rule.name, "foo")
self.assertEqual(rule.build_command, "echo hi")
self.assertEqual(rule.export, "out/")
def test_parse_module(self):
input = dedent("""\
git module foo:
url: http://www.example.com/
rev: abcdefg
imports:
wham: bam/
thank: you/maam
""")
scope, local_module = parse_string(input)
self.assertIn("foo", scope)
module = scope["foo"]
self.assertIsInstance(module, RemoteModule)
self.assertEqual(module.name, "foo")
self.assertDictEqual(module.imports,
{"wham": "bam/",
"thank": "you/maam"})
self.assertDictEqual(module.plugin_fields,
{"url": "http://www.example.com/",
"rev": "abcdefg"})
| Python | 0 |
8bb77e1cf4c5ec284641a178a106300db2f5575d | Use UTC | petitions/views.py | petitions/views.py | from django.shortcuts import render, get_object_or_404, render, redirect
from django.views.decorators.http import require_POST
from django.contrib.auth.decorators import login_required
from django.db.models import F
from datetime import datetime
from petitions.models import Petition
from profile.models import Profile
def petition(request, petition_id):
petition = get_object_or_404(Petition, pk=petition_id)
author = Profile.objects.get(petitions_created=petition)
user = request.user
curr_user_signed = user.partner_set.filter(petitions_signed=petition).exists()
users_signed = Profile.objects.filter(petitions_signed=petition)
data_object = {
'petition': petition,
'current_user': user,
'curr_user_signed': curr_user_signed,
'users_signed': users_signed
}
return render(request, 'petition/'+str(petition_id), data_object)
@login_required
@require_POST
def petition_sign(request, petition_id):
petition = get_object_or_404(Petition, pk=petition_id)
user = request.user
user.profile.petitions_signed.add(petition)
user.save()
petition.update(signatures=F('signatures')+1)
petition.update(last_signed=datetime.utcnow())
petition.save()
return redirect('petition/sign/' + str(petition_id))
# HELPER FUNCTIONS #
# SORTING
def most_recent():
return Petition.objects.all() \
.filter(expires__gt=datetime.utcnow()) \
.exclude(has_response=True) \
.filter(published=True) \
.order_by('-created_at')
def most_signatures():
return Petition.objects.all() \
.filter(expires__gt=datetime.utcnow()) \
.exclude(has_response=True) \
.filter(published=True) \
.order_by('-signatures')
def last_signed():
return Petition.objects.all() \
.filter(expires_gt=datetime.utcnow()) \
.exclude(has_response=True) \
.filter(published=True) \
.order_by('-last_signed')
| from django.shortcuts import render, get_object_or_404, render, redirect
from django.views.decorators.http import require_POST
from django.contrib.auth.decorators import login_required
from django.db.models import F
from datetime import datetime
from petitions.models import Petition
from profile.models import Profile
def petition(request, petition_id):
petition = get_object_or_404(Petition, pk=petition_id)
author = Profile.objects.get(petitions_created=petition)
user = request.user
curr_user_signed = user.partner_set.filter(petitions_signed=petition).exists()
users_signed = Profile.objects.filter(petitions_signed=petition)
data_object = {
'petition': petition,
'current_user': user,
'curr_user_signed': curr_user_signed,
'users_signed': users_signed
}
return render(request, '', data_object)
@login_required
@require_POST
def petition_sign(request, petition_id):
petition = get_object_or_404(Petition, pk=petition_id)
user = request.user
user.profile.petitions_signed.add(petition)
user.save()
petition.update(signatures=F('signatures')+1)
petition.update(last_signed=datetime.now())
petition.save()
return redirect('petition/' + str(petition_id))
# HELPER FUNCTIONS #
# SORTING
def most_recent():
return Petition.objects.all() \
.filter(expires__gt=datetime.now()) \
.exclude(has_response=True) \
.filter(published=True) \
.order_by('-created_at')
def most_signatures():
return Petition.objects.all() \
.filter(expires__gt=datetime.now()) \
.exclude(has_response=True) \
.filter(published=True) \
.order_by('-signatures')
def last_signed():
return Petition.objects.all() \
.filter(expires_gt=datetime.now()) \
.exclude(has_response=True) \
.filter(published=True) \
.order_by('-last_signed')
| Python | 0 |
03b17837ed2c88692f1b99ec5b9b477f86fdddb6 | Update version to 2.2b4-dev | openslides/__init__.py | openslides/__init__.py | __author__ = 'OpenSlides Team <support@openslides.org>'
__description__ = 'Presentation and assembly system'
__version__ = '2.2b4-dev'
__license__ = 'MIT'
__url__ = 'https://openslides.org'
args = None
| __author__ = 'OpenSlides Team <support@openslides.org>'
__description__ = 'Presentation and assembly system'
__version__ = '2.2b3'
__license__ = 'MIT'
__url__ = 'https://openslides.org'
args = None
| Python | 0 |
356fdc5d69dadbddeb7cd064593ab31b7993a0bc | Use shared helper code for palevoccbot. | abusehelper/contrib/abusech/palevoccbot.py | abusehelper/contrib/abusech/palevoccbot.py | """
abuse.ch Palevo C&C feed RSS bot.
Maintainer: Lari Huttunen <mit-code@huttu.net>
"""
from abusehelper.core import bot
from . import is_ip, split_description, AbuseCHFeedBot
class PalevoCcBot(AbuseCHFeedBot):
feeds = bot.ListParam(default=["https://palevotracker.abuse.ch/?rssfeed"])
# If treat_as_dns_source is set, the feed ip is dropped.
treat_as_dns_source = bot.BoolParam()
def parse_title(self, title):
pieces = title.split(None, 1)
host = pieces[0]
if is_ip(host):
yield "ip", host
else:
yield "host", host
if len(pieces) > 1:
yield "source time", pieces[1]
def parse_description(self, description):
for key, value in split_description(description):
if key == "status":
yield key, value
elif key == "sbl" and value.lower() != "not listed":
yield key + " id", value
elif key == "ip address" and not self.treat_as_dns_source:
yield "ip", value
if __name__ == "__main__":
PalevoCcBot.from_command_line().execute()
| """
abuse.ch Palevo C&C feed RSS bot.
Maintainer: Lari Huttunen <mit-code@huttu.net>
"""
from abusehelper.core import bot, events
from abusehelper.contrib.rssbot.rssbot import RSSBot
from . import is_ip
class PalevoCcBot(RSSBot):
feeds = bot.ListParam(default=["https://palevotracker.abuse.ch/?rssfeed"])
# If treat_as_dns_source is set, the feed ip is dropped.
treat_as_dns_source = bot.BoolParam()
def create_event(self, **keys):
event = events.Event()
# handle link data
link = keys.get("link", None)
if link:
event.add("description url", link)
# handle title data
title = keys.get("title", None)
if title:
host, date = title.split()
if is_ip(host):
event.add("ip", host)
else:
event.add("host", host)
event.add("source time", date)
# handle description data
description = keys.get("description", None)
if description:
for part in description.split(","):
pair = part.split(":", 1)
if len(pair) < 2:
continue
key = pair[0].strip()
value = pair[1].strip()
if not key or not value:
continue
if key == "Status":
event.add(key.lower(), value)
elif key == "SBL" and value != "Not listed":
key = key.lower() + " id"
event.add(key, value)
elif key == "IP address":
if not self.treat_as_dns_source:
event.add("ip", value)
event.add("feed", "abuse.ch")
event.add("malware", "Palevo")
event.add("type", "c&c")
return event
if __name__ == "__main__":
PalevoCcBot.from_command_line().execute()
| Python | 0 |
49f68d89d28650609e92db515063d3847d614eb8 | use isotropic sigma | tests/mesh/test_cylMeshInnerProducts.py | tests/mesh/test_cylMeshInnerProducts.py | from SimPEG import Mesh
import numpy as np
import sympy
from sympy.abc import r, t, z
import unittest
TOL = 1e-1
class CylInnerProducts_Test(unittest.TestCase):
def test_FaceInnerProduct(self):
# Here we will make up some j vectors that vary in space
# j = [j_r, j_z] - to test face inner products
j = sympy.Matrix([
r**2 * z,
r * z**2
])
# Create an isotropic sigma vector
Sig = sympy.Matrix([
[100/sympy.pi*(r*z)**2, 0 ],
[ 0 , 100/sympy.pi*(r*z)**2],
])
# Do the inner product! - we are in cyl coordinates!
jTSj = j.T*Sig*j
ans = sympy.integrate(
sympy.integrate(
sympy.integrate(r * jTSj, (r,0,1)), # we are in cyl coordinates
(t,0,2.*sympy.pi)),
(z,0,1))[0] # The `[0]` is to make it an int.
def get_vectors(mesh):
""" Get Vectors sig, sr. jx from sympy"""
f_jr = sympy.lambdify((r,z), j[0], 'numpy')
f_jz = sympy.lambdify((r,z), j[1], 'numpy')
f_sigr = sympy.lambdify((r,z), Sig[0], 'numpy')
f_sigz = sympy.lambdify((r,z), Sig[1], 'numpy')
jr = f_jr(mesh.gridFx[:,0], mesh.gridFx[:,2])
jz = f_jz(mesh.gridFz[:,0], mesh.gridFz[:,2])
sigr = f_sigr(mesh.gridCC[:,0], mesh.gridCC[:,2])
return sigr, np.r_[jr, jz]
n = 100.
mesh = Mesh.CylMesh([n, 1, n])
sig, jv = get_vectors(mesh)
MfSig = mesh.getFaceInnerProduct(sig)
numeric_ans = jv.T.dot(MfSig.dot(jv))
print('------ Testing Face Inner Product-----------')
print(' Analytic: {analytic}, Numeric: {numeric}'.format(analytic=ans, numeric=numeric_ans))
assert(np.abs(ans-numeric_ans) < TOL)
if __name__ == '__main__':
unittest.main()
| from SimPEG import Mesh
import numpy as np
import sympy
from sympy.abc import r, t, z
import unittest
TOL = 1e-1
class CylInnerProducts_Test(unittest.TestCase):
def test_FaceInnerProduct(self):
# Here we will make up some j vectors that vary in space
# j = [j_r, j_z] - to test face inner products
j = sympy.Matrix([
r**2 * z,
r * z**2
])
# Create an isotropic sigma vector
Sig = sympy.Matrix([
[540/sympy.pi*(r*z)**2, 0 ],
[ 0 , 540/sympy.pi*(r*z)**4],
])
# Do the inner product! - we are in cyl coordinates!
jTSj = j.T*Sig*j
ans = sympy.integrate(
sympy.integrate(
sympy.integrate(r * jTSj, (r,0,1)), # we are in cyl coordinates
(t,0,2.*sympy.pi)),
(z,0,1))[0] # The `[0]` is to make it an int.
def get_vectors(mesh):
""" Get Vectors sig, sr. jx from sympy"""
f_jr = sympy.lambdify((r,z), j[0], 'numpy')
f_jz = sympy.lambdify((r,z), j[1], 'numpy')
f_sigr = sympy.lambdify((r,z), Sig[0], 'numpy')
f_sigz = sympy.lambdify((r,z), Sig[1], 'numpy')
jr = f_jr(mesh.gridFx[:,0], mesh.gridFx[:,2])
jz = f_jz(mesh.gridFz[:,0], mesh.gridFz[:,2])
sigr = f_sigr(mesh.gridCC[:,0], mesh.gridCC[:,2])
return sigr, np.r_[jr, jz]
n = 100.
mesh = Mesh.CylMesh([n, 1, n])
sig, jv = get_vectors(mesh)
MfSig = mesh.getFaceInnerProduct(sig)
numeric_ans = jv.T.dot(MfSig.dot(jv))
print('------ Testing Face Inner Product-----------')
print(' Analytic: {analytic}, Numeric: {numeric}'.format(analytic=ans, numeric=numeric_ans))
assert(np.abs(ans-numeric_ans) < TOL)
if __name__ == '__main__':
unittest.main()
| Python | 0.000004 |
490230242d51d23650406085a7af92dfbb14c16d | Use shop ID from order | byceps/blueprints/shop/orders/views.py | byceps/blueprints/shop/orders/views.py | """
byceps.blueprints.shop.orders.views
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
:Copyright: 2006-2019 Jochen Kupperschmidt
:License: Modified BSD, see LICENSE for details.
"""
from flask import abort, g
from ....services.party import service as party_service
from ....services.shop.order import service as order_service
from ....services.shop.shop import service as shop_service
from ....services.snippet.transfer.models import Scope
from ....util.framework.blueprint import create_blueprint
from ....util.framework.templating import templated
from ...authentication.decorators import login_required
from ...snippet.templating import render_snippet_as_partial
blueprint = create_blueprint('shop_orders', __name__)
@blueprint.route('')
@login_required
@templated
def index():
"""List orders placed by the current user for the current party."""
current_user = g.current_user
party = party_service.find_party(g.party_id)
shop = shop_service.find_shop_for_party(party.id)
if shop:
orders = order_service.get_orders_placed_by_user_for_shop(
current_user.id, shop.id)
else:
orders = []
return {
'party_title': party.title,
'orders': orders,
}
@blueprint.route('/<uuid:order_id>')
@login_required
@templated
def view(order_id):
"""Show a single order (if it belongs to the current user and party)."""
current_user = g.current_user
order = order_service.find_order_with_details(order_id)
if order is None:
abort(404)
if order.placed_by_id != current_user.id:
# Order was not placed by the current user.
abort(404)
shop = shop_service.get_shop(order.shop_id)
if shop.party_id != g.party_id:
# Order does not belong to the current party.
abort(404)
template_context = {
'order': order,
}
if order.is_open:
template_context['payment_instructions'] \
= _get_payment_instructions(order)
return template_context
def _get_payment_instructions(order):
scope = Scope('shop', str(order.shop_id))
context = {'order_number': order.order_number}
return render_snippet_as_partial('payment_instructions', scope=scope,
context=context)
| """
byceps.blueprints.shop.orders.views
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
:Copyright: 2006-2019 Jochen Kupperschmidt
:License: Modified BSD, see LICENSE for details.
"""
from flask import abort, g
from ....services.party import service as party_service
from ....services.shop.order import service as order_service
from ....services.shop.shop import service as shop_service
from ....services.snippet.transfer.models import Scope
from ....util.framework.blueprint import create_blueprint
from ....util.framework.templating import templated
from ...authentication.decorators import login_required
from ...snippet.templating import render_snippet_as_partial
blueprint = create_blueprint('shop_orders', __name__)
@blueprint.route('')
@login_required
@templated
def index():
"""List orders placed by the current user for the current party."""
current_user = g.current_user
party = party_service.find_party(g.party_id)
shop = shop_service.find_shop_for_party(party.id)
if shop:
orders = order_service.get_orders_placed_by_user_for_shop(
current_user.id, shop.id)
else:
orders = []
return {
'party_title': party.title,
'orders': orders,
}
@blueprint.route('/<uuid:order_id>')
@login_required
@templated
def view(order_id):
"""Show a single order (if it belongs to the current user and party)."""
current_user = g.current_user
order = order_service.find_order_with_details(order_id)
if order is None:
abort(404)
if order.placed_by_id != current_user.id:
# Order was not placed by the current user.
abort(404)
shop = shop_service.get_shop(order.shop_id)
if shop.party_id != g.party_id:
# Order does not belong to the current party.
abort(404)
template_context = {
'order': order,
}
if order.is_open:
template_context['payment_instructions'] \
= _get_payment_instructions(shop.id, order.order_number)
return template_context
def _get_payment_instructions(shop_id, order_number):
scope = Scope('shop', str(shop_id))
context = {'order_number': order_number}
return render_snippet_as_partial('payment_instructions', scope=scope,
context=context)
| Python | 0 |
652711e9a4884a31be74df6ae791e47dcd401871 | remove deprecated test suite declarations | account_partner_required/tests/__init__.py | account_partner_required/tests/__init__.py | # -*- encoding: utf-8 -*-
##############################################################################
#
# Account partner required module for OpenERP
# Copyright (C) 2014 Acsone (http://acsone.eu).
# @author Stéphane Bidoul <stephane.bidoul@acsone.eu>
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
from . import test_account_partner_required
| # -*- encoding: utf-8 -*-
##############################################################################
#
# Account partner required module for OpenERP
# Copyright (C) 2014 Acsone (http://acsone.eu).
# @author Stéphane Bidoul <stephane.bidoul@acsone.eu>
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
from . import test_account_partner_required
fast_suite = [
]
checks = [
test_account_partner_required,
]
| Python | 0 |
55983401814bc0e7158d213885ebdfdbc7e02e9b | Add dependency on the requests module and refactor | DeployUtil/authentication.py | DeployUtil/authentication.py | import requests
import json
#TODO: give an indicator of success
#TODO: handle errors a bit better.
def do_pair(ip, pin, **_args):
# IF YOU DON'T DO THIS OVER HTTPS YOU WILL GET 308s to goto HTTPS
# But we cannot verify our HTTPS cert yet because we cannot get it off
# of all devices.
# If the tooling gets smarter about what its talking to, then we can
# make an educated choice.
scheme = 'https://'
port = ''
api = '/api/authorize/pair?pin={pin}&persistent=0'
request_url = scheme + ip + port + api.format_map({'pin':pin})
with requests.Session() as session:
response = session.post(request_url, verify=False)
cookie_filename = 'deployUtil.cookies'
cookies = requests.utils.dict_from_cookiejar(response.cookies)
with open(cookie_filename,'w') as cookie_file:
json.dump(cookies, cookie_file)
| import urllib.request
import http.cookiejar
import DeployUtil.toolsession as session
#TODO: give an indicator of success
#TODO: handle errors a bit better.
def do_pair(ip, pin, **_args):
# IF YOU DON'T DO THIS OVER HTTPS YOU WILL GET 308s to goto HTTPS
scheme = 'https://'
port = ''
api = '/api/authorize/pair?pin={pin}&persistent=0'
verb = 'POST'
request_url = scheme + ip + port + api.format_map({'pin':pin})
https_handler = session.create_toolsess_httpsHandler()
request = urllib.request.Request(url=request_url, method=verb)
cookies = urllib.request.HTTPCookieProcessor(http.cookiejar.MozillaCookieJar("deployUtil.cookies"))
opener = urllib.request.build_opener(https_handler, cookies)
resp = opener.open(request)
cookies.cookiejar.save(ignore_discard=True)
| Python | 0.000001 |
40957fe0b273f92a28e0b5f27cc4a46ba5e1f2b8 | Add coverage pragma | sktracker/trajectories/__init__.py | sktracker/trajectories/__init__.py | # -*- coding: utf-8 -*-
from __future__ import unicode_literals
from __future__ import division
from __future__ import absolute_import
from __future__ import print_function
import logging
log = logging.getLogger(__name__)
from .trajectories import Trajectories
try: # pragma: no cover
from . import draw
__all__ = ['Trajectories', 'draw']
except ImportError: # pragma: no cover
log.warning('''Matplotlib can't be imported,'''
'''drawing module won't be available ''')
__all__ = ['Trajectories']
| # -*- coding: utf-8 -*-
from __future__ import unicode_literals
from __future__ import division
from __future__ import absolute_import
from __future__ import print_function
import logging
log = logging.getLogger(__name__)
from .trajectories import Trajectories
try:
from . import draw
__all__ = ['Trajectories', 'draw']
except ImportError:
log.warning('''Matplotlib can't be imported,'''
'''drawing module won't be available ''')
__all__ = ['Trajectories']
| Python | 0.000003 |
0610cebeccbd8431906558506ba22654e1104ea9 | fix test | corehq/ex-submodules/couchexport/tests/test_writers.py | corehq/ex-submodules/couchexport/tests/test_writers.py | # coding: utf-8
from codecs import BOM_UTF8
import StringIO
from couchexport.writers import ZippedExportWriter, CsvFileWriter, PythonDictWriter
from django.test import SimpleTestCase
from mock import patch, Mock
class ZippedExportWriterTests(SimpleTestCase):
def setUp(self):
self.zip_file_patch = patch('zipfile.ZipFile')
self.MockZipFile = self.zip_file_patch.start()
self.path_mock = Mock()
self.path_mock.get_path.return_value = 'tmp'
self.writer = ZippedExportWriter()
self.writer.archive_basepath = 'path'
self.writer.tables = [self.path_mock]
self.writer.file = Mock()
def tearDown(self):
self.zip_file_patch.stop()
del self.writer
def test_zipped_export_writer_unicode(self):
mock_zip_file = self.MockZipFile.return_value
self.writer.table_names = {0: u'ひらがな'}
self.writer._write_final_result()
mock_zip_file.write.assert_called_with('tmp', 'ひらがな.csv')
def test_zipped_export_writer_utf8(self):
mock_zip_file = self.MockZipFile.return_value
self.writer.table_names = {0: '\xe3\x81\xb2\xe3\x82\x89\xe3\x81\x8c\xe3\x81\xaa'}
self.writer._write_final_result()
mock_zip_file.write.assert_called_with('tmp', 'ひらがな.csv')
class CsvFileWriterTests(SimpleTestCase):
def test_csv_file_writer_bom(self):
"""
CsvFileWriter should prepend a byte-order mark to the start of the CSV file for Excel
"""
writer = CsvFileWriter()
headers = ['ham', 'spam', 'eggs']
writer.open('Spam')
writer.write_row(headers)
writer.finish()
file_start = writer.get_file().read(6)
self.assertEqual(file_start, BOM_UTF8 + 'ham')
class HeaderNameTest(SimpleTestCase):
def test_names_matching_case(self):
writer = PythonDictWriter()
stringio = StringIO.StringIO()
table_index_1 = "case_Sensitive"
table_index_2 = "case_sensitive"
table_headers = [[]]
writer.open(
[
(table_index_1, table_headers),
(table_index_2, table_headers)
],
stringio
)
writer.close()
preview = writer.get_preview()
first_sheet_name = preview[0]['table_name']
second_sheet_name = preview[1]['table_name']
self.assertNotEqual(
first_sheet_name.lower(),
second_sheet_name.lower(),
"Sheet names must not be equal. Comparison is NOT case sensitive. Names were '{}' and '{}'".format(
first_sheet_name, second_sheet_name
)
)
def test_max_header_length(self):
writer = PythonDictWriter()
writer.max_table_name_size = 10
stringio = StringIO.StringIO()
table_index = "my_table_index"
table_headers = [("first header", "second header")]
writer.open(
[(table_index, table_headers)],
stringio
)
writer.close()
preview = writer.get_preview()
self.assertGreater(len(table_index), writer.max_table_name_size)
self.assertLessEqual(len(preview[0]['table_name']), writer.max_table_name_size)
| # coding: utf-8
from codecs import BOM_UTF8
import StringIO
from couchexport.writers import ZippedExportWriter, CsvFileWriter, PythonDictWriter
from django.test import SimpleTestCase
from mock import patch, Mock
class ZippedExportWriterTests(SimpleTestCase):
def setUp(self):
self.zip_file_patch = patch('zipfile.ZipFile')
self.MockZipFile = self.zip_file_patch.start()
self.path_mock = Mock()
self.path_mock.get_path.return_value = 'tmp'
self.writer = ZippedExportWriter()
self.writer.tables = [self.path_mock]
self.writer.file = Mock()
def tearDown(self):
self.zip_file_patch.stop()
del self.writer
def test_zipped_export_writer_unicode(self):
mock_zip_file = self.MockZipFile.return_value
self.writer.table_names = {0: u'ひらがな'}
self.writer._write_final_result()
mock_zip_file.write.assert_called_with('tmp', 'ひらがな.csv')
def test_zipped_export_writer_utf8(self):
mock_zip_file = self.MockZipFile.return_value
self.writer.table_names = {0: '\xe3\x81\xb2\xe3\x82\x89\xe3\x81\x8c\xe3\x81\xaa'}
self.writer._write_final_result()
mock_zip_file.write.assert_called_with('tmp', 'ひらがな.csv')
class CsvFileWriterTests(SimpleTestCase):
def test_csv_file_writer_bom(self):
"""
CsvFileWriter should prepend a byte-order mark to the start of the CSV file for Excel
"""
writer = CsvFileWriter()
headers = ['ham', 'spam', 'eggs']
writer.open('Spam')
writer.write_row(headers)
writer.finish()
file_start = writer.get_file().read(6)
self.assertEqual(file_start, BOM_UTF8 + 'ham')
class HeaderNameTest(SimpleTestCase):
def test_names_matching_case(self):
writer = PythonDictWriter()
stringio = StringIO.StringIO()
table_index_1 = "case_Sensitive"
table_index_2 = "case_sensitive"
table_headers = [[]]
writer.open(
[
(table_index_1, table_headers),
(table_index_2, table_headers)
],
stringio
)
writer.close()
preview = writer.get_preview()
first_sheet_name = preview[0]['table_name']
second_sheet_name = preview[1]['table_name']
self.assertNotEqual(
first_sheet_name.lower(),
second_sheet_name.lower(),
"Sheet names must not be equal. Comparison is NOT case sensitive. Names were '{}' and '{}'".format(
first_sheet_name, second_sheet_name
)
)
def test_max_header_length(self):
writer = PythonDictWriter()
writer.max_table_name_size = 10
stringio = StringIO.StringIO()
table_index = "my_table_index"
table_headers = [("first header", "second header")]
writer.open(
[(table_index, table_headers)],
stringio
)
writer.close()
preview = writer.get_preview()
self.assertGreater(len(table_index), writer.max_table_name_size)
self.assertLessEqual(len(preview[0]['table_name']), writer.max_table_name_size)
| Python | 0.000002 |
92ebaf9e1507acf1fc10f4448fc00db43508f23b | Allow alternate string key lookup for enums | djstripe/enums.py | djstripe/enums.py | from enum import Enum as _Enum
from django.utils.decorators import classproperty
class Enum(_Enum):
@classproperty
def choices(cls):
return tuple((cls.keys.get(k, k), v.value) for k, v in cls.__members__.items())
@classproperty
def keys(cls):
# Returns a mapping of key overrides.
# This allows using syntactically-incorrect values as keys,
# such as keywords ("pass") or spaces ("Diners Club").
# This cannot be an attribute, otherwise it would show up as a choice.
return {}
class CardTokenizationMethod(Enum):
apple_pay = "Apple Pay"
android_pay = "Android Pay"
| from enum import Enum as _Enum
from django.utils.decorators import classproperty
class Enum(_Enum):
@classproperty
def choices(cls):
return tuple((k, v.value) for k, v in cls.__members__.items())
class CardTokenizationMethod(Enum):
apple_pay = "Apple Pay"
android_pay = "Android Pay"
| Python | 0 |
eadfd0c784d077db4b48facb5e7161c76ede598a | remove commented out code | graphitepager/worker.py | graphitepager/worker.py | import datetime
import time
import redis
import requests
import requests.exceptions
from graphitepager.config import get_config
from graphitepager.description import get_descriptions
from graphitepager.description import missing_target_descriptions
from graphitepager.graphite_data_record import GraphiteDataRecord
from graphitepager.graphite_target import get_records
from graphitepager.level import Level
from graphitepager.redis_storage import RedisStorage
from graphitepager.utils import parse_args
from notifiers.notifier_proxy import NotifierProxy
from notifiers.hipchat_notifier import HipChatNotifier
from notifiers.pagerduty_notifier import PagerdutyNotifier
from notifiers.pushbullet_notifier import PushBulletNotifier
from notifiers.slack_notifier import SlackNotifier
from notifiers.stdout_notifier import StdoutNotifier
def update_notifiers(notifier_proxy, alert, record, graphite_url):
alert_key = '{} {}'.format(alert.get('name'), record.target)
alert_level, value = alert.check_record(record)
description, html_description = get_descriptions(
graphite_url,
alert,
record,
alert_level,
value
)
notifier_proxy.notify(
alert,
alert_key,
alert_level,
description,
html_description
)
def create_notifier_proxy(config):
redis_url = config.get('REDISTOGO_URL', config.get('REDIS_URL', None))
STORAGE = RedisStorage(redis, redis_url)
klasses = [
HipChatNotifier,
PagerdutyNotifier,
PushBulletNotifier,
StdoutNotifier,
SlackNotifier,
]
notifier_proxy = NotifierProxy()
for klass in klasses:
notifier = klass(STORAGE, config)
if notifier.enabled:
print 'Enabling {0}'.format(notifier._domain)
notifier_proxy.add_notifier(notifier)
return notifier_proxy
def verify(args):
config = get_config(args.config)
config.alerts()
print 'Valid configuration, good job!'
return
def run(args):
config = get_config(args.config)
alerts = config.alerts()
notifier_proxy = create_notifier_proxy(config)
graphite_url = config.get('GRAPHITE_URL')
while True:
start_time = time.time()
seen_alert_targets = set()
for alert in alerts:
target = alert.get('target')
try:
records = get_records(
graphite_url,
requests.get,
GraphiteDataRecord,
target,
from_=alert.get('from'),
)
except requests.exceptions.RequestException:
description, html_description = missing_target_descriptions(
graphite_url,
alert,
target,
Level.NO_DATA,
None
)
notifier_proxy.notify(
alert,
target,
Level.NO_DATA,
description,
html_description
)
records = []
for record in records:
name = alert.get('name')
target = record.target
if (name, target) not in seen_alert_targets:
update_notifiers(
notifier_proxy,
alert,
record,
graphite_url
)
seen_alert_targets.add((name, target))
time_diff = time.time() - start_time
sleep_for = 60 - time_diff
if sleep_for > 0:
sleep_for = 60 - time_diff
print 'Sleeping for {0} seconds at {1}'.format(
sleep_for,
datetime.datetime.utcnow()
)
time.sleep(60 - time_diff)
def main():
args = parse_args()
if args.command == 'verify':
return verify(args)
return run(args)
if __name__ == '__main__':
main()
| import datetime
import time
import redis
import requests
import requests.exceptions
from graphitepager.config import get_config
from graphitepager.description import get_descriptions
from graphitepager.description import missing_target_descriptions
from graphitepager.graphite_data_record import GraphiteDataRecord
from graphitepager.graphite_target import get_records
from graphitepager.level import Level
from graphitepager.redis_storage import RedisStorage
from graphitepager.utils import parse_args
from notifiers.notifier_proxy import NotifierProxy
from notifiers.hipchat_notifier import HipChatNotifier
from notifiers.pagerduty_notifier import PagerdutyNotifier
from notifiers.pushbullet_notifier import PushBulletNotifier
from notifiers.slack_notifier import SlackNotifier
from notifiers.stdout_notifier import StdoutNotifier
def update_notifiers(notifier_proxy, alert, record, graphite_url):
alert_key = '{} {}'.format(alert.get('name'), record.target)
alert_level, value = alert.check_record(record)
description, html_description = get_descriptions(
graphite_url,
alert,
record,
alert_level,
value
)
notifier_proxy.notify(
alert,
alert_key,
alert_level,
description,
html_description
)
def create_notifier_proxy(config):
redis_url = config.get('REDISTOGO_URL', config.get('REDIS_URL', None))
STORAGE = RedisStorage(redis, redis_url)
klasses = [
HipChatNotifier,
PagerdutyNotifier,
PushBulletNotifier,
StdoutNotifier,
SlackNotifier,
]
notifier_proxy = NotifierProxy()
for klass in klasses:
notifier = klass(STORAGE, config)
if notifier.enabled:
print 'Enabling {0}'.format(notifier._domain)
notifier_proxy.add_notifier(notifier)
return notifier_proxy
def verify(args):
config = get_config(args.config)
config.alerts()
print 'Valid configuration, good job!'
return
def run(args):
config = get_config(args.config)
alerts = config.alerts()
notifier_proxy = create_notifier_proxy(config)
graphite_url = config.get('GRAPHITE_URL')
while True:
start_time = time.time()
seen_alert_targets = set()
for alert in alerts:
target = alert.get('target')
try:
records = get_records(
graphite_url,
requests.get,
GraphiteDataRecord,
target,
from_=alert.get('from'),
)
except requests.exceptions.RequestException:
description, html_description = missing_target_descriptions(
graphite_url,
alert,
target,
Level.NO_DATA,
None
)
notifier_proxy.notify(
alert,
target,
Level.NO_DATA,
description,
html_description
)
records = []
for record in records:
name = alert.get('name')
target = record.target
if (name, target) not in seen_alert_targets:
# print 'Checking', (name, target)
update_notifiers(
notifier_proxy,
alert,
record,
graphite_url
)
seen_alert_targets.add((name, target))
# else:
# print 'Seen', (name, target)
time_diff = time.time() - start_time
sleep_for = 60 - time_diff
if sleep_for > 0:
sleep_for = 60 - time_diff
print 'Sleeping for {0} seconds at {1}'.format(
sleep_for,
datetime.datetime.utcnow()
)
time.sleep(60 - time_diff)
def main():
args = parse_args()
if args.command == 'verify':
return verify(args)
return run(args)
if __name__ == '__main__':
main()
| Python | 0 |
ec5cc5d30b50e12c2e11f6206c220b4f2731e352 | implement class | pgmapcss/misc/pgcache.py | pgmapcss/misc/pgcache.py | class PGCache:
def __init__(self, id, read_id=False, read_geo=False):
global PGCaches
try:
PGCaches
except:
PGCaches = {}
PGCaches[id] = self
self.id = id
self.read_id = read_id
self.read_geo = read_geo
self.cache_id = len(PGCaches)
def add(self, data, id=None, geo=None):
import pickle
try:
self.plan_add
except:
self.plan_add = plpy.prepare('insert into _pgmapcss_PGCache values (\'' + str(self.cache_id).replace("'", "''") + '\', $1, $2, $3)', [ 'bytea', 'text', 'geometry' ])
if id is None and self.read_id and 'id' in data:
id = data['id']
if geo is None and self.read_geo and 'geo' in data:
geo = data['geo']
plpy.execute(self.plan_add, [ pickle.dumps(data), id, geo ])
def get(self, id=None):
import pickle
if id is None:
try:
self.plan_get
except:
self.plan_get = plpy.prepare('select * from _pgmapcss_PGCache where cache_id=' + str(self.cache_id).replace("'", "''"), [])
cursor = plpy.cursor(self.plan_get, [])
else:
try:
self.plan_get_id
except:
self.plan_get_id = plpy.prepare('select * from _pgmapcss_PGCache where id=ANY($1) and cache_id=' + str(self.cache_id).replace("'", "''"), ['text[]'])
if type(id) == str:
id = [ id ]
cursor = plpy.cursor(self.plan_get_id, [id])
for r in cursor:
yield pickle.loads(r['data'])
def prepare(self, query, param_type=[]):
return plpy.prepare(query.replace('{table}', '(select data, id, geo from _pgmapcss_PGCache where cache_id=' + str(self.cache_id).replace("'", "''") + ') t'), param_type)
def execute(self, plan, param=[]):
import pickle
ret = []
for r in plpy.execute(plan, param):
if 'data' in r:
r['data'] = pickle.loads(r['data'])
ret.append(r)
return ret
def cursor(self, plan, param=[]):
import pickle
ret = []
for r in plpy.cursor(plan, param):
if 'data' in r:
r['data'] = pickle.loads(r['data'])
yield r
def get_PGCache(id, read_id=False, read_geo=False):
global PGCaches
try:
PGCaches
except:
PGCaches = {}
return PGCaches[id]
| class PGCache:
def __init__(self, id, read_id=False, read_geo=False):
global PGCaches
try:
PGCaches
except:
PGCaches = {}
PGCaches[id] = self
def add(self, data, id=None, geo=None):
pass
def get(self, id=None):
pass
def query(self, qry):
pass
def get_PGCache(id, read_id=False, read_geo=False):
global PGCaches
try:
PGCaches
except:
PGCaches = {}
return PGCaches[id]
| Python | 0.000001 |
6094b147dccc4abf3ef23d5e54b1e23a955d6ecb | remove prints | slider/templatetags/slider_tags.py | slider/templatetags/slider_tags.py | # -*- coding: utf-8 -*-
from django import template
from slider.models import SliderImage
register = template.Library()
@register.assignment_tag
def get_slider_images(limit=False, randomize=True, slider=1):
qs = SliderImage.objects.filter(is_visible=True,slider=slider)
if randomize:
qs = qs.order_by('?')
if limit:
qs = qs[0:limit]
return qs
| # -*- coding: utf-8 -*-
from django import template
from slider.models import SliderImage
register = template.Library()
@register.assignment_tag
def get_slider_images(limit=False, randomize=True, slider=1):
qs = SliderImage.objects.filter(is_visible=True,slider=slider)
print randomize
if randomize:
qs = qs.order_by('?')
if limit:
qs = qs[0:limit]
return qs
| Python | 0.000012 |
2378a64ab1e106c8f6f455a9023d350eaf627767 | add manual | oa_manual.py | oa_manual.py | from collections import defaultdict
from time import time
from util import elapsed
# things to set here:
# license, free_metadata_url, free_pdf_url
# free_fulltext_url is set automatically from free_metadata_url and free_pdf_url
def get_overrides_dict():
override_dict = defaultdict(dict)
# cindy wu example
override_dict["10.1038/nature21360"]["free_pdf_url"] = "https://arxiv.org/pdf/1703.01424.pdf"
# example from twitter
override_dict["10.1021/acs.jproteome.5b00852"]["free_pdf_url"] = "http://pubs.acs.org/doi/pdfplus/10.1021/acs.jproteome.5b00852"
# have the unpaywall example go straight to the PDF, not the metadata page
override_dict["10.1098/rspa.1998.0160"]["free_pdf_url"] = "https://arxiv.org/pdf/quant-ph/9706064.pdf"
# missed, not in BASE, from Maha Bali in email
override_dict["10.1080/13562517.2014.867620"]["free_pdf_url"] = "http://dar.aucegypt.edu/bitstream/handle/10526/4363/Final%20Maha%20Bali%20TiHE-PoD-Empowering_Sept30-13.pdf"
# otherwise links to figshare match that only has data, not the article
override_dict["10.1126/science.aaf3777"]["free_pdf_url"] = None
override_dict["10.1126/science.aaf3777"]["free_metadata_url"] = None
#otherwise links to a metadata page that doesn't have the PDF because have to request a copy: https://openresearch-repository.anu.edu.au/handle/1885/103608
override_dict["10.1126/science.aad2622"]["free_pdf_url"] = "https://lra.le.ac.uk/bitstream/2381/38048/6/Waters%20et%20al%20draft_post%20review_v2_clean%20copy.pdf"
# otherwise led to http://www.researchonline.mq.edu.au/vital/access/services/Download/mq:39727/DS01 and authorization error
override_dict["10.1111/j.1461-0248.2008.01185.x"]["free_pdf_url"] = None
# override old-style webpage
override_dict["10.1210/jc.2016-2141"]["free_pdf_url"] = "https://academic.oup.com/jcem/article-lookup/doi/10.1210/jc.2016-2141"
override_dict["10.1210/jc.2016-2141"]["evidence"] = "hybrid manual"
# not indexing this location yet, from @rickypo
override_dict["10.1207/s15327957pspr0203_4"]["free_pdf_url"] = "http://www2.psych.ubc.ca/~schaller/528Readings/Kerr1998.pdf"
return override_dict
| from collections import defaultdict
from time import time
from util import elapsed
# things to set here:
# license, free_metadata_url, free_pdf_url
# free_fulltext_url is set automatically from free_metadata_url and free_pdf_url
def get_overrides_dict():
override_dict = defaultdict(dict)
# cindy wu example
override_dict["10.1038/nature21360"]["free_pdf_url"] = "https://arxiv.org/pdf/1703.01424.pdf"
# example from twitter
override_dict["10.1021/acs.jproteome.5b00852"]["free_pdf_url"] = "http://pubs.acs.org/doi/pdfplus/10.1021/acs.jproteome.5b00852"
# have the unpaywall example go straight to the PDF, not the metadata page
override_dict["10.1098/rspa.1998.0160"]["free_pdf_url"] = "https://arxiv.org/pdf/quant-ph/9706064.pdf"
# missed, not in BASE, from Maha Bali in email
override_dict["10.1080/13562517.2014.867620"]["free_pdf_url"] = "http://dar.aucegypt.edu/bitstream/handle/10526/4363/Final%20Maha%20Bali%20TiHE-PoD-Empowering_Sept30-13.pdf"
# otherwise links to figshare match that only has data, not the article
override_dict["10.1126/science.aaf3777"]["free_pdf_url"] = None
override_dict["10.1126/science.aaf3777"]["free_metadata_url"] = None
#otherwise links to a metadata page that doesn't have the PDF because have to request a copy: https://openresearch-repository.anu.edu.au/handle/1885/103608
override_dict["10.1126/science.aad2622"]["free_pdf_url"] = "https://lra.le.ac.uk/bitstream/2381/38048/6/Waters%20et%20al%20draft_post%20review_v2_clean%20copy.pdf"
# otherwise led to http://www.researchonline.mq.edu.au/vital/access/services/Download/mq:39727/DS01 and authorization error
override_dict["10.1111/j.1461-0248.2008.01185.x"]["free_pdf_url"] = None
# override old-style webpage
override_dict["10.1210/jc.2016-2141"]["free_pdf_url"] = "https://academic.oup.com/jcem/article-lookup/doi/10.1210/jc.2016-2141"
override_dict["10.1210/jc.2016-2141"]["evidence"] = "hybrid manual"
return override_dict
| Python | 0 |
8206ea76804cf08298eeab8673b2326440aa8663 | check for existing bonds before drawing | orbis/gui/sketchpad.py | orbis/gui/sketchpad.py | import matplotlib
import matplotlib.patches
import numpy
import wx
from plots import Plot
#====================================================================================
class SketchPad(Plot):
"""sketch pad for drawing molecules"""
ATOM_RADIUS = 0.1
PICK_TOLERANCE = 5
#----------------------------------------------------------------------
def __init__(self,*args,**kwargs):
super(SketchPad,self).__init__(*args,**kwargs)
self.axes = self.figure.add_subplot(1,1,1)
self.axes.set_aspect("equal")
self.up_atom = None
self.down_atom = None
#---------------------------------------------------------------------------
def on_button_down(self,event):
super(SketchPad,self).on_button_down(event)
self.down_atom = self.atom_at_event_point(event)
#---------------------------------------------------------------------------
def atom_at_event_point(self,event):
for patch in self.axes.patches:
event_in_atom,_ = patch.contains(event)
if event_in_atom:
return patch
#---------------------------------------------------------------------------
def on_button_up(self,event):
super(SketchPad,self).on_button_up(event)
self.up_atom = self.atom_at_event_point(event)
if self.new_atom_requested():
self.add_atom()
elif self.new_bond_requested() and not self.bond_exists(self.up_atom,self.down_atom):
self.add_bond()
#---------------------------------------------------------------------------
def on_pick(self,event):
super(SketchPad,self).on_pick(event)
#---------------------------------------------------------------------------
def new_atom_requested(self):
return self.was_click() and not self.was_pick()
#---------------------------------------------------------------------------
def new_bond_requested(self):
start_and_finish_atoms = None not in (self.up_atom, self.down_atom)
unique_atoms = self.up_atom is not self.down_atom
return start_and_finish_atoms and unique_atoms
#---------------------------------------------------------------------------
def bond_exists(self,atom_1,atom_2):
bond_coords = [sorted(bond.get_xydata().tolist()) for bond in self.axes.lines]
bond_to_check = sorted([list(atom_1.xy),list(atom_2.xy)])
return bond_to_check in bond_coords
#---------------------------------------------------------------------------
def get_atom_locations(self):
"""returns xy points of all atoms on sketchpad"""
return [atom.xy for atom in self.axes.patches]
#---------------------------------------------------------------------------
def add_atom(self):
"""Add a new atom to the sketchpad"""
coords = (self.mouse_up_x,self.mouse_up_y)
circ = matplotlib.patches.CirclePolygon(coords,self.ATOM_RADIUS,picker=self.PICK_TOLERANCE,resolution=40)
self.axes.add_patch(circ)
self.figure.canvas.draw()
#---------------------------------------------------------------------------
def add_bond(self):
"""add a new bond between down_atom and up_atom"""
x1,y1 = self.down_atom.xy
x2,y2 = self.up_atom.xy
self.axes.plot([x1,x2],[y1,y2])
self.figure.canvas.draw()
if __name__ == "__main__":
app = wx.App()
frame = wx.Frame(None)
sp = SketchPad(frame)
frame.Show()
app.MainLoop()
| import matplotlib
import matplotlib.patches
import numpy
import wx
from plots import Plot
#====================================================================================
class SketchPad(Plot):
"""sketch pad for drawing molecules"""
ATOM_RADIUS = 0.1
PICK_TOLERANCE = 5
#----------------------------------------------------------------------
def __init__(self,*args,**kwargs):
super(SketchPad,self).__init__(*args,**kwargs)
self.axes = self.figure.add_subplot(1,1,1)
self.axes.set_aspect("equal")
#---------------------------------------------------------------------------
def on_button_up(self,event):
super(SketchPad,self).on_button_up(event)
if self.new_atom_requested():
self.add_atom()
#---------------------------------------------------------------------------
def new_atom_requested(self):
return self.was_click() and not self.was_pick()
#---------------------------------------------------------------------------
def add_atom(self):
"""Add a new atom to the sketchpad"""
coords = (self.mouse_up_x,self.mouse_up_y)
circ = matplotlib.patches.Ellipse(coords,self.ATOM_RADIUS,self.ATOM_RADIUS,picker=self.PICK_TOLERANCE)
self.axes.add_patch(circ)
self.figure.canvas.draw()
if __name__ == "__main__":
app = wx.App()
frame = wx.Frame(None)
sp = SketchPad(frame)
frame.Show()
app.MainLoop()
| Python | 0 |
f6740a7b2662ce8ad4112757663cf0e4ab184394 | convert all sample fields to their respective types | rowprocsv.py | rowprocsv.py | """
Module for reading and exporting csv files exported from Concept2 RowPro
"""
import datetime
import tcx
class RowProCSV:
HEADER_SUMMARY = 'Date,TotalTime,TotalDistance,'
FIELDS_SUMMARY = [
'date', 'total_time', 'total_distance', 'avg_pace', 'unit', 'origin', 'total_cals', 'duty_cycle', 'type',
'format', 'slide', 'session_id', 'rowfile_id', 'avg_hr', 'last_hr', 'offset'
]
HEADER_SAMPLES = 'Time,Distance,Pace,Watts,Cals,SPM,HR,DutyCycle,Rowfile_Id'
FIELDS_SAMPLES = [
('time_ms', int),
('distance', float),
('pace', float),
('watts', float),
('cals', float),
('spm', int),
('hr', int),
('duty_cycle', float),
('rowfile_id', None),
]
date = None
datetime = None
total_time = None
total_distance = None
avg_pace = None
total_cals = None
slide = False
avg_hr = None
last_hr = None
samples = []
def __init__(self, filename):
lines = []
try:
with open(filename, 'r') as fp:
lines = fp.read().split("\r\n")
except IOError as e:
print 'Could not read file {}: {}'.format(filename, e)
summary_found = False
samples_found = False
while len(lines):
line = lines.pop(0)
if not line:
continue
if line.startswith(self.HEADER_SUMMARY):
line = lines.pop(0)
summary_data = line.split(',')
if len(summary_data) != len(self.FIELDS_SUMMARY):
print 'Warning: summary line only has {} fields, {} expected'.format(len(summary_data),
len(self.FIELDS_SUMMARY))
for field in self.FIELDS_SUMMARY:
if len(summary_data):
value = summary_data.pop(0)
if hasattr(self, field) is not None:
setattr(self, field, value)
# parse the date
try:
self.datetime = datetime.datetime.strptime(self.date, '%d/%m/%Y %H:%M:%S')
except Exception as ex:
print 'Error parsing date {}: {}'.format(self.date, ex)
# parse the slide value
self.slide = True if self.slide == 'True' else False
summary_found = True
continue
elif line.startswith(self.HEADER_SAMPLES):
while len(lines):
line = lines.pop(0).strip()
if not line:
break
sample_data = line.split(',')
sample = {}
for field, field_type in self.FIELDS_SAMPLES:
val = sample_data.pop(0) if len(sample_data) else None
if field_type is not None and val is not None:
# convert time from milliseconds to fractional seconds
try:
val = field_type(val)
except ValueError:
print 'Error converting field {} value "{}" to {}'.format(field, val, str(field_type))
sample[field] = val
# convert time from milliseconds to fractional seconds
sample['time'] = sample['time_ms'] / 1000.0
self.samples.append(sample)
samples_found = True
break
if not summary_found:
print 'Warning: summary section not found in file'
if not samples_found:
print 'Warning: samples section not found in file'
def get_data(self):
return {
'datetime': self.datetime,
'total_time': self.total_time,
'total_distance': self.total_distance,
'avg_pace': self.avg_pace,
'total_cals': self.total_cals,
'slide': self.slide,
'avg_hr': self.avg_hr,
'last_hr': self.last_hr,
'samples': self.samples,
}
| """
Module for reading and exporting csv files exported from Concept2 RowPro
"""
import datetime
import tcx
class RowProCSV:
HEADER_SUMMARY = 'Date,TotalTime,TotalDistance,'
FIELDS_SUMMARY = [
'date', 'total_time', 'total_distance', 'avg_pace', 'unit', 'origin', 'total_cals', 'duty_cycle', 'type',
'format', 'slide', 'session_id', 'rowfile_id', 'avg_hr', 'last_hr', 'offset'
]
HEADER_SAMPLES = 'Time,Distance,Pace,Watts,Cals,SPM,HR,DutyCycle,Rowfile_Id'
FIELDS_SAMPLES = ['time_ms', 'distance', 'pace', 'watts', 'cals', 'spm', 'hr', 'duty_cycle', 'rowfile_id']
date = None
datetime = None
total_time = None
total_distance = None
avg_pace = None
total_cals = None
slide = False
avg_hr = None
last_hr = None
samples = []
def __init__(self, filename):
lines = []
try:
with open(filename, 'r') as fp:
lines = fp.read().split("\r\n")
except IOError as e:
print 'Could not read file {}: {}'.format(filename, e)
summary_found = False
samples_found = False
while len(lines):
line = lines.pop(0)
if not line:
continue
if line.startswith(self.HEADER_SUMMARY):
line = lines.pop(0)
summary_data = line.split(',')
if len(summary_data) != len(self.FIELDS_SUMMARY):
print 'Warning: summary line only has {} fields, {} expected'.format(len(summary_data),
len(self.FIELDS_SUMMARY))
for field in self.FIELDS_SUMMARY:
if len(summary_data):
value = summary_data.pop(0)
if hasattr(self, field) is not None:
setattr(self, field, value)
# parse the date
try:
self.datetime = datetime.datetime.strptime(self.date, '%d/%m/%Y %H:%M:%S')
except Exception as ex:
print 'Error parsing date {}: {}'.format(self.date, ex)
# parse the slide value
self.slide = True if self.slide == 'True' else False
summary_found = True
continue
elif line.startswith(self.HEADER_SAMPLES):
while len(lines):
line = lines.pop(0).strip()
if not line:
break
sample_data = line.split(',')
sample = {}
for field in self.FIELDS_SAMPLES:
sample[field] = sample_data.pop(0) if len(sample_data) else None
# convert time from milliseconds to fractional seconds
try:
sample['time'] = float(sample['time_ms']) / 1000.0
except ValueError:
print 'Error converting "{}" to float'.format(sample['time_ms'])
self.samples.append(sample)
samples_found = True
break
if not summary_found:
print 'Warning: summary section not found in file'
if not samples_found:
print 'Warning: samples section not found in file'
def get_data(self):
return {
'datetime': self.datetime,
'total_time': self.total_time,
'total_distance': self.total_distance,
'avg_pace': self.avg_pace,
'total_cals': self.total_cals,
'slide': self.slide,
'avg_hr': self.avg_hr,
'last_hr': self.last_hr,
'samples': self.samples,
}
| Python | 0.002177 |
d3d9e0f5c0da8408bcdb241509cb7dd1f41fd4bd | use hash when title not present | src/you_get/extractors/imgur.py | src/you_get/extractors/imgur.py | #!/usr/bin/env python
from ..common import *
from ..extractor import VideoExtractor
from .universal import *
class Imgur(VideoExtractor):
name = "Imgur"
stream_types = [
{'id': 'original'},
{'id': 'thumbnail'},
]
def prepare(self, **kwargs):
if re.search(r'imgur\.com/a/', self.url):
# album
content = get_content(self.url)
album = match1(content, r'album\s*:\s*({.*}),') or \
match1(content, r'image\s*:\s*({.*}),')
album = json.loads(album)
count = album['album_images']['count']
images = album['album_images']['images']
ext = images[0]['ext']
self.streams = {
'original': {
'src': ['http://i.imgur.com/%s%s' % (i['hash'], ext)
for i in images],
'size': sum([i['size'] for i in images]),
'container': ext[1:]
},
'thumbnail': {
'src': ['http://i.imgur.com/%ss%s' % (i['hash'], '.jpg')
for i in images],
'container': 'jpg'
}
}
self.title = album['title']
elif re.search(r'i\.imgur\.com/', self.url):
# direct image
_, container, size = url_info(self.url)
self.streams = {
'original': {
'src': [self.url],
'size': size,
'container': container
}
}
self.title = r1(r'i\.imgur\.com/([^./]*)', self.url)
else:
# gallery image
content = get_content(self.url)
image = json.loads(match1(content, r'image\s*:\s*({.*}),'))
ext = image['ext']
self.streams = {
'original': {
'src': ['http://i.imgur.com/%s%s' % (image['hash'], ext)],
'size': image['size'],
'container': ext[1:]
},
'thumbnail': {
'src': ['http://i.imgur.com/%ss%s' % (image['hash'], '.jpg')],
'container': 'jpg'
}
}
self.title = image['title'] or image['hash']
def extract(self, **kwargs):
if 'stream_id' in kwargs and kwargs['stream_id']:
i = kwargs['stream_id']
if 'size' not in self.streams[i]:
self.streams[i]['size'] = urls_size(self.streams[i]['src'])
site = Imgur()
download = site.download_by_url
download_playlist = site.download_by_url
| #!/usr/bin/env python
from ..common import *
from ..extractor import VideoExtractor
from .universal import *
class Imgur(VideoExtractor):
name = "Imgur"
stream_types = [
{'id': 'original'},
{'id': 'thumbnail'},
]
def prepare(self, **kwargs):
if re.search(r'imgur\.com/a/', self.url):
# album
content = get_content(self.url)
album = match1(content, r'album\s*:\s*({.*}),') or \
match1(content, r'image\s*:\s*({.*}),')
album = json.loads(album)
count = album['album_images']['count']
images = album['album_images']['images']
ext = images[0]['ext']
self.streams = {
'original': {
'src': ['http://i.imgur.com/%s%s' % (i['hash'], ext)
for i in images],
'size': sum([i['size'] for i in images]),
'container': ext[1:]
},
'thumbnail': {
'src': ['http://i.imgur.com/%ss%s' % (i['hash'], '.jpg')
for i in images],
'container': 'jpg'
}
}
self.title = album['title']
elif re.search(r'i\.imgur\.com/', self.url):
# direct image
_, container, size = url_info(self.url)
self.streams = {
'original': {
'src': [self.url],
'size': size,
'container': container
}
}
self.title = r1(r'i\.imgur\.com/([^./]*)', self.url)
else:
# gallery image
content = get_content(self.url)
image = json.loads(match1(content, r'image\s*:\s*({.*}),'))
ext = image['ext']
self.streams = {
'original': {
'src': ['http://i.imgur.com/%s%s' % (image['hash'], ext)],
'size': image['size'],
'container': ext[1:]
},
'thumbnail': {
'src': ['http://i.imgur.com/%ss%s' % (image['hash'], '.jpg')],
'container': 'jpg'
}
}
self.title = image['title']
def extract(self, **kwargs):
if 'stream_id' in kwargs and kwargs['stream_id']:
i = kwargs['stream_id']
if 'size' not in self.streams[i]:
self.streams[i]['size'] = urls_size(self.streams[i]['src'])
site = Imgur()
download = site.download_by_url
download_playlist = site.download_by_url
| Python | 0.000083 |
c55f21aa4925f6227086dedca2a3f839db98d8e1 | implement unit tests for debug command | tests/lib/cmdline/commands/test_debug.py | tests/lib/cmdline/commands/test_debug.py | # Copyright 2015 Curtis Sand
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from unittest.mock import patch
import lib.cmdline.commands as commands
from .test_base import BaseCommandTest
class DebugTest(BaseCommandTest):
def setUp(self):
super().setUp()
self.command_class = commands.Debug
self.alias_commands = ['do_dbg']
# note: need patch the default action using the mangled named
@patch('lib.cmdline.commands.Debug._Debug__print_state')
def test_default_command_action(self, mock_print_state):
debug_cmd = self.get_instance()
debug_cmd.do_debug('')
self.assertTrue(mock_print_state.called)
@patch('builtins.print')
def test_print_state(self, mock_print):
debug_cmd = self.get_instance()
debug_cmd.do_debug('--print-state')
self.assertTrue(mock_print.called)
mock_print.assert_called_with(debug_cmd.engine)
@patch('code.interact')
def test_interactive(self, mock_interact):
debug_cmd = self.get_instance()
debug_cmd.do_debug('--interact')
self.assertTrue(mock_interact.called)
def test_new_state(self):
debug_cmd = self.get_instance()
debug_cmd.do_debug('--new-state')
self.assertTrue(self.mock_engine.new_game.called)
| # Copyright 2015 Curtis Sand
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from unittest import skip
import lib.cmdline.commands as commands
from .test_base import BaseCommandTest
class DebugTest(BaseCommandTest):
def setUp(self):
super().setUp()
self.command_class = commands.Debug
self.alias_commands = ['do_dbg']
@skip('NI')
def test_print_state(self):
pass
@skip('NI')
def test_interactive(self):
pass
@skip('NI')
def test_new_state(self):
pass
| Python | 0.000005 |
ecdcfe6d3e5f076f92b38b4a76d2975bce2bc4a2 | add -b and -sp | rna_tools/tools/md/rna_minimize.py | rna_tools/tools/md/rna_minimize.py | #!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
ddd
"""
from __future__ import print_function
from openmm.app import *
from openmm import *
from openmm.unit import *
from sys import stdout
import argparse
from rna_tools.tools.mq.lib.timex import timex
def get_parser():
parser = argparse.ArgumentParser(
description=__doc__, formatter_class=argparse.RawDescriptionHelpFormatter)
parser.add_argument("-v", "--verbose",
action="store_true", help="be verbose")
parser.add_argument("file", help="", default="") # nargs='+')
parser.add_argument("-b", "--box-size", help="", default=1, type=float) # nargs='+')
parser.add_argument("-sp", "--solv-padding", action="store_true")
parser.add_argument("--pymol",
action="store_true", help="be verbose")
return parser
if __name__ == '__main__':
parser = get_parser()
args = parser.parse_args()
if list != type(args.file):
args.file = [args.file]
for f in args.file:
print(f, '...')
t = timex.Timex()
t.start()
pdbout = f.replace('.pdb','') + '_min.pdb'
pdb = PDBFile(f)
log = f.replace('.pdb','') + '.log'
modeller = Modeller(pdb.topology, pdb.positions)
#ff = 'charmm36.xml' #ff14SB.xml' #amber14sb.xml' # 'amber14-all.xml'
ff = 'amber14-all.xml'
#ff = 'amberfb15.xml'
#ff = 'amber14/RNA.OL3.xml'
#ff = 'amber99sb.xml'
forcefield = ForceField(ff, 'amber14/tip3pfb.xml')
modeller.addHydrogens(forcefield)
#modeller.addSolvent(forcefield, ionicStrength=0.1*molar)
# modeller.addSolvent(forcefield, model='tip5p')
bs = args.box_size
modeller.addSolvent(forcefield, boxSize=Vec3(bs, bs, bs)*nanometers)
if args.solv_padding:
print(1*nanometers)
modeller.addSolvent(forcefield, padding=1*nanometers)
# 5.0, 3.5, 3.5
#modeller.addSolvent(forcefield, boxSize=Vec3(2, 2, 2)*nanometers)
system = forcefield.createSystem(modeller.topology, nonbondedMethod=app.NoCutoff, #nonbondedMethod=PME,
nonbondedCutoff=1*nanometer, constraints=HBonds)
integrator = LangevinIntegrator(300*kelvin, 1/picosecond, 0.002*picoseconds)
simulation = Simulation(modeller.topology, system, integrator)
simulation.context.setPositions(modeller.positions)
simulation.minimizeEnergy()
# from http://zarbi.chem.yale.edu/ligpargen/openMM_tutorial.html
position = simulation.context.getState(getPositions=True).getPositions()
energy = simulation.context.getState(getEnergy=True).getPotentialEnergy()
app.PDBFile.writeFile(simulation.topology, position,
open(pdbout, 'w'))
print('Energy at Minima is %3.3f kcal/mol' % (energy._value * KcalPerKJ))
print('saved ', pdbout)
if args.pymol:
os.system('open %s' % out)
print(t.end())
| #!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
"""
from __future__ import print_function
from simtk.openmm.app import *
from simtk.openmm import *
from simtk.unit import *
from sys import stdout
import argparse
def get_parser():
parser = argparse.ArgumentParser(
description=__doc__, formatter_class=argparse.RawDescriptionHelpFormatter)
parser.add_argument("-v", "--verbose",
action="store_true", help="be verbose")
parser.add_argument("file", help="", default="") # nargs='+')
parser.add_argument("--pymol",
action="store_true", help="be verbose")
return parser
if __name__ == '__main__':
parser = get_parser()
args = parser.parse_args()
if list != type(args.file):
args.file = [args.file]
for f in args.file:
print(f, '...')
pdbout = f.replace('.pdb','') + '_min.pdb'
pdb = PDBFile(f)
log = f.replace('.pdb','') + '.log'
modeller = Modeller(pdb.topology, pdb.positions)
forcefield = ForceField('amber14-all.xml', 'amber14/tip3pfb.xml')
modeller.addHydrogens(forcefield)
#modeller.addSolvent(forcefield, ionicStrength=0.1*molar)
# modeller.addSolvent(forcefield, model='tip5p')
#modeller.addSolvent(forcefield, padding=0.5*nanometers)
modeller.addSolvent(forcefield, boxSize=Vec3(5.0, 3.5, 3.5)*nanometers)
system = forcefield.createSystem(modeller.topology, nonbondedMethod=app.NoCutoff, #nonbondedMethod=PME,
nonbondedCutoff=1*nanometer, constraints=HBonds)
integrator = LangevinIntegrator(300*kelvin, 1/picosecond, 0.002*picoseconds)
simulation = Simulation(modeller.topology, system, integrator)
simulation.context.setPositions(modeller.positions)
simulation.minimizeEnergy()
# from http://zarbi.chem.yale.edu/ligpargen/openMM_tutorial.html
position = simulation.context.getState(getPositions=True).getPositions()
energy = simulation.context.getState(getEnergy=True).getPotentialEnergy()
app.PDBFile.writeFile(simulation.topology, position,
open(pdbout, 'w'))
print('Energy at Minima is %3.3f kcal/mol' % (energy._value * KcalPerKJ))
print('saved ', pdbout)
if args.pymol:
os.system('open %s' % out)
| Python | 0.000004 |
482f9ffaf1c2998fafc924a91b07656d3c054c91 | fix string | bin/extract_darkmatter.py | bin/extract_darkmatter.py | #!/usr/bin/env python
import argparse
import leveldb
import os
import shutil
import sys
from Bio import SeqIO
def main(args):
parser = argparse.ArgumentParser(description="Script to extract darkmatter - predicted proteins with no similarities")
parser.add_argument("-i", "--input", dest="input", help="Name of input genecall fasta file.")
parser.add_argument("-o", "--output", dest="output", help="Name of output darkmatter fasta file.")
parser.add_argument("-s", "--sims", dest="sims", help="Name of similarity file")
parser.add_argument("-d", "--db", dest="db", default=".", help="Directory to store LevelDB, default CWD")
parser.add_argument("-v", "--verbose", dest="verbose", action="store_true", help="Print informational messages")
args = parser.parse_args()
if ('sims' not in args) or (os.stat(args.sims).st_size == 0):
print "Similarity file was omitted or is empty, copying %s to %s ... " % (args.input, args.output)
shutil.copyfile(args.input, args.output)
return 0
db = leveldb.LevelDB(args.db)
shdl = open(args.sims, 'rU')
if args.verbose:
print "Reading file %s ... " % args.sims
for line in shdl:
parts = line.strip().split('\t')
db.Put(parts[0], "X")
shdl.close()
if args.verbose:
print "Done"
print "Reading file %s ... " % args.input
ihdl = open(args.input, 'rU')
ohdl = open(args.output, 'w')
g_num = 0
d_num = 0
for rec in SeqIO.parse(ihdl, 'fasta'):
g_num += 1
try:
val = db.Get(rec.id)
except KeyError:
d_num += 1
ohdl.write("%s\n%s\n"%(rec.id, str(rec.seq).upper()))
ihdl.close()
ohdl.close()
if args.verbose:
print "Done: %d darkmatter genes found out of %d total" %(d_num, g_num)
return 0
if __name__ == "__main__":
sys.exit( main(sys.argv) )
| #!/usr/bin/env python
import argparse
import leveldb
import os
import shutil
import sys
from Bio import SeqIO
def main(args):
parser = argparse.ArgumentParser(description="Script to extract darkmatter - predicted proteins with no similarities")
parser.add_argument("-i", "--input", dest="input", help="Name of input genecall fasta file.")
parser.add_argument("-o", "--output", dest="output", help="Name of output darkmatter fasta file.")
parser.add_argument("-s", "--sims", dest="sims", help="Name of similarity file")
parser.add_argument("-d", "--db", dest="db", default=".", help="Directory to store LevelDB, default CWD")
parser.add_argument("-v", "--verbose", dest="verbose", action="store_true", help="Print informational messages")
args = parser.parse_args()
if ('sims' not in args) or (os.stat(args.sims).st_size == 0):
print "Similarity file was omitted or is empty, copying %s to %s ... " % (args.input, args.output)
shutil.copyfile(args.input, args.output)
return 0
db = leveldb.LevelDB(args.db)
shdl = open(args.sims, 'rU')
if args.verbose:
print "Reading file %s ... " % args.sims
for line in shdl:
parts = line.strip().split('\t')
db.Put(parts[0], 1)
shdl.close()
if args.verbose:
print "Done"
print "Reading file %s ... " % args.input
ihdl = open(args.input, 'rU')
ohdl = open(args.output, 'w')
g_num = 0
d_num = 0
for rec in SeqIO.parse(ihdl, 'fasta'):
g_num += 1
try:
val = db.Get(rec.id)
except KeyError:
d_num += 1
ohdl.write("%s\n%s\n"%(rec.id, str(rec.seq).upper()))
ihdl.close()
ohdl.close()
if args.verbose:
print "Done: %d darkmatter genes found out of %d total" %(d_num, g_num)
return 0
if __name__ == "__main__":
sys.exit( main(sys.argv) )
| Python | 0.999999 |
b49f22af96644daa67c8d75881f59edd98b652b0 | Fix import broken by s/html/serialization | kraken/transcrib.py | kraken/transcrib.py | # -*- coding: utf-8 -*-
#
# Copyright 2015 Benjamin Kiessling
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express
# or implied. See the License for the specific language governing
# permissions and limitations under the License.
# -*- coding: utf-8 -*-
"""
Utility functions for ground truth transcription.
"""
from __future__ import absolute_import, division, print_function
from __future__ import unicode_literals
from kraken.serialization import max_bbox
from kraken.lib.exceptions import KrakenInputException
from jinja2 import Environment, PackageLoader
from itertools import izip_longest
from StringIO import StringIO
import regex
import base64
import os
class TranscriptionInterface(object):
def __init__(self, font=None, font_style=None):
env = Environment(loader=PackageLoader('kraken', 'templates'))
self.tmpl = env.get_template('layout.html')
self.pages = []
self.font = {'font': font, 'style': font_style}
self.page_idx = 1
self.line_idx = 1
self.seg_idx = 1
def add_page(self, im, segmentation=None, records=None):
"""
Adds an image to the transcription interface, optionally filling in
information from a list of ocr_record objects.
Args:
im (PIL.Image): Input image
records (list): A list of ocr_record objects.
"""
page = {}
fd = StringIO()
im.save(fd, format='png')
page['index'] = self.page_idx
self.page_idx += 1
page['img'] = 'data:image/png;base64,' + base64.b64encode(fd.getvalue())
page['lines'] = []
if records:
for record in records:
splits = regex.split(u'(\s+)', record.prediction)
bbox = max_bbox(record.cuts)
line_offset = 0
segments = []
for segment, whitespace in izip_longest(splits[0::2], splits[1::2]):
if len(segment):
seg_bbox = max_bbox(record.cuts[line_offset:line_offset + len(segment)])
segments.append({'bbox': '{}, {}. {}, {}'.format(*seg_bbox), 'text': segment, 'index': self.seg_idx})
self.seg_idx += 1
line_offset += len(segment)
if whitespace:
line_offset += len(whitespace)
page['lines'].append({'index': self.line_idx, 'recognition': segments,
'bbox': '{}, {}, {}, {}'.format(int(bbox[0]),
int(bbox[1]),
int(bbox[2]),
int(bbox[3]))})
self.line_idx += 1
elif segmentation:
for bbox in segmentation:
page['lines'].append({'index': self.line_idx, 'bbox': '{}, {}, {}, {}'.format(int(bbox[0]), int(bbox[1]), int(bbox[2]), int(bbox[3]))})
self.line_idx += 1
else:
raise KrakenInputException('Neither segmentations nor records given')
self.pages.append(page)
def write(self, fd):
"""
Writes the HTML file to a file descriptor.
Args:
fd (File): File descriptor to write to.
"""
fd.write(self.tmpl.render(pages=self.pages, font=self.font).encode('utf-8'))
| # -*- coding: utf-8 -*-
#
# Copyright 2015 Benjamin Kiessling
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express
# or implied. See the License for the specific language governing
# permissions and limitations under the License.
# -*- coding: utf-8 -*-
"""
Utility functions for ground truth transcription.
"""
from __future__ import absolute_import, division, print_function
from __future__ import unicode_literals
from kraken.html import max_bbox
from kraken.lib.exceptions import KrakenInputException
from jinja2 import Environment, PackageLoader
from itertools import izip_longest
from StringIO import StringIO
import regex
import base64
import os
class TranscriptionInterface(object):
def __init__(self, font=None, font_style=None):
env = Environment(loader=PackageLoader('kraken', 'templates'))
self.tmpl = env.get_template('layout.html')
self.pages = []
self.font = {'font': font, 'style': font_style}
self.page_idx = 1
self.line_idx = 1
self.seg_idx = 1
def add_page(self, im, segmentation=None, records=None):
"""
Adds an image to the transcription interface, optionally filling in
information from a list of ocr_record objects.
Args:
im (PIL.Image): Input image
records (list): A list of ocr_record objects.
"""
page = {}
fd = StringIO()
im.save(fd, format='png')
page['index'] = self.page_idx
self.page_idx += 1
page['img'] = 'data:image/png;base64,' + base64.b64encode(fd.getvalue())
page['lines'] = []
if records:
for record in records:
splits = regex.split(u'(\s+)', record.prediction)
bbox = max_bbox(record.cuts)
line_offset = 0
segments = []
for segment, whitespace in izip_longest(splits[0::2], splits[1::2]):
if len(segment):
seg_bbox = max_bbox(record.cuts[line_offset:line_offset + len(segment)])
segments.append({'bbox': '{}, {}. {}, {}'.format(*seg_bbox), 'text': segment, 'index': self.seg_idx})
self.seg_idx += 1
line_offset += len(segment)
if whitespace:
line_offset += len(whitespace)
page['lines'].append({'index': self.line_idx, 'recognition': segments,
'bbox': '{}, {}, {}, {}'.format(int(bbox[0]),
int(bbox[1]),
int(bbox[2]),
int(bbox[3]))})
self.line_idx += 1
elif segmentation:
for bbox in segmentation:
page['lines'].append({'index': self.line_idx, 'bbox': '{}, {}, {}, {}'.format(int(bbox[0]), int(bbox[1]), int(bbox[2]), int(bbox[3]))})
self.line_idx += 1
else:
raise KrakenInputException('Neither segmentations nor records given')
self.pages.append(page)
def write(self, fd):
"""
Writes the HTML file to a file descriptor.
Args:
fd (File): File descriptor to write to.
"""
fd.write(self.tmpl.render(pages=self.pages, font=self.font).encode('utf-8'))
| Python | 0.000001 |
19d5d98350c8ef6f8e3d9153a899a6ce466e5e21 | Rename `UserOwnedModelManager` to just `UserOwnedManager` for consistency with Django naming convention. Replace `_for_user` methods with methods that override the base manager methods - this should help enforce a user context for models, and implement initial set of method overrides in this manner. | owned_models/models.py | owned_models/models.py | from django.conf import settings
from django.db import models
class UserOwnedManager(models.Manager):
"""
Wraps standard Manager query methods and adds a required `user` argument, to enforce all calls
made through this manager to be made within a user context.
"""
def all(self, user):
return super(UserOwnedManager, self).filter(user = user)
def filter(self, user, **kwargs):
return super(UserOwnedManager, self).filter(user = user, **kwargs)
def exclude(self, user, **kwargs):
return self.filter(user).exclude(**kwargs)
def get(self, user, *args, **kwargs):
return super(UserOwnedManager, self).get(user = user, *args, **kwargs)
def create(self, user, **kwargs):
return super(UserOwnedManager, self).create(user = user, **kwargs)
def get_or_create(self, user, defaults = None, **kwargs):
if defaults is None:
defaults = {}
defaults['user'] = user
return super(UserOwnedManager, self).get_or_create(user = user, defaults = defaults, **kwargs)
def update_or_create(self, user, defaults = None, **kwargs):
if defaults is None:
defaults = {}
defaults['user'] = user
return super(UserOwnedManager, self).update_or_create(user = user, defaults = defaults, **kwargs)
class UserOwnedModel(models.Model):
"""
Base class for models that are owned by a user.
"""
user = models.ForeignKey(settings.AUTH_USER_MODEL, editable = False)
objects = UserOwnedManager()
all_objects = models.Manager()
class Meta:
abstract = True
| from django.conf import settings
from django.db import models
class UserOwnedModelManager(models.Manager):
def filter_for_user(self, user, *args, **kwargs):
return super(UserOwnedModelManager, self).get_queryset().filter(user = user, *args, **kwargs)
def get_for_user(self, user, *args, **kwargs):
if 'user' in kwargs:
kwargs.pop('user')
return super(UserOwnedModelManager, self).get_queryset().get(user = user, *args, **kwargs)
def get_or_create_for_user(self, user, **kwargs):
return super(UserOwnedModelManager, self).get_or_create(user = user, **kwargs)
class UserOwnedModel(models.Model):
user = models.ForeignKey(settings.AUTH_USER_MODEL, editable = False)
objects = UserOwnedModelManager()
class Meta:
abstract = True | Python | 0 |
aa6b1daedbd911c23857033bcc601bdae37627f0 | Fix the Stream wrapping class. It had moved from elsewhere, but wasn't corrected for its new home in util.py | subversion/bindings/swig/python/svn/util.py | subversion/bindings/swig/python/svn/util.py | #
# svn.util: public Python interface for miscellaneous bindings
#
# Subversion is a tool for revision control.
# See http://subversion.tigris.org for more information.
#
# ====================================================================
# Copyright (c) 2000-2001 CollabNet. All rights reserved.
#
# This software is licensed as described in the file COPYING, which
# you should have received as part of this distribution. The terms
# are also available at http://subversion.tigris.org/license-1.html.
# If newer versions of this license are posted there, you may use a
# newer version instead, at your option.
#
######################################################################
#
# to retain backwards Python compat, we don't use 'import foo as bar'
import string
_string = string
del string
# bring all the symbols up into this module
### in the future, we may want to limit this, rename things, etc
from _util import *
def run_app(func, *args, **kw):
'''Run a function as an "APR application".
APR is initialized, and an application pool is created. Cleanup is
performed as the function exits (normally or via an exception.
'''
apr_initialize()
try:
pool = svn_pool_create(None)
try:
return apply(func, (pool,) + args, kw)
finally:
svn_pool_destroy(pool)
finally:
apr_terminate()
# some minor patchups
svn_pool_destroy = apr_pool_destroy
class Stream:
def __init__(self, stream):
self._stream = stream
def read(self, amt=None):
if amt is None:
# read the rest of the stream
chunks = [ ]
while 1:
data = svn_stream_read(self._stream, SVN_STREAM_CHUNK_SIZE)
if not data:
break
chunks.append(data)
return _string.join(chunks, '')
# read the amount specified
return svn_stream_read(self._stream, int(amt))
def write(self, buf):
### what to do with the amount written? (the result value)
svn_stream_write(self._stream, buf)
| #
# svn.util: public Python interface for miscellaneous bindings
#
# Subversion is a tool for revision control.
# See http://subversion.tigris.org for more information.
#
# ====================================================================
# Copyright (c) 2000-2001 CollabNet. All rights reserved.
#
# This software is licensed as described in the file COPYING, which
# you should have received as part of this distribution. The terms
# are also available at http://subversion.tigris.org/license-1.html.
# If newer versions of this license are posted there, you may use a
# newer version instead, at your option.
#
######################################################################
#
# bring all the symbols up into this module
### in the future, we may want to limit this, rename things, etc
from _util import *
def run_app(func, *args, **kw):
'''Run a function as an "APR application".
APR is initialized, and an application pool is created. Cleanup is
performed as the function exits (normally or via an exception.
'''
apr_initialize()
try:
pool = svn_pool_create(None)
try:
return apply(func, (pool,) + args, kw)
finally:
svn_pool_destroy(pool)
finally:
apr_terminate()
# some minor patchups
svn_pool_destroy = apr_pool_destroy
class Stream:
def __init__(self, stream):
self._stream = stream
def read(self, amt=None):
if amt is None:
# read the rest of the stream
chunks = [ ]
while 1:
data = util.svn_stream_read(self._stream, util.SVN_STREAM_CHUNK_SIZE)
if not data:
break
chunks.append(data)
return string.join(chunks, '')
# read the amount specified
return util.svn_stream_read(self._stream, int(amt))
def write(self, buf):
### what to do with the amount written? (the result value)
util.svn_stream_write(self._stream, buf)
| Python | 0.999587 |
b41ad87d9cb941abce185d367c973e0a3f2802cb | Update flaskext sqlalchemy module import. | pystil/db.py | pystil/db.py | #!/usr/bin/env python
# -*- coding: utf-8 -*-
# Copyright (C) 2011 by Florian Mounier, Kozea
# This file is part of pystil, licensed under a 3-clause BSD license.
from flask_sqlalchemy import SQLAlchemy
from datetime import timedelta
from sqlalchemy import func, desc
from sqlalchemy.orm import column_property
from sqlalchemy.sql.expression import case
from . import patchpsycopg
db = SQLAlchemy()
count = func.count
sum_ = func.sum
distinct = func.distinct
date_part = func.date_part
date_trunc = func.date_trunc
split_part = func.split_part
strpos = func.strpos
substr = func.substr
length = func.length
array_agg = func.array_agg
def string(pkey=False):
return db.Column(db.String, primary_key=pkey)
def integer(pkey=False):
return db.Column(db.Integer, primary_key=pkey)
def decimal():
return db.Column(db.Numeric)
def datetime():
return db.Column(db.DateTime)
def date(pkey=False):
return db.Column(db.Date, primary_key=pkey)
def fields(clazz):
return [field
for field in clazz.__dict__
if not field.startswith("_")]
class Visit(db.Model):
"""This mapped class contains the visits"""
id = integer(pkey=True)
uuid = string()
browser_name = string()
hash = string()
host = string()
browser_version = string()
client_tz_offset = integer()
date = datetime()
last_visit = datetime()
ip = string()
language = string()
page = string()
platform = string()
query_string = db.Column('query', db.String)
referrer = string()
pretty_referrer = string()
referrer_domain = string()
site = string()
size = string()
time = db.Column(db.Interval)
country = string()
country_code = string()
city = string()
lat = decimal()
lng = decimal()
browser_name_version = column_property(
browser_name + ' ' + split_part(browser_version, '.', 1) +
case([
(browser_name.in_(['opera', 'safari', 'chrome']), '')],
else_='.' + split_part(browser_version, '.', 2)
))
day = column_property(
date_trunc('day', date))
hour = column_property(
date_part('hour', date))
spent_time = column_property(
case([
(time == None, None),
(time < timedelta(seconds=1), 0),
(time < timedelta(seconds=2), 1),
(time < timedelta(seconds=5), 2),
(time < timedelta(seconds=10), 3),
(time < timedelta(seconds=20), 4),
(time < timedelta(seconds=30), 5),
(time < timedelta(seconds=60), 6),
(time < timedelta(seconds=120), 7),
(time < timedelta(seconds=300), 8),
(time < timedelta(seconds=600), 9)
], else_=10))
subdomain = column_property(
case([
(split_part(host, '.', 3) != '', split_part(host, '.', 1))
], else_=None))
domain = column_property(
case([
(split_part(host, '.', 3) == '', host),
], else_=substr(host,
strpos(host, '.') + 1,
length(host) - strpos(host, '.') + 1)))
class Keys(db.Model):
"""This mapped lass contains the auth keys"""
id = integer(pkey=True)
key = string()
host = string()
| #!/usr/bin/env python
# -*- coding: utf-8 -*-
# Copyright (C) 2011 by Florian Mounier, Kozea
# This file is part of pystil, licensed under a 3-clause BSD license.
from flaskext.sqlalchemy import SQLAlchemy
from datetime import timedelta
from sqlalchemy import func, desc
from sqlalchemy.orm import column_property
from sqlalchemy.sql.expression import case
from . import patchpsycopg
db = SQLAlchemy()
count = func.count
sum_ = func.sum
distinct = func.distinct
date_part = func.date_part
date_trunc = func.date_trunc
split_part = func.split_part
strpos = func.strpos
substr = func.substr
length = func.length
array_agg = func.array_agg
def string(pkey=False):
return db.Column(db.String, primary_key=pkey)
def integer(pkey=False):
return db.Column(db.Integer, primary_key=pkey)
def decimal():
return db.Column(db.Numeric)
def datetime():
return db.Column(db.DateTime)
def date(pkey=False):
return db.Column(db.Date, primary_key=pkey)
def fields(clazz):
return [field
for field in clazz.__dict__
if not field.startswith("_")]
class Visit(db.Model):
"""This mapped class contains the visits"""
id = integer(pkey=True)
uuid = string()
browser_name = string()
hash = string()
host = string()
browser_version = string()
client_tz_offset = integer()
date = datetime()
last_visit = datetime()
ip = string()
language = string()
page = string()
platform = string()
query_string = db.Column('query', db.String)
referrer = string()
pretty_referrer = string()
referrer_domain = string()
site = string()
size = string()
time = db.Column(db.Interval)
country = string()
country_code = string()
city = string()
lat = decimal()
lng = decimal()
browser_name_version = column_property(
browser_name + ' ' + split_part(browser_version, '.', 1) +
case([
(browser_name.in_(['opera', 'safari', 'chrome']), '')],
else_='.' + split_part(browser_version, '.', 2)
))
day = column_property(
date_trunc('day', date))
hour = column_property(
date_part('hour', date))
spent_time = column_property(
case([
(time == None, None),
(time < timedelta(seconds=1), 0),
(time < timedelta(seconds=2), 1),
(time < timedelta(seconds=5), 2),
(time < timedelta(seconds=10), 3),
(time < timedelta(seconds=20), 4),
(time < timedelta(seconds=30), 5),
(time < timedelta(seconds=60), 6),
(time < timedelta(seconds=120), 7),
(time < timedelta(seconds=300), 8),
(time < timedelta(seconds=600), 9)
], else_=10))
subdomain = column_property(
case([
(split_part(host, '.', 3) != '', split_part(host, '.', 1))
], else_=None))
domain = column_property(
case([
(split_part(host, '.', 3) == '', host),
], else_=substr(host,
strpos(host, '.') + 1,
length(host) - strpos(host, '.') + 1)))
class Keys(db.Model):
"""This mapped lass contains the auth keys"""
id = integer(pkey=True)
key = string()
host = string()
| Python | 0 |
be10731cab38445a3d1c3a6df3703fba3fecc93f | Fix accessing argv | examples/move-by-label.py | examples/move-by-label.py | #!/usr/bin/env python
"""
Example script to move torrents based on their label set in ruTorrent.
./move-by-label.py USERNAME HOSTNAME [PATH]
"""
from __future__ import print_function
from time import sleep
import sys
from xirvik.client import ruTorrentClient
USERNAME = sys.argv[1]
HOST = sys.argv[2]
try:
PATH = sys.argv[3]
except IndexError:
PATH = ''
PREFIX = '/torrents/{}/{}'.format(USERNAME, PATH)
if __name__ == '__main__':
client = ruTorrentClient(HOST)
count = 0
for hash, info in client.list_torrents_dict().iteritems():
name = info['name'].encode('utf-8')
label = info['custom1']
move_to = '{}/{}'.format(PREFIX, label.lower())
# Ignore torrents that are hash checking, not finished hash checking,
# not complete or that are already moved
if (info['is_hash_checking'] or
not info['is_hash_checked'] or
info['left_bytes'] > 0 or
info['base_path'].startswith(move_to)):
continue
print('Moving {} to {}/'.format(name, move_to.encode('utf-8'), name))
client.move_torrent(hash, move_to)
# Sometimes the web server cannot handle so many requests, so only
# send 10 at a time
count += 1
if count and (count % 10) == 0:
sleep(10)
| #!/usr/bin/env python
"""
Example script to move torrents based on their label set in ruTorrent.
./move-by-label.py USERNAME HOSTNAME [PATH]
"""
from __future__ import print_function
from time import sleep
import sys
from xirvik.client import ruTorrentClient
USERNAME = sys.argv[1]
HOST = sys.arg[2]
try:
PATH = sys.argv[3]
except IndexError:
PATH = ''
PREFIX = '/torrents/{}/{}'.format(USERNAME, PATH)
if __name__ == '__main__':
client = ruTorrentClient(HOST)
count = 0
for hash, info in client.list_torrents_dict().iteritems():
name = info['name'].encode('utf-8')
label = info['custom1']
move_to = '{}/{}'.format(PREFIX, label.lower())
# Ignore torrents that are hash checking, not finished hash checking,
# not complete or that are already moved
if (info['is_hash_checking'] or
not info['is_hash_checked'] or
info['left_bytes'] > 0 or
info['base_path'].startswith(move_to)):
continue
print('Moving {} to {}/'.format(name, move_to.encode('utf-8'), name))
client.move_torrent(hash, move_to)
# Sometimes the web server cannot handle so many requests, so only
# send 10 at a time
count += 1
if count and (count % 10) == 0:
sleep(10)
| Python | 0.000006 |
b2fd88546c73e4aadb0e697233a7bfd20398e429 | Check the PBS_NUM_NODES environment variable first when auto_npar = True. | sshcustodian/vasp/sshjobs.py | sshcustodian/vasp/sshjobs.py | # File: sshcustodian/vasp/sshjobs.py
# -*- coding: utf-8 -*-
from __future__ import unicode_literals, division
"""
"""
import os
import shutil
import math
from pymatgen.io.vasp import Incar
from pymatgen.io.smart import read_structure
from pymatgen.io.vasp.sets import MPVaspInputSet
from custodian.vasp.interpreter import VaspModder
from custodian.custodian.vasp import VaspJob
VASP_INPUT_FILES = {"INCAR", "POSCAR", "POTCAR", "KPOINTS"}
VASP_OUTPUT_FILES = ['DOSCAR', 'INCAR', 'KPOINTS', 'POSCAR', 'PROCAR',
'vasprun.xml', 'CHGCAR', 'CHG', 'EIGENVAL', 'OSZICAR',
'WAVECAR', 'CONTCAR', 'IBZKPT', 'OUTCAR']
class SSHVaspJob(VaspJob):
"""
"""
def __init__(self, vasp_cmd, output_file="vasp.out", suffix="",
final=True, backup=True,
default_vasp_input_set=MPVaspInputSet(), auto_npar=True,
auto_gamma=True, settings_override=None,
gamma_vasp_cmd=None, copy_magmom=False):
"""
"""
super(SSHVaspJob, self).__init__(vasp_cmd, output_file, suffix, final,
backup, default_vasp_input_set,
auto_npar, auto_gamma,
settings_override, gamma_vasp_cmd,
copy_magmom)
def setup(self):
"""
"""
files = os.listdir(".")
num_structures = 0
if not set(files).issuperset(VASP_INPUT_FILES):
for f in files:
try:
struct = read_structure(f)
num_structures += 1
except:
pass
if num_structures != 1:
raise RuntimeError("{} structures found. Unable to continue."
.format(num_structures))
else:
self.default_vis.write_input(struct, ".")
if self.backup:
for f in VASP_INPUT_FILES:
shutil.copy(f, "{}.orig".format(f))
if self.auto_npar:
try:
incar = Incar.from_file("INCAR")
#Only optimized NPAR for non-HF and non-RPA calculations.
if not (incar.get("LHFCALC") or incar.get("LRPA") or
incar.get("LEPSILON")):
if incar.get("IBRION") in [5, 6, 7, 8]:
# NPAR should not be set for Hessian matrix
# calculations, whether in DFPT or otherwise.
del incar["NPAR"]
else:
import multiprocessing
# try pbs environment variable first
# try sge environment variable second
# Note!
# multiprocessing.cpu_count() will include hyperthreads
# in the CPU count, which will set NPAR to be too large
# and can cause the job to hang if you use compute
# nodes with scratch partitions.
ncores = (os.environ.get("PBS_NUM_NODES") or
os.environ.get('NSLOTS') or
multiprocessing.cpu_count())
ncores = int(ncores)
for npar in range(int(math.sqrt(ncores)),
ncores):
if ncores % npar == 0:
incar["NPAR"] = npar
break
incar.write_file("INCAR")
except:
pass
if self.settings_override is not None:
VaspModder().apply_actions(self.settings_override)
| # File: sshcustodian/vasp/sshjobs.py
# -*- coding: utf-8 -*-
from __future__ import unicode_literals, division
"""
"""
import os
import shutil
import math
from pymatgen.io.vasp import Incar
from pymatgen.io.smart import read_structure
from pymatgen.io.vasp.sets import MPVaspInputSet
from custodian.vasp.interpreter import VaspModder
from custodian.custodian.vasp import VaspJob
VASP_INPUT_FILES = {"INCAR", "POSCAR", "POTCAR", "KPOINTS"}
VASP_OUTPUT_FILES = ['DOSCAR', 'INCAR', 'KPOINTS', 'POSCAR', 'PROCAR',
'vasprun.xml', 'CHGCAR', 'CHG', 'EIGENVAL', 'OSZICAR',
'WAVECAR', 'CONTCAR', 'IBZKPT', 'OUTCAR']
class SSHVaspJob(VaspJob):
"""
"""
def __init__(self, vasp_cmd, output_file="vasp.out", suffix="",
final=True, backup=True,
default_vasp_input_set=MPVaspInputSet(), auto_npar=True,
auto_gamma=True, settings_override=None,
gamma_vasp_cmd=None, copy_magmom=False):
"""
"""
super(SSHVaspJob, self).__init__(vasp_cmd, output_file, suffix, final,
backup, default_vasp_input_set,
auto_npar, auto_gamma,
settings_override, gamma_vasp_cmd,
copy_magmom)
def setup(self):
"""
"""
files = os.listdir(".")
num_structures = 0
if not set(files).issuperset(VASP_INPUT_FILES):
for f in files:
try:
struct = read_structure(f)
num_structures += 1
except:
pass
if num_structures != 1:
raise RuntimeError("{} structures found. Unable to continue."
.format(num_structures))
else:
self.default_vis.write_input(struct, ".")
if self.backup:
for f in VASP_INPUT_FILES:
shutil.copy(f, "{}.orig".format(f))
if self.auto_npar:
try:
incar = Incar.from_file("INCAR")
#Only optimized NPAR for non-HF and non-RPA calculations.
if not (incar.get("LHFCALC") or incar.get("LRPA") or
incar.get("LEPSILON")):
if incar.get("IBRION") in [5, 6, 7, 8]:
# NPAR should not be set for Hessian matrix
# calculations, whether in DFPT or otherwise.
del incar["NPAR"]
else:
import multiprocessing
# try sge environment variable first
# (since multiprocessing counts cores on the current machine only)
ncores = os.environ.get('NSLOTS') or multiprocessing.cpu_count()
ncores = int(ncores)
for npar in range(int(math.sqrt(ncores)),
ncores):
if ncores % npar == 0:
incar["NPAR"] = npar
break
incar.write_file("INCAR")
except:
pass
if self.settings_override is not None:
VaspModder().apply_actions(self.settings_override)
| Python | 0 |
6ece7062e539e2196ff04c49f07913c884907878 | rearrange lines to make colors to player map clear | run-relax.py | run-relax.py | #!/usr/bin/env python
import sys
import time
from Mindwave.mindwave import BluetoothHeadset, FakeHeadset
# Note: on OS X, BluetoothHeadset will not work
from parameters import SharedParameters
from threads import HeadsetThread
from gameplay import GameObject
from game_effects import generate_player_renderer
from controller import AnimationController
from renderer import Renderer
from playlist import Playlist
PLAYER_ONE_ADDRESS = '74:E5:43:BE:39:71'
PLAYER_TWO_ADDRESS = '74:E5:43:B1:96:E0'
if __name__ == '__main__':
num_args = len(sys.argv)
test = num_args > 1 and sys.argv[1] == 'test'
ip_address = None
if test and num_args > 2:
ip_address = sys.argv[2]
elif num_args > 1:
ip_address = sys.argv[1]
# ip_address = '192.168.7.2:7890'
shared_params = SharedParameters()
if not test:
shared_params.targetFrameRate = 100.0
shared_params.use_keyboard_input = False
shared_params.debug = False
player1 = FakeHeadset(random_data = True) if test else BluetoothHeadset(PLAYER_ONE_ADDRESS)
yellowish = [1.0, 0.84, 0.28]
greenish = [0.2, 0.4, 0.]
renderer_high = generate_player_renderer(shared_params, greenish, yellowish)
player2 = FakeHeadset(random_data = True) if test else BluetoothHeadset(PLAYER_TWO_ADDRESS)
purple = [0.2, 0., 0.3]
pink = [0.7, 0.5, 0.4]
renderer_low = generate_player_renderer(shared_params, purple, pink, inverse=True)
game = GameObject(shared_params, renderer_low, renderer_high)
game.start()
controller = AnimationController(game_object=game,
renderer_low=renderer_low, renderer_high=renderer_high, params=shared_params, server=ip_address)
threads = [
HeadsetThread(shared_params, player1),
HeadsetThread(shared_params, player2, use_eeg2=True),
]
for thread in threads:
thread.start()
# start the lights
time.sleep(0.05)
controller.drawingLoop()
| #!/usr/bin/env python
import sys
import time
from Mindwave.mindwave import BluetoothHeadset, FakeHeadset
# Note: on OS X, BluetoothHeadset will not work
from parameters import SharedParameters
from threads import HeadsetThread
from gameplay import GameObject
from game_effects import generate_player_renderer
from controller import AnimationController
from renderer import Renderer
from playlist import Playlist
PLAYER_ONE_ADDRESS = '74:E5:43:BE:39:71'
PLAYER_TWO_ADDRESS = '74:E5:43:B1:96:E0'
if __name__ == '__main__':
num_args = len(sys.argv)
test = num_args > 1 and sys.argv[1] == 'test'
ip_address = None
if test and num_args > 2:
ip_address = sys.argv[2]
elif num_args > 1:
ip_address = sys.argv[1]
# ip_address = '192.168.7.2:7890'
shared_params = SharedParameters()
if not test:
shared_params.targetFrameRate = 100.0
shared_params.use_keyboard_input = False
shared_params.debug = False
player1 = FakeHeadset(random_data = True) if test else BluetoothHeadset(PLAYER_ONE_ADDRESS)
player2 = FakeHeadset(random_data = True) if test else BluetoothHeadset(PLAYER_TWO_ADDRESS)
yellowish = [1.0, 0.84, 0.28]
greenish = [0.2, 0.4, 0.]
purple = [0.2, 0., 0.3]
pink = [0.7, 0.5, 0.4]
renderer_low = generate_player_renderer(shared_params, purple, pink, inverse=True)
renderer_high = generate_player_renderer(shared_params, greenish, yellowish)
game = GameObject(shared_params, renderer_low, renderer_high)
game.start()
controller = AnimationController(game_object=game,
renderer_low=renderer_low, renderer_high=renderer_high, params=shared_params, server=ip_address)
threads = [
HeadsetThread(shared_params, player1),
HeadsetThread(shared_params, player2, use_eeg2=True),
]
for thread in threads:
thread.start()
# start the lights
time.sleep(0.05)
controller.drawingLoop()
| Python | 0 |
0f0fc4037997f6ae4eef019547e3c8d8cf05db9c | modify test data | drda/tests/test_derby.py | drda/tests/test_derby.py | ##############################################################################
# The MIT License (MIT)
#
# Copyright (c) 2016 Hajime Nakagami
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
##############################################################################
import unittest
import io
import decimal
import datetime
import drda
class TestDerby(unittest.TestCase):
host = 'localhost'
database = 'testdb;create=true'
port = 1527
def setUp(self):
self.connection = drda.connect(
host=self.host,
database=self.database,
port=self.port,
)
def tearDown(self):
self.connection.close()
def test_derby(self):
cur = self.connection.cursor()
cur.execute("""
CREATE TABLE test (
s VARCHAR(20),
i int,
d1 decimal(2, 1),
d2 decimal(18, 2)
)
""")
cur.execute("""
INSERT INTO test (s, i, d1, d2) VALUES
('abcdefghijklmnopq', 1, 1.1, 123456789.12),
('B', 2, 1.2, 2),
('C', 3, null, null)
""")
cur.execute("SELECT * FROM test")
| ##############################################################################
# The MIT License (MIT)
#
# Copyright (c) 2016 Hajime Nakagami
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
##############################################################################
import unittest
import io
import decimal
import datetime
import drda
class TestDerby(unittest.TestCase):
host = 'localhost'
database = 'testdb;create=true'
port = 1527
def setUp(self):
self.connection = drda.connect(
host=self.host,
database=self.database,
port=self.port,
)
def tearDown(self):
self.connection.close()
def test_derby(self):
cur = self.connection.cursor()
cur.execute("create table test (s varchar(20), i int, d decimal(18, 2))")
cur.execute("insert into test (s, i, d) values ('abcdefghijklmnopq', 1, 1.1)")
cur.execute("insert into test (s, i, d) values ('B', 2, 1.2)")
cur.execute("insert into test (s, i) values ('C', 3)")
cur.execute("select * from test")
| Python | 0.000006 |
2ab1c23ca4be991c174514998496ea4f7c8f6c3a | Make indentation consistent with other code | serve.py | serve.py | #!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
This module serves a WSGI application using werkzeug.
Author: Logan Raarup <logan@logan.dk>
"""
import importlib
import os
import sys
try:
from werkzeug import serving
except ImportError: # pragma: no cover
sys.exit('Unable to import werkzeug (run: pip install werkzeug)')
def serve(cwd, app, port, host='localhost'):
sys.path.insert(0, cwd)
wsgi_fqn = app.rsplit('.', 1)
wsgi_fqn_parts = wsgi_fqn[0].rsplit('/', 1)
if len(wsgi_fqn_parts) == 2:
sys.path.insert(0, os.path.join(cwd, wsgi_fqn_parts[0]))
wsgi_module = importlib.import_module(wsgi_fqn_parts[-1])
wsgi_app = getattr(wsgi_module, wsgi_fqn[1])
# Attempt to force Flask into debug mode
try:
wsgi_app.debug = True
except: # noqa: E722
pass
os.environ['IS_OFFLINE'] = 'True'
serving.run_simple(
host,
int(port),
wsgi_app,
use_debugger=True,
use_reloader=True,
use_evalex=True)
if __name__ == '__main__': # pragma: no cover
if len(sys.argv) != 5:
sys.exit('Usage: {} CWD APP PORT HOST'.format(
os.path.basename(sys.argv[0])))
serve(*sys.argv[1:])
| #!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
This module serves a WSGI application using werkzeug.
Author: Logan Raarup <logan@logan.dk>
"""
import importlib
import os
import sys
try:
from werkzeug import serving
except ImportError: # pragma: no cover
sys.exit('Unable to import werkzeug (run: pip install werkzeug)')
def serve(cwd, app, port, host='localhost'):
sys.path.insert(0, cwd)
wsgi_fqn = app.rsplit('.', 1)
wsgi_fqn_parts = wsgi_fqn[0].rsplit('/', 1)
if len(wsgi_fqn_parts) == 2:
sys.path.insert(0, os.path.join(cwd, wsgi_fqn_parts[0]))
wsgi_module = importlib.import_module(wsgi_fqn_parts[-1])
wsgi_app = getattr(wsgi_module, wsgi_fqn[1])
# Attempt to force Flask into debug mode
try:
wsgi_app.debug = True
except: # noqa: E722
pass
os.environ['IS_OFFLINE'] = 'True'
serving.run_simple(
str(host),
int(port),
wsgi_app,
use_debugger=True,
use_reloader=True,
use_evalex=True
)
if __name__ == '__main__': # pragma: no cover
if len(sys.argv) != 5:
sys.exit('Usage: {} CWD APP PORT HOST'.format(
os.path.basename(sys.argv[0])))
serve(*sys.argv[1:])
| Python | 0.000144 |
89ad9ad4e8d8820b89b65b281375782d80275446 | Fix status | aldryn_essential_addons_dashboard/views.py | aldryn_essential_addons_dashboard/views.py | # -*- coding: utf-8 -*-
from __future__ import unicode_literals
import json
import re
from django.http import HttpResponse
from django.views.decorators.csrf import csrf_exempt
from django.views.generic import View
from versionfield.version import Version
from versionfield.constants import DEFAULT_NUMBER_BITS
from .models import Addon
import warnings
ZERO = Version('0.0.0', DEFAULT_NUMBER_BITS)
class CsrfExemptMixin(object):
@classmethod
def as_view(cls, **initkwargs):
view = super(CsrfExemptMixin, cls).as_view(**initkwargs)
return csrf_exempt(view)
class ProcessWebhookView(CsrfExemptMixin, View):
http_method_names = ['post', 'get']
def get_data(self, request):
payload = request.POST.get('payload', None)
return json.loads(payload) if payload else []
def get_job_python(self, job):
"""Given a single 'job' object, return the found Python."""
if job['config'] and job['config']['python']:
return job['config']['python']
return None
def get_max_python(self, matrix):
"""Returns the max. version of python in all the successful jobs."""
max_python = ZERO
for job in matrix:
if job["state"] == "finished" and job["status"] == 0:
job_python = Version(
self.get_job_python(job), DEFAULT_NUMBER_BITS)
if job_python and job_python > max_python:
max_python = job_python
if max_python > ZERO:
return max_python
return None
def get_job_django(self, job):
"""
Given a single 'job' object, return the found Django. This one is a bit
trickier as we'll have to parse it out of the ENV.
"""
pattern = re.compile('.*?django *= *(?<version>[0-9][0-9.]*).*?', re.I)
if job['config'] and job['config']['env']:
grps = re.match(pattern, job['config']['env'])
if grps:
return Version(grps.groups['django'], DEFAULT_NUMBER_BITS)
return None
def get_max_django(self, matrix):
"""Returns the max. version of django in all the successful jobs."""
max_django = ZERO
for job in matrix:
if job['state'] == 'finished' and job['status'] == 0:
job_django = Version(
self.get_job_django(job), DEFAULT_NUMBER_BITS)
if job_django and job_django > max_django:
max_django = job_django
if max_django > ZERO:
return max_django
return None
def process_data(self, addon, data):
if data['matrix']:
addon.max_python_version = self.get_max_python(data['matrix'])
addon.max_django_version = self.get_max_django(data['matrix'])
addon.build_passing = data['status'] == 0
warnings.warn('Updating "{0}" with: {1}, {2}, {3}'.format(
addon,
addon.max_python_version,
addon.max_django_version,
addon.build_passing,
))
addon.save()
def post(self, request, *args, **kwargs):
# TODO: See: http://docs.travis-ci.com/user/notifications/#Authorization-for-Webhooks
# Too bad the docs provide the wrong headers!
slug = request.META.get('HTTP_TRAVIS_REPO_SLUG', None)
auth = request.META.get('HTTP_AUTHORIZATION', None),
addon = None
try:
addon = Addon.objects.get(repo_slug=slug)
except Addon.DoesNotExist:
pass
if addon:
data = self.get_data(request)
if data:
self.process_data(addon, data)
return HttpResponse(status=200)
def get(self, request, *args, **kwargs):
"""Just for easier testing."""
print('Received a GET request!')
return HttpResponse(status=200)
| # -*- coding: utf-8 -*-
from __future__ import unicode_literals
import json
import re
from django.http import HttpResponse
from django.views.decorators.csrf import csrf_exempt
from django.views.generic import View
from versionfield.version import Version
from versionfield.constants import DEFAULT_NUMBER_BITS
from .models import Addon
import warnings
ZERO = Version('0.0.0', DEFAULT_NUMBER_BITS)
class CsrfExemptMixin(object):
@classmethod
def as_view(cls, **initkwargs):
view = super(CsrfExemptMixin, cls).as_view(**initkwargs)
return csrf_exempt(view)
class ProcessWebhookView(CsrfExemptMixin, View):
http_method_names = ['post', 'get']
def get_data(self, request):
payload = request.POST.get('payload', None)
return json.loads(payload) if payload else []
def get_job_python(self, job):
"""Given a single 'job' object, return the found Python."""
if job['config'] and job['config']['python']:
return job['config']['python']
return None
def get_max_python(self, matrix):
"""Returns the max. version of python in all the successful jobs."""
max_python = ZERO
for job in matrix:
if job["state"] == "finished" and job["status"] == 0:
job_python = Version(
self.get_job_python(job), DEFAULT_NUMBER_BITS)
if job_python and job_python > max_python:
max_python = job_python
if max_python > ZERO:
return max_python
return None
def get_job_django(self, job):
"""
Given a single 'job' object, return the found Django. This one is a bit
trickier as we'll have to parse it out of the ENV.
"""
pattern = re.compile('.*?django *= *(?<version>[0-9][0-9.]*).*?', re.I)
if job['config'] and job['config']['env']:
grps = re.match(pattern, job['config']['env'])
if grps:
return Version(grps.groups['django'], DEFAULT_NUMBER_BITS)
return None
def get_max_django(self, matrix):
"""Returns the max. version of django in all the successful jobs."""
max_django = ZERO
for job in matrix:
if job['state'] == 'finished' and job['status'] == 0:
job_django = Version(
self.get_job_django(job), DEFAULT_NUMBER_BITS)
if job_django and job_django > max_django:
max_django = job_django
if max_django > ZERO:
return max_django
return None
def process_data(self, addon, data):
if data['matrix']:
addon.max_python_version = self.get_max_python(data['matrix'])
addon.max_django_version = self.get_max_django(data['matrix'])
addon.build_passing = data['matrix']['status'] == 0
warnings.warn('Updating "{0}" with: {1}, {2}, {3}'.format(
addon,
addon.max_python_version,
addon.max_django_version,
addon.build_passing,
))
addon.save()
def post(self, request, *args, **kwargs):
# TODO: See: http://docs.travis-ci.com/user/notifications/#Authorization-for-Webhooks
# Too bad the docs provide the wrong headers!
slug = request.META.get('HTTP_TRAVIS_REPO_SLUG', None)
auth = request.META.get('HTTP_AUTHORIZATION', None),
addon = None
try:
addon = Addon.objects.get(repo_slug=slug)
except Addon.DoesNotExist:
pass
if addon:
data = self.get_data(request)
if data:
self.process_data(addon, data)
return HttpResponse(status=200)
def get(self, request, *args, **kwargs):
"""Just for easier testing."""
print('Received a GET request!')
return HttpResponse(status=200)
| Python | 0.000001 |
cdedb1d6875a8ab5f42369b1801a1fc0ee205654 | Add option to generate coverage report | run_tests.py | run_tests.py | #!/usr/bin/env python
"""
Driver script for testing nu-TuLiP. Try calling it with "-h" flag.
SCL; 5 Sep 2013.
"""
import sys
import os.path
import nose
if __name__ == "__main__":
if ("-h" in sys.argv) or ("--help" in sys.argv):
print """Usage: run_tests.py [--cover] [--fast] [OPTIONS...] [[-]TESTFILES...]
TESTFILES... is space-separated list of test file names, where the
suffix "_test.py" is added to each given name. E.g.,
run_tests.py automaton
causes the automaton_test.py file to be used and no others. If no
arguments are given, then default is to run all tests. To exclude
tests that are marked as slow, use the flag "--fast".
If TESTFILES... each have a prefix of "-", then all tests *except*
those listed will be run. OPTIONS... are passed on to nose.
"""
exit(1)
if len(sys.argv) == 1:
nose.main()
if "--fast" in sys.argv:
skip_slow = True
sys.argv.remove("--fast")
else:
skip_slow = False
if "--cover" in sys.argv:
measure_coverage = True
sys.argv.remove("--cover")
else:
measure_coverage = False
argv = [sys.argv[0]]
if skip_slow:
argv.append("--attr=!slow")
if measure_coverage:
argv.extend(["--with-coverage", "--cover-html", "--cover-package=tulip"])
testfiles = []
excludefiles = []
for basename in sys.argv[1:]: # Only add extant file names
try:
with open(os.path.join("tests", basename+"_test.py"), "r") as f:
testfiles.append(basename+"_test.py")
except IOError:
if basename[0] == "-":
try:
with open(os.path.join("tests", basename[1:]+"_test.py"), "r") as f:
excludefiles.append(basename[1:]+"_test.py")
except IOError:
argv.append(basename)
else:
argv.append(basename)
if len(testfiles) > 0 and len(excludefiles) > 0:
print "You can specify files to exclude or include, but not both."
print "Try calling it with \"-h\" flag."
exit(1)
if len(excludefiles) > 0:
argv.append("--exclude="+"|".join(excludefiles))
argv.extend(testfiles)
nose.main(argv=argv)
| #!/usr/bin/env python
"""
Driver script for testing nu-TuLiP. Try calling it with "-h" flag.
SCL; 6 May 2013.
"""
import sys
import os.path
import nose
if __name__ == "__main__":
if ("-h" in sys.argv) or ("--help" in sys.argv):
print """Usage: run_tests.py [--fast] [OPTIONS...] [[-]TESTFILES...]
TESTFILES... is space-separated list of test file names, where the
suffix "_test.py" is added to each given name. E.g.,
run_tests.py automaton
causes the automaton_test.py file to be used and no others. If no
arguments are given, then default is to run all tests. To exclude
tests that are marked as slow, use the flag "--fast".
If TESTFILES... each have a prefix of "-", then all tests *except*
those listed will be run. OPTIONS... are passed on to nose.
"""
exit(1)
if len(sys.argv) == 1:
nose.main()
if "--fast" in sys.argv:
skip_slow = True
sys.argv.remove("--fast")
else:
skip_slow = False
argv = [sys.argv[0]]
if skip_slow:
argv.append("--attr=!slow")
testfiles = []
excludefiles = []
for basename in sys.argv[1:]: # Only add extant file names
try:
with open(os.path.join("tests", basename+"_test.py"), "r") as f:
testfiles.append(basename+"_test.py")
except IOError:
if basename[0] == "-":
try:
with open(os.path.join("tests", basename[1:]+"_test.py"), "r") as f:
excludefiles.append(basename[1:]+"_test.py")
except IOError:
argv.append(basename)
else:
argv.append(basename)
if len(testfiles) > 0 and len(excludefiles) > 0:
print "You can specify files to exclude or include, but not both."
print "Try calling it with \"-h\" flag."
exit(1)
if len(excludefiles) > 0:
argv.append("--exclude="+"|".join(excludefiles))
argv.extend(testfiles)
nose.main(argv=argv)
| Python | 0 |
b74d9d3a780082b8cb326a553a9b4c84ca5368be | Add IS_OFFLINE environment variable to serve | serve.py | serve.py | #!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
This module serves a WSGI application using werkzeug.
Author: Logan Raarup <logan@logan.dk>
"""
import importlib
import os
import sys
try:
from werkzeug import serving
except ImportError: # pragma: no cover
sys.exit('Unable to import werkzeug (run: pip install werkzeug)')
def serve(cwd, app, port):
sys.path.insert(0, cwd)
wsgi_fqn = app.rsplit('.', 1)
wsgi_fqn_parts = wsgi_fqn[0].rsplit('/', 1)
if len(wsgi_fqn_parts) == 2:
sys.path.insert(0, os.path.join(cwd, wsgi_fqn_parts[0]))
wsgi_module = importlib.import_module(wsgi_fqn_parts[-1])
wsgi_app = getattr(wsgi_module, wsgi_fqn[1])
# Attempt to force Flask into debug mode
try:
wsgi_app.debug = True
except:
pass
os.environ['IS_OFFLINE'] = 'True'
serving.run_simple(
'localhost', int(port), wsgi_app,
use_debugger=True, use_reloader=True, use_evalex=True)
if __name__ == '__main__': # pragma: no cover
if len(sys.argv) != 4:
sys.exit('Usage: {} CWD APP PORT'.format(
os.path.basename(sys.argv[0])))
serve(*sys.argv[1:])
| #!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
This module serves a WSGI application using werkzeug.
Author: Logan Raarup <logan@logan.dk>
"""
import importlib
import os
import sys
try:
from werkzeug import serving
except ImportError: # pragma: no cover
sys.exit('Unable to import werkzeug (run: pip install werkzeug)')
def serve(cwd, app, port):
sys.path.insert(0, cwd)
wsgi_fqn = app.rsplit('.', 1)
wsgi_fqn_parts = wsgi_fqn[0].rsplit('/', 1)
if len(wsgi_fqn_parts) == 2:
sys.path.insert(0, os.path.join(cwd, wsgi_fqn_parts[0]))
wsgi_module = importlib.import_module(wsgi_fqn_parts[-1])
wsgi_app = getattr(wsgi_module, wsgi_fqn[1])
# Attempt to force Flask into debug mode
try:
wsgi_app.debug = True
except:
pass
serving.run_simple(
'localhost', int(port), wsgi_app,
use_debugger=True, use_reloader=True, use_evalex=True)
if __name__ == '__main__': # pragma: no cover
if len(sys.argv) != 4:
sys.exit('Usage: {} CWD APP PORT'.format(
os.path.basename(sys.argv[0])))
serve(*sys.argv[1:])
| Python | 0 |
224aa339ee7f1720ebb3616aa62ba06975c1a11d | handle .blend model sources | pak_profiles/common.py | pak_profiles/common.py | #! /usr/bin/env python3
#-*- coding: UTF-8 -*-
### Legal
#
# Author: Thomas DEBESSE <dev@illwieckz.net>
# License: ISC
#
file_common_deps = {
"file_base": "DEPS",
"description": "Package DEPS file",
"action": "copy",
}
file_common_external_editor = {
"file_ext": [
"xcf",
"psd",
"ora",
],
"description": "External Editor File",
"action": "ignore",
}
file_common_metada_sidecar = {
"file_ext": [
"vorbiscomment",
],
"description": "Metadata Sidecar",
"action": "ignore",
}
file_common_texture = {
"file_ext": [
"jpg",
"jpeg",
"png",
"tga",
"bmp",
"webp",
"crn",
"dds",
],
"description": "Texture",
"action": "copy",
}
file_common_sound = {
"file_ext": [
"wav",
"flac",
"ogg",
"opus",
],
"description": "Sound File",
"action": "copy",
}
file_common_script = {
"file_ext": [
"shader",
"particle",
"trail",
],
"dir_ancestor_name": "scripts",
"description": "Common Script",
"action": "copy",
}
file_common_model = {
"file_ext": [
"ase",
"iqm",
"md3",
"md5anim",
"md5mesh",
"qc",
],
"description": "Common Model File",
"action": "copy",
}
file_common_model_source = {
"file_ext": [
"blend",
],
"description": "Common Model Source",
"action": "ignore",
}
file_common_text = {
"file_ext": [
"txt",
"md",
],
"description": "Common Text file",
"action": "copy",
}
file_common_readme = {
"inherit": "file_common_text",
"file_base": "README",
"description": "Common ReadMe file",
}
file_common_nullwav = {
"inherit": "file_common_sound",
"file_ext": "wav",
"file_base": "null",
"description": "Common NULL Sound File",
"action": "copy",
}
| #! /usr/bin/env python3
#-*- coding: UTF-8 -*-
### Legal
#
# Author: Thomas DEBESSE <dev@illwieckz.net>
# License: ISC
#
file_common_deps = {
"file_base": "DEPS",
"description": "Package DEPS file",
"action": "copy",
}
file_common_external_editor = {
"file_ext": [
"xcf",
"psd",
"ora",
],
"description": "External Editor File",
"action": "ignore",
}
file_common_metada_sidecar = {
"file_ext": [
"vorbiscomment",
],
"description": "Metadata Sidecar",
"action": "ignore",
}
file_common_texture = {
"file_ext": [
"jpg",
"jpeg",
"png",
"tga",
"bmp",
"webp",
"crn",
"dds",
],
"description": "Texture",
"action": "copy",
}
file_common_sound = {
"file_ext": [
"wav",
"flac",
"ogg",
"opus",
],
"description": "Sound File",
"action": "copy",
}
file_common_script = {
"file_ext": [
"shader",
"particle",
"trail",
],
"dir_ancestor_name": "scripts",
"description": "Common Script",
"action": "copy",
}
file_common_model = {
"file_ext": [
"ase",
"iqm",
"md3",
"md5anim",
"md5mesh",
"qc",
],
"description": "Common Model File",
"action": "copy",
}
file_common_text = {
"file_ext": [
"txt",
"md",
],
"description": "Common Text file",
"action": "copy",
}
file_common_readme = {
"inherit": "file_common_text",
"file_base": "README",
"description": "Common ReadMe file",
}
file_common_nullwav = {
"inherit": "file_common_sound",
"file_ext": "wav",
"file_base": "null",
"description": "Common NULL Sound File",
"action": "copy",
}
| Python | 0 |
52d835ec8a3dfec53c3cab23598be6f63da9addc | Update prims_minimum_spanning.py | algorithms/graph/prims_minimum_spanning.py | algorithms/graph/prims_minimum_spanning.py | '''
This Prim's Algorithm Code is for finding weight of minimum spanning tree
of a connected graph.
For argument graph, it should be a dictionary type
such as
graph = {
'a': [ [3, 'b'], [8,'c'] ],
'b': [ [3, 'a'], [5, 'd'] ],
'c': [ [8, 'a'], [2, 'd'], [4, 'e'] ],
'd': [ [5, 'b'], [2, 'c'], [6, 'e'] ],
'e': [ [4, 'c'], [6, 'd'] ]
}
where 'a','b','c','d','e' are nodes (these can be 1,2,3,4,5 as well)
'''
import heapq # for priority queue
# prim's algo. to find weight of minimum spanning tree
def prims(graph_used):
vis=[]
s=[[0,1]]
prim = []
mincost=0
while(len(s)>0):
v=heapq.heappop(s)
x=v[1]
if(x in vis):
continue
mincost += v[0]
prim.append(x)
vis.append(x)
for j in graph_used[x]:
i=j[-1]
if(i not in vis):
heapq.heappush(s,j)
return mincost
| import heapq # for priority queue
# prim's algo. to find weight of minimum spanning tree
def prims(graph):
vis=[]
s=[[0,1]]
prim = []
mincost=0
while(len(s)>0):
v=heapq.heappop(s)
x=v[1]
if(x in vis):
continue
mincost += v[0]
prim.append(x)
vis.append(x)
for j in g[x]:
i=j[-1]
if(i not in vis):
heapq.heappush(s,j)
return mincost
if __name__=="__main__":
# input number of nodes and edges in graph
n,e = map(int,input().split())
# initializing empty graph as a dictionary (of the form {int:list})
g=dict(zip([i for i in range(1,n+1)],[[] for i in range(n)]))
# input graph data
for i in range(e):
a,b,c=map(int,input().split())
g[a].append([c,b])
g[b].append([c,a])
# print weight of minimum spanning tree
print(prims(g))
''' tests-
Input : 4 5
1 2 7
1 4 6
2 4 9
4 3 8
2 3 6
Output : 19
Input : 5 6
1 2 3
1 3 8
2 4 5
3 4 2
3 5 4
4 5 6
Output : 14
'''
| Python | 0 |
0227f7e964dcc1a31b4accc657eb40f78b0282d1 | Deal with Django 1.8 deprecation/removal of django.test.simple. | run_tests.py | run_tests.py | #!/usr/bin/env python
import os, sys
from django.conf import settings
from django.test.utils import get_runner
import django
DIRNAME = os.path.dirname(__file__)
if django.VERSION[1] < 4:
# If the version is NOT django 4 or greater
# then remove the TZ setting.
settings.configure(DEBUG=True,
DATABASES={
'default': {
'ENGINE': 'django.db.backends.sqlite3',
}
},
#ROOT_URLCONF='mailqueue.urls',
INSTALLED_APPS=('django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.admin',
'cas',),
CAS_SERVER_URL = 'http://signin.cas.com',
)
else:
settings.configure(DEBUG=True,
DATABASES={
'default': {
'ENGINE': 'django.db.backends.sqlite3',
}
},
#ROOT_URLCONF='mailqueue.urls',
INSTALLED_APPS=('django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.admin',
'cas',),
USE_TZ=True,
CAS_SERVER_URL = 'http://signin.cas.com',)
try:
# Django 1.7 needs this, but other versions dont.
django.setup()
except AttributeError:
pass
try:
from django.test.simple import DjangoTestSuiteRunner
test_runner = DjangoTestSuiteRunner(verbosity=1)
except ImportError:
TestRunner = get_runner(settings)
test_runner = TestRunner()
failures = test_runner.run_tests(['cas', ])
if failures:
sys.exit(failures)
| #!/usr/bin/env python
import os, sys
from django.conf import settings
import django
DIRNAME = os.path.dirname(__file__)
if django.VERSION[1] < 4:
# If the version is NOT django 4 or greater
# then remove the TZ setting.
settings.configure(DEBUG=True,
DATABASES={
'default': {
'ENGINE': 'django.db.backends.sqlite3',
}
},
#ROOT_URLCONF='mailqueue.urls',
INSTALLED_APPS=('django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.admin',
'cas',),
CAS_SERVER_URL = 'http://signin.cas.com',
)
else:
settings.configure(DEBUG=True,
DATABASES={
'default': {
'ENGINE': 'django.db.backends.sqlite3',
}
},
#ROOT_URLCONF='mailqueue.urls',
INSTALLED_APPS=('django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.admin',
'cas',),
USE_TZ=True,
CAS_SERVER_URL = 'http://signin.cas.com',)
try:
# Django 1.7 needs this, but other versions dont.
django.setup()
except AttributeError:
pass
from django.test.simple import DjangoTestSuiteRunner
test_runner = DjangoTestSuiteRunner(verbosity=1)
failures = test_runner.run_tests(['cas', ])
if failures:
sys.exit(failures) | Python | 0 |
608225fa8a62cb5c0aa42b1a2e371c1e1b731d58 | fix bug in GrandPrixWidget | driver27/admin/common.py | driver27/admin/common.py | from django import forms
from django.core.urlresolvers import reverse
from django.utils.translation import ugettext as _
from tabbed_admin import TabbedModelAdmin
from django.utils.safestring import mark_safe
from django.utils.html import format_html
from django.utils.encoding import force_text
class GrandPrixWidget(forms.widgets.Select):
def render_option(self, selected_choices, option_value, option_label):
if option_value is None:
option_value = ''
option_value = force_text(option_value)
if option_value in selected_choices:
selected_html = mark_safe(' selected="selected"')
if not self.allow_multiple_selected:
# Only allow for a single selection.
selected_choices.remove(option_value)
else:
selected_html = ''
data_circuit_attr = ''
if option_value:
from driver27.models import GrandPrix
grand_prix = GrandPrix.objects.filter(pk=option_value)
if grand_prix.count() and grand_prix.first().default_circuit:
data_circuit_attr = getattr(grand_prix.first().default_circuit, 'pk', '')
return format_html('<option value="{}"{} data-circuit="{}">{}</option>',
option_value, selected_html,
data_circuit_attr, force_text(option_label))
class Media:
js = ['driver27/js/select_default_circuit.js']
# http://stackoverflow.com/a/34567383
class AlwaysChangedModelForm(forms.ModelForm):
def is_empty_form(self, *args, **kwargs):
empty_form = True
for name, field in self.fields.items():
prefixed_name = self.add_prefix(name)
data_value = field.widget.value_from_datadict(self.data, self.files, prefixed_name)
if data_value:
empty_form = False
break
return empty_form
def has_changed(self, *args, **kwargs):
""" Should returns True if data differs from initial.
By always returning true even unchanged inlines will get validated and saved."""
if self.instance.pk is None and self.initial:
if not self.changed_data:
return True
if self.is_empty_form():
return False
return super(AlwaysChangedModelForm, self).has_changed()
class RelatedCompetitionAdmin(object):
""" Aux class to share print_competitions method between driver and team """
def print_competitions(self, obj):
if hasattr(obj, 'competitions'):
return ', '.join('{competition}'.format(competition=competition)
for competition in obj.competitions.all())
else:
return None
print_competitions.short_description = _('competitions')
class CommonTabbedModelAdmin(TabbedModelAdmin):
def get_form(self, request, obj=None, **kwargs):
# just save obj reference for future processing in Inline
if request and obj:
request._obj_ = obj
return super(CommonTabbedModelAdmin, self).get_form(request=request, obj=obj, **kwargs)
| from django import forms
from django.core.urlresolvers import reverse
from django.utils.translation import ugettext as _
from tabbed_admin import TabbedModelAdmin
from django.utils.safestring import mark_safe
from django.utils.html import format_html
from django.utils.encoding import force_text
class GrandPrixWidget(forms.widgets.Select):
def render_option(self, selected_choices, option_value, option_label):
if option_value is None:
option_value = ''
option_value = force_text(option_value)
if option_value in selected_choices:
selected_html = mark_safe(' selected="selected"')
if not self.allow_multiple_selected:
# Only allow for a single selection.
selected_choices.remove(option_value)
else:
selected_html = ''
data_circuit_attr = ''
if option_value:
from driver27.models import GrandPrix
grand_prix = GrandPrix.objects.filter(pk=option_value)
if grand_prix.count() and grand_prix.first().default_circuit:
data_circuit_attr = grand_prix.first().default_circuit.pk
return format_html('<option value="{}"{} data-circuit="{}">{}</option>',
option_value, selected_html,
data_circuit_attr, force_text(option_label))
class Media:
js = ['driver27/js/select_default_circuit.js']
# http://stackoverflow.com/a/34567383
class AlwaysChangedModelForm(forms.ModelForm):
def is_empty_form(self, *args, **kwargs):
empty_form = True
for name, field in self.fields.items():
prefixed_name = self.add_prefix(name)
data_value = field.widget.value_from_datadict(self.data, self.files, prefixed_name)
if data_value:
empty_form = False
break
return empty_form
def has_changed(self, *args, **kwargs):
""" Should returns True if data differs from initial.
By always returning true even unchanged inlines will get validated and saved."""
if self.instance.pk is None and self.initial:
if not self.changed_data:
return True
if self.is_empty_form():
return False
return super(AlwaysChangedModelForm, self).has_changed()
class RelatedCompetitionAdmin(object):
""" Aux class to share print_competitions method between driver and team """
def print_competitions(self, obj):
if hasattr(obj, 'competitions'):
return ', '.join('{competition}'.format(competition=competition)
for competition in obj.competitions.all())
else:
return None
print_competitions.short_description = _('competitions')
class CommonTabbedModelAdmin(TabbedModelAdmin):
def get_form(self, request, obj=None, **kwargs):
# just save obj reference for future processing in Inline
if request and obj:
request._obj_ = obj
return super(CommonTabbedModelAdmin, self).get_form(request=request, obj=obj, **kwargs)
| Python | 0 |
9e0f62c3eedd2c5376af4178a0ecf529898a041b | Update command doc for open | guild/commands/open_.py | guild/commands/open_.py | # Copyright 2017-2020 TensorHub, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import absolute_import
from __future__ import division
import click
from guild import click_util
from . import runs_support
@click.command("open")
@runs_support.run_arg
@click.option("-p", "--path", metavar="PATH", help="Path to open under run directory.")
@click.option(
"-s", "--sourcecode", is_flag=True, help="Open run source code directory."
)
@click.option(
"-O",
"--output",
is_flag=True,
help="Open run output. Cannot be used with other options.",
)
@click.option("-c", "--cmd", metavar="CMD", help="Command used to open run.")
@click.option(
"--shell", is_flag=True, help="Open a new shell in run directory or PATH."
)
@click.option(
"--shell-cmd",
metavar="CMD",
help="Open a new shell in run directory or PATH using CMD.",
)
@runs_support.all_filters
@click.pass_context
@click_util.use_args
@click_util.render_doc
def open_(ctx, args):
"""Open a run path or output.
This command opens a path a single run.
{{ runs_support.run_arg }}
If `RUN` isn't specified, the latest run is selected.
### Run Paths
`--path` may be used to open a path within the run directory. By
default the run directory itself is opened. PATH must be relative.
`--sourcecode` may be used to open the run source code
directory. If `--path` is also specified, the path applies to the
source code directory rather than the run directory.
### Output
`--output` may be used to open the output for a run. This option
may not be used with other options.
### Open Command
`--cmd` may be used to specify the command used to open the
path. By default the system-defined program is used.
{{ runs_support.all_filters }}
"""
from . import open_impl
open_impl.main(args, ctx)
| # Copyright 2017-2020 TensorHub, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import absolute_import
from __future__ import division
import click
from guild import click_util
from . import runs_support
@click.command("open")
@runs_support.run_arg
@click.option("-p", "--path", metavar="PATH", help="Path to open under run directory.")
@click.option(
"-s", "--sourcecode", is_flag=True, help="Open run source code directory."
)
@click.option(
"-O",
"--output",
is_flag=True,
help="Open run output. Cannot be used with other options.",
)
@click.option("-c", "--cmd", metavar="CMD", help="Command used to open run.")
@click.option(
"--shell", is_flag=True, help="Open a new shell in run directory or PATH."
)
@click.option(
"--shell-cmd",
metavar="CMD",
help="Open a new shell in run directory or PATH using CMD.",
)
@runs_support.all_filters
@click.pass_context
@click_util.use_args
@click_util.render_doc
def open_(ctx, args):
"""Open a run path.
This command opens a path a single run.
{{ runs_support.run_arg }}
If `RUN` isn't specified, the latest run is selected.
### Run Paths
`--path` may be used to open a path within the run directory. By
default the run directory itself is opened. PATH must be relative.
`--sourcecode` may be used to open the run source code
directory. If `--path` is also specified, the path applies to the
source code directory rather than the run directory.
### Output
`--output` may be used to open the output for a run. This option
may not be used with other options.
### Open Command
`--cmd` may be used to specify the command used to open the
path. By default the system-defined program is used.
{{ runs_support.all_filters }}
"""
from . import open_impl
open_impl.main(args, ctx)
| Python | 0 |
f12b3f5c5a1409f44fc2acbb54d53fc668028e4a | Set print options for numpy 1.14 to 1.13. | landlab/__init__.py | landlab/__init__.py | #! /usr/bin/env python
"""The Landlab
:Package name: TheLandlab
:Release date: 2013-03-24
:Authors: Greg Tucker, Nicole Gasparini, Erkan Istanbulluoglu, Daniel Hobley,
Sai Nudurupati, Jordan Adams, Eric Hutton
:URL: http://csdms.colorado.edu/trac/landlab
:License: MIT
"""
from __future__ import absolute_import
import os
from numpy import set_printoptions
try:
set_printoptions(legacy='1.13')
except TypeError:
pass
finally:
del set_printoptions
from ._registry import registry
cite_as = registry.format_citations
__all__ = ['registry']
if 'DISPLAY' not in os.environ:
try:
import matplotlib
except ImportError:
import warnings
warnings.warn('matplotlib not found', ImportWarning)
else:
matplotlib.use('Agg')
from .core.model_parameter_dictionary import ModelParameterDictionary
from .core.model_parameter_dictionary import (MissingKeyError,
ParameterValueError)
from .core.model_parameter_loader import load_params
from .core.model_component import Component
from .framework.collections import Palette, Arena, NoProvidersError
from .framework.decorators import Implements, ImplementsOrRaise
from .framework.framework import Framework
from .field.scalar_data_fields import FieldError
from .grid import *
from .plot import *
from .testing.nosetester import LandlabTester
test = LandlabTester().test
bench = LandlabTester().bench
__all__.extend(['ModelParameterDictionary', 'MissingKeyError',
'ParameterValueError', 'Component', 'Palette', 'Arena',
'NoProvidersError', 'Implements', 'ImplementsOrRaise',
'Framework', 'FieldError', 'LandlabTester', 'load_params'])
from ._version import get_versions
__version__ = get_versions()['version']
del get_versions
| #! /usr/bin/env python
"""The Landlab
:Package name: TheLandlab
:Release date: 2013-03-24
:Authors: Greg Tucker, Nicole Gasparini, Erkan Istanbulluoglu, Daniel Hobley,
Sai Nudurupati, Jordan Adams, Eric Hutton
:URL: http://csdms.colorado.edu/trac/landlab
:License: MIT
"""
from __future__ import absolute_import
import os
from ._registry import registry
cite_as = registry.format_citations
__all__ = ['registry']
if 'DISPLAY' not in os.environ:
try:
import matplotlib
except ImportError:
import warnings
warnings.warn('matplotlib not found', ImportWarning)
else:
matplotlib.use('Agg')
from .core.model_parameter_dictionary import ModelParameterDictionary
from .core.model_parameter_dictionary import (MissingKeyError,
ParameterValueError)
from .core.model_parameter_loader import load_params
from .core.model_component import Component
from .framework.collections import Palette, Arena, NoProvidersError
from .framework.decorators import Implements, ImplementsOrRaise
from .framework.framework import Framework
from .field.scalar_data_fields import FieldError
from .grid import *
from .plot import *
from .testing.nosetester import LandlabTester
test = LandlabTester().test
bench = LandlabTester().bench
__all__.extend(['ModelParameterDictionary', 'MissingKeyError',
'ParameterValueError', 'Component', 'Palette', 'Arena',
'NoProvidersError', 'Implements', 'ImplementsOrRaise',
'Framework', 'FieldError', 'LandlabTester', 'load_params'])
from ._version import get_versions
__version__ = get_versions()['version']
del get_versions
| Python | 0.000107 |
bcac5b2faf882fda49a3bff7eae147bcb8cbd460 | Fix spelling of setup-readme.md | setup.py | setup.py | # -*- coding: utf-8 -*-
from __future__ import print_function
from os import sys
try:
from skbuild import setup
except ImportError:
print('scikit-build is required to build from source.', file=sys.stderr)
print('Please run:', file=sys.stderr)
print('', file=sys.stderr)
print(' python -m pip install scikit-build')
sys.exit(1)
from pathlib import Path
this_directory = Path(__file__).parent
setup_readme_text = (this_directory / "setup-readme.md").read_text()
#include_dirs=[np.get_include()],
setup(
name='itk-tubetk',
version='1.1',
author='Stephen R. Aylward',
author_email='stephen.aylward@kitware.com',
packages=['itk'],
package_dir={'itk': 'itk'},
download_url=r'https://github.com/InsightSoftwareConsortium/ITKTubeTK',
description=r'An open-source toolkit, led by Kitware, Inc., for the segmentation, registration, and analysis of tubes and surfaces in images.',
long_description=setup_readme_text,
long_description_content_type='text/markdown',
classifiers=[
"License :: OSI Approved :: Apache Software License",
"Programming Language :: Python",
"Programming Language :: C++",
"Development Status :: 4 - Beta",
"Intended Audience :: Developers",
"Intended Audience :: Education",
"Intended Audience :: Healthcare Industry",
"Intended Audience :: Science/Research",
"Topic :: Scientific/Engineering",
"Topic :: Scientific/Engineering :: Medical Science Apps.",
"Topic :: Scientific/Engineering :: Information Analysis",
"Topic :: Software Development :: Libraries",
"Operating System :: Microsoft :: Windows",
"Operating System :: POSIX",
"Operating System :: Unix",
"Operating System :: MacOS"
],
license='Apache',
keywords='ITK InsightToolkit Tubes Vessels Nerves Ultrasound MRI CT Medical',
url=r'https://github.com/InsightSoftwareConsortium/ITKTubeTK/',
project_urls={
'Dashboard': 'https://open.cdash.org/index.php?project=TubeTK',
'Issue Tracker': 'https://github.com/InsightSoftwareConsortium/ITKTubeTK/issues',
'Testing Data': 'https://data.kitware.com/#collection/5888b7d38d777f4f3f3085a8/folder/58a3abf08d777f0721a65b16',
'ITK': 'https://itk.org',
},
install_requires=[
r'numpy',
r'itk>=5.3rc3',
r'itk-minimalpathextraction>=1.2.0'
]
)
| # -*- coding: utf-8 -*-
from __future__ import print_function
from os import sys
try:
from skbuild import setup
except ImportError:
print('scikit-build is required to build from source.', file=sys.stderr)
print('Please run:', file=sys.stderr)
print('', file=sys.stderr)
print(' python -m pip install scikit-build')
sys.exit(1)
from pathlib import Path
this_directory = Path(__file__).parent
setup_readme_text = (this_directory / "setup_readme.md").read_text()
#include_dirs=[np.get_include()],
setup(
name='itk-tubetk',
version='1.1',
author='Stephen R. Aylward',
author_email='stephen.aylward@kitware.com',
packages=['itk'],
package_dir={'itk': 'itk'},
download_url=r'https://github.com/InsightSoftwareConsortium/ITKTubeTK',
description=r'An open-source toolkit, led by Kitware, Inc., for the segmentation, registration, and analysis of tubes and surfaces in images.',
long_description=setup_readme_text,
long_description_content_type='text/markdown',
classifiers=[
"License :: OSI Approved :: Apache Software License",
"Programming Language :: Python",
"Programming Language :: C++",
"Development Status :: 4 - Beta",
"Intended Audience :: Developers",
"Intended Audience :: Education",
"Intended Audience :: Healthcare Industry",
"Intended Audience :: Science/Research",
"Topic :: Scientific/Engineering",
"Topic :: Scientific/Engineering :: Medical Science Apps.",
"Topic :: Scientific/Engineering :: Information Analysis",
"Topic :: Software Development :: Libraries",
"Operating System :: Microsoft :: Windows",
"Operating System :: POSIX",
"Operating System :: Unix",
"Operating System :: MacOS"
],
license='Apache',
keywords='ITK InsightToolkit Tubes Vessels Nerves Ultrasound MRI CT Medical',
url=r'https://github.com/InsightSoftwareConsortium/ITKTubeTK/',
project_urls={
'Dashboard': 'https://open.cdash.org/index.php?project=TubeTK',
'Issue Tracker': 'https://github.com/InsightSoftwareConsortium/ITKTubeTK/issues',
'Testing Data': 'https://data.kitware.com/#collection/5888b7d38d777f4f3f3085a8/folder/58a3abf08d777f0721a65b16',
'ITK': 'https://itk.org',
},
install_requires=[
r'numpy',
r'itk>=5.3rc3',
r'itk-minimalpathextraction>=1.2.0'
]
)
| Python | 0.000008 |
caf45bc9d92bb496a3fb32b494db623b5b405208 | bump version | picker/__init__.py | picker/__init__.py | VERSION = (0, 5, 0)
default_app_config = 'picker.apps.PickerConfig'
def get_version():
return '.'.join(map(str, VERSION))
| VERSION = (0, 4, 0)
default_app_config = 'picker.apps.PickerConfig'
def get_version():
return '.'.join(map(str, VERSION))
| Python | 0 |
2b939c703951c0a7042fa336d9c685c437fb0586 | Bump to version 1.2 | setup.py | setup.py | """Setup script for templer.django-project-app"""
from setuptools import setup
from setuptools import find_packages
version = '1.2'
setup(
name='templer.django-project-app',
version=version,
description='Templer extension for creating '
'Django applications within projects.',
long_description=open('README.rst').read(),
classifiers=[
'Environment :: Console',
'Framework :: Django',
'Intended Audience :: Developers',
'License :: OSI Approved :: BSD License',
'Programming Language :: Python',
'Topic :: Internet :: WWW/HTTP',
'Topic :: Internet :: WWW/HTTP :: Dynamic Content',
'Topic :: Software Development :: Code Generators',
],
keywords='templer, django, application',
author='Fantomas42',
author_email='fantomas42@gmail.com',
url='https://github.com/Fantomas42/templer.django-project-app',
license='BSD',
packages=find_packages('src'),
package_dir={'': 'src'},
namespace_packages=['templer'],
include_package_data=True,
zip_safe=False,
install_requires=[
'setuptools',
'templer.core',
],
entry_points="""
[paste.paster_create_template]
django_app = templer.django_project_app:DjangoApp
django_project_app = templer.django_project_app:DjangoProjectApp
[templer.templer_structure]
management_command = templer.django_project_app:ManagementCommandStructure
""",
)
| """Setup script for templer.django-project-app"""
from setuptools import setup
from setuptools import find_packages
version = '1.1'
setup(
name='templer.django-project-app',
version=version,
description='Templer extension for creating '
'Django applications within projects.',
long_description=open('README.rst').read(),
classifiers=[
'Environment :: Console',
'Framework :: Django',
'Intended Audience :: Developers',
'License :: OSI Approved :: BSD License',
'Programming Language :: Python',
'Topic :: Internet :: WWW/HTTP',
'Topic :: Internet :: WWW/HTTP :: Dynamic Content',
'Topic :: Software Development :: Code Generators',
],
keywords='templer, django, application',
author='Fantomas42',
author_email='fantomas42@gmail.com',
url='https://github.com/Fantomas42/templer.django-project-app',
license='BSD',
packages=find_packages('src'),
package_dir={'': 'src'},
namespace_packages=['templer'],
include_package_data=True,
zip_safe=False,
install_requires=[
'setuptools',
'templer.core',
],
entry_points="""
[paste.paster_create_template]
django_app = templer.django_project_app:DjangoApp
django_project_app = templer.django_project_app:DjangoProjectApp
[templer.templer_structure]
management_command = templer.django_project_app:ManagementCommandStructure
""",
)
| Python | 0 |
9669a99d1a76f346b2cfb9b4197636ac3142f9d2 | Update users table in a batched manner | synapse/storage/schema/delta/30/as_users.py | synapse/storage/schema/delta/30/as_users.py | # Copyright 2016 OpenMarket Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import logging
from synapse.storage.appservice import ApplicationServiceStore
logger = logging.getLogger(__name__)
def run_upgrade(cur, database_engine, config, *args, **kwargs):
# NULL indicates user was not registered by an appservice.
try:
cur.execute("ALTER TABLE users ADD COLUMN appservice_id TEXT")
except:
# Maybe we already added the column? Hope so...
pass
cur.execute("SELECT name FROM users")
rows = cur.fetchall()
config_files = []
try:
config_files = config.app_service_config_files
except AttributeError:
logger.warning("Could not get app_service_config_files from config")
pass
appservices = ApplicationServiceStore.load_appservices(
config.server_name, config_files
)
owned = {}
for row in rows:
user_id = row[0]
for appservice in appservices:
if appservice.is_exclusive_user(user_id):
if user_id in owned.keys():
logger.error(
"user_id %s was owned by more than one application"
" service (IDs %s and %s); assigning arbitrarily to %s" %
(user_id, owned[user_id], appservice.id, owned[user_id])
)
owned.setdefault(appservice.id, []).append(user_id)
for as_id, user_ids in owned.items():
n = 100
user_chunks = (user_ids[i:i + 100] for i in xrange(0, len(user_ids), n))
for chunk in user_chunks:
cur.execute(
database_engine.convert_param_style(
"UPDATE users SET appservice_id = ? WHERE name IN (%s)" % (
",".join("?" for _ in chunk),
)
),
[as_id] + chunk
)
| # Copyright 2016 OpenMarket Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import logging
from synapse.storage.appservice import ApplicationServiceStore
logger = logging.getLogger(__name__)
def run_upgrade(cur, database_engine, config, *args, **kwargs):
# NULL indicates user was not registered by an appservice.
try:
cur.execute("ALTER TABLE users ADD COLUMN appservice_id TEXT")
except:
# Maybe we already added the column? Hope so...
pass
cur.execute("SELECT name FROM users")
rows = cur.fetchall()
config_files = []
try:
config_files = config.app_service_config_files
except AttributeError:
logger.warning("Could not get app_service_config_files from config")
pass
appservices = ApplicationServiceStore.load_appservices(
config.server_name, config_files
)
owned = {}
for row in rows:
user_id = row[0]
for appservice in appservices:
if appservice.is_exclusive_user(user_id):
if user_id in owned.keys():
logger.error(
"user_id %s was owned by more than one application"
" service (IDs %s and %s); assigning arbitrarily to %s" %
(user_id, owned[user_id], appservice.id, owned[user_id])
)
owned[user_id] = appservice.id
for user_id, as_id in owned.items():
cur.execute(
database_engine.convert_param_style(
"UPDATE users SET appservice_id = ? WHERE name = ?"
),
(as_id, user_id)
)
| Python | 0 |
da5a05c27f1c19c69ce23f5cd6cd0f09edb9d7f7 | Refactor common serializer selection code. | paranuara_api/views.py | paranuara_api/views.py | from rest_framework import viewsets
from paranuara_api.models import Company, Person
from paranuara_api.serializers import (
CompanySerializer, CompanyListSerializer, PersonListSerializer,
PersonSerializer
)
class MultiSerializerMixin(object):
def get_serializer_class(self):
return self.serializers[self.action]
class CompanyViewSet(MultiSerializerMixin, viewsets.ReadOnlyModelViewSet):
queryset = Company.objects.all()
lookup_field = 'index'
serializers = {
'list': CompanyListSerializer,
'retrieve': CompanySerializer,
}
class PersonViewSet(MultiSerializerMixin, viewsets.ReadOnlyModelViewSet):
queryset = Person.objects.all()
lookup_field = 'index'
serializers = {
'list': PersonListSerializer,
'retrieve': PersonSerializer,
}
| from rest_framework import viewsets
from paranuara_api.models import Company, Person
from paranuara_api.serializers import (
CompanySerializer, CompanyListSerializer, PersonListSerializer,
PersonSerializer
)
class CompanyViewSet(viewsets.ReadOnlyModelViewSet):
queryset = Company.objects.all()
lookup_field = 'index'
serializers = {
'list': CompanyListSerializer,
'retrieve': CompanySerializer,
}
def get_serializer_class(self):
return self.serializers[self.action]
class PersonViewSet(viewsets.ReadOnlyModelViewSet):
queryset = Person.objects.all()
lookup_field = 'index'
serializers = {
'list': PersonListSerializer,
'retrieve': PersonSerializer,
}
def get_serializer_class(self):
return self.serializers[self.action]
| Python | 0 |
a2a849f3d425e9c544a66d2b04ab80555be16add | Fix path error | drivnal/handlers/path.py | drivnal/handlers/path.py | from drivnal.constants import *
from drivnal.backup import Client
import drivnal.utils as utils
from drivnal import server
import os
import flask
@server.app.route('/path', methods=['GET'])
@server.app.route('/path/<path:path>', methods=['GET'])
def path_get(path=None):
path = '/' + (path or '')
paths = []
if path != '/':
paths.append({
'name': '..',
'path': os.path.abspath(os.path.join(path, os.pardir)),
})
try:
path_list = os.listdir(path)
except OSError, error:
return utils.jsonify({
'error': PATH_NOT_FOUND,
'error_msg': error.strerror,
}), 404
for name in sorted(path_list):
full_path = os.path.join(path, name)
if not os.path.isdir(full_path):
continue
paths.append({
'name': name,
'path': full_path,
})
return utils.jsonify(paths)
| from drivnal.constants import *
from drivnal.backup import Client
import drivnal.utils as utils
from drivnal import server
import os
import flask
@server.app.route('/path', methods=['GET'])
@server.app.route('/path/<path:path>', methods=['GET'])
def path_get(path=None):
path = '/' + (path or '')
paths = []
if path != '/':
paths.append({
'name': '..',
'path': os.path.abspath(os.path.join(path, os.pardir)),
})
try:
path_list = os.listdir(path)
except OSError:
return utils.jsonify({
'error': PATH_NOT_FOUND,
'error_msg': error.strerror,
}), 404
for name in sorted(path_list):
full_path = os.path.join(path, name)
if not os.path.isdir(full_path):
continue
paths.append({
'name': name,
'path': full_path,
})
return utils.jsonify(paths)
| Python | 0.000014 |
9728d151967b6796ef2a34d8a9867fd109fe48f3 | remove psutil from setup.py | setup.py | setup.py | # Copyright 2013-2014 MongoDB, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
classifiers = """\
Development Status :: 4 - Beta
Intended Audience :: Developers
License :: OSI Approved :: Apache Software License
Programming Language :: Python
Programming Language :: JavaScript
Topic :: Database
Topic :: Software Development :: Libraries :: Python Modules
Operating System :: Unix
"""
import sys
try:
from setuptools import setup
except ImportError:
from ez_setup import setup
use_setup_tools()
from setuptools import setup
extra_opts = {"test_suite": "tests"}
if sys.version_info[:2] == (2, 6):
# Need unittest2 to run unittests in Python 2.6
extra_opts["tests_require"] = ["unittest2"]
extra_opts["test_suite"] = "unittest2.collector"
setup(name='mongo-connector',
version="1.1.1+",
author="MongoDB, Inc.",
author_email='mongodb-user@googlegroups.com',
description='Mongo Connector',
keywords='mongo-connector',
url='https://github.com/10gen-labs/mongo-connector',
license="http://www.apache.org/licenses/LICENSE-2.0.html",
platforms=["any"],
classifiers=filter(None, classifiers.split("\n")),
install_requires=['pymongo', 'pysolr >= 3.1.0', 'elasticsearch'],
packages=["mongo_connector", "mongo_connector.doc_managers"],
package_data={
'mongo_connector.doc_managers': ['schema.xml']
},
entry_points={
'console_scripts': [
'mongo-connector = mongo_connector.connector:main',
],
},
**extra_opts
)
| # Copyright 2013-2014 MongoDB, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
classifiers = """\
Development Status :: 4 - Beta
Intended Audience :: Developers
License :: OSI Approved :: Apache Software License
Programming Language :: Python
Programming Language :: JavaScript
Topic :: Database
Topic :: Software Development :: Libraries :: Python Modules
Operating System :: Unix
"""
import sys
try:
from setuptools import setup
except ImportError:
from ez_setup import setup
use_setup_tools()
from setuptools import setup
extra_opts = {"test_suite": "tests",
"tests_require": ["psutil>=2.0"]}
if sys.version_info[:2] == (2, 6):
# Need unittest2 to run unittests in Python 2.6
extra_opts["tests_require"] += ["unittest2"]
extra_opts["test_suite"] = "unittest2.collector"
setup(name='mongo-connector',
version="1.1.1+",
author="MongoDB, Inc.",
author_email='mongodb-user@googlegroups.com',
description='Mongo Connector',
keywords='mongo-connector',
url='https://github.com/10gen-labs/mongo-connector',
license="http://www.apache.org/licenses/LICENSE-2.0.html",
platforms=["any"],
classifiers=filter(None, classifiers.split("\n")),
install_requires=['pymongo', 'pysolr >= 3.1.0', 'elasticsearch'],
packages=["mongo_connector", "mongo_connector.doc_managers"],
package_data={
'mongo_connector.doc_managers': ['schema.xml']
},
entry_points={
'console_scripts': [
'mongo-connector = mongo_connector.connector:main',
],
},
**extra_opts
)
| Python | 0.000001 |
90cec05ed692e9be580d0df6a7738684fe76a6a1 | Add print method | publisher.py | publisher.py | #!/usr/bin/python
#
# Copyright 2014 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the 'License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import argparse
import sys
from apiclient import sample_tools
from oauth2client import client
# Declare command-line flags.
argparser = argparse.ArgumentParser(description='Upload Android app to Google Play.')
argparser.add_argument('service_account_email',
help='EXAMPLE@developer.gserviceaccount.com')
argparser.add_argument('key_file',
help='The path to the Key file.')
argparser.add_argument('package_name',
help='The package name. Example: com.android.sample')
argparser.add_argument('apk_file',
help='The path to the APK file to upload.')
argparser.add_argument('track',
nargs='?',
default='alpha',
help='Can be \'alpha\', \'beta\', \'production\' or \'rollout\'')
def main(argv):
flags = argparser.parse_args()
# Process flags and read their values.
service_account_email = flags.service_account_email
key_file = flags.key_file
package_name = flags.package_name
apk_file = flags.apk_file
track = flags.track
print 'service_account_email: "%s"' % service_account_email
print 'key_file: "%s"' % key_file
print 'package_name: "%s"' % package_name
print 'apk_file: "%s"' % apk_file
print 'track: "%s"' % track
f = file(key_file, 'rb')
key = f.read()
f.close()
credentials = client.SignedJwtAssertionCredentials(
service_account_email,
key,
scope='https://www.googleapis.com/auth/androidpublisher')
http = httplib2.Http()
http = credentials.authorize(http)
service = build('androidpublisher', 'v2', http=http)
try:
edit_request = service.edits().insert(body={}, packageName=package_name)
result = edit_request.execute()
edit_id = result['id']
apk_response = service.edits().apks().upload(
editId=edit_id,
packageName=package_name,
media_body=apk_file).execute()
print 'Version code %d has been uploaded' % apk_response['versionCode']
track_response = service.edits().tracks().update(
editId=edit_id,
track=track,
packageName=package_name,
body={u'versionCodes': [apk_response['versionCode']]}).execute()
print 'Track %s is set for version code(s) %s' % (
track_response['track'], str(track_response['versionCodes']))
commit_request = service.edits().commit(
editId=edit_id, packageName=package_name).execute()
print 'Edit "%s" has been committed' % (commit_request['id'])
except client.AccessTokenRefreshError:
print ('The credentials have been revoked or expired, please re-run the '
'application to re-authorize')
if __name__ == '__main__':
main(sys.argv)
| #!/usr/bin/python
#
# Copyright 2014 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the 'License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import argparse
import sys
from apiclient import sample_tools
from oauth2client import client
# Declare command-line flags.
argparser = argparse.ArgumentParser(description='Upload Android app to Google Play.')
argparser.add_argument('service_account_email',
help='EXAMPLE@developer.gserviceaccount.com')
argparser.add_argument('key_file',
help='The path to the Key file.')
argparser.add_argument('package_name',
help='The package name. Example: com.android.sample')
argparser.add_argument('apk_file',
help='The path to the APK file to upload.')
argparser.add_argument('track',
nargs='?',
default='alpha',
help='Can be \'alpha\', \'beta\', \'production\' or \'rollout\'')
def main(argv):
flags = argparser.parse_args()
# Process flags and read their values.
service_account_email = flags.service_account_email
key_file = flags.key_file
package_name = flags.package_name
apk_file = flags.apk_file
track = flags.track
f = file(key_file, 'rb')
key = f.read()
f.close()
credentials = client.SignedJwtAssertionCredentials(
service_account_email,
key,
scope='https://www.googleapis.com/auth/androidpublisher')
http = httplib2.Http()
http = credentials.authorize(http)
service = build('androidpublisher', 'v2', http=http)
try:
edit_request = service.edits().insert(body={}, packageName=package_name)
result = edit_request.execute()
edit_id = result['id']
apk_response = service.edits().apks().upload(
editId=edit_id,
packageName=package_name,
media_body=apk_file).execute()
print 'Version code %d has been uploaded' % apk_response['versionCode']
track_response = service.edits().tracks().update(
editId=edit_id,
track=track,
packageName=package_name,
body={u'versionCodes': [apk_response['versionCode']]}).execute()
print 'Track %s is set for version code(s) %s' % (
track_response['track'], str(track_response['versionCodes']))
commit_request = service.edits().commit(
editId=edit_id, packageName=package_name).execute()
print 'Edit "%s" has been committed' % (commit_request['id'])
except client.AccessTokenRefreshError:
print ('The credentials have been revoked or expired, please re-run the '
'application to re-authorize')
if __name__ == '__main__':
main(sys.argv)
| Python | 0.000068 |
f8ee0fc34d060016f0f601e1d84000b9c612efc6 | exclude "abstract" methods from coverage | muninn/storage/base.py | muninn/storage/base.py | import os.path
import muninn.util as util
class StorageBackend(object):
def __init__(self):
self.supports_symlinks = False
self.global_prefix = ''
def get_tmp_root(self, product):
if self._tmp_root:
tmp_root = os.path.join(self._tmp_root, product.core.archive_path)
util.make_path(tmp_root)
return tmp_root
def run_for_product(self, product, fn, use_enclosing_directory):
tmp_root = self.get_tmp_root(product)
product_path = self.product_path(product)
with util.TemporaryDirectory(dir=tmp_root, prefix=".run_for_product-",
suffix="-%s" % product.core.uuid.hex) as tmp_path:
self.get(product, product_path, tmp_path, use_enclosing_directory)
paths = [os.path.join(tmp_path, basename) for basename in os.listdir(tmp_path)]
return fn(paths)
def prepare(self): # pragma: no cover
# Prepare storage for use.
raise NotImplementedError()
def exists(self): # pragma: no cover
# Check that storage exists.
raise NotImplementedError()
def initialize(self, configuration): # pragma: no cover
# Initialize storage.
raise NotImplementedError()
def destroy(self): # pragma: no cover
# Destroy storage
raise NotImplementedError()
# TODO refactor away?
def product_path(self, product): # pragma: no cover
# Product path within storage
raise NotImplementedError()
# TODO lower-granularity put/get/delete?
def put(self, paths, properties, use_enclosing_directory, use_symlinks=None,
retrieve_files=None, run_for_product=None): # pragma: no cover
# Place product file(s) into storage
raise NotImplementedError()
def get(self, product, product_path, target_path, use_enclosing_directory, use_symlinks=None): # pragma: no cover
# Retrieve product file(s) from storage
raise NotImplementedError()
def size(self, product_path): # pragma: no cover
# Return product storage size
raise NotImplementedError()
def delete(self, product_path, properties): # pragma: no cover
# Delete product file(s) from storage
raise NotImplementedError()
def move(self, product, archive_path, paths=None): # pragma: no cover
# Move product
raise NotImplementedError()
def current_archive_path(self, paths, properties): # pragma: no cover
raise NotImplementedError()
| import os.path
import muninn.util as util
class StorageBackend(object):
def __init__(self):
self.supports_symlinks = False
self.global_prefix = ''
def get_tmp_root(self, product):
if self._tmp_root:
tmp_root = os.path.join(self._tmp_root, product.core.archive_path)
util.make_path(tmp_root)
return tmp_root
def run_for_product(self, product, fn, use_enclosing_directory):
tmp_root = self.get_tmp_root(product)
product_path = self.product_path(product)
with util.TemporaryDirectory(dir=tmp_root, prefix=".run_for_product-",
suffix="-%s" % product.core.uuid.hex) as tmp_path:
self.get(product, product_path, tmp_path, use_enclosing_directory)
paths = [os.path.join(tmp_path, basename) for basename in os.listdir(tmp_path)]
return fn(paths)
def prepare(self):
# Prepare storage for use.
raise NotImplementedError()
def exists(self):
# Check that storage exists.
raise NotImplementedError()
def initialize(self, configuration):
# Initialize storage.
raise NotImplementedError()
def destroy(self):
# Destroy storage
raise NotImplementedError()
def product_path(self, product): # TODO refactor away?
# Product path within storage
raise NotImplementedError()
# TODO lower-granularity put/get/delete?
def put(self, paths, properties, use_enclosing_directory, use_symlinks=None,
retrieve_files=None, run_for_product=None):
# Place product file(s) into storage
raise NotImplementedError()
def get(self, product, product_path, target_path, use_enclosing_directory, use_symlinks=None):
# Retrieve product file(s) from storage
raise NotImplementedError()
def size(self, product_path):
# Return product storage size
raise NotImplementedError()
def delete(self, product_path, properties):
# Delete product file(s) from storage
raise NotImplementedError()
def move(self, product, archive_path, paths=None):
# Move product
raise NotImplementedError()
def current_archive_path(self, paths, properties):
raise NotImplementedError()
| Python | 0 |
5261aa35eb5ab697310efc5bc8b7d11e8655127b | Update project info | setup.py | setup.py | """ setup script for "portal" package
for development:
python setup.py develop
to install:
python setup.py install
"""
from setuptools import setup
project = "portal"
# maintain long_description as a single long line.
# workaround for a bug in pkg_info._get_metadata("PKG-INFO")
long_description =\
"""Alpha version of the TrueNTH Shared Services RESTful API, to be used by TrueNTH intervention applications. This API attempts to conform with the HL7 FHIR specification as much as is reasonable.
"""
setup(
name=project,
url="https://github.com/uwcirg/true_nth_usa_portal",
description="TrueNTH Shared Services",
long_description=long_description,
author="CIRG, University of Washington",
author_email="truenth-dev@uw.edu",
classifiers=(
"Environment :: Web Environment",
"Intended Audience :: Developers",
"Intended Audience :: Healthcare Industry",
"License :: OSI Approved :: BSD License",
"Operating System :: OS Independent",
"Programming Language :: Python",
"Topic :: Scientific/Engineering :: Bio-Informatics",
"Topic :: Scientific/Engineering :: Medical Science Apps",
),
license = "BSD",
platforms = "any",
include_package_data=True,
use_scm_version=True,
zip_safe=False,
dependency_links=(
"git+https://github.com/pbugni/Flask-User.git#egg=Flask-User-0.6.8.1",
),
packages=["portal"],
setup_requires=("setuptools_scm"),
install_requires=(
"Authomatic>=0.1.0",
"celery",
"coverage",
"Flask>=0.10.1",
"Flask-Babel",
"Flask-Celery-Helper",
"Flask-Migrate",
"Flask-OAuthlib",
"Flask-SQLAlchemy",
"Flask-Script",
"Flask-Swagger",
"Flask-Testing",
"Flask-User>=0.6.8.1",
"Flask-WebTest",
"jsonschema",
"nose",
"oauthlib",
"page_objects",
"pkginfo",
"psycopg2",
"python-dateutil",
"recommonmark",
"redis",
"selenium",
"sphinx",
"sphinx_rtd_theme",
"swagger_spec_validator",
"validators",
"xvfbwrapper",
),
test_suite="tests",
)
| """ setup script for "portal" package
for development:
python setup.py develop
to install:
python setup.py install
"""
from setuptools import setup
project = "portal"
# maintain long_description as a single long line.
# workaround for a bug in pkg_info._get_metadata("PKG-INFO")
long_description =\
"""Alpha version of the TrueNTH Central Services RESTful API, to be used by TrueNTH intervention applications. This API attempts to conform with the HL7 FHIR specification as much as is reasonable.
"""
setup(
name=project,
url="https://github.com/uwcirg/true_nth_usa_portal_demo",
description="TrueNTH Central Services",
long_description=long_description,
author="University of Washington",
classifiers=(
"Environment :: Web Environment",
"Intended Audience :: Developers",
"Intended Audience :: Healthcare Industry",
"License :: OSI Approved :: BSD License",
"Operating System :: OS Independent",
"Programming Language :: Python",
"Topic :: Scientific/Engineering :: Bio-Informatics",
"Topic :: Scientific/Engineering :: Medical Science Apps",
),
include_package_data=True,
use_scm_version=True,
zip_safe=False,
dependency_links=(
"git+https://github.com/pbugni/Flask-User.git#egg=Flask-User-0.6.8.1",
),
packages=["portal"],
setup_requires=("setuptools_scm"),
install_requires=(
"Authomatic>=0.1.0",
"celery",
"coverage",
"Flask>=0.10.1",
"Flask-Babel",
"Flask-Celery-Helper",
"Flask-Migrate",
"Flask-OAuthlib",
"Flask-SQLAlchemy",
"Flask-Script",
"Flask-Swagger",
"Flask-Testing",
"Flask-User>=0.6.8.1",
"Flask-WebTest",
"jsonschema",
"nose",
"oauthlib",
"page_objects",
"pkginfo",
"psycopg2",
"python-dateutil",
"recommonmark",
"redis",
"selenium",
"sphinx",
"sphinx_rtd_theme",
"swagger_spec_validator",
"validators",
"xvfbwrapper",
),
test_suite="tests",
)
| Python | 0 |
6ce8537236f6bbc92789dc57a07befad391e2bc8 | fix install_requires | setup.py | setup.py | from setuptools import setup
version = '2.4.2'
setup(
name='cbagent',
version=version,
description='Stats collectors package for Couchbase Server monitoring',
author='Couchbase',
license='Apache Software License',
packages=[
'cbagent',
'cbagent.collectors',
'cbagent.collectors.libstats'
],
entry_points={
'console_scripts': [
'cbagent = cbagent.__main__:main',
]
},
include_package_data=True,
install_requires=[
'couchbase==1.2.1',
'decorator',
'fabric==1.8.0',
'logger',
'requests==2.1.0',
'seriesly',
'spring'
],
dependency_links=[
'git+https://github.com/couchbaselabs/spring.git#egg=spring',
]
)
| from setuptools import setup
version = '2.4.1'
setup(
name='cbagent',
version=version,
description='Stats collectors package for Couchbase Server monitoring',
author='Couchbase',
license='Apache Software License',
packages=[
'cbagent',
'cbagent.collectors',
'cbagent.collectors.libstats'
],
entry_points={
'console_scripts': [
'cbagent = cbagent.__main__:main',
]
},
include_package_data=True,
install_requires=[
'couchbase==1.2.0',
'decorator',
'fabric==1.8.0',
'logger',
'requests==2.1.0',
'seriesly',
'spring'
],
dependency_links=[
'git+https://github.com/couchbaselabs/spring.git#egg=spring',
]
)
| Python | 0.000001 |
cfc5b528a25b0c77086208755d7075bf1f17efbc | Add patient id in SimplePatientSerializer | patient/serializers.py | patient/serializers.py | from django.contrib.auth.models import User
from .models import Patient
from rest_framework import serializers
import datetime
from next_of_kin.models import NextOfKin
from next_of_kin.serializers import NextOfKinSerializer
class SimpleUserSerializer(serializers.ModelSerializer):
full_name = serializers.SerializerMethodField('get_full_name')
class Meta:
model = User
fields = ['full_name',]
def get_full_name(self, obj):
return obj.get_full_name()
class SimplePatientSerializer(serializers.ModelSerializer):
user = SimpleUserSerializer()
class Meta:
model = Patient
fields = ('id', 'user')
class PatientListSerializer(serializers.ModelSerializer):
user = SimpleUserSerializer()
birth_date = serializers.SerializerMethodField('get_birth_date')
age = serializers.SerializerMethodField('get_age')
def get_birth_date(self, obj):
return obj.national_identification_number[0:2] + "." \
+ obj.national_identification_number[2:4] + "." \
+ obj.national_identification_number[4:6]
def get_age(self, obj):
today = datetime.datetime.today()
ddmm = obj.national_identification_number[0:4]
yyyy = "20" + obj.national_identification_number[4:6]
if int(yyyy) >= today.year:
yyyy = str(int(yyyy) - 100)
birth_date = datetime.datetime.strptime(ddmm + yyyy, "%d%m%Y")
diff = today - birth_date
num_years = int(diff.days / 365.2425) # rough estimate, can be wrong in some edge cases
return num_years
class Meta:
model = Patient
fields = (
'id',
'user',
'birth_date',
'age',
'national_identification_number',
'telephone'
)
class PatientDetailSerializer(PatientListSerializer):
user = SimpleUserSerializer()
birth_date = serializers.SerializerMethodField('get_birth_date')
age = serializers.SerializerMethodField('get_age')
next_of_kin = serializers.SerializerMethodField('get_next_of_kin')
def get_next_of_kin(self, obj):
next_of_kin = NextOfKin.objects.filter(patient__id=obj.id)
serializer = NextOfKinSerializer(next_of_kin, many=True, context=self.context)
return serializer.data
class Meta:
model = Patient
fields = (
'id',
'user',
'birth_date',
'age',
'national_identification_number',
'telephone',
'address',
'next_of_kin',
'pulse_max',
'pulse_min',
'o2_max',
'o2_min',
'temperature_max',
'temperature_min',
'activity_access',
'pulse_access',
'o2_access',
'temperature_access'
)
| from django.contrib.auth.models import User
from .models import Patient
from rest_framework import serializers
import datetime
from next_of_kin.models import NextOfKin
from next_of_kin.serializers import NextOfKinSerializer
class SimpleUserSerializer(serializers.ModelSerializer):
full_name = serializers.SerializerMethodField('get_full_name')
class Meta:
model = User
fields = ['full_name',]
def get_full_name(self, obj):
return obj.get_full_name()
class SimplePatientSerializer(serializers.ModelSerializer):
user = SimpleUserSerializer()
class Meta:
model = Patient
fields = ('user',)
class PatientListSerializer(serializers.ModelSerializer):
user = SimpleUserSerializer()
birth_date = serializers.SerializerMethodField('get_birth_date')
age = serializers.SerializerMethodField('get_age')
def get_birth_date(self, obj):
return obj.national_identification_number[0:2] + "." \
+ obj.national_identification_number[2:4] + "." \
+ obj.national_identification_number[4:6]
def get_age(self, obj):
today = datetime.datetime.today()
ddmm = obj.national_identification_number[0:4]
yyyy = "20" + obj.national_identification_number[4:6]
if int(yyyy) >= today.year:
yyyy = str(int(yyyy) - 100)
birth_date = datetime.datetime.strptime(ddmm + yyyy, "%d%m%Y")
diff = today - birth_date
num_years = int(diff.days / 365.2425) # rough estimate, can be wrong in some edge cases
return num_years
class Meta:
model = Patient
fields = (
'id',
'user',
'birth_date',
'age',
'national_identification_number',
'telephone'
)
class PatientDetailSerializer(PatientListSerializer):
user = SimpleUserSerializer()
birth_date = serializers.SerializerMethodField('get_birth_date')
age = serializers.SerializerMethodField('get_age')
next_of_kin = serializers.SerializerMethodField('get_next_of_kin')
def get_next_of_kin(self, obj):
next_of_kin = NextOfKin.objects.filter(patient__id=obj.id)
serializer = NextOfKinSerializer(next_of_kin, many=True, context=self.context)
return serializer.data
class Meta:
model = Patient
fields = (
'id',
'user',
'birth_date',
'age',
'national_identification_number',
'telephone',
'address',
'next_of_kin',
'pulse_max',
'pulse_min',
'o2_max',
'o2_min',
'temperature_max',
'temperature_min',
'activity_access',
'pulse_access',
'o2_access',
'temperature_access'
)
| Python | 0.000001 |
b1933e4d998d703a14bbb1769e04a078fac215bc | Update HexStats.py | HexChat/HexStats.py | HexChat/HexStats.py | import hexchat
#Based on Weechat's Weestats: https://weechat.org/scripts/source/weestats.py.html/
#By Filip H.F. 'FiXato' Slagter <fixato [at] gmail [dot] com>
__module_name__ = 'HexStats'
__module_version__ = '0.0.1'
__module_description__ = 'Displays HexChat-wide User Statistics'
__module_author__ = 'Vlek'
def stats(word, word_to_eol, userdata):
context = hexchat.find_context()
context.prnt( getstats() )
return hexchat.EAT_ALL
def printstats(word, word_to_eol, userdata):
context = hexchat.find_context()
context.command('say {}'.format( getstats() ))
return hexchat.EAT_ALL
def getstats():
chans = hexchat.get_list('channels')
types = [i.type for i in chans]
channels = types.count(2)
ops = []
for channel in chans:
if channel.type == 2:
context = channel.context
ops += [user.prefix for user in context.get_list('users') if user.nick == context.get_info('nick')]
ops = ops.count('@')
servers = types.count(1)
queries = types.count(3)
return 'Stats: {} channels ({} OPs), {} servers, {} queries'.format( channels, ops,
servers, queries )
hexchat.hook_command("stats", stats, help="/stats displays HexChat user statistics")
hexchat.hook_command("printstats", printstats, help="/printstats Says HexChat user statistics in current context")
| import hexchat
#Based on Weechat's Weestats: https://weechat.org/scripts/source/weestats.py.html/
#By Filip H.F. 'FiXato' Slagter <fixato [at] gmail [dot] com>
__module_name__ = 'HexStats'
__module_version__ = '0.0.1'
__module_description__ = 'Displays HexChat Wide User Statistics'
__module_author__ = 'Vlek'
def stats(word, word_to_eol, userdata):
context = hexchat.find_context()
context.prnt( getstats() )
return hexchat.EAT_ALL
def printstats(word, word_to_eol, userdata):
context = hexchat.find_context()
context.command('say {}'.format( getstats() ))
return hexchat.EAT_ALL
def getstats():
chans = hexchat.get_list('channels')
types = [i.type for i in chans]
channels = types.count(2)
contextlist = [i.context for i in chans if i.type == 2]
ops = []
for context in contextlist:
ops += [user.prefix for user in context.get_list('users') if user.nick == context.get_info('nick')]
print('Channel: {} - {}'.format(context.get_info('channel'), context.get_info('nick')))
#ops = ops.count('@')
servers = types.count(1)
queries = types.count(3)
return 'Stats: {} channels ({} OPs), {} servers, {} queries'.format( channels, ops,
servers, queries )
hexchat.hook_command("stats", stats, help="/stats displays HexChat user statistics")
hexchat.hook_command("printstats", printstats, help="/printstats Says HexChat user statistics in current context")
| Python | 0.000001 |
1a6b79629c4e79e3917287a693047fbe5e0129ad | Check user if admin before lockdown | plugins/lock_the_chat.py | plugins/lock_the_chat.py | """
Echo plugin example
"""
import octeon
global locked
locked = []
PLUGINVERSION = 2
# Always name this variable as `plugin`
# If you dont, module loader will fail to load the plugin!
plugin = octeon.Plugin()
@plugin.message(regex=".*") # You pass regex pattern
def lock_check(bot, update):
if update.message.chat_id in locked:
for admin in update.message.chat.get_administrators():
if admin.user.username == update.message.from_user.username:
return
update.message.delete()
return
@plugin.command(command="/lock",
description="Locks chat",
inline_supported=True,
hidden=False)
def lock(bot, update, user, args):
if update.message.chat_id in locked:
return octeon.message("Chat is already locked")
if update.message.chat.type != "PRIVATE":
for admin in update.message.chat.get_administrators():
if admin.user.username == update.message.from_user.username:
for admin in update.message.chat.get_administrators():
if admin.user.username == bot.get_me().username:
locked.append(update.message.chat_id)
return octeon.message("Chat locked")
return octeon.message("I am not admin of this chat...")
return octeon.message(text="Hey! You are not admin of this chat!", photo="https://pbs.twimg.com/media/C_I2Xv1WAAAkpiv.jpg")
else:
return octeon.message("Why would you lock a private converstaion?")
@plugin.command(command="/unlock",
description="Unlocks chat",
inline_supported=True,
hidden=False)
def unlock(bot, update, user, args):
if update.message.chat_id in locked:
for admin in update.message.chat.get_administrators():
if admin.user.username == update.message.from_user.username:
locked.remove(update.message.chat_id)
return octeon.message("Chat unlocked")
else:
return octeon.message("This chat wasnt locked at all") | """
Echo plugin example
"""
import octeon
global locked
locked = []
PLUGINVERSION = 2
# Always name this variable as `plugin`
# If you dont, module loader will fail to load the plugin!
plugin = octeon.Plugin()
@plugin.message(regex=".*") # You pass regex pattern
def lock_check(bot, update):
if update.message.chat_id in locked:
for admin in update.message.chat.get_administrators():
if admin.user.username == update.message.from_user.username:
return
update.message.delete()
return
@plugin.command(command="/lock",
description="Locks chat",
inline_supported=True,
hidden=False)
def lock(bot, update, user, args):
if update.message.chat.type != "PRIVATE" and not update.message.chat_id in locked:
for admin in update.message.chat.get_administrators():
if admin.user.username == bot.get_me().username:
locked.append(update.message.chat_id)
return octeon.message("Chat locked")
return octeon.message("I am not admin of this chat...")
else:
return octeon.message("Why would you lock a private converstaion?")
@plugin.command(command="/unlock",
description="Unlocks chat",
inline_supported=True,
hidden=False)
def unlock(bot, update, user, args):
if update.message.chat_id in locked:
locked.remove(update.message.chat_id)
return octeon.message("Chat unlocked")
else:
return octeon.message("This chat wasnt locked at all") | Python | 0.000001 |
b3204ccf3bd3ac26cabb4e6aa75bdb9dbf3f9e75 | Sentence case the biscuit | elections/uk/migrations/0005_add_favourite_biscuits.py | elections/uk/migrations/0005_add_favourite_biscuits.py | # -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
def create_simple_fields(apps, schema_editor):
ExtraField = apps.get_model('candidates', 'ExtraField')
db_alias = schema_editor.connection.alias
ExtraField.objects.using(db_alias).update_or_create(
key='favourite_biscuits',
defaults={
'label': 'Favourite biscuit 🍪',
'type': 'line',
'order': 1,
}
)
dependencies = [
('uk', '0004_add_biography'),
]
operations = [
migrations.RunPython(create_simple_fields),
]
| # -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
def create_simple_fields(apps, schema_editor):
ExtraField = apps.get_model('candidates', 'ExtraField')
db_alias = schema_editor.connection.alias
ExtraField.objects.using(db_alias).update_or_create(
key='favourite_biscuits',
defaults={
'label': 'Favourite Biscuit 🍪',
'type': 'line',
'order': 1,
}
)
dependencies = [
('uk', '0004_add_biography'),
]
operations = [
migrations.RunPython(create_simple_fields),
]
| Python | 0.999999 |
d60f0fa1f942a24ca38ce20f2b5a617eb5181456 | update session backend | hiren/hiren/settings.py | hiren/hiren/settings.py | """
Django settings for hiren project.
For more information on this file, see
https://docs.djangoproject.com/en/1.6/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/1.6/ref/settings/
"""
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
import os
BASE_DIR = os.path.dirname(os.path.dirname(__file__))
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/1.6/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
SECRET_KEY = '6ajer4-t(c_k3@tb0@g-w5ztxoq61e866pm0xl2t4im%khu9qo'
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True
TEMPLATE_DEBUG = True
ALLOWED_HOSTS = []
# Application definition
INSTALLED_APPS = (
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
'debug_toolbar',
'disk',
)
MIDDLEWARE_CLASSES = (
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
)
MESSAGE_STORAGE = 'django.contrib.messages.storage.session.SessionStorage'
ROOT_URLCONF = 'hiren.urls'
WSGI_APPLICATION = 'hiren.wsgi.application'
# Database
# https://docs.djangoproject.com/en/1.6/ref/settings/#databases
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.sqlite3',
'NAME': os.path.join(BASE_DIR, 'db.sqlite3'),
}
}
# Internationalization
# https://docs.djangoproject.com/en/1.6/topics/i18n/
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'Asia/Dhaka'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/1.6/howto/static-files/
STATIC_URL = '/static/'
STATICFILES_DIRS = (
os.path.join(BASE_DIR, "static"),
)
TEMPLATE_DIRS = (
os.path.join(BASE_DIR, "template"),
)
| """
Django settings for hiren project.
For more information on this file, see
https://docs.djangoproject.com/en/1.6/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/1.6/ref/settings/
"""
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
import os
BASE_DIR = os.path.dirname(os.path.dirname(__file__))
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/1.6/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
SECRET_KEY = '6ajer4-t(c_k3@tb0@g-w5ztxoq61e866pm0xl2t4im%khu9qo'
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True
TEMPLATE_DEBUG = True
ALLOWED_HOSTS = []
# Application definition
INSTALLED_APPS = (
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
'debug_toolbar',
'disk',
)
MIDDLEWARE_CLASSES = (
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
)
ROOT_URLCONF = 'hiren.urls'
WSGI_APPLICATION = 'hiren.wsgi.application'
# Database
# https://docs.djangoproject.com/en/1.6/ref/settings/#databases
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.sqlite3',
'NAME': os.path.join(BASE_DIR, 'db.sqlite3'),
}
}
# Internationalization
# https://docs.djangoproject.com/en/1.6/topics/i18n/
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'Asia/Dhaka'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/1.6/howto/static-files/
STATIC_URL = '/static/'
STATICFILES_DIRS = (
os.path.join(BASE_DIR, "static"),
)
TEMPLATE_DIRS = (
os.path.join(BASE_DIR, "template"),
)
| Python | 0.000001 |
8b01df8ffb790a66cb054206def0a425275539c4 | Fix encoding issue and comment improvement (#1807) | endpoints/getting-started/clients/google-jwt-client.py | endpoints/getting-started/clients/google-jwt-client.py | #!/usr/bin/env python
# Copyright 2016 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Example of calling a Google Cloud Endpoint API with a JWT signed by
a Google API Service Account."""
import argparse
import time
import google.auth.crypt
import google.auth.jwt
import requests
from six.moves import urllib
def generate_jwt(service_account_file):
"""Generates a signed JSON Web Token using a Google API Service Account."""
# Note: this sample shows how to manually create the JWT for the purposes
# of showing how the authentication works, but you can use
# google.auth.jwt.Credentials to automatically create the JWT.
# http://google-auth.readthedocs.io/en/latest/reference
# /google.auth.jwt.html#google.auth.jwt.Credentials
signer = google.auth.crypt.RSASigner.from_service_account_file(
service_account_file)
now = int(time.time())
expires = now + 3600 # One hour in seconds
payload = {
'iat': now,
'exp': expires,
# aud must match 'audience' in the security configuration in your
# swagger spec. It can be any string.
'aud': 'echo.endpoints.sample.google.com',
# iss must match 'issuer' in the security configuration in your
# swagger spec (e.g. service account email). It can be any string.
'iss': 'jwt-client.endpoints.sample.google.com',
# sub and email are mapped to the user id and email respectively.
# sub should match 'iss'
'sub': 'jwt-client.endpoints.sample.google.com',
'email': 'user@example.com'
}
jwt = google.auth.jwt.encode(signer, payload).decode('UTF-8')
return jwt
def make_request(host, api_key, signed_jwt):
"""Makes a request to the auth info endpoint for Google JWTs."""
url = urllib.parse.urljoin(host, '/auth/info/googlejwt')
params = {
'key': api_key
}
headers = {
'Authorization': 'Bearer {}'.format(signed_jwt)
}
response = requests.get(url, params=params, headers=headers)
response.raise_for_status()
return response.text
def main(host, api_key, service_account_file):
signed_jwt = generate_jwt(service_account_file)
response = make_request(host, api_key, signed_jwt)
print(response)
if __name__ == '__main__':
parser = argparse.ArgumentParser(
description=__doc__,
formatter_class=argparse.RawDescriptionHelpFormatter)
parser.add_argument(
'host', help='Your API host, e.g. https://your-project.appspot.com.')
parser.add_argument(
'api_key', help='Your API key.')
parser.add_argument(
'service_account_file',
help='The path to your service account json file.')
args = parser.parse_args()
main(args.host, args.api_key, args.service_account_file)
| #!/usr/bin/env python
# Copyright 2016 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Example of calling a Google Cloud Endpoint API with a JWT signed by
a Google API Service Account."""
import argparse
import time
import google.auth.crypt
import google.auth.jwt
import requests
from six.moves import urllib
def generate_jwt(service_account_file):
"""Generates a signed JSON Web Token using a Google API Service Account."""
# Note: this sample shows how to manually create the JWT for the purposes
# of showing how the authentication works, but you can use
# google.auth.jwt.Credentials to automatically create the JWT.
# http://google-auth.readthedocs.io/en/latest/reference
# /google.auth.jwt.html#google.auth.jwt.Credentials
signer = google.auth.crypt.RSASigner.from_service_account_file(
service_account_file)
now = int(time.time())
expires = now + 3600 # One hour in seconds
payload = {
'iat': now,
'exp': expires,
# aud must match 'audience' in the security configuration in your
# swagger spec. It can be any string.
'aud': 'echo.endpoints.sample.google.com',
# iss must match 'issuer' in the security configuration in your
# swagger spec. It can be any string.
'iss': 'jwt-client.endpoints.sample.google.com',
# sub and email are mapped to the user id and email respectively.
'sub': '12345678',
'email': 'user@example.com'
}
jwt = google.auth.jwt.encode(signer, payload)
return jwt
def make_request(host, api_key, signed_jwt):
"""Makes a request to the auth info endpoint for Google JWTs."""
url = urllib.parse.urljoin(host, '/auth/info/googlejwt')
params = {
'key': api_key
}
headers = {
'Authorization': 'Bearer {}'.format(signed_jwt)
}
response = requests.get(url, params=params, headers=headers)
response.raise_for_status()
return response.text
def main(host, api_key, service_account_file):
signed_jwt = generate_jwt(service_account_file)
response = make_request(host, api_key, signed_jwt)
print(response)
if __name__ == '__main__':
parser = argparse.ArgumentParser(
description=__doc__,
formatter_class=argparse.RawDescriptionHelpFormatter)
parser.add_argument(
'host', help='Your API host, e.g. https://your-project.appspot.com.')
parser.add_argument(
'api_key', help='Your API key.')
parser.add_argument(
'service_account_file',
help='The path to your service account json file.')
args = parser.parse_args()
main(args.host, args.api_key, args.service_account_file)
| Python | 0 |
243c4ba66d35efbe58944ff973d668d9a3b7c6f8 | Update __init__.py | VertexActions/__init__.py | VertexActions/__init__.py | #
| Python | 0.000072 | |
9a9a6643bbc26a3f359df52b0b4bbb4207225017 | Update VariationalAutoencoderRunner.py | autoencoder/VariationalAutoencoderRunner.py | autoencoder/VariationalAutoencoderRunner.py | from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
import sklearn.preprocessing as prep
import tensorflow as tf
from tensorflow.examples.tutorials.mnist import input_data
from autoencoder_models.VariationalAutoencoder import VariationalAutoencoder
mnist = input_data.read_data_sets('MNIST_data', one_hot=True)
def min_max_scale(X_train, X_test):
preprocessor = prep.MinMaxScaler().fit(X_train)
X_train = preprocessor.transform(X_train)
X_test = preprocessor.transform(X_test)
return X_train, X_test
def get_random_block_from_data(data, batch_size):
start_index = np.random.randint(0, len(data) - batch_size)
return data[start_index:(start_index + batch_size)]
X_train, X_test = min_max_scale(mnist.train.images, mnist.test.images)
n_samples = int(mnist.train.num_examples)
training_epochs = 20
batch_size = 128
display_step = 1
autoencoder = VariationalAutoencoder(
n_input=784,
n_hidden=200,
optimizer=tf.train.AdamOptimizer(learning_rate = 0.001))
for epoch in range(training_epochs):
avg_cost = 0.
total_batch = int(n_samples / batch_size)
# Loop over all batches
for i in range(total_batch):
batch_xs = get_random_block_from_data(X_train, batch_size)
# Fit training using batch data
cost = autoencoder.partial_fit(batch_xs)
# Compute average loss
avg_cost += cost / n_samples * batch_size
# Display logs per epoch step
if epoch % display_step == 0:
print("Epoch:", '%d,' % (epoch + 1),
"Cost:", "{:.9f}".format(avg_cost))
print("Total cost: " + str(autoencoder.calc_total_cost(X_test)))
| from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
import sklearn.preprocessing as prep
import tensorflow as tf
from tensorflow.examples.tutorials.mnist import input_data
from autoencoder_models.VariationalAutoencoder import VariationalAutoencoder
mnist = input_data.read_data_sets('MNIST_data', one_hot = True)
def min_max_scale(X_train, X_test):
preprocessor = prep.MinMaxScaler().fit(X_train)
X_train = preprocessor.transform(X_train)
X_test = preprocessor.transform(X_test)
return X_train, X_test
def get_random_block_from_data(data, batch_size):
start_index = np.random.randint(0, len(data) - batch_size)
return data[start_index:(start_index + batch_size)]
X_train, X_test = min_max_scale(mnist.train.images, mnist.test.images)
n_samples = int(mnist.train.num_examples)
training_epochs = 20
batch_size = 128
display_step = 1
autoencoder = VariationalAutoencoder(
n_input = 784,
n_hidden = 200,
optimizer = tf.train.AdamOptimizer(learning_rate = 0.001))
for epoch in range(training_epochs):
avg_cost = 0.
total_batch = int(n_samples / batch_size)
# Loop over all batches
for i in range(total_batch):
batch_xs = get_random_block_from_data(X_train, batch_size)
# Fit training using batch data
cost = autoencoder.partial_fit(batch_xs)
# Compute average loss
avg_cost += cost / n_samples * batch_size
# Display logs per epoch step
if epoch % display_step == 0:
print("Epoch:", '%d,' % (epoch + 1),
"Cost:", "{:.9f}".format(avg_cost))
print("Total cost: " + str(autoencoder.calc_total_cost(X_test)))
| Python | 0.000001 |
32fa9354c221f91cc6790177371a00468d22cb85 | Fix the scan script | scan/scan.py | scan/scan.py | # Add your Python code here. E.g.
from microbit import *
MAX_ROWS=4
level = 9
def scan(pause=500, reverse=False):
for i in range(0,10):
x = 0
rows = i
cols = i
while x <= i:
for y in range(0,rows+1):
if x <= MAX_ROWS and y <= MAX_ROWS:
coord_x = MAX_ROWS-x if reverse else x
coord_y = MAX_ROWS-y if reverse else y
display.set_pixel(coord_x,coord_y,max(0,level-((rows-y)*2)))
x = x+1
rows = rows-1
sleep(pause)
while True:
scan(150)
scan(150,True)
| # Add your Python code here. E.g.
from microbit import *
MAX_ROWS=4
def scan(level,pause=500, reverse=False):
for i in range(0,10):
x = 0
rows = i
cols = i
while x <= i:
for y in range(0,rows+1):
if x <= MAX_ROWS and y <= MAX_ROWS:
coord_x = MAX_ROWS-x if reverse else x
coord_y = MAX_ROWS-y if reverse else y
display.set_pixel(coord_x,coord_y,max(0,level-((rows-y)*2)))
x = x+1
rows = rows-1
sleep(pause)
while True:
scan(9,150)
scan(150,True)
| Python | 0.001609 |
9360c15f8883543ad5d83aa7dc870c60a1fed5ec | add infos | setup.py | setup.py | #!/usr/bin/env python3
#
# The MIT License (MIT)
#
# Copyright (c) 2014 Philippe Proulx <philippe.proulx@efficios.com>
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.
import os
import sys
import subprocess
from setuptools import setup
# make sure we run Python 3+ here
v = sys.version_info
if v.major < 3:
sys.stderr.write('Sorry, pytsdl needs Python 3\n')
sys.exit(1)
# pyPEG2 needs to be installed manually until their PyPI tarball is
# fixed for setuptools.
try:
import pypeg2
except ImportError:
sys.stderr.write('Please install pyPEG2 manually:\n\n')
sys.stderr.write(' sudo pip3 install pyPEG2\n')
sys.exit(1)
packages = [
'pytsdl',
]
setup(name='pytsdl',
version=0.3,
description='TSDL parser implemented entirely in Python 3',
author='Philippe Proulx',
author_email='eeppeliteloop@gmail.com',
license='MIT',
keywords='tsdl ctf metadata',
url='https://github.com/eepp/pytsdl',
packages=packages)
| #!/usr/bin/env python3
#
# The MIT License (MIT)
#
# Copyright (c) 2014 Philippe Proulx <philippe.proulx@efficios.com>
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.
import os
import sys
import subprocess
from setuptools import setup
# make sure we run Python 3+ here
v = sys.version_info
if v.major < 3:
sys.stderr.write('Sorry, pytsdl needs Python 3\n')
sys.exit(1)
# pyPEG2 needs to be installed manually until their PyPI tarball is
# fixed for setuptools.
try:
import pypeg2
except ImportError:
sys.stderr.write('Please install pyPEG2 manually:\n\n')
sys.stderr.write(' sudo pip3 install pyPEG2\n')
sys.exit(1)
packages = [
'pytsdl',
]
setup(name='pytsdl',
version=0.3,
description='TSDL parser implemented entirely in Python 3',
author='Philippe Proulx',
author_email='eeppeliteloop@gmail.com',
url='https://github.com/eepp/pytsdl',
packages=packages)
| Python | 0 |
dc1773eaf3e66ddf5cbaa564bb55dbb8e51218ff | Fix #752: test case failed | topbeat/tests/system/test_filesystem.py | topbeat/tests/system/test_filesystem.py | from topbeat import TestCase
import numbers
"""
Contains tests for ide statistics.
"""
class Test(TestCase):
def test_filesystems(self):
"""
Checks that system wide stats are found in the output and
have the expected types.
"""
self.render_config_template(
system_stats=False,
process_stats=False,
filesystem_stats=True
)
topbeat = self.start_topbeat()
self.wait_until(lambda: self.output_has(lines=1))
topbeat.kill_and_wait()
output = self.read_output()[0]
for key in [
"fs.device_name",
"fs.mount_point",
]:
assert isinstance(output[key], basestring)
for key in [
"fs.used_p",
]:
assert isinstance(output[key], numbers.Number)
for key in [
"fs.avail",
"fs.files",
"fs.free_files",
"fs.total",
"fs.used",
]:
assert type(output[key]) is int or type(output[key]) is long
| from topbeat import TestCase
"""
Contains tests for ide statistics.
"""
class Test(TestCase):
def test_filesystems(self):
"""
Checks that system wide stats are found in the output and
have the expected types.
"""
self.render_config_template(
system_stats=False,
process_stats=False,
filesystem_stats=True
)
topbeat = self.start_topbeat()
self.wait_until(lambda: self.output_has(lines=1))
topbeat.kill_and_wait()
output = self.read_output()[0]
for key in [
"fs.device_name",
"fs.mount_point",
]:
assert isinstance(output[key], basestring)
for key in [
"fs.used_p",
]:
assert type(output[key]) is float
for key in [
"fs.avail",
"fs.files",
"fs.free_files",
"fs.total",
"fs.used",
]:
assert type(output[key]) is int or type(output[key]) is long
| Python | 0 |
1dacd99bbe1b32586a013d7d6f0874271e097e7c | Revise var to reach | lc0055_jump_game.py | lc0055_jump_game.py | """Leetcode 55. Jump Game
Medium
URL: https://leetcode.com/problems/jump-game/
Given an array of non-negative integers, you are initially positioned at the
first index of the array.
Each element in the array represents your maximum jump length at that position.
Determine if you are able to reach the last index.
Example 1:
Input: [2,3,1,1,4]
Output: true
Explanation: Jump 1 step from index 0 to 1, then 3 steps to the last index.
Example 2:
Input: [3,2,1,0,4]
Output: false
Explanation: You will always arrive at index 3 no matter what. Its maximum
jump length is 0, which makes it impossible to reach the last index.
"""
class SolutionGreedy(object):
def canJump(self, nums):
"""
:type nums: List[int]
:rtype: bool
Time complexity: O(n).
Space complexity: O(1).
"""
# Create max reachable index.
reach = 0
for i in range(len(nums)):
# Index i is not reachable.
if reach < i:
return False
# Update reach by taking max of itself and i+nums[i].
reach = max(reach, i + nums[i])
return True
def main():
# Ans: True
nums = [2,3,1,1,4]
print SolutionGreedy().canJump(nums)
# Ans: False
nums = [3,2,1,0,4]
print SolutionGreedy().canJump(nums)
if __name__ == '__main__':
main()
| """Leetcode 55. Jump Game
Medium
URL: https://leetcode.com/problems/jump-game/
Given an array of non-negative integers, you are initially positioned at the
first index of the array.
Each element in the array represents your maximum jump length at that position.
Determine if you are able to reach the last index.
Example 1:
Input: [2,3,1,1,4]
Output: true
Explanation: Jump 1 step from index 0 to 1, then 3 steps to the last index.
Example 2:
Input: [3,2,1,0,4]
Output: false
Explanation: You will always arrive at index 3 no matter what. Its maximum
jump length is 0, which makes it impossible to reach the last index.
"""
class SolutionGreedy(object):
def canJump(self, nums):
"""
:type nums: List[int]
:rtype: bool
Time complexity: O(n).
Space complexity: O(1).
"""
# Create max reachable index.
reachable = 0
for i in range(len(nums)):
# Index i is not reachable.
if reachable < i:
return False
# Update reachable by taking max of itself and i+nums[i].
reachable = max(reachable, i + nums[i])
return True
def main():
# Ans: True
nums = [2,3,1,1,4]
print SolutionGreedy().canJump(nums)
# Ans: False
nums = [3,2,1,0,4]
print SolutionGreedy().canJump(nums)
if __name__ == '__main__':
main()
| Python | 0 |
e908792a8a47b4afc478a89f479ab836d7acea5e | set 2to3 False | setup.py | setup.py | #!/usr/bin/env python
# Licensed under a 3-clause BSD style license - see LICENSE.rst
from __future__ import absolute_import, division, print_function
#
# Standard imports
#
import glob
import os
import sys
from setuptools import setup, find_packages
#
# desiutil needs to import some of its own code.
#
sys.path.insert(int(sys.path[0] == ''),os.path.abspath('./py'))
from desiutil.setup import DesiModule, DesiTest, DesiVersion, get_version
#
# Begin setup
#
setup_keywords = dict()
#
# THESE SETTINGS NEED TO BE CHANGED FOR EVERY PRODUCT.
#
setup_keywords['name'] = 'desiutil'
setup_keywords['description'] = 'DESI utilities package'
setup_keywords['author'] = 'DESI Collaboration'
setup_keywords['author_email'] = 'desi-data@desi.lbl.gov'
setup_keywords['license'] = 'BSD'
setup_keywords['url'] = 'https://github.com/desihub/desiutil'
#
# END OF SETTINGS THAT NEED TO BE CHANGED.
#
setup_keywords['version'] = get_version(setup_keywords['name'])
#
# Use README.rst as long_description.
#
setup_keywords['long_description'] = ''
if os.path.exists('README.rst'):
with open('README.rst') as readme:
setup_keywords['long_description'] = readme.read()
#
# Set other keywords for the setup function. These are automated, & should
# be left alone unless you are an expert.
#
# Treat everything in bin/ except *.rst as a script to be installed.
#
if os.path.isdir('bin'):
setup_keywords['scripts'] = [fname for fname in glob.glob(os.path.join('bin', '*'))
if not os.path.basename(fname).endswith('.rst')]
setup_keywords['provides'] = [setup_keywords['name']]
setup_keywords['requires'] = ['Python (>2.7.0)']
# setup_keywords['install_requires'] = ['Python (>2.7.0)']
setup_keywords['zip_safe'] = False
setup_keywords['use_2to3'] = False
setup_keywords['packages'] = find_packages('py')
setup_keywords['package_dir'] = {'':'py'}
setup_keywords['cmdclass'] = {'module_file': DesiModule, 'version': DesiVersion, 'test': DesiTest}
setup_keywords['test_suite']='{name}.test.{name}_test_suite.{name}_test_suite'.format(**setup_keywords)
#
# Autogenerate command-line scripts.
#
# setup_keywords['entry_points'] = {'console_scripts':['desiInstall = desiutil.install.desi_install:main']}
#
# Add internal data directories.
#
setup_keywords['package_data'] = {'desiutil.test': ['t/*']}
#
# Run setup command.
#
setup(**setup_keywords)
| #!/usr/bin/env python
# Licensed under a 3-clause BSD style license - see LICENSE.rst
from __future__ import absolute_import, division, print_function
#
# Standard imports
#
import glob
import os
import sys
from setuptools import setup, find_packages
#
# desiutil needs to import some of its own code.
#
sys.path.insert(int(sys.path[0] == ''),os.path.abspath('./py'))
from desiutil.setup import DesiModule, DesiTest, DesiVersion, get_version
#
# Begin setup
#
setup_keywords = dict()
#
# THESE SETTINGS NEED TO BE CHANGED FOR EVERY PRODUCT.
#
setup_keywords['name'] = 'desiutil'
setup_keywords['description'] = 'DESI utilities package'
setup_keywords['author'] = 'DESI Collaboration'
setup_keywords['author_email'] = 'desi-data@desi.lbl.gov'
setup_keywords['license'] = 'BSD'
setup_keywords['url'] = 'https://github.com/desihub/desiutil'
#
# END OF SETTINGS THAT NEED TO BE CHANGED.
#
setup_keywords['version'] = get_version(setup_keywords['name'])
#
# Use README.rst as long_description.
#
setup_keywords['long_description'] = ''
if os.path.exists('README.rst'):
with open('README.rst') as readme:
setup_keywords['long_description'] = readme.read()
#
# Set other keywords for the setup function. These are automated, & should
# be left alone unless you are an expert.
#
# Treat everything in bin/ except *.rst as a script to be installed.
#
if os.path.isdir('bin'):
setup_keywords['scripts'] = [fname for fname in glob.glob(os.path.join('bin', '*'))
if not os.path.basename(fname).endswith('.rst')]
setup_keywords['provides'] = [setup_keywords['name']]
setup_keywords['requires'] = ['Python (>2.7.0)']
# setup_keywords['install_requires'] = ['Python (>2.7.0)']
setup_keywords['zip_safe'] = False
setup_keywords['use_2to3'] = True
setup_keywords['packages'] = find_packages('py')
setup_keywords['package_dir'] = {'':'py'}
setup_keywords['cmdclass'] = {'module_file': DesiModule, 'version': DesiVersion, 'test': DesiTest}
setup_keywords['test_suite']='{name}.test.{name}_test_suite.{name}_test_suite'.format(**setup_keywords)
#
# Autogenerate command-line scripts.
#
# setup_keywords['entry_points'] = {'console_scripts':['desiInstall = desiutil.install.desi_install:main']}
#
# Add internal data directories.
#
setup_keywords['package_data'] = {'desiutil.test': ['t/*']}
#
# Run setup command.
#
setup(**setup_keywords)
| Python | 0.999999 |
012230b2693a1922cae4bbf163de1b4d23d00b40 | Fix beta sample | tensorforce/core/distributions/beta.py | tensorforce/core/distributions/beta.py | # Copyright 2017 reinforce.io. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""
Beta distribution.
"""
import tensorflow as tf
from tensorforce import util
from tensorforce.core.networks import layers
from tensorforce.core.distributions import Distribution
# TODO Michael: integrate to model, rescale min max
class Beta(Distribution):
def __init__(self, shape, min_value, max_value, alpha, beta):
"""
Beta distribution used for continuous actions. In particular, the Beta distribution
allows to bound action values with min and max values.
Args:
shape: Shape of actions
min_value: Min value of all actions for the given shape
max_value: Max value of all actions for the given shape
alpha: Concentration parameter of the Beta distribution
beta: Concentration parameter of the Beta distribution
"""
self.shape = shape
self.min_value = min_value
self.h = (max_value - min_value) / 2
self.alpha = alpha
self.beta = beta
def kl_divergence(self, other):
assert isinstance(other, Beta)
return other.log_norm - self.log_norm - tf.digamma(other.beta) * (other.beta - self.beta) - \
tf.digamma(other.alpha) * (other.alpha - self.alpha) + tf.digamma(other.sum) * (other.sum - self.sum)
def entropy(self):
return self.log_norm - (self.beta - 1.0) * tf.digamma(self.beta) - \
(self.alpha - 1.0) * tf.digamma(self.alpha) + ((self.sum - 2.0) * tf.digamma(self.sum))
@classmethod
def from_tensors(cls, parameters, deterministic):
self = cls(shape=None, min_value=None, max_value=None)
self.distribution = (self.alpha, self.beta) = parameters
self.deterministic = deterministic
return self
def create_tf_operations(self, x, deterministic):
# Flat mean and log standard deviation
flat_size = util.prod(self.shape)
self.alpha = layers['dense'](x=x, size=flat_size, bias=self.alpha, activation='softplus')
self.beta = layers['dense'](x=x, size=flat_size, bias=self.beta, activation='softplus')
self.sum = self.alpha + self.beta
self.mean = self.alpha / self.sum
self.log_norm = tf.lgamma(self.alpha) + tf.lgamma(self.beta) - tf.lgamma(self.sum)
self.distribution = (self.alpha, self.beta)
self.deterministic = deterministic
def log_probability(self, action):
return (self.alpha - 1.0) * tf.log(action) + (self.alpha - 1.0) * tf.log1p(-action) - self.log_norm
def sample(self):
deterministic = self.mean
alpha_sample = tf.random_gamma(shape=tf.shape(input=self.alpha), alpha=self.alpha)
beta_sample = tf.random_gamma(shape=tf.shape(input=self.beta), alpha=self.beta)
sample = alpha_sample / (alpha_sample + beta_sample)
return self.min_value + tf.where(condition=self.deterministic, x=deterministic, y=sample)
| # Copyright 2017 reinforce.io. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""
Beta distribution.
"""
import tensorflow as tf
from tensorforce import util
from tensorforce.core.networks import layers
from tensorforce.core.distributions import Distribution
# TODO Michael: integrate to model, rescale min max
class Beta(Distribution):
def __init__(self, shape, min_value, max_value, alpha, beta):
"""
Beta distribution used for continuous actions. In particular, the Beta distribution
allows to bound action values with min and max values.
Args:
shape: Shape of actions
min_value: Min value of all actions for the given shape
max_value: Max value of all actions for the given shape
alpha: Concentration parameter of the Beta distribution
beta: Concentration parameter of the Beta distribution
"""
self.shape = shape
self.min_value = min_value
self.h = (max_value - min_value) / 2
self.alpha = alpha
self.beta = beta
def kl_divergence(self, other):
assert isinstance(other, Beta)
return other.log_norm - self.log_norm - tf.digamma(other.beta) * (other.beta - self.beta) - \
tf.digamma(other.alpha) * (other.alpha - self.alpha) + tf.digamma(other.sum) * (other.sum - self.sum)
def entropy(self):
return self.log_norm - (self.beta - 1.0) * tf.digamma(self.beta) - \
(self.alpha - 1.0) * tf.digamma(self.alpha) + ((self.sum - 2.0) * tf.digamma(self.sum))
@classmethod
def from_tensors(cls, parameters, deterministic):
self = cls(shape=None, min_value=None, max_value=None)
self.distribution = (self.alpha, self.beta) = parameters
self.deterministic = deterministic
return self
def create_tf_operations(self, x, deterministic):
# Flat mean and log standard deviation
flat_size = util.prod(self.shape)
self.alpha = layers['dense'](x=x, size=flat_size, bias=self.alpha, activation='softplus')
self.beta = layers['dense'](x=x, size=flat_size, bias=self.beta, activation='softplus')
self.sum = self.alpha + self.beta
self.mean = self.alpha / self.sum
self.log_norm = tf.lgamma(self.alpha) + tf.lgamma(self.beta) - tf.lgamma(self.sum)
self.distribution = (self.alpha, self.beta)
self.deterministic = deterministic
def log_probability(self, action):
return (self.alpha - 1.0) * tf.log(action) + (self.alpha - 1.0) * tf.log1p(-action) - self.log_norm
def sample(self):
deterministic = self.mean
alpha_sample = tf.random_gamma(shape=tf.shape(input=self.alpha), alpha=self.alpha)
beta_sample = tf.random_gamma(shape=tf.shape(input=self.beta), alpha=self.beta)
sample = alpha_sample / (alpha_sample + beta_sample)
return self.min_value + tf.where(condition=self.deterministic, x=deterministic, y=sample)
| Python | 0.000011 |
32fe6ea4fa2aa710d2627b6c19c0e25013b72a1d | Test fixture update. | awx/main/tests/functional/test_rbac_workflow.py | awx/main/tests/functional/test_rbac_workflow.py | import pytest
from awx.main.access import (
WorkflowJobTemplateAccess,
WorkflowJobTemplateNodeAccess,
WorkflowJobAccess,
# WorkflowJobNodeAccess
)
@pytest.fixture
def wfjt(workflow_job_template_factory, organization):
objects = workflow_job_template_factory('test_workflow', organization=organization, persisted=True)
return objects.workflow_job_template
@pytest.fixture
def wfjt_with_nodes(workflow_job_template_factory, organization, job_template):
objects = workflow_job_template_factory(
'test_workflow', organization=organization, workflow_job_template_nodes=[{'unified_job_template': job_template}], persisted=True)
return objects.workflow_job_template
@pytest.fixture
def wfjt_node(wfjt_with_nodes):
return wfjt_with_nodes.workflow_job_template_nodes.all()[0]
@pytest.fixture
def workflow_job(wfjt):
return wfjt.workflow_jobs.create(name='test_workflow')
@pytest.mark.django_db
class TestWorkflowJobTemplateAccess:
def test_random_user_no_edit(self, wfjt, rando):
access = WorkflowJobTemplateAccess(rando)
assert not access.can_change(wfjt, {'name': 'new name'})
def test_org_admin_edit(self, wfjt, org_admin):
access = WorkflowJobTemplateAccess(org_admin)
assert access.can_change(wfjt, {'name': 'new name'})
def test_org_admin_role_inheritance(self, wfjt, org_admin):
assert org_admin in wfjt.admin_role
assert org_admin in wfjt.execute_role
assert org_admin in wfjt.read_role
def test_jt_blocks_copy(self, wfjt_with_nodes, org_admin):
"""I want to copy a workflow JT in my organization, but someone
included a job template that I don't have access to, so I can
not copy the WFJT as-is"""
access = WorkflowJobTemplateAccess(org_admin)
assert not access.can_add({'reference_obj': wfjt_with_nodes})
@pytest.mark.django_db
class TestWorkflowJobTemplateNodeAccess:
def test_jt_access_to_edit(self, wfjt_node, org_admin):
access = WorkflowJobTemplateNodeAccess(org_admin)
assert not access.can_change(wfjt_node, {'job_type': 'scan'})
@pytest.mark.django_db
class TestWorkflowJobAccess:
def test_wfjt_admin_delete(self, wfjt, workflow_job, rando):
wfjt.admin_role.members.add(rando)
access = WorkflowJobAccess(rando)
assert access.can_delete(workflow_job)
def test_cancel_your_own_job(self, wfjt, workflow_job, rando):
wfjt.execute_role.members.add(rando)
workflow_job.created_by = rando
workflow_job.save()
access = WorkflowJobAccess(rando)
assert access.can_cancel(workflow_job)
| import pytest
from awx.main.access import (
WorkflowJobTemplateAccess,
WorkflowJobTemplateNodeAccess,
WorkflowJobAccess,
# WorkflowJobNodeAccess
)
@pytest.fixture
def wfjt(workflow_job_template_factory, organization):
objects = workflow_job_template_factory('test_workflow', organization=organization, persisted=True)
return objects.workflow_job_template
@pytest.fixture
def wfjt_with_nodes(workflow_job_template_factory, organization, job_template):
objects = workflow_job_template_factory(
'test_workflow', organization=organization, workflow_job_template_nodes=[{'unified_job_template': job_template}], persisted=True)
return objects.workflow_job_template
@pytest.fixture
def wfjt_node(wfjt_with_nodes):
return wfjt_with_nodes.workflow_job_template_nodes.all()[0]
@pytest.fixture
def workflow_job(wfjt):
return wfjt.jobs.create(name='test_workflow')
@pytest.mark.django_db
class TestWorkflowJobTemplateAccess:
def test_random_user_no_edit(self, wfjt, rando):
access = WorkflowJobTemplateAccess(rando)
assert not access.can_change(wfjt, {'name': 'new name'})
def test_org_admin_edit(self, wfjt, org_admin):
access = WorkflowJobTemplateAccess(org_admin)
assert access.can_change(wfjt, {'name': 'new name'})
def test_org_admin_role_inheritance(self, wfjt, org_admin):
assert org_admin in wfjt.admin_role
assert org_admin in wfjt.execute_role
assert org_admin in wfjt.read_role
def test_jt_blocks_copy(self, wfjt_with_nodes, org_admin):
"""I want to copy a workflow JT in my organization, but someone
included a job template that I don't have access to, so I can
not copy the WFJT as-is"""
access = WorkflowJobTemplateAccess(org_admin)
assert not access.can_add({'reference_obj': wfjt_with_nodes})
@pytest.mark.django_db
class TestWorkflowJobTemplateNodeAccess:
def test_jt_access_to_edit(self, wfjt_node, org_admin):
access = WorkflowJobTemplateNodeAccess(org_admin)
assert not access.can_change(wfjt_node, {'job_type': 'scan'})
@pytest.mark.django_db
class TestWorkflowJobAccess:
def test_wfjt_admin_delete(self, wfjt, workflow_job, rando):
wfjt.admin_role.members.add(rando)
access = WorkflowJobAccess(rando)
assert access.can_delete(workflow_job)
def test_cancel_your_own_job(self, wfjt, workflow_job, rando):
wfjt.execute_role.members.add(rando)
workflow_job.created_by = rando
workflow_job.save()
access = WorkflowJobAccess(rando)
assert access.can_cancel(workflow_job)
| Python | 0 |
25ff2bcd545c7429dda5f3cd48ff8272c28d8965 | Complete recur sort w/ iter merge sol | lc0148_sort_list.py | lc0148_sort_list.py | """Leetcode 148. Sort List
Medium
URL: https://leetcode.com/problems/sort-list/
Sort a linked list in O(n log n) time using constant space complexity.
Example 1:
Input: 4->2->1->3
Output: 1->2->3->4
Example 2:
Input: -1->5->3->4->0
Output: -1->0->3->4->5
"""
# Definition for singly-linked list.
class ListNode(object):
def __init__(self, val):
self.val = val
self.next = None
class SolutionRecur(object):
def _merge_sorted_lists(self, l1, l2):
if not l1 and not l2:
return None
if not l1 or not l2:
return l1 or l2
prev = ListNode(None)
current = prev
while l1 and l2:
if l1.val <= l2.val:
current.next = l1
l1 = l1.next
else:
current.next = l2
l2 = l2.next
current = current.next
# Link the remaining non-empty list.
current.next = l1 or l2
return prev.next
def sortList(self, head):
"""
:type head: ListNode
:rtype: ListNode
Time complexity: O(n*logn).
Space complexity: O(logn), for stake call.
"""
# Apply recursive sort list with iterative merge sorted lists.
# Create base condition: no or just one node.
if not head or not head.next:
return head
# Use prev, slow & fast pointers to get middle node prev.
prev, slow, fast = None, head, head
while fast and fast.next:
prev = slow
slow = slow.next
fast = fast.next.next
# Split list into two by cutting the link of the 1st to the 2nd.
prev.next = None
# Recursively sort 1st and 2nd lists.
l1 = self.sortList(head)
l2 = self.sortList(slow)
# Merge two sorted lists into the one.
return self._merge_sorted_lists(l1, l2)
def main():
# Input: 4->2->1->3
# Output: 1->2->3->4
head = ListNode(4)
head.next = ListNode(2)
head.next.next = ListNode(1)
head.next.next.next = ListNode(3)
sorted_head = SolutionRecur().sortList(head)
print (sorted_head.val, sorted_head.next.val,
sorted_head.next.next.val,
sorted_head.next.next.next.val)
# Input: -1->5->3->4->0
# Output: -1->0->3->4->5
if __name__ == '__main__':
main()
| """Leetcode 148. Sort List
Medium
URL: https://leetcode.com/problems/sort-list/
Sort a linked list in O(n log n) time using constant space complexity.
Example 1:
Input: 4->2->1->3
Output: 1->2->3->4
Example 2:
Input: -1->5->3->4->0
Output: -1->0->3->4->5
"""
# Definition for singly-linked list.
class ListNode(object):
def __init__(self, val):
self.val = val
self.next = None
class Solution(object):
def sortList(self, head):
"""
:type head: ListNode
:rtype: ListNode
"""
pass
def main():
pass
if __name__ == '__main__':
main()
| Python | 0 |
d3d8267588b60f77b6c55ffd8461ddfa163501da | add dependency for protobuf | setup.py | setup.py | #!/usr/bin/env python
# Copyright 2011 The fast-python-pb Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Setup script for fast python protocol buffers."""
try:
from setuptools import setup
except ImportError:
from distutils.core import setup
setup(name='fastpb',
version='0.1',
description='Fast Python Protocol Buffers',
license='Apache',
author='Greplin, Inc.',
author_email='opensource@greplin.com',
url='https://www.github.com/Cue/fast-python-pb',
package_dir={'': 'src'},
packages=['fastpb'],
package_data={
'fastpb': ['template/*'],
},
entry_points={
'console_scripts': [
'protoc-gen-fastpython = fastpb.generator:main'
]
},
install_requires=['ez-setup==0.9', 'protobuf >= 2.3.0', 'jinja2 >= 2.0'],
)
| #!/usr/bin/env python
# Copyright 2011 The fast-python-pb Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Setup script for fast python protocol buffers."""
try:
from setuptools import setup
except ImportError:
from distutils.core import setup
setup(name='fastpb',
version='0.1',
description='Fast Python Protocol Buffers',
license='Apache',
author='Greplin, Inc.',
author_email='opensource@greplin.com',
url='https://www.github.com/Cue/fast-python-pb',
package_dir={'': 'src'},
packages=['fastpb'],
package_data = {
'fastpb': ['template/*'],
},
entry_points = {
'console_scripts': [
'protoc-gen-fastpython = fastpb.generator:main'
]
},
install_requires=['protobuf >= 2.3.0', 'jinja2 >= 2.0'],
)
| Python | 0 |
2035099fd78b1d0906403ec836a4b7e7144a6bbc | bump to 0.0.6 | swingtix/bookkeeper/__init__.py | swingtix/bookkeeper/__init__.py | __VERSION__='0.0.6'
| __VERSION__='0.0.5'
| Python | 0.000005 |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.