| commit (stringlengths 40–40) | subject (stringlengths 1–3.25k) | old_file (stringlengths 4–311) | new_file (stringlengths 4–311) | old_contents (stringlengths 0–26.3k) | lang (stringclasses, 3 values) | proba (float64, 0–1) | diff (stringlengths 0–7.82k) |
|---|---|---|---|---|---|---|---|
5abf8cdea0bd8c5d9d813cb081f4afd2c6715a81
|
Fix up tests
|
payments/tests/test_customer.py
|
payments/tests/test_customer.py
|
import decimal
from django.test import TestCase
from django.contrib.auth.models import User
from mock import patch
from ..models import Customer, Charge
class TestCustomer(TestCase):
def setUp(self):
self.user = User.objects.create_user(username="patrick")
self.customer = Customer.objects.create(
user=self.user,
stripe_id="cus_xxxxxxxxxxxxxxx",
card_fingerprint="YYYYYYYY",
card_last_4="2342",
card_kind="Visa"
)
@patch("stripe.Customer.retrieve")
def test_customer_purge_leaves_customer_record(self, CustomerRetrieveMock):
self.customer.purge()
customer = Customer.objects.get(stripe_id=self.customer.stripe_id)
self.assertTrue(customer.user is None)
self.assertTrue(customer.card_fingerprint == "")
self.assertTrue(customer.card_last_4 == "")
self.assertTrue(customer.card_kind == "")
self.assertTrue(User.objects.filter(pk=self.user.pk).exists())
@patch("stripe.Customer.retrieve")
def test_customer_delete_same_as_purge(self, CustomerRetrieveMock):
self.customer.delete()
customer = Customer.objects.get(stripe_id=self.customer.stripe_id)
self.assertTrue(customer.user is None)
self.assertTrue(customer.card_fingerprint == "")
self.assertTrue(customer.card_last_4 == "")
self.assertTrue(customer.card_kind == "")
self.assertTrue(User.objects.filter(pk=self.user.pk).exists())
def test_change_charge(self):
self.assertTrue(self.customer.can_charge())
@patch("stripe.Customer.retrieve")
def test_cannot_charge(self, CustomerRetrieveMock):
self.customer.delete()
self.assertFalse(self.customer.can_charge())
def test_charge_accepts_only_decimals(self):
with self.assertRaises(ValueError):
self.customer.charge(10)
@patch("stripe.Charge.retrieve")
def test_record_charge(self, RetrieveMock):
RetrieveMock.return_value = {
"id": "ch_XXXXXX",
"card": {
"last4": "4323",
"type": "Visa"
},
"amount": 1000,
"paid": True,
"refunded": False,
"fee": 499,
"dispute": None,
"created": 1363911708,
"customer": "cus_xxxxxxxxxxxxxxx"
}
obj = self.customer.record_charge("ch_XXXXXX")
self.assertEquals(Charge.objects.get(stripe_id="ch_XXXXXX").pk, obj.pk)
self.assertEquals(obj.paid, True)
self.assertEquals(obj.disputed, False)
self.assertEquals(obj.refunded, False)
self.assertEquals(obj.amount_refunded, None)
@patch("stripe.Charge.retrieve")
def test_refund_charge(self, RetrieveMock):
charge = Charge.objects.create(
stripe_id="ch_XXXXXX",
customer=self.customer,
card_last_4="4323",
card_kind="Visa",
amount=decimal.Decimal("10.00"),
paid=True,
refunded=False,
fee=decimal.Decimal("4.99"),
disputed=False
)
RetrieveMock.return_value.refund.return_value = {
"id": "ch_XXXXXX",
"card": {
"last4": "4323",
"type": "Visa"
},
"amount": 1000,
"paid": True,
"refunded": True,
"amount_refunded": 1000,
"fee": 499,
"dispute": None,
"created": 1363911708,
"customer": "cus_xxxxxxxxxxxxxxx"
}
charge.refund()
charge2 = Charge.objects.get(stripe_id="ch_XXXXXX")
self.assertEquals(charge2.refunded, True)
self.assertEquals(charge2.amount_refunded, decimal.Decimal("10.00"))
def test_calculate_refund_amount_full_refund(self):
charge = Charge(
stripe_id="ch_111111",
customer=self.customer,
amount=decimal.Decimal("500.00")
)
self.assertEquals(
charge.calculate_refund_amount(),
decimal.Decimal("500.00")
)
def test_calculate_refund_amount_partial_refund(self):
charge = Charge(
stripe_id="ch_111111",
customer=self.customer,
amount=decimal.Decimal("500.00")
)
self.assertEquals(
charge.calculate_refund_amount(amount=decimal.Decimal("300.00")),
decimal.Decimal("300.00")
)
def test_calculate_refund_above_max_refund(self):
charge = Charge(
stripe_id="ch_111111",
customer=self.customer,
amount=decimal.Decimal("500.00")
)
self.assertEquals(
charge.calculate_refund_amount(amount=decimal.Decimal("600.00")),
decimal.Decimal("500.00")
)
@patch("stripe.Charge.retrieve")
@patch("stripe.Charge.create")
def test_charge_converts_dollars_into_cents(self, ChargeMock, RetrieveMock):
ChargeMock.return_value.id = "ch_XXXXX"
RetrieveMock.return_value = {
"id": "ch_XXXXXX",
"card": {
"last4": "4323",
"type": "Visa"
},
"amount": 1000,
"paid": True,
"refunded": False,
"fee": 499,
"dispute": None,
"created": 1363911708,
"customer": "cus_xxxxxxxxxxxxxxx"
}
self.customer.charge(
amount=decimal.Decimal("10.00")
)
_, kwargs = ChargeMock.call_args
self.assertEquals(kwargs["amount"], 1000)
|
Python
| 0.000319
|
@@ -4139,33 +4139,13 @@
-decimal.Decimal("
500
-.
00
-")
\n
@@ -4487,33 +4487,13 @@
-decimal.Decimal("
300
-.
00
-")
\n
@@ -4830,33 +4830,13 @@
-decimal.Decimal("
500
-.
00
-")
\n
|
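Decoded, the three hunks above each strip the `decimal.Decimal("...")` wrapper and the decimal point from a refund amount, turning dollars into integer cents. A sketch of the full-refund assertion as it likely reads after the commit (reconstructed from the first hunk and the old file; the post-commit file is not shown):

# decimal.Decimal("500.00") becomes 50000 (cents)
self.assertEquals(
    charge.calculate_refund_amount(),
    50000
)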
43d53871d3c391d85be88da3ab94df1fbd5eff64
|
Update to use existing strip html function
|
website/search/util.py
|
website/search/util.py
|
import re
import copy
import webcolors
from werkzeug.contrib.atom import AtomFeed
COLORBREWER_COLORS = [(166, 206, 227), (31, 120, 180), (178, 223, 138), (51, 160, 44), (251, 154, 153), (227, 26, 28), (253, 191, 111), (255, 127, 0), (202, 178, 214), (106, 61, 154), (255, 255, 153), (177, 89, 40)]
RE_XML_ILLEGAL = u'([\u0000-\u0008\u000b-\u000c\u000e-\u001f\ufffe-\uffff])' + \
u'|' + \
u'([%s-%s][^%s-%s])|([^%s-%s][%s-%s])|([%s-%s]$)|(^[%s-%s])' % \
(unichr(0xd800), unichr(0xdbff), unichr(0xdc00), unichr(0xdfff),
unichr(0xd800), unichr(0xdbff), unichr(0xdc00), unichr(0xdfff),
unichr(0xd800), unichr(0xdbff), unichr(0xdc00), unichr(0xdfff))
RE_XML_ILLEGAL_COMPILED = re.compile(RE_XML_ILLEGAL)
RE_HTML_TAG_COMPILED = re.compile(r'<[^>]+>')
def build_query(q='*', start=0, size=10, sort=None):
query = {
'query': build_query_string(q),
'from': start,
'size': size,
}
if sort:
query['sort'] = [
{
sort: 'desc'
}
]
return query
def build_query_string(q):
return {
'query_string': {
'default_field': '_all',
'query': q,
'analyze_wildcard': True,
'lenient': True # TODO, may not want to do this
}
}
def generate_color():
# TODO - this might not be the optimal way - copy is expensive
colors_to_generate = copy.copy(COLORBREWER_COLORS)
colors_used = []
while True:
try:
color = colors_to_generate.pop(0)
colors_used.append(color)
except IndexError:
new_colors = get_new_colors(colors_used)
colors_to_generate = new_colors
colors_used = []
yield webcolors.rgb_to_hex(color)
def calculate_distance_between_colors(color1, color2):
""" Takes 2 color tupes and returns the average between them
"""
return ((color1[0] + color2[0]) / 2, (color1[1] + color2[1]) / 2, (color1[2] + color2[2]) / 2)
def get_new_colors(colors_used):
new_colors = []
for i in xrange(len(colors_used) - 1):
new_colors.append(calculate_distance_between_colors(colors_used[i], colors_used[i + 1]))
return new_colors
def create_atom_feed(name, data, query, size, start, url, to_atom):
if query == '*':
title_query = 'All'
else:
title_query = query
title = '{name}: Atom Feed for query: "{title_query}"'.format(name=name, title_query=title_query)
author = 'COS'
links = [
{'href': '{url}?page=1'.format(url=url), 'rel': 'first'},
{'href': '{url}?page={page}'.format(url=url, page=(start / size) + 2), 'rel': 'next'},
{'href': '{url}?page={page}'.format(url=url, page=(start / size)), 'rel': 'previous'}
]
links = links[1:-1] if (start / size) == 0 else links
feed = AtomFeed(
title=title,
feed_url=url,
author=author,
links=links
)
for doc in data:
feed.add(**to_atom(doc))
return feed
def html_and_illegal_unicode_replace(atom_element):
""" Replace an illegal for XML unicode character with nothing.
This fix thanks to Matt Harper from his blog post:
https://maxharp3r.wordpress.com/2008/05/15/pythons-minidom-xml-and-illegal-unicode-characters/
"""
if atom_element:
new_element = RE_XML_ILLEGAL_COMPILED.sub('', atom_element)
return RE_HTML_TAG_COMPILED.sub('', new_element)
return atom_element
|
Python
| 0
|
@@ -77,16 +77,62 @@
omFeed\n\n
+from website.util.sanitize import strip_html\n\n
\nCOLORBR
@@ -833,55 +833,8 @@
L)\n\n
-RE_HTML_TAG_COMPILED = re.compile(r'<[^>]+>')\n\n
\ndef
@@ -3471,37 +3471,19 @@
urn
-RE_HTML_TAG_COMPILED.sub('',
+strip_html(
new_
|
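Decoded, the diff imports the project-wide `strip_html` helper, deletes the local `RE_HTML_TAG_COMPILED` pattern, and swaps the final substitution over to it. A sketch of the resulting function, reconstructed from the hunks and the old file (an approximation; the new file itself is not shown):

from website.util.sanitize import strip_html

def html_and_illegal_unicode_replace(atom_element):
    if atom_element:
        new_element = RE_XML_ILLEGAL_COMPILED.sub('', atom_element)
        # strip_html replaces the old RE_HTML_TAG_COMPILED.sub('', new_element)
        return strip_html(new_element)
    return atom_element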
31b0b97590ce496ba22a39c396ff868c6f511637
|
install pre-commit
|
dacsspace/client.py
|
dacsspace/client.py
|
#!/usr/bin/env python3
import argparse
from configparser import ConfigParser
from asnake.aspace import ASpace
#published_only = False
class ArchivesSpaceClient:
"""Handles communication with ArchivesSpace."""
def __init__(self):
config = ConfigParser()
config.read("local_settings.cfg")
self.aspace = ASpace(baseurl=config.get('ArchivesSpace', 'baseURL'),
username=config.get('ArchivesSpace', 'user'),
password=config.get('ArchivesSpace', 'password'))
self.repo = self.aspace.repositories(config.get('ArchivesSpace', 'repository'))
def get_resources(self, published_only):
"""Returns data about resource records from AS.
Args:
published_only (boolean): Fetch only published records from AS
Returns:
resources (list): Full JSON of AS resource records
"""
if published_only is True:
for resource in self.repo.search.with_params(q='publish:true AND primary_type:resource'):
resource_json = resource.json()
return resource_json
else:
for resource in self.repo.search.with_params(q='primary_type:resource'):
resource_json = resource.json()
return resource_json
#return resource.publish
#return resource_json
#return resource.publish
#build in tests
#ArchivesSpaceClient().get_resources(published_only)
|
Python
| 0
|
@@ -19,25 +19,8 @@
hon3
-\n\nimport argparse
\nfro
@@ -54,16 +54,17 @@
gParser\n
+\n
from asn
@@ -93,32 +93,8 @@
ce\n\n
-#published_only = False\n
\ncla
@@ -1269,156 +1269,10 @@
on\n\n
- #return resource.publish\n #return resource_json\n #return resource.publish\n\n #build in tests\n\n
#
+
Arch
|
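Decoded, the diff drops the unused `import argparse`, the commented-out `published_only` assignment, and the block of commented-out returns near the end of the file. The resulting module header, reconstructed from the first three hunks:

#!/usr/bin/env python3
from configparser import ConfigParser

from asnake.aspace import ASpace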
7cfc16d016906e1437580acff42e503d3b2fa188
|
Change %s to .format
|
src/pip/_internal/distributions/source/legacy.py
|
src/pip/_internal/distributions/source/legacy.py
|
# The following comment should be removed at some point in the future.
# mypy: disallow-untyped-defs=False
import logging
from pip._internal.build_env import BuildEnvironment
from pip._internal.distributions.base import AbstractDistribution
from pip._internal.exceptions import InstallationError
from pip._internal.utils.subprocess import runner_with_spinner_message
logger = logging.getLogger(__name__)
class SourceDistribution(AbstractDistribution):
"""Represents a source distribution.
The preparation step for these needs metadata for the packages to be
generated, either using PEP 517 or using the legacy `setup.py egg_info`.
NOTE from @pradyunsg (14 June 2019)
I expect SourceDistribution class will need to be split into
`legacy_source` (setup.py based) and `source` (PEP 517 based) when we start
bringing logic for preparation out of InstallRequirement into this class.
"""
def get_pkg_resources_distribution(self):
return self.req.get_dist()
def prepare_distribution_metadata(self, finder, build_isolation):
# Prepare for building. We need to:
# 1. Load pyproject.toml (if it exists)
# 2. Set up the build environment
self.req.load_pyproject_toml()
should_isolate = self.req.use_pep517 and build_isolation
if should_isolate:
self._setup_isolation(finder)
self.req.prepare_metadata()
self.req.assert_source_matches_version()
def _setup_isolation(self, finder):
def _raise_conflicts(conflicting_with, conflicting_reqs):
format_string = (
"Some build dependencies for {requirement} "
"conflict with {conflicting_with}: {description}."
)
error_message = format_string.format(
requirement=self.req,
conflicting_with=conflicting_with,
description=', '.join(
'%s is incompatible with %s' % (installed, wanted)
for installed, wanted in sorted(conflicting)
)
)
raise InstallationError(error_message)
# Isolate in a BuildEnvironment and install the build-time
# requirements.
self.req.build_env = BuildEnvironment()
self.req.build_env.install_requirements(
finder, self.req.pyproject_requires, 'overlay',
"Installing build dependencies"
)
conflicting, missing = self.req.build_env.check_requirements(
self.req.requirements_to_check
)
if conflicting:
_raise_conflicts("PEP 517/518 supported requirements",
conflicting)
if missing:
logger.warning(
"Missing build requirements in pyproject.toml for %s.",
self.req,
)
logger.warning(
"The project does not specify a build backend, and "
"pip cannot fall back to setuptools without %s.",
" and ".join(map(repr, sorted(missing)))
)
# Install any extra build dependencies that the backend requests.
# This must be done in a second pass, as the pyproject.toml
# dependencies must be installed before we can call the backend.
with self.req.build_env:
runner = runner_with_spinner_message(
"Getting requirements to build wheel"
)
backend = self.req.pep517_backend
with backend.subprocess_runner(runner):
reqs = backend.get_requires_for_build_wheel()
conflicting, missing = self.req.build_env.check_requirements(reqs)
if conflicting:
_raise_conflicts("the backend dependencies", conflicting)
self.req.build_env.install_requirements(
finder, missing, 'normal',
"Installing backend dependencies"
)
|
Python
| 0.000007
|
@@ -1943,18 +1943,18 @@
'
-%s
+{}
is inco
@@ -1971,14 +1971,18 @@
ith
-%s' %
+{}'.format
(ins
|
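Decoded, the two hunks convert the incompatibility message from percent formatting to `str.format`, matching the commit subject. The resulting expression, reconstructed from the hunks and the old file:

description=', '.join(
    '{} is incompatible with {}'.format(installed, wanted)
    for installed, wanted in sorted(conflicting)
)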
d96041d38cc9f0cae1a96cfafd6f1cd781844dcc
|
Allow users to login with the username or the email
|
wger/core/api/views.py
|
wger/core/api/views.py
|
# -*- coding: utf-8 -*-
# This file is part of wger Workout Manager.
#
# wger Workout Manager is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# wger Workout Manager is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with Workout Manager. If not, see <http://www.gnu.org/licenses/>.
# Standard Library
import logging
# Django
from django.contrib.auth.models import User
# Third Party
from rest_framework import (
status,
viewsets,
)
from rest_framework.decorators import action
from rest_framework.permissions import AllowAny
from rest_framework.response import Response
# wger
from wger import (
MIN_APP_VERSION,
get_version,
)
from wger.core.api.permissions import AllowRegisterUser
from wger.core.api.serializers import (
DaysOfWeekSerializer,
LanguageSerializer,
LicenseSerializer,
RepetitionUnitSerializer,
UserApiSerializer,
UsernameSerializer,
UserprofileSerializer,
UserRegistrationSerializer,
WeightUnitSerializer,
)
from wger.core.models import (
DaysOfWeek,
Language,
License,
RepetitionUnit,
UserProfile,
WeightUnit,
)
from wger.utils.api_token import create_token
from wger.utils.permissions import (
UpdateOnlyPermission,
WgerPermission,
)
logger = logging.getLogger(__name__)
class UserProfileViewSet(viewsets.ModelViewSet):
"""
API endpoint for workout objects
"""
is_private = True
serializer_class = UserprofileSerializer
permission_classes = (WgerPermission, UpdateOnlyPermission)
ordering_fields = '__all__'
def get_queryset(self):
"""
Only allow access to appropriate objects
"""
return UserProfile.objects.filter(user=self.request.user)
def get_owner_objects(self):
"""
Return objects to check for ownership permission
"""
return [(User, 'user')]
@action(detail=True)
def username(self, request, pk):
"""
Return the username
"""
user = self.get_object().user
return Response(UsernameSerializer(user).data)
class ApplicationVersionView(viewsets.ViewSet):
"""
Returns the application's version
"""
permission_classes = (AllowAny, )
@staticmethod
def get(request):
return Response(get_version())
class RequiredApplicationVersionView(viewsets.ViewSet):
"""
Returns the minimum required version of flutter app to access this server
"""
permission_classes = (AllowAny, )
@staticmethod
def get(request):
return Response(get_version(MIN_APP_VERSION, True))
class UserAPILoginView(viewsets.ViewSet):
"""
API endpoint for api user objects
"""
permission_classes = (AllowAny, )
queryset = User.objects.all()
serializer_class = UserApiSerializer
throttle_scope = 'login'
def get(self, request):
return Response({'message': "You must send a 'username' and 'password' via POST"})
def post(self, request):
data = request.data
serializer = self.serializer_class(data=data)
serializer.is_valid(raise_exception=True)
username = serializer.data["username"]
password = serializer.data["password"]
try:
user = User.objects.get(username=username)
except User.DoesNotExist:
logger.info(f"Tried logging via API with unknown user: '{username}'")
return Response(
{'detail': 'Username or password unknown'},
status=status.HTTP_401_UNAUTHORIZED,
)
if user.check_password(password):
token = create_token(user)
return Response({'token': token.key}, status=status.HTTP_200_OK)
else:
logger.info(f"User '{username}' tried logging via API with a wrong password")
return Response(
{'detail': 'Username or password unknown'},
status=status.HTTP_401_UNAUTHORIZED,
)
class UserAPIRegistrationViewSet(viewsets.ViewSet):
"""
API endpoint
"""
permission_classes = (AllowRegisterUser, )
serializer_class = UserRegistrationSerializer
def get_queryset(self):
"""
Only allow access to appropriate objects
"""
return UserProfile.objects.filter(user=self.request.user)
def post(self, request):
data = request.data
serializer = self.serializer_class(data=data)
serializer.is_valid(raise_exception=True)
user = serializer.save()
user.userprofile.added_by = request.user
user.userprofile.save()
token = create_token(user)
return Response(
{
'message': 'api user successfully registered',
'token': token.key
},
status=status.HTTP_201_CREATED
)
class LanguageViewSet(viewsets.ReadOnlyModelViewSet):
"""
API endpoint for workout objects
"""
queryset = Language.objects.all()
serializer_class = LanguageSerializer
ordering_fields = '__all__'
filterset_fields = ('full_name', 'short_name')
class DaysOfWeekViewSet(viewsets.ReadOnlyModelViewSet):
"""
API endpoint for workout objects
"""
queryset = DaysOfWeek.objects.all()
serializer_class = DaysOfWeekSerializer
ordering_fields = '__all__'
filterset_fields = ('day_of_week', )
class LicenseViewSet(viewsets.ReadOnlyModelViewSet):
"""
API endpoint for workout objects
"""
queryset = License.objects.all()
serializer_class = LicenseSerializer
ordering_fields = '__all__'
filterset_fields = (
'full_name',
'short_name',
'url',
)
class RepetitionUnitViewSet(viewsets.ReadOnlyModelViewSet):
"""
API endpoint for repetition units objects
"""
queryset = RepetitionUnit.objects.all()
serializer_class = RepetitionUnitSerializer
ordering_fields = '__all__'
filterset_fields = ('name', )
class WeightUnitViewSet(viewsets.ReadOnlyModelViewSet):
"""
API endpoint for weight units objects
"""
queryset = WeightUnit.objects.all()
serializer_class = WeightUnitSerializer
ordering_fields = '__all__'
filterset_fields = ('name', )
|
Python
| 0
|
@@ -1427,24 +1427,66 @@
rializer,\n)\n
+from wger.core.forms import UserLoginForm\n
from wger.co
@@ -3598,17 +3598,16 @@
n=True)\n
-\n
@@ -3645,24 +3645,25 @@
rname"]\n
+\n
password
@@ -3658,73 +3658,43 @@
-password = serializer.data["password"]\n\n try:\n
+# Try to retrieve the user\n
user
@@ -3693,83 +3693,81 @@
-user
+form
= User
-.objects.get(username=username)\n except User.DoesNotExist
+LoginForm(data=serializer.data)\n if not form.is_valid()
:\n
@@ -4007,54 +4007,8 @@
)\n\n
- if user.check_password(password):\n
@@ -4036,18 +4036,25 @@
ken(
-user)\n
+form.get_user())\n
@@ -4126,268 +4126,8 @@
OK)\n
- else:\n logger.info(f"User '{username}' tried logging via API with a wrong password")\n return Response(\n {'detail': 'Username or password unknown'},\n status=status.HTTP_401_UNAUTHORIZED,\n )\n
\n\ncl
|
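Decoded, the diff replaces the manual `User.objects.get` plus `check_password` flow with `wger.core.forms.UserLoginForm`, which (per the commit subject) accepts a username or an email. A sketch of the resulting `post` method, reconstructed from the hunks and the old file (an approximation, not the post-commit file):

from wger.core.forms import UserLoginForm

def post(self, request):
    data = request.data
    serializer = self.serializer_class(data=data)
    serializer.is_valid(raise_exception=True)
    username = serializer.data["username"]

    # Try to retrieve the user
    form = UserLoginForm(data=serializer.data)
    if not form.is_valid():
        logger.info(f"Tried logging via API with unknown user: '{username}'")
        return Response(
            {'detail': 'Username or password unknown'},
            status=status.HTTP_401_UNAUTHORIZED,
        )

    token = create_token(form.get_user())
    return Response({'token': token.key}, status=status.HTTP_200_OK)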
612b5731a38823e06e567c78a5094ad18dd30b51
|
add online tool
|
dashlib/mnb_misc.py
|
dashlib/mnb_misc.py
|
import sys
import os
sys.path.append(os.path.join(os.path.dirname(__file__), '.'))
import time
def clear_screen():
os.system('clear')
def check_version():
from mnb_explorer import get_version_txt
cur_version = get_dashmnbversion()
git_version = get_version_txt()
if ((cur_version.get('major') != git_version.get('major')) or \
(cur_version.get('minor') != git_version.get('minor')) or \
(cur_version.get('fix') != git_version.get('fix'))):
print('\t*** New version is available, ple update ! do git pull\n')
if git_version.get('msgs', None):
print('\t*** %s\n\n' % git_version.get('msgs', None))
def logo_show():
from pyfiglet import Figlet
from config import MAINNET
from config import MOVE_1K_COLLATERAL
f = Figlet(font='slant')
#f = Figlet(font='small')
print(f.renderText('Dash Masternode with HW Wallet'))
#print('\n\t\t\tdonation : xxxxxxxxxx')
print('\t\t\tby : chaeplin\n')
check_version()
print('Network : ' + ('MAINNET' if MAINNET else 'TESTNET'))
if MOVE_1K_COLLATERAL:
print()
print('**** MOVE_1K_COLLATERAL is True *******')
print()
time.sleep(5)
else:
time.sleep(1)
# clear_screen()
def get_xferblockcount_cache(getblock=False):
from config import MAINNET
import simplejson as json
xferblockcount_cache_abs_path = os.path.join(
os.path.dirname(os.path.abspath(__file__)), '../cache/' + ('MAINNET' if MAINNET else 'TESTNET') + '-xferblockcount.dat')
if getblock:
xferblockcount = 0
if os.path.exists(xferblockcount_cache_abs_path):
with open(xferblockcount_cache_abs_path) as data_file:
xferblockcount = json.load(data_file)
return xferblockcount
else:
return xferblockcount_cache_abs_path
def get_txidtxidn(txid, txidn):
if txid is None or txidn is None:
return None
else:
return txid + '-' + str(txidn)
def print_mnlist(mnconfig, ipmatch, mnstatus):
print(mnconfig.get('alias') + '\t' + mnconfig.get('ipport') + ':' +
ipmatch + '\t' + mnconfig.get('collateral_address') + ' ' + mnstatus)
def print_mnstatus(mn_config, mns, mna):
print()
print('[masternodes status]')
print('alias\tip (m: ip/port match)\tcollateral address\t\t status')
for m in mn_config:
mna_ip = mna.get(m.get('collateral_txidtxidn', '-------'), '-')
mns_status = mns.get(m.get('collateral_txidtxidn', '-------'), '-')
if m.get('ipport') != mna_ip:
ipmatch = '-'
else:
ipmatch = 'm'
print_mnlist(m, ipmatch, mns_status)
print()
def get_function_name():
return sys._getframe(1).f_code.co_name
def get_caller_name():
return sys._getframe(2).f_code.co_name
def get_dashmnbversion():
import simplejson as json
version_file = os.path.join( os.path.dirname( os.path.abspath(__file__)), 'version.txt')
with open(version_file) as data_file:
data = json.load(data_file)
return data
def print_err_exit(
caller_name,
function_name,
err_msg,
errargs=None):
VERSION = get_dashmnbversion()
msg = '\n\n\tversion : %s.%s.%s\n' % (VERSION.get('major'), VERSION.get('minor'), VERSION.get('fix'))
msg += '\tcaller : %s\n' % caller_name
msg += '\tfunction : %s\n' % function_name
if errargs:
msg += '\terr : %s' % str(errargs)
msg += '\t===> %s\n' % err_msg
# if tunnel:
# os.kill(tunnel, signal.SIGTERM)
raise SystemExit(msg)
def now():
return int(time.time())
def printdbg(str):
ts = time.strftime('%Y-%m-%d %H:%M:%S', time.gmtime(now()))
logstr = "{} {}".format(ts, str)
if os.environ.get('DASHMNB_DEBUG', None):
print(logstr)
def print_hw_wallet_check():
print('---> check hw wallet, check message on screen and press button')
print('\tif PIN protected, wallet ask your PIN(once per session)')
print('\tif Passphrase protected, wallet ask your Passphrase(once per session)')
print('\tcheck message on screen and press button on hw wallet to proceed(all signing)\n')
# end
|
Python
| 0
|
@@ -2710,16 +2710,96 @@
print(
+'\n* be sure to check masternode status again using online tools like dashninja'
)\n\n\ndef
|
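Decoded, the hunk fills in the bare `print()` at the end of `print_mnstatus` with a reminder, which is the whole "add online tool" change. Reconstructed result:

print_mnlist(m, ipmatch, mns_status)
print('\n* be sure to check masternode status again using online tools like dashninja')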
0ca727f0ce5877ba2ca3ef74c9309c752a51fbf6
|
Fix enable action on plugins
|
src/sentry/web/frontend/project_plugin_enable.py
|
src/sentry/web/frontend/project_plugin_enable.py
|
from __future__ import absolute_import
from django.core.urlresolvers import reverse
from sentry.plugins import plugins
from sentry.web.frontend.base import ProjectView
class ProjectPluginEnableView(ProjectView):
required_scope = 'project:write'
def post(self, request, organization, team, project, slug):
try:
plugin = plugins.get(slug)
except KeyError:
return self.redirect(reverse('sentry-configure-project-plugin', args=[project.organization.slug, project.slug, slug]))
if not plugin.is_enabled(project):
return self.redirect(reverse('sentry-configure-project-plugin', args=[project.organization.slug, project.slug, slug]))
plugin.enable(project=project)
return self.redirect(reverse('sentry-configure-project-plugin', args=[project.organization.slug, project.slug, slug]))
|
Python
| 0
|
@@ -535,12 +535,8 @@
if
-not
plug
|
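The hunk deletes a single `not`: previously the view redirected without enabling whenever the plugin was still disabled, so the enable action only ever ran on already-enabled plugins. The corrected guard, reconstructed from the hunk and the old file:

if plugin.is_enabled(project):
    # already enabled: nothing to do, just redirect back
    return self.redirect(reverse('sentry-configure-project-plugin',
                                 args=[project.organization.slug, project.slug, slug]))

plugin.enable(project=project)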
0599b259ed08121160196734f7212dc7fa33149f
|
Remove execute_auth_api_request method
|
devicehive/token.py
|
devicehive/token.py
|
from devicehive.api_request import ApiRequest
from devicehive.api_response import ApiResponseError
class Token(object):
"""Token class."""
AUTH_HEADER_NAME = 'Authorization'
AUTH_HEADER_VALUE_PREFIX = 'Bearer '
def __init__(self, transport, auth):
self._transport = transport
self._login = auth.get('login')
self._password = auth.get('password')
self._refresh_token = auth.get('refresh_token')
self._access_token = auth.get('access_token')
def _login(self):
# TODO: implement token/login request.
# Set self._refresh_token and self._access_token after success login.
pass
def _auth(self):
api_request = ApiRequest(self._transport)
if not api_request.websocket_transport:
return
api_request.action('authenticate')
api_request.set('token', self._access_token)
api_request.execute('Authentication failure')
@property
def access_token(self):
return self._access_token
@property
def auth_header(self):
auth_header_name = self.AUTH_HEADER_NAME
auth_header_value = self.AUTH_HEADER_VALUE_PREFIX + self._access_token
return auth_header_name, auth_header_value
def execute_auth_api_request(self, api_request, error_message):
api_request.header(*self.auth_header)
try:
return api_request.execute(error_message)
except ApiResponseError as api_response_error:
if api_response_error.code != 401:
raise
self.auth()
api_request.header(*self.auth_header)
return api_request.execute(error_message)
def refresh(self):
api_request = ApiRequest(self._transport)
api_request.method('POST')
api_request.url('token/refresh')
api_request.action('token/refresh')
api_request.set('refreshToken', self._refresh_token)
tokens = api_request.execute('Token refresh failure')
self._access_token = tokens['accessToken']
def auth(self):
if self._refresh_token:
self.refresh()
else:
self._login()
self._auth()
|
Python
| 0.000025
|
@@ -43,61 +43,8 @@
est\n
-from devicehive.api_response import ApiResponseError\n
\n\ncl
@@ -1193,430 +1193,8 @@
ue\n\n
- def execute_auth_api_request(self, api_request, error_message):\n api_request.header(*self.auth_header)\n try:\n return api_request.execute(error_message)\n except ApiResponseError as api_response_error:\n if api_response_error.code != 401:\n raise\n self.auth()\n api_request.header(*self.auth_header)\n return api_request.execute(error_message)\n\n
|
e32acfcfa14ec785a3d716f60b61cc66d6c496ea
|
add celery
|
app/tasks.py
|
app/tasks.py
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# author: Kun Jia
# date: 20/06/2017
# email: me@jack003.com
from celery.schedules import crontab
from celery.task import periodic_task
from celery.utils.log import get_task_logger
from flask import current_app
from pymongo import MongoClient
from app import celery
from .api import get_list, get_content
logger = get_task_logger(__name__)
"""
Example:
crontab() Execute every minute.
crontab(minute=0, hour=0) Execute daily at midnight.
crontab(minute=0, hour='*/3') Execute every three hours: midnight, 3am, 6am, 9am, noon, 3pm, 6pm, 9pm.
crontab(minute=0, hour='0,3,6,9,12,15,18,21') Same as previous.
crontab(minute='*/15') Execute every 15 minutes.
crontab(day_of_week='sunday') Execute every minute (!) at Sundays.
crontab(minute='*', hour='*', day_of_week='sun') Same as previous.
crontab(minute='*/10', hour='3,17,22', day_of_week='thu,fri') Execute every ten minutes, but only between 3-4 am, 5-6 pm, and 10-11 pm on Thursdays or Fridays.
crontab(minute=0, hour='*/2,*/3') Execute every even hour, and every hour divisible by three. This means: at every hour except: 1am, 5am, 7am, 11am, 1pm, 5pm, 7pm, 11pm
crontab(minute=0, hour='*/5') Execute hour divisible by 5. This means that it is triggered at 3pm, not 5pm (since 3pm equals the 24-hour clock value of “15”, which is divisible by 5).
crontab(minute=0, hour='*/3,8-17') Execute every hour divisible by 3, and every hour during office hours (8am-5pm).
crontab(0, 0, day_of_month='2') Execute on the second day of every month.
crontab(0, 0, day_of_month='2-30/3') Execute on every even numbered day.
crontab(0, 0, day_of_month='1-7,15-21') Execute on the first and third weeks of the month.
crontab(0, 0, day_of_month='11', month_of_year='5') Execute on the eleventh of May every year.
crontab(0, 0, month_of_year='*/3') Execute on the first month of every quarter.
"""
# @periodic_task(run_every=crontab())
# def test_beat():
# return 'beat ok'
#
#
# @celery.task
# def test_add(a, b):
# return a + b
@periodic_task(run_every=crontab(minute='*/2'))
def cache_data():
app = current_app._get_current_object()
client = MongoClient(app.config['MONGODB_SETTINGS']['host'], app.config['MONGODB_SETTINGS']['port'])
db = client.hacker_news
types = ['top', 'new', 'best', 'ask', 'show', 'job']
for i, t in enumerate(types):
dlist = get_list(t)
dcontent = get_content(dlist)
data = {'_id': i + 1, 'stype': t, 'dlist': dlist, 'dcontent': dcontent}
db.cache.update({'_id': data['_id']}, data, True)
client.close()
return True
|
Python
| 0.999861
|
@@ -2497,17 +2497,17 @@
e': t, '
-d
+s
list': d
@@ -2513,17 +2513,17 @@
dlist, '
-d
+s
content'
|
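The two one-character hunks rename the cached dictionary keys from `dlist`/`dcontent` to `slist`/`scontent`; the values still come from the local `dlist` and `dcontent` variables. The resulting line, reconstructed from the hunks:

data = {'_id': i + 1, 'stype': t, 'slist': dlist, 'scontent': dcontent}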
0237fb8114f5a8423d39f44b2882d5dbf10954d7
|
make .seen replies for CTCP ACTIONs say "doing nick message"; leaves .seen replies for PRIVMSG to channel the same ("saying message").
|
willie/modules/seen.py
|
willie/modules/seen.py
|
# coding=utf8
"""
seen.py - Willie Seen Module
Copyright 2008, Sean B. Palmer, inamidst.com
Copyright © 2012, Elad Alfassa <elad@fedoraproject.org>
Licensed under the Eiffel Forum License 2.
http://willie.dftba.net
"""
from __future__ import unicode_literals
import time
import datetime
from willie.tools import Identifier
from willie.tools.time import get_timezone, format_time
from willie.module import commands, rule, priority, thread
@commands('seen')
def seen(bot, trigger):
"""Reports when and where the user was last seen."""
if not trigger.group(2):
bot.say(".seen <nick> - Reports when <nick> was last seen.")
return
nick = trigger.group(2).strip()
timestamp = bot.db.get_nick_value(nick, 'seen_timestamp')
if timestamp:
channel = bot.db.get_nick_value(nick, 'seen_channel')
message = bot.db.get_nick_value(nick, 'seen_message')
tz = get_timezone(bot.db, bot.config, None, trigger.nick,
trigger.sender)
saw = datetime.datetime.utcfromtimestamp(timestamp)
timestamp = format_time(bot.db, bot.config, tz, trigger.nick,
trigger.sender, saw)
msg = "I last saw {} at {}".format(nick, timestamp)
if Identifier(channel) == trigger.sender:
msg = msg + " in here, saying " + message
else:
msg += " in another channel."
bot.say(str(trigger.nick) + ': ' + msg)
else:
bot.say("Sorry, I haven't seen {} around.".format(nick))
@thread(False)
@rule('(.*)')
@priority('low')
def note(bot, trigger):
if not trigger.is_privmsg:
bot.db.set_nick_value(trigger.nick, 'seen_timestamp', time.time())
bot.db.set_nick_value(trigger.nick, 'seen_channel', trigger.sender)
bot.db.set_nick_value(trigger.nick, 'seen_message', trigger)
|
Python
| 0
|
@@ -886,16 +886,76 @@
essage')
+\n action = bot.db.get_nick_value(nick, 'seen_action')
\n\n
@@ -1350,16 +1350,117 @@
sender:\n
+\t if action:\n msg = msg + " in here, doing " + nick + " " + message\n\t else:\n
@@ -1505,16 +1505,16 @@
message\n
-
@@ -1930,32 +1930,32 @@
trigger.sender)\n
-
bot.db.s
@@ -2003,12 +2003,97 @@
', trigger)\n
+ bot.db.set_nick_value(trigger.nick, 'seen_action', 'intent' in trigger.tags)\n
|
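Decoded, the diff records whether the last line was a CTCP ACTION (`'intent' in trigger.tags`) under a new `seen_action` nick value and words the `.seen` reply accordingly. A sketch of the resulting logic, reconstructed from the hunks and the old file (indentation is approximate; the new file is not shown):

action = bot.db.get_nick_value(nick, 'seen_action')

if Identifier(channel) == trigger.sender:
    if action:
        msg = msg + " in here, doing " + nick + " " + message
    else:
        msg = msg + " in here, saying " + message

# and in note():
bot.db.set_nick_value(trigger.nick, 'seen_action', 'intent' in trigger.tags)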
b4af07754c64e915fcfc5fbec00389dee6a11020
|
disable one unit test on travis
|
_unittests/ut_helpgen/test_notebooks_api.py
|
_unittests/ut_helpgen/test_notebooks_api.py
|
"""
@brief test log(time=8s)
@author Xavier Dupre
"""
import sys
import os
import unittest
try:
import src
except ImportError:
path = os.path.normpath(
os.path.abspath(
os.path.join(
os.path.split(__file__)[0],
"..",
"..")))
if path not in sys.path:
sys.path.append(path)
import src
from src.pyquickhelper.loghelper import fLOG
from src.pyquickhelper.pycode import get_temp_folder
from src.pyquickhelper.helpgen import nb2slides, nb2html, nb2rst
from src.pyquickhelper.ipythonhelper import read_nb
class TestNotebookAPI (unittest.TestCase):
def test_convert_slides_api_html(self):
fLOG(
__file__,
self._testMethodName,
OutputPrint=__name__ == "__main__")
if sys.version_info[0] == 2:
return
path = os.path.abspath(os.path.split(__file__)[0])
fold = os.path.normpath(
os.path.join(
path,
"..",
"..",
"_doc",
"notebooks"))
nb = os.path.join(fold, "example_pyquickhelper.ipynb")
self.assertTrue(os.path.exists(nb))
nbr = read_nb(nb, kernel=False)
temp = get_temp_folder(__file__, "temp_nb_api_html")
outfile = os.path.join(temp, "out_nb_slides.slides.html")
res = nb2slides(nbr, outfile)
self.assertTrue(len(res) > 1)
for r in res:
self.assertTrue(os.path.exists(r))
outfile = os.path.join(temp, "out_nb_slides.html")
res = nb2html(nbr, outfile)
self.assertEqual(len(res), 1)
for r in res:
self.assertTrue(os.path.exists(r))
def test_convert_slides_api_rst(self):
fLOG(
__file__,
self._testMethodName,
OutputPrint=__name__ == "__main__")
if sys.version_info[0] == 2:
return
path = os.path.abspath(os.path.split(__file__)[0])
fold = os.path.normpath(
os.path.join(
path,
"..",
"..",
"_doc",
"notebooks"))
nb = os.path.join(fold, "example_pyquickhelper.ipynb")
self.assertTrue(os.path.exists(nb))
nbr = read_nb(nb, kernel=False)
temp = get_temp_folder(__file__, "temp_nb_api_rst")
outfile = os.path.join(temp, "out_nb_slides.rst")
res = nb2rst(nbr, outfile)
self.assertEqual(len(res), 1)
for r in res:
self.assertTrue(os.path.exists(r))
if __name__ == "__main__":
unittest.main()
|
Python
| 0
|
@@ -478,16 +478,39 @@
p_folder
+, is_travis_or_appveyor
\nfrom sr
@@ -1958,32 +1958,148 @@
return\n\n
+ if is_travis_or_appveyor() in ('travis', 'appveyor'):\n # no latex, no pandoc\n return\n\n
path = o
|
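Decoded, the diff imports `is_travis_or_appveyor` next to `get_temp_folder` and makes the tests return early on CI. The added guard, reconstructed from the hunks:

from src.pyquickhelper.pycode import get_temp_folder, is_travis_or_appveyor

if is_travis_or_appveyor() in ('travis', 'appveyor'):
    # no latex, no pandoc
    return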
b03b9276e48edfa53a70a46dc5779cd1de2299e0
|
add priv_dns_name sub command
|
aws_utils/ec2.py
|
aws_utils/ec2.py
|
#!/usr/bin/env python3
import sys
import argparse
import boto3
# --------------------------------------------------------------------------------
# arg parse
# --------------------------------------------------------------------------------
parser = argparse.ArgumentParser(description='ec2 utils')
parser.set_defaults(target='')
sub_parsers = parser.add_subparsers(title='sub commands')
# --------------------------------------------------------------------------------
# other functions
# --------------------------------------------------------------------------------
def getInstanceName(instance):
for tag in instance.tags:
if tag['Key'] == 'Name':
return tag['Value']
return None
def printIfMatchOrEmpty(ins, cond, out):
if cond == None or ins == cond:
print(out)
# --------------------------------------------------------------------------------
# sub commands
# --------------------------------------------------------------------------------
def ids(ns):
ec2 = boto3.resource('ec2')
for instance in ec2.instances.all():
printIfMatchOrEmpty(getInstanceName(instance), ns.name, instance.instance_id)
sub_parser = sub_parsers.add_parser('ids')
sub_parser.set_defaults(target='ids')
sub_parser.set_defaults(func=ids)
sub_parser.add_argument('--name')
def names(ns):
ec2 = boto3.resource('ec2')
for instance in ec2.instances.all():
name = ''
for tag in instance.tags:
if tag['Key'] == 'Name':
name = tag['Value']
break
print(name)
sub_parser = sub_parsers.add_parser('names')
sub_parser.set_defaults(target='names')
sub_parser.set_defaults(func=names)
def status(ns):
ec2 = boto3.resource('ec2')
for instance in ec2.instances.all():
name = getInstanceName(instance)
print("id : {0}, {2} ({1})".format(
instance.instance_id,
name,
instance.state['Name']
))
sub_parser = sub_parsers.add_parser('status')
sub_parser.set_defaults(target='status')
sub_parser.set_defaults(func=status)
def ip_pub(ns):
ec2 = boto3.resource('ec2')
for instance in ec2.instances.all():
printIfMatchOrEmpty(getInstanceName(instance), ns.name, instance.public_ip_address)
sub_parser = sub_parsers.add_parser('ip_pub')
sub_parser.set_defaults(target='ip_pub')
sub_parser.add_argument('--name')
sub_parser.set_defaults(func=ip_pub)
def start(ns):
ec2 = boto3.resource('ec2')
for instance in ec2.instances.all():
if ns.name and getInstanceName(instance) in ns.args:
instance.start()
elif instance.id in ns.args:
instance.start()
sub_parser = sub_parsers.add_parser('start')
sub_parser.set_defaults(target='start')
sub_parser.add_argument('--name', action='store_true')
sub_parser.add_argument('args')
sub_parser.set_defaults(func=start)
def stop(ns):
ec2 = boto3.resource('ec2')
for instance in ec2.instances.all():
if ns.name and getInstanceName(instance) in ns.args:
instance.stop()
elif instance.id in ns.args:
instance.stop()
sub_parser = sub_parsers.add_parser('stop')
sub_parser.set_defaults(target='stop')
sub_parser.add_argument('--name', action='store_true')
sub_parser.add_argument('args')
sub_parser.set_defaults(func=stop)
# --------------------------------------------------------------------------------
# main
# --------------------------------------------------------------------------------
def main():
namespace = parser.parse_args()
if namespace.target is not None and namespace.target:
namespace.func(namespace)
else:
parser.print_help()
if __name__ == '__main__':
main()
|
Python
| 0.000003
|
@@ -2437,16 +2437,370 @@
p_pub)\n\n
+def ip_pub(ns):\n ec2 = boto3.resource('ec2')\n for instance in ec2.instances.all():\n printIfMatchOrEmpty(getInstanceName(instance), ns.name, instance.private_dns_name)\n\nsub_parser = sub_parsers.add_parser('priv_dns_name')\nsub_parser.set_defaults(target='priv_dns_name')\nsub_parser.add_argument('--name')\nsub_parser.set_defaults(func=ip_pub)\n\n
def star
|
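Decoded, the diff clones the `ip_pub` sub command into a new `priv_dns_name` sub command that prints `instance.private_dns_name`. Note the added function is also named `ip_pub`, shadowing the earlier one, exactly as the hunk adds it. Reconstructed addition:

def ip_pub(ns):  # shadows the earlier ip_pub, as written in the hunk
    ec2 = boto3.resource('ec2')
    for instance in ec2.instances.all():
        printIfMatchOrEmpty(getInstanceName(instance), ns.name, instance.private_dns_name)

sub_parser = sub_parsers.add_parser('priv_dns_name')
sub_parser.set_defaults(target='priv_dns_name')
sub_parser.add_argument('--name')
sub_parser.set_defaults(func=ip_pub)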
83a0f135e806cd901bfc62eddda008d0c295feaa
|
fix regexes
|
dictionaria/util.py
|
dictionaria/util.py
|
# coding: utf8
from __future__ import unicode_literals
from collections import OrderedDict, defaultdict
import re
from clldutils.text import truncate_with_ellipsis
from clldutils.misc import UnicodeMixin
from clld.db.models import common
from clld.db.meta import DBSession
from bs4 import BeautifulSoup
from clldmpg import cdstar
from clld.web.util.helpers import link
from clld.web.util.htmllib import HTML, escape
from clld.web.util import concepticon
assert cdstar and link
MULT_VALUE_SEP = ' ; '
MARKDOWN_LINK_PATTERN = re.compile(r'\[(?P<label>[^\]]+)\]\((?P<uid>[^)]+)\)')
def last_first(contrib):
if contrib.id == 'baezgabriela':
return '{0}, {1}'.format(
' '.join(contrib.name.split()[1:]),
contrib.name.split()[0])
return contrib.last_first()
def add_unit_links(req, contrib, text):
res, pos = [], 0
for m in MARKDOWN_LINK_PATTERN.finditer(text):
if m.start() > pos:
res.append(escape(text[pos:m.start()]))
res.append(HTML.a(
m.group('label'),
href=req.route_url('unit', id='{0}-{1}'.format(contrib.id, m.group('uid')))))
pos = m.end()
if pos < len(text):
res.append(escape(text[pos:]))
return HTML.span(*res)
def drop_unit_links(text):
return MARKDOWN_LINK_PATTERN.sub(lambda m: m.group('label'), text)
def add_links2(sid, ids, desc, type_):
if not desc:
return
if not ids:
return desc
p = re.compile(
'((?<=\W)|^)(?P<id>{0})(?=\W|$)'.format('|'.join(re.escape(id_) for id_ in ids if id_)),
flags=re.MULTILINE)
return p.sub(lambda m: '{0}'.format(Link(sid + '-' + m.group('id'), type_)), desc)
def unit_detail_html(request=None, context=None, **kw):
labels = {}
for type_, cls in [('source', common.Source), ('unit', common.Unit)]:
labels[type_] = defaultdict(set)
for r in DBSession.query(cls.id):
sid, _, lid = r[0].partition('-')
labels[type_][sid].add(lid)
res = {}
for k, v in context.datadict().items():
if k.endswith('_links'):
v = v.replace('&lt;', '<').replace('&gt;', '>')
for type_ in ['source', 'unit']:
v = add_links2(
context.dictionary.id, labels[type_][context.dictionary.id], v, type_)
res[k.replace('_links', '')] = add_links(request, v)
return dict(links=res)
def truncate(s):
return truncate_with_ellipsis(s, width=70)
def split(s):
return [ss.strip() for ss in s.split(MULT_VALUE_SEP) if ss.strip()]
def join(iterable):
return MULT_VALUE_SEP.join(iterable)
def concepticon_link(request, meaning):
return concepticon.link(request, meaning.concepticon_url.split('/')[-1])
class Link(UnicodeMixin):
def __init__(self, id, type):
self.id = id
self.type = type
def __unicode__(self):
return '**{0.type}:{0.id}**'.format(self)
def sub(self, s, req, labels=None):
if not labels:
cls = getattr(common, self.type.capitalize())
labels = {r[0]: r[1] for r in DBSession.query(cls.id, cls.name)}
def _repl(m):
if m.group('id') in labels:
return '<a href="{0}">{1}</a>'.format(
req.route_url(self.type, id=m.group('id')), labels[m.group('id')])
return m.string
return re.sub('\*\*{0}:(?P<id>[^*]+)\*\*'.format(self.type), _repl, s)
def add_links(req, s):
for type_ in ['source', 'unit']:
s = Link(None, type_).sub(s, req)
return s
def toc(soup):
def link(id_, label):
return HTML.a(label, href='#{0}'.format(id_))
def toplink(html):
a = html.new_tag(
'a',
href='#top',
title='go to top of the page',
style="vertical-align: bottom")
a.string = '⇫'
return a
def permalink(html, id_):
a = html.new_tag(
'a',
**{
'href': '#{0}'.format(id_),
'title': 'Permalink to this headline',
'class': "headerlink"})
a.string = '¶'
return a
toc_, count = [], 0
for d in soup.descendants:
if d.name in ['h1', 'h2', 'h3', 'h4', 'h5']:
count += 1
id_ = 'section{0}'.format(count)
toc_.append((id_, int(d.name[1:]), d.get_text()))
d.insert(0, soup.new_tag('a', id=id_))
d.append(toplink(soup))
d.append(permalink(soup, id_))
if toc_:
top_level = min(t[1] for t in toc_)
nt = OrderedDict()
curr = []
for id_, level, label in toc_:
if level == top_level:
curr = nt[(id_, label)] = []
elif level == top_level + 1:
curr.append((id_, label))
toc_ = HTML.ul(*[HTML.li(link(*t), HTML.ul(*[HTML.li(link(*tt)) for tt in ns]))
for t, ns in nt.items()])
else:
toc_ = ''
return '{0}'.format(soup), toc_
|
Python
| 0.999586
|
@@ -1,59 +1,4 @@
-# coding: utf8\nfrom __future__ import unicode_literals\n
from
@@ -1424,16 +1424,17 @@
+r
'((?<=\W
@@ -3334,16 +3334,17 @@
re.sub(
+r
'\*\*{0}
|
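Decoded, the diff removes the Python 2 `# coding: utf8` header and `unicode_literals` import and prefixes the two regex literals with `r`, so backslash sequences like `\W` and `\*` reach `re` unmangled. The resulting literals, reconstructed from the hunks:

p = re.compile(
    r'((?<=\W)|^)(?P<id>{0})(?=\W|$)'.format('|'.join(re.escape(id_) for id_ in ids if id_)),
    flags=re.MULTILINE)

return re.sub(r'\*\*{0}:(?P<id>[^*]+)\*\*'.format(self.type), _repl, s)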
107b72da8629d97452dc6b7ee0f44eeb7d9e351c
|
remove x-based matplotlib rendering
|
gamma_limits_sensitivity/__init__.py
|
gamma_limits_sensitivity/__init__.py
|
'''
This is the hard working code in order to calculate ULs, sensitivities,
and time to detections.
'''
import matplotlib.pyplot as plt
def upper_limit(N_on, N_off, alpha, l_lim, A_eff):
figures = [plt.figure()]
dictionary = {
'plots': figures
}
return dictionary
def sensitivity(s_bg, alpha, t_obs, A_eff):
figures = [plt.figure()]
dictionary = {
'plots': figures
}
return dictionary
def predict(s_bg, alpha, f_0, df_0, Gamma, dGamma, E_0, A_eff):
figures = [plt.figure()]
times = [1., 2., 3.]
dictionary = {
'times': times,
'plots': figures
}
return dictionary
|
Python
| 0.000001
|
@@ -129,16 +129,37 @@
as plt\n
+matplotlib.use('Agg')
\n\ndef up
|
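Decoded, the hunk inserts `matplotlib.use('Agg')` (the non-interactive, file-only backend, hence no X server needed) directly after the pyplot import. Reconstructed result; note the hunk adds no `import matplotlib` line, so whether that name is bound in the new file is not visible here:

import matplotlib.pyplot as plt
matplotlib.use('Agg')  # select the non-interactive Agg backend, as added by the hunk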
2a67ef989fa79aefccb9bcefe543715346642f91
|
Normalize time to UTC.
|
abusehelper/contrib/autoshun/autoshunbot.py
|
abusehelper/contrib/autoshun/autoshunbot.py
|
import idiokit
from abusehelper.core import utils, cymruwhois, bot, events
AUTOSHUN_CSV_URL = "http://www.autoshun.org/files/shunlist.csv"
class AutoshunBot(bot.PollingBot):
COLUMNS = ["ip", "time", "type"]
feed_url = bot.Param(default=AUTOSHUN_CSV_URL)
use_cymru_whois = bot.BoolParam(default=True)
def poll(self):
pipe = self._poll(url=self.feed_url)
if self.use_cymru_whois:
pipe = pipe | cymruwhois.augment("ip")
return pipe | self._normalize()
@idiokit.stream
def _poll(self, url):
self.log.info("Downloading %s" % url)
try:
info, fileobj = yield utils.fetch_url(url)
except utils.FetchUrlFailed, fuf:
self.log.error("Download failed: %r", fuf)
idiokit.stop()
self.log.info("Downloaded")
# Skip first line
fileobj.readline()
yield utils.csv_to_events(fileobj,
columns=self.COLUMNS,
charset=info.get_param("charset"))
@idiokit.stream
def _normalize(self):
while True:
event = yield idiokit.next()
event.add("feed", "autoshun")
event.add("source url", self.feed_url)
yield idiokit.send(event)
if __name__ == "__main__":
AutoshunBot.from_command_line().execute()
|
Python
| 0.960634
|
@@ -8,16 +8,53 @@
idiokit\n
+import time as _time\nimport calendar\n
from abu
@@ -1286,44 +1286,503 @@
rl)\n
- yield idiokit.send(event
+\n times = event.values("time")\n event.clear("time")\n for time in times:\n event.add("time", self._normalize_time(time))\n yield idiokit.send(event)\n\n def _normalize_time(self, time):\n parsed = _time.strptime(time, "%Y-%m-%d %H:%M:%S")\n seconds = calendar.timegm(parsed)\n seconds += 5 * 3600 # UTC-5 to UTC\n time_tuple = _time.gmtime(seconds)\n return _time.strftime("%Y-%m-%d %H:%M:%S UTC", time_tuple
)\n\ni
|
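Decoded, the diff re-emits each event's `time` values shifted from the feed's UTC-5 timestamps to UTC via a new `_normalize_time` helper. The helper, reconstructed from the hunks:

import time as _time
import calendar

def _normalize_time(self, time):
    parsed = _time.strptime(time, "%Y-%m-%d %H:%M:%S")
    seconds = calendar.timegm(parsed)
    seconds += 5 * 3600  # shift the feed's UTC-5 timestamps to UTC
    time_tuple = _time.gmtime(seconds)
    return _time.strftime("%Y-%m-%d %H:%M:%S UTC", time_tuple)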
85e5957103d2bd796a657a8d9d75d862258c7c1e
|
Save .npy with str instead of int
|
generate_adjacency_matrix_one_npy.py
|
generate_adjacency_matrix_one_npy.py
|
import numpy as np
import argparse
from matrix import *
from multiprocessing.pool import ThreadPool
from ctypes import c_char_p
import multiprocessing as mp
import sys
import time
import math
import string
def parse_args():
'''
Parses arguments.
'''
parser = argparse.ArgumentParser(description="Produce adjacency matrix")
parser.add_argument('--edges', nargs='?', default='net_youtube.txt',
help='Input edges')
parser.add_argument('--undirected', nargs='?', default=True,
help='')
parser.add_argument('--output', nargs='?', default='adjacency_matrix.txt',
help='output file path')
return parser.parse_args()
def get_adj_vec_num_str(n_i, l_i):
node = nodes[n_i]
indexs = (edges[:,0] == node)
neighbors = edges[indexs,1]
num = 0
for neighbor in neighbors:
num = num + pow_value[alias[neighbor]]
num_str = str(num)
line_length = len(num_str)
lines_lens[l_i] = line_length
lines_proxy[l_i][0:line_length] = num_str
return 0
def get_adj_vec_num_str_undirected(n_i, l_i):
node = nodes[n_i]
to_indexs = (edges[:,0] == node)
from_indexs = (edges[:,1] == node)
to_neighbors = edges[to_indexs,1]
from_neighbors = edges[from_indexs,0]
neighbors = np.unique(np.concatenate((to_neighbors,from_neighbors)))
num = 0
for neighbor in neighbors:
num = num + pow_value[alias[neighbor]]
num_str = str(num)
line_length = len(num_str)
lines_lens[l_i] = line_length
lines_proxy[l_i][0:line_length] = num_str
return 0
def initProcessForVec(ori_nodes, ori_edges, ori_size, ori_length, lines, lines_lengths):
global nodes
global edges
global alias
global pow_value
global size
global length
global pow_value
global lines_proxy
global empty_line
global lines_lens
lines_lens = lines_lengths
lines_proxy = lines
nodes = np.array(ori_nodes)
edges = np.array(ori_edges).reshape([ori_length, 2])
size = ori_size
length = ori_length
alias = dict(zip(nodes,range(len(nodes))))
pow_value = [(pow(2,x)) for x in range(size)]
def parse_to_adj_with_decimal(args):
print "Process begin. Read edges from %s" % args.edges
sys.stdout.flush()
start = time.time()
edges = parse_to_matrix(args.edges, data_type=int)
print "Finish Reading edges by %s secs." % (time.time() - start)
sys.stdout.flush()
output = open(args.output,'w')
output.truncate()
output.close()
length = len(edges)
shared_edges = mp.Array('d',(length * 2))
edges_buffer = np.frombuffer(shared_edges.get_obj())
edges_buffer[...] = edges.reshape(length*2,)
nodes = np.unique(edges)
size = len(nodes)
shared_nodes = mp.Array('d',size)
nodes_buffer = np.frombuffer(shared_nodes.get_obj())
nodes_buffer[...] = nodes
part_num = 5
if size < 10000:
part_num = 1
part_len = int(math.ceil(size/float(part_num)))
line_len = size/3 + 1 + len(str(size))
lines = [mp.Array('c', line_len) for i in range(part_len)]
lines_lengths = mp.Array('d', part_len)
start = time.time()
print "Init Adjacency Vectors processes ..."
sys.stdout.flush()
pool = mp.Pool(processes=20, initializer=initProcessForVec, initargs=(shared_nodes, shared_edges, size, length, lines, lines_lengths))
print "Init complete by %s secs." % (time.time() - start)
sys.stdout.flush()
print "The whole process is splitted into %d parts." % part_num
sys.stdout.flush()
node_index = 0
final_adj_decimal = []
for p_i in range(part_num):
print "The %d process begins ..." % (p_i + 1)
sys.stdout.flush()
start_index = p_i*part_len
end_index = min(start_index + part_len, size)
curr_len = end_index - start_index
vec_result = []
if args.undirected:
for i in range(curr_len):
task = pool.apply_async(get_adj_vec_num_str_undirected, args=(node_index + i, i))
vec_result.append(task)
else:
for i in range(curr_len):
task = pool.apply_async(get_adj_vec_num_str, args=(node_index + i, i))
vec_result.append(task)
count = 0
bound = curr_len / float(10000)
print "Begin calculate adjacency ... "
sys.stdout.flush()
start = time.time()
for n_i in range(curr_len):
if n_i >= count * bound:
sys.stdout.write("Process reach %.2f%% \r" % (count/float(100)))
sys.stdout.flush()
count = count + 1
vec_result[n_i].get()
print "Finish Computing by %s secs." % (time.time() - start)
sys.stdout.flush()
print "Begin parsing adjacency string... "
sys.stdout.flush()
start = time.time()
for l_i in range(curr_len):
pair_for_adj = []
pair_for_adj.append(nodes[node_index + l_i])
pair_for_adj.append(int(lines[l_i][:int(lines_lengths[l_i])]))
final_adj_decimal.append(pair_for_adj)
print "Parsing finish by %s secs." % (time.time() - start)
sys.stdout.flush()
node_index = node_index + curr_len
print "Begin writing adjacency ... "
sys.stdout.flush()
start = time.time()
np.save(args.output,np.array(final_adj_decimal))
print "Writing finish. Written into %s by %s secs." % (args.output, time.time() - start)
sys.stdout.flush()
pool.close()
pool.join()
args = parse_args()
parse_to_adj_with_decimal(args)
|
Python
| 0
|
@@ -4542,20 +4542,16 @@
.append(
-int(
lines[l_
@@ -4579,17 +4579,16 @@
[l_i])])
-)
\n\t\t\tfina
|
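Decoded, the hunk drops the `int(...)` wrapper, so each pair stores the adjacency value as the raw decimal string rather than an int, matching the commit subject. The resulting line, reconstructed:

pair_for_adj.append(lines[l_i][:int(lines_lengths[l_i])])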
86af85e46b0b313ecd0804916539d18556fba84a
|
Use api.constrains
|
account_analytic_required/models/account.py
|
account_analytic_required/models/account.py
|
# -*- coding: utf-8 -*-
# © 2011 Akretion
# © 2016 Camptocamp SA
# License AGPL-3.0 or later (http://www.gnu.org/licenses/agpl)
from openerp import _, api, fields, models
from openerp.tools import float_is_zero
class AccountAccountType(models.Model):
_inherit = "account.account.type"
@api.model
def _get_policies(self):
"""This is the method to be inherited for adding policies"""
return [('optional', 'Optional'),
('always', 'Always'),
('never', 'Never')]
analytic_policy = fields.Selection(
_get_policies,
'Policy for analytic account',
required=True,
default='optional',
help="Set the policy for analytic accounts : if you select "
"'Optional', the accountant is free to put an analytic account "
"on an account move line with this type of account ; if you "
"select 'Always', the accountant will get an error message if "
"there is no analytic account ; if you select 'Never', the "
"accountant will get an error message if an analytic account "
"is present.")
class AccountMoveLine(models.Model):
_inherit = "account.move.line"
@api.model
def _get_analytic_policy(self, account):
""" Extension point to obtain analytic policy for an account """
return account.user_type_id.analytic_policy
@api.multi
def _check_analytic_required_msg(self):
for move_line in self:
prec = move_line.company_currency_id.rounding
if (float_is_zero(move_line.debit, precision_rounding=prec) and
float_is_zero(move_line.credit, precision_rounding=prec)):
continue
analytic_policy = self._get_analytic_policy(move_line.account_id)
if (analytic_policy == 'always' and
not move_line.analytic_account_id):
return _("Analytic policy is set to 'Always' with account "
"%s '%s' but the analytic account is missing in "
"the account move line with label '%s'."
) % (move_line.account_id.code,
move_line.account_id.name,
move_line.name)
elif (analytic_policy == 'never' and
move_line.analytic_account_id):
return _("Analytic policy is set to 'Never' with account %s "
"'%s' but the account move line with label '%s' "
"has an analytic account %s '%s'."
) % (move_line.account_id.code,
move_line.account_id.name,
move_line.name,
move_line.analytic_account_id.code,
move_line.analytic_account_id.name)
@api.multi
def _check_analytic_required(self):
return not self._check_analytic_required_msg()
_constraints = [(_check_analytic_required,
_check_analytic_required_msg,
['analytic_account_id', 'account_id', 'debit', 'credit'])]
|
Python
| 0.000001
|
@@ -149,16 +149,28 @@
_, api,
+ exceptions,
fields,
@@ -2926,131 +2926,166 @@
api.
-multi\n def _check_analytic_required(self):\n return not self._check_analytic_required_msg()\n\n _constraints = [(
+constrains('analytic_account_id', 'account_id', 'debit', 'credit')\n def _check_analytic_required(self):\n for rec in self:\n message = rec.
_che
@@ -3104,22 +3104,23 @@
required
-,\n
+_msg()\n
@@ -3127,119 +3127,74 @@
- _check_analytic_required_msg,\n ['analytic_account_id', 'account_id', 'debit', 'credit'])]
+if message:\n raise exceptions.ValidationError(message)
\n
|
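Decoded, the diff replaces the deprecated `_constraints` list and the `@api.multi` checker with a single `@api.constrains` method that raises `exceptions.ValidationError` itself. The resulting code, reconstructed from the hunks and the old file:

from openerp import _, api, exceptions, fields, models

@api.constrains('analytic_account_id', 'account_id', 'debit', 'credit')
def _check_analytic_required(self):
    for rec in self:
        message = rec._check_analytic_required_msg()
        if message:
            raise exceptions.ValidationError(message)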
ed45fb28c75e0c8b942425420fef05c6e503da62
|
Remove last LEDGER_INDEX_INFO reference
|
corehq/pillows/ledger.py
|
corehq/pillows/ledger.py
|
from collections import namedtuple
from functools import lru_cache
from pillowtop.checkpoints.manager import (
get_checkpoint_for_elasticsearch_pillow,
)
from pillowtop.pillow.interface import ConstructedPillow
from pillowtop.processors import PillowProcessor
from corehq.apps.change_feed import topics
from corehq.apps.change_feed.consumer.feed import (
KafkaChangeFeed,
KafkaCheckpointEventHandler,
)
from corehq.apps.export.models.new import LedgerSectionEntry
from corehq.apps.locations.models import SQLLocation
from corehq.form_processor.utils.general import should_use_sql_backend
from corehq.pillows.mappings.ledger_mapping import LEDGER_INDEX_INFO
from corehq.util.quickcache import quickcache
@quickcache(['case_id'])
def _location_id_for_case(case_id):
try:
return SQLLocation.objects.get(supply_point_id=case_id).location_id
except SQLLocation.DoesNotExist:
return None
def _get_daily_consumption_for_ledger(ledger):
from corehq.apps.commtrack.consumption import get_consumption_for_ledger_json
daily_consumption = get_consumption_for_ledger_json(ledger)
if should_use_sql_backend(ledger['domain']):
from corehq.form_processor.backends.sql.dbaccessors import LedgerAccessorSQL
ledger_value = LedgerAccessorSQL.get_ledger_value(
ledger['case_id'], ledger['section_id'], ledger['entry_id']
)
ledger_value.daily_consumption = daily_consumption
LedgerAccessorSQL.save_ledger_values([ledger_value])
else:
from corehq.apps.commtrack.models import StockState
StockState.objects.filter(pk=ledger['_id']).update(daily_consumption=daily_consumption)
return daily_consumption
def _update_ledger_section_entry_combinations(ledger):
current_combos = _get_ledger_section_combinations(ledger['domain'])
if (ledger['section_id'], ledger['entry_id']) in current_combos:
return
# use get_or_create because this may be created by another parallel process
LedgerSectionEntry.objects.get_or_create(
domain=ledger['domain'],
section_id=ledger['section_id'],
entry_id=ledger['entry_id'],
)
# clear the lru_cache so that next time a ledger is saved, we get the combinations
_get_ledger_section_combinations.cache_clear()
@lru_cache()
def _get_ledger_section_combinations(domain):
return list(LedgerSectionEntry.objects.filter(domain=domain).values_list('section_id', 'entry_id').all())
class LedgerProcessor(PillowProcessor):
def process_change(self, change):
ledger = change.get_document()
from corehq.apps.commtrack.models import CommtrackConfig
commtrack_config = CommtrackConfig.for_domain(ledger['domain'])
if commtrack_config and commtrack_config.use_auto_consumption:
daily_consumption = _get_daily_consumption_for_ledger(ledger)
ledger['daily_consumption'] = daily_consumption
if not ledger.get('location_id') and ledger.get('case_id'):
ledger['location_id'] = _location_id_for_case(ledger['case_id'])
_update_ledger_section_entry_combinations(ledger)
def get_ledger_to_elasticsearch_pillow(pillow_id='LedgerToElasticsearchPillow', num_processes=1,
process_num=0, **kwargs):
"""
This pillow's id references Elasticsearch, but it no longer saves to ES.
It has been kept to keep the checkpoint consistent, and can be changed at any time.
"""
assert pillow_id == 'LedgerToElasticsearchPillow', 'Pillow ID is not allowed to change'
IndexInfo = namedtuple('IndexInfo', ['index'])
checkpoint = get_checkpoint_for_elasticsearch_pillow(
pillow_id, IndexInfo("ledgers_2016-03-15"), [topics.LEDGER]
)
change_feed = KafkaChangeFeed(
topics=[topics.LEDGER], client_id='ledgers-to-es', num_processes=num_processes, process_num=process_num
)
return ConstructedPillow(
name=pillow_id,
checkpoint=checkpoint,
change_feed=change_feed,
processor=LedgerProcessor(),
change_processed_event_handler=KafkaCheckpointEventHandler(
checkpoint=checkpoint, checkpoint_frequency=100, change_feed=change_feed
),
)
|
Python
| 0.000001
|
@@ -599,77 +599,8 @@
end%0A
-from corehq.pillows.mappings.ledger_mapping import LEDGER_INDEX_INFO%0A
from
|
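The diff for this record only deletes the now-unused `LEDGER_INDEX_INFO` import; the pillow already derives its checkpoint from a local `IndexInfo` namedtuple that carries nothing but the index name. Roughly, the relevant lines after the change (a sketch combining the import block and the checkpoint setup, not the full file):

from collections import namedtuple

from pillowtop.checkpoints.manager import (
    get_checkpoint_for_elasticsearch_pillow,
)

# the LEDGER_INDEX_INFO import is gone; only the index name is still needed
IndexInfo = namedtuple('IndexInfo', ['index'])
checkpoint = get_checkpoint_for_elasticsearch_pillow(
    pillow_id, IndexInfo("ledgers_2016-03-15"), [topics.LEDGER]
)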
a7973885ac792da0a369615b0c0240f491b01ef5
|
PYCBC-1047 Ping test fails on all platforms
|
couchbase/diagnostics.py
|
couchbase/diagnostics.py
|
from abc import abstractmethod
from typing import Optional, Mapping, Union, Any
from enum import Enum
from couchbase_core import JSON
from datetime import timedelta
from couchbase.exceptions import InvalidArgumentException
import json
import copy
class EndpointState(Enum):
Disconnected = "disconnected"
Connecting = "connecting"
Connected = "connected"
Disconnecting = "disconnecting"
class ClusterState(Enum):
Online = "online"
Degraded = "degraded"
Offline = "offline"
class ServiceType(Enum):
View = "views"
KeyValue = "kv"
Query = "n1ql"
Search = "fts"
Analytics = "cbas"
Config = "config"
Management = "mgmt"
class PingState(Enum):
OK = 'ok'
TIMEOUT = 'timeout'
class EndPointDiagnostics(object):
def __init__(self, # type: EndPointDiagnostics
service_type, # type: ServiceType
raw_endpoint # type: JSON
):
self._raw_endpoint = raw_endpoint
self._raw_endpoint['type'] = service_type.value
@property
def type(self):
# type: (...) -> ServiceType
return ServiceType(self._raw_endpoint.get('type'))
@property
def id(self):
# type: (...) -> str
return self._raw_endpoint.get('id')
@property
def local(self):
# type: (...) -> str
return self._raw_endpoint.get('local')
@property
def remote(self):
# type: (...) -> str
return self._raw_endpoint.get('remote')
@property
def last_activity(self):
# type: (...) -> timedelta
return timedelta(microseconds=self._raw_endpoint.get('last_activity_us'))
@property
def namespace(self):
# type: (...) -> str
return self._raw_endpoint.get('scope')
@property
def state(self):
# type: (...) -> EndpointState
return EndpointState(self._raw_endpoint.get('status'))
def as_dict(self):
# type: (...) -> dict
return self._raw_endpoint
def as_json(self):
# type: (...) -> str
return json.dumps(self.as_dict())
class DiagnosticsResult(object):
def __init__(self, # type: DiagnosticsResult
source_diagnostics # type: Union[Mapping[str,Any], list[Mapping[str,Any]]]
):
self._id = self._version = self._sdk = self._endpoints = None
# we could have an array of dicts, or just a single dict
if isinstance(source_diagnostics, dict):
source_diagnostics = [source_diagnostics]
if not isinstance(source_diagnostics, list):
raise InvalidArgumentException("DiagnosticsResult expects a dict or list(dict)")
for d in source_diagnostics:
self.append_endpoints(d)
def as_json(self):
# type: (...) -> str
return_val = copy.deepcopy(self.__dict__)
for k, val in return_val['_endpoints'].items():
            json_vals = []
for v in val:
json_vals.append(v.as_dict())
return_val['_endpoints'][k] = json_vals
return_val['_endpoints'] = {k.value: v for k, v in return_val['_endpoints'].items()}
return json.dumps(return_val)
def append_endpoints(self, source_diagnostics):
# type: (...) -> None
# now the remaining keys are the endpoints...
self._id = source_diagnostics.pop('id', None)
self._version = source_diagnostics.pop('version', None)
self._sdk = source_diagnostics.pop('sdk', None)
if not self._endpoints:
self._endpoints = dict()
for k, v in source_diagnostics.items():
# construct an endpointpingreport for each
k = ServiceType(k)
endpoints = self._endpoints.get(k, list())
for value in v:
endpoints.append(EndPointDiagnostics(k, value))
self._endpoints[k] = endpoints
@property
def id(self):
# type: (...) -> str
return self._id
@property
def version(self):
# type: (...) -> int
return self._version
@property
def sdk(self):
# type: (...) -> str
return self._sdk
@property
def endpoints(self):
# type: (...) -> Mapping[ServiceType, list[EndPointDiagnostics]]
return self._endpoints
@property
def state(self):
# type: (...)-> ClusterState
num_found = 0
num_connected = 0
for k, v in self._endpoints.items():
for endpoint in v:
num_found += 1
if endpoint.state == EndpointState.Connected:
num_connected += 1
if num_found == num_connected:
return ClusterState.Online
        if num_connected > 0:
return ClusterState.Degraded
return ClusterState.Offline
class EndpointPingReport(object):
def __init__(self,
service_type, # type: ServiceType
source # type: Mapping[str, Any]
):
self._src_ping = source
self._src_ping['service_type'] = service_type
@property
def service_type(self):
# type: (...) -> ServiceType
return self._src_ping.get('service_type', None)
@property
def id(self):
# type: (...) -> str
return self._src_ping.get('id', None)
@property
def local(self):
# type: (...) -> str
return self._src_ping.get('local', None)
@property
def remote(self):
# type: (...) -> str
return self._src_ping.get('remote', None)
@property
def namespace(self):
# type: (...) -> str
# TODO: check if LCB will update this to namespace (like java)
return self._src_ping.get('scope', None)
@property
def latency(self):
# type: (...) -> timedelta
return timedelta(microseconds=self._src_ping.get('latency_us', None))
@property
def state(self):
# type: (...) -> PingState
return PingState(self._src_ping.get('status', None))
|
Python
| 0.995142
|
@@ -5661,68 +5661,36 @@
#
-TODO: check if LCB will update this to namespace (like java)
+was 'scope', now 'namespace'
%0A
@@ -5692,32 +5692,64 @@
'%0A return
+ self._src_ping.get('namespace',
self._src_ping.
@@ -5758,32 +5758,33 @@
t('scope', None)
+)
%0A%0A @property%0A
|
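Decoding the diff: libcouchbase renamed the ping report's 'scope' field to 'namespace', so the `namespace` property now prefers the new key and falls back to the old one. The patched property, reconstructed from the diff:

    @property
    def namespace(self):
        # type: (...) -> str
        # was 'scope', now 'namespace': prefer the new key, fall back to the old
        return self._src_ping.get('namespace',
                                  self._src_ping.get('scope', None))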
19afe973bffe1bb90942757fcbf81f3630ffddda
|
Update code formatting.
|
crawler/args.py
|
crawler/args.py
|
#!/usr/bin/env python3
# chameleon-crawler
#
# Copyright 2015 ghostwords.
#
# This Source Code Form is subject to the terms of the Mozilla Public
# License, v. 2.0. If a copy of the MPL was not distributed with this
# file, You can obtain one at http://mozilla.org/MPL/2.0/.
from os import path
import argparse
def is_valid_file(f, parser):
if path.isfile(f):
return f
raise argparse.ArgumentTypeError("%s does not exist!" % f)
def parse_args():
parser = argparse.ArgumentParser()
parser.add_argument("crx", metavar='CHAMELEON_CRX_FILE_PATH',
type=lambda x: is_valid_file(x, parser),
help="path to Chameleon CRX package")
group = parser.add_mutually_exclusive_group()
group.add_argument("--headless", action="store_true", default=True,
help="use a virtual display (default)")
group.add_argument("--no-headless", dest='headless', action="store_false")
parser.add_argument("-n", dest='num_crawlers', type=int,
choices=range(1, 9), default=4,
help="how many browsers to use in parallel "
"(default: %(default)s)")
parser.add_argument("-q", "--quiet", action="store_true", default=False,
help="turn off standard output")
parser.add_argument("-t", "--timeout", metavar='SECONDS',
type=int, default=20,
help="how many seconds to wait for pages to finish "
"loading before timing out (default: %(default)s)")
parser.add_argument("--urls", metavar='URL_FILE_PATH',
type=argparse.FileType('r'), default='urls.txt',
help="path to URL list file (default: %(default)s)")
return parser.parse_args()
|
Python
| 0
|
@@ -526,16 +526,25 @@
rgument(
+%0A
%22crx%22, m
@@ -581,28 +581,24 @@
H',%0A
-
type=lambda
@@ -626,36 +626,32 @@
arser),%0A
-
help=%22path to Ch
@@ -670,16 +670,21 @@
package%22
+%0A
)%0A%0A g
@@ -751,16 +751,25 @@
rgument(
+%0A
%22--headl
@@ -809,20 +809,16 @@
t=True,%0A
-
@@ -855,16 +855,21 @@
efault)%22
+%0A
)%0A gr
@@ -966,16 +966,25 @@
rgument(
+%0A
%22-n%22, de
@@ -1012,20 +1012,16 @@
pe=int,%0A
-
@@ -1052,36 +1052,32 @@
ault=4,%0A
-
help=%22how many b
@@ -1105,24 +1105,16 @@
allel %22%0A
-
@@ -1129,32 +1129,37 @@
t: %25(default)s)%22
+%0A
)%0A%0A parser.ad
@@ -1169,16 +1169,25 @@
rgument(
+%0A
%22-q%22, %22-
@@ -1235,28 +1235,24 @@
se,%0A
-
help=%22turn o
@@ -1270,16 +1270,21 @@
output%22
+%0A
)%0A%0A p
@@ -1302,16 +1302,25 @@
rgument(
+%0A
%22-t%22, %22-
@@ -1353,28 +1353,24 @@
S',%0A
-
type=int, de
@@ -1379,20 +1379,16 @@
ult=20,%0A
-
@@ -1440,24 +1440,16 @@
inish %22%0A
-
@@ -1494,24 +1494,29 @@
(default)s)%22
+%0A
)%0A%0A parse
@@ -1530,16 +1530,25 @@
rgument(
+%0A
%22--urls%22
@@ -1570,28 +1570,24 @@
FILE_PATH',%0A
-
type
@@ -1635,28 +1635,24 @@
t',%0A
-
-
help=%22path t
@@ -1690,16 +1690,21 @@
ault)s)%22
+%0A
)%0A%0A r
|
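This diff is purely cosmetic: every `add_argument` call is reflowed so the argument list starts on its own line and the closing parenthesis stands alone. For example, the `crx` argument after the change, reconstructed from the first hunks:

    parser.add_argument(
        "crx", metavar='CHAMELEON_CRX_FILE_PATH',
        type=lambda x: is_valid_file(x, parser),
        help="path to Chameleon CRX package"
    )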
bbbf133b1da08b851bbae1409b8dd5696eaa1187
|
Remove unused imports in tests.app.soc.modules.gsoc.views.test_profile.
|
tests/app/soc/modules/gsoc/views/test_profile.py
|
tests/app/soc/modules/gsoc/views/test_profile.py
|
#!/usr/bin/env python2.5
#
# Copyright 2010 the Melange authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests for user profile related views.
"""
__authors__ = [
'"Sverre Rabbelier" <sverre@rabbelier.nl>',
]
import httplib
from soc.modules.seeder.logic.seeder import logic as seeder_logic
from tests.timeline_utils import TimelineHelper
from tests.profile_utils import GSoCProfileHelper
from tests.test_utils import DjangoTestCase
# TODO: perhaps we should move this out?
from soc.modules.seeder.logic.seeder import logic as seeder_logic
class ProfileViewTest(DjangoTestCase):
"""Tests user profile views.
"""
def setUp(self):
self.init()
def assertProfileTemplatesUsed(self, response):
self.assertGSoCTemplatesUsed(response)
self.assertTemplateUsed(response, 'v2/modules/gsoc/profile/base.html')
self.assertTemplateUsed(response, 'v2/modules/gsoc/_form.html')
def testCreateProfilePage(self):
self.timeline.studentSignup()
url = '/gsoc/profile/student/' + self.gsoc.key().name()
response = self.client.get(url)
self.assertProfileTemplatesUsed(response)
def testCreateMentorProfilePage(self):
self.timeline.studentSignup()
url = '/gsoc/profile/mentor/' + self.gsoc.key().name()
response = self.client.get(url)
self.assertProfileTemplatesUsed(response)
def testRedirectWithStudentProfilePage(self):
self.timeline.studentSignup()
self.data.createStudent()
url = '/gsoc/profile/student/' + self.gsoc.key().name()
response = self.client.get(url)
self.assertResponseRedirect(response)
def testForbiddenWithStudentProfilePage(self):
self.timeline.studentSignup()
self.data.createStudent()
url = '/gsoc/profile/mentor/' + self.gsoc.key().name()
response = self.client.get(url)
self.assertResponseForbidden(response)
def testForbiddenWithMentorProfilePage(self):
self.timeline.studentSignup()
self.data.createMentor(self.org)
url = '/gsoc/profile/student/' + self.gsoc.key().name()
response = self.client.get(url)
self.assertResponseForbidden(response)
def testEditProfilePage(self):
self.timeline.studentSignup()
self.data.createProfile()
url = '/gsoc/profile/' + self.gsoc.key().name()
response = self.client.get(url)
self.assertResponseOK(response)
def testCreateProfile(self):
from soc.modules.gsoc.models.profile import GSoCProfile
from soc.modules.gsoc.models.profile import GSoCStudentInfo
self.timeline.studentSignup()
self.data.createUser()
suffix = "%(program)s" % {
'program': self.gsoc.key().name(),
}
role_suffix = "%(role)s/%(suffix)s" % {
'role': 'student',
'suffix': suffix,
}
url = '/gsoc/profile/' + suffix
role_url = '/gsoc/profile/' + role_suffix
# we do not want to seed the data in the datastore, we just
# want to get the properties generated for seeding. The post
# test will actually do the entity creation, so we reuse the
# seed_properties method from the seeder to get the most common
# values for Profile and StudentInfo
postdata = seeder_logic.seed_properties(GSoCProfile)
props = seeder_logic.seed_properties(GSoCStudentInfo, properties={
'tax_form': None,
'enrollment_form': None,
})
props.pop('tax_form')
props.pop('enrollment_form')
postdata.update(props)
postdata.update({
'link_id': self.data.user.link_id,
'student_info': None,
'user': self.data.user, 'parent': self.data.user,
'scope': self.gsoc, 'status': 'active',
'email': self.data.user.account.email(),
'mentor_for': [], 'org_admin_for': [],
'is_org_admin': False, 'is_mentor': False,
})
response = self.post(role_url, postdata)
self.assertResponseRedirect(response, url+'?validated')
# hacky
profile = GSoCProfile.all().get()
profile.delete()
postdata.update({
'email': 'somerandominvalid@emailid',
})
response = self.post(role_url, postdata)
# yes! this is the protocol for form posts. We get an OK response
# with the response containing the form's GET request page whenever
# the form has an error and could not be posted. This is the architecture
# chosen in order to save the form error state's while rendering the
# error fields.
self.assertResponseOK(response)
error_dict = response.context['error']
self.assertTrue('email' in error_dict)
|
Python
| 0
|
@@ -722,24 +722,8 @@
%5D%0A%0A%0A
-import httplib%0A%0A
from
@@ -789,106 +789,8 @@
ic%0A%0A
-from tests.timeline_utils import TimelineHelper%0Afrom tests.profile_utils import GSoCProfileHelper%0A
from
@@ -4227,17 +4227,19 @@
nse, url
-+
+ +
'?valida
|
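Per the diff, the unused `httplib`, `TimelineHelper`, and `GSoCProfileHelper` imports are dropped (the duplicated `seeder_logic` import survives untouched), and the redirect assertion gains spaces around `+`, becoming `self.assertResponseRedirect(response, url + '?validated')`. The import block that remains, roughly:

from soc.modules.seeder.logic.seeder import logic as seeder_logic

from tests.test_utils import DjangoTestCase

# TODO: perhaps we should move this out?
from soc.modules.seeder.logic.seeder import logic as seeder_logic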
696b4e093171e9d6f17502650f15c9299438b874
|
Drop Py2 and six on tests/integration/modules/test_virtualenv_mod.py
|
tests/integration/modules/test_virtualenv_mod.py
|
tests/integration/modules/test_virtualenv_mod.py
|
# -*- coding: utf-8 -*-
from __future__ import absolute_import, print_function, unicode_literals
import os
import tempfile
import salt.utils.path
from salt.modules.virtualenv_mod import KNOWN_BINARY_NAMES
from tests.support.case import ModuleCase
from tests.support.helpers import slowTest
from tests.support.runtests import RUNTIME_VARS
from tests.support.unit import skipIf
@skipIf(
salt.utils.path.which_bin(KNOWN_BINARY_NAMES) is None, "virtualenv not installed"
)
class VirtualenvModuleTest(ModuleCase):
"""
Validate the virtualenv module
"""
def setUp(self):
super(VirtualenvModuleTest, self).setUp()
self.venv_test_dir = tempfile.mkdtemp(dir=RUNTIME_VARS.TMP)
self.venv_dir = os.path.join(self.venv_test_dir, "venv")
@slowTest
def test_create_defaults(self):
"""
virtualenv.managed
"""
self.run_function("virtualenv.create", [self.venv_dir])
pip_file = os.path.join(self.venv_dir, "bin", "pip")
self.assertTrue(os.path.exists(pip_file))
@slowTest
def test_site_packages(self):
pip_bin = os.path.join(self.venv_dir, "bin", "pip")
self.run_function(
"virtualenv.create", [self.venv_dir], system_site_packages=True
)
with_site = self.run_function("pip.freeze", bin_env=pip_bin)
self.run_function("file.remove", [self.venv_dir])
self.run_function("virtualenv.create", [self.venv_dir])
without_site = self.run_function("pip.freeze", bin_env=pip_bin)
self.assertFalse(with_site == without_site)
@slowTest
def test_clear(self):
pip_bin = os.path.join(self.venv_dir, "bin", "pip")
self.run_function("virtualenv.create", [self.venv_dir])
self.run_function("pip.install", [], pkgs="pep8", bin_env=pip_bin)
self.run_function("virtualenv.create", [self.venv_dir], clear=True)
packages = self.run_function("pip.list", prefix="pep8", bin_env=pip_bin)
self.assertFalse("pep8" in packages)
def test_virtualenv_ver(self):
ret = self.run_function("virtualenv.virtualenv_ver", [self.venv_dir])
assert isinstance(ret, list)
assert all([isinstance(x, int) for x in ret])
def tearDown(self):
self.run_function("file.remove", [self.venv_test_dir])
|
Python
| 0
|
@@ -1,102 +1,4 @@
-# -*- coding: utf-8 -*-%0Afrom __future__ import absolute_import, print_function, unicode_literals%0A%0A
impo
@@ -503,34 +503,8 @@
per(
-VirtualenvModuleTest, self
).se
|
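Decoded, the diff drops the `# -*- coding: utf-8 -*-` header and the `__future__` import, and modernizes the `super` call. The Python 3-only setup after the change, assuming the record's remaining imports stay as-is:

import os
import tempfile

from tests.support.case import ModuleCase
from tests.support.runtests import RUNTIME_VARS

class VirtualenvModuleTest(ModuleCase):
    def setUp(self):
        super().setUp()  # zero-argument super; the explicit Py2 form is gone
        self.venv_test_dir = tempfile.mkdtemp(dir=RUNTIME_VARS.TMP)
        self.venv_dir = os.path.join(self.venv_test_dir, "venv")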
d8b144c3142534714ecff90cf88749a6b8ed347d
|
Remove django 1.8 workaround no longer needed
|
pucas/management/commands/createcasuser.py
|
pucas/management/commands/createcasuser.py
|
from django.contrib.auth import get_user_model
from django.core.management.base import BaseCommand
from pucas.ldap import LDAPSearch, LDAPSearchException, \
user_info_from_ldap
class Command(BaseCommand):
help = 'Initialize a new CAS user account'
def add_arguments(self, parser):
parser.add_argument('netids', nargs='+')
parser.add_argument(
'--admin',
help='Give the specified user(s) superuser permissions (equivalent to createsuperuser)',
action='store_true',
default=False
)
parser.add_argument(
'--staff',
help='Give the specified user(s) staff permissions',
action='store_true',
default=False
)
def handle(self, *args, **options):
User = get_user_model()
ldap_search = LDAPSearch()
netids = options['netids']
admin = options['admin']
staff = options['staff']
for netid in netids:
try:
# make sure we can find the netid in LDAP first
ldap_search.find_user(netid)
user, created = User.objects.get_or_create(username=netid)
# NOTE: should we re-init data from ldap even if user
# already exists, or error?
user_info_from_ldap(user)
# If admin flag is set, make the user an admin
if admin or staff:
user.is_staff = True
if admin:
user.is_superuser = True
user.save()
self.stdout.write(
self.style_success("%s user '%s'" \
% ('Created' if created else 'Updated', netid)))
except LDAPSearchException:
self.stderr.write(
self.style.ERROR("LDAP information for '%s' not found" \
% netid))
def style_success(self, msg):
# workaround to support django 1.8 - style.SUCCESS
# only added in django 1.9
if hasattr(self.style, 'SUCCESS'):
return self.style.SUCCESS(msg)
else:
return msg
|
Python
| 0
|
@@ -1660,25 +1660,50 @@
lf.style
-_success(
+.SUCCESS(%0A
%22%25s user
@@ -1708,19 +1708,16 @@
er '%25s'%22
- %5C
%0A
@@ -1936,11 +1936,8 @@
und%22
- %5C
%0A
@@ -1961,267 +1961,27 @@
-%25 netid))%0A%0A def style_success(self, msg):%0A # workaround to support django 1.8 - style.SUCCESS%0A # only added in django 1.9%0A if hasattr(self.style, 'SUCCESS'):%0A return self.style.SUCCESS(msg)%0A else:%0A return msg%0A
+ %25 netid))
%0A
|
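With Django 1.8 support gone, `style.SUCCESS` is always available, so the diff calls it directly, deletes the `style_success` shim, and drops the now-unneeded line continuations. The success branch after the change, reconstructed from the diff:

                self.stdout.write(
                    self.style.SUCCESS(
                        "%s user '%s'"
                        % ('Created' if created else 'Updated', netid)))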
872320e02d5c922e177434f6b9fa70af8cf822b9
|
Revert "RT-26"
|
wkhtmltopdf/__init__.py
|
wkhtmltopdf/__init__.py
|
# Have to comment this import to perform pip install at the same time as django install
# import os
# if 'DJANGO_SETTINGS_MODULE' in os.environ:
# from .utils import *
__author__ = 'Incuna Ltd'
__version__ = '2.0.3'
|
Python
| 0.000001
|
@@ -1,107 +1,14 @@
-# Have to comment this import to perform pip install at the same time as django install%0A# import os%0A%0A#
+import os%0A
if '
@@ -50,9 +50,8 @@
on:%0A
-#
|
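The revert restores the module to its pre-RT-26 shape: `os` is imported unconditionally again and `.utils` is pulled in only when Django settings are configured. The whole file after the revert, decoded from the diff:

import os

if 'DJANGO_SETTINGS_MODULE' in os.environ:
    from .utils import *

__author__ = 'Incuna Ltd'
__version__ = '2.0.3'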
563a82246180d949917bcd444411bbeb82604e97
|
Add an assertion in search.py
|
recipe_modules/buildbucket/tests/search.py
|
recipe_modules/buildbucket/tests/search.py
|
# Copyright 2019 The LUCI Authors. All rights reserved.
# Use of this source code is governed under the Apache License, Version 2.0
# that can be found in the LICENSE file.
from recipe_engine import types
from google.protobuf import json_format
from PB.go.chromium.org.luci.buildbucket.proto import build as build_pb2
from PB.go.chromium.org.luci.buildbucket.proto import common as common_pb2
from PB.go.chromium.org.luci.buildbucket.proto import rpc as rpc_pb2
DEPS = [
'buildbucket',
'json',
'properties',
'runtime',
'step'
]
def RunSteps(api):
builds = api.buildbucket.search(
rpc_pb2.BuildPredicate(
gerrit_changes=list(api.buildbucket.build.input.gerrit_changes),
),
limit=api.properties.get('limit'),
)
pres = api.step.active_result.presentation
for b in builds:
pres.logs['build %s' % b.id] = json_format.MessageToJson(b).splitlines()
def GenTests(api):
def test(test_name, tags=None, **req):
return (
api.test(test_name) +
api.runtime(is_luci=True, is_experimental=False) +
api.buildbucket.try_build(
project='chromium',
builder='Builder',
git_repo='https://chromium.googlesource.com/chromium/src',
)
)
yield (
test('basic')
)
yield (
test('two builds') +
api.buildbucket.simulated_search_results([
build_pb2.Build(id=1, status=common_pb2.SUCCESS),
build_pb2.Build(id=2, status=common_pb2.FAILURE),
])
)
yield (
test('search failed') +
api.step_data(
'buildbucket.search',
api.json.output_stream(
json_format.MessageToDict(rpc_pb2.BatchResponse(
responses=[dict(error=dict(message='there was a problem'))],
)),
),
)
)
yield (
test('limit') +
api.properties(limit=5) +
api.buildbucket.simulated_search_results([
build_pb2.Build(id=i+1, status=common_pb2.SUCCESS)
for i in xrange(10)
])
)
|
Python
| 0
|
@@ -556,16 +556,54 @@
s(api):%0A
+ limit = api.properties.get('limit')%0A
builds
@@ -757,40 +757,65 @@
mit=
-api.properties.get('limit'),%0A )
+limit,%0A )%0A assert limit is None or len(builds) %3C= limit
%0A p
@@ -2024,16 +2024,80 @@
UCCESS)%0A
+ # Returning more to test trimming of the returned list.%0A
|
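Decoded, the diff hoists the `limit` property lookup in `RunSteps` and asserts that `search` never returns more builds than requested; the 'limit' test case also gains a comment noting that the extra simulated results exercise trimming. The patched `RunSteps`, keeping the recipe's two-space indent:

def RunSteps(api):
  limit = api.properties.get('limit')
  builds = api.buildbucket.search(
      rpc_pb2.BuildPredicate(
          gerrit_changes=list(api.buildbucket.build.input.gerrit_changes),
      ),
      limit=limit,
  )
  # search() must honor the requested limit
  assert limit is None or len(builds) <= limit
  pres = api.step.active_result.presentation
  for b in builds:
    pres.logs['build %s' % b.id] = json_format.MessageToJson(b).splitlines()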
51251817fe1c10d49b27f121c2b826e7f260205d
|
Fix TypeError in get_issues_from_pr in changelog-generator.py
|
tools/changelog-generator/changelog-generator.py
|
tools/changelog-generator/changelog-generator.py
|
from github import Github
from git import Repo
from jinja2 import Environment, FileSystemLoader
import re
import datetime
import click
def get_issues_from_pr(p):
return [m.groups()[0] for m in re.finditer(r"#(\d+)", p.body)]
def generate_topic_list(labels):
r = set()
for l in labels:
if l == 'area/gui':
r.add('gui')
elif l == 'C/NB':
r.add('northbound')
elif l == 'area/api':
r.add('api')
elif l == 'area/config':
r.add('configuration')
elif l == 'C/FL' or l == 'area/floodlight':
r.add('floodlight')
elif l == 'area/docs':
r.add('docs')
elif l == 'area/testing':
r.add('tests')
elif l in ['area/storm',
'C/STATS',
'C/EVENT',
'C/ISLLATENCY',
'C/FLOW',
'C/PORTSTATE',
'C/NBWORKER',
'C/PING',
'C/SWMANAGER',
'C/NETWORK',
'C/ROUTER',
'C/REROUTE']:
r.add('storm-topologies')
return r
def get_pull_request_id(commit):
regex = r"Merge pull request #(\d+) from "
matches = re.finditer(regex, commit.message)
try:
return int(next(m.groups()[0] for m in matches))
except StopIteration:
pass
def lable_filter(label):
return all([label not in ['feature', 'bug', 'bugfix', 'improvement', 'Next Release', 'Ready to Merge'],
not label.startswith('priority/')
])
@click.command()
@click.option('--github-token', required=True, help='token from gh, look https://help.github.com/en/articles/creating'
'-a-personal-access-token-for-the-command-line')
@click.option('--new-version', required=True, help='new version in format 1.2.3')
@click.option('--from-version', help='version of last release in format 1.2.3')
@click.option('--git-rev-list', help='custom revision list, look man git-rev-list')
def main(github_token, new_version, from_version, git_rev_list):
from_ver = from_version
to_ver = new_version
if from_ver is None and git_rev_list is None:
raise click.BadParameter('Must set --from-version or --git-rev-list')
if git_rev_list is None:
git_rev_list = f'v{from_ver}..origin/release-{to_ver}'
repo = Repo(".")
commits = list(repo.iter_commits(git_rev_list, max_count=500, min_parents=2))
pr_id_set = set(map(get_pull_request_id, commits)) - {None}
g = Github(github_token)
gh_repo = g.get_repo("telstra/open-kilda")
pr_list = [gh_repo.get_pull(n) for n in pr_id_set]
features = []
bugs = []
improvements = []
issues = []
components = set()
for v in pr_list:
labels = [l.name for l in v.get_labels()]
components |= {l[2:].lower() for l in labels if l.startswith("C/")}
pr = {
'id': v.number,
'title': v.title,
'labels': sorted([l for l in labels if lable_filter(l)]),
'issues': sorted(get_issues_from_pr(v)),
'topics': sorted(generate_topic_list(labels))
}
if 'feature' in labels:
features.append(pr)
elif 'bug' in labels or 'bugfix' in labels:
bugs.append(pr)
elif 'improvement' in labels:
improvements.append(pr)
else:
issues.append(pr)
env = Environment(
loader=FileSystemLoader('./tools/changelog-generator')
)
template = env.get_template("template.jinja2")
print(template.render(features=features,
improvements=improvements,
issues=issues,
bugs=bugs,
components=components,
from_ver=from_ver,
to_ver=to_ver,
date=datetime.date.today().strftime("%d/%m/%Y")))
if __name__ == '__main__':
main(auto_envvar_prefix='CHANGELOG_GENERATOR')
|
Python
| 0.000001
|
@@ -157,16 +157,86 @@
_pr(p):%0A
+ text = %22%7B%7D%5Cn%7B%7D%22.format(p.title, %22%22 if p.body is None else p.body)%0A
retu
@@ -285,22 +285,20 @@
(%5Cd+)%22,
-p.body
+text
)%5D%0A%0A%0Adef
|
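The TypeError came from `re.finditer` receiving `None`: GitHub reports `p.body is None` for pull requests with an empty description. The fix scans the title plus a None-safe body, as decoded from the diff:

import re

def get_issues_from_pr(p):
    # p.body is None for PRs without a description, which crashed finditer;
    # include the title so issue references there are picked up too
    text = "{}\n{}".format(p.title, "" if p.body is None else p.body)
    return [m.groups()[0] for m in re.finditer(r"#(\d+)", text)]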
a34357d2221f72bb4a9160d931cd10de4b8eddc2
|
Move dateutil import higher
|
redash/query_runner/google_spreadsheets.py
|
redash/query_runner/google_spreadsheets.py
|
from base64 import b64decode
import json
import logging
import sys
from redash.query_runner import *
from redash.utils import JSONEncoder
logger = logging.getLogger(__name__)
try:
import gspread
from oauth2client.client import SignedJwtAssertionCredentials
from dateutil import parser
enabled = True
except ImportError:
enabled = False
def _load_key(filename):
with open(filename, "rb") as f:
return json.loads(f.read())
def _guess_type(value):
if value == '':
return TYPE_STRING
try:
val = int(value)
return TYPE_INTEGER
except ValueError:
pass
try:
val = float(value)
return TYPE_FLOAT
except ValueError:
pass
if unicode(value).lower() in ('true', 'false'):
return TYPE_BOOLEAN
try:
val = parser.parse(value)
return TYPE_DATETIME
except ValueError:
pass
return TYPE_STRING
def _value_eval_list(value):
value_list = []
for member in value:
if member == '' or member is None:
val = None
value_list.append(val)
continue
try:
val = int(member)
value_list.append(val)
continue
except ValueError:
pass
try:
val = float(member)
value_list.append(val)
continue
except ValueError:
pass
if unicode(member).lower() in ('true', 'false'):
if unicode(member).lower() == 'true':
value_list.append(True)
else:
value_list.append(False)
continue
try:
val = parser.parse(member)
value_list.append(val)
continue
except ValueError:
pass
value_list.append(member)
return value_list
HEADER_INDEX = 0
class WorksheetNotFoundError(Exception):
def __init__(self, worksheet_num, worksheet_count):
message = "Worksheet number {} not found. Spreadsheet has {} worksheets. Note that the worksheet count is zero based.".format(worksheet_num, worksheet_count)
super(WorksheetNotFoundError, self).__init__(message)
def parse_worksheet(worksheet):
if not worksheet:
return {'columns': [], 'rows': []}
column_names = []
columns = []
for j, column_name in enumerate(worksheet[HEADER_INDEX]):
column_names.append(column_name)
columns.append({
'name': column_name,
'friendly_name': column_name,
'type': TYPE_STRING
})
if len(worksheet) > 1:
for j, value in enumerate(worksheet[HEADER_INDEX+1]):
columns[j]['type'] = _guess_type(value)
rows = [dict(zip(column_names, _value_eval_list(row))) for row in worksheet[HEADER_INDEX + 1:]]
data = {'columns': columns, 'rows': rows}
return data
def parse_spreadsheet(spreadsheet, worksheet_num):
worksheets = spreadsheet.worksheets()
worksheet_count = len(worksheets)
if worksheet_num >= worksheet_count:
raise WorksheetNotFoundError(worksheet_num, worksheet_count)
worksheet = worksheets[worksheet_num].get_all_values()
return parse_worksheet(worksheet)
class GoogleSpreadsheet(BaseQueryRunner):
@classmethod
def annotate_query(cls):
return False
@classmethod
def type(cls):
return "google_spreadsheets"
@classmethod
def enabled(cls):
return enabled
@classmethod
def configuration_schema(cls):
return {
'type': 'object',
'properties': {
'jsonKeyFile': {
"type": "string",
'title': 'JSON Key File'
}
},
'required': ['jsonKeyFile'],
'secret': ['jsonKeyFile']
}
def __init__(self, configuration):
super(GoogleSpreadsheet, self).__init__(configuration)
def _get_spreadsheet_service(self):
scope = [
'https://spreadsheets.google.com/feeds',
]
key = json.loads(b64decode(self.configuration['jsonKeyFile']))
credentials = SignedJwtAssertionCredentials(key['client_email'], key["private_key"], scope=scope)
spreadsheetservice = gspread.authorize(credentials)
return spreadsheetservice
def run_query(self, query):
logger.debug("Spreadsheet is about to execute query: %s", query)
values = query.split("|")
        key = values[0]  # key of the spreadsheet
        worksheet_num = 0 if len(values) != 2 else int(values[1])  # if the spreadsheet contains more than one worksheet, this is the worksheet number
try:
spreadsheet_service = self._get_spreadsheet_service()
spreadsheet = spreadsheet_service.open_by_key(key)
data = parse_spreadsheet(spreadsheet, worksheet_num)
json_data = json.dumps(data, cls=JSONEncoder)
error = None
except gspread.SpreadsheetNotFound:
error = "Spreadsheet ({}) not found. Make sure you used correct id.".format(key)
json_data = None
return json_data, error
register(GoogleSpreadsheet)
|
Python
| 0
|
@@ -53,18 +53,35 @@
ing%0A
+from dateutil
import
-sys
+parser
%0Afro
@@ -281,40 +281,8 @@
als%0A
- from dateutil import parser%0A
|
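Decoded, the diff swaps the unused `import sys` for a top-level `from dateutil import parser` and removes the duplicate import inside the `try` block, so `parser` stays available even when the optional packages are missing. The import section after the change, roughly:

from base64 import b64decode
import json
import logging
from dateutil import parser  # moved out of the try block below

try:
    import gspread
    from oauth2client.client import SignedJwtAssertionCredentials
    enabled = True
except ImportError:
    enabled = False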
ebe5a4ce8c12489bceb8991f627fdea29329e854
|
Enable discovery server to reply also when started w/o net connection
|
xfd_discovery_server.py
|
xfd_discovery_server.py
|
#!/usr/bin/env python
#
# Author Aske Olsson aske.olsson@switch-gears.dk
#
import socket
import struct
#MCAST_GRP = '224.1.1.1'
#MCAST_PORT = 5007
MCAST_ADDR = "239.77.124.213"
MCAST_PORT = 19418
MCAST_ANS_PORT = 19419
sock = socket.socket(socket.AF_INET, socket.SOCK_DGRAM, socket.IPPROTO_UDP)
sock.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
sock.bind(('', MCAST_PORT))
mreq = struct.pack("4sl", socket.inet_aton(MCAST_ADDR), socket.INADDR_ANY)
sock.setsockopt(socket.IPPROTO_IP, socket.IP_ADD_MEMBERSHIP, mreq)
#ip = socket.gethostbyname(socket.gethostname())
myMAC = open('/sys/class/net/eth0/address').read()
while True:
try:
data, sender_addr = sock.recvfrom(1024)
# print data, sender_addr
# Answer back
ans_sock = socket.socket(socket.AF_INET, socket.SOCK_DGRAM, socket.IPPROTO_UDP)
ans_sock.setsockopt(socket.IPPROTO_IP, socket.IP_MULTICAST_TTL, 2)
ans_sock.sendto("MAC=" + myMAC, (sender_addr[0], MCAST_ANS_PORT))
except Exception:
pass
|
Python
| 0
|
@@ -14,18 +14,16 @@
v python
-
%0A#%0A# Aut
@@ -95,16 +95,28 @@
t struct
+%0Aimport time
%0A%0A#MCAST
@@ -226,16 +226,53 @@
19419%0A%0A
+def socket_setup():%0A try:%0A
sock = s
@@ -339,16 +339,24 @@
TO_UDP)%0A
+
sock.set
@@ -406,16 +406,24 @@
DDR, 1)%0A
+
sock.bin
@@ -442,16 +442,24 @@
_PORT))%0A
+
mreq = s
@@ -526,16 +526,24 @@
R_ANY)%0A%0A
+
sock.set
@@ -601,17 +601,99 @@
, mreq)%0A
-%0A
+ except socket.error, e:%0A sock = None%0A return sock%0A%0Adef listen():%0A
#ip = so
@@ -733,16 +733,20 @@
name())%0A
+
myMAC =
@@ -792,30 +792,151 @@
d()%0A
-%0Awhile True:%0A try:%0A
+ print %22listen loop%22%0A    sock = socket_setup()%0A    while True:%0A        print %22lock acquired%22, sock%0A        try:%0A            if sock:%0A
@@ -979,17 +979,24 @@
m(1024)%0A
-#
+
@@ -1023,16 +1023,25 @@
ddr%0A
-#
+
+ #
Ans
@@ -1049,16 +1049,24 @@
er back%0A
+
@@ -1145,32 +1145,40 @@
TO_UDP)%0A
+
+
ans_sock.setsock
@@ -1228,32 +1228,40 @@
TTL, 2)%0A
+
+
ans_sock.sendto(
@@ -1318,36 +1318,245 @@
-except Exception:%0A pass
+ else:%0A print %22setup socket%22%0A sock = socket_setup()%0A time.sleep(1)%0A except socket.error, e:%0A sock = None%0A%0Adef main():%0A listen()%0A%0Aif __name__ == %22__main__%22:%0A main()
%0A%0A
|
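The diff restructures the script so socket setup is retried instead of assumed: a `socket_setup()` helper returns `None` on `socket.error`, and the listen loop keeps recreating the socket until the network comes up. A condensed sketch of the new shape, keeping the record's Python 2 syntax (`print` statements, `except socket.error, e`) and omitting the unchanged answer-back block:

import time

def socket_setup():
    try:
        sock = socket.socket(socket.AF_INET, socket.SOCK_DGRAM, socket.IPPROTO_UDP)
        sock.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
        sock.bind(('', MCAST_PORT))
        mreq = struct.pack("4sl", socket.inet_aton(MCAST_ADDR), socket.INADDR_ANY)
        sock.setsockopt(socket.IPPROTO_IP, socket.IP_ADD_MEMBERSHIP, mreq)
    except socket.error, e:
        sock = None  # no network yet; caller will retry
    return sock

def listen():
    sock = socket_setup()
    while True:
        try:
            if sock:
                data, sender_addr = sock.recvfrom(1024)
                # ... answer back with the MAC address, as before ...
            else:
                sock = socket_setup()
                time.sleep(1)
        except socket.error, e:
            sock = None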
2cf4a0b93db423207798ffd93b2e91cdb73b6d2b
|
Add identifier for UT Brownsville
|
tx_salaries/utils/transformers/ut_brownsville.py
|
tx_salaries/utils/transformers/ut_brownsville.py
|
from . import base
from . import mixins
class TransformedRecord(mixins.GenericCompensationMixin,
mixins.GenericDepartmentMixin, mixins.GenericIdentifierMixin,
mixins.GenericJobTitleMixin, mixins.GenericPersonMixin,
mixins.MembershipMixin, mixins.OrganizationMixin, mixins.PostMixin,
mixins.RaceMixin, base.BaseTransformedRecord):
MAP = {
'last_name': 'Last Name',
'first_name': 'First Name',
'department': 'Department',
'job_title': 'Title',
'hire_date': 'Hire Date',
'status': 'LABEL FOR FT/PT STATUS',
'compensation': 'Annualized',
'race': 'Race',
'gender': 'Gender'
}
NAME_FIELDS = ('first_name', 'last_name', )
ORGANIZATION_NAME = 'University of Texas at Brownsville'
ORGANIZATION_CLASSIFICATION = 'University'
# TODO not given on spreadsheet, but they appear to give part time
compensation_type = 'Full Time'
@property
def is_valid(self):
# Adjust to return False on invalid fields. For example:
return self.last_name.strip() != ''
transform = base.transform_factory(TransformedRecord)
|
Python
| 0.00011
|
@@ -431,32 +431,70 @@
: 'First Name',%0A
+ 'middle_name': 'Middle Name',%0A
'departm
@@ -513,16 +513,16 @@
tment',%0A
-
@@ -581,52 +581,8 @@
e',%0A
- 'status': 'LABEL FOR FT/PT STATUS',%0A
@@ -1044,16 +1044,16 @@
xample:%0A
-
@@ -1089,16 +1089,415 @@
!= ''%0A%0A
+ @property%0A def identifier(self):%0A %22%22%22%0A Identifier for UT Brownsville%0A %22%22%22%0A excluded = %5Bself.department_key, self.job_title_key,%0A self.hire_date_key, self.compensation_key%5D%0A return %7B%0A 'scheme': 'tx_salaries_hash',%0A 'identifier': base.create_hash_for_record(self.data,%0A exclude=excluded)%0A %7D%0A%0A
%0Atransfo
|
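Besides adding a 'middle_name' mapping and dropping the unused 'status' one, the diff gives the transformer a stable hash-based identifier that excludes fields likely to change between data releases. The new property, decoded from the diff:

    @property
    def identifier(self):
        """
        Identifier for UT Brownsville
        """
        excluded = [self.department_key, self.job_title_key,
                    self.hire_date_key, self.compensation_key]
        return {
            'scheme': 'tx_salaries_hash',
            'identifier': base.create_hash_for_record(self.data,
                                                      exclude=excluded)
        }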
3aaaefcb91b31fbd58f1b928c9d975c2bc218eea
|
Fix compatibility with Python 3.4
|
src/neuroglancer_scripts/file_accessor.py
|
src/neuroglancer_scripts/file_accessor.py
|
# Copyright (c) 2016, 2017, 2018 Forschungszentrum Juelich GmbH
# Author: Yann Leprince <y.leprince@fz-juelich.de>
#
# This software is made available under the MIT licence, see LICENCE.txt.
"""Access to a Neuroglancer pre-computed dataset on the local filesystem.
See the :mod:`~neuroglancer_scripts.accessor` module for a description of the
API.
"""
import gzip
import pathlib
import neuroglancer_scripts.accessor
from neuroglancer_scripts.accessor import _CHUNK_PATTERN_FLAT, DataAccessError
__all__ = [
"FileAccessor",
]
_CHUNK_PATTERN_SUBDIR = "{key}/{0}-{1}/{2}-{3}/{4}-{5}"
NO_COMPRESS_MIME_TYPES = {
"application/json",
"image/jpeg",
"image/png",
}
class FileAccessor(neuroglancer_scripts.accessor.Accessor):
"""Access a Neuroglancer pre-computed pyramid on the local file system.
:param str base_dir: path to the directory containing the pyramid
:param bool flat: use a flat file layout (see :ref:`layouts`)
:param bool gzip: compress chunks losslessly with gzip
"""
can_read = True
can_write = True
def __init__(self, base_dir, flat=False, gzip=True):
self.base_path = pathlib.Path(base_dir)
if flat:
self.chunk_pattern = _CHUNK_PATTERN_FLAT
else:
self.chunk_pattern = _CHUNK_PATTERN_SUBDIR
self.gzip = gzip
def fetch_file(self, relative_path):
relative_path = pathlib.Path(relative_path)
file_path = self.base_path / relative_path
if ".." in file_path.relative_to(self.base_path).parts:
raise ValueError("only relative paths pointing under base_path "
"are accepted")
if file_path.is_file():
f = file_path.open("rb")
elif file_path.with_name(file_path.name + ".gz").is_file():
f = gzip.open(str(file_path.with_name(file_path.name + ".gz")),
"rb")
else:
raise DataAccessError("Cannot find {0} in {1}".format(
relative_path, self.base_path))
try:
with f:
return f.read()
except OSError as exc:
raise DataAccessError(
"Error fetching {1}: {2}" .format(
relative_path, self.base_path, exc)) from exc
def store_file(self, relative_path, buf,
mime_type="application/octet-stream",
overwrite=False):
relative_path = pathlib.Path(relative_path)
file_path = self.base_path / relative_path
if ".." in file_path.relative_to(self.base_path).parts:
raise ValueError("only relative paths pointing under base_path "
"are accepted")
mode = "wb" if overwrite else "xb"
try:
file_path.parent.mkdir(parents=True, exist_ok=True)
if self.gzip and mime_type not in NO_COMPRESS_MIME_TYPES:
with gzip.open(
str(file_path.with_name(file_path.name + ".gz")),
mode) as f:
f.write(buf)
else:
with file_path.open(mode) as f:
f.write(buf)
except OSError as exc:
raise DataAccessError("Error storing {0}: {1}"
.format(file_path, exc)) from exc
def fetch_chunk(self, key, chunk_coords):
f = None
for pattern in _CHUNK_PATTERN_FLAT, _CHUNK_PATTERN_SUBDIR:
chunk_path = self._chunk_path(key, chunk_coords, pattern)
if chunk_path.is_file():
f = chunk_path.open("rb")
elif chunk_path.with_name(chunk_path.name + ".gz").is_file():
f = gzip.open(
str(chunk_path.with_name(chunk_path.name + ".gz")), "rb"
)
if f is None:
raise DataAccessError("Cannot find chunk {0} in {1}".format(
self._flat_chunk_basename(key, chunk_coords), self.base_path))
try:
with f:
return f.read()
except OSError as exc:
raise DataAccessError(
"Error accessing chunk {0} in {1}: {2}" .format(
self._flat_chunk_basename(key, chunk_coords),
self.base_path, exc)) from exc
def store_chunk(self, buf, key, chunk_coords,
mime_type="application/octet-stream",
overwrite=True):
chunk_path = self._chunk_path(key, chunk_coords)
mode = "wb" if overwrite else "xb"
try:
chunk_path.parent.mkdir(parents=True, exist_ok=True)
if self.gzip and mime_type not in NO_COMPRESS_MIME_TYPES:
with gzip.open(
str(chunk_path.with_name(chunk_path.name + ".gz")),
mode) as f:
f.write(buf)
else:
with chunk_path.open(mode) as f:
f.write(buf)
except OSError as exc:
raise DataAccessError(
"Error storing chunk {0} in {1}: {2}" .format(
self._flat_chunk_basename(key, chunk_coords),
self.base_path, exc)) from exc
def _chunk_path(self, key, chunk_coords, pattern=None):
if pattern is None:
pattern = self.chunk_pattern
xmin, xmax, ymin, ymax, zmin, zmax = chunk_coords
chunk_filename = pattern.format(
xmin, xmax, ymin, ymax, zmin, zmax, key=key)
return self.base_path / chunk_filename
def _flat_chunk_basename(self, key, chunk_coords):
xmin, xmax, ymin, ymax, zmin, zmax = chunk_coords
chunk_filename = _CHUNK_PATTERN_FLAT.format(
xmin, xmax, ymin, ymax, zmin, zmax, key=key)
return chunk_filename
|
Python
| 0.000187
|
@@ -360,16 +360,26 @@
rt gzip%0A
+import os%0A
import p
@@ -2793,43 +2793,41 @@
-file_path.parent.mkdir(parents=True
+os.makedirs(str(file_path.parent)
, ex
@@ -4571,32 +4571,48 @@
ry:%0A
+os.makedirs(str(
chunk_path.paren
@@ -4616,27 +4616,9 @@
rent
-.mkdir(parents=True
+)
, ex
|
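The incompatibility: `pathlib.Path.mkdir()` only gained its `exist_ok` keyword in Python 3.5, so on 3.4 both `mkdir(parents=True, exist_ok=True)` calls raise TypeError. The diff adds `import os` and substitutes `os.makedirs`, which has accepted `exist_ok` since 3.2. The pattern, applied to both the `file_path` and `chunk_path` call sites:

import os

# Python 3.4-compatible replacement for
# file_path.parent.mkdir(parents=True, exist_ok=True)
os.makedirs(str(file_path.parent), exist_ok=True)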
c3df6a10d008441c79eb07b889f52fe0de22538b
|
Fix the default prefix
|
powerline_vaulted_segment/vaulted.py
|
powerline_vaulted_segment/vaulted.py
|
from __future__ import (unicode_literals, division, absolute_import, print_function)
from powerline.theme import requires_segment_info
@requires_segment_info
def vaulted(pl, segment_info, prefix=None):
'''Return the current vaulted vault
:param string prefix:
The prefix to use in front of the vault name
'''
vault = segment_info['environ'].get('VAULTED_ENV', None)
if vault:
return '{0}{1}'.format(prefix, vault)
|
Python
| 0.998784
|
@@ -191,20 +191,18 @@
prefix=
-None
+''
):%0A '
|
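With the old default, an unset prefix reached `'{0}{1}'.format(None, vault)` and the segment rendered a literal "None" before the vault name; an empty-string default avoids that. The fixed signature per the diff:

@requires_segment_info
def vaulted(pl, segment_info, prefix=''):
    vault = segment_info['environ'].get('VAULTED_ENV', None)
    if vault:
        return '{0}{1}'.format(prefix, vault)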
b9749b3e635c2af40eb2c08b5d5ef29d354ba453
|
fix a test
|
micronota/bfillings/tests/test_diamond.py
|
micronota/bfillings/tests/test_diamond.py
|
# ----------------------------------------------------------------------------
# Copyright (c) 2015--, micronota development team.
#
# Distributed under the terms of the Modified BSD License.
#
# The full license is in the file COPYING.txt, distributed with this software.
# ----------------------------------------------------------------------------
from tempfile import mkdtemp
from filecmp import cmp
from shutil import rmtree
from os import getcwd
from os.path import join
from collections import namedtuple
from unittest import TestCase, main
import numpy as np
import skbio
from skbio.util import get_data_path
from burrito.util import ApplicationError
from micronota.util import _get_named_data_path
from micronota.bfillings.diamond import (
DiamondMakeDB, make_db, FeatureAnnt,
DiamondCache)
class DiamondTests(TestCase):
def setUp(self):
self.tmp_dir = mkdtemp()
self.db_fa = _get_named_data_path('db.faa')
self.db = _get_named_data_path('db.dmnd')
self.neg_fp = [get_data_path(i) for i in
['empty', 'whitespace_only']]
def tearDown(self):
rmtree(self.tmp_dir)
class DiamondMakeDBTests(DiamondTests):
def test_base_command(self):
c = DiamondMakeDB()
self.assertEqual(
c.BaseCommand,
'cd "%s/"; %s' % (getcwd(), c._command))
def test_make_db(self):
fp = join(self.tmp_dir, 'db.dmnd')
make_db(self.db_fa, fp)
with open(fp, 'rb') as obs, open(self.db, 'rb') as exp:
self.assertEqual(obs.read(), exp.read())
def test_make_db_wrong_input(self):
fp = join(self.tmp_dir, 'db.dmnd')
for i in self.neg_fp:
with self.assertRaisesRegex(
ApplicationError,
r'(Error reading file)|(Invalid input file format)'):
make_db(i, fp)
class DiamondBlastTests(DiamondTests):
def setUp(self):
super().setUp()
cases = [('blastp', 'WP_009885814.faa'),
('blastx', 'WP_009885814.fna')]
Test = namedtuple('Test', ['aligner', 'input', 'exp'])
self.tests = [Test(i[0],
get_data_path(i[1]),
_get_named_data_path(i[1]))
for i in cases]
def test_blast(self):
for test in self.tests:
pred = FeatureAnnt([self.db], mkdtemp(dir=self.tmp_dir))
obs = pred(test.input, aligner=test.aligner, outfmt='tab')
exp = pred._filter_best(pred.parse_tabular('%s.diamond' % test.exp))
self.assertTrue(exp.equals(obs))
obs = pred(test.input, aligner=test.aligner, outfmt='sam')
exp = pred._filter_id_cov(pred.parse_sam('%s.idcov' % test.exp))
self.assertTrue(exp.equals(obs))
def test_blast_wrong_input(self):
pred = FeatureAnnt([self.db], self.tmp_dir)
for i in self.neg_fp:
for aligner in ['blastp', 'blastx']:
with self.assertRaisesRegex(
ApplicationError,
r'(Error reading file)|(Invalid input file format)'):
pred(i, aligner=aligner)
class DiamondCacheTests(DiamondTests):
def setUp(self):
super().setUp()
tests = ('blastp', 'WP_009885814.faa')
self.blast = (tests[0], get_data_path(tests[1]),
_get_named_data_path('%s.diamond' % tests[1]))
seqs = skbio.read(_get_named_data_path('cache.faa'), format='fasta')
self.cache = DiamondCache(list(seqs))
def test_cache(self):
np.random.seed(0)
aligner, query, exp_fp = self.blast
pred = FeatureAnnt([self.db], mkdtemp(dir=self.tmp_dir),
cache=self.cache)
obs = pred(query, aligner=aligner)
exp = pred._filter_best(pred.parse_tabular(exp_fp))
self.assertEqual(exp['sseqid'].values, obs['sseqid'].values)
def test_cache_empty_db(self):
np.random.seed(0)
aligner, query, exp_fp = self.blast
pred = FeatureAnnt([], mkdtemp(dir=self.tmp_dir),
cache=self.cache)
obs = pred(query, aligner=aligner)
exp = pred._filter_best(pred.parse_tabular(exp_fp))
self.assertEqual(exp['sseqid'].values, obs['sseqid'].values)
class ParsingTests(TestCase):
def setUp(self):
self.tmp_dir = mkdtemp()
cases = ['WP_009885814.fna', 'WP_009885814.faa']
Test = namedtuple('Test', ['input', 'exp', 'obs'])
self.sam_tests = [Test(_get_named_data_path('%s.sam' % i),
_get_named_data_path('%s.txt' % i),
join(self.tmp_dir, '%s.txt' % i))
for i in cases]
self.filter_tests = [Test(_get_named_data_path('%s.diamond' % i),
_get_named_data_path('%s.best' % i),
join(self.tmp_dir, '%s.best'))
for i in cases]
self.filter_tests2 = [Test(_get_named_data_path('%s.sam' % i),
_get_named_data_path('%s.idcov' % i),
join(self.tmp_dir, '%s.idcov'))
for i in cases]
def test_parse_sam(self):
for test in self.sam_tests:
df = FeatureAnnt.parse_sam(test.input)
df.to_csv(test.obs, sep='\t', index=False)
self.assertTrue(cmp(test.exp, test.obs, shallow=False))
def test_filter_best(self):
for test in self.filter_tests:
df = FeatureAnnt.parse_tabular(test.input)
df_filter = FeatureAnnt._filter_best(df)
df_filter.to_csv(test.obs, sep='\t')
self.assertTrue(cmp(test.exp, test.obs, shallow=False))
def test_filter_id_cov(self):
for test in self.filter_tests2:
df = FeatureAnnt.parse_sam(test.input)
df_filter = FeatureAnnt._filter_id_cov(df, pident=30, cov=92)
df_filter.to_csv(test.obs, sep='\t')
self.assertTrue(cmp(test.exp, test.obs, shallow=False))
if __name__ == '__main__':
main()
|
Python
| 0.999791
|
@@ -2747,29 +2747,27 @@
rse_sam('%25s.
-idcov
+sam
' %25 test.exp
|
a0dc4dc94d27e824ac4fd9d6c7fe7f929587f08c
|
Add 19.0.{1,2,3} (#11401)
|
var/spack/repos/builtin/packages/mesa/package.py
|
var/spack/repos/builtin/packages/mesa/package.py
|
# Copyright 2013-2019 Lawrence Livermore National Security, LLC and other
# Spack Project Developers. See the top-level COPYRIGHT file for details.
#
# SPDX-License-Identifier: (Apache-2.0 OR MIT)
from spack import *
import sys
class Mesa(MesonPackage):
"""Mesa is an open-source implementation of the OpenGL specification
- a system for rendering interactive 3D graphics."""
homepage = "http://www.mesa3d.org"
# Note that we always want to build from the git repo instead of a
# tarball since the tarball has pre-generated files for certain versions
# of LLVM while the git repo doesn't so it can adapt at build time to
# whatever version of LLVM you're using.
git = "https://gitlab.freedesktop.org/mesa/mesa.git"
version('19.0.0', tag='mesa-19.0.0')
version('19.0.develop', branch='19.0')
version('develop', branch='master')
depends_on('meson@0.45:', type='build')
depends_on('binutils', type='build')
depends_on('pkgconfig', type='build')
depends_on('python@3:', type='build')
depends_on('py-mako@0.8.0:', type='build')
depends_on('libxml2')
depends_on('zlib')
depends_on('expat')
# Internal options
variant('llvm', default=True, description="Enable LLVM.")
variant('swr', values=any_combination_of('avx', 'avx2', 'knl', 'skx'),
description="Enable the SWR driver.")
# conflicts('~llvm', when='~swr=none')
# Front ends
variant('osmesa', default=True, description="Enable the OSMesa frontend.")
is_linux = sys.platform.startswith('linux')
variant('glx', default=is_linux, description="Enable the GLX frontend.")
# TODO: effectively deal with EGL. The implications of this have not been
# worked through yet
# variant('egl', default=False, description="Enable the EGL frontend.")
# TODO: Effectively deal with hardware drivers
# The implication of this is enabling DRI, among other things, and
# needing to check which llvm targets were built (ptx or amdgpu, etc.)
# Back ends
variant('opengl', default=True, description="Enable full OpenGL support.")
variant('opengles', default=False, description="Enable OpenGL ES support.")
# Provides
provides('gl@4.5', when='+opengl')
provides('glx@1.4', when='+glx')
# provides('egl@1.5', when='+egl')
# Variant dependencies
depends_on('llvm@6:', when='+llvm')
depends_on('libx11', when='+glx')
depends_on('libxcb', when='+glx')
depends_on('libxext', when='+glx')
depends_on('glproto@1.4.14:', when='+glx', type='build')
# Fix glproto dependency for glx=gallium-xlib
# https://gitlab.freedesktop.org/mesa/mesa/merge_requests/806
patch('glproto-mr806.patch', when='@19.0.0')
def meson_args(self):
spec = self.spec
args = [
'-Dglvnd=false',
'-Dgallium-nine=false',
'-Dgallium-omx=disabled',
'-Dgallium-opencl=disabled',
'-Dgallium-va=false',
'-Dgallium-vdpau=false',
'-Dgallium-xa=false',
'-Dgallium-xvmc=false',
'-Dvulkan-drivers=']
args_platforms = []
args_gallium_drivers = ['swrast']
args_dri_drivers = []
num_frontends = 0
if '+osmesa' in spec:
num_frontends += 1
args.append('-Dosmesa=gallium')
else:
args.append('-Dosmesa=disabled')
if '+glx' in spec:
num_frontends += 1
args.append('-Dglx=gallium-xlib')
args_platforms.append('x11')
else:
args.append('-Dglx=disabled')
if '+egl' in spec:
num_frontends += 1
args.extend(['-Degl=true', '-Dgbm=true'])
else:
args.extend(['-Degl=false', '-Dgbm=false'])
if '+opengl' in spec:
args.append('-Dopengl=true')
else:
args.append('-Dopengl=false')
if '+opengles' in spec:
args.extend(['-Dgles1=true', '-Dgles2=true'])
else:
args.extend(['-Dgles1=false', '-Dgles2=false'])
if '+egl' in spec or '+osmesa' in spec:
args_platforms.append('surfaceless')
if num_frontends > 1:
args.append('-Dshared-glapi=true')
else:
args.append('-Dshared-glapi=false')
if '+llvm' in spec:
args.append('-Dllvm=true')
if '+link_dylib' in spec['llvm']:
args.append('-Dshared-llvm=true')
else:
args.append('-Dshared-llvm=false')
else:
args.append('-Dllvm=false')
args_swr_arches = []
if 'swr=avx' in spec:
args_swr_arches.append('avx')
if 'swr=avx2' in spec:
args_swr_arches.append('avx2')
if 'swr=knl' in spec:
args_swr_arches.append('knl')
if 'swr=skx' in spec:
args_swr_arches.append('skx')
if args_swr_arches:
if '+llvm' not in spec:
raise SpecError('Variant swr requires +llvm')
args_gallium_drivers.append('swr')
args.append('-Dswr-arches=' + ','.join(args_swr_arches))
# Add the remaining list args
args.append('-Dplatforms=' + ','.join(args_platforms))
args.append('-Dgallium-drivers=' + ','.join(args_gallium_drivers))
args.append('-Ddri-drivers=' + ','.join(args_dri_drivers))
return args
|
Python
| 0.000152
|
@@ -770,37 +770,83 @@
on('
-19.0.0', tag='mesa-19.0.0
+develop', branch='master')%0A version('19.1.develop', branch='19.1
')%0A
-%0A
@@ -901,37 +901,172 @@
on('
-develop', branch='master
+19.0.3', tag='mesa-19.0.3', preferred=True)%0A version('19.0.2', tag='mesa-19.0.2')%0A version('19.0.1', tag='mesa-19.0.1')%0A version('19.0.0', tag='mesa-19.0.0
')%0A%0A
@@ -2923,24 +2923,33 @@
hen='@19.0.0
+:19.0.999
')%0A%0A def
|
cffead618248743046655aeb030938cde5180545
|
fix glint enable configuration reference
|
web_frontend/cloudscheduler/csv2/server_views.py
|
web_frontend/cloudscheduler/csv2/server_views.py
|
from django.conf import settings
config = settings.CSV2_CONFIG
from django.shortcuts import render, get_object_or_404, redirect
from django.views.decorators.csrf import requires_csrf_token
from django.http import HttpResponse
from django.core.exceptions import PermissionDenied
from cloudscheduler.lib.schema import csv2_configuration
from cloudscheduler.lib.view_utils import \
cskv, \
lno, \
qt, \
render, \
set_user_groups
from collections import defaultdict
import bcrypt
from sqlalchemy import Table, MetaData
from sqlalchemy.sql import select
#import sqlalchemy.exc
from cloudscheduler.lib.web_profiler import silk_profile as silkp
# lno: SV - error code identifier.
MODID = 'SV'
#-------------------------------------------------------------------------------
@silkp(name="Server Config")
def configuration(request):
"""
Update and list server configurations
"""
config.db_open()
message = None
# Retrieve the active user, associated group list and optionally set the active group.
rc, msg, active_user = set_user_groups(config, request)
if rc == 0:
if request.method == 'POST':
if 'category' in request.POST:
if 'config_key_values' in request.POST:
rc, msg, key_values = cskv(request.POST['config_key_values'])
category = request.POST['category']
else:
rc = 0
key_values = {}
for key in request.POST:
key_values[key] = request.POST[key]
key_values.pop('csrfmiddlewaretoken', None)
category = key_values.pop('category', None)
if rc == 0:
config_list = qt(config.db_connection.execute('select config_key,config_type,config_value from csv2_configuration where category="%s"' % category))
if len(config_list) > 0:
config_keys = {}
for config_item in config_list:
config_keys[config_item['config_key']] = {'type': config_item['config_type'], 'value': config_item['config_value']}
message = None
for key in key_values:
if key in config_keys:
if config_keys[key]['type'] == 'int':
try:
ignore = int(key_values[key])
except:
message = '%s server config update failed - value specified ("%s") for category="%s", config_key="%s" must be an integer.' % (lno(MODID), key_values[key], category, key)
break
elif config_keys[key]['type'] == 'str':
pass
elif config_keys[key]['type'] == 'bool':
bool = key_values[key].lower()
if bool == '0' or bool == 'no' or bool == 'off' or bool == 'false':
key_values[key] = 'False'
elif bool == '1' or bool == 'yes' or bool == 'on' or bool == 'true':
key_values[key] = 'True'
else:
message = '%s server config update failed - value specified ("%s") for category="%s", config_key="%s" must be a boolean value.' % (lno(MODID), key_values[key], category, key)
break
elif config_keys[key]['type'] == 'decimal' or config_keys[key]['type'] == 'float':
try:
ignore = float(key_values[key])
except:
message = '%s server config update failed - value specified ("%s") for category="%s", config_key="%s" must be a decimal or floating point number.' % (lno(MODID), key_values[key], category, key)
break
else:
raise Exception('unsupported config_type in csv2_configurations - category="%s", config_key="%s", config_type="%s".' % (category, key, config_keys[key]))
else:
message = '%s server config update failed - category="%s", invalid key "%s" specified.' % (lno(MODID), category, key)
break
if not message:
keys = []
table = Table('csv2_configuration', MetaData(bind=config.db_engine), autoload=True)
for key in key_values:
if key_values[key] != config_keys[key]['value']:
keys.append(key)
rc, msg = config.db_session_execute(table.update().where((table.c.category==category) & (table.c.config_key==key)).values({table.c.config_value:key_values[key]}))
if rc != 0:
config.db_session.rollback()
message = '{} server config update failed - {}'.format(lno(MODID), msg)
break
if len(keys) > 0:
config.db_session.commit()
message = 'server config update successfully updated the following keys: %s' % ', '.join(keys)
else:
message = '%s server config update failed - invalid category "%s" specified.' % (lno(MODID), category)
else:
message = '%s server config update failed - no category specified.' % lno(MODID)
else:
message='{} {}'.format(lno(MODID), msg)
if message and message[:2] == 'SV':
response_code = 1
else:
response_code = 0
s = select([csv2_configuration])
config_list = qt(config.db_connection.execute(s))
config_categories = list({v['category']:v for v in config_list})
# Render the page.
context = {
'active_user': active_user.username,
'active_group': active_user.active_group,
'user_groups': active_user.user_groups,
'config_list': config_list,
'config_categories': config_categories,
'response_code': response_code,
'message': message,
'enable_glint': config.enable_glint,
'is_superuser': active_user.is_superuser,
'version': config.get_version()
}
config.db_close()
return render(request, 'csv2/server_config.html', context)
|
Python
| 0
|
@@ -7113,16 +7113,44 @@
config.
+categories%5B%22web_frontend%22%5D%5B%22
enable_g
@@ -7153,16 +7153,18 @@
le_glint
+%22%5D
,%0A
|
1ba440ca24b0108d4dcf911f1f3c967ff7de1dc4
|
Create a unique filename when uploading a team file.
|
hackday/teams/models.py
|
hackday/teams/models.py
|
from django.contrib.auth.models import User
from django.db import models
from django import forms
from django.template.defaultfilters import slugify
from assets.models import Attachment, ImageAttachment, Link
from charities.models import Charity
from voting.moremodels import Category
class STATUS(object):
"""
Status of the team
"""
ACTIVE = 'A'
DISQUALIFIED = 'D'
DELETED = 'X'
CHOICES = (
(ACTIVE, 'Active'),
(DISQUALIFIED, 'Disqualified'),
(DELETED, 'Deleted'),
)
class PROJECT_TYPE(object):
"""
Type of project -- 'implemented' (working code) or 'concept' (smoke and
Powerpoint mirrors)
"""
# I honestly came really close to calling these 'SMOKE' and 'MIRRORS' but
# couldn't decide which to assign to which. - mpirnat
IMPLEMENTED = 'I'
CONCEPT = 'C'
CHOICES = (
(IMPLEMENTED, 'Implemented'),
(CONCEPT, 'Concept'),
)
class Team(models.Model):
"""
A team of participants that will work on a project and compete for fabulous
prizes, fame, and glory.
Upon creation, a team needs:
* a name--hopefully an awesome one
* a slug, to be used for the URL of the team's page
* a project description
* a project type, so that we can differentiate "real" hacks vs. thought
experiments (aka "code vs. ppt")
* a creator
* a captain
* team members
* a judged category
* a charity that the team is supporting
The creator and captain may have management powers above and beyond
those of a mere member.
"""
name = models.CharField('name of team', max_length=255, db_index=True,
unique=True)
slug = models.SlugField('slugified team name', db_index=True, unique=True,
editable=False)
project = models.TextField('description of project')
logo = models.ImageField('team logo image', blank=True, upload_to='teams')
project_type = models.CharField('type of project', max_length=1,
db_index=True, choices=PROJECT_TYPE.CHOICES)
status = models.CharField(max_length=1, db_index=True,
choices=STATUS.CHOICES)
creator = models.ForeignKey(User,
related_name="%(app_label)s_%(class)s_creator")
captain = models.ForeignKey(User,
related_name="%(app_label)s_%(class)s_captain")
members = models.ManyToManyField(User,
related_name="%(app_label)s_%(class)s_members")
attachments = models.ManyToManyField(Attachment, blank=True,
related_name="%(app_label)s_%(class)s_attachments")
images = models.ManyToManyField(ImageAttachment, blank=True,
related_name="%(app_label)s_%(class)s_images")
links = models.ManyToManyField(Link, blank=True,
related_name="%(app_label)s_%(class)s_links")
category = models.ForeignKey(Category)
charity = models.ForeignKey(Charity)
create_date = models.DateTimeField('date created', auto_now_add=True)
mod_date = models.DateTimeField('date modified', auto_now=True)
def save(self, *args, **kwargs):
#TODO: check if slug exists in DB
if not self.slug:
self.slug = slugify(self.name)
return super(Team, self).save(*args, **kwargs)
def __unicode__(self):
return self.name
class Meta:
ordering = ['name']
|
Python
| 0
|
@@ -1,20 +1,33 @@
+import time%0A%0A
from django.contrib.
@@ -945,24 +945,482 @@
'),%0A )%0A%0A%0A
+def create_unique_team_filename(instance, filename):%0A %22%22%22 Return a uniqque filename for an uploaded team file.%0A -- called when saving a Team to the DB%0A %22%22%22%0A filename_parts = filename.split('.')%0A return 'teams/%7Bteam_slug%7D/%7Bfile_prefix%7D-%7Bstamp%7D.%7Bfile_suffix%7D'.format(%0A team_slug=instance.slug,%0A file_prefix='.'.join(filename_parts%5B:-1%5D),%0A stamp=time.time(),%0A file_suffix=filename_parts%5B-1%5D)%0A%0A%0A
class Team(m
@@ -2416,16 +2416,27 @@
nk=True,
+%0A
upload
@@ -2443,15 +2443,35 @@
_to=
-'teams'
+create_unique_team_filename
)%0A%0A
|
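Decoded from the escaped diff, the commit adds a timestamp-based upload-path helper and points the logo field at it; a sketch of the resulting code (with the docstring's 'uniqque' typo corrected):

import time

def create_unique_team_filename(instance, filename):
    """ Return a unique filename for an uploaded team file.
    -- called when saving a Team to the DB
    """
    filename_parts = filename.split('.')
    return 'teams/{team_slug}/{file_prefix}-{stamp}.{file_suffix}'.format(
        team_slug=instance.slug,
        file_prefix='.'.join(filename_parts[:-1]),
        stamp=time.time(),  # seconds since epoch keep repeated uploads distinct
        file_suffix=filename_parts[-1])

logo = models.ImageField('team logo image', blank=True,
                         upload_to=create_unique_team_filename)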
b941499db4b1ee57b4c576483b6b738259dc9fda
|
fix tf delete_status bug using keras.clear_session() instead of gc
|
rl/session.py
|
rl/session.py
|
import gc
import gym
import json
import multiprocessing as mp
from datetime import datetime
from functools import partial
from rl.spec import game_specs
from rl.util import *
class Session(object):
'''
main.py calls this
The base class for running a session of
a DQN Agent, at a problem, with agent params
'''
def __init__(self, problem, Agent, Memory, Policy, param):
self.problem = problem
self.Agent = Agent
self.Memory = Memory
self.Policy = Policy
self.param = param
def run_episode(self, sys_vars, env, agent):
'''run one episode, return sys_vars'''
state = env.reset()
agent.memory.reset_state(state)
total_rewards = 0
debug_agent_info(agent)
for t in range(agent.env_spec['timestep_limit']):
sys_vars['t'] = t # update sys_vars t
if sys_vars.get('RENDER'):
env.render()
action = agent.select_action(state)
next_state, reward, done, info = env.step(action)
agent.memory.add_exp(action, reward, next_state, done)
agent.update(sys_vars)
if agent.to_train(sys_vars):
agent.train(sys_vars)
state = next_state
total_rewards += reward
if done:
break
update_history(agent, sys_vars, t, total_rewards)
return sys_vars
def run(self):
'''run a session of agent'''
sys_vars = init_sys_vars(
self.problem, self.param) # rl system, see util.py
env = gym.make(sys_vars['GYM_ENV_NAME'])
agent = self.Agent(get_env_spec(env), **self.param)
memory = self.Memory(**self.param)
policy = self.Policy(**self.param)
agent.compile(memory, policy)
logger.info('Compiled Agent, Memory, Policy')
for epi in range(sys_vars['MAX_EPISODES']):
sys_vars['epi'] = epi # update sys_vars epi
self.run_episode(sys_vars, env, agent)
if sys_vars['solved']:
break
gc.collect() # manual gc to fix TF issue 3388
return sys_vars
def experiment_analytics(data):
'''
helper: define the performance metric
given data from an experiment
'''
sys_vars_array = data['sys_vars_array']
mean_r_array = [sys_vars['mean_rewards'] for sys_vars in sys_vars_array]
metrics = {
'experiment_mean': np.mean(mean_r_array),
'experiment_std': np.std(mean_r_array),
}
return metrics
def save_experiment_data(data_grid):
'''
log the entire experiment data grid from inside run()
'''
# sort data, best first
data_grid.sort(
key=lambda data: data['metrics']['experiment_mean'],
reverse=True)
timestamp = '{:%Y-%m-%d_%H%M%S}'.format(datetime.now())
filename = './data/{}_{}_{}_{}_{}.json'.format(
data_grid[0]['sess_spec']['problem'],
data_grid[0]['sess_spec']['Agent'],
data_grid[0]['sess_spec']['Memory'],
data_grid[0]['sess_spec']['Policy'],
timestamp
)
with open(filename, 'w') as f:
json.dump(data_grid, f, indent=2, sort_keys=True)
logger.info('Experiment complete, written to data/')
def run_single_exp(sess_spec, data_grid, times=1):
'''
helper: run an experiment for Session
a number of times, given a sess_spec from gym_specs
'''
start_time = datetime.now().isoformat()
sess = Session(problem=sess_spec['problem'],
Agent=sess_spec['Agent'],
Memory=sess_spec['Memory'],
Policy=sess_spec['Policy'],
param=sess_spec['param'])
sys_vars_array = [sess.run() for i in range(times)]
end_time = datetime.now().isoformat()
data = { # experiment data
'start_time': start_time,
'sess_spec': stringify_param(sess_spec),
'sys_vars_array': sys_vars_array,
'metrics': None,
'end_time': end_time,
}
data.update({'metrics': experiment_analytics(data)})
# progressive update of data_grid, write when an exp is done
data_grid.append(data)
save_experiment_data(data_grid)
return data
def run(sess_name, run_param_selection=False, times=1):
'''
primary method:
run the experiment (single or multiple)
specifying if this should be a param_selection run
and run each for a number of times
calls run_single_exp internally
and employs parallelism whenever possible
'''
sess_spec = game_specs.get(sess_name)
data_grid = []
if run_param_selection:
param_grid = param_product(
sess_spec['param'], sess_spec['param_range'])
sess_spec_grid = [{
'problem': sess_spec['problem'],
'Agent': sess_spec['Agent'],
'Memory': sess_spec['Memory'],
'Policy': sess_spec['Policy'],
'param': param,
} for param in param_grid]
p = mp.Pool(mp.cpu_count())
list(p.map(
partial(run_single_exp, data_grid=data_grid, times=times),
sess_spec_grid))
else:
run_single_exp(sess_spec, data_grid=data_grid, times=times)
return data_grid
|
Python
| 0.000001
|
@@ -1,14 +1,4 @@
-import gc%0A
impo
@@ -105,16 +105,47 @@
partial%0A
+from keras import backend as K%0A
from rl.
@@ -2113,18 +2113,23 @@
-gc.collect
+K.clear_session
()
|
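Decoded, the diff swaps the manual garbage collection for a Keras backend reset; a sketch of the changed lines:

from keras import backend as K  # new import alongside functools.partial

# at the end of Session.run(), where gc.collect() used to be:
K.clear_session()  # drops stale TF graph state between sessions (TF issue 3388)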
e4cc4447bf9aca4f579eef34baccd3aaf73939c3
|
Print statement removed from function
|
hamming-code/hamming.py
|
hamming-code/hamming.py
|
from matrix import Matrix
#The encoding matrix
encoding_matrix = Matrix([
[1, 1, 0, 1],
[1, 0, 1, 1],
[1, 0, 0, 0],
[0, 1, 1, 1],
[0, 1, 0, 0],
[0, 0, 1, 0],
[0, 0, 0, 1]
])
#The parity checking matrix
checking_matrix = Matrix([
[1, 0, 1, 0, 1, 0, 1],
[0, 1, 1, 0, 0, 1, 1],
[0, 0, 0, 1, 1, 1, 1]
])
#Takes binary vector of length 4 and adds the parity bits
#Returns result as vector
def encodemessage(message):
print(message.transpose())
vector_with_paritybits = encoding_matrix*(message.transpose())
return Matrix(vector_with_paritybits.getbinary())
#repairs message, may not work, can't test it yet
#Takes a matrix
def repairmessage(message):
vector = checking_matrix*message
checker = True
#checks if the return vector is the zero vector. If this is the case
#checker = True, and there is no mistake
for element in vector.values[0]:
if element == 1:
checker = False
if checker == False:
#finds out at what position the mistake is and saves it as
#counter
counter = 0
for i, element in enumerate(vector.values[0]):
counter += element * 2 ** i
else:
#in this case checker = True, so it returns the message
return message
new_message = message.values[0]
#fixes the message
if new_message[counter - 1] == 0:
new_message[counter - 1] = 1
else:
new_message[counter - 1] = 0
return Matrix(new_message)
#Example:
#boodschap = input('Enter your message here: ')
#testvector = Matrix([[1, 0, 1, 1]])
#print(repairmessage(encodemessage(testvector)))
|
Python
| 0.999972
|
@@ -452,37 +452,8 @@
e):%0A
- print(message.transpose())%0A
ve
|
825eb37e15e2fb08ac205b7495e93a91acb79c26
|
Add function for flashing all form errors
|
app/utils.py
|
app/utils.py
|
import re
from flask import url_for
def register_template_utils(app):
"""Register Jinja 2 helpers (called from __init__.py)."""
@app.template_test()
def equalto(value, other):
return value == other
@app.template_global()
def is_hidden_field(field):
from wtforms.fields import HiddenField
return isinstance(field, HiddenField)
app.add_template_global(index_for_role)
def index_for_role(role):
return url_for(role.index)
def parse_phone_number(phone_number):
"""Make phone number conform to E.164 (https://en.wikipedia.org/wiki/E.164)
"""
stripped = re.sub(r'\D', '', phone_number)
if len(stripped) == 10:
stripped = '1' + stripped
stripped = '+' + stripped
return stripped
|
Python
| 0
|
@@ -28,16 +28,23 @@
url_for
+, flash
%0A%0A%0Adef r
@@ -757,16 +757,255 @@
return stripped%0A
+%0A%0Adef flash_errors(form):%0A for field, errors in form.errors.items():%0A for error in errors:%0A flash(u%22Error in the %25s field - %25s%22 %25 (%0A getattr(form, field).label.text,%0A error%0A ))%0A
|
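Decoded, the added helper iterates over every field's validation errors and flashes each one with its field label; a sketch:

from flask import url_for, flash

def flash_errors(form):
    for field, errors in form.errors.items():
        for error in errors:
            flash(u"Error in the %s field - %s" % (
                getattr(form, field).label.text,
                error
            ))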
3289a259a7fa9ed959eb18d012c2d2e52198b033
|
Update malshare.py
|
plugins/analytics/public/malshare.py
|
plugins/analytics/public/malshare.py
|
import json
import requests
import logging
from core.analytics import OneShotAnalytics
from core.errors import ObservableValidationError
from core.observables import Url, Hash
class MalshareAPI(object):
"""Base class for querying the Malshare API.
This is the public API, limited to 1000 samples per day.
Avoid hitting that limit, as rejections could cause API key deactivation.
"""
settings = {
'malshare_api_key': {
'name': 'Malshare API Key',
'description': 'API Key provided by malshare.com'
}
}
@staticmethod
def fetch(observable, api_key):
"""
:param observable: The extended observable klass
:param api_key: The api key obtained from Malshare
:return: malshare json response or None if error
"""
try:
params = {
'hash': observable.value,
'api_key': api_key,
'action': 'details'
}
response = requests.get('https://malshare.com/api.php', params, verify=False)
if response.ok:
return response.json()
else:
return None
except Exception as e:
# TODO(sebdraven): Catch a better exception
print 'Exception while getting ip report {}'.format(e.message)
return None
class MalshareQuery(OneShotAnalytics, MalshareAPI):
default_values = {
'name': 'MalShare',
'description': 'Perform a MalShare query.',
}
ACTS_ON = ['Hash']
@staticmethod
def analyze(observable, results):
links = set()
json_result = MalshareAPI.fetch(
observable, results.settings['malshare_api_key'])
if json_result is not None:
json_string = json.dumps(
json_result, sort_keys=True, indent=4, separators=(',', ': '))
results.update(raw=json_string)
result = {'raw': json_string}
if 'SOURCES' in json_result:
for source in json_result['SOURCES']:
new_url = None
try:
new_url = Url.get_or_create(value=source.strip())
links.update(
observable.active_link_to(
new_url, 'c2', 'malshare_query'))
except ObservableValidationError:
logging.error(
"An error occurred when trying to add {} to the database".
format(source.strip()))
result['nb C2'] = len(json_result['SOURCES'])
try:
new_hash = Hash.get_or_create(value=json_result['MD5'])
links.update(
new_hash.active_link_to(observable, 'md5', 'malshare_query'))
new_hash = Hash.get_or_create(value=json_result['SHA1'])
links.update(
new_hash.active_link_to(observable, 'sha1', 'malshare_query'))
new_hash = Hash.get_or_create(value=json_result['SHA256'])
links.update(
new_hash.active_link_to(observable, 'sha256', 'malshare_query'))
except ObservableValidationError:
logging.error(
"An error occurred when trying to add hashes {} to the database".
format(json_string))
return list(links)
|
Python
| 0
|
@@ -1030,22 +1030,15 @@
rams
-, verify=False
+=params
)%0A
|
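Decoded, the change replaces the positional params argument carrying verify=False with an explicit keyword, which also restores TLS certificate verification (requests verifies by default):

response = requests.get('https://malshare.com/api.php', params=params)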
c0e1bed70bc331041622e0db06871d4f3e3277f3
|
Update activate-devices.py
|
cron/activate-devices.py
|
cron/activate-devices.py
|
#!/usr/bin/env python
import MySQLdb
#import datetime
#import urllib2
#import os
import datetime
try:
import RPi.GPIO as GPIO
except RuntimeError:
print("Error importing RPi.GPIO!")
servername = "localhost"
username = "pi"
password = "password"
dbname = "pi_heating_db"
GPIO.setmode(GPIO.BOARD)
cnx = MySQLdb.connect(host=servername, user=username, passwd=password, db=dbname)
cursorselect = cnx.cursor()
query = ("SELECT * FROM devices;")
cursorselect.execute(query)
results_devices = cursorselect.fetchall()
cursorselect.close()
for result in results_devices:
print("* * * * * *")
DEVICE_PIN = result[2]
DEVICE_VALUE = result[3]
GPIO.setup(DEVICE_PIN, GPIO.OUT, initial=GPIO.LOW)
GPIO.output(DEVICE_PIN, DEVICE_VALUE)
print( DEVICE_PIN, DEVICE_VALUE )
print("- - -")
cnx.close()
|
Python
| 0.000001
|
@@ -643,16 +643,21 @@
CE_PIN =
+ int(
result%5B
@@ -658,16 +658,18 @@
esult%5B2%5D
+ )
%0A DEV
@@ -679,16 +679,21 @@
_VALUE =
+ int(
result%5B
@@ -694,16 +694,18 @@
esult%5B3%5D
+ )
%0A%0A GP
|
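Decoded, the diff wraps both column values in int(); MySQLdb rows can come back as longs or strings, while RPi.GPIO expects plain integer channel and state arguments:

DEVICE_PIN = int( result[2] )
DEVICE_VALUE = int( result[3] )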
8f02edd1a79c3f893964198d05edf2990144bb31
|
Update to sim code
|
predict_binary_similarity_two_libraries.py
|
predict_binary_similarity_two_libraries.py
|
#Author : Lewis Mervin lhm30@cam.ac.uk
#Supervisor : Dr. A. Bender
#All rights reserved 2016
#Protein Target Prediction Tool trained on SARs from PubChem (Mined 21/06/16) and ChEMBL21
#Molecular Descriptors : 2048bit Morgan Binary Fingerprints (Rdkit) - ECFP4
#Dependencies : rdkit, sklearn, numpy
#libraries
from rdkit import Chem
from rdkit.Chem import AllChem
import cPickle
import glob
import zipfile
import os
import sys
import math
import numpy as np
from multiprocessing import Pool
import multiprocessing
from scipy.spatial.distance import rogerstanimoto
from scipy.spatial.distance import jaccard
def introMessage():
print '=============================================================================================='
print ' Author: Lewis Mervin\n Email: lhm30@cam.ac.uk\n Supervisor: Dr. A. Bender'
print ' Address: Centre For Molecular Informatics, Dept. Chemistry, Lensfield Road, Cambridge CB2 1EW'
print '==============================================================================================\n'
return
#calculate 2048bit morgan fingerprints, radius 2
def calcFingerprints(smiles):
m1 = Chem.MolFromSmiles(smiles)
fp = AllChem.GetMorganFingerprintAsBitVect(m1,2, nBits=2048)
binary = fp.ToBitString()
return list(binary)
#calculate fingerprints for chunked array of smiles
def arrayFP(inp):
outfp = []
outsmi = []
for i in inp:
try:
outfp.append(calcFingerprints(i))
outsmi.append(i)
except:
print 'SMILES Parse Error: ' + i
return outfp,outsmi
#import user query
def importQuery(in_file):
query = open(in_file).read().splitlines()
matrix = np.empty((len(query), 2048), dtype=np.uint8)
smiles_per_core = int(math.ceil(len(query) / N_cores)+1)
chunked_smiles = [query[x:x+smiles_per_core] for x in xrange(0, len(query), smiles_per_core)]
pool = Pool(processes=N_cores) # set up resources
jobs = pool.imap(arrayFP, chunked_smiles)
current_end = 0
processed_smi = []
for i, result in enumerate(jobs):
matrix[current_end:current_end+len(result[0]), :] = result[0]
current_end += len(result[0])
processed_smi += result[1]
pool.close()
pool.join()
return matrix[:current_end], processed_smi
#get info for uniprots
def getUniprotInfo():
if os.name == 'nt': sep = '\\'
else: sep = '/'
model_info = [l.split('\t') for l in open(os.path.dirname(os.path.abspath(__file__)) + sep + 'classes_in_model.txt').read().splitlines()]
return_dict = {l[0] : l[0:8] for l in model_info}
return return_dict
#unzip a pkl model
def open_Model(mod):
if os.name == 'nt': sep = '\\'
else: sep = '/'
with zipfile.ZipFile(os.path.dirname(os.path.abspath(__file__)) + sep + 'models' + sep + mod + '.pkl.zip', 'r') as zfile:
with zfile.open(mod + '.pkl', 'r') as fid:
clf = cPickle.load(fid)
return clf
#prediction worker
def doTargetPrediction(pickled_model_name):
if os.name == 'nt': sep = '\\'
else: sep = '/'
mod = pickled_model_name.split(sep)[-1].split('.')[0]
clf = open_Model(mod)
probs = clf.predict_proba(querymatrix)[:,1]
preds = map(int,probs > threshold)
return preds
#prediction runner
def performTargetPrediction(models, qmatrix):
prediction_results = []
pool = Pool(processes=N_cores, initializer=initPool, initargs=(qmatrix,threshold,)) # set up resources
jobs = pool.imap(doTargetPrediction, sorted(models))
for i, result in enumerate(jobs):
percent = (float(i)/float(len(models)))*100 + 1
sys.stdout.write(' Performing Classification on Query Molecules: %3d%%\r' % percent)
sys.stdout.flush()
if result is not None: prediction_results.append(result)
pool.close()
pool.join()
return np.array(prediction_results)
#initializer for the pool
def initPool(querymatrix_, threshold_):
global querymatrix, threshold
querymatrix = querymatrix_
threshold = threshold_
#main
if __name__ == '__main__':
if os.name == 'nt': sep = '\\'
else: sep = '/'
input_name = sys.argv[1]
input_name2 = sys.argv[2]
N_cores = int(sys.argv[3])
introMessage()
print ' Predicting Targets for ' + input_name
print ' Using ' + str(N_cores) + ' Cores'
try:
threshold = float(sys.argv[3])
except ValueError:
print 'ERROR: Enter a valid float (max 2 decimal places) for threshold'
quit()
models = [modelfile for modelfile in glob.glob(os.path.dirname(os.path.abspath(__file__)) + sep + 'models' + sep + '*.zip')]
model_info = getUniprotInfo()
print ' Total Number of Classes : ' + str(len(models))
print ' Using TPR threshold of : ' + str(threshold)
output_name = input_name + '_' + input_name2 + '_out_binary_sim_' + str(threshold) + '.txt'
out_file = open(output_name, 'w')
querymatrix,smiles = importQuery(input_name)
querymatrix2,smiles2 = importQuery(input_name2)
print ' Total Number of Query Molecules file 1 : ' + str(len(querymatrix))
print ' Total Number of Query Molecules file 2 : ' + str(len(querymatrix2))
prediction_results = performTargetPrediction(models, querymatrix)
prediction_results2 = performTargetPrediction(models, querymatrix2)
sim_output = []
sim_output2 = []
for idx in range(prediction_results.shape[1]):
sim_output.append(rogerstanimoto(prediction_results[:,idx],prediction_results2[:,idx]))
sim_output2.append(jaccard(prediction_results[:,idx],prediction_results2[:,idx]))
out_file.write('Compound Pair No.\tSmiles 1\tSmiles 2\tJaccard Sim\n')
for idx, comp1 in enumerate(smiles):
comp2 = smiles2[idx]
s = sim_output[idx]
s2 = sim_output2[idx]
out_file.write('\t'.join(map(str,[idx,comp1,comp2,1-s,1-s2])) + '\n')
print '\n Wrote Results to: ' + output_name
out_file.close()
|
Python
| 0
|
@@ -4057,25 +4057,25 @@
at(sys.argv%5B
-3
+4
%5D)%0A%09except V
|
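Decoded, the fix reads the threshold from sys.argv[4]: argv[1] and argv[2] are the two input files and argv[3] is already consumed as N_cores, so parsing argv[3] again would silently reuse the core count as the threshold:

threshold = float(sys.argv[4])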
8701318037b9d425149f0689fa137be78a782aa7
|
return the name of the face found
|
app/views.py
|
app/views.py
|
from app import app
from flask import Flask, request, jsonify
import kairos
DEFAULT_GALLERY = 'default_gallery'
# App Logic
@app.route('/', methods=['GET'])
def index():
return 'yo'
@app.route('/upload/<name>', methods=['POST'])
def upload(name):
img_url = request.form['img_url']
success = kairos.add_face_url(img_url, name, DEFAULT_GALLERY)
return jsonify({'success': success})
@app.route('/verify', methods=['GET'])
def verify():
link = request.args.get('img_url')
allowed = kairos.check_face_url(link, DEFAULT_GALLERY)
return jsonify({'allowed': allowed})
|
Python
| 0.999999
|
@@ -489,23 +489,20 @@
l')%0A
-allowed
+name
= kairo
@@ -507,13 +507,16 @@
ros.
-check
+identify
_fac
@@ -555,41 +555,133 @@
-return jsonify(%7B'allowed': allowed
+allowed = name is not None%0A # TODO: open the door.%0A return jsonify(%7B'allowed': allowed,%0A 'name': name
%7D)%0A
|
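Decoded (and assuming the dangling img_url reference is meant to be link), the patched handler returns the matched name alongside the boolean; a sketch:

@app.route('/verify', methods=['GET'])
def verify():
    link = request.args.get('img_url')
    name = kairos.identify_face_url(link, DEFAULT_GALLERY)
    allowed = name is not None
    # TODO: open the door.
    return jsonify({'allowed': allowed,
                    'name': name})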
a8ace4f53a67d30e39b7f25d8db4cbc6fdd8fcf8
|
Update allowed hosts
|
cub/settings.py
|
cub/settings.py
|
"""
Django settings for cub project.
Generated by 'django-admin startproject' using Django 1.8.
For more information on this file, see
https://docs.djangoproject.com/en/1.8/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/1.8/ref/settings/
"""
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
import os
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
CONTACT_ADMINS = ('maria.nita.dn@gmail.com',)
# Per Environment settings
APP_ENVIRONMENT = os.environ.get('APP_ENVIRONMENT', 'DEV')
if APP_ENVIRONMENT == 'DEV':
# SECURITY WARNING: keep the secret key used in production secret!
SECRET_KEY = 'bu0-nf-yu99^$7!8z$#uiz22v6y&3-#i35&f3!s7e+u3ocs*3m'
ALLOWED_HOSTS = ['127.0.0.1', '0.0.0.0', '192.168.3.3']
DEBUG = True
STATICFILES_DIRS = (os.path.join(BASE_DIR, "static"),)
# Database
# https://docs.djangoproject.com/en/1.8/ref/settings/#databases
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.sqlite3',
'NAME': 'cub.sqlite3'
}
}
MONGO_CONFIG = {
'NAME': 'cub',
'HOST': 'mongodb://127.0.0.1:27017',
}
elif APP_ENVIRONMENT == 'PROD':
SECRET_KEY = os.environ.get('SECRET_KEY')
ALLOWED_HOSTS = ['*']
SECURE_PROXY_SSL_HEADER = ('HTTP_X_FORWARDED_PROTO', 'https')
CSRF_COOKIE_SECURE = True
STATIC_ROOT = os.path.join(BASE_DIR, "static")
DEBUG = False
import dj_database_url
DATABASES = {'default': dj_database_url.config()}
MONGO_CONFIG = {
'NAME': 'connecthub',
'HOST': os.environ.get('MONGOLAB_URI'),
}
WSGI_APPLICATION = 'cub.wsgi.application'
# General settings
# Application definition
INSTALLED_APPS = (
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
'backend',
'tastypie',
'tastypie_mongoengine',
'djangojs'
)
MIDDLEWARE_CLASSES = (
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.auth.middleware.SessionAuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
'django.middleware.security.SecurityMiddleware',
)
AUTH_USER_MODEL = 'backend.Account'
PASSWORD_HASHERS = (
'django.contrib.auth.hashers.BCryptSHA256PasswordHasher',
'django.contrib.auth.hashers.BCryptPasswordHasher',
'django.contrib.auth.hashers.PBKDF2PasswordHasher',
'django.contrib.auth.hashers.PBKDF2SHA1PasswordHasher',
'django.contrib.auth.hashers.SHA1PasswordHasher',
'django.contrib.auth.hashers.MD5PasswordHasher',
'django.contrib.auth.hashers.CryptPasswordHasher',
)
ROOT_URLCONF = 'cub.urls'
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': [],
'APP_DIRS': True,
'OPTIONS': {
'context_processors': [
'django.template.context_processors.debug',
'django.template.context_processors.request',
'django.contrib.auth.context_processors.auth',
'django.contrib.messages.context_processors.messages',
]
},
},
]
# Internationalization
# https://docs.djangoproject.com/en/1.8/topics/i18n/
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'UTC'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/1.8/howto/static-files/
STATICFILES_FINDERS = (
'django.contrib.staticfiles.finders.FileSystemFinder',
'django.contrib.staticfiles.finders.AppDirectoriesFinder'
)
STATIC_URL = '/static/'
TASTYPIE_DEFAULT_FORMATS = ['json']
CONTRIB_DELTA = 600
EMAIL_USE_TLS = True
EMAIL_HOST = os.environ.get('MAILGUN_SMTP_SERVER')
EMAIL_HOST_USER = os.environ.get('MAILGUN_SMTP_LOGIN')
EMAIL_HOST_PASSWORD = os.environ.get('MAILGUN_SMTP_PASSWORD')
EMAIL_API_URL = os.environ.get('MAILGUN_DOMAIN')
EMAIL_API_KEY = os.environ.get('MAILGUN_API_KEY')
EMAIL_PORT = os.environ.get('MAILGUN_SMTP_PORT')
|
Python
| 0
|
@@ -779,43 +779,9 @@
= %5B'
-127.0.0.1', '0.0.0.0', '192.168.3.3
+*
'%5D%0A
@@ -1275,25 +1275,56 @@
D_HOSTS = %5B'
-*
+https://connecthub.herokuapp.com
'%5D%0A SECUR
|
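Decoded, the diff widens DEV to ALLOWED_HOSTS = ['*'] and narrows PROD to ['https://connecthub.herokuapp.com']. Note that Django matches ALLOWED_HOSTS against the Host header, so a scheme-prefixed entry never matches; the bare hostname is presumably what was intended:

ALLOWED_HOSTS = ['connecthub.herokuapp.com']  # hostname only, no 'https://'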
b3b489fb8b476a17e8d9e08d70f90aec38756c8a
|
Allow use of a custom user model
|
cuser/fields.py
|
cuser/fields.py
|
# Copyright (c) 2009-2011 Dennis Kaarsemaker <dennis@kaarsemaker.net>
# 2011 Atamert Olcgen <muhuk@muhuk.com>
# 2012 Alireza Savand <alireza.savand@gmail.com>
#
# Small piece of middleware to be able to access authentication data from
# everywhere in the django code.
#
# Redistribution and use in source and binary forms, with or without modification,
# are permitted provided that the following conditions are met:
#
# 1. Redistributions of source code must retain the above copyright notice,
# this list of conditions and the following disclaimer.
#
# 2. Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
#
# 3. Neither the name of Django nor the names of its contributors may be used
# to endorse or promote products derived from this software without
# specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
# ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR
# ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
# (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON
# ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
# SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
from django.conf import settings
from django.contrib.auth.models import User
from django.db.models.fields.related import ForeignKey, ManyToOneRel
from cuser.middleware import CuserMiddleware
if 'cuser' not in settings.INSTALLED_APPS:
raise ValueError("Cuser middleware is not enabled")
# Register fields with south, if installed
if 'south' in settings.INSTALLED_APPS:
from south.modelsinspector import add_introspection_rules
add_introspection_rules([], ["^cuser\.fields\.CurrentUserField"])
class CurrentUserField(ForeignKey):
def __init__(self, to_field=None, rel_class=ManyToOneRel, **kwargs):
self.add_only = kwargs.pop('add_only', False)
kwargs.update({
'editable': False,
'null': True,
'rel_class': rel_class,
'to': User,
'to_field': to_field,
})
super(CurrentUserField, self).__init__(**kwargs)
def pre_save(self, model_instance, add):
if add or not self.add_only:
user = CuserMiddleware.get_user()
if user:
setattr(model_instance, self.attname, user.pk)
return user.pk
return super(CurrentUserField, self).pre_save(model_instance, add)
|
Python
| 0
|
@@ -1829,16 +1829,105 @@
ettings%0A
+%0A%0Aif hasattr(settings, 'AUTH_USER_MODEL'):%0A User = settings.AUTH_USER_MODEL%0Aelse:%0A
from dja
@@ -1962,16 +1962,18 @@
rt User%0A
+%0A%0A
from dja
|
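Decoded, the commit guards the User import so a project's custom user model is used when configured; since ForeignKey accepts a lazy "app.Model" string, assigning the setting directly works:

if hasattr(settings, 'AUTH_USER_MODEL'):
    User = settings.AUTH_USER_MODEL  # lazy "app.Model" reference
else:
    from django.contrib.auth.models import User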
194dd71de22e34e5f262b8fe0735347d6b7f1bd8
|
Support title page (-1)
|
portfolio/pdf-scripts/do-page-generate.py
|
portfolio/pdf-scripts/do-page-generate.py
|
import subprocess
from music21 import *
from pyPdf import PdfFileReader, PdfFileWriter
from reportlab.pdfgen import canvas
from reportlab.lib import pagesizes
from reportlab.lib.units import inch
from reportlab.pdfbase import pdfmetrics
from reportlab.pdfbase.ttfonts import TTFont
# some important constants
MUSIC_XML_DIR = "C:\\wamp\\www\\syntaxblitz.github.io\\portfolio\\music-xml\\"
MUSIC_LY_DIR = "C:\\wamp\\www\\syntaxblitz.github.io\\portfolio\\music-ly\\"
MUSIC_PDF_DIR = "C:\\wamp\\www\\syntaxblitz.github.io\\portfolio\\music-pdf\\"
PAGENUM_PDF_DIR = "C:\\wamp\\www\\syntaxblitz.github.io\\portfolio\\pagenum-pdf\\"
PAGE_PDF_DIR = "C:\\wamp\\www\\syntaxblitz.github.io\\portfolio\\page-pdf\\"
OUTPUT_DIR = "C:\\wamp\\www\\syntaxblitz.github.io\\portfolio\\pdf-output\\"
PATH_TO_CAMBRIA = "C:\\Windows\\Fonts\\CAMBRIA.TTC"
LILYPOND_EXE_LOCATION = r"c:\Program Files (x86)\lilypond\usr\bin\lilypond.exe"
pageNum = 0
pageNumber = str(pageNum)
numOfParts = 1
# generate .ly file in music21
music = converter.parse(MUSIC_XML_DIR + pageNumber + ".xml")
numOfParts = len(music.getElementsByClass(stream.Part))
music.write("lily", MUSIC_LY_DIR + pageNumber + ".ly")
# add styling information to .ly file
outFile = open(MUSIC_LY_DIR + pageNumber + ".ly", "a") # 'a' opens for appending
if numOfParts == 1:
outFile.write(file("ly-one-line.txt","r").read()) # 'r' opens for just reading
else:
outFile.write(file("ly-two-lines.txt","r").read()) # 'r' opens for just reading
outFile.close()
# turn .ly into .pdf
subprocess.call([ #will wait for finish exec
LILYPOND_EXE_LOCATION,
"-o", MUSIC_PDF_DIR,
MUSIC_LY_DIR + pageNumber + ".ly"
])
# merge pages and add page number:
musicLine = PdfFileReader(file(MUSIC_PDF_DIR + pageNumber + ".pdf", "rb"))
page = PdfFileReader(file(PAGE_PDF_DIR + pageNumber + ".pdf", "rb"))
page.getPage(0).mergePage(musicLine.getPage(0))
hexPageNumber = str(hex(pageNum))[2:]
pageNumberPdfCanvas = canvas.Canvas(PAGENUM_PDF_DIR + pageNumber + ".pdf", pagesize=pagesizes.letter)
pdfmetrics.registerFont(TTFont("Cambria", PATH_TO_CAMBRIA))
pageNumberPdfCanvas.setFont("Cambria", 12)
if pageNum % 2 == 0: # even pages are on left, so put text on right
widthOfText = pageNumberPdfCanvas.stringWidth(hexPageNumber, "Cambria", 12)
pageNumberPdfCanvas.drawString(inch * 8.5 - inch * .5 - widthOfText, inch * 11 - inch * .5, hexPageNumber)
else: # put number on left
pageNumberPdfCanvas.drawString(inch * .5, inch * 11 - inch * .5, hexPageNumber)
pageNumberPdfCanvas.showPage()
pageNumberPdfCanvas.save()
pageNumberPdf = PdfFileReader(file(PAGENUM_PDF_DIR + pageNumber + ".pdf", "rb"))
page.getPage(0).mergePage(pageNumberPdf.getPage(0))
output = PdfFileWriter()
output.addPage(page.getPage(0))
outStream = file(OUTPUT_DIR + pageNumber + ".pdf", "wb")
output.write(outStream)
outStream.close()
|
Python
| 0
|
@@ -2118,16 +2118,95 @@
a%22, 12)%0A
+%0Aif pageNum != -1:%09# title page is -1, and we don't want a page number there.%0A%09
if pageN
@@ -2265,16 +2265,17 @@
n right%0A
+%09
%09widthOf
@@ -2343,16 +2343,17 @@
a%22, 12)%0A
+%09
%09pageNum
@@ -2452,16 +2452,17 @@
Number)%0A
+%09
else:%09#
@@ -2480,16 +2480,17 @@
on left%0A
+%09
%09pageNum
|
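Decoded, the diff indents the numbering logic under a guard so the title page (pageNum == -1) is left unnumbered; a sketch of the resulting block:

if pageNum != -1:	# title page is -1, and we don't want a page number there.
	if pageNum % 2 == 0:	# even pages are on left, so put text on right
		widthOfText = pageNumberPdfCanvas.stringWidth(hexPageNumber, "Cambria", 12)
		pageNumberPdfCanvas.drawString(inch * 8.5 - inch * .5 - widthOfText, inch * 11 - inch * .5, hexPageNumber)
	else:	# put number on left
		pageNumberPdfCanvas.drawString(inch * .5, inch * 11 - inch * .5, hexPageNumber)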
f7ce07f6775fd88a0d8e5bf0f980eb22050f2d92
|
Update file_regression.py
|
src/pytest_regressions/file_regression.py
|
src/pytest_regressions/file_regression.py
|
# encoding: UTF-8
from functools import partial
import six
from .common import perform_regression_check, check_text_files
class FileRegressionFixture(object):
"""
Implementation of `file_regression` fixture.
"""
def __init__(self, datadir, original_datadir, request):
"""
:type datadir: Path
:type original_datadir: Path
:type request: FixtureRequest
"""
self.request = request
self.datadir = datadir
self.original_datadir = original_datadir
self.force_regen = False
def check(
self,
contents,
encoding=None,
extension=".txt",
newline=None,
basename=None,
fullpath=None,
binary=False,
obtained_filename=None,
check_fn=None,
):
"""
Checks the contents against a previously recorded version, or generate a new file.
:param str contents: contents to write to the file
:param str|None encoding: Encoding used to write file, if any.
:param str extension: Extension of file.
:param str|None newline: See `io.open` docs.
:param bool binary: If the file is binary or text.
:param obtained_filename: ..see:: FileRegressionCheck
:param check_fn: a function with signature ``(obtained_filename, expected_filename)`` that should raise
AssertionError if both files differ.
If not given, use internal function which compares text using :py:mod:`difflib`.
"""
__tracebackhide__ = True
if binary and encoding:
raise ValueError(
"Only binary ({!r}) or encoding ({!r}) parameters must be passed at the same time.".format(
binary, encoding
)
)
if binary:
assert isinstance(
contents, six.binary_type
), "Expected bytes contents but received type {}".format(
type(contents).__name__
)
else:
assert isinstance(
contents, six.text_type
), "Expected text/unicode contents but received type {}".format(
type(contents).__name__
)
import io
if check_fn is None:
if binary:
def check_fn(obtained_filename, expected_filename):
if obtained_filename.read_bytes() != expected_filename.read_bytes():
raise AssertionError(
"Binary files {} and {} differ.".format(
obtained_filename, expected_filename
)
)
else:
check_fn = partial(check_text_files, encoding=encoding)
def dump_fn(filename):
mode = "wb" if binary else "w"
with io.open(
six.text_type(filename), mode, encoding=encoding, newline=newline
) as f:
f.write(contents)
perform_regression_check(
datadir=self.datadir,
original_datadir=self.original_datadir,
request=self.request,
check_fn=check_fn,
dump_fn=dump_fn,
extension=extension,
basename=basename,
fullpath=fullpath,
force_regen=self.force_regen,
obtained_filename=obtained_filename,
)
# non-PEP 8 alias used internally at ESSS
Check = check
|
Python
| 0.000001
|
@@ -946,30 +946,24 @@
tent
-s
to
-write to the file
+be verified.
%0A
|
761b26d2e259fe5b36ed223fd3b54750a741869b
|
Expand activity list
|
src/sentry/api/endpoints/group_details.py
|
src/sentry/api/endpoints/group_details.py
|
from __future__ import absolute_import, print_function
from django.utils import timezone
from rest_framework import serializers
from rest_framework.response import Response
from sentry.api.base import DocSection
from sentry.api.bases.group import GroupEndpoint
from sentry.api.fields import UserField
from sentry.api.serializers import serialize
from sentry.db.models.query import create_or_update
from sentry.constants import STATUS_CHOICES
from sentry.models import (
Activity, Group, GroupAssignee, GroupBookmark, GroupMeta, GroupSeen,
GroupStatus, GroupTagValue
)
from sentry.plugins import plugins
from sentry.utils.safe import safe_execute
class GroupSerializer(serializers.Serializer):
status = serializers.ChoiceField(choices=zip(
STATUS_CHOICES.keys(), STATUS_CHOICES.keys()
))
isBookmarked = serializers.BooleanField()
hasSeen = serializers.BooleanField()
assignedTo = UserField()
class GroupDetailsEndpoint(GroupEndpoint):
doc_section = DocSection.EVENTS
def _get_activity(self, request, group, num=7):
activity_items = set()
activity = []
activity_qs = Activity.objects.filter(
group=group,
).order_by('-datetime').select_related('user')
# we select excess so we can filter dupes
for item in activity_qs[:num * 2]:
sig = (item.event_id, item.type, item.ident, item.user_id)
# TODO: we could just generate a signature (hash(text)) for notes
# so there's no special casing
if item.type == Activity.NOTE:
activity.append(item)
elif sig not in activity_items:
activity_items.add(sig)
activity.append(item)
activity.append(Activity(
project=group.project,
group=group,
type=Activity.FIRST_SEEN,
datetime=group.first_seen,
))
return activity[:num]
def _get_seen_by(self, request, group):
seen_by = sorted([
(gs.user, gs.last_seen)
for gs in GroupSeen.objects.filter(
group=group
).select_related('user')
], key=lambda ls: ls[1], reverse=True)
return [s[0] for s in seen_by]
def _get_actions(self, request, group):
project = group.project
action_list = []
for plugin in plugins.for_project(project, version=1):
results = safe_execute(plugin.actions, request, group, action_list)
if not results:
continue
action_list = results
for plugin in plugins.for_project(project, version=2):
for action in (safe_execute(plugin.get_actions, request, group) or ()):
action_list.append(action)
return action_list
def get(self, request, group):
"""
Retrieve an aggregate
Return details on an individual aggregate.
{method} {path}
"""
GroupMeta.objects.populate_cache([group])
data = serialize(group, request.user)
# TODO: these probably should be another endpoint
activity = self._get_activity(request, group, num=7)
seen_by = self._get_seen_by(request, group)
# find first seen release
try:
first_release = GroupTagValue.objects.filter(
group=group,
key='sentry:release',
).order_by('first_seen')[0]
except IndexError:
first_release = None
else:
first_release = {
'version': first_release.value,
# TODO(dcramer): this should look it up in Release
'dateCreated': first_release.first_seen,
}
action_list = self._get_actions(request, group)
data.update({
'firstRelease': first_release,
'activity': serialize(activity, request.user),
'seenBy': serialize(seen_by, request.user),
'pluginActions': action_list,
})
return Response(data)
def put(self, request, group):
"""
Update an aggregate
Updates an individual aggregate's attributes.
{method} {path}
{{
"status": "resolved"
}}
Attributes:
- status: resolved, unresolved, muted
- hasSeen: true, false
- isBookmarked: true, false
- assignedTo: user
"""
serializer = GroupSerializer(data=request.DATA, partial=True)
if not serializer.is_valid():
return Response(serializer.errors, status=400)
result = serializer.object
# TODO(dcramer): we should allow assignment to anyone who has membership
# even if that membership is not SSO linked
if result.get('assignedTo') and not group.project.member_set.filter(user=result['assignedTo']).exists():
return Response({'detail': 'Cannot assign to non-team member'}, status=400)
if result.get('status') == 'resolved':
now = timezone.now()
group.resolved_at = now
group.status = GroupStatus.RESOLVED
happened = Group.objects.filter(
id=group.id,
).exclude(status=GroupStatus.RESOLVED).update(
status=GroupStatus.RESOLVED,
resolved_at=now,
)
if happened:
create_or_update(
Activity,
project=group.project,
group=group,
type=Activity.SET_RESOLVED,
user=request.user,
)
elif result.get('status'):
group.status = STATUS_CHOICES[result['status']]
group.save()
if result.get('hasSeen'):
instance, created = create_or_update(
GroupSeen,
group=group,
user=request.user,
project=group.project,
defaults={
'last_seen': timezone.now(),
}
)
elif result.get('hasSeen') is False:
GroupSeen.objects.filter(
group=group,
user=request.user,
).delete()
if result.get('isBookmarked'):
GroupBookmark.objects.get_or_create(
project=group.project,
group=group,
user=request.user,
)
elif result.get('isBookmarked') is False:
GroupBookmark.objects.filter(
group=group,
user=request.user,
).delete()
if 'assignedTo' in result:
now = timezone.now()
if result['assignedTo']:
assignee, created = GroupAssignee.objects.get_or_create(
group=group,
defaults={
'project': group.project,
'user': result['assignedTo'],
'date_added': now,
}
)
if not created:
affected = GroupAssignee.objects.filter(
group=group,
).exclude(
user=result['assignedTo'],
).update(
user=result['assignedTo'],
date_added=now
)
else:
affected = True
if affected:
activity = Activity.objects.create(
project=group.project,
group=group,
type=Activity.ASSIGNED,
user=request.user,
data={
'assignee': result['assignedTo'].id,
}
)
activity.send_notification()
else:
affected = GroupAssignee.objects.filter(
group=group,
).delete()
if affected:
activity = Activity.objects.create(
project=group.project,
group=group,
type=Activity.UNASSIGNED,
user=request.user,
)
activity.send_notification()
return Response(serialize(group, request.user))
def delete(self, request, group):
"""
Delete an aggregate
Deletes an individual aggregate.
{method} {path}
"""
from sentry.tasks.deletion import delete_group
delete_group.delay(object_id=group.id)
return Response(status=202)
|
Python
| 0.998565
|
@@ -1053,18 +1053,16 @@
oup, num
-=7
):%0A
@@ -3189,9 +3189,11 @@
num=
-7
+100
)%0A
|
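Decoded, the diff drops the num=7 default from _get_activity and has the detail view request a larger window:

def _get_activity(self, request, group, num):  # default of 7 removed; callers pass num explicitly
activity = self._get_activity(request, group, num=100)  # expands the activity list from 7 to 100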
107ffa054406cefa71f245045fe0ad140d44cc55
|
Update drinkertest
|
drinkertest.py
|
drinkertest.py
|
from hieretikz import *
import subprocess
from hierarchy import *
formulae = lem, wlem, dp, he, dnsu, dnse, glpo, glpoa, gmp = \
'lem', 'wlem', 'dp', 'he', 'dnsu', 'dnse', 'glpo', 'glpoa', 'gmp'
_______ = None
formula_layout = [
' glpoa ',
' lem glpo ',
' ',
' dp he ',
' gmp ',
' dnsu dnse ',
' wlem ',
]
proofs = {
(lem, wlem): '', # Not yet
(dp, wlem): '',
(he, wlem): '',
(lem, glpo): '',
(glpo, lem): '',
(glpoa, lem): '',
(glpoa, glpo): '',
(dp, dnsu): '',
(glpoa, dnsu): '',
#
(he, dnse): '',
(gmp, dnse): '',
(gmp, dnsu): '',
}
counter_models = {
(dp, he): '',
(he, dp): '',
(lem, dp): '',
(lem, he): '',
(lem, glpoa): '',
(he, dnsu): '',
(dnsu, dp): '',
#
(dp, lem): '',
(he, lem): '',
(dnse, dp): '',
(dp, dnse): '',
}
document = r'''
\documentclass{article}
\usepackage{tikz}
\usepackage{amsmath}
\usepackage{fullpage}
\usepackage{multicol}
\begin{document}
\begin{tikzpicture}[node distance=1 cm, line width=0.3mm, auto]
''' + \
make_tikz(formulae, formula_layout, proofs, counter_models) + \
r'''
\end{tikzpicture}
\paragraph{}
It remains to investigate:
\begin{multicols}{3}
\noindent
''' + \
assist(formulae, formula_layout, proofs, counter_models) + \
r'''
\end{multicols}
\end{document}
'''
with open('drinker.tex', 'w') as f:
f.write(document)
subprocess.Popen(['pdflatex', 'drinker.tex'], stdout=subprocess.DEVNULL)
|
Python
| 0.000001
|
@@ -1640,32 +1640,41 @@
ula_layout,
+set(
proofs
+)
,
+set(
counter_mode
@@ -1668,32 +1668,33 @@
(counter_models)
+)
+ %5C%0Ar'''%0A%5Cend%7Bt
@@ -1821,16 +1821,25 @@
ut,
+set(
proofs
+)
,
+set(
coun
@@ -1849,16 +1849,17 @@
_models)
+)
+ %5C%0Ar''
|
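Decoded, both make_tikz and assist now receive the rule dictionaries as sets of their keys, i.e. only the (premise, conclusion) edge pairs, matching a presumably updated hieretikz signature:

make_tikz(formulae, formula_layout, set(proofs), set(counter_models))
assist(formulae, formula_layout, set(proofs), set(counter_models))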
70dce683db055fabab30b7aa90f8ea6cc9441e44
|
Fix minor issue when field_value is no longer a string
|
drip/models.py
|
drip/models.py
|
import six
from datetime import datetime, timedelta
from django.db import models
from django.db.models import F
try:
from django.conf import settings
User = settings.AUTH_USER_MODEL
except AttributeError:
from django.contrib.auth.models import User
from django.core.exceptions import ValidationError
# just using this to parse, but totally insane package naming...
# https://bitbucket.org/schinckel/django-timedelta-field/
import timedelta as djangotimedelta
class Drip(models.Model):
date = models.DateTimeField(auto_now_add=True)
lastchanged = models.DateTimeField(auto_now=True)
name = models.CharField(
max_length=255,
unique=True,
verbose_name='Drip Name',
help_text='A unique name for this drip.')
enabled = models.BooleanField(default=False)
from_email = models.EmailField(null=True, blank=True,
help_text='Set a custom from email.')
from_email_name = models.CharField(max_length=150, null=True, blank=True,
help_text="Set a name for a custom from email.")
subject_template = models.TextField(null=True, blank=True)
body_html_template = models.TextField(null=True, blank=True,
help_text='You will have settings and user in the context.')
message_class = models.CharField(max_length=120, blank=True, default='default')
@property
def drip(self):
from drip.drips import DripBase
drip = DripBase(drip_model=self,
name=self.name,
from_email=self.from_email if self.from_email else None,
from_email_name=self.from_email_name if self.from_email_name else None,
subject_template=self.subject_template if self.subject_template else None,
body_template=self.body_html_template if self.body_html_template else None)
return drip
def __unicode__(self):
return self.name
class SentDrip(models.Model):
"""
Keeps a record of all sent drips.
"""
date = models.DateTimeField(auto_now_add=True)
drip = models.ForeignKey('drip.Drip', related_name='sent_drips')
user = models.ForeignKey(User, related_name='sent_drips')
subject = models.TextField()
body = models.TextField()
from_email = models.EmailField(
null=True, default=None # For south so that it can migrate existing rows.
)
from_email_name = models.CharField(max_length=150,
null=True, default=None # For south so that it can migrate existing rows.
)
METHOD_TYPES = (
('filter', 'Filter'),
('exclude', 'Exclude'),
)
LOOKUP_TYPES = (
('exact', 'exactly'),
('iexact', 'exactly (case insensitive)'),
('contains', 'contains'),
('icontains', 'contains (case insensitive)'),
('regex', 'regex'),
('iregex', 'regex (case insensitive)'),
('gt', 'greater than'),
('gte', 'greater than or equal to'),
('lt', 'less than'),
('lte', 'less than or equal to'),
('startswith', 'starts with'),
('endswith', 'ends with'),
('istartswith', 'starts with (case insensitive)'),
('iendswith', 'ends with (case insensitive)'),
)
class QuerySetRule(models.Model):
date = models.DateTimeField(auto_now_add=True)
lastchanged = models.DateTimeField(auto_now=True)
drip = models.ForeignKey(Drip, related_name='queryset_rules')
method_type = models.CharField(max_length=12, default='filter', choices=METHOD_TYPES)
field_name = models.CharField(max_length=128, verbose_name='Field name of User')
lookup_type = models.CharField(max_length=12, default='exact', choices=LOOKUP_TYPES)
field_value = models.CharField(max_length=255,
help_text=('Can be anything from a number, to a string. Or, do ' +
'`now-7 days` or `today+3 days` for fancy timedelta.'))
def clean(self):
# github.com/omab/python-social-auth/commit/d8637cec02422374e4102231488481170dc51057
if isinstance(User, six.string_types):
app_label, model_name = User.split('.')
UserModel = models.get_model(app_label, model_name)
try:
self.apply(UserModel.objects.all())
except Exception as e:
raise ValidationError(
'%s raised trying to apply rule: %s' % (type(e).__name__, e))
@property
def annotated_field_name(self):
field_name = self.field_name
if field_name.endswith('__count'):
agg, _, _ = field_name.rpartition('__')
field_name = 'num_%s' % agg.replace('__', '_')
return field_name
def apply_any_annotation(self, qs):
if self.field_name.endswith('__count'):
field_name = self.annotated_field_name
agg, _, _ = self.field_name.rpartition('__')
qs = qs.annotate(**{field_name: models.Count(agg, distinct=True)})
return qs
def filter_kwargs(self, qs, now=datetime.now):
# Support Count() as m2m__count
field_name = self.annotated_field_name
field_name = '__'.join([field_name, self.lookup_type])
field_value = self.field_value
# set time deltas and dates
if field_value.startswith('now-'):
field_value = self.field_value.replace('now-', '')
field_value = now() - djangotimedelta.parse(field_value)
elif field_value.startswith('now+'):
field_value = self.field_value.replace('now+', '')
field_value = now() + djangotimedelta.parse(field_value)
elif field_value.startswith('today-'):
field_value = self.field_value.replace('today-', '')
field_value = now().date() - djangotimedelta.parse(field_value)
elif field_value.startswith('today+'):
field_value = self.field_value.replace('today+', '')
field_value = now().date() + djangotimedelta.parse(field_value)
# F expressions
if field_value.startswith('F_'):
field_value = self.field_value.replace('F_', '')
field_value = F(field_value)
# set booleans
if field_value == 'True':
field_value = True
if field_value == 'False':
field_value = False
kwargs = {field_name: field_value}
return kwargs
def apply(self, qs, now=datetime.now):
kwargs = self.filter_kwargs(qs, now)
qs = self.apply_any_annotation(qs)
if self.method_type == 'filter':
return qs.filter(**kwargs)
elif self.method_type == 'exclude':
return qs.exclude(**kwargs)
# catch as default
return qs.filter(**kwargs)
|
Python
| 0.000007
|
@@ -79,39 +79,8 @@
dels
-%0Afrom django.db.models import F
%0A%0Atr
@@ -5886,32 +5886,37 @@
ions%0A if
+self.
field_value.star
@@ -6016,16 +6016,23 @@
value =
+models.
F(field_
|
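Decoded, the fix drops the top-level F import and tests the stored string self.field_value instead of the local field_value, which by this point may already have been rebound to a datetime by the now/today branches above (hence the commit subject):

# F expressions
if self.field_value.startswith('F_'):
    field_value = self.field_value.replace('F_', '')
    field_value = models.F(field_value)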
cd87391b989c04f39fe8d7dfae540c53222a8ab0
|
Simplify scoring
|
dwi/autoroi.py
|
dwi/autoroi.py
|
"""Automatic ROI search."""
ADCM_MIN = 0.00050680935535585281
ADCM_MAX = 0.0017784125828491648
import numpy as np
import dwi.util
def get_score_param(img, param):
"""Return parameter score of given ROI."""
if param.startswith('ADC'):
#r = 1-np.mean(img)
r = 1./(np.mean(img)-0.0008)
#if np.min(img) < 0.0002:
# r = 0
#if (img < ADCM_MIN).any() or (img > ADCM_MAX).any():
# r = 0
elif param.startswith('K'):
r = np.mean(img)/1000
elif param.startswith('score'):
r = np.mean(img)
elif param == 'prostate_mask':
# Ban areas more than a certain amount outside of prostate.
if float(img.sum())/img.size > 0.20:
r = 0
else:
r = -np.inf
else:
r = 0 # Unknown parameter
return r
def get_score(img, params):
"""Return total score of given ROI."""
scores = [get_score_param(img[...,i], p) for i, p in enumerate(params)]
r = sum(scores)
return r
def get_roi_scores(img, d, params):
"""Return array of all scores for each possible ROI of given dimension."""
scores_shape = tuple([img.shape[i]-d[i]+1 for i in range(3)])
scores = np.zeros(scores_shape)
scores.fill(np.nan)
for i in range(scores.shape[0]):
for j in range(scores.shape[1]):
for k in range(scores.shape[2]):
z = (i, i+d[0])
y = (j, j+d[1])
x = (k, k+d[2])
roi = img[z[0]:z[1], y[0]:y[1], x[0]:x[1], :]
scores[i,j,k] = get_score(roi, params)
return scores
def get_scoremap(img, d, params, n_rois):
"""Return array like original image, with scores of n_rois best ROI's."""
scores = get_roi_scores(img, d, params)
indices = scores.ravel().argsort()[::-1] # Sort ROI's by descending score.
indices = indices[0:n_rois] # Select best ones.
indices = [np.unravel_index(i, scores.shape) for i in indices]
scoremap = np.zeros(img.shape[0:3] + (1,))
for z, y, x in indices:
scoremap[z:z+d[0], y:y+d[1], x:x+d[2], 0] += scores[z,y,x]
return scoremap
def find_roi(img, roidim, params, prostate_mask=None):
#dims = [(1,1,1)]
dims = [(2,i,i) for i in range(5, 10)]
dims += [(3,i,i) for i in range(5, 10)]
#dims = dwi.util.combinations([range(2, 4), range(5, 10), range(5, 10)])
#print dims
n_rois = 1000
if prostate_mask:
# Add mask to image as an extra parameter.
mask = prostate_mask.array.view()
mask.shape += (1,)
img = np.concatenate((img, mask), axis=3)
params = params + ['prostate_mask']
scoremaps = [get_scoremap(img, d, params, n_rois) for d in dims]
sum_scoremaps = sum(scoremaps)
roimap = get_scoremap(sum_scoremaps, roidim, ['score'], 1)
# Get first nonzero position at each axis.
corner = [axis[0] for axis in roimap[...,0].nonzero()]
# Convert to [(start, stop), ...] notation.
coords = [(x, x+d) for x, d in zip(corner, roidim)]
d = dict(sum_scoremaps=sum_scoremaps, roi_corner=corner, roi_coords=coords)
return d
|
Python
| 0.000088
|
@@ -271,24 +271,25 @@
mg)%0A
+#
r = 1./(np.m
@@ -305,16 +305,44 @@
0.0008)%0A
+ r = 1./np.mean(img)%0A
|
e6f477088dd3a32c389871a78d09effca269c017
|
Remove unused import and variable from base_templates for GCI.
|
app/soc/modules/gci/views/base_templates.py
|
app/soc/modules/gci/views/base_templates.py
|
#!/usr/bin/env python2.5
#
# Copyright 2011 the Melange authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""This module contains the view for the site menus."""
__authors__ = [
'"Daniel Hans" <daniel.m.hans@gmail.com>',
'"Sverre Rabbelier" <sverre@rabbelier.nl>',
'"Lennard de Rijk" <ljvderijk@gmail.com>',
]
from google.appengine.api import users
from soc.views.template import Template
from soc.views.base_templates import LoggedInMsg
def siteMenuContext(data):
"""Generates URL links for the hard-coded GCI site menu items.
"""
redirect = data.redirect
program = data.program
from soc.modules.gci.models.program import GCIProgram
about_page = GCIProgram.about_page.get_value_for_datastore(program)
connect = GCIProgram.connect_with_us_page.get_value_for_datastore(program)
help_page = GCIProgram.help_page.get_value_for_datastore(program)
terms = GCIProgram.terms_and_conditions.get_value_for_datastore(program)
context = {
'about_link': redirect.document(about_page).url(),
'terms_link': redirect.document(terms).url(),
'events_link': redirect.events().url(),
'connect_link': redirect.document(connect).url(),
'help_link': redirect.document(help_page).url(),
}
if users.get_current_user():
context['logout_link'] = redirect.logout().url()
else:
context['login_link'] = redirect.login().url()
if data.user:
context['dashboard_link'] = redirect.dashboard().url()
if data.timeline.tasksPubliclyVisible():
context['tasks_link'] = ''
return context
class Header(Template):
"""MainMenu template.
"""
def __init__(self, data):
self.data = data
def templatePath(self):
return "v2/modules/gci/_header.html"
def context(self):
return {
'home_link': self.data.redirect.homepage().url(),
# TODO(SRabbelier): make this dynamic somehow
'gsoc_link': '/gsoc/homepage/google/gsoc2011',
}
class MainMenu(Template):
"""MainMenu template.
"""
def __init__(self, data):
self.data = data
def context(self):
context = siteMenuContext(self.data)
context.update({
'home_link': self.data.redirect.homepage().url(),
})
if self.data.profile:
self.data.redirect.program()
if self.data.profile.status == 'active':
context['profile_link'] = self.data.redirect.urlOf('edit_gci_profile')
# Add org admin dashboard link if the user has active
# org admin profile and is an org admin of some organization
if self.data.is_org_admin:
context['org_dashboard_link'] = self.data.redirect.urlOf(
'gci_org_dashboard')
else:
context['profile_link'] = self.data.redirect.urlOf('show_gci_profile')
if self.data.is_host:
self.data.redirect.program()
context['admin_link'] = self.data.redirect.urlOf('gci_admin_dashboard')
return context
def templatePath(self):
return "v2/modules/gci/_mainmenu.html"
class Footer(Template):
"""Footer template.
"""
def __init__(self, data):
self.data = data
def context(self):
context = siteMenuContext(self.data)
redirect = self.data.redirect
program = self.data.program
context.update({
'privacy_policy_link': program.privacy_policy_url,
'blogger_link': program.blogger,
'email_id': program.email,
'irc_link': program.irc,
})
return context
def templatePath(self):
return "v2/modules/gci/_footer.html"
class Status(Template):
"""Template to render the status block.
"""
def __init__(self, data):
self.data = data
def context(self):
return {
'user_email': self.data.user.account.email(),
'logout_link': self.data.redirect.logout().url(),
'dashboard_link': self.data.redirect.dashboard().url()
}
def templatePath(self):
return "v2/modules/gci/_status_block.html"
|
Python
| 0
|
@@ -900,57 +900,8 @@
ate%0A
-from soc.views.base_templates import LoggedInMsg%0A
%0A%0Ade
@@ -3582,42 +3582,8 @@
ta)%0A
- redirect = self.data.redirect%0A
|
a400ad0efd91550952f49598a0d5471839d36ce5
|
Refactor the Program project list to use the new Project model and its logic function.
|
app/soc/modules/gsoc/views/projects_list.py
|
app/soc/modules/gsoc/views/projects_list.py
|
#!/usr/bin/env python2.5
#
# Copyright 2011 the Melange authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Module containing the views for listing all the projects accepted
into a GSoC program.
"""
__authors__ = [
'"Madhusudan.C.S" <madhusudancs@gmail.com>',
]
from django.conf.urls.defaults import url
from soc.logic.exceptions import AccessViolation
from soc.views.template import Template
from soc.modules.gsoc.logic.models.student_project import logic as sp_logic
from soc.modules.gsoc.views.base import RequestHandler
from soc.modules.gsoc.views.helper import lists
from soc.modules.gsoc.views.helper import url_patterns
class ProjectList(Template):
"""Template for listing the student projects accepted in the program.
"""
def __init__(self, request, data):
self.request = request
self.data = data
list_config = lists.ListConfiguration()
list_config.addColumn('student', 'Student',
lambda entity, *args: entity.student.name())
list_config.addSimpleColumn('title', 'Title')
list_config.addColumn('org', 'Organization',
lambda entity, *args: entity.scope.name)
list_config.addColumn('mentor', 'Mentor',
lambda entity, *args: entity.mentor.name())
list_config.setDefaultPagination(False)
list_config.setDefaultSort('student')
self._list_config = list_config
def context(self):
list = lists.ListConfigurationResponse(
self.data, self._list_config, idx=0,
description='List of projects accepted into %s' % (
self.data.program.name))
return {
'lists': [list],
}
def getListData(self):
"""Returns the list data as requested by the current request.
If the list as requested is not supported by this component, None is
returned.
"""
idx = lists.getListIndex(self.request)
if idx == 0:
fields = {'program': self.data.program,
'status': 'accepted'}
response_builder = lists.QueryContentResponseBuilder(
self.request, self._list_config, sp_logic,
fields, prefetch=['student', 'scope', 'mentor'])
return response_builder.build()
else:
return None
def templatePath(self):
return "v2/modules/gsoc/projects_list/_project_list.html"
class ListProjects(RequestHandler):
"""View methods for listing all the projects accepted into a program.
"""
def templatePath(self):
return 'v2/modules/gsoc/projects_list/base.html'
def djangoURLPatterns(self):
"""Returns the list of tuples for containing URL to view method mapping.
"""
return [
url(r'^gsoc/projects/list/%s$' % url_patterns.PROGRAM, self,
name='gsoc_accepted_projects')
]
def checkAccess(self):
"""Access checks for the view.
"""
self.check.acceptedStudentsAnnounced()
def jsonContext(self):
"""Handler for JSON requests.
"""
list_content = ProjectList(self.request, self.data).getListData()
if not list_content:
raise AccessViolation(
'You do not have access to this data')
return list_content.content()
def context(self):
"""Handler for GSoC Accepted Projects List page HTTP get request.
"""
program = self.data.program
return {
'page_name': '%s - Accepted Projects' % program.short_name,
'program_name': program.name,
'project_list': ProjectList(self.request, self.data),
}
|
Python
| 0
|
@@ -931,56 +931,96 @@
ogic
-.models.student_project import logic as sp_logic
+ import project as project_logic%0Afrom soc.modules.gsoc.models.project import GSoCProject
%0Afro
@@ -1370,16 +1370,38 @@
= data%0A%0A
+ r = data.redirect%0A
list
@@ -1539,23 +1539,24 @@
entity.
-stud
+par
ent
+()
.name())
@@ -1710,21 +1710,19 @@
entity.
-scope
+org
.name)%0A
@@ -1922,16 +1922,183 @@
udent')%0A
+ list_config.setRowAction(lambda e, *args, **kwargs:%0A r.project(id=e.key().id_or_name(), student=e.parent().link_id).%0A urlOf('gsoc_project_details'))%0A
self
@@ -2640,28 +2640,70 @@
-fields
+list_query
=
-%7B'
pro
-gram':
+ject_logic.getAcceptedProjectsQuery(%0A
sel
@@ -2720,47 +2720,171 @@
gram
-,
+)%0A
%0A
- 'status': 'accepted'%7D
+starter = lists.keyStarter%0A prefetcher = lists.modelPrefetcher(GSoCProject, %5B'org', 'mentor'%5D,%0A parent=True)%0A
%0A
@@ -2911,16 +2911,19 @@
= lists.
+Raw
QueryCon
@@ -2990,16 +2990,18 @@
ig,
-sp_logic
+list_query
,%0A
@@ -3012,14 +3012,15 @@
-fields
+starter
, pr
@@ -3029,39 +3029,21 @@
etch
-=%5B'student', 'scope', 'mentor'%5D
+er=prefetcher
)%0A
|
23f87565a9074b7d6bd5e45b1ce8686ba49d3ce8
|
Update nsoltSynthesis2dNetwork.py
|
appendix/pytorch/nsoltSynthesis2dNetwork.py
|
appendix/pytorch/nsoltSynthesis2dNetwork.py
|
import torch
import torch.nn as nn
from nsoltBlockIdct2dLayer import NsoltBlockIdct2dLayer
from nsoltFinalRotation2dLayer import NsoltFinalRotation2dLayer
class NsoltSynthesis2dNetwork(nn.Module):
def __init__(self,
number_of_channels=[],
decimation_factor=[]):
super(NsoltSynthesis2dNetwork, self).__init__()
self.number_of_channels = number_of_channels
self.decimation_factor = decimation_factor
# Instantiation of layers
self.layerV0 = NsoltFinalRotation2dLayer(
number_of_channels=number_of_channels,
decimation_factor=decimation_factor,
name='V0'
)
self.layerE0 = NsoltBlockIdct2dLayer(
decimation_factor=decimation_factor,
name='E0'
)
def forward(self,x):
u = self.layerV0.forward(x)
y = self.layerE0.forward(u)
return y
|
Python
| 0.000001
|
@@ -85,16 +85,17 @@
t2dLayer
+
%0D%0Afrom n
@@ -154,16 +154,128 @@
Layer %0D%0A
+from nsoltLayerExceptions import InvalidNumberOfChannels, InvalidPolyPhaseOrder, InvalidNumberOfVanishingMoments
%0D%0A%0D%0Aclas
@@ -403,66 +403,532 @@
r=%5B%5D
-):%0D%0A super(NsoltSynthesis2dNetwork, self).__init__(
+,%0D%0A polyphase_order=%5B0,0%5D,%0D%0A number_of_vanishing_moments=1):%0D%0A super(NsoltSynthesis2dNetwork, self).__init__()%0D%0A %0D%0A # Check and set parameters%0D%0A # # of channels%0D%0A if number_of_channels%5B0%5D != number_of_channels%5B1%5D:%0D%0A raise InvalidNumberOfChannels(%0D%0A '%5B%25d %25d%5D : Currently, Type-I NSOLT is only suported, where the symmetric and antisymmetric channel numbers should be the same.'%5C%0D%0A %25(number_of_channels%5B0%5D,number_of_channels%5B1%5D)
)%0D%0A
@@ -972,32 +972,70 @@
er_of_channels%0D%0A
+ %0D%0A # Decimaton factor%0D%0A
self.dec
@@ -1072,32 +1072,783 @@
ctor%0D%0A %0D%0A
+ # Polyphase order%0D%0A if any(torch.tensor(polyphase_order)%252):%0D%0A raise InvalidPolyPhaseOrder(%0D%0A '%25d + %25d : Currently, even polyphase orders are only supported.'%5C%0D%0A %25(polyphase_order%5B0%5D,polyphase_order%5B1%5D))%0D%0A self.polyphase_order = polyphase_order%0D%0A %0D%0A # # of vanishing moments%0D%0A if number_of_vanishing_moments %3C 0 %5C%0D%0A or number_of_vanishing_moments %3E 1:%0D%0A raise InvalidNumberOfVanishingMoments(%0D%0A '%25d : The number of vanishing moment must be either of 0 or 1.'%5C%0D%0A %25(number_of_vanishing_moments))%0D%0A self.number_of_vanishing_moments = number_of_vanishing_moments%0D%0A %0D%0A %0D%0A
# Instan
@@ -1886,16 +1886,17 @@
.layerV0
+T
= Nsolt
@@ -2040,16 +2040,17 @@
name='V0
+~
'%0D%0A
@@ -2075,16 +2075,17 @@
.layerE0
+T
= Nsolt
@@ -2173,16 +2173,17 @@
name='E0
+~
'%0D%0A
@@ -2240,16 +2240,17 @@
.layerV0
+T
.forward
@@ -2278,16 +2278,17 @@
.layerE0
+T
.forward
|
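The diff above introduces constructor-time validation of the NSOLT parameters. A minimal standalone sketch of those checks, following the intent described in the error messages (the exception classes here are local stand-ins for the ones imported from nsoltLayerExceptions):

class InvalidNumberOfChannels(Exception):
    pass

class InvalidPolyPhaseOrder(Exception):
    pass

def validate_nsolt(number_of_channels, polyphase_order):
    # Type-I NSOLT: symmetric and antisymmetric channel counts must match.
    if number_of_channels[0] != number_of_channels[1]:
        raise InvalidNumberOfChannels(
            '[%d %d]: the symmetric and antisymmetric channel numbers must be the same.'
            % tuple(number_of_channels))
    # Only even polyphase orders are supported.
    if any(order % 2 for order in polyphase_order):
        raise InvalidPolyPhaseOrder(
            '%d + %d: only even polyphase orders are supported.'
            % tuple(polyphase_order))

validate_nsolt([4, 4], [2, 2])    # passes
# validate_nsolt([4, 3], [2, 2])  # would raise InvalidNumberOfChannels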
0d056fefa1896a1e4d17b56f0e84dae106c17c57
|
fix bug
|
meta/api/views.py
|
meta/api/views.py
|
from django.conf.urls import patterns, url
from django.shortcuts import render
from django.http import Http404
from django.http import JsonResponse
from django.views.decorators.csrf import csrf_exempt
from django.contrib.sites.shortcuts import get_current_site
import requests
from .forms import UploadImageForm
from .models import Picture
# Create your views here.
@csrf_exempt
def image_handler(request):
    possible_food = set([
'apple',
'banana',
'carrot',
'broccoli'
])
if request.method != 'POST':
raise Http404('wrong method')
else:
form = UploadImageForm(request.POST, request.FILES)
current_site = get_current_site(request)
print current_site
if form.is_valid():
newpic = Picture(image=request.FILES['image'])
newpic.save()
auth = ('acc_2569f28daa2ca36', '5f3d54692a4dcdeda460024d50505ecd')
image_path = \
'http://' + str(current_site) + '/media/' + str(newpic.image.name)
r_url = 'https://api.imagga.com/v1/tagging?url=' + image_path
r = requests.get(r_url, auth=auth)
if r.status_code < 400:
data = r.json()
print data
foods = data['results'][0]['tags']
for food in foods:
                    if food['tag'] in possible_food:
return JsonResponse({'food': food['tag']})
return JsonResponse({'food': foods[0]['tag']})
else:
                raise Http404('Imagga error occurred')
    raise Http404('Unknown error occurred')
def recipe_handler(request):
if request.method != 'POST':
return Http404('wrong method')
|
Python
| 0.000001
|
@@ -501,16 +501,32 @@
roccoli'
+,%0A 'pear'
%0A %5D)%0A
|
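The handler above walks the Imagga tags in rank order and returns the first one that is a known food, falling back to the top-ranked tag. A small sketch of that selection as a pure function, assuming the v1 response shape used in image_handler (the sample response and whitelist are made up for the test):

KNOWN_FOODS = {'apple', 'banana', 'carrot', 'broccoli', 'pear'}

def pick_food_tag(imagga_response, known_foods=KNOWN_FOODS):
    """Return the first recognised food tag, else the top-ranked tag."""
    tags = imagga_response['results'][0]['tags']
    for tag in tags:
        if tag['tag'] in known_foods:
            return tag['tag']
    return tags[0]['tag']

fake_response = {'results': [{'tags': [{'tag': 'fruit'}, {'tag': 'pear'}]}]}
assert pick_food_tag(fake_response) == 'pear'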
421ef714711ceda42877b1e0bd171ffc88ad911d
|
Update Python requirement.
|
postgresql/release/distutils.py
|
postgresql/release/distutils.py
|
##
# .release.distutils - distutils data
##
"""
Python distutils data provisions module.
For sub-packagers, the `prefixed_packages` and `prefixed_extensions` functions
should be of particular interest. If the distribution including ``py-postgresql``
uses the standard layout, chances are that `prefixed_extensions` and
`prefixed_packages` will supply the appropriate information by default as they
use `default_prefix` which is derived from the module's `__package__`.
"""
import sys
import os
from ..project import version, name, identity as url
try:
from setuptools import Extension, Command
except ImportError as e:
from distutils.core import Extension, Command
LONG_DESCRIPTION = """
.. warning::
In v1.3, `postgresql.driver.dbapi20.connect` will now raise `ClientCannotConnectError` directly.
Exception traps around connect should still function, but the `__context__` attribute
on the error instance will be `None` in the usual failure case as it is no longer
incorrectly chained. Trapping `ClientCannotConnectError` ahead of `Error` should
allow both cases to co-exist in the event that data is being extracted from
the `ClientCannotConnectError`.
py-postgresql is a set of Python modules providing interfaces to various parts
of PostgreSQL. Primarily, it provides a pure-Python driver with some C optimizations for
querying a PostgreSQL database.
http://github.com/python-postgres/fe
Features:
* Prepared Statement driven interfaces.
* Cluster tools for creating and controlling a cluster.
* Support for most PostgreSQL types: composites, arrays, numeric, lots more.
* COPY support.
Sample PG-API Code::
>>> import postgresql
>>> db = postgresql.open('pq://user:password@host:port/database')
>>> db.execute("CREATE TABLE emp (emp_first_name text, emp_last_name text, emp_salary numeric)")
>>> make_emp = db.prepare("INSERT INTO emp VALUES ($1, $2, $3)")
>>> make_emp("John", "Doe", "75,322")
>>> with db.xact():
... make_emp("Jane", "Doe", "75,322")
... make_emp("Edward", "Johnson", "82,744")
...
There is a DB-API 2.0 module as well::
postgresql.driver.dbapi20
However, PG-API is recommended as it provides greater utility.
Once installed, try out the ``pg_python`` console script::
$ python3 -m postgresql.bin.pg_python -h localhost -p port -U theuser -d database_name
If a successful connection is made to the remote host, it will provide a Python
console with the database connection bound to the `db` name.
"""
CLASSIFIERS = [
'Development Status :: 5 - Production/Stable',
'Intended Audience :: Developers',
'License :: OSI Approved :: BSD License',
'License :: OSI Approved :: MIT License',
'License :: OSI Approved :: Attribution Assurance License',
'License :: OSI Approved :: Python Software Foundation License',
'Natural Language :: English',
'Operating System :: OS Independent',
'Programming Language :: Python',
'Programming Language :: Python :: 3',
'Topic :: Database',
]
subpackages = [
'bin',
'encodings',
'lib',
'protocol',
'driver',
'test',
'documentation',
'python',
'port',
'release',
# Modules imported from other packages.
'resolved',
'types',
'types.io',
]
extensions_data = {
'port.optimized' : {
'sources' : [os.path.join('port', '_optimized', 'module.c')],
},
}
subpackage_data = {
'lib' : ['*.sql'],
'documentation' : ['*.txt']
}
try:
# :)
if __package__ is not None:
default_prefix = __package__.split('.')[:-1]
else:
default_prefix = __name__.split('.')[:-2]
except NameError:
default_prefix = ['postgresql']
def prefixed_extensions(
prefix = default_prefix,
extensions_data = extensions_data,
) -> [Extension]:
"""
Generator producing the `distutils` `Extension` objects.
"""
pkg_prefix = '.'.join(prefix) + '.'
path_prefix = os.path.sep.join(prefix)
for mod, data in extensions_data.items():
yield Extension(
pkg_prefix + mod,
[os.path.join(path_prefix, src) for src in data['sources']],
libraries = data.get('libraries', ()),
optional = True,
)
def prefixed_packages(
prefix = default_prefix,
packages = subpackages,
):
"""
Generator producing the standard `package` list prefixed with `prefix`.
"""
prefix = '.'.join(prefix)
yield prefix
prefix = prefix + '.'
for pkg in packages:
yield prefix + pkg
def prefixed_package_data(
prefix = default_prefix,
package_data = subpackage_data,
):
"""
Generator producing the standard `package` list prefixed with `prefix`.
"""
prefix = '.'.join(prefix)
prefix = prefix + '.'
for pkg, data in package_data.items():
yield prefix + pkg, data
def standard_setup_keywords(build_extensions = True, prefix = default_prefix):
"""
Used by the py-postgresql distribution.
"""
d = {
'name' : name,
'version' : version,
'description' : 'PostgreSQL driver and tools library.',
'long_description' : LONG_DESCRIPTION,
'long_description_content_type' : 'text/x-rst',
'author' : 'James William Pye',
'author_email' : 'james.pye@gmail.com',
'maintainer' : 'James William Pye',
'maintainer_email' : 'james.pye@gmail.com',
'url' : url,
'classifiers' : CLASSIFIERS,
'packages' : list(prefixed_packages(prefix = prefix)),
'package_data' : dict(prefixed_package_data(prefix = prefix)),
'cmdclass': dict(test=TestCommand),
'python_requires': '>=3.3',
}
if build_extensions:
d['ext_modules'] = list(prefixed_extensions(prefix = prefix))
return d
class TestCommand(Command):
description = "run tests"
# List of option tuples: long name, short name (None if no short
# name), and help string.
user_options = []
def initialize_options(self):
pass
def finalize_options(self):
pass
def run(self):
import unittest
unittest.main(module='postgresql.test.testall', argv=('setup.py',))
|
Python
| 0
|
@@ -5265,17 +5265,17 @@
': '%3E=3.
-3
+8
',%0A%09%7D%0A%09i
|
afebd530cb19196d2101b91cee59011c770b9709
|
fix bug
|
meta/api/views.py
|
meta/api/views.py
|
from django.conf.urls import patterns, url
from django.shortcuts import render
from django.http import Http404
from django.http import JsonResponse
from django.views.decorators.csrf import csrf_exempt
from django.contrib.sites.shortcuts import get_current_site
import requests
from .forms import UploadImageForm
from .models import Picture
# Create your views here.
@csrf_exempt
def image_handler(request):
    possible_food = set([
'apple',
'banana',
'carrot',
'broccoli',
'pear',
'watermelon'
])
if request.method != 'POST':
raise Http404('wrong method')
else:
form = UploadImageForm(request.POST, request.FILES)
current_site = get_current_site(request)
print current_site
if form.is_valid():
newpic = Picture(image=request.FILES['image'])
newpic.save()
auth = ('acc_2569f28daa2ca36', '5f3d54692a4dcdeda460024d50505ecd')
image_path = \
'http://' + str(current_site) + '/media/' + str(newpic.image.name)
r_url = 'https://api.imagga.com/v1/tagging?url=' + image_path
r = requests.get(r_url, auth=auth)
if r.status_code < 400:
data = r.json()
print data
foods = data['results'][0]['tags']
for food in foods:
                    if food['tag'] in possible_food:
return JsonResponse({'food': food['tag']})
return JsonResponse({'food': foods[0]['tag']})
else:
                raise Http404('Imagga error occurred')
    raise Http404('Unknown error occurred')
def recipe_handler(request):
if request.method != 'GET':
return Http404('wrong method')
query = request.GET['id']
if query:
r_url = 'http://api.bigoven.com/recipe/%s?api_key=dvx4Bf83RbNOha0Re4c8ZYaTAe0X3hRj' % str(query)
r = requests.get(r_url, headers={"Accept": "application/json"})
if r.status_code < 400:
recipe = r.json()
processed_results = {}
if 'Instructions' not in recipe:
return JsonResponse({'error': 'Instructions not found'})
else:
instruction = recipe['Instructions'].replace('\n', ' ').replace('\r', '')
instructions = instruction.split('.')
instructions = map(
lambda sentence: sentence.strip(),
instructions
)
instructions = filter(
lambda s: not s.isspace(),
instructions
)
processed_results['Instructions'] = instructions
processed_results['Ingredient'] = map(
lambda ingredient: ingredient['Name'],
recipe['Ingredients']
)
return JsonResponse(processed_results)
    raise Http404('Unknown error occurred')
def list_handler(request):
if request.method != 'GET':
return Http404('Wrong method')
query = request.GET['name']
if query:
r_url = 'http://api.bigoven.com/recipes?title_kw=%s&api_key=dvx4Bf83RbNOha0Re4c8ZYaTAe0X3hRj&pg=1&rpp=3' % query
r = requests.get(r_url, headers={"Accept": "application/json"})
if r.status_code < 400:
results = r.json()['Results']
processed_results = map(
lambda recipe: {'title': recipe['Title'], 'id': recipe['RecipeID']},
results
)
return JsonResponse({'result': processed_results})
    return Http404('Unknown error occurred')
|
Python
| 0.000001
|
@@ -2600,16 +2600,22 @@
sspace()
+ and s
,%0A
|
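The one-character fix above works because ''.isspace() is False: after strip(), whitespace-only fragments become empty strings that 'not s.isspace()' alone keeps. A standalone illustration with a made-up instruction string:

raw = 'Preheat oven. \n Mix flour.\r.  '
instruction = raw.replace('\n', ' ').replace('\r', '')
instructions = [s.strip() for s in instruction.split('.')]

kept_old = [s for s in instructions if not s.isspace()]         # '' survives
kept_new = [s for s in instructions if not s.isspace() and s]   # '' dropped

print(kept_old)  # ['Preheat oven', 'Mix flour', '', '']
print(kept_new)  # ['Preheat oven', 'Mix flour']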
8ba569e719ccf552c8ff3e1a1d64ccd23d5aa4ff
|
move debug log to main process, fix nb_epoch
|
minos/train/trainer.py
|
minos/train/trainer.py
|
'''
Created on Feb 12, 2017
@author: julien
'''
import logging
from multiprocessing import Queue, Process
from threading import Thread
from time import time
import traceback
from minos.experiment.training import EpochStoppingCondition,\
AccuracyDecreaseStoppingCondition, AccuracyDecreaseStoppingConditionWrapper
from minos.train.utils import is_gpu_device, get_device_idx, get_logical_device
from minos.utils import disable_sysout
class MultiProcessModelTrainer(object):
def __init__(self, batch_iterator, test_batch_iterator, environment):
self.batch_iterator = batch_iterator
self.test_batch_iterator = test_batch_iterator
self.environment = environment
def build_and_train_models(self, blueprints):
logging.debug('Training %d models' % len(blueprints))
return self._start_training_workers(blueprints)
def _start_training_workers(self, blueprints):
try:
total_n_jobs = sum(self.environment.n_jobs)
work_queue = Queue(total_n_jobs)
result_queue = Queue(total_n_jobs)
self.processes = [
Process(
target=model_training_worker,
args=(
self.batch_iterator,
self.test_batch_iterator,
device_id,
device,
work_queue,
result_queue))
for device_id, device in enumerate(self.environment.devices)
for _job in range(self.environment.n_jobs[device_id])]
self.process_count = 0
for process in self.processes:
self.process_count += 1
process.start()
def _work_feeder():
count = len(blueprints)
for i, blueprint in enumerate(blueprints):
work_queue.put((i, count, blueprint))
for _ in range(sum(self.environment.n_jobs)):
work_queue.put(None)
Thread(target=_work_feeder).start()
results = []
while self.process_count > 0:
result = result_queue.get()
if result:
results.append(result)
else:
self.process_count -= 1
results = list(
sorted(
results,
key=lambda e: e[0]))
return results
except Exception as ex:
logging.error(ex)
logging.error(traceback.format_exc())
class ModelTrainer(object):
def __init__(self, batch_iterator, test_batch_iterator):
from minos.model.build import ModelBuilder
self.model_builder = ModelBuilder()
self.batch_iterator = batch_iterator
self.test_batch_iterator = test_batch_iterator
def train(self, blueprint, device_id, device):
try:
model = self.model_builder.build(
blueprint,
get_logical_device(device))
except Exception as ex:
return 0, blueprint, 0, device_id
try:
disable_sysout()
self._setup_tf(device)
nb_epoch, stopping_callbacks = self._get_stopping_parameters(blueprint)
start = time()
history = model.fit_generator(
self.batch_iterator,
self.batch_iterator.samples_per_epoch,
nb_epoch,
callbacks=stopping_callbacks,
validation_data=self.test_batch_iterator,
nb_val_samples=self.test_batch_iterator.sample_count)
score = model.evaluate_generator(
self.test_batch_iterator,
val_samples=self.test_batch_iterator.sample_count)
return score[1], history.epoch[-1], blueprint, (time() - start), device_id
except Exception as ex:
logging.error(ex)
logging.error(traceback.format_exc())
return 0, 0, blueprint, 0, device_id
def _get_stopping_parameters(self, blueprint):
if isinstance(blueprint.training.stopping, EpochStoppingCondition):
nb_epoch = blueprint.training.stopping.epoch
stopping_callbacks = []
if isinstance(blueprint.training.stopping, AccuracyDecreaseStoppingCondition):
nb_epoch = blueprint.training.stopping.max_epoch
stopping_callbacks = [
AccuracyDecreaseStoppingConditionWrapper(blueprint.training.stopping)]
return nb_epoch, stopping_callbacks
def _setup_tf(self, device):
import tensorflow as tf
config = tf.ConfigProto()
if is_gpu_device(device):
config.gpu_options.visible_device_list = str(
get_device_idx(device))
config.gpu_options.allow_growth = True
elif hasattr(config, 'gpu_options'):
config.gpu_options.allow_growth = True
config.gpu_options.visible_device_list = ''
from keras import backend
backend.set_session(tf.Session(config=config))
def model_training_worker(batch_iterator, test_batch_iterator,
device_id, device, work_queue, result_queue):
model_trainer = ModelTrainer(
batch_iterator,
test_batch_iterator)
work = work_queue.get()
while work:
try:
idx, total, blueprint = work
logging.debug('Processing blueprint %d/%d', idx, total)
result = model_trainer.train(
blueprint,
device_id,
device)
logging.debug(
'Blueprint %d: score %f after %d epochs',
idx,
result[0],
result[1])
result_queue.put([idx] + list(result))
work = work_queue.get()
except Exception as ex:
logging.error(ex)
result_queue.put(None)
|
Python
| 0
|
@@ -2217,16 +2217,222 @@
result:%0A
+ logging.debug(%0A 'Blueprint %25d: score %25f after %25d epochs',%0A result%5B0%5D,%0A result%5B1%5D,%0A result%5B2%5D)%0A
@@ -4585,32 +4585,127 @@
nb_epoch =
+ max(%0A 1,%0A blueprint.training.stopping.min_epoch,%0A
blueprint.train
@@ -4722,24 +4722,25 @@
ng.max_epoch
+)
%0A
@@ -5744,76 +5744,8 @@
ork%0A
- logging.debug('Processing blueprint %25d/%25d', idx, total)%0A
@@ -5864,168 +5864,8 @@
ce)%0A
- logging.debug(%0A 'Blueprint %25d: score %25f after %25d epochs',%0A idx,%0A result%5B0%5D,%0A result%5B1%5D)%0A
|
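Besides moving the per-blueprint debug log into the main process, the diff clamps the epoch budget with max(1, min_epoch, max_epoch), so a zero or missing maximum can no longer produce a zero-epoch run. A reduced sketch of that clamp; the stopping-condition class is a stand-in, not the real minos AccuracyDecreaseStoppingCondition:

class FakeStoppingCondition(object):
    def __init__(self, min_epoch, max_epoch):
        self.min_epoch = min_epoch
        self.max_epoch = max_epoch

def effective_epochs(stopping):
    # At least one epoch, and never fewer than the configured minimum.
    return max(1, stopping.min_epoch, stopping.max_epoch)

assert effective_epochs(FakeStoppingCondition(0, 0)) == 1
assert effective_epochs(FakeStoppingCondition(5, 3)) == 5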
3400dc215ecd8f06a00d68ddcd9dda54d837a040
|
tweak unionfs config wording.
|
mint/buildtemplates.py
|
mint/buildtemplates.py
|
# Copyright (c) 2004-2006 rPath, Inc.
#
# All Rights Reserved
import sys
from mint.data import RDT_STRING, RDT_BOOL, RDT_INT, RDT_ENUM, RDT_TROVE
from mint import buildtypes
class BuildOption(tuple):
def __new__(self):
return tuple.__new__(tuple, (self.type, self.default, self.prompt))
class StringOption(BuildOption):
type = RDT_STRING
class IntegerOption(BuildOption):
type = RDT_INT
class BooleanOption(BuildOption):
type = RDT_BOOL
class TroveOption(BuildOption):
type = RDT_TROVE
class EnumOption(BuildOption):
type = RDT_ENUM
def __new__(self):
return tuple.__new__(tuple, (self.type, self.default, self.prompt, self.options))
optionNameMap = {
'anacondaCustomTrove': 'anaconda-custom',
'anacondaTemplatesTrove': 'anaconda-templates',
'mediaTemplateTrove': 'media-template',
}
class Template(dict):
def __init__(self):
for option in self.__slots__:
newOption = optionNameMap.get(option, option)
dict.__setitem__(self, newOption,
sys.modules[__name__].__dict__[option]())
# *** Extremely Important ***
# Changing the names or semantic meanings of option classes or templates is
# the same thing as making a schema upgrade! do not do this lightly.
###
# Option Classes
###
class bugsUrl(StringOption):
default = 'http://issues.rpath.com/'
prompt = 'Bug report URL'
class installLabelPath(StringOption):
default = ''
prompt = 'Custom Conary installLabelPath setting (leave blank for default)'
class autoResolve(BooleanOption):
default = False
prompt = 'Automatically install required dependencies during updates'
class baseFileName(StringOption):
default = ''
prompt = 'Custom output filename prefix (replaces name-version-arch)'
class showMediaCheck(BooleanOption):
default = False
prompt = 'Prompt to verify CD/DVD images during install'
class betaNag(BooleanOption):
default = False
prompt = 'This build is considered a beta'
class maxIsoSize(EnumOption):
default = '681574400'
prompt = 'ISO Size'
options = buildtypes.discSizes
class freespace(IntegerOption):
default = 250
prompt = 'How many MB of free space should be allocated in the image?'
class swapSize(IntegerOption):
default = 128
prompt = 'How many MB swap space should be reserved in this image?'
class vmMemory(IntegerOption):
default = 256
prompt = 'How much memory should VMware use when running this image?'
class unionfs(BooleanOption):
default = False
prompt = "Enable copy-on-write for entire filesystem. To use this, your group must contain the unionfs kernel module. (unionfs is available in contrib (unsupported). The unionfs module you use must match your kernel version.)"
class zisofs(BooleanOption):
default = True
prompt = 'Compress filesystem'
class boolArg(BooleanOption):
default = False
prompt = 'Garbage Boolean'
class stringArg(StringOption):
default = ''
prompt = 'Garbage String'
class intArg(IntegerOption):
default = 0
prompt = 'Garbage Integer'
class enumArg(EnumOption):
default = '2'
prompt = 'Garbage Enum'
options = {'foo' : '0', 'bar': '1', 'baz': '2'}
class mediaTemplateTrove(TroveOption):
default = ''
prompt = 'Version of media-template to use when creating this image'
class anacondaCustomTrove(TroveOption):
default = ''
prompt = 'Version of anaconda-custom to use when creating this image'
class anacondaTemplatesTrove(TroveOption):
default = ''
prompt = 'Version of anaconda-templates to use when creating this image'
###
# Templates
# classes must end with 'Template' to be properly processed.
###
class StubImageTemplate(Template):
__slots__ = ['boolArg', 'stringArg', 'intArg', 'enumArg']
id = buildtypes.STUB_IMAGE
class RawHdTemplate(Template):
__slots__ = ['autoResolve', 'freespace', 'baseFileName',
'installLabelPath', 'swapSize']
id = buildtypes.RAW_HD_IMAGE
class RawFsTemplate(Template):
__slots__ = ['autoResolve', 'freespace', 'baseFileName',
'installLabelPath', 'swapSize']
id = buildtypes.RAW_FS_IMAGE
class VmwareImageTemplate(Template):
__slots__ = ['autoResolve', 'freespace', 'baseFileName', 'vmMemory',
'installLabelPath', 'swapSize']
id = buildtypes.VMWARE_IMAGE
class InstallableIsoTemplate(Template):
__slots__ = ['autoResolve', 'maxIsoSize', 'baseFileName', 'bugsUrl',
'installLabelPath', 'showMediaCheck', 'betaNag',
'mediaTemplateTrove', 'anacondaCustomTrove', 'anacondaTemplatesTrove']
id = buildtypes.INSTALLABLE_ISO
class NetbootTemplate(Template):
__slots__ = ['autoResolve', 'baseFileName', 'installLabelPath']
id = buildtypes.NETBOOT_IMAGE
class LiveIsoTemplate(Template):
__slots__ = ['autoResolve', 'baseFileName', 'installLabelPath', 'zisofs',
'unionfs']
id = buildtypes.LIVE_ISO
class TarballTemplate(Template):
__slots__ = ['autoResolve', 'baseFileName', 'installLabelPath', 'swapSize']
id = buildtypes.TARBALL
########################
dataHeadings = {}
dataTemplates = {}
for templateName in [x for x in sys.modules[__name__].__dict__.keys() \
if x.endswith('Template') and x != 'Template']:
template = sys.modules[__name__].__dict__[templateName]()
dataHeadings[template.id] = buildtypes.typeNames[template.id] + \
' Settings'
dataTemplates[template.id] = template
def getDataTemplate(buildType):
if buildType:
return dataTemplates[buildType]
else:
return {}
def getDisplayTemplates():
return [(x, dataHeadings[x], dataTemplates[x]) \
for x in dataTemplates.keys()]
|
Python
| 0
|
@@ -2578,25 +2578,23 @@
ble
-copy-on-write for
+UnionFS for the
ent
@@ -2613,56 +2613,37 @@
em.
-To use this, your group must contain
+(For this option,
the
-u
+U
nion
-fs
+FS
ker
@@ -2656,112 +2656,94 @@
dule
-. (unionfs is available in contrib (unsupported). The unionfs module you use must match your kernel vers
+ is required in the group. See rBuilder documentation for more information on this opt
ion.
|
bfe039111e2b8e83a450b0d5e5f56827338cd017
|
Fix slashes in blog urlpatterns.
|
mezzanine/urls.py
|
mezzanine/urls.py
|
"""
This is the main ``urlconf`` for Mezzanine - it sets up patterns for
all the various Mezzanine apps, third-party apps like Grappelli and
filebrowser.
"""
from __future__ import unicode_literals
from future.builtins import str
from django.conf.urls import include, url
from django.contrib.sitemaps.views import sitemap
from django.views.i18n import javascript_catalog
from django.http import HttpResponse
from mezzanine.conf import settings
from mezzanine.core.sitemaps import DisplayableSitemap
urlpatterns = []
# JavaScript localization feature
js_info_dict = {'domain': 'django'}
urlpatterns += [
url(r'^jsi18n/(?P<packages>\S+?)/$', javascript_catalog, js_info_dict),
]
if settings.DEBUG and "debug_toolbar" in settings.INSTALLED_APPS:
try:
import debug_toolbar
except ImportError:
pass
else:
urlpatterns += [
url(r'^__debug__/', include(debug_toolbar.urls)),
]
# Django's sitemap app.
if "django.contrib.sitemaps" in settings.INSTALLED_APPS:
sitemaps = {"sitemaps": {"all": DisplayableSitemap}}
urlpatterns += [
url("^sitemap\.xml$", sitemap, sitemaps),
]
# Return a robots.txt that disallows all spiders when DEBUG is True.
if getattr(settings, "DEBUG", False):
urlpatterns += [
url("^robots.txt$", lambda r: HttpResponse("User-agent: *\nDisallow: /",
content_type="text/plain")),
]
# Miscellaneous Mezzanine patterns.
urlpatterns += [
url("^", include("mezzanine.core.urls")),
url("^", include("mezzanine.generic.urls")),
]
# Mezzanine's Accounts app
if "mezzanine.accounts" in settings.INSTALLED_APPS:
# We don't define a URL prefix here such as /account/ since we want
# to honour the LOGIN_* settings, which Django has prefixed with
# /account/ by default. So those settings are used in accounts.urls
urlpatterns += [
url("^", include("mezzanine.accounts.urls")),
]
# Mezzanine's Blog app.
blog_installed = "mezzanine.blog" in settings.INSTALLED_APPS
if blog_installed:
BLOG_SLUG = settings.BLOG_SLUG.rstrip("/")
blog_patterns = [
url("^%s" % BLOG_SLUG, include("mezzanine.blog.urls")),
]
urlpatterns += blog_patterns
# Mezzanine's Pages app.
PAGES_SLUG = ""
if "mezzanine.pages" in settings.INSTALLED_APPS:
# No BLOG_SLUG means catch-all patterns belong to the blog,
# so give pages their own prefix and inject them before the
# blog urlpatterns.
if blog_installed and not BLOG_SLUG:
PAGES_SLUG = getattr(settings, "PAGES_SLUG", "pages").strip("/") + "/"
blog_patterns_start = urlpatterns.index(blog_patterns[0])
urlpatterns[blog_patterns_start:len(blog_patterns)] = [
url("^%s" % str(PAGES_SLUG), include("mezzanine.pages.urls")),
]
else:
urlpatterns += [
url("^", include("mezzanine.pages.urls")),
]
|
Python
| 0.000001
|
@@ -2116,16 +2116,22 @@
rip(%22/%22)
+ + %22/%22
%0A blo
@@ -2530,16 +2530,28 @@
LOG_SLUG
+.rstrip(%22/%22)
:%0A
|
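The fix above normalises BLOG_SLUG to exactly one trailing slash and strips trailing slashes again when testing whether the blog is mounted at the root. A hypothetical helper pair distilled from that logic:

def blog_url_prefix(blog_slug):
    # '', 'blog', 'blog/' and 'blog//' all end up with exactly one slash.
    return blog_slug.rstrip("/") + "/"

def blog_is_catch_all(blog_slug):
    # Root-mounted blog: nothing left once trailing slashes are removed.
    return not blog_slug.rstrip("/")

assert blog_url_prefix("blog//") == "blog/"
assert blog_is_catch_all("/") and blog_is_catch_all("")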
338282a17766345f054f570d5063a7b0b803727b
|
Enable migration with special chars in pw
|
migrations/env.py
|
migrations/env.py
|
from __future__ import with_statement
from alembic import context
from sqlalchemy import engine_from_config, pool
from sqlalchemy.engine.url import make_url
from logging.config import fileConfig
# this is the Alembic Config object, which provides
# access to the values within the .ini file in use.
config = context.config
# Interpret the config file for Python logging.
# This line sets up loggers basically.
fileConfig(config.config_file_name)
# add your model's MetaData object here
# for 'autogenerate' support
# from myapp import mymodel
# target_metadata = mymodel.Base.metadata
from flask import current_app
def set_database_url(config):
url = current_app.config.get('SQLALCHEMY_DATABASE_URI')
try:
# In case of MySQL, add ``charset=utf8`` to the parameters (if no charset is set),
# because this is what Flask-SQLAlchemy does
if url.startswith("mysql"):
parsed_url = make_url(url)
parsed_url.query.setdefault("charset", "utf8")
url = str(parsed_url)
except Exception as exx:
print(u"Attempted to set charset=utf8 on connection, but failed: {}".format(exx))
# set_main_option() requires escaped "%" signs in the string
config.set_main_option('sqlalchemy.url', url.replace('%', '%%'))
set_database_url(config)
target_metadata = current_app.extensions['migrate'].db.metadata
# other values from the config, defined by the needs of env.py,
# can be acquired:
# my_important_option = config.get_main_option("my_important_option")
# ... etc.
def run_migrations_offline():
"""Run migrations in 'offline' mode.
This configures the context with just a URL
and not an Engine, though an Engine is acceptable
here as well. By skipping the Engine creation
we don't even need a DBAPI to be available.
Calls to context.execute() here emit the given string to the
script output.
"""
url = config.get_main_option("sqlalchemy.url")
context.configure(url=url)
with context.begin_transaction():
context.run_migrations()
def run_migrations_online():
"""Run migrations in 'online' mode.
In this scenario we need to create an Engine
and associate a connection with the context.
"""
# FIX for Postgres updates
url = config.get_section(config.config_ini_section).get("sqlalchemy.url")
driver = url.split(":")[0]
if driver == "postgresql+psycopg2":
engine = engine_from_config(
config.get_section(config.config_ini_section),
prefix='sqlalchemy.',
isolation_level="AUTOCOMMIT",
poolclass=pool.NullPool)
else:
engine = engine_from_config(
config.get_section(config.config_ini_section),
prefix='sqlalchemy.',
poolclass=pool.NullPool)
connection = engine.connect()
context.configure(
connection=connection,
target_metadata=target_metadata,
compare_type=True
)
try:
with context.begin_transaction():
context.run_migrations()
finally:
connection.close()
if context.is_offline_mode():
print("Running offline")
run_migrations_offline()
else:
print("Running online")
run_migrations_online()
|
Python
| 0
|
@@ -183,24 +183,65 @@
t fileConfig
+%0Afrom six.moves.urllib.parse import quote
%0A%0A# this is
@@ -1032,16 +1032,155 @@
%22utf8%22)%0A
+ # We need to quote the password in case it contains special chars%0A parsed_url.password = quote(parsed_url.password)%0A
@@ -3367,16 +3367,17 @@
lose()%0A%0A
+%0A
if conte
@@ -3514,13 +3514,12 @@
ns_online()%0A
-%0A
|
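The fix above percent-encodes the password before the URL is re-serialised, since characters like '@' or '%' would otherwise corrupt the connection string. A standalone sketch using the stdlib urllib.parse.quote on Python 3 (the migration script imports it through six.moves for Python 2/3 compatibility); note that safe='' also escapes '/', which quote leaves alone by default:

from urllib.parse import quote

password = 'p@ss/w%rd'
quoted = quote(password, safe='')
url = 'mysql://pi:{0}@localhost/pi?charset=utf8'.format(quoted)

print(quoted)  # p%40ss%2Fw%25rd
print(url)     # mysql://pi:p%40ss%2Fw%25rd@localhost/pi?charset=utf8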
3380d0fed1a8d24eba8627bd65dccc1fb2f772dd
|
Update version to next release
|
minio/__init__.py
|
minio/__init__.py
|
# -*- coding: utf-8 -*-
# MinIO Python Library for Amazon S3 Compatible Cloud Storage,
# (C) 2015, 2016, 2017 MinIO, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
minio - MinIO Python Library for Amazon S3 Compatible Cloud Storage
~~~~~~~~~~~~~~~~~~~~~
>>> import minio
>>> minio = Minio('https://s3.amazonaws.com')
>>> for bucket in minio.list_buckets():
... print(bucket.name)
:copyright: (c) 2015, 2016, 2017 by MinIO, Inc.
:license: Apache 2.0, see LICENSE for more details.
"""
__title__ = 'minio-py'
__author__ = 'MinIO, Inc.'
__version__ = '5.0.1'
__license__ = 'Apache 2.0'
__copyright__ = 'Copyright 2015, 2016, 2017, 2018, 2019 MinIO, Inc.'
from .api import Minio
from .error import ResponseError
from .post_policy import PostPolicy
from .copy_conditions import CopyConditions
from .definitions import Bucket, Object
|
Python
| 0
|
@@ -1074,17 +1074,17 @@
= '5.0.
-1
+2
'%0A__lice
|
5f7cde32f64965fe8f75dd229d67598a53362701
|
Fix checker
|
model/simple-python/components/checker.py
|
model/simple-python/components/checker.py
|
from . import *
import z3
class PropertyChecker (object):
"""Actually check for properties in the network graph etc."""
def __init__ (self, context, network):
self.ctx = context
self.net = network
self.solver = z3.Solver()
self.constraints = list ()
self.primed = False
def CheckIsolationProperty (self, src, dest):
assert(src in self.net.elements)
assert(dest in self.net.elements)
if not self.primed:
self.CheckNow()
self.solver.push ()
p = z3.Const('__reachability_Packet_%s_%s'%(src.z3Node, dest.z3Node), self.ctx.packet)
eh = z3.Const('__reachability_last_Node_%s_%s'%(src.z3Node, dest.z3Node), self.ctx.node)
self.solver.add(z3.Exists([eh], self.ctx.recv(eh, dest.z3Node, p)))
self.solver.add(self.ctx.packet.origin(p) == src.z3Node)
self.result = self.solver.check()
if self.result == z3.sat:
self.model = self.solver.model ()
self.solver.pop()
return self.result
def CheckImpliedIsolation (self, srcn, destn, src, dest):
assert(srcn in self.net.elements)
assert(destn in self.net.elements)
if not self.primed:
self.CheckNow()
self.solver.push()
pn = z3.Const('__implied_reachability_neg_Packet_%s_%s'%(src.z3Node, dest.z3Node), self.ctx.packet)
ehn = z3.Const('__implied_reachability_neg_last_Node_%s_%s'%(src.z3Node, dest.z3Node), self.ctx.node)
p = z3.Const('__implied_reachability_Packet_%s_%s'%(src.z3Node, dest.z3Node), self.ctx.packet)
eh = z3.Const('__implied_reachability_last_Node_%s_%s'%(src.z3Node, dest.z3Node), self.ctx.node)
self.solver.add(z3.And(z3.Not(z3.Exists([pn, ehn], \
z3.And(self.ctx.recv(ehn, destn.z3Node, pn), \
self.ctx.packet.origin(pn) == srcn))),
z3.And(z3.Exists([eh], \
self.ctx.recv(eh, dest.node, p)), \
self.ctx.packet.origin(p) == src)))
self.result = self.solver.check()
if self.result == z3.sat:
self.model = self.solver.model ()
self.solver.pop()
return self.result
def CheckNow (self):
self.ctx._addConstraints(self.solver)
self.net._addConstraints(self.solver)
for el in self.net.elements:
el._addConstraints(self.solver)
self.primed = True
|
Python
| 0.000003
|
@@ -1911,16 +1911,23 @@
== srcn
+.z3Node
))),%0A
@@ -2041,17 +2041,19 @@
h, dest.
-n
+z3N
ode, p))
@@ -2127,16 +2127,23 @@
) == src
+.z3Node
)))%0A
|
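The bug fixed above was handing the solver a Python wrapper object where a z3 constant (the wrapper's .z3Node) was expected. A minimal z3py sketch of the underlying pattern, assuming the z3-solver package; the sort and function here are made up rather than taken from the real context model:

import z3

Node = z3.DeclareSort('Node')
recv = z3.Function('recv', Node, Node, z3.BoolSort())

dst = z3.Const('dst', Node)
h = z3.Const('last_hop', Node)

solver = z3.Solver()
# Reachability query: does some last hop deliver to dst?
solver.add(z3.Exists([h], recv(h, dst)))
print(solver.check())  # sat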
7b3e630f60ab2a917106cc6e09b92123001a0083
|
Fix code indentation typo
|
module/plugins/internal/CaptchaService.py
|
module/plugins/internal/CaptchaService.py
|
# -*- coding: utf-8 -*-
import re
from random import random
class CaptchaService:
__name__ = "CaptchaService"
__version__ = "0.07"
__description__ = """Base captcha service plugin"""
__author_name__ = "pyLoad Team"
__author_mail__ = "admin@pyload.org"
KEY_PATTERN = None
key = None
def __init__(self, plugin):
self.plugin = plugin
def detect_key(self, html=None):
if not html:
if hasattr(self.plugin, "html") and self.plugin.html:
html = self.plugin.html
else:
errmsg = "%s html missing" % self.__name__
self.plugin.fail(errmsg)
raise TypeError(errmsg)
m = re.search(self.KEY_PATTERN, html)
if m:
self.key = m.group("KEY")
            self.plugin.logDebug("%s key: %s" % (self.__name__, self.key))
return self.key
else:
self.plugin.logDebug("%s key not found" % self.__name__)
return None
def challenge(self, key):
raise NotImplementedError
def result(self, server, challenge):
raise NotImplementedError
class ReCaptcha(CaptchaService):
__name__ = "ReCaptcha"
__version__ = "0.02"
__description__ = """ReCaptcha captcha service plugin"""
__author_name__ = "pyLoad Team"
__author_mail__ = "admin@pyload.org"
KEY_PATTERN = r"https?://(?:www\.)?google\.com/recaptcha/api/challenge\?k=(?P<KEY>\w+?)"
KEY_AJAX_PATTERN = r"Recaptcha\.create\s*\(\s*[\"'](?P<KEY>\w+)[\"']\s*,"
def detect_key(self, html=None):
if not html:
if hasattr(self.plugin, "html") and self.plugin.html:
html = self.plugin.html
else:
errmsg = "ReCaptcha html missing"
self.plugin.fail(errmsg)
raise TypeError(errmsg)
m = re.search(self.KEY_PATTERN, html)
if m is None:
m = re.search(self.KEY_AJAX_PATTERN, html)
if m:
self.key = m.group("KEY")
self.plugin.logDebug("ReCaptcha key: %s" % self.key)
return self.key
else:
self.plugin.logDebug("ReCaptcha key not found")
return None
def challenge(self, key=key):
if not key:
errmsg = "ReCaptcha key missing"
self.plugin.fail(errmsg)
raise TypeError(errmsg)
js = self.plugin.req.load("http://www.google.com/recaptcha/api/challenge", get={'k': key}, cookies=True)
try:
challenge = re.search("challenge : '(.+?)',", js).group(1)
server = re.search("server : '(.+?)',", js).group(1)
except:
self.plugin.parseError("ReCaptcha challenge pattern not found")
result = self.result(server, challenge)
return challenge, result
def result(self, server, challenge):
return self.plugin.decryptCaptcha("%simage" % server, get={'c': challenge},
cookies=True, forceUser=True, imgtype="jpg")
class AdsCaptcha(CaptchaService):
__name__ = "AdsCaptcha"
__version__ = "0.02"
__description__ = """AdsCaptcha captcha service plugin"""
__author_name__ = "pyLoad Team"
__author_mail__ = "admin@pyload.org"
ID_PATTERN = r'http://api\.adscaptcha\.com/Get\.aspx\?[^"\']*CaptchaId=(?P<ID>\d+)'
KEY_PATTERN = r'http://api\.adscaptcha\.com/Get\.aspx\?[^"\']*PublicKey=(?P<KEY>[\w-]+)'
def detect_key(self, html=None):
if not html:
if hasattr(self.plugin, "html") and self.plugin.html:
html = self.plugin.html
else:
errmsg = "AdsCaptcha html missing"
self.plugin.fail(errmsg)
raise TypeError(errmsg)
m = re.search(self.ID_PATTERN, html)
n = re.search(self.KEY_PATTERN, html)
if m and n:
self.key = (m.group("ID"), m.group("KEY"))
self.plugin.logDebug("AdsCaptcha id|key: %s | %s" % self.key)
return self.key
else:
self.plugin.logDebug("AdsCaptcha id or key not found")
return None
def challenge(self, key=key): #: key is tuple(CaptchaId, PublicKey)
CaptchaId, PublicKey = key
if not CaptchaId or not PublicKey:
errmsg = "AdsCaptcha key missing"
self.plugin.fail(errmsg)
raise TypeError(errmsg)
js = self.plugin.req.load("http://api.adscaptcha.com/Get.aspx", get={'CaptchaId': CaptchaId, 'PublicKey': PublicKey}, cookies=True)
try:
challenge = re.search("challenge: '(.+?)',", js).group(1)
server = re.search("server: '(.+?)',", js).group(1)
except:
self.plugin.parseError("AdsCaptcha challenge pattern not found")
result = self.result(server, challenge)
return challenge, result
def result(self, server, challenge):
return self.plugin.decryptCaptcha("%sChallenge.aspx" % server, get={'cid': challenge, 'dummy': random()},
cookies=True, imgtype="jpg")
class SolveMedia(CaptchaService):
__name__ = "SolveMedia"
__version__ = "0.02"
__description__ = """SolveMedia captcha service plugin"""
__author_name__ = "pyLoad Team"
__author_mail__ = "admin@pyload.org"
KEY_PATTERN = r'http://api\.solvemedia\.com/papi/challenge\.(no)?script\?k=(?P<KEY>.+?)"'
def challenge(self, key=key):
if not key:
errmsg = "SolveMedia key missing"
self.plugin.fail(errmsg)
raise TypeError(errmsg)
html = self.plugin.req.load("http://api.solvemedia.com/papi/challenge.noscript", get={'k': key}, cookies=True)
try:
challenge = re.search(r'<input type=hidden name="adcopy_challenge" id="adcopy_challenge" value="([^"]+)">',
html).group(1)
server = "http://api.solvemedia.com/papi/media"
except:
self.plugin.parseError("SolveMedia challenge pattern not found")
result = self.result(server, challenge)
return challenge, result
def result(self, server, challenge):
return self.plugin.decryptCaptcha(server, get={'c': challenge}, imgtype="gif")
|
Python
| 0.000008
|
@@ -137,9 +137,9 @@
%220.0
-7
+8
%22%0A%0A
@@ -2231,16 +2231,17 @@
None%0A%0A%0A
+
def c
|
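Each captcha service above locates its site key by running a regex with a named KEY group over the page HTML, trying an AJAX-style pattern as a fallback. A tiny sketch of that idiom; the patterns are simplified (greedy \w+ instead of the originals) and the sample HTML is invented:

import re

KEY_PATTERN = r"recaptcha/api/challenge\?k=(?P<KEY>\w+)"
KEY_AJAX_PATTERN = r"Recaptcha\.create\s*\(\s*[\"'](?P<KEY>\w+)[\"']"

def detect_key(html):
    m = re.search(KEY_PATTERN, html) or re.search(KEY_AJAX_PATTERN, html)
    return m.group('KEY') if m else None

html = '<script src="http://www.google.com/recaptcha/api/challenge?k=abc123">'
assert detect_key(html) == 'abc123'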
97b594b84811da7bd90a615752c47c8982c1303c
|
fix addon order - dumper must be last
|
mitmproxy/dump.py
|
mitmproxy/dump.py
|
from __future__ import absolute_import, print_function, division
from typing import Optional # noqa
import typing # noqa
from mitmproxy import controller
from mitmproxy import exceptions
from mitmproxy import flow
from mitmproxy import builtins
from mitmproxy import options
from mitmproxy.builtins import dumper, termlog
from netlib import tcp
class DumpError(Exception):
pass
class Options(options.Options):
def __init__(
self,
keepserving=False, # type: bool
filtstr=None, # type: Optional[str]
flow_detail=1, # type: int
tfile=None, # type: Optional[typing.io.TextIO]
**kwargs
):
self.filtstr = filtstr
self.flow_detail = flow_detail
self.keepserving = keepserving
self.tfile = tfile
super(Options, self).__init__(**kwargs)
class DumpMaster(flow.FlowMaster):
def __init__(self, server, options):
flow.FlowMaster.__init__(self, options, server, flow.DummyState())
self.has_errored = False
self.addons.add(dumper.Dumper())
self.addons.add(termlog.TermLog())
self.addons.add(*builtins.default_addons())
# This line is just for type hinting
self.options = self.options # type: Options
self.set_stream_large_bodies(options.stream_large_bodies)
if not self.options.no_server and server:
self.add_log(
"Proxy server listening at http://{}".format(server.address),
"info"
)
if self.server and self.options.http2 and not tcp.HAS_ALPN: # pragma: no cover
self.add_log(
"ALPN support missing (OpenSSL 1.0.2+ required)!\n"
"HTTP/2 is disabled. Use --no-http2 to silence this warning.",
"error"
)
if options.rfile:
try:
self.load_flows_file(options.rfile)
except exceptions.FlowReadException as v:
self.add_log("Flow file corrupted.", "error")
raise DumpError(v)
if self.options.app:
self.start_app(self.options.app_host, self.options.app_port)
def _readflow(self, paths):
"""
        Utility function that reads a list of flows
or raises a DumpError if that fails.
"""
try:
return flow.read_flows_from_paths(paths)
except exceptions.FlowReadException as e:
raise DumpError(str(e))
@controller.handler
def log(self, e):
if e.level == "error":
self.has_errored = True
def run(self): # pragma: no cover
if self.options.rfile and not self.options.keepserving:
self.addons.done()
return
super(DumpMaster, self).run()
|
Python
| 0
|
@@ -1048,49 +1048,8 @@
lse%0A
- self.addons.add(dumper.Dumper())%0A
@@ -1135,24 +1135,65 @@
t_addons())%0A
+ self.addons.add(dumper.Dumper())%0A
# Th
|
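The reorder above matters because addons run in registration order: the dumper has to be added after the default addons so it observes flows as they look once the other addons have processed them. A toy illustration of order-dependent dispatch (this is not the mitmproxy addon API):

class Addons(object):
    def __init__(self):
        self._addons = []

    def add(self, *addons):
        self._addons.extend(addons)

    def trigger(self, event, flow):
        # Handlers fire in the order their addons were registered.
        for addon in self._addons:
            handler = getattr(addon, event, None)
            if handler:
                handler(flow)

class Rewriter(object):
    def response(self, flow):
        flow['body'] = flow['body'].upper()

class Dumper(object):
    def response(self, flow):
        print(flow['body'])

addons = Addons()
addons.add(Rewriter())
addons.add(Dumper())  # registered last, so it sees the rewritten flow
addons.trigger('response', {'body': 'hello'})  # prints HELLO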
66201aadbace8515239bc93668ded544cc9a9e32
|
Version 0.0.8.3
|
mochi/__init__.py
|
mochi/__init__.py
|
__author__ = 'Yasushi Itoh'
__version__ = '0.0.8.2'
__license__ = 'MIT License'
import sys
IS_PYTHON_34 = sys.version_info.major == 3 and sys.version_info.minor == 4
|
Python
| 0.000001
|
@@ -46,9 +46,9 @@
0.8.
-2
+3
'%0A__
|
5bd911cb1f6975f8ecafe5a53a2bbc365793256c
|
save tagger.py
|
tagger.py
|
tagger.py
|
# Convolutional tagger over subwords
import itertools
import functools
import json
import os
import pickle
import sys
from datetime import datetime
import numpy as np
np.random.seed(1337) # for reproducibility
import tensorflow as tf
from keras.preprocessing import sequence
from keras.preprocessing.text import Tokenizer
from keras.models import Model
from keras.layers import *
from keras.callbacks import EarlyStopping
from keras.objectives import categorical_crossentropy
from keras.metrics import categorical_accuracy
from keras.utils import np_utils
from keras import backend as K
from loss import *
from tags import *
from subwords import *
print('Vocab size:', len(word_map.word_counts))
print('Tag set size:', len(tag_map.word_counts))
size = '8k'
max_subwords = 10
subword_paths = [
'data/ptb_all.' + size + '.txt',
'data/web_all.' + size + '.txt',
]
if len(sys.argv) > 1:
subword_paths = sys.argv
subword_map, subworder = map_and_subworder(texts, subword_paths, max_subwords)
# %%
# Build a model.
num_words = len(word_map.word_index)+1
num_subwords = len(subword_map.word_index)+1
num_tags = len(tag_map.word_index)+1
max_len = max(w.count(' ') + 1 for w in subworder.keys())
# max_subwords = max(w.count(' ') + 1 for s in subworder.values() for w in s)
word_size = 64
# Embed each subword
subwords = Input(shape=(max_len, max_subwords), dtype='int32', name='Subwords')
sub_embedding = Embedding(input_dim=num_subwords, output_dim=word_size, mask_zero=True)
# Embed subword sequences each into a single word vector.
embedded = TimeDistributed(LSTM(word_size))(TimeDistributed(sub_embedding)(subwords))
# Build a convolutional network
convolved = Convolution1D(word_size, 5, border_mode='same')(embedded)
representations = [embedded, convolved]
merged = merge(representations, mode='sum')
tags = Dense(num_tags, activation='softmax', name='Tag')(merged)
model = Model(input=subwords, output=tags)
model.compile(optimizer='adam',
loss=padded_categorical_crossentropy,
metrics=[padded_categorical_accuracy])
# %%
# Prepare data format for model.
def word_string(tagged):
return str(' '.join(w for w, t in tagged))
def tag_string(tagged):
return str(' '.join(t for w, t in tagged))
def prep(tagged_sents):
"""Convert a dataset of tagged sentences into inputs and outputs."""
tagged_sents = list(tagged_sents) # because we'll iterate twice
x = np.array([prep_subword(word_string(t)) for t in tagged_sents])
tags = tag_map.texts_to_sequences(map(tag_string, tagged_sents))
padded_tags = sequence.pad_sequences(tags, maxlen=max_len, value=0)
y = np.array([np_utils.to_categorical(t, num_tags) for t in padded_tags])
return x, y
def prep_subword(sentence):
"""Convert sentence into a padded array of subword embeddings."""
subs = subword_map.texts_to_sequences(subworder[sentence])
padded_subs = sequence.pad_sequences(subs, maxlen=max_subwords, value=0)
padding = np.zeros([max_len-len(subs), max_subwords])
return np.append(padding, padded_subs, axis=0).astype(np.int32)
x, y = prep(itertools.islice(tagged_sents([ptb_train]), 0, 1000))
# x, y = prep(tagged_sents([ptb_train]))
val = prep(tagged_sents([ptb_dev]))
test = prep(tagged_sents([ptb_test]))
web_tests = [prep(tagged_sents([w])) for w in web_all]
# %%
early_stopping = EarlyStopping(monitor='val_padded_categorical_accuracy',
min_delta=0.001, patience=2, verbose=1)
history = model.fit(x, y, batch_size=32, nb_epoch=1, verbose=1,
validation_data=val, callbacks=[early_stopping])
losses = []
accs = []
for name, data in zip(['val', 'test'] + web_genres, [val, test] + web_tests):
loss = model.evaluate(*data, verbose=2)
losses.append((name, loss))
accs.append('{:0.4f}'.format(loss[1]))
print('{}: loss: {:0.4f} - acc: {:0.4f}'.format(name, *loss))
print('\t'.join(accs))
# %%
# Save everything
output_dir = datetime.today().strftime('exp/%y%m%d_%H%M%S')
if not os.path.exists('exp'):
os.mkdir('exp')
os.mkdir(output_dir)
with open(output_dir + '/model.json', 'w') as jout:
jout.write(model.to_json())
model.save(output_dir + '/model.h5')
with open(output_dir + '/history.json', 'w') as jout:
pickle.dump(history.history, jout)
with open(output_dir + '/loss.json', 'w') as jout:
json.dump(loss, jout)
|
Python
| 0.000001
|
@@ -4367,16 +4367,18 @@
ump(loss
+es
, jout)%0A
@@ -4369,16 +4369,73 @@
p(losses, jout)%0A
+%0Ashutil.copyfile('tagger.py', output_dir + '/tagger.py')%0A
|
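prep_subword above builds a fixed-shape array by pre-padding each word's subword ids to max_subwords and then pre-padding the sentence with zero rows to max_len. A numpy-only sketch of that padding scheme, mirroring the pre-padding default of Keras' pad_sequences (the sample ids are arbitrary):

import numpy as np

def pad_sentence(subs, max_len, max_subwords):
    rows = []
    for word in subs:
        word = word[-max_subwords:]                           # truncate from the front
        rows.append([0] * (max_subwords - len(word)) + word)  # pre-pad each word
    padding = [[0] * max_subwords] * (max_len - len(rows))    # pre-pad the sentence
    return np.array(padding + rows, dtype=np.int32)

print(pad_sentence([[5, 7], [9]], max_len=3, max_subwords=4))
# [[0 0 0 0]
#  [0 0 5 7]
#  [0 0 0 9]]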
13f73e514165b54bfb879fa324cdc1364b79579e
|
remove some params, check prev experiments.
|
movielens_test.py
|
movielens_test.py
|
import numpy as np
import tensorflow as tf
from sklearn.model_selection import KFold
import itertools
from model import VAEMF
num_user = 943
num_item = 1682
hidden_encoder_dim = 216
hidden_decoder_dim = 216
latent_dim = 24
output_dim = 24
learning_rate = 0.002
batch_size = 64
reg_param = 0
one_hot = False
n_steps = 1000
hedims = [64, 128, 256, 512]
hddims = [64, 128, 256, 512]
ldims = [8, 16, 32, 64, 128]
odims = [8, 16, 32, 64, 128]
lrates = [0.001, 0.002, 0.01, 0.02]
bsizes = [64, 128, 256, 512, 1024, 2048]
regs = [0.001, 0.002, 0.01, 0.1, 0.5, 1]
def read_dataset():
M = np.zeros([num_user, num_item])
with open('./data/ml-100k/u.data', 'r') as f:
for line in f.readlines():
tokens = line.split()
            user_id = int(tokens[0]) - 1  # 0-based index
item_id = int(tokens[1]) - 1
rating = int(tokens[2])
M[user_id, item_id] = rating
return M
def cross_validation():
M = read_dataset()
n_fold = 10
rating_idx = np.array(M.nonzero()).T
kf = KFold(n_splits=n_fold, random_state=0)
with tf.Session() as sess:
model = VAEMF(sess, num_user, num_item,
hidden_encoder_dim=hidden_encoder_dim, hidden_decoder_dim=hidden_decoder_dim,
latent_dim=latent_dim, output_dim=output_dim, learning_rate=learning_rate, batch_size=batch_size, reg_param=reg_param)
for i, (train_idx, test_idx) in enumerate(kf.split(rating_idx)):
print("{0}/{1} Fold start| Train size={2}, Test size={3}".format(i,
n_fold, train_idx.size, test_idx.size))
model.train(M, train_idx=train_idx,
test_idx=test_idx, n_steps=n_steps)
def train_test_validation():
M = read_dataset()
num_rating = np.count_nonzero(M)
idx = np.arange(num_rating)
np.random.seed(0)
np.random.shuffle(idx)
train_idx = idx[:int(0.8 * num_rating)]
valid_idx = idx[int(0.8 * num_rating):int(0.9 * num_rating)]
test_idx = idx[int(0.9 * num_rating):]
for hidden_encoder_dim, hidden_decoder_dim, latent_dim, output_dim, learning_rate, batch_size, reg_param in itertools.product(hedims, hddims, ldims, odims, lrates, bsizes, regs):
result_path = "{0}_{1}_{2}_{3}_{4}_{5}_{6}".format(hidden_encoder_dim, hidden_decoder_dim, latent_dim, output_dim, learning_rate, batch_size, reg_param)
with tf.Session() as sess:
model = VAEMF(sess, num_user, num_item,
hidden_encoder_dim=hidden_encoder_dim, hidden_decoder_dim=hidden_decoder_dim,
latent_dim=latent_dim, output_dim=output_dim, learning_rate=learning_rate, batch_size=batch_size, reg_param=reg_param, one_hot=True)
print("Train size={0}, Validation size={1}, Test size={2}".format(
train_idx.size, valid_idx.size, test_idx.size))
best_mse, best_mae = model.train_test_validation(
M, train_idx=train_idx, test_idx=test_idx, valid_idx=valid_idx, n_steps=n_steps, result_path=result_path)
print("Best MSE = {0}, best MAE = {1}".format(best_mse, best_mae))
with open('result.csv', 'a') as f:
f.write("{0},{1},{2},{3},{4},{5},{6},{7},{8}\n".format(hidden_encoder_dim, hidden_decoder_dim, latent_dim, output_dim, learning_rate, batch_size, reg_param, best_mse, best_mae))
tf.reset_default_graph()
if __name__ == '__main__':
train_test_validation()
|
Python
| 0
|
@@ -1,12 +1,33 @@
+import os, itertools%0A
import numpy
@@ -102,25 +102,8 @@
Fold
-%0Aimport itertools
%0A%0Afr
@@ -306,12 +306,11 @@
t =
-Fals
+Tru
e%0An_
@@ -321,16 +321,17 @@
s = 1000
+0
%0A%0Ahedims
@@ -326,36 +326,32 @@
0000%0A%0Ahedims = %5B
-64,
128, 256, 512%5D%0Ah
@@ -355,28 +355,24 @@
%5D%0Ahddims = %5B
-64,
128, 256, 51
@@ -375,35 +375,32 @@
, 512%5D%0Aldims = %5B
-8,
16, 32, 64, 128%5D
@@ -409,19 +409,16 @@
dims = %5B
-8,
16, 32,
@@ -472,20 +472,16 @@
izes = %5B
-64,
128, 256
@@ -2433,16 +2433,84 @@
_param)%0A
+ if not os.path.exists(result_path+%22/model.ckpt.index%22):%0A
@@ -2540,32 +2540,36 @@
ss:%0A
+
model = VAEMF(se
@@ -2584,32 +2584,36 @@
user, num_item,%0A
+
@@ -2692,32 +2692,36 @@
en_decoder_dim,%0A
+
@@ -2871,24 +2871,28 @@
+
+
print(%22Train
@@ -2946,16 +2946,20 @@
format(%0A
+
@@ -3026,16 +3026,20 @@
+
best_mse
@@ -3096,16 +3096,20 @@
+
+
M, train
@@ -3215,24 +3215,28 @@
+
print(%22Best
@@ -3299,24 +3299,28 @@
+
+
with open('r
@@ -3334,32 +3334,36 @@
sv', 'a') as f:%0A
+
|
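The diff above makes the hyper-parameter sweep resumable: each combination gets a result directory named after its values and is skipped when its checkpoint marker already exists. A small sketch of that pattern (paths and parameter grids are illustrative):

import itertools
import os

ldims = [16, 32]
lrates = [0.001, 0.01]

for latent_dim, learning_rate in itertools.product(ldims, lrates):
    result_path = '{0}_{1}'.format(latent_dim, learning_rate)
    if os.path.exists(os.path.join(result_path, 'model.ckpt.index')):
        continue  # finished in a previous run, skip
    print('would train', result_path)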
87f08dc5a847a12ecbb8f9dfc21303f324e12e99
|
Fix execution of `commit_msg`
|
resources/git/hooks/commit_msg.py
|
resources/git/hooks/commit_msg.py
|
#! /usr/bin/env python
# -*- coding: utf-8 -*-
from __future__ import absolute_import, division, print_function, unicode_literals
import inspect
import sys
import textwrap
#===============================================================================
FIRST_LINE_MAX_CHAR_LENGTH = 70
MESSAGE_BODY_MAX_CHAR_LINE_LENGTH = 72
COMMIT_MESSAGE_FUNCS_PREFIX = "commit_msg"
def print_error_message_and_exit(message, exit_status=1):
print(message, file=sys.stderr)
sys.exit(exit_status)
#===============================================================================
def commit_msg_check_first_line_length(commit_message):
first_line = commit_message.split("\n")[0]
if len(first_line) <= FIRST_LINE_MAX_CHAR_LENGTH:
return commit_message
else:
print_error_message_and_exit(
"First line of commit message too long ({}), must be at most {}".format(
len(first_line), FIRST_LINE_MAX_CHAR_LENGTH))
def commit_msg_check_second_line_is_empty(commit_message):
lines = commit_message.split("\n")
if len(lines) <= 1 or not lines[1]:
return commit_message
else:
print_error_message_and_exit(
"If writing a commit message body, the second line must be empty")
def commit_msg_remove_trailing_period_from_first_line(commit_message):
lines = commit_message.split("\n")
first_line, body = lines[0], lines[1:]
first_line_processed = first_line.rstrip(".")
return "\n".join([first_line_processed] + body)
def commit_msg_capitalize_first_letter_in_header(commit_message):
lines = commit_message.split("\n")
first_line, body = lines[0], lines[1:]
first_line_segments = first_line.split(":", 1)
if len(first_line_segments) <= 1:
first_line_processed = first_line
else:
scope, header = first_line_segments
header_without_leading_space = header.lstrip(" ")
header_capitalized = (
" " + header_without_leading_space[0].upper()
+ header_without_leading_space[1:])
first_line_processed = ":".join([scope, header_capitalized])
return "\n".join([first_line_processed] + body)
def commit_msg_wrap_message_body(commit_message):
lines = commit_message.split("\n")
first_line, body = lines[0], lines[1:]
if not body:
return commit_message
else:
wrapped_body = [
textwrap.fill(
line,
MESSAGE_BODY_MAX_CHAR_LINE_LENGTH,
replace_whitespace=False,
drop_whitespace=False)
for line in body]
return "\n".join([first_line] + wrapped_body)
def commit_msg_remove_trailing_newlines(commit_message):
return commit_message.rstrip("\n")
def process_commit_messages(commit_message_filepath):
with open(commit_message_filepath, "r") as commit_message_file:
commit_message = commit_message_file.read()
commit_message_funcs = (
_get_module_level_functions_with_prefix(COMMIT_MESSAGE_FUNCS_PREFIX))
for func_and_args in commit_message_funcs:
func, args = func_and_args[0], func_and_args[1:]
commit_message = func(commit_message, *args)
with open(commit_message_filepath, "w") as commit_message_file:
commit_message_file.write(commit_message)
def _get_module_level_functions_with_prefix(prefix):
return [
member_obj
for member_name, member_obj in inspect.getmembers(sys.modules[__name__])
if inspect.isfunction(member_obj) and member_name.startswith(prefix)]
#===============================================================================
def main():
process_commit_messages(sys.argv[1])
if __name__ == "__main__":
main()
|
Python
| 0
|
@@ -2886,25 +2886,16 @@
for func
-_and_args
in comm
@@ -2916,61 +2916,8 @@
cs:%0A
- func, args = func_and_args%5B0%5D, func_and_args%5B1:%5D%0A
@@ -2956,15 +2956,8 @@
sage
-, *args
)%0A
|
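The fix above drops the (func, args) unpacking because _get_module_level_functions_with_prefix returns bare functions. A reduced sketch of that discover-and-apply pipeline; note that inspect.getmembers sorts members alphabetically, so the numeric infixes here pin the execution order (the hook names are invented):

import inspect
import sys

def commit_msg_1_strip(message):
    return message.strip()

def commit_msg_2_capitalize(message):
    return message[:1].upper() + message[1:]

def funcs_with_prefix(prefix):
    # getmembers returns (name, value) pairs sorted by name.
    return [obj for name, obj in inspect.getmembers(sys.modules[__name__])
            if inspect.isfunction(obj) and name.startswith(prefix)]

message = '  fix the thing  '
for func in funcs_with_prefix('commit_msg'):
    message = func(message)
print(repr(message))  # 'Fix the thing'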
87443336f6ff71d7b43520308c32a6f7b2cd594c
|
move assignmentId to data
|
mturk/viewsets.py
|
mturk/viewsets.py
|
from rest_framework.viewsets import GenericViewSet
from rest_framework import mixins
from rest_framework.decorators import detail_route, list_route
from django.contrib.auth.models import User
from django.shortcuts import get_object_or_404
from rest_framework.response import Response
from rest_framework import status
from hashids import Hashids
from django.db import transaction
from mturk.models import MTurkHIT, MTurkAssignment, MTurkNotification
from crowdsourcing.models import TaskWorker, TaskWorkerResult
from crowdsourcing.serializers.task import TaskSerializer, TaskWorkerResultSerializer
from mturk.interface import MTurkProvider
from mturk.permissions import IsValidHITAssignment
from mturk.utils import get_or_create_worker
from csp import settings
class MTurkAssignmentViewSet(mixins.CreateModelMixin, GenericViewSet):
queryset = MTurkAssignment.objects.all()
serializer_class = TaskSerializer
def create(self, request, *args, **kwargs):
worker = get_or_create_worker(worker_id=request.query_params.get('workerId'))
provider = MTurkProvider('https://' + request.get_host())
task_id = request.data.get('taskId', -1)
task_hash = Hashids(salt=settings.SECRET_KEY, min_length=settings.MTURK_HASH_MIN_LENGTH)
task_id = task_hash.decode(task_id)
if len(task_id) == 0:
task_id = -1
hit_id = request.data.get('hitId', -1)
mturk_hit = get_object_or_404(MTurkHIT, task_id=task_id, hit_id=hit_id)
assignment_id = request.data.get('assignmentId', -1)
mturk_assignment_id = None
if assignment_id != 'ASSIGNMENT_ID_NOT_AVAILABLE':
assignment, is_valid = provider.get_assignment(assignment_id)
if not assignment or (is_valid and assignment.HITId != hit_id):
return Response(data={"message": "Invalid assignment"}, status=status.HTTP_400_BAD_REQUEST)
task_worker, created = TaskWorker.objects.get_or_create(worker=worker, task_id=task_id)
if created:
task_worker.task_status=TaskWorker.STATUS_IN_PROGRESS
task_worker.save()
assignment, created = MTurkAssignment.objects.get_or_create(hit=mturk_hit,
assignment_id=assignment_id,
task_worker=task_worker)
mturk_assignment_id = assignment.id
if created:
assignment.status = TaskWorker.STATUS_IN_PROGRESS
assignment.save()
task_serializer = TaskSerializer(instance=mturk_hit.task,
fields=('id', 'template', 'project_data', 'status'))
response_data = {
'task': task_serializer.data,
'assignment': mturk_assignment_id
}
return Response(data=response_data, status=status.HTTP_200_OK)
@detail_route(methods=['post'], permission_classes=[IsValidHITAssignment], url_path='submit-results')
def submit_results(self, request, *args, **kwargs):
worker = get_or_create_worker(worker_id=request.query_params.get('workerId'))
mturk_assignment = self.get_object()
template_items = request.data.get('template_items', [])
with transaction.atomic():
if not mturk_assignment.task_worker:
task_worker, created = TaskWorker.objects.get_or_create(worker=worker,
task=mturk_assignment.hit.task,
task_status=TaskWorker.STATUS_SUBMITTED)
mturk_assignment.task_worker = task_worker
mturk_assignment.save()
task_worker_results = TaskWorkerResult.objects.filter(task_worker_id=mturk_assignment.task_worker.id)
serializer = TaskWorkerResultSerializer(data=template_items, many=True)
if serializer.is_valid():
if task_worker_results.count() != 0:
serializer.update(task_worker_results, serializer.validated_data)
else:
serializer.create(task_worker=mturk_assignment.task_worker)
return Response(data={'message': 'Success'}, status=status.HTTP_200_OK)
else:
return Response(serializer.errors, status.HTTP_400_BAD_REQUEST)
@list_route(methods=['post', 'get'], url_path='notification')
def notification(self, request, *args, **kwargs):
MTurkNotification.objects.create(data={'id': 1})
MTurkNotification.objects.create(data=request.data)
MTurkNotification.objects.create(data=request.query_params)
return Response(data={}, status=status.HTTP_200_OK)
|
Python
| 0.000001
|
@@ -1011,36 +1011,28 @@
_id=request.
-query_params
+data
.get('worker
@@ -3096,94 +3096,8 @@
s):%0A
- worker = get_or_create_worker(worker_id=request.query_params.get('workerId'))%0A
@@ -3240,460 +3240,8 @@
():%0A
- if not mturk_assignment.task_worker:%0A task_worker, created = TaskWorker.objects.get_or_create(worker=worker,%0A task=mturk_assignment.hit.task,%0A task_status=TaskWorker.STATUS_SUBMITTED)%0A mturk_assignment.task_worker = task_worker%0A mturk_assignment.save()%0A
|
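The diff above moves workerId from the URL query string into the POST body and drops the per-request worker lookup in submit_results. A hedged sketch of the query-string/body distinction in a DRF view (the view name is illustrative, not from this repo):

# Sketch: reading an id from the request body instead of the query string in DRF.
from rest_framework.response import Response
from rest_framework.views import APIView

class ExampleView(APIView):  # hypothetical view, for illustration only
    def post(self, request):
        # before: ?workerId=... on the URL
        worker_id_qs = request.query_params.get('workerId')
        # after: {"workerId": ...} in the POST body
        worker_id_body = request.data.get('workerId')
        return Response({'from_query': worker_id_qs, 'from_body': worker_id_body})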
ef1336fcb30f951d057b5c943a948e8e5d95f07b
|
Add import json in roomfinder_dispo
|
roomfinder_dispo/roomfinder_dispo/dispo.py
|
roomfinder_dispo/roomfinder_dispo/dispo.py
|
#!/usr/bin/env python2.7
import sys
reload(sys)
sys.setdefaultencoding("utf-8")
from flask import Flask, render_template, request, jsonify
import argparse
import datetime
import os, sys
import requests
from socket import error as SocketError
import errno
import pika    # needed by send_message_to_queue(); missing from the original imports
import uuid    # needed for the RPC correlation ids; missing from the original imports
app = Flask(__name__)
@app.route("/book", methods=["GET"])
def book():
starttime=request.args.get('starttime', '')
endtime=request.args.get('endtime', '')
user_name=request.args.get('user_name', '')
user_email=request.args.get('user_email', '')
room_name=request.args.get('room_name', '')
return "no parameter provided to book request\n"
data = {
"cmd": "book",
"data": {"starttime": starttime, "endtime": endtime, "user_name": user_name, "user_email": user_email, "room_name": room_name}
}
message = json.dumps(data)
return send_message_to_queue(message)
@app.route("/dispo", methods=["GET"])
def dispo():
key=request.args.get('key', '')
sys.stderr.write( "key: "+str(key)+'\r\n')
if key is not None and str(key) is not "":
data = {
"cmd": "dispo",
"data": {"key": key}
}
message = json.dumps(data)
return send_message_to_queue(message)
return "no parameter provided to dispo request\n"
def on_response(ch, method, props, body):
global corr_id
global response
if corr_id == props.correlation_id:
response = body
def send_message_to_queue(message):
global corr_id
global response
global connection
global channel
global callback_queue
response=None
connection = pika.BlockingConnection(pika.ConnectionParameters(host=rabbitmq,port=rabbitmq_port,heartbeat_interval=30))
channel = connection.channel()
result=channel.queue_declare(exclusive=True)
callback_queue = result.method.queue
channel.basic_consume(on_response, no_ack=True,
queue=callback_queue)
    corr_id = str(uuid.uuid4())
channel.basic_publish( exchange='',
routing_key="rpc_queue",
properties=pika.BasicProperties(
reply_to = callback_queue,
correlation_id = corr_id),
body=message)
print(" [x] Sent data to RabbitMQ")
while response is None:
connection.process_data_events()
print(" [x] Get response from RabbitMQ")
return response
if __name__ == '__main__':
from argparse import ArgumentParser
parser = ArgumentParser("Room Finder Dispo Service")
parser.add_argument("-r","--rabbitmq", help="IP or hostname for rabbitmq server, e.g. 'rabbit.domain.com'.")
parser.add_argument("-p","--port", help="tcp port for rabitmq server, e.g. '2765'.")
args = parser.parse_args()
rabbitmq = args.rabbitmq
if (rabbitmq == None):
rabbitmq = os.getenv("roomfinder_rabbitmq_server")
if (rabbitmq == None):
get_rabbitmq_server = raw_input("What is the rabbitmq server IP or hostname? ")
rabbitmq = get_rabbitmq_server
rabbitmq_port = args.port
if (rabbitmq_port == None):
rabbitmq_port = os.getenv("roomfinder_rabbitmq_port")
if (rabbitmq_port == None):
get_rabbitmq_port = raw_input("What is the rabbitmq TCP port? ")
rabbitmq_port = get_rabbitmq_port
try:
app.run(debug=True, host='0.0.0.0', port=int("5000"))
except:
try:
app.run(debug=True, host='0.0.0.0', port=int("5000"))
except:
print "Dispo web services error"
|
Python
| 0.000001
|
@@ -248,16 +248,28 @@
rt errno
+%0Aimport json
%0A%0Aapp =
@@ -573,16 +573,129 @@
', '')%0A%0A
+ if starttime is None or endtime is None or user_name is None or user_email is None or room_name is None:%0A
retu
@@ -739,17 +739,16 @@
uest%5Cn%22%0A
-%0A
data
|
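send_message_to_queue above is the classic pika RPC pattern: publish with a reply_to queue and a correlation id, then spin on process_data_events until the matching reply arrives. The same pattern as a self-contained sketch, written against the pika 1.x API (the script above uses the older 0.x signatures; host and queue names are assumptions):

# Sketch: blocking RPC client over RabbitMQ with pika 1.x (assumed host/queue names).
import uuid
import pika

def rpc_call(message, host='localhost', queue='rpc_queue'):
    connection = pika.BlockingConnection(pika.ConnectionParameters(host=host))
    channel = connection.channel()
    result = channel.queue_declare(queue='', exclusive=True)  # server-named reply queue
    callback_queue = result.method.queue
    corr_id = str(uuid.uuid4())
    response = {}

    def on_response(ch, method, props, body):
        if props.correlation_id == corr_id:  # ignore replies meant for other calls
            response['body'] = body

    channel.basic_consume(queue=callback_queue, on_message_callback=on_response, auto_ack=True)
    channel.basic_publish(
        exchange='',
        routing_key=queue,
        properties=pika.BasicProperties(reply_to=callback_queue, correlation_id=corr_id),
        body=message)
    while 'body' not in response:
        connection.process_data_events()
    connection.close()
    return response['body']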
4815315b2200cb8061640f6333f2cb96b0707965
|
update admin to enable list edit
|
django_sso/admin.py
|
django_sso/admin.py
|
from django.contrib import admin
from django_sso import settings
from django_sso.models import Assignment
class AssignmentAdmin(admin.ModelAdmin):
pass
admin.site.register(Assignment, AssignmentAdmin)
if settings.DJANGO_SSO_ADD_LOGIN_BUTTON:
admin.site.login_template = 'django_sso/login.html'
|
Python
| 0
|
@@ -151,12 +151,173 @@
-pass
+list_display = ('__unicode__', 'username', 'username_mode', 'domain', 'user', 'weight')%0A list_editable = ('username', 'username_mode', 'domain', 'user', 'weight')
%0A%0Aad
|
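list_editable only accepts fields that also appear in list_display and are not the change-link column, which is why '__unicode__' stays first in the diff above. An illustration with that constraint spelled out (model import path and field names are made up):

# Sketch: inline-editable changelist (field names are illustrative).
from django.contrib import admin
from myapp.models import Assignment  # hypothetical import path

class AssignmentAdmin(admin.ModelAdmin):
    # The first column renders the change-form link, so it cannot be editable.
    list_display = ('__str__', 'username', 'domain', 'weight')
    # Every entry here must also be in list_display (and not the link column).
    list_editable = ('username', 'domain', 'weight')

admin.site.register(Assignment, AssignmentAdmin)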
5022cefce20ecf20d7689fa6d2c280f8774ee677
|
add forgot password link to admin login
|
djangogirls/urls.py
|
djangogirls/urls.py
|
from django.conf.urls import patterns, include, url
from django.contrib import admin
from django.views.generic.base import RedirectView
urlpatterns = patterns('',
# Redirect old links:
url(r'^pages/in-your-city/$', RedirectView.as_view(url='/organize/', permanent=True)),
url(r'^admin$', RedirectView.as_view(url='/admin/', permanent=True)),
# Regular links:
url(r'^community/', include('jobs.urls', namespace='jobs')),
url(r'^admin/', include(admin.site.urls)),
url(r'^pages/', include('django.contrib.flatpages.urls')),
url(r'^account/', include('django.contrib.auth.urls')),
url(r'', include('core.urls', namespace='core')),
url(r'', include('applications.urls', namespace='applications')),
url(r'^ckeditor/', include('ckeditor.urls')),
)
|
Python
| 0
|
@@ -350,16 +350,336 @@
rue)),%0A%0A
+ # Admin link for password reset%0A # See: https://github.com/darklow/django-suit/blob/92a745d72935622220eca80edfce779419c30094/suit/templates/admin/login.html#L61%0A url(r'%5Eadmin/password_reset/$',%0A RedirectView.as_view(url='/account/password_reset', permanent=True),%0A name='admin_password_reset'),%0A%0A
# Re
|
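The pattern added above works because Django's admin login template shows a password-reset link whenever a URL named admin_password_reset can be reversed. A minimal sketch in path() syntax (an assumption; the project above still uses patterns()):

# Sketch: exposing admin_password_reset so the admin login page shows the link.
from django.urls import path
from django.views.generic.base import RedirectView

urlpatterns = [
    path('admin/password_reset/',
         RedirectView.as_view(url='/account/password_reset', permanent=True),
         name='admin_password_reset'),  # the name is what the admin template checks
]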
caa63e934fb252c17feac25a01f4298ee4d9b3e1
|
Remove STATIC_URL from Django settings.
|
hello_world/settings.py
|
hello_world/settings.py
|
"""
Django settings for hello_world project.
Generated by 'django-admin startproject' using Django 1.9.1.
For more information on this file, see
https://docs.djangoproject.com/en/1.9/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/1.9/ref/settings/
"""
import os
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/1.9/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
SECRET_KEY = '80o&#_i#(1haanv=!4w1ns3r5oal!rih-)lm8+@aens#^i(eu7'
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True
ALLOWED_HOSTS = []
# Application definition
INSTALLED_APPS = [
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
]
MIDDLEWARE_CLASSES = [
'django.middleware.security.SecurityMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.auth.middleware.SessionAuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
]
ROOT_URLCONF = 'hello_world.urls'
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': [],
'APP_DIRS': True,
'OPTIONS': {
'context_processors': [
'django.template.context_processors.debug',
'django.template.context_processors.request',
'django.contrib.auth.context_processors.auth',
'django.contrib.messages.context_processors.messages',
],
},
},
]
WSGI_APPLICATION = 'hello_world.wsgi.application'
# Database
# https://docs.djangoproject.com/en/1.9/ref/settings/#databases
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.sqlite3',
'NAME': os.path.join(BASE_DIR, 'db.sqlite3'),
}
}
# Password validation
# https://docs.djangoproject.com/en/1.9/ref/settings/#auth-password-validators
AUTH_PASSWORD_VALIDATORS = [
{
'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
},
]
# Internationalization
# https://docs.djangoproject.com/en/1.9/topics/i18n/
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'UTC'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/1.9/howto/static-files/
STATIC_URL = '/static/'
STATIC_ROOT = os.path.join(BASE_DIR, 'static')
|
Python
| 0
|
@@ -3178,52 +3178,4 @@
c/'%0A
-%0ASTATIC_ROOT = os.path.join(BASE_DIR, 'static')%0A
|
c7291b333c00d11e54339b2b2ae14b399cfdc12c
|
fix sys import (thank you, Orlando)
|
retirement_api/utils/check_api.py
|
retirement_api/utils/check_api.py
|
# script to check the retirement api to make sure
# the SSA Quick Calculator is operational
# and to log the result to a csv (currently via cron)
import requests
import datetime
import json
import time
import signal
timestamp = datetime.datetime.now()
# rolling dob to guarantee subject is 44 and full retirement age is 67
dob = timestamp - datetime.timedelta(days=44*365+30)
timeout_seconds = 15
class TimeoutError(Exception):
pass
def handler(signum, frame):
raise TimeoutError("Request timed out")
class Collector(object):
data = ''
date = "%s" % timestamp
status = ''
error = ''
note = ''
api_fail = ''
timer = ''
collector = Collector()
log_header = ['data', 'date', 'status', 'error', 'note', 'api_fail', 'timer']
local_base = 'http://localhost:8080'
api_base = 'retirement/retirement-api'
api_string = '%s/%s/estimator/%s-%s-%s/70000/'
def print_msg(collector):
msg = ",".join([collector.__getattribute__(key) for key in log_header])
print msg
return msg
def check_data(data):
""" For a 44-year-old, the api should
always return an age, a full retirement age
and a value for benefits at age 70
"""
if (data['current_age'] == 44 and
data['data']['full retirement age'] == '67' and
data['data']['benefits']['age 70']):
return "OK"
else:
return "BAD DATA"
def run(base):
url = api_string % (base, api_base, dob.month, dob.day, dob.year)
signal.signal(signal.SIGALRM, handler)
signal.alarm(timeout_seconds)
start = time.time()
try:
test_request = requests.get(url)
except requests.ConnectionError:
end = time.time()
signal.alarm(0)
collector.status = "ABORTED"
collector.error = 'Server connection error'
collector.api_fail = 'FAIL'
except TimeoutError:
end = time.time()
signal.alarm(0)
collector.status = "TIMEDOUT"
collector.error = 'SSA request exceeded 15 sec'
collector.api_fail = 'FAIL'
else:
if test_request.status_code != 200:
signal.alarm(0)
end = time.time()
collector.status = "%s" % test_request.status_code
collector.error = test_request.reason
collector.api_fail = 'FAIL'
else:
end = time.time()
signal.alarm(0)
data = json.loads(test_request.text)
collector.status = "%s" % test_request.status_code
collector.error = data['error']
collector.note = data['note']
collector.data = check_data(data)
collector.timer = "%s" % (end - start)
print_msg(collector)
print url
return collector
if __name__ == '__main__':
"""runs against a local url unless a domain is passed
"""
try:
base = sys.argv[1]
except:
base = local_base
run(base)
|
Python
| 0
|
@@ -139,16 +139,27 @@
a cron)%0A
+import sys%0A
import r
@@ -219,16 +219,46 @@
t signal
+%0Afrom urlparse import urlparse
%0A%0Atimest
@@ -2861,24 +2861,39 @@
%22%22%22%0A
-%0A
-try
+for arg in sys.argv
:%0A
@@ -2898,39 +2898,98 @@
-ba
+par
se
+d
=
-sys.argv%5B1%5D%0A except:%0A
+urlparse(arg)%0A if parsed.netloc:%0A base = arg%0A else:%0A
|
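The script above bounds each blocking request with signal.alarm, raising a custom TimeoutError from a SIGALRM handler. The pattern in isolation (Unix-only, main thread only; the sleep stands in for the HTTP call):

# Sketch: bounding a blocking call with SIGALRM.
import signal
import time

class TimeoutError(Exception):  # mirrors the script; shadows the py3 builtin
    pass

def handler(signum, frame):
    raise TimeoutError("Request timed out")

signal.signal(signal.SIGALRM, handler)
signal.alarm(2)          # raise after 2 seconds
try:
    time.sleep(10)       # stand-in for a blocking request
except TimeoutError:
    print("timed out")
finally:
    signal.alarm(0)      # always disarm the timer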
123f3e706e1931120e035acd2f11fcb1fa3320c7
|
Remove unnecessary files
|
rules/QEMU.py
|
rules/QEMU.py
|
import xyz
class Qemu(xyz.BuildProtocol):
pkg_name = 'QEMU'
deps = ['pkg-config', 'gettext', 'glib']
def configure(self, builder, config):
env = {}
ldflags = '{standard_ldflags} -F/Library/Frameworks -F/System/Library/Frameworks'
args = ('{source_dir_from_build}/configure',
'--prefix={prefix}',
'--disable-cocoa',
'--target-list=arm-softmmu',
'--disable-curses',
'--disable-vnc',
'--disable-console',
'--enable-werror',
'--disable-slirp',
'--disable-curl',
'--disable-guest-base',
'--disable-guest-agent' ,
'--disable-blobs',
'--audio-drv-list=',
'--audio-card-list=',
'--disable-usb',
'--disable-smartcard',
'--disable-ide',
# '--exec-prefix={eprefix}',
# '--host={host}',
# '--build={build}',
#'--target-list={target}'
)
base_env = {
'LDFLAGS': ldflags,
'PKG_CONFIG_PATH': '{devtree_dir_abs}/{host}/lib/pkgconfig'.format(**config),
'QEMU_PKG_CONFIG_FLAGS': '--define-variable prefix={devtree_dir_abs} --define-variable exec_prefix={devtree_dir_abs}/{host}'.format(**config)
}
base_env.update(env)
builder.cmd(*args, env=base_env, config=config)
rules = Qemu()
|
Python
| 0.000004
|
@@ -3,16 +3,30 @@
port xyz
+%0Aimport shutil
%0A%0Aclass
@@ -144,25 +144,8 @@
self
-, builder, config
):%0A
@@ -1279,24 +1279,29 @@
g'.format(**
+self.
config),%0A
@@ -1443,16 +1443,21 @@
ormat(**
+self.
config)%0A
@@ -1515,16 +1515,21 @@
+self.
builder.
@@ -1560,16 +1560,21 @@
config=
+self.
config)%0A
@@ -1574,16 +1574,1039 @@
onfig)%0A%0A
+ def install(self):%0A super().install()%0A # Now we go and remove all the stuff we don't want.%0A # In fact, it may be easy to just install manually what we do want, but%0A # to try and keep this working for future version we take this%0A # approach for now.%0A%0A keymaps_dir = self.builder.j('%7Binstall_dir%7D', self.config%5B'prefix'%5D%5B1:%5D, 'share', 'qemu',%0A config=self.config)%0A xyz.rmtree(keymaps_dir)%0A%0A etc_dir = self.builder.j('%7Binstall_dir%7D', self.config%5B'prefix'%5D%5B1:%5D, 'etc', config=self.config)%0A xyz.rmtree(etc_dir)%0A%0A # Copy qemu-system-arm to the right bin location...%0A bin_dir = self.builder.j('%7Binstall_dir%7D', self.config%5B'prefix'%5D%5B1:%5D, 'bin', config=self.config)%0A ebin_dir = self.builder.j('%7Binstall_dir%7D', self.config%5B'prefix'%5D%5B1:%5D, '%7Bhost%7D', 'bin', config=self.config)%0A xyz.ensure_dir(ebin_dir)%0A shutil.copy(self.builder.j(bin_dir, 'qemu-system-arm'), ebin_dir)%0A xyz.rmtree(bin_dir)
%0A%0Arules
@@ -2611,11 +2611,9 @@
s = Qemu
-()
%0A
|
7bab4f09278a14e0b2dbc64de82cf56ef25ed066
|
add a random sleep as workaround to prevent mangadex api spam blocker from throwing 503 errors
|
cum/scrapers/mangadex.py
|
cum/scrapers/mangadex.py
|
from bs4 import BeautifulSoup
from cum import config, exceptions, output
from cum.scrapers.base import BaseChapter, BaseSeries, download_pool
from functools import partial
from mimetypes import guess_type
from urllib.parse import urljoin, urlparse
import concurrent.futures
import re
import requests
import json
class MangadexSeries(BaseSeries):
url_re = re.compile(r'(?:https?://mangadex\.(?:org|com))?/manga/([0-9]+)')
def __init__(self, url, **kwargs):
super().__init__(url, **kwargs)
self._get_page(self.url)
self.chapters = self.get_chapters()
def _get_page(self, url):
manga_id = re.search(self.url_re, url)
r = requests.get('https://mangadex.org/api/manga/' + manga_id.group(1))
self.json = json.loads(r.text)
def get_chapters(self):
result_chapters = []
manga_name = self.name
chapters = self.json['chapter'] if self.json.get('chapter') else []
for c in chapters:
url = 'https://mangadex.org/chapter/' + c
chapter = chapters[c]['chapter']
title = chapters[c]['title'] if chapters[c]['title'] else None
language = chapters[c]['lang_code']
# TODO: Add an option to filter by language.
if language != 'gb':
continue
groups = [chapters[c]['group_name']]
result = MangadexChapter(name=manga_name, alias=self.alias,
chapter=chapter,
url=url,
groups=groups, title=title)
result_chapters = [result] + result_chapters
return result_chapters
@property
def name(self):
return self.json['manga']['title']
class MangadexChapter(BaseChapter):
# match /chapter/12345 and avoid urls like /chapter/1235/comments
url_re = re.compile(
r'(?:https?://mangadex\.(?:org|com))?/chapter/([0-9]+)'
r'(?:/[^a-zA-Z0-9]|/?$)'
)
uses_pages = True
@staticmethod
def _reader_get(url, page_index):
chapter_id = re.search(MangadexChapter.url_re, url)
api_url = "https://mangadex.org/api/chapter/" + chapter_id.group(1)
return requests.get(api_url)
def available(self):
self.r = self.reader_get(1)
if not len(self.r.text):
return False
elif self.r.status_code == 404:
return False
elif re.search(re.compile(r'Chapter #[0-9]+ does not exist.'),
self.r.text):
return False
else:
return True
def download(self):
if getattr(self, 'r', None):
r = self.r
else:
r = self.reader_get(1)
chapter_hash = self.json['hash']
pages = self.json['page_array']
files = [None] * len(pages)
# This can be a mirror server or data path. Example:
# var server = 'https://s2.mangadex.org/'
# var server = '/data/'
mirror = self.json['server']
server = urljoin('https://mangadex.org', mirror)
futures = []
last_image = None
with self.progress_bar(pages) as bar:
for i, page in enumerate(pages):
if guess_type(page)[0]:
image = server + chapter_hash + '/' + page
else:
print('Unkown image type for url {}'.format(page))
raise ValueError
r = requests.get(image, stream=True)
if r.status_code == 404:
r.close()
raise ValueError
fut = download_pool.submit(self.page_download_task, i, r)
fut.add_done_callback(partial(self.page_download_finish,
bar, files))
futures.append(fut)
last_image = image
concurrent.futures.wait(futures)
self.create_zip(files)
def from_url(url):
r = MangadexChapter._reader_get(url, 1)
data = json.loads(r.text)
manga_id = data['manga_id']
series = MangadexSeries('https://mangadex.org/manga/' + str(manga_id))
for chapter in series.chapters:
parsed_chapter_url = ''.join(urlparse(chapter.url)[1:])
parsed_url = ''.join(urlparse(url)[1:])
if parsed_chapter_url == parsed_url:
return chapter
def reader_get(self, page_index):
r = self._reader_get(self.url, page_index)
self.json = json.loads(r.text)
return r
|
Python
| 0
|
@@ -241,16 +241,68 @@
rlparse%0A
+from time import sleep%0Afrom random import randrange%0A
import c
@@ -471,16 +471,97 @@
0-9%5D+)')
+%0A # TODO remove when there are properly spaced api calls%0A spam_failures = 0
%0A%0A de
@@ -780,32 +780,32 @@
lf.url_re, url)%0A
-
r = requ
@@ -868,16 +868,974 @@
oup(1))%0A
+ %0A # TODO FIXME replace with properly spaced api calls%0A # This is a bad workaround for%0A # '503 please stop spaming the site'%0A # erros when making requests to /api/ urls quickly.%0A # It may still break when 4 calls are done at the same time%0A sleep(randrange(0, 900) / 1000.0)%0A if r.status_code == 503 and self.spam_failures %3C 3:%0A # sleep 10-17 seconds to wait out the spam protection%0A # and make it less likely for all threads to hit at the same time%0A sleep(randrange(10000, 17000) / 1000.0)%0A self.spam_failures = self.spam_failures+1%0A return self._get_page(url)%0A elif self.spam_failures %3E= 3:%0A print(%22Error: Mangadex server probably contacted too often%5Cn%22)%0A print(r.text)%0A raise ScrapingError(%22Mangadex spam error%22)%0A %0A self.spam_failures = 0%0A
|
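The workaround above combines random pre-request jitter with a capped retry counter on 503s. A more conventional variant of the same idea is exponential backoff with jitter, sketched here with requests (the URL handling is a placeholder, not the scraper's code):

# Sketch: retrying a 503 with exponential backoff plus jitter.
import random
import time
import requests

def get_with_backoff(url, retries=3, base_delay=1.0):
    for attempt in range(retries + 1):
        r = requests.get(url)
        if r.status_code != 503:
            return r
        if attempt == retries:
            break
        # Sleep base * 2^attempt, plus up to 1s of jitter to de-synchronize threads.
        time.sleep(base_delay * (2 ** attempt) + random.random())
    raise RuntimeError("server still returning 503 after %d retries" % retries)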
232d4c2c8876b05cec4a427fcdf141a036c97045
|
Rename "Events" into "EventRegistry" and "trigger" into "notify"
|
pyofwave_server/pyofwave/core/operation.py
|
pyofwave_server/pyofwave/core/operation.py
|
"""
Standard interface for connecting client protocols to the operation extensions.
"""
from delta import DeltaObserverPool as dop
import opdev, delta
# Perform operation
def _getChildren(tag):
rep = [tag.text, ]
for child in tag:
rep.append(child)
rep.append(child.tail)
return rep
def performOperation(event, operation):
""" Execute a operation."""
rep = opdev._receive[operation.tag](event, *_getChildren(operation), **operation.attrib)
Events.trigger(operation)
return rep
# Events
def get(obj, prop, default = {}):
if not obj.get(prop):
obj[prop] = default
return obj[prop]
_handlers = {}
class Events(object):
"""Keeps track of all the events a user registers to."""
def __init__(self, user, callback):
self.user = user
self._callback = callback
def _handlers(self, url, operation):
# XXX : Why is it a list that is associated to an operation ?
# XXX : Is it possible to assign several callback to an operation ?
return get(get(_handlers, url), operation, [])
def register(self, url, operation):
# XXX: All registered operations will have the save callback
self._handlers(url, operation).append(self._callback)
def unregister(self, url, operation="*"):
url_handlers = get(_handlers, url)
if operation == "*":
for operation in url_handlers.keys():
operation_callback = self._handlers(url, operation)
if self._callback in operation_callback:
operation_callback.remove(self._callback)
else:
self._handlers(url, operation).remove(self._callback)
@staticmethod
def trigger(operation, src = None):
if src == None:
src = operation.get("href", operation.get("src", ""))
for handler in _handlers.get(src, {}).get(operation.tag, []):
dop.apply_async(handler, (operation.tag))
@delta.alphaDeltaObservable.addObserver
@staticmethod
def applyDelta(doc, delta):
""" Calculate and send events. """
|
Python
| 0.000001
|
@@ -486,17 +486,22 @@
vent
-s.trigger
+Registy.notify
(ope
@@ -671,17 +671,23 @@
ss Event
-s
+Registy
(object)
@@ -1737,15 +1737,14 @@
def
-trigger
+notify
(ope
|
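The registry above keys handlers by URL and operation tag and fans notifications out to an observer pool. A stripped-down, synchronous sketch of the same register/notify shape (no pool; payloads are invented):

# Sketch: a minimal register/notify event registry.
from collections import defaultdict

class EventRegistry(object):
    def __init__(self):
        self._handlers = defaultdict(list)  # (url, operation) -> [callbacks]

    def register(self, url, operation, callback):
        self._handlers[(url, operation)].append(callback)

    def unregister(self, url, operation, callback):
        self._handlers[(url, operation)].remove(callback)

    def notify(self, url, operation, payload):
        for callback in self._handlers[(url, operation)]:
            callback(payload)

registry = EventRegistry()
registry.register("wave://example", "insert", lambda p: print("got", p))
registry.notify("wave://example", "insert", {"text": "hi"})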
7c120c02097bfaa1f494627ac93d6cddf5fb9049
|
FIX adding newline for chunks
|
cutools/diff/__init__.py
|
cutools/diff/__init__.py
|
from hashlib import md5
from clint.textui import puts, colored
def clean_diff(diff):
"""Removes diff header from a diff.
"""
res = []
skip = True
for line in diff.split('\n'):
if line.startswith('diff --git'):
skip = True
if line.startswith('@@ '):
skip = False
if not skip:
res.append(line)
return '\n'.join(res)
def print_diff(diff):
"""Prints colored diff.
"""
for line in diff.split('\n'):
line = unicode(line).encode('utf-8')
if line.startswith('+'):
puts(colored.green(line))
elif line.startswith('-'):
puts(colored.red(line))
else:
puts(line)
def get_chunks(diff):
"""Returns a list with all the chunks in this diff.
"""
diff = clean_diff(diff)
chunk = []
chunks = []
for line in diff.split('\n'):
if not line:
continue
if line.startswith('@@ '):
if chunk:
chunks.append('\n'.join(chunk))
chunk = [line]
else:
chunk.append(line)
if chunk:
chunks.append('\n'.join(chunk))
return chunks
def get_hashed_chunks(chunks):
chunks_dict = {}
for chunk in chunks:
chunks_dict[md5(unicode(chunk).encode('utf-8')).hexdigest()] = chunk
return chunks_dict
def clean_chunk(chunk):
"""Clean headers from chunk.
"""
return '\n'.join([x[1:] for x in chunk.split('\n')
if x and x[0] not in ('-', '@')])
def chunk_in_text(chunk, text):
"""Checks if chunk is inside text.
"""
chunk = clean_chunk(chunk)
return text.find(chunk) >= 0
|
Python
| 0
|
@@ -1033,24 +1033,31 @@
.join(chunk)
+ + '%5Cn'
)%0A
@@ -1170,16 +1170,23 @@
n(chunk)
+ + '%5Cn'
)%0A re
|
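The fix above appends a trailing newline to each assembled chunk. A quick usage sketch of the module's own functions on an invented diff (assumes the file is importable as cutools.diff):

# Sketch: exercising get_chunks / chunk_in_text on an invented diff.
from cutools.diff import get_chunks, chunk_in_text  # module shown above

diff = (
    "diff --git a/f.txt b/f.txt\n"
    "@@ -1,2 +1,2 @@\n"
    " keep this line\n"
    "-old line\n"
    "+new line\n"
)
chunks = get_chunks(diff)              # one chunk, headed by the '@@' line
text = "keep this line\nnew line\n"
print(chunk_in_text(chunks[0], text))  # -> True: context and '+' lines found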
c8fa72a130d84d921b23f5973dafb8fa91367381
|
Make ip_type a RadioSelect in the PTR form
|
cyder/cydns/ptr/forms.py
|
cyder/cydns/ptr/forms.py
|
from django import forms
from cyder.cydns.forms import DNSForm
from cyder.cydns.ptr.models import PTR
class PTRForm(DNSForm):
def delete_instance(self, instance):
instance.delete()
class Meta:
model = PTR
exclude = ('ip', 'reverse_domain', 'ip_upper',
'ip_lower')
widgets = {'views': forms.CheckboxSelectMultiple}
|
Python
| 0.000001
|
@@ -368,10 +368,59 @@
Multiple
+,%0A 'ip_type': forms.RadioSelect
%7D%0A
|
e68c85ae4526557efd0d3c1bd45857583d542659
|
handle errors in better bibtex
|
python/citation_vim/zotero/betterbibtex.py
|
python/citation_vim/zotero/betterbibtex.py
|
# -*- coding: utf-8 -*-
import os
import shutil
import json
import sqlite3
class betterBibtex(object):
def __init__(self, zotero_path, cache_path):
self.bb_file = os.path.join(zotero_path, 'better-bibtex/db.json')
self.bb_database = os.path.join(zotero_path, 'betterbibtex-lokijs.sqlite')
self.bb_copy = os.path.join(cache_path, 'betterbibtex.sqlite')
bb_data_query = u"""
select lokijs.data
from lokijs
where lokijs.name = "db.json"
"""
def load_citekeys(self):
"""
Loads better-bibtex citekeys if they exist.
"""
# The storage method for betterbibtex keeps changing so we'll try a few.
try:
bb_data = open(self.bb_file).read()
bb_json = json.loads(bb_data)
except:
try:
shutil.copyfile(self.bb_database, self.bb_copy)
conn = sqlite3.connect(self.bb_copy)
cur = conn.cursor()
cur.execute(self.bb_data_query)
bb_data = cur.fetchone()[0]
bb_json = json.loads(bb_data)
except:
return {}
citekeys = {}
for item in bb_json['collections'][0]['data']:
if 'citekey' in item:
citekeys[item['itemID']] = item['citekey']
else:
citekeys[item['itemID']] = ""
return citekeys
|
Python
| 0.000002
|
@@ -1247,16 +1247,33 @@
ys = %7B%7D%0A
+ try:%0A
@@ -1331,16 +1331,20 @@
+
if 'cite
@@ -1355,18 +1355,43 @@
in item
-:%0A
+ and 'itemID' in item:%0A
@@ -1457,14 +1457,22 @@
+
else:%0A
+
@@ -1512,16 +1512,54 @@
'%5D%5D = %22%22
+%0A except:%0A return %7B%7D
%0A%0A
|
cbdbe14365d5caad28fe77d9c2ca1c66cbf783bd
|
test travis turning off db switch
|
python/marvin/tests/misc/test_db_switch.py
|
python/marvin/tests/misc/test_db_switch.py
|
#!/usr/bin/env python2
# encoding: utf-8
#
# test_db_switch.py
#
# Created by José Sánchez-Gallego on Sep 7, 2016.
from __future__ import division
from __future__ import print_function
from __future__ import absolute_import
def create_connection(db_name):
"""Creates the connection and import the model classes."""
from marvin.db.DatabaseConnection import DatabaseConnection
database_connection_string = 'postgresql+psycopg2:///{0}'.format(db_name)
db = DatabaseConnection(database_connection_string=database_connection_string)
import marvin.db.models.DataModelClasses as mangaData
return db, mangaData
def perform_query(db, mangaData):
"""Performs a simple query and return the value."""
session = db.Session()
xfocal = session.query(mangaData.Cube.xfocal).filter(
mangaData.Cube.plate == 8485, mangaData.Cube.mangaid == '1-209232').join(
mangaData.PipelineInfo, mangaData.PipelineVersion).filter(
mangaData.PipelineVersion.version == 'v1_5_1').one()
return xfocal
db_name = 'manga'
db, mangaData = create_connection(db_name)
print(perform_query(db, mangaData))
db_name_copy = 'manga_copy'
db, mangaData = create_connection(db_name_copy)
print(perform_query(db, mangaData))
|
Python
| 0
|
@@ -1050,16 +1050,18 @@
focal%0A%0A%0A
+#
db_name
@@ -1070,16 +1070,18 @@
'manga'%0A
+#
db, mang
@@ -1115,16 +1115,18 @@
b_name)%0A
+#
print(pe
@@ -1154,16 +1154,18 @@
Data))%0A%0A
+#
db_name_
@@ -1184,16 +1184,18 @@
a_copy'%0A
+#
db, mang
@@ -1234,16 +1234,18 @@
e_copy)%0A
+#
print(pe
|
c8df75a2112cd8e6a4f929ceac21714b716e46ce
|
Use the IRC nickname for !twitter if one is not provided.
|
dasbit/plugin/twitter.py
|
dasbit/plugin/twitter.py
|
from twisted.web.client import getPage
from urllib import urlencode
import json
class Twitter:
def __init__(self, manager):
self.client = manager.client
manager.registerCommand('twitter', 'lookup', 'twitter', '(?P<query>.*?)', self.lookup)
def lookup(self, source, query):
if query.isdigit():
url = 'http://api.twitter.com/1/statuses/show/%s.json' % query
else:
url = 'http://api.twitter.com/1/users/show.json?%s' % urlencode({'screen_name' : query})
getPage(url).addCallback(self._returnResult, source, query.isdigit())
def _returnResult(self, value, source, isNumericLookup):
try:
data = json.loads(value)
except:
self.client.reply(source, 'An error occured while processing the result', 'notice')
return
if 'error' in data:
self.client.reply(source, 'An error occured while processing the result', 'notice')
return
if isNumericLookup:
user = data['user']['screen_name']
text = data['text']
id = data['id_str']
else:
user = data['screen_name']
text = data['status']['text']
id = data['status']['id_str']
url = 'https://twitter.com/#!/%s/status/%s' % (user, id)
self.client.reply(source, '<%s> %s (%s)' % (user, text, url))
|
Python
| 0
|
@@ -398,34 +398,49 @@
query%0A el
-se
+if len(query) %3E 0
:%0A ur
@@ -525,16 +525,151 @@
query%7D)
+%0A else:%0A url = 'http://api.twitter.com/1/users/show.json?%25s' %25 urlencode(%7B'screen_name' : source.prefix%5B'nickname'%5D%7D)
%0A%0A
|
2dd7e97c1bf0ca927e41ad3aff05013e470f7702
|
Add twitter alias command
|
dasbit/plugin/twitter.py
|
dasbit/plugin/twitter.py
|
from dasbit.core import Config
from twisted.web.client import getPage
from urllib import urlencode, quote_plus
import os
import base64
import json
import HTMLParser
class Twitter:
def __init__(self, manager):
self.client = manager.client
self.parser = HTMLParser.HTMLParser()
self.config = Config(os.path.join(manager.dataPath, 'twitter'))
manager.registerCommand('twitter', 'authenticate', 'authenticate-twitter', '(?P<key>[a-zA-Z0-9]+) (?P<secret>[a-zA-Z0-9]+)', self.authenticate)
manager.registerCommand('twitter', 'lookup', 'twitter', '(?P<query>.*?)', self.lookup)
def authenticate(self, source, key, secret):
bearerTokenCredentials = base64.b64encode('%s:%s' % (quote_plus(key), quote_plus(secret)))
getPage(
'https://api.twitter.com/oauth2/token',
method = 'POST',
postdata = 'grant_type=client_credentials',
headers = {
'Authorization' : 'Basic %s' % bearerTokenCredentials,
'Content-Type' : 'application/x-www-form-urlencoded;charset=UTF-8'
}
).addCallback(self._authenticate, source).addErrback(self._error, source)
def _error(self, failure, source):
value = failure.value.response
try:
data = json.loads(value)
except:
self.client.reply(source, 'An error occured while processing the response', 'notice')
return
if 'errors' in data:
self.client.reply(source, 'API failure: %s' % data['errors'][0]['message'], 'notice')
return
def _authenticate(self, value, source):
try:
data = json.loads(value)
except:
self.client.reply(source, 'An error occured while processing the authentication', 'notice')
return
if not 'token_type' in data:
self.client.reply(source, 'Missing token type in authentication response', 'notice')
return
if not 'access_token' in data:
self.client.reply(source, 'Missing access_token in authentication response', 'notice')
return
if data['token_type'] != 'bearer':
self.client.reply(source, 'Returned token type is not bearer', 'notice')
return
self.config['access_token'] = data['access_token']
self.config.save()
self.client.reply(source, 'Authentication succeeded', 'notice')
def lookup(self, source, query):
if not 'access_token' in self.config:
self.client.reply(source, 'Twitter plugin not authenticated yet', 'notice')
return
if query.isdigit():
url = 'https://api.twitter.com/1.1/statuses/show/%s.json' % query
elif len(query) > 0:
url = 'https://api.twitter.com/1.1/users/show.json?%s' % urlencode({'screen_name' : query})
else:
url = 'https://api.twitter.com/1.1/users/show.json?%s' % urlencode({'screen_name' : source.prefix['nickname']})
getPage(
url,
method = 'GET',
headers = {'Authorization' : 'Bearer %s' % self.config['access_token']}
).addCallback(self._returnResult, source, query.isdigit()).addErrback(self._error, source)
def _returnResult(self, value, source, isNumericLookup):
try:
data = json.loads(value)
except:
self.client.reply(source, 'An error occured while processing the result', 'notice')
return
if isNumericLookup:
user = data['user']['screen_name']
text = data['text']
id = data['id_str']
else:
user = data['screen_name']
text = data['status']['text']
id = data['status']['id_str']
text = self.parser.unescape(text).replace('\n', ' ').replace('\r', '')
url = 'https://twitter.com/#!/%s/status/%s' % (user, id)
self.client.reply(source, '<%s> %s (%s)' % (user, text, url))
|
Python
| 0.000132
|
@@ -359,24 +359,215 @@
twitter'))%0A%0A
+ if not 'aliases' in self.config:%0A self.config%5B'aliases'%5D = %7B%7D%0A%0A manager.registerCommand('twitter', 'authenticate', 'alias-twitter', '(?P%3Chandle%3E%5B%5E %5D+)', self.alias)%0A
mana
@@ -802,16 +802,245 @@
ookup)%0A%0A
+ def alias(self, source, handle):%0A self.config%5B'aliases'%5D%5Bsource.prefix%5B'nickname'%5D%5D = handle%0A print self.config%5B'aliases'%5D%0A self.config.save()%0A self.client.reply(source, 'Alias stored', 'notice')%0A%0A
def
@@ -3310,32 +3310,286 @@
)%0A else:%0A
+ print self.config%5B'aliases'%5D%0A if source.prefix%5B'nickname'%5D in self.config%5B'aliases'%5D:%0A handle = self.config%5B'aliases'%5D%5Bsource.prefix%5B'nickname'%5D%5D%0A else:%0A handle = source.prefix%5B'nickname'%5D%0A%0A
url
@@ -3672,33 +3672,14 @@
' :
-source.prefix%5B'nickname'%5D
+handle
%7D)%0A%0A
|
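The authenticate handler above implements Twitter's application-only OAuth2 flow: base64-encode key:secret, POST grant_type=client_credentials, keep the returned bearer token. The same exchange sketched with requests (credentials are placeholders; endpoint as targeted by the plugin):

# Sketch: app-only (OAuth2 bearer) token exchange with requests.
import base64
import requests

key, secret = "CONSUMER_KEY", "CONSUMER_SECRET"  # placeholders
credentials = base64.b64encode(("%s:%s" % (key, secret)).encode()).decode()

r = requests.post(
    "https://api.twitter.com/oauth2/token",
    data={"grant_type": "client_credentials"},
    headers={"Authorization": "Basic " + credentials})
token = r.json()
assert token.get("token_type") == "bearer"
bearer = token["access_token"]  # later sent as 'Authorization: Bearer <token>'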
1cd445e7e2f41665200a43728cbd5196098b7cad
|
fix crash
|
school/api.py
|
school/api.py
|
# -*- coding: utf-8 -*-
from django.core.paginator import Paginator, InvalidPage
from django.http import Http404
from django.urls import re_path
from django.db.models import Q
from haystack.query import SearchQuerySet
from tastypie import fields
from tastypie.resources import ModelResource
from tastypie.resources import ALL, ALL_WITH_RELATIONS
from tastypie.utils import trailing_slash
from tastypie.authorization import DjangoAuthorization
from people.api import ArtistResource
from .models import Promotion, Student, StudentApplication
from assets.api import GalleryResource
class PromotionResource(ModelResource):
class Meta:
queryset = Promotion.objects.all()
resource_name = 'school/promotion'
ordering = ['starting_year']
class StudentResource(ArtistResource):
class Meta:
queryset = Student.objects.all()
resource_name = 'school/student'
ordering = ['user', ]
filtering = {
'artist': ALL_WITH_RELATIONS,
'user': ALL_WITH_RELATIONS,
'user__last_name__istartswith': ALL_WITH_RELATIONS,
'promotion': ALL,
}
fields = ['id', 'number', 'promotion', 'graduate', 'user', 'artist']
promotion = fields.ForeignKey(PromotionResource, 'promotion')
artist = fields.ForeignKey(ArtistResource, 'artist', full=True)
# BUG Error (why?) user__last_name__istartswith
# "The 'last_name' field does not support relations"
def build_filters(self, filters=None):
# turn off error : ignore_bad_filters et True
return super(StudentResource, self).build_filters(filters, ignore_bad_filters=True)
# override user__last_name__istartswith query
def apply_filters(self, request, applicable_filters):
base_object_list = super(StudentResource, self).apply_filters(request, applicable_filters)
# override
query = request.GET.get('user__last_name__istartswith', None)
if query:
qset = (Q(user__last_name__istartswith=query))
base_object_list = base_object_list.filter(qset).distinct()
return base_object_list
# end of Bug Error
def prepend_urls(self):
return [
re_path(r"^(?P<resource_name>%s)/search%s$" % (self._meta.resource_name, trailing_slash()),
self.wrap_view('get_search'),
name="api_get_search"),
]
def get_search(self, request, **kwargs):
self.method_check(request, allowed=['get'])
self.is_authenticated(request)
self.throttle_check(request)
# Do the query.
sqs = SearchQuerySet().models(Student).load_all().autocomplete(content_auto=request.GET.get('q', ''))
paginator = Paginator(sqs, 20)
try:
page = paginator.page(int(request.GET.get('page', 1)))
except InvalidPage:
raise Http404("Sorry, no results on that page.")
objects = []
for result in page.object_list:
bundle = self.build_bundle(obj=result.object, request=request)
bundle = self.full_dehydrate(bundle)
objects.append(bundle)
object_list = {
'objects': objects,
}
self.log_throttled_access(request)
return self.create_response(request, object_list)
class StudentApplicationResource(ModelResource):
class Meta:
queryset = StudentApplication.objects.all()
resource_name = 'school/application'
ordering = ['created_on']
# no authorization for Anonymous user
authorization = DjangoAuthorization()
artist = fields.ForeignKey(ArtistResource, 'artist')
administrative_galleries = fields.ToManyField(GalleryResource, 'administrative_galleries', full=True, null=True)
artwork_galleries = fields.ToManyField(GalleryResource, 'artwork_galleries', full=True, null=True)
|
Python
| 0.000003
|
@@ -1495,16 +1495,42 @@
ers=None
+, ignore_bad_filters=False
):%0A
|
774da53edef30cb2f3c45cc47c63d46f142a4e07
|
Use four-space indentation, add repo_path to arguments
|
score_repo.py
|
score_repo.py
|
#!/usr/bin/env python3
import argparse
import importlib
import json
import sys
def loadAttributePlugins(attributes):
for attribute in attributes:
if attribute['enabled']:
try:
attribute['implementation'] = importlib.import_module("attributes.{0}.main".format(attribute['name']))
except ImportError:
print("Failed to load the {0} attribute.".format(attribute['name']))
def processConfiguration(config_file):
try:
config = json.load(config_file)
return config
except:
print("Malformatted or missing configuration.")
sys.exit(2)
def processArguments():
parser = argparse.ArgumentParser(description='Calculate the score of a repository.')
parser.add_argument('-c', '--config', type=argparse.FileType('r'), default='config.json', dest='config_file', help='Path to the configuration file.')
parser.add_argument('repository_id', type=int, nargs=1, help='Identifier for a repository as it appears in the GHTorrent database.')
if len(sys.argv) is 1:
parser.print_help()
sys.exit(1)
return parser.parse_args()
def main():
args = processArguments()
config = processConfiguration(args.config_file)
attributes = config['attributes']
loadAttributePlugins(attributes)
score = 0
for attribute in attributes:
result = attribute['implementation'].run(metadata, repo_path, attribute['options'])
score += result * attribute['weight']
if __name__ == '__main__':
main()
|
Python
| 0.000001
|
@@ -62,16 +62,26 @@
rt json%0A
+import os%0A
import s
@@ -88,33 +88,34 @@
ys%0A%0Adef load
-A
+_a
ttribute
Plugins(attr
@@ -94,33 +94,34 @@
f load_attribute
-P
+_p
lugins(attribute
@@ -124,16 +124,18 @@
butes):%0A
+
for at
@@ -153,24 +153,28 @@
attributes:%0A
+
if attri
@@ -196,21 +196,35 @@
:%0A
+
+
try:%0A
+
@@ -326,16 +326,22 @@
ame'%5D))%0A
+
ex
@@ -362,24 +362,32 @@
or:%0A
+
+
print(%22Faile
@@ -447,33 +447,34 @@
%5D))%0A%0Adef process
-C
+_c
onfiguration(con
@@ -484,23 +484,29 @@
_file):%0A
+
try:%0A
+
conf
@@ -537,16 +537,20 @@
le)%0A
+
+
return c
@@ -555,16 +555,18 @@
config%0A
+
except
@@ -567,16 +567,20 @@
except:%0A
+
prin
@@ -623,24 +623,28 @@
tion.%22)%0A
+
+
sys.exit(2)%0A
@@ -652,28 +652,401 @@
def
-processArguments():%0A
+repository_path(path_string):%0A if os.path.exists(path_string):%0A if os.path.exists(%22%7B0%7D/.git%22.format(path_string)):%0A return path_string%0A else:%0A raise argparse.ArgumentTypeError(%22%7B0%7D is not a git repository.%22.format(path_string))%0A else:%0A raise argparse.ArgumentTypeError(%22%7B0%7D is not a directory.%22.format(path_string))%0A%0Adef process_arguments():%0A
pa
@@ -1124,24 +1124,26 @@
pository.')%0A
+
parser.add
@@ -1284,16 +1284,18 @@
le.')%0A
+
+
parser.a
@@ -1419,17 +1419,137 @@
base.')%0A
-%0A
+ parser.add_argument('repository_path', type=repository_path, nargs=1, help='Path to the repository source code.')%0A%0A
if len
@@ -1565,24 +1565,28 @@
) is 1:%0A
+
+
parser.print
@@ -1593,16 +1593,20 @@
_help()%0A
+
sys.
@@ -1616,16 +1616,18 @@
t(1)%0A%0A
+
+
return p
@@ -1658,16 +1658,18 @@
main():%0A
+
args =
@@ -1676,17 +1676,18 @@
process
-A
+_a
rguments
@@ -1689,16 +1689,18 @@
ments()%0A
+
config
@@ -1709,17 +1709,18 @@
process
-C
+_c
onfigura
@@ -1744,16 +1744,18 @@
file)%0A
+
+
attribut
@@ -1784,23 +1784,26 @@
s'%5D%0A
+
load
-A
+_a
ttribute
Plug
@@ -1798,17 +1798,18 @@
ttribute
-P
+_p
lugins(a
@@ -1820,16 +1820,18 @@
butes)%0A%0A
+
score
@@ -1836,16 +1836,18 @@
e = 0%0A
+
+
for attr
@@ -1863,24 +1863,28 @@
attributes:%0A
+
result =
@@ -1920,22 +1920,47 @@
run(
-metadata, repo
+config.repository_id, config.repository
_pat
@@ -1984,16 +1984,20 @@
ions'%5D)%0A
+
scor
@@ -2060,15 +2060,124 @@
n__':%0A
-main(
+ try:%0A main()%0A except KeyboardInterrupt:%0A print(%22Caught interrupt, exiting.%22)%0A sys.exit(1
)%0A
|
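The repository_path validator added above follows the standard argparse idiom: a type callable that returns the cleaned value or raises ArgumentTypeError, which argparse converts into a usage error. In isolation:

# Sketch: validating an argument with a custom argparse type callable.
import argparse
import os

def git_repository(path):
    if not os.path.isdir(path):
        raise argparse.ArgumentTypeError("%s is not a directory." % path)
    if not os.path.exists(os.path.join(path, ".git")):
        raise argparse.ArgumentTypeError("%s is not a git repository." % path)
    return path

parser = argparse.ArgumentParser()
parser.add_argument("repository_path", type=git_repository)
# argparse calls git_repository(value) and reports ArgumentTypeError as a usage error.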
564f61102bbcdd357a768fc2768b4ec0f44bf8e2
|
Add QWebSettings to modify browsing behavior
|
screenshot.py
|
screenshot.py
|
# -*- coding: utf-8 -*-
"""Web screen capture script with QtWebKit
How to use
==========
$ python screenshot.py -h
usage: screenshot.py [-h] [-a AGENT] [-l LANGUAGE] [-w WIDTH] [-H HEIGHT]
[-p PREFIX] [-s]
url
positional arguments:
url specify request url
optional arguments:
-h, --help show this help message and exit
-a AGENT, --agent AGENT
UA strings for HTTP Header 'User-Agent'
-l LANGUAGE, --language LANGUAGE
specify langs for HTTP Header 'Accept-Language'
-w WIDTH, --width WIDTH
specify window width to capture screen
-H HEIGHT, --height HEIGHT
specify minimum window height to capture screen
-p PREFIX, --prefix PREFIX
specify PNG file prefix (timestamp follows)
-s, --with-smooth-scroll
whether scroll down to bottom when capture the page or
not
"""
import datetime
import sys
from argparse import ArgumentParser
try:
from PySide.QtCore import QUrl, QTimer, Qt
from PySide.QtGui import QApplication, QImage, QPainter
from PySide.QtNetwork import QNetworkRequest
from PySide.QtWebKit import QWebView, QWebPage
except ImportError:
# Use PyQt5 when it couldn't have found PySide modules
from PyQt5.QtCore import QUrl, QTimer, Qt
from PyQt5.QtGui import QImage, QPainter
from PyQt5.QtNetwork import QNetworkRequest
from PyQt5.QtWebKitWidgets import QWebView, QWebPage
from PyQt5.QtWidgets import QApplication
DEFAULT_WIDTH = 1024
DEFAULT_HEIGHT = 768
DEFAULT_USERAGENT = ('Mozilla/5.0 (Macintosh; Intel Mac OS X 10_10_5)'
' AppleWebKit/537.36 (KHTML, like Gecko)'
' CDP/47.0.2526.73 Safari/537.36')
DEFAULT_PREFIX = 'screenshot'
class Page(QWebPage):
"""psedo webpage class
"""
def __init__(self, ua):
QWebPage.__init__(self)
self.ua = ua
def userAgentForUrl(self, url):
"""override 'userAgentForUrl' method
"""
return self.ua
class Browser(QWebView):
"""psedo browser class
"""
def __init__(self, page=None):
"""Initialize browser class
"""
QWebView.__init__(self)
if page:
self.setPage(page)
self.use_smooth_scroll = args.with_smooth_scroll
self.initialize()
def initialize(self):
self.timerDelay = QTimer()
self.timerDelay.setInterval(20)
self.timerDelay.setSingleShot(True)
self.timerDelay.timeout.connect(self.delay_action)
self.loadFinished.connect(self.load_finished_slot)
def load_finished_slot(self, ok):
"""Callback function when content loading finished
"""
if not ok:
print("Loaded but not completed: %s".format(self.url))
return
print("Load completed: %s".format(self.url))
self.delay_action()
def delay_action(self):
frame = self.page().mainFrame()
target_y = frame.scrollBarMaximum(Qt.Vertical)
current_y = frame.scrollBarValue(Qt.Vertical)
if self.use_smooth_scroll and target_y > current_y:
y = current_y + 50
frame.evaluateJavaScript("window.scrollTo(0, {});".format(y))
self.timerDelay.start()
else:
self.take_screenshot()
def take_screenshot(self):
frame = self.page().mainFrame()
size = frame.contentsSize()
self.page().setViewportSize(size)
image = QImage(size, QImage.Format_ARGB32)
painter = QPainter(image)
frame.render(painter)
painter.end()
timestamp = datetime.datetime.now().strftime("%Y%m%d%H%M%S")
file_name = "{}_{}.png".format(args.prefix, timestamp)
print("page title: [{}] --> save as {}".format(self.title(), file_name))
image.save(file_name)
sys.exit()
def run(self, args):
"""prepare request object, then call 'load' method of QWebView object
"""
request = QNetworkRequest()
request.setUrl(QUrl(args.url))
request.setRawHeader(bytes("Accept-Languages", 'utf-8'), bytes(', '.join(args.language), 'utf-8'))
request.setRawHeader(bytes("User-Agent", 'utf-8'), bytes(args.agent, 'utf-8'))
self.resize(int(args.width), int(args.height))
self.load(request)
def main(args):
"""main function
"""
print(args)
app = QApplication(sys.argv)
page = Page(args.agent) if args.agent else None
browser = Browser(page)
browser.run(args)
browser.show()
app.exec_()
if __name__ == "__main__":
ap = ArgumentParser()
ap.add_argument('-a', '--agent', default=DEFAULT_USERAGENT,
help="UA strings for HTTP Header 'User-Agent'")
ap.add_argument('-l', '--language', action="append",
help="specify langs for HTTP Header 'Accept-Language'")
ap.add_argument('-w', '--width', default=DEFAULT_WIDTH,
help="specify window width to capture screen")
ap.add_argument('-H', '--height', default=DEFAULT_HEIGHT,
help="specify minimum window height to capture screen")
ap.add_argument('-p', '--prefix', default=DEFAULT_PREFIX,
help="specify PNG file prefix (timestamp follows)")
ap.add_argument('-s', '--with-smooth-scroll', default=False, action="store_true",
help="whether scroll down to bottom when capture the page or not")
ap.add_argument('url', help="specify request url")
args = ap.parse_args()
if not args.language:
args.language = ['ja']
main(args)
|
Python
| 0
|
@@ -1333,16 +1333,30 @@
QWebPage
+, QWebSettings
%0A%0Aexcept
@@ -1559,32 +1559,76 @@
QNetworkRequest%0A
+ from PyQt5.QtWebKit import QWebSettings%0A
from PyQt5.Q
@@ -2540,24 +2540,342 @@
itialize()%0A%0A
+ def _private_browse(self):%0A print(%22Enable private browsing mode%22)%0A self.settings().setAttribute(QWebSettings.PrivateBrowsingEnabled, True)%0A%0A def _hide_scroll_bars(self):%0A print(%22Disable scroll bars%22)%0A self.page().mainFrame().setScrollBarPolicy(Qt.Horizontal, Qt.ScrollBarAlwaysOff)%0A%0A
def init
@@ -3127,16 +3127,81 @@
_slot)%0A%0A
+ self._private_browse()%0A self._hide_scroll_bars()%0A%0A
def
|
47c7cccc674beee06c2d4d6f6f197cb860d33354
|
Update bno055.py
|
home/Calamity/bno055.py
|
home/Calamity/bno055.py
|
arduino = Runtime.createAndStart("arduino","Arduino")
arduino.connect("COM11")
bno = Runtime.createAndStart("bno","Bno055")
bno.setController(arduino)
if bno.begin():
event = bno.getEvent()
print event.orientation.x
print event.orientation.y
print event.orientation.z
|
Python
| 0
|
@@ -161,16 +161,34 @@
egin():%0A
+ while (True):%0A
event
@@ -204,16 +204,18 @@
Event()%0A
+
print
@@ -236,16 +236,18 @@
ion.x%0A
+
+
print ev
@@ -266,16 +266,18 @@
ion.y%0A
+
+
print ev
@@ -294,9 +294,21 @@
ation.z%0A
+ sleep(1)
%0A
|
59f96d2ca0f3752052d870ef9c7bc5bc21f21e40
|
add header
|
host/pydaq/HL/tdc_s3.py
|
host/pydaq/HL/tdc_s3.py
|
#
# ------------------------------------------------------------
# Copyright (c) SILAB , Physics Institute of Bonn University
# ------------------------------------------------------------
#
# SVN revision information:
# $Rev:: 1 $:
# $Author:: TheresaObermann $:
# $Date:: 2013-10-09 10:58:06 #$:
#
from HL.HardwareLayer import HardwareLayer
import struct
import array
class tdc_s3(HardwareLayer):
'''
TDC controller interface
'''
def __init__(self, intf, conf):
HardwareLayer.__init__(self, intf, conf)
'''
Resets the TDC controller module inside the FPGA, base adress zero
'''
def reset(self):
self._intf.write(self._conf['base_addr'], [0])
'''
Initialise the TDC controller module
'''
def init(self):
self.reset()
def set_en(self, enable):
current = self._intf.read(self._conf['base_addr'] + 1, 1)[0]
self._intf.write(self._conf['base_addr'] + 1, [(current & 0xfe) | enable])
def get_en(self):
return True if (self._intf.read(self._conf['base_addr'] + 1, 1)[0] & 0x01) else False
def set_exten(self, enable):
current = self._intf.read(self._conf['base_addr'] + 1, 4)
self._intf.write(self._conf['base_addr'] + 1, [(current[3] & 0xfe) | enable,current[2],current[1],current[0]])
def get_exten(self):
return True if (self._intf.read(self._conf['base_addr'] + 1, 4)[3] & 0x01) else False
|
Python
| 0.000001
|
@@ -228,17 +228,17 @@
$Rev::
-1
+
@@ -270,23 +270,23 @@
r::
-TheresaObermann
+
@@ -304,29 +304,29 @@
e::
-2013-10-09 10:58:06 #
+
$:%0D%0A
|
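set_en above is a read-modify-write: read the register byte, clear bit 0 with the 0xfe mask, then OR in the new flag. The masking logic on its own (pure Python, no hardware layer):

# Sketch: read-modify-write of a single register bit.
def set_bit0(current, enable):
    """Return `current` with bit 0 forced to `enable`, other bits untouched."""
    return (current & 0xfe) | (1 if enable else 0)

assert set_bit0(0b10101010, True)  == 0b10101011
assert set_bit0(0b10101011, False) == 0b10101010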
b7339a48f8a2622a571d1dadb312e2625d1e4daf
|
fix mda_tweets None case
|
anyway/parsers/mda_twitter/mda_twitter.py
|
anyway/parsers/mda_twitter/mda_twitter.py
|
from .get_mda_tweets import get_user_tweets
from anyway.utilities import init_flask
from flask_sqlalchemy import SQLAlchemy
import os
def get_latest_tweet_id_from_db(db):
"""
get the latest tweet id
:return: latest tweet id
"""
tweet_id = db.session.execute(
"SELECT id FROM news_flash where source='twitter' ORDER BY date DESC LIMIT 1").fetchone()
if tweet_id:
return tweet_id[0]
def insert_mda_tweet(db, id_tweet, title, link, date_parsed, author, description, location, lat, lon, road1,
road2, intersection, city, street, street2, resolution, geo_extracted_street,
geo_extracted_road_no, geo_extracted_intersection, geo_extracted_city,
geo_extracted_address, geo_extracted_district, accident, source):
"""
insert new mda_tweet to db
:param id_tweet: id of the mda_tweet
:param title: title of the mda_tweet
:param link: link to the mda_tweet
:param date_parsed: parsed date of the mda_tweet
:param author: author of the mda_tweet
:param description: description of the mda tweet
:param location: location of the mda tweet (textual)
:param lat: latitude
:param lon: longitude
:param road1: road 1 if found
:param road2: road 2 if found
:param intersection: intersection if found
:param city: city if found
:param street: street if found
:param street2: street 2 if found
:param resolution: resolution of found location
:param geo_extracted_street: street from data extracted from the geopoint
:param geo_extracted_road_no: road number from data extracted from the geopoint
:param geo_extracted_intersection: intersection from data extracted from the geopoint
:param geo_extracted_city: city from data extracted from the geopoint
:param geo_extracted_address: address from data extracted from the geopoint
:param geo_extracted_district: district from data extracted from the geopoint
:param accident: is the mda tweet an accident
:param source: source of the mda tweet
"""
db.session.execute('INSERT INTO news_flash (id,title, link, date, author, description, location, lat, lon, '
'road1, road2, intersection, city, street, street2, resolution, geo_extracted_street, '
'geo_extracted_road_no, geo_extracted_intersection, geo_extracted_city, '
'geo_extracted_address, geo_extracted_district, accident, source) VALUES \
(:id, :title, :link, :date, :author, :description, :location, :lat, :lon, \
:road1, :road2, :intersection, :city, :street, :street2, :resolution, :geo_extracted_street,\
:geo_extracted_road_no, :geo_extracted_intersection, :geo_extracted_city, \
:geo_extracted_address, :geo_extracted_district, :accident, :source)',
{'id': id_tweet, 'title': title, 'link': link, 'date': date_parsed, 'author': author,
'description': description, 'location': location, 'lat': lat, 'lon': lon,
'road1': int(road1) if road1 else road1,
'road2': int(road2) if road2 else road2, 'intersection': intersection, 'city': city,
'street': street, 'street2': street2,
'resolution': resolution, 'geo_extracted_street': geo_extracted_street,
'geo_extracted_road_no': geo_extracted_road_no,
'geo_extracted_intersection': geo_extracted_intersection,
'geo_extracted_city': geo_extracted_city,
'geo_extracted_address': geo_extracted_address,
'geo_extracted_district': geo_extracted_district,
'accident': accident, 'source': source})
db.session.commit()
def mda_twitter():
app = init_flask()
db = SQLAlchemy(app)
TWITTER_CONSUMER_KEY = os.environ.get('TWITTER_CONSUMER_KEY')
TWITTER_CONSUMER_SECRET = os.environ.get('TWITTER_CONSUMER_SECRET')
TWITTER_ACCESS_KEY = os.environ.get('TWITTER_ACCESS_KEY')
TWITTER_ACCESS_SECRET = os.environ.get('TWITTER_ACCESS_SECRET')
GOOGLE_MAPS_API_KEY = os.environ.get('GOOGLE_MAPS_KEY')
twitter_user = 'mda_israel'
latest_tweet_id = get_latest_tweet_id_from_db(db)
# check if there are any MDA tweets in the DB
if latest_tweet_id:
mda_tweets = get_user_tweets(twitter_user, latest_tweet_id, TWITTER_CONSUMER_KEY,
TWITTER_CONSUMER_SECRET, TWITTER_ACCESS_KEY, TWITTER_ACCESS_SECRET, GOOGLE_MAPS_API_KEY)
else:
mda_tweets = get_user_tweets(
twitter_user, 'no_tweets', TWITTER_CONSUMER_KEY, TWITTER_CONSUMER_SECRET, TWITTER_ACCESS_KEY, TWITTER_ACCESS_SECRET, GOOGLE_MAPS_API_KEY)
mda_tweets = mda_tweets[['id', 'accident', 'author', 'date', 'description', 'lat', 'link', 'lon', 'title', 'source', 'location', 'city', 'intersection', 'road1', 'road2', 'street',
'geo_extracted_address', 'geo_extracted_city', 'geo_extracted_district', 'geo_extracted_intersection', 'geo_extracted_road_no', 'geo_extracted_street', 'resolution', 'street2']]
if mda_tweets is None:
return
for row in mda_tweets.itertuples(index=False):
(tweet_id, accident, author, date, description, lat, link, lon, title, source, location, city, intersection, road1, road2, street, geo_extracted_address,
geo_extracted_city, geo_extracted_district, geo_extracted_intersection, geo_extracted_road_no, geo_extracted_street, resolution, street2) = row
insert_mda_tweet(db, tweet_id, title, link, date, author, description, location, lat, lon, road1,
road2, intersection, city, street, street2, resolution, geo_extracted_street,
geo_extracted_road_no, geo_extracted_intersection, geo_extracted_city,
geo_extracted_address, geo_extracted_district, accident, source)
|
Python
| 0.999842
|
@@ -4971,16 +4971,58 @@
I_KEY)%0D%0A
+ if mda_tweets is None:%0D%0A return
%0D%0A md
@@ -5413,50 +5413,8 @@
%5D%5D%0D%0A
- if mda_tweets is None:%0D%0A return
%0D%0A
|
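The crash fixed above was ordering: the column selection mda_tweets[[...]] ran before the None guard, so an empty fetch blew up on subscripting. The general rule, on a plain object:

# Sketch: guard against None before subscripting a possibly-empty result.
def pick(result):
    if result is None:      # guard first; None has no __getitem__
        return None
    return result["id"]     # only subscript after the check

print(pick(None))           # -> None instead of TypeError
print(pick({"id": 7}))      # -> 7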
70f460d3d0ba4c605704bb7179e3618583311621
|
add docstrings for SPIRE funcs
|
hsadownload/getspire.py
|
hsadownload/getspire.py
|
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
from hsadownload.access import getHsaFits, getObsUrn, parseContextHdu, fixHerschelHeader
import os
import astropy.io.fits as fits
def downloadSpireMap(ldict, obsid, lev, bandKey, direc='./SpirePhoto/', \
spgVersion='SPG v13.0.0', overWrite=False):
normVersion = ''.join(spgVersion.split())
if bandKey in ldict:
filename = os.path.join(direc,"%s_SPIRE_%s_%s_%s.fits.gz"%(obsid,lev,bandKey,normVersion))
hdu = getHsaFits(ldict[bandKey], fname=filename, save=True)
hdu.close()
print('downloaded ' + filename)
else:
print('did not find %s in %s for %s' %(bandKey, lev, obsid))
def storeSpirePhoto(obsid, spgVersion='SPG v13.0.0', direc='./SpirePhotoScan/'):
instrument = 'SPIRE'
normVersion = ''.join(spgVersion.split())
urn = getObsUrn(obsid,instrument,spgVersion=spgVersion)
hdulist = getHsaFits(urn)
cdict = parseContextHdu(hdulist)
if 'level2_5' in cdict:
lev = 'L25'
lhdulist = getHsaFits(cdict['level2_5'])
ldict = parseContextHdu(lhdulist)
for bandKey in ['psrcPLW','psrcPMW', 'psrcPSW']:
if (bandKey in ldict):
if (obsid == lhdulist[0].header['obsid001']):
downloadSpireMap(ldict, obsid, lev, bandKey, direc,
spgVersion=spgVersion)
else:
print('skipping %s for %s since obsid001 is %s' % (bandKey, obsid, lhdulist[0].header['obsid001']))
elif 'level2' in cdict:
lev = 'L2'
lhdulist = getHsaFits(cdict['level2'])
ldict = parseContextHdu(lhdulist)
for bandKey in ['psrcPLW','psrcPMW', 'psrcPSW']:
downloadSpireMap(ldict, obsid, lev, bandKey, direc,
spgVersion=spgVersion)
else:
return(0)
return(1)
|
Python
| 0
|
@@ -398,16 +398,588 @@
False):%0A
+ %22%22%22%0A Download SPIRE map. Not meant to be called directly but to be called by%0A storeSpirePhoto.%0A%0A Parameters:%0A -----------%0A ldict (dict) : dictionary of level labels and urn strings%0A obsid (long int) : observation id%0A lev (string) : level being downloaded%0A bandKey (string) : key for band being downloaded%0A direc (string) : directory in which to store file, defaults to './SpirePhoto/'%0A spgVersion (string) : pipeline version, to put in output filename%0A overwrite (bool) : overwrite file if it already exists? defaults to False%0A %22%22%22%0A
norm
@@ -1429,16 +1429,390 @@
can/'):%0A
+ %22%22%22%0A Download and store a SPIRE map%0A%0A Parameters:%0A -----------%0A obsid (long int): observation id%0A spgVersion (string) : pipeline version, defaults to 'SPG v13.0.0'%0A direc (string) : path to directory for storing files, defaults to './SpirePhoto/'%0A Returns:%0A --------%0A Returns 0 if no level2_5 or level2 in observation; otherwise 1%0A %22%22%22%0A
inst
|
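URL-decoded (%0A newline, %22 double quote), the first hunk inserts the docstring below at the top of downloadSpireMap; the second adds an analogous one to storeSpirePhoto, documenting its return convention (0 if the observation has no level2_5 or level2 context, otherwise 1). Reconstructed with the function body elided:

def downloadSpireMap(ldict, obsid, lev, bandKey, direc='./SpirePhoto/',
                     spgVersion='SPG v13.0.0', overWrite=False):
    """
    Download SPIRE map. Not meant to be called directly but to be called by
    storeSpirePhoto.

    Parameters:
    -----------
    ldict (dict) : dictionary of level labels and urn strings
    obsid (long int) : observation id
    lev (string) : level being downloaded
    bandKey (string) : key for band being downloaded
    direc (string) : directory in which to store file, defaults to './SpirePhoto/'
    spgVersion (string) : pipeline version, to put in output filename
    overwrite (bool) : overwrite file if it already exists? defaults to False
    """
    ...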
f52921e78cc6a8af38df50f0b0ba4d04b15fd768
|
fix the import error in db.py
|
service/db.py
|
service/db.py
|
#coding=utf-8
import torndb
import datetime
from constants.errorcode import Errorcode
from util.lt_exception import LTException
class DB(object):
def __init__(self, application):
self.mysql_read = application.mysql_conn_read
self.mysql_write = application.mysql_conn_write
#self.mongo_conn = application.mongo_conn
def sample(self):
'''
        Sample code
'''
try:
sql = ''' select count(1) from tag'''
result = self.mysql_write.query(sql)
except:
            # raise an exception here
pass
finally:
return result[0]
def get_article_by_id(self,id):
'''
        Sample code
'''
try:
sql = ''' select * from article where id =%s limit 1'''%(id)
result = self.mysql_write.query(sql)
except:
            # raise an exception here
pass
finally:
return result[0]
|
Python
| 0.000035
|
@@ -6,17 +6,16 @@
ng=utf-8
-%0D
%0Aimport
@@ -20,17 +20,16 @@
t torndb
-%0D
%0Aimport
@@ -36,17 +36,16 @@
datetime
-%0D
%0Afrom co
@@ -78,17 +78,16 @@
rrorcode
-%0D
%0Afrom ut
@@ -89,18 +89,19 @@
om util.
-lt
+gip
_excepti
@@ -114,10 +114,11 @@
ort
-LT
+Gip
Exce
@@ -126,14 +126,10 @@
tion
- %0D%0A%0D%0A%0D
+%0A%0A
%0Acla
@@ -142,23 +142,21 @@
object):
-%0D
%0A
-%0D
%0A def
@@ -184,17 +184,16 @@
cation):
-%0D
%0A
@@ -238,17 +238,16 @@
onn_read
-%0D
%0A
@@ -294,17 +294,16 @@
nn_write
-%0D
%0A
@@ -348,21 +348,18 @@
conn
-%0D
%0A
-%0D
%0A
-%0D
%0A
@@ -376,17 +376,16 @@
e(self):
-%0D
%0A
@@ -389,39 +389,8 @@
-'''%0D%0A %E7%A4%BA%E4%BE%8B%E4%BB%A3%E7%A0%81%0D%0A '''%0D
%0A
@@ -390,33 +390,32 @@
%0A try:
-%0D
%0A sql
@@ -448,17 +448,16 @@
m tag'''
-%0D
%0A
@@ -489,33 +489,32 @@
write.query(sql)
-%0D
%0A except:
@@ -513,37 +513,16 @@
except:
-%0D%0A # %E6%8A%9B%E5%87%BA%E5%BC%82%E5%B8%B8%0D
%0A
@@ -522,33 +522,32 @@
pass
-%0D
%0A finally
@@ -539,33 +539,32 @@
finally:
-%0D
%0A %0D%0A
@@ -552,33 +552,32 @@
ly:%0A
-%0D
%0A ret
@@ -589,25 +589,22 @@
esult%5B0%5D
-%0D%0A%0D
+%0A
%0A
-%0D
%0A def
@@ -631,17 +631,16 @@
elf,id):
-%0D
%0A
@@ -644,39 +644,8 @@
-'''%0D%0A %E7%A4%BA%E4%BE%8B%E4%BB%A3%E7%A0%81%0D%0A '''%0D
%0A
@@ -653,17 +653,16 @@
try:
-%0D
%0A
@@ -726,17 +726,16 @@
'''%25(id)
-%0D
%0A
@@ -775,17 +775,16 @@
ery(sql)
-%0D
%0A
@@ -795,29 +795,8 @@
ept:
-%0D%0A # %E6%8A%9B%E5%87%BA%E5%BC%82%E5%B8%B8%0D
%0A
@@ -808,17 +808,16 @@
pass
-%0D
%0A
@@ -825,17 +825,16 @@
finally:
-%0D
%0A
@@ -838,17 +838,16 @@
-%0D
%0A
@@ -871,14 +871,12 @@
t%5B0%5D
-%0D%0A%0D
+%0A
%0A
-%0D
+%0A
%0A
|
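Nearly all of the hunks above delete stray %0D characters — that is, they normalize CRLF line endings to LF — and drop the placeholder docstrings and comments. The one substantive change, the import error named in the commit subject, decodes to a rename of the exception module and class:

# before:  from util.lt_exception import LTException
# after:   from util.gip_exception import GipException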
624adf50b90f97454857185d71259f6fb7a7fed6
|
fix imports
|
hublib/rappture/tool.py
|
hublib/rappture/tool.py
|
from __future__ import print_function
from .node import Node
import numpy as np
from lxml import etree as ET
import os
from subprocess import call, Popen, PIPE
import sys
from .rappture import RapXML
from hublib.use import _use
class Tool(RapXML):
def __init__(self, tool):
"""
tool can be any of the following:
- Path to a tool.xml file.
- Name of a published tool. The current version will be run.
"""
dirname, xml = os.path.split(tool)
if dirname == "":
if xml != "tool.xml":
# must be tool name
dirname = "/apps/%s/current" % xml
xml = dirname + "/rappture/tool.xml"
else:
dirname = os.getcwd()
else:
xml = tool
dirname = os.path.abspath(os.path.join(dirname, '..'))
xml = os.path.abspath(xml)
if not os.path.isfile(xml):
raise ValueError("tool must be a toolname or path to a tool.xml file.")
invoke_file = os.path.join(dirname, 'middleware', 'invoke')
if os.path.isfile(invoke_file):
self.invoke_file = invoke_file
sessdir = os.environ['SESSIONDIR']
self.tmp_name = os.path.join(sessdir, 'tool_driver_%s.xml' % os.getpid())
self.run_name = ""
self.toolparameters_name = os.path.join(sessdir, 'driver_%s.hz' % os.getpid())
self.rappturestatus_name = os.path.join(sessdir, 'rappture.status')
self.fname = xml
self.tree = ET.parse(xml)
self.path = ''
def run(self, verbose=True):
# print("Writing", self.tmp_name)
with open(self.tmp_name, 'w') as f:
f.write(str(self.xml(pretty=False, header=True)))
with open(self.toolparameters_name, 'w') as f:
f.write("file(execute):%s" % (self.tmp_name))
cmd = "TOOL_PARAMETERS=%s %s" % (self.toolparameters_name,self.invoke_file)
if verbose:
print("cmd=", cmd)
cwd = os.getcwd()
os.chdir(os.environ['SESSIONDIR'])
try:
ret = subprocess.call(cmd, shell=True)
if ret:
print('Error: "%s"' % cmd, file=sys.stderr)
if ret < 0:
print("Terminated by signal", -ret, file=sys.stderr)
else:
print("Returncode", ret, file=sys.stderr)
except OSError as e:
print('Error: "%s"' % cmd, file=sys.stderr)
print("Failed:", e, file=sys.stderr)
sys.exit(1)
with(open(self.rappturestatus_name, 'r')) as f:
statusData = f.readlines()
for record in statusData:
if 'output saved in' in record:
self.run_name = record.strip().split()[-1]
break
if self.run_name:
self.tree = ET.parse(self.run_name)
os.chdir(cwd)
|
Python
| 0.000002
|
@@ -112,20 +112,22 @@
port os%0A
-from
+import
subproc
@@ -133,33 +133,8 @@
cess
- import call, Popen, PIPE
%0Aimp
@@ -174,36 +174,8 @@
XML%0A
-from hublib.use import _use%0A
%0A%0Acl
|
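Decoded, the hunks replace `from subprocess import call, Popen, PIPE` with a bare `import subprocess` and delete the unused `from hublib.use import _use`. That matches the body of run(), which calls subprocess.call(cmd, shell=True) — a name the old from-import form never bound, so the module would have failed with a NameError at runtime. The corrected header:

import os
import subprocess   # run() references subprocess.call(...), so import the module itself
import sys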
0b4b57f90ee3d0fe0af3ba9921adccda784d6301
|
Allow ordering payment profiles by name, type and status.
|
src/waldur_mastermind/invoices/filters.py
|
src/waldur_mastermind/invoices/filters.py
|
import django_filters
from rest_framework import filters
from waldur_core.core import filters as core_filters
from . import models
class InvoiceFilter(django_filters.FilterSet):
customer = core_filters.URLFilter(
view_name='customer-detail', field_name='customer__uuid'
)
customer_uuid = django_filters.UUIDFilter(field_name='customer__uuid')
state = django_filters.MultipleChoiceFilter(choices=models.Invoice.States.CHOICES)
o = django_filters.OrderingFilter(fields=(('year', 'month'),))
class Meta:
model = models.Invoice
fields = ('year', 'month')
class PaymentProfileFilter(django_filters.FilterSet):
organization = core_filters.URLFilter(
view_name='customer-detail', field_name='organization__uuid'
)
organization_uuid = django_filters.UUIDFilter(field_name='organization__uuid')
payment_type = django_filters.MultipleChoiceFilter(
choices=models.PaymentType.CHOICES
)
class Meta:
model = models.PaymentProfile
fields = []
class PaymentProfileFilterBackend(filters.BaseFilterBackend):
def filter_queryset(self, request, queryset, view):
if request.user.is_staff or request.user.is_support:
return queryset
return queryset.filter(is_active=True)
class PaymentFilter(django_filters.FilterSet):
profile = core_filters.URLFilter(
view_name='payment-profile-detail', field_name='profile__uuid'
)
profile_uuid = django_filters.UUIDFilter(field_name='profile__uuid')
class Meta:
model = models.Payment
fields = ['date_of_payment']
|
Python
| 0
|
@@ -958,16 +958,103 @@
ES%0A )
+%0A o = django_filters.OrderingFilter(fields=(('name', 'payment_type', 'is_active'),))
%0A%0A cl
|
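Decoded, the hunk appends an ordering filter to PaymentProfileFilter, in the same style as the one on InvoiceFilter:

o = django_filters.OrderingFilter(fields=(('name', 'payment_type', 'is_active'),))

One caveat: django-filter's OrderingFilter expects each inner tuple in fields to be a (field name, exposed name) pair, so the three-element inner tuple copied verbatim from the hunk would likely fail normalization; a flat fields=('name', 'payment_type', 'is_active') is probably what was intended.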
af0fbfe74ecaac67fb37f03e01a9aefcd06ce83f
|
Change default scriptPubKey in coinbase
|
qa/rpc-tests/test_framework/blocktools.py
|
qa/rpc-tests/test_framework/blocktools.py
|
# blocktools.py - utilities for manipulating blocks and transactions
#
# Distributed under the MIT/X11 software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
#
from mininode import *
from script import CScript, CScriptOp
# Create a block (with regtest difficulty)
def create_block(hashprev, coinbase, nTime=None):
block = CBlock()
if nTime is None:
import time
block.nTime = int(time.time()+600)
else:
block.nTime = nTime
block.hashPrevBlock = hashprev
block.nBits = 0x207fffff # Will break after a difficulty adjustment...
block.vtx.append(coinbase)
block.hashMerkleRoot = block.calc_merkle_root()
block.calc_sha256()
return block
def serialize_script_num(value):
r = bytearray(0)
if value == 0:
return r
neg = value < 0
absvalue = -value if neg else value
while (absvalue):
r.append(chr(absvalue & 0xff))
absvalue >>= 8
if r[-1] & 0x80:
r.append(0x80 if neg else 0)
elif neg:
r[-1] |= 0x80
return r
counter=1
# Create an anyone-can-spend coinbase transaction, assuming no miner fees
def create_coinbase(heightAdjust = 0, absoluteHeight = None):
global counter
height = absoluteHeight if absoluteHeight is not None else counter+heightAdjust
coinbase = CTransaction()
coinbase.vin.append(CTxIn(COutPoint(0, 0xffffffff),
ser_string(serialize_script_num(height)), 0xffffffff))
counter += 1
coinbaseoutput = CTxOut()
coinbaseoutput.nValue = 50*100000000
halvings = int((height)/150) # regtest
coinbaseoutput.nValue >>= halvings
coinbaseoutput.scriptPubKey = ""
coinbase.vout = [ coinbaseoutput ]
coinbase.calc_sha256()
return coinbase
# Create a transaction with an anyone-can-spend output, that spends the
# nth output of prevtx.
def create_transaction(prevtx, n, sig, value):
tx = CTransaction()
assert(n < len(prevtx.vout))
tx.vin.append(CTxIn(COutPoint(prevtx.sha256, n), sig, 0xffffffff))
tx.vout.append(CTxOut(value, ""))
tx.calc_sha256()
return tx
|
Python
| 0
|
@@ -268,17 +268,39 @@
ScriptOp
+, OP_TRUE, OP_CHECKSIG
%0A
-
%0A# Creat
@@ -1126,34 +1126,16 @@
Create a
-n anyone-can-spend
coinbas
@@ -1171,16 +1171,126 @@
ner fees
+.%0A# If pubkey is passed in, the coinbase output will be a P2PK output;%0A# otherwise an anyone-can-spend output.
%0Adef cre
@@ -1336,16 +1336,31 @@
Height =
+ None, pubkey =
None):%0A
@@ -1547,17 +1547,16 @@
ffffff),
-
%0A
@@ -1789,16 +1789,45 @@
alvings%0A
+ if (pubkey != None):%0A
coin
@@ -1852,18 +1852,113 @@
ubKey =
-%22%22
+CScript(%5Bpubkey, OP_CHECKSIG%5D)%0A else:%0A coinbaseoutput.scriptPubKey = CScript(%5BOP_TRUE%5D)
%0A coi
|
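Decoded, the hunks widen the script import to include OP_TRUE and OP_CHECKSIG, update the comment to describe the new behaviour, add a pubkey parameter to create_coinbase, and replace the empty-string scriptPubKey with a real script: pay-to-pubkey when a pubkey is supplied, anyone-can-spend otherwise. A reconstructed excerpt of the patched file (not standalone — CScript and the opcodes come from the test framework's script module):

from script import CScript, CScriptOp, OP_TRUE, OP_CHECKSIG

def create_coinbase(heightAdjust = 0, absoluteHeight = None, pubkey = None):
    ...
    if (pubkey != None):
        coinbaseoutput.scriptPubKey = CScript([pubkey, OP_CHECKSIG])  # P2PK output
    else:
        coinbaseoutput.scriptPubKey = CScript([OP_TRUE])              # anyone-can-spend
    ...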
4d413d45def838d730806097484d7ccf9d49744f
|
Fix to test code
|
mycluster/test.py
|
mycluster/test.py
|
import mycluster
mycluster.create_submit('hybrid:hybrid.q',script_name='test.job',num_tasks=2,
tasks_per_node=2,
my_script='test.bsh',
user_email='test@email.com',
)
mycluster.submit('test.job')
for i in mycluster.job_list():
print i, mycluster.get_job(i).status
|
Python
| 0.000018
|
@@ -13,16 +13,34 @@
luster%0A%0A
+mycluster.init()%0A%0A
mycluste
|
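Decoded, the hunk inserts a single initialization call between the import and the first use — presumably mycluster requires init() to run before create_submit and submit will work:

import mycluster

mycluster.init()   # added by the fix: set up the cluster interface first

# ... create_submit / submit / job_list calls continue unchanged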
1db0f887dc3f89533beb986bcd19c5fb7b0c80e7
|
clean unicode encoding (fix UnicodeEncodeError in non-ascii environments)
|
taskgv.py
|
taskgv.py
|
#!/usr/bin/env python
'graph dependencies in projects'
import json
from subprocess import Popen, PIPE
import subprocess
import sys
import textwrap
# Typical command line usage:
#
# taskgv TASKFILTER
#
# TASKFILTER is a taskwarrior filter, documentation can be found here: http://taskwarrior.org/projects/taskwarrior/wiki/Feature_filters
#
# Probably the most helpful commands are:
#
# taskgv project:fooproject status:pending
# --> graph pending tasks in project 'fooproject'
#
# taskgv project:fooproject
# --> graphs all tasks in 'fooproject', pending, completed, deleted
#
# taskgv status:pending
# --> graphs all pending tasks in all projects
#
# taskgv
# --> graphs everything - could be massive
#
#Wrap label text at this number of characters
charsPerLine = 20;
#full list of colors here: http://www.graphviz.org/doc/info/colors.html
blockedColor = 'gold4'
maxUrgencyColor = 'red2' #color of the tasks that have absolutely the highest urgency
unblockedColor = 'green'
doneColor = 'grey'
waitColor = 'white'
deletedColor = 'pink';
#The width of the border around the tasks:
penWidth = 1
#Corrected arrow direction so I don't get confused.
dir = 'back'
#Have one HEADER (and only one) uncommented at a time, or the last uncommented value will be the only one considered
#Left to right layout, my favorite, ganntt-ish
#HEADER = "digraph dependencies { splines=true; overlap=ortho; rankdir=LR; weight=2;"
#Spread tasks on page
HEADER = "digraph dependencies { layout=neato; splines=true; overlap=scalexy; rankdir=LR; weight=2;"
#More information on setting up graphviz: http://www.graphviz.org/doc/info/attrs.html
#-----------------------------------------#
# Editing under this might break things #
#-----------------------------------------#
FOOTER = "}"
JSON_START = '['
JSON_END = ']'
validUuids = list()
def call_taskwarrior(cmd):
'call taskwarrior, returning output and error'
tw = Popen(['task'] + cmd.split(), stdout=PIPE, stderr=PIPE)
return tw.communicate()
def get_json(query):
'call taskwarrior, returning objects from json'
result, err = call_taskwarrior('end.after:today xor status:pending export %s' % query)
return json.loads(JSON_START + result + JSON_END)
def call_dot(instr):
    'call dot, returning stdout and stderr'
dot = Popen('dot -Tgv'.split(), stdout=PIPE, stderr=PIPE, stdin=PIPE)
return dot.communicate(instr)
if __name__ == '__main__':
query = sys.argv[1:]
print ('Calling TaskWarrior')
data = get_json(' '.join(query))
#print data
maxUrgency = -9999;
for datum in data:
if float(datum['urgency']) > maxUrgency:
maxUrgency = float(datum['urgency'])
# first pass: labels
lines = [HEADER]
print ('Printing Labels')
for datum in data:
validUuids.append(datum['uuid'])
if datum['description']:
style = ''
color = ''
style = 'filled'
if datum['status']=='pending':
prefix = datum['id']
if not datum.get('depends','') : color = unblockedColor
else :
hasPendingDeps = 0
for depend in datum['depends'].split(','):
for datum2 in data:
if datum2['uuid'] == depend and datum2['status'] == 'pending':
hasPendingDeps = 1
if hasPendingDeps == 1 : color = blockedColor
else : color = unblockedColor
elif datum['status'] == 'waiting':
prefix = 'WAIT'
color = waitColor
elif datum['status'] == 'completed':
prefix = 'DONE'
color = doneColor
elif datum['status'] == 'deleted':
prefix = 'DELETED'
color = deletedColor
else:
prefix = ''
color = 'white'
if float(datum['urgency']) == maxUrgency:
color = maxUrgencyColor
label = '';
descriptionLines = textwrap.wrap(datum['description'],charsPerLine);
for descLine in descriptionLines:
label += descLine+"\\n";
lines.append('"%s"[shape=box][penwidth=%d][label="%s\:%s"][fillcolor=%s][style=%s]' % (datum['uuid'], penWidth, prefix, label, color, style))
#documentation http://www.graphviz.org/doc/info/attrs.html
# second pass: dependencies
print ('Resolving Dependencies')
for datum in data:
if datum['description']:
for dep in datum.get('depends', '').split(','):
#print ("\naaa %s" %dep)
if dep!='' and dep in validUuids:
lines.append('"%s" -> "%s"[dir=%s];' % (dep, datum['uuid'], dir))
continue
# third pass: projects
print ('Making and Linking Project Nodes')
for datum in data:
for proj in datum.get('project', '').split(','):
if proj != '':
lines.append('"%s" -> "%s"[dir=both][arrowtail=odot];' % (proj, datum['uuid']))
lines.append('"%s"[shape=circle][fontsize=40.0][penwidth=16][color=gray52]' % (proj))
continue
# third pass: tags
print ('Making and Linking Tag Nodes')
for datum in data:
for tag in datum.get('tags',''):
if tag != '':
lines.append('"%s" -> "%s";' % (datum['uuid'], tag))
lines.append('"%s"[shape=square][fontsize=24.0][penwidth=8]' % (tag))
continue
lines.append(FOOTER)
print ('Calling dot')
gv, err = call_dot('\n'.join(lines))
if err != '':
print ('Error calling dot:')
print (err.strip())
print ('Writing to taskgv.gv')
with open('/tmp/taskgv.gv', 'w') as f:
f.write(gv)
subprocess.call("open /tmp/taskgv.gv", shell = True)
|
Python
| 0
|
@@ -2399,16 +2399,32 @@
te(instr
+.encode('utf-8')
)%0A%0Aif __
|
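Decoded, the hunk turns dot.communicate(instr) into dot.communicate(instr.encode('utf-8')). communicate() on a binary pipe wants bytes; passing a unicode string meant an implicit ASCII encode, which is what raised UnicodeEncodeError on non-ASCII task descriptions. The patched helper:

def call_dot(instr):
    'call dot, returning stdout and stderr'
    dot = Popen('dot -Tgv'.split(), stdout=PIPE, stderr=PIPE, stdin=PIPE)
    return dot.communicate(instr.encode('utf-8'))  # bytes in; survives non-ASCII labels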
c01eabde155da101e50c6587065b21b07c74a83a
|
remove debug msg
|
mjpegtools/__init__.py
|
mjpegtools/__init__.py
|
from sys import version_info
if version_info < (3,):
from urllib2 import Request,urlopen
else:
from urllib.request import Request,urlopen
from io import BytesIO
import re
import logging
from base64 import b64encode
class MjpegParser(object):
def __init__(self, url, auth=None, timeout=2):
"""
:param url: The url
:param auth: A tuple containing username, password for basic
auth, or none if no auth needed.
:param timeout: The timeout value for urllib.urlopen().
"""
self.pil = False
self.quality = 50
self.format = 'jpeg'
self.timeout= timeout
request = Request(url)
try:
if auth:
request.add_header('Authorization',
b'Basic ' + b64encode((auth[0] + ':' + auth[1]).encode('utf-8')) )
self.input = urlopen(request, timeout=self.timeout)
self.ping = True
except Exception as e:
logging.error('input error {}'.format(e))
self.ping = False
self.length = 0
# Mimic the same data as the origin input, good if you are streaming as-is.
self.data = ''
self.headers = self.get_headers()
# Default headers when serving to the client as mjpeg
def get_headers(self):
return ('\r\n' + '--ipcamera\r\n' + 'Content-Length: ' + str(self.length) +
'\r\n' + 'Content-Type: image/jpeg\r\n\r\n')
def serve(self):
# Regex for digits in content-length.
regex = re.compile(b"\d+")
# Declare some empty vars with init values for while loop.
content_length = 0
content_type = 0
content = b''
self.data = b''
# loop until it contains: content_length and content-type
while content_length == 0 or content_type == 0:
print(content)
# Pick up the content-length.
if b'content-length' in content.lower():
length = regex.findall(content)
if len(length) >= 1:
content_length = int(length[0])
self.length = content_length
if b'content-type' in content.lower():
content_type = 1
# Nothing found startover.
content = self.input.readline()
data = self.input.read(content_length)
self.data += content # Slow need to use join instead (pep8 Style).
self.output = BytesIO()
self.filename = 'cameraphoto.jpg'
# if pil is enabled (by default enabled).
if self.pil:
from PIL import Image
self.im = Image.open(BytesIO(data))
# If you need to do more changes to the image.
# Use overide the image_manipulator.
self.image_manipulator(self.im)
self.im.save(self.output, format=self.format, quality=self.quality)
self.output.seek(0)
else:
self.output.write(data)
self.output.seek(0)
return self
# Overide this method to do more image manipulations.
def image_manipulator(self, image):
        '''You can manipulate your image here, for example:
from PIL import ImageDraw
draw = ImageDraw.Draw(self.im)
draw.text((0, 0), str(time.ctime()) + " Your Camera name : " \
, (255, 255, 255))
'''
pass
def as_mjpeg(self):
def generate():
while True:
cam = self.serve()
c = cam.output
yield self.get_headers()
yield c.read()
return generate()
def as_flask_mjpeg(self):
def generate():
while True:
cam = self.serve()
c = cam.output
yield self.get_headers()
yield c.read()
from flask import Response
resp = Response(generate(), mimetype='image/jpeg',
content_type='multipart/x-mixed-replace;boundary=ipcamera',
direct_passthrough=True)
return resp
def as_flask_image(self):
from flask import send_file
return send_file(self.output,
attachment_filename=self.filename,
as_attachment=True)
def as_image(self):
return self.output
def as_array(self):
from PIL import Image
return Image.open(self.output)
|
Python
| 0.000048
|
@@ -1701,16 +1701,17 @@
:%0A
+#
print(co
|
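Decoded, the hunk prefixes the debug line with a comment marker — print(content) inside the header-scanning loop had been echoing every frame header to stdout:

while content_length == 0 or content_type == 0:
    # print(content)    # debug trace disabled by this commit
    ...                  # header parsing continues as before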
7977e7874e78718cf2e1980c82003169fdd3c1fb
|
Fix "undefined reference to `_sbrk'" // Resolve #90
|
builder/frameworks/arduino/maple/stm32f1.py
|
builder/frameworks/arduino/maple/stm32f1.py
|
# Copyright 2014-present PlatformIO <contact@platformio.org>
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Arduino
Arduino Wiring-based Framework allows writing cross-platform software to
control devices attached to a wide range of Arduino boards to create all
kinds of creative coding, interactive objects, spaces or physical experiences.
http://www.stm32duino.com
"""
from os.path import isdir, join
Import("env")
platform = env.PioPlatform()
board = env.BoardConfig()
FRAMEWORK_DIR = join(platform.get_package_dir(
"framework-arduinoststm32"), "STM32F1")
assert isdir(FRAMEWORK_DIR)
# default configuration values
vector = int(board.get("build.vec_tab_addr", "0x8000000"), 16)
error_led_port = "GPIOB"
error_led_pin = 1
# remap board configuration values
mcu_type = board.get("build.mcu")[:-2]
if "f103c8" in mcu_type:
ldscript = "jtag_c8.ld"
elif "f103cb" in mcu_type:
ldscript = "jtag.ld"
else:
ldscript = "%s.ld" % mcu_type
if "f103c" in mcu_type:
variant = "generic_stm32f103c"
elif "f103r8" in mcu_type or "f103rb" in mcu_type:
variant = "generic_stm32f103r8"
elif "f103rc" in mcu_type or "f103re" in mcu_type:
variant = "generic_stm32f103r"
elif "f103vc" in mcu_type or "f103ve" in mcu_type:
variant = "generic_stm32f103v"
# upload related configuration remap
# for all generic boards
upload_protocol = env.subst("$UPLOAD_PROTOCOL")
if upload_protocol not in ("dfu", "serial"):
env.Append(CPPDEFINES=[
("CONFIG_MAPLE_MINI_NO_DISABLE_DEBUG", 1),
"SERIAL_USB",
"GENERIC_BOOTLOADER"
])
# maple board related configuration remap
if "maple" in board.id:
env.Append(CPPDEFINES=[("SERIAL_USB")])
variant = "maple_mini" if "maple_mini" in board.id else "maple"
vector = 0x8005000
ldscript = "flash.ld"
if board.id == "maple_mini_b20":
vector = 0x8002000
ldscript = "bootloader_20"
# for nucleo f103rb board
elif "nucleo_f103rb" in board.id:
variant = "nucleo_f103rb"
ldscript = "jtag.ld"
env.Append(CPPDEFINES=["NUCLEO_HSE_CRYSTAL"])
elif upload_protocol == "dfu":
env.Append(CPPDEFINES=["SERIAL_USB", "GENERIC_BOOTLOADER"])
vector = 0x8002000
if "f103c" in mcu_type:
ldscript = "bootloader_20.ld"
elif "f103r" in mcu_type:
ldscript = "bootloader.ld"
elif "f103v" in mcu_type:
ldscript = "stm32f103veDFU.ld"
env.Append(
CFLAGS=["-std=gnu11"],
CXXFLAGS=["-std=gnu++11"],
CCFLAGS=[
"-MMD",
"--param", "max-inline-insns-single=500",
"-march=armv7-m"
],
CPPDEFINES=[
("DEBUG_LEVEL", "DEBUG_NONE"),
("BOARD_%s" % variant),
("VECT_TAB_ADDR", vector),
("ERROR_LED_PORT", error_led_port),
("ERROR_LED_PIN", error_led_pin),
("ARDUINO", 10610),
("ARDUINO_%s" % variant.upper()
if not "nucleo" in board.id else "STM_NUCLEO_F103RB"),
("ARDUINO_ARCH_STM32F1"),
("__STM32F1__"),
("MCU_%s" % mcu_type.upper())
],
CPPPATH=[
join(FRAMEWORK_DIR, "cores", "maple"),
join(FRAMEWORK_DIR, "system", "libmaple"),
join(FRAMEWORK_DIR, "system", "libmaple", "include"),
join(FRAMEWORK_DIR, "system", "libmaple", "usb", "stm32f1"),
join(FRAMEWORK_DIR, "system", "libmaple", "usb", "usb_lib")
],
LIBPATH=[join(FRAMEWORK_DIR, "variants", variant, "ld")]
)
# remap ldscript
env.Replace(LDSCRIPT_PATH=ldscript)
# remove unused linker flags
for item in ("-nostartfiles", "-nostdlib"):
if item in env['LINKFLAGS']:
env['LINKFLAGS'].remove(item)
# remove unused libraries
for item in ("c", "stdc++", "nosys"):
if item in env['LIBS']:
env['LIBS'].remove(item)
#
# Lookup for specific core's libraries
#
env.Append(
LIBSOURCE_DIRS=[
join(FRAMEWORK_DIR, "libraries", "__cores__", "maple"),
join(FRAMEWORK_DIR, "libraries")
]
)
#
# Target: Build Core Library
#
libs = []
if "build.variant" in board:
env.Append(
CPPPATH=[join(FRAMEWORK_DIR, "variants", variant)]
)
libs.append(env.BuildLibrary(
join("$BUILD_DIR", "FrameworkArduinoVariant"),
join(FRAMEWORK_DIR, "variants", variant)
))
libs.append(env.BuildLibrary(
join("$BUILD_DIR", "FrameworkArduino"),
join(FRAMEWORK_DIR, "cores", "maple")
))
env.Prepend(LIBS=libs)
|
Python
| 0
|
@@ -3890,16 +3890,33 @@
, %22ld%22)%5D
+,%0A%0A LIBS=%5B%22c%22%5D
%0A)%0A%0A# re
@@ -4153,13 +4153,8 @@
in (
-%22c%22,
%22std
|
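Decoded, the two hunks keep libc on the linker line: LIBS=["c"] is appended inside the env.Append(...) call, and "c" is dropped from the library-removal loop below — presumably what resolves the missing newlib _sbrk symbol from the commit subject. After the patch the relevant fragments read:

    LIBPATH=[join(FRAMEWORK_DIR, "variants", variant, "ld")],

    LIBS=["c"]   # keep libc linked; stripping it caused "undefined reference to `_sbrk'"
)

# remove unused libraries
for item in ("stdc++", "nosys"):   # "c" is no longer removed from env['LIBS']
    if item in env['LIBS']:
        env['LIBS'].remove(item)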
f75f1a9f26c6a4b96251f076cfd369e7fc276cab
|
fix layout parameter
|
bumblebee_status/modules/contrib/spotify.py
|
bumblebee_status/modules/contrib/spotify.py
|
"""Displays the current song being played and allows pausing, skipping ahead, and skipping back.
Requires the following library:
* python-dbus
Parameters:
* spotify.format: Format string (defaults to '{artist} - {title}')
Available values are: {album}, {title}, {artist}, {trackNumber}
* spotify.layout: Comma-separated list to change order of widgets (defaults to song, previous, pause, next)
Widget names are: spotify.song, spotify.prev, spotify.pause, spotify.next
contributed by `yvesh <https://github.com/yvesh>`_ - many thanks!
added controls by `LtPeriwinkle <https://github.com/LtPeriwinkle>`_ - many thanks!
"""
import sys
import dbus
import core.module
import core.widget
import core.input
import core.decorators
import util.format
class Module(core.module.Module):
def __init__(self, config, theme):
super().__init__(config, theme, [])
self.__layout = self.parameter(
"layout",
util.format.aslist("spotify.song,spotify.prev,spotify.pause,spotify.next"),
)
self.__song = ""
self.__pause = ""
self.__format = self.parameter("format", "{artist} - {title}")
self.__cmd = "dbus-send --session --type=method_call --dest=org.mpris.MediaPlayer2.spotify \
/org/mpris/MediaPlayer2 org.mpris.MediaPlayer2.Player."
def hidden(self):
return self.string_song == ""
def __get_song(self):
bus = dbus.SessionBus()
spotify = bus.get_object(
"org.mpris.MediaPlayer2.spotify", "/org/mpris/MediaPlayer2"
)
spotify_iface = dbus.Interface(spotify, "org.freedesktop.DBus.Properties")
props = spotify_iface.Get("org.mpris.MediaPlayer2.Player", "Metadata")
self.__song = self.__format.format(
album=str(props.get("xesam:album")),
title=str(props.get("xesam:title")),
artist=",".join(props.get("xesam:artist")),
trackNumber=str(props.get("xesam:trackNumber")),
)
def update(self):
try:
self.clear_widgets()
self.__get_song()
widget_map = {}
for widget_name in self.__layout:
widget = self.add_widget(name=widget_name)
if widget_name == "spotify.prev":
widget_map[widget] = {
"button": core.input.LEFT_MOUSE,
"cmd": self.__cmd + "Previous",
}
widget.set("state", "prev")
elif widget_name == "spotify.pause":
widget_map[widget] = {
"button": core.input.LEFT_MOUSE,
"cmd": self.__cmd + "PlayPause",
}
playback_status = str(
dbus.Interface(dbus.SessionBus().get_object(
"org.mpris.MediaPlayer2.spotify", "/org/mpris/MediaPlayer2"), "org.freedesktop.DBus.Properties")
.Get("org.mpris.MediaPlayer2.Player", "PlaybackStatus")
)
if playback_status == "Playing":
widget.set("state", "playing")
else:
widget.set("state", "paused")
elif widget_name == "spotify.next":
widget_map[widget] = {
"button": core.input.LEFT_MOUSE,
"cmd": self.__cmd + "Next",
}
widget.set("state", "next")
elif widget_name == "spotify.song":
widget.set("state", "song")
widget.full_text(self.__song)
else:
raise KeyError(
"The spotify module does not have a {widget_name!r} widget".format(
widget_name=widget_name
)
)
for widget, callback_options in widget_map.items():
core.input.register(widget, **callback_options)
except Exception:
self.__song = ""
@property
def string_song(self):
if sys.version_info.major < 3:
return unicode(self.__song)
return str(self.__song)
|
Python
| 0
|
@@ -641,16 +641,104 @@
thanks!
+%0A%0Afixed icons and layout parameter by %60gkeep %3Chttps://github.com/gkeep%3E%60_ - many thanks!
%0A%22%22%22%0A%0Aim
@@ -1005,77 +1005,82 @@
t =
-self.parameter(%0A %22layout%22,%0A util.format.aslist(
+util.format.aslist(%0A self.parameter(%0A %22layout%22,
%22spo
@@ -1129,18 +1129,31 @@
fy.next%22
-)
,
+%0A )
%0A
@@ -2942,16 +2942,45 @@
terface(
+%0A
dbus.Ses
@@ -2993,32 +2993,36 @@
s().get_object(%0A
+
@@ -3070,36 +3070,126 @@
fy%22,
- %22/org/mpris/MediaPlayer2%22),
+%0A %22/org/mpris/MediaPlayer2%22,%0A ),%0A
%22or
@@ -3214,27 +3214,26 @@
.Properties%22
-)%0A
+,%0A
@@ -3244,23 +3244,17 @@
-
+)
.Get(%22or
|
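Decoded, the key hunk inverts the nesting of the two calls in __init__: previously util.format.aslist() was applied only to the default string, so a user-configured layout arrived as one unsplit comma-separated string; now whatever parameter() returns is normalized. The remaining hunks merely reflow the dbus.Interface(...) call over several lines and extend the docstring credits. Before and after:

# before: only the *default* was split; a configured "a,b,c" stayed a raw string
self.__layout = self.parameter(
    "layout",
    util.format.aslist("spotify.song,spotify.prev,spotify.pause,spotify.next"),
)

# after: the configured value goes through aslist() too
self.__layout = util.format.aslist(
    self.parameter(
        "layout",
        "spotify.song,spotify.prev,spotify.pause,spotify.next",
    )
)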
24f929d30924a5ee62d798e4069668cc42a51fde
|
Add -y option for skipping confirmation prompts
|
dcp/__main__.py
|
dcp/__main__.py
|
#!/usr/bin/env python3
"""Format a drive for use as a Digital Cinema Package.
"""
import argparse
import sys
from subprocess import CalledProcessError
import dcp.bytesize as bytesize
from dcp.__init__ import __version__
from dcp.drive import attached_drives, drive_size, unmount
from dcp.drive import partition, dcp_init, ntfs_init
from dcp.interactive import read_choice, read_number, read_y_or_n
# Use this module's docstring as the program description.
DESCRIPTION = sys.modules[__name__].__doc__
EPILOGUE = 'Requires external programs for drive formatting.'
def read_dcp_size (capacity, default):
"""Read the size of the DCP partition from the user.
capacity -- the total capacity of the drive as a ByteSize object.
default -- the default size in bytes.
Returns:
The size of the DCP partition as a ByteSize.
"""
gigs = round(default.gigabytes, 2)
size = read_number('DCP partition size (GB)', gigs)
size = bytesize.from_gb(int(size))
if size >= capacity:
print('Invalid partition size. ' +
'Must be less than drive capacity ({:g2} GB).'.format(
default.gigabytes))
return read_dcp_size(capacity, default)
else:
return size
def print_drive_list (drives):
"""Print a list of available drives for formatting.
drives -- is the list of drives to print.
"""
lines = []
for drive in drives:
size = drive_size(drive)
line = '{}\t {.gigabytes:>8.2f} GB'.format(drive, size)
lines.append(line)
print('''
Available drives:
--------------------------------
{}
'''.format('\n'.join(lines)))
def process_args (args):
"""Process program args. Interactively read any args that were not supplied.
args -- The arguments object output by argparse.
Returns:
The modified args object.
"""
# Select the drive to format.
#
# As a basic sanity check, only offer to format drives that have a capacity
# of 10 GB or greater. This should exclude CDs, DVDs etc.
drives = [drive for drive in attached_drives()
if drive_size(drive) >= bytesize.from_gb(10)]
if not args.drive:
print_drive_list(drives)
default_drive = next(reversed(drives))
args.drive = read_choice(
'Select drive to format', drives, default=default_drive)
elif not args.drive in drives:
exit('Error: Invalid drive path ({})'.format(args.drive))
args.capacity = drive_size(args.drive)
# Set the size of the DCP partition.
if not args.dcp_size:
default_size = args.capacity - bytesize.from_mb(600)
args.dcp_size = read_dcp_size(args.capacity, default=default_size)
else:
args.dcp_size = bytesize.from_gb(args.dcp_size)
# Infer the size of the NTFS partition.
args.ntfs_size = args.capacity - args.dcp_size
return args
def main ():
"""Program entrypoint.
"""
# Parse program arguments.
parser = argparse.ArgumentParser(
description=DESCRIPTION,
epilog=EPILOGUE,
add_help=False)
group = parser.add_argument_group('formatting options')
group.add_argument(
'-d', '--drive',
help='the absolute path to the drive to format')
group.add_argument(
'-s', '--dcp_size',
type=float,
metavar='GB',
help='the size of the DCP partition in gigabytes')
group = parser.add_argument_group('help')
group.add_argument(
'-h', '--help',
action='help',
help='show usage')
group.add_argument(
'-v', '--version',
action='version',
version=__version__,
help='show version')
try:
args = parser.parse_args()
args = process_args(args)
except KeyboardInterrupt:
exit('\nNo changes made.')
# Summarise options.
print ('''
The drive will be repartitioned as follows:
{}\t\t{.gigabytes:>8.2f} GB
|
|-- 1: DCP \t ext2 \t{.gigabytes:>8.2f} GB
`-- 2: NTFS\t ntfs \t{.gigabytes:>8.2f} GB
'''.format(args.drive, args.capacity, args.dcp_size, args.ntfs_size))
if not read_y_or_n('The drive will be erased. Continue?'):
exit('\nNo changes made.')
# Initialise drive.
print('--> Unmounting drive...')
unmount(args.drive)
print('--> Partitioning...')
try:
partition(args.drive, args.dcp_size, args.ntfs_size)
except CalledProcessError:
exit('\nError: partitioning failed')
print('--> Initialising DCP partition...')
try:
dcp_init('DCP', args.drive)
except CalledProcessError:
exit('\nError: DCP initialisation failed')
print('--> Initialising NTFS partition...')
try:
ntfs_init('NTFS', args.drive)
except CalledProcessError:
exit('\nError: NTFS initialisation failed')
print('--> Unmounting drive...')
unmount(args.drive)
print('--> Finished')
return 0
|
Python
| 0
|
@@ -3433,24 +3433,173 @@
igabytes')%0A%0A
+ group.add_argument(%0A '-y', '--no-confirm',%0A default=False,%0A action='store_true',%0A help='skip confirmation dialogs')%0A%0A
group =
@@ -4297,22 +4297,78 @@
))%0A%0A
-if not
+# Prompt before continuing.%0A%0A accepted = args.no_confirm or
read_y_
@@ -4372,16 +4372,25 @@
_y_or_n(
+%0A
'The dri
@@ -4419,16 +4419,37 @@
tinue?')
+%0A%0A if not accepted
:%0A
|
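Decoded, the hunks register a store_true flag in the formatting-options group and gate the erase prompt on it, so passing -y skips the interactive confirmation while the default behaviour is unchanged:

group.add_argument(
    '-y', '--no-confirm',
    default=False,
    action='store_true',
    help='skip confirmation dialogs')

# ... later, in main():

# Prompt before continuing.
accepted = args.no_confirm or read_y_or_n(
    'The drive will be erased. Continue?')

if not accepted:
    exit('\nNo changes made.')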