commit
stringlengths 40
40
| subject
stringlengths 1
3.25k
| old_file
stringlengths 4
311
| new_file
stringlengths 4
311
| old_contents
stringlengths 0
26.3k
| lang
stringclasses 3
values | proba
float64 0
1
| diff
stringlengths 0
7.82k
|
|---|---|---|---|---|---|---|---|
bb4188cd080c5b089bc54c2ba7f5b0cdbd6ed32d
|
Convert True to string for image registry
|
sahara/utils/openstack/images.py
|
sahara/utils/openstack/images.py
|
# Copyright (c) 2013 Mirantis Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from novaclient.v2 import images
import six
from sahara import exceptions as exc
PROP_DESCR = '_sahara_description'
PROP_USERNAME = '_sahara_username'
PROP_TAG = '_sahara_tag_'
def _iter_tags(meta):
for key in meta:
if key.startswith(PROP_TAG) and meta[key]:
yield key[len(PROP_TAG):]
def _ensure_tags(tags):
if not tags:
return []
return [tags] if isinstance(tags, six.string_types) else tags
class SaharaImage(images.Image):
def __init__(self, manager, info, loaded=False):
info['description'] = info.get('metadata', {}).get(PROP_DESCR)
info['username'] = info.get('metadata', {}).get(PROP_USERNAME)
info['tags'] = [tag for tag in _iter_tags(info.get('metadata', {}))]
super(SaharaImage, self).__init__(manager, info, loaded)
def tag(self, tags):
self.manager.tag(self, tags)
def untag(self, tags):
self.manager.untag(self, tags)
def set_description(self, username, description=None):
self.manager.set_description(self, username, description)
def unset_description(self):
self.manager.unset_description(self)
@property
def dict(self):
return self.to_dict()
@property
def wrapped_dict(self):
return {'image': self.dict}
def to_dict(self):
result = self._info.copy()
if 'links' in result:
del result['links']
return result
class SaharaImageManager(images.ImageManager):
"""Manage :class:`SaharaImage` resources.
This is an extended version of nova client's ImageManager with support of
additional description and image tags stored in images' meta.
"""
resource_class = SaharaImage
def set_description(self, image, username, description=None):
"""Sets human-readable information for image.
For example:
Ubuntu 13.04 x64 with Java 1.7u21 and Apache Hadoop 1.1.1, ubuntu
"""
self.set_meta(image, {
PROP_DESCR: description,
PROP_USERNAME: username,
})
def unset_description(self, image):
"""Unsets all Sahara-related information.
It removes username, description and tags from the specified image.
"""
image = self.get(image)
meta = [PROP_TAG + tag for tag in image.tags]
if image.description is not None:
meta += [PROP_DESCR]
if image.username is not None:
meta += [PROP_USERNAME]
self.delete_meta(image, meta)
def tag(self, image, tags):
"""Adds tags to the specified image."""
tags = _ensure_tags(tags)
self.set_meta(image, {PROP_TAG + tag: True for tag in tags})
def untag(self, image, tags):
"""Removes tags from the specified image."""
tags = _ensure_tags(tags)
self.delete_meta(image, [PROP_TAG + tag for tag in tags])
def list_by_tags(self, tags):
"""Returns images having all of the specified tags."""
tags = _ensure_tags(tags)
return [i for i in self.list() if set(tags).issubset(i.tags)]
def list_registered(self, name=None, tags=None):
tags = _ensure_tags(tags)
images = [i for i in self.list()
if i.username and set(tags).issubset(i.tags)]
if name:
return [i for i in images if i.name == name]
else:
return images
def get_registered_image(self, image):
img = self.get(image)
if img.username:
return img
else:
raise exc.ImageNotRegistered(image)
|
Python
| 0.999999
|
@@ -3262,12 +3262,14 @@
ag:
+'
True
+'
for
|
bbfa9c3135ebdc5a99257d62556b691f8c87a26c
|
Update irrigate.py
|
device/src/irrigate.py
|
device/src/irrigate.py
|
#!/usr/bin/env python
#In this project, I use a servo to simulate the water tap.
#Roating to 90 angle suggest that the water tap is open, and 0 angle means close.
#Pin connection:
#deep red <--> GND
#red <--> VCC
#yellow <--> signal(X1)
from pyb import Servo
servo=Servo(1) # X1
def irrigate_start():
servo.angle(90)
def irrigate_stop():
servo.angle(0)
|
Python
| 0.000001
|
@@ -236,16 +236,150 @@
gnal(X1)
+%0A%0A#Update!!!!!%0A#Use real water pump(RS360) to irrigate the plants, need to use relay to drive the pump which is powered by 5V power.%0A#
%0Afrom py
|
173d7ffefe10e8896055bd5b41272c2d0a1f8889
|
Update version to 0.1.6 for upcoming release
|
pdblp/_version.py
|
pdblp/_version.py
|
__version__ = "0.1.5"
|
Python
| 0
|
@@ -16,7 +16,7 @@
0.1.
-5
+6
%22%0A
|
f028a004adf955115cf87354a045350d7d147b3e
|
Fix the warnings unit test.
|
tests/unit/utils/warnings_test.py
|
tests/unit/utils/warnings_test.py
|
# -*- coding: utf-8 -*-
'''
:codeauthor: :email:`Pedro Algarvio (pedro@algarvio.me)`
:copyright: © 2013 by the SaltStack Team, see AUTHORS for more details
:license: Apache 2.0, see LICENSE for more details.
tests.unit.utils.warnings_test
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
Test ``salt.utils.warn_until`` and ``salt.utils.kwargs_warn_until``
'''
# Import python libs
import warnings
# Import Salt Testing libs
from salttesting import skipIf, TestCase
from salttesting.helpers import ensure_in_syspath
from salttesting.mock import NO_MOCK, NO_MOCK_REASON, patch
ensure_in_syspath('../../')
# Import salt libs
from salt.utils import warn_until, kwargs_warn_until
from salt.version import SaltStackVersion
@skipIf(NO_MOCK, NO_MOCK_REASON)
class WarnUntilTestCase(TestCase):
@patch('salt.version')
def test_warn_until_warning_raised(self, salt_version_mock):
# We *always* want *all* warnings thrown on this module
warnings.filterwarnings('always', '', DeprecationWarning, __name__)
# Define a salt version info
salt_version_mock.__version_info__ = (0, 16)
# Let SaltStackVersion be the original one, not a MagicMock'ed one
salt_version_mock.SaltStackVersion = SaltStackVersion
def raise_warning():
warn_until(
(0, 17), 'Deprecation Message!'
)
# raise_warning should show warning until version info is >= (0, 17)
with warnings.catch_warnings(record=True) as recorded_warnings:
raise_warning()
self.assertEqual(
'Deprecation Message!', str(recorded_warnings[0].message)
)
# the deprecation warning is not issued because we passed
# _dont_call_warning
with warnings.catch_warnings(record=True) as recorded_warnings:
warn_until(
(0, 17), 'Foo', _dont_call_warnings=True
)
self.assertEqual(0, len(recorded_warnings))
# Let's set version info to (0, 17), a RuntimeError should be raised
salt_version_mock.__version_info__ = (0, 17)
with self.assertRaisesRegexp(
RuntimeError,
r'The warning triggered on filename \'(.*)warnings_test.py\', '
r'line number ([\d]+), is supposed to be shown until version '
r'\'0.17\' is released. Current version is now \'0.17\'. Please '
r'remove the warning.'):
raise_warning()
# Even though we're calling warn_until, we pass _dont_call_warnings
# because we're only after the RuntimeError
with self.assertRaisesRegexp(
RuntimeError,
r'The warning triggered on filename \'(.*)warnings_test.py\', '
r'line number ([\d]+), is supposed to be shown until version '
r'\'0.17\' is released. Current version is now \'0.17\'. Please '
r'remove the warning.'):
warn_until(
(0, 17), 'Foo', _dont_call_warnings=True
)
@patch('salt.version')
def test_kwargs_warn_until_warning_raised(self, salt_version_mock):
# We *always* want *all* warnings thrown on this module
warnings.filterwarnings('always', '', DeprecationWarning, __name__)
# Define a salt version info
salt_version_mock.__version_info__ = (0, 16)
def raise_warning(**kwargs):
kwargs_warn_until(
kwargs,
(0, 17),
)
# raise_warning({...}) should show warning until version info is >= (0, 17)
with warnings.catch_warnings(record=True) as recorded_warnings:
raise_warning(foo=42) # with a kwarg
self.assertEqual(
'The following parameter(s) have been deprecated and '
'will be removed in 0.17: \'foo\'.',
str(recorded_warnings[0].message)
)
# With no **kwargs, should not show warning until version info is >= (0, 17)
with warnings.catch_warnings(record=True) as recorded_warnings:
kwargs_warn_until(
{}, # no kwargs
(0, 17),
)
self.assertEqual(0, len(recorded_warnings))
# Let's set version info to (0, 17), a RuntimeError should be raised
# regardless of whether or not we pass any **kwargs.
salt_version_mock.__version_info__ = (0, 17)
with self.assertRaisesRegexp(
RuntimeError,
r'The warning triggered on filename \'(.*)warnings_test.py\', '
r'line number ([\d]+), is supposed to be shown until version '
r'\'0.17\' is released. Current version is now \'0.17\'. Please '
r'remove the warning.'):
raise_warning() # no kwargs
with self.assertRaisesRegexp(
RuntimeError,
r'The warning triggered on filename \'(.*)warnings_test.py\', '
r'line number ([\d]+), is supposed to be shown until version '
r'\'0.17\' is released. Current version is now \'0.17\'. Please '
r'remove the warning.'):
raise_warning(bar='baz', qux='quux') # some kwargs
if __name__ == '__main__':
from integration import run_tests
run_tests(WarnUntilTestCase, needs_daemon=False)
|
Python
| 0
|
@@ -2357,32 +2357,34 @@
r'%5C'0.17
+.0
%5C' is released.
@@ -2408,34 +2408,29 @@
s now %5C'0.17
+.0
%5C'.
- Please
'%0A
@@ -2430,32 +2430,39 @@
r'
+Please
remove the warni
@@ -2868,32 +2868,34 @@
r'%5C'0.17
+.0
%5C' is released.
@@ -2919,34 +2919,29 @@
s now %5C'0.17
+.0
%5C'.
- Please
'%0A
@@ -2941,32 +2941,39 @@
r'
+Please
remove the warni
|
b87ebc9dbbc33928345a83ac8ea0ce71806ac024
|
simplify play down to wall and standard defense
|
soccer/gameplay/plays/Defend_Restart_Defensive/BasicDefendRestartDefensive.py
|
soccer/gameplay/plays/Defend_Restart_Defensive/BasicDefendRestartDefensive.py
|
import main
import robocup
import behavior
import constants
import enum
import standard_play
import tactics.positions.submissive_goalie as submissive_goalie
import tactics.positions.submissive_defender as submissive_defender
import evaluation.opponent as eval_opp
import tactics.positions.wing_defender as wing_defender
import skills.mark as mark
import tactics.defense
import situational_play_selection
## Play that uses submissive defenders to defend
# an attack close to our goal.
#
# By default, we will use standard defense (two submissive
# defenders, one goalie) and additional marking robots.
#
class BasicDefendRestartDefensive(standard_play.StandardPlay):
_situationList = [
situational_play_selection.SituationalPlaySelector.Situation.DEFEND_RESTART_DEFENSIVE
] # yapf: disable
def __init__(self, num_defenders=2):
super().__init__(continuous=True)
self.num_defenders = num_defenders
self.add_transition(behavior.Behavior.State.start,
behavior.Behavior.State.running, lambda: True,
'Immediately')
for i in range(num_defenders):
self.add_subbehavior(mark.Mark(), 'mark' + str(i), required=False)
# Keep track of which robots are currently being defended
self.defended = {}
for i in range(len(main.their_robots())):
self.defended[i] = False
def execute_running(self):
for i in range(len(main.their_robots())):
if not eval_opp.is_marked(main.their_robots()[i].pos):
self.defended[i] = False
# mark highest threat robot
for i in range(self.num_defenders):
mark_bhvr = self.subbehavior_with_name('mark' + str(i))
threat_found = False
for threat_pt, _, _ in eval_opp.get_threat_list([mark_bhvr]):
print(threat_pt)
closest_opp = eval_opp.get_closest_opponent(threat_pt)
if not threat_found and (closest_opp.pos - main.ball().pos).mag() > constants.Field.CenterRadius + constants.Robot.Radius * 2:
print((closest_opp.pos - main.ball().pos).mag())
# print(constants.Field.CenterRadius)
mark_bhvr.mark_robot = closest_opp
threat_found = True
|
Python
| 0.000027
|
@@ -356,23 +356,28 @@
tactics.
-defense
+wall as wall
%0Aimport
@@ -412,114 +412,25 @@
%0A##
-Play that uses submissive defenders to defend%0A# an attack close to our goal.%0A#%0A# By default, we will
+Restart that
use
+s
sta
@@ -447,85 +447,57 @@
nse
-(two submissive%0A# defenders, one goalie) and additional marking robots. %0A#
+and uses the remaining%0A# robots to form a wall%0A#
%0Acla
@@ -1016,1224 +1016,64 @@
-for i in range(num_defenders):%0A self.add_subbehavior(mark.Mark(), 'mark' + str(i), required=False)%0A%0A # Keep track of which robots are currently being defended%0A self.defended = %7B%7D%0A for i in range(len(main.their_robots())):%0A self.defended%5Bi%5D = False%0A%0A def execute_running(self):%0A for i in range(len(main.their_robots())):%0A if not eval_opp.is_marked(main.their_robots()%5Bi%5D.pos):%0A self.defended%5Bi%5D = False%0A%0A%0A # mark highest threat robot%0A for i in range(self.num_defenders):%0A mark_bhvr = self.subbehavior_with_name('mark' + str(i))%0A%0A threat_found = False %0A for threat_pt, _, _ in eval_opp.get_threat_list(%5Bmark_bhvr%5D):%0A print(threat_pt)%0A closest_opp = eval_opp.get_closest_opponent(threat_pt)%0A if not threat_found and (closest_opp.pos - main.ball().pos).mag() %3E constants.Field.CenterRadius + constants.Robot.Radius * 2:%0A print((closest_opp.pos - main.ball().pos).mag())%0A # print(constants.Field.CenterRadius)%0A mark_bhvr.mark_robot = closest_opp%0A threat_found = True
+self.add_subbehavior(wall.Wall(), 'wall', required=False)%0A%0A
%0A
|
abae242bbcdc3eefcd0ab1ff29f660f89d47db1a
|
Add absolute URL for Surprises
|
mirigata/surprise/models.py
|
mirigata/surprise/models.py
|
from django.db import models
class Surprise(models.Model):
link = models.URLField(max_length=500)
description = models.TextField(max_length=1000)
|
Python
| 0
|
@@ -1,8 +1,53 @@
+from django.core.urlresolvers import reverse%0A
from dja
@@ -190,12 +190,111 @@
ength=1000)%0A
+%0A def get_absolute_url(self):%0A return reverse('surprise-detail', kwargs=%7B%22pk%22: self.id%7D)%0A
|
0aa23de3f1dc2b3bc477bb905fcc7054430bed26
|
Clarify test purpose
|
src/unix/tests/test_resetnetwork_hostname.py
|
src/unix/tests/test_resetnetwork_hostname.py
|
# vim: tabstop=4 shiftwidth=4 softtabstop=4
#
# Copyright (c) 2011 Openstack, LLC.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
#
"""
resetnetwork hostname tester
"""
import os
import unittest
from cStringIO import StringIO
import commands.redhat.network
import commands.debian.network
import commands.arch.network
import commands.gentoo.network
import commands.suse.network
class TestHostNameUpdates(unittest.TestCase):
def _run_redhat(self, infile, hostname):
outfile = commands.redhat.network._update_hostname(infile, hostname)
outfile.seek(0)
return outfile
def _run_debian(self, hostname):
outfile = commands.debian.network._update_hostname(hostname)
outfile.seek(0)
return outfile
def _run_arch(self, infile, hostname):
outfile = commands.arch.network._update_hostname(infile, hostname)
outfile.seek(0)
return outfile
def _run_gentoo(self, hostname):
return commands.gentoo.network._update_hostname(hostname)
def _run_suse(self, hostname):
outfile = commands.suse.network._update_hostname(hostname)
outfile.seek(0)
return outfile
def test_redhat_add_entry(self):
"""Test adding hostname to /etc/sysconfig/network"""
infile = StringIO('NETWORKING=yes\n' +
'NETWORKING_IPV6=yes\n')
outfile = self._run_redhat(infile, 'example')
self.assertEqual(outfile.read(), 'NETWORKING=yes\n' +
'NETWORKING_IPV6=yes\n' +
'HOSTNAME=example\n')
def test_redhat_update_entry(self):
"""Test updating hostname in /etc/sysconfig/network"""
infile = StringIO('NETWORKING=yes\n' +
'NETWORKING_IPV6=yes\n' +
'HOSTNAME=other\n')
outfile = self._run_redhat(infile, 'example')
self.assertEqual(outfile.read(), 'NETWORKING=yes\n' +
'NETWORKING_IPV6=yes\n' +
'HOSTNAME=example\n')
def test_debian(self):
"""Test updating hostname in /etc/hostname"""
outfile = self._run_debian('example')
self.assertEqual(outfile.read(), 'example\n')
def test_arch_add_entry(self):
"""Test adding hostname to /etc/rc.conf"""
infile = StringIO('eth0="eth0 192.0.2.42 netmask 255.255.255.0"\n' +
'INTERFACES=(eth0)\n')
outfile = self._run_arch(infile, 'example')
self.assertEqual(outfile.read(),
'eth0="eth0 192.0.2.42 netmask 255.255.255.0"\n' +
'INTERFACES=(eth0)\n' +
'HOSTNAME="example"\n')
def test_arch_update_entry(self):
"""Test updating hostname in /etc/rc.conf"""
infile = StringIO('eth0="eth0 192.0.2.42 netmask 255.255.255.0"\n' +
'INTERFACES=(eth0)\n' +
'HOSTNAME="other"\n')
outfile = self._run_arch(infile, 'example')
self.assertEqual(outfile.read(),
'eth0="eth0 192.0.2.42 netmask 255.255.255.0"\n' +
'INTERFACES=(eth0)\n' +
'HOSTNAME="example"\n')
def test_gentoo(self):
"""Test updating hostname in /etc/conf.d/hostname"""
data = self._run_gentoo('example')
self.assertEqual(data,
'# Automatically generated, do not edit\n' +
'HOSTNAME="example"\n')
def test_suse(self):
"""Test updating hostname in /etc/HOSTNAME"""
outfile = self._run_suse('example')
self.assertEqual(outfile.read(), 'example\n')
if __name__ == "__main__":
agent_test.main()
|
Python
| 0.000005
|
@@ -1795,24 +1795,32 @@
hostname to
+Red Hat
/etc/sysconf
@@ -1825,32 +1825,32 @@
nfig/network%22%22%22%0A
-
infile =
@@ -2179,24 +2179,32 @@
hostname in
+Red Hat
/etc/sysconf
@@ -2583,24 +2583,31 @@
hostname in
+Debian
/etc/hostnam
@@ -2782,16 +2782,27 @@
name to
+Arch Linux
/etc/rc.
@@ -3221,24 +3221,35 @@
hostname in
+Arch Linux
/etc/rc.conf
@@ -3688,24 +3688,31 @@
hostname in
+Gentoo
/etc/conf.d/
@@ -3908,32 +3908,32 @@
est_suse(self):%0A
-
%22%22%22Test
@@ -3953,16 +3953,21 @@
name in
+SuSE
/etc/HOS
|
307ed9b27e17efb990e198c5f3e059636eedb8d2
|
Add event types for allowed address pairs update action [WAL-3481]
|
src/waldur_openstack/openstack_tenant/log.py
|
src/waldur_openstack/openstack_tenant/log.py
|
from waldur_core.logging.loggers import EventLogger, event_logger
from waldur_core.structure import models as structure_models
from . import models
class ResourceActionEventLogger(EventLogger):
resource = structure_models.NewResource
action_details = dict
class Meta:
event_types = (
'resource_pull_scheduled',
'resource_pull_succeeded',
'resource_pull_failed',
# volume
'resource_attach_scheduled',
'resource_attach_succeeded',
'resource_attach_failed',
'resource_detach_scheduled',
'resource_detach_succeeded',
'resource_detach_failed',
'resource_extend_scheduled',
'resource_extend_succeeded',
'resource_extend_failed',
# instance
'resource_update_security_groups_scheduled',
'resource_update_security_groups_succeeded',
'resource_update_security_groups_failed',
'resource_change_flavor_scheduled',
'resource_change_flavor_succeeded',
'resource_change_flavor_failed',
'resource_assign_floating_ip_scheduled',
'resource_assign_floating_ip_succeeded',
'resource_assign_floating_ip_failed',
'resource_stop_scheduled',
'resource_stop_succeeded',
'resource_stop_failed',
'resource_start_scheduled',
'resource_start_succeeded',
'resource_start_failed',
'resource_restart_scheduled',
'resource_restart_succeeded',
'resource_restart_failed',
'resource_extend_volume_scheduled',
'resource_extend_volume_succeeded',
'resource_extend_volume_failed',
'resource_retype_scheduled',
'resource_retype_succeeded',
'resource_retype_failed',
'resource_unassign_floating_ip_scheduled',
'resource_unassign_floating_ip_succeeded',
'resource_unassign_floating_ip_failed',
'resource_update_internal_ips_scheduled',
'resource_update_internal_ips_succeeded',
'resource_update_internal_ips_failed',
'resource_update_floating_ips_scheduled',
'resource_update_floating_ips_succeeded',
'resource_update_floating_ips_failed',
)
event_groups = {'resources': event_types}
@staticmethod
def get_scopes(event_context):
resource = event_context['resource']
project = resource.service_project_link.project
return {resource, project, project.customer}
class BackupScheduleEventLogger(EventLogger):
resource = models.Instance
backup_schedule = models.BackupSchedule
class Meta:
event_types = (
'resource_backup_schedule_created',
'resource_backup_schedule_deleted',
'resource_backup_schedule_activated',
'resource_backup_schedule_deactivated',
'resource_backup_schedule_cleaned_up',
)
event_groups = {'resources': event_types}
@staticmethod
def get_scopes(event_context):
return ResourceActionEventLogger.get_scopes(event_context)
class SnapshotScheduleEventLogger(EventLogger):
resource = models.Volume
snapshot_schedule = models.SnapshotSchedule
class Meta:
event_types = (
'resource_snapshot_schedule_created',
'resource_snapshot_schedule_deleted',
'resource_snapshot_schedule_activated',
'resource_snapshot_schedule_deactivated',
'resource_snapshot_schedule_cleaned_up',
)
event_groups = {'resources': event_types}
@staticmethod
def get_scopes(event_context):
return ResourceActionEventLogger.get_scopes(event_context)
class BackupEventLogger(EventLogger):
resource = models.Instance
class Meta:
event_types = (
'resource_backup_creation_scheduled',
'resource_backup_creation_succeeded',
'resource_backup_creation_failed',
'resource_backup_restoration_scheduled',
'resource_backup_restoration_succeeded',
'resource_backup_restoration_failed',
'resource_backup_deletion_scheduled',
'resource_backup_deletion_succeeded',
'resource_backup_deletion_failed',
'resource_backup_schedule_creation_succeeded',
'resource_backup_schedule_update_succeeded',
'resource_backup_schedule_deletion_succeeded',
'resource_backup_schedule_activated',
'resource_backup_schedule_deactivated',
)
@staticmethod
def get_scopes(event_context):
return ResourceActionEventLogger.get_scopes(event_context)
event_logger.register('openstack_resource_action', ResourceActionEventLogger)
event_logger.register('openstack_backup_schedule', BackupScheduleEventLogger)
event_logger.register('openstack_snapshot_schedule', SnapshotScheduleEventLogger)
event_logger.register('openstack_backup', BackupEventLogger)
|
Python
| 0
|
@@ -2212,32 +2212,218 @@
al_ips_failed',%0A
+ 'resource_update_allowed_address_pairs_scheduled',%0A 'resource_update_allowed_address_pairs_succeeded',%0A 'resource_update_allowed_address_pairs_failed',%0A
'res
|
c0fdbf78fcc6b74086cc40e8e0deb273dee6d03c
|
Update BUILD_OSS to 4666.
|
src/data/version/mozc_version_template.bzl
|
src/data/version/mozc_version_template.bzl
|
# Copyright 2010-2021, Google Inc.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following disclaimer
# in the documentation and/or other materials provided with the
# distribution.
# * Neither the name of Google Inc. nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
MAJOR = 2
MINOR = 26
# BUILD number used for the OSS version.
BUILD_OSS = 4660
# Number to be increased. This value may be replaced by other tools.
BUILD = BUILD_OSS
# Represent the platform and release channel.
REVISION = 100
REVISION_MACOS = REVISION + 1
# This version represents the version of Mozc IME engine (converter, predictor,
# etc.). This version info is included both in the Mozc server and in the Mozc
# data set file so that the Mozc server can accept only the compatible version
# of data set file. The engine version must be incremented when:
# * POS matcher definition and/or conversion models were changed,
# * New data are added to the data set file, and/or
# * Any changes that loose data compatibility are made.
ENGINE_VERSION = 24
# This version is used to manage the data version and is included only in the
# data set file. DATA_VERSION can be incremented without updating
# ENGINE_VERSION as long as it's compatible with the engine.
# This version should be reset to 0 when ENGINE_VERSION is incremented.
DATA_VERSION = 10
|
Python
| 0
|
@@ -1606,17 +1606,17 @@
SS = 466
-0
+6
%0A%0A# Numb
|
c5225c00191595b6d1a824ee808465e0c488769b
|
Add missing arg which didn't make it because of the bad merge conflict resolution.
|
st2stream/st2stream/controllers/v1/stream.py
|
st2stream/st2stream/controllers/v1/stream.py
|
# Licensed to the StackStorm, Inc ('StackStorm') under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import six
from st2common import log as logging
from st2common.router import Response
from st2common.util.jsonify import json_encode
from st2stream.listener import get_listener
LOG = logging.getLogger(__name__)
def format(gen):
message = '''event: %s\ndata: %s\n\n'''
for pack in gen:
if not pack:
# Note: gunicorn wsgi handler expect bytes, not unicode
yield six.binary_type('\n')
else:
(event, body) = pack
# Note: gunicorn wsgi handler expect bytes, not unicode
yield six.binary_type(message % (event, json_encode(body, indent=None)))
class StreamController(object):
def get_all(self):
def make_response():
res = Response(content_type='text/event-stream',
app_iter=format(get_listener().generator()))
return res
stream = make_response()
return stream
stream_controller = StreamController()
|
Python
| 0
|
@@ -1454,16 +1454,32 @@
all(self
+, requester_user
):%0A
|
1df3dc91f71bf2a02b059d414ea5b041a382f1ad
|
change CSS selectors
|
shot.py
|
shot.py
|
# -*- coding: utf-8 -*-
import redis
import urllib2
from bs4 import BeautifulSoup
from datetime import datetime
url = 'http://www.x-kom.pl'
FORMAT_DATETIME = '%Y-%m-%d %H:%M:%S.%f'
redis_server = redis.Redis(host='localhost', port=6379)
def get_number(number):
return float(number.strip().split()[0].replace(',', '.'))
def get_element(soup, tag, class_name):
return soup.find(tag, {'class': class_name}).get_text()
def get_data(url):
html = urllib2.urlopen(url).read()
soup = BeautifulSoup(html, 'html.parser')
title = get_element(soup, 'div', 'killer-product-title')
price = get_element(soup, 'div', 'killer-price')
price_first = get_element(soup, 'div', 'discount-price')
return { 'title': title.encode('utf-8'), 'price': get_number(price), 'price_first': get_number(price_first), 'date': datetime.now()}
def save_to_db():
item = get_data(url)
date = item['date'].strftime(FORMAT_DATETIME)
redis_server.hmset(date, item)
def show_all():
keys = redis_server.keys()
for i, key in enumerate(keys):
print '{}: {}'.format(i, redis_server.hgetall(key))
if __name__ == '__main__':
save_to_db()
# show_all()
|
Python
| 0.000001
|
@@ -552,30 +552,21 @@
p, '
-div
+p
', '
-killer-
product-
titl
@@ -561,20 +561,19 @@
product-
-titl
+nam
e')%0A%09pri
@@ -607,14 +607,11 @@
', '
-killer
+new
-pri
@@ -660,16 +660,11 @@
', '
-discount
+old
-pri
|
8b944f04ebf9b635029182a3137e9368edafe9d2
|
Handle exception for bad search strings
|
pgsearch/utils.py
|
pgsearch/utils.py
|
from django.contrib.postgres.search import SearchVector, SearchRank, SearchQuery
import shlex
import string
def parseSearchString(search_string):
search_strings = shlex.split(search_string)
translator = str.maketrans({key: None for key in string.punctuation})
search_strings = [s.translate(translator) for s in search_strings]
return search_strings
def createSearchQuery(list_of_terms):
if len(list_of_terms) > 0:
q = SearchQuery(list_of_terms[0])
for term in list_of_terms[1:]:
q = q & SearchQuery(term)
return q
else:
return None
def searchPostgresDB(search_string, Table, config, rank, *fields):
list_of_terms = parseSearchString(search_string)
search_query = createSearchQuery(list_of_terms)
if rank == True:
vector = SearchVector(*fields, config=config)
objs = Table.objects.annotate(rank=SearchRank(vector, search_query)).\
order_by('-rank')
else:
objs = Table.objects.annotate(search=SearchVector(*fields,
config=config),).\
filter(search=search_query)
return objs
|
Python
| 0.000006
|
@@ -141,16 +141,29 @@
tring):%0A
+ try:%0A
sear
@@ -206,16 +206,20 @@
ng)%0A
+
+
translat
@@ -280,16 +280,20 @@
ation%7D)%0A
+
sear
@@ -355,16 +355,56 @@
trings%5D%0A
+ except:%0A search_strings = %5B%5D%0A
retu
|
6df0e3efd239f7be073057ede44033dc95064a23
|
Fix StringIO import
|
teuthology/task/tests/test_run.py
|
teuthology/task/tests/test_run.py
|
import logging
import pytest
from StringIO import StringIO
from teuthology.exceptions import CommandFailedError
log = logging.getLogger(__name__)
class TestRun(object):
"""
Tests to see if we can make remote procedure calls to the current cluster
"""
def test_command_failed_label(self, ctx, config):
result = ""
try:
ctx.cluster.run(
args=["python", "-c", "assert False"],
label="working as expected, nothing to see here"
)
except CommandFailedError as e:
result = str(e)
assert "working as expected" in result
def test_command_failed_no_label(self, ctx, config):
with pytest.raises(CommandFailedError):
ctx.cluster.run(
args=["python", "-c", "assert False"],
)
def test_command_success(self, ctx, config):
result = StringIO()
ctx.cluster.run(
args=["python", "-c", "print('hi')"],
stdout=result
)
assert result.getvalue().strip() == "hi"
|
Python
| 0.000001
|
@@ -28,24 +28,18 @@
t%0A%0Afrom
-StringIO
+io
import
|
3c1a658195145ff1c0f20b677c50f5932e5ac66a
|
fix yield statement
|
dusty/compiler/compose/__init__.py
|
dusty/compiler/compose/__init__.py
|
import yaml
import pprint
from .. import get_assembled_specs
from ...source import repo_path
from ..port_spec import port_spec_document
from ... import constants
def write_compose_file():
compose_dict = get_compose_dict()
print pprint.pformat(compose_dict)
with open("{}/docker-compose.yml".format(constants.COMPOSE_DIR), 'w') as f:
f.write(yaml.dump(compose_dict, default_flow_style=False, width=10000))
yield "Written to {}".format(constants.COMPOSE_YML_PATH).encode('utf-8')
def get_compose_dict():
assembled_specs = get_assembled_specs()
port_specs = port_spec_document(assembled_specs)
compose_dict = {}
for app_name in assembled_specs['apps'].keys():
compose_dict[app_name] = _composed_app_dict(app_name, assembled_specs, port_specs)
for service_name in assembled_specs.get('services', []):
compose_dict[service_name] = _composed_service_dict(service_name, assembled_specs)
return compose_dict
def _composed_app_dict(app_name, assembled_specs, port_specs):
app_spec = assembled_specs['apps'][app_name]
compose_bundle = app_spec.get("compose", {})
compose_bundle['image'] = app_spec['image']
compose_bundle['command'] = _compile_docker_command(app_spec)
compose_bundle['links'] = app_spec.get('depends', {}).get('services', [])
compose_bundle['volumes'] = _get_compose_volumes(app_name, assembled_specs)
port_str = _get_ports_list(app_name, port_specs)
if port_str:
compose_bundle['ports'] = port_str
return compose_bundle
def _composed_service_dict(service_name, assembled_specs):
return assembled_specs['services'][service_name]
def _get_ports_list(app_name, port_specs):
if app_name not in port_specs['docker_compose']:
return None
return ["{}:{}".format(port_specs['docker_compose'][app_name]['mapped_host_port'],
port_specs['docker_compose'][app_name]['in_container_port'])]
def _compile_docker_command(app_spec):
first_run_file = constants.FIRST_RUN_FILE
command = []
command.append("export PATH=$PATH:{}".format(_container_code_path(app_spec)))
command.append("if [ ! -f {} ]".format(first_run_file))
once_command = app_spec['commands'].get("once", "")
command.append("then touch {}; fi".format(first_run_file))
if once_command:
command.append(once_command)
command.append(app_spec['commands']['always'])
return "bash -c \"{}\"".format('; '.join(command))
def _get_compose_volumes(app_name, assembled_specs):
app_spec = assembled_specs['apps'][app_name]
volumes = []
volumes.append(_get_app_volume_mount(app_spec))
volumes += _get_libs_volume_mounts(app_name, assembled_specs)
return volumes
def _get_app_volume_mount(app_spec):
app_repo_path = repo_path(app_spec['repo'])
return "{}:{}".format(app_repo_path, _container_code_path(app_spec))
def _container_code_path(spec):
return "/gc/{}".format(spec['repo'].split('/')[-1])
def _get_libs_volume_mounts(app_name, assembled_specs):
volumes = []
for lib_name in assembled_specs['apps'][app_name].get('depends', {}).get('libs', []):
lib_spec = assembled_specs['libs'][lib_name]
lib_repo_path = repo_path(lib_spec['repo'])
volumes.append("{}:{}".format(lib_repo_path, _container_code_path(lib_spec)))
return volumes
|
Python
| 0.000001
|
@@ -445,32 +445,63 @@
n to %7B%7D%22.format(
+%22%7B%7D/docker-compose.yml%22.format(
constants.COMPOS
@@ -506,16 +506,12 @@
OSE_
-YML_PATH
+DIR)
).en
|
cc5c52084fedf172d11534a465e155b8948da9b7
|
Add support for command arguments
|
skal.py
|
skal.py
|
# Copyright 2012 Loop Lab
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# TODO: Implement tests and remove sloppy test class and main
# TODO: Detect subcommands from another module
# TODO: Detect subcommands from each module in a package
# TODO: Don't crash app if a subcommand is broken, just don't add it
# TODO: Create decorators for each subcommand to export
__version__ = '0.0.3'
__project_url__ = 'https://github.com/looplab/skal'
import sys
import errno
import argparse
import inspect
import types
class SkalApp(object):
"""A base class for command-subcommand apps.
This class is meant to be used as a base class for the actual application
class in which methods are defined that represents the subcommands.
Consider a simple case:
>>> class MyApp(SkalApp):
... @command
... def first(self):
... print("first")
...
>>> app = MyApp()
>>> app.run()
This will create a simple app which has one method that is made a command
by uisng the @command decorator. If run from the command line it will
respond to a call like this: "python myapp.py first"
"""
def __init__(self):
"""Creates the argparser using metadata from decorators.
"""
main_module = sys.modules['__main__']
version = ''
if hasattr(main_module, '__version__'):
version = str(main_module.__version__)
self.__argparser = argparse.ArgumentParser(description = self.__doc__)
self.__argparser.add_argument(
'--version',
action = 'version',
version = ('%(prog)s v' + version))
if hasattr(self.__class__, '__skal__'):
for k in self.__class__.__skal__:
arg = []
if type(k) == str:
arg.append(k)
elif type(k) == tuple:
short, full = k
if type(short) == str:
arg.append(short)
if type(full) == str:
arg.append(full)
options = self.__class__.__skal__[k]
self.__argparser.add_argument(*arg, **options)
# Add all subcommands by introspection
self.__subparser = self.__argparser.add_subparsers(dest = 'command')
methods = inspect.getmembers(self.__class__, inspect.ismethod)
for name, method in methods:
if (hasattr(method, 'skal_meta')):
command = self.__subparser.add_parser(
name, help = inspect.getdoc(method))
bound_method = types.MethodType(method, self, self.__class__)
command.set_defaults(cmd = bound_method)
def run(self, args = None):
"""Applicatin starting point.
This will run the associated method/function/module or print a help
list if it's an unknown keyword or the syntax is incorrect.
The suggested usage is as an argument to sys.exit():
>>> sys.exit(app.run())
Keyword arguments:
args -- Custom application arguments (default sys.argv)
"""
self.args = self.__argparser.parse_args(args = args)
try:
if 'cmd' in self.args:
return self.args.cmd()
except KeyboardInterrupt:
return errno.EINTR
def command(f):
"""Decorator to tell Skal that the method/function is a command.
"""
f.skal_meta = {}
return f
|
Python
| 0.000113
|
@@ -574,301 +574,8 @@
.%0A%0A%0A
-# TODO: Implement tests and remove sloppy test class and main%0A# TODO: Detect subcommands from another module%0A# TODO: Detect subcommands from each module in a package%0A# TODO: Don't crash app if a subcommand is broken, just don't add it%0A# TODO: Create decorators for each subcommand to export%0A%0A%0A
__ve
@@ -593,9 +593,9 @@
0.0.
-3
+4
'%0A__
@@ -1615,16 +1615,51 @@
ion__)%0A%0A
+ # Add main parser and help%0A
@@ -1898,549 +1898,183 @@
-if hasattr(self.__class__, '__skal__'):%0A for k in self.__class__.__skal__:%0A arg = %5B%5D%0A if type(k) == str:%0A arg.append(k)%0A elif type(k) == tuple:%0A short, full = k%0A if type(short) == str:%0A arg.append(short)%0A if type(full) == str:%0A arg.append(full)%0A options = self.__class__.__skal__%5Bk%5D%0A self.__argparser.add_argument(*arg, **options
+# Add all global arguments from the __args__ dictionary%0A if hasattr(self.__class__, '__args__'):%0A _add_arguments(self.__class__.__args__, self.__argparser
)%0A%0A
-%0A
@@ -2334,25 +2334,21 @@
ethod, '
-skal_meta
+_args
')):%0A
@@ -2460,16 +2460,70 @@
ethod))%0A
+ _add_arguments(method._args, command)%0A
@@ -2650,16 +2650,17 @@
ethod)%0A%0A
+%0A
def
@@ -3293,16 +3293,34 @@
ommand(f
+unc_or_args = None
):%0A %22
@@ -3400,35 +3400,734 @@
-f.skal_meta = %7B%7D%0A return f%0A
+def decorator(f):%0A f._args = args%0A return f%0A if type(func_or_args) == type(decorator):%0A args = %7B%7D%0A return decorator(func_or_args)%0A args = func_or_args%0A return decorator%0A%0A%0Adef default():%0A %22%22%22Decorator to tell Skal that the method/function is the default.%0A%0A %22%22%22%0A raise NotImplementedError%0A%0A%0Adef _add_arguments(args, argparser):%0A for k in args:%0A arg = %5B%5D%0A if type(k) == str:%0A arg.append(k)%0A elif type(k) == tuple:%0A short, full = k%0A if type(short) == str:%0A arg.append(short)%0A if type(full) == str:%0A arg.append(full)%0A options = args%5Bk%5D%0A argparser.add_argument(*arg, **options)
%0A
|
8e24d3139c11428cda1e07da62ff007be9c77424
|
Add convenience method.
|
abilian/testing/__init__.py
|
abilian/testing/__init__.py
|
"""Base stuff for testing.
"""
import os
import subprocess
import requests
assert not 'twill' in subprocess.__file__
from flask.ext.testing import TestCase
from abilian.application import Application
__all__ = ['TestConfig', 'BaseTestCase']
class TestConfig(object):
SQLALCHEMY_DATABASE_URI = "sqlite://"
SQLALCHEMY_ECHO = False
TESTING = True
SECRET_KEY = "SECRET"
CSRF_ENABLED = False
class BaseTestCase(TestCase):
config_class = TestConfig
application_class = Application
def create_app(self):
config = self.config_class()
self.app = self.application_class(config)
return self.app
def setUp(self):
self.app.create_db()
self.session = self.db.session
def tearDown(self):
self.db.session.remove()
self.db.drop_all()
self.db.engine.dispose()
@property
def db(self):
return self.app.extensions['sqlalchemy'].db
# Useful for debugging
def dump_routes(self):
rules = list(self.app.url_map.iter_rules())
rules.sort(key=lambda x: x.rule)
for rule in rules:
print rule, rule.methods, rule.endpoint
#
# Validates HTML if asked by the config or the Unix environment
#
def get(self, url, validate=True):
response = self.client.get(url)
if not validate or response != 200:
return response
validator_url = self.app.config.get('VALIDATOR_URL') \
or os.environ.get('VALIDATOR_URL')
if not validator_url:
return response
content_type = response.headers['Content-Type']
if content_type.split(';')[0].strip() != 'text/html':
return response
return self.validate(url, response.data, content_type, validator_url)
# TODO: post(), put(), etc.
def assert_valid(self, response):
validator_url = self.app.config.get('VALIDATOR_URL') \
or os.environ.get('VALIDATOR_URL')
if validator_url:
self.validate(None, response.data,
response.headers['Content-Type'], validator_url)
def validate(self, url, content, content_type, validator_url):
response = requests.post(validator_url + '?out=json', content,
headers={'Content-Type': content_type})
body = response.json()
for message in body['messages']:
if message['type'] == 'error':
detail = u'on line %s [%s]\n%s' % (
message['lastLine'],
message['extract'],
message['message'])
self.fail((u'Got a validation error for %r:\n%s' %
(url, detail)).encode('utf-8'))
|
Python
| 0
|
@@ -1084,16 +1084,89 @@
dpoint%0A%0A
+ def assert_302(self, response):%0A self.assert_status(response, 302)%0A%0A
#%0A #
|
fa037d3ab37d49ac8c685ee0de4d78aa63b1a097
|
Fixing the object_name freezig
|
south/creator/freezer.py
|
south/creator/freezer.py
|
"""
Handles freezing of models into FakeORMs.
"""
import sys
from django.db import models
from django.contrib.contenttypes.generic import GenericRelation
from south.orm import FakeORM
from south import modelsinspector
def freeze_apps(apps):
"""
Takes a list of app labels, and returns a string of their frozen form.
"""
if isinstance(apps, basestring):
apps = [apps]
frozen_models = set()
# For each app, add in all its models
for app in apps:
for model in models.get_models(models.get_app(app)):
frozen_models.add(model)
# Now, add all the dependencies
for model in list(frozen_models):
frozen_models.update(model_dependencies(model))
# Serialise!
model_defs = {}
for model in frozen_models:
model_defs[model_key(model)] = prep_for_freeze(model)
# Check for any custom fields that failed to freeze.
missing_fields = False
for key, fields in model_defs.items():
for field_name, value in fields.items():
if value is None:
missing_fields = True
print " ! Cannot freeze field '%s.%s'" % (key, field_name)
if missing_fields:
print ""
print " ! South cannot introspect some fields; this is probably because they are custom"
print " ! fields. If they worked in 0.6 or below, this is because we have removed the"
print " ! models parser (it often broke things)."
print " ! To fix this, read http://south.aeracode.org/wiki/MyFieldsDontWork"
sys.exit(1)
return model_defs
def freeze_apps_to_string(apps):
return pprint_frozen_models(freeze_apps(apps))
###
def model_key(model):
"For a given model, return 'appname.modelname'."
return "%s.%s" % (model._meta.app_label, model._meta.object_name.lower())
def prep_for_freeze(model):
"""
Takes a model and returns the ready-to-serialise dict (all you need
to do is just pretty-print it).
"""
fields = modelsinspector.get_model_fields(model, m2m=True)
# Remove useless attributes (like 'choices')
for name, field in fields.items():
fields[name] = remove_useless_attributes(field)
# See if there's a Meta
fields['Meta'] = remove_useless_meta(modelsinspector.get_model_meta(model))
# Add in our own special items to track the object name and managed
fields['Meta']['object_name'] = repr(model._meta.object_name)
fields['Meta']['managed'] = repr(model._meta.managed)
return fields
### Dependency resolvers
def model_dependencies(model, checked_models=None):
"""
Returns a set of models this one depends on to be defined; things like
OneToOneFields as ID, ForeignKeys everywhere, etc.
"""
depends = set()
checked_models = checked_models or set()
# Get deps for each field
for field in model._meta.fields + model._meta.many_to_many:
depends.update(field_dependencies(field))
# Now recurse
new_to_check = depends - checked_models
while new_to_check:
checked_model = new_to_check.pop()
if checked_model == model or checked_model in checked_models:
continue
checked_models.add(checked_model)
deps = model_dependencies(checked_model, checked_models)
# Loop through dependencies...
for dep in deps:
# If the new dep is not already checked, add to the queue
if (dep not in depends) and (dep not in new_to_check) and (dep not in checked_models):
new_to_check.add(dep)
depends.add(dep)
return depends
def field_dependencies(field, checked_models=None):
checked_models = checked_models or set()
depends = set()
if isinstance(field, (models.OneToOneField, models.ForeignKey, models.ManyToManyField, GenericRelation)):
if field.rel.to in checked_models:
return depends
checked_models.add(field.rel.to)
depends.add(field.rel.to)
depends.update(field_dependencies(field.rel.to._meta.pk, checked_models))
return depends
### Prettyprinters
def pprint_frozen_models(models):
return "{\n %s\n }" % ",\n ".join([
"%r: %s" % (name, pprint_fields(fields))
for name, fields in sorted(models.items())
])
def pprint_fields(fields):
return "{\n %s\n }" % ",\n ".join([
"%r: %r" % (name, defn)
for name, defn in sorted(fields.items())
])
### Output sanitisers
USELESS_KEYWORDS = ["choices", "help_text", "upload_to", "verbose_name", "storage"]
USELESS_DB_KEYWORDS = ["related_name"] # Important for ORM, not for DB.
def remove_useless_attributes(field, db=False):
"Removes useless (for database) attributes from the field's defn."
keywords = USELESS_KEYWORDS
if db:
keywords += USELESS_DB_KEYWORDS
if field:
for name in keywords:
if name in field[2]:
del field[2][name]
return field
USELESS_META = ["verbose_name", "verbose_name_plural"]
def remove_useless_meta(meta):
"Removes useless (for database) attributes from the table's meta."
if meta:
for name in USELESS_META:
if name in meta:
del meta[name]
return meta
|
Python
| 0.999984
|
@@ -2402,29 +2402,24 @@
ct_name'%5D =
-repr(
model._meta.
@@ -2425,25 +2425,50 @@
.object_name
-)
+ # Special: not eval'able.
%0A fields%5B
|
405e40f73d8eb2464f49b90ed48d0a51591d8063
|
Fix import in example
|
examples/imdb_cnn.py
|
examples/imdb_cnn.py
|
# coding: utf8
from __future__ import unicode_literals
import tqdm
from thinc.i2v import StaticVectors, HashEmbed
from thinc.v2v import Model, Maxout, Softmax
from thinc.t2t import ExtractWindow
from thinc.t2t import ParametricAttention
from thinc.t2v import Pooling, sum_pool
from thinc.misc import LayerNorm as LN
from thinc.misc import Residual
from thinc.extra import datasets
from thinc.neural.util import to_categorical
from thinc.api import layerize, chain, concatenate, clone
from thinc.api import foreach, flatten_add_lengths, with_getitem
from thinc.api import FeatureExtracter
import spacy
from spacy.attrs import ORTH, LOWER, SHAPE, PREFIX, SUFFIX, ID
from thinc.neural.ops import CupyOps
from spacy.util import compounding
@layerize
def get_sents(docs, drop=0.0):
sents = [list(doc.sents) for doc in docs]
return sents, None
def build_model(nr_class, width, depth, conv_depth, **kwargs):
with Model.define_operators({"|": concatenate, ">>": chain, "**": clone}):
embed = (
HashEmbed(width, 5000, column=1)
| StaticVectors("spacy_pretrained_vectors", width, column=5)
| HashEmbed(width // 2, 750, column=2)
| HashEmbed(width // 2, 750, column=3)
| HashEmbed(width // 2, 750, column=4)
) >> LN(Maxout(width))
sent2vec = (
flatten_add_lengths
>> with_getitem(
0,
embed
>> Residual(ExtractWindow(nW=1) >> LN(Maxout(width))) ** conv_depth,
)
>> ParametricAttention(width)
>> Pooling(sum_pool)
>> Residual(LN(Maxout(width))) ** depth
)
model = (
foreach(sent2vec, drop_factor=2.0)
>> flatten_add_lengths
# This block would allow the model to learn some cross-sentence
# features. It's not useful on this problem. It might make more
# sense to use a BiLSTM here, following Liang et al (2016).
# >> with_getitem(0,
# Residual(ExtractWindow(nW=1) >> LN(Maxout(width))) ** conv_depth
# )
>> ParametricAttention(width, hard=False)
>> Pooling(sum_pool)
>> Residual(LN(Maxout(width))) ** depth
>> Softmax(nr_class)
)
model.lsuv = False
return model
def main(use_gpu=False, nb_epoch=100):
if use_gpu:
Model.ops = CupyOps()
Model.Ops = CupyOps
train, test = datasets.imdb(limit=2000)
print("Load data")
train_X, train_y = zip(*train)
test_X, test_y = zip(*test)
train_y = Model.ops.asarray(to_categorical(train_y, nb_classes=2))
test_y = Model.ops.asarray(to_categorical(test_y, nb_classes=2))
nlp = spacy.load("en_vectors_web_lg")
nlp.add_pipe(nlp.create_pipe("sentencizer"), first=True)
preprocessor = FeatureExtracter([ORTH, LOWER, PREFIX, SUFFIX, SHAPE, ID])
train_X = [preprocessor(list(doc.sents)) for doc in tqdm.tqdm(nlp.pipe(train_X))]
test_X = [preprocessor(list(doc.sents)) for doc in tqdm.tqdm(nlp.pipe(test_X))]
dev_X = train_X[-1000:]
dev_y = train_y[-1000:]
train_X = train_X[:-1000]
train_y = train_y[:-1000]
print("Parse data")
n_sent = sum([len(list(sents)) for sents in train_X])
print("%d sentences" % n_sent)
model = build_model(
2, width=128, conv_depth=2, depth=2, train_X=train_X, train_y=train_y
)
with model.begin_training(train_X[:100], train_y[:100]) as (trainer, optimizer):
epoch_loss = [0.0]
def report_progress():
with model.use_params(optimizer.averages):
print(
epoch_loss[-1],
epoch_var[-1],
model.evaluate(dev_X, dev_y),
trainer.dropout,
)
epoch_loss.append(0.0)
epoch_var.append(0.0)
trainer.each_epoch.append(report_progress)
batch_sizes = compounding(64, 64, 1.01)
trainer.dropout = 0.3
trainer.batch_size = int(next(batch_sizes))
trainer.dropout_decay = 0.0
trainer.nb_epoch = nb_epoch
# optimizer.alpha = 0.1
# optimizer.max_grad_norm = 10.0
# optimizer.b1 = 0.0
# optimizer.b2 = 0.0
epoch_var = [0.0]
for X, y in trainer.iterate(train_X, train_y):
yh, backprop = model.begin_update(X, drop=trainer.dropout)
losses = ((yh - y) ** 2.0).sum(axis=1) / y.shape[0]
epoch_var[-1] += losses.var()
loss = losses.mean()
backprop((yh - y) / yh.shape[0], optimizer)
epoch_loss[-1] += loss
trainer.batch_size = int(next(batch_sizes))
with model.use_params(optimizer.averages):
print("Avg dev.: %.3f" % model.evaluate(dev_X, dev_y))
if __name__ == "__main__":
main()
|
Python
| 0.000002
|
@@ -547,35 +547,36 @@
item%0Afrom thinc.
-api
+misc
import FeatureE
|
7292b2d276db056870993a108466fccc18debcae
|
Update count-different-palindromic-subsequences.py
|
Python/count-different-palindromic-subsequences.py
|
Python/count-different-palindromic-subsequences.py
|
# Time: O(n^2)
# Space: O(n^2)
class Solution(object):
def countPalindromicSubsequences(self, S):
"""
:type S: str
:rtype: int
"""
def dp(i, j, prv, nxt, lookup):
if lookup[i][j] is not None:
return lookup[i][j]
result = 1
if i <= j:
for x in xrange(4):
i0 = nxt[i][x]
j0 = prv[j][x]
if i <= i0 <= j:
result = (result + 1) % P
if None < i0 < j0:
result = (result + dp(i0+1, j0-1, prv, nxt, lookup)) % P
result %= P
lookup[i][j] = result
return result
prv = [None] * len(S)
nxt = [None] * len(S)
last = [None] * 4
for i in xrange(len(S)):
last[ord(S[i])-ord('a')] = i
prv[i] = tuple(last)
last = [None] * 4
for i in reversed(xrange(len(S))):
last[ord(S[i])-ord('a')] = i
nxt[i] = tuple(last)
P = 10**9 + 7
lookup = [[None] * len(S) for _ in xrange(len(S))]
return dp(0, len(S)-1, prv, nxt, lookup) - 1
|
Python
| 0.000001
|
@@ -26,16 +26,1008 @@
O(n%5E2)%0A%0A
+# Given a string S, find the number of different non-empty palindromic subsequences in S,%0A# and return that number modulo 10%5E9 + 7.%0A#%0A# A subsequence of a string S is obtained by deleting 0 or more characters from S.%0A#%0A# A sequence is palindromic if it is equal to the sequence reversed.%0A#%0A# Two sequences A_1, A_2, ... and B_1, B_2, ... are different if there is some i for which A_i != B_i.%0A#%0A# Example 1:%0A# Input: %0A# S = 'bccb'%0A# Output: 6%0A# Explanation: %0A# The 6 different non-empty palindromic subsequences are 'b', 'c', 'bb', 'cc', 'bcb', 'bccb'.%0A# Note that 'bcb' is counted only once, even though it occurs twice.%0A#%0A# Example 2:%0A# Input: %0A# S = 'abcdabcdabcdabcdabcdabcdabcdabcddcbadcbadcbadcbadcbadcbadcbadcba'%0A# Output: 104860361%0A# %0A# Explanation: %0A# There are 3104860382 different non-empty palindromic subsequences, which is 104860361 modulo 10%5E9 + 7.%0A# Note:%0A# - The length of S will be in the range %5B1, 1000%5D.%0A# - Each character S%5Bi%5D will be in the set %7B'a', 'b', 'c', 'd'%7D.%0A%0A
class So
|
358de4c3ce20569e217b1caf5c25ce826b536bbc
|
Reformat datastructuretools
|
supriya/tools/datastructuretools/__init__.py
|
supriya/tools/datastructuretools/__init__.py
|
# -*- encoding: utf-8 -*-
r'''
Tools for working with generic datastructures.
'''
from abjad.tools import systemtools
systemtools.ImportManager.import_structured_package(
__path__[0],
globals(),
)
|
Python
| 0.000001
|
@@ -21,19 +21,19 @@
8 -*-%0A%0Ar
-'''
+%22%22%22
%0ATools f
@@ -76,11 +76,11 @@
es.%0A
-'''
+%22%22%22
%0A%0Afr
|
0e2bc29486fc1e09b6d90ccdbe21095f73848d48
|
remove the event listener check
|
speakerbot/listenable.py
|
speakerbot/listenable.py
|
from dynamic_class import Singleton
class NotEventException(Exception):
pass
class GlobalEventDispatcher(object):
"""not quite there yet"""
__metaclass__ = Singleton
def __init__(self):
pass
def event(method):
"""Must be called first in a decorator chain, otherwise we lose the correct name property"""
def wrapped(*args, **kwargs):
self = args[0]
if self.dispatch_events(self._interrogators, method.__name__, *args, **kwargs):
#Self will be removed and put back in the run_manglers routine.
args, kwargs = self.run_manglers(method.__name__, *args, **kwargs)
result = method(*args, **kwargs)
kwargs["event_result"] = result
self.dispatch_events(self._listeners, method.__name__, *args, **kwargs)
return result
wrapped.is_event = True
method.is_event = True
return wrapped
def listenable(klass):
"""
Class decorator to implement a lightweight event-dispatch model.
@listenable on the class
@event on the method you want to monitor
listeners must implement the function signature of the event exactly (or take *args, **kwargs generically),
plus a special argument called "event_result" that contains the return value of the method invocation.
TODO: Make it work with other decorators, inheritance
"""
def _attach(self, event, func, handler_collection_name):
if not hasattr(getattr(self, event), "is_event"):
raise NotEventException("This method hasn't been decorated as an event listener")
handler_collection = getattr(self, handler_collection_name)
handlers = handler_collection.get(event, [])
handlers.append(func)
handler_collection[event] = handlers
setattr(self, handler_collection_name, handler_collection)
def attach_interrogator(self, event, interrogator):
_attach(self, event, interrogator, "_interrogators")
def attach_listener(self, event, listener):
_attach(self, event, listener, "_listeners")
def attach_mangler(self, event, listener):
_attach(self, event, listener, "_manglers")
def run_manglers(self, method_name, *args, **kwargs):
old_self = args[0] #Get the self reference
args = args[1:] #Remove the self reference
for mangler in self._manglers.get(method_name, []):
try:
#pop off the instance information. We just want the function signature
args, kwargs = mangler(*args, **kwargs)
except Exception as e:
print "Argument mangler %s failed with exception %s. It reported the following: %s" % (mangler.__name__, e.__class__.__name__, str(e))
args = list(args)
args.insert(0, old_self)
args = tuple(args)
return args, kwargs
def dispatch_events(self, handler_collection, method_name, *args, **kwargs):
please_do_continue = True
for handler in handler_collection.get(method_name, []):
try:
#pop off the instance information. We just want the function signature
please_do_continue = handler(*args[1:], **kwargs)
if please_do_continue == None:
please_do_continue = True
if not please_do_continue:
print "The event processing was cancelled by %s" % handler.__name__
break
except Exception as e:
print "Event listener %s failed with exception %s. It reported the following: %s" % (handler.__name__, e.__class__.__name__, str(e))
return please_do_continue
setattr(klass, "_listeners", {})
setattr(klass, "_interrogators", {})
setattr(klass, "_manglers", {})
setattr(klass, "attach_listener", attach_listener)
setattr(klass, "attach_interrogator", attach_interrogator)
setattr(klass, "attach_mangler", attach_mangler)
setattr(klass, "dispatch_events", dispatch_events)
setattr(klass, "run_manglers", run_manglers)
return klass
|
Python
| 0.000004
|
@@ -1473,24 +1473,80 @@
%0A
+#Hell is interacting with other people's code.%0A #
if not hasat
@@ -1588,24 +1588,25 @@
):%0A%0A
+#
raise No
|
79fd01202255e0b00ca2fe90834dbd4e15dd84bc
|
Print NVIDIA license notice only when actually downloading the CUDA headers repository.
|
third_party/cuda/dependencies.bzl
|
third_party/cuda/dependencies.bzl
|
"""CUDA headers repository."""
def _download_nvidia_headers(repository_ctx, output, url, sha256, strip_prefix):
# Keep the mirror up-to-date manually (see b/154869892) with:
# /google/bin/releases/tensorflow-devinfra-team/cli_tools/tf_mirror <url>
repository_ctx.download_and_extract(
url = [
"http://gitlab.com/nvidia/headers/" + url,
"http://mirror.tensorflow.org/gitlab.com/nvidia/headers/" + url,
],
output = output,
sha256 = sha256,
stripPrefix = strip_prefix,
)
def _cuda_headers_impl(repository_ctx):
tag = "cuda-10-2"
for name, sha256 in [
("cublas", "9537c3e89a85ea0082217e326cd8e03420f7723e05c98d730d80bda8b230c81b"),
("cudart", "8a203bd87a2fde37608e8bc3c0c9347b40586906c613b6bef0bfc3995ff40099"),
("cufft", "bac1602183022c7a9c3e13078fcac59e4eee0390afe99c3c7348c894a97e19dd"),
("cusolver", "68e049c1d27ad3558cddd9ad82cf885b6789f1f01934f9b60340c391fa8e6279"),
("misc", "5e208a8e0f25c9df41121f0502eadae903fa64f808437516198004bdbf6af04b"),
]:
url = "cuda-individual/{name}/-/archive/{tag}/{name}-{tag}.tar.gz".format(name = name, tag = tag)
strip_prefix = "{name}-{tag}".format(name = name, tag = tag)
_download_nvidia_headers(repository_ctx, "cuda", url, sha256, strip_prefix)
repository_ctx.symlink(Label("//third_party/cuda:cuda_headers.BUILD"), "BUILD")
def _cudnn_headers_impl(repository_ctx):
tag = "v7.6.5"
url = "cudnn/-/archive/{tag}/cudnn-{tag}.tar.gz".format(tag = tag)
strip_prefix = "cudnn-{tag}".format(tag = tag)
sha256 = "ef45f4649328da678285b8ce589a8296cedcc93819ffdbb5eea5346a0619a766"
_download_nvidia_headers(repository_ctx, "cudnn", url, sha256, strip_prefix)
repository_ctx.symlink(Label("//third_party/cuda:cudnn_headers.BUILD"), "BUILD")
_cuda_headers = repository_rule(
implementation = _cuda_headers_impl,
# remotable = True,
)
_cudnn_headers = repository_rule(
implementation = _cudnn_headers_impl,
# remotable = True,
)
def cuda_dependencies():
print("The following command will download NVIDIA proprietary " +
"software. By using the software you agree to comply with the " +
"terms of the license agreement that accompanies the software. " +
"If you do not agree to the terms of the license agreement, do " +
"not use the software.")
_cuda_headers(name = "cuda_headers")
_cudnn_headers(name = "cudnn_headers")
|
Python
| 0
|
@@ -575,32 +575,461 @@
epository_ctx):%0A
+ build_file = Label(%22//third_party/cuda:cuda_headers.BUILD%22)%0A%0A print(%22%5Cn%5C033%5B22;33mNOTICE:%5C033%5B0m The following command will download %22 +%0A %22NVIDIA proprietary software. By using the software you agree to %22 +%0A %22comply with the terms of the license agreement that accompanies %22 +%0A %22the software. If you do not agree to the terms of the license %22 +%0A %22agreement, do not use the software.%22)%0A%0A
tag = %22cuda-
@@ -1797,54 +1797,18 @@
ink(
-Label(%22//third_party/cuda:cuda_headers.BUILD%22)
+build_file
, %22B
@@ -1852,24 +1852,90 @@
itory_ctx):%0A
+ build_file = Label(%22//third_party/cuda:cudnn_headers.BUILD%22)%0A%0A
tag = %22v
@@ -2256,55 +2256,18 @@
ink(
-Label(%22//third_party/cuda:cudnn_headers.BUILD%22)
+build_file
, %22B
@@ -2507,344 +2507,8 @@
():%0A
- print(%22The following command will download NVIDIA proprietary %22 +%0A %22software. By using the software you agree to comply with the %22 +%0A %22terms of the license agreement that accompanies the software. %22 +%0A %22If you do not agree to the terms of the license agreement, do %22 +%0A %22not use the software.%22)%0A%0A
|
9098692bf431b4947da96dc054fe8e1559e27aa5
|
Update hexagon_nn_headers to v1.10.3.1.3 Changes Includes: * Support soc_id:371 * New method exposed that returns the version of hexagon_nn used in libhexagon_interface.so
|
third_party/hexagon/workspace.bzl
|
third_party/hexagon/workspace.bzl
|
"""Loads the Hexagon NN Header files library, used by TF Lite."""
load("//third_party:repo.bzl", "third_party_http_archive")
def repo():
third_party_http_archive(
name = "hexagon_nn",
sha256 = "4cbf3c18834e24b1f64cc507f9c2f22b4fe576c6ff938d55faced5d8f1bddf62",
urls = [
"https://storage.googleapis.com/mirror.tensorflow.org/storage.cloud.google.com/download.tensorflow.org/tflite/hexagon_nn_headers_v1.10.3.1.2.tgz",
],
build_file = "//third_party/hexagon:BUILD",
)
|
Python
| 0
|
@@ -213,72 +213,72 @@
= %22
-4cbf3c18834e24b1f64cc507f9c2f22b4fe576c6ff938d55faced5d8f1bddf62
+281d46b47f7191f03a8a4071c4c8d2af9409bb9d59573dc2e42f04c4fd61f1fd
%22,%0A
@@ -444,17 +444,17 @@
.10.3.1.
-2
+3
.tgz%22,%0A
|
2a2224a2babaf20919c0091bcfd4b6109eadcecb
|
Fix issue with internal user and auditor
|
polyaxon/api/repos/views.py
|
polyaxon/api/repos/views.py
|
import logging
import os
from rest_framework.generics import RetrieveUpdateDestroyAPIView, get_object_or_404
from rest_framework.permissions import IsAuthenticated
from rest_framework.response import Response
from rest_framework.settings import api_settings
from django.conf import settings
from django.http import Http404, HttpResponseServerError
import auditor
from api.repos.serializers import RepoSerializer
from api.repos.tasks import handle_new_files
from api.utils.views import ProtectedView, UploadView
from db.models.repos import Repo
from event_manager.events.repo import REPO_CREATED, REPO_DOWNLOADED
from libs.permissions.authentication import InternalAuthentication, is_internal_user
from libs.permissions.internal import IsAuthenticatedOrInternal, IsInternal
from libs.permissions.projects import get_permissible_project
from libs.repos import git
from libs.repos.git import set_git_repo
logger = logging.getLogger(__name__)
class RepoDetailView(RetrieveUpdateDestroyAPIView):
queryset = Repo.objects.all()
serializer_class = RepoSerializer
permission_classes = (IsAuthenticated,)
def get_object(self):
return get_object_or_404(Repo, project=get_permissible_project(view=self))
class DownloadFilesView(ProtectedView):
HANDLE_UNAUTHENTICATED = False
authentication_classes = api_settings.DEFAULT_AUTHENTICATION_CLASSES + [
InternalAuthentication,
]
permission_classes = (IsAuthenticatedOrInternal, )
def get_object(self):
project = get_permissible_project(view=self)
try:
repo = Repo.objects.get(project=project)
except Repo.DoesNotExist:
raise Http404('Repo does not exist.')
if is_internal_user(self.request.user):
auditor.record(event_type=REPO_DOWNLOADED, instance=repo, actor_id=self.request.user.id)
return repo
def get(self, request, *args, **kwargs):
repo = self.get_object()
archive_path, archive_name = git.archive_repo(repo.git, repo.project.name)
return self.redirect(path='/archived_repos/{}'.format(archive_name))
class UploadFilesView(UploadView):
def get_object(self):
project = get_permissible_project(view=self)
if project.has_notebook:
self.permission_denied(
self.request,
'The Project `{}` is currently running a Notebook. '
'You must stop it before uploading a new version of the code.'.format(project.name))
repo, created = Repo.objects.get_or_create(project=project)
if not created and not os.path.isdir(repo.user_path):
set_git_repo(repo)
else:
auditor.record(event_type=REPO_CREATED, instance=repo, actor_id=self.request.user.id)
return repo
def put(self, request, *args, **kwargs):
user = request.user
repo = self.get_object()
path = os.path.join(settings.UPLOAD_ROOT, user.username)
if not os.path.exists(path):
os.makedirs(path)
try:
tar_file_name = self._handle_posted_data(request=request,
filename='{}.tar.gz'.format(repo.project.name),
directory=path,
upload_filename='repo')
except (IOError, os.error) as e: # pragma: no cover
logger.warning(
'IOError while trying to save posted data (%s): %s', e.errno, e.strerror)
return HttpResponseServerError()
json_data = self._handle_json_data(request)
is_async = json_data.get('async')
if is_async is False:
file_handler = handle_new_files
else:
file_handler = handle_new_files.delay
file_handler(user_id=user.id, repo_id=repo.id, tar_file_name=tar_file_name)
# do some stuff with uploaded file
return Response(status=204)
|
Python
| 0.000004
|
@@ -1705,16 +1705,20 @@
if
+not
is_inter
|
86391ed76c49578321c026187f159c53c2cf4ed1
|
Fix slack welcome message display bug and add user handle
|
orchestra/slack.py
|
orchestra/slack.py
|
import base64
from uuid import uuid1
from django.conf import settings
import slacker
from orchestra.utils.settings import run_if
class SlackService(object):
"""
Wrapper slack service to allow easy swapping and mocking out of API.
"""
def __init__(self, api_key):
self._service = slacker.Slacker(api_key)
for attr_name in ('chat', 'groups', 'users'):
setattr(self, attr_name, getattr(self._service, attr_name))
@run_if('SLACK_EXPERTS')
def add_worker_to_project_team(worker, project):
slack = SlackService(settings.SLACK_EXPERTS_API_KEY)
try:
response = slack.groups.invite(project.slack_group_id,
slack.users.get_user_id(
worker.slack_username))
if not response.body['already_in_group']:
welcome_message = ('{} has been added to the team. '
'Welcome aboard!').format(worker.user.username)
slack.chat.post_message(project.slack_group_id, welcome_message)
except:
# TODO(jrbotros): for now, using slack on a per-worker basis is
# optional; we'll want to rethink this in the future
pass
@run_if('SLACK_EXPERTS')
def create_project_slack_group(project):
"""
Create slack channel for project team communication
"""
slack = SlackService(settings.SLACK_EXPERTS_API_KEY)
response = slack.groups.create(_project_slack_group_name(project))
project.slack_group_id = response.body['group']['id']
slack.groups.set_topic(project.slack_group_id, project.short_description)
slack.groups.set_purpose(project.slack_group_id,
'Discussing work on `{}`'.format(
project.short_description))
project.save()
return project.slack_group_id
def _project_slack_group_name(project):
"""
Return a unique identifier for project slack groups; must fit into slack's
21 char limit for group names.
"""
return base64.b64encode(uuid1().bytes)
|
Python
| 0
|
@@ -597,32 +597,31 @@
ry:%0A
-response
+user_id
= slack.gro
@@ -621,175 +621,119 @@
ack.
-groups.invite(project.slack_group_id,%0A slack.users.get_user_id(%0A worker.slack_username)
+users.get_user_id(worker.slack_username)%0A response = slack.groups.invite(project.slack_group_id, user_id
)%0A
@@ -758,17 +758,21 @@
nse.body
-%5B
+.get(
'already
@@ -781,17 +781,17 @@
n_group'
-%5D
+)
:%0A
@@ -819,11 +819,34 @@
= (
-'%7B%7D
+%0A '%3C@%7B%7D%7C%7B%7D%3E
has
@@ -888,31 +888,16 @@
-
-
'Welcome
@@ -918,20 +918,30 @@
mat(
+user_id,
worker.
-user.
+slack_
user
|
e7b50269a6d83234b283f769265bf474666b6cd2
|
Update project model with property has_description
|
polyaxon/projects/models.py
|
polyaxon/projects/models.py
|
import uuid
from django.conf import settings
from django.core.validators import validate_slug
from django.db import models
from libs.blacklist import validate_blacklist_name
from libs.models import DescribableModel, DiffModel
class Project(DiffModel, DescribableModel):
"""A model that represents a set of experiments to solve a specific problem."""
uuid = models.UUIDField(
default=uuid.uuid4,
editable=False,
unique=True,
null=False)
name = models.CharField(
max_length=256,
validators=[validate_slug, validate_blacklist_name])
user = models.ForeignKey(
settings.AUTH_USER_MODEL,
on_delete=models.CASCADE,
related_name='projects')
is_public = models.BooleanField(
default=True,
help_text='If project is public or private.')
def __str__(self):
return self.unique_name
class Meta:
unique_together = (('user', 'name'),)
@property
def unique_name(self):
return '{}.{}'.format(self.user.username, self.name)
@property
def has_code(self):
return hasattr(self, 'repo')
@property
def tensorboard(self):
if settings.DEPLOY_RUNNER:
return self.tensorboard_jobs.last()
return None
@property
def notebook(self):
if settings.DEPLOY_RUNNER:
return self.notebook_jobs.last()
return None
@property
def has_tensorboard(self):
tensorboard = self.tensorboard
return tensorboard and tensorboard.is_running
@property
def has_notebook(self):
notebook = self.notebook
return notebook and notebook.is_running
|
Python
| 0
|
@@ -1126,24 +1126,108 @@
f, 'repo')%0A%0A
+ @property%0A def has_description(self):%0A return bool(self.description)%0A%0A
@propert
|
76bf774f3af2fb4fc2518945944b9f64c413712a
|
Simplify "cursor" function in "misc" module
|
autoload/breeze/utils/misc.py
|
autoload/breeze/utils/misc.py
|
# -*- coding: utf-8 -*-
"""
breeze.utils.misc
~~~~~~~~~~~~~~~~~
This module defines various utility functions and some tiny wrappers
around vim functions.
"""
import vim
import breeze.utils.settings
def echom(msg):
"""Gives a simple feedback to the user via the command line."""
vim.command('echom "[breeze] {0}"'.format(msg.replace('"', '\"')))
def echov(msg):
"""Gives a feedback only if g:breeze_verbosity = 1."""
if breeze.utils.settings.get("verbosity", bool):
echom(msg)
def cursor(target=None, kj=False):
"""Moves the cursor.
If the kj parameter is set to True, then the command behaves as following:
:help keepjumps -> Moving around in {command} does not change the '', '.
and '^ marks, the jumplist or the changelist...
"""
if not target:
return vim.current.window.cursor
vim.command("{0}call cursor({1}, {2})".format(
"keepjumps " if kj else "", target[0], target[1]))
def window_bundaries():
"""Returns the top and bottom lines number for the current window."""
curr_pos = cursor()
scrolloff = vim.eval("&scrolloff")
vim.command("setlocal scrolloff=0")
# :help keepjumps -> Moving around in {command} does not change the '',
# '. and '^ marks, the jumplist or the changelist.
vim.command("keepjumps normal! H")
top = cursor()[0]
vim.command("keepjumps normal! L")
bot = cursor()[0]
# restore position and changed options
cursor(curr_pos)
vim.command("setlocal scrolloff={0}".format(scrolloff))
return top, bot
def highlight(group, patt, priority=10):
"""Wraps the matchadd() vim function."""
vim.eval("matchadd('{0}', '{1}', {2})".format(
group, patt, priority))
def subst_char(buffer, v, row, col):
"""Substitutes a character in the buffer with the given character at the
given position. Return the substituted character."""
if row >= len(buffer):
raise ValueError("row index out of bound")
new_line = list(buffer[row])
if col >= len(new_line):
raise ValueError("column index out of bound")
old = buffer[row][col]
new_line[col] = v
buffer[row] = "".join(new_line)
return old
def clear_highlighting():
"""Clears Breeze highlightings."""
for match in vim.eval("getmatches()"):
if match['group'] in ('BreezeJumpMark', 'BreezeShade', 'BreezeHl'):
vim.command("call matchdelete({0})".format(match['id']))
|
Python
| 0.000291
|
@@ -528,18 +528,8 @@
None
-, kj=False
):%0A
@@ -554,415 +554,163 @@
rsor
-.%0A%0A If the kj parameter is set to True, then the command behaves as following:%0A :help keepjumps -%3E Moving around in %7Bcommand%7D does not change the '', '.%0A and '%5E marks, the jumplist or the changelist...%0A %22%22%22%0A if not target:%0A return vim.current.window.cursor%0A vim.command(%22%7B0%7Dcall cursor(%7B1%7D, %7B2%7D)%22.format(%0A %22keepjumps %22 if kj else %22%22, target%5B0%5D, target%5B1%5D))
+ or returs the current cursor position.%22%22%22%0A if not target:%0A return vim.current.window.cursor%0A else:%0A vim.current.window.cursor = target
%0A%0A%0Ad
|
0b311b67e1cf5831a6e1af317409fc6e854e8ce6
|
Remove debug artifacts
|
emission_events/scraper/scraper.py
|
emission_events/scraper/scraper.py
|
from datetime import datetime
from bs4 import BeautifulSoup
class Scraper(object):
def __init__(self, html, tracking_number):
self.html = html
self.soup = BeautifulSoup(html)
self.tracking_number = tracking_number
def __call__(self):
tds = self.soup.table.find_all('td')
metas = self.soup.find_all('meta')
began_date = self.parse_date(tds[5].string.strip())
ended_date = self.parse_date(tds[7].string.strip())
return {
'tracking_number': self.tracking_number,
'dc_date_meta': self.get_dc_date_meta(metas),
'regulated_entity_name': self.clean(tds[0].string, 30),
'physical_location': self.clean(tds[1].string),
'regulated_entity_rn_number': self.clean(tds[2].string, 50),
'city_county': self.clean(tds[3].string, 50),
'type_of_air_emissions_event': self.clean(tds[4].string, 50).upper(),
'based_on_the': self.clean(tds[6].string, 50).upper(),
'event_began': self.clean(tds[5].string, 30),
'event_ended': self.clean(tds[7].string, 30),
'cause': self.clean(tds[8].string),
'action_taken': self.clean(tds[9].string),
'emissions_estimation_method': self.clean(tds[10].string),
'city': self.get_city(tds[3].string),
'county': self.get_county(tds[3].string),
'began_date': began_date,
'ended_date': ended_date,
'duration': self.get_duration(began_date, ended_date)
}
def clean(self, cad, limit=200):
if cad == None:
return ''
else:
return cad.strip()[0:limit]
def get_dc_date_meta(self, metas):
for meta in metas:
try:
if meta['name'] == 'DC.Date':
return meta['content']
except KeyError:
pass
return None
def get_city(self, cad):
city = cad.split(',')[0].strip()
if city == '':
return None
else:
return city
def get_county(self, cad):
county = cad.split(',')[1].strip()
if county == '':
return None
else:
return county
def parse_date(self, cad):
try:
if len(cad.split()) == 2:
print "parsing " + cad
print datetime.strptime(cad, "%m/%d/%Y %I:%M%p")
return datetime.strptime(cad, "%m/%d/%Y %I:%M%p")
else:
print "parsing (short) " + cad
print datetime.strptime(cad, "%m/%d/%Y")
return datetime.strptime(cad, "%m/%d/%Y")
except ValueError:
print "fallo: " + cad
return None
def get_duration(self, begin, end):
if begin == None or end == None:
return None
else:
return (end - begin).total_seconds()/3600
|
Python
| 0.000001
|
@@ -1699,16 +1699,86 @@
ed_date)
+,%0A 'dc_date': self.parse_date(self.get_dc_date_meta(metas))
%0A
@@ -2584,283 +2584,75 @@
-print %22parsing %22 + cad%0A print datetime.strptime(cad, %22%25m/%25d/%25Y %25I:%25M%25p%22)%0A return datetime.strptime(cad, %22%25m/%25d/%25Y %25I:%25M%25p%22)%0A else:%0A print %22parsing (short) %22 + cad%0A print datetime.strptime(cad, %22%25m/%25d/%25Y%22)
+return datetime.strptime(cad, %22%25m/%25d/%25Y %25I:%25M%25p%22)%0A else:
%0A
|
bd6eec33e59e3d46e5da931fbe9e1094bbb7c0bb
|
Add all primitives to known interactions.
|
enactiveagents/experiment/basic.py
|
enactiveagents/experiment/basic.py
|
"""
Module to build experiments (worlds, agents, etc.).
"""
import model.interaction
import model.agent
import experiment
class BasicExperiment(experiment.Experiment):
world_representation = [
"wwwwwwwwwwwwwww",
"w.............w",
"w.wwwwwww.....w",
"w.......wwwww.w",
"w.wwwww.......w",
"w.w.......w...w",
"w.w.wwwww.w...w",
"w.w.w...w.ww.ww",
"w.www.....w...w",
"w.....wwwww.a.w",
"wwwwwwwwwwwwwww"
]
def __init__(self):
super(BasicExperiment, self).__init__()
# Parse world
self.world = self.parse_world(self.world_representation)
# Set up primitives
step = model.interaction.PrimitiveInteraction("Step")
turn_right = model.interaction.PrimitiveInteraction("Turn Right")
turn_left = model.interaction.PrimitiveInteraction("Turn Left")
feel = model.interaction.PrimitiveInteraction("Feel")
no_feel = model.interaction.PrimitiveInteraction("No Feel")
bump = model.interaction.PrimitiveInteraction("Bump")
# Define environment logic for primitives, these functions will be
# registered to the primitive interactions and will be called once
# the agent attempts to enact the primitive interaction.
# The function can manipulate the world and the agents.
# The return value is the actual enacted interaction (i.e., can be
# different form the attempted interaction).
def _step(world, agent, interaction):
if world.can_step(agent):
agent.step()
return step
else:
return bump
def _turn_right(world, agent, interaction):
agent.add_rotation(-90)
return turn_right
def _turn_left(world, agent, interaction):
agent.add_rotation(90)
return turn_left
def _feel(world, agent, interaction):
if world.can_step(agent):
return no_feel
else:
return feel
# Register the previously defined functions.
enact_logic = {}
enact_logic[step] = _step
enact_logic[turn_right] = _turn_right
enact_logic[turn_left] = _turn_left
enact_logic[feel] = _feel
# Set primitives known/enactable by the agents.
primitives = []
primitives.append(step)
primitives.append(turn_right)
primitives.append(turn_left)
primitives.append(feel)
# Set intrinsic motivation values.
motivation = {}
motivation[step] = 7
motivation[turn_right] = -1
motivation[turn_left] = -1
motivation[feel] = 0
motivation[no_feel] = -1
motivation[bump] = -10
for entity in self.world.get_entities():
if isinstance(entity, model.agent.Agent):
self.world.add_enact_logic(entity, enact_logic)
entity.set_primitives(primitives)
entity.set_motivation(motivation)
def get_world(self):
return self.world
|
Python
| 0
|
@@ -2546,16 +2546,83 @@
nd(feel)
+%0A primitives.append(no_feel)%0A primitives.append(bump)
%0A%0A
|
8da02c7c4ad382f4e7a2f7a017b32c0cff51547e
|
set limit of tw id over 5 letters
|
build_attendee.py
|
build_attendee.py
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
from __future__ import print_function
from __future__ import unicode_literals
from pyquery import PyQuery as pq
import json
if __name__ == "__main__":
## ref: pyquery
# https://media.readthedocs.org/pdf/pyquery/latest/pyquery.pdf
data = dict()
file = open('data/attendees.json', "w")
dom = pq(url='https://2016.europe.wordcamp.org/attendees/')
entries = dom.find('ul.tix-attendee-list')
for x in entries('li'):
twitter_name = pq(x).find('a.tix-attendee-twitter').text()
full_name = pq(x).find('div.tix-attendee-name').text()
if twitter_name != None:
# have more than 3 characters ?
if len(twitter_name) > 3:
data[full_name.lower()] = twitter_name
json.dump(data, file, indent=2)
file.close()
file = open('data/list_of_attendees', "w")
for x in data.keys():
file.write(x.encode('utf8'))
file.write("\n")
file.close()
|
Python
| 0.000011
|
@@ -724,17 +724,17 @@
name) %3E
-3
+4
:%0A
|
dc54a12bfd2124e7203270940928e47198ed914e
|
bump version
|
bulbs/__init__.py
|
bulbs/__init__.py
|
__version__ = "0.6.23"
|
Python
| 0
|
@@ -17,7 +17,7 @@
.6.2
-3
+4
%22%0A
|
64383b6d8095f27af775d3c6030b22ee36055b29
|
Change summoner example function name, add params
|
examples/summoner.py
|
examples/summoner.py
|
import cassiopeia as cass
from cassiopeia.core import Summoner
def test_cass():
name = "Kalturi"
me = Summoner(name=name)
print("Name:", me.name)
print("Id:", me.id)
print("Account id:", me.account.id)
print("Level:", me.level)
print("Revision date:", me.revision_date)
print("Profile icon id:", me.profile_icon.id)
print("Profile icon name:", me.profile_icon.name)
print("Profile icon url:", me.profile_icon.url)
print("Profile icon image:", me.profile_icon.image)
name = me.name
id = me.id
account_id = me.account.id
me = cass.get_summoner(name)
me = cass.get_summoner(name=name)
me = cass.get_summoner(id=id)
me = cass.get_summoner(account_id=account_id)
if __name__ == "__main__":
test_cass()
|
Python
| 0
|
@@ -61,53 +61,65 @@
er%0A%0A
+%0A
def
-test_cass():%0A name = %22Kalturi%22%0A%0A me
+print_summoner(name: str, region: str):%0A summoner
= S
@@ -131,24 +131,39 @@
er(name=name
+, region=region
)%0A print(
@@ -171,18 +171,24 @@
Name:%22,
-me
+summoner
.name)%0A
@@ -198,23 +198,29 @@
print(%22I
-d
+D
:%22,
-me
+summoner
.id)%0A
@@ -235,24 +235,30 @@
Account
-id
+ID
:%22,
-me
+summoner
.account
@@ -282,18 +282,24 @@
evel:%22,
-me
+summoner
.level)%0A
@@ -326,18 +326,24 @@
date:%22,
-me
+summoner
.revisio
@@ -378,16 +378,22 @@
con
-id
+ID
:%22,
-me
+summoner
.pro
@@ -434,26 +434,32 @@
con name:%22,
-me
+summoner
.profile_ico
@@ -494,17 +494,23 @@
con
-url
+URL
:%22,
-me
+summoner
.pro
@@ -557,18 +557,24 @@
mage:%22,
-me
+summoner
.profile
@@ -590,79 +590,125 @@
ge)%0A
+%0A
-name = me.name%0A id = me.id%0A account_id = me.account.id%0A me
+# These are equivalent ways of obtaining a Summoner.%0A # Note that the region defaults to NA.%0A # summoner
= c
@@ -726,34 +726,44 @@
moner(name)%0A
-me
+# summoner
= cass.get_summ
@@ -768,24 +768,33 @@
mmoner(name=
+summoner.
name)%0A me
@@ -787,26 +787,36 @@
r.name)%0A
-me
+# summoner
= cass.get_
@@ -831,18 +831,37 @@
(id=
-id)%0A me
+summoner.id)%0A # summoner
= c
@@ -888,24 +888,33 @@
ount_id=
+summoner.
account
-_
+.
id)%0A%0A%0Aif
@@ -946,16 +946,36 @@
-test_cass(
+print_summoner(%22Kalturi%22, %22NA%22
)%0A
|
5f167d7bc660bf85ffb92866333e2f2c90f69070
|
Update template for keyPressed callback.
|
examples/template.py
|
examples/template.py
|
#this import statement allows access to the karamba functions
import karamba
#this is called when your widget is initialized
def initWidget(widget):
pass
#This is called when your widget is closed. You can use this to clean
#up open files, etc. You don't need to delete text and images in your
#theme. That is done automatically. This callback is just for cleaning up
#external things. Most people don't need to put anything here.
def widgetClosed(widget):
pass
#this is called everytime your widget is updated
#the update inverval is specified in the .theme file
def widgetUpdated(widget):
pass
#This gets called everytime our widget is clicked.
#Notes:
# widget = reference to our widget
# x = x position (relative to our widget)
# y = y position (relative to our widget)
# botton = button clicked:
# 1 = Left Mouse Button
# 2 = Middle Mouse Button
# 3 = Right Mouse Button, but this will never happen
# because the right mouse button brings up the
# Karamba menu.
# 4,5 = Scroll wheel up and down
def widgetClicked(widget, x, y, button):
pass
#This gets called everytime our widget is clicked.
#Notes
# widget = reference to our widget
# x = x position (relative to our widget)
# y = y position (relative to our widget)
# botton = button being held:
# 0 = No Mouse Button
# 1 = Left Mouse Button
# 2 = Middle Mouse Button
# 3 = Right Mouse Button, but this will never happen
# because the right mouse button brings up the
# Karamba menu.
def widgetMouseMoved(widget, x, y, button):
#Warning: Don't do anything too intensive here
#You don't want to run some complex piece of code everytime the mouse moves
pass
#This gets called when an item is clicked in a popup menu you have created.
# menu = a reference to the menu
# id = the number of the item that was clicked.
def menuItemClicked(widget, menu, id):
pass
#This gets called when an item is clicked in the theme CONFIGURATION menu,
#not the popup menus that you create.
# key = the reference to the configuration key that was changed
# value = the new value (true or false) that was selected
def menuOptionChanged(widget, key, value):
pass
#This gets called when a meter (image, text, etc) is clicked.
# NOTE you must use attachClickArea() to make a meter
# clickable.
# widget = reference to your theme
# meter = the meter clicked
# button = the button clicked (see widgetClicked for button numbers)
def meterClicked(widget, meter, button):
pass
#This gets called when a command you have executed with executeInteractive() outputs something
#to stdout. This way you can get the output of for example kdialog without freezing up the widget
#waiting for kdialog to end.
# widget = reference to your theme
# pid = process number of the program outputting (use this if you execute more than out process)
# output = the text the program outputted to stdout
def commandOutput(widget, pid, output):
pass
#This gets called when an item is dropped on this widget.
# NOTE you have to call acceptDrops() before your widget will accept drops.
# widget = reference to your theme
# dropText = the text of the dropped item (probably a URL to it's location in KDE)
def itemDropped(widget, dropText):
pass
#This gets called when a new program is LOADING in KDE. When it is done
#loading, startupRemoved() is called, followed by taskAdded().
# widget = reference to your widget
# task = A refence to the task that is starting.
def startupAdded(widget, startup):
pass
#This gets called when a new program is done LOADING in KDE.
# widget = reference to your widget
# task = A refence to the task that just finished loading.
def startupRemoved(widget, startup):
pass
#This is called every time a new task (program) is started in KDE.
# widget = reference to your widget
# task = A refence to the new task. Call getTaskInfo() with this reference
# to get the name, etc of this new task.
def taskAdded(widget, task):
pass
#This is called everytime a task (program) is closed in KDE.
# widget = reference to your widget
# task = A refence to the task.
def taskRemoved(widget, task):
pass
#This is called everytime a different task gains focus (IE, the user clicks
#on a different window).
# widget = reference to your widget
# task = A refence to the task. Call getTaskInfo() with this reference
# to get the name, etc of this new task.
def activeTaskChanged(widget, task):
pass
#This is called everytime the systray you created with createSystray() is updated
def systrayUpdated(widget):
pass
#This is called everytime the current desktop changes
# widget = reference to your widget
# desktop = the current desktop
def desktopChanged(widget, desktop):
pass
#This is called everytime the wallpaper changes on a desktop
# widget = reference to your widget
# desktop = the desktop whose wallpaper changed
def wallpaperChanged(widget, desktop):
pass
# This will be printed when the widget loads.
print "Loaded my python extension!"
|
Python
| 0.000007
|
@@ -5190,16 +5190,246 @@
pass
+%0A%0A#This is called everytime there is a key press in any focused input field%0A# widget = reference to your widget%0A# meter = reference to an input box%0A# char = the key that was pressed%0Adef keyPressed(widget, meter, char):%0A pass
%0A %0A#
|
69091ea58fcd67c61dae3837eb0b9261825d44b3
|
Use except as notation
|
examples/tor_info.py
|
examples/tor_info.py
|
#!/usr/bin/env python
# Simple usage example of TorInfo. This class does some magic so that
# once it's set up, all the attributes it has (or appears to) are
# GETINFO ones, in a heirarchy. So where GETINFO specifies
# "net/listeners/dns" TorInfo will have a "net" attribute that
# contains at least "listeners", etcetera. The leaves are all methods
# which return a Deferred. If the corresponding GETINFO takes an
# argument, so does the leaf.
#
# Go straight to "setup_complete" for the goods -- this is called
# after TorInfo and the underlying TorControlProtocol are set up.
#
# If you want to issue multiple GETINFO calls in one network
# transaction, you'll have to use TorControlProtocol's get_info
# instead.
from __future__ import print_function
import sys
from twisted.internet import reactor, defer
from txtorcon import TorInfo, build_local_tor_connection
def error(x):
print("ERROR", x)
return x
@defer.inlineCallbacks
def recursive_dump(indent, obj, depth=0):
if callable(obj):
try:
print("%s: " % obj, end=' ')
sys.stdout.flush()
if obj.takes_arg:
v = yield obj('arrrrrg')
v = yield obj()
v = v.replace('\n', '\\')
if len(v) > 60:
v = v[:50] + '...' + v[-7:]
except Exception, e:
v = 'ERROR: ' + str(e)
print(v)
else:
indent = indent + ' '
for x in obj:
yield recursive_dump(indent, x, depth + 1)
@defer.inlineCallbacks
def setup_complete(info):
print("Top-Level Things:", dir(info))
if True:
# some examples of getting specific GETINFO callbacks
v = yield info.version()
ip = yield info.ip_to_country('1.2.3.4')
boot_phase = yield info.status.bootstrap_phase()
ns = yield info.ns.name('moria1')
guards = yield info.entry_guards()
print('version:', v)
print('1.2.3.4 is in', ip)
print('bootstrap-phase:', boot_phase)
print('moria1:', ns)
print('entry guards:', guards)
# now we dump everything, one at a time
d = recursive_dump('', info)
d.addCallback(lambda x: reactor.stop())
d.addErrback(error)
def setup_failed(arg):
print("SETUP FAILED", arg)
reactor.stop()
def bootstrap(c):
info = TorInfo(c)
info.post_bootstrap.addCallback(setup_complete).addErrback(setup_failed)
d = build_local_tor_connection(reactor, build_state=False)
# do not use addCallbacks() here, in case bootstrap has an error
d.addCallback(bootstrap).addErrback(setup_failed)
reactor.run()
|
Python
| 0.000023
|
@@ -1321,17 +1321,19 @@
xception
-,
+ as
e:%0A
|
b077df615eb4354f416877cc2857fb9848e158eb
|
Fix get_sort_by_toggle to work with QueryDicts with multiple values
|
saleor/core/templatetags/shop.py
|
saleor/core/templatetags/shop.py
|
from __future__ import unicode_literals
try:
from itertools import zip_longest
except ImportError:
from itertools import izip_longest as zip_longest
from django.template import Library
from django.utils.http import urlencode
register = Library()
@register.filter
def slice(items, group_size=1):
args = [iter(items)] * group_size
return (filter(None, group)
for group in zip_longest(*args, fillvalue=None))
@register.simple_tag(takes_context=True)
def get_sort_by_url(context, field, descending=False):
request = context['request']
request_get = request.GET.dict()
if descending:
request_get['sort_by'] = '-' + field
else:
request_get['sort_by'] = field
return '%s?%s' % (request.path, urlencode(request_get))
@register.simple_tag(takes_context=True)
def get_sort_by_url_toggle(context, field):
request = context['request']
request_get = request.GET.dict()
if field == request_get.get('sort_by'):
new_sort_by = '-%s' % field # descending sort
else:
new_sort_by = field # ascending sort
request_get['sort_by'] = new_sort_by
return '%s?%s' % (request.path, urlencode(request_get))
|
Python
| 0
|
@@ -915,36 +915,36 @@
t = request.GET.
-dict
+copy
()%0A if field
@@ -996,16 +996,17 @@
rt_by =
+u
'-%25s' %25
@@ -1159,34 +1159,24 @@
quest.path,
-urlencode(
request_get)
@@ -1166,19 +1166,30 @@
ath, request_get
+.urlencode(
))%0A
|
dab8e1af4091a18a6251668b9c2475ee6b1e8f66
|
Fix diffuse.explicit() for constant non-zero extrapolation
|
phi/physics/diffuse.py
|
phi/physics/diffuse.py
|
"""
Functions to simulate diffusion processes on `phi.field.Field` objects.
"""
from phi import math
from phi.field import Grid, Field, laplace, solve_linear, jit_compile_linear
from phi.field._field import FieldType
from phi.field._grid import GridType
from phi.math import copy_with
def explicit(field: FieldType,
diffusivity: float or math.Tensor or Field,
dt: float or math.Tensor,
substeps: int = 1) -> FieldType:
"""
Simulate a finite-time diffusion process of the form dF/dt = α · ΔF on a given `Field` FieldType with diffusion coefficient α.
If `field` is periodic (set via `extrapolation='periodic'`), diffusion may be simulated in Fourier space.
Otherwise, finite differencing is used to approximate the
Args:
field: CenteredGrid, StaggeredGrid or ConstantField
diffusivity: Diffusion per time. `diffusion_amount = diffusivity * dt`
dt: Time interval. `diffusion_amount = diffusivity * dt`
substeps: number of iterations to use (Default value = 1)
field: FieldType:
Returns:
Diffused field of same type as `field`.
"""
amount = diffusivity * dt
if isinstance(amount, Field):
amount = amount.at(field)
for i in range(substeps):
field += amount / substeps * laplace(field).with_extrapolation(field.extrapolation)
return field
def implicit(field: FieldType,
diffusivity: float or math.Tensor or Field,
dt: float or math.Tensor,
order: int = 1,
solve=math.Solve('CG', 1e-5, 0)) -> FieldType:
"""
Diffusion by solving a linear system of equations.
Args:
order: Order of method, 1=first order. This translates to `substeps` for the explicit sharpening.
field: `phi.field.Field` to diffuse.
diffusivity: Diffusion per time. `diffusion_amount = diffusivity * dt`
dt: Time interval. `diffusion_amount = diffusivity * dt`
solve:
Returns:
Diffused field of same type as `field`.
"""
@jit_compile_linear
def sharpen(x):
return explicit(x, diffusivity, -dt, substeps=order)
if not solve.x0:
solve = copy_with(solve, x0=field)
return solve_linear(sharpen, y=field, solve=solve)
def fourier(field: GridType,
diffusivity: float or math.Tensor,
dt: float or math.Tensor) -> FieldType:
"""
Exact diffusion of a periodic field in frequency space.
For non-periodic fields or non-constant diffusivity, use another diffusion function such as `explicit()`.
Args:
field:
diffusivity: Diffusion per time. `diffusion_amount = diffusivity * dt`
dt: Time interval. `diffusion_amount = diffusivity * dt`
Returns:
Diffused field of same type as `field`.
"""
assert isinstance(field, Grid), "Cannot diffuse field of type '%s'" % type(field)
assert field.extrapolation == math.extrapolation.PERIODIC, "Fourier diffusion can only be applied to periodic fields."
amount = diffusivity * dt
k = math.fftfreq(field.resolution)
k2 = math.vec_squared(k)
fft_laplace = -(2 * math.PI) ** 2 * k2
diffuse_kernel = math.exp(fft_laplace * amount)
result_k = math.fft(field.values) * diffuse_kernel
result_values = math.real(math.ifft(result_k))
return field.with_values(result_values)
|
Python
| 0.000012
|
@@ -1236,24 +1236,54 @@
t.at(field)%0A
+ ext = field.extrapolation%0A
for i in
@@ -1371,22 +1371,48 @@
olation(
+ext)%0A field =
field.
+with_
extrapol
@@ -1412,24 +1412,28 @@
xtrapolation
+(ext
)%0A return
|
3e62a39892c231419ac09310808d95cb42b4f69f
|
add python solution for valid_parentheses
|
python/valid_parentheses.py
|
python/valid_parentheses.py
|
Python
| 0.00115
|
@@ -0,0 +1,499 @@
+%0A# validate parentheses of string%0A%0Aimport sys%0A%0AinputChars = %5B x for x in sys.argv%5B1%5D %5D%0A%0AopenParens = ('(', '%5B', '%7B')%0AcloseParens = (')', '%5D', '%7D')%0AparenPairs = %7B%0A ')': '(',%0A '%5D': '%5B',%0A '%7D': '%7B'%0A%7D%0AparenHistory = %5B%5D%0A%0Afor c in inputChars:%0A if c in openParens:%0A parenHistory.append(c)%0A elif c in closeParens:%0A if len(parenHistory) == 0 or parenHistory.pop() != parenPairs%5Bc%5D:%0A print False%0A sys.exit(1)%0A%0Aprint True if len(parenHistory) == 0 else False%0A%0A
|
|
8ebe99ec5e944edaf7e0999222f1f1a54b07e5a4
|
Fix restart_needed
|
salt/states/win_servermanager.py
|
salt/states/win_servermanager.py
|
# -*- coding: utf-8 -*-
'''
Manage Windows features via the ServerManager powershell module
'''
# Import salt modules
import salt.utils
def __virtual__():
'''
Load only if win_servermanager is loaded
'''
return 'win_servermanager' if 'win_servermanager.install' in __salt__ else False
def installed(name, recurse=False, force=False):
'''
Install the windows feature
name:
short name of the feature (the right column in win_servermanager.list_available)
recurse:
install all sub-features as well
force:
if the feature is installed but on of its sub-features are not installed set this to True to force
the installation of the sub-features
Note:
Some features require reboot after un/installation. If so, until the server is restarted
other features can not be installed!
Example:
Run ``salt MinionName win_servermanager.list_available`` to get a list of available roles and features. Use
the name in the right column. Do not use the role or feature names mentioned in the PKGMGR documentation. In
this example for IIS-WebServerRole the name to be used is Web-Server.
.. code-block:: yaml
ISWebserverRole:
win_servermanager.installed:
- force: True
- recurse: True
- name: Web-Server
'''
ret = {'name': name,
'result': True,
'changes': {},
'comment': ''}
# Determine if the feature is installed
old = __salt__['win_servermanager.list_installed']()
if name not in old:
ret['comment'] = '{0} will be installed recurse={1}'.format(name, recurse)
elif force and recurse:
ret['comment'] = '{0} already installed but might install sub-features'.format(name)
else:
ret['comment'] = 'The feature {0} is already installed'.format(name)
return ret
if __opts__['test']:
ret['result'] = None
return ret
# Install the features
status = __salt__['win_servermanager.install'](name, recurse)
ret['result'] = status['Success']
if not ret['result']:
ret['comment'] = 'Failed to install {0}: {1}'.format(name, ret['changes']['feature']['ExitCode'])
if 'already installed' not in status['DisplayName']:
ret['changes']['feature'] = status
ret['changes']['restart_needed'] = status['RestartNeeded']
new = __salt__['win_servermanager.list_installed']()
ret['changes']['features'] = salt.utils.compare_dicts(old, new)
return ret
def removed(name):
'''
Remove the windows feature
name:
short name of the feature (the right column in win_servermanager.list_available)
.. note::
Some features require a reboot after uninstallation. If so the feature will not be completely uninstalled until
the server is restarted.
Example:
Run ``salt MinionName win_servermanager.list_installed`` to get a list of all features installed. Use the top
name listed for each feature, not the indented one. Do not use the role or feature names mentioned in the
PKGMGR documentation.
.. code-block:: yaml
ISWebserverRole:
win_servermanager.removed:
- name: Web-Server
'''
ret = {'name': name,
'result': True,
'changes': {},
'comment': ''}
# Determine if the feature is installed
old = __salt__['win_servermanager.list_installed']()
if name in old:
ret['comment'] = '{0} will be removed'.format(name)
else:
ret['comment'] = 'The feature {0} is not installed'.format(name)
return ret
if __opts__['test']:
ret['result'] = None
return ret
# Remove the features
status = __salt__['win_servermanager.remove'](name)
ret['result'] = status['Success']
if not ret['result']:
ret['comment'] = 'Failed to uninstall the feature {0}'.format(ret['changes']['feature']['ExitCode'])
ret['changes']['restart_needed'] = status['RestartNeeded']
new = __salt__['win_servermanager.list_installed']()
ret['changes']['features'] = salt.utils.compare_dicts(old, new)
return ret
|
Python
| 0.000002
|
@@ -2336,35 +2336,24 @@
s%0A%0A ret%5B'
-changes'%5D%5B'
restart_need
@@ -3975,27 +3975,16 @@
ret%5B'
-changes'%5D%5B'
restart_
|
1e36dee289585d046a29ba535e0ceaa5477c4aeb
|
Fix LEN type check
|
raco/expression/function.py
|
raco/expression/function.py
|
"""
Functions (unary and binary) for use in Raco.
"""
import math
from .expression import *
class UnaryFunction(UnaryOperator):
def __str__(self):
return "%s(%s)" % (self.__class__.__name__, self.input)
class BinaryFunction(BinaryOperator):
def __str__(self):
return "%s(%s, %s)" % (self.__class__.__name__, self.left, self.right)
class NaryFunction(NaryOperator):
def __str__(self):
return "%s(%s)" % \
(self.__class__.__name__,
",".join([str(op) for op in self.operands]))
class WORKERID(ZeroaryOperator):
def __str__(self):
return "%s" % self.__class__.__name__
def evaluate(self, _tuple, scheme, state=None):
return 0
def typeof(self, scheme, state_scheme):
return "LONG_TYPE"
class UnaryLongFunction(UnaryFunction):
def typeof(self, scheme, state_scheme):
input_type = self.input.typeof(scheme, state_scheme)
check_is_numeric(input_type)
return "LONG_TYPE"
class UnaryDoubleFunction(UnaryFunction):
def typeof(self, scheme, state_scheme):
input_type = self.input.typeof(scheme, state_scheme)
check_is_numeric(input_type)
return "DOUBLE_TYPE"
class UnaryTypePreservingFunction(UnaryFunction):
def typeof(self, scheme, state_scheme):
input_type = self.input.typeof(scheme, state_scheme)
check_is_numeric(input_type)
return input_type
class StringFunction(UnaryFunction):
def typeof(self, scheme, state_scheme):
input_type = self.input.typeof(scheme, state_scheme)
if input_type != "STRING_TYPE":
raise TypeSafetyViolation("Must be a string for %s" % (
self.__class__,))
return "STRING_TYPE"
class ABS(UnaryTypePreservingFunction):
def evaluate(self, _tuple, scheme, state=None):
return abs(self.input.evaluate(_tuple, scheme, state))
class CEIL(UnaryLongFunction):
def evaluate(self, _tuple, scheme, state=None):
return math.ceil(self.input.evaluate(_tuple, scheme, state))
class COS(UnaryDoubleFunction):
def evaluate(self, _tuple, scheme, state=None):
return math.cos(self.input.evaluate(_tuple, scheme, state))
class FLOOR(UnaryLongFunction):
def evaluate(self, _tuple, scheme, state=None):
return math.floor(self.input.evaluate(_tuple, scheme, state))
class LOG(UnaryDoubleFunction):
def evaluate(self, _tuple, scheme, state=None):
return math.log(self.input.evaluate(_tuple, scheme, state))
class SIN(UnaryDoubleFunction):
def evaluate(self, _tuple, scheme, state=None):
return math.sin(self.input.evaluate(_tuple, scheme, state))
class SQRT(UnaryDoubleFunction):
def evaluate(self, _tuple, scheme, state=None):
return math.sqrt(self.input.evaluate(_tuple, scheme, state))
class TAN(UnaryDoubleFunction):
def evaluate(self, _tuple, scheme, state=None):
return math.tan(self.input.evaluate(_tuple, scheme, state))
class POW(BinaryFunction):
literals = ['POW']
def evaluate(self, _tuple, scheme, state=None):
return pow(self.left.evaluate(_tuple, scheme, state),
self.right.evaluate(_tuple, scheme, state))
def typeof(self, scheme, state_scheme):
lt = self.left.typeof(scheme, state_scheme)
check_is_numeric(lt)
rt = self.right.typeof(scheme, state_scheme)
check_is_numeric(rt)
return "DOUBLE_TYPE"
class CompareFunction(BinaryFunction):
def typeof(self, scheme, state_scheme):
lt = self.left.typeof(scheme, state_scheme)
rt = self.right.typeof(scheme, state_scheme)
if lt != rt:
raise TypeSafetyViolation("Can't compare %s with %s" % (
lt, rt))
return lt
class LESSER(CompareFunction):
literals = ['LESSER']
def evaluate(self, _tuple, scheme, state=None):
return min(self.left.evaluate(_tuple, scheme, state),
self.right.evaluate(_tuple, scheme, state))
class GREATER(CompareFunction):
literals = ['GREATER']
def evaluate(self, _tuple, scheme, state=None):
return max(self.left.evaluate(_tuple, scheme, state),
self.right.evaluate(_tuple, scheme, state))
class SUBSTR(NaryFunction):
literals = ["SUBSTR"]
def evaluate(self, _tuple, scheme, state=None):
inputStr = self.operands[0].evaluate(_tuple, scheme, state)
beginIdx = self.operands[1].evaluate(_tuple, scheme, state)
endIdx = self.operands[2].evaluate(_tuple, scheme, state)
return inputStr[beginIdx:endIdx]
def typeof(self, scheme, state_scheme):
check_type(self.operands[0].typeof(scheme, state_scheme), "STRING_TYPE") # noqa
check_type(self.operands[1].typeof(scheme, state_scheme), "LONG_TYPE")
check_type(self.operands[2].typeof(scheme, state_scheme), "LONG_TYPE")
return "STRING_TYPE"
class LEN(StringFunction):
literals = ["LEN"]
def evaluate(self, _tuple, scheme, state=None):
return len(self.input.evaluate(_tuple, scheme, state))
|
Python
| 0
|
@@ -1435,323 +1435,8 @@
e%0A%0A%0A
-class StringFunction(UnaryFunction):%0A def typeof(self, scheme, state_scheme):%0A input_type = self.input.typeof(scheme, state_scheme)%0A if input_type != %22STRING_TYPE%22:%0A raise TypeSafetyViolation(%22Must be a string for %25s%22 %25 (%0A self.__class__,))%0A return %22STRING_TYPE%22%0A%0A%0A
clas
@@ -4625,14 +4625,13 @@
LEN(
-String
+Unary
Func
@@ -4705,32 +4705,32 @@
e, state=None):%0A
-
return l
@@ -4756,28 +4756,303 @@
ate(_tuple, scheme, state))%0A
+%0A def typeof(self, scheme, state_scheme):%0A input_type = self.input.typeof(scheme, state_scheme)%0A if input_type != %22STRING_TYPE%22:%0A raise TypeSafetyViolation(%22Must be a string for %25s%22 %25 (%0A self.__class__,))%0A return %22LONG_TYPE%22%0A
|
1cd9343efcbb8058d2c7483120af1047847787a9
|
Make constant uppercase
|
readthedocs/builds/tasks.py
|
readthedocs/builds/tasks.py
|
import logging
from django.conf import settings
from django.db.models import Avg
from readthedocs.builds.models import Build, Version
from readthedocs.projects.models import Feature
log = logging.getLogger(__name__)
class TaskRouter:
"""
Celery tasks router.
It allows us to decide which queue is where we want to execute the task
based on project's settings but also in queue availability.
1. the project is using conda
2. new project with less than N successful builds
3. last N successful builds have a high time average
It ignores projects that have already set ``build_queue`` attribute.
https://docs.celeryproject.org/en/stable/userguide/routing.html#manual-routing
https://docs.celeryproject.org/en/stable/userguide/configuration.html#std:setting-task_routes
"""
n_builds = 5
n_last_builds = 15
time_average = 350
BUILD_DEFAULT_QUEUE = 'build:default'
BUILD_LARGE_QUEUE = 'build:large'
def route_for_task(self, task, args, kwargs, **__):
log.info('Executing TaskRouter. task=%s', task)
if task not in (
'readthedocs.projects.tasks.update_docs_task',
'readthedocs.projects.tasks.sync_repository_task',
):
log.info('Skipping routing non-build task. task=%s', task)
return
version = self._get_version(task, args, kwargs)
if not version:
log.info('No Build/Version found. No routing task. task=%s', task)
return settings.CELERY_DEFAULT_QUEUE
# Do no route tasks for projects without the feature flag
if not version.project.has_feature(Feature.CELERY_ROUTER):
log.info('Project does not have the feature flag. No routing task. task=%s', task)
return version.project.build_queue or settings.CELERY_DEFAULT_QUEUE
# Do not override the queue defined in the project itself
if version.project.build_queue:
log.info(
'Skipping routing task because project has a custom queue. queue=%s',
version.project.build_queue,
)
return version.project.build_queue
queryset = version.builds.filter(success=True).order_by('-date')
last_builds = queryset[:self.n_last_builds]
# Version has used conda in previous builds
for build in last_builds.iterator():
if build.config.get('conda', None):
log.info(
'Routing task because project uses conda. queue=%s',
self.BUILD_LARGE_QUEUE,
)
return self.BUILD_LARGE_QUEUE
# We do not have enough builds for this version yet
if queryset.count() < self.n_builds:
log.info(
'Routing task because it does not have enough success builds yet. queue=%s',
self.BUILD_LARGE_QUEUE,
)
return self.BUILD_LARGE_QUEUE
# Build time average is high
length_avg = queryset.filter(pk__in=last_builds).aggregate(Avg('length')).get('length__avg')
if length_avg > self.time_average:
log.info(
'Routing task because project has high time average. queue=%s',
self.BUILD_LARGE_QUEUE,
)
return self.BUILD_LARGE_QUEUE
log.info(
'Routing task to default queue because no conditions were met. queue=%s',
settings.CELERY_DEFAULT_QUEUE,
)
return settings.CELERY_DEFAULT_QUEUE
def _get_version(self, task, args, kwargs):
if task == 'readthedocs.projects.tasks.update_docs_task':
build_pk = kwargs.get('build_pk')
try:
build = Build.objects.get(pk=build_pk)
version = build.version
except Build.DoesNotExist:
log.info(
'Build does not exist. Routing task to default queue. build_pk=%s queue=%s',
build_pk,
settings.CELERY_DEFAULT_QUEUE,
)
return
elif task == 'readthedocs.projects.tasks.sync_repository_task':
version_pk = args[0]
try:
version = Version.objects.get(pk=version_pk)
except Version.DoesNotExist:
log.info(
'Version does not exist. Routing task to default queue. version_pk=%s queue=%s',
version_pk,
settings.CELERY_DEFAULT_QUEUE,
)
return
return version
|
Python
| 1
|
@@ -822,24 +822,24 @@
%22%22%0A%0A
-n_builds
+N_BUILDS
= 5%0A
@@ -843,43 +843,43 @@
-n_last_builds = 15%0A time_average
+N_LAST_BUILDS = 15%0A TIME_AVERAGE
= 3
@@ -2277,21 +2277,21 @@
elf.
-n_last_builds
+N_LAST_BUILDS
%5D%0A%0A
@@ -2741,16 +2741,16 @@
elf.
-n_builds
+N_BUILDS
:%0A
@@ -3130,20 +3130,20 @@
elf.
-time_average
+TIME_AVERAGE
:%0A
|
a78ea4b3ca869ce9603c48f53ff6229c3ff7b0bb
|
fix issue with timezone removed from new calendar objects
|
application/views/calendar_view.py
|
application/views/calendar_view.py
|
import urllib
from application.views.custom_model_view import CustomModelView
from application.models import Calendar, Location, User
from application.helpers import encrypt_string, credentials, is_valid_credentials
from application import app, db, authomatic
from flask import flash, g
from flask.ext.admin.actions import action
class CalendarView(CustomModelView):
#TODO: the pagination needs to be per-user, not for the whole database
#TODO: add validation to prevent a venue from being added to a calendar which isn't "applicable"
# Override displayed fields
can_create = False
can_delete = False
can_edit = True
new_actions = True
column_list = ('summary','enabled','url')
column_labels = {'summary': 'Calendar Title', 'enabled': 'Calendar Admin Enabled', 'url': 'Public URL for Event Requests', 'locations': 'Approved Event Venues'}
form_columns = ('locations', 'redirect_url', 'meetup_disabled', 'eventbrite_disabled') # 'google_disabled' (wouldn't make sense to disable google calendars at this point)
# TODO: clean this up
column_formatters = dict(url=lambda v, c, m, p: app.config['DOMAIN_NAME']+'/event/request/'+m.url+'/'+urllib.quote_plus(urllib.quote_plus(m.redirect_url)) if (m.redirect_url and m.url) else app.config['DOMAIN_NAME']+'/event/request/'+m.url+'/'+urllib.quote_plus(urllib.quote_plus(app.config['DOMAIN_NAME'])) if (m.url and not m.redirect_url) else "") # show domain name + url if url exists
@action('disable', 'Disable Selected')
def action_disable(self, ids):
try:
#TODO: You were not the user who originally enabled this calendar. You will need to talk to:
Calendar.query.filter(db.and_(Calendar.id.in_(ids), Calendar.users.any(User.id == g.user.id))).update({"enabled": False, "url": None}, synchronize_session=False)
db.session.commit()
flash('Calendar Admin was disabled for the selected calendars.')
except Exception as ex:
raise
@action('enable', 'Enable Selected')
def action_enable(self, ids):
try:
for id in ids:
# encrypt url so users can't guess other user's URLs
calendar_url = encrypt_string(id)
Calendar.query.filter(db.and_(Calendar.id == id, Calendar.users.any(User.id == g.user.id))).update({"enabled": True, "url": calendar_url}, synchronize_session=False)
db.session.commit()
flash('Calendar Admin was activated for the selected calendars.')
except Exception as ex:
raise
# http://stackoverflow.com/questions/21087077/pre-filter-readable-data-based-on-user-permissions-with-flask-admin
def get_query(self):
if not Calendar.query.filter(db.and_(Calendar.users.any(User.id == g.user.id), Calendar.enabled == True)).first():
flash('You need to enable Calendar Admin on a calendar before your users can submit events for approval.')
# Grab the user's calendars from Google
response = authomatic.access(credentials(), 'https://www.googleapis.com/calendar/v3/users/me/calendarList?minAccessRole=writer')
dictOfCalendars = {}
#TODO: if calendar's name changes, update it
for calendar in response.data.get('items'): # get calendars from google
# if calendar does not exist, then add it
if Calendar.query.filter(db.and_(Calendar.calendar_id == calendar['id'], ~Calendar.users.any(User.id == g.user.id))).first():
existing_calendar = Calendar.query.filter(Calendar.calendar_id == calendar['id']).one()
existing_calendar.users.append(g.user)
db.session.commit()
elif not Calendar.query.filter(db.and_(Calendar.calendar_id == calendar['id'], Calendar.users.any(User.id == g.user.id))).first():
new_calendar = Calendar(calendar['id'], calendar['summary'])
new_calendar.users.append(g.user)
db.session.add(new_calendar)
db.session.commit()
return Calendar.query.filter(Calendar.users.any(User.id == g.user.id))
# override forms to prevent users from seeing eachother's data: https://gist.github.com/mrjoes/5521548
# Hook form creation methods
def create_form(self):
return self._use_filtered_parent(super(CalendarView, self).create_form())
def edit_form(self, obj):
return self._use_filtered_parent(super(CalendarView, self).edit_form(obj))
# Logic
def _use_filtered_parent(self, form):
form.locations.query_factory = self._get_parent_list
return form
def _get_parent_list(self):
return Location.query.join(Location.calendar).filter(Calendar.users.any(User.id == g.user.id)).all()
|
Python
| 0
|
@@ -3610,16 +3610,38 @@
ummary'%5D
+, calendar%5B'timeZone'%5D
)%0A%09%09%09%09ne
@@ -4401,8 +4401,9 @@
)).all()
+%0A
|
3d62b79380669d14f8cff6a83d763679ac3924b5
|
Test that Bootstrap works by checking that authentication input boxes are centered
|
system_maintenance/tests/functional/tests.py
|
system_maintenance/tests/functional/tests.py
|
from django.contrib.staticfiles.testing import StaticLiveServerTestCase
from selenium import webdriver
from selenium.webdriver.common.keys import Keys
from system_maintenance.tests.utilities import populate_test_db
class FunctionalTest(StaticLiveServerTestCase):
def setUp(self):
populate_test_db()
self.browser = webdriver.Firefox()
self.browser.implicitly_wait(3)
self.username_inputbox = None
self.password_inputbox = None
self.login_button = None
def tearDown(self):
self.browser.quit()
def find_authentication_elements(self):
self.username_inputbox = self.browser.find_element_by_id('id_username')
self.password_inputbox = self.browser.find_element_by_id('id_password')
self.login_button = self.browser.find_element_by_tag_name('button')
def system_maintenance_url(self, url_stem=''):
return '{}/system_maintenance/{}'.format(
self.live_server_url, url_stem)
def test_can_login_as_sysadmin(self):
# Try to go to the System Maintenance homepage
self.browser.get(self.system_maintenance_url())
# Not logged in, so get redirected to the SysAdmin Authentication page
self.assertIn('System Maintenance', self.browser.title)
header_text = self.browser.find_element_by_tag_name('h1').text
self.assertIn('SysAdmin Authentication', header_text)
# See input boxes for username & password
self.find_authentication_elements()
self.assertEqual(
self.username_inputbox.get_attribute('placeholder'),
'Enter username')
self.assertEqual(
self.password_inputbox.get_attribute('placeholder'),
'Enter password')
self.assertEqual(self.login_button.text, 'Login')
# Accidentally click 'Login' button without entering credentials
self.login_button.click()
# See two error messages about required fields
field_errors = self.browser.find_elements_by_class_name('field-error')
self.assertEqual(len(field_errors), 2)
for error in field_errors:
self.assertEqual(error.text, 'This field is required.')
# Enter incorrect credentials
self.find_authentication_elements()
self.username_inputbox.send_keys('nobody')
self.password_inputbox.send_keys('nobody')
# Hit 'Enter' key to submit
self.password_inputbox.send_keys(Keys.ENTER)
# See error message about entering correct username and password
self.assertIn(
'Please enter a correct username and password. Note that both ' +
'fields may be case-sensitive.',
self.browser.find_element_by_class_name('alert-danger').text)
# Enter non-sysadmin credentials
self.find_authentication_elements()
self.username_inputbox.send_keys('nonsysadmin')
self.password_inputbox.send_keys('nonsysadmin' + Keys.ENTER)
# See 'Access denied.' message about not being a sys admin
self.assertIn(
'Access denied.', self.browser.find_element_by_tag_name('h1').text)
self.assertIn(
'Hello nonsysadmin. You are not a system administrator.',
self.browser.find_element_by_tag_name('p').text)
# 'Previous Page' button.
# Click 'Logout' button and get redirected to authentication page.
self.logout_button = self.browser.find_element_by_tag_name('a')
self.logout_button.click()
# Enter sysadmin credentials
self.find_authentication_elements()
self.username_inputbox.send_keys('sysadmin')
self.password_inputbox.send_keys('sysadmin' + Keys.ENTER)
# Check that redirected to System Maintenance home page
self.assertIn('System Maintenance', self.browser.title)
self.assertEqual(
self.browser.find_element_by_tag_name('p').text,
'System maintenance records and other important system '
'administration information is accessible via the buttons below.')
self.assertEqual(
len(self.browser.find_elements_by_css_selector(
'.btn.full-width-on-mobile')), 7)
self.assertEqual(
len(self.browser.find_elements_by_css_selector(
'.btn-group.hide-on-mobile > .btn')), 7)
# Logout
self.browser.get(self.system_maintenance_url('logout'))
# Enter superuser sysadmin credentials
self.find_authentication_elements()
self.username_inputbox.send_keys('supersysadmin')
self.password_inputbox.send_keys('supersysadmin' + Keys.ENTER)
# Check that redirected to System Maintenance home page w/ admin access
self.assertIn('System Maintenance', self.browser.title)
paragraphs = self.browser.find_elements_by_tag_name('p')
self.assertIn('System maintenance records and ', paragraphs[0].text)
self.assertIn('To add or change system ', paragraphs[1].text)
self.assertEqual(
len(self.browser.find_elements_by_css_selector(
'.btn.full-width-on-mobile')), 7)
self.assertEqual(
len(self.browser.find_elements_by_css_selector(
'.btn-group.hide-on-mobile > .btn')), 14)
|
Python
| 0
|
@@ -5331,8 +5331,748 @@
)), 14)%0A
+%0A def test_layout_and_styling(self):%0A # Go to the authentication page%0A self.browser.get(self.system_maintenance_url('authentication'))%0A window_width = 768%0A self.browser.set_window_size(window_width, window_width / 2)%0A%0A # Username and password input boxes are centered%0A self.find_authentication_elements()%0A center_username = self.username_inputbox.location%5B'x'%5D + %5C%0A self.username_inputbox.size%5B'width'%5D / 2%0A center_password = self.password_inputbox.location%5B'x'%5D + %5C%0A self.password_inputbox.size%5B'width'%5D / 2%0A self.assertAlmostEqual(center_username, window_width / 2, delta=5)%0A self.assertAlmostEqual(center_password, window_width / 2, delta=5)%0A
|
6ef7e5fd63992950c22172c9db0e80998a8ff053
|
Fix doc typo.
|
tools/pyblock/pyblock/blocking.py
|
tools/pyblock/pyblock/blocking.py
|
'''Tools for reblocking of data to remove serial correlation from data sets.'''
import numpy
import collections
def reblock(data, rowvar=1, ddof=None):
'''Blocking analysis of correlated data.
Repeatedly average neighbouring data points in order to remove the effect of
serial correlation on the estimate of the standard error of a data set, as
described by Flyvbjerg and Petersen [1]_. The standard error is constant
(within error bars) once the correlation has been removed.
.. default-role:: math
Parameters
----------
data : :class:`numpy.ndarray`
1D or 2D array containing multiple variables and data points. See ``rowvar``.
rowvar : int
If ``rowvar`` is non-zero (default) then each row represents a variable and
each column a data point per variable. Otherwise the relationship is
swapped. Only used if data is a 2D array.
ddof : int
If not ``None``, then the standard error and covariance are normalised by
`(N - \\text{ddof})`, where `N` is the number of data points per variable.
Otherwise, the numpy default is used (i.e. `(N - 1)`).
Returns
-------
list of `collections.namedtuples`
Statistics from each reblocking iteration. Each tuple contains:
block : int
blocking iteration. Each iteration successively averages neighbouring
pairs of data points. The final data point is discarded if the number
of data points is odd.
ndata: int
number of data points in the blocking iteration.
mean : :class:`numpy.ndarray`
mean of each variable in the data set.
cov : :class:`numpy.ndarray`
covariance matrix.
std_err : :class:`numpy.ndarray`
standard error of each variable.
std_err_err : :class:`numpy.ndarray`
an estimate of the error in the standard error, assuming a Gaussian
distribution.
References
----------
.. [1] "Error estimates on averages of correlated data", H. Flyvbjerg and
H.G. Petersen, J. Chem. Phys. 91, 461 (1989).
'''
if ddof is not None and ddof != int(ddof):
raise ValueError("ddof must be integer")
if ddof is None:
ddof = 1
if data.ndim > 2:
raise RuntimeError("do not understand how to reblock in more than two dimensions.")
if data.ndim == 1 or data.shape[0] == 1:
rowvar = 1
axis = 0
elif rowvar:
axis = 1
else:
axis = 0
iblock = 0
stats = []
block_tuple_fields = 'block ndata mean cov std_err std_err_err'.split()
block_tuple = collections.namedtuple('BlockTuple', block_tuple_fields)
while data.shape[axis] >= 2:
mean = numpy.array(numpy.mean(data, axis=axis))
cov = numpy.cov(data, rowvar=rowvar, ddof=ddof)
if cov.ndim < 2:
std_err = numpy.array(numpy.sqrt(cov / data.shape[axis]))
else:
std_err = numpy.sqrt(cov.diagonal() / data.shape[axis])
data_len = data.shape[axis]
std_err_err = std_err * 1.0/(numpy.sqrt(2*(data_len-ddof)))
std_err_err = numpy.array(std_err_err)
stats.append(
block_tuple(iblock, data_len, mean, cov, std_err, std_err_err)
)
# last even-indexed value (ignore the odd one, if relevant)
last = 2*int(data.shape[axis]/2)
if data.ndim == 1 or not rowvar:
data = (data[:last:2] + data[1:last:2]) / 2
else:
data = (data[:,:last:2] + data[:,1:last:2]) / 2
iblock += 1
return stats
def find_optimal_block(ndata, stats):
'''Find the optimal block length from a reblocking calculation.
Inspect a reblocking calculation and find the block length which minimises the
stochastic error and removes the effect of correlation from the data set. This
follows the procedures detailed by Wolff [1]_ and Lee et al. [2]_.
.. default-role:: math
Parameters
----------
ndata : int
number of data points ('observations') in the data set.
stats : list of tuples
statistics in the format as returned by :func:`pyblock.blocking.reblock`.
Returns
-------
list of int
the optimal block index for each variable (i.e. the first block index in
which the correlation has been removed). If NaN, then the statistics
provided were not sufficient to estimate the correlation length and more
data should be collected.
Notes
-----
Wolff [1]_ (Eq 47) and Lee et al. [2]_ (Eq 14) give the optimal block size to be
.. math::
B^3 = 2 n n_{\\text{corr}}^2
where `n` is the number of data points in the data set, `B` is the number of
data points in each 'block' (ie the data set has been divided into `n/B`
contiguous blocks) and `n_{\\text{corr}}`.
[todo] - describe n_corr.
Following the scheme proposed by Lee et al. [2]_, we hence look for the largest
block size which satisfies
.. math::
B^3 >= 2 n n_{\\text{corr}}^2.
From Eq 13 in Lee et al. [2]_ (which they cast in terms of the variance):
.. math::
n_{\\text{err}} SE = SE_{\\text{true}}
where the 'error factor', `n_{\\text{err}}`, is the square root of the
estimated correlation length, `SE` is the standard error of the data set and
`SE_{\text{true}}` is the true standard error once the correlation length has
been taken into account. Hence the condition becomes:
.. math::
B^3 >= 2 n (SE(B) / SE(0))^4
where `SE(B)` is the estimate of the standard error of the data divided in
blocks of size `B`.
I am grateful to Will Vigor for discussions and the initial implementation.
References
----------
.. [1] "Monte Carlo errors with less errors", U. Wolff, Comput. Phys. Commun.
156, 143 (2004) and arXiv:hep-lat/0306017.
.. [2] "Strategies for improving the efficiency of quantum Monte Carlo
calculations", R. M. Lee, G. J. Conduit, N. Nemec, P. Lopez Rios, and N.
D. Drummond, Phys. Rev. E. 83, 066706 (2011).
'''
# Get the number of variables by looking at the number of means calculated
# in the first stats entry.
nvariables = stats[0][2].size
optimal_block = [float('NaN')]*nvariables
# If the data was just of a single variable, then the numpy arrays returned
# by blocking are all 0-dimensions. Make sure they're 1-D so we can use
# enumerate safely (just to keep the code short).
std_err_first = numpy.array(stats[0][4], ndmin=1)
for (iblock, data_len, mean, cov, std_err, std_err_err) in reversed(stats):
# 2**iblock data points per block.
B3 = 2**(3*iblock)
std_err = numpy.array(std_err, ndmin=1)
for (i, var_std_err) in enumerate(std_err):
if B3 > 2*ndata*(var_std_err/std_err_first[i])**4:
optimal_block[i] = iblock
return optimal_block
|
Python
| 0
|
@@ -5068,16 +5068,17 @@
d%0A%60SE_%7B%5C
+%5C
text%7Btru
|
60f753e736827f61607e10d160b7e7bab75b77cc
|
update pyasn version for workers
|
pipeline/setup.py
|
pipeline/setup.py
|
# Copyright 2020 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Dependency setup for beam remote workers."""
import setuptools
setuptools.setup(
name='censoredplanet-analysis',
version='0.0.1',
install_requires=['pyasn==1.6.0b1'],
packages=setuptools.find_packages(),
url='https://github.com/Jigsaw-Code/censoredplanet-analysis',
author='Sarah Laplante',
author_email='laplante@google.com')
|
Python
| 0
|
@@ -749,10 +749,8 @@
1.6.
-0b
1'%5D,
|
33240ac6581188e363d2e4e21753a3071f57df86
|
fix default source
|
pipenv/project.py
|
pipenv/project.py
|
import os
import toml
from . import _pipfile as pipfile
from .utils import format_toml, multi_split
from .utils import convert_deps_from_pip, convert_deps_to_pip
class Project(object):
"""docstring for Project"""
def __init__(self):
super(Project, self).__init__()
@property
def name(self):
return self.pipfile_location.split(os.sep)[-2]
@property
def pipfile_exists(self):
return bool(self.pipfile_location)
@property
def virtualenv_exists(self):
return os.path.isdir(self.virtualenv_location)
@property
def virtualenv_location(self):
return os.sep.join(self.pipfile_location.split(os.sep)[:-1] + ['.venv'])
@property
def pipfile_location(self):
try:
return pipfile.Pipfile.find()
except RuntimeError:
return None
@property
def parsed_pipfile(self):
with open(self.pipfile_location, 'r') as f:
return toml.load(f)
@property
def lockfile_location(self):
return '{0}.lock'.format(self.pipfile_location)
@property
def lockfile_exists(self):
return os.path.isfile(self.lockfile_location)
def create_pipfile(self):
data = {u'source': [{u'url': u'https://pypi.org/', u'verify_ssl': True}], u'packages': {}, 'dev-packages': {}}
with open('Pipfile', 'w') as f:
f.write(toml.dumps(data))
@property
def source(self):
if 'source' in self.parsed_pipfile:
return self.parsed_pipfile['source'][0]
else:
return [{u'url': u'https://pypi.org/', u'verify_ssl': True}][0]
@staticmethod
def remove_package_from_pipfile(package_name, dev=False):
pipfile_path = pipfile.Pipfile.find()
# Read and append Pipfile.
with open(pipfile_path, 'r') as f:
p = toml.loads(f.read())
key = 'dev-packages' if dev else 'packages'
if key in p:
if package_name in p[key]:
del p[key][package_name]
# Write Pipfile.
data = format_toml(toml.dumps(p))
with open(pipfile_path, 'w') as f:
f.write(data)
@staticmethod
def add_package_to_pipfile(package_name, dev=False):
# Lower-case package name.
package_name = package_name.lower()
# Find the Pipfile.
pipfile_path = pipfile.Pipfile.find()
# Read and append Pipfile.
with open(pipfile_path, 'r') as f:
p = toml.loads(f.read())
key = 'dev-packages' if dev else 'packages'
# Set empty group if it doesn't exist yet.
if key not in p:
p[key] = {}
package = convert_deps_from_pip(package_name)
package_name = [k for k in package.keys()][0]
# Add the package to the group.
p[key][package_name] = package[package_name]
# Write Pipfile.
data = format_toml(toml.dumps(p))
with open(pipfile_path, 'w') as f:
f.write(data)
|
Python
| 0.000001
|
@@ -1253,36 +1253,49 @@
u'https://pypi.
-org/
+python.org/simple
', u'verify_ssl'
@@ -1610,20 +1610,33 @@
://pypi.
-org/
+python.org/simple
', u'ver
@@ -1656,34 +1656,16 @@
e%7D%5D%5B0%5D%0A%0A
- @staticmethod%0A
def
@@ -1684,32 +1684,38 @@
ge_from_pipfile(
+self,
package_name, de
@@ -1818,79 +1818,31 @@
-with open(pipfile_path, 'r') as f:%0A p = toml.loads(f.read())
+p = self.parsed_pipfile
%0A%0A
@@ -1839,36 +1839,32 @@
ipfile%0A%0A
-
-
key = 'dev-packa
@@ -1891,20 +1891,17 @@
ckages'%0A
-
+%0A
@@ -1925,20 +1925,16 @@
-
if packa
@@ -1952,20 +1952,16 @@
p%5Bkey%5D:%0A
-
@@ -2135,25 +2135,8 @@
a)%0A%0A
- @staticmethod
%0A
@@ -2163,16 +2163,22 @@
pipfile(
+self,
package_
@@ -2397,85 +2397,33 @@
-with open(pipfile_path, 'r') as f:%0A p = toml.loads(f.read())%0A%0A
+p = self.parsed_pipfile%0A%0A
@@ -2479,20 +2479,16 @@
-
-
# Set em
@@ -2526,28 +2526,24 @@
et.%0A
-
if key not i
@@ -2555,28 +2555,24 @@
-
-
p%5Bkey%5D = %7B%7D%0A
@@ -2576,28 +2576,24 @@
%7B%7D%0A%0A
-
-
package = co
@@ -2626,20 +2626,16 @@
e_name)%0A
-
@@ -2689,20 +2689,16 @@
-
-
# Add th
@@ -2721,20 +2721,16 @@
group.%0A
-
|
8737ac6e9485393af27292371aebd84ac28b2a59
|
Fix to panel.
|
otter/bootstrap.py
|
otter/bootstrap.py
|
"""
Implements a means of using the Bootstrap html and css toolkit to make the report.
"""
from itertools import cycle
import markdown
class HTMLElement():
def __init__(self, content=""):
self.content = content
def __iadd__(self, item):
self.__add__(item)
return self.content
def __add__(self, item):
if isinstance(item, str):
self.content.append(markdown.markdown(str(item), output_format='xhtml5'))
else:
self.content.append(item)
class Row(HTMLElement):
def __init__(self, cols=1, size='md', hclass=''):
"""
Add a new row to the grid.
Parameters
----------
cols : int, list, tuple, or iterable
Either the number of columns desired, or an array of the widths.
The widths are specified in twelfths of the full width, so for example
`cols = [4,4,4]` makes three columns each one-third of a page wide.
size : {'xs', 'sm', 'md', 'lg'}
The size of the column, according to the Bootstrap responsive design. Defaults to 'md'.
hclass : str or iterable
Adds a class to each column. If an iterable is provided then a seperate class
can be applied to each column.
"""
self.rowopen = "<div class='row'>"
self.rowclose = "</div>"
if hasattr(cols, '__getitem__'):
self.columns = [Column(width, size, hclass) for width, hclass in zip(cols, cycle(hclass))]
elif isinstance(cols, int):
width = int(12/cols)
self.columns = [Column(width, size, hclass) for _ in range(cols)]
# We want this to degrade nicely so if there's only one column we interface directly with the row
def __add__(self, item):
if len(self.columns)==1:
self.columns[0] + item
else:
# Need to throw an error
pass
def __getitem__(self, i):
return self.columns[i]
def __setitem__(self, i, item):
self.columns[i].content = markdown.markdown(item, output_format='xhtml5')
def __repr__(self):
output = ''
output += self.rowopen
for column in self.columns:
output += repr(column)
output += self.rowclose
return output
def __str__(self):
return self.__repr__()
class Column(HTMLElement):
def __init__(self, width=12, size="md", hclass=""):
self.width = width
self.size = size
self.hclass = hclass
self.colopen = "<div class='col-{size}-{width} {hclass}'>"
self.colclose = "</div>"
self.content = []
def __repr__(self):
output = ''
output += self.colopen.format(size=self.size, width=self.width, hclass=self.hclass)
for content in self.content:
output += str(content)
output += self.colclose
return output
def __str__(self):
return self.__repr__()
class Alert(HTMLElement):
"""
Creates a Bootstrap alert object in the report.
"""
def __init__(self, text='',style='info', hclass=''):
self.alertopen = "<div class='alert alert-{}' role='alert'>".format(style)
self.alertclose = "</div>"
self.content = text
def __repr__(self):
output = ''
output += self.alertopen
output += self.content
output += self.alertclose
return output
def __str__(self):
return self.__repr__
class Label(HTMLElement):
def __init__(self, text="", style="default"):
"""
Make a new label element.
"""
self.opening = "<span class='label label-{}'>".format(style)
self.closing = "</span>"
self.content = content
def __repr__(self):
output = ''
return output.join(self.opening, self.content, self.closing)
def __str__(self):
return self.__repr__
class Panel(HTMLElement):
"""
Creates a Bootstrap panel object in the report.
"""
def __init__(self, title='', footer = '', style='default', hclass=''):
self.title = title
self.footer = footer
self.style = style
self.hclass = hclass
self.panopen = "<div class='panel panel-{style} {hclass}'>"
self.panclose = "</div>"
self.panbody = "<div class='panel-body'>{}</div>"
self.panheader = "<div class='panel-heading'>{}</div>"
self.panfooter = "<div class='panel-footer'>{}</div>"
self.content = []
def __repr__(self):
output = ''
output += self.panopen.format(style=self.style, hclass=self.hclass)
if self.panheader != '': output += self.panheader.format(self.title)
contents = ""
for content in self.content:
contents += str(content)
output += self.panbody.format(contents)
if self.footer != '': output += self.panfooter.format(self.footer)
return output
__str__ = __repr__
|
Python
| 0
|
@@ -5028,16 +5028,48 @@
footer)%0A
+ output += self.panclose%0A
|
7842919b2af368c640363b4e4e05144049b111ba
|
Remove BaseMail dependency on User object
|
ovp_core/emails.py
|
ovp_core/emails.py
|
from django.core.mail import EmailMultiAlternatives
from django.template import Context, Template
from django.template.loader import get_template
from django.conf import settings
import threading
class EmailThread(threading.Thread):
def __init__(self, msg):
self.msg = msg
threading.Thread.__init__(self)
def run (self):
return self.msg.send() > 0
class BaseMail:
"""
This class is responsible for firing emails
"""
from_email = ''
def __init__(self, user, async_mail=None):
self.user = user
self.async_mail = async_mail
def sendEmail(self, template_name, subject, context):
ctx = Context(context)
text_content = get_template('email/{}.txt'.format(template_name)).render(ctx)
html_content = get_template('email/{}.html'.format(template_name)).render(ctx)
msg = EmailMultiAlternatives(subject, text_content, self.from_email, [self.user.email])
msg.attach_alternative(text_content, "text/plain")
msg.attach_alternative(html_content, "text/html")
if self.async_mail:
async_flag="async"
else:
async_flag=getattr(settings, "DEFAULT_SEND_EMAIL", "async")
if async_flag == "async":
t = EmailThread(msg)
t.start()
return t
else:
return msg.send() > 0
|
Python
| 0
|
@@ -486,20 +486,29 @@
_(self,
-user
+email_address
, async_
@@ -532,19 +532,37 @@
elf.
-user = user
+email_address = email_address
%0A
@@ -924,18 +924,21 @@
elf.
-user.
email
+_address
%5D)%0A
|
3c9de69112c8158877e4b0060ef0ab89c083f376
|
Build 1.14.0.1 package for Windows
|
packages/custom.py
|
packages/custom.py
|
# -*- Mode: Python -*- vi:si:et:sw=4:sts=4:ts=4:syntax=python
from cerbero.packages import package
from cerbero.enums import License
class GStreamer:
url = "http://gstreamer.freedesktop.org"
version = '1.14.0'
vendor = 'GStreamer Project'
licenses = [License.LGPL]
org = 'org.freedesktop.gstreamer'
|
Python
| 0
|
@@ -212,16 +212,18 @@
'1.14.0
+.1
'%0A ve
|
2250fdef5528bb59ca2c3218110d637484737659
|
fix pilutil.imresize test. Patch by Mark Wiebe.
|
scipy/misc/tests/test_pilutil.py
|
scipy/misc/tests/test_pilutil.py
|
import os.path
import numpy as np
from numpy.testing import assert_, assert_equal, \
dec, decorate_methods, TestCase, run_module_suite
try:
import PIL.Image
except ImportError:
_have_PIL = False
else:
_have_PIL = True
import scipy.misc.pilutil as pilutil
# Function / method decorator for skipping PIL tests on import failure
_pilskip = dec.skipif(not _have_PIL, 'Need to import PIL for this test')
datapath = os.path.dirname(__file__)
class TestPILUtil(TestCase):
def test_imresize(self):
im = np.random.random((10,20))
for T in np.sctypes['float'] + [float]:
im1 = pilutil.imresize(im,T(1.1))
assert_equal(im1.shape,(11,22))
def test_imresize2(self):
im = np.random.random((20,30))
im2 = pilutil.imresize(im, (30,40), interp='bicubic')
assert_equal(im2.shape, (30,40))
def test_imresize3(self):
im = np.random.random((15,30))
im2 = pilutil.imresize(im, (30,60), interp='nearest')
assert_equal(im2.shape, (30,60))
def test_bytescale(self):
x = np.array([0,1,2],np.uint8)
y = np.array([0,1,2])
assert_equal(pilutil.bytescale(x),x)
assert_equal(pilutil.bytescale(y),[0,127,255])
def tst_fromimage(filename, irange):
img = pilutil.fromimage(PIL.Image.open(filename))
imin,imax = irange
assert_(img.min() >= imin)
assert_(img.max() <= imax)
@_pilskip
def test_fromimage():
''' Test generator for parametric tests '''
data = {'icon.png':(0,255),
'icon_mono.png':(0,2),
'icon_mono_flat.png':(0,1)}
for fn, irange in data.iteritems():
yield tst_fromimage, os.path.join(datapath,'data',fn), irange
decorate_methods(TestPILUtil, _pilskip)
if __name__ == "__main__":
run_module_suite()
|
Python
| 0
|
@@ -603,16 +603,79 @@
float%5D:%0A
+ # 1.1 rounds to below 1.1 for float16, 1.101 works%0A
@@ -709,16 +709,18 @@
im,T(1.1
+01
))%0A
|
b3573faeff22f220990ea2c97a7c9eae26429258
|
add parse for application/json
|
tornado-sqlalchemy-example/app.py
|
tornado-sqlalchemy-example/app.py
|
import os
import tornado.web
import tornado.options
import tornado.ioloop
from db import db
from model import User
class BaseHandler(tornado.web.RequestHandler):
@property
def db(self):
return self.application.db
class IndexHandler(BaseHandler):
def get(self):
data = self.db.query(User).all()
a = User(username="test", password="test")
self.db.add(a)
data1 = self.db.query(User).all()
for d in data:
self.write("user: %s\n" % d.username)
self.write("==================")
for d in data1:
self.write("second %s" % d.username)
class Application(tornado.web.Application):
def __init__(self):
handlers = [
(r"/", IndexHandler),
]
settings = dict(
debug=True,
static_path=os.path.join(os.path.dirname(__file__), "static"),
template_path=os.path.join(os.path.dirname(__file__), "templates")
)
tornado.web.Application.__init__(self, handlers, **settings)
self.db = db
if __name__ == '__main__':
tornado.options.parse_command_line()
Application().listen(8000)
tornado.ioloop.IOLoop.instance().start()
|
Python
| 0.000001
|
@@ -1,12 +1,36 @@
+# -*- coding: utf-8 -*-%0A
import os%0Aim
@@ -133,16 +133,67 @@
rt User%0A
+from tornado.escape import json_decode, to_unicode%0A
%0A%0Aclass
@@ -257,32 +257,32 @@
def db(self):%0A
-
return s
@@ -301,16 +301,390 @@
ion.db%0A%0A
+ def get_json_argument(self, name, default=None):%0A %22%22%22%E5%BD%93Content-Type%E7%9A%84%E5%80%BC%E4%B8%BAapplication/json, %E8%A7%A3%E6%9E%90%E8%AF%B7%E6%B1%82%E5%8F%82%E6%95%B0%22%22%22%0A args = json_decode(self.request.body)%0A name = to_unicode(name)%0A if name in args:%0A return args%5Bname%5D%0A elif default is not None:%0A return default%0A else:%0A raise tornado.web.MissingArgumentError(name)%0A%0A
%0Aclass I
|
3056cf737ae0b6717073a03a6e01addfb1415416
|
is_project is *not* a uuid
|
scrapi/processing/osf/hashing.py
|
scrapi/processing/osf/hashing.py
|
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
import unicodedata
import string
import hashlib
def get_id(doc):
return normalize_string(doc['id']['serviceID'])
def get_source(doc):
return normalize_string(doc['source'])
def get_doi(doc):
return normalize_string(doc['id']['doi'] + get_title(doc))
def normalize_string(astring): # helper function for grab_ funcs; takes a unicode string
astring = astring.lower()
# stop words - happy to add more
stopwords = ['the', 'a', 'an', 'about', 'do', 'does', 'what', 'who', 'it', 'to', 'has', 'had', 'in', 'by']
for word in stopwords:
word = ' ' + word + ' '
astring = astring.replace(word, ' ')
# docs.python.org/2/library/unicodedata.html
# TODO: this may not work for some unicode characters; I dealt w/ known special cases
# (when it fails to transliterate, it replaces with '')
astring = astring.replace(u'æ', u'ae')
astring = astring.replace(u'Æ', u'Ae')
astring = astring.replace(u'ß', u'ss') # assumes good transliteration
astring = astring.replace(u'—', u'')
bstring = unicodedata.normalize('NFKD', astring).encode('ascii','ignore')
exclude = set(string.punctuation)
exclude.add(' ')
exclude.add('\n')
bstring = ''.join(ch for ch in bstring if ch not in exclude)
return bstring # returns the essence of the string, as a string
def get_contributors(doc):
contributors = doc['contributors'] # this is a list
namelist = ''
for contributor in contributors:
fullname = contributor['given'] + contributor['family']
namelist += fullname
namelist = sorted(namelist) # alphabetical order, in case contrib order varies by source
namelist = ''.join(namelist)
namelist = normalize_string(namelist)
namelist = hashlib.md5(namelist).hexdigest() # should be shorter as md5 than full list
return normalize_string(namelist) # returns a list of strings
def get_title(doc):
title = doc['title']
title = normalize_string(title)
title = hashlib.md5(title).hexdigest() # should be shorter on average than full title
return title
def is_project(doc):
return ';isProject:true'
REPORT_HASH_FUNCTIONS = [get_title, get_contributors, get_doi, get_id]
RESOURCE_HASH_FUNCTIONS = [get_title, get_contributors, is_project]
|
Python
| 0.999386
|
@@ -2323,18 +2323,6 @@
tors
-, is_project
%5D%0A
|
ba5e34c49370552b9779f90a77980dfa457aaa55
|
comment out figshare author biblio
|
totalimpact/providers/figshare.py
|
totalimpact/providers/figshare.py
|
from totalimpact.providers import provider
from totalimpact.providers import crossref
from totalimpact.providers.provider import Provider, ProviderContentMalformedError
import re
import logging
logger = logging.getLogger('ti.providers.figshare')
class Figshare(Provider):
example_id = ("doi", "10.6084/m9.figshare.92393")
url = "http://figshare.com"
descr = "Make all of your research outputs sharable, citable and visible in the browser for free."
biblio_url_template = "http://api.figshare.com/v1/articles/%s"
aliases_url_template = "http://api.figshare.com/v1/articles/%s"
metrics_url_template = "http://api.figshare.com/v1/articles/%s"
provenance_url_template = "http://dx.doi.org/%s"
member_items_url_template = "http://api.figshare.com/v1/authors/%s?page=%s"
static_meta_dict = {
"shares": {
"display_name": "shares",
"provider": "figshare",
"provider_url": "http://figshare.com",
"description": "The number of times this has been shared",
"icon": "http://figshare.com/static/img/favicon.png",
},
"downloads": {
"display_name": "downloads",
"provider": "figshare",
"provider_url": "http://figshare.com",
"description": "The number of times this has been downloaded",
"icon": "http://figshare.com/static/img/favicon.png",
},
"views": {
"display_name": "views",
"provider": "figshare",
"provider_url": "http://figshare.com",
"description": "The number of times this item has been viewed",
"icon": "http://figshare.com/static/img/favicon.png",
}
}
def __init__(self):
super(Figshare, self).__init__()
self.crossref = crossref.Crossref()
def is_relevant_alias(self, alias):
(namespace, nid) = alias
is_figshare_doi = (namespace == "doi") and (".figshare." in nid.lower())
return is_figshare_doi
@property
def provides_members(self):
return True
def get_figshare_userid_from_author_url(self, url):
match = re.findall("figshare.com\/authors\/.*?\/(\d+)", url)
return match[0]
def _extract_aliases(self, page, id=None):
dict_of_keylists = {"url": ["figshare_url"]}
item = self._extract_figshare_record(page, id)
aliases_dict = provider._extract_from_data_dict(item, dict_of_keylists)
if aliases_dict:
aliases_list = [(namespace, nid) for (namespace, nid) in aliases_dict.iteritems()]
else:
aliases_list = []
return aliases_list
def _extract_biblio(self, page, id=None):
dict_of_keylists = {
'title' : ['title'],
'genre' : ['defined_type'],
'authors_literal' : ['authors'],
'published_date' : ['published_date']
}
item = self._extract_figshare_record(page, id)
biblio_dict = provider._extract_from_data_dict(item, dict_of_keylists)
biblio_dict["repository"] = "figshare"
try:
biblio_dict["year"] = int(biblio_dict["published_date"][-4:])
except (KeyError, TypeError):
pass
if "genre" in biblio_dict:
genre = biblio_dict["genre"].lower()
#override
if genre in ["figure", "poster"]:
genre = biblio_dict["genre"]
elif genre == "presentation":
genre = "slides"
elif genre == "paper":
genre = "article"
elif genre == "media":
genre = "video"
else:
genre = "dataset" #includes fileset
biblio_dict["genre"] = genre
if "authors_literal" in biblio_dict:
surname_list = [author["last_name"] for author in biblio_dict["authors_literal"]]
if surname_list:
biblio_dict["authors"] = ", ".join(surname_list)
del biblio_dict["authors_literal"]
return biblio_dict
def _extract_figshare_record(self, page, id):
data = provider._load_json(page)
if not data:
return {}
item = data["items"][0]
if str(item["article_id"]) in id:
return item
else:
return {}
def _extract_metrics(self, page, status_code=200, id=None):
if status_code != 200:
if status_code == 404:
return {}
else:
raise(self._get_error(status_code))
dict_of_keylists = {
'figshare:shares' : ['shares'],
'figshare:downloads' : ['downloads'],
'figshare:views' : ['views']
}
item = self._extract_figshare_record(page, id)
metrics_dict = provider._extract_from_data_dict(item, dict_of_keylists)
return metrics_dict
def _extract_members(self, page, query_string=None):
data = provider._load_json(page)
dois = [item["DOI"].replace("http://dx.doi.org/", "") for item in data["items"]]
doi_aliases = [("doi", doi) for doi in dois]
return(doi_aliases)
# default method; providers can override
def member_items(self,
query_string,
provider_url_template=None,
cache_enabled=True):
if not self.provides_members:
raise NotImplementedError()
self.logger.debug(u"%s getting member_items for %s" % (self.provider_name, query_string))
if not provider_url_template:
provider_url_template = self.member_items_url_template
figshare_userid = self.get_figshare_userid_from_author_url(query_string)
next_page = 1
members = []
while next_page:
url = provider_url_template % (figshare_userid, next_page)
# try to get a response from the data provider
response = self.http_get(url, cache_enabled=cache_enabled)
if response.status_code != 200:
self.logger.info(u"%s status_code=%i"
% (self.provider_name, response.status_code))
if response.status_code == 404:
raise ProviderItemNotFoundError
elif response.status_code == 303: #redirect
pass
else:
self._get_error(response.status_code, response)
# extract the member ids
number_of_items_per_page = 10 #figshare default
try:
page = response.text
data = provider._load_json(page)
if data["items_found"] > next_page*number_of_items_per_page:
next_page += 1
else:
next_page = None
members += self._extract_members(page, query_string)
except (AttributeError, TypeError):
next_page = None
return(members)
|
Python
| 0
|
@@ -2841,16 +2841,17 @@
+#
'authors
@@ -3805,24 +3805,87 @@
%0A%0A
+ # the authors data is messy, so just give up for now%0A #
if %22authors
@@ -3913,24 +3913,26 @@
ict:%0A
+ #
surname
@@ -4005,32 +4005,34 @@
teral%22%5D%5D%0A
+ #
if surname_
@@ -4036,32 +4036,34 @@
me_list:%0A
+ #
biblio_
@@ -4103,32 +4103,34 @@
me_list)%0A
+ #
del bib
|
60c355182f5e2d6a049f763031ffd15c57539a18
|
add views as a figshare metric
|
totalimpact/providers/figshare.py
|
totalimpact/providers/figshare.py
|
from totalimpact.providers import provider
from totalimpact.providers.provider import Provider, ProviderContentMalformedError
import simplejson
import logging
logger = logging.getLogger('ti.providers.figshare')
class Figshare(Provider):
example_id = ("doi", "10.6084/m9.figshare.92393")
url = "http://figshare.com"
descr = "Make all of your research outputs sharable, citable and visible in the browser for free."
biblio_url_template = "http://api.figshare.com/v1/articles/%s"
aliases_url_template = "http://api.figshare.com/v1/articles/%s"
metrics_url_template = "http://api.figshare.com/v1/articles/%s"
provenance_url_template = "http://dx.doi.org/%s"
static_meta_dict = {
"shares": {
"display_name": "shares",
"provider": "figshare",
"provider_url": "http://figshare.com",
"description": "The number of times this has been shared",
"icon": "http://figshare.com/static/img/favicon.png",
},
"downloads": {
"display_name": "downloads",
"provider": "figshare",
"provider_url": "http://figshare.com",
"description": "The number of times this has been downloaded",
"icon": "http://figshare.com/static/img/favicon.png",
}
}
def __init__(self):
super(Figshare, self).__init__()
def is_relevant_alias(self, alias):
(namespace, nid) = alias
is_figshare_doi = (namespace == "doi") and (".figshare." in nid.lower())
return is_figshare_doi
def _extract_item(self, page, id):
data = provider._load_json(page)
if not data:
return {}
item = data["items"][0]
if item["doi"] == self._get_templated_url(self.provenance_url_template, id, "provenance"):
return item
else:
return {}
def _extract_biblio(self, page, id=None):
dict_of_keylists = {
'title' : ['title'],
'authors' : ['authors'],
'published_date' : ['published_date'],
'url' : ['doi']
}
item = self._extract_item(page, id)
biblio_dict = provider._extract_from_data_dict(item, dict_of_keylists)
if "published_date" in biblio_dict:
biblio_dict["year"] = biblio_dict["published_date"][-4:]
del biblio_dict["published_date"]
if "authors" in biblio_dict:
biblio_dict["authors"] = ", ".join(author["last_name"] for author in biblio_dict["authors"])
return biblio_dict
def _extract_aliases(self, page, id=None):
dict_of_keylists = {
'title' : ['title'],
'url' : ['doi']
}
item = self._extract_item(page, id)
aliases_dict = provider._extract_from_data_dict(item, dict_of_keylists)
if aliases_dict:
aliases_list = [(namespace, nid) for (namespace, nid) in aliases_dict.iteritems()]
else:
aliases_list = []
return aliases_list
def _extract_metrics(self, page, status_code=200, id=None):
if status_code != 200:
if status_code == 404:
return {}
else:
raise(self._get_error(status_code))
dict_of_keylists = {
'figshare:shares' : ['shares'],
'figshare:downloads' : ['downloads']
}
item = self._extract_item(page, id)
metrics_dict = provider._extract_from_data_dict(item, dict_of_keylists)
return metrics_dict
|
Python
| 0
|
@@ -1285,32 +1285,332 @@
g/favicon.png%22,%0A
+ %7D,%0A %22views%22: %7B%0A %22display_name%22: %22views%22,%0A %22provider%22: %22figshare%22,%0A %22provider_url%22: %22http://figshare.com%22,%0A %22description%22: %22The number of times this item has been viewed%22,%0A %22icon%22: %22http://figshare.com/static/img/favicon.png%22,%0A
%7D%0A
@@ -3699,16 +3699,58 @@
nloads'%5D
+,%0A 'figshare:views' : %5B'views'%5D
%0A
|
a6e8fcc175dbd0c6220d0cf7c487cd02eef47d77
|
typo PEOPLE_DEFAULT_DIRECORY corrected
|
academic/people/models.py
|
academic/people/models.py
|
from django.db import models
from django.utils.translation import ugettext_lazy as _
from django.core.validators import RegexValidator
from filebrowser.fields import FileBrowseField
try:
from south.modelsinspector import add_introspection_rules
add_introspection_rules([], ["^filebrowser\.fields\.FileBrowseField"])
except:
pass
from django_countries.fields import CountryField
from datetime import date
from academic.settings import *
from academic.utils import *
from academic.organizations.models import *
class Rank(models.Model):
"""
The academic rank (e.g., udergraduate student, graduate student,
phd candidate, assistant professor)
"""
class Meta:
verbose_name = _('Rank')
verbose_name_plural = _('Ranks')
ordering = [
'order',]
name = models.CharField(
_('Rank name'),
help_text=_('E.g., Full Professor'),
max_length=64)
plural_name = models.CharField(
_('Rank plural name'),
help_text=_('E.g., Full Professors'),
max_length=64)
order = models.PositiveSmallIntegerField(
_('Rank order'),
help_text=_('Lower values mean higher importance.'
' I.e., put 0 for a "Full professor"'))
def __unicode__(self):
return self.name
class AlumniManager(models.Manager):
'''
People who graduated here and left.
'''
def get_query_set(self):
return super(AlumniManager, self).get_query_set().filter(
alumni=True,
public=True)
class VisitorManager(models.Manager):
'''
People who are visiting.
'''
def get_query_set(self):
return super(VisitorManager, self).get_query_set().filter(
current=True,
visitor=True,
public=True)
class PastVisitorManager(models.Manager):
'''
People who visited the lab in the past.
'''
def get_query_set(self):
return super(PastVisitorManager, self).get_query_set().filter(
visitor=True,
current=False,
public=True)
class PersonManager(models.Manager):
'''
Genuine people.
'''
def get_query_set(self):
return super(PersonManager, self).get_query_set().filter(
current=True,
visitor=False,
alumni=False,
public=True)
class Person(models.Model):
"""
A person in a research lab.
"""
class Meta:
verbose_name = _('Person')
verbose_name_plural = _('People')
ordering = [
'rank',
'last_name',
'first_name', ]
objects_all = models.Manager()
objects_visitors = VisitorManager()
objects_alumni = AlumniManager()
objects_past_visitors = PastVisitorManager()
objects = PersonManager()
affiliation = models.ManyToManyField(
Organization,
verbose_name=_('Affiliations'),
blank=True,
null=True,
related_name='people')
public = models.BooleanField(
verbose_name=_('Public?'),
help_text=_('Toggle visibility on main pages.'),
default=True)
visitor = models.BooleanField(
verbose_name=_('Visitor'),
help_text=_('Is he/she a visitor?'),
default=False)
alumni = models.BooleanField(
verbose_name=_('Alumni'),
help_text=_('Did he/she graduate here?'),
default=False)
current = models.BooleanField(
verbose_name=_('Current'),
help_text=_('Is he/she still in the group?'),
default=True)
rank = models.ForeignKey(
Rank,
verbose_name=_('Academic Rank'),
help_text=_('Leave blank if this person is not in the group anymore.'),
related_name='people',
blank=True,
null=True)
first_name = models.CharField(
_('First Name'),
max_length=64)
mid_name = models.CharField(
blank=True,
null=True,
max_length=64)
last_name = models.CharField(
_('Last Name'),
max_length=64)
e_mail = models.EmailField(
_('E-mail'),
blank=True,
null=True)
web_page = models.URLField(
_('Web page'),
blank=True,
null=True)
description = models.TextField(
_('Short bio'),
blank=True,
null=True)
picture = FileBrowseField(
_('Profile picture'),
max_length=200,
directory=PEOPLE_DEFAULT_DIRECORY,
format='Image',
default=PEOPLE_DEFAULT_PICTURE,
blank=True,
null=True)
@models.permalink
def get_absolute_url(self):
return ('academic_people_person_detail', (), {'object_id': self.pk})
def _has_picture(self):
return self.picture != ''
has_picture = property(_has_picture)
def __unicode__(self):
return u'%s' % self.name
def _get_name(self):
r = '%s' % self.first_name
if self.mid_name:
r = '%s %s.' % (r, self.mid_name[0])
return '%s %s' % (r, self.last_name)
name = property(_get_name)
def _get_fullname(self):
r = '%s' % self.first_name
if self.mid_name:
r = '%s %s' % (r, self.mid_name)
return '%s %s' % (r, self.last_name)
fullname = property(_get_fullname)
def _get_sname(self):
r = '%s.' % self.first_name[0]
if self.mid_name:
r = '%s %s.' % (r, self.mid_name[0])
return '%s %s' % (r, self.last_name)
sname = property(_get_sname)
def _get_slug(self):
return (u'%s-%s' % (self.first_name[0], self.last_name)).lower()
slug = property(_get_slug)
|
Python
| 0.999742
|
@@ -4463,16 +4463,17 @@
LT_DIREC
+T
ORY,%0A
|
bf81484b7fd55e6383ae8e0f103e5e69ddea430e
|
Update utils.py
|
academictorrents/utils.py
|
academictorrents/utils.py
|
import hashlib
import os
import json
import datetime
import calendar
import time
def convert_bytes_to_decimal(headerBytes):
size = 0
power = len(headerBytes) - 1
for ch in headerBytes:
if isinstance(ch, int):
size += ch * 256 ** power
else:
size += int(ord(ch)) * 256 ** power
power -= 1
return size
def sha1_hash(string):
"""Return 20-byte sha1 hash of string."""
return hashlib.sha1(string).digest()
def get_timestamp_filename():
return clean_path("~/.academictorrents_timestamps.json")
def get_datastore(datastore="", path_to_config_file="~/.academictorrents.config"):
if datastore:
datastore = clean_path(datastore)
else:
datastore = json.loads(open(clean_path(path_to_config_file)).read()).get("datastore", os.getcwd() + "/datastore/")
if datastore[-1] != "/":
datastore = datastore + "/"
return datastore
def clean_path(path=None):
if path.startswith("~"):
return os.path.expanduser(path)
else:
return os.path.abspath(path)
def write_timestamp(at_hash):
filename = get_timestamp_filename()
try:
f = open(filename, 'r')
timestamps = json.load(f)
f.close()
except Exception:
timestamps = {}
timestamps[at_hash] = int(datetime.datetime.now().strftime("%s"))
f = open(filename, 'w')
json.dump(timestamps, f)
def read_timestamp(at_hash):
filename = get_timestamp_filename()
try:
f = open(filename, 'r')
timestamp = json.load(f).get(at_hash, 0)
f.close()
except Exception:
timestamp = 0
return timestamp
def timestamp_is_within_30_days(timestamp):
seconds_in_a_month = 86400 * 30
if timestamp > int(calendar.timegm(time.gmtime())) - seconds_in_a_month:
return True
return False
def timestamp_is_within_10_seconds(timestamp):
ten_seconds = 10
if timestamp > int(calendar.timegm(time.gmtime())) - ten_seconds:
return True
return False
def filenames_present(torrent):
return torrent.contents['info']['name'] in os.listdir(torrent.datastore)
|
Python
| 0.000001
|
@@ -1334,27 +1334,41 @@
ime.
-now().strftime(%22%25s%22
+timestamp(datetime.datetime.now()
))%0A
|
9ade3354c30833dbd9048f46e73531ad86caa86d
|
Ensure defined macro without a value is None
|
pkgconfig/pkgconfig.py
|
pkgconfig/pkgconfig.py
|
# -*- coding: utf-8 -*-
# Copyright (c) 2013 Matthias Vogelgesang <matthias.vogelgesang@gmail.com>
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
"""pkgconfig is a Python module to interface with the pkg-config command line
tool."""
import os
import shlex
import re
import collections
from functools import wraps
from subprocess import call, PIPE, Popen
def _compare_versions(v1, v2):
"""
Compare two version strings and return -1, 0 or 1 depending on the equality
of the subset of matching version numbers.
The implementation is taken from the top answer at
http://stackoverflow.com/a/1714190/997768.
"""
def normalize(v):
return [int(x) for x in re.sub(r'(\.0+)*$', '', v).split(".")]
n1 = normalize(v1)
n2 = normalize(v2)
return (n1 > n2) - (n1 < n2)
def _split_version_specifier(spec):
"""Splits version specifiers in the form ">= 0.1.2" into ('0.1.2', '>=')"""
m = re.search(r'([<>=]?=?)?\s*((\d*\.)*\d*)', spec)
return m.group(2), m.group(1)
def _convert_error(func):
@wraps(func)
def _wrapper(*args, **kwargs):
try:
return func(*args, **kwargs)
except OSError:
raise EnvironmentError("pkg-config is not installed")
return _wrapper
@_convert_error
def _query(package, option):
pkg_config_exe = os.environ.get('PKG_CONFIG', None) or 'pkg-config'
cmd = '{0} {1} {2}'.format(pkg_config_exe, option, package)
proc = Popen(shlex.split(cmd), stdout=PIPE, stderr=PIPE)
out, err = proc.communicate()
return out.rstrip().decode('utf-8')
@_convert_error
def exists(package):
"""
Return True if package information is available.
If ``pkg-config`` not on path, raises ``EnvironmentError``.
"""
pkg_config_exe = os.environ.get('PKG_CONFIG', None) or 'pkg-config'
cmd = '{0} --exists {1}'.format(pkg_config_exe, package).split()
return call(cmd) == 0
@_convert_error
def requires(package):
"""
Return a list of package names that is required by the package.
If ``pkg-config`` not on path, raises ``EnvironmentError``.
"""
return _query(package, '--print-requires').split('\n')
def cflags(package):
"""
Return the CFLAGS string returned by pkg-config.
If ``pkg-config`` not on path, raises ``EnvironmentError``.
"""
return _query(package, '--cflags')
def libs(package):
"""Return the LDFLAGS string returned by pkg-config."""
return _query(package, '--libs')
def variables(package):
"""Return a dictionary of all the variables defined in the .pc pkg-config
file of 'packae'"""
if not exists(package):
msg = ('package "{}" does not exist in PKG_CONFIG_PATH or\n'
'or something else went wrong').format(package)
raise ValueError(msg)
pkg_config_exe = os.environ.get('PKG_CONFIG', None) or 'pkg-config'
# get the list of all the variables defined in the .pc file
cmd = '{0} {1} {2}'.format(
pkg_config_exe, '--print-variables', package)
proc = Popen(shlex.split(cmd), stdout=PIPE, stderr=PIPE)
out, _ = proc.communicate()
_variables = filter(lambda x: x != '', out.decode('utf-8').split('\n'))
# get the variable values
retval = dict()
for variable in _variables:
cmd = '{0} --variable={1} {2}'.format(
pkg_config_exe, variable, package)
proc = Popen(shlex.split(cmd), stdout=PIPE, stderr=PIPE)
out, _ = proc.communicate()
retval[variable] = out.decode('utf-8').strip()
return retval
def installed(package, version):
"""
Check if the package meets the required version.
The version specifier consists of an optional comparator (one of =, ==, >,
<, >=, <=) and an arbitrarily long version number separated by dots. The
should be as you would expect, e.g. for an installed version '0.1.2' of
package 'foo':
>>> installed('foo', '==0.1.2')
True
>>> installed('foo', '<0.1')
False
>>> installed('foo', '>= 0.0.4')
True
If ``pkg-config`` not on path, raises ``EnvironmentError``.
"""
if not exists(package):
return False
number, comparator = _split_version_specifier(version)
modversion = _query(package, '--modversion')
try:
result = _compare_versions(modversion, number)
except ValueError:
msg = "{0} is not a correct version specifier".format(version)
raise ValueError(msg)
if comparator in ('', '=', '=='):
return result == 0
if comparator == '>':
return result > 0
if comparator == '>=':
return result >= 0
if comparator == '<':
return result < 0
if comparator == '<=':
return result <= 0
_PARSE_MAP = {
'-D': 'define_macros',
'-I': 'include_dirs',
'-L': 'library_dirs',
'-l': 'libraries'
}
def parse(packages):
"""
Parse the output from pkg-config about the passed package or packages.
Builds a dictionary containing the 'libraries', the 'library_dirs',
the 'include_dirs', and the 'define_macros' that are presented by
pkg-config. *package* is a string with space-delimited package names.
If ``pkg-config`` not on path, raises ``EnvironmentError``.
"""
def parse_package(package):
result = collections.defaultdict(list)
# Execute the query to pkg-config and clean the result.
out = _query(package, '--cflags --libs')
out = out.replace('\\"', '')
# Iterate through each token in the output.
for token in re.split(r'(?<!\\) ', out):
key = _PARSE_MAP.get(token[:2])
if key:
result[key].append(token[2:].strip())
# Iterate and clean define macros.
macros = list()
for declaration in result['define_macros']:
macro = tuple(declaration.split('='))
if len(macro) == 1:
macro += '',
macros.append(macro)
result['define_macros'] = macros
# Return parsed configuration.
return result
# Return the result of parse_package directly.
# We don't need to loop over the packages
return parse_package(packages)
def list_all():
"""Return a list of all packages found by pkg-config."""
packages = [line.split()[0] for line in _query('', '--list-all').split('\n')]
return packages
|
Python
| 0.999891
|
@@ -6927,18 +6927,20 @@
acro +=
-''
+None
,%0A%0A
|
2b5ac57fd02e5e20f738f9060456542f69eeff95
|
Bump version to 4.0.0a12
|
platformio/__init__.py
|
platformio/__init__.py
|
# Copyright (c) 2014-present PlatformIO <contact@platformio.org>
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
VERSION = (4, 0, "0a11")
__version__ = ".".join([str(s) for s in VERSION])
__title__ = "platformio"
__description__ = (
"An open source ecosystem for IoT development. "
"Cross-platform IDE and unified debugger. "
"Remote unit testing and firmware updates. "
"Arduino, ARM mbed, Espressif (ESP8266/ESP32), STM32, PIC32, nRF51/nRF52, "
"FPGA, CMSIS, SPL, AVR, Samsung ARTIK, libOpenCM3")
__url__ = "https://platformio.org"
__author__ = "PlatformIO"
__email__ = "contact@platformio.org"
__license__ = "Apache Software License"
__copyright__ = "Copyright 2014-present PlatformIO"
__apiurl__ = "https://api.platformio.org"
|
Python
| 0
|
@@ -625,17 +625,17 @@
0, %220a1
-1
+2
%22)%0A__ver
|
4e3cb4354c49101f29d64e4e5c59e347f95d98c9
|
Fix way to create login_url in dashboard test
|
tempest/scenario/test_dashboard_basic_ops.py
|
tempest/scenario/test_dashboard_basic_ops.py
|
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from six.moves import html_parser as HTMLParser
from six.moves.urllib import parse
from six.moves.urllib import request
from tempest import config
from tempest.scenario import manager
from tempest import test
CONF = config.CONF
class HorizonHTMLParser(HTMLParser.HTMLParser):
csrf_token = None
region = None
login = None
def _find_name(self, attrs, name):
for attrpair in attrs:
if attrpair[0] == 'name' and attrpair[1] == name:
return True
return False
def _find_value(self, attrs):
for attrpair in attrs:
if attrpair[0] == 'value':
return attrpair[1]
return None
def _find_attr_value(self, attrs, attr_name):
for attrpair in attrs:
if attrpair[0] == attr_name:
return attrpair[1]
return None
def handle_starttag(self, tag, attrs):
if tag == 'input':
if self._find_name(attrs, 'csrfmiddlewaretoken'):
self.csrf_token = self._find_value(attrs)
if self._find_name(attrs, 'region'):
self.region = self._find_value(attrs)
if tag == 'form':
self.login = self._find_attr_value(attrs, 'action')
class TestDashboardBasicOps(manager.ScenarioTest):
"""
This is a basic scenario test:
* checks that the login page is available
* logs in as a regular user
* checks that the user home page loads without error
"""
@classmethod
def skip_checks(cls):
super(TestDashboardBasicOps, cls).skip_checks()
if not CONF.service_available.horizon:
raise cls.skipException("Horizon support is required")
@classmethod
def setup_credentials(cls):
cls.set_network_resources()
super(TestDashboardBasicOps, cls).setup_credentials()
def check_login_page(self):
response = request.urlopen(CONF.dashboard.dashboard_url)
self.assertIn("id_username", response.read())
def user_login(self, username, password):
self.opener = request.build_opener(request.HTTPCookieProcessor())
response = self.opener.open(CONF.dashboard.dashboard_url).read()
# Grab the CSRF token and default region
parser = HorizonHTMLParser()
parser.feed(response)
# construct login url for dashboard, discovery accommodates non-/ web
# root for dashboard
login_url = CONF.dashboard.dashboard_url + parser.login[1:]
# Prepare login form request
req = request.Request(login_url)
req.add_header('Content-type', 'application/x-www-form-urlencoded')
req.add_header('Referer', CONF.dashboard.dashboard_url)
params = {'username': username,
'password': password,
'region': parser.region,
'csrfmiddlewaretoken': parser.csrf_token}
self.opener.open(req, parse.urlencode(params))
def check_home_page(self):
response = self.opener.open(CONF.dashboard.dashboard_url)
self.assertIn('Overview', response.read())
@test.idempotent_id('4f8851b1-0e69-482b-b63b-84c6e76f6c80')
@test.services('dashboard')
def test_basic_scenario(self):
creds = self.os.credentials
self.check_login_page()
self.user_login(creds.username, creds.password)
self.check_home_page()
|
Python
| 0.000456
|
@@ -3025,16 +3025,30 @@
n_url =
+parse.urljoin(
CONF.das
@@ -3071,10 +3071,9 @@
_url
- +
+,
par
@@ -3085,12 +3085,9 @@
ogin
-%5B1:%5D
+)
%0A%0A
|
546d8fc8b41de424a76beb03c6530a7cf505a6a3
|
add orca EarthLocation
|
km3pipe/constants.py
|
km3pipe/constants.py
|
# coding=utf-8
# Filename: constants.py
# pylint: disable=C0103
# pragma: no cover
"""
The constants used in KM3Pipe.
"""
from __future__ import division, absolute_import, print_function
# TODO: this module should be refactored soon!
import math
__author__ = "Tamas Gal"
__copyright__ = "Copyright 2016, Tamas Gal and the KM3NeT collaboration."
__credits__ = []
__license__ = "MIT"
__maintainer__ = "Tamas Gal"
__email__ = "tgal@km3net.de"
__status__ = "Development"
# Detector related parameters
arca_frame_duration = 0.1 # s
orca_frame_duration = 0.1 # s
c = 2.99792458e8 # m/s
n_water_antares_phase = 1.3499
n_water_antares_group = 1.3797
n_water_km3net_group = 1.3787
n_water_antares = n_water_antares_group
theta_cherenkov_water_antares = math.acos(1 / n_water_antares_phase)
theta_cherenkov_water_km3net = math.acos(1 / n_water_km3net_group)
c_water_antares = c / n_water_antares_group
c_water_km3net = c / n_water_km3net_group
# Math
pi = math.pi
e = math.e
# Default values for time residuals
dt_window_l = -15 # ns
dt_window_h = +25 # ns
|
Python
| 0.000037
|
@@ -1047,16 +1047,102 @@
w_h = +25 # ns%0A
+%0Aorca_coords = (42 + (48/60), 6 + (2/60)) # (n, e) / degree%0Aorca_height = -2450 # m%0A
|
a325be315bab0f46862dfc0369de80d845887a7e
|
handle duplicates only when needed
|
km3pipe/io/pandas.py
|
km3pipe/io/pandas.py
|
# coding=utf-8
# Filename: pandas.py
# pylint: disable=C0103,R0903
# vim:set ts=4 sts=4 sw=4 et:
"""
Pandas Helpers.
"""
from __future__ import division, absolute_import, print_function
import os.path
from six import string_types
import numpy as np
import pandas as pd
import tables as tb
from km3pipe.logger import logging
from km3pipe.tools import insert_prefix_to_dtype
log = logging.getLogger(__name__) # pylint: disable=C0103
__author__ = "Moritz Lotze"
__copyright__ = "Copyright 2016, Tamas Gal and the KM3NeT collaboration."
__credits__ = []
__license__ = "MIT"
__maintainer__ = "Moritz Lotze"
__email__ = "mlotze@km3net.de"
__status__ = "Development"
class H5Chain(object):
"""Read/write multiple HDF5 files as ``pandas.DataFrame``.
It is impliend that all files share the same group/tables
structure and naming.
Parameters
----------
filenames: list(str), or dict(fname -> h5file)
verbose: bool [default: False]
Examples
--------
>>> filenames = ['numu_cc.h5', 'anue_nc.h5']
>>> c = H5Chain(filenames)
>>> X = c['/reco/gandalf']
A context manager is also available:
>>> with H5Chain(filenames) as h5:
>>> reco = h5['/reco']
"""
def __init__(self, filenames, verbose=False):
self.filenames = filenames
self.verbose = verbose
def close(self):
pass
def __exit__(self, exc_type, exc_value, traceback):
self.close()
def __enter__(self):
return self
def __getitem__(self, key):
dfs = []
for fname in self.filenames:
if self.verbose:
print('opening ', fname)
with tb.File(fname, 'r') as h5:
try:
tab = h5.get_node(key)[:]
except KeyError as ke:
log.error('{} does not exist in {}!'.format(key, fname))
raise ke
except tb.exceptions.NodeError as ne:
log.error('{} does not exist in {}!'.format(key, fname))
raise ne
if self.verbose:
print(tab.shape)
df = pd.DataFrame(tab)
dfs.append(df)
dfs = pd.concat(dfs, axis=0, ignore_index=True)
return dfs
def __call__(self, key):
"""
Parameters
----------
key: str
H5 path of the object to retrieve, e.g. '/reco/gandalf'.
"""
return self[key]
def map2df(map):
return pd.DataFrame.from_records(map, index=np.ones(1, dtype=int))
def _read_table(tab, max_id=None, ignore_events=False):
if ignore_events:
return tab[:max_id]
else:
return tab.read_where('event_id <= %d' % max_id)
def read_group(group, max_id=None, **kwargs):
# Store through groupname, insert tablename into dtype
df = []
for tab in group._f_iter_nodes(classname='Table'):
tabname = tab.name
if max_id is None:
arr = tab.read(**kwargs)
else:
arr = _read_table(tab, max_id)
arr = insert_prefix_to_dtype(arr, tabname)
arr = pd.DataFrame.from_records(arr)
df.append(arr)
df = pd.concat(df, axis=1)
return df
def drop_duplicate_columns(df):
"""If a column name appears more than once, drop it."""
# _, i = np.unique(df.columns, return_index=True)
# return df.iloc[:, i]
return df.T.drop_duplicates().T
def merge_event_ids(df):
cols = list(df.columns)
cols = drop_duplicate_columns(df)
ids = list(c for c in cols if 'event_id' in c)
log.debug(ids)
if not ids:
return df
ids = list(set(ids))
log.debug(ids)
# non_id = list(c for c in cols if c not in ids)
event_ids = df[ids[0]]
log.debug(event_ids.shape)
if event_ids.shape[1] > 1:
event_ids = event_ids.ix[:, 0]
log.debug(event_ids.shape)
df.drop(ids, axis=1, inplace=True)
log.debug(event_ids.shape)
df['event_id'] = event_ids
return df
def df_to_h5(df, h5file, where, **kwargs):
"""Write pandas dataframes with proper columns.
Example:
>>> df = pd.DataFrame(...)
>>> df_to_h5(df, 'foo.h5', '/some/loc/my_df')
"""
write_table(df.to_records(index=False), h5file, where, **kwargs)
def write_table(array, h5file, where, force=False):
"""Write a structured numpy array into a H5 table.
"""
own_h5 = False
if isinstance(h5file, string_types):
own_h5 = True
h5file = tb.open_file(h5file, 'a')
filt = tb.Filters(complevel=5, shuffle=True, fletcher32=True)
loc, tabname = os.path.split(where)
if loc == '':
loc = '/'
try:
h5file.create_table(loc, tabname, obj=array, createparents=True,
filters=filt)
except tb.exceptions.NodeError:
h5file.get_node(where)[:] = array
if own_h5:
h5file.close()
def first_mc_tracks(mc_tracks, mupage=False):
mc_tracks = pd.DataFrame(mc_tracks)
if mupage:
mc_tracks = mc_tracks[mc_tracks.type != 0]
mc_tracks = mc_tracks[mc_tracks.id == 1]
return mc_tracks.drop_duplicates(subset='event_id')
|
Python
| 0.000001
|
@@ -3747,16 +3747,29 @@
ids%5B0%5D%5D%0A
+ try:%0A
log.
@@ -3795,16 +3795,20 @@
pe)%0A
+
+
if event
@@ -3826,16 +3826,20 @@
1%5D %3E 1:%0A
+
@@ -3869,24 +3869,28 @@
x%5B:, 0%5D%0A
+
+
log.debug(ev
@@ -3900,24 +3900,60 @@
_ids.shape)%0A
+ except IndexError:%0A pass%0A
df.drop(
|
ce47d219076dc2ff36c58db1d91ba349b9968d61
|
Update test_bandits.py
|
bandits/tests/test_bandits.py
|
bandits/tests/test_bandits.py
|
from sklearn.utils.testing import assert_equal
import numpy as np
import pytest
print("Hello tests!")
|
Python
| 0.000001
|
@@ -79,26 +79,128 @@
t%0A%0A%0A
-print(%22Hello tests!%22
+@pytest.mark.fast_test%0Adef dummy_test():%0A %22%22%22%0A Quick test to build with Circle CI.%0A %22%22%22%0A x = 2 + 2%0A assert_equal(x, 4
)%0A
|
abfc76636e27bd3454e6b0188502831f7f70162c
|
Fix nonthermal.out file names
|
plotartisnonthermal.py
|
plotartisnonthermal.py
|
#!/usr/bin/env python3
import argparse
# import math
# import os
import glob
import matplotlib.pyplot as plt
# import numpy as np
import pandas as pd
from astropy import constants as const
import readartisfiles as af
C = const.c.to('m/s').value
DEFAULTSPECPATH = '../example_run/spec.out'
def main():
"""
Plot the electron energy distribution
"""
parser = argparse.ArgumentParser(
formatter_class=argparse.ArgumentDefaultsHelpFormatter,
description='Plot ARTIS radiation field.')
parser.add_argument('-path', action='store', default='./',
help='Path to nonthermalspec.out file')
parser.add_argument('-listtimesteps', action='store_true', default=False,
help='Show the times at each timestep')
parser.add_argument('-timestep', type=int, default=1,
help='Timestep number to plot')
parser.add_argument('-timestepmax', type=int, default=-1,
help='Make plots for all timesteps up to this timestep')
parser.add_argument('-modelgridindex', type=int, default=0,
help='Modelgridindex to plot')
parser.add_argument('-xmin', type=int, default=40,
help='Plot range: minimum energy in eV')
parser.add_argument('-xmax', type=int, default=10000,
help='Plot range: maximum energy in eV')
parser.add_argument('-o', action='store', dest='outputfile',
default='plotnonthermal_cell{0:03d}_timestep{1:03d}.pdf',
help='Filename for PDF file')
args = parser.parse_args()
if args.listtimesteps:
af.showtimesteptimes('spec.out')
else:
nonthermaldata = None
nonthermal_files = glob.glob('nonthermalspec_????.out', recursive=True) + \
glob.glob('nonthermalspec-????.out', recursive=True) + glob.glob('nonthermalspec.out', recursive=True)
for nonthermal_file in nonthermal_files:
print('Loading {:}...'.format(nonthermal_file))
nonthermaldata_thisfile = pd.read_csv(nonthermal_file, delim_whitespace=True)
nonthermaldata_thisfile.query('modelgridindex==@args.modelgridindex', inplace=True)
if len(nonthermaldata_thisfile) > 0:
if nonthermaldata is None:
nonthermaldata = nonthermaldata_thisfile.copy()
else:
nonthermaldata.append(nonthermaldata_thisfile, ignore_index=True)
if args.timestep < 0:
timestepmin = max(nonthermaldata['timestep'])
else:
timestepmin = args.timestep
if not args.timestepmax or args.timestepmax < 0:
timestepmax = timestepmin + 1
else:
timestepmax = args.timestepmax
list_timesteps = range(timestepmin, timestepmax)
for timestep in list_timesteps:
nonthermaldata_currenttimestep = nonthermaldata.query('timestep==@timestep')
if len(nonthermaldata_currenttimestep) > 0:
print('Plotting timestep {0:d}'.format(timestep))
outputfile = args.outputfile.format(args.modelgridindex, timestep)
make_plot(nonthermaldata_currenttimestep, timestep, outputfile, args)
else:
print('No data for timestep {0:d}'.format(timestep))
def make_plot(nonthermaldata, timestep, outputfile, args):
"""
Draw the bin edges, fitted field, and emergent spectrum
"""
fig, axis = plt.subplots(1, 1, sharex=True, figsize=(6, 4),
tight_layout={"pad": 0.2, "w_pad": 0.0, "h_pad": 0.0})
ymax = max(nonthermaldata['y'])
# nonthermaldata.plot(x='energy_ev', y='y', lw=1.5, ax=axis, color='blue', legend=False)
axis.plot(nonthermaldata['energy_ev'], nonthermaldata['y'], linewidth=1.5, color='blue')
axis.annotate('Timestep {0:d}\nCell {1:d}'.format(timestep, args.modelgridindex),
xy=(0.02, 0.96), xycoords='axes fraction',
horizontalalignment='left', verticalalignment='top', fontsize=8)
axis.set_xlabel(r'Energy (eV)')
axis.set_ylabel(r'y (e$^-$ / cm$^2$ / s / eV)')
axis.set_yscale("log", nonposy='clip')
# axis.set_xlim(xmin=args.xmin, xmax=args.xmax)
axis.set_ylim(ymin=0.0, ymax=ymax)
# axis.legend(loc='upper center', handlelength=2,
# frameon=False, numpoints=1, prop={'size': 13})
print('Saving to {0:s}'.format(outputfile))
fig.savefig(outputfile, format='pdf')
plt.close()
if __name__ == "__main__":
main()
|
Python
| 0
|
@@ -1865,20 +1865,17 @@
malspec-
-????
+*
.out', r
|
0b7cbae1fcb68d6d090a083f103f4d83167dae71
|
add support for excluded paths
|
plogx/database.py
|
plogx/database.py
|
from pymongo.errors import DuplicateKeyError
from datetime import datetime, timedelta
def _aggregate_day_stats(db, log_day):
start_date = log_day
end_date = start_date + timedelta(days=1)
page_impressions = db.log_items.aggregate([
# match and filter all documents for the specified day
{"$match":
{"timestamp": {
"$gte": start_date,
"$lt": end_date }}},
{"$group": {
"_id": {
"ip_address": "$ip_address",
"path": "$path"},
"timestamp": {"$min": "$timestamp"}}},
{"$project": {
"_id": 0,
"path": "$_id.path",
"ip_address": "$_id.ip_address",
"timestamp": 1 }}])["result"]
# counts each unique ip address
num_visits = len(set([x["ip_address"] for x in page_impressions]))
path_stats_dict = {}
for page_impression in page_impressions:
path = page_impression["path"]
if path not in path_stats_dict:
path_stats_dict[path] = 1
else:
path_stats_dict[path] += 1
path_stats = [{"path": k, "num_visits": v} \
for k,v in path_stats_dict.items()]
path_stats = sorted(path_stats, key=lambda x: -x["num_visits"])
stats_document = {
"_id": start_date,
"num_visits": num_visits,
"num_page_impressions": len(page_impressions),
"path_stats": path_stats
}
return stats_document
def _aggregate_month_stats(db, log_month):
start_date = datetime(log_month.year, log_month.month, 1)
# determine year and month number for the next month
year = log_month.year + start_date.month / 12
month = start_date.month % 12 + 1
# end date is the first day of the next month
end_date = datetime(year, month, 1)
# determine the last day number of the current month
last_day = (end_date - timedelta(days = 1)).day
# aggregate day stats for each day of the month
for day in range(1, last_day + 1):
current_day = datetime(log_month.year, log_month.month, day)
get_stats_per_day(db, current_day)
day_stats = db.stats_per_day.aggregate([
{"$match":
{"_id": {
"$gte": start_date,
"$lt": end_date }}},
{"$project": {
"_id": 0,
"day": "$_id",
"num_page_impressions": "$num_page_impressions",
"num_visits": "$num_visits"}}
])["result"]
stats_document = {
"_id": start_date,
"day_stats": day_stats,
"num_page_impressions": 0,
"num_visits": 0
}
for day in day_stats:
stats_document["num_page_impressions"] += day["num_page_impressions"]
stats_document["num_visits"] += day["num_visits"]
return stats_document
def get_stats_per_day(db, log_day):
"""
Generates a stats document, which includes the number of page impressions,
the total number of visits and a list of path/ip-address combinations
for one specific day. This document gets saved in log_db.stats_per_day,
unless log_day is doday.
@return stats document [dict]
"""
log_day = datetime.combine(log_day.date(), datetime.min.time())
stats_document = db.stats_per_day.find_one({"_id": log_day})
if not stats_document or datetime.now().date() == log_day.date():
# If no document is saved for the specified day, genereate one.
# if the specific day is today, the document will always be generated.
stats_document = _aggregate_day_stats(db, log_day)
try:
db.stats_per_day.insert(stats_document)
except DuplicateKeyError:
date = stats_document["_id"]
del(stats_document["_id"])
db.stats_per_day.update({"_id": date}, {"$set": stats_document})
return stats_document
def get_stats_per_month(db, log_month):
stats_document = db.stats_per_month.find_one({"_id": log_month})
if not stats_document:
stats_document = _aggregate_month_stats(db, log_month)
if datetime.now().strftime("%Y%m") != log_month.strftime("%Y%m"):
db.stats_per_month.insert(stats_document)
return stats_document
def get_raw_logs_per_day(db,log_day):
"""
@return: a cursor containing all log items for the specified day
[pymongo.cursor.Cursor]
"""
start_date = datetime.combine(log_day.date(), datetime.min.time())
end_date = start_date + timedelta(days=1)
return db.log_items.find({"timestamp": {"$gte": start_date, "$lt": end_date}})
|
Python
| 0
|
@@ -190,16 +190,192 @@
(days=1)
+%0A excluded_paths = %5B%0A %22/assets/css/style.min.css%22,%0A %22/assets/css/pygments.css%22,%0A %22/assets/js/jquery.min.js%22,%0A %22/assets/js/bootstrap.min.js%22%0A %5D
%0A%0A pa
@@ -585,33 +585,95 @@
%22$lt%22: end_date
-
+%7D,%0A %22path%22: %7B%0A %22$nin%22: excluded_paths
%7D%7D%7D,%0A %7B%22$
@@ -4838,8 +4838,9 @@
_date%7D%7D)
+%0A
|
19b9451a1a18c3b9a5a115ab209c8137f794a937
|
Use update statement to marke Data objects as purged
|
resolwe/flow/utils/purge.py
|
resolwe/flow/utils/purge.py
|
""".. Ignore pydocstyle D400.
==========
Data Purge
==========
"""
import logging
import os
import shutil
from django.conf import settings
from resolwe.flow.models import Data
from resolwe.flow.utils import iterate_fields
from resolwe.utils import BraceMessage as __
logger = logging.getLogger(__name__) # pylint: disable=invalid-name
def get_purge_files(root, output, output_schema, descriptor, descriptor_schema):
"""Get files to purge."""
def remove_file(fn, paths):
"""From paths remove fn and dirs before fn in dir tree."""
while fn:
for i in range(len(paths) - 1, -1, -1):
if fn == paths[i]:
paths.pop(i)
fn, _ = os.path.split(fn)
def remove_tree(fn, paths):
"""From paths remove fn and dirs before or after fn in dir tree."""
for i in range(len(paths) - 1, -1, -1):
head = paths[i]
while head:
if fn == head:
paths.pop(i)
break
head, _ = os.path.split(head)
remove_file(fn, paths)
def subfiles(root):
"""Extend unreferenced list with all subdirs and files in top dir."""
subs = []
for path, dirs, files in os.walk(root, topdown=False):
path = path[len(root) + 1:]
subs.extend(os.path.join(path, f) for f in files)
subs.extend(os.path.join(path, d) for d in dirs)
return subs
unreferenced_files = subfiles(root)
remove_file('jsonout.txt', unreferenced_files)
remove_file('stderr.txt', unreferenced_files)
remove_file('stdout.txt', unreferenced_files)
meta_fields = [
[output, output_schema],
[descriptor, descriptor_schema]
]
for meta_field, meta_field_schema in meta_fields:
for field_schema, fields in iterate_fields(meta_field, meta_field_schema):
if 'type' in field_schema:
field_type = field_schema['type']
field_name = field_schema['name']
# Remove basic:file: entries
if field_type.startswith('basic:file:'):
remove_file(fields[field_name]['file'], unreferenced_files)
# Remove list:basic:file: entries
elif field_type.startswith('list:basic:file:'):
for field in fields[field_name]:
remove_file(field['file'], unreferenced_files)
# Remove basic:dir: entries
elif field_type.startswith('basic:dir:'):
remove_tree(fields[field_name]['dir'], unreferenced_files)
# Remove list:basic:dir: entries
elif field_type.startswith('list:basic:dir:'):
for field in fields[field_name]:
remove_tree(field['dir'], unreferenced_files)
# Remove refs entries
if field_type.startswith('basic:file:') or field_type.startswith('basic:dir:'):
for ref in fields[field_name].get('refs', []):
remove_tree(ref, unreferenced_files)
elif field_type.startswith('list:basic:file:') or field_type.startswith('list:basic:dir:'):
for field in fields[field_name]:
for ref in field.get('refs', []):
remove_tree(ref, unreferenced_files)
return set([os.path.join(root, filename) for filename in unreferenced_files])
def data_purge(data_ids=None, delete=False, verbosity=0):
"""Print files not referenced from meta data.
If data_ids not given, run on all data objects.
If delete is True, delete unreferenced files.
"""
data_path = settings.FLOW_EXECUTOR['DATA_DIR']
runtime_path = settings.FLOW_EXECUTOR['RUNTIME_DIR']
unreferenced_files = set()
data_qs = Data.objects.filter(
status__in=[Data.STATUS_DONE, Data.STATUS_ERROR], purged=False
)
if data_ids is not None:
data_qs = data_qs.filter(pk__in=data_ids)
for data in data_qs:
root = os.path.join(data_path, str(data.id))
unreferenced_files.update(get_purge_files(
root,
data.output,
data.process.output_schema,
data.descriptor,
getattr(data.descriptor_schema, 'schema', [])
))
# Remove any folders, which do not belong to any data objects.
if data_ids is None:
for base_path in (data_path, runtime_path):
for directory in os.listdir(base_path):
directory_path = os.path.join(base_path, directory)
if not os.path.isdir(directory_path):
continue
try:
data_id = int(directory)
except ValueError:
continue
# Check if a data object with the given identifier exists.
if not Data.objects.filter(pk=data_id).exists():
unreferenced_files.add(directory_path)
if verbosity >= 1:
# Print unreferenced files
if unreferenced_files:
logger.info(__("Unreferenced files ({}):", len(unreferenced_files)))
for name in unreferenced_files:
logger.info(__(" {}", name))
else:
logger.info("No unreferenced files")
# Go through unreferenced files and delete them.
if delete:
for name in unreferenced_files:
if os.path.isfile(name) or os.path.islink(name):
os.remove(name)
elif os.path.isdir(name):
shutil.rmtree(name)
for data in data_qs:
data.purged = True
data.save(update_fields=['purged'])
|
Python
| 0
|
@@ -5660,96 +5660,83 @@
-for data in data_qs:%0A data.purged = True%0A data.save(update_fields=%5B'
+# NOTE: This doesn't trigger Django's signals!%0A data_qs.update(
purged
-'%5D
+=True
)%0A
|
cbb5290e42f738025fb11f4745a35bda71968f1f
|
Add support for Lovelace dashboards (#342)
|
pychromecast/controllers/homeassistant.py
|
pychromecast/controllers/homeassistant.py
|
"""
Controller to interface with Home Assistant
"""
from ..config import APP_HOME_ASSISTANT
from . import BaseController
APP_NAMESPACE = "urn:x-cast:com.nabucasa.hast"
class HomeAssistantController(BaseController):
    """Controller to interact with Home Assistant.

    Wraps a Cast channel (namespace ``app_namespace``) for the Home
    Assistant Cast receiver app.  Tracks the receiver status reported on
    the channel and queues callbacks to run once the receiver is
    connected to our Home Assistant instance.
    """

    def __init__(
        self,
        hass_url,
        client_id,
        refresh_token,
        app_namespace=APP_NAMESPACE,
        app_id=APP_HOME_ASSISTANT,
    ):
        """Initialize the controller.

        hass_url: base URL of the Home Assistant instance the receiver
            should connect to.
        client_id / refresh_token: credentials forwarded to the receiver
            app so it can authenticate against hass_url.
        """
        super().__init__(app_namespace, app_id)
        self.hass_url = hass_url
        self.client_id = client_id
        self.refresh_token = refresh_token
        # Last "receiver_status" payload from the app, shaped like:
        # {
        #   connected: boolean;
        #   showDemo: boolean;
        #   hassUrl?: string;
        #   lovelacePath?: string | number | null;
        # }
        self.status = None
        # Callbacks queued by connect_hass() to run once connected.
        self._on_connect = []

    @property
    def hass_connected(self):
        """Return if connected to *our* Home Assistant instance."""
        return (
            self.status is not None
            and self.status["connected"]
            and self.status["hassUrl"] == self.hass_url
        )

    def channel_connected(self):
        """Called when a channel has been opened that supports the
        namespace of this controller."""
        self.get_status()

    def channel_disconnected(self):
        """Called when a channel is disconnected."""
        self.status = None

    def receive_message(self, message, data):
        """Called when a message is received.

        Returns True when the message was handled by this controller.
        """
        if data.get("type") == "receiver_status":
            was_connected = self.hass_connected
            self.status = data
            if was_connected or not self.hass_connected:
                return True
            # We just got connected, call the callbacks.
            while self._on_connect:
                self._on_connect.pop()()
            return True
        return False

    def connect_hass(self, callback_function=None):
        """Connect to Home Assistant.

        callback_function, if given, is invoked once the receiver
        reports it is connected to our instance.
        """
        # Bug fix: only queue real callables.  Previously the None
        # default was appended too, and receive_message() would later
        # call it, raising "TypeError: 'NoneType' object is not
        # callable" on connect.
        if callback_function is not None:
            self._on_connect.append(callback_function)
        self.send_message(
            {
                "type": "connect",
                "refreshToken": self.refresh_token,
                "clientId": self.client_id,
                "hassUrl": self.hass_url,
            }
        )

    def show_demo(self):
        """Show the demo."""
        self.send_message({"type": "show_demo"})

    def get_status(self, callback_function=None):
        """Get status of Home Assistant Cast."""
        self.send_connected_message(
            {"type": "get_status"}, callback_function=callback_function
        )

    def show_lovelace_view(self, view_path, callback_function=None):
        """Show a Lovelace UI."""
        self.send_connected_message(
            {"type": "show_lovelace_view", "viewPath": view_path},
            callback_function=callback_function,
        )

    def send_connected_message(self, data, callback_function=None):
        """Send a message to a connected Home Assistant Cast."""
        if self.hass_connected:
            self.send_message_nocheck(data, callback_function=callback_function)
            return
        # Not connected yet: connect first, then send once connected.
        self.connect_hass(
            lambda: self.send_message_nocheck(data, callback_function=callback_function)
        )
|
Python
| 0
|
@@ -2619,16 +2619,31 @@
ew_path,
+ url_path=None,
callbac
@@ -2795,16 +2795,37 @@
iew_path
+, %22urlPath%22: url_path
%7D,%0A
|
91b1aa0270951e02c861889ef66eb1c9c7e3874a
|
fix caching
|
base_changeset/models/base.py
|
base_changeset/models/base.py
|
# Copyright 2020 Onestein (<https://www.onestein.eu>)
# License AGPL-3.0 or later (http://www.gnu.org/licenses/agpl).
from lxml import etree
from odoo import _, api, fields, models
from odoo.tools import config
class Base(models.AbstractModel):
    """Extend every Odoo model with changeset tracking.

    Writes to models listed by ``models_to_track_changeset()`` are
    intercepted and routed through ``record.changeset``; pending change
    counters and a stat-button are exposed on form views.
    """

    _inherit = "base"

    # All changesets/changes attached to this record (computed, not stored).
    changeset_ids = fields.One2many(
        comodel_name="record.changeset",
        compute="_compute_changeset_ids",
        string="Changesets",
    )
    changeset_change_ids = fields.One2many(
        comodel_name="record.changeset.change",
        compute="_compute_changeset_ids",
        string="Changeset Changes",
    )
    count_pending_changesets = fields.Integer(
        compute="_compute_count_pending_changesets"
    )
    count_pending_changeset_changes = fields.Integer(
        compute="_compute_count_pending_changesets"
    )
    user_can_see_changeset = fields.Boolean(compute="_compute_user_can_see_changeset")

    def _compute_changeset_ids(self):
        """Fetch the changesets (and their changes) linked to each record."""
        model_name = self._name
        for record in self:
            changesets = self.env["record.changeset"].search(
                [("model", "=", model_name), ("res_id", "=", record.id)]
            )
            record.changeset_ids = changesets
            record.changeset_change_ids = changesets.mapped("change_ids")

    def _compute_count_pending_changesets(self):
        """Count draft changesets and their still-pending changes."""
        model_name = self._name
        if model_name in self.models_to_track_changeset():
            for rec in self:
                changesets = rec.changeset_ids.filtered(
                    lambda rev: rev.state == "draft"
                    and rev.res_id == rec.id
                    and rev.model == model_name
                )
                changes = changesets.mapped("change_ids")
                changes = changes.filtered(
                    lambda c: c.state in c.get_pending_changes_states()
                )
                rec.count_pending_changesets = len(changesets)
                rec.count_pending_changeset_changes = len(changes)
        else:
            for rec in self:
                # NOTE(review): floats assigned to Integer fields; Odoo
                # coerces them, but plain 0 would be clearer.
                rec.count_pending_changesets = 0.0
                rec.count_pending_changeset_changes = 0.0

    @api.model
    def models_to_track_changeset(self):
        """Models to be tracked for changes

        :args:
        :returns: list of models
        """
        models = self.env["changeset.field.rule"].search([]).mapped("model_id.model")
        if config["test_enable"] and self.env.context.get("test_record_changeset"):
            if "res.partner" not in models:
                models += ["res.partner"]  # Used in tests
        return models

    def write(self, values):
        """Divert writes on tracked models through the changeset machinery.

        The ``__no_changeset`` context key bypasses tracking entirely;
        during tests, tracking only applies when ``test_record_changeset``
        is set, to avoid interfering with other modules' tests.
        """
        if self.env.context.get("__no_changeset"):
            return super().write(values)
        # To avoid conflicts with tests of other modules
        if config["test_enable"] and not self.env.context.get("test_record_changeset"):
            return super().write(values)
        if self._name not in self.models_to_track_changeset():
            return super().write(values)
        for record in self:
            # add_changeset() strips the values that become pending changes
            # and returns only those that may be written immediately.
            local_values = self.env["record.changeset"].add_changeset(record, values)
            super(Base, record).write(local_values)
        return True

    def action_record_changeset_change_view(self):
        """Open the list of changeset changes for this record (stat button)."""
        self.ensure_one()
        res = {
            "type": "ir.actions.act_window",
            "res_model": "record.changeset.change",
            "view_mode": "tree",
            "views": [
                [
                    self.env.ref("base_changeset.view_record_changeset_change_tree").id,
                    "list",
                ]
            ],
            "context": self.env.context,
            "name": _("Record Changes"),
            "search_view_id": [
                self.env.ref("base_changeset.view_record_changeset_change_search").id,
                "search",
            ],
        }
        record_id = self.env.context.get("search_default_record_id")
        if record_id:
            res.update(
                {
                    "domain": [
                        ("model", "=", self._name),
                        ("changeset_id.res_id", "=", record_id),
                    ]
                }
            )
        return res

    @api.model
    def _fields_view_get(
        self, view_id=None, view_type="form", toolbar=False, submenu=False
    ):
        """Inject a 'Changes' stat-button into form views of tracked models."""
        res = super()._fields_view_get(
            view_id=view_id, view_type=view_type, toolbar=toolbar, submenu=submenu
        )
        to_track_changeset = self._name in self.models_to_track_changeset()
        can_see = len(self) == 1 and self.user_can_see_changeset
        button_label = _("Changes")
        if to_track_changeset and can_see and view_type == "form":
            doc = etree.XML(res["arch"])
            # Add the button inside the standard button_box, if present.
            for node in doc.xpath("//div[@name='button_box']"):
                xml_field = etree.Element(
                    "field",
                    {
                        "name": "count_pending_changeset_changes",
                        "string": button_label,
                        "widget": "statinfo",
                    },
                )
                xml_button = etree.Element(
                    "button",
                    {
                        "type": "object",
                        "name": "action_record_changeset_change_view",
                        "icon": "fa-code-fork",
                        "context": "{'search_default_draft': 1, "
                        "'search_default_record_id': active_id}",
                    },
                )
                xml_button.insert(0, xml_field)
                node.insert(0, xml_button)
            res["arch"] = etree.tostring(doc, encoding="unicode")
        return res

    def _compute_user_can_see_changeset(self):
        """Superusers and members of the changeset group may see changesets."""
        is_superuser = self.env.is_superuser()
        has_changeset_group = self.user_has_groups(
            "base_changeset.group_changeset_user"
        )
        for rec in self:
            rec.user_can_see_changeset = is_superuser or has_changeset_group
|
Python
| 0
|
@@ -205,16 +205,26 @@
t config
+, ormcache
%0A%0A%0Aclass
@@ -2151,32 +2151,57 @@
%0A @api.model%0A
+ @ormcache(skiparg=1)%0A
def models_t
|
1e247dace112ce6def2bedf2f3ab864835ed7e06
|
enforce that source.yaml files have to specify a version attribute
|
src/rosdistro/source_file.py
|
src/rosdistro/source_file.py
|
# Software License Agreement (BSD License)
#
# Copyright (c) 2013, Open Source Robotics Foundation, Inc.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions
# are met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following
# disclaimer in the documentation and/or other materials provided
# with the distribution.
# * Neither the name of Open Source Robotics Foundation, Inc. nor
# the names of its contributors may be used to endorse or promote
# products derived from this software without specific prior
# written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
# FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
# COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
# INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
# BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
# LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN
# ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
from .repository import Repository
class SourceFile(object):
    """In-memory representation of a rosdistro 'source' file."""

    _type = 'source'

    def __init__(self, name, data):
        """Parse *data* (a deserialized source YAML dict) named *name*.

        Raises AssertionError when the type/version attributes are
        missing or unsupported, or when a repository entry is invalid.
        """
        self.name = name
        assert 'type' in data, "Expected file type is '%s'" % SourceFile._type
        assert data['type'] == SourceFile._type, "Expected file type is '%s', not '%s'" % (SourceFile._type, data['type'])
        assert 'version' in data, "Source file for '%s' lacks required version information" % self.name
        assert int(data['version']) == 1, "Unable to handle '%s' format version '%d', please update rosdistro" % (SourceFile._type, int(data['version']))
        self.version = int(data['version'])
        self.repositories = {}
        for repo_name, repo_data in data.get('repositories', {}).items():
            try:
                repository = Repository(repo_name, repo_data)
            except AssertionError as err:
                # Prefix the first assertion message with the file name
                # so the failure can be traced back to its origin.
                err.args = [("Source file '%s': %s" % (self.name, part) if idx == 0 else part) for idx, part in enumerate(err.args)]
                raise err
            self.repositories[repo_name] = repository

    def get_data(self):
        """Return a plain dict suitable for serializing back to YAML."""
        return {
            'type': SourceFile._type,
            'version': self.version,
            'repositories': {
                repo_name: self.repositories[repo_name].get_data()
                for repo_name in sorted(self.repositories)
            },
        }
|
Python
| 0.000001
|
@@ -2491,16 +2491,132 @@
try:%0A
+ assert 'version' in repo_data, %22Repository '%25s' lacks required version information%22 %25 repo_name%0A
|
ab390fb8a445730a792c717d68d3e5b2d45b05a3
|
use UTC year for insert
|
scripts/ingestors/flux_ingest.py
|
scripts/ingestors/flux_ingest.py
|
"""
Ingest files provided by NLAE containing flux information
"""
import pg
import mx.DateTime
import traceback
other = pg.DB('other', 'iemdb')
# Figure out max valid times
maxts = {}
rs = other.query("""SELECT station, max(valid) from flux%s
GROUP by station""" % (mx.DateTime.now().year - 1, ) ).dictresult()
for i in range(len(rs)):
maxts[ rs[i]['station'] ] = mx.DateTime.strptime(rs[i]['max'][:16],
'%Y-%m-%d %H:%M')
rs = other.query("""SELECT station, max(valid) from flux%s
GROUP by station""" % (mx.DateTime.now().year, ) ).dictresult()
for i in range(len(rs)):
maxts[ rs[i]['station'] ] = mx.DateTime.strptime(rs[i]['max'][:16],
'%Y-%m-%d %H:%M')
DIR = "/mnt/home/mesonet/ot/ot0005/incoming/Fluxdata/"
fp = {'Flux10_AF.dat': 'nstl10',
'Anc10_AF.dat': 'nstl10',
'Flux11_AF.dat': 'nstl11',
'Anc11_AF.dat': 'nstl11',
'30ft.dat': 'nstl30ft',
'NSP_Flux.dat': 'nstlnsp'}
dbcols = [
'fc_wpl',
'le_wpl',
'hs',
'tau',
'u_star',
'cov_uz_uz',
'cov_uz_ux',
'cov_uz_uy',
'cov_uz_co2',
'cov_uz_h2o',
'cov_uz_ts',
'cov_ux_ux',
'cov_ux_uy',
'cov_ux_co2',
'cov_ux_h2o',
'cov_ux_ts',
'cov_uy_uy',
'cov_uy_co2',
'cov_uy_h2o',
'cov_uy_ts',
'cov_co2_co2',
'cov_h2o_h2o',
'cov_ts_ts',
'ux_avg',
'uy_avg',
'uz_avg',
'co2_avg',
'h2o_avg',
'ts_avg',
'rho_a_avg',
'press_avg',
'panel_temp_avg',
'wnd_dir_compass',
'wnd_dir_csat3',
'wnd_spd',
'rslt_wnd_spd',
'batt_volt_avg',
'std_wnd_dir',
'fc_irga',
'le_irga',
'co2_wpl_le',
'co2_wpl_h',
'h2o_wpl_le',
'h2o_wpl_h',
'h2o_hmp_avg',
't_hmp_avg',
'par_avg',
'solrad_avg',
'rain_tot',
'shf1_avg',
'shf2_avg',
'soiltc1_avg',
'soiltc2_avg',
'soiltc3_avg',
'soiltc4_avg',
'irt_can_avg',
'irt_cb_avg',
'incoming_sw',
'outgoing_sw',
'incoming_lw_tcor',
'terrest_lw_tcor',
'rn_short_avg',
'rn_long_avg',
'rn_total_avg',
'rh_hmp_avg',
'temps_c1_avg',
'corrtemp_avg',
'rn_total_tcor_avg',
'incoming_lw_avg',
'terrestrial_lw_avg',
'wfv1_avg']
convert = {
'Incoming_SW_Avg': 'incoming_sw',
'Outgoing_SW_Avg': 'outgoing_sw',
'Incoming_LW_TCor_Avg': 'incoming_lw_tcor',
'Terrest_LW_TCor_Avg': 'terrest_lw_tcor',
}
def c(v):
if (v == "NAN" or v == "-INF" or v == "INF"):
return None
return v
data = {'nstl10': {},
'nstl11': {},
'nstl30ft': {},
'nstlnsp': {},
}
for fn in fp.keys():
station = fp[fn]
lines = open("%s%s" % (DIR, fn), 'r').readlines()
if len(lines) < 2:
print 'flux_ingest.py file: %s has %s lines?' % (fn, len(lines))
continue
keys = lines[1].replace('"','').replace("\r\n", '').split(",")
for linenum, obline in enumerate(lines[3:]):
tokens = obline.replace('"', '').split(",")
if len(tokens) != len(keys):
print '%s line: %s has %s tokens, header has %s' % (fn, linenum,
len(tokens), len(keys))
continue
if tokens[0] == '':
continue
try:
ts = mx.DateTime.strptime(tokens[0][:16], '%Y-%m-%d %H:%M')
except:
print '%s line: %s has invalid time %s' %(fn, linenum, tokens[0])
continue
if ts < maxts.get(station, mx.DateTime.DateTime(2011,1,1)):
continue
if not data[station].has_key(ts):
data[station][ts] = {'valid': tokens[0][:16], 'station': station }
for i in range(len(tokens)):
key = convert.get(keys[i], keys[i]).lower()
if key in ['record','timestamp']:
continue
if key not in dbcols:
#print 'Missing', key
continue
data[station][ts][key] = c( tokens[i] )
cnt = 0
for station in data.keys():
for ts in data[station].keys():
try:
other.insert("flux%s" % (ts.year,) , data[station][ts])
cnt += 1
except:
print station, ts, data[station][ts].keys()
print traceback.print_exc()
if cnt == 0:
print "NLAE flux inget found no records"
|
Python
| 0
|
@@ -3889,24 +3889,50 @@
on%5D.keys():%0A
+ gts = ts.gmtime()%0A
try:
@@ -3969,16 +3969,17 @@
x%25s%22 %25 (
+g
ts.year,
|
462197a9982c944fcf6b786f94ee8012e5e0865f
|
improve verbosity.
|
build_package.py
|
build_package.py
|
#!/usr/bin/python
"""Script for building source and binary debain packages."""
from __future__ import print_function
from argparse import ArgumentParser
from collections import namedtuple
import os
import shutil
import subprocess
import sys
SourceFile = namedtuple('SourceFile', ['sha256', 'size', 'name', 'path'])
CREATE_LXC_TEMPLATE = """\
set -eu
sudo lxc-create -t download -n {container} -- -d ubuntu -r {series} -a {arch}
sudo mkdir /var/lib/lxc/{container}/rootfs/workspace
echo "lxc.mount.entry = {build_dir} workspace none bind 0 0" |
sudo tee -a /var/lib/lxc/{container}/config
"""
BUILD_DEB_TEMPLATE = """\
set -eu
sudo lxc-attach -n {container} -- <<EOT
cd workspace
apt-get install build-essential
dpkg-source -x juju-core_*.dsc
mk-build-deps -i juju-core_*.dsc
dpkg-buildpackage -us -uc
EOT
"""
def parse_dsc(dsc_path, verbose=False):
    """Return the source files needed to build a binary package.

    The .dsc file itself is always the first entry; the remaining
    entries come from its 'Checksums-Sha256' section.
    """
    source_dir = os.path.dirname(dsc_path)
    files = [SourceFile(None, None, os.path.basename(dsc_path), dsc_path)]
    with open(dsc_path) as dsc_file:
        dsc_lines = dsc_file.read().splitlines()
    in_checksums = False
    for line in dsc_lines:
        if in_checksums:
            if line.startswith(' '):
                # "<sha256> <size> <name>" -> append the local path.
                fields = line.split()
                fields.append(os.path.join(source_dir, fields[2]))
                files.append(SourceFile(*fields))
                if verbose:
                    print("Found %s" % files[-1].name)
            else:
                # All files were found.
                break
        elif line.startswith('Checksums-Sha256:'):
            in_checksums = True
    return files
def setup_local(location, series, arch, source_files, verbose=False):
    """Create a directory to build binaries in.

    The directory is populated with the given source files and its
    absolute path is returned.
    """
    dir_name = 'juju-build-{}-{}'.format(series, arch)
    build_dir = os.path.abspath(os.path.join(location, dir_name))
    if verbose:
        print('Creating %s' % build_dir)
    os.makedirs(build_dir)
    for source_file in source_files:
        if verbose:
            print('Copying %s to %s' % (source_file.name, build_dir))
        shutil.copyfile(source_file.path, os.path.join(build_dir, source_file.name))
    return build_dir
def setup_lxc(series, arch, build_dir, verbose=False):
    """Create an LXC container to build binaries.
    The local build_dir with the source files is bound to the container.

    Returns the container name ("<series>-<arch>").
    """
    container = '{}-{}'.format(series, arch)
    # CREATE_LXC_TEMPLATE runs sudo lxc-create and bind-mounts build_dir
    # as /workspace inside the container.
    lxc_script = CREATE_LXC_TEMPLATE.format(
        container=container, series=series, arch=arch, build_dir=build_dir)
    entry_cmd = ['bash', '-c', lxc_script]
    output = subprocess.check_output(entry_cmd)
    if verbose:
        print(output)
    return container


def build_in_lxc(container, verbose=False):
    """Build the binaries from the source files in the container.

    Returns the build's exit code; the container is always stopped,
    even when the build fails.
    """
    returncode = 1
    subprocess.check_call(['sudo', 'lxc-start', '-d', '-n', container])
    try:
        build_script = BUILD_DEB_TEMPLATE.format(container=container)
        proc = subprocess.Popen(['bash', '-c', build_script])
        proc.communicate()
        returncode = proc.returncode
    finally:
        subprocess.check_call(['sudo', 'lxc-stop', '-n', container])
    return returncode


def teardown_lxc(container, verbose=False):
    """Destroy the lxc container."""
    subprocess.check_call(['sudo', 'lxc-destroy', '-n', container])
def move_debs(build_dir, location, verbose=False):
    """Move the debs from the build_dir to the location dir.

    Returns True when at least one .deb was moved, False otherwise.
    """
    deb_names = [n for n in os.listdir(build_dir) if n.endswith('.deb')]
    for deb_name in deb_names:
        if verbose:
            print("Found %s" % deb_name)
        shutil.move(
            os.path.join(build_dir, deb_name),
            os.path.join(location, deb_name))
    return bool(deb_names)
def build_binary(dsc_path, location, series, arch, verbose=False):
    """Build binary debs from a dsc file.

    Parses the dsc, stages its source files locally, builds inside a
    throwaway LXC container, then moves the resulting .deb files into
    *location*.  The container is destroyed even if the build fails.
    """
    # If location is remote, setup remote location and run.
    source_files = parse_dsc(dsc_path, verbose=verbose)
    build_dir = setup_local(
        location, series, arch, source_files, verbose=verbose)
    container = setup_lxc(series, arch, build_dir, verbose=verbose)
    try:
        # NOTE(review): build_in_lxc's returncode is discarded, so a
        # failed build still exits 0 — confirm whether that is intended.
        build_in_lxc(container, verbose=verbose)
    finally:
        # Bug fix: verbose was hard-coded to False here, silencing
        # teardown output even when -v was requested.
        teardown_lxc(container, verbose=verbose)
    move_debs(build_dir, location, verbose=verbose)
    return 0
def main(argv):
    """Execute the commands from the command line.

    Returns the process exit code (0 on success or unknown command).
    """
    exitcode = 0
    args = get_args(argv)
    # 'binary' is currently the only sub-command defined by get_args().
    if args.command == 'binary':
        exitcode = build_binary(
            args.dsc, args.location, args.series, args.arch,
            verbose=args.verbose)
    return exitcode
def get_args(argv=None):
    """Return the arguments for this program.

    argv: full argument vector including the program name; defaults to
        sys.argv.  (Previously the None default crashed with a
        TypeError on ``None[1:]``.)
    """
    parser = ArgumentParser("Build debian packages.")
    parser.add_argument(
        "-v", "--verbose", action="store_true", default=False,
        help="Increase the verbosity of the output")
    subparsers = parser.add_subparsers(help='sub-command help', dest="command")
    bin_parser = subparsers.add_parser('binary', help='Build a binary package')
    bin_parser.add_argument("dsc", help="The dsc file to build")
    bin_parser.add_argument("location", help="The location to build in.")
    bin_parser.add_argument("series", help="The series to build in.")
    bin_parser.add_argument("arch", help="The dpkg architure to build in.")
    if argv is None:
        argv = sys.argv
    args = parser.parse_args(argv[1:])
    return args
if __name__ == '__main__':
sys.exit(main(sys.argv))
|
Python
| 0.000002
|
@@ -3124,24 +3124,35 @@
pt%5D)%0A
+ out, err =
proc.commun
@@ -3159,16 +3159,16 @@
icate()%0A
-
@@ -3196,16 +3196,144 @@
urncode%0A
+ if verbose:%0A print(out)%0A if err:%0A print(%22FROM STDERR:%22)%0A print(err)%0A
fina
@@ -3470,24 +3470,24 @@
ose=False):%0A
-
%22%22%22Destr
@@ -3507,24 +3507,99 @@
ntainer.%22%22%22%0A
+ if verbose:%0A print('Deleting the lxc container %25s' %25 container)%0A
subproce
|
d1c7eb6bce52796af4ea757371a485b69348e189
|
Update execute_program_in_vm.py
|
samples/execute_program_in_vm.py
|
samples/execute_program_in_vm.py
|
"""
Written by Timo Sugliani
Github: https://github.com/tsugliani/
Code based on upload_file_to_vm snippet by Reubenur Rahman
Github: https://github.com/rreubenur/
This code is released under the terms of the Apache 2
http://www.apache.org/licenses/LICENSE-2.0.html
Example:
python execute_program_in_vm.py
-s <vcenter_fqdn>
-u <vcenter_username>
-p <vcenter_password>
-v <vm_uuid>
-r <vm_username>
-w <vm_password>
-l "/bin/cat"
-f "/etc/network/interfaces > /tmp/plop"
This should work on any debian/ubuntu type of vm, and will basically copy
the content of the network configuration to /tmp/plop
"""
from __future__ import with_statement
import atexit
from tools import cli
from pyVim import connect
from pyVmomi import vim, vmodl
def get_args():
    """Get command line args from the user.

    Extends the shared pyVmomi sample parser (host/user/password/port)
    with the VM-specific options used by this script, then prompts for
    any missing password.
    """
    parser = cli.build_arg_parser()
    parser.add_argument('-v', '--vm_uuid',
                        required=False,
                        action='store',
                        help='Virtual machine uuid')
    parser.add_argument('-r', '--vm_user',
                        required=False,
                        action='store',
                        help='virtual machine user name')
    parser.add_argument('-w', '--vm_pwd',
                        required=False,
                        action='store',
                        help='virtual machine password')
    parser.add_argument('-l', '--path_to_program',
                        required=False,
                        action='store',
                        help='Path inside VM to the program')
    parser.add_argument('-f', '--program_arguments',
                        required=False,
                        action='store',
                        help='Program command line options')
    args = parser.parse_args()
    cli.prompt_for_password(args)
    return args
def main():
    """
    Simple command-line program for executing a process in the VM without the
    network requirement to actually access it.

    Connects to vCenter, locates the VM by BIOS UUID, and launches the
    program via VMware Tools guest operations, polling until it exits.
    Python 2 script (print statements, `except IOError, e`).

    NOTE(review): this function uses `re` and `time` but neither is
    imported at the top of the file, and `summary` in the error branch
    is undefined (probably meant `vm.summary`) — both would raise
    NameError when those paths execute; verify before relying on them.
    """
    args = get_args()
    try:
        if args.disable_ssl_verification:
            service_instance = connect.SmartConnectNoSSL(host=args.host,
                                                         user=args.user,
                                                         pwd=args.password,
                                                         port=int(args.port))
        else:
            service_instance = connect.SmartConnect(host=args.host,
                                                    user=args.user,
                                                    pwd=args.password,
                                                    port=int(args.port))
        atexit.register(connect.Disconnect, service_instance)
        content = service_instance.RetrieveContent()
        # if instanceUuid is false it will search for VM BIOS UUID instead
        vm = content.searchIndex.FindByUuid(datacenter=None,
                                            uuid=args.vm_uuid,
                                            vmSearch=True,
                                            instanceUuid=False)
        if not vm:
            raise SystemExit("Unable to locate the virtual machine.")
        tools_status = vm.guest.toolsStatus
        if (tools_status == 'toolsNotInstalled' or
                tools_status == 'toolsNotRunning'):
            raise SystemExit(
                "VMwareTools is either not running or not installed. "
                "Rerun the script after verifying that VMwareTools "
                "is running")
        # Guest OS credentials for the in-VM process.
        creds = vim.vm.guest.NamePasswordAuthentication(
            username=args.vm_user, password=args.vm_pwd
        )
        try:
            pm = content.guestOperationsManager.processManager
            ps = vim.vm.guest.ProcessManager.ProgramSpec(
                programPath=args.path_to_program,
                arguments=args.program_arguments
            )
            res = pm.StartProgramInGuest(vm, creds, ps)
            if res > 0:
                print "Program submitted, PID is %d" % res
                pid_exitcode = pm.ListProcessesInGuest(vm, creds,
                                                       [res]).pop().exitCode
                # If its not a numeric result code, it says None on submit
                while (re.match('[^0-9]+', str(pid_exitcode))):
                    print "Program running, PID is %d" % res
                    time.sleep(5)
                    pid_exitcode = pm.ListProcessesInGuest(vm, creds,
                                                           [res]).pop().\
                        exitCode
                    if (pid_exitcode == 0):
                        print "Program %d completed with success" % res
                        break
                    # Look for non-zero code to fail
                    elif (re.match('[1-9]+', str(pid_exitcode))):
                        print "ERROR: Program %d completed with Failute" % res
                        print " tip: Try running this on guest %r to debug" \
                            % summary.guest.ipAddress
                        print "ERROR: More info on process"
                        print pm.ListProcessesInGuest(vm, creds, [res])
                        break
        except IOError, e:
            print e
    except vmodl.MethodFault as error:
        print "Caught vmodl fault : " + error.msg
        return -1
    return 0
# Start program
if __name__ == "__main__":
main()
|
Python
| 0.000004
|
@@ -5296,16 +5296,21 @@
break
+%0A
|
92a269d95006f991aa65456d413776a6d6d0a93c
|
remove unused import
|
pyramid_oauth2_provider/authentication.py
|
pyramid_oauth2_provider/authentication.py
|
#
# Copyright (c) Elliot Peele <elliot@bentlogic.net>
#
# This program is distributed under the terms of the MIT License as found
# in a file called LICENSE. If it is not present, the license
# is always available at http://www.opensource.org/licenses/mit-license.php.
#
# This program is distributed in the hope that it will be useful, but
# without any waranty; without even the implied warranty of merchantability
# or fitness for a particular purpose. See the MIT License for full details.
#
import base64
import logging
from zope.interface import implementer
from pyramid.interfaces import IAuthenticationPolicy
from pyramid.authentication import AuthTktAuthenticationPolicy
from pyramid.authentication import CallbackAuthenticationPolicy
from pyramid.httpexceptions import HTTPBadRequest
from .models import Oauth2Token
from .models import DBSession as db
from .errors import InvalidToken
from .util import getClientCredentials
log = logging.getLogger('pyramid_oauth2_provider.authentication')
@implementer(IAuthenticationPolicy)
class OauthAuthenticationPolicy(CallbackAuthenticationPolicy):
    """Pyramid authentication policy backed by OAuth2 bearer tokens."""

    def _isOauth(self, request):
        # True when the request carries client credentials at all.
        return bool(getClientCredentials(request))

    def unauthenticated_userid(self, request):
        """Resolve the bearer token to its user id (no group checks).

        Raises HTTPBadRequest when the token is unknown.
        NOTE(review): assumes getClientCredentials() returns a 2-tuple
        here; a credential-less request would fail to unpack — confirm
        callers only reach this when _isOauth() is true.
        """
        token_type, token = getClientCredentials(request)
        if token_type != 'bearer':
            return None
        auth_token = db.query(Oauth2Token).filter_by(access_token=token).first()
        if not auth_token:
            raise HTTPBadRequest(InvalidToken())
        return auth_token.user_id

    def remember(self, request, principal, **kw):
        """
        I don't think there is anything to do for an oauth request here.
        """

    def forget(self, request):
        """
        You could revoke the access token on a call to forget.
        """
@implementer(IAuthenticationPolicy)
class OauthTktAuthenticationPolicy(OauthAuthenticationPolicy,
                                   AuthTktAuthenticationPolicy):
    """Hybrid policy: OAuth when credentials are present, auth_tkt otherwise.

    Each method dispatches to the OAuth base when the request carries
    client credentials, and to the cookie-based auth_tkt base if not.
    """

    def __init__(self, *args, **kwargs):
        # Initialize both bases explicitly; all tkt configuration is
        # forwarded to AuthTktAuthenticationPolicy.
        OauthAuthenticationPolicy.__init__(self)
        AuthTktAuthenticationPolicy.__init__(self, *args, **kwargs)

    def unauthenticated_userid(self, request):
        if self._isOauth(request):
            return OauthAuthenticationPolicy.unauthenticated_userid(
                self, request)
        else:
            return AuthTktAuthenticationPolicy.unauthenticated_userid(
                self, request)

    def remember(self, request, principal, **kw):
        if self._isOauth(request):
            return OauthAuthenticationPolicy.remember(
                self, request, principal, **kw)
        else:
            return AuthTktAuthenticationPolicy.remember(
                self, request, principal, **kw)

    def forget(self, request):
        if self._isOauth(request):
            return OauthAuthenticationPolicy.forget(
                self, request)
        else:
            return AuthTktAuthenticationPolicy.forget(
                self, request)
|
Python
| 0.000001
|
@@ -494,22 +494,8 @@
%0A#%0A%0A
-import base64%0A
impo
|
f49d68db26dcde95ba80b6a0ff72f013f41306d9
|
Improve tests to try raw lines
|
pyromancer/test/test_command_decorator.py
|
pyromancer/test/test_command_decorator.py
|
import re
import pytest
from pyromancer.decorators import command
from pyromancer.exceptions import CommandException
from pyromancer.objects import Match, Line
from pyromancer.test.decorators import mock_connection
from pyromancer.test.mock_objects import MockObject
# Each entry: (value a command may return, expected rendered message).
# Tuples exercise positional, list and dict formatting arguments.
MESSAGES = [
    ('Hello world', 'Hello world'),
    (('Hello {}', 'world'), 'Hello world'),
    (('Hello {} and {}', 'world', 'Mars'), 'Hello world and Mars'),
    (('Hello {} and {}', ['world', 'Mars']), 'Hello world and Mars'),
    (('Hello {sphere}', {'sphere': 'world'}), 'Hello world'),
    (('Hello {} and {red_one}', 'world', {'red_one': 'Mars'}),
     'Hello world and Mars'),
    (('Hello {}, {} and {red_one}', 'world', 'moon', {'red_one': 'Mars'}),
     'Hello world, moon and Mars'),
    (('Hello {}, {} and {red_one}', ['world', 'moon'], {'red_one': 'Mars'}),
     'Hello world, moon and Mars'),
    (('Hello {}', ['world', 'moon']), "Hello ['world', 'moon']"),
]


@mock_connection
def test_command_messaging_return_tuple(c):
    """Each MESSAGES entry sent individually renders to a PRIVMSG."""
    line = Line(':John!JDoe@some.host PRIVMSG #Chan :Some cool message')
    instance = command(r'')
    match = Match(None, line, c)
    for msg, expected in MESSAGES:
        instance.send_messages(msg, match)
        assert c.last == 'PRIVMSG #Chan :{}'.format(expected)


@mock_connection
def test_command_messaging_return_list(c):
    """A list of messages is sent in order, one PRIVMSG each."""
    line = Line(':John!JDoe@some.host PRIVMSG #Chan :Some cool message')
    instance = command(r'')
    match = Match(None, line, c)
    instance.send_messages([msg for msg, expected in MESSAGES], match)
    for index, (msg, expected) in enumerate(MESSAGES):
        assert c.outbox[index] == 'PRIVMSG #Chan :{}'.format(expected)


@mock_connection
def test_command_messaging_yielding(c):
    """A generator of messages is drained and sent in order."""
    def mock_command():
        for msg, expected in MESSAGES:
            yield msg
    line = Line(':John!JDoe@some.host PRIVMSG #Chan :Some cool message')
    instance = command(r'')
    match = Match(None, line, c)
    instance.send_messages(mock_command(), match)
    for index, (msg, expected) in enumerate(MESSAGES):
        assert c.outbox[index] == 'PRIVMSG #Chan :{}'.format(expected)


def test_command_matches_patterns():
    """Regex matching, with and without the command-prefix requirement."""
    line = Line(':John!JDoe@some.host PRIVMSG #Chan :Some cool message')
    settings = MockObject(command_prefix='!')
    instance = command(r'^Some', prefix=False)
    assert bool(instance.matches(line, settings)) is True
    instance = command(r'message$', prefix=False)
    assert bool(instance.matches(line, settings)) is True
    instance = command(r'^Some cool message$', prefix=False)
    assert bool(instance.matches(line, settings)) is True
    instance = command(r'mESsagE', prefix=False, flags=re.IGNORECASE)
    assert bool(instance.matches(line, settings)) is True
    # With prefix=True (default) the message must start with the prefix.
    instance = command(r'Some')
    assert bool(instance.matches(line, settings)) is False
    settings = MockObject(command_prefix='S')
    instance = command(r'^ome')
    assert bool(instance.matches(line, settings)) is True
    instance = command(r'cool')
    assert bool(instance.matches(line, settings)) is True


def test_command_matches_code():
    """Numeric-code commands match only lines carrying that IRC code."""
    with pytest.raises(CommandException):
        command()
    with pytest.raises(CommandException):
        command(code='Foo')
    settings = MockObject(command_prefix='!')
    instance = command(code=376)
    line = Line(':irc.example.net 376 A :End of MOTD command')
    assert line.code == 376
    assert bool(instance.matches(line, settings)) is True
    line = Line(':irc.example.net 375 A :- irc.example.net message of the day')
    assert line.code == 375
    assert bool(instance.matches(line, settings)) is False
|
Python
| 0
|
@@ -3075,16 +3075,307 @@
s True%0A%0A
+ line = Line(':irc.example.net 376 A :End of MOTD command')%0A%0A instance = command(r'example', prefix=False)%0A assert bool(instance.matches(line, settings)) is False%0A%0A instance = command(r'example', raw=True, prefix=False)%0A assert bool(instance.matches(line, settings)) is True%0A%0A
%0Adef tes
|
2652919c8d2e6fad8f7b3d47f5e82528b4b5214e
|
Write the last point for plot completeness
|
plots/monotone.py
|
plots/monotone.py
|
# MONOTONE
# Produce a monotonically decreasing output plot from noisy data
# Input:  columns: t x
# Output: columns: t_i x_i , sampled such that x_i <= x_j
#         for j > i.
#
# NOTE: Python 2 — `split` comes from the `string` module star-import.

from string import *
import sys
# Set PYTHONPATH=$PWD
from plottools import *

if len(sys.argv) != 3:
    abort("usage: <input file> <output file>")
input_file = sys.argv[1]
output_file = sys.argv[2]

# Running minimum; a point is emitted only when it improves the minimum.
val_loss_min = sys.float_info.max
with open(input_file, "r") as fp_i, \
     open(output_file, "w") as fp_o:
    for line in fp_i:
        (t, val_loss_string) = split(line)
        val_loss = float(val_loss_string)
        if val_loss < val_loss_min:
            val_loss_min = val_loss
            # NOTE(review): the final input point is dropped when it does
            # not improve the minimum, so the plot can end early — confirm
            # whether the last t should always be written.
            fp_o.write("%s, %f\n" % (t, val_loss_min))
|
Python
| 0.999273
|
@@ -686,16 +686,162 @@
al_loss%0A
+ fp_o.write(%22%25s, %25f%5Cn%22 %25 (t, val_loss_min))%0A # Ensure the last data point is written for the plot:%0A if val_loss %3E= val_loss_min:%0A
|
820a958497383574c845c6a6a84e3a7b9e33196a
|
Handle error case better
|
butter/splice.py
|
butter/splice.py
|
#!/usr/bin/env python
"""splice: wrapper around the splice() syscall"""
#from __future__ import print_function
from select import select as _select
from collections import namedtuple
from cffi import FFI as _FFI
import errno as _errno
_ffi = _FFI()
_ffi.cdef("""
#define SPLICE_F_MOVE ... /* This is a noop in modern kernels and is left here for compatibility */
#define SPLICE_F_NONBLOCK ... /* Make splice operations Non blocking (as long as the fd's are non blocking) */
#define SPLICE_F_MORE ... /* After splice() more data will be sent, this is a hint to add TCP_CORK like buffering */
#define SPLICE_F_GIFT ... /* unused for splice() (vmsplice compatibility) */
ssize_t splice(int fd_in, signed long long *off_in, int fd_out, signed long long *off_out, size_t len, unsigned int flags);
""")
_C = _ffi.verify("""
#include <fcntl.h>
""", libraries=[])
def splice(fd_in, fd_out, in_offset=0, out_offset=0, len=0, flags=0):
    """Take data from fd_in and pass it to fd_out without going through userspace

    Arguments:
    -----------
    fd_in, fd_out: file descriptors, or objects with a fileno() method.
    in_offset, out_offset: offsets passed through to the syscall
        (cast to ``long long *``).
    len: number of bytes to move (shadows the builtin; kept for API
        compatibility).
    flags: OR of the SPLICE_F_* flags below.

    Flags:
    -------
    SPLICE_F_MOVE: This is a noop in modern kernels and is left here for compatibility
    SPLICE_F_NONBLOCK: Make splice operations Non blocking (as long as the fd's are non blocking)
    SPLICE_F_MORE: After splice() more data will be sent, this is a hint to add TCP_CORK like buffering
    SPLICE_F_GIFT: unused for splice() (vmsplice compatibility)

    Returns:
    ---------
    int: Number of bytes written

    Exceptions:
    ------------
    EBADF  One or both file descriptors are not valid, or do not have proper read-write mode.
    EINVAL Target filesystem doesn't support splicing; target file is opened in append mode; neither of the descriptors refers to a pipe; or offset given for
           nonseekable device.
    ENOMEM Out of memory.
    ESPIPE Either off_in or off_out was not NULL, but the corresponding file descriptor refers to a pipe.
    """
    # Accept file-like objects transparently.
    fd_in = getattr(fd_in, 'fileno', lambda: fd_in)()
    fd_out = getattr(fd_out, 'fileno', lambda: fd_out)()
    in_offset = _ffi.cast("long long *", in_offset)
    out_offset = _ffi.cast("long long *", out_offset)
    size = _C.splice(fd_in, in_offset, fd_out, out_offset, len, flags)
    if size < 0:
        # Map errno to a Python exception (see splice(2)).
        err = _ffi.errno
        if err == _errno.EINVAL:
            if in_offset or out_offset:
                raise ValueError("fds may not be seekable")
            else:
                raise ValueError("Target filesystem does not support slicing or file may be in append mode")
        elif err == _errno.EBADF:
            raise OSError("fds are invalid or incorrect mode for file")
        elif err == _errno.ESPIPE:
            raise OSError("offset specified but one of the fds is a pipe")
        elif err == _errno.ENOMEM:
            raise MemoryError("Insufficent kernel memory avalible")
        elif err == _errno.EAGAIN:
            raise IOError("No writers on fd_in or a fd is open in BLOCKING mode and NON_BLOCK specified to splice()")
        else:
            # If you are here, its a bug. send us the traceback
            raise ValueError("Unknown Error: {}".format(err))
    return size
SPLICE_F_MOVE = _C.SPLICE_F_MOVE
SPLICE_F_NONBLOCK = _C.SPLICE_F_NONBLOCK
SPLICE_F_MORE = _C.SPLICE_F_MORE
SPLICE_F_GIFT = _C.SPLICE_F_GIFT
def main():
import sys
import os
val = 'thisisatest'
fd1_ingress, fd1_egress = os.pipe()
fd2_ingress, fd2_egress = os.pipe()
print('writing')
os.write(fd1_egress, val)
print('splicing')
# Make sure we are splicing() more than the buffer of data we have chosen as a
# sentinal value to ensure that nothing blocks unexpectly (hard coded magic values
# lead to chaos)
splice(fd1_ingress, fd2_egress, flags=SPLICE_F_NONBLOCK, len=len(val)*2)
print('reading')
buf = os.read(fd2_ingress, len(val)+5) # this works as pipes can give a short read
print('verifing ("{}" == "{}")'.format(buf, val))
assert buf == val, 'value transformed through pipe transistion'
print('all ok')
def socket_main():
from select import select
import socket
import os
in_sock = socket.socket(socket.AF_INET6, socket.SOCK_STREAM)
out_sock = socket.socket(socket.AF_INET6, socket.SOCK_STREAM)
out_pipe, in_pipe = os.pipe()
in_sock.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
in_sock.bind(('::', 8090))
in_sock.listen(1)
conn, addr = in_sock.accept()
print('Accepted connection from {}'.format(addr))
print('Connecting to (::1, 8091)')
out_sock.connect(('::1', 8091))
max_segment = 2**24
bytes = True
try:
while bytes != 0:
rd, wr, err = select([conn], [], [conn])
if err:
print('Connection error')
break
print('Splicing')
bytes = splice(conn, in_pipe, len=max_segment, flags=SPLICE_F_MOVE)
print("Read {} Bytes".format(bytes))
bytes = splice(out_pipe, out_sock, len=max_segment)
out_sock.setsockopt(socket.IPPROTO_TCP, socket.TCP_CORK, 0)
print("Wrote {} Bytes".format(bytes))
except KeyboardInterrupt:
pass
print("Exiting")
if __name__ == "__main__":
socket_main()
#main()
|
Python
| 0.000002
|
@@ -4712,18 +4712,12 @@
ile
-bytes != 0
+True
:%0A
@@ -4851,16 +4851,33 @@
break%0A
+ %0A
@@ -4894,24 +4894,24 @@
'Splicing')%0A
-
@@ -4978,16 +4978,82 @@
F_MOVE)%0A
+ if bytes == 0:%0A break%0A %0A
|
58ee8882fdbdef01f36859f0ed40afc346518690
|
Add test for double backward
|
tests/chainer_tests/functions_tests/array_tests/test_flip.py
|
tests/chainer_tests/functions_tests/array_tests/test_flip.py
|
import unittest
import numpy
import chainer
from chainer import cuda
from chainer import functions
from chainer import gradient_check
from chainer import testing
from chainer.testing import attr
from chainer.utils import type_check
@testing.parameterize(*testing.product_dict(
[
{'shape': (1,), 'axis': 0},
{'shape': (2, 3, 4), 'axis': 0},
{'shape': (2, 3, 4), 'axis': 1},
{'shape': (2, 3, 4), 'axis': 2},
{'shape': (2, 3, 4), 'axis': -3},
{'shape': (2, 3, 4), 'axis': -2},
{'shape': (2, 3, 4), 'axis': -1},
],
[
{'dtype': numpy.float16},
{'dtype': numpy.float32},
{'dtype': numpy.float64},
],
))
class TestFlip(unittest.TestCase):
def setUp(self):
self.x = numpy.random.uniform(0, 1, self.shape).astype(self.dtype)
self.g = numpy.random.uniform(0, 1, self.shape).astype(self.dtype)
def check_forward(self, x_data, axis):
x = chainer.Variable(x_data)
y = functions.flip(x, axis)
testing.assert_allclose(y.data, numpy.flip(x_data, axis))
def test_forward_cpu(self):
self.check_forward(self.x, self.axis)
@attr.gpu
def test_forward_gpu(self):
self.check_forward(cuda.to_gpu(self.x), self.axis)
def check_backward(self, x_data, axis, y_grad):
gradient_check.check_backward(lambda x: functions.flip(x, axis),
x_data, y_grad, dtype=numpy.float64)
def test_backward_cpu(self):
self.check_backward(self.x, self.axis, self.g)
@attr.gpu
def test_backward_gpu(self):
self.check_backward(
cuda.to_gpu(self.x), self.axis, cuda.to_gpu(self.g))
@testing.parameterize(
{'axis': 3},
{'axis': -4},
)
class TestFlipInvalidTypeAxis(unittest.TestCase):
def setUp(self):
self.x = numpy.random.uniform(-1, 1, (2, 3, 4)).astype('f')
def check_type_error(self, x):
with self.assertRaises(type_check.InvalidType):
functions.flip(x, self.axis)
def test_type_error_cpu(self):
self.check_type_error(self.x)
@attr.gpu
def test_type_error_gpu(self):
self.check_type_error(cuda.to_gpu(self.x))
class TestFlipInvalidTypeError(unittest.TestCase):
def test_invalid_axis(self):
with self.assertRaises(TypeError):
functions.Flip('a')
testing.run_module(__name__, __file__)
|
Python
| 0.000007
|
@@ -777,33 +777,34 @@
.random.uniform(
-0
+-1
, 1, self.shape)
@@ -837,16 +837,17 @@
self.g
+y
= numpy
@@ -862,17 +862,18 @@
uniform(
-0
+-1
, 1, sel
@@ -899,16 +899,284 @@
f.dtype)
+%0A self.ggx = numpy.random.uniform(-1, 1, self.shape).astype(self.dtype)%0A%0A self.check_double_backward_options = %7B'atol': 1e-3, 'rtol': 1e-2%7D%0A if self.dtype == numpy.float16:%0A self.check_double_backward_options.update(dtype=numpy.float64)
%0A%0A de
@@ -1825,16 +1825,17 @@
, self.g
+y
)%0A%0A @
@@ -1905,16 +1905,16 @@
ckward(%0A
-
@@ -1967,16 +1967,665 @@
u(self.g
+y))%0A%0A def check_double_backward(self, x_data, axis, y_grad, x_grad_grad):%0A def f(x):%0A x = functions.flip(x, axis)%0A return x * x%0A%0A gradient_check.check_double_backward(%0A f, x_data, y_grad, x_grad_grad,%0A **self.check_double_backward_options)%0A%0A def test_double_backward_cpu(self):%0A self.check_double_backward(self.x, self.axis, self.gy, self.ggx)%0A%0A @attr.gpu%0A def test_double_backward_gpu(self):%0A self.check_double_backward(cuda.to_gpu(self.x), self.axis,%0A cuda.to_gpu(self.gy),%0A cuda.to_gpu(self.ggx
))%0A%0A%0A@te
|
6482c485982fe5039574eab797b46d5f1b93bacc
|
Refactor populate script
|
finance/management/commands/populate.py
|
finance/management/commands/populate.py
|
import random
from django.contrib.auth.models import User
from django.core.management.base import BaseCommand
from django.db import IntegrityError
import factory
from accounts.factories import UserFactory
from books.factories import TransactionFactory
class Command(BaseCommand):
help = "Popoulates databse with dummy data"
def handle(self, *args, **options):
# Factory creates simple user, so ``is_staff`` is set later
try:
admin = UserFactory(username='admin', password='asdasd')
admin.is_staff = True
admin.save()
print("admin user have been created successfully")
except IntegrityError:
admin = User.objects.get(username='admin')
print("admin user already exists")
TransactionFactory.create_batch(
10,
price=factory.Sequence(lambda n: random.randrange(0, 10)),
user=admin,
)
print("Transactions for admin created")
|
Python
| 0
|
@@ -107,45 +107,8 @@
mand
-%0Afrom django.db import IntegrityError
%0A%0Aim
@@ -330,16 +330,288 @@
tions):%0A
+ if not User.objects.filter(username='admin'):%0A self.create_admin()%0A else:%0A self.admin = User.objects.get(username='admin')%0A print(%22admin user already exists%22)%0A%0A self.create_transactions()%0A%0A def create_admin(self):%0A
@@ -682,25 +682,13 @@
-try:%0A
+self.
admi
@@ -744,28 +744,29 @@
d')%0A
-
-
+self.
admin.is_sta
@@ -779,28 +779,29 @@
rue%0A
-
+self.
admin.save()
@@ -801,36 +801,32 @@
.save()%0A
-
-
print(%22admin use
@@ -864,141 +864,43 @@
y%22)%0A
+%0A
- except IntegrityError:%0A admin = User.objects.get(username='admin')%0A print(%22admin user already exists%22)%0A
+def create_transactions(self):
%0A
@@ -969,13 +969,14 @@
-price
+amount
=fac
@@ -1041,13 +1041,91 @@
-user=
+category=random.randrange(0, 2), # random range from 0 to 1%0A user=self.
admi
|
c745f6abe699251a877245ec19ae7b417fd97718
|
Fix bug in kolibri arg passing
|
kolibri/utils/cli.py
|
kolibri/utils/cli.py
|
from __future__ import absolute_import, print_function, unicode_literals
import importlib
import logging
import os
import signal
import sys
import kolibri
from docopt import docopt
from logging import config as logging_config
USAGE = """
Kolibri
Supported by Foundation for Learning Equality
www.learningequality.org
Usage:
kolibri start [--foreground --watch] [options] [-- DJANGO_OPTIONS ...]
kolibri stop [options] [-- DJANGO_OPTIONS ...]
kolibri restart [options] [-- DJANGO_OPTIONS ...]
kolibri status [options]
kolibri shell [options] [-- DJANGO_OPTIONS ...]
kolibri manage [options] COMMAND [-- DJANGO_OPTIONS ...]
kolibri diagnose [options]
kolibri plugin PLUGIN (enable | disable)
kolibri plugin --list
kolibri -h | --help
kolibri --version
Options:
-h --help Show this screen.
--version Show version.
COMMAND The name of any available django manage command. For
help, type `kolibri manage help`
--debug Output debug messages (for development)
--port=<arg> Use a non-default port on which to start the HTTP server
or to query an existing server (stop/status)
DJANGO_OPTIONS All options are passed on to the django manage command.
Notice that all django options must appear *last* and
should not be mixed with other options. Only long-name
options ('--long-name') are supported.
Examples:
kolibri start Start Kolibri
kolibri stop Stop Kolibri
kolibri status How is Kolibri doing?
kolibri url Tell me the address of Kolibri
kolibri shell Display a Django shell
kolibri manage help Show the Django management usage dialogue
kolibri manage runserver Runs Django's development server
kolibri diagnose Show system information for debugging
Environment:
DJANGO_SETTINGS_MODULE
- The Django settings module to load. Useful if you are deploying Kolibri
in a specific setup such as your own web server.
- Default: "kolibri.deployment.default.settings.base"
KOLIBRI_HOME
- Where Kolibri will store its data and configuration files. If you are using
an external drive
"""
__doc__ = """
Kolibri Command Line Interface (CLI)
====================================
Auto-generated usage instructions from ``kolibri -h``::
{usage:s}
""".format(usage="\n".join(map(lambda x: " " + x, USAGE.split("\n"))))
# Set default env
os.environ.setdefault("DJANGO_SETTINGS_MODULE", "kolibri.deployment.default.settings.base")
os.environ.setdefault("KOLIBRI_HOME", os.path.join(os.path.expanduser("~"), ".kolibri"))
os.environ.setdefault("KOLIBRI_LISTEN_PORT", "8008")
logger = logging.getLogger(__name__)
def setup_logging():
"""Configures logging in cases where a Django environment is not supposed
to be configured"""
from kolibri.deployment.default.settings.base import LOGGING
logging_config.dictConfig(LOGGING)
def manage(cmd, args=[]):
"""
Invokes a django command
:param: cmd: The command to invoke, for instance "runserver"
:param: args: arguments for the command
"""
# Set sys.argv to correctly reflect the way we invoke kolibri as a Python
# module
sys.argv = ["-m", "kolibri"] + sys.argv[1:]
from django.core.management import execute_from_command_line
argv = ['kolibri manage', cmd] + args
execute_from_command_line(argv=argv)
def plugin(plugin_name, args):
"""
Receives a plugin identifier and tries to load its main class. Calls class
functions.
"""
setup_logging()
from kolibri.utils import conf
plugin_classes = []
from kolibri.plugins.base import KolibriPluginBase # NOQA
# Try to load kolibri_plugin from given plugin module identifier
try:
plugin_module = importlib.import_module(plugin_name + ".kolibri_plugin")
for obj in plugin_module.__dict__.values():
if type(obj) == type and obj is not KolibriPluginBase and issubclass(obj, KolibriPluginBase):
plugin_classes.append(obj)
except ImportError:
raise RuntimeError("Plugin does not exist")
if args['enable']:
for klass in plugin_classes:
klass.enable()
if args['disable']:
for klass in plugin_classes:
klass.disable()
conf.save()
def main(args=None):
"""
Kolibri's main function. Parses arguments and calls utility functions.
Utility functions should be callable for unit testing purposes, but remember
to use main() for integration tests in order to test the argument API.
"""
if not args:
args = sys.argv[1:]
signal.signal(signal.SIGINT, signal.SIG_DFL)
# Split out the parts of the argument list that we pass on to Django
# and don't feed to docopt.
if '--' in args:
pivot = args.index('--')
args, django_args = args[:pivot], args[pivot:]
else:
django_args = []
docopt_kwargs = dict(
version=str(kolibri.__version__),
options_first=False,
)
if args:
docopt_kwargs['argv'] = args
arguments = docopt(USAGE, **docopt_kwargs)
if arguments['manage']:
command = arguments['COMMAND']
manage(command, args=django_args)
return
if arguments['plugin']:
plugin_name = arguments['PLUGIN']
plugin(plugin_name, arguments)
return
logger.info(arguments)
|
Python
| 0.999965
|
@@ -5035,16 +5035,18 @@
gs%5Bpivot
++1
:%5D%0A e
|
2c8a1b121c7cc709ae24bb38605aab4c81dee899
|
Increase job timeout to 2h
|
src/scripts/schedule_jobs.py
|
src/scripts/schedule_jobs.py
|
#!/usr/bin/env python
# Copyright 2020 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
This script schedules all the jobs to be dispatched to AppEngine.
"""
import os
import sys
from functools import partial
from argparse import ArgumentParser
from google.cloud import scheduler_v1
from google.cloud.scheduler_v1.types import Job, AppEngineHttpTarget
sys.path.append(os.path.abspath(os.path.join(os.path.dirname(__file__), "..")))
# pylint: disable=wrong-import-position
from lib.pipeline_tools import get_pipelines
def clear_jobs(
client: scheduler_v1.CloudSchedulerClient, project_id: str, location_id: str
) -> None:
""" Delete all scheduled jobs """
parent = client.location_path(project_id, location_id)
for job in client.list_jobs(parent):
client.delete_job(job.name)
def schedule_job(
client: scheduler_v1.CloudSchedulerClient,
project_id: str,
location_id: str,
timezone: str,
schedule: str,
path: str,
) -> None:
""" Schedules the given job for the specified project and location """
# Create a Job to schedule
target = AppEngineHttpTarget(relative_uri=path, http_method="GET")
job = Job(app_engine_http_target=target, schedule=schedule, time_zone=timezone)
# Schedule the Job we just created
parent = client.location_path(project_id, location_id)
client.create_job(parent, job)
def schedule_all_jobs(project_id: str, location_id: str, timezone: str) -> None:
"""
Clears all previously scheduled jobs and schedules all necessary jobs for the current
configuration.
"""
client = scheduler_v1.CloudSchedulerClient()
# Create a custom method with our parameters for ease of use
_schedule_job = partial(
schedule_job,
client=client,
project_id=project_id,
location_id=location_id,
timezone=timezone,
)
# Clear all pre-existing jobs
clear_jobs(client=client, project_id=project_id, location_id=location_id)
# Cache pull job runs hourly
_schedule_job(schedule="0 * * * *", path="/cache_pull")
# The job that publishes data into the prod bucket runs every 4 hours
_schedule_job(
path="/publish",
# Offset by 30 minutes to let other hourly tasks finish
schedule="30 */4 * * *",
)
# Converting the outputs to JSON is less critical but also slow so it's run separately
_schedule_job(
path="/convert_json_1",
# Offset by 30 minutes to run after publishing
schedule="0 1-23/4 * * *",
)
# The convert to JSON task is split in two because otherwise it takes too long
_schedule_job(
path="/convert_json_2",
# Offset by 30 minutes to run after publishing
schedule="0 1-23/4 * * *",
)
# Get new errors once a day at midday.
_schedule_job(path="/report_errors_to_github", schedule="0 12 * * *")
# Keep track of the different job groups to only output them once
job_urls_seen = set()
for data_pipeline in get_pipelines():
# The job that combines data sources into a table runs hourly
_schedule_job(
path=f"/combine_table?table={data_pipeline.table}",
# Offset by 15 minutes to let other hourly tasks finish
schedule="15 * * * *",
)
for idx, data_source in enumerate(data_pipeline.data_sources):
# The job to pull each individual data source runs hourly unless specified otherwise
job_sched = data_source.config.get("automation", {}).get("schedule", "0 * * * *")
# Each data source has a job group. All data sources within the same job group are run
# as part of the same job in series. The default job group is the index of the data
# source.
job_group = data_source.config.get("automation", {}).get("job_group", idx)
job_url = f"/update_table?table={data_pipeline.table}&job_group={job_group}"
if job_url not in job_urls_seen:
job_urls_seen.add(job_url)
_schedule_job(path=job_url, schedule=job_sched)
if __name__ == "__main__":
# Get default values from environment
default_project = os.environ.get("GCP_PROJECT")
default_location = os.environ.get("GCP_LOCATION", "us-east1")
default_timezone = os.environ.get("GCP_TIMEZONE", "EST")
# Parse arguments from the command line
argparser = ArgumentParser()
argparser.add_argument("--project-id", type=str, default=default_project)
argparser.add_argument("--location-id", type=str, default=default_location)
argparser.add_argument("--timezone", type=str, default=default_timezone)
args = argparser.parse_args()
# Ensure project ID is not empty, since we don't have a default value for it
assert args.project_id is not None, 'Argument "project-id" must not be empty'
# Clear all preexisting jobs and schedule the new ones, this assumes the current code has
# already been successfully deployed to GAE in a previous build step
schedule_all_jobs(args.project_id, args.location_id, args.timezone)
|
Python
| 0.000053
|
@@ -1677,16 +1677,25 @@
b = Job(
+%0A
app_engi
@@ -1716,16 +1716,24 @@
=target,
+%0A
schedul
@@ -1743,16 +1743,24 @@
chedule,
+%0A
time_zo
@@ -1770,16 +1770,55 @@
timezone
+,%0A attempt_deadline=%22120m%22,%0A
)%0A%0A #
|
87405b65ca4f6848a3e7ec0a63369658d09cd0d5
|
print debug messages to stderr, not stdout
|
fasttsne/__init__.py
|
fasttsne/__init__.py
|
import scipy.linalg as la
import numpy as np
import time
from fasttsne import _TSNE as TSNE
def timed_reducer(f):
def f2(data, d, mode, **kwargs):
t = time.time()
print "Reducing to %dd using %s..." % (d, f.__name__)
if mode == 1:
from sklearn.preprocessing import Normalizer
data = Normalizer().fit_transform(data)
X = f(data, d, mode, **kwargs)
print "%s -> %s. Took %.1fs" % (data.shape, X.shape, time.time() - t)
return X
return f2
@timed_reducer
def sparse_encode(data, d, mode, alpha=500):
import sklearn.decomposition as deco
print "finding dict..."
code, dictionary, errors = deco.dict_learning(data[:1000], d, alpha, verbose=True)
print code, dictionary, errors
return deco.sparse_encode(data, dictionary)
@timed_reducer
def pca_reduce(data, pca_d, mode, algorithm='RandomizedPCA'):
import sklearn.decomposition as deco
alg = getattr(deco, algorithm)
print "pca..."
pca = alg(n_components=pca_d)
X = pca.fit_transform(data)
return X
@timed_reducer
def whitened_pca_reduce(data, pca_d, mode):
import sklearn.decomposition as deco
print "pca..."
pca = deco.RandomizedPCA(pca_d, whiten=True)
X = pca.fit_transform(data)
return X
def fast_tsne(data, pca_d=None, d=2, perplexity=30., theta=0.5, mode=0, normalise=0,
whiten=0):
"""
Run Barnes-Hut T-SNE on _data_.
@param data The data.
@param pca_d The dimensionality of data is reduced via PCA
to this dimensionality.
@param d The embedding dimensionality. Must be fixed to
2.
@param perplexity The perplexity controls the effective number of
neighbors.
@param theta Degree of BH optimisation (0-1; higher -> faster, worse).
@param mode 0: Euclidean; 1: normalised Euclidean.
@param normalise Normalise mean around zero.
@param whiten Whiten when doing PCA.
"""
# inplace!!
if normalise:
print "normalising..."
data = data - data.mean(axis=0)
if not pca_d or pca_d > data.shape[1]:
X = data
elif whiten:
X = whitened_pca_reduce(data, pca_d, mode)
del data
else:
X = pca_reduce(data, pca_d, mode)
del data
N, vlen = X.shape
print X.shape
tsne = TSNE()
Y = tsne.run(X, N, vlen, d, perplexity, theta, mode)
return Y
|
Python
| 0.998474
|
@@ -49,16 +49,27 @@
ort time
+%0Aimport sys
%0A%0Afrom f
@@ -194,16 +194,31 @@
print
+ %3E%3E sys.stderr,
%22Reduci
@@ -441,16 +441,31 @@
print
+ %3E%3E sys.stderr,
%22%25s -%3E
@@ -2124,16 +2124,21 @@
ormalise
+_mean
:%0A
@@ -2144,16 +2144,31 @@
print
+ %3E%3E sys.stderr,
%22normal
@@ -2464,16 +2464,31 @@
print
+ %3E%3E sys.stderr,
X.shape
|
17037f53d3b3a54456892a986e1a199d381b5074
|
Use absolute_import in markdown.py, to fix import problem.
|
pokedex/db/markdown.py
|
pokedex/db/markdown.py
|
# encoding: utf8
u"""Implements the markup used for description and effect text in the database.
The language used is a variation of Markdown and Markdown Extra. There are
docs for each at http://daringfireball.net/projects/markdown/ and
http://michelf.com/projects/php-markdown/extra/ respectively.
Pokédex links are represented with the extended syntax `[name]{type}`, e.g.,
`[Eevee]{pokemon}`. The actual code that parses these is in spline-pokedex.
"""
import markdown
import sqlalchemy.types
class MarkdownString(object):
"""Wraps a Markdown string. Stringifies to the original text, but .as_html
will return an HTML rendering.
To add extensions to the rendering (which is necessary for rendering links
correctly, and which spline-pokedex does), you must append to this class's
`markdown_extensions` list. Yep, that's gross.
"""
markdown_extensions = ['extra']
def __init__(self, source_text):
self.source_text = source_text
self._as_html = None
def __unicode__(self):
return self.source_text
@property
def as_html(self):
"""Returns the string as HTML4."""
if self._as_html:
return self._as_html
md = markdown.Markdown(
extensions=self.markdown_extensions,
safe_mode='escape',
output_format='xhtml1',
)
self._as_html = md.convert(self.source_text)
return self._as_html
@property
def as_text(self):
"""Returns the string in a plaintext-friendly form.
At the moment, this is just the original source text.
"""
return self.source_text
class MoveEffectProperty(object):
"""Property that wraps a move effect. Used like this:
MoveClass.effect = MoveEffectProperty('effect')
some_move.effect # returns a MarkdownString
some_move.effect.as_html # returns a chunk of HTML
This class also performs simple substitution on the effect, replacing
`$effect_chance` with the move's actual effect chance.
"""
def __init__(self, effect_column):
self.effect_column = effect_column
def __get__(self, move, move_class):
effect_text = getattr(move.move_effect, self.effect_column)
effect_text = effect_text.replace(
u'$effect_chance',
unicode(move.effect_chance),
)
return MarkdownString(effect_text)
class MarkdownColumn(sqlalchemy.types.TypeDecorator):
"""Generic SQLAlchemy column type for Markdown text.
Do NOT use this for move effects! They need to know what move they belong
to so they can fill in, e.g., effect chances. Use the MoveEffectProperty
property class above.
"""
impl = sqlalchemy.types.Unicode
def process_bind_param(self, value, dialect):
if not isinstance(value, basestring):
# Can't assign, e.g., MarkdownString objects yet
raise NotImplementedError
return unicode(value)
def process_result_value(self, value, dialect):
return MarkdownString(value)
|
Python
| 0
|
@@ -453,16 +453,55 @@
dex.%0A%22%22%22
+%0Afrom __future__ import absolute_import
%0A%0Aimport
|
4b6d584f4fb3741fe3d3268c36b54b8469444f60
|
fix identifier error of 'dshape'
|
benchmark/fluid/models/vgg.py
|
benchmark/fluid/models/vgg.py
|
# Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""VGG16 benchmark in Fluid"""
from __future__ import print_function
import sys
import time
import numpy as np
import paddle
import paddle.fluid as fluid
import paddle.fluid.core as core
import argparse
import functools
import os
def vgg16_bn_drop(input):
def conv_block(input, num_filter, groups, dropouts):
return fluid.nets.img_conv_group(
input=input,
pool_size=2,
pool_stride=2,
conv_num_filter=[num_filter] * groups,
conv_filter_size=3,
conv_act='relu',
conv_with_batchnorm=True,
conv_batchnorm_drop_rate=dropouts,
pool_type='max')
conv1 = conv_block(input, 64, 2, [0.3, 0])
conv2 = conv_block(conv1, 128, 2, [0.4, 0])
conv3 = conv_block(conv2, 256, 3, [0.4, 0.4, 0])
conv4 = conv_block(conv3, 512, 3, [0.4, 0.4, 0])
conv5 = conv_block(conv4, 512, 3, [0.4, 0.4, 0])
drop = fluid.layers.dropout(x=conv5, dropout_prob=0.5)
fc1 = fluid.layers.fc(input=drop, size=512, act=None)
bn = fluid.layers.batch_norm(input=fc1, act='relu')
drop2 = fluid.layers.dropout(x=bn, dropout_prob=0.5)
fc2 = fluid.layers.fc(input=drop2, size=512, act=None)
return fc2
def get_model(args):
if args.data_set == "cifar10":
classdim = 10
if args.data_format == 'NCHW':
data_shape = [3, 32, 32]
else:
data_shape = [32, 32, 3]
else:
classdim = 102
if args.data_format == 'NCHW':
data_shape = [3, 224, 224]
else:
data_shape = [224, 224, 3]
if args.use_reader_op:
filelist = [
os.path.join(args.data_path, f) for f in os.listdir(args.data_path)
]
data_file = fluid.layers.open_files(
filenames=filelist,
shapes=[[-1] + data_shape, (-1, 1)],
lod_levels=[0, 0],
dtypes=["float32", "int64"],
thread_num=args.gpus,
pass_num=args.pass_num)
data_file = fluid.layers.double_buffer(
fluid.layers.batch(
data_file, batch_size=args.batch_size))
images, label = fluid.layers.read_file(data_file)
else:
images = fluid.layers.data(name='data', shape=dshape, dtype='float32')
label = fluid.layers.data(name='label', shape=[1], dtype='int64')
# Train program
net = vgg16_bn_drop(images)
predict = fluid.layers.fc(input=net, size=classdim, act='softmax')
cost = fluid.layers.cross_entropy(input=predict, label=label)
avg_cost = fluid.layers.mean(x=cost)
# Evaluator
batch_size_tensor = fluid.layers.create_tensor(dtype='int64')
batch_acc = fluid.layers.accuracy(
input=predict, label=label, total=batch_size_tensor)
# inference program
inference_program = fluid.default_main_program().clone()
with fluid.program_guard(inference_program):
inference_program = fluid.io.get_inference_program(
target_vars=[batch_acc, batch_size_tensor])
# Optimization
optimizer = fluid.optimizer.Adam(learning_rate=args.learning_rate)
# data reader
train_reader = paddle.batch(
paddle.reader.shuffle(
paddle.dataset.cifar.train10()
if args.data_set == 'cifar10' else paddle.dataset.flowers.train(),
buf_size=5120),
batch_size=args.batch_size * args.gpus)
test_reader = paddle.batch(
paddle.dataset.cifar.test10()
if args.data_set == 'cifar10' else paddle.dataset.flowers.test(),
batch_size=args.batch_size)
return avg_cost, inference_program, optimizer, train_reader, test_reader, batch_acc
|
Python
| 0.000373
|
@@ -2839,24 +2839,37 @@
layers.data(
+%0A
name='data',
@@ -2876,16 +2876,20 @@
shape=d
+ata_
shape, d
|
2eb4f8bf1af29a161155835a841519a31b514fe3
|
Fix add/edit links in users module.
|
modules/installed/system/users.py
|
modules/installed/system/users.py
|
import os, cherrypy
from gettext import gettext as _
from auth import require, add_user
from plugin_mount import PagePlugin, FormPlugin
import cfg
from forms import Form
from util import *
from model import User
class users(PagePlugin):
order = 20 # order of running init in PagePlugins
def __init__(self, *args, **kwargs):
PagePlugin.__init__(self, *args, **kwargs)
self.register_page("sys.users")
self.register_page("sys.users.add")
self.register_page("sys.users.edit")
@cherrypy.expose
@require()
def index(self):
return self.fill_template(title="Manage Users and Groups", sidebar_right="""<strong><a href="/sys/users/add">Add User</a></strong><br/><strong><a href="/sys/users/edit">Edit Users</a></strong>""")
class add(FormPlugin, PagePlugin):
url = ["/sys/users/add"]
order = 30
sidebar_left = ''
sidebar_right = _("""<strong>Add User</strong><p>Adding a user via this
administrative interface <strong>might</strong> create a system user.
For example, if you provide a user with ssh access, she will
need a system account. If you don't know what that means,
don't worry about it.</p>""")
def main(self, username='', name='', email='', message=None, *args, **kwargs):
form = Form(title="Add User",
action=cfg.server_dir + "/sys/users/add/index",
name="add_user_form",
message=message)
form.text_input(_("Username"), name="username", value=username)
form.text_input(_("Full name"), name="name", value=name)
form.text_input(_("Email"), name="email", value=email)
form.text_input(_("Password"), name="password", type="password")
form.submit(label=_("Create User"), name="create")
return form.render()
def process_form(self, username=None, name=None, email=None, password=None, **kwargs):
msg = Message()
error = add_user(username, password, name, email, False)
if error:
msg.text = error
else:
msg.add = _("%s saved." % username)
cfg.log(msg.text)
main = self.main(username, name, email, msg=msg.text)
return self.fill_template(title="Manage Users and Groups", main=main, sidebar_left=self.sidebar_left, sidebar_right=self.sidebar_right)
class edit(FormPlugin, PagePlugin):
url = ["/sys/users/edit"]
order = 35
sidebar_left = ''
sidebar_right = _("""<strong>Edit Users</strong><p>Click on a user's name to
go to a screen for editing that user's account.</p><strong>Delete
Users</strong><p>Check the box next to a users' names and then click
"Delete User" to remove users from %s and the %s
system.</p><p>Deleting users is permanent!</p>""" % (cfg.product_name, cfg.box_name))
def main(self, msg=''):
users = cfg.users.get_all()
add_form = Form(title=_("Edit or Delete User"), action=cfg.server_dir + "/sys/users/edit", message=msg)
add_form.html('<span class="indent"><strong>Delete</strong><br /></span>')
for uname in users:
user = User(uname[1])
add_form.html('<span class="indent"> %s ' %
add_form.get_checkbox(name=user['username']) +
'<a href="/sys/users/edit?username=%s">%s (%s)</a><br /></span>' %
(user['username'], user['name'], user['username']))
add_form.submit(label=_("Delete User"), name="delete")
return add_form.render()
def process_form(self, **kwargs):
if 'delete' in kwargs:
msg = Message()
usernames = find_keys(kwargs, 'on')
cfg.log.info("%s asked to delete %s" % (cherrypy.session.get(cfg.session_key), usernames))
if usernames:
for username in usernames:
if cfg.users.exists(username):
try:
cfg.users.remove(username)
msg.add(_("Deleted user %s." % username))
except IOError, e:
if cfg.users.exists(username):
m = _("Error on deletion, user %s not fully deleted: %s" % (username, e))
cfg.log.error(m)
msg.add(m)
else:
m = _('Deletion failed on %s: %s' % (username, e))
cfg.log.error(m)
msg.add(m)
else:
cfg.log.warning(_("Can't delete %s. User does not exist." % username))
msg.add(_("User %s does not exist." % username))
else:
msg.add = _("Must specify at least one valid, existing user.")
main = self.main(msg=msg.text)
return self.fill_template(title="Manage Users and Groups", main=main, sidebar_left=self.sidebar_left, sidebar_right=self.sidebar_right)
sidebar_right = ''
u = cfg.users[kwargs['username']]
if not u:
main = _("<p>Could not find a user with username of %s!</p>" % kwargs['username'])
return self.fill_template(template="err", title=_("Unknown User"), main=main,
sidebar_left=self.sidebar_left, sidebar_right=sidebar_right)
main = _("""<strong>Edit User '%s'</strong>""" % u['username'])
sidebar_right = ''
return self.fill_template(title="Manage Users and Groups", main=main, sidebar_left=self.sidebar_left, sidebar_right=sidebar_right)
|
Python
| 0
|
@@ -575,67 +575,8 @@
- return self.fill_template(title=%22Manage Users and Groups%22,
sid
@@ -589,12 +589,12 @@
ight
-=%22%22%22
+ = '
%3Cstr
@@ -598,32 +598,50 @@
strong%3E%3Ca href=%22
+'+cfg.server_dir+'
/sys/users/add%22%3E
@@ -675,32 +675,50 @@
strong%3E%3Ca href=%22
+'+cfg.server_dir+'
/sys/users/edit%22
@@ -741,19 +741,112 @@
/strong%3E
-%22%22%22
+'%0A return self.fill_template(title=%22Manage Users and Groups%22, sidebar_right=sidebar_right
)%0A%0Aclass
@@ -3435,16 +3435,34 @@
a href=%22
+'+cfg.server_dir+'
/sys/use
|
306c735f863d3fe6a0922a433a7cdd1d21bdd772
|
fix unit test
|
flumotion/test/test_feedcomponent010.py
|
flumotion/test/test_feedcomponent010.py
|
# -*- Mode: Python; test-case-name: flumotion.test.test_feedcomponent010 -*-
# vi:si:et:sw=4:sts=4:ts=4
#
# Flumotion - a streaming media server
# Copyright (C) 2004,2005,2006,2007 Fluendo, S.L. (www.fluendo.com).
# All rights reserved.
# This file may be distributed and/or modified under the terms of
# the GNU General Public License version 2 as published by
# the Free Software Foundation.
# This file is distributed without any warranty; without even the implied
# warranty of merchantability or fitness for a particular purpose.
# See "LICENSE.GPL" in the source distribution for more information.
# Licensees having purchased or holding a valid Flumotion Advanced
# Streaming Server license may use this file in accordance with the
# Flumotion Advanced Streaming Server Commercial License Agreement.
# See "LICENSE.Flumotion" in the source distribution for more information.
# Headers in this file shall remain intact.
from twisted.trial import unittest
import common
from twisted.python import failure
from twisted.internet import defer
from flumotion.component import feedcomponent010 as fc
class TestFeeder(unittest.TestCase):
    """Tests for fc.Feeder client book-keeping: connect, stats, reconnect."""

    def setUp(self):
        self.feeder = fc.Feeder('video:default')

    def test_clientConnected(self):
        clientId = '/default/muxer-video'
        # 3 is the client handle used as the key in getClients()
        self.feeder.clientConnected(clientId, 3)
        clients = self.feeder.getClients()
        self.failUnless(3 in clients.keys())
        client = clients[3]
        self.assertEquals(client.uiState.get('clientId'), clientId)

    def testReconnect(self):
        clientId = '/default/muxer-video'
        # connect
        c = self.feeder.clientConnected(clientId, 3)

        # verify some stuff: all counters start at zero, one connection
        self.clientAssertStats(c, 0, 0, 0, 0, 1)

        # read 10 bytes, drop 1 buffer
        c.setStats((10, None, None, None, None, 1))
        self.clientAssertStats(c, 10, 1, 10, 1, 1)

        # disconnect: current counters reset, totals are preserved
        self.feeder.clientDisconnected(3)
        self.clientAssertStats(c, 0, 0, 10, 1, 1)

        # connect again: reconnect count increments, totals carry over
        self.feeder.clientConnected(clientId, 3)
        self.clientAssertStats(c, 0, 0, 10, 1, 2)

        # read 20 bytes, drop 2 buffers: totals accumulate across sessions
        c.setStats((20, None, None, None, None, 2))
        self.clientAssertStats(c, 20, 2, 30, 3, 2)

    def clientAssertEquals(self, client, key, value):
        # Compare one uiState entry of *client* against the expected value.
        self.assertEquals(client.uiState.get(key), value)

    def clientAssertStats(self, client, brc, bdc, brt, bdt, reconnects):
        # Assert current/total bytes-read and buffers-dropped counters
        # plus the reconnect count, in that order.
        self.clientAssertEquals(client, 'bytesReadCurrent', brc)
        self.clientAssertEquals(client, 'buffersDroppedCurrent', bdc)
        self.clientAssertEquals(client, 'bytesReadTotal', brt)
        self.clientAssertEquals(client, 'buffersDroppedTotal', bdt)
        self.clientAssertEquals(client, 'reconnects', reconnects)
if __name__ == '__main__':
unittest.main()
|
Python
| 0.000001
|
@@ -923,16 +923,29 @@
ntact.%0A%0A
+import time%0A%0A
from twi
@@ -1837,20 +1837,27 @@
, None,
-None
+time.time()
, 1))%0A
@@ -2226,20 +2226,27 @@
, None,
-None
+time.time()
, 2))%0A
|
3165635076f44d8dcf2de941eeb2f5f2239861d4
|
use pool of db_name parameter
|
bin/addons/base/ir/ir_cron.py
|
bin/addons/base/ir/ir_cron.py
|
# -*- encoding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2004-2009 Tiny SPRL (<http://tiny.be>). All Rights Reserved
# $Id$
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
from mx import DateTime
import time
import netsvc
import tools
import pooler
from osv import fields,osv
next_wait = 60
_intervalTypes = {
'work_days': lambda interval: DateTime.RelativeDateTime(days=interval),
'days': lambda interval: DateTime.RelativeDateTime(days=interval),
'hours': lambda interval: DateTime.RelativeDateTime(hours=interval),
'weeks': lambda interval: DateTime.RelativeDateTime(days=7*interval),
'months': lambda interval: DateTime.RelativeDateTime(months=interval),
'minutes': lambda interval: DateTime.RelativeDateTime(minutes=interval),
}
class ir_cron(osv.osv, netsvc.Agent):
    """Scheduled-job registry: each record names a model method, its
    arguments and a recurrence rule; _poolJobs polls for due jobs and
    re-arms itself via netsvc.Agent alarms."""
    _name = "ir.cron"
    _columns = {
        'name': fields.char('Name', size=60, required=True),
        'user_id': fields.many2one('res.users', 'User', required=True),
        'active': fields.boolean('Active'),
        'interval_number': fields.integer('Interval Number'),
        'interval_type': fields.selection( [('minutes', 'Minutes'),
            ('hours', 'Hours'), ('work_days','Work Days'), ('days', 'Days'),('weeks', 'Weeks'), ('months', 'Months')], 'Interval Unit'),
        'numbercall': fields.integer('Number of Calls', help='Number of time the function is called,\na negative number indicates that the function will always be called'),
        'doall' : fields.boolean('Repeat Missed'),
        'nextcall' : fields.datetime('Next Call Date', required=True),
        'model': fields.char('Object', size=64),
        'function': fields.char('Function', size=64),
        'args': fields.text('Arguments'),
        'priority': fields.integer('Priority', help='0=Very Urgent\n10=Not urgent')
    }

    _defaults = {
        'nextcall' : lambda *a: time.strftime('%Y-%m-%d %H:%M:%S'),
        'priority' : lambda *a: 5,
        'user_id' : lambda obj,cr,uid,context: uid,
        'interval_number' : lambda *a: 1,
        'interval_type' : lambda *a: 'months',
        'numbercall' : lambda *a: 1,
        'active' : lambda *a: 1,
        'doall' : lambda *a: 1
    }

    def _callback(self, cr, uid, model, func, args):
        # Resolve the target model and invoke func(cr, uid, *args).
        # NOTE(review): args is a user-editable text column passed to
        # eval() -- only trusted users should be able to edit cron records.
        args = (args or []) and eval(args)
        m=self.pool.get(model)
        if m and hasattr(m, func):
            f = getattr(m, func)
            f(cr, uid, *args)

    def _poolJobs(self, db_name, check=False):
        """Run every due job of one database, then re-arm the alarm."""
        if self.pool._init:
            # Registry still initializing: schedule a retry in ten minutes.
            # NOTE(review): there is no early return here, so the poll below
            # still proceeds -- confirm that is intended.
            self.setAlarm(self._poolJobs, int(time.time())+10*60, [db_name])

        now = DateTime.now()
        # FIXME: multidb. Solution: when a new db connection is instantiated
        # (in pooler), instantiate a new pooljob with the db as parameter.
        try:
            cr = pooler.get_db(db_name).cursor()
        except:
            return False

        try:
            # Due jobs: active, calls remaining, past their next-call date,
            # most urgent first.
            cr.execute('select * from ir_cron where numbercall<>0 and active and nextcall<=now() order by priority')
            for job in cr.dictfetchall():
                nextcall = DateTime.strptime(job['nextcall'], '%Y-%m-%d %H:%M:%S')
                numbercall = job['numbercall']

                ok = False
                while nextcall<now and numbercall:
                    if numbercall > 0:
                        numbercall -= 1
                    # With doall set, every missed occurrence is replayed;
                    # otherwise only the first pending one runs.
                    if not ok or job['doall']:
                        self._callback(cr, job['user_id'], job['model'], job['function'], job['args'])
                    if numbercall:
                        nextcall += _intervalTypes[job['interval_type']](job['interval_number'])
                    ok = True
                addsql=''
                if not numbercall:
                    # Call budget exhausted: deactivate the job.
                    addsql = ', active=False'
                cr.execute("update ir_cron set nextcall=%s, numbercall=%s"+addsql+" where id=%s", (nextcall.strftime('%Y-%m-%d %H:%M:%S'), numbercall, job['id']))
                cr.commit()
        finally:
            cr.close()

        #
        # Can be improved to do at the min(min(nextcalls), time()+next_wait)
        # But is this an improvement ?
        #
        if not check:
            # Re-arm ourselves for the next polling cycle.
            self.setAlarm(self._poolJobs, int(time.time())+next_wait, [db_name])
ir_cron()
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
|
Python
| 0
|
@@ -3288,28 +3288,140 @@
-if self.pool._init:%0A
+try:%0A db, pool = pooler.get_db_and_pool(db_name)%0A if pool._init:%0A # retry in a few minutes%0A
@@ -3493,16 +3493,95 @@
_name%5D)%0A
+ cr = db.cursor()%0A except:%0A return False%0A %0A
@@ -3773,111 +3773,8 @@
a bd
-%0A try:%0A cr = pooler.get_db(db_name).cursor()%0A except:%0A return False
%0A%0A
|
1afce678dec65bf3c6445322ff7961c7aca05f56
|
add more error checking for couchbase python client removal
|
api/code/src/main/python/stratuslab/installator/CouchbaseClient.py
|
api/code/src/main/python/stratuslab/installator/CouchbaseClient.py
|
#
# Copyright (c) 2013, Centre National de la Recherche Scientifique
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import time
import string
import os.path
from random import choice
import stratuslab.system.SystemFactory as SystemFactory
from stratuslab.installator.Installator import Installator
from stratuslab import Util
from stratuslab.Util import printError
class CouchbaseClient(Installator):
    """Installs the Couchbase C and Python client libraries on the frontend."""

    def __init__(self, configHolder):
        configHolder.assign(self)
        self.system = SystemFactory.getSystem(self.frontendSystem, configHolder)
        # C client packages, plus the tools needed to pip-install the
        # Python client on top of them.
        self._pkgs = ['libcouchbase2-libevent', 'libcouchbase-devel']
        self._deps = ['python-pip', 'gcc']
        self._repofile = '/etc/yum.repos.d/couchbase.repo'
        self._repourl = 'http://packages.couchbase.com/rpm/couchbase-centos62-x86_64.repo'

    def _installFrontend(self):
        # Installator hook: package installation is the whole job here.
        self._installPackages()

    def _setupFrontend(self):
        # No frontend configuration needed for the client.
        pass

    def _startServicesFrontend(self):
        # No services to start for the client.
        pass

    def _installPackages(self):
        """Refresh the yum repo, then reinstall the C client and the
        Python client from scratch."""
        Util.printStep('Setting up Couchbase yum repository')
        cmd = 'curl --output %s %s' % (self._repofile, self._repourl)
        self._executeExitOnError(cmd)

        Util.printStep('Removing Couchbase python client')
        cmd = 'pip uninstall -y couchbase'
        # Best effort: a failed uninstall (e.g. client not installed) is
        # only reported, not fatal.
        rc, output = Util.execute(cmd.split(' '),
                                  withOutput=True,
                                  verboseLevel=self.verboseLevel,
                                  verboseThreshold=Util.VERBOSE_LEVEL_DETAILED)
        if rc != 0:
            Util.printInfo("cannot uninstall couchbase python client\n%s" % output)

        Util.printStep('Removing Couchbase C client')
        cmd = 'yum erase -y %s' % ' '.join(self._pkgs)
        self._executeExitOnError(cmd)

        Util.printStep('Installing Couchbase C client')
        cmd = 'yum install -y %s' % ' '.join(self._pkgs)
        self._executeExitOnError(cmd)

        Util.printStep('Installing Couchbase python client dependencies')
        cmd = 'yum install -y %s' % ' '.join(self._deps)
        self._executeExitOnError(cmd)

        Util.printStep('Installing Couchbase python client')
        cmd = 'pip install couchbase'
        self._executeExitOnError(cmd)

    def _configure(self):
        pass

    def _restartService(self):
        pass

    def _executeExitOnError(self, cmd_str):
        # Run *cmd_str*; on non-zero exit report via printError (which is
        # expected to abort the installation).
        rc, output = Util.execute(cmd_str.split(' '),
                                  withOutput=True,
                                  verboseLevel=self.verboseLevel,
                                  verboseThreshold=Util.VERBOSE_LEVEL_DETAILED)
        if rc != 0:
            printError('Failed running: %s\n%s' % (cmd_str, output))
|
Python
| 0
|
@@ -1731,32 +1731,49 @@
python client')%0A
+ try:%0A
cmd = 'p
@@ -1791,32 +1791,36 @@
l -y couchbase'%0A
+
rc, outp
@@ -1879,32 +1879,36 @@
+
withOutput=True,
@@ -1934,32 +1934,36 @@
+
+
verboseLevel=sel
@@ -1970,32 +1970,36 @@
f.verboseLevel,%0A
+
@@ -2062,32 +2062,36 @@
TAILED)%0A
+
if rc != 0:%0A
@@ -2078,32 +2078,36 @@
if rc != 0:%0A
+
Util
@@ -2121,27 +2121,176 @@
nfo(
-%22cannot uninstall c
+'Couchbase python client NOT removed')%0A else:%0A Util.printInfo('Couchbase python client removed')%0A except:%0A Util.printInfo(%22C
ouch
@@ -2311,22 +2311,21 @@
ient
-%5Cn%25s%22 %25 output
+ NOT removed%22
)%0A%0A
|
4475cd927dda1d8ab685507895e0fc4bde6e3b4a
|
switch window index error
|
pages/base_page.py
|
pages/base_page.py
|
from .page import Page
class BasePage(Page):
    """Shared helpers layered on top of Page for Selenium-driven tests."""

    def get_cookie_index_page(self, url, cookie):
        """Open *url*, install *cookie*, and reload so it takes effect."""
        self.get_relative_path(url)
        self.maximize_window()
        self.selenium.add_cookie(cookie)
        self.selenium.refresh()

    def switch_to_second_window(self):
        """Switch to the second browser window.

        Falls back to the only open window when no second one exists,
        instead of raising IndexError.
        """
        handles = self.selenium.window_handles
        try:
            handle = handles[1]
        except IndexError:
            # Only one window is open -- stay on it rather than crash.
            handle = handles[0]
        self.selenium.switch_to_window(handle)
|
Python
| 0.000001
|
@@ -329,16 +329,124 @@
andles%0D%0A
+ try:%0D%0A handle = handles%5B1%5D%0D%0A except IndexError:%0D%0A handle = handles%5B0%5D%0D%0A
@@ -482,17 +482,13 @@
w(handle
-s%5B1%5D
)%0D%0A%0D%0A
|
439a09ce69b9ba66e2dc7c21b952ffc438fbe0f4
|
Add Abuse enum to outcomes. (#13833)
|
src/sentry/utils/outcomes.py
|
src/sentry/utils/outcomes.py
|
"""
sentry.utils.outcomes.py
~~~~~~~~~~~~~~~~~~~~
:copyright: (c) 2010-2014 by the Sentry Team, see AUTHORS for more details.
:license: BSD, see LICENSE for more details.
"""
from __future__ import absolute_import
from datetime import datetime
from django.conf import settings
from enum import IntEnum
import random
import six
import time
from sentry import tsdb, options
from sentry.utils import json, metrics
from sentry.utils.data_filters import FILTER_STAT_KEYS_TO_VALUES
from sentry.utils.dates import to_datetime
from sentry.utils.pubsub import QueuedPublisherService, KafkaPublisher
# valid values for outcome
class Outcome(IntEnum):
    """Integer codes recording the final disposition of an incoming event;
    the numeric value is what gets published (see track_outcome)."""
    ACCEPTED = 0
    FILTERED = 1
    RATE_LIMITED = 2
    INVALID = 3
outcomes = settings.KAFKA_TOPICS[settings.KAFKA_OUTCOMES]
outcomes_publisher = None
def track_outcome(org_id, project_id, key_id, outcome, reason=None, timestamp=None, event_id=None):
    """
    This is a central point to track org/project counters per incoming event.
    NB: This should only ever be called once per incoming event, which means
    it should only be called at the point we know the final outcome for the
    event (invalid, rate_limited, accepted, discarded, etc.)

    This increments all the relevant legacy RedisTSDB counters, as well as
    sending a single metric event to Kafka which can be used to reconstruct the
    counters with SnubaTSDB.
    """
    global outcomes_publisher
    if outcomes_publisher is None:
        # Lazily create the shared Kafka publisher on first use.
        outcomes_publisher = QueuedPublisherService(
            KafkaPublisher(
                settings.KAFKA_CLUSTERS[outcomes['cluster']]
            )
        )

    assert isinstance(org_id, six.integer_types)
    assert isinstance(project_id, six.integer_types)
    assert isinstance(key_id, (type(None), six.integer_types))
    assert isinstance(outcome, Outcome)
    assert isinstance(timestamp, (type(None), datetime))

    timestamp = timestamp or to_datetime(time.time())

    # Pairs of (tsdb model, key) to bump for this outcome.
    increment_list = []

    if outcome != Outcome.INVALID:
        # This simply preserves old behavior. We never counted invalid events
        # (too large, duplicate, CORS) toward regular `received` counts.
        increment_list.extend([
            (tsdb.models.project_total_received, project_id),
            (tsdb.models.organization_total_received, org_id),
            (tsdb.models.key_total_received, key_id),
        ])

    if outcome == Outcome.FILTERED:
        increment_list.extend([
            (tsdb.models.project_total_blacklisted, project_id),
            (tsdb.models.organization_total_blacklisted, org_id),
            (tsdb.models.key_total_blacklisted, key_id),
        ])
    elif outcome == Outcome.RATE_LIMITED:
        increment_list.extend([
            (tsdb.models.project_total_rejected, project_id),
            (tsdb.models.organization_total_rejected, org_id),
            (tsdb.models.key_total_rejected, key_id),
        ])

    # Some filter reasons also map to their own dedicated counter.
    if reason in FILTER_STAT_KEYS_TO_VALUES:
        increment_list.append((FILTER_STAT_KEYS_TO_VALUES[reason], project_id))

    # key_id (and potentially others) may be None; drop those pairs.
    increment_list = [(model, key) for model, key in increment_list if key is not None]
    if increment_list:
        tsdb.incr_multi(increment_list, timestamp=timestamp)

    # Send a snuba metrics payload.
    if random.random() <= options.get('snuba.track-outcomes-sample-rate'):
        outcomes_publisher.publish(
            outcomes['topic'],
            json.dumps({
                'timestamp': timestamp,
                'org_id': org_id,
                'project_id': project_id,
                'key_id': key_id,
                'outcome': outcome.value,
                'reason': reason,
                'event_id': event_id,
            })
        )

    metrics.incr(
        'events.outcomes',
        skip_internal=True,
        tags={
            'outcome': outcome.name.lower(),
            'reason': reason,
        },
    )
|
Python
| 0
|
@@ -710,16 +710,30 @@
LID = 3%0A
+ ABUSE = 4%0A
%0A%0Aoutcom
|
e69efded329ebbcf5ccf74ef137dc1a80bd4b4a6
|
add 2.1.2, re-run cython if needed (#13102)
|
var/spack/repos/builtin/packages/py-line-profiler/package.py
|
var/spack/repos/builtin/packages/py-line-profiler/package.py
|
# Copyright 2013-2019 Lawrence Livermore National Security, LLC and other
# Spack Project Developers. See the top-level COPYRIGHT file for details.
#
# SPDX-License-Identifier: (Apache-2.0 OR MIT)
from spack import *
class PyLineProfiler(PythonPackage):
    """Line-by-line profiler."""

    homepage = "https://github.com/rkern/line_profiler"
    url = "https://pypi.io/packages/source/l/line_profiler/line_profiler-2.0.tar.gz"

    version('2.0', 'fc93c6bcfac3b7cb1912cb28836d7ee6')

    depends_on('python@2.5:')
    # Build-only dependencies.
    depends_on('py-setuptools', type='build')
    depends_on('py-cython', type='build')
    # Needed both at build time and at runtime.
    depends_on('py-ipython@0.13:', type=('build', 'run'))
|
Python
| 0
|
@@ -191,16 +191,27 @@
R MIT)%0A%0A
+import os%0A%0A
from spa
@@ -442,16 +442,112 @@
ar.gz%22%0A%0A
+ version('2.1.2', sha256='efa66e9e3045aa7cb1dd4bf0106e07dec9f80bc781a993fbaf8162a36c20af5c')%0A
vers
@@ -622,16 +622,39 @@
on@2.5:'
+, type=('build', 'run')
)%0A de
@@ -739,32 +739,32 @@
type='build')%0A
-
depends_on('
@@ -786,28 +786,378 @@
:', type=('build', 'run'))%0A
+%0A # See https://github.com/rkern/line_profiler/issues/166%0A @run_before('build')%0A @when('%5Epython@3.7:')%0A def fix_cython(self):%0A cython = self.spec%5B'py-cython'%5D.command%0A for root, _, files in os.walk('.'):%0A for fn in files:%0A if fn.endswith('.pyx'):%0A cython(os.path.join(root, fn))%0A
|
79fa33998e0f261a9633c72282446bb2615083ed
|
Update configure.py
|
ansible_plugin/configure.py
|
ansible_plugin/configure.py
|
########
# Copyright (c) 2014 GigaSpaces Technologies Ltd. All rights reserved
#
# Licensed under the Apache License, Version 2.0 (the 'License');
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an 'AS IS' BASIS,
# * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# * See the License for the specific language governing permissions and
# * limitations under the License.
# for running shell commands
import os
from os.path import join as joinpath
import errno
# ctx is imported and used in operations
from cloudify import ctx
# put the operation decorator on any function that is a task
from cloudify.decorators import operation
# Import Cloudify exception
from cloudify.exceptions import NonRecoverableError
DEFAULT_ANSIBLE_BEST_PRACTICES_DIRECTORY_TREE = [
'group_vars',
'host_vars',
'library',
'filter_plugins',
'roles',
'roles/common',
'roles/common/tasks',
'roles/common/handlers',
'roles/common/templates',
'roles/common/files',
'roles/common/vars',
'roles/common/defaults',
'roles/common/meta',
'webtier',
'monitoring'
]
@operation
def make_directories(user_home = '/home/ubuntu', ansible_conf = 'ansible.cfg', **kwargs):
    """Create the Ansible best-practice directory tree under the
    deployment's env/etc/ansible directory.

    NOTE(review): the ansible_conf parameter is unused here -- confirm
    whether it can be dropped from the signature.
    """
    deployment_home = joinpath(user_home, 'cloudify.', ctx.deployment.id)
    etc_ansible = joinpath(deployment_home, 'env', 'etc', 'ansible')
    create_directories(etc_ansible, DEFAULT_ANSIBLE_BEST_PRACTICES_DIRECTORY_TREE)
@operation
def put_ansible_conf(user_home = '/home/ubuntu', ansible_conf = 'ansible.cfg', **kwargs):
    """Install the ansible.cfg resource as ~/.ansible.cfg, raising
    NonRecoverableError when the download fails."""
    if download_resource(ansible_conf, joinpath(user_home, '.ansible.cfg') ):
        ctx.logger.info("Ansible configured.")
    else:
        # NOTE(review): download_resource raises on failure, so this
        # branch may be unreachable -- confirm.
        ctx.logger.info('Ansible not configured.')
        raise NonRecoverableError('Ansible not configured.')
@operation
def validate(user_home = '/home/ubuntu', binary_name = 'ansible-playbook', **kwargs):
    """ validate that ansible is installed on the manager
    by running '<binary_name> --version' from the deployment virtualenv.
    """
    deployment_home = joinpath(user_home, 'cloudify.', ctx.deployment.id)
    playbook_binary = joinpath(deployment_home, 'env', 'bin', binary_name)
    command = [playbook_binary, '--version']
    code = run_shell_command(command)
    # NOTE(review): run_shell_command has no return statement (returns
    # None), so 'code == 0' can never be true -- validation always raises.
    # Confirm run_shell_command should return the process exit code.
    if code == 0:
        ctx.logger.info('Confirmed that ansible is installed on the manager.')
    else:
        ctx.logger.info('Unable to confirm that installation was unsuccessful')
        raise NonRecoverableError('Ansible not installed.')
@operation
def hard_code_home(user_home = '/home/ubuntu', **kwargs):
    """ Ansible configures a writable directory in '$HOME/.ansible/cp',mode=0700
    Cloudify's workers can't use that variable, so we need to hard code the home.
    """
    deployment_home = joinpath(user_home, 'cloudify.', ctx.deployment.id)
    # NOTE(review): assumes user_home has exactly two path components
    # (e.g. '/home/ubuntu'); the split below fails otherwise -- confirm.
    user_home = user_home[1:]
    home, user = user_home.split('/')
    # Patch both site-packages locations where the ssh connection plugin
    # may live in the deployment virtualenv.
    ansible_files = [deployment_home + '/env/lib/python2.7/site-packages/ansible/runner/connection_plugins/ssh.py',
                     deployment_home + '/env/local/lib/python2.7/site-packages/ansible/runner/connection_plugins/ssh.py']
    for ansible_file in ansible_files:
        replace_string(ansible_file, '$HOME', joinpath('/', home, user))
def download_resource(file, target_file):
    """ copies 'file' from local machine and moves to
    target_file

    Returns True on success; raises NonRecoverableError otherwise.
    """
    try:
        ctx.download_resource(file, target_file)
    except Exception as e:
        # BUG FIX: the original caught the undefined name 'Error' and
        # raised via the undefined module 'exceptions' -- both NameErrors
        # on the failure path. Use the imported NonRecoverableError.
        print('Error {0}'.format(e))
        raise NonRecoverableError(
            'Could not get "{0}" ({1}: {2})'.format(
                file, type(e).__name__, e))
    return True
def create_directories(etc_ansible, paths):
    """Create each relative *path* in *paths* under the *etc_ansible* root.

    Directories that already exist are skipped; any other OS error aborts
    with NonRecoverableError.
    """
    for path in paths:
        makeme = joinpath(etc_ansible, path)
        try:
            os.makedirs(makeme)
        except OSError as e:
            # BUG FIX: the original tested os.path.isdir(path) -- the bare
            # relative name, resolved against the CWD -- so a legitimate
            # EEXIST on the full path could still raise. Test the path we
            # actually tried to create.
            if e.errno == errno.EEXIST and os.path.isdir(makeme):
                pass
            else:
                raise NonRecoverableError('Cannot create directory {0}, error: {1}'.format(path, e))
def replace_string(file, old_string, new_string):
    """Replace every occurrence of *old_string* with *new_string* in *file*.

    The rewrite goes through a scratch copy under /tmp that is copied back
    over the original and then removed.
    """
    from shutil import copy  # local import; 'copy' was undefined before

    # BUG FIX: joinpath('/tmp', file) with an absolute *file* discards
    # '/tmp' entirely, making new_file == file and truncating the source
    # before it was read. Join on the basename instead.
    new_file = joinpath('/tmp', os.path.basename(file))
    with open(new_file, 'wt') as fout:
        with open(file, 'rt') as fin:
            for line in fin:
                # BUG FIX: the original referenced the undefined name
                # 'new_search' (NameError) instead of the parameter.
                fout.write(line.replace(old_string, new_string))
    copy(new_file, file)
    os.remove(new_file)
def write_to_file(path, filename, entry):
    """Write *entry* to *filename* under *path*, creating the directory
    (and the file) as needed; append when the file already exists.

    Returns True on success, False when the file cannot be opened or
    written.

    NOTE(review): pathexists/makedirs are not among the imports visible
    in this module section -- confirm they are imported elsewhere.
    """
    if not pathexists(path):
        makedirs(path)
    path_to_file = joinpath(path, filename)
    # Fresh file -> 'w'; existing file -> append. The write logic is
    # identical, so branch only on the mode (the original duplicated it).
    mode = 'w' if not pathexists(path_to_file) else 'a'
    try:
        f = open(path_to_file, mode)
        try:
            f.write(entry)
        finally:
            # BUG FIX: the original called f.close() unconditionally after
            # the except block, raising NameError when open() itself failed.
            f.close()
        return True
    except IOError as e:
        ctx.logger.error('Can\'t open file {0} for writing: {1}'.format(path_to_file, e))
        return False
def run_shell_command(command):
    """this runs a shell command.

    Logs the command's stdout; raises (and then logs) when stderr is
    non-empty. NOTE(review): 'subprocess' is not among the imports visible
    in this module section -- confirm it is imported at the top of the file.
    """
    ctx.logger.info("Running shell command: {0}"
                    .format(command))
    try:
        run = subprocess.Popen(command, stdout=subprocess.PIPE)
        output, error = run.communicate()
        if output:
            for lines in output:
                ctx.logger.info('lines: {0}'.format(lines))
        elif error:
            ctx.logger.error('error: {0}'.format(error))
            raise Exception('{0} returned {1}'.format(command, error))
    except Exception as e:
        # BUG FIX: the original bare 'except:' logged the undefined name
        # 'e', turning any failure into a NameError.
        ctx.logger.error('Error in run_shell_command: {0}'.format(e))
|
Python
| 0.000001
|
@@ -5872,31 +5872,51 @@
.error('
-error: %7B0%7D'.format(e
+Unknown Exception in run_shell_command.'
))%0A
|
0bebe2078955d4b5b7b46448f0fcef2c74fdb3f1
|
Fix copy commands for swarm setup script.
|
scripts/tools/swarm_bot_setup.py
|
scripts/tools/swarm_bot_setup.py
|
#!/usr/bin/env python
# Copyright (c) 2012 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""Setup a given bot to become a swarm bot by installing the
required files and setting up any required scripts. The bot's OS must be
specified. We assume the bot already has python installed and a ssh server
enabled."""
import optparse
import os
import subprocess
import sys
SWARM_DIRECTORY_PATH = os.path.join(os.path.dirname(os.path.abspath(__file__)),
'swarm_bootstrap')
# The swarm server links.
SWARM_SERVER_PROD = 'https://chromium-swarm.appspot.com'
SWARM_SERVER_DEV = 'https://chromium-swarm-dev.appspot.com'
# The directories containing the swarm code initially.
SWARM_STARTING_DIRECTORY = {
'linux': '/b/build/scripts/tools/swarm_bootstrap/',
'mac': '/b/build/scripts/tools/swarm_bootstrap/',
'win': 'e:\\b\\scripts\\tools\swarm_bootstrap\\',
}
# The directories to store the swarm code.
SWARM_DIRECTORY = {
'linux': '/b/swarm_slave',
'mac': '/b/swarm_slave',
'win': 'e:\\b\\swarm\\',
}
class Options(object):
    """Container for the settings consumed by BuildSetupCommand."""

    def __init__(self, swarm_server):
        # URL of the swarm server this bot should be pointed at.
        self.swarm_server = swarm_server
def OpenSSHCommand(user, host):
    """Return the argv prefix for an interactive ssh session to user@host.

    Uses a 5-second connect timeout and forces tty allocation (-t).
    """
    target = '%s@%s' % (user, host)
    return ['ssh', '-o ConnectTimeout=5', '-t', target]
def BuildSetupCommand(user, host, platform, options):
    """Build the ssh command that svn-updates, copies and runs the swarm
    bot setup script on *host* for the given *platform*."""
    assert platform in ('linux', 'mac', 'win')

    bot_setup_commands = []

    # Update the swarm files on the machines
    if platform == 'win':
        # The checkout lives on the e: drive on Windows bots.
        bot_setup_commands.extend(['e:', '&&'])
    bot_setup_commands.extend([
        'cd %s' % SWARM_STARTING_DIRECTORY[platform],
        '&&',
        'svn update',
        '&&'])

    # Copy the swarm files to the new swarm directory
    if platform == 'win':
        copy_func = 'copy'
    else:
        copy_func = 'cp'
    bot_setup_commands.extend([
        '%s %s %s' % (copy_func,
                      SWARM_STARTING_DIRECTORY[platform],
                      SWARM_DIRECTORY[platform]),
        '&&'])

    # Run the final swarm setup script.
    bot_setup_commands.extend(['cd %s' % SWARM_DIRECTORY[platform], '&&'])
    if platform == 'win':
        bot_setup_commands.extend([
            'call swarm_bot_setup.bat %s %s' %
            (options.swarm_server, SWARM_DIRECTORY[platform])])
    else:
        bot_setup_commands.append('./swarm_bot_setup.sh %s %s' %
                                  (options.swarm_server, SWARM_DIRECTORY[platform]))

    # On windows the command must be executed by cmd.exe
    if platform == 'win':
        bot_setup_commands = ['cmd.exe /c',
                              '"' + ' '.join(bot_setup_commands) + '"']

    return OpenSSHCommand(user, host) + bot_setup_commands
def BuildCleanCommand(user, host, platform):
    """Return the ssh command that deletes the old swarm directory on *host*."""
    assert platform in ('linux', 'mac', 'win')
    target_dir = SWARM_DIRECTORY[platform]
    if platform == 'win':
        removal = 'del /q /s %s' % target_dir
    else:
        removal = 'rm -r %s' % target_dir
    return OpenSSHCommand(user, host) + [removal]
def main():
    """Parse options and set up (optionally after cleaning) each listed bot."""
    parser = optparse.OptionParser(usage='%prog [options]',
                                   description=sys.modules[__name__].__doc__)
    parser.add_option('-b', '--bot', action='append', default=[],
                      help='The bot to setup as a swarm bot')
    parser.add_option('-r', '--raw',
                      help='The name of a file containing line separated slaves '
                      'to setup. The slaves must all be the same os.')
    parser.add_option('-c', '--clean', action='store_true',
                      help='Removes any old swarm files before setting '
                      'up the bot.')
    parser.add_option('-d', '--use_dev', action='store_true',
                      help='Set when the swarm bots being setup should use the '
                      'development swarm server instead of the production one.')
    parser.add_option('-u', '--user', default='chrome-bot',
                      help='The user to use when setting up the machine. '
                      'Defaults to %default')
    parser.add_option('-p', '--print_only', action='store_true',
                      help='Print what command would be executed to setup the '
                      'swarm bot.')
    parser.add_option('-w', '--win', action='store_true')
    parser.add_option('-l', '--linux', action='store_true')
    parser.add_option('-m', '--mac', action='store_true')
    options, args = parser.parse_args()

    if len(args) > 0:
        parser.error('Unknown arguments, ' + str(args))
    if not options.bot and not options.raw:
        parser.error('Must specify a bot or bot file.')
    # Exactly one platform flag must be given.
    if len([x for x in [options.win, options.linux, options.mac] if x]) != 1:
        parser.error('Must specify the bot\'s OS.')

    if options.win:
        platform = 'win'
    elif options.linux:
        platform = 'linux'
    elif options.mac:
        platform = 'mac'

    bots = options.bot
    if options.raw:
        # Remove extra spaces and empty lines.
        bots.extend(filter(None, (s.strip() for s in open(options.raw, 'r'))))

    for bot in bots:
        # Optional clean command first, then the setup command.
        commands = []
        if options.clean:
            commands.append(BuildCleanCommand(options.user, bot, platform))

        command_options = Options(
            swarm_server=SWARM_SERVER_DEV if options.use_dev else SWARM_SERVER_PROD)
        commands.append(BuildSetupCommand(options.user, bot, platform,
                                          command_options))

        if options.print_only:
            print commands
        else:
            for command in commands:
                subprocess.check_call(command)
if __name__ == '__main__':
sys.exit(main())
|
Python
| 0.000006
|
@@ -870,25 +870,24 @@
rm_bootstrap
-/
',%0A 'mac':
@@ -925,17 +925,16 @@
ootstrap
-/
',%0A 'wi
@@ -975,18 +975,16 @@
ootstrap
-%5C%5C
',%0A%7D%0A%0A#
@@ -1122,16 +1122,22 @@
b%5C%5Cswarm
+_slave
%5C%5C',%0A%7D%0A%0A
@@ -1792,20 +1792,24 @@
func = '
+x
copy
+ /i
'%0A else
@@ -1829,16 +1829,19 @@
nc = 'cp
+ -r
'%0A bot_
|
381adeeec0fd1d65372d7003183d4b1ec8f2cfbf
|
Increase V8JS Stack Limit (#584)
|
dmoj/executors/V8JS.py
|
dmoj/executors/V8JS.py
|
from dmoj.executors.script_executor import ScriptExecutor
class Executor(ScriptExecutor):
    """Executor for JavaScript submissions run with the v8dmoj binary."""

    ext = 'js'
    name = 'V8JS'
    command = 'v8dmoj'
    test_program = 'print(gets());'  # sanity check: echo one input line
    address_grace = 786432
    nproc = -1  # presumably means no process-count limit -- confirm against base class

    @classmethod
    def get_version_flags(cls, command):
        # Query the runtime version by evaluating version() in the
        # interpreter with -e.
        return [('-e', 'print(version())')]
|
Python
| 0
|
@@ -322,8 +322,131 @@
n())')%5D%0A
+ %0A def get_cmdline(self):%0A return %5Bself.get_command(), '--stack-size=131072', self._code%5D # 128MB Stack Limit%0A
|
7fba4a676622e93416f32ee69bfa295647979c7a
|
fix path on test file
|
taxcalc/tests/test_calculate.py
|
taxcalc/tests/test_calculate.py
|
import os
import sys
cur_path = os.path.abspath(os.path.dirname(__file__))
sys.path.append(os.path.join(cur_path, "../../"))
sys.path.append(os.path.join(cur_path, "../"))
import numpy as np
import pandas as pd
from numba import jit, vectorize, guvectorize
from taxcalc import *
def test_make_Calculator():
    """Smoke test: a Calculator can be built from the sample tax data."""
    # BUG FIX: the CSV was read via a CWD-relative path, so the test broke
    # when pytest was launched from outside this directory. Resolve it
    # against the module-level cur_path like the other tests do.
    tax_dta = pd.read_csv(os.path.join(cur_path, "../../puf2.csv"))
    calc = Calculator(tax_dta)
def test_make_Calculator_mods():
    # Resolve the sample data relative to this file so the test is
    # independent of the CWD pytest was launched from.
    cur_path = os.path.abspath(os.path.dirname(__file__))
    tax_dta = pd.read_csv(os.path.join(cur_path, "../../puf2.csv"))
    # NOTE(review): lowercase calculator()/update_calculator_from_module/
    # update_globals_from_calculator/constants are expected to come from
    # the star-import of taxcalc -- confirm they exist there.
    calc1 = calculator(tax_dta)
    calc2 = calculator(tax_dta, _amex=np.array([4000]))
    update_calculator_from_module(calc2, constants)
    update_globals_from_calculator(calc2)
    # The override passed to the second calculator must stick.
    assert all(calc2._amex == np.array([4000]))
|
Python
| 0.000001
|
@@ -328,16 +328,39 @@
ead_csv(
+os.path.join(cur_path,
%22../../p
@@ -368,16 +368,17 @@
f2.csv%22)
+)
%0A cal
|
f4a460646f87b63781ad32b8ef6a0b9c0d8a6290
|
fix issue #357, which makes real problem more obvious (media file does not exist
|
moviepy/video/io/VideoFileClip.py
|
moviepy/video/io/VideoFileClip.py
|
import os
from moviepy.video.VideoClip import VideoClip
from moviepy.audio.io.AudioFileClip import AudioFileClip
from moviepy.Clip import Clip
from moviepy.video.io.ffmpeg_reader import FFMPEG_VideoReader
class VideoFileClip(VideoClip):

    """

    A video clip originating from a movie file. For instance: ::

        >>> clip = VideoFileClip("myHolidays.mp4")
        >>> clip2 = VideoFileClip("myMaskVideo.avi")


    Parameters
    ------------

    filename:
      The name of the video file. It can have any extension supported
      by ffmpeg: .ogv, .mp4, .mpeg, .avi, .mov etc.

    has_mask:
      Set this to 'True' if there is a mask included in the videofile.
      Video files rarely contain masks, but some video codecs enable
      that. For istance if you have a MoviePy VideoClip with a mask you
      can save it to a videofile with a mask. (see also
      ``VideoClip.write_videofile`` for more details).

    audio:
      Set to `False` if the clip doesn't have any audio or if you do not
      wish to read the audio.

    Attributes
    -----------

    filename:
      Name of the original video file.

    fps:
      Frames per second in the original file.

    Read docstrings for Clip() and VideoClip() for other, more generic, attributes.

    """

    def __init__(self, filename, has_mask=False,
                 audio=True, audio_buffersize=200000,
                 audio_fps=44100, audio_nbytes=2, verbose=False):

        VideoClip.__init__(self)

        # BUG FIX: pre-initialize the attribute so __del__ does not raise
        # AttributeError when FFMPEG_VideoReader itself fails below (e.g.
        # the media file does not exist).
        self.reader = None

        # Make a reader
        pix_fmt = "rgba" if has_mask else "rgb24"
        reader = FFMPEG_VideoReader(filename, pix_fmt=pix_fmt)
        self.reader = reader

        # Make some of the reader's attributes accessible from the clip
        self.duration = self.reader.duration
        self.end = self.reader.duration
        self.fps = self.reader.fps
        self.size = self.reader.size
        self.filename = self.reader.filename

        if has_mask:
            # Channels 0-2 are RGB; channel 3 is the alpha mask, scaled
            # from [0, 255] to [0, 1].
            self.make_frame = lambda t: reader.get_frame(t)[:,:,:3]
            mask_mf = lambda t: reader.get_frame(t)[:,:,3]/255.0
            self.mask = (VideoClip(ismask = True, make_frame = mask_mf)
                         .set_duration(self.duration))
            self.mask.fps = self.fps

        else:
            self.make_frame = lambda t: reader.get_frame(t)

        # Make a reader for the audio, if any.
        if audio and self.reader.infos['audio_found']:
            self.audio = AudioFileClip(filename,
                                       buffersize= audio_buffersize,
                                       fps = audio_fps,
                                       nbytes = audio_nbytes)

    def __del__(self):
        """ Close/delete the internal reader. """
        del self.reader
|
Python
| 0
|
@@ -1635,16 +1635,110 @@
+self.reader = None #need this just in case FFMPEG has issues (__del__ complains)%0A self.
reader =
@@ -1788,37 +1788,8 @@
mt)%0A
- self.reader = reader%0A
|
b419da087c762fe3fb18896aed3adbf489ac7036
|
add test_tag_values_correctly_localized for get_config_tag_json
|
roundwared/tests/test_db.py
|
roundwared/tests/test_db.py
|
from django.test import TestCase
from model_mommy import mommy
# from .common import *
from roundware.rw.models import (MasterUI, Language, Session,
UIMapping, Project, LocalizedString, Tag)
from roundwared.db import get_config_tag_json
class TestGetConfigTagJSON(TestCase):
""" test various permutations of db.get_config_tag_json
"""
def setUp(self):
self.maxDiff = None
# make a masterui, a project, a ui_mode, tag category, selectionmethod
self.english = mommy.make(Language, language_code='en')
self.spanish = mommy.make(Language, language_code='es')
self.english_hdr = mommy.make(LocalizedString,
localized_string="Head",
language=self.english)
self.spanish_hdr = mommy.make(LocalizedString,
localized_string="Cabeza",
language=self.spanish)
self.masterui = mommy.make(MasterUI, active=True,
tag_category__name='TagCatName',
index=1, ui_mode__name='Listen',
header_text_loc=[self.english_hdr,
self.spanish_hdr])
self.ui_mode_one = self.masterui.ui_mode
self.english_sess = mommy.make(Session, project=self.masterui.project,
language=self.english)
self.spanish_sess = mommy.make(Session, project=self.masterui.project,
language=self.spanish)
self.english_msg = mommy.make(LocalizedString, localized_string="One",
language=self.english)
self.spanish_msg = mommy.make(LocalizedString, localized_string="Uno",
language=self.spanish)
self.project_one = self.masterui.project
self.tag = mommy.make(Tag, data="{'json':'value'}",
loc_msg=[self.english_msg, self.spanish_msg])
self.ui_mapping_one = mommy.make(UIMapping, master_ui=self.masterui,
active=True, tag=self.tag,
index=1, default=True)
self.master_ui_two = mommy.make(MasterUI, name='inactivemui',
ui_mode=self.ui_mode_one, active=True)
self.project_two = self.master_ui_two.project
self.project_three = mommy.make(Project, name='project_three')
def _proj_one_config(self):
return {'Listen': [
{'name': self.masterui.name,
'header_text': "Head",
'code': 'TagCatName',
'select': self.masterui.select.name,
'order': 1,
'defaults': [self.ui_mapping_one.tag.id],
'options': [{
'tag_id': self.ui_mapping_one.tag.id,
'order': 1,
'data': "{'json':'value'}",
'relationships': [],
'value': 'One'
}]},
]}
def test_get_uimapping_info_for_project(self):
""" Test proper UIMapping data returned based on project passed """
config = get_config_tag_json(self.project_one, self.english_sess)
expected = self._proj_one_config()
self.assertEquals(expected, config)
def test_only_masteruis_for_project_returned(self):
""" Confirm only info for MasterUIs for passed project or session
project are returned in config tag dictionary
"""
config = get_config_tag_json(self.project_three)
self.assertEquals({}, config)
config = get_config_tag_json(self.project_two)
# should not have any uimapping info for project _one_
self.assertNotIn(self.masterui.name,
[dic['name'] for dic in
config['Listen']])
def test_session_project_overrides_passed_project(self):
""" The project associated with a passed session should be used
even if a project is explicitly passed. (really?)
"""
pass
def test_only_active_masteruis_returned(self):
""" Confirm that only active MasterUIs are returned in
config tag 'JSON' (dictionary)
"""
self.master_ui_two.active = False
self.master_ui_two.save()
config = get_config_tag_json(self.project_two)
self.assertEquals({}, config)
self.master_ui_two.active = True
self.master_ui_two.save()
def test_get_right_masterui_without_passed_project(self):
""" Don't pass a project, just use the project for the session.
Do we still get the right MasterUI?
"""
config = get_config_tag_json(None, self.english_sess)
expected = self._proj_one_config()
self.assertEquals(expected, config)
def test_get_correct_localized_header_text(self):
""" Test that we get correct localized header text for session, or if
none passed, header text in English.
"""
config = get_config_tag_json(None, self.spanish_sess)
self.assertEquals('Cabeza',
config['Listen'][0]['header_text'])
def test_tag_values_correctly_localized(self):
""" Test that we get correct localized header text for tag values
based on session language, or if none passed, in English.
"""
pass
|
Python
| 0.000003
|
@@ -5455,39 +5455,32 @@
rrect localized
-header
text for tag val
@@ -5483,16 +5483,16 @@
values%0A
+
@@ -5557,29 +5557,182 @@
sh.%0A %22%22%22%0A
-pass
+config = get_config_tag_json(None, self.spanish_sess)%0A self.assertEquals('Uno', %0A config%5B'Listen'%5D%5B0%5D%5B'options'%5D%5B0%5D%5B'value'%5D)
%0A
|
faf9638bc69dc79c7fdc9294cc309c40ca57d518
|
Fix process names in test_nailyd_alive
|
fuelweb_test/integration/test_nailyd.py
|
fuelweb_test/integration/test_nailyd.py
|
import logging
import xmlrpclib
from fuelweb_test.integration.base import Base
from fuelweb_test.helpers import SSHClient
class TestNailyd(Base):
def __init__(self, *args, **kwargs):
super(TestNailyd, self).__init__(*args, **kwargs)
self.remote = SSHClient()
def setUp(self):
logging.info('Admin node ip: %s' % self.get_admin_node_ip())
self.ip = self.get_admin_node_ip()
def tearDown(self):
pass
def test_nailyd_alive(self):
self.remote.connect_ssh(self.ip, 'root', 'r00tme')
ps_output = self.remote.execute('ps ax')['stdout']
naily_processes = filter(lambda x: '/usr/bin/nailyd' in x, ps_output)
logging.debug("Found naily processes: %s" % naily_processes)
self.assertEquals(len(naily_processes), 1)
|
Python
| 0.000024
|
@@ -648,23 +648,270 @@
x: '
-/usr/bin/nailyd
+naily master' in x, ps_output)%0A logging.debug(%22Found %25d naily master processes: %25s%22 %25%0A (len(naily_processes), naily_processes))%0A self.assertEqual(1, len(naily_processes))%0A naily_processes = filter(lambda x: 'naily worker
' in
@@ -954,22 +954,32 @@
(%22Found
+%25d
naily
+worker
processe
@@ -986,32 +986,78 @@
s: %25s%22 %25
- naily_processes
+%0A (len(naily_processes), naily_processes)
)%0A
@@ -1078,10 +1078,15 @@
qual
-s
(
+True,
len(
@@ -1101,13 +1101,14 @@
ocesses)
-,
+ %3E
1)%0A
|
13a64059b71fccb8315f552d8e96f130c513a540
|
Remove old code.
|
charity_server.py
|
charity_server.py
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Sun Apr 30 01:14:12 2017
@author: colm
"""
from flask import Flask, jsonify
from parse_likecharity import refresh_charities
import threading
from datetime import datetime
app = Flask(__name__)
refresh_rate = 24 * 60 * 60 #Seconds
start_time = datetime.now()
# variables that are accessible from anywhere
payload = {}
# lock to control access to variable
dataLock = threading.Lock()
# thread handler
backgroundThread = threading.Thread()
def update_charities():
print('Updating charities in background thread')
global payload
global backgroundThread
with dataLock:
categories, charity_dict = refresh_charities()
payload = {'categories':categories, 'charities':charity_dict}
print('Running!')
# Set the next thread to happen
backgroundThread = threading.Timer(refresh_rate, update_charities, ())
backgroundThread.start()
@app.route("/gci")
def gci():
global payload
delta = datetime.now() - start_time
if delta.total_seconds() > refresh_rate:
categories, charity_dict = refresh_charities()
payload = {'categories':categories, 'charities':charity_dict}
return jsonify(payload)
if __name__ == "__main__":
update_charities()
app.run(host='0.0.0.0')
backgroundThread.cancel()
print('test')
|
Python
| 0.000045
|
@@ -185,25 +185,8 @@
ies%0A
-import threading%0A
from
@@ -299,16 +299,36 @@
me.now()
+%0Ainitialized = False
%0A%0A# vari
@@ -383,547 +383,8 @@
= %7B%7D
-%0A# lock to control access to variable%0AdataLock = threading.Lock()%0A# thread handler%0AbackgroundThread = threading.Thread()%0A%0Adef update_charities():%0A print('Updating charities in background thread')%0A %0A global payload%0A global backgroundThread%0A with dataLock:%0A %0A categories, charity_dict = refresh_charities()%0A payload = %7B'categories':categories, 'charities':charity_dict%7D%0A print('Running!')%0A %0A # Set the next thread to happen%0A backgroundThread = threading.Timer(refresh_rate, update_charities, ())%0A backgroundThread.start()
%0A%0A@a
@@ -507,16 +507,36 @@
esh_rate
+ or not(initialized)
:%0A
@@ -710,17 +710,42 @@
:%0A
-%0A update
+categories, charity_dict = refresh
_cha
@@ -783,36 +783,8 @@
0')%0A
- backgroundThread.cancel()%0A
pr
|
dce4866473c84dd84e47202812fede21869c03bc
|
remove commented stuff
|
teknologr/teknologr/settings.py
|
teknologr/teknologr/settings.py
|
"""
Django settings for teknologr project.
Generated by 'django-admin startproject' using Django 1.9.2.
For more information on this file, see
https://docs.djangoproject.com/en/1.9/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/1.9/ref/settings/
"""
import os
from getenv import env
import dj_database_url
import ldap
from django_auth_ldap.config import LDAPSearch, PosixGroupType
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
PROJECT_DIR = os.path.dirname(__file__)
TEST_PEP8_DIRS = [os.path.dirname(PROJECT_DIR), ]
# PEP8
TEST_PEP8_EXCLUDE = ['migrations', ] # Exclude this paths from tests
TEST_PEP8_IGNORE = [] # Ignore this tests (E501 is line length)
TEST_PEP8_CONFIG_FILE = os.path.join(
os.path.dirname(os.path.dirname(os.path.dirname(os.path.abspath(__file__)))),
'setup.cfg')
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/1.9/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
SECRET_KEY = env('SECRET_KEY', 'secret_key')
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = env('DEBUG', True)
ALLOWED_HOSTS = ['localhost']
# Application definition
INSTALLED_APPS = [
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
'test_pep8',
'rest_framework',
'ajax_select',
'members',
]
MIDDLEWARE_CLASSES = [
'django.middleware.security.SecurityMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.auth.middleware.SessionAuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
]
ROOT_URLCONF = 'teknologr.urls'
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': [],
'APP_DIRS': True,
'OPTIONS': {
'context_processors': [
'django.template.context_processors.debug',
'django.template.context_processors.request',
'django.contrib.auth.context_processors.auth',
'django.contrib.messages.context_processors.messages',
],
},
},
]
WSGI_APPLICATION = 'teknologr.wsgi.application'
# Logging
if not DEBUG:
LOGGING = {
'version': 1,
'disable_existing_loggers': False,
'handlers': {
'file': {
'level': 'INFO',
'class': 'logging.FileHandler',
'filename': '/var/log/teknologr/info.log',
},
},
'loggers': {
'django': {
'handlers': ['file'],
'level': 'INFO',
'propagate': True,
},
},
}
# Database
# https://docs.djangoproject.com/en/1.9/ref/settings/#databases
DATABASES = {
'default': dj_database_url.parse(env('DATABASE', 'sqlite:///' + os.path.join(BASE_DIR, 'db.sqlite3')))
#{
# 'ENGINE': 'django.db.backends.sqlite3',
# 'NAME': os.path.join(BASE_DIR, 'db.sqlite3'),
#}
}
# Password validation
# https://docs.djangoproject.com/en/1.9/ref/settings/#auth-password-validators
AUTH_PASSWORD_VALIDATORS = [
{
'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
},
]
# Internationalization
# https://docs.djangoproject.com/en/1.9/topics/i18n/
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'Europe/Helsinki'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/1.9/howto/static-files/
STATIC_URL = '/static/'
LOGIN_URL = '/login/'
LOGIN_REDIRECT_URL = '/members/'
# REST Framework settings
# TODO: provide GET access to certain users for non-admins
REST_FRAMEWORK = {
'DEFAULT_AUTHENTICATION_CLASSES': (
'rest_framework.authentication.SessionAuthentication',
),
'DEFAULT_PERMISSION_CLASSES': (
'rest_framework.permissions.IsAdminUser',
),
}
# LDAP stuff
# Baseline configuration.
AUTH_LDAP_SERVER_URI = env("AUTH_LDAP_SERVER_URI", "ldaps://localhost:45671")
AUTH_LDAP_USER_DN_TEMPLATE = env("LDAP_USER_DN_TEMPLATE", "uid=%(user)s,dc=example,dc=com")
# Set up the basic group parameters.
AUTH_LDAP_GROUP_SEARCH = LDAPSearch(
env("LDAP_GROUP_DN", "ou=group,dc=example,dc=com"),
ldap.SCOPE_SUBTREE,
"(objectClass=PosixGroupType)"
)
AUTH_LDAP_GROUP_TYPE = PosixGroupType(name_attr="cn")
# Populate the Django user from the LDAP directory.
AUTH_LDAP_USER_ATTR_MAP = {
"username": "uid",
"first_name": "givenName",
"last_name": "sn",
"email": "mail"
}
# Map LDAP group to is_staff property in Member model
# this restricts all is_staff required views to those that are members of the specified LDAP group
AUTH_LDAP_USER_FLAGS_BY_GROUP = {
"is_staff": env("LDAP_STAFF_GROUP_DN", "cn=admin,ou=group,dc=example,dc=com"),
}
# This is the default, but I like to be explicit.
AUTH_LDAP_ALWAYS_UPDATE_USER = True
# Use LDAP group membership to calculate group permissions.
AUTH_LDAP_FIND_GROUP_PERMS = True
# Cache group memberships for an hour to minimize LDAP traffic
AUTH_LDAP_CACHE_GROUPS = True
AUTH_LDAP_GROUP_CACHE_TIMEOUT = 3600
# Keep ModelBackend around for per-user permissions and maybe a local
# superuser.
AUTHENTICATION_BACKENDS = (
'django_auth_ldap.backend.LDAPBackend',
'django.contrib.auth.backends.ModelBackend',
)
# Never require cert
AUTH_LDAP_GLOBAL_OPTIONS = {
ldap.OPT_X_TLS_REQUIRE_CERT: ldap.OPT_X_TLS_NEVER
}
|
Python
| 0
|
@@ -3394,126 +3394,8 @@
)))%0A
- #%7B%0A # 'ENGINE': 'django.db.backends.sqlite3',%0A # 'NAME': os.path.join(BASE_DIR, 'db.sqlite3'),%0A #%7D%0A
%7D%0A%0A%0A
|
11e13c3412661e79f8a70374a1c14a2b03411b65
|
Remove reload, it's not usable
|
test.py
|
test.py
|
from zirc.test import TestCase
import log as logging
import utils
import commands
import config
logging.setLevel(30)
class botTest(TestCase):
def __init__(self):
self.config = {}
self.config['nickname'] = 'zIRC-test'
def on_privmsg(self, event, irc, arguments):
if " ".join(arguments).startswith(config.commandChar):
utils.call_command(self, event, irc, arguments)
def on_kick(self, event, irc):
nick = event.raw.split(" ")[3]
if nick == 'zIRC-test':
irc.join(event.target)
def on_join(self, event, irc):
irc.send("WHO {0} nuhs%nhu".format(event.target))
def on_invite(self, event, irc):
if utils.checkPerms(event.source.host, trusted=True):
hostmask = event.source.hostmask
irc.join(event.target)
bot = botTest()
log = """:user!~user@user/user PRIVMSG #zirc :Hey!
:user2!~user@user/user2 PRIVMSG #zirc :How are you?
:wolfy1339!~wolfy1339@botters/wolfy1339 PRIVMSG #zirc :?ban *!*@*
:wolfy1339!~wolfy1339@botters/wolfy1339 PRIVMSG #zirc :?unban *!*@*
:wolfy1339!~wolfy1339@botters/wolfy1339 PRIVMSG #zirc :?calc 1+1
:wolfy1339!~wolfy1339@botters/wolfy1339 PRIVMSG #zirc :?config ignores
:wolfy1339!~wolfy1339@botters/wolfy1339 PRIVMSG #zirc :?cycle #zirc
:wolfy1339!~wolfy1339@botters/wolfy1339 PRIVMSG #zirc :?deop
:wolfy1339!~wolfy1339@botters/wolfy1339 PRIVMSG #zirc :?echo moo
:wolfy1339!~wolfy1339@botters/wolfy1339 PRIVMSG #zirc :?help
:wolfy1339!~wolfy1339@botters/wolfy1339 PRIVMSG #zirc :?host
:wolfy1339!~wolfy1339@botters/wolfy1339 PRIVMSG #zirc :?join ##foo
:wolfy1339!~wolfy1339@botters/wolfy1339 PRIVMSG #zirc :?kban
:wolfy1339!~wolfy1339@botters/wolfy1339 PRIVMSG #zirc :?kick user,user2,user3
:wolfy1339!~wolfy1339@botters/wolfy1339 PRIVMSG #zirc :?list
:wolfy1339!~wolfy1339@botters/wolfy1339 PRIVMSG #zirc :?list alias
:wolfy1339!~wolfy1339@botters/wolfy1339 PRIVMSG #zirc :?log.level info
:wolfy1339!~wolfy1339@botters/wolfy1339 PRIVMSG #zirc :?nick foo
:wolfy1339!~wolfy1339@botters/wolfy1339 PRIVMSG #zirc :?op
:wolfy1339!~wolfy1339@botters/wolfy1339 PRIVMSG #zirc :?part
:wolfy1339!~wolfy1339@botters/wolfy1339 PRIVMSG #zirc :?perms
:wolfy1339!~wolfy1339@botters/wolfy1339 PRIVMSG #zirc :?ping
:wolfy1339!~wolfy1339@botters/wolfy1339 PRIVMSG #zirc :?quit
:wolfy1339!~wolfy1339@botters/wolfy1339 PRIVMSG #zirc :?rainbow mooo
:wolfy1339!~wolfy1339@botters/wolfy1339 PRIVMSG #zirc :?reload config
:wolfy1339!~wolfy1339@botters/wolfy1339 PRIVMSG #zirc :?version
:wolfy1339!~wolfy1339@botters/wolfy1339 PRIVMSG #zirc :?voice
:wolfy1339!~wolfy1339@botters/wolfy1339 PRIVMSG #zirc :?unvoice
:user3!~user@user/user3 PRIVMSG zIRC-test :Hello there!"""
bot.start(log)
|
Python
| 0.000004
|
@@ -2375,78 +2375,8 @@
ooo%0A
-:wolfy1339!~wolfy1339@botters/wolfy1339 PRIVMSG #zirc :?reload config%0A
:wol
|
c5c0b3f8b6d61a1534e74e4ceba8b6a7eedb106d
|
support multiple registration to the same event
|
dbus-tools/dbus-register.py
|
dbus-tools/dbus-register.py
|
###############################################################################
# Copyright 2012 Intel Corporation.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
###############################################################################
import sys, dbus, json
from twisted.internet import glib2reactor
# Configure the twisted mainloop to be run inside the glib mainloop.
# This must be done before importing the other twisted modules
glib2reactor.install()
from twisted.internet import reactor, defer
from autobahn.websocket import listenWS
from autobahn.wamp import exportRpc, WampServerFactory, WampServerProtocol
from dbus.mainloop.glib import DBusGMainLoop
import gobject
gobject.threads_init()
from dbus import glib
glib.init_threads()
# enable debug log
from twisted.python import log
log.startLogging(sys.stdout)
###############################################################################
class DbusSignalHandler:
def __init__(self, bus, senderName, objectName, interfaceName, signalName):
# publish hash id
self.id = senderName + "#" + objectName + "#" + interfaceName + "#" + signalName
# connect dbus proxy object to signal
self.object = bus.get_object(senderName, objectName)
self.object.connect_to_signal(signalName, self.handleSignal, interfaceName)
def handleSignal(self, *args):
# publish dbus args under topic hash id
factory.dispatch(self.id, json.dumps(args))
###############################################################################
class DbusRegisterService:
def __init__(self):
# signal handlers
self.signalHandlers = []
@exportRpc
def dbusRegister(self, list):
# read arguments list by position
if len(list) < 5:
raise Exception("Error: expected arguments: bus, sender, object, interface, signal)")
if list[0] == "session":
bus = dbus.SessionBus()
elif list[0] == "system":
bus = dbus.SystemBus()
else:
raise Exception("Error: invalid bus: %s" % list[0])
# create a handler that will publish the signal
dbusSignalHandler = DbusSignalHandler(bus, list[1], list[2], list[3], list[4])
self.signalHandlers.append(dbusSignalHandler)
return dbusSignalHandler.id
###############################################################################
class DbusRegisterServerProtocol(WampServerProtocol):
def onSessionOpen(self):
# create dbus-register service instance
self.DbusRegisterService = DbusRegisterService()
# register it for RPC
self.registerForRpc(self.DbusRegisterService)
# register for Publish / Subscribe
self.registerForPubSub("", True)
###############################################################################
if __name__ == '__main__':
port = "9001"
if len(sys.argv) == 2:
port = sys.argv[1]
uri = "ws://localhost:" + port
factory = WampServerFactory(uri, debugWamp = True)
factory.protocol = DbusRegisterServerProtocol
factory.setProtocolOptions(allowHixie76 = True)
listenWS(factory)
DBusGMainLoop(set_as_default=True)
reactor.run()
|
Python
| 0
|
@@ -1324,24 +1324,249 @@
.stdout)%0A%0A%0A%0A
+###############################################################################%0Adef hashId(senderName, objectName, interfaceName, signalName):%0A%09return senderName + %22#%22 + objectName + %22#%22 + interfaceName + %22#%22 + signalName%0A%0A%0A%0A
############
@@ -1767,16 +1767,23 @@
lf.id =
+hashId(
senderNa
@@ -1780,32 +1780,25 @@
d(senderName
- + %22#%22 +
+,
objectName
@@ -1792,32 +1792,25 @@
, objectName
- + %22#%22 +
+,
interfaceNa
@@ -1807,32 +1807,25 @@
nterfaceName
- + %22#%22 +
+,
signalName%0A
@@ -1823,16 +1823,17 @@
gnalName
+)
%0A
@@ -2524,16 +2524,223 @@
gnal)%22)%0A
+ %0A %09# check if a handler exists%0A sigId = hashId(list%5B1%5D, list%5B2%5D, list%5B3%5D, list%5B4%5D)%0A for handler in self.signalHandlers:%0A %09if handler.id == sigId:%0A %09%09return sigId%0A %09%0A
|
ddf4cbfc263b71ba3eee54b53d33e7ed31e5a8e5
|
remove args logging
|
swampdragon/models.py
|
swampdragon/models.py
|
from .pubsub_providers.base_provider import PUBACTIONS
from .model_tools import get_property
from .pubsub_providers.model_publisher import publish_model
from .serializers.serializer_importer import get_serializer
from django.db.models.signals import pre_delete, m2m_changed
from django.dispatch.dispatcher import receiver
import logging
logger = logging.getLogger(__name__)
class SelfPublishModel(object):
serializer_class = None
def __init__(self, *args, **kwargs):
if isinstance(self.serializer_class, str):
self.serializer_class = get_serializer(self.serializer_class, self)
self._pre_save_state = dict()
super(SelfPublishModel, self).__init__(*args, **kwargs)
self._serializer = self.serializer_class(instance=self)
self._set_pre_save_state()
#logger.info("__init__ in SelfPublishModel")
#logger.info(self)
logger.info(args)
logger.info(kwargs)
def _set_pre_save_state(self):
"""
Set the state of the model before any changes are done,
so it's possible to determine what fields have changed.
"""
relevant_fields = self._get_relevant_fields()
for field in relevant_fields:
val = get_property(self, field)
if hasattr(self._serializer, field):
continue
if val is None:
self._pre_save_state[field] = None
continue
self._pre_save_state[field] = val
def _get_relevant_fields(self):
"""
Get all fields that will affect the state.
This is used to save the state of the model before it's updated,
to be able to get changes used when publishing an update (so not all fields are published)
"""
# update_fields = list(self._serializer.opts.update_fields)
# publish_fields = list(self._serializer.opts.publish_fields)
# relevant_fields = set(update_fields + publish_fields)
relevant_fields = self._serializer.base_fields
if 'id' in relevant_fields:
relevant_fields.remove('id')
return relevant_fields
def get_changed_fields(self):
changed_fields = []
for k, v in self._pre_save_state.items():
val = get_property(self, k)
if val != v:
changed_fields.append(k)
return changed_fields
def serialize(self):
return self._serializer.serialize()
def _publish(self, action, changed_fields=None):
publish_model(self, self._serializer, action, changed_fields)
def save(self, *args, **kwargs):
logger.info(self.pk)
# if not self.pk:
# http://stackoverflow.com/questions/11561722/django-what-is-the-role-of-modelstate
if self._state.adding:
self.action = PUBACTIONS.created
self.changed_fields = None
else:
self.action = PUBACTIONS.updated
self.changed_fields = self.get_changed_fields()
super(SelfPublishModel, self).save(*args, **kwargs)
self._publish(self.action, self.changed_fields)
@receiver(m2m_changed)
def _self_publish_model_m2m_change(sender, instance, action, model, pk_set, **kwargs):
if not isinstance(instance, SelfPublishModel):
return
instance.action = PUBACTIONS.updated
if action in ['post_add', 'post_clear', 'post_remove']:
instance._publish(instance.action, instance._serializer.opts.publish_fields)
@receiver(pre_delete)
def _self_publish_model_delete(sender, instance, **kwargs):
if isinstance(instance, SelfPublishModel):
instance._publish(PUBACTIONS.deleted)
|
Python
| 0.000003
|
@@ -885,32 +885,33 @@
o(self)%0A
+#
logger.info(args
@@ -912,32 +912,33 @@
o(args)%0A
+#
logger.info(kwar
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.