commit
stringlengths 40
40
| subject
stringlengths 1
3.25k
| old_file
stringlengths 4
311
| new_file
stringlengths 4
311
| old_contents
stringlengths 0
26.3k
| lang
stringclasses 3
values | proba
float64 0
1
| diff
stringlengths 0
7.82k
|
|---|---|---|---|---|---|---|---|
d8556707aa3ab0bc89878e0b5daaaeb7b54616ae
|
Disable images
|
zephyr/lib/bugdown.py
|
zephyr/lib/bugdown.py
|
import re
import markdown
# We need to re-initialize the markdown engine every 30 messages
# due to some sort of performance leak in the markdown library.
MAX_MD_ENGINE_USES = 30
_md_engine = None
_use_count = 0
# A link starts after whitespace, and cannot contain spaces,
# end parentheses, or end brackets (which would confuse Markdown).
# FIXME: Use one of the actual linkification extensions.
_link_regex = re.compile(r'(\s|\A)(?P<url>https?://[^\s\])]+)')
# Pad heading markers to make Markdown ignore them
# FIXME: Write a real extension for the markdown library
_heading_regex = re.compile(r'^([#-=])', flags=re.MULTILINE)
def _linkify(match):
url = match.group('url')
return ' [%s](%s) ' % (url, url)
def convert(md):
"""Convert Markdown to HTML, with Humbug-specific settings and hacks."""
global _md_engine, _use_count
if _md_engine is None:
_md_engine = markdown.Markdown(
extensions = ['fenced_code', 'codehilite', 'nl2br'],
safe_mode = 'escape',
output_format = 'xhtml')
md = _heading_regex.sub(r' \1', md)
md = _link_regex.sub(_linkify, md)
try:
html = _md_engine.convert(md)
except:
# FIXME: Do something more reasonable here!
html = '<p>[Humbug note: Sorry, we could not understand the formatting of your message]</p>'
_use_count += 1
if _use_count >= MAX_MD_ENGINE_USES:
_md_engine = None
_use_count = 0
return html
|
Python
| 0
|
@@ -20,16 +20,191 @@
rkdown%0A%0A
+class Bugdown(markdown.Extension):%0A def extendMarkdown(self, md, md_globals):%0A del md.inlinePatterns%5B'image_link'%5D%0A del md.inlinePatterns%5B'image_reference'%5D%0A%0A
# We nee
@@ -1152,16 +1152,27 @@
'nl2br'
+, Bugdown()
%5D,%0A
|
e4d32def2ef91518198e6a500908ea3839c43257
|
Fix typo
|
cairis/data/DimensionDAO.py
|
cairis/data/DimensionDAO.py
|
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
from cairis.core.ARM import *
from cairis.daemon.CairisHTTPError import CairisHTTPError, ARMHTTPError
from cairis.data.CairisDAO import CairisDAO
from cairis.core.MySQLDatabaseProxy import MySQLDatabaseProxy
from http.client import BAD_REQUEST, NOT_FOUND
__author__ = 'Shamal Faily'
class DimensionDAO(CairisDAO):
def __init__(self, session_id):
CairisDAO.__init__(self, session_id)
def getDimensions(self,table,id):
try:
permissableDimensions = ['access_right', 'architectural_pattern', 'asset', 'asset_reference', 'asset_type','attacker','attacker_reference', 'behavioural_variable', 'capability','characteristic_synopsis', 'component', 'concept_reference','connector', 'countermeasure' 'countermeasure_reference', 'countermeasure_value', 'datastore', 'detection_mechanism', 'dfd_filter', 'document_reference', 'domainproperty','domainproperty_reference', 'entity','environment', 'environment_reference','external_document', 'goal', 'goal_reference','goal_category_type','interface','likelihood','locations','misusability_case','misusecase','misusecase_reference','motivation','obstacle','obstacle_category_type','obstacle_reference','persona','persona_characteristic','persona_characteristic_synopsis','persona_implied_process','persona_reference','persona_type','priority_type', 'privilege', 'protocol', 'reference_synopsis','requirement', 'requirement_reference', 'requirement_type','response', 'response_reference', 'risk', 'risk_class','risk_reference','role', 'role_reference', 'role_type', 'securitypattern','severity', 'surface_type', 'task', 'task_characteristic', 'task_reference','template_asset', 'template_goal', 'template_requirement','trace_dimension','threat', 'threat_reference','threat_type', 'threat_value', 'usecase', 'vulnerability','vulnerability_reference', 'vulnerability_type']
if (table not in permissableDimensions):
raise CairisHTTPError(BAD_REQUEST,'Invalid dimension',table + ' is not a permissable dimension')
if (table == 'persona_characteristic_synopsis'):
return self.getDimensionNames(table,'')
else:
return sorted(self.db_proxy.getDimensions(table,id).keys())
except DatabaseProxyException as ex:
self.close()
raise ARMHTTPError(ex)
def getDimensionNames(self,table,environment):
try:
permissableDimensions = ['asset','asset_value','attacker','countermeasure','datastore','detection_mechanism','dfd_filter','entity','goal','misusecase','obstacle','persona', 'requirement','response','risk','role','task','threat','usecase', 'vulnerability']
if (table not in permissableDimensions):
raise CairisHTTPError(BAD_REQUEST,'Invalid dimension',table + ' is not a permissable dimension when specifying environment')
if (self.db_proxy.nameExists(environment,'environment') == False):
raise CairisHTTPError(NOT_FOUND,'Unknown environment',environment + ' does not exist')
return self.db_proxy.getDimensionNames(table,environment)
except DatabaseProxyException as ex:
self.close()
raise ARMHTTPError(ex)
|
Python
| 0.999999
|
@@ -1503,17 +1503,17 @@
measure'
-
+,
'counter
|
4c1d0877fabf3f95c488e58d2460a9ca2330b3eb
|
Support --host for devserver
|
zeus/cli/devserver.py
|
zeus/cli/devserver.py
|
import click
import os
import socket
import sys
from subprocess import list2cmdline
from honcho.manager import Manager
from .base import cli
DEFAULT_HOST_NAME = socket.gethostname().split(".", 1)[0].lower()
@cli.command()
@click.option("--environment", default="development", help="The environment name.")
@click.option("--workers/--no-workers", default=False)
@click.option("--port", "-p", default=8080)
@click.option("--ngrok/--no-ngrok", default=False)
@click.option("--ngrok-domain", default="zeus-{}".format(DEFAULT_HOST_NAME))
@click.option("--pubsub/--no-pubsub", default=True)
@click.option("--pubsub-port", default=8090)
def devserver(environment, workers, port, ngrok, ngrok_domain, pubsub, pubsub_port):
os.environ.setdefault("FLASK_DEBUG", "1")
os.environ["NODE_ENV"] = environment
if pubsub:
os.environ["PUBSUB_ENDPOINT"] = "http://localhost:{}".format(pubsub_port)
if ngrok:
root_url = "https://{}.ngrok.io".format(ngrok_domain)
os.environ["SSL"] = "1"
os.environ["SERVER_NAME"] = "{}.ngrok.io".format(ngrok_domain)
else:
root_url = "http://localhost:{}".format(port)
click.echo("Launching Zeus on {}".format(root_url))
# TODO(dcramer): pass required attributes to 'run' directly instead
# of relying on FLASK_DEBUG
daemons = [
("web", ["zeus", "run", "--port={}".format(port)]),
(
"webpack",
[
"node_modules/.bin/webpack",
"--watch",
"--config=config/webpack.config.js",
],
),
]
if pubsub:
daemons.append(("pubsub", ["zeus", "pubsub", "--port={}".format(pubsub_port)]))
if workers:
daemons.append(("worker", ["zeus", "worker", "--cron", "--log-level=INFO"]))
if ngrok:
daemons.append(
(
"ngrok",
["ngrok", "http", "-subdomain={}".format(ngrok_domain), str(port)],
)
)
cwd = os.path.realpath(
os.path.join(os.path.dirname(__file__), os.pardir, os.pardir)
)
manager = Manager()
for name, cmd in daemons:
manager.add_process(name, list2cmdline(cmd), quiet=False, cwd=cwd)
manager.loop()
sys.exit(manager.returncode)
|
Python
| 0
|
@@ -351,32 +351,83 @@
default=False)%0A
+@click.option(%22--host%22, %22-h%22, default=%22127.0.0.1%22)%0A
@click.option(%22-
@@ -693,16 +693,21 @@
vserver(
+%0A
environm
@@ -719,16 +719,22 @@
workers,
+ host,
port, n
@@ -772,16 +772,17 @@
sub_port
+%0A
):%0A o
@@ -1173,33 +1173,26 @@
l = %22http://
-localhost
+%7B%7D
:%7B%7D%22.format(
@@ -1187,24 +1187,30 @@
:%7B%7D%22.format(
+host,
port)%0A%0A c
@@ -1412,16 +1412,42 @@
, %22run%22,
+ %22--host=%7B%7D%22.format(host),
%22--port
@@ -1713,71 +1713,265 @@
end(
-(%22pubsub%22, %5B%22zeus%22, %22pubsub%22, %22--port=%7B%7D%22.format(pubsub_port)%5D)
+%0A (%0A %22pubsub%22,%0A %5B%0A %22zeus%22,%0A %22pubsub%22,%0A %22--host=%7B%7D%22.format(host),%0A %22--port=%7B%7D%22.format(pubsub_port),%0A %5D,%0A )%0A
)%0A%0A
@@ -2070,16 +2070,17 @@
NFO%22%5D))%0A
+%0A
if n
|
6c69c58761acf2fdd20df656f60f1e94203b5072
|
stop useless print
|
aiy-voice/assistant_library_with_local_commands_demo.py
|
aiy-voice/assistant_library_with_local_commands_demo.py
|
#!/usr/bin/env python3
# Copyright 2017 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# Strube: add youtube and vlc
# pip3 install youtube_dl
# pip3 install python_vlc
# Volume control: sudo apt-get install python3-alsaaudio
"""Run a recognizer using the Google Assistant Library.
The Google Assistant Library has direct access to the audio API, so this Python
code doesn't need to record audio. Hot word detection "OK, Google" is supported.
It is available for Raspberry Pi 2/3 only; Pi Zero is not supported.
"""
import logging
import platform
import subprocess
import sys
import re
from google.assistant.library.event import EventType
from aiy.assistant import auth_helpers
from aiy.assistant.library import Assistant
from aiy.board import Board, Led
from aiy.voice import tts
import vlc
import youtube_dl
import alsaaudio
ydl_opts = {
'default_search': 'ytsearch1:',
'format': 'bestaudio/best',
'noplaylist': True,
'quiet': True
}
vlc_instance = vlc.get_default_instance()
vlc_player = vlc_instance.media_player_new()
def power_off_pi():
tts.say("Good bye, but I won\'t turn it off!")
#subprocess.call('sudo shutdown now', shell=True)
def reboot_pi():
tts.say('See you in a bit, but no way I will reboot!')
#subprocess.call('sudo reboot', shell=True)
def say_ip():
ip_address = subprocess.check_output("hostname -I | cut -d' ' -f1", shell=True)
tts.say('My IP address is %s' % ip_address.decode('utf-8'))
def play_music(name):
try:
with youtube_dl.YoutubeDL(ydl_opts) as ydl:
meta = ydl.extract_info(name, download=False)
except Exception:
tts.say('Sorry, I can\'t find that song.')
return
if meta:
info = meta['entries'][0]
vlc_player.set_media(vlc_instance.media_new(info['url']))
tts.say('Playing ' + re.sub(r'[^\s\w]', '', info['title']))
vlc_player.play()
def lower_volume():
m = alsaaudio.Mixer()
current_volume = m.getvolume()
print("Current volume is ",current_volume)
next_volume=(current_volume[0]-25) if current_volume[0] > 30 else 0
m.setvolume(next_volume)
def raise_volume():
m = alsaaudio.Mixer()
current_volume = m.getvolume()
print("Current volume is ", current_volume)
next_volume=current_volume[0]+25 if current_volume[0] < 70 else 100
print("Next volume is ",next_volume)
m.setvolume(next_volume)
def process_event(assistant, led, event):
logging.info(event)
if event.type == EventType.ON_START_FINISHED:
led.state = Led.BEACON_DARK # Ready.
print('Say "OK, Google" then speak, or press Ctrl+C to quit...')
elif event.type == EventType.ON_CONVERSATION_TURN_STARTED:
led.state = Led.ON # Listening.
elif event.type == EventType.ON_RECOGNIZING_SPEECH_FINISHED and event.args:
print('You said:', event.args['text'])
text = event.args['text'].lower()
if text == 'power off':
assistant.stop_conversation()
power_off_pi()
elif text == 'reboot':
assistant.stop_conversation()
reboot_pi()
elif text == 'ip address' or text == 'my ip':
assistant.stop_conversation()
say_ip()
elif text == 'pause':
assistant.stop_conversation()
vlc_player.set_pause(True)
elif text == 'stop':
assistant.stop_conversation()
vlc_player.stop()
elif text == 'resume':
assistant.stop_conversation()
vlc_player.set_pause(False)
elif text.startswith('play '):
assistant.stop_conversation()
play_music(text[5:])
elif text == 'raise volume' or text == 'louder':
assistant.stop_conversation()
raise_volume()
elif text == 'lower volume' or text == 'quieter':
assistant.stop_conversation()
lower_volume()
elif event.type == EventType.ON_END_OF_UTTERANCE:
led.state = Led.PULSE_QUICK # Thinking.
elif (event.type == EventType.ON_CONVERSATION_TURN_FINISHED
or event.type == EventType.ON_CONVERSATION_TURN_TIMEOUT
or event.type == EventType.ON_NO_RESPONSE):
led.state = Led.BEACON_DARK # Ready.
elif event.type == EventType.ON_ASSISTANT_ERROR and event.args and event.args['is_fatal']:
sys.exit(1)
def main():
logging.basicConfig(level=logging.INFO)
credentials = auth_helpers.get_assistant_credentials()
with Board() as board, Assistant(credentials) as assistant:
for event in assistant.start():
process_event(assistant, board.led, event)
if __name__ == '__main__':
main()
|
Python
| 0.000006
|
@@ -16,16 +16,41 @@
python3%0A
+%0A# TODO: button control%0A%0A
# Copyri
@@ -2510,32 +2510,33 @@
getvolume()%0A
+#
print(%22Current v
@@ -2628,10 +2628,10 @@
%5D %3E
-30
+25
els
@@ -2745,24 +2745,25 @@
olume()%0A
+#
print(%22Curre
@@ -2855,17 +2855,17 @@
e%5B0%5D %3C 7
-0
+5
else 10
@@ -2866,24 +2866,25 @@
lse 100%0A
+#
print(%22Next
|
1748d039feb40ddb1ceef0cf2f7b49270d0aae6e
|
Change to support Python3
|
carmen/resolvers/profile.py
|
carmen/resolvers/profile.py
|
"""Resolvers based on Twitter user profile data."""
import re
import warnings
from ..names import *
from ..resolver import AbstractResolver, register
STATE_RE = re.compile(r'.+,\s*(\w+)')
NORMALIZATION_RE = re.compile(r'\s+|\W')
def normalize(location_name, preserve_commas=False):
"""Normalize *location_name* by stripping punctuation and collapsing
runs of whitespace, and return the normalized name."""
def replace(match):
if preserve_commas and ',' in match.group(0):
return ','
return ' '
return NORMALIZATION_RE.sub(replace, location_name).strip().lower()
@register('profile')
class ProfileResolver(AbstractResolver):
"""A resolver that locates a tweet by matching the tweet author's
profile location against known locations."""
name = 'profile'
def __init__(self):
self.location_name_to_location = {}
def add_location(self, location):
aliases = list(location.aliases)
aliases_already_added = set()
for alias in aliases:
if alias in aliases_already_added:
continue
if alias in self.location_name_to_location:
warnings.warn('Duplicate location name "%s"' % alias)
else:
self.location_name_to_location[alias] = location
# Additionally add a normalized version of the alias
# stripped of punctuation, and with runs of whitespace
# reduced to single spaces.
normalized = normalize(alias)
if normalized != alias:
aliases.append(normalized)
aliases_already_added.add(alias)
def resolve_tweet(self, tweet):
location_string = tweet.get('user', {}).get('location', '')
if not location_string:
return None
normalized = normalize(location_string)
if normalized in self.location_name_to_location:
return (False, self.location_name_to_location[normalized])
# Try again with commas.
normalized = normalize(location_string, preserve_commas=True)
match = STATE_RE.search(normalized)
if match:
after_comma = match.group(1)
location_name = None
if after_comma in US_STATES or after_comma in COUNTRIES:
location_name = after_comma
elif after_comma in US_STATE_ABBREVIATIONS:
location_name = US_STATE_ABBREVIATIONS[after_comma]
elif after_comma in COUNTRY_CODES:
location_name = COUNTRY_CODES[after_comma]
if location_name in self.location_name_to_location:
return (False, self.location_name_to_location[location_name])
return None
|
Python
| 0
|
@@ -1684,16 +1684,35 @@
tweet):%0A
+ import sys%0A
@@ -1771,16 +1771,127 @@
n', '')%0A
+ if sys.version_info%5B0%5D %3C 3:%0A location_string = location_string.encode('utf-8')%0A %0A
@@ -1934,24 +1934,25 @@
return None%0A
+%0A
norm
@@ -1987,16 +1987,17 @@
string)%0A
+%0A
|
d9b7be65aae78a76454cae4f1f75029f1fa5084b
|
rename mapper to mapfxn to avoid confusion with mrjob.MRJob.mapper()
|
specializers/ftdock/cloud_dock.py
|
specializers/ftdock/cloud_dock.py
|
from mrjob.protocol import PickleProtocol as protocol
from asp.jit import mapreduce_support as mr
import cPickle as pickle
class FtdockMRJob(mr.AspMRJob):
DEFAULT_INPUT_PROTOCOL = 'pickle'
DEFAULT_PROTOCOL = 'pickle'
def configure_options(self):
super(mr.AspMRJob, self).configure_options()
self.add_file_option('--ftdockargs')
def job_runner_kwargs(self):
config = super(mr.AspMRJob, self).job_runner_kwargs()
config['file_upload_args'] += [('--ftdockargs', "/Users/driscoll/sejits/cloud_ftdock/pickled_args")]
config['cmdenv']['PYTHONPATH'] = "/Users/driscoll/sejits/asp:/Users/driscoll/sejits/ftdock_v2.0"
return config
def mapper(self, key, value):
"""
Each mapper executes ftdock for a combination (qi, qj, qk)
"""
from ftdock_main import ftdock
arguments = pickle.load(open('pickled_args'))
geometry_res = ftdock(key[0], key[1], key[2], *arguments)
yield 1, geometry_res
def reducer(self, key, values):
"""
The reducer just emits the list of geometries
"""
result = []
for temp in values:
result.append(temp)
yield 1, result
class AllCombMap(object):
def __init__(self, lists_to_combine, *ftdock_args):
self._lists_to_combine = lists_to_combine
self._ftdock_args = ftdock_args
def execute(self, nproc=1):
cloud_flag = True
mapper = self.ftdock_using_mapreduce if cloud_flag else self.ftdock_classic
return mapper(self._lists_to_combine, self._ftdock_args)
def ftdock_using_mapreduce(self, lists_to_combine, ftdock_args):
"""
Perform docking experiment using MapReduce
"""
print "Map-Reduce execution"
# Dump the ftdock_args in a file
pickle.dump(ftdock_args, open('pickled_args','w'))
# Add a map task for each point in the search space
import itertools
task_args = [protocol.write(x, "") for x in itertools.product(*lists_to_combine)]
import asp.jit.asp_module as asp_module
mod = asp_module.ASPModule(use_mapreduce=True)
mod.add_mr_function("ftdock_mr", FtdockMRJob)
kv_pairs = mod.ftdock_mr(task_args)
return kv_pairs[0][1]
def ftdock_classic(self, lists_to_combine, ftdock_args):
"""
Perform docking experiment using AllCombMap
"""
raise NotImplementedError
"""
print "Classic execution"
geometry_list = AllCombMap(lists_to_combine, ftdock, *ftdock_args).execute(nproc=2)
return geometry_list
"""
# this appears to be necessary because this script will be called as __main__ on
# every worker node.
if __name__ == '__main__':
FtdockMRJob().run()
|
Python
| 0
|
@@ -1479,19 +1479,19 @@
map
-per
+fxn
= self.
@@ -1566,27 +1566,27 @@
return map
-per
+fxn
(self._lists
|
a5f092b81233db019df44c46b25b320cfaca2730
|
Add world_chunk_update event, emitted for every single chunk
|
spockbot/plugins/helpers/world.py
|
spockbot/plugins/helpers/world.py
|
"""
Provides a very raw (but very fast) world map for use by plugins. Plugins
interested in a more comprehensive world map view can use mcp.mapdata to
interpret blocks and their metadata more comprehensively. Planned to provide
light level interpretation based on sky light and time of day
"""
from spockbot.mcdata import constants as const
from spockbot.plugins.base import PluginBase, pl_announce
from spockbot.plugins.tools import smpmap
from spockbot.vector import Vector3
class WorldData(smpmap.Dimension):
def __init__(self, dimension=const.SMP_OVERWORLD):
super(WorldData, self).__init__(dimension)
self.age = 0
self.time_of_day = 0
def update_time(self, data):
self.age = data['world_age']
self.time_of_day = data['time_of_day']
def new_dimension(self, dimension):
super(WorldData, self).__init__(dimension)
def reset(self):
self.__init__(self.dimension)
@pl_announce('World')
class WorldPlugin(PluginBase):
requires = 'Event'
events = {
'PLAY<Join Game': 'handle_new_dimension',
'PLAY<Respawn': 'handle_new_dimension',
'PLAY<Time Update': 'handle_time_update',
'PLAY<Chunk Data': 'handle_chunk_data',
'PLAY<Multi Block Change': 'handle_multi_block_change',
'PLAY<Block Change': 'handle_block_change',
'PLAY<Map Chunk Bulk': 'handle_map_chunk_bulk',
'PLAY<Update Sign': 'handle_update_sign',
'PLAY<Update Block Entity': 'handle_update_block_entity',
'net_disconnect': 'handle_disconnect',
}
def __init__(self, ploader, settings):
super(WorldPlugin, self).__init__(ploader, settings)
self.world = WorldData()
ploader.provides('World', self.world)
def handle_time_update(self, name, packet):
"""Time Update - Update World Time"""
self.world.update_time(packet.data)
self.event.emit('world_time_update', packet.data)
def handle_new_dimension(self, name, packet):
"""Join Game/Respawn - New Dimension"""
self.world.new_dimension(packet.data['dimension'])
self.event.emit('world_new_dimension', packet.data['dimension'])
def handle_chunk_data(self, name, packet):
"""Chunk Data - Update World state"""
self.world.unpack_column(packet.data)
def handle_multi_block_change(self, name, packet):
"""Multi Block Change - Update multiple blocks"""
chunk_x = packet.data['chunk_x'] * 16
chunk_z = packet.data['chunk_z'] * 16
for block in packet.data['blocks']:
x = block['x'] + chunk_x
z = block['z'] + chunk_z
y = block['y']
old_data = self.world.set_block(x, y, z, data=block['block_data'])
self.event.emit('world_block_update', {
'location': {
'x': x,
'y': y,
'z': z,
},
'block_data': block['block_data'],
'old_data': old_data,
})
def handle_block_change(self, name, packet):
"""Block Change - Update a single block"""
p = packet.data['location']
block_data = packet.data['block_data']
old_data = self.world.set_block(p['x'], p['y'], p['z'], data=block_data)
self.event.emit('world_block_update', {
'location': p,
'block_data': block_data,
'old_data': old_data,
})
def handle_map_chunk_bulk(self, name, packet):
"""Map Chunk Bulk - Update World state"""
self.world.unpack_bulk(packet.data)
def handle_update_sign(self, event, packet):
location = Vector3(packet.data['location'])
sign_data = smpmap.SignData(packet.data)
old_data = self.world.set_block_entity_data(location, data=sign_data)
self.event.emit('world_block_entity_data', {
'location': location,
'data': sign_data,
'old_data': old_data,
})
def handle_update_block_entity(self, event, packet):
location = Vector3(packet.data['location'])
block_entity_class = smpmap.block_entities[packet.data['action']]
data = block_entity_class(packet.data['nbt'])
old_data = self.world.set_block_entity_data(location, data=data)
self.event.emit('world_block_entity_data', {
'location': location,
'data': data,
'old_data': old_data,
})
def handle_disconnect(self, name, data):
self.world.reset()
self.event.emit('world_reset')
|
Python
| 0.000007
|
@@ -2307,32 +2307,168 @@
umn(packet.data)
+%0A location = packet.data%5B'chunk_x'%5D, packet.data%5B'chunk_z'%5D%0A self.event.emit('world_chunk_update', %7B'location': location%7D)
%0A%0A def handle
@@ -3731,32 +3731,207 @@
ulk(packet.data)
+%0A for meta in packet.data%5B'metadata'%5D:%0A location = meta%5B'chunk_x'%5D, meta%5B'chunk_z'%5D%0A self.event.emit('world_chunk_update', %7B'location': location%7D)
%0A%0A def handle
|
a0ac12676f3b716ad241a28f4e1725c0d3be5eed
|
update squad5
|
dictlearn/extractive_qa_configs.py
|
dictlearn/extractive_qa_configs.py
|
from dictlearn.config_registry import ConfigRegistry
qa_config_registry = ConfigRegistry()
qa_config_registry.set_root_config({
# data
'data_path' : "",
'dict_path' : "",
'vocab_path' : "",
'dict_vocab_path' : "",
'embedding_path' : "",
'layout' : 'standard',
'num_input_words' : 10000,
'def_num_input_words' : 0,
'max_length' : 100,
'batch_size' : 32,
'batch_size_valid' : 32,
'max_def_length' : 1000,
'exclude_top_k' : 0,
# model
'def_reader' : 'LSTMReadDefinitions',
'dim' : 128,
'emb_dim' : 0,
'readout_dims' : [],
'coattention' : True,
'learning_rate' : 0.001,
'momentum' : 0.9,
'grad_clip_threshold' : 5.0,
'dropout' : 0.,
'random_unk' : False,
'def_word_gating' : "none",
'compose_type' : "sum",
'reuse_word_embeddings' : False,
'train_only_def_part' : False,
# monitoring and checkpointing
'mon_freq_train' : 10,
'save_freq_batches' : 1000,
'save_freq_epochs' : 1,
'n_batches' : 0,
'monitor_parameters' : False
})
c = qa_config_registry['root']
c['data_path'] = 'squad/squad_from_scratch'
c['layout'] = 'squad'
c['mon_freq_train'] = 100
c['grad_clip_threshold'] = 50.
qa_config_registry['squad'] = c
c = qa_config_registry['squad']
c['data_path'] = 'squad/squad_glove'
c['emb_dim'] = 300
c['embedding_path'] = 'squad/squad_glove/glove_w_specials.npy'
c['num_input_words'] = 0
qa_config_registry['squad_glove'] = c
def from1to2(c):
c['batch_size'] = 128
c['batch_size_valid'] = 128
c['dim'] = 200
return c
c = qa_config_registry['squad']
from1to2(c)
qa_config_registry['squad2'] = c
c = qa_config_registry['squad_glove']
from1to2(c)
qa_config_registry['squad_glove2'] = c
def from2to3(c):
c['max_def_length'] = 30
c['exclude_top_k'] = 10000
c['dict_path'] = 'squad/squad_from_scratch/dict.json'
return c
c = qa_config_registry['squad2']
from2to3(c)
qa_config_registry['squad3'] = c
c = qa_config_registry['squad_glove2']
from2to3(c)
qa_config_registry['squad_glove3'] = c
def from3to4(c):
c['num_input_words'] = 3000
c['exclude_top_k'] = 3000
c['emb_dim'] = 300
c['reuse_word_embeddings'] = True
c['compose_type'] = 'transform_and_sum'
c['dict_path'] = 'squad/squad_from_scratch/dict2.json'
return c
c = qa_config_registry['squad3']
from3to4(c)
qa_config_registry['squad4'] = c
c = qa_config_registry['squad_glove3']
from3to4(c)
qa_config_registry['squad_glove4'] = c
def from4to5(c):
c['dict_path'] = 'squad/squad_from_scratch/dict_wordnet3.1.json'
return c
qa_config_registry['squad5'] = from4to5(qa_config_registry['squad4'])
qa_config_registry['squad_glove5'] = from4to5(qa_config_registry['squad_glove4'])
|
Python
| 0
|
@@ -2565,16 +2565,41 @@
et3.
-1
+2
.json'%0A
+ c%5B'batch_size'%5D = 32%0A
|
c8a280d6466623b8d76fa01c12ebf295151d35d6
|
remove primary key constraint
|
wim-adaptor/vtn-api/database/sqlalchemy_declaritive.py
|
wim-adaptor/vtn-api/database/sqlalchemy_declaritive.py
|
import os
import sys
from sqlalchemy import create_engine, Column, ForeignKey, Integer, String
from sqlalchemy.orm import relationship
from sqlalchemy.ext.declarative import declarative_base
Base = declarative_base()
class Connectivity(Base):
__tablename__ = 'connectivity'
# define the columns for the table
segment = Column(String(250),nullable=False)
bridge_name = Column(String(250), nullable=False)
port_id = Column(String(250),primary_key=True)
location = Column(String(250))
# Create engine that stores data in the local directory's
engine = create_engine('sqlite:///wim_info.db')
# Create the Table
Base.metadata.create_all(engine)
|
Python
| 0.000384
|
@@ -441,25 +441,8 @@
250)
-,primary_key=True
)%0A%09l
|
440f42a062e30ce8953c7a1a69aea2e6f62248f8
|
Allow 2016 in boilerplate
|
hack/boilerplate/boilerplate.py
|
hack/boilerplate/boilerplate.py
|
#!/usr/bin/env python
# Copyright 2015 The Kubernetes Authors All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import print_function
import argparse
import glob
import json
import mmap
import os
import re
import sys
parser = argparse.ArgumentParser()
parser.add_argument("filenames", help="list of files to check, all files if unspecified", nargs='*')
args = parser.parse_args()
rootdir = os.path.dirname(__file__) + "/../../"
rootdir = os.path.abspath(rootdir)
def get_refs():
refs = {}
for path in glob.glob(os.path.join(rootdir, "hack/boilerplate/boilerplate.*.txt")):
extension = os.path.basename(path).split(".")[1]
ref_file = open(path, 'r')
ref = ref_file.read().splitlines()
ref_file.close()
refs[extension] = ref
return refs
def file_passes(filename, refs, regexs):
try:
f = open(filename, 'r')
except:
return False
data = f.read()
f.close()
extension = file_extension(filename)
ref = refs[extension]
# remove build tags from the top of Go files
if extension == "go":
p = regexs["go_build_constraints"]
(data, found) = p.subn("", data, 1)
# remove shebang from the top of shell files
if extension == "sh":
p = regexs["shebang"]
(data, found) = p.subn("", data, 1)
data = data.splitlines()
# if our test file is smaller than the reference it surely fails!
if len(ref) > len(data):
return False
# trim our file to the same number of lines as the reference file
data = data[:len(ref)]
p = regexs["year"]
for d in data:
if p.search(d):
return False
# Replace all occurrences of the regex "2015|2014" with "YEAR"
p = regexs["date"]
for i, d in enumerate(data):
(data[i], found) = p.subn('YEAR', d)
if found != 0:
break
# if we don't match the reference at this point, fail
if ref != data:
return False
return True
def file_extension(filename):
return os.path.splitext(filename)[1].split(".")[-1].lower()
skipped_dirs = ['Godeps', 'third_party', '_output', '.git']
def normalize_files(files):
newfiles = []
for pathname in files:
if any(x in pathname for x in skipped_dirs):
continue
newfiles.append(pathname)
for i, pathname in enumerate(newfiles):
if not os.path.isabs(pathname):
newfiles[i] = os.path.join(rootdir, pathname)
return newfiles
def get_files(extensions):
files = []
if len(args.filenames) > 0:
files = args.filenames
else:
for root, dirs, walkfiles in os.walk(rootdir):
# don't visit certain dirs. This is just a performance improvement
# as we would prune these later in normalize_files(). But doing it
# cuts down the amount of filesystem walking we do and cuts down
# the size of the file list
for d in skipped_dirs:
if d in dirs:
dirs.remove(d)
for name in walkfiles:
pathname = os.path.join(root, name)
files.append(pathname)
files = normalize_files(files)
outfiles = []
for pathname in files:
extension = file_extension(pathname)
if extension in extensions:
outfiles.append(pathname)
return outfiles
def get_regexs():
regexs = {}
# Search for "YEAR" which exists in the boilerplate, but shouldn't in the real thing
regexs["year"] = re.compile( 'YEAR' )
# dates can be 2014 or 2015, company holder names can be anything
regexs["date"] = re.compile( '(2014|2015)' )
# strip // +build \n\n build constraints
regexs["go_build_constraints"] = re.compile(r"^(// \+build.*\n)+\n", re.MULTILINE)
# strip #!.* from shell scripts
regexs["shebang"] = re.compile(r"^(#!.*\n)\n*", re.MULTILINE)
return regexs
def main():
regexs = get_regexs()
refs = get_refs()
filenames = get_files(refs.keys())
for filename in filenames:
if not file_passes(filename, refs, regexs):
print(filename, file=sys.stdout)
if __name__ == "__main__":
sys.exit(main())
|
Python
| 0.00043
|
@@ -2239,16 +2239,21 @@
regex %22
+2016%7C
2015%7C201
@@ -4198,16 +4198,21 @@
014%7C2015
+%7C2016
)' )%0A
|
f984db30c4d4cab1377d21a73ec0b802590f8a51
|
Update sqlalchemy migrate scripts for postgres
|
trove/db/sqlalchemy/migrate_repo/versions/014_update_instance_flavor_id.py
|
trove/db/sqlalchemy/migrate_repo/versions/014_update_instance_flavor_id.py
|
# Copyright 2012 OpenStack Foundation
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from sqlalchemy.schema import MetaData
from trove.db.sqlalchemy.migrate_repo.schema import Integer
from trove.db.sqlalchemy.migrate_repo.schema import String
from trove.db.sqlalchemy.migrate_repo.schema import Table
def upgrade(migrate_engine):
meta = MetaData()
meta.bind = migrate_engine
instances = Table('instances', meta, autoload=True)
#modify column
instances.c.flavor_id.alter(type=Integer())
def downgrade(migrate_engine):
meta = MetaData()
meta.bind = migrate_engine
# modify column:
instances = Table('instances', meta, autoload=True)
instances.c.flavor_id.alter(type=String(36))
|
Python
| 0.000001
|
@@ -903,24 +903,489 @@
rate_engine%0A
+ # pgsql %3C= 8.3 was lax about char-%3Eother casting but this was tightened up%0A # in 8.4+. We now have to specify the USING clause for the cast to succeed.%0A # NB: The generated sqlalchemy query doesn't support this, so this override%0A # is needed.%0A if migrate_engine.name == 'postgresql':%0A migrate_engine.execute('ALTER TABLE instances ALTER COLUMN flavor_id '%0A 'TYPE INTEGER USING flavor_id::integer')%0A else:%0A
instance
@@ -1424,24 +1424,28 @@
oload=True)%0A
+
#modify
@@ -1451,16 +1451,20 @@
column%0A
+
inst
@@ -1585,24 +1585,134 @@
rate_engine%0A
+ # int-%3Echar casts in pgsql still work fine without any USING clause,%0A # so downgrade is not affected.%0A
# modify
|
dbe1ac7fda9188e59479ff4716141651d627f76c
|
Fix cheroot.test.test_errors doc spelling
|
cheroot/test/test_errors.py
|
cheroot/test/test_errors.py
|
"""Test suite for ``cheroot.errors``."""
import pytest
from cheroot import errors
from .._compat import IS_LINUX, IS_MACOS, IS_WINDOWS
@pytest.mark.parametrize(
'err_names,err_nums',
(
(('', 'some-nonsense-name'), []),
(
(
'EPROTOTYPE', 'EAGAIN', 'EWOULDBLOCK',
'WSAEWOULDBLOCK', 'EPIPE',
),
(91, 11, 32) if IS_LINUX else
(32, 35, 41) if IS_MACOS else
(32, 10041, 11, 10035) if IS_WINDOWS else
(),
),
),
)
def test_plat_specific_errors(err_names, err_nums):
"""Test that plat_specific_errors retrieves correct err num list."""
actual_err_nums = errors.plat_specific_errors(*err_names)
assert len(actual_err_nums) == len(err_nums)
assert sorted(actual_err_nums) == sorted(err_nums)
|
Python
| 0.000442
|
@@ -613,16 +613,18 @@
st that
+%60%60
plat_spe
@@ -639,17 +639,14 @@
rors
- retrieve
+%60%60 get
s co
@@ -658,12 +658,18 @@
err
+or
num
+bers
lis
|
423a15d7c8841b40bddbd129b2abfb1135f0b7c0
|
fix date parsing in logsearch
|
scripts/logfetch/search.py
|
scripts/logfetch/search.py
|
import os
import re
import sys
import fnmatch
import logfetch_base
from termcolor import colored
def find_cached_logs(args):
matching_logs = []
log_fn_match = get_matcher(args)
for filename in os.listdir(args.dest):
if fnmatch.fnmatch(filename, log_fn_match) and in_date_range(args, filename):
if args.verbose:
sys.stderr.write(colored('Including log {0}\n'.format(filename), 'magenta'))
matching_logs.append('{0}/{1}'.format(args.dest, filename))
else:
if args.verbose:
sys.stderr.write(colored('Excluding log {0}, not in date range\n'.format(filename), 'magenta'))
return matching_logs
def in_date_range(args, filename):
timestamps = re.findall(r"\d{13}", filename)
if timestamps:
return logfetch_base.is_in_date_range(args, int(str(timestamps[-1])[0:-3]))
else:
return True
def get_matcher(args):
if args.taskId:
if 'filename' in args.file_pattern and args.logtype:
return '{0}*{1}*'.format(args.taskId, args.logtype)
else:
return '{0}*'.format(args.taskId)
elif args.deployId and args.requestId:
if 'filename' in args.file_pattern and args.logtype:
return '{0}-{1}*{2}*'.format(args.requestId, args.deployId, args.logtype)
else:
return '{0}-{1}*'.format(args.requestId, args.deployId)
else:
if 'filename' in args.file_pattern and args.logtype:
return '{0}*{1}*'.format(args.requestId, args.logtype)
else:
return '{0}*'.format(args.requestId)
|
Python
| 0.000005
|
@@ -765,14 +765,16 @@
l(r%22
+-
%5Cd%7B13%7D
+-
%22, f
@@ -876,16 +876,33 @@
mps%5B-1%5D)
+.replace(%22-%22, %22%22)
%5B0:-3%5D))
|
aa0a133748b958317047e84a1b1b484ede7fa7c3
|
Fix unicode in json_job.py urls.
|
scripts/master/json_job.py
|
scripts/master/json_job.py
|
# Copyright 2013 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
import json
import urllib
from buildbot.changes.base import PollingChangeSource
from buildbot.process.properties import Properties
from buildbot.schedulers.trysched import TryBase
from twisted.internet import defer
from twisted.python import log
from twisted.web import client
from master import get_password as get_pw
_DEFAULT_POLLING_INTERVAL = 10 # seconds
class JsonPoller(PollingChangeSource):
"""Polls a url for JSON blobs."""
def __init__(self, url, password=None, interval=_DEFAULT_POLLING_INTERVAL):
"""
Args:
url: Url used to retrieve json blobs describing jobs.
password: The password to use to authenticate to the url.
interval: Interval used to poll the url, in seconds.
"""
# Set the interval used by base PollingChangeSource
self.pollInterval = interval
# The url that the poller will poll.
self._url = url + '/pull'
self._password = password
# The parent scheduler that is using this poller.
self._scheduler = None
def setServiceParent(self, parent):
PollingChangeSource.setServiceParent(self, parent)
self._scheduler = parent
def poll(self):
"""Polls the url for any job JSON blobs and submits them.
Override of PollingChangeSource base method.
Returns:
A deferred object to be called once the polling completes.
"""
log.msg('JsonPoller.poll')
if self._password:
postdata = urllib.urlencode({'password': self._password})
else:
postdata = ''
headers = {'Content-Type': 'application/x-www-form-urlencoded'}
d = client.getPage(self._url, method='POST',
postdata=postdata, headers=headers)
d.addCallback(self._handleJobs)
d.addErrback(log.err, 'error in JsonPoller.poll')
return d
def _handleJobs(self, blob):
log.msg('JsonScheduler handling blob: %s' % blob)
if not blob:
return
jobs = json.loads(blob)
return self._scheduler.submitJobs(jobs)
class JsonScheduler(TryBase):
"""A scheduler to spawn jobs based on JSON blobs retrieved from a url."""
compare_attrs = TryBase.compare_attrs + ('url',)
def __init__(self, name, builders, url, properties=None):
"""
Args:
name: The name of this scheduler, for buildbot indexing.
builders: The names of the builders it can schedule jobs for.
url: The url to poll for new jobs.
properties: Key-value pairs to be added to every job description.
"""
TryBase.__init__(self, name, builders, properties or {})
# The password to use for authentication.
self._password = get_pw.Password('.jobqueue_password').MaybeGetPassword()
# The poller instance that will be sending us jobs.
self._poller = JsonPoller(url, self._password,
interval=_DEFAULT_POLLING_INTERVAL)
# The url to which the scheduler posts that it started the job.
self._url = url + '/accept/%s'
def gotChange(self, _change, _important): # pylint: disable=R0201
log.msg('ERROR: gotChange was unexpectedly called.')
@defer.inlineCallbacks
def submitJobs(self, jobs):
log.msg('JsonScheduler.submitJobs')
for job in jobs:
d = self._createSourcestamp(job)
d.addCallback(self._createBuildset, job)
d.addCallback(self._acceptJob, job)
d.addErrback(log.err, "Failed to queue a job!")
log.msg(d)
yield d
def _createSourcestamp(self, job):
log.msg('JsonScheduler adding sourcestamp: %s' % job)
return self.master.db.sourcestamps.addSourceStamp(
project=job.get('project', ''),
repository=job.get('repository', ''),
branch=job.get('branch', ''),
revision=job.get('revision', ''))
def _createBuildset(self, ssid, job):
log.msg('JsonScheduler adding buildset: %s' % ssid)
properties = Properties()
properties.update(job, 'Job JSON')
return self.addBuildsetForSourceStamp(ssid,
builderNames=job.get('buildername', None),
reason=job.get('reason', 'Job from JsonScheduler'),
properties=properties)
def _acceptJob(self, _, job):
log.msg('JsonScheduler accepting job: %s' % job['job_key'])
if self._password:
postdata = urllib.urlencode({'password': self._password})
else:
postdata = ''
headers = {'Content-Type': 'application/x-www-form-urlencoded'}
return client.getPage(self._url % job['job_key'], method='POST',
postdata=postdata, headers=headers)
def setServiceParent(self, parent):
TryBase.setServiceParent(self, parent)
self._poller.setServiceParent(self)
|
Python
| 0.999999
|
@@ -1032,24 +1032,36 @@
f._url = url
+.rstrip('/')
+ '/pull'%0A
@@ -3065,16 +3065,28 @@
rl = url
+.rstrip('/')
+ '/acc
@@ -4501,24 +4501,93 @@
rlencoded'%7D%0A
+ # We are guaranteed job_key is a str, but json makes it unicode.%0A
return c
@@ -4612,16 +4612,20 @@
._url %25
+str(
job%5B'job
@@ -4630,16 +4630,17 @@
ob_key'%5D
+)
, method
|
fc1d468d6602022405d4959ea8d12c825a1916f0
|
Add AuthToken model
|
passwordless/models.py
|
passwordless/models.py
|
from django.db import models
# Create your models here.
class User(models.Model):
"""
User model
This User model eschews passwords, relying instead on emailed OTP tokens.
"""
username = models.CharField(max_length=30, unique=True)
email = models.EmailField(null=True)
is_active = models.BooleanField(default=False)
is_superuser = models.BooleanField(default=False)
date_joined = models.DateTimeField(auto_now_add=True)
@property
def is_authenticated(self):
#Used to tell authenticated Users from anonymous ones
return True
@property
def is_anonymous(self):
#This is not an anonymous user
return False
def __str__(self):
return self.username
class AnonymousUser:
"""
An object to represent an anonymous/unauthenticated user
"""
username = ''
email = None
is_active = False
is_superuser = False
date_joined = None
@property
def is_authenticated(self):
#Anonymous sessions are not authenticated
return False
@property
def is_anonymous(self):
return True
def __str__(self):
return "Anonymous User"
|
Python
| 0
|
@@ -3,30 +3,109 @@
om d
-jango.db import models
+atetime import timedelta%0Aimport uuid%0A%0A%0Afrom django.db import models%0Afrom django.utils import timezone
%0A%0A#
@@ -1259,8 +1259,752 @@
User%22%0A%0A
+%0Adef make_token():%0A %22%22%22%0A Generate a random token suitable for activation/confirmation via email%0A%0A A hex-encoded random UUID has plenty of entropy to be secure enough for our%0A needs.%0A %22%22%22%0A return uuid.uuid4().hex%0A%0A%0Aclass AuthToken(models.Model):%0A %22%22%22%0A OTP Token for passwordless authentication%0A %22%22%22%0A user = models.OneToOneField(User, primary_key=True)%0A token = models.CharField(max_length=40, default=make_token)%0A date_sent = models.DateTimeField(default=timezone.now)%0A%0A _expiration_hours = 24%0A%0A @property%0A def expiration_date(self):%0A return self.date_sent + timedelta(hours=self._expiration_hours)%0A%0A @property%0A def is_valid(self):%0A return self.expiration_date %3E= timezone.now()%0A%0A
|
d9e9e4e9cce22c608ed39b0db1d5edc7ae277332
|
Correct metaclass implementation
|
patchboard/resource.py
|
patchboard/resource.py
|
# resource.py
#
# Copyright 2014 BitVault.
from __future__ import print_function
import json
from exception import PatchboardError
class ResourceType(type):
"""A metaclass for resource classes."""
# Must override to supply default arguments
def __new__(cls, name, patchboard, definition, schema, mapping):
return type.__new__(cls, name, (Resource,), {})
def __init__(cls, name, patchboard, definition, schema, mapping):
super(ResourceType, cls).__init__(name, (Resource,), {})
setattr(cls, 'api', classmethod(lambda(self_): patchboard.api))
setattr(cls, 'schema', classmethod(lambda(self_): mapping))
if schema:
if u'properties' in schema:
for name, schema_def in schema[u'properties'].iteritems():
setattr(
cls,
name,
lambda(self): self.attributes[name])
if schema.get(u'additionalProperties', False) is not False:
# FIXME: doesn't take the block the ruby code does
def fn(self, name, *args):
if len(args) == 0:
return self.attributes[name]
else:
return super(cls, self).method_missing(name, *args)
setattr(cls, 'method_missing', fn)
setattr(
cls,
'generate_url',
classmethod(
lambda(self_, params): mapping.generate_url(params)))
for name, action in definition[u'actions'].iteritems():
# FIXME: create actions
# FIXME: implement correctly
setattr(cls, name, lambda(self): False)
class Resource(object):
"""Base class for resources"""
@classmethod
def decorate(cls, instance, attributes):
# TODO: non destructive decoration
# TODO: add some sort of validation for the input attributes.
if cls.schema and u'properties' in cls.schema:
context = instance.context
properties = cls.schema[u'properties']
for key, sub_schema in properties.iteritems():
if key not in attributes:
next
value = attributes[key]
mapping = cls.api.find_mapping(sub_schema)
if mapping:
if mapping.query:
# TODO: find a way to define this at runtime,
# not once for every instance.
def fn(self, params={}):
params[u'url'] = value[u'url']
url = mapping.generate_url(params)
return mapping.cls(context, {u'url': url})
setattr(instance, key, fn)
else:
attributes[key] = mapping.cls(context, value)
else:
attributes[key] = cls.api.decorate(
context,
sub_schema,
value)
return attributes
def __init__(self, context, attributes={}):
self.context = context
self.attributes = Resource.decorate(self, attributes)
self.url = self.attributes[u'url']
# TODO: implement
#def __str__(self):
def __len__(self):
return len(self.attributes)
def __getitem__(self, key):
return self.attributes[key]
def __setitem__(self, key, value):
self.attributes[key] = value
#def __delitem__(self, key):
# del self.attributes[key]
def __contains__(self, obj):
return (obj in self.attributes)
def curl(self):
raise PatchboardError(u"Resource.curl() not implemented")
def to_hash(self):
return self.attributes
def to_json(self):
return json.generate(self.attributes)
|
Python
| 0.000002
|
@@ -449,73 +449,8 @@
ng):
-%0A super(ResourceType, cls).__init__(name, (Resource,), %7B%7D)
%0A%0A
@@ -1655,16 +1655,112 @@
False)%0A%0A
+ # Must be called last%0A super(ResourceType, cls).__init__(name, (Resource,), %7B%7D)%0A%0A
%0Aclass R
|
5114bf3960b944c193c37ef8ecbcac50ae098d02
|
Add InvalidLengthError class
|
pathvalidate/_error.py
|
pathvalidate/_error.py
|
# encoding: utf-8
"""
.. codeauthor:: Tsuyoshi Hombashi <gogogo.vm@gmail.com>
"""
from __future__ import absolute_import
from __future__ import unicode_literals
class NullNameError(ValueError):
"""
Raised when a name is empty.
"""
class InvalidCharError(ValueError):
"""
Raised when includes invalid character(s) within a string.
"""
|
Python
| 0
|
@@ -349,16 +349,113 @@
string.%0A %22%22%22%0A
+%0A%0Aclass InvalidLengthError(ValueError):%0A %22%22%22%0A Raised when a string too long/short.%0A %22%22%22%0A
|
c37591f23e33383e7b63525db243aba7e0a46fc3
|
add run in log file
|
scripts/parallel_driver.py
|
scripts/parallel_driver.py
|
#!/usr/bin/env python
"""
Usage example:
1. First realization per model
./parallel_driver.py -p my_Param_ENSO.py --mip cmip6 --modnames all --realization r1i1p1f1 --metricsCollection ENSO_perf
2. All realizations of individual models
./parallel_driver.py -p my_Param_ENSO.py --mip cmip6 --modnames all --realization all --metricsCollection ENSO_perf
"""
from __future__ import print_function
from argparse import RawTextHelpFormatter
from genutil import StringConstructor
from subprocess import Popen
from PMPdriver_lib import AddParserArgument
from PMPdriver_lib import sort_human
import datetime
import glob
import os
import pcmdi_metrics
import sys
import time
# To avoid below error
# OpenBLAS blas_thread_init: pthread_create failed for thread XX of 96: Resource temporarily unavailable
os.environ['OPENBLAS_NUM_THREADS'] = '1'
# Must be done before any CDAT library is called.
# https://github.com/CDAT/cdat/issues/2213
if 'UVCDAT_ANONYMOUS_LOG' not in os.environ:
os.environ['UVCDAT_ANONYMOUS_LOG'] = 'no'
# =================================================
# Collect user defined options
# -------------------------------------------------
param = AddParserArgument()
# Pre-defined options
mip = param.mip
exp = param.exp
print('mip:', mip)
print('exp:', exp)
# Path to model data as string template
modpath = param.process_templated_argument("modpath")
# Check given model option
models = param.modnames
print('models:', models)
# Include all models if conditioned
if ('all' in [m.lower() for m in models]) or (models == 'all'):
model_index_path = param.modpath.split('/')[-1].split('.').index("%(model)")
models = ([p.split('/')[-1].split('.')[model_index_path] for p in glob.glob(modpath(
mip=mip, exp=exp, model='*', realization='*', variable='ts'))])
# remove duplicates
models = sorted(list(dict.fromkeys(models)), key=lambda s: s.lower())
print('models:', models)
print('number of models:', len(models))
# Realizations
realization = param.realization
if ('all' in [r.lower() for r in realization]) or (realization == 'all'):
realization = '*'
print('realization: ', realization)
# Metrics Collection
mc_name = param.metricsCollection
# case id
case_id = param.case_id
print('case_id:', case_id)
# Output
outdir_template = param.process_templated_argument("results_dir")
outdir = StringConstructor(str(outdir_template(
output_type='%(output_type)',
mip=mip, exp=exp, metricsCollection=mc_name, case_id=case_id)))
# Debug
debug = param.debug
print('debug:', debug)
# =================================================
# Create output directories
# -------------------------------------------------
for output_type in ['graphics', 'diagnostic_results', 'metrics_results']:
if not os.path.exists(outdir(output_type=output_type)):
os.makedirs(outdir(output_type=output_type))
print(outdir(output_type=output_type))
# =================================================
# Generates list of command
# -------------------------------------------------
param_file = './my_Param_ENSO.py'
cmds_list = []
for model in models:
print(' ----- model: ', model, ' ---------------------')
# Find all xmls for the given model
model_path_list = glob.glob(
#modpath(mip=mip, exp=exp, model=model, realization=realization, variable='ts'))
modpath(mip=mip, exp=exp, model=model, realization="*", variable='ts'))
# sort in nice way
model_path_list = sort_human(model_path_list)
#if debug:
# print('model_path_list:', model_path_list)
# Find where run can be gripped from given filename template for modpath
run_in_modpath = modpath(mip=mip, exp=exp, model=model, realization=realization,
variable='ts').split('/')[-1].split('.').index(realization)
# Collect available runs
runs_list = [model_path.split('/')[-1].split('.')[run_in_modpath] for model_path in model_path_list]
if debug:
print('runs_list (all):', runs_list)
# Check if given run member is included. If not for all runs and given run member is not included,
# take alternative run
if realization is not "*":
if realization in runs_list:
runs_list = [realization]
else:
runs_list = runs_list[0:1]
if debug:
print('runs_list (revised):', runs_list)
for run in runs_list:
cmd = ['python', 'PMPdriver_EnsoMetrics.py',
'-p', param_file,
'--mip', mip, '--metricsCollection', mc_name,
'--case_id', case_id,
'--modnames', model,
'--realization', run]
cmds_list.append(cmd)
# =================================================
# Run subprocesses in parallel
# -------------------------------------------------
# log dir
log_dir = os.path.join("log", case_id, mc_name)
if not os.path.exists(log_dir):
os.makedirs(log_dir)
# number of tasks to submit at the same time
num_workers = 8
#num_workers = 10
#num_workers = 30
#num_workers = 25
print("Start : %s" % time.ctime())
# submit tasks and wait for subset of tasks to complete
procs_list = []
for p, cmd in enumerate(cmds_list):
timenow = time.ctime()
print(timenow, p, ' '.join(cmd))
model = cmd[-3]
run = cmd[-1]
log_filename = '_'.join(['log_enso', mc_name, mip, exp, model, case_id])
log_file = os.path.join(log_dir, log_filename)
with open(log_file+"_stdout.txt", "wb") as out, open(log_file+"_stderr.txt", "wb") as err:
procs_list.append(Popen(cmd, stdout=out, stderr=err))
time.sleep(1)
if ((p > 0 and p % num_workers == 0) or (p == len(cmds_list)-1)):
print('wait...')
for proc in procs_list:
proc.wait()
print("Tasks end : %s" % time.ctime())
procs_list = []
# tasks done
print("End : %s" % time.ctime())
sys.exit('DONE')
|
Python
| 0.000001
|
@@ -5324,16 +5324,21 @@
, model,
+ run,
case_id
|
b479491e914c271a41ba92c958c6e3d42ccdb799
|
add get_followers to twitter api
|
polbotcheck/twitter_api.py
|
polbotcheck/twitter_api.py
|
import tweepy
import json
from keys import myauth
import pprint
import time
import db
auth = tweepy.OAuthHandler(myauth['consumer_key'], myauth['consumer_secret'])
auth.set_access_token(myauth['access_token'], myauth['access_token_secret'] )
api = tweepy.API(auth,wait_on_rate_limit=True, wait_on_rate_limit_notify=True,\
retry_count=3, retry_delay=5, retry_errors=set([401, 404, 500, 503]))
def limit_handled(cursor):
while True:
try:
yield cursor.next()
except tweepy.RateLimitError:
timestamp = time.strftime("%d.%m.%Y %H:%M:%S", time.localtime())
print('Warning: Rate limit reached!' + timestamp)
time.sleep(15 * 60)
def get_tweets(screen_name):
timestamp = time.strftime("%d.%m.%Y %H:%M:%S", time.localtime())
print(timestamp)
content = []
for tweet in limit_handled(tweepy.Cursor(api.user_timeline, id=screen_name, count=200).items()):
content.append(tweet.text)
retweets = get_retweets(tweet.id)
db.saveRetweets(tweet, retweets)
return content
# def get_all_retweeters(screen_name):
# timestamp = time.strftime("%d.%m.%Y %H:%M:%S", time.localtime())
# print(timestamp)
# all_retweeters = []
# for tweet in limit_handled(tweepy.Cursor(api.user_timeline, id=screen_name, count=200).items()):
# print(tweet.id)
# retweeters = get_retweets(tweet.id)
# # somehow get to retweeters
# # all_retweeters.append(retweeters_per_tweet)
# return all_retweeters
def get_retweets(tweet_id):
timestamp = time.strftime("%d.%m.%Y %H:%M:%S", time.localtime())
print(timestamp)
content = []
for tweet in api.retweets(id=tweet_id, count=200):
content.append(tweet)
return content
if __name__ == "__main__":
# example to get list all tweets (text)
#tweets_save = True
name = '@malechanissen'
content = get_tweets(name)
# if tweets_save == True:
# with open('sample_tweets.json', 'w') as json_out:
# json.dump(content, json_out)
# print('samples have been saved')
# example get user_ids of who retweeted tweet with specific id
#retweeters_save = False
#status_id = '837968136074891264'
#retweets = get_retweets(status_id)
#print(retweets)
#
#db.saveRetweets()
#if retweeters_save == True:
# with open('retweeters.json', 'w') as json_out:
# json.dump(retweeters, json_out)
# example to get all retweeters associated with an user
# print(get_all_retweeters(screen_name=name))
|
Python
| 0
|
@@ -1783,16 +1783,322 @@
ontent%0A%0A
+def get_followers(screen_name):%0A timestamp = time.strftime(%22%25d.%25m.%25Y %25H:%25M:%25S%22, time.localtime())%0A print(timestamp)%0A followers =%5B%5D%0A for user in limit_handled(tweepy.Cursor(twitter_api.followers, screen_name=screen_name, count=200).items()):%0A followers.append(user)%0A return followers%0A%0A
%0Aif __na
|
b40e79d7f5884e5d4681159c9b6d52edbc5d8549
|
Fix another import
|
hiicart/gateway/amazon/views.py
|
hiicart/gateway/amazon/views.py
|
import logging
import pprint
from django.http import HttpResponse, HttpResponseRedirect, HttpResponseBadRequest
from django.views.decorators.cache import never_cache
from django.views.decorators.csrf import csrf_view_exempt
from hiicart.gateway.amazon.ipn import AmazonIPN
from hiicart.gateway.countries import COUNTRIES
from hiicart.models import cart_by_uuid
from hiicart.utils import format_exceptions
log = logging.getLogger("hiicart.gateway.amazon")
def _find_cart(request_data):
# Subscription payments look like '<uuid>-4' so grab the uuid id
uuid = request_data["callerReference"][:36]
return cart_by_uuid(uuid)
@csrf_view_exempt
@format_exceptions
@never_cache
def cbui(request, settings=None):
"""
View used when the Co-Branded UI returns.
This view verifies that the CBUI returned successfully and
uses the provided authorization to initiate a Pay request.
"""
log.debug("CBUI Received: \n%s" % pprint.pformat(dict(request.GET), indent=10))
cart = _find_cart(request.GET)
handler = AmazonIPN(cart)
handler._update_with_cart_settings(cart_settings_kwargs={'request': request})
if not handler.verify_signature(request.GET.urlencode(), "GET", handler.settings["CBUI_RETURN_URL"]):
log.error("Validation of Amazon request failed!")
return HttpResponseRedirect(handler.settings.get("ERROR_RETURN_URL",
handler.settings.get("RETURN_URL", "/")))
if request.GET["status"] not in ("SA", "SB", "SC"):
log.error("CBUI unsuccessful. Status code: %s" % request.GET["status"])
return HttpResponseRedirect(handler.settings.get("CANCEL_RETURN_URL",
handler.settings.get("RETURN_URL", "/")))
if not cart:
log.error("Unable to find cart.")
return HttpResponseRedirect(handler.settings.get("ERROR_RETURN_URL",
handler.settings.get("RETURN_URL", "/")))
# Address collection. Any data already in cart is assumed correct
cart.bill_first_name = cart.bill_first_name or request.GET.get("billingName", "")
cart.ship_first_name = cart.ship_first_name or request.GET.get("addressName", "")
cart.bill_street1 = cart.bill_street1 or request.GET.get("addressLine1", "")
cart.ship_street1 = cart.ship_street1 or cart.bill_street1
cart.bill_street2 = cart.bill_street1 or request.GET.get("addressLine2", "")
cart.ship_street2 = cart.ship_street1 or cart.bill_street1
cart.bill_state = cart.bill_state or request.GET.get("state", "")
cart.ship_state = cart.ship_state or cart.bill_state
cart.bill_postal_code = cart.bill_postal_code or request.GET.get("zip", "")
cart.ship_postal_code = cart.ship_postal_code or cart.bill_postal_code
country = request.GET.get("country", "").upper()
cart.bill_country = cart.bill_country or COUNTRIES.get(country, "")
cart.ship_country = cart.ship_country or cart.bill_country
cart.bill_email = cart.bill_email = request.GET.get("buyerEmailAddress", "");
cart.ship_email = cart.ship_email or cart.bill_email
cart.save()
recurring = cart.recurring_lineitems
if len(recurring) > 0:
handler.save_recurring_token(request.GET["tokenID"])
if recurring[0].recurring_start is None:
result = handler.make_pay_request(request.GET["tokenID"])
if result == "Success":
handler.begin_recurring()
else:
handler.begin_recurring()
else:
result = handler.make_pay_request(request.GET["tokenID"])
if 'RETURN_URL' in handler.settings:
return HttpResponseRedirect(handler.settings['RETURN_URL'])
return HttpResponseRedirect("/")
@csrf_view_exempt
@format_exceptions
@never_cache
def ipn(request):
"""Instant Payment Notification handler."""
log.debug("IPN Received: \n%s" % pprint.pformat(dict(request.POST), indent=10))
cart = _find_cart(request.POST)
handler = AmazonIPN(cart)
handler._update_with_cart_settings(cart_settings_kwargs={'request': request})
if not handler.verify_signature(request.POST.urlencode(), "POST", handler.settings["IPN_URL"]):
log.error("Validation of Amazon request failed!")
return HttpResponseBadRequest("Validation of Amazon request failed!")
if not cart:
log.error("Unable to find cart.")
return HttpResponseBadRequest()
if request.POST["notificationType"] == "TransactionStatus":
handler.accept_payment(request.POST)
elif request.POST["notificationType"] == "TokenCancellation":
handler.end_recurring(request.POST.get("tokenId", None))
return HttpResponse()
|
Python
| 0.000206
|
@@ -318,48 +318,8 @@
IES%0A
-from hiicart.models import cart_by_uuid%0A
from
@@ -357,16 +357,30 @@
ceptions
+, cart_by_uuid
%0A%0Alog =
|
41df2254187bd895e1884563ac0cc3a4353ced5b
|
use string types instead of unicode in web.querystring
|
circuits/web/querystring.py
|
circuits/web/querystring.py
|
# -*- coding: utf-8 -*-
try:
from urlparse import parse_qsl
except ImportError:
from urllib.parse import parse_qsl # NOQA
from circuits.six import iteritems, text_type
def parse(data):
obj = QueryStringParser(data)
return obj.result
class QueryStringToken(object):
ARRAY = "ARRAY"
OBJECT = "OBJECT"
KEY = "KEY"
class QueryStringParser(object):
def __init__(self, data):
self.result = {}
if isinstance(data, text_type):
sorted_pairs = self._sorted_from_string(data)
else:
sorted_pairs = self._sorted_from_obj(data)
[self.process(x) for x in sorted_pairs]
def _sorted_from_string(self, data):
stage1 = parse_qsl(data)
stage2 = [(x[0].strip(), x[1].strip()) for x in stage1]
return sorted(stage2, key=lambda p: p[0])
def _sorted_from_obj(self, data):
# data is a list of the type generated by parse_qsl
if isinstance(data, list):
items = data
else:
# complex objects:
try:
# django.http.QueryDict,
items = [(i[0], j) for i in data.lists() for j in i[1]]
except AttributeError:
# webob.multidict.MultiDict
# werkzeug.datastructures.MultiDict
items = iteritems(data)
return sorted(items, key=lambda p: p[0])
def process(self, pair):
key = pair[0]
value = pair[1]
#faster than invoking a regex
try:
key.index("[")
self.parse(key, value)
return
except ValueError:
pass
try:
key.index(".")
self.parse(key, value)
return
except ValueError:
pass
self.result[key] = value
def parse(self, key, value):
ref = self.result
tokens = self.tokens(key)
for token in tokens:
token_type, key = token
if token_type == QueryStringToken.ARRAY:
if key not in ref:
ref[key] = []
ref = ref[key]
elif token_type == QueryStringToken.OBJECT:
if key not in ref:
ref[key] = {}
ref = ref[key]
elif token_type == QueryStringToken.KEY:
try:
ref = ref[key]
next(tokens)
# TypeError is for pet[]=lucy&pet[]=ollie
# if the array key is empty a type error will be raised
except (IndexError, KeyError, TypeError):
# the index didn't exist
# so we look ahead to see what we are setting
# there is not a next token
# set the value
try:
next_token = next(tokens)
if next_token[0] == QueryStringToken.ARRAY:
ref.append([])
ref = ref[key]
elif next_token[0] == QueryStringToken.OBJECT:
try:
ref[key] = {}
except IndexError:
ref.append({})
ref = ref[key]
except StopIteration:
try:
ref.append(value)
except AttributeError:
ref[key] = value
return
def tokens(self, key):
buf = ""
for char in key:
if char == "[":
yield QueryStringToken.ARRAY, buf
buf = ""
elif char == ".":
yield QueryStringToken.OBJECT, buf
buf = ""
elif char == "]":
try:
yield QueryStringToken.KEY, int(buf)
buf = ""
except ValueError:
yield QueryStringToken.KEY, None
else:
buf = buf + char
if len(buf) > 0:
yield QueryStringToken.KEY, buf
else:
raise StopIteration()
|
Python
| 0.000005
|
@@ -164,25 +164,28 @@
ritems,
-text
+string
_type
+s
%0A%0A%0Adef p
@@ -469,17 +469,20 @@
ta,
-text
+string
_type
+s
):%0A
|
042edce052d5307fff8dfbce8c08b72fb72af7f1
|
Remove some noise
|
ckanext/groupadmin/authz.py
|
ckanext/groupadmin/authz.py
|
'''This module monkey patches functions in ckan/authz.py and replaces the
default roles with custom roles and decorates
has_user_permission_for_group_org_org to allow a GroupAdmin to admin groups.
GroupAdmins can manage all organizations/groups, but have no other sysadmin
powers.
'''
from ckan import authz, model
from ckan.common import OrderedDict
from ckan.plugins import toolkit
from ckanext.groupadmin.model import GroupAdmin
import logging
log = logging.getLogger(__name__)
old_auth_roles = authz.ROLE_PERMISSIONS
authz.ROLE_PERMISSIONS.update({'group_admin': ['read', 'manage_group']})
log.info(authz.ROLE_PERMISSIONS)
def _trans_role_group_admin():
return toolkit._('Group Admin')
authz._trans_role_group_admin = _trans_role_group_admin
def is_group_admin_decorator(method):
def decorate_has_user_permission_for_group_or_org(group_id, user_name,
permission):
user_id = authz.get_user_id_for_username(user_name, allow_none=True)
if not user_id:
return False
if GroupAdmin.is_user_group_admin(model.Session, user_id):
return True
return method(group_id, user_name, permission)
return decorate_has_user_permission_for_group_or_org
authz.has_user_permission_for_group_or_org = is_group_admin_decorator(
authz.has_user_permission_for_group_or_org)
|
Python
| 0.000199
|
@@ -312,44 +312,8 @@
del%0A
-from ckan.common import OrderedDict%0A
from
@@ -394,100 +394,8 @@
in%0A%0A
-import logging%0Alog = logging.getLogger(__name__)%0A%0A%0Aold_auth_roles = authz.ROLE_PERMISSIONS%0A%0A
auth
@@ -468,42 +468,8 @@
%7D)%0A%0A
-log.info(authz.ROLE_PERMISSIONS)%0A%0A
%0Adef
|
afb400e16c1335531f259218a8b9937de48644e9
|
Update stream health health api url
|
polyaxon/checks/streams.py
|
polyaxon/checks/streams.py
|
from checks.base import Check
from checks.results import Result
from libs.api import get_settings_ws_api_url
from libs.http import safe_request
class StreamsCheck(Check):
@classmethod
def run(cls):
response = safe_request(get_settings_ws_api_url(), 'GET')
status_code = response.status_code
if status_code == 200:
result = Result()
else:
result = Result(message='Service is not healthy, response {}'.format(status_code),
severity=Result.ERROR)
return {'STREAMS': result}
|
Python
| 0.000001
|
@@ -234,16 +234,36 @@
request(
+'%7B%7D/_health'.format(
get_sett
@@ -279,16 +279,17 @@
pi_url()
+)
, 'GET')
|
35f9f3b3a1ca9174194975e5281682c2712b653f
|
add get_absolute_url to article categories too
|
project/articles/models.py
|
project/articles/models.py
|
from django.db import models
from django.utils.translation import ugettext as _
from django.utils.timezone import now
from markitup.fields import MarkupField
from autoslug import AutoSlugField
from sorl.thumbnail import ImageField
class Category(models.Model):
class Meta:
verbose_name = _('Category')
verbose_name_plural = _('Categories')
name = models.CharField(max_length=25, verbose_name=_('name'))
slug = AutoSlugField(populate_from='name', unique=True)
def __unicode__(self):
return self.name
class ArticleManager(models.Manager):
def published(self):
q = self.get_query_set()
return q.filter(pub_date__lte=now())
class Article(models.Model):
objects = ArticleManager()
class Meta:
ordering = ('-pub_date',)
verbose_name = _('Article')
verbose_name_plural = _('Articles')
title = models.CharField(max_length=100, verbose_name=_('title'))
description = MarkupField(blank=True, verbose_name=_('description'),
help_text=_('populated from body if not given'))
body = MarkupField(verbose_name=_('body'))
image = ImageField(blank=True, upload_to='images',
verbose_name=_('image'))
pub_date = models.DateTimeField(default=now,
verbose_name=_('publication date'))
categories = models.ManyToManyField(Category, blank=True, null=True,
verbose_name=_('categories'))
slug = AutoSlugField(populate_from='title', unique=True)
def __unicode__(self):
return self.title
@models.permalink
def get_absolute_url(self):
return 'articles:detail', (), {'slug': self.slug}
|
Python
| 0
|
@@ -539,16 +539,131 @@
f.name%0A%0A
+ @models.permalink%0A def get_absolute_url(self):%0A return 'articles:category', (), %7B'slug': self.slug%7D%0A%0A
%0Aclass A
|
f4ad0904dd7411b3e9fad7608f1de385530da155
|
Bump version
|
octave_kernel.py
|
octave_kernel.py
|
from __future__ import print_function
from metakernel import MetaKernel, ProcessMetaKernel, REPLWrapper, u
from IPython.display import Image, SVG
import subprocess
import os
import sys
import tempfile
__version__ = '0.12.9'
class OctaveKernel(ProcessMetaKernel):
implementation = 'Octave Kernel'
implementation_version = __version__,
language = 'octave'
language_version = '0.1',
banner = "Octave Kernel"
language_info = {
'mimetype': 'text/x-octave',
'name': 'octave_kernel',
'file_extension': '.m',
'help_links': MetaKernel.help_links,
}
_setup = """
more off;
set(0, 'defaultfigurepaperunits', 'inches');
set(0, 'defaultfigureunits', 'inches');
"""
_first = True
_banner = None
@property
def banner(self):
if self._banner is None:
banner = subprocess.check_output(['octave', '--version'])
self._banner = banner.decode('utf-8')
return self._banner
def makeWrapper(self):
"""Start an Octave process and return a :class:`REPLWrapper` object.
"""
if os.name == 'nt':
orig_prompt = u(chr(3))
prompt_cmd = u('disp(char(3))')
change_prompt = None
else:
orig_prompt = u('octave.*>')
prompt_cmd = None
change_prompt = u("PS1('{0}'); PS2('{1}')")
self._first = True
executable = os.environ.get('OCTAVE_EXECUTABLE', 'octave')
try:
info = subprocess.check_output([executable, '--version'])
if 'version 4' in info.decode('utf-8').lower():
executable += ' --no-gui'
except OSError: # pragma: no cover
pass
return REPLWrapper(executable, orig_prompt, change_prompt,
prompt_emit_cmd=prompt_cmd)
def do_execute_direct(self, code):
if self._first:
self._first = False
if sys.platform == 'darwin':
self.plot_settings['format'] = 'svg'
self.handle_plot_settings()
super(OctaveKernel, self).do_execute_direct(self._setup)
if os.name != 'nt':
msg = ('may not be able to display plots properly '
'without gnuplot, please install it '
'(gnuplot-x11 on Linux)')
try:
subprocess.check_call(['gnuplot', '--version'])
except subprocess.CalledProcessError:
self.Error(msg)
resp = super(OctaveKernel, self).do_execute_direct(code)
if self.plot_settings.get('backend', None) == 'inline':
plot_dir = tempfile.mkdtemp()
self._make_figs(plot_dir)
for fname in os.listdir(plot_dir):
filename = os.path.join(plot_dir, fname)
try:
if fname.lower().endswith('.svg'):
im = SVG(filename)
else:
im = Image(filename)
self.Display(im)
except Exception as e:
self.Error(e)
return resp
def get_kernel_help_on(self, info, level=0, none_on_fail=False):
obj = info.get('help_obj', '')
if not obj or len(obj.split()) > 1:
if none_on_fail:
return None
else:
return ""
resp = self.do_execute_direct('help %s' % obj)
return resp
def get_completions(self, info):
"""
Get completions from kernel based on info dict.
"""
cmd = 'completion_matches("%s")' % info['obj']
resp = self.do_execute_direct(cmd)
return resp.splitlines()
def handle_plot_settings(self):
"""Handle the current plot settings"""
settings = self.plot_settings
if settings.get('format', None) is None:
settings.clear()
settings.setdefault('backend', 'inline')
settings.setdefault('format', 'svg')
settings.setdefault('size', '560,420')
cmds = []
self._plot_fmt = settings['format']
if settings['backend'] == 'inline':
cmds.append("set(0, 'defaultfigurevisible', 'off');")
cmds.append("graphics_toolkit('gnuplot');")
else:
cmds.append("set(0, 'defaultfigurevisible', 'on');")
cmds.append("graphics_toolkit('%s');" % settings['backend'])
width, height = 560, 420
if isinstance(settings['size'], tuple):
width, height = settings['size']
elif settings['size']:
try:
width, height = settings['size'].split(',')
width, height = int(width), int(height)
except Exception as e:
self.Error('Error setting plot settings: %s' % e)
size = "set(0, 'defaultfigurepaperposition', [0 0 %s %s]);"
cmds.append(size % (width / 150., height / 150.))
self.do_execute_direct('\n'.join(cmds))
def _make_figs(self, plot_dir):
cmd = """
figHandles = get(0, 'children');
for fig=1:length(figHandles);
h = figHandles(fig);
filename = fullfile('%s', ['OctaveFig', sprintf('%%03d', fig)]);
saveas(h, [filename, '.%s']);
close(h);
end;
""" % (plot_dir, self._plot_fmt)
super(OctaveKernel, self).do_execute_direct(cmd.replace('\n', ''))
if __name__ == '__main__':
try:
from ipykernel.kernelapp import IPKernelApp
except ImportError:
from IPython.kernel.zmq.kernelapp import IPKernelApp
IPKernelApp.launch_instance(kernel_class=OctaveKernel)
|
Python
| 0
|
@@ -221,9 +221,10 @@
.12.
-9
+10
'%0A%0A%0A
|
0623212baaccb938e19891a50cca58b33b339f9c
|
Improve version handling
|
oddt/__init__.py
|
oddt/__init__.py
|
"""Open Drug Discovery Toolkit
==============================
Universal and easy to use resource for various drug discovery tasks,
ie docking, virutal screening, rescoring.
Attributes
----------
toolkit : module,
Toolkits backend module, currenlty OpenBabel [ob] and RDKit [rdk].
This setting is toolkit-wide, and sets given toolkit as default
"""
from __future__ import absolute_import
import os
import subprocess
import warnings
try:
from oddt.toolkits import ob
except ImportError as e:
ob = None
try:
from oddt.toolkits import rdk
except ImportError as e:
rdk = None
toolkit = None
if 'ODDT_TOOLKIT' in os.environ:
if os.environ['ODDT_TOOLKIT'] in ['ob', 'openbabel']:
if ob is None:
warnings.warn('OpenBabel toolkit is forced by ODDT_TOOLKIT, '
'but can\'t be imported')
toolkit = ob
elif os.environ['ODDT_TOOLKIT'] in ['rdk', 'rdkit']:
if rdk is None:
warnings.warn('RDKit toolkit is forced by ODDT_TOOLKIT, '
'but can\'t be imported')
toolkit = rdk
else:
raise EnvironmentError('ODDT_TOOLKIT is set to invalid value: "%s". '
'Use one of ["ob", "openbabel"] for OpenBabel '
'or ["rdk", "rdkit"] for RDKit' % os.environ['ODDT_TOOLKIT'])
elif ob:
toolkit = ob
elif rdk:
toolkit = rdk
else:
warnings.warn('No toolkit is present. Install OpenBabel or RDKit')
def get_version():
home = os.path.dirname(__file__)
git_v = None
v = '0.4.1'
if os.path.isdir(home + '/../.git'):
try:
git_v = str(subprocess.check_output(['git',
'describe',
'--tags'], cwd=home).strip())
except subprocess.CalledProcessError: # catch errors, eg. no git installed
pass
if git_v:
v = git_v
return v
__version__ = get_version()
__all__ = ['toolkit']
def random_seed(i):
"""
Set global random seed for all underlying components.
Use 'brute-force' approach, by setting undelying libraries' seeds.
Parameters
----------
i: int
integer used as seed for random number generators
"""
from numpy.random import seed as np_seed
from random import seed as python_seed
# python's random module
python_seed(i)
# numpy random module
np_seed(i)
|
Python
| 0.000002
|
@@ -453,16 +453,29 @@
arnings%0A
+%0Aimport six%0A%0A
try:%0A
@@ -1685,12 +1685,8 @@
v =
-str(
subp
@@ -1713,20 +1713,16 @@
%5B'git',%0A
-
@@ -1815,20 +1815,16 @@
-
'--tags'
@@ -1843,16 +1843,97 @@
.strip()
+%0A if git_v and six.PY3:%0A git_v = git_v.decode('latin-1'
)%0A
|
f4d41f9a75f464dcf2dca2953536ed28c6221b33
|
numPages should be an int
|
server/central_psparser.py
|
server/central_psparser.py
|
import logging
import json
import re
class PSParser():
def __init__(self):
self.logger = logging.getLogger("PSParser")
self.logger.info("Loaded PostScript Parser")
def __getPSFromJID(self, jid):
jobFile = open(jid, 'r')
job = json.load(jobFile)
jobFile.close()
return job["postscript"]
def isDuplex(self, jid):
ps = self.__getPSFromJID(jid)
if("/Duplex true" in ps):
self.logger.debug("job %s is duplex enabled", jid)
return True
else:
self.logger.debug("job %s is not duplex enabled", jid)
return False
def pageCount(self, jid):
ps = self.__getPSFromJID(jid)
numPages = None
self.logger.debug("Computing page count for %s", jid)
rgxresult = re.search('%%Pages: [0-9]*', ps)
logging.debug("rgxresult: {0}".format(rgxresult.group(0)))
if(rgxresult != None) :
numPages = re.search('%%Pages: [0-9]*', ps).group(0).split(" ")[1]
self.logger.debug("File is adobe compliant, suspect to be {0} pages".format(numPages))
else:
self.logger.error("File is not adobe compliant, page count indeterminate.")
return numPages
|
Python
| 0.999947
|
@@ -972,16 +972,20 @@
Pages =
+int(
re.searc
@@ -1031,16 +1031,17 @@
(%22 %22)%5B1%5D
+)
%0A
|
94350201979e6c4a313666da05ce8bcaac7bd0f2
|
Update docstring
|
openmc/source.py
|
openmc/source.py
|
from numbers import Real
from xml.etree import ElementTree as ET
import openmc.checkvalue as cv
from openmc.stats.multivariate import UnitSphere, Spatial
from openmc.stats.univariate import Univariate
from ._xml import get_text
class Source:
"""Distribution of phase space coordinates for source sites.
Parameters
----------
space : openmc.stats.Spatial
Spatial distribution of source sites
angle : openmc.stats.UnitSphere
Angular distribution of source sites
energy : openmc.stats.Univariate
Energy distribution of source sites
filename : str
Source file from which sites should be sampled
library : str
Path to a custom source library
parameters : str
Parameters to be provided to the custom source
.. versionadded:: 0.12
strength : float
Strength of the source
particle : {'neutron', 'photon'}
Source particle type
Attributes
----------
space : openmc.stats.Spatial or None
Spatial distribution of source sites
angle : openmc.stats.UnitSphere or None
Angular distribution of source sites
energy : openmc.stats.Univariate or None
Energy distribution of source sites
file : str or None
Source file from which sites should be sampled
library : str or None
Path to a custom source library
parameters : str
Parameters to be provided to the custom source
strength : float
Strength of the source
particle : {'neutron', 'photon'}
Source particle type
"""
def __init__(self, space=None, angle=None, energy=None, filename=None,
library=None, parameters=None, strength=1.0, particle='neutron'):
self._space = None
self._angle = None
self._energy = None
self._file = None
self._library = None
self._parameters = None
if space is not None:
self.space = space
if angle is not None:
self.angle = angle
if energy is not None:
self.energy = energy
if filename is not None:
self.file = filename
if library is not None:
self.library = library
if parameters is not None:
self.parameters = parameters
self.strength = strength
self.particle = particle
@property
def file(self):
return self._file
@property
def library(self):
return self._library
@property
def parameters(self):
return self._parameters
@property
def space(self):
return self._space
@property
def angle(self):
return self._angle
@property
def energy(self):
return self._energy
@property
def strength(self):
return self._strength
@property
def particle(self):
return self._particle
@file.setter
def file(self, filename):
cv.check_type('source file', filename, str)
self._file = filename
@library.setter
def library(self, library_name):
cv.check_type('library', library_name, str)
self._library = library_name
@parameters.setter
def parameters(self, parameters_path):
cv.check_type('parameters', parameters_path, str)
self._parameters = parameters_path
@space.setter
def space(self, space):
cv.check_type('spatial distribution', space, Spatial)
self._space = space
@angle.setter
def angle(self, angle):
cv.check_type('angular distribution', angle, UnitSphere)
self._angle = angle
@energy.setter
def energy(self, energy):
cv.check_type('energy distribution', energy, Univariate)
self._energy = energy
@strength.setter
def strength(self, strength):
cv.check_type('source strength', strength, Real)
cv.check_greater_than('source strength', strength, 0.0, True)
self._strength = strength
@particle.setter
def particle(self, particle):
cv.check_value('source particle', particle, ['neutron', 'photon'])
self._particle = particle
def to_xml_element(self):
"""Return XML representation of the source
Returns
-------
element : xml.etree.ElementTree.Element
XML element containing source data
"""
element = ET.Element("source")
element.set("strength", str(self.strength))
if self.particle != 'neutron':
element.set("particle", self.particle)
if self.file is not None:
element.set("file", self.file)
if self.library is not None:
element.set("library", self.library)
if self.parameters is not None:
element.set("parameters", self.parameters)
if self.space is not None:
element.append(self.space.to_xml_element())
if self.angle is not None:
element.append(self.angle.to_xml_element())
if self.energy is not None:
element.append(self.energy.to_xml_element('energy'))
return element
@classmethod
def from_xml_element(cls, elem):
"""Generate source from an XML element
Parameters
----------
elem : xml.etree.ElementTree.Element
XML element
Returns
-------
openmc.Source
Source generated from XML element
"""
source = cls()
strength = get_text(elem, 'strength')
if strength is not None:
source.strength = float(strength)
particle = get_text(elem, 'particle')
if particle is not None:
source.particle = particle
filename = get_text(elem, 'file')
if filename is not None:
source.file = filename
library = get_text(elem, 'library')
if library is not None:
source.library = library
parameters = get_text(elem, 'parameters')
if parameters is not None:
source.parameters = parameters
space = elem.find('space')
if space is not None:
source.space = Spatial.from_xml_element(space)
angle = elem.find('angle')
if angle is not None:
source.angle = UnitSphere.from_xml_element(angle)
energy = elem.find('energy')
if energy is not None:
source.energy = Univariate.from_xml_element(energy)
return source
|
Python
| 0
|
@@ -777,24 +777,32 @@
ustom source
+ library
%0A%0A ..
@@ -1450,24 +1450,32 @@
ustom source
+ library
%0A strengt
|
edb04d8e0ae03c9244b7d934fd713efbb94d5a58
|
Add api url to album and link
|
opps/api/urls.py
|
opps/api/urls.py
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
from django.conf.urls import patterns, url, include
from tastypie.api import Api
from opps.containers.api import Container
from opps.articles.api import Post
from .conf import settings
_api = Api(api_name=settings.OPPS_API_NAME)
_api.register(Container())
_api.register(Post())
urlpatterns = patterns(
'',
url(r'^', include(_api.urls)),
)
|
Python
| 0
|
@@ -198,16 +198,29 @@
ort Post
+, Album, Link
%0A%0Afrom .
@@ -335,16 +335,61 @@
Post())%0A
+_api.register(Album())%0A_api.register(Link())%0A
%0A%0Aurlpat
|
94261ea9fd576a1d0b6ec59d754da94f5c32efcc
|
Allow to `PUT` linked
|
shanghai/actions/linked.py
|
shanghai/actions/linked.py
|
from django.http import HttpResponseNotFound, HttpResponseBadRequest
from django.db import models
from shanghai.http import HttpResponseNoContent
class LinkedMixin(object):
def get_linked_relationship(self):
return self.relationship_for(self.link)
def get_linked_resource(self, relationship=None):
if not relationship:
relationship = self.get_linked_relationship()
return self.api.resource_for(relationship.target)
def get_linked_serializer(self):
resource = self.get_linked_relationship()
return resource.serializer
def get_linked_data(self):
qs = self.get_queryset()
try:
obj = qs.get(pk=self.pk)
except models.ObjectDoesNotExist:
return None
else:
relationship = self.get_linked_relationship()
return relationship.get_from(obj)
def get_linked_input_data(self):
relationship = self.get_linked_relationship()
return self.input.get(relationship.target)
def get_linked(self):
data = self.get_linked_data()
if not data:
return HttpResponseNotFound()
serializer = self.get_linked_serializer()
return self.response(data, serializer=serializer)
def post_linked(self):
obj = self.get_object_data()
relationship = self.get_linked_relationship()
if not obj:
return HttpResponseNotFound()
if relationship.is_belongs_to():
link = self.get_linked_data()
if link:
return HttpResponseBadRequest()
linked_pk = self.get_linked_input_data()
linked_resource = self.get_linked_resource()
linked_object = linked_resource.get_object_data(pk=linked_pk)
relationship.set_to(obj, linked_object)
obj.save()
return HttpResponseNoContent()
elif relationship.is_has_many():
related_manager = relationship.get_from(obj)
linked_pk = self.get_linked_input_data()
linked_resource = self.get_linked_resource()
if isinstance(linked_pk, str):
linked_pk = linked_pk,
linked_objects = linked_resource.get_objects_data(pk=linked_pk)
related_manager.add(*linked_objects)
return HttpResponseNoContent()
def delete_linked(self):
obj = self.get_object_data()
relationship = self.get_linked_relationship()
if not obj or not relationship.is_belongs_to():
return HttpResponseNotFound()
relationship.set_to(obj, None)
update_fields = [relationship.name]
obj.save(update_fields=update_fields)
return HttpResponseNoContent()
class LinkedObjectMixin(object):
def get_linked_object_data(self):
resource = self.get_linked_resource()
return resource.get_object_data(self.link_pk)
def delete_linked_object(self):
obj = self.get_object_data()
relationship = self.get_linked_relationship()
related_manager = relationship.get_from(obj)
linked_object = self.get_linked_object_data()
if not obj or not relationship.is_has_many():
return HttpResponseNotFound()
related_manager.remove(linked_object)
return HttpResponseNoContent()
class LinkedObjectsMixin(object):
def get_linked_objects_data(self):
resource = self.get_linked_resource()
return resource.get_objects_data(self.link_pk)
def delete_linked_objects(self):
obj = self.get_object_data()
relationship = self.get_linked_relationship()
related_manager = relationship.get_from(obj)
linked_objects = self.get_linked_objects_data()
if not obj or not relationship.is_has_many():
return HttpResponseNotFound()
related_manager.remove(*linked_objects)
return HttpResponseNoContent()
|
Python
| 0.000001
|
@@ -2360,24 +2360,952 @@
oContent()%0A%0A
+ def put_linked(self):%0A obj = self.get_object_data()%0A relationship = self.get_linked_relationship()%0A linked_resource = self.get_linked_resource()%0A linked_pk = self.get_linked_input_data()%0A%0A if not obj:%0A return HttpResponseNotFound()%0A%0A if relationship.is_belongs_to():%0A linked_obj = None%0A%0A if linked_pk:%0A linked_obj = linked_resource.get_object_data(linked_pk)%0A%0A if not linked_obj:%0A return HttpResponseNotFound%0A%0A relationship.set_to(obj, linked_obj)%0A obj.save(update_fields=%5Brelationship.name%5D)%0A%0A elif relationship.is_has_many():%0A linked_objects = list()%0A%0A if len(linked_pk):%0A linked_objects = linked_resource.get_objects_data(pk=linked_pk)%0A%0A relationship.set_to(obj, linked_objects)%0A%0A return HttpResponseNoContent()%0A%0A
def dele
|
3df0f14a9e2625081a1b1f51aef997d18b4a9b50
|
Remove Logging
|
pabiana/brain.py
|
pabiana/brain.py
|
import importlib
import logging
import multiprocessing as mp
import os
import signal
from os import path
import pip
from . import _default_clock, load_interfaces, repo
def main(*args):
args = list(args)
stop_pip = False
if '-X' in args:
stop_pip = True
args.remove('-X')
if '-C' in args:
args.append('clock:clock')
args.remove('-C')
if len(args) > 1:
logging.info('Starting %s processes', len(args))
signal.signal(signal.SIGINT, lambda *args, **kwargs: None)
mp.set_start_method('spawn')
for module_area_name in args:
process = mp.Process(target=run, args=(module_area_name, stop_pip))
process.start()
else:
run(*args, stop_pip=stop_pip)
def run(module_area_name, stop_pip=False):
module_name, area_name = module_area_name.split(':')
repo['base-path'] = os.getcwd()
repo['module-name'] = module_name
repo['area-name'] = area_name
intf_path = path.join(repo['base-path'], 'interfaces.json')
if path.isfile(intf_path):
load_interfaces(intf_path)
req_path = path.join(repo['base-path'], module_name, 'requirements.txt')
if not stop_pip and path.isfile(req_path):
pip.main(['install', '--upgrade', '-r', req_path])
try:
mod = importlib.import_module(module_name)
except ImportError:
logging.info('Import Error %s:%s', module_name, area_name)
if module_name is 'clock' and area_name is 'clock':
mod = _default_clock
logging.info('Module %s', mod)
else:
raise
if hasattr(mod, 'setup'):
mod.setup()
if hasattr(mod, 'area'):
if hasattr(mod, 'config'):
params = {'clock_name': mod.config['clock-name']}
if 'clock-slot' in mod.config:
if mod.config['clock-slot'] is not None:
params['clock_slot'] = mod.config['clock-slot']
if 'subscriptions' in mod.config:
if mod.config['subscriptions'] is not None:
params['subscriptions'] = mod.config['subscriptions']
mod.area.setup(**params)
if 'context-values' in mod.config:
mod.area.context.update(mod.config['context-values'])
mod.area.run()
elif hasattr(mod, 'clock'):
if hasattr(mod, 'config'):
params = {}
if 'timeout' in mod.config:
if mod.config['timeout'] is not None:
params['timeout'] = mod.config['timeout']
if 'use-template' in mod.config:
if mod.config['use-template'] is not None:
params['use_template'] = mod.config['use-template']
mod.clock.setup(**params)
mod.clock.run()
elif hasattr(mod, 'runner'):
if hasattr(mod.runner, 'setup'):
params = {}
if hasattr(mod, 'config'):
params.update(mod.config)
mod.runner.setup(**params)
mod.runner.run()
|
Python
| 0.000002
|
@@ -1236,86 +1236,25 @@
:%0A%09%09
-logging.info('Import Error %25s:%25s', module_name, area_name)%0A%09%09if module_name is
+if module_name ==
'cl
@@ -1272,18 +1272,18 @@
ea_name
-is
+==
'clock'
@@ -1312,42 +1312,8 @@
ock%0A
-%09%09%09logging.info('Module %25s', mod)%0A
%09%09el
|
e82d477194393ff3142f6c25c5db4c7b7f2a98a5
|
Call ConsoleViewer init
|
simpleai/search/viewers.py
|
simpleai/search/viewers.py
|
# coding: utf-8
from os import path
from threading import Thread
from time import sleep
class DummyViewer(object):
def start(self):
pass
def new_iteration(self, fringe):
pass
def chosen_node(self, node, is_goal):
pass
def expanded(self, node, successors):
pass
class ConsoleViewer(DummyViewer):
def __init__(self, interactive=True):
self.interactive = interactive
def pause(self):
if self.interactive:
raw_input('> press Enter ')
def output(self, *args):
print ' '.join(map(str, args))
def new_iteration(self, fringe):
self.output(' **** New iteration ****')
self.output(len(fringe), 'elements in fringe:', fringe)
self.pause()
def chosen_node(self, node, is_goal):
self.output('Chosen node:', node)
if is_goal:
self.output('Is goal!')
else:
self.output('Not goal')
self.pause()
def expanded(self, node, successors):
self.output('Expand:', node)
self.output(len(successors), 'successors:', successors)
self.pause()
class WebViewer(ConsoleViewer):
def __init__(self, host='127.0.0.1', port=8000):
self.host = host
self.port = port
self.paused = True
self.events = []
web_template_path = path.join(path.dirname(__file__), 'web_viewer.html')
self.web_template = open(web_template_path).read()
def start(self):
from bottle import route, run
route('/')(self.web_status)
route('/next')(self.web_next)
t = Thread(target=run, kwargs=dict(host=self.host, port=self.port))
t.daemon = True
t.start()
self.pause()
def web_status(self):
from bottle import template
return template(self.web_template, events=self.events)
def web_next(self):
from bottle import redirect
self.paused = False
while not self.paused:
sleep(0.1)
redirect('/')
def pause(self):
self.paused = True
while self.paused:
sleep(0.1)
def output(self, *args):
self.events.append(' '.join(map(str, args)))
|
Python
| 0
|
@@ -1219,16 +1219,74 @@
=8000):%0A
+ super(WebViewer, self).__init__(interactive=True)%0A
|
e165d16660cb10a7008be2b7566a6db471dafde0
|
Fixing a typo that handles generating new mailbox names.
|
django_mailbox/management/commands/processincomingmessage.py
|
django_mailbox/management/commands/processincomingmessage.py
|
import email
import logging
import rfc822
import sys
from django.core.management.base import BaseCommand
from django_mailbox.models import Mailbox
logger = logging.getLogger(__name__)
logging.basicConfig(level=logging.INFO)
class Command(BaseCommand):
def handle(self, *args, **options):
message = email.message_from_string(sys.stdin.read())
if message:
mailbox = self.get_mailbox_for_message(message)
mailbox.process_incoming_message(message)
logger.info("Message received from %s" % message['from'])
else:
logger.warning("Message not processable.")
def get_mailbox_for_message(self, message):
email_address = rfc822.parseaddr(message['from'][1][0:255])
mailbox, created = Mailbox.objects.get_or_create(
name=email_address,
)
return mailbox
|
Python
| 0.999963
|
@@ -729,16 +729,17 @@
%5B'from'%5D
+)
%5B1%5D%5B0:25
@@ -740,17 +740,16 @@
%5D%5B0:255%5D
-)
%0A
|
cb117d449c3e5d611951a5d1a9efbafd34525238
|
fix finding files
|
chandra_suli/make_lightcurve.py
|
chandra_suli/make_lightcurve.py
|
#!/usr/bin/env python
"""
Generate lightcurves for each candidate given a list of candidates
"""
import argparse
import os
import sys
import numpy as np
import astropy.io.fits as pyfits
import matplotlib.pyplot as plt
import seaborn as sbs
from chandra_suli import find_files
from chandra_suli import logging_system
from chandra_suli.run_command import CommandRunner
from chandra_suli.sanitize_filename import sanitize_filename
if __name__=="__main__":
parser = argparse.ArgumentParser(description='Generate light curves for transients listed in a'
'master list')
parser.add_argument("--masterfile", help="Path to file containing list of transients",
required=True, type=str)
parser.add_argument("--data_path", help="Path to directory containing data of all obsids", required = True,
type=str)
# Get the logger
logger = logging_system.get_logger(os.path.basename(sys.argv[0]))
# Get the command runner
runner = CommandRunner(logger)
args = parser.parse_args()
# Get data from masterfile
data_path = sanitize_filename(args.data_path)
masterfile = sanitize_filename(args.masterfile)
transient_data = np.array(np.recfromtxt(masterfile, names=True), ndmin=1)
for transient in transient_data:
obsid = transient['Obsid']
ccd = transient['CCD']
candidate = transient['Candidate']
tstart = transient['Tstart']
tstop = transient['Tstop']
# use region file from xtdac and cut region
regions = find_files.find_files(str(obsid), "ccd_%s_%s_filtered_candidate_%s.reg" %(ccd, obsid, candidate))
event_file = find_files.find_files(str(obsid), "ccd_%s_%s_filtered.fits" %(ccd, obsid))[0]
if len(regions) != 1:
raise IOError("More than one region file found")
else:
region = regions[0]
evt_reg = "ccd_%s_%s_filtered_candidate_%s_reg.fits" %(ccd, obsid, candidate)
cmd_line = "ftcopy \'%s[EVENTS][regfilter(\"%s\")]\' %s clobber=yes " %(event_file, region, evt_reg)
runner.run(cmd_line)
data = pyfits.getdata(evt_reg)
sbs.set(font_scale=2)
sbs.set_style('white')
fig = plt.figure(figsize=(15, 15 / 1.33333))
duration = tstop - tstart
bins = np.arange(-10 * duration, 10 * duration, duration)
time = data.field("TIME")
rate, obins, _ = plt.hist(time - tstart, bins, weights=np.ones(time.shape[0]) * 1.0 / duration,
color='white')
# Centers of the bins
tc = (bins[:-1] + bins[1:]) / 2.0
plt.errorbar(tc, rate, yerr=np.sqrt(rate * duration) / duration, fmt='.')
plt.axvline(0, linestyle=':')
plt.axvline(duration, linestyle=':')
plt.xlabel("Time since trigger (s)")
plt.ylabel("Count rate (cts/s)")
plt.title("Transient Lightcurve\nObsID = %s, CCD ID = %s, Candidate=%s\n" %(obsid, ccd, candidate))
plot_file = "ccd_%s_%s_candidate_%s_lightcurve.png" %(ccd, obsid, candidate)
plt.savefig(plot_file)
os.rename(plot_file, os.path.join(data_path, str(obsid), plot_file))
os.rename(evt_reg,os.path.join(data_path, str(obsid), evt_reg))
plt.close()
|
Python
| 0.000366
|
@@ -1612,32 +1612,56 @@
iles.find_files(
+os.path.join(data_path,
str(obsid), %22ccd
@@ -1646,32 +1646,33 @@
path, str(obsid)
+)
, %22ccd_%25s_%25s_fil
@@ -1764,16 +1764,40 @@
d_files(
+os.path.join(data_path,
str(obsi
@@ -1798,16 +1798,17 @@
r(obsid)
+)
, %22ccd_%25
|
cfa8b88e3d86e560415260eb596dd3bbdab52736
|
Fix test of auto_backup_download
|
auto_backup_download/tests/test_auto_backup_download.py
|
auto_backup_download/tests/test_auto_backup_download.py
|
# -*- coding: utf-8 -*-
# Copyright 2017 Onestein (<http://www.onestein.eu>)
# License AGPL-3.0 or later (http://www.gnu.org/licenses/agpl.html).
from odoo.tests import common
from odoo.exceptions import Warning
class TestAutoBackupDownload(common.TransactionCase):
def test_01_create_not_existing(self):
backup_dir = self.env.ref(
'auto_backup_download.default_backup_directory')
# test method get_dir()
with self.assertRaises(Warning):
backup_dir.get_dir()
# test computed field file_ids
self.assertEqual(len(backup_dir.file_ids), 0)
# test count list of directory
self.assertEqual(len(backup_dir.file_ids), backup_dir.file_count)
# test reload list of directory
with self.assertRaises(Warning):
backup_dir.reload()
self.assertEqual(len(backup_dir.file_ids), 0)
self.assertEqual(len(backup_dir.file_ids), backup_dir.file_count)
def test_02_create_existing(self):
backup_dir = self.env.ref(
'auto_backup_download.default_backup_directory')
self.env['db.backup'].create({
'name': 'Test Backup 1',
'folder': '/tmp'
})
# test method get_dir()
full_dir = backup_dir.get_dir()
self.assertEqual(full_dir[-1], '/')
# test computed field file_ids
self.assertGreaterEqual(len(backup_dir.file_ids), 0)
# test count list of directory
self.assertEqual(len(full_dir.file_ids), full_dir.file_count)
# test reload list of directory
full_dir.reload()
self.assertEqual(len(full_dir.file_ids), full_dir.file_count)
|
Python
| 0.000003
|
@@ -529,440 +529,104 @@
est
-computed field file_ids%0A self.assertEqual(len(backup_dir.file_ids), 0)%0A%0A # test count list of directory%0A self.assertEqual(len(backup_dir.file_ids), backup_dir.file_count)%0A%0A # test reload list of directory%0A with self.assertRaises(Warning):%0A backup_dir.reload()%0A self.assertEqual(len(backup_dir.file_ids), 0)%0A self.assertEqual(len(backup_dir.file_ids), backup_dir.file_count
+reload list of directory%0A with self.assertRaises(Warning):%0A backup_dir.reload(
)%0A%0A
|
79ec3f3ebabd1625830952ecdec1b0761c2b5324
|
Rewrite serialization of Attempt submit response
|
web/attempts/rest.py
|
web/attempts/rest.py
|
from django.db import transaction
from rest_framework import validators, decorators, status
from rest_framework.authentication import TokenAuthentication
from rest_framework.response import Response
from rest_framework.serializers import ModelSerializer, Field
from rest_framework.viewsets import ModelViewSet
from .models import Attempt
class WritableJSONField(Field):
def to_internal_value(self, data):
return data
class AttemptSerializer(ModelSerializer):
"""
Serialize an Attempt object.
"""
secret = WritableJSONField(write_only=True, required=False)
class Meta:
model = Attempt
@staticmethod
def check_secret(validated_data):
# Check and remove secret from the validated_data dictionary
user_secret = validated_data.pop('secret', '[]')
secret_matches = validated_data['part'].check_secret(user_secret)[0]
if not secret_matches:
validated_data['valid'] = False
def create(self, validated_data):
self.check_secret(validated_data)
return super(AttemptSerializer, self).create(validated_data)
def update(self, instance, validated_data):
self.check_secret(validated_data)
return super(AttemptSerializer, self).update(instance, validated_data)
class AttemptViewSet(ModelViewSet):
"""
A viewset for viewing and editing Attempt instances.
"""
serializer_class = AttemptSerializer
queryset = Attempt.objects.all()
@decorators.list_route(methods=['post'], authentication_classes=[TokenAuthentication])
@transaction.atomic
def submit(self, request):
serializer = AttemptSerializer(data=request.data, many=True, partial=True)
def _f(validator):
return not isinstance(validator, validators.UniqueTogetherValidator)
serializer.child.validators = filter(_f, serializer.child.validators)
if serializer.is_valid():
attempts = []
for attempt_data in serializer.validated_data:
AttemptSerializer.check_secret(attempt_data)
attempt, _ = Attempt.objects.update_or_create(
user=request.user,
part=attempt_data['part'],
defaults=attempt_data)
attempts.append({
'part': attempt.part.pk,
'solution': attempt.solution,
'valid': attempt.valid,
'feedback': attempt.feedback,
})
response = {'attempts': attempts}
return Response(response, status=status.HTTP_200_OK)
else:
return Response(serializer.errors, status=status.HTTP_400_BAD_REQUEST)
|
Python
| 0
|
@@ -1,12 +1,24 @@
+import json%0A
from django.
@@ -436,16 +436,385 @@
rn data%0A
+ %0A%0Aclass JSONStringField(Field):%0A %22%22%22%0A Store a JSON object in a TextField.%0A When object is received store its json dump.%0A When object is retrieved load JSON object from string representation. %0A %22%22%22%0A def to_internal_value(self, data):%0A return json.dumps(data)%0A %0A def to_representation(self, value):%0A return json.loads(value)%0A
%0A%0Aclass
@@ -961,16 +961,49 @@
d=False)
+%0A feedback = JSONStringField()
%0A%0A cl
@@ -2505,13 +2505,37 @@
empt
-, _ =
+s.append(%0A
Att
@@ -2697,32 +2697,36 @@
ts=attempt_data)
+%5B0%5D)
%0A
@@ -2726,283 +2726,58 @@
- attempts.append(%7B%0A 'part': attempt.part.pk,%0A 'solution': attempt.solution,%0A 'valid': attempt.valid,%0A 'feedback': attempt.feedback,%0A %7D)%0A response = %7B'attempts': attempts%7D
+data = AttemptSerializer(attempts, many=True).data
%0A
@@ -2801,24 +2801,20 @@
esponse(
-response
+data
, status
|
c3996af1f7b201355d1cbcd6ef4c8fe420c8b67e
|
Fix lint
|
solutions/uri/1028/1028.py
|
solutions/uri/1028/1028.py
|
import sys
def gcd(a, b):
while b > 0:
a, b = b, a % b
return a
n = int(input())
for line in range(n):
a, b = map(int, input().split())
print(gcd(a, b))
|
Python
| 0.000032
|
@@ -1,17 +1,4 @@
-import sys%0A%0A%0A
def
|
f43ffd907302dc87a3682ada1d36d3a195970ec4
|
Refactor URLWrapper subclasses to make slugs settable again (#2242)
|
pelican/urlwrappers.py
|
pelican/urlwrappers.py
|
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
import functools
import logging
import os
import six
from pelican.utils import python_2_unicode_compatible, slugify
logger = logging.getLogger(__name__)
@python_2_unicode_compatible
@functools.total_ordering
class URLWrapper(object):
def __init__(self, name, settings):
self.settings = settings
self._name = name
self._slug = None
self._slug_from_name = True
@property
def name(self):
return self._name
@name.setter
def name(self, name):
self._name = name
# if slug wasn't explicitly set, it needs to be regenerated from name
# so, changing name should reset slug for slugification
if self._slug_from_name:
self._slug = None
@property
def slug(self):
if self._slug is None:
self._slug = slugify(self.name,
self.settings.get('SLUG_SUBSTITUTIONS', ()))
return self._slug
@slug.setter
def slug(self, slug):
# if slug is expliticly set, changing name won't alter slug
self._slug_from_name = False
self._slug = slug
def as_dict(self):
d = self.__dict__
d['name'] = self.name
d['slug'] = self.slug
return d
def __hash__(self):
return hash(self.slug)
def _normalize_key(self, key):
subs = self.settings.get('SLUG_SUBSTITUTIONS', ())
return six.text_type(slugify(key, subs))
def __eq__(self, other):
if isinstance(other, self.__class__):
return self.slug == other.slug
if isinstance(other, six.text_type):
return self.slug == self._normalize_key(other)
return False
def __ne__(self, other):
if isinstance(other, self.__class__):
return self.slug != other.slug
if isinstance(other, six.text_type):
return self.slug != self._normalize_key(other)
return True
def __lt__(self, other):
if isinstance(other, self.__class__):
return self.slug < other.slug
if isinstance(other, six.text_type):
return self.slug < self._normalize_key(other)
return False
def __str__(self):
return self.name
def __repr__(self):
return '<{} {}>'.format(type(self).__name__, repr(self._name))
def _from_settings(self, key, get_page_name=False):
"""Returns URL information as defined in settings.
When get_page_name=True returns URL without anything after {slug} e.g.
if in settings: CATEGORY_URL="cat/{slug}.html" this returns
"cat/{slug}" Useful for pagination.
"""
setting = "%s_%s" % (self.__class__.__name__.upper(), key)
value = self.settings[setting]
if not isinstance(value, six.string_types):
logger.warning('%s is set to %s', setting, value)
return value
else:
if get_page_name:
return os.path.splitext(value)[0].format(**self.as_dict())
else:
return value.format(**self.as_dict())
page_name = property(functools.partial(_from_settings, key='URL',
get_page_name=True))
url = property(functools.partial(_from_settings, key='URL'))
save_as = property(functools.partial(_from_settings, key='SAVE_AS'))
class Category(URLWrapper):
@property
def slug(self):
if self._slug is None:
substitutions = self.settings.get('SLUG_SUBSTITUTIONS', ())
substitutions += tuple(self.settings.get('CATEGORY_SUBSTITUTIONS',
()))
self._slug = slugify(self.name, substitutions)
return self._slug
class Tag(URLWrapper):
def __init__(self, name, *args, **kwargs):
super(Tag, self).__init__(name.strip(), *args, **kwargs)
@property
def slug(self):
if self._slug is None:
substitutions = self.settings.get('SLUG_SUBSTITUTIONS', ())
substitutions += tuple(self.settings.get('TAG_SUBSTITUTIONS', ()))
self._slug = slugify(self.name, substitutions)
return self._slug
class Author(URLWrapper):
@property
def slug(self):
if self._slug is None:
self._slug = slugify(self.name,
self.settings.get('AUTHOR_SUBSTITUTIONS', ()))
return self._slug
|
Python
| 0
|
@@ -907,84 +907,34 @@
ame,
-%0A self.settings.get('SLUG_SUBSTITUTIONS', ()
+ self._slug_substitutions(
))%0A
@@ -1130,24 +1130,120 @@
lug = slug%0A%0A
+ def _slug_substitutions(self):%0A return self.settings.get('SLUG_SUBSTITUTIONS', ()))%0A%0A
def as_d
@@ -3472,38 +3472,24 @@
apper):%0A
- @property%0A
def
slug(sel
@@ -3484,62 +3484,43 @@
def
+_
slug
-(self):%0A if self._slug is None:%0A
+_substitutions(self):%0A
sub
@@ -3507,33 +3507,32 @@
(self):%0A
-
substitutions =
@@ -3567,36 +3567,32 @@
TITUTIONS', ())%0A
-
substitu
@@ -3653,151 +3653,42 @@
NS',
-%0A ()))%0A self._slug = slugify(self.name, substitutions)%0A return self._slug
+ ()))%0A return substitutions
%0A%0A%0Ac
@@ -3822,38 +3822,24 @@
wargs)%0A%0A
- @property%0A
def
slug(sel
@@ -3834,55 +3834,35 @@
def
+_
slug
-(self):%0A if self._slug is None:%0A
+_substitutions(self):%0A
@@ -3929,28 +3929,24 @@
())%0A
-
substitution
@@ -4012,84 +4012,28 @@
- self._slug = slugify(self.name, substitutions)%0A return self._slug
+return substitutions
%0A%0A%0Ac
@@ -4057,38 +4057,24 @@
apper):%0A
- @property%0A
def
slug(sel
@@ -4057,36 +4057,51 @@
apper):%0A def
+_
slug
+_substitutions
(self):%0A
@@ -4104,107 +4104,76 @@
-if self._slug is None:%0A self._slug = slugify(self.name,%0A
+# ??? Should this include SLUG_SUBSTITUTIONS as well?%0A return
sel
@@ -4218,32 +4218,5 @@
())
-)%0A return self._slug
%0A
|
36436f8afa6310802a3161eba9f7b00844bfe9c4
|
Add a FOR UPDATE NOWAIT at the beginning of the transaction.
|
base_location_geonames_import/wizard/geonames_import.py
|
base_location_geonames_import/wizard/geonames_import.py
|
# -*- encoding: utf-8 -*-
##############################################################################
#
# Base Location Geonames Import module for OpenERP
# Copyright (C) 2014 Akretion (http://www.akretion.com)
# @author Alexis de Lattre <alexis.delattre@akretion.com>
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
from openerp.osv import orm, fields
from openerp.tools.translate import _
import requests
import tempfile
import StringIO
import unicodecsv
import zipfile
import os
import logging
logger = logging.getLogger(__name__)
class better_zip_geonames_import(orm.TransientModel):
_name = 'better.zip.geonames.import'
_description = 'Import Better Zip from Geonames'
_columns = {
'country_id': fields.many2one('res.country', 'Country', required=True),
}
def _prepare_better_zip(
self, cr, uid, row, country_id, states, context=None):
'''This function is designed to be inherited'''
state_id = False
if states and row[4] and row[4] in states:
state_id = states[row[4].upper()]
if row[0] == 'FR' and 'CEDEX' in row[1]:
return False
vals = {
'name': row[1],
'city': row[2],
'state_id': state_id,
'country_id': country_id,
}
return vals
def create_better_zip(
self, cr, uid, row, country_id, country_code, states,
context=None):
bzip_id = False
if row[0] != country_code:
raise orm.except_orm(
_('Error:'),
_("The country code inside the file (%s) doesn't "
"correspond to the selected country (%s).")
% (row[0], country_code))
logger.debug('ZIP = %s - City = %s' % (row[1], row[2]))
if row[1] and row[2]:
vals = self._prepare_better_zip(
cr, uid, row, country_id, states, context=context)
if vals:
bzip_id = self.pool['res.better.zip'].create(
cr, uid, vals, context=context)
return bzip_id
def run_import(self, cr, uid, ids, context=None):
assert len(ids) == 1, 'Only one ID for the better zip import wizard'
bzip_obj = self.pool['res.better.zip']
wizard = self.browse(cr, uid, ids[0], context=context)
country_id = wizard.country_id.id
country_code = wizard.country_id.code.upper()
url = 'http://download.geonames.org/export/zip/%s.zip' % country_code
logger.info('Starting to download %s' % url)
res_request = requests.get(url)
if res_request.status_code != requests.codes.ok:
raise orm.except_orm(
_('Error:'),
_('Got an error %d when trying to download the file %s.')
% (res_request.status_code, url))
bzip_ids_to_delete = bzip_obj.search(
cr, uid, [('country_id', '=', country_id)], context=context)
if bzip_ids_to_delete:
bzip_obj.unlink(cr, uid, bzip_ids_to_delete, context=context)
logger.info(
'%d better zip entries deleted for country %s'
% (len(bzip_ids_to_delete), wizard.country_id.name))
state_ids = self.pool['res.country.state'].search(
cr, uid, [('country_id', '=', country_id)], context=context)
states = {}
# key = code of the state ; value = ID of the state in OpenERP
if state_ids:
states_r = self.pool['res.country.state'].read(
cr, uid, state_ids, ['code', 'country_id'], context=context)
for state in states_r:
states[state['code'].upper()] = state['id']
f_geonames = zipfile.ZipFile(StringIO.StringIO(res_request.content))
tempdir = tempfile.mkdtemp(prefix='openerp')
f_geonames.extract('%s.txt' % country_code, tempdir)
logger.info('The geonames zipfile has been decompressed')
data_file = open(os.path.join(tempdir, '%s.txt' % country_code), 'r')
data_file.seek(0)
logger.info(
'Starting to create the better zip entries %s state information'
% (states and 'with' or 'without'))
for row in unicodecsv.reader(
data_file, encoding='utf-8', delimiter=' '):
self.create_better_zip(
cr, uid, row, country_id, country_code, states,
context=context)
data_file.close()
logger.info(
'The wizard to create better zip entries from geonames '
'has been successfully completed.')
return True
|
Python
| 0
|
@@ -3726,16 +3726,155 @@
delete:%0A
+ cr.execute('SELECT id FROM res_better_zip WHERE id in %25s '%0A 'FOR UPDATE NOWAIT', (tuple(bzip_ids_to_delete), ))%0A
|
5fd1f7cbe9534a47c4dc837773f22f6f177fdcf5
|
Update affineHacker: fixed imports and typo
|
books/CrackingCodesWithPython/Chapter15/affineHacker.py
|
books/CrackingCodesWithPython/Chapter15/affineHacker.py
|
# Affine Cipher Hacker
# https://www.nostarch.com/crackingcodes/ (BSD Licensed)
import pyperclip, affineCipher, detectEnglish, cryptomath
SILENT_MODE = False
def main():
# You might want to copy & paste this text from the source code at
# https://www.nostarch.com/crackingcodes/.
myMessage = """5QG9ol3La6QI93!xQxaia6faQL9QdaQG1!!axQARLa!!A
uaRLQADQALQG93!xQxaGaAfaQ1QX3o1RQARL9Qda!AafARuQLX1LQALQI1
iQX3o1RN"Q-5!1RQP36ARu"""
hackedMessage = hackAffine(myMessage)
if hackedMessage != None:
# The plaintext is displayed on the screen. For the convenience of
# the user, we copy the text of the code to the clipboard:
print('Copying hacked message to clipboard:')
print(hackedMessage)
pyperclip.copy(hackedMessage)
else:
print('Failed to hack encryption.')
def hackAffine(message):
print('Hacking...')
# Python programs can be stopped at any time by pressing Ctrl-C (on
# Windows) or Ctrl-D (on macOS and Linux):
print('(Press Ctrl-C or Ctrl-D to quit at any time.)')
# Brute-force by looping through every possible key:
for key in range(len(affineCipher.SYMBOLS) ** 2):
keyA = affineCipher.getKeyParts(key)[0]
if cryptomath.gcd(keyA, len(affineCipher.SYMBOLS)) ! = 1:
continue
decryptedText = affineCipher.decryptMessage(key, message)
if not SILENT_MODE:
print('Tried Key %s... (%s)' % (key, decryptedText[:40]))
if detectEnglish.isEnglish(decryptedText):
# Check with the user if the decrypted key has been found:
print()
print('Possible encryption hack:')
print('Key: %s' % (key))
print('Decrypted message: ' + decryptedText[:200])
print()
print('Enter D for done, or just press Enter to continue hacking:')
response = input('> ')
if response.strip().upper().startswith('D'):
return decryptedText
return None
# If affineHacker.py is run (instead of imported as a module), call
# the main() function:
if __name__ == '__main__':
main()
|
Python
| 0
|
@@ -78,64 +78,308 @@
d)%0A%0A
-import pyperclip, affineCipher, detectEnglish, cryptomat
+from books.CrackingCodesWithPython.pyperclip import copy%0Afrom books.CrackingCodesWithPython.Chapter14.affineCipher import decryptMessage, SYMBOLS, getKeyParts%0Afrom books.CrackingCodesWithPython.Chapter13.cryptomath import gcd%0Afrom books.CrackingCodesWithPython.Chapter11.detectEnglish import isEnglis
h%0A%0AS
@@ -989,18 +989,8 @@
-pyperclip.
copy
@@ -1368,37 +1368,24 @@
n range(len(
-affineCipher.
SYMBOLS) **
@@ -1403,29 +1403,16 @@
keyA =
-affineCipher.
getKeyPa
@@ -1438,19 +1438,8 @@
if
-cryptomath.
gcd(
@@ -1448,29 +1448,16 @@
yA, len(
-affineCipher.
SYMBOLS)
@@ -1459,17 +1459,16 @@
BOLS)) !
-
= 1:%0A
@@ -1514,21 +1514,8 @@
t =
-affineCipher.
decr
@@ -1653,22 +1653,8 @@
if
-detectEnglish.
isEn
|
b6727f6bd9f3d8ffe59b17f157180a8fa5e61467
|
Fix typo.
|
py/vttest/fakezk_config.py
|
py/vttest/fakezk_config.py
|
# Copyright 2013 Google Inc. All Rights Reserved.
"""Generate a config file for fakezk topology."""
__author__ = 'enisoc@google.com (Anthony Yeh)'
import base64
import codecs
import json
class FakeZkConfig(object):
"""Create fakezk config for use as static topology for vtgate."""
def __init__(self, mysql_port, cell='test_cell', host='127.0.0.1'):
self.keyspaces = {}
self.served_from = {}
self.host = host
self.cell = cell
self.mysql_port = mysql_port
def add_shard(self, keyspace, shard, vt_port):
"""Add a shard to the config."""
# compute the start and end
start = ''
end = ''
if '-' in shard:
parts = shard.split('-', 2)
start = parts[0]
end = parts[1]
if keyspace not in self.keyspaces:
self.keyspaces[keyspace] = []
self.keyspaces[keyspace].append({
'shard': shard,
'vt_port': vt_port,
'start': start,
'end': end,
})
def add_redirect(self, from_keyspace, to_keyspace):
"""Set a keyspace to be ServedFrom another."""
self.served_from[from_keyspace] = to_keyspace
def keyspace_id_as_base64(self, s):
raw = codecs.decode(s, 'hex')
return base64.b64encode(raw)
def as_json(self):
"""Return the config as JSON. This is a proto3 version of SrkKeyspace."""
result = {}
tablet_types_str = ['master', 'replica', 'rdonly']
tablet_types_int = [2, 3, 4]
sharding_colname = 'keyspace_id'
sharding_coltype = 1
for keyspace, shards in self.keyspaces.iteritems():
shard_references = []
for shard in shards:
key_range = {}
if shard['start']:
key_range['start'] = self.keyspace_id_as_base64(shard['start'])
if shard['end']:
key_range['end'] = self.keyspace_id_as_base64(shard['end'])
shard_references.append({
'name': shard['shard'],
'key_range': key_range,
})
for dbtype in tablet_types_str:
path = '/zk/%s/vt/ns/%s/%s/%s' % (self.cell, keyspace,
shard['shard'], dbtype)
result[path] = {
'entries': [
{
'uid': 0,
'host': self.host,
'port_map': {
'mysql': self.mysql_port,
'vt': shard['vt_port'],
},
},
],
}
path = '/zk/%s/vt/ns/%s' % (self.cell, keyspace)
partitions = []
for tablet_type in tablet_types_int:
partitions.append({
'served_type': tablet_type,
'shard_references': shard_references,
})
result[path] = {
'partitions': partitions,
'sharding_column_name': sharding_colname,
'sharding_column_type': sharding_coltype,
}
for from_keyspace, to_keyspace in self.served_from.iteritems():
path = '/zk/%s/vt/ns/%s' % (self.cell, from_keyspace)
served_from = []
for dbtype in tablet_types_int:
served_from.append({
'tablet_type': dbtype,
'keyspace': to_keyspace,
})
result[path] = {
'served_from': served_from,
}
return json.dumps(result)
|
Python
| 0.001604
|
@@ -1290,17 +1290,17 @@
on of Sr
-k
+v
Keyspace
|
d607de07ae3aaa2a245b8eb90cb42ca3e29f6e33
|
add lambda sample
|
05.Function.py
|
05.Function.py
|
#-*- encoding: utf-8 -*-
# Error
#def func():
def func():
pass
def func(num, num1=1, num2=2):
print(num, num1, num2)
func(1, 3, 4) # 1 3 4
func(5) # 5 1 2
# Error
#func()
def func(**args):
for k, v in args.items():
print('key: ' + k, 'value: ' + v)
for k in args.keys():
print('key: ' + k, 'value: ' + args[k])
func(name = "rxb", age = "24")
def func(name, age):
print('name: ' + name, 'age: ' + age)
people = {"name": "rxb", "age": "24"}
func(**people) # name: rxb age: 24
def func(num, *args):
print(num)
for a in args:
print(a)
func(1, 2, 3, 4, 5, 6)
def func(num, num1):
print(num, num1)
func(num1 = 2, num = 1) # 1 2
d = {
"num": 3,
"num1": 4
}
func(**d) # 3 4
t = (4, 5)
func(*t) # 4 5
def func():
'''
The documentation of the func
'''
print("func")
print(func.__doc__)
l = lambda num1, num2: num1 + num2
print(l(2, 3)) # 5
|
Python
| 0.000001
|
@@ -928,12 +928,192 @@
2, 3)) # 5%0D%0A
+%0D%0Adef func2(func, num1, num2):%0D%0A%09return func(num1, num2)%0D%0A%0D%0Adef func(num1, num2):%0D%0A%09return num1 + num2%0D%0A%0D%0Aprint(func2(func, 3, 4)) # 7%0D%0A%0D%0Aprint(func2(lambda a, b: a - b, 7, 4)) # 3
|
e4c9ca4bb21539fe6677a082c9dbc39245785100
|
delete line
|
chainercv/experimental/links/model/pspnet/transforms.py
|
chainercv/experimental/links/model/pspnet/transforms.py
|
from __future__ import division
import math
import numpy as np
def convolution_crop(img, size, stride, return_param=False):
"""Strided cropping.
This extracts cropped images from the input. The cropped images are
extracted from the entire image, while taking a constant steps between
neighboring patches.
Args:
img (~numpy.ndarray): An image array to be cropped. This is in
CHW format.
size (tuple): The size of output image after cropping.
This value is :math:`(height, width)`.
stride (tuple): The stride between crops. This contains
two values: stride in the vertical and horizontal directions.
return_param (bool): If :obj:`True`, this function returns
information of slices.
Returns:
~numpy.ndarray or (~numpy.ndarray, dict):
If :obj:`return_param = False`,
returns an array :obj:`crop_imgs` that is a stack of cropped images.
If :obj:`return_param = True`,
returns a tuple whose elements are :obj:`crop_imgs, param`.
:obj:`param` is a dictionary of intermediate parameters whose
contents are listed below with key, value-type and the description
of the value.
* **y_slices** (*list slices*): Slices used to crop the input image.\
The relation below holds together with :obj:`x_slices`.
* **x_slices** (*list of slices*): Similar to :obj:`y_slices`.
* **crop_y_slices** (*list of slices*): This indicates the region of \
the cropped image that is actually extracted from the input. \
This is relevant only when borders of the input are cropped.
* **crop_x_slices** (*list of slices*): Similar to \
:obj:`crop_y_slices`.
.. code::
crop_img = crop_imgs[i][:, crop_y_slices[i], crop_x_slices[i]]
crop_img == img[:, y_slices[i], x_slices[i]]
Examples:
>>> import numpy as np
>>> from chainercv.datasets import VOCBboxDataset
>>> from chainercv.transforms import resize
>>> from chainercv.experimental.links.model.pspnet import \
... convolution_crop
>>>
>>> img, _, _ = VOCBboxDataset(year='2007')[0]
>>> img = resize(img, (300, 300))
>>> imgs, param = convolution_crop(
>>> img, (128, 128), (96, 96), return_param=True)
>>> # Restore the original image from the cropped images.
>>> output = np.zeros((3, 300, 300))
>>> count = np.zeros((300, 300))
>>> for i in range(len(imgs)):
>>> crop_y_slice = param['crop_y_slices'][i]
>>> crop_x_slice = param['crop_x_slices'][i]
>>> y_slice = param['y_slices'][i]
>>> x_slice = param['x_slices'][i]
>>> output[:, y_slice, x_slice] +=\
... imgs[i][:, crop_y_slice, crop_x_slice]
>>> count[y_slice, x_slice] += 1
>>> output = output / count[None]
>>> np.testing.assert_equal(output, img)
>>>
>>> # Visualization of the cropped images
>>> import matplotlib.pyplot as plt
>>> from chainercv.utils import tile_images
>>> from chainercv.visualizations import vis_image
>>> v_imgs = tile_images(imgs, 5, fill=122.5)
>>> vis_image(v_imgs)
>>> plt.show()
"""
_, H, W = img.shape
h = int(math.ceil((H - size[0]) / stride[0])) + 1
w = int(math.ceil((W - size[1]) / stride[1])) + 1
start_y = -(size[0] + stride[0] * (h - 1) - H) // 2
start_x = -(size[1] + stride[1] * (w - 1) - W) // 2
crop_imgs = []
y_slices = []
x_slices = []
crop_y_slices = []
crop_x_slices = []
for y in range(h):
for x in range(w):
y_min = y * stride[0] + start_y
x_min = x * stride[1] + start_x
y_max = y_min + size[0]
x_max = x_min + size[1]
crop_y_min = np.abs(np.minimum(y_min, 0))
crop_x_min = np.abs(np.minimum(x_min, 0))
crop_y_max = size[0] - np.maximum(y_max - H, 0)
crop_x_max = size[1] - np.maximum(x_max - W, 0)
crop_img = np.zeros((img.shape[0], size[0], size[1]),
dtype=img.dtype)
y_slice = slice(max(y_min, 0), min(y_max, H))
x_slice = slice(max(x_min, 0), min(x_max, W))
crop_y_slice = slice(crop_y_min, crop_y_max)
crop_x_slice = slice(crop_x_min, crop_x_max)
crop_img[:, crop_y_slice, crop_x_slice] = img[:, y_slice, x_slice]
crop_imgs.append(crop_img)
y_slices.append(y_slice)
x_slices.append(x_slice)
crop_y_slices.append(crop_y_slice)
crop_x_slices.append(crop_x_slice)
if return_param:
param = {'y_slices': y_slices, 'x_slices': x_slices,
'crop_y_slices': crop_y_slices,
'crop_x_slices': crop_x_slices}
return np.array(crop_imgs), param
else:
return np.array(crop_imgs)
|
Python
| 0.000008
|
@@ -3379,17 +3379,16 @@
show()%0A%0A
-%0A
%22%22%22%0A
|
41294d03a53685668fc9663a375173bd527a76a3
|
Version bump
|
pyblish_starter/version.py
|
pyblish_starter/version.py
|
VERSION_MAJOR = 0
VERSION_MINOR = 2
VERSION_PATCH = 0
version_info = (VERSION_MAJOR, VERSION_MINOR, VERSION_PATCH)
version = '%i.%i.%i' % version_info
__version__ = version
__all__ = ['version', 'version_info', '__version__']
|
Python
| 0.000001
|
@@ -46,17 +46,17 @@
PATCH =
-0
+1
%0A%0Aversio
|
b439017a21ac01ee7fda275753effaf5d103a120
|
Change IP.
|
pybossa/signer/__init__.py
|
pybossa/signer/__init__.py
|
# -*- coding: utf8 -*-
# This file is part of PyBossa.
#
# Copyright (C) 2013 SF Isle of Man Limited
#
# PyBossa is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# PyBossa is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with PyBossa. If not, see <http://www.gnu.org/licenses/>.
from itsdangerous import URLSafeTimedSerializer
from werkzeug import generate_password_hash, check_password_hash
class Signer(object):
def __init__(self, app=None):
self.app = app
if app is not None: # pragma: no cover
self.init_app(app)
def init_app(self, app):
key = app.config['ITSDANGEROUSKEY']
self.signer = URLSafeTimedSerializer(key)
def loads(self, string, **kwargs):
return self.signer.loads(string, **kwargs)
def dumps(self, key, **kwargs):
return self.signer.dumps(key, **kwargs)
def generate_password_hash(self, password):
return generate_password_hash(password)
def check_password_hash(self, passwd_hash, password):
return check_password_hash(passwd_hash, password)
|
Python
| 0
|
@@ -73,32 +73,24 @@
201
-3 SF Isle of Man Limited
+5 SciFabric LTD.
%0A#%0A#
|
026967d81386f31c779ea0b64cd0dea502270a80
|
Fix import error if invoicegenerator is not available
|
pyconde/attendees/tasks.py
|
pyconde/attendees/tasks.py
|
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
import os
from django.core.mail import EmailMessage
from django.template.loader import render_to_string
from django.utils.crypto import get_random_string
from django.utils.translation import ugettext as _
from invoicegenerator import generate_invoice
from pyconde.celery import app
from . import settings as settings
if settings.INVOICE_DISABLE_RENDERING:
def do_render(filepath, data, **kwargs):
from django.core.serializers.json import DjangoJSONEncoder
with open(filepath, 'w') as f:
f.write(DjangoJSONEncoder(indent=2).encode(data))
return True, ''
else:
def do_render(filepath, data, **kwargs):
return generate_invoice.render(filepath=filepath, data=data, **kwargs)
@app.task(ignore_result=True)
def render_invoice(purchase_id):
from .exporters import PurchaseExporter
from .models import Purchase
from .utils import generate_invoice_filename
try:
purchase = Purchase.objects.get_exportable_purchases().get(pk=purchase_id)
except Purchase.DoesNotExist:
raise RuntimeError('No exportable purchase found with pk %d' % purchase_id)
generate_invoice_filename(purchase)
filepath = purchase.invoice_filepath
if not os.path.exists(settings.INVOICE_ROOT):
os.makedirs(settings.INVOICE_ROOT)
data = PurchaseExporter(purchase).export()
success, error = False, ''
iteration = 0
while not success and iteration < 3:
try:
chars = 'abcdefghijklmnopqrstuvwxyz0123456789!@#$%^&*(-_=+)'
password = bytes(get_random_string(32, chars))
success, error = do_render(filepath, data,
basepdf=settings.INVOICE_TEMPLATE_PATH,
fontdir=settings.INVOICE_FONT_ROOT,
fontconfig=settings.INVOICE_FONT_CONFIG,
modify_password=password)
except Exception as e:
error = e
finally:
iteration += 1
if not success:
purchase.invoice_filename = None
purchase.save(update_fields=['invoice_filename'])
if isinstance(error, Exception):
raise error
else:
raise RuntimeError('Error exporting purchase pk %d: %s' % (purchase_id, error))
else:
purchase.exported = True
if purchase.payment_method == 'invoice' and purchase.state == 'new':
# We must not update the state for credit cards, as that would
# override the prior state.
purchase.state = 'invoice_created'
purchase.save(update_fields=['exported', 'state'])
# Send invoice to buyer
send_invoice.delay(purchase_id, (purchase.email_receiver,))
# Send invoice to orga
send_invoice.delay(purchase_id, settings.INVOICE_EXPORT_RECIPIENTS)
@app.task(ignore_result=True)
def send_invoice(purchase_id, recipients):
if not recipients:
return
from .models import Purchase
try:
purchase = Purchase.objects.get(pk=purchase_id)
except Purchase.DoesNotExist:
raise RuntimeError('No purchase found with pk %d' % purchase_id)
if not purchase.exported:
# Safe call 'cause exported will be set
# if send_invoice is invoked again
render_invoice.delay(purchase_id)
raise RuntimeError('Invoked rendering of invoice pk %d' % purchase_id)
subject = _('Your EuroPython 2014 Invoice %(full_invoice_number)s') % {
'full_invoice_number': purchase.full_invoice_number,
}
message = render_to_string('attendees/mail_payment_invoice.html', {
'first_name': purchase.first_name,
'last_name': purchase.last_name,
'conference': purchase.conference,
})
msg = EmailMessage(subject, message, to=recipients)
msg.encoding = 'utf-8'
ext = '.json' if settings.INVOICE_DISABLE_RENDERING else '.pdf'
filename = '%s%s' % (purchase.full_invoice_number, ext) # attachment filename
with open(purchase.invoice_filepath, 'rb') as f:
content = f.read()
msg.attach(filename, content)
msg.send()
|
Python
| 0.000001
|
@@ -269,55 +269,8 @@
_%0A%0A
-from invoicegenerator import generate_invoice%0A%0A
from
@@ -653,32 +653,86 @@
ata, **kwargs):%0A
+ from invoicegenerator import generate_invoice%0A
return g
|
54d39aaf8c31a5827ae7338fefe7a1d6a19d52cf
|
Add missing docstring.
|
pyslvs_ui/info/__init__.py
|
pyslvs_ui/info/__init__.py
|
# -*- coding: utf-8 -*-
"""'info' module contains Pyslvs program information."""
__all__ = [
'KERNELS',
'SYS_INFO',
'ARGUMENTS',
'HAS_SLVS',
'Kernel',
'check_update',
'PyslvsAbout',
'html',
'logger',
'XStream',
'size_format',
]
__author__ = "Yuan Chang"
__copyright__ = "Copyright (C) 2016-2020"
__license__ = "AGPL"
__email__ = "pyslvs@gmail.com"
from .info import KERNELS, SYS_INFO, ARGUMENTS, HAS_SLVS, Kernel
from .about import PyslvsAbout, html, check_update
from .logging_handler import logger, XStream
def size_format(num: float) -> str:
if num <= 0:
return "0 B"
for u in ('', 'K', 'M', 'G', 'T', 'P', 'E', 'Z'):
if abs(num) < 1024:
return f"{num:3.1f} {u}B"
num /= 1024
return f"{num:.1f} YB"
|
Python
| 0.000005
|
@@ -587,16 +587,47 @@
-%3E str:%0A
+ %22%22%22Calculate file size.%22%22%22%0A
if n
|
7b19611d30dfc9091823ae3d960ab2790dfe9cfc
|
Apply a blur filter automatically for each detected face
|
python/blur_human_faces.py
|
python/blur_human_faces.py
|
import requests
import json
imgUrl = 'https://pixlab.io/images/m3.jpg' # Target picture we want to blur any face on
# Detect all human faces in a given image via /facedetect first and blur all of them later via /mogrify.
# https://pixlab.io/cmd?id=facedetect and https://pixlab.io/cmd?id=mogrify for additional information.
req = requests.get('https://api.pixlab.io/facedetect',params={
'img': imgUrl,
'key':'PIXLAB_API_KEY',
})
reply = req.json()
if reply['status'] != 200:
print (reply['error'])
exit();
total = len(reply['faces']) # Total detected faces
print(str(total)+" faces were detected")
if total < 1:
# No faces were detected, exit immediately
exit()
# Pass the detected faces coordinates untouched to mogrify
coordinates = reply['faces']
# Call mogrify & blur the face(s)
req = requests.post('https://api.pixlab.io/mogrify',headers={'Content-Type':'application/json'},data=json.dumps({
'img': imgUrl,
'key':'PIXLAB_API_KEY',
'cord': coordinates # The field of interest
}))
reply = req.json()
if reply['status'] != 200:
print (reply['error'])
else:
print ("Blurred Picture URL: "+ reply['ssl_link'])
|
Python
| 0
|
@@ -665,16 +665,17 @@
%09exit()%0A
+%0A
# Pass t
@@ -677,30 +677,22 @@
ass the
-detected
fac
-es
+ial
coordin
@@ -696,16 +696,39 @@
dinates
+for each detected face
untouche
|
60b4915569c891675ed5dc86ff40a7a2587841d1
|
Use .pos_ instead of .tags_ in displaCy by default (see #1006)
|
spacy/displacy/__init__.py
|
spacy/displacy/__init__.py
|
# coding: utf8
from __future__ import unicode_literals
from .render import DependencyRenderer, EntityRenderer
from ..tokens import Doc
from ..compat import b_to_str
from ..util import prints, is_in_jupyter
_html = {}
IS_JUPYTER = is_in_jupyter()
def render(docs, style='dep', page=False, minify=False, jupyter=IS_JUPYTER,
options={}, manual=False):
"""Render displaCy visualisation.
docs (list or Doc): Document(s) to visualise.
style (unicode): Visualisation style, 'dep' or 'ent'.
page (bool): Render markup as full HTML page.
minify (bool): Minify HTML markup.
jupyter (bool): Experimental, use Jupyter's `display()` to output markup.
options (dict): Visualiser-specific options, e.g. colors.
manual (bool): Don't parse `Doc` and instead expect a dict/list of dicts.
RETURNS (unicode): Rendered HTML markup.
"""
factories = {'dep': (DependencyRenderer, parse_deps),
'ent': (EntityRenderer, parse_ents)}
if style not in factories:
raise ValueError("Unknown style: %s" % style)
if isinstance(docs, Doc) or isinstance(docs, dict):
docs = [docs]
renderer, converter = factories[style]
renderer = renderer(options=options)
parsed = [converter(doc, options) for doc in docs] if not manual else docs
_html['parsed'] = renderer.render(parsed, page=page, minify=minify).strip()
html = _html['parsed']
if jupyter: # return HTML rendered by IPython display()
from IPython.core.display import display, HTML
return display(HTML(html))
return html
def serve(docs, style='dep', page=True, minify=False, options={}, manual=False,
port=5000):
"""Serve displaCy visualisation.
docs (list or Doc): Document(s) to visualise.
style (unicode): Visualisation style, 'dep' or 'ent'.
page (bool): Render markup as full HTML page.
minify (bool): Minify HTML markup.
options (dict): Visualiser-specific options, e.g. colors.
manual (bool): Don't parse `Doc` and instead expect a dict/list of dicts.
port (int): Port to serve visualisation.
"""
from wsgiref import simple_server
render(docs, style=style, page=page, minify=minify, options=options,
manual=manual)
httpd = simple_server.make_server('0.0.0.0', port, app)
prints("Using the '%s' visualizer" % style,
title="Serving on port %d..." % port)
try:
httpd.serve_forever()
except KeyboardInterrupt:
prints("Shutting down server on port %d." % port)
finally:
httpd.server_close()
def app(environ, start_response):
# headers and status need to be bytes in Python 2, see #1227
headers = [(b_to_str(b'Content-type'),
b_to_str(b'text/html; charset=utf-8'))]
start_response(b_to_str(b'200 OK'), headers)
res = _html['parsed'].encode(encoding='utf-8')
return [res]
def parse_deps(orig_doc, options={}):
"""Generate dependency parse in {'words': [], 'arcs': []} format.
doc (Doc): Document do parse.
RETURNS (dict): Generated dependency parse keyed by words and arcs.
"""
doc = Doc(orig_doc.vocab).from_bytes(orig_doc.to_bytes())
if options.get('collapse_punct', True):
spans = []
for word in doc[:-1]:
if word.is_punct or not word.nbor(1).is_punct:
continue
start = word.i
end = word.i + 1
while end < len(doc) and doc[end].is_punct:
end += 1
span = doc[start:end]
spans.append((span.start_char, span.end_char, word.tag_,
word.lemma_, word.ent_type_))
for span_props in spans:
doc.merge(*span_props)
words = [{'text': w.text, 'tag': w.tag_} for w in doc]
arcs = []
for word in doc:
if word.i < word.head.i:
arcs.append({'start': word.i, 'end': word.head.i,
'label': word.dep_, 'dir': 'left'})
elif word.i > word.head.i:
arcs.append({'start': word.head.i, 'end': word.i,
'label': word.dep_, 'dir': 'right'})
return {'words': words, 'arcs': arcs}
def parse_ents(doc, options={}):
"""Generate named entities in [{start: i, end: i, label: 'label'}] format.
doc (Doc): Document do parse.
RETURNS (dict): Generated entities keyed by text (original text) and ents.
"""
ents = [{'start': ent.start_char, 'end': ent.end_char, 'label': ent.label_}
for ent in doc.ents]
title = (doc.user_data.get('title', None)
if hasattr(doc, 'user_data') else None)
return {'text': doc.text, 'ents': ents, 'title': title}
|
Python
| 0
|
@@ -3756,19 +3756,19 @@
tag': w.
-tag
+pos
_%7D for w
|
8a950dbfb1281216ed270bf6363c7a71d857133f
|
Make datetime and time +00:00 handling behavior consistent. Fix #3.
|
webhooks/encoders.py
|
webhooks/encoders.py
|
"""
Serialize data to/from JSON
Inspired by https://github.com/django/django/blob/master/django/core/serializers/json.py
"""
# Avoid shadowing the standard library json module
from __future__ import absolute_import
from __future__ import unicode_literals
import datetime
import decimal
import json
class WebHooksJSONEncoder(json.JSONEncoder):
"""
A JSONEncoder that can encode date/time and decimal types.
"""
def default(self, o):
# See "Date Time String Format" in the ECMA-262 specification.
if isinstance(o, datetime.datetime):
r = o.isoformat()
if o.microsecond:
r = r[:23] + r[26:]
if r.endswith('+00:00'):
r = r[:-6] + 'Z'
return r
elif isinstance(o, datetime.date):
return o.isoformat()
elif isinstance(o, datetime.time):
r = o.isoformat()
if o.microsecond:
r = r[:12]
return r
elif isinstance(o, decimal.Decimal):
return str(o)
else:
return super(WebHooksJSONEncoder, self).default(o)
|
Python
| 0.000001
|
@@ -954,16 +954,86 @@
r%5B:12%5D%0A
+ if r.endswith('+00:00'):%0A r = r%5B:-6%5D + 'Z'%0A
|
8120b641ccb66b088fa70c028e5be542bf561dfd
|
Update lex_attrs.py (#5608)
|
spacy/lang/hy/lex_attrs.py
|
spacy/lang/hy/lex_attrs.py
|
# coding: utf8
from __future__ import unicode_literals
from ...attrs import LIKE_NUM
_num_words = [
"զրօ",
"մէկ",
"երկու",
"երեք",
"չորս",
"հինգ",
"վեց",
"յոթ",
"ութ",
"ինը",
"տասը",
"տասնմեկ",
"տասներկու",
"տասներեք",
"տասնչորս",
"տասնհինգ",
"տասնվեց",
"տասնյոթ",
"տասնութ",
"տասնինը",
"քսան" "երեսուն",
"քառասուն",
"հիսուն",
"վաթցսուն",
"յոթանասուն",
"ութսուն",
"ինիսուն",
"հարյուր",
"հազար",
"միլիոն",
"միլիարդ",
"տրիլիոն",
"քվինտիլիոն",
]
def like_num(text):
if text.startswith(("+", "-", "±", "~")):
text = text[1:]
text = text.replace(",", "").replace(".", "")
if text.isdigit():
return True
if text.count("/") == 1:
num, denom = text.split("/")
if num.isdigit() and denom.isdigit():
return True
if text.lower() in _num_words:
return True
return False
LEX_ATTRS = {LIKE_NUM: like_num}
|
Python
| 0
|
@@ -107,9 +107,9 @@
%22%D5%A6%D6%80
-%D6%85
+%D5%B8
%22,%0A
@@ -117,9 +117,9 @@
%22%D5%B4
-%D5%A7
+%D5%A5
%D5%AF%22,%0A
@@ -430,17 +430,16 @@
%22%D5%BE%D5%A1%D5%A9
-%D6%81
%D5%BD%D5%B8%D6%82%D5%B6%22,%0A
@@ -477,17 +477,17 @@
%0A %22%D5%AB%D5%B6
-%D5%AB
+%D5%B6
%D5%BD%D5%B8%D6%82%D5%B6%22,%0A
|
cd8024c762bf5bae8caf210b9224548bee55ee04
|
Bump version to 6.1.5a3
|
platformio/__init__.py
|
platformio/__init__.py
|
# Copyright (c) 2014-present PlatformIO <contact@platformio.org>
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import sys
VERSION = (6, 1, "5a2")
__version__ = ".".join([str(s) for s in VERSION])
__title__ = "platformio"
__description__ = (
"A professional collaborative platform for embedded development. "
"Cross-platform IDE and Unified Debugger. "
"Static Code Analyzer and Remote Unit Testing. "
"Multi-platform and Multi-architecture Build System. "
"Firmware File Explorer and Memory Inspection. "
"IoT, Arduino, CMSIS, ESP-IDF, FreeRTOS, libOpenCM3, mbedOS, Pulp OS, SPL, "
"STM32Cube, Zephyr RTOS, ARM, AVR, Espressif (ESP8266/ESP32), FPGA, "
"MCS-51 (8051), MSP430, Nordic (nRF51/nRF52), NXP i.MX RT, PIC32, RISC-V, "
"STMicroelectronics (STM8/STM32), Teensy"
)
__url__ = "https://platformio.org"
__author__ = "PlatformIO Labs"
__email__ = "contact@piolabs.com"
__license__ = "Apache Software License"
__copyright__ = "Copyright 2014-present PlatformIO Labs"
__accounts_api__ = "https://api.accounts.platformio.org"
__registry_mirror_hosts__ = [
"registry.platformio.org",
"registry.nm1.platformio.org",
]
__pioremote_endpoint__ = "ssl:host=remote.platformio.org:port=4413"
__default_requests_timeout__ = (10, None) # (connect, read)
__core_packages__ = {
"contrib-piohome": "~3.4.2",
"contrib-pysite": "~2.%d%d.0" % (sys.version_info.major, sys.version_info.minor),
"tool-scons": "~4.40400.0",
"tool-cppcheck": "~1.270.0",
"tool-clangtidy": "~1.120001.0",
"tool-pvs-studio": "~7.18.0",
}
__check_internet_hosts__ = [
"185.199.110.153", # Github.com
"88.198.170.159", # platformio.org
"github.com",
] + __registry_mirror_hosts__
|
Python
| 0
|
@@ -636,17 +636,17 @@
, 1, %225a
-2
+3
%22)%0A__ver
|
12c22ebdf3c7e84f5f9c6b32329f343c8317f11b
|
Correct comments
|
python/dbtools/__init__.py
|
python/dbtools/__init__.py
|
'''
This library provides database access routines.
It's based on the re-usable parts of tailoredstats.
Owain Kenway
'''
'''
Generally abstract away DB queries, such that all complexity is replaced with:
dbtools.dbquery(db, query)
'''
def dbquery(db, query, mysqlhost="mysql.external.legion.ucl.ac.uk", mysqlport = 3306 ):
from auth.secrets import Secrets
import MySQLdb # Note need mysqlclient package from pypi
# Set up our authentication.
s = Secrets()
# Connect to database.
conn = MySQLdb.Connect(host=mysqlhost,
port=mysqlport,
user=s.dbuser,
passwd=s.dbpasswd,
db=db)
# Set up cursor.
cursor = conn.cursor(MySQLdb.cursors.DictCursor)
print(">>> DEBUG SQL query: " + query)
# Run query.
cursor.execute(query)
# Dump output.
output = cursor.fetchall()
# Tidy up.
cursor.close()
conn.close()
return output
# Generate a valid SQL list from a python one.
def sqllist(pylist):
sqlstr="("
if type(pylist) == str:
sqlstr = sqlstr + "'" + pylist + "')"
else:
for a in pylist:
if sqlstr!= "(":
sqlstr = sqlstr + ", "
sqlstr = sqlstr + "'" + a + "'"
sqlstr = sqlstr + ")"
return sqlstr
# Build owner/node limit string for queries.
def onlimits(users="*"):
query = ""
# if users != * then construct a node list.
if users != "*":
userlist = sqllist(users)
query = query + " and owner in " + userlist
return query
|
Python
| 0.000023
|
@@ -1398,13 +1398,8 @@
wner
-/node
lim
|
8d459d86d33992129726ef177ed24fe8a00e9b75
|
Bump version to 4.0.0rc1
|
platformio/__init__.py
|
platformio/__init__.py
|
# Copyright (c) 2014-present PlatformIO <contact@platformio.org>
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
VERSION = (4, 0, "0b3")
__version__ = ".".join([str(s) for s in VERSION])
__title__ = "platformio"
__description__ = (
"An open source ecosystem for IoT development. "
"Cross-platform IDE and unified debugger. "
"Remote unit testing and firmware updates. "
"Arduino, ARM mbed, Espressif (ESP8266/ESP32), STM32, PIC32, nRF51/nRF52, "
"FPGA, CMSIS, SPL, AVR, Samsung ARTIK, libOpenCM3")
__url__ = "https://platformio.org"
__author__ = "PlatformIO"
__email__ = "contact@platformio.org"
__license__ = "Apache Software License"
__copyright__ = "Copyright 2014-present PlatformIO"
__apiurl__ = "https://api.platformio.org"
|
Python
| 0
|
@@ -627,10 +627,11 @@
, %220
-b3
+rc1
%22)%0A_
|
5f917746e86c733d37c56e15a97f7aecb73fa75f
|
fix bug comparing string with int. int(games)
|
python/guess_the_player.py
|
python/guess_the_player.py
|
# importing modules
import os
import csv
import time
import random
import tweepy
import player
# secrets
consumer_key = os.getenv('c_key')
consumer_secret = os.getenv('c_secret')
access_token = os.getenv('a_token')
access_token_secret = os.getenv('a_secret')
# authentication
auth = tweepy.OAuthHandler(consumer_key, consumer_secret)
auth.set_access_token(access_token, access_token_secret)
api = tweepy.API(auth)
# processing
with open('player_history.csv') as csvfile:
row = random.choice([a for a in list(csv.DictReader(csvfile)) if a['Games'] > 1])
po = player.player(
row['Player Name'],
row['Goals'],
row['Games'],
row['Starter'],
row['Sub'],
row['Active'],
row['Debut']
)
api.update_status(status=po.get_guess_player_string())
time.sleep(10*60)
api.update_status(status=f"#GuessThePlayer Well done if you got it, the answer was: {po.name} #CFC #Chelsea")
|
Python
| 0
|
@@ -536,16 +536,20 @@
le)) if
+int(
a%5B'Games
@@ -550,16 +550,17 @@
'Games'%5D
+)
%3E 1%5D)%0A
|
a172a17c815e8fcbe0f8473c6bac1ea1d9714817
|
Bump version to 4.4.0a4
|
platformio/__init__.py
|
platformio/__init__.py
|
# Copyright (c) 2014-present PlatformIO <contact@platformio.org>
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
VERSION = (4, 4, "0a3")
__version__ = ".".join([str(s) for s in VERSION])
__title__ = "platformio"
__description__ = (
"A new generation collaborative platform for embedded development. "
"Cross-platform IDE and Unified Debugger. "
"Static Code Analyzer and Remote Unit Testing. "
"Multi-platform and Multi-architecture Build System. "
"Firmware File Explorer and Memory Inspection. "
"Professional development environment for Embedded, IoT, Arduino, CMSIS, ESP-IDF, "
"FreeRTOS, libOpenCM3, mbedOS, Pulp OS, SPL, STM32Cube, Zephyr RTOS, ARM, AVR, "
"Espressif (ESP8266/ESP32), FPGA, MCS-51 (8051), MSP430, Nordic (nRF51/nRF52), "
"NXP i.MX RT, PIC32, RISC-V, STMicroelectronics (STM8/STM32), Teensy"
)
__url__ = "https://platformio.org"
__author__ = "PlatformIO"
__email__ = "contact@platformio.org"
__license__ = "Apache Software License"
__copyright__ = "Copyright 2014-present PlatformIO"
__apiurl__ = "https://api.platformio.org"
__accounts_api__ = "https://api.accounts.platformio.org"
__registry_api__ = "https://api.registry.platformio.org"
__pioremote_endpoint__ = "ssl:host=remote.platformio.org:port=4413"
|
Python
| 0
|
@@ -624,17 +624,17 @@
, 4, %220a
-3
+4
%22)%0A__ver
|
54921c5dbdc68893fe45649d07d067818c36889b
|
Bump version to 4.0.0b3
|
platformio/__init__.py
|
platformio/__init__.py
|
# Copyright (c) 2014-present PlatformIO <contact@platformio.org>
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
VERSION = (4, 0, "0b2")
__version__ = ".".join([str(s) for s in VERSION])
__title__ = "platformio"
__description__ = (
"An open source ecosystem for IoT development. "
"Cross-platform IDE and unified debugger. "
"Remote unit testing and firmware updates. "
"Arduino, ARM mbed, Espressif (ESP8266/ESP32), STM32, PIC32, nRF51/nRF52, "
"FPGA, CMSIS, SPL, AVR, Samsung ARTIK, libOpenCM3")
__url__ = "https://platformio.org"
__author__ = "PlatformIO"
__email__ = "contact@platformio.org"
__license__ = "Apache Software License"
__copyright__ = "Copyright 2014-present PlatformIO"
__apiurl__ = "https://api.platformio.org"
|
Python
| 0
|
@@ -624,17 +624,17 @@
, 0, %220b
-2
+3
%22)%0A__ver
|
302a102a3c72224d2039df35fe4b292e9dd540d3
|
fix typo in docstring
|
client/libs/logdog/bootstrap.py
|
client/libs/logdog/bootstrap.py
|
# Copyright 2016 The LUCI Authors. All rights reserved.
# Use of this source code is governed under the Apache License, Version 2.0
# that can be found in the LICENSE file.
import collections
import os
from . import stream, streamname
class NotBootstrappedError(RuntimeError):
"""Raised when the current environment is missing Butler bootstrap variables.
"""
_ButlerBootstrapBase = collections.namedtuple('_ButlerBootstrapBase',
('project', 'prefix', 'streamserver_uri', 'coordinator_host',
'namespace'))
class ButlerBootstrap(_ButlerBootstrapBase):
"""Loads LogDog Butler bootstrap parameters from the environment.
LogDog Butler adds variables describing the LogDog stream parameters to the
environment when it bootstraps an application. This class probes the
environment and identifies those parameters.
"""
# TODO(iannucci): move all of these to LUCI_CONTEXT
_ENV_PROJECT = 'LOGDOG_STREAM_PROJECT'
_ENV_PREFIX = 'LOGDOG_STREAM_PREFIX'
_ENV_STREAM_SERVER_PATH = 'LOGDOG_STREAM_SERVER_PATH'
_ENV_COORDINATOR_HOST = 'LOGDOG_COORDINATOR_HOST'
_ENV_NAMESPACE = 'LOGDOG_NAMESPACE'
@classmethod
def probe(cls, env=None):
"""Returns (ButlerBootstrap): The probed bootstrap environment.
Args:
env (dict): The environment to probe. If None, `os.getenv` will be used.
Raises:
NotBootstrappedError if the current environment is not boostrapped.
"""
if env is None:
env = os.environ
def _check(kind, val):
if not val:
return val
try:
streamname.validate_stream_name(val)
return val
except ValueError as exp:
raise NotBootstrappedError('%s (%s) is invalid: %s' % (kind, val, exp))
streamserver_uri = env.get(cls._ENV_STREAM_SERVER_PATH)
if not streamserver_uri:
raise NotBootstrappedError('No streamserver in bootstrap environment.')
return cls(
project=env.get(cls._ENV_PROJECT, ''),
prefix=_check("Prefix", env.get(cls._ENV_PREFIX, '')),
streamserver_uri=streamserver_uri,
coordinator_host=env.get(cls._ENV_COORDINATOR_HOST, ''),
namespace=_check("Namespace", env.get(cls._ENV_NAMESPACE, '')))
def stream_client(self, reg=None):
"""Returns: (StreamClient) stream client for the bootstrap streamserver URI.
If the Butler accepts external stream connections, it will export a
streamserver URI in the environment. This will create a StreamClient
instance to operate on the streamserver if one is defined.
Args:
reg (stream.StreamProtocolRegistry or None): The stream protocol registry
to use to create the stream. If None, the default global registry will
be used (recommended).
Raises:
ValueError: If no streamserver URI is present in the environment.
"""
reg = reg or stream._default_registry
return reg.create(
self.streamserver_uri,
project=self.project,
prefix=self.prefix,
coordinator_host=self.coordinator_host,
namespace=self.namespace)
|
Python
| 0.020727
|
@@ -1395,16 +1395,17 @@
not boo
+t
strapped
|
a98404a8d7d36fb63d4a402038541264fbd8da07
|
fix unicode issues with MDPI
|
plugins/python/mdpi.py
|
plugins/python/mdpi.py
|
#!/usr/bin/env python2.6
import os, sys, re, urllib2, cookielib, string
from urllib import urlencode
from urllib2 import urlopen
from copy import copy
import BeautifulSoup
import htmlentitydefs
import html5lib
from html5lib import treebuilders
import warnings
import codecs
warnings.simplefilter("ignore",DeprecationWarning)
class ParseException(Exception):
pass
##
# Removes HTML or XML character references and entities from a text string.
#
# @param text The HTML (or XML) source text.
# @return The plain text, as a Unicode string, if necessary.
def unescape(text):
def fixup(m):
text = m.group(0)
if text[:2] == "&#":
# character reference
try:
if text[:3] == "&#x":
return unichr(int(text[3:-1], 16))
else:
return unichr(int(text[2:-1]))
except ValueError:
pass
else:
# named entity
try:
text = unichr(htmlentitydefs.name2codepoint[text[1:-1]])
except KeyError:
pass
return text # leave as is
return re.sub("&#?\w+;", fixup, text).encode('utf-8')
def meta(soup, key):
el = soup.find("meta", {'name':key})
if el:
return el['content'];
return None
def item(soup, entry, key):
el = meta(soup, key)
if el:
print "%s\t%s" % (entry, el)
def handle(url):
# http://www.mdpi.com/2072-4292/1/4/1139
m = re.match(r'http://www\.mdpi\.com/(\d{4}-\d{4}/\d+/\d+/\d+)', url)
if not m:
raise ParseException, "URL not supported %s" % url
wkey = m.group(1)
page = urlopen(url).read()
parser = html5lib.HTMLParser(tree=treebuilders.getTreeBuilder("beautifulsoup"))
soup = parser.parse(page)
head = soup.find("head")
doi = meta(head, 'dc.identifier')
if not doi:
raise ParseException, "Cannot find DOI"
m = re.match(r'(?:doi:)?(.*)$', doi)
if not m:
raise ParseException, "Cannot find DOI"
doi = m.group(1)
print "begin_tsv"
print "linkout\tDOI\t\t%s\t\t" % (doi)
print "linkout\tMDPI\t\t%s\t\t" % wkey
print "type\tJOUR"
title = meta(head, "dc.title")
if title:
print "title\t%s" % unescape(title)
item(head, "journal", "prism.publicationName")
item(head, "volume", "prism.volume")
item(head, "issue", "prism.number")
item(head, "start_page", "prism.startingPage")
item(head, "end_page", "prism.endingPage")
item(head, "issn", "prism.issn")
item(head, "abstract", "dc.description")
date = meta(head, 'dc.date')
if date:
m = re.match(r'(\d+)-(\d+)-(\d+)', date)
if m:
day = m.group(1)
month = m.group(2)
year = m.group(3)
if year:
print "year\t%s" % year
if month:
print "month\t%s" % month
if day:
print "day\t%s" % day
# authors
authors = head.findAll("meta", {"name":"dc.creator"})
if authors:
for a in authors:
print "author\t%s" % a['content']
print "doi\t%s" % doi
print "end_tsv"
print "status\tok"
# read url from std input
url = sys.stdin.readline()
# get rid of the newline at the end
url = url.strip()
sys.stdout = codecs.getwriter('utf-8')(sys.stdout)
try:
handle(url)
except Exception, e:
import traceback
line = traceback.tb_lineno(sys.exc_info()[2])
print "\t".join(["status", "error", "There was an internal error processing this request. Please report this to bugs@citeulike.org quoting error code %d." % line])
raise
|
Python
| 0.000588
|
@@ -1128,16 +1128,75 @@
e as is%0A
+ #return re.sub(%22&#?%5Cw+;%22, fixup, text).encode('utf-8')%0A
retu
@@ -1228,32 +1228,16 @@
p, text)
-.encode('utf-8')
%0A%0Adef me
@@ -1641,16 +1641,80 @@
oup(1)%0A%0A
+%09#u = codecs.getreader('utf-8')(urlopen(url))%0A%09#page = u.read()%0A
%09page =
@@ -1734,16 +1734,17 @@
read()%0A%0A
+%0A
%09parser
@@ -2226,16 +2226,17 @@
itle%22)%0A%09
+#
if title
@@ -2238,16 +2238,17 @@
title:%0A%09
+#
%09print %22
@@ -2276,16 +2276,55 @@
(title)%0A
+%09if title:%0A%09%09print %22title%5Ct%25s%22 %25 title%0A
%09item(he
|
07e21bfc0d6273a982ed033e78c964c1474eda96
|
make queryset distinct to prevent a too many pages exception
|
cmsplugin_cascade/link/forms.py
|
cmsplugin_cascade/link/forms.py
|
import re
import requests
from django.core.exceptions import ObjectDoesNotExist, ValidationError
from django.contrib.admin.sites import site as admin_site
from django.db.models.fields.related import ManyToOneRel
from django.forms import fields, Media, ModelChoiceField
from django.forms.widgets import RadioSelect
from django.utils.html import format_html
from django.utils.safestring import mark_safe
from django.utils.translation import ugettext_lazy as _
from django_select2.forms import HeavySelect2Widget
from cms.utils import get_current_site
from cms.models import Page
from entangled.forms import EntangledModelFormMixin, get_related_object
from filer.models.filemodels import File as FilerFileModel
from filer.fields.file import AdminFileWidget, FilerFileField
def format_page_link(title, path):
html = format_html("{} ({})", mark_safe(title), path)
return html
class PageSelect2Widget(HeavySelect2Widget):
def __init__(self, *args, **kwargs):
kwargs.setdefault('data_view', 'admin:get_published_pagelist')
super().__init__(*args, **kwargs)
@property
def media(self):
parent_media = super().media
# append jquery.init.js to enforce select2.js into the global 'jQuery' namespace
js = list(parent_media._js) + ['admin/js/jquery.init.js']
return Media(css=parent_media._css, js=js)
def render(self, *args, **kwargs):
# replace self.choices by an empty list in order to prevent building the whole optgroup
try:
page = Page.objects.get(pk=kwargs['value'])
except (Page.DoesNotExist, ValueError, KeyError):
self.choices = []
else:
self.choices = [(kwargs['value'], str(page))]
return super().render(*args, **kwargs)
class LinkSearchField(ModelChoiceField):
widget = PageSelect2Widget()
def __init__(self, *args, **kwargs):
queryset = Page.objects.public()
try:
queryset = queryset.published().on_site(get_current_site())
except:
choices = [] # can happen if database is not ready yet
else:
# set a minimal set of choices, otherwise django-select2 builds them for every published page
choices = [(index, str(page)) for index, page in enumerate(queryset[:15])]
kwargs.setdefault('queryset', queryset)
super().__init__(*args, **kwargs)
self.choices = choices
class SectionChoiceField(fields.ChoiceField):
def __init__(self, *args, **kwargs):
kwargs.setdefault('choices', [('', _("Page Root"))])
super().__init__(*args, **kwargs)
def valid_value(self, value):
"""
The optgroup is adjusted dynamically accroding to the selected cms_page, so always returns True
and let `LinkForm` validate this value.
"""
return True
class LinkForm(EntangledModelFormMixin):
LINK_TYPE_CHOICES = [
('cmspage', _("CMS Page")),
('download', _("Download File")),
('exturl', _("External URL")),
('email', _("Mail To")),
]
link_type = fields.ChoiceField(
label=_("Link"),
help_text=_("Type of link"),
)
cms_page = LinkSearchField(
required=False,
label='',
help_text=_("An internal link onto any CMS page of this site"),
)
section = SectionChoiceField(
required=False,
label='',
help_text=_("Page bookmark"),
)
download_file = ModelChoiceField(
label='',
queryset=FilerFileModel.objects.all(),
widget=AdminFileWidget(ManyToOneRel(FilerFileField, FilerFileModel, 'id'), admin_site),
required=False,
help_text=_("An internal link onto a file from filer"),
)
ext_url = fields.URLField(
required=False,
label=_("URL"),
help_text=_("Link onto external page"),
)
mail_to = fields.EmailField(
required=False,
label=_("Email"),
help_text=_("Open Email program with this address"),
)
link_target = fields.ChoiceField(
choices=[
('', _("Same Window")),
('_blank', _("New Window")),
('_parent', _("Parent Window")),
('_top', _("Topmost Frame")),
],
label=_("Link Target"),
widget=RadioSelect,
required=False,
help_text=_("Open Link in other target."),
)
link_title = fields.CharField(
label=_("Title"),
required=False,
help_text=_("Link's Title"),
)
class Meta:
entangled_fields = {'glossary': ['link_type', 'cms_page', 'section', 'download_file', 'ext_url', 'mail_to',
'link_target', 'link_title']}
def __init__(self, *args, **kwargs):
link_type_choices = []
if not getattr(self, 'require_link', True):
link_type_choices.append(('', _("No Link")))
self.declared_fields['link_type'].required = False
link_type_choices.extend(self.LINK_TYPE_CHOICES)
self.declared_fields['link_type'].choices = link_type_choices
self.declared_fields['link_type'].initial = link_type_choices[0][0]
instance = kwargs.get('instance')
if instance and instance.glossary.get('link_type') == 'cmspage':
self._preset_section(instance)
super().__init__(*args, **kwargs)
def _preset_section(self, instance):
"""
Field ``cms_page`` may refer onto any CMS page, which itself may contain bookmarks. This method
creates the list of bookmarks.
"""
self.base_fields['section'].choices = self.base_fields['section'].choices[:1]
try:
cascade_page = get_related_object(instance.glossary, 'cms_page').cascadepage
for key, val in cascade_page.glossary.get('element_ids', {}).items():
self.base_fields['section'].choices.append((key, val))
except (AttributeError, ObjectDoesNotExist):
pass
def clean(self):
cleaned_data = super().clean()
link_type = cleaned_data.get('link_type')
error = None
if link_type == 'cmspage':
if not cleaned_data.get('cms_page'):
error = ValidationError(_("CMS page to link to is missing."))
self.add_error('cms_page', error)
elif link_type == 'download':
if not cleaned_data.get('download_file'):
error = ValidationError(_("File for download is missing."))
self.add_error('download_file', error)
elif link_type == 'exturl':
ext_url = cleaned_data.get('ext_url')
if ext_url:
try:
response = requests.head(ext_url, allow_redirects=True)
if response.status_code != 200:
error = ValidationError(_("No external page found on {url}.").format(url=ext_url))
except Exception as exc:
error = ValidationError(_("Failed to connect to {url}.").format(url=ext_url))
else:
error = ValidationError(_("No valid URL provided."))
if error:
self.add_error('ext_url', error)
elif link_type == 'email':
mail_to = cleaned_data.get('mail_to')
if mail_to:
if not re.match(r'(^[a-zA-Z0-9_.+-]+@[a-zA-Z0-9-]+\.[a-zA-Z0-9-.]+$)', mail_to):
error = ValidationError(_("'{email}' is not a valid email address.").format(email=mail_to))
else:
error = ValidationError(_("No email address provided."))
if error:
self.add_error('mail_to', error)
if error:
raise error
return cleaned_data
@classmethod
def unset_required_for(cls, sharable_fields):
"""
Fields borrowed by `SharedGlossaryAdmin` to build its temporary change form, only are
required if they are declared in `sharable_fields`. Otherwise just deactivate them.
"""
if 'link_content' in cls.base_fields and 'link_content' not in sharable_fields:
cls.base_fields['link_content'].required = False
if 'link_type' in cls.base_fields and 'link' not in sharable_fields:
cls.base_fields['link_type'].required = False
|
Python
| 0.000001
|
@@ -2347,16 +2347,27 @@
queryset
+.distinct()
)%0A
|
74ecf023ef13fdba6378d6b50b3eaeb06b9e0c97
|
Rename env vars & modify query
|
rebuild_dependant_repos.py
|
rebuild_dependant_repos.py
|
import os, sys, re, logging
import requests
from github import Github
logging.basicConfig(level=logging.DEBUG)
CIRCLECI_BASEURL = "https://circleci.com/api/v2"
CIRCLECI_ACCESS_TOKEN = os.environ["TAO_CIRCLECI_TOKEN"]
GITHUB_ACCESS_TOKEN = os.environ["TAO_GITHUB_TOKEN"]
g = Github(GITHUB_ACCESS_TOKEN)
if len(sys.argv) < 2:
raise AttributeError("The image name is required as the first argument.")
image_name = sys.argv[1]
image_name = re.sub(r"[^a-zA-Z0-9-]", " ", image_name)
query = "org:avatao-content language:Dockerfile FROM " + image_name
logging.debug("Searching GitHub with query: '%s'", query)
code_search = g.search_code(query)
circleci_project_slugs = set()
for result in code_search:
circleci_project_slugs.add(f"gh/{result.repository.organization.login}/{result.repository.name}")
logging.debug("Found %d candidate repositories.", len(circleci_project_slugs))
current_item = 1
for slug in circleci_project_slugs:
logging.debug("[%d/%d] Triggering CI pipeline for: %s", current_item, len(circleci_project_slugs), slug)
requests.post(f"{CIRCLECI_BASEURL}/project/{slug}/pipeline", headers={"Circle-Token": CIRCLECI_ACCESS_TOKEN})
current_item += 1
|
Python
| 0
|
@@ -15,17 +15,8 @@
, re
-, logging
%0Aimp
@@ -59,50 +59,8 @@
ub%0A%0A
-logging.basicConfig(level=logging.DEBUG)%0A%0A
CIRC
@@ -136,24 +136,27 @@
os.environ%5B%22
+AVA
TAO_CIRCLECI
@@ -198,16 +198,19 @@
nviron%5B%22
+AVA
TAO_GITH
@@ -487,13 +487,8 @@
ile
-FROM
%22 +
@@ -499,29 +499,21 @@
e_name%0A%0A
-logging.debug
+print
(%22Search
@@ -540,17 +540,18 @@
y: '%25s'%22
-,
+ %25
query)%0A
@@ -746,29 +746,21 @@
name%7D%22)%0A
-logging.debug
+print
(%22Found
@@ -786,17 +786,18 @@
tories.%22
-,
+ %25
len(cir
@@ -880,21 +880,13 @@
-logging.debug
+print
(%22%5B%25
@@ -922,18 +922,20 @@
for: %25s%22
-,
+%25 (
current_
@@ -974,16 +974,17 @@
), slug)
+)
%0A req
|
b43b34418d244acee363485d42a23694ed9d654f
|
Add fields to work serializer for GET methods
|
works/serializers.py
|
works/serializers.py
|
from rest_framework import serializers
from . import models
from clients import serializers as client_serializers
from users import serializers as user_serializers
class WorkTypeSerializer(serializers.ModelSerializer):
name = serializers.CharField(read_only=True)
class Meta:
model = models.WorkType
fields = ('id', 'work_type_id', 'name',)
class ArtTypeSerializer(serializers.ModelSerializer):
class Meta:
model = models.ArtType
fields = ('id', 'work_type', 'name',)
class ArtIgualaSerializer(serializers.ModelSerializer):
art_type_name = serializers.CharField(source='art_type.name', read_only=True)
class Meta:
model = models.ArtIguala
fields = ('id', 'iguala', 'art_type', 'quantity', 'art_type_name')
class IgualaSerializer(serializers.ModelSerializer):
client_complete = client_serializers.ClientSerializer(source='client', read_only=True)
art_iguala = ArtIgualaSerializer(many=True, read_only=True)
class Meta:
model = models.Iguala
fields = ('id', 'client', 'client_complete', 'name', 'start_date', 'end_date',
'art_iguala',)
class StatusSerializer(serializers.ModelSerializer):
name = serializers.CharField(source='__str__', read_only=True)
class Meta:
model = models.Status
fields = ('id', 'status_id', 'name',)
class WorkSerializer(serializers.ModelSerializer):
creation_date = serializers.DateField(read_only=True)
executive_complete = user_serializers.UserSerializer(source='executive', read_only=True)
contact_complete = client_serializers.ContactSerializer(source='contact', read_only=True)
current_status_complete = StatusSerializer(source='current_status', read_only=True)
work_type_complete = WorkTypeSerializer(source='work_type', read_only=True)
iguala_complete = IgualaSerializer(source='iguala', read_only=True)
class Meta:
model = models.Work
fields = ('id',
'executive',
'executive_complete',
'contact',
'contact_complete',
'current_status',
'current_status_complete',
'work_type',
'work_type_complete',
'iguala',
'iguala_complete',
'creation_date',
'name',
'expected_delivery_date',
'brief',
'final_link',
)
class ArtWorkSerializer(serializers.ModelSerializer):
class Meta:
model = models.ArtWork
fields = ('id', 'work', 'art_type', 'quantity',)
class FileSerializer(serializers.ModelSerializer):
class Meta:
model = models.File
fields = ('id', 'work', 'upload',)
class WorkDesignerSerializer(serializers.ModelSerializer):
start_date = serializers.DateTimeField(read_only=True)
end_date = serializers.DateTimeField(read_only=True)
class Meta:
model = models.WorkDesigner
fields = ('id', 'designer', 'work', 'start_date', 'end_date', 'active_work',)
class StatusChangeSerializer(serializers.ModelSerializer):
date = serializers.DateTimeField(read_only=True)
class Meta:
model = models.StatusChange
fields = ('id', 'work', 'status', 'user', 'date',)
|
Python
| 0
|
@@ -1371,24 +1371,870 @@
'name',)%0A%0A%0A
+class ArtWorkSerializer(serializers.ModelSerializer):%0A%0A class Meta:%0A model = models.ArtWork%0A fields = ('id', 'work', 'art_type', 'quantity',)%0A%0A%0Aclass FileSerializer(serializers.ModelSerializer):%0A%0A class Meta:%0A model = models.File%0A fields = ('id', 'work', 'upload',)%0A%0A%0Aclass WorkDesignerSerializer(serializers.ModelSerializer):%0A%0A start_date = serializers.DateTimeField(read_only=True)%0A end_date = serializers.DateTimeField(read_only=True)%0A%0A class Meta:%0A model = models.WorkDesigner%0A fields = ('id', 'designer', 'work', 'start_date', 'end_date', 'active_work',)%0A%0A%0Aclass StatusChangeSerializer(serializers.ModelSerializer):%0A%0A date = serializers.DateTimeField(read_only=True)%0A%0A class Meta:%0A model = models.StatusChange%0A fields = ('id', 'work', 'status', 'user', 'date',)%0A%0A%0A
class WorkSe
@@ -2751,32 +2751,290 @@
ead_only=True)%0A%0A
+ art_works = ArtWorkSerializer(many=True, read_only=True)%0A files = FileSerializer(many=True, read_only=True)%0A work_designers = WorkDesignerSerializer(many=True, read_only=True)%0A status_changes = StatusChangeSerializer(many=True, read_only=True)%0A%0A
class Meta:%0A
@@ -3626,852 +3626,135 @@
-)%0A%0A%0Aclass ArtWorkSerializer(serializers.ModelSerializer):%0A%0A class Meta:%0A model = models.ArtWork%0A fields = ('id', 'work', 'art_type', 'quantity',)%0A%0A%0Aclass FileSerializer(serializers.ModelSerializer):%0A%0A class Meta:%0A model = models.File%0A fields = ('id', 'work', 'upload',)%0A%0A%0Aclass WorkDesignerSerializer(serializers.ModelSerializer):%0A%0A start_date = serializers.DateTimeField(read_only=True)%0A end_date = serializers.DateTimeField(read_only=True)%0A%0A class Meta:%0A model = models.WorkDesigner%0A fields = ('id', 'designer', 'work', 'start_date', 'end_date', 'active_work',)%0A%0A%0Aclass StatusChangeSerializer(serializers.ModelSerializer):%0A%0A date = serializers.DateTimeField(read_only=True)%0A%0A class Meta:%0A model = models.StatusChange%0A fields = ('id', 'work', 'status', 'user', 'date',
+'art_works',%0A 'files',%0A 'work_designers',%0A 'status_changes'%0A
)%0A
|
a6fbb077eb3067cedc501cc8b0a9e99594cef9ed
|
Use memoize for compliance cache
|
recruit_app/hr/managers.py
|
recruit_app/hr/managers.py
|
# -*- coding: utf-8 -*-
from recruit_app.user.models import EveCharacter
from recruit_app.extensions import cache_extension
from flask import current_app
import requests
from bs4 import BeautifulSoup
class HrManager:
def __init__(self):
pass
@staticmethod
@cache_extension.cached(timeout=3600, key_prefix='get_compliance')
def get_compliance(corp_id):
url = 'https://goonfleet.com'
s = requests.session()
r = s.get(url, verify=True)
soup = BeautifulSoup(r.text, 'html.parser')
token = soup.find('input', {'name':'auth_key'})['value']
payload = {
'ips_username' : current_app.config['GSF_USERNAME'],
'ips_password' : current_app.config['GSF_PASSWORD'],
'auth_key' : token,
'referer' : 'https://goonfleet.com/',
'rememberMe' : 1,
}
url = 'https://goonfleet.com/index.php?app=core&module=global§ion=login&do=process'
r = s.post(url, data=payload, verify=True)
url = 'https://goonfleet.com/corps/checkMembers.php'
r = s.get(url, verify=True)
payload = {
'corpID' : str(corp_id)
}
r = s.post(url, data=payload, verify=True)
soup = BeautifulSoup(r.text, 'html.parser')
output = "<table id='compliance' class='table tablesorter'><thead><th>Character Name</th><th>Forum Name/Main</th><th>Primary Group</th><th>Status</th></thead><tbody>\n"
for row in soup.findAll('tr'):
alert = None
if row.get('class'):
alert = row.get('class')[1]
cols = row.findAll('td')
charname = cols[1].get_text()
forumname = cols[2].get_text()
group = cols[3].get_text()
# Look for an API for character
if not alert and not EveCharacter.query.filter_by(character_name=charname).first():
alert = 'alert-warning'
# Set status
if alert == 'alert-warning':
status = 'No KF API'
elif alert == 'alert-success':
status = 'Director'
elif alert == 'alert-error':
status = 'No Goon Auth'
else:
status = 'OK'
if alert:
output = output + '<tr class="alert {0}"><td>{1}</td><td>{2}</td><td>{3}</td><td>{4}</td></tr>\n'.format(alert, charname, forumname, group, status)
else:
output = output + '<tr><td>{0}</td><td>{1}</td><td>{2}</td><td>{3}</td></tr>\n'.format(charname, forumname, group, status)
output = output + '</tbody></table>'
return output
|
Python
| 0
|
@@ -292,14 +292,15 @@
ion.
-cached
+memoize
(tim
@@ -312,37 +312,8 @@
3600
-, key_prefix='get_compliance'
)%0A
|
0ba2b371e08c40e7c4d56efee6f4a828f1e7aeb0
|
Update functions.py
|
ref/functions/functions.py
|
ref/functions/functions.py
|
#functions.py
#Written by Jesse Gallarzo
#Add code here
#def function One:
def main():
#Add code here
print('Test')
main()
|
Python
| 0.000001
|
@@ -68,12 +68,57 @@
tion
-
One
+():%0A%0A#def functionTwo():%0A%0A#def functionThree()
:%0A%0Ad
|
2e7b319524ad3d86deeca0be65871d79f4208d19
|
remove useless debug print
|
cmemcached.py
|
cmemcached.py
|
import os
import sys
import traceback
from zlib import compress, decompress, error as zlib_error
from cmemcached_imp import *
import cmemcached_imp
import threading
_FLAG_PICKLE = 1 << 0
_FLAG_INTEGER = 1 << 1
_FLAG_LONG = 1 << 2
_FLAG_BOOL = 1 << 3
_FLAG_COMPRESS = 1 << 4
_FLAG_MARSHAL = 1 << 5
VERSION = "0.41-greenify"
def prepare(val, comp_threshold):
val, flag = cmemcached_imp.prepare(val)
if comp_threshold > 0 and val and len(val) > comp_threshold:
val = compress(val)
flag |= _FLAG_COMPRESS
return val, flag
def restore(val, flag):
if val is None:
return val
if flag & _FLAG_COMPRESS:
try:
val = decompress(val)
except zlib_error:
return None
flag &= ~_FLAG_COMPRESS
return cmemcached_imp.restore(val, flag)
class ThreadUnsafe(Exception):
pass
class Client(cmemcached_imp.Client):
"a wraper around cmemcached_imp"
def __init__(self, servers, do_split=1, comp_threshold=0, behaviors={}, logger=None, cas_support=False, *a, **kw):
cmemcached_imp.Client.__init__(self, logger)
self.servers = servers
self.do_split = do_split
self.comp_threshold = comp_threshold
self.behaviors = dict(behaviors.items())
self.add_server(servers)
self.set_behavior(BEHAVIOR_NO_BLOCK, 1) # nonblock
self.set_behavior(BEHAVIOR_TCP_NODELAY, 1) # nonblock
self.set_behavior(BEHAVIOR_TCP_KEEPALIVE, 1)
self.set_behavior(BEHAVIOR_CACHE_LOOKUPS, 1)
# self.set_behavior(BEHAVIOR_BUFFER_REQUESTS, 0) # no request buffer
#self.set_behavior(BEHAVIOR_KETAMA, 1)
self.set_behavior(BEHAVIOR_HASH, HASH_MD5)
self.set_behavior(BEHAVIOR_KETAMA_HASH, HASH_MD5)
self.set_behavior(BEHAVIOR_DISTRIBUTION, DIST_CONSISTENT_KETAMA)
if cas_support:
self.set_behavior(BEHAVIOR_SUPPORT_CAS, 1)
for k, v in behaviors.items():
self.set_behavior(k, v)
self._thread_ident = None
self._created_stack = traceback.extract_stack()
def __reduce__(self):
return (Client, (self.servers, self.do_split, self.comp_threshold, self.behaviors))
def set_behavior(self, k, v):
self.behaviors[k] = v
return cmemcached_imp.Client.set_behavior(self, k, v)
def set(self, key, val, time=0, compress=True):
self._record_thread_ident()
self._check_thread_ident()
comp = compress and self.comp_threshold or 0
val, flag = prepare(val, comp)
print >> sys.stderr, val, flag
if val is not None:
return self.set_raw(key, val, time, flag)
else:
print >>sys.stderr, '[cmemcached]', 'serialize %s failed' % key
def set_multi(self, values, time=0, compress=True, return_failure=False):
self._record_thread_ident()
self._check_thread_ident()
comp = compress and self.comp_threshold or 0
raw_values = dict((k, prepare(v, comp)) for k, v in values.iteritems())
return self.set_multi_raw(raw_values, time, return_failure=return_failure)
def get(self, key):
self._record_thread_ident()
val, flag = cmemcached_imp.Client.get_raw(self, key)
return restore(val, flag)
def get_multi(self, keys):
self._record_thread_ident()
result = cmemcached_imp.Client.get_multi_raw(self, keys)
return dict((k, restore(v, flag))
for k, (v, flag) in result.iteritems())
def gets(self, key):
self._record_thread_ident()
val, flag, cas = cmemcached_imp.Client.gets_raw(self, key)
return restore(val, flag), cas
def get_list(self, keys):
self._record_thread_ident()
result = self.get_multi(keys)
return [result.get(key) for key in keys]
def expire(self, key):
self._record_thread_ident()
return self.touch(key, -1)
def reset(self):
self.clear_thread_ident()
def clear_thread_ident(self):
self._thread_ident = None
self._thread_ident_stack = None
def _record_thread_ident(self):
if self._thread_ident is None:
self._thread_ident = self._get_current_thread_ident()
def _check_thread_ident(self):
if self._get_current_thread_ident() != self._thread_ident:
raise ThreadUnsafe("mc client created in %s\n%s, called in %s" %
(self._thread_ident,
self._created_stack,
self._get_current_thread_ident()))
def _get_current_thread_ident(self):
return (os.getpid(), threading.current_thread().name)
|
Python
| 0.000004
|
@@ -2547,47 +2547,8 @@
mp)%0A
- print %3E%3E sys.stderr, val, flag%0A
|
85d65439614a87c8a8884fa36c85d2f9f196c53c
|
add some tests around is_valid()
|
armstrong/apps/donations/tests/forms.py
|
armstrong/apps/donations/tests/forms.py
|
import fudge
import random
from ._utils import TestCase
from .. import forms
class BaseDonationFormTestCase(TestCase):
def test_applies_promo_code(self):
promo_code = self.random_discount
data = self.get_base_random_data()
data["promo_code"] = promo_code.code
form = forms.BaseDonationForm(data)
donation = form.save()
self.assertEqual(promo_code, donation.code)
def test_applies_promo_code_with_prefixed_form(self):
prefix = "random%d" % random.randint(1, 9)
promo_code = self.random_discount
data = {
"%s-amount" % prefix: "100",
"%s-name" % prefix: "Bob Example",
"%s-promo_code" % prefix: promo_code.code,
}
form = forms.BaseDonationForm(prefix=prefix, data=data)
donation = form.save()
self.assertEqual(promo_code, donation.code)
def test_errors_if_more_than_two_digits_are_provided(self):
form = forms.BaseDonationForm(data={"amount": "100.123"})
self.assertFalse(form.is_valid(donation_only=True))
self.assertTrue("amount" in form.errors)
def test_donation_type_is_used_if_present(self):
random_type = self.random_type
form = forms.BaseDonationForm(data={
"amount": "100",
"name": "Bob Example",
"donation_type": random_type.name,
})
donation = form.save()
self.assertEqual(random_type, donation.donation_type)
def test_donation_type_works_with_prefixed_forms(self):
random_type = self.random_type
prefix = "random%d" % random.randint(1, 9)
form = forms.BaseDonationForm(prefix=prefix, data={
"%s-amount" % prefix: "100",
"%s-name" % prefix: "Bob Example",
"%s-donation_type" % prefix: random_type.name,
})
donation = form.save()
self.assertEqual(random_type, donation.donation_type)
def test_billing_address_form_is_a_donoraddressform(self):
f = forms.BaseDonationForm()
self.assertIsA(f.billing_address_form, forms.DonorAddressForm)
def test_mailing_address_form_is_aa_donoraddressform(self):
f = forms.BaseDonationForm()
self.assertIsA(f.mailing_address_form, forms.DonorAddressForm)
class CreditCardDonationFormTestCase(TestCase):
def setUp(self):
data = self.get_base_random_data()
self.donor = self.random_donor
data = self.get_base_random_data(name=self.donor.name)
self.amount = data["amount"]
self.donation_form = forms.CreditCardDonationForm(data)
self.card_number = data["card_number"]
self.ccv_code = data["ccv_code"]
self.expiration_month = data["expiration_month"]
self.expiration_year = data["expiration_year"]
# TODO: test get_data_for_charge directly
class DonorAddressFormsetTestCase(TestCase):
def test_has_two_forms_by_default(self):
formset = forms.DonorAddressFormset()
self.assertEqual(2, len(formset.forms))
|
Python
| 0.000016
|
@@ -50,16 +50,41 @@
stCase%0A%0A
+from .. import constants%0A
from ..
@@ -2306,16 +2306,1248 @@
sForm)%0A%0A
+ def test_is_valid_uses_mailing_address_form_by_default(self):%0A is_valid_true = fudge.Fake().provides(%22is_valid%22).returns(True)%0A is_valid_false = fudge.Fake().provides(%22is_valid%22).returns(False)%0A form = forms.BaseDonationForm(data=%7B%0A %22name%22: %22Foo%22,%0A %22amount%22: %2210.00%22,%0A %7D)%0A attrs = %5B%22billing_address_form%22, %22donor_form%22, %22mailing_address_form%22%5D%0A for attr in attrs:%0A setattr(form, attr, is_valid_true)%0A self.assertTrue(form.is_valid())%0A%0A form.mailing_address_form = is_valid_false%0A self.assertFalse(form.is_valid())%0A%0A def test_is_valid_ignores_mailing_if_same_checked(self):%0A is_valid_true = fudge.Fake().provides(%22is_valid%22).returns(True)%0A is_valid_false = fudge.Fake().provides(%22is_valid%22).returns(False)%0A form = forms.BaseDonationForm(data=%7B%0A %22name%22: %22Foo%22,%0A %22amount%22: %2210.00%22,%0A constants.MAILING_SAME_AS_BILLING: u%221%22,%0A %7D)%0A attrs = %5B%22billing_address_form%22, %22donor_form%22%5D%0A for attr in attrs:%0A setattr(form, attr, is_valid_true)%0A form.mailing_address_form = is_valid_false%0A self.assertFalse(form.is_valid())%0A%0A
%0Aclass C
|
5de02410d6a1b78b8ac5f2c6fed6d119a76565a5
|
Remove print statement
|
agenda/events/templatetags/calendar.py
|
agenda/events/templatetags/calendar.py
|
#
# Copyright (C) 2009 Novopia Solutions Inc.
#
# Author: Pierre-Luc Beaudoin <pierre-luc.beaudoin@nov
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
# -----
# This file is derived from http://www.djangosnippets.org/snippets/129/
# A code snipped that comes without licence information
from datetime import date, timedelta
from django import template
from agenda.events.models import Event
from django.db.models import Q
register = template.Library()
def get_last_day_of_month(year, month):
if (month == 12):
year += 1
month = 1
else:
month += 1
return date(year, month, 1) - timedelta(1)
def month_cal(year, month, region=None):
first_day_of_month = date(year, month, 1)
last_day_of_month = get_last_day_of_month(year, month)
first_day_of_calendar = (first_day_of_month
- timedelta(first_day_of_month.weekday()+1))
last_day_of_calendar = (last_day_of_month
+ timedelta(7 - last_day_of_month.weekday()))
print last_day_of_month.isoweekday()
today = date.today()
# Filter local events for given region, include national and
# international events
if region is not None:
q = Q(city__region=region)
else:
q = Q()
event_list = (Event.objects
.filter(start_time__gte=first_day_of_calendar)
.filter(end_time__lte=last_day_of_calendar)
.filter(moderated=True)
.filter(q))
month_cal = []
week = []
week_headers = []
i = 0
day = first_day_of_calendar
while day <= last_day_of_calendar:
if i < 7:
week_headers.append(day)
cal_day = {}
cal_day['day'] = day
cal_day['event'] = False
day_events = []
for event in event_list:
if day >= event.start_time.date() and day <= event.end_time.date():
day_events.append(event)
cal_day['events'] = day_events
cal_day['in_month'] = (day.month == month)
cal_day['is_past'] = (day < today)
cal_day['is_today'] = (day == today)
week.append(cal_day)
if day.weekday() == 5:
month_cal.append(week)
week = []
i += 1
day += timedelta(1)
return {'calendar': month_cal, 'headers': week_headers, 'region': region}
register.inclusion_tag('calendar.html')(month_cal)
|
Python
| 0.007015
|
@@ -1629,16 +1629,18 @@
()))%0A
+ #
print l
|
7847d22f95f44792e35108af24267161411c5bf1
|
Remove settings override with no effect
|
analytical/tests/test_tag_gosquared.py
|
analytical/tests/test_tag_gosquared.py
|
"""
Tests for the GoSquared template tags and filters.
"""
from django.contrib.auth.models import User, AnonymousUser
from django.http import HttpRequest
from django.template import Context
from django.test.utils import override_settings
from analytical.templatetags.gosquared import GoSquaredNode
from analytical.tests.utils import TagTestCase
from analytical.utils import AnalyticalException
@override_settings(GOSQUARED_SITE_TOKEN='ABC-123456-D')
class GoSquaredTagTestCase(TagTestCase):
"""
Tests for the ``gosquared`` template tag.
"""
def test_tag(self):
r = self.render_tag('gosquared', 'gosquared')
self.assertTrue('GoSquared.acct = "ABC-123456-D";' in r, r)
def test_node(self):
r = GoSquaredNode().render(Context({}))
self.assertTrue('GoSquared.acct = "ABC-123456-D";' in r, r)
@override_settings(GOSQUARED_SITE_TOKEN=None)
def test_no_token(self):
self.assertRaises(AnalyticalException, GoSquaredNode)
@override_settings(GOSQUARED_SITE_TOKEN='this is not a token')
def test_wrong_token(self):
self.assertRaises(AnalyticalException, GoSquaredNode)
@override_settings(ANALYTICAL_AUTO_IDENTIFY=True)
def test_auto_identify(self):
r = GoSquaredNode().render(Context({'user': User(username='test',
first_name='Test', last_name='User')}))
self.assertTrue('GoSquared.UserName = "Test User";' in r, r)
@override_settings(ANALYTICAL_AUTO_IDENTIFY=True)
def test_manual_identify(self):
r = GoSquaredNode().render(Context({
'user': User(username='test', first_name='Test', last_name='User'),
'gosquared_identity': 'test_identity',
}))
self.assertTrue('GoSquared.UserName = "test_identity";' in r, r)
@override_settings(ANALYTICAL_AUTO_IDENTIFY=True, USER_ID=None)
def test_identify_anonymous_user(self):
r = GoSquaredNode().render(Context({'user': AnonymousUser()}))
self.assertFalse('GoSquared.UserName = ' in r, r)
@override_settings(ANALYTICAL_INTERNAL_IPS=['1.1.1.1'])
def test_render_internal_ip(self):
req = HttpRequest()
req.META['REMOTE_ADDR'] = '1.1.1.1'
context = Context({'request': req})
r = GoSquaredNode().render(context)
self.assertTrue(r.startswith(
'<!-- GoSquared disabled on internal IP address'), r)
self.assertTrue(r.endswith('-->'), r)
|
Python
| 0
|
@@ -1840,22 +1840,8 @@
True
-, USER_ID=None
)%0A
|
8713f44fbd35f012ac7e01a64cffcfdf846fee9f
|
Remove a relative import that escaped test.test_importlib.
|
Lib/test/test_importlib/__init__.py
|
Lib/test/test_importlib/__init__.py
|
import os
import sys
from .. import support
import unittest
def test_suite(package=__package__, directory=os.path.dirname(__file__)):
suite = unittest.TestSuite()
for name in os.listdir(directory):
if name.startswith(('.', '__')):
continue
path = os.path.join(directory, name)
if (os.path.isfile(path) and name.startswith('test_') and
name.endswith('.py')):
submodule_name = os.path.splitext(name)[0]
module_name = "{0}.{1}".format(package, submodule_name)
__import__(module_name, level=0)
module_tests = unittest.findTestCases(sys.modules[module_name])
suite.addTest(module_tests)
elif os.path.isdir(path):
package_name = "{0}.{1}".format(package, name)
__import__(package_name, level=0)
package_tests = getattr(sys.modules[package_name], 'test_suite')()
suite.addTest(package_tests)
else:
continue
return suite
def test_main():
start_dir = os.path.dirname(__file__)
top_dir = os.path.dirname(os.path.dirname(start_dir))
test_loader = unittest.TestLoader()
support.run_unittest(test_loader.discover(start_dir, top_level_dir=top_dir))
|
Python
| 0
|
@@ -23,10 +23,12 @@
rom
-..
+test
imp
|
4d597a8e71c0020b0f1d36e1cca64b5a353b0643
|
modify version
|
Abe/version.py
|
Abe/version.py
|
__version__ = '0.8pre'
|
Python
| 0
|
@@ -15,9 +15,7 @@
'0.8
-pre
+8
'%0A
|
8c7abe561cd95331fef17b4fd1c7fe67386826a2
|
change the input url
|
AddDataTest.py
|
AddDataTest.py
|
__author__ = 'chuqiao'
import EventsPortal
from datetime import datetime
import logging
def logger():
"""
Function that initialises logging system
"""
global logger
# create logger with 'syncsolr'
logger = logging.getLogger('adddata')
logger.setLevel(logging.DEBUG)
# specifies the lowest severity that will be dispatched to the appropriate destination
# create file handler which logs even debug messages
fh = logging.FileHandler('adddata.log')
# fh.setLevel(logging.WARN)
# create console handler and set level to debug
ch = logging.StreamHandler()
# StreamHandler instances send messages to streams
# ch.setLevel(logging.DEBUG)
# create formatter and add it to the handlers
formatter = logging.Formatter('%(asctime)s - %(name)s - %(levelname)s - %(message)s')
fh.setFormatter(formatter)
ch.setFormatter(formatter)
# add the handlers to the logger
logger.addHandler(ch)
logger.addHandler(fh)
# EventsPortal.addDataToSolrFromUrl("http://www.elixir-europe.org:8080/events", "http://www.elixir-europe.org:8080/events");
logger()
logger.info('start at %s' % datetime.now())
# EventsPortal.addDataToSolrFromUrl("http://bioevents-portal.org/eventsfull/test?state=published&field_type_tid=All", "http://bioevents-portal.org/events","http://139.162.217.53:8983/solr/eventsportal");
# EventsPortal.addDataToSolrFromUrl("http://bioevents-portal.org/eventsfull", "http://bioevents-portal.org/events","http://139.162.217.53:8983/solr/eventsportal");
# EventsPortal.addDataToSolrFromUrl("http://bioevents-portal.org/eventsfull/upcoming?state=published&field_type_tid=All", "http://bioevents-portal.org/events","http://139.162.217.53:8983/solr/eventsportal");
# EventsPortal.addDataToSolrFromUrl("http://bioevents-portal.org/eventsfull", "http://bioevents-portal.org/events","http://localhost:8983/solr/event_portal");
EventsPortal.addDataToSolrFromUrl("http://bioevents-portal.org/eventsfull/test?state=published&field_type_tid=All", "http://bioevents-portal.org/events","localhost:8983/solr/event_portal")
logger.info('finish at %s' % datetime.now())
if __name__ == '__main__':
EventsPortal.addDataToSolrFromUrl("http://bioevents-portal.org/eventsfull/test?state=published&field_type_tid=All", "http://bioevents-portal.org/events","localhost:8983/solr/event_portal")
|
Python
| 0.999997
|
@@ -2054,33 +2054,38 @@
rg/events%22,%22
-localhost
+139.162.217.53
:8983/solr/e
@@ -2080,33 +2080,33 @@
:8983/solr/event
-_
+s
portal%22)%0Alogger.
@@ -2332,25 +2332,30 @@
vents%22,%22
-localhost
+139.162.217.53
:8983/so
@@ -2354,26 +2354,26 @@
:8983/solr/event
-_
+s
portal%22)%0A
|
d98cdb7eae40b5bb11b5d1fc0eacc35ef6bf310d
|
Order filter for report page
|
wye/reports/views.py
|
wye/reports/views.py
|
from django.shortcuts import render
from django.contrib.auth.decorators import login_required
from wye.organisations.models import Organisation
from wye.workshops.models import Workshop
from wye.profiles.models import Profile
import datetime
from wye.base.constants import WorkshopStatus
@login_required
def index(request, days):
print(request.user.is_staff)
if not request.user.is_staff:
return ""
d = datetime.datetime.now() - datetime.timedelta(days=int(days))
organisations = Organisation.objects.filter(
active=True).filter(created_at__gte=d)
workshops = Workshop.objects.filter(
is_active=True).filter(
expected_date__gte=d).filter(
expected_date__lt=datetime.datetime.now()).filter(
status__in=[WorkshopStatus.COMPLETED,
WorkshopStatus.FEEDBACK_PENDING])
profiles = Profile.objects.filter(user__date_joined__gte=d)
no_of_participants = sum([w.no_of_participants for w in workshops])
template_name = 'reports/index.html'
context_dict = {}
context_dict['organisations'] = organisations
context_dict['workshops'] = workshops
context_dict['profiles'] = profiles
context_dict['no_of_participants'] = no_of_participants
context_dict['date'] = d
workshops = Workshop.objects.filter(
is_active=True)
return render(request, template_name, context_dict)
|
Python
| 0
|
@@ -844,16 +844,42 @@
ENDING%5D)
+.order_by('expected_date')
%0A pro
|
0403ba1bcbc1089f42cdb0ed7a37439363f4e963
|
Fix bug
|
reid/utils/data/dataset.py
|
reid/utils/data/dataset.py
|
from __future__ import print_function
import os.path as osp
import numpy as np
from ..serialization import read_json
def _pluck(identities, indices, relabel=False):
ret = []
for index, pid in enumerate(indices):
pid_images = identities[pid]
for camid, cam_images in enumerate(pid_images):
for fname in cam_images:
name = osp.splitext(fname)[0]
x, y, _ = map(int, name.split('_'))
assert pid == x and camid == y
if relabel:
ret.append((fname, index, camid))
else:
ret.append((fname, pid, camid))
return ret
class Dataset(object):
def __init__(self, root, split_id=0):
self.root = root
self.split_id = split_id
self.meta = None
self.split = None
self.train, self.val, self.trainval = [], [], []
self.query, self.gallery = [], []
self.num_train_ids, self.num_val_ids, self.num_trainval_ids = 0, 0, 0
@property
def images_dir(self):
return osp.join(self.root, 'images')
def load(self, num_val=0.3, verbose=True):
splits = read_json(osp.join(self.root, 'splits.json'))
if self.split_id >= len(splits):
raise ValueError("split_id exceeds total splits {}"
.format(len(splits)))
self.split = splits[self.split_id]
# Randomly split train / val
trainval_pids = np.asarray(self.split['trainval'])
np.random.shuffle(trainval_pids)
num = len(trainval_pids)
if isinstance(num_val, float):
num_val = int(round(num * num_val))
if num_val >= num or num_val < 0:
raise ValueError("num_val exceeds total identities {}"
.format(num))
train_pids = sorted(trainval_pids[:-num_val])
val_pids = sorted(trainval_pids[-num_val:])
self.meta = read_json(osp.join(self.root, 'meta.json'))
identities = self.meta['identities']
self.train = _pluck(identities, train_pids, relabel=True)
self.val = _pluck(identities, val_pids, relabel=True)
self.trainval = _pluck(identities, trainval_pids, relabel=True)
self.query = _pluck(identities, self.split['query'])
self.gallery = _pluck(identities, self.split['gallery'])
self.num_train_ids = len(train_pids)
self.num_val_ids = len(val_pids)
self.num_trainval_ids = len(trainval_pids)
if verbose:
print(self.__class__.__name__, "dataset loaded")
print(" subset | # ids | # images")
print(" ---------------------------")
print(" train | {:5d} | {:8d}"
.format(self.num_train_ids, len(self.train)))
print(" val | {:5d} | {:8d}"
.format(self.num_val_ids, len(self.val)))
print(" trainval | {:5d} | {:8d}"
.format(self.num_trainval_ids, len(self.val)))
print(" query | {:5d} | {:8d}"
.format(len(self.split['query']), len(self.query)))
print(" gallery | {:5d} | {:8d}"
.format(len(self.split['gallery']), len(self.gallery)))
def _check_integrity(self):
return osp.isdir(osp.join(self.root, 'images')) and \
osp.isfile(osp.join(self.root, 'meta.json')) and \
osp.isfile(osp.join(self.root, 'splits.json'))
|
Python
| 0.000001
|
@@ -3000,32 +3000,37 @@
l_ids, len(self.
+train
val)))%0A
|
de51591761c8a079ca5f594aa3e302ec9a37d38e
|
Update REMINDERS.py
|
Misc/REMINDERS.py
|
Misc/REMINDERS.py
|
#Python reminders
# Collapse the list for output
''.join(list)
#Flatten list of lists
flat_list = [item for subitem in lists for item in subitem]
#Exammple for checking list for single digit (string)
def checker(string):
print("Boom!" if "7" in str(string) else "there is no 7 in the list")
checker([2, 55, 60, 97, 86, 3, 4, 6, 6])
#Scan list for longest string and print max item
print(max(len(i) for i in string.split()))
#Search for highest index of substring
string.rindex(substring) -- If not found, raises exception
string.rfind(substring) -- If not found, returns -1
#Dont forget min and max
max(x) for largest
min(x) for smallest
#Count occurrences of item in list
[a,b,c,d,e].count(item)
sum([x.count(letter) for x in lst])
#Count occurrences of list 1 items in list 2
value = len([i for i in list2 if i in list1 ])
#Put anything in square brackets to search list index
a[0]
#Reverse list of ints by converting to string and list reversing
str(num)[::-1]
#Strange list rotation
def rotate_left3(nums):
a = nums[0]
nums[0] = nums[1]
nums[1] = nums[2]
nums[2] = a
return nums -> [1,2,3] = [2,3,1]
#Evaluator positioning
if someting:
return (item 1 >= 10)
else:
return (item > 1 and item <= 5)
#One line conditionals
#get item index from lst, not there, return -1
return lst.index(item) if item in lst else -1
#Use F strings
x = "hello"
w = "world"
print(f"{x} {w}")
#Use Swapcase to flip lowercase and uppercase
string.swapcase()
#List comprehensions
#Reverse all words >= 5 letters and print out the modified sentence
[word[::-1] if len(word) >= 5 else word for word in words]
#Create a dictionary of letters and their frequency of occurrence in a string
{i : message.count(i) for i in set(message)}
#List all indexes of a specified value
def get_indicies(lst, el):
[i for i in range(len(lst)) if lst[i] == el]
#Edabit solution for odd occurrence of item in list
[i for i in lst if lst.count(i) % 2 > 0]
#Only return the integers in a list
[x for x in l if type(x) == int]
#Flatten list of lists
flat_list = [item for subitem in lists for item in subitem]
#Split int into list of digits
split_number = [int(d) for d in str(n)]
#Dictionary values for item
value = [str(dictionary[i]) for i in string]
#list character index from left to right, right to left
[mylist.index(char), mylist.rindex(char)]
#Letter replacements/translations
def correct(string):
return string.translate(str.maketrans("501", "SOI"))
#Collapse multiple inputs into one
#from this
def predict_age(age_1, age_2, age_3, age_4, age_5, age_6, age_7, age_8):
#to this
def predict_age(*ages)
#Remove char
def remove_exclamation_marks(s):
return s.replace('!', '')
#Remove duplicate words, keep index position
def remove_duplicate_words(s):
return ' '.join(sorted(set(s.split()), key = s.index))
#Quick Lambda implementation on multiple args - with f"string"
def newvibe(a,b,c):
var1 = lambda x: (x * 5)
print(f"This is a: {var1(a)}, This is b: {var1(b)}, This is c: {var1(c)}")
|
Python
| 0
|
@@ -735,16 +735,161 @@
lst%5D)%0A%0A
+#Count Sublists in list%0Areturn str(lst).count('%5B') - 1%0Areturn sum(1 for i in lst if type(i)==list)%0Areturn sum(isinstance(i, list) for i in lst)%0A%0A
#Count o
|
af669e8ab8a502505389adf63e0d5216765fdba4
|
fix exclude events
|
viradacultural_social_api/views.py
|
viradacultural_social_api/views.py
|
from rest_framework import viewsets
from rest_framework import generics
from rest_framework.views import APIView
from rest_framework.response import Response
from .models import FbUser, Event
from .serializer import FbUserSerializer
from rest_framework import permissions
import facebook
class MinhaViradaView(APIView):
permission_classes = (permissions.AllowAny,)
def get(self, request):
fb_user_uid = request.query_params.get('uid')
if fb_user_uid:
try:
fb_user = FbUser.objects.get(uid=fb_user_uid)
serializer = FbUserSerializer(fb_user)
return Response(serializer.data)
except FbUser.DoesNotExist:
return Response('{}')
else:
return Response('{}')
def post(self, request):
fb_user_uid = request.data.get('uid')
data = request.data
data['fb_user_uid'] = fb_user_uid
if fb_user_uid:
fb_user, _ = FbUser.objects.get_or_create(uid=fb_user_uid)
serializer = FbUserSerializer(fb_user, data=data)
if serializer.is_valid():
serializer.save()
events = request.data.get('events')
for event in events:
Event.objects.get_or_create(event_id=event, fb_user=fb_user)
return Response('{status: success}')
else:
return Response('{status: fail}')
# class FriendsOnEventViewSet(APIView):
# """
# API endpoint that allows users to be viewed or edited.
# """
# queryset = FbUser.objects.all()
# serializer_class = UserSerializer
#
# def get(self, request, *args, **kwargs):
# fb_user_id = request.query_params('uid')
# oauth_access_token = request.query_params('fbtoken')
# graph = facebook.GraphAPI(oauth_access_token)
# profile = graph.get_object("me")
# friends = graph.get_connections("me", "friends")
#
#
# class FriendsPositionsView(viewsets.ModelViewSet):
# """
# API endpoint that allows users to be viewed or edited.
# """
# queryset = FbUser.objects.all()
# serializer_class = UserSerializer
#
# def get(self, request, *args, **kwargs):
# fb_user_id = request.query_params('uid')
# oauth_access_token = request.query_params('fbtoken')
# graph = facebook.GraphAPI(oauth_access_token)
# profile = graph.get_object("me")
# friends = graph.get_connections("me", "friends")
# from django.contrib.gis.geos import fromstr
# pnt = fromstr('POINT(-90.5 29.5)', srid=4326)
|
Python
| 0.000024
|
@@ -728,21 +728,19 @@
sponse('
-%7B%7D
')%0A
+
@@ -778,10 +778,8 @@
se('
-%7B%7D
')%0A%0A
@@ -923,16 +923,55 @@
ser_uid%0A
+ # import ipdb;ipdb.set_trace()%0A
@@ -1243,16 +1243,52 @@
vents')%0A
+ events_objects = %5B%5D%0A
@@ -1336,16 +1336,38 @@
+events_objects.append(
Event.ob
@@ -1410,32 +1410,101 @@
fb_user=fb_user)
+)%0A Event.objects.exclude(event_id__in=events).delete()
%0A ret
|
10f7938e37180c0cb3b701223cf6d1855e7d8f93
|
Drop python_2_unicode_compatible for Settings, fix docs build on rtfd
|
watchdog_kj_kultura/main/models.py
|
watchdog_kj_kultura/main/models.py
|
from django.db import models
from django.utils.translation import ugettext_lazy as _
from django.utils.encoding import python_2_unicode_compatible
from model_utils.models import TimeStampedModel
from tinymce.models import HTMLField
from django.contrib.sites.models import Site
class SettingsQuerySet(models.QuerySet):
pass
@python_2_unicode_compatible
class Settings(TimeStampedModel):
site = models.OneToOneField(Site, verbose_name=_("Site"))
home_content = HTMLField(verbose_name=_("Content of home page"))
objects = SettingsQuerySet.as_manager()
class Meta:
verbose_name = _("Settings")
verbose_name_plural = _("Settings")
ordering = ['created', ]
|
Python
| 0
|
@@ -82,70 +82,8 @@
s _%0A
-from django.utils.encoding import python_2_unicode_compatible%0A
from
@@ -266,37 +266,8 @@
s%0A%0A%0A
-@python_2_unicode_compatible%0A
clas
|
c36ba274119b2c8596b7ec95bab9763a3842df66
|
remove more run_as root
|
speedydeploy/deployment.py
|
speedydeploy/deployment.py
|
# -*- coding: utf-8 -*-
from __future__ import with_statement
import inspect
import os
import sys
import time
from fabric import api as fab
from fabric.contrib import files as fab_files
from fab_deploy.system import ssh_add_key
from fab_deploy.utils import run_as
from taskset import TaskSet, task
from base import _, OS, Debian, Ubuntu, Ubuntu104, Daemon, CommandNamespace
from utils import upload_template, upload_first
def command(func=None, namespace=None, same_name=False, aliases=()):
def decorator(view_func):
#@wraps(view_func)
def f(self, *args, **kwargs):
ns_obj = getattr(fab.env, namespace, None)
if ns_obj is None:
ns_obj = CommandNamespace.get(namespace)()
return getattr(ns_obj, view_func.__name__)(*args, **kwargs)
attr_name = '_'.join(filter(None, (namespace,
view_func.__name__)))
setattr(Deployment, attr_name, f)
if same_name:
setattr(Deployment, view_func.__name__, f)
for alias in aliases:
setattr(Deployment, alias, f)
return view_func
if namespace is None:
#XXX need more clever solution
namespace = inspect.stack()[1][0].f_locals['namespace']
if func:
return decorator(func)
return decorator
class Deployment(TaskSet):
def __init__(self):
self.expose_to(self.__module__)
def _is_task(self, func):
return inspect.ismethod(func) and not func.func_name.startswith('_')
def _task_for_method(self, method):
return method
def ssh_add_key(self, pub_key_file):
""" Adds a ssh key from passed file to user's
authorized_keys on server. """
with open(os.path.normpath(pub_key_file), 'rt') as f:
ssh_key = f.read()
if fab.env.user == 'root':
ssh_dir = '/root/.ssh'
else:
if 'home_dir' in fab.env:
ssh_dir = _('%(home_dir)s/.ssh')
else:
ssh_dir = _('/home/%(user)s/.ssh')
remote_os = fab.env.os
remote_os.mkdir(ssh_dir)
fab_files.append('%s/authorized_keys' % ssh_dir, ssh_key)
with fab.settings(warn_only=True): # no chmod in system
remote_os.set_permissions(ssh_dir, pattern='700')
remote_os.set_permissions('%s/authorized_keys' % ssh_dir,
pattern='600')
@run_as('root')
def update_rsa_key(self, pub_key_file):
""" Root adds a ssh key from passed file to user's
authorized_keys on server."""
self.ssh_add_key(pub_key_file)
@run_as('root')
def os_add_user(self):
fab.env.os.add_user(fab.env.user)
def add_deploy_key(self):
fab.run('ssh-keygen -q')
output = fab.run('cat ~/.ssh/id_rsa.pub')
fab.local('echo %s > deploy_key' % output)
@run_as('root')
def add_user(self):
self.os_add_user()
self.update_rsa_key()
def backup(self):
fab.env.db.backup()
fab.env.project.backup()
def update_env(self):
fab.env.project.install()
if 'provider' in fab.env and fab.env.provider.can_addpackages:
fab.env.project.install_development_libraries()
fab.env.project.install_setuptools()
fab.env.project.install_virtualenv()
def create_virtual_env(self):
self.update_env()
self.create_env()
self.deploy()
self.update_virtual_env()
def create(self, key=None):
if 'provider' in fab.env and fab.env.provider.can_adduser:
if key:
self.update_rsa_key(key) # for root
self.os_add_user()
#self.os_add_group('speedydeploy')
#self.os_add_user_to_group(fab.env.user, 'speedydeploy')
if key:
self.ssh_add_key(key)
if 'db' in fab.env:
self.db_create_user(fab.env.user, fab.env.db_pass)
self.db_create_db(fab.env.user, fab.env.user, fab.env.db_pass)
self.create_virtual_env()
# need setting for this
fab.run(_('echo root > %(remote_dir)s/.forward'))
def update(self):
project = fab.env.project
if project.use_django:
project.django.reload()
if project.use_celery:
self.celery_configure()
if project.use_sphinxsearch:
self.sphinxsearch_configure()
if project.use_memcache:
self.memcache_configure()
if project.use_server:
self.server_configure()
self.server_reload()
|
Python
| 0
|
@@ -2660,36 +2660,16 @@
_file)%0A%0A
- @run_as('root')%0A
def
@@ -2895,36 +2895,16 @@
utput)%0A%0A
- @run_as('root')%0A
def
@@ -3870,24 +3870,87 @@
d_key(key)%0A%0A
+ fab.run(_('echo root %3E %25(remote_dir)s/.forward'))%0A%0A
if '
@@ -4143,99 +4143,8 @@
()%0A%0A
- # need setting for this%0A fab.run(_('echo root %3E %25(remote_dir)s/.forward'))%0A%0A
|
5f8580e9d28d08e13f40692a2247f41ea8c5b4b9
|
Remove extra newline.
|
tests/test_additional_io.py
|
tests/test_additional_io.py
|
__author__ = "John Kirkham <kirkhamj@janelia.hhmi.org>"
__date__ = "$Jul 28, 2014 11:50:37 EDT$"
import nanshe.nanshe.additional_io
class TestAdditionalIO(object):
num_files = 10
def setup(self):
import tempfile
self.temp_dir = tempfile.mkdtemp()
self.temp_files = []
for i in xrange(TestAdditionalIO.num_files):
self.temp_files.append(tempfile.NamedTemporaryFile(suffix = ".tif", dir = self.temp_dir))
self.temp_files.sort(cmp = lambda a, b: 2*(a.name > b.name) - 1)
def test_expand_pathname_list(self):
import itertools
matched_filenames = nanshe.nanshe.additional_io.expand_pathname_list(self.temp_dir + "/*.tif")
matched_filenames.sort(cmp = lambda a, b: 2*(a > b) - 1)
assert(len(matched_filenames) == len(self.temp_files))
for each_l, each_f in itertools.izip(matched_filenames, self.temp_files):
assert(each_l == each_f.name)
def teardown(self):
import shutil
for i in xrange(len(self.temp_files)):
self.temp_files[i].close()
self.temp_files = []
shutil.rmtree(self.temp_dir)
self.temp_dir = ""
|
Python
| 0
|
@@ -1186,9 +1186,8 @@
ir = %22%22%0A
-%0A
|
db06f101a07f0e122c35e8b662ed4fe6a26c4811
|
Add SkipTest to test_content_disposition
|
tests/test_awss3_storage.py
|
tests/test_awss3_storage.py
|
import os
import uuid
import mock
import requests
from nose import SkipTest
from depot._compat import PY2
S3Storage = None
FILE_CONTENT = b'HELLO WORLD'
class TestS3FileStorage(object):
def setup(self):
try:
global S3Storage
from depot.io.awss3 import S3Storage
except ImportError:
raise SkipTest('Boto not installed')
env = os.environ
access_key_id = env.get('AWS_ACCESS_KEY_ID')
secret_access_key = env.get('AWS_SECRET_ACCESS_KEY')
if access_key_id is None or secret_access_key is None:
raise SkipTest('Amazon S3 credentials not available')
PID = os.getpid()
NODE = str(uuid.uuid1()).rsplit('-', 1)[-1] # Travis runs multiple tests concurrently
self.default_bucket_name = 'filedepot-%s' % (access_key_id.lower(), )
self.cred = (access_key_id, secret_access_key)
self.fs = S3Storage(access_key_id, secret_access_key,
'filedepot-testfs-%s-%s-%s' % (access_key_id.lower(), NODE, PID))
def test_fileoutside_depot(self):
fid = str(uuid.uuid1())
key = self.fs._bucket_driver.new_key(fid)
key.set_contents_from_string(FILE_CONTENT)
f = self.fs.get(fid)
assert f.read() == FILE_CONTENT
def test_invalid_modified(self):
fid = str(uuid.uuid1())
key = self.fs._bucket_driver.new_key(fid)
key.set_metadata('x-depot-modified', 'INVALID')
key.set_contents_from_string(FILE_CONTENT)
f = self.fs.get(fid)
assert f.last_modified is None, f.last_modified
def test_creates_bucket_when_missing(self):
with mock.patch('boto.s3.connection.S3Connection.lookup', return_value=None):
with mock.patch('boto.s3.connection.S3Connection.lookup',
return_value='YES') as mock_create:
fs = S3Storage(*self.cred)
mock_create.assert_called_with(self.default_bucket_name)
def test_default_bucket_name(self):
with mock.patch('boto.s3.connection.S3Connection.lookup', return_value='YES'):
fs = S3Storage(*self.cred)
assert fs._bucket_driver.bucket == 'YES'
def test_public_url(self):
fid = str(uuid.uuid1())
key = self.fs._bucket_driver.new_key(fid)
key.set_contents_from_string(FILE_CONTENT)
f = self.fs.get(fid)
assert '.s3.amazonaws.com' in f.public_url, f.public_url
assert f.public_url.endswith('/%s' % fid), f.public_url
def test_content_disposition(self):
if not PY2:
return
file_id = self.fs.create(b'content', u'test.txt', 'text/plain')
test_file = self.fs.get(file_id)
response = requests.get(test_file.public_url)
assert response.headers['Content-Disposition'] == "inline;filename=test.txt;filename*=utf-8''test.txt"
def teardown(self):
keys = [key.name for key in self.fs._bucket_driver.bucket]
if keys:
self.fs._bucket_driver.bucket.delete_keys(keys)
try:
self.fs._conn.delete_bucket(self.fs._bucket_driver.bucket.name)
except:
pass
|
Python
| 0.000001
|
@@ -2613,21 +2613,59 @@
r
-eturn
+aise SkipTest('Test is for Python2.X only')
%0A
|
1b11207f40d956267ae046bc51313a3cccc4776a
|
Fix typo in cdl filename
|
cdl_parser.py
|
cdl_parser.py
|
#!/usr/bin/env python
"""
Class to load and parse Common Data Language (CDL) files and
tokenize the dimensions and variables
Written by Brian Powell on 04/30/13
Copyright (c)2013 University of Hawaii under the BSD-License.
"""
from __future__ import print_function
import datetime
import re
def cdl_parser(filename):
"""
Given a netcdf-compliant CDL file, parse it to determine the structure:
dimensions, variables, attributes, and global attributes
Parameters
----------
filename : string
name and path of CDL file to parse
Returns
-------
dims, vars, attr: dict
dictionaries description dimensions, variables, and attributes
"""
dim_pat=re.compile(r"\s*(\w+)\s*=\s*(\w*)\s*;")
var_pat=re.compile(r"\s*(\w+)\s*(\w+)\({0,1}([\w\s,]*)\){0,1}\s*;")
attr_pat=re.compile(r"\s*(\w+):(\w+)\s*=\s*\"*([^\"]*)\"*\s*;")
global_attr_pat=re.compile(r"\s*:(\w+)\s*=\s*\"*([^\"]*)\"*\s*;")
mode=None
dims=dict()
attr=dict()
vars=list()
vcount=dict()
types={"float":"f4", "double":"f8", "short":"i2", "int":"i4", "char":"S1"}
for line in open(file, 'r'):
# Check if this is a dimension definition line. If it is, add
# the dimension to the definition
parser = dim_pat.match(line)
if parser != None:
tokens = parser.groups()
if tokens[1].upper() == "UNLIMITED":
dims[tokens[0]]=0
else:
dims[tokens[0]]=int(tokens[1])
continue
# Check if this is a variable definition line. If it is, add
# the variable to the definition
parser = var_pat.match(line)
if parser != None:
tokens = parser.groups()
nvar = { "name": tokens[1],
"type": types[tokens[0]],
"dims": tokens[2].strip().split(", ") }
vars.append(nvar)
vcount[tokens[1]]=len(vars)-1
continue
# If this is an attribute, add the info to the appropriate variable
parser = attr_pat.match(line)
if parser != None:
tokens = parser.groups()
if "attr" not in vars[vcount[tokens[0]]]:
vars[vcount[tokens[0]]]["attr"] = dict()
vars[vcount[tokens[0]]]["attr"][tokens[1]] = tokens[2]
continue
# If this is a global attribute, add the info
parser = global_attr_pat.match(line)
if parser != None:
tokens = parser.groups()
attr[tokens[0]]=tokens[1]
continue
return dims, vars, attr
if __name__ == "__main__":
cdl_parser("out.cdl")
|
Python
| 0.997939
|
@@ -1149,16 +1149,20 @@
pen(file
+name
, 'r'):%0A
|
dfea10193f2aebca4b03818db5280106aef75d72
|
make copy take a slice of the memory vs the ref to the memory array
|
mythril/laser/ethereum/state/memory.py
|
mythril/laser/ethereum/state/memory.py
|
"""This module contains a representation of a smart contract's memory."""
from typing import cast, List, Union, overload
from z3 import Z3Exception
from mythril.laser.ethereum import util
from mythril.laser.smt import (
BitVec,
Bool,
Concat,
Extract,
If,
simplify,
symbol_factory,
)
class Memory:
"""A class representing contract memory with random access."""
def __init__(self):
""""""
self._memory = [] # type: List[Union[int, BitVec]]
def __len__(self):
"""
:return:
"""
return len(self._memory)
def extend(self, size):
"""
:param size:
"""
self._memory.extend(bytearray(size))
def get_word_at(self, index: int) -> Union[int, BitVec]:
"""Access a word from a specified memory index.
:param index: integer representing the index to access
:return: 32 byte word at the specified index
"""
try:
return symbol_factory.BitVecVal(
util.concrete_int_from_bytes(
bytes([util.get_concrete_int(b) for b in self[index : index + 32]]),
0,
),
256,
)
except TypeError:
result = simplify(
Concat(
[
b if isinstance(b, BitVec) else symbol_factory.BitVecVal(b, 8)
for b in cast(
List[Union[int, BitVec]], self[index : index + 32]
)
]
)
)
assert result.size() == 256
return result
def write_word_at(self, index: int, value: Union[int, BitVec, bool, Bool]) -> None:
"""Writes a 32 byte word to memory at the specified index`
:param index: index to write to
:param value: the value to write to memory
"""
try:
# Attempt to concretize value
if isinstance(value, bool):
_bytes = (
int(1).to_bytes(32, byteorder="big")
if value
else int(0).to_bytes(32, byteorder="big")
)
else:
_bytes = util.concrete_int_to_bytes(value)
assert len(_bytes) == 32
self[index : index + 32] = list(bytearray(_bytes))
except (Z3Exception, AttributeError): # BitVector or BoolRef
value = cast(Union[BitVec, Bool], value)
if isinstance(value, Bool):
value_to_write = If(
value,
symbol_factory.BitVecVal(1, 256),
symbol_factory.BitVecVal(0, 256),
)
else:
value_to_write = value
assert value_to_write.size() == 256
for i in range(0, value_to_write.size(), 8):
self[index + 31 - (i // 8)] = Extract(i + 7, i, value_to_write)
@overload
def __getitem__(self, item: int) -> Union[int, BitVec]:
...
@overload
def __getitem__(self, item: slice) -> List[Union[int, BitVec]]:
...
def __getitem__(
self, item: Union[int, slice]
) -> Union[BitVec, int, List[Union[int, BitVec]]]:
"""
:param item:
:return:
"""
if isinstance(item, slice):
start, step, stop = item.start, item.step, item.stop
if start is None:
start = 0
if stop is None: # 2**256 is just a bit too big
raise IndexError("Invalid Memory Slice")
if step is None:
step = 1
return [cast(Union[int, BitVec], self[i]) for i in range(start, stop, step)]
try:
return self._memory[item]
except IndexError:
return 0
def __setitem__(
self,
key: Union[int, slice],
value: Union[BitVec, int, List[Union[int, BitVec]]],
):
"""
:param key:
:param value:
"""
if isinstance(key, slice):
start, step, stop = key.start, key.step, key.stop
if start is None:
start = 0
if stop is None:
raise IndexError("Invalid Memory Slice")
if step is None:
step = 1
assert type(value) == list
for i in range(0, stop - start, step):
self[start + i] = cast(List[Union[int, BitVec]], value)[i]
else:
if isinstance(value, int):
assert 0 <= value <= 0xFF
if isinstance(value, BitVec):
assert value.size() == 8
self._memory[key] = cast(Union[int, BitVec], value)
|
Python
| 0.000002
|
@@ -589,16 +589,124 @@
emory)%0A%0A
+ def __copy__(self):%0A copy = Memory()%0A copy._memory = self._memory%5B:%5D%0A return copy%0A%0A
def
|
10e2638b8bafb15f66deb4a9ca442593a0ab52f2
|
test fix
|
tests/test_core/test_ffi.py
|
tests/test_core/test_ffi.py
|
from textwrap import dedent
from os.path import dirname, join
try:
from unittest.mock import Mock, patch
except ImportError:
from mock import Mock, patch
from cffi import FFI
from pywincffi.core.ffi import Library
from pywincffi.core.testutil import TestCase
from pywincffi.exceptions import ResourceNotFoundError
class TestFFI(TestCase):
"""
Tests the ``pywinffi.core.ffi.ffi`` global.
"""
def test_unicode(self):
ffi, _ = Library.load()
self.assertTrue(ffi._windows_unicode)
def test_instance(self):
ffi, _ = Library.load()
self.assertIsInstance(ffi, FFI)
class TestSourcePaths(TestCase):
"""
Tests for ``pywincffi.core.ffi.Library.[HEADERS|SOURCES]``
"""
def test_sources_exist(self):
for path in Library.SOURCES:
try:
with open(path, "r"):
pass
except (OSError, IOError, WindowsError) as error:
self.fail("Failed to load %s: %s" % (path, error))
def test_headers_exist(self):
for path in Library.HEADERS:
try:
with open(path, "r"):
pass
except (OSError, IOError, WindowsError) as error:
self.fail("Failed to load %s: %s" % (path, error))
class TestLibraryLoad(TestCase):
"""
Tests for ``pywincffi.core.ffi.Library.load``
"""
def setUp(self):
self._cache = Library.CACHE
Library.CACHE = (None, None)
def tearDown(self):
Library.CACHE = self._cache
def test_header_not_found(self):
with patch.object(Library, "HEADERS", ("foobar", )):
with self.assertRaises(ResourceNotFoundError):
Library.load()
def test_loads_library(self):
fake_header = dedent("""
#define HELLO_WORLD 42
""")
with patch.object(Library, "_load_files", return_value=fake_header):
ffi, library = Library.load()
self.assertEqual(library.HELLO_WORLD, 42)
def test_caches_library(self):
self.assertIsNone(Library.CACHE)
fake_header = dedent("""
#define HELLO_WORLD 42
""")
with patch.object(Library, "_load_files", return_value=fake_header):
ffi1, lib1 = Library.load()
ffi2, lib2 = Library.load()
self.assertIs(ffi1, ffi2)
self.assertIs(lib1, lib2)
|
Python
| 0.000001
|
@@ -2059,50 +2059,8 @@
f):%0A
- self.assertIsNone(Library.CACHE)%0A%0A
|
78c3e6b7b3a12d74416f43995d50b04bdb817f0b
|
Fix test failure
|
tests/test_pkg_resources.py
|
tests/test_pkg_resources.py
|
import sys
import tempfile
import os
import zipfile
import pkg_resources
try:
unicode
except NameError:
unicode = str
class EggRemover(unicode):
def __call__(self):
if self in sys.path:
sys.path.remove(self)
if os.path.exists(self):
os.remove(self)
class TestZipProvider(object):
finalizers = []
@classmethod
def setup_class(cls):
"create a zip egg and add it to sys.path"
egg = tempfile.NamedTemporaryFile(suffix='.egg', delete=False)
zip_egg = zipfile.ZipFile(egg, 'w')
zip_info = zipfile.ZipInfo()
zip_info.filename = 'mod.py'
zip_info.date_time = 2013, 5, 12, 13, 25, 0
zip_egg.writestr(zip_info, 'x = 3\n')
zip_info = zipfile.ZipInfo()
zip_info.filename = 'data.dat'
zip_info.date_time = 2013, 5, 12, 13, 25, 0
zip_egg.writestr(zip_info, 'hello, world!')
zip_egg.close()
egg.close()
sys.path.append(egg.name)
cls.finalizers.append(EggRemover(egg.name))
@classmethod
def teardown_class(cls):
for finalizer in cls.finalizers:
finalizer()
def test_resource_filename_rewrites_on_change(self):
"""
If a previous call to get_resource_filename has saved the file, but
the file has been subsequently mutated with different file of the
same size and modification time, it should not be overwritten on a
subsequent call to get_resource_filename.
"""
import mod
manager = pkg_resources.ResourceManager()
zp = pkg_resources.ZipProvider(mod)
filename = zp.get_resource_filename(manager, 'data.dat')
assert os.stat(filename).st_mtime == 1368379500
f = open(filename, 'wb')
f.write('hello, world?')
f.close()
os.utime(filename, (1368379500, 1368379500))
filename = zp.get_resource_filename(manager, 'data.dat')
f = open(filename)
assert f.read() == 'hello, world!'
manager.cleanup_resources()
class TestResourceManager(object):
def test_get_cache_path(self):
mgr = pkg_resources.ResourceManager()
path = mgr.get_cache_path('foo')
type_ = str(type(path))
message = "Unexpected type from get_cache_path: " + type_
assert isinstance(path, (unicode, str)), message
|
Python
| 0.000128
|
@@ -1773,17 +1773,16 @@
name, 'w
-b
')%0A
|
f356adde3cb4776ad0a34b47be54a6e14972ce17
|
Improve Python 3.x compatibility
|
tests/unit/test_excutils.py
|
tests/unit/test_excutils.py
|
# vim: tabstop=4 shiftwidth=4 softtabstop=4
# Copyright 2012, Red Hat, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from openstack.common import excutils
from tests import utils
class SaveAndReraiseTest(utils.BaseTestCase):
def test_save_and_reraise_exception(self):
e = None
msg = 'foo'
try:
try:
raise Exception(msg)
except:
with excutils.save_and_reraise_exception():
pass
except Exception, _e:
e = _e
self.assertEqual(str(e), msg)
def test_save_and_reraise_exception_dropped(self):
e = None
msg = 'second exception'
try:
try:
raise Exception('dropped')
except:
with excutils.save_and_reraise_exception():
raise Exception(msg)
except Exception, _e:
e = _e
self.assertEqual(str(e), msg)
|
Python
| 0.000022
|
@@ -1028,33 +1028,35 @@
except Exception
-,
+ as
_e:%0A
@@ -1426,17 +1426,19 @@
xception
-,
+ as
_e:%0A
|
0178cb071952e282111a8c01efff4995a15bcdce
|
add tests for winreg branch
|
tests/util/test_get_dirs.py
|
tests/util/test_get_dirs.py
|
import unittest
import EXOSIMS.util.get_dirs as gd
import os
from unittest.mock import *
import numpy as np
class TestGetDirs(unittest.TestCase):
"""
Tests the get_dir tool.
Sonny Rappaport, Cornell, July 2021
"""
def test_get_home_dir(self):
"""
Tests that get_home_dir works in muiltiple OS environments.
Test method: Uses unittest's mock library to create fake OS environment
and paths to see if get_dirs returns the correct home directory. Because
get_dirs returns assertionerrors when the homedir isn't real, use the
assertion message itself to check that the homedir is correct.
This assumes that the os library does its job correctly as the mocking
library will overwrite whatever os has stored for testing purposes.
This method also assumes that winreg works as expected.
"""
#collect assertion errors and verify at the end that we only get the
#expected assertion errors.
#this tests the assertion error as well- it should be called for all
#of these cases as I use imaginary pathnames
assertErrors = []
#mock directories
directories = \
[
{'HOME':'posixhome'},
{},
{'HOME':'myshome','MSYSTEM':'test'},
{'HOMESHARE':'sharehome'},
{'USERPROFILE':'userhome'},
{'HOME': 'otherOShome'},
{}
]
#mock os names
os_name = ['posix','posix','nt','nt','nt','door','door']
#names for home directory- 'none' shouldn't show up
home_names = ['posixhome','none','myshome','sharehome','userhome',
'otherOShome', 'none']
#test all paths except for winreg
for i, dic in enumerate(directories):
with patch.dict(os.environ,dic,clear=True), \
patch.object(os,'name',os_name[i]):
#i==1 and i==6 correspond to where homedir isn't in environ
if i == 1 or i == 6:
with self.assertRaises(OSError):
gd.get_home_dir()
else:
try: gd.get_home_dir()
except AssertionError as e:
assertErrors.append(str(e))
#add all assertion errors so far to the expected list of assertion
#errors
exp_asrt = []
for s in home_names:
if s == 'none':
continue
exp_asrt.append("Identified "+s+ " as home directory, but it does" +
" not exist or is not accessible/writeable")
#test winreg branch
#first, test that if winreg doesn't except, homedir is set
#(mock a key: make keys do nothing. mock queryvalueex: return test homedir)
with patch.dict(os.environ,{},clear=True), \
patch.object(os,'name','nt'), \
patch('winreg.OpenKey'), \
patch('winreg.QueryValueEx') as mockquery:
mockquery.return_value= ['winregHome']
try: gd.get_home_dir()
except AssertionError as e:
assertErrors.append(str(e))
exp_asrt.append("Identified "+"winregHome"+ " as home directory, but it does" +
" not exist or is not accessible/writeable")
np.testing.assert_array_equal(assertErrors ,exp_asrt)
def test_get_paths(self):
"""
Tests that get_paths returns the proper (relative) paths.
Test method: Calls the method and tests to see if the path dictionary
matches expectations for various trivial inputs.
"""
#test no parameter output
dict_paths = gd.get_paths()
outputs = dict_paths.values()
outputs_rel = []
for x in outputs:
outputs_rel.append(os.path.relpath(x))
if os.name == 'nt':
self.assertEqual(first = outputs_rel,
second = ['.', '.', '.', '.', '.', '..\\.EXOSIMS\\cache', '.'])
else:
self.assertEqual(first = outputs_rel,
second = ['.', '.', '.', '.', '.', '../.EXOSIMS/cache', '.'])
|
Python
| 0
|
@@ -101,16 +101,30 @@
py as np
+%0Aimport winreg
%0A%0Aclass
@@ -3233,49 +3233,1012 @@
-exp_asrt.append(%22Identified %22+%22winregHome
+#second, test that home is tried if an exception is raised and attempt %0A #at homedir setting is made %0A%0A with patch.dict(os.environ,%7B'HOME':'winreghome2'%7D,clear=True), %5C%0A patch.object(os,'name','nt'), %5C%0A patch('winreg.OpenKey'), %5C%0A patch('winreg.QueryValueEx') as mockquery:%0A mockquery.side_effect = Exception%0A try: gd.get_home_dir()%0A except AssertionError as e:%0A assertErrors.append(str(e))%0A%0A with patch.dict(os.environ,%7B%7D,clear=True), %5C%0A patch.object(os,'name','nt'), %5C%0A patch('winreg.OpenKey'), %5C%0A patch('winreg.QueryValueEx') as mockquery:%0A mockquery.side_effect = Exception%0A with self.assertRaises(OSError):%0A gd.get_home_dir()%0A%0A exp_asrt.append(%22Identified %22+%22winregHome%22+ %22 as home directory, but it does%22 +%0A %22 not exist or is not accessible/writeable%22)%0A%0A exp_asrt.append(%22Identified %22+%22winreghome2
%22+ %22
|
e33064debba9a973d26cf18fbfa093d2139fafaa
|
Remove bees
|
jacquard/tests/test_layering.py
|
jacquard/tests/test_layering.py
|
import re
import dis
import pathlib
import pytest
import jacquard
try:
import networkx
except ImportError:
networkx = None
DEPENDENCIES = (
('__main__', 'cli'),
('buckets', 'commands'),
('buckets', 'odm'),
('buckets', 'storage'),
('buckets', 'constraints'),
('cli', 'commands'),
('cli', 'plugin'),
('directory', 'plugin'),
('directory', 'commands'),
('experiments', 'buckets'),
('experiments', 'constraints'),
('odm', 'storage'),
('service', 'buckets'),
('service', 'odm'),
('service', 'users'),
('service', 'experiments'),
('storage', 'commands'),
('storage', 'plugin'),
('users', 'buckets'),
('users', 'commands'),
('users', 'storage'),
('wsgi', 'service'),
)
EXCLUDED_MODULES = (
'utils',
'config',
)
def build_dependency_graph():
assert networkx is not None
graph = networkx.DiGraph()
for from_component, to_component in DEPENDENCIES:
graph.add_edge(from_component, to_component)
return graph
@pytest.mark.skipif(networkx is None, reason="networkx is not installed")
def test_layers_are_acyclic():
graph = build_dependency_graph()
try:
cycle = networkx.find_cycle(graph)
except networkx.NetworkXNoCycle:
cycle = None
if cycle:
raise AssertionError(
"Cycle in component graph: {cycle}".format(
cycle=' → '.join([x[0] for x in cycle] + [cycle[-1][1]]),
),
)
RE_JACQUARD_MODULE = re.compile('^jacquard.([a-zA-Z0-9_]+)')
@pytest.mark.skipif(networkx is None, reason="networkx is not installed")
def test_layers():
    """Every intra-package import must respect the layering graph.

    Walks every non-test source file under the jacquard package,
    disassembles it to collect IMPORT_NAME instructions targeting jacquard
    modules, and checks that (a) no module reaches into another component's
    submodules directly, and (b) every cross-component import follows a
    path in the DEPENDENCIES graph.
    """
    root = pathlib.Path(jacquard.__file__).parent
    imports = set()
    forbidden_imports = []

    for source_file in root.glob('**/*.py'):
        if 'tests' in source_file.parts:
            continue
        with source_file.open('r') as f:
            contents = f.read()

        relative_parts = list(source_file.relative_to(root).parts)
        # Remove .py from the last path component
        if relative_parts[-1].endswith('.py'):
            relative_parts[-1] = relative_parts[-1][:-3]
        # Remove __init__ if it's the last component
        if relative_parts[-1] == '__init__':
            relative_parts = relative_parts[:-1]
        relative_parts = ['jacquard'] + relative_parts
        module_name = '.'.join(relative_parts)

        # Compile rather than import so module-level side effects never run.
        code = compile(contents, module_name, 'exec')
        for instruction in dis.get_instructions(code):
            if instruction.opname == 'IMPORT_NAME':
                import_target = instruction.argval
                if not import_target.startswith('jacquard'):
                    continue
                target_elements = import_target.split('.')
                if (
                    len(target_elements) > 2 and
                    target_elements[1] != relative_parts[1]
                ):
                    # Reaching into another component's internals.
                    forbidden_imports.append((module_name, import_target))
                imports.add((module_name, import_target))

    if forbidden_imports:
        raise AssertionError("{count} forbidden import(s): {illegals}".format(
            count=len(forbidden_imports),
            illegals=', '.join(
                '{importer} → {importee}'.format(
                    importer=importer,
                    importee=importee,
                )
                for importer, importee in forbidden_imports
            ),
        ))

    dependency_graph = build_dependency_graph()
    for importer, importee in imports:
        importer_match = RE_JACQUARD_MODULE.match(importer)
        importee_match = RE_JACQUARD_MODULE.match(importee)
        if importer_match is None or importee_match is None:
            continue
        importer_component = importer_match.group(1)
        importee_component = importee_match.group(1)
        if importer_component in EXCLUDED_MODULES:
            continue
        if importee_component in EXCLUDED_MODULES:
            continue
        try:
            # Only the reachability matters; the path itself is unused.
            networkx.shortest_path(
                dependency_graph,
                importer_component,
                importee_component,
            )
        except networkx.NetworkXNoPath:
            raise AssertionError(
                "Layering violation: {importer_component} cannot depend on "
                "{importee_component} ({importer_module} currently imports "
                "{importee_module})".format(
                    importer_component=importer_component,
                    importee_component=importee_component,
                    importer_module=importer,
                    importee_module=importee,
                ),
            ) from None
|
Python
| 0.000159
|
@@ -4725,38 +4725,4 @@
one%0A
-%0A raise AssertionError(%22Bees%22)%0A
|
514064ce5a0bc7d3ecab10d1b810e5b751ed79af
|
update supported thumbnail types
|
contrib/frontends/django/nntpchan/nntpchan/thumbnail.py
|
contrib/frontends/django/nntpchan/nntpchan/thumbnail.py
|
from django.conf import settings
import subprocess
import os
# Extensions thumbnailed with ImageMagick convert.
img_ext = ['png', 'jpg', 'jpeg', 'gif', 'webp', 'ico', 'pdf', 'ps']
# Extensions thumbnailed with ffmpeg.
# NOTE(review): 'txt' in the *video* list looks accidental — confirm.
vid_ext = ['mp4', 'webm', 'm4v', 'ogv', 'avi', 'txt']
def generate(fname, tname, placeholder):
    """Create a thumbnail for *fname* at *tname*.

    Known image extensions are rendered with ImageMagick convert, known
    video extensions with ffmpeg; anything else gets a hard link to the
    *placeholder* image instead.
    """
    extension = fname.rsplit('.', 1)[-1]
    if extension in img_ext:
        command = [settings.CONVERT_PATH, '-thumbnail', '200', fname, tname]
    elif extension in vid_ext:
        command = [settings.FFMPEG_PATH, '-i', fname, '-vf', 'scale=300:200', '-vframes', '1', tname]
    else:
        command = None
    if command is not None:
        subprocess.run(command, check=True)
    else:
        os.link(placeholder, tname)
|
Python
| 0
|
@@ -111,21 +111,8 @@
ico'
-, 'pdf', 'ps'
%5D%0Avi
|
f95b8ed7845b748b2fb86a4af0793db8fee078b4
|
Fix to issue deprecated .as_matrix() to df.values()
|
pd2ppt/pd2ppt.py
|
pd2ppt/pd2ppt.py
|
import six
import pandas as pd
from math import *
from pptx import Presentation
from pptx.util import Cm, Pt
# Round x to n significant figures, e.g. round_to_n(25437, 2) == 25000.
round_to_n = lambda x, n: round(x, -int(floor(log10(abs(x)))) + (n - 1))
def _do_formatting(value, format_str):
"""Format value according to format_str, and deal
sensibly with format_str if it is missing or invalid.
"""
if format_str == '':
if type(value) in six.integer_types:
format_str = ','
elif type(value) is float:
format_str = 'f'
elif type(value) is str:
format_str = 's'
elif format_str[0] == '.':
if format_str.endswith('R'):
if type(value) in six.integer_types:
value = round_to_n(value, int(format_str[1]))
format_str = ','
if not format_str.endswith('G'):
format_str = format_str + "G"
try:
value = format(value, format_str)
except:
value = format(value, '')
return value
def process_position_parameter(param):
    """Normalise a positioning argument (left, top, width, height) for
    df_to_table.

    ``None`` becomes the default of 4 cm, a plain ``int`` is interpreted
    as centimetres, and any other value (e.g. a pptx.util Length) is
    passed through untouched.
    """
    if param is None:
        param = 4
    if type(param) is int:
        return Cm(param)
    return param
def df_to_table(slide, df, left=None, top=None, width=None, height=None,
                colnames=None, col_formatters=None, rounding=None,
                name=None):
    """Converts a Pandas DataFrame to a PowerPoint table on the given
    Slide of a PowerPoint presentation.

    The table is a standard Powerpoint table, and can easily be modified with
    the Powerpoint tools, for example: resizing columns, changing formatting etc.

    Parameters
    ----------
    slide: ``pptx.slide.Slide``
        slide object from the python-pptx library containing the slide on which
        you want the table to appear
    df: pandas ``DataFrame``
        DataFrame with the data
    left: int, optional
        Position of the left-side of the table, either as an integer in cm, or
        as an instance of a pptx.util Length class (pptx.util.Inches for
        example). Defaults to 4cm.
    top: int, optional
        Position of the top of the table, takes parameters as above.
    width: int, optional
        Width of the table, takes parameters as above.
    height: int, optional
        Height of the table, takes parameters as above.
    colnames: list of strings, optional
        Column headers to show instead of ``df.columns``.
    col_formatters: list, optional
        A n_columns element long list containing format specifications for each
        column. For example ['', ',', '.2'] does no special formatting for the
        first column, uses commas as thousands separators in the second column,
        and formats the third column as a float with 2 decimal places.
    rounding: list, optional
        A n_columns element long list containing a number for each integer
        column that requires rounding that is then multiplied by -1 and passed
        to round(). The practical upshot of this is that you can give something
        like ['', 3, ''], which does nothing for the 1st and 3rd columns (as
        they aren't integer values), but for the 2nd column, rounds away the 3
        right-hand digits (eg. taking 25437 to 25000).
    name: str, optional
        A name to be given to the table in the Powerpoint file. This is not
        displayed, but can help extract the table later to make further changes.

    Returns
    -------
    pptx.shapes.graphfrm.GraphicFrame
        The python-pptx table (GraphicFrame) object that was created (which can
        then be used to do further manipulation if desired)
    """
    # NOTE(review): `rounding` is documented but currently unused here.
    left = process_position_parameter(left)
    top = process_position_parameter(top)
    width = process_position_parameter(width)
    height = process_position_parameter(height)

    rows, cols = df.shape
    # One extra row for the header.
    shp = slide.shapes.add_table(rows+1, cols, left, top, width, height)

    if colnames is None:
        colnames = list(df.columns)

    # Insert the column names
    for col_index, col_name in enumerate(colnames):
        shp.table.cell(0, col_index).text = col_name

    # DataFrame.as_matrix() was deprecated in pandas 0.23 and removed in
    # pandas 1.0; .values is the supported equivalent.
    m = df.values

    for row in range(rows):
        for col in range(cols):
            val = m[row, col]

            if col_formatters is None:
                text = str(val)
            else:
                text = _do_formatting(val, col_formatters[col])

            shp.table.cell(row+1, col).text = text

    if name is not None:
        shp.name = name

    return shp
def df_to_powerpoint(filename, df, **kwargs):
    """Write *df* as a table into a brand-new PowerPoint file.

    A presentation is created from scratch with a single blank slide, the
    DataFrame is rendered onto it via ``df_to_table()``, and the result is
    saved to *filename*.  The table is a standard Powerpoint table and can
    easily be modified with the Powerpoint tools afterwards.

    Parameters
    ----------
    filename: Filename to save the PowerPoint presentation as
    df: pandas ``DataFrame``
        DataFrame with the data
    **kwargs
        All other arguments accepted by ``df_to_table()`` (such as
        ``col_formatters`` or ``rounding``) are forwarded unchanged.

    Returns
    -------
    pptx.shapes.graphfrm.GraphicFrame
        The python-pptx table (GraphicFrame) object that was created (which
        can then be used to do further manipulation if desired)
    """
    presentation = Presentation()
    blank_layout = presentation.slide_layouts[6]
    new_slide = presentation.slides.add_slide(blank_layout)
    table_shape = df_to_table(new_slide, df, **kwargs)
    presentation.save(filename)
    return table_shape
|
Python
| 0.000001
|
@@ -4264,17 +4264,14 @@
df.
-as_matrix
+values
()%0A%0A
|
31c70f509a849de98e0327902c48bdfcb8bd99a9
|
fix another off by one with blur
|
pendulum_demo.py
|
pendulum_demo.py
|
#!/usr/bin/env python
import sys
import math
import time
import StringIO
from PIL import Image
import scipy.constants
from mathics.world import World
from mathics.viewport import Viewport
from mathics.machines import Pendulum, Timer, Point, Vector
def serve_gif(frames, duration, nq=0):
    """Encode *frames* as an animated GIF, save it to image.gif, then serve
    that GIF forever over HTTP on port 8000 (until Ctrl-C).

    nq is the neuquant quantisation parameter forwarded to writeGif.
    (Python 2 code: StringIO / BaseHTTPServer / print statements.)
    """
    from PIL import Image
    from images2gif import writeGif
    gif = StringIO.StringIO()
    timer_start = time.time()
    # Even per-frame delay so the whole animation lasts `duration` seconds.
    writeGif(gif, frames, duration/len(frames), nq=nq)
    with open('image.gif', 'wb') as f:
        gif.seek(0)
        f.write(gif.read())
    timer_end = time.time()
    print "stored gif in %i seconds." % (timer_end - timer_start)
    # Serve the in-memory GIF over HTTP.
    from BaseHTTPServer import BaseHTTPRequestHandler,HTTPServer
    PORT_NUMBER = 8000
    # This class handles any incoming request from the browser.
    class myHandler(BaseHTTPRequestHandler):
        # Handler for GET requests: every path returns the same GIF buffer.
        def do_GET(self):
            self.send_response(200)
            self.send_header('Content-type','image/gif')
            self.end_headers()
            gif.seek(0)
            self.wfile.write(gif.read())
            return
    try:
        # Create a web server and define the handler to manage the
        # incoming requests.
        server = HTTPServer(('', PORT_NUMBER), myHandler)
        # Wait forever for incoming http requests.
        server.serve_forever()
    except KeyboardInterrupt:
        print '^C received, shutting down the web server'
        server.socket.close()
if __name__ == '__main__':
supersample = 2
width = 200
font_size = 11
step = 0.05
duration = 4
blur = 0
x = int(supersample*width)
y = (6 * x / 5)
world = World(x, y, Viewport.WHITE, ("/usr/share/fonts/truetype/freefont/FreeSansBold.ttf", supersample * font_size))
viewport_different = Viewport(-4, 1.9, 8, -0.5, (0,200,0))
viewport = Viewport(-3, 3, 3, -3, Viewport.BEIGE)
world.add_viewport(viewport, 0, y/6, x, y)
world.add_viewport(viewport_different, 0, 0, x, y/6)
seconds_pendulum = Pendulum(Point(0,1), Vector.from_polar((2/(2*math.pi)) * (2/(2*math.pi)) * scipy.constants.g, math.radians(320)))
world.add_machine(seconds_pendulum)
seconds2_pendulum = Pendulum(Point(0,2), Vector.from_polar((4/(2*math.pi)) * (4/(2*math.pi)) * scipy.constants.g, math.radians(300)))
world.add_machine(seconds2_pendulum)
timer = Timer(Point(2,2))
world.add_machine(timer)
viewport.add_axis(0.2, 1)
viewport.add_visualization(seconds_pendulum.visualization_basic)
viewport.add_visualization(seconds2_pendulum.visualization_basic)
viewport.add_visualization(timer.visualization_basic)
viewport_different.add_visualization(seconds_pendulum.visualization_different)
viewport_different.add_visualization(seconds2_pendulum.visualization_different)
timer_start = time.time()
duration = step * math.ceil(duration/step)
frames = world.get_frames(0, duration, step, blur, 1.0/supersample)
timer_end = time.time()
print "generated %i frames in %i seconds. %f fps" % (len(frames) * (blur+1) - blur, timer_end - timer_start, (len(frames)*blur)/duration)
serve_gif(frames, duration)
|
Python
| 0.000001
|
@@ -3154,20 +3154,24 @@
frames)*
+(
blur
++1)
)/durati
|
14b473fc1b3a084a22c3e1ef37e2589d91650b2f
|
Add td_includes argument to allow more flexible relative include paths for td files.
|
third_party/mlir/tblgen.bzl
|
third_party/mlir/tblgen.bzl
|
"""BUILD extensions for MLIR table generation."""
def gentbl(name, tblgen, td_file, tbl_outs, td_srcs = [], strip_include_prefix = None, td_includes = []):
    """gentbl() generates tabular code from a table definition file.

    Args:
      name: The name of the build rule for use in dependencies.
      tblgen: The binary used to produce the output.
      td_file: The primary table definitions file.
      tbl_outs: A list of tuples (opts, out), where each opts is a string of
        options passed to tblgen, and the out is the corresponding output file
        produced.
      td_srcs: A list of table definition files included transitively.
      strip_include_prefix: attribute to pass through to cc_library.
      td_includes: A list of additional include paths for relative includes
        in td files (appended last for backward compatibility).
    """
    srcs = []
    srcs += td_srcs
    if td_file not in td_srcs:
        srcs += [td_file]
    # Add google_mlir/include directory as include so derived op td files can
    # import relative to that, plus any caller-supplied include paths.
    td_includes_str = "-I external/local_config_mlir/include -I external/org_tensorflow "
    for td_include in td_includes:
        td_includes_str += "-I %s " % td_include
    td_includes_str += "-I $$(dirname $(location %s)) " % td_file
    for (opts, out) in tbl_outs:
        # Derive a unique genrule name from the tblgen options.
        rule_suffix = "_".join(opts.replace("-", "_").replace("=", "_").split(" "))
        native.genrule(
            name = "%s_%s_genrule" % (name, rule_suffix),
            srcs = srcs,
            outs = [out],
            tools = [tblgen],
            message = "Generating code from table: %s" % td_file,
            cmd = (("$(location %s) %s %s $(location %s) -o $@") % (
                tblgen,
                td_includes_str,
                opts,
                td_file,
            )),
        )
    # List of opts that do not generate cc files.
    skip_opts = ["-gen-op-doc"]
    hdrs = [f for (opts, f) in tbl_outs if opts not in skip_opts]
    native.cc_library(
        name = name,
        # include_prefix does not apply to textual_hdrs.
        hdrs = hdrs if strip_include_prefix else [],
        strip_include_prefix = strip_include_prefix,
        textual_hdrs = hdrs,
    )
|
Python
| 0
|
@@ -101,16 +101,34 @@
cs = %5B%5D,
+ td_includes = %5B%5D,
strip_i
@@ -150,16 +150,16 @@
None):%0A
-
%22%22%22g
@@ -643,16 +643,82 @@
tively.%0A
+ td_includes: A list of include paths for relative includes.%0A
st
@@ -1002,16 +1002,20 @@
includes
+_str
= %22-I e
@@ -1081,27 +1081,115 @@
w %22%0A
-td_includes
+for td_include in td_includes:%0A td_includes_str += %22-I %25s %22 %25 td_include%0A td_includes_str
+= %22-I
@@ -1666,16 +1666,16 @@
tblgen,%0A
-
@@ -1693,16 +1693,20 @@
includes
+_str
,%0A
|
9920f7f8bab741146b436c864fb1ff4682a4bda9
|
Use a bigger buffer
|
tile-hash-proxy/__init__.py
|
tile-hash-proxy/__init__.py
|
import SimpleHTTPServer
import SocketServer
import requests
import md5
def calc_hash_terrain(s):
    # Terrain tiles hash the path *without* the leading slash and keep the
    # first 6 hex digits (vector tiles use the full path and 5 digits).
    digest = md5.new(s[1:]).hexdigest()
    return digest[:6]
def calc_hash_vector(s):
    # Vector tiles: first 5 hex digits of the MD5 of the full path.
    digest = md5.new(s).hexdigest()
    return digest[:5]
date_prefix = ''
base_url = ''
calc_hash = None
class Handler(SimpleHTTPServer.SimpleHTTPRequestHandler):
    """Tile proxy: rewrites incoming requests onto the hashed upstream
    layout (<date_prefix>/<hash><original path>) and streams the response
    back, adding a CORS header if the upstream didn't."""
    server_version = "0.1"
    def do_GET(self):
        # Hash only the bare path; the query string is stripped first.
        query_params = self.path.split('?')
        old_path = query_params[0]
        md5_hash = calc_hash(old_path)
        # But the full original path (including any query) is re-appended.
        new_path = '%(date)s/%(md5)s%(path)s' % dict(
            date=date_prefix,
            md5=md5_hash,
            path=self.path
        )
        url = '%s/%s' % (base_url, new_path)
        res = requests.get(url)
        self.send_response(res.status_code, res.reason)
        # Forward upstream headers except the ones this server sets itself.
        for k, v in res.headers.iteritems():
            if k != 'Server' and k != 'Date':
                self.send_header(k, v)
        if 'access-control-allow-origin' not in res.headers:
            self.send_header('access-control-allow-origin', '*')
        self.end_headers()
        # NOTE(review): 1024 * 1000 is ~1 MB, so the name "kilobyte" is
        # misleading and chunks are actually ~1 MB each — confirm intent.
        kilobyte = 1024 * 1000
        chunk_size = 1 * kilobyte
        for chunk in res.iter_content(chunk_size):
            self.wfile.write(chunk)
        self.wfile.close()
if __name__ == '__main__':
    import argparse
    parser = argparse.ArgumentParser()
    parser.add_argument(
        'port',
        type=int,
        help="Port to listen on")
    parser.add_argument(
        'date_prefix',
        help="Date prefix string to append to the base URL")
    parser.add_argument(
        'base_url',
        help="Base S3 URL to make requests to")
    parser.add_argument(
        '--terrain',
        dest='variant',
        action='store_const',
        const='terrain',
        default='vector',
        help="Use Terrain tiles variant of hashing")
    args = parser.parse_args()
    # Publish the parsed settings via the module-level globals the Handler
    # class reads at request time.
    date_prefix = args.date_prefix
    base_url = args.base_url
    # Select the hashing scheme matching the tile variant.
    if args.variant == 'vector':
        calc_hash = calc_hash_vector
    elif args.variant == 'terrain':
        calc_hash = calc_hash_terrain
    else:
        print "Uh oh I don't know how to hash %s" % args.variant
    httpd = SocketServer.TCPServer(("", args.port), Handler)
    print "Serving at http://localhost:%d/" % args.port
    httpd.serve_forever()
|
Python
| 0.000051
|
@@ -1344,16 +1344,17 @@
size = 1
+6
* kilob
|
45b778c637d263208699a16ba926f0da10d5b0f4
|
Fix incorrect behaviour with check.py
|
tmc/exercise_tests/check.py
|
tmc/exercise_tests/check.py
|
import re
import xml.etree.ElementTree as ET
from os import path
from tmc.exercise_tests.basetest import BaseTest, TestResult
class CheckTest(BaseTest):
    """Test runner for exercises built with Make and the Check framework."""

    def __init__(self):
        super().__init__("Check")

    def applies_to(self, exercise):
        # An exercise uses Check if it ships a Makefile.
        return path.isfile(path.join(exercise.path(), "Makefile"))

    def test(self, exercise):
        """Build and run the exercise's tests, then parse Check's XML report
        into a list of TestResult objects."""
        _, _, err = self.run(["make", "clean", "all", "run-test"], exercise)
        ret = []
        testpath = path.join(exercise.path(), "test", "tmc_test_results.xml")
        if not path.isfile(testpath):
            # Build or test run failed before producing a report.
            return [TestResult(success=False, message=err)]
        if len(err) > 0:
            ret.append(TestResult(message=err, warning=True))
        xmlsrc = ""
        with open(testpath) as fp:
            xmlsrc = fp.read()
        # Escape stray ampersands so the report parses as XML.
        xmlsrc = re.sub(r"&(\s)", r"&\1", xmlsrc)
        ns = "{http://check.sourceforge.net/ns}"
        matchtest = ns + "test"
        matchdesc = ns + "description"
        matchmsg = ns + "message"
        root = ET.fromstring(xmlsrc)
        for test in root.iter(matchtest):
            name = test.find(matchdesc).text
            # Check reports "success", "failure" or "error"; both "failure"
            # and "error" mean the test did not pass.  The previous code
            # only matched "failure", so errored tests showed as successes.
            if test.get("result") in ("failure", "error"):
                success = False
                message = test.find(matchmsg).text
                message = message.replace(r"&", "&")
            else:
                success = True
                message = None
            ret.append(TestResult(success=success, name=name, message=message))
        return ret
|
Python
| 0.000001
|
@@ -1084,35 +1084,8 @@
t):%0A
- success = True%0A
@@ -1129,35 +1129,8 @@
ext%0A
- message = None%0A
@@ -1163,11 +1163,12 @@
t%22)
-==
+in %5B
%22fai
@@ -1172,16 +1172,26 @@
failure%22
+, %22error%22%5D
:%0A
@@ -1324,16 +1324,94 @@
%22, %22&%22)%0A
+ else:%0A success = True%0A message = %22%22%0A
|
5ffeebd7581dd3168bbcd974625879774ed5df6c
|
Update instance dict with field generators
|
tohu/v3/custom_generator.py
|
tohu/v3/custom_generator.py
|
import attr
import logging
import re
from .base import TohuBaseGenerator, SeedGenerator
__all__ = ['CustomGenerator']
logger = logging.getLogger('tohu')
def set_item_class_name_on_custom_generator_class(cls):
    """
    Ensure `cls.__tohu_items_name__` names the item class produced by the
    custom generator.

    An explicitly set `__tohu_items_name__` attribute on the class wins;
    otherwise the name is derived by stripping a trailing 'Generator' from
    the class name (FoobarGenerator -> Foobar, QuuxGenerator -> Quux).
    Raises ValueError when neither source yields a name.
    """
    if '__tohu_items_name__' in cls.__dict__:
        logger.debug(
            f"Using item class name '{cls.__tohu_items_name__}' (derived from attribute '__tohu_items_name__')")
        return
    match = re.match('^(.*)Generator$', cls.__name__)
    if match is None:
        raise ValueError("Cannot derive class name for items to be produced by custom generator. "
                         "Please set '__tohu_items_name__' at the top of the custom generator's "
                         "definition or change its name so that it ends in '...Generator'")
    cls.__tohu_items_name__ = match.group(1)
    logger.debug(f"Using item class name '{cls.__tohu_items_name__}' (derived from custom generator name)")
def make_item_class(clsname, attr_names):
    """
    Create an attrs-based item class with dict-like conveniences.

    Parameters
    ----------
    clsname: string
        Name of the class to be created

    attr_names: list of strings
        Names of the attributes of the class to be created
    """
    item_cls = attr.make_class(clsname, {name: attr.ib() for name in attr_names}, repr=False, cmp=True)

    def new_repr(self):
        all_fields = ', '.join([f'{name}={repr(value)}' for name, value in attr.asdict(self).items()])
        return f'{clsname}({all_fields})'

    orig_eq = item_cls.__eq__

    def new_eq(self, other):
        """
        Custom __eq__() method which also allows comparisons with
        tuples and dictionaries. This is mostly for convenience
        during testing.
        """
        if isinstance(other, self.__class__):
            return orig_eq(self, other)
        else:
            if isinstance(other, tuple):
                return attr.astuple(self) == other
            elif isinstance(other, dict):
                return attr.asdict(self) == other
            else:
                return NotImplemented

    def to_series(self):
        # Import locally: pandas is not imported at module level, so the
        # previous `lambda self: pd.Series(...)` raised NameError on `pd`
        # whenever to_series() was actually called.
        import pandas as pd
        return pd.Series(attr.asdict(self))

    item_cls.__repr__ = new_repr
    item_cls.__eq__ = new_eq
    item_cls.keys = lambda self: attr_names
    item_cls.__getitem__ = lambda self, key: getattr(self, key)
    item_cls.as_dict = lambda self: attr.asdict(self)
    item_cls.to_series = to_series

    return item_cls
class CustomGenerator(TohuBaseGenerator):
    """Base class for user-defined generators.

    Subclasses declare field generators as class (or instance) attributes;
    this class collects them, spawns a private copy of each, and produces
    one item record per call to __next__().
    """

    def __init__(self, *args, **kwargs):
        super().__init__()
        # Remember the constructor arguments so spawn() can clone us.
        self.orig_args = args
        self.orig_kwargs = kwargs
        # Derives one seed per field generator from a single master seed.
        self.seed_generator = SeedGenerator()
        self.field_gen_templates = {}
        # Extract field generators from class dict
        for name, g in self.__class__.__dict__.items():
            if isinstance(g, TohuBaseGenerator):
                self.field_gen_templates[name] = g
        # Extract field generators from instance dict
        # (instance attributes override class attributes of the same name)
        for name, g in self.__dict__.items():
            if isinstance(g, TohuBaseGenerator):
                self.field_gen_templates[name] = g
        # Spawn fresh copies so each instance owns independent generators.
        self.field_gens = {name: g.spawn() for (name, g) in self.field_gen_templates.items()}
        set_item_class_name_on_custom_generator_class(self.__class__)
        self._set_item_class()

    def _set_item_class(self):
        """
        Create the item class (named after __tohu_items_name__) whose
        fields mirror self.field_gens, and store it on self.item_cls.
        """
        clsname = self.__tohu_items_name__
        attr_names = self.field_gens.keys()
        self.item_cls = make_item_class(clsname, attr_names)

    def __next__(self):
        # One draw from every field generator, in declaration order.
        field_values = [next(g) for g in self.field_gens.values()]
        return self.item_cls(*field_values)

    def reset(self, seed):
        # Re-seed every field generator deterministically from `seed`.
        super().reset(seed)
        self.seed_generator.reset(seed)
        for name, gen in self.field_gens.items():
            next_seed = next(self.seed_generator)
            gen.reset(next_seed)

    def spawn(self):
        # A spawn is a fresh instance built with the original arguments.
        return self.__class__(*self.orig_args, **self.orig_kwargs)
|
Python
| 0
|
@@ -3709,16 +3709,62 @@
items()%7D
+%0A self.__dict__.update(self.field_gens)
%0A%0A
|
b09484dbdd49319278f46ade165b5543c385614e
|
fix testing of dynamic models
|
webapp/apps/dynamic/tests/utils.py
|
webapp/apps/dynamic/tests/utils.py
|
from __future__ import print_function
from django.test import TestCase
from django.test import Client
import mock
import os
os.environ["NUM_BUDGET_YEARS"] = '2'
from ...taxbrain.models import TaxSaveInputs
from ...taxbrain.models import convert_to_floats
from ...taxbrain.helpers import (expand_1D, expand_2D, expand_list, package_up_vars,
format_csv, arrange_totals_by_row, default_taxcalc_data)
from ...taxbrain.compute import DropqCompute, MockCompute, ElasticMockCompute
from ..compute import MockDynamicCompute
import taxcalc
from taxcalc import Policy
START_YEAR = u'2016'
def do_micro_sim(client, reform):
    '''do the proper sequence of HTTP calls to run a microsim and return
    the redirect response pointing at the results page'''
    # Monkey patch to mock out running of compute jobs
    import sys
    from webapp.apps.taxbrain import views
    webapp_views = sys.modules['webapp.apps.taxbrain.views']
    webapp_views.dropq_compute = MockCompute()
    from webapp.apps.dynamic import views
    dynamic_views = sys.modules['webapp.apps.dynamic.views']
    dynamic_views.dropq_compute = MockCompute(num_times_to_wait=1)
    response = client.post('/taxbrain/', reform)
    # Check that redirect happens
    assert response.status_code == 302
    # NOTE(review): url[:-2] strips the trailing model number before the
    # check — assumes two trailing characters; confirm for 3+ digit ids.
    assert response.url[:-2].endswith("taxbrain/")
    return response
def do_behavioral_sim(client, microsim_response, pe_reform, start_year=START_YEAR):
    """Run a behavioral (partial equilibrium) sim off an existing microsim
    and return the redirect response to the results page."""
    # Link to dynamic simulation
    model_num = microsim_response.url[-2:]
    dynamic_landing = '/dynamic/{0}?start_year={1}'.format(model_num, start_year)
    response = client.get(dynamic_landing)
    assert response.status_code == 200

    # Go to behavioral input page
    dynamic_behavior = '/dynamic/behavioral/{0}?start_year={1}'.format(model_num, start_year)
    response = client.get(dynamic_behavior)
    assert response.status_code == 200

    # Do the partial equilibrium job submission
    response = client.post(dynamic_behavior, pe_reform)
    assert response.status_code == 302
    print(response)
    assert response.url[:-2].endswith("processing/")

    # Check that we are not done processing.  The original comparison had
    # no `assert`, so it was a no-op expression that never checked anything.
    not_ready_page = client.get(response.url)
    assert not_ready_page.status_code == 200

    # Check should get a redirect this time
    response = client.get(response.url)
    assert response.status_code == 302
    assert response.url[:-2].endswith("behavior_results/")
    return response
def do_elasticity_sim(client, microsim_response, egdp_reform, start_year=START_YEAR):
    """Run an elasticity-of-GDP (macro) sim off an existing microsim and
    return the redirect response to the results page."""
    # Link to dynamic simulation
    model_num = microsim_response.url[-2:]
    dynamic_landing = '/dynamic/{0}?start_year={1}'.format(model_num, start_year)
    response = client.get(dynamic_landing)
    assert response.status_code == 200

    # Go to macro input page
    dynamic_egdp = '/dynamic/macro/{0}?start_year={1}'.format(model_num, start_year)
    response = client.get(dynamic_egdp)
    assert response.status_code == 200

    # Do the macro job submission
    response = client.post(dynamic_egdp, egdp_reform)
    assert response.status_code == 302
    print(response)
    assert response.url[:-2].endswith("macro_processing/")

    # Check that we are not done processing.  The original comparison had
    # no `assert`, so it was a no-op expression that never checked anything.
    not_ready_page = client.get(response.url)
    assert not_ready_page.status_code == 200

    # Check should get a redirect this time
    next_response = client.get(response.url)
    assert next_response.status_code == 302
    assert next_response.url[:-2].endswith("macro_results/")
    return next_response
def do_ogusa_sim(client, microsim_response, ogusa_reform, start_year,
                 increment=0, exp_status_code=302):
    """Submit an OG-USA dynamic sim for an existing microsim.

    increment is forwarded to MockDynamicCompute; exp_status_code lets
    callers test validation failures (e.g. 200) as well as success (302).
    """
    # Monkey patch to mock out running of compute jobs
    import sys
    from webapp.apps.taxbrain import views
    webapp_views = sys.modules['webapp.apps.taxbrain.views']
    webapp_views.dropq_compute = MockCompute()
    from webapp.apps.dynamic import views
    dynamic_views = sys.modules['webapp.apps.dynamic.views']
    dynamic_views.dynamic_compute = MockDynamicCompute(increment=increment)
    # Go to the dynamic landing page
    model_num = microsim_response.url[-2:]
    dynamic_landing = '/dynamic/{0}?start_year={1}'.format(model_num, start_year)
    response = client.get(dynamic_landing)
    assert response.status_code == 200
    # Go to OGUSA input page
    dynamic_ogusa = '/dynamic/ogusa/{0}?start_year={1}'.format(model_num, start_year)
    response = client.get(dynamic_ogusa)
    assert response.status_code == 200
    # Submit the OGUSA job submission
    response = client.post(dynamic_ogusa, ogusa_reform)
    assert response.status_code == exp_status_code
    return response
|
Python
| 0.000001
|
@@ -1212,32 +1212,71 @@
tus_code == 302%0A
+ idx = response.url%5B:-1%5D.rfind('/')%0A
assert respo
@@ -1276,34 +1276,35 @@
t response.url%5B:
--2
+idx
%5D.endswith(%22taxb
@@ -1307,17 +1307,16 @@
taxbrain
-/
%22)%0A r
@@ -3998,24 +3998,72 @@
anding page%0A
+ idx = microsim_response.url%5B:-1%5D.rfind('/')%0A
model_nu
@@ -4080,35 +4080,40 @@
im_response.url%5B
--2:
+idx+1:-1
%5D%0A dynamic_la
@@ -4125,32 +4125,33 @@
= '/dynamic/%7B0%7D
+/
?start_year=%7B1%7D'
@@ -4328,24 +4328,25 @@
ic/ogusa/%7B0%7D
+/
?start_year=
|
6fc67196062dcfbf6d14679e3c735bdcd27ca5a2
|
add one file type in basic mode
|
HomeworkCleaner.py
|
HomeworkCleaner.py
|
#! D:\Python33\python.exe
# -*- coding: utf-8 -*-
# Module : HomeworkCleaner.py
# Author : bss
# Project : TA
# State :
# Creation Date : 2014-12-26
# Last modified: 2014-12-29 17:44:18
# Description :
#
import os
import zipfile
# File extensions (lower-case) treated as build intermediates in "basic"
# mode; matching files inside debug/release directories are excluded
# from the archive by checkClearDir().
g_basicList = ['obj','tlog','pdb','ilk','idb','log','lastbuildstate',
        'manifest','res','rc','cache','cs','resources','baml','lref',
        'exe.config','filelistabsolute.txt','pch','cpp','h']
# Decide whether this file should be dropped from the archive
def checkClearDir(path, filenames, choose):
    """Return True when the file at *path* is build debris to drop from
    the zip, False when it must be kept.

    *filenames* is the list of sibling file names in the same directory;
    *choose* selects basic (1) or aggressive (any other value) cleaning.
    """
    sep = os.sep
    path = path.lower()  # normalise case; Windows paths are case-insensitive

    # A debug DLL ("...d.dll") is debris when a sibling shares its base name
    # minus the trailing 'd' (e.g. appd.dll next to app.exe).
    if path.endswith('d.dll'):
        base = path.split(os.sep)[-1][:-5]
        if any(base == sibling[:-4] for sibling in filenames):
            return True

    # Other binaries are always kept.
    if path.endswith(('.dll', '.exe')):
        return False

    # Intermediate databases and caches are always debris.
    if (path.endswith(('.sdf', '.opensdf', '.aps', '.pdb'))
            or sep + 'ipch' + sep in path):
        return True

    # Inside build output directories the cleaning mode decides.
    if 'debug' + sep in path or 'release' + sep in path:
        if choose == 1:  # basic: drop only known intermediate extensions
            for extension in g_basicList:
                if path.endswith('.' + extension):
                    return True
            print('keep ' + path.split(os.sep)[-1])
        else:  # aggressive: drop everything here except dll/exe (kept above)
            return True

    return False
def zipDir(cd, dirname, choose):
    """Zip `dirname` (under `cd`) into `dirname`.zip, skipping every file
    that checkClearDir() flags as build debris."""
    zipName = os.path.join(cd, dirname + '.zip')
    f = zipfile.ZipFile(zipName, 'w', zipfile.ZIP_DEFLATED)
    searchDir = os.path.join(cd, dirname)
    for dirpath, dirnames, filenames in os.walk(searchDir):
        for filename in filenames:
            fileAbsPath = os.path.join(dirpath, filename)
            if not checkClearDir(fileAbsPath, filenames, choose):
                # Store entries with paths relative to the zipped directory.
                f.write(fileAbsPath, fileAbsPath[searchDir.__len__():])
    f.close()
def main():
    """Ask for a cleaning mode, then zip every subdirectory of the current
    directory (except .git) with build intermediates stripped out."""
    cd = os.path.abspath('.')
    print(cd)
    print('https://github.com/bssthu/HomeworkCleaner')
    choose = input('1. Basic; 2. Aggressive ')
    try:
        choose = int(choose)
    except ValueError:
        # Non-numeric input: leave `choose` as a string so the check
        # below rejects it.  (Narrowed from a bare `except:` which would
        # also have swallowed KeyboardInterrupt etc.)
        pass
    if (choose != 1 and choose != 2):
        print('bye~')
        return
    for dirname in os.listdir(cd):
        if os.path.isdir(os.path.join(cd, dirname)) and (dirname != '.git'):
            print(dirname + ':')
            zipDir(cd, dirname, choose)
    print('nice!')
if __name__ == '__main__':
main()
os.system('pause')
|
Python
| 0.000001
|
@@ -195,24 +195,24 @@
201
-4-12-29 17:44:18
+5-01-02 11:56:12
%0A# D
@@ -456,16 +456,45 @@
cpp','h'
+,%0A 'unsuccessfulbuild'
%5D%0A%0A# %E6%98%AF%E5%90%A6%E8%A6%81
|
9827fc7b386b0576758092ff785726863dabf797
|
Update version numbers for 0.10 dev
|
IPython/Release.py
|
IPython/Release.py
|
# -*- coding: utf-8 -*-
"""Release data for the IPython project."""
#*****************************************************************************
# Copyright (C) 2001-2006 Fernando Perez <fperez@colorado.edu>
#
# Copyright (c) 2001 Janko Hauser <jhauser@zscout.de> and Nathaniel Gray
# <n8gray@caltech.edu>
#
# Distributed under the terms of the BSD License. The full license is in
# the file COPYING, distributed as part of this software.
#*****************************************************************************
# Name of the package for release purposes. This is the name which labels
# the tarballs and RPMs made by distutils, so it's best to lowercase it.
name = 'ipython'
# For versions with substrings (like 0.6.16.svn), use an extra . to separate
# the new substring. We have to avoid using either dashes or underscores,
# because bdist_rpm does not accept dashes (an RPM) convention, and
# bdist_deb does not accept underscores (a Debian convention).
# True for development snapshots (the bzr revision gets appended to the
# version string below); set to False when cutting a release so that
# `version` is exactly `version_base`.
development = False
version_base = '0.9.1'
branch = 'ipython'
revision = '1143'

if development:
    if branch == 'ipython':
        version = '%s.bzr.r%s' % (version_base, revision)
    else:
        # Feature branches carry the branch name as an extra suffix.
        version = '%s.bzr.r%s.%s' % (version_base, revision, branch)
else:
    version = version_base
description = "An interactive computing environment for Python"
long_description = \
"""
The goal of IPython is to create a comprehensive environment for
interactive and exploratory computing. To support this goal, IPython
has two main components:
* An enhanced interactive Python shell.
* An architecture for interactive parallel computing.
The enhanced interactive Python shell has the following main features:
* Comprehensive object introspection.
* Input history, persistent across sessions.
* Caching of output results during a session with automatically generated
references.
* Readline based name completion.
* Extensible system of 'magic' commands for controlling the environment and
performing many tasks related either to IPython or the operating system.
* Configuration system with easy switching between different setups (simpler
than changing $PYTHONSTARTUP environment variables every time).
* Session logging and reloading.
* Extensible syntax processing for special purpose situations.
* Access to the system shell with user-extensible alias system.
* Easily embeddable in other Python programs and wxPython GUIs.
* Integrated access to the pdb debugger and the Python profiler.
The parallel computing architecture has the following main features:
* Quickly parallelize Python code from an interactive Python/IPython session.
* A flexible and dynamic process model that be deployed on anything from
multicore workstations to supercomputers.
* An architecture that supports many different styles of parallelism, from
message passing to task farming.
* Both blocking and fully asynchronous interfaces.
* High level APIs that enable many things to be parallelized in a few lines
of code.
* Share live parallel jobs with other users securely.
* Dynamically load balanced task farming system.
* Robust error handling in parallel code.
The latest development version is always available from IPython's `Launchpad
site <http://launchpad.net/ipython>`_.
"""
license = 'BSD'
authors = {'Fernando' : ('Fernando Perez','fperez@colorado.edu'),
'Janko' : ('Janko Hauser','jhauser@zscout.de'),
'Nathan' : ('Nathaniel Gray','n8gray@caltech.edu'),
'Ville' : ('Ville Vainio','vivainio@gmail.com'),
'Brian' : ('Brian E Granger', 'ellisonbg@gmail.com'),
'Min' : ('Min Ragan-Kelley', 'benjaminrk@gmail.com')
}
author = 'The IPython Development Team'
author_email = 'ipython-dev@scipy.org'
url = 'http://ipython.scipy.org'
download_url = 'http://ipython.scipy.org/dist'
platforms = ['Linux','Mac OSX','Windows XP/2000/NT','Windows 95/98/ME']
keywords = ['Interactive','Interpreter','Shell','Parallel','Distributed']
|
Python
| 0
|
@@ -1000,20 +1000,19 @@
pment =
-Fals
+Tru
e # c
@@ -1069,11 +1069,10 @@
'0.
-9.
1
+0
'%0Abr
@@ -1106,10 +1106,10 @@
'11
-43
+95
'%0A%0Ai
|
85aa5449a040247a6156801e88857048a7db6dd5
|
update revnum
|
IPython/Release.py
|
IPython/Release.py
|
# -*- coding: utf-8 -*-
"""Release data for the IPython project.
$Id: Release.py 2409 2007-05-28 18:45:23Z vivainio $"""
#*****************************************************************************
# Copyright (C) 2001-2006 Fernando Perez <fperez@colorado.edu>
#
# Copyright (c) 2001 Janko Hauser <jhauser@zscout.de> and Nathaniel Gray
# <n8gray@caltech.edu>
#
# Distributed under the terms of the BSD License. The full license is in
# the file COPYING, distributed as part of this software.
#*****************************************************************************
# Name of the package for release purposes. This is the name which labels
# the tarballs and RPMs made by distutils, so it's best to lowercase it.
name = 'ipython'
# For versions with substrings (like 0.6.16.svn), use an extra . to separate
# the new substring. We have to avoid using either dashes or underscores,
# because bdist_rpm does not accept dashes (an RPM) convention, and
# bdist_deb does not accept underscores (a Debian convention).
revision = '2408'
version = '0.8.2.svn.r' + revision.rstrip('M')
description = "An enhanced interactive Python shell."
long_description = \
"""
IPython provides a replacement for the interactive Python interpreter with
extra functionality.
Main features:
* Comprehensive object introspection.
* Input history, persistent across sessions.
* Caching of output results during a session with automatically generated
references.
* Readline based name completion.
* Extensible system of 'magic' commands for controlling the environment and
performing many tasks related either to IPython or the operating system.
* Configuration system with easy switching between different setups (simpler
than changing $PYTHONSTARTUP environment variables every time).
* Session logging and reloading.
* Extensible syntax processing for special purpose situations.
* Access to the system shell with user-extensible alias system.
* Easily embeddable in other Python programs.
* Integrated access to the pdb debugger and the Python profiler.
The latest development version is always available at the IPython subversion
repository_.
.. _repository: http://ipython.scipy.org/svn/ipython/ipython/trunk#egg=ipython-dev
"""
license = 'BSD'
authors = {'Fernando' : ('Fernando Perez','fperez@colorado.edu'),
'Janko' : ('Janko Hauser','jhauser@zscout.de'),
'Nathan' : ('Nathaniel Gray','n8gray@caltech.edu'),
'Ville' : ('Ville Vainio','vivainio@gmail.com')
}
url = 'http://ipython.scipy.org'
download_url = 'http://ipython.scipy.org/dist'
platforms = ['Linux','Mac OSX','Windows XP/2000/NT','Windows 95/98/ME']
keywords = ['Interactive','Interpreter','Shell']
|
Python
| 0.000001
|
@@ -81,30 +81,30 @@
y 24
-09
+46
2007-0
-5-28 18:45:23
+6-14 22:30:58
Z vi
@@ -1058,10 +1058,10 @@
'24
-08
+45
'%0A%0Av
|
5e409ec1d8d53cd3005022ff090043a9e5f5cb31
|
Update nyan.py
|
NyanCheck/nyan.py
|
NyanCheck/nyan.py
|
#!/usr/bin/python3
from gi.repository import Gtk
from gi.repository import GObject
import webbrowser
import urllib.request
import re
def getNyan():
USER_AGENT = "Mozilla/5.0 (Windows NT 6.1) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/33.0.1750.154 Safari/537.36"
r = urllib.request.Request("http://nyanyan.it/", headers={'User-Agent': USER_AGENT, 'Content-Type': 'application/x-www-form-urlencoded;charset=utf-8'})
data = urllib.request.urlopen(r)
data = data.read()
found = re.findall( '<div class="tytul">.*<div class="stronicowanieD" style="width:700px;margin-left:20px">', str(data) )
return found[0]
class nyanIcon:
def __init__( self ):
self.site = getNyan()
self.trayicon = Gtk.StatusIcon()
self.trayicon.set_from_file( "normal.png" )
self.trayicon.set_visible( True )
self.trayicon.connect( "activate", self.openNyan )
self.trayicon.connect( "popup-menu", self.options )
GObject.timeout_add( 5000, self.checkNyan )
Gtk.main()
def options( self, icon, button, time ):
self.menu = Gtk.Menu()
exit = Gtk.MenuItem()
exit.set_label( "Exit" )
exit.connect( "activate", Gtk.main_quit )
self.menu.append( exit )
self.menu.show_all()
def pos( menu, icon):
return (Gtk.StatusIcon.position_menu(menu, icon))
self.menu.popup(None, None, pos, self.trayicon, button, time)
def checkNyan( self, *args ):
"""Checks for new posts on http://nyanyan.it/
Takes no arguments and return true if there is new post.
"""
tempsite = getNyan()
if tempsite != self.site:
self.site = tempsite
self.trayicon.set_from_file( "new.png" )
GObject.timeout_add( 60000*5, self.checkNyan )
def openNyan( self, *args ):
self.trayicon.set_from_file( "normal.png" )
webbrowser.open( "http://nyanyan.it/" )
app = nyanIcon()
|
Python
| 0.000001
|
@@ -1353,124 +1353,8 @@
):%0A
-%09%09%22%22%22Checks for new posts on http://nyanyan.it/%0A%09%09%0A%09%09Takes no arguments and return true if there is new post.%0A%09%09%22%22%22%0A
%09%09te
@@ -1648,12 +1648,13 @@
= nyanIcon()
+%0A
|
992dc795d1f7c7ef670832a5144b7e72a9374af8
|
update test_forms
|
wizard_builder/tests/test_forms.py
|
wizard_builder/tests/test_forms.py
|
from django.test import TestCase
from .. import managers
class FormSerializationTest(TestCase):
manager = managers.FormManager
fixtures = [
'wizard_builder_data',
]
expected_data = [{
'descriptive_text': 'answer wisely',
'field_id': 'question_2',
'id': 2,
'page': 2,
'position': 0,
'question_text': 'do androids dream of electric sheep?',
'text': 'do androids dream of electric sheep?',
'type': 'singlelinetext',
'is_dropdown': False,
'choices': [],
}]
@classmethod
def setUpClass(cls):
super().setUpClass()
form = cls.manager.get_form_models()[1]
cls.actual_data = form.serialized
def test_same_size(self):
actual_data = self.actual_data
expected_data = self.expected_data
self.assertEqual(
len(actual_data),
len(expected_data),
)
def test_same_questions(self):
actual_data = self.actual_data
expected_data = self.expected_data
for index, expected_question in enumerate(expected_data):
actual_question = actual_data[index]
self.assertEqual(
actual_question,
expected_question,
)
|
Python
| 0.000001
|
@@ -501,38 +501,8 @@
t',%0A
- 'is_dropdown': False,%0A
|
a9465bcfe387a3eb8ba730eeda5285be079044d3
|
test cleanup
|
wizard_builder/tests/test_views.py
|
wizard_builder/tests/test_views.py
|
from unittest import mock
from django.conf import settings
from django.core.urlresolvers import reverse
from django.test import TestCase
from .. import view_helpers
class ViewTest(TestCase):
fixtures = [
'wizard_builder_data',
]
@classmethod
def setUpClass(cls):
settings.SITE_ID = 1
super().setUpClass()
def test_storage_receives_post_data(self):
step = '1'
url = reverse('wizard_update', kwargs={'step': step})
data = {'question_2': 'aloe ipsum speakerbox'}
storage_data = {step: data}
self.client.post(url, data)
self.assertEqual(
self.client.session['data'],
storage_data,
)
|
Python
| 0.000001
|
@@ -356,127 +356,68 @@
def
-test_storage_receives_post_data(self):%0A step = '1'%0A url = reverse('wizard_update', kwargs=%7B'step': step%7D)
+setUp(self):%0A super().setUp()%0A self.step = '1'
%0A
@@ -421,16 +421,21 @@
+self.
data = %7B
@@ -473,32 +473,37 @@
erbox'%7D%0A
+self.
storage_data = %7B
@@ -507,18 +507,143 @@
= %7Bs
-tep: data%7D
+elf.step: self.data%7D%0A%0A def test_storage_receives_post_data(self):%0A url = reverse('wizard_update', kwargs=%7B'step': self.step%7D)
%0A
@@ -669,16 +669,21 @@
st(url,
+self.
data)%0A
@@ -747,16 +747,16 @@
data'%5D,%0A
-
@@ -759,16 +759,21 @@
+self.
storage_
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.