| commit (string, 40 chars) | subject (string, 1–3.25k chars) | old_file (string, 4–311 chars) | new_file (string, 4–311 chars) | old_contents (string, 0–26.3k chars) | lang (string, 3 classes) | proba (float64, 0–1) | diff (string, 0–7.82k chars) |
|---|---|---|---|---|---|---|---|
cb85810364a235426147a440da797d35d114c5a6
|
Test Commit
|
raspberry/asip/RelationSemanticTag.py
|
raspberry/asip/RelationSemanticTag.py
|
Python
| 0.000001
|
@@ -0,0 +1,32 @@
+from SemanticTag import *
+
+#Test
|
|
1d5ea05e42def0048c8ccd3e3d51b6511c190f57
|
Update _test_utils.py
|
rhea/utils/test/_test_utils.py
|
rhea/utils/test/_test_utils.py
|
import os
import shutil
from glob import glob
import argparse
from myhdl import traceSignals, Simulation
def run_testbench(bench, timescale='1ns', args=None):
if args is None:
args = tb_argparser().parse_args()
vcd = tb_clean_vcd(bench.__name__)
if args.trace:
# @todo: the following (timescale) needs to be set
traceSignals.timescale = timescale
traceSignals.name = vcd
gens = traceSignals(bench)
else:
gens = bench()
Simulation(gens).run()
def tb_argparser():
""" common command line arguments
"""
parser = argparse.ArgumentParser()
parser.add_argument('--trace',action='store_true')
parser.add_argument('--test',action='store_true')
parser.add_argument('--convert', action='store_true')
return parser
def tb_move_generated_files():
""" move generated files
This function should be used with caution, it blindly moves
all the *.vhd, *.v, and *.png files. These files typically
do not exist in the project except the cosim directories and
the documentation directories. Most of the time it is safe
to use this function to clean up after a test.
"""
# move all VHDL files
for vf in glob('*.vhd'):
if os.path.isfile(os.path.join('output/vhd/', vf)):
os.remove(os.path.join('output/vhd/', vf))
shutil.move(vf, 'output/vhd/')
# move all Verilog files
for vf in glob('*.v'):
if os.path.isfile(os.path.join('output/ver/', vf)):
os.remove(os.path.join('output/ver/', vf))
shutil.move(vf, 'output/ver/')
# move all png files
for pf in glob('*.png'):
if os.path.isfile(os.path.join('output/png/', pf)):
os.remove(os.path.join('output/png/', pf))
shutil.move(pf, 'output/png/')
def tb_clean_vcd(name):
""" clean up vcd files """
vcdpath = 'output/vcd'
if not os.path.isdir(vcdpath):
os.makedirs(vcdpath)
for vv in glob(os.path.join(vcdpath, '*.vcd.*')):
os.remove(vv)
nmpth = os.path.join(vcdpath, '{}.vcd'.format(name))
if os.path.isfile(nmpth):
os.remove(nmpth)
# return the VCD path+name minus extension
return nmpth[:-4]
def tb_mon_():
""" """
pass
|
Python
| 0.000005
|
@@ -152,24 +152,225 @@
args=None):
+    """ run (simulate) a testbench
+    The args need to be retrieved outside the testbench
+    else the test will fail with the pytest runner, if
+    no args are passed a default will be used
+    """
if args
@@ -393,27 +393,24 @@
args =
-tb_
argparse
r().pars
@@ -405,23 +405,30 @@
arse
-r().parse_args(
+.Namespace(trace=False
)
|
ea3cc841693b9adadb681349ac6cd79d985e7e7c
|
Handle case when there's no 'ids' key
|
rinoh/frontend/rst/__init__.py
|
rinoh/frontend/rst/__init__.py
|
# This file is part of RinohType, the Python document preparation system.
#
# Copyright (c) Brecht Machiels.
#
# Use of this source code is subject to the terms of the GNU Affero General
# Public License v3. See the LICENSE file or http://www.gnu.org/licenses/.
from functools import wraps
from docutils.core import publish_doctree
from rinoh.text import MixedStyledText
from rinoh.flowable import StaticGroupedFlowables
from rinoh.style import PARENT_STYLE
from rinoh.util import all_subclasses
class CustomElement(object):
@classmethod
def map_node(cls, node):
return cls.MAPPING[node.__class__.__name__](node)
def __init__(self, doctree_node):
self.node = doctree_node
def __getattr__(self, name):
for child in self.node.children:
if child.tagname == name:
return self.map_node(child)
raise AttributeError('No such element: {}'.format(name))
def __getitem__(self, name):
return self.node[name]
def __iter__(self):
try:
for child in self.parent.node.children:
if child.tagname == self.node.tagname:
yield self.map_node(child)
except AttributeError:
# this is the root element
yield self
@property
def attributes(self):
return self.node.attributes
@property
def parent(self):
if self.node.parent is not None:
return self.map_node(self.node.parent)
@property
def text(self):
return self.node.astext()
def get(self, key, default=None):
return self.node.get(key, default)
def getchildren(self):
return [self.map_node(child) for child in self.node.children]
def process_content(self, style=None):
preserve_space = self.get('xml:space', None) == 'preserve'
return MixedStyledText([text
for text in (child.styled_text(preserve_space)
for child in self.getchildren())
if text], style=style)
@property
def location(self):
return '{}:{} <{}>'.format(self.node.source, self.node.line,
self.node.tagname)
def set_source(method):
"""Decorator that sets the `source` attribute of the returned object to
`self`"""
@wraps(method)
def method_wrapper(obj, *args, **kwargs):
result = method(obj, *args, **kwargs)
try:
result.source = obj
except AttributeError:
pass
return result
return method_wrapper
class BodyElement(CustomElement):
@set_source
def flowable(self):
flowable = self.build_flowable()
ids = self.get('ids')
if ids:
# assert len(ids) == 1
flowable.id = ids[0]
return flowable
def build_flowable(self):
raise NotImplementedError('tag: %s' % self.tag)
class BodySubElement(CustomElement):
def process(self):
raise NotImplementedError('tag: %s' % self.tag)
class InlineElement(CustomElement):
@property
def text(self):
return super().text.replace('\n', ' ')
@set_source
def styled_text(self, preserve_space=False):
return self.build_styled_text()
def build_styled_text(self):
raise NotImplementedError('tag: %s' % self.tag)
class GroupingElement(BodyElement):
style = None
grouped_flowables_class = StaticGroupedFlowables
def build_flowable(self, **kwargs):
flowables = [item.flowable() for item in self.getchildren()]
return self.grouped_flowables_class(flowables,
style=self.style, **kwargs)
from . import nodes
CustomElement.MAPPING = {cls.__name__.lower(): cls
for cls in all_subclasses(CustomElement)}
CustomElement.MAPPING['Text'] = nodes.Text
class ReStructuredTextParser(object):
def parse(self, filename):
with open(filename) as file:
doctree = publish_doctree(file.read(), source_path=filename)
return self.from_doctree(doctree)
@staticmethod
def replace_secondary_ids(tree):
id_aliases = {}
for node in tree.traverse():
try:
primary_id, *alias_ids = node.get('ids')
for alias_id in alias_ids:
id_aliases[alias_id] = primary_id
except (AttributeError, KeyError, ValueError):
pass
# replace alias IDs used in references with the corresponding primary ID
for node in tree.traverse():
try:
refid = node.get('refid')
if refid in id_aliases:
node.attributes['refid'] = id_aliases[refid]
except AttributeError:
pass
def from_doctree(self, doctree):
self.replace_secondary_ids(doctree)
return CustomElement.map_node(doctree.document)
|
Python
| 0
|
@@ -4324,26 +4324,33 @@
= node.
-get(
+attributes[
'ids'
-)
+]
|
65c7d07636ea972113b2263b995eaa7eef10c590
|
Remove short option string for cpu type
|
configs/common/Options.py
|
configs/common/Options.py
|
# Copyright (c) 2006-2008 The Regents of The University of Michigan
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met: redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer;
# redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution;
# neither the name of the copyright holders nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
# Authors: Lisa Hsu
# system options
parser.add_option("-c", "--cpu-type", type="choice", default="atomic",
choices = ["atomic", "timing", "detailed", "inorder"],
help = "type of cpu to run with")
parser.add_option("-n", "--num-cpus", type="int", default=1)
parser.add_option("--caches", action="store_true")
parser.add_option("--l2cache", action="store_true")
parser.add_option("--fastmem", action="store_true")
parser.add_option("--clock", action="store", type="string", default='2GHz')
parser.add_option("--num-dirs", type="int", default=1)
parser.add_option("--num-l2caches", type="int", default=1)
parser.add_option("--num-l3caches", type="int", default=1)
parser.add_option("--l1d_size", type="string", default="64kB")
parser.add_option("--l1i_size", type="string", default="32kB")
parser.add_option("--l2_size", type="string", default="2MB")
parser.add_option("--l3_size", type="string", default="16MB")
parser.add_option("--l1d_assoc", type="int", default=2)
parser.add_option("--l1i_assoc", type="int", default=2)
parser.add_option("--l2_assoc", type="int", default=8)
parser.add_option("--l3_assoc", type="int", default=16)
parser.add_option("--cacheline_size", type="int", default=64)
# Run duration options
parser.add_option("-m", "--maxtick", type="int", default=m5.MaxTick,
metavar="T",
help="Stop after T ticks")
parser.add_option("--maxtime", type="float")
parser.add_option("-I", "--maxinsts", action="store", type="int", default=None,
help="Total number of instructions to simulate (default: run forever)")
parser.add_option("--work-item-id", action="store", type="int",
help="the specific work id for exit & checkpointing")
parser.add_option("--work-begin-cpu-id-exit", action="store", type="int",
help="exit when work starts on the specified cpu")
parser.add_option("--work-end-exit-count", action="store", type="int",
help="exit at specified work end count")
parser.add_option("--work-begin-exit-count", action="store", type="int",
help="exit at specified work begin count")
parser.add_option("--init-param", action="store", type="int", default=0,
help="Parameter available in simulation with m5 initparam")
# Checkpointing options
###Note that performing checkpointing via python script files will override
###checkpoint instructions built into binaries.
parser.add_option("--take-checkpoints", action="store", type="string",
help="<M,N> will take checkpoint at cycle M and every N cycles thereafter")
parser.add_option("--max-checkpoints", action="store", type="int",
help="the maximum number of checkpoints to drop", default=5)
parser.add_option("--checkpoint-dir", action="store", type="string",
help="Place all checkpoints in this absolute directory")
parser.add_option("-r", "--checkpoint-restore", action="store", type="int",
help="restore from checkpoint <N>")
parser.add_option("--checkpoint-at-end", action="store_true",
help="take a checkpoint at end of run")
parser.add_option("--work-begin-checkpoint-count", action="store", type="int",
help="checkpoint at specified work begin count")
parser.add_option("--work-end-checkpoint-count", action="store", type="int",
help="checkpoint at specified work end count")
parser.add_option("--work-cpus-checkpoint-count", action="store", type="int",
help="checkpoint and exit when active cpu count is reached")
# CPU Switching - default switch model goes from a checkpoint
# to a timing simple CPU with caches to warm up, then to detailed CPU for
# data measurement
parser.add_option("-s", "--standard-switch", action="store_true",
help="switch from timing CPU to Detailed CPU")
parser.add_option("-w", "--warmup", action="store", type="int",
help="if -s, then this is the warmup period. else, this is ignored",
default=5000000000)
parser.add_option("-p", "--prog-interval", type="int", help="CPU Progress Interval")
# Fastforwarding and simpoint related materials
parser.add_option("-W", "--warmup-insts", action="store", type="int",
default=None,
help="Warmup period in total instructions (requires --standard-switch)")
parser.add_option("--bench", action="store", type="string", default=None,
help="base names for --take-checkpoint and --checkpoint-restore")
parser.add_option("-F", "--fast-forward", action="store", type="string",
default=None,
help="Number of instructions to fast forward before switching")
parser.add_option("-S", "--simpoint", action="store_true", default=False,
help="""Use workload simpoints as an instruction offset for
--checkpoint-restore or --take-checkpoint.""")
parser.add_option("--at-instruction", action="store_true", default=False,
help="""Treate value of --checkpoint-restore or --take-checkpoint as a
number of instructions.""")
|
Python
| 0.000006
|
@@ -1607,14 +1607,8 @@
ion(
-"-c",
"--c
|
ef335362b5f601da41377984b8d9cc675d9ed669
|
Create ddns_sync.py
|
ddns_sync.py
|
ddns_sync.py
|
Python
| 0.000009
|
@@ -0,0 +1,1346 @@
+#!/usr/bin/env python3
+
+import boto3
+from get import getjson
+
+query = "http://evolutiva.mx/getip/"
+data = getjson(query)
+
+if not data:
+    exit()
+
+new_ip = dict(data)['ip']
+old_ip = None
+
+r53 = boto3.client('route53') #.connect_to_region('us-west-2')
+
+try:
+    for res in r53.list_resource_record_sets(HostedZoneId='/hostedzone/Z2XLK91YNO8JY8')['ResourceRecordSets']:
+        if res['Type'] == 'A' and res['Name'] == 'cuchulainn.evolutiva.mx.':
+            old_ip = res['ResourceRecords'][0]['Value']
+except:
+    pass
+
+if new_ip == old_ip:
+    print('Sin Cambios')
+else:
+    # Ex: {'ResourceRecords': [{'Value': '187.207.0.253'}], 'TTL': 300, 'Name': 'cuchulainn.evolutiva.mx.', 'Type': 'A'}
+    CB = {
+        'Changes': [{
+            'Action': 'UPSERT',
+            'ResourceRecordSet': {
+                'Name': 'cuchulainn.evolutiva.mx',
+                'Type': 'A',
+                'TTL': 300,
+                'ResourceRecords': [
+                    {
+                        'Value': new_ip
+                    }
+                ]
+            }
+        }]
+    }
+    response = r53.change_resource_record_sets(HostedZoneId='/hostedzone/Z2XLK91YNO8JY8', ChangeBatch=CB)
+    print(response)
|
|
8821fd5e4678dd8a2baf78d3ed068b652a10d1cd
|
Add initial games unit
|
units/games.py
|
units/games.py
|
Python
| 0
|
@@ -0,0 +1,481 @@
+
+import random
+
+def eightball():
+	responses = ["It is certain", "It is decidedly so", "Without a doubt", "Yes, definitely", "You may rely on it", "As I see it, yes", "Most likely", "Outlook good", "Yes", "Signs point to yes", "Reply hazy try again", "Ask again later", "Better not tell you now", "Cannot predict now", "Concentrate and ask again", "Don't count on it", "My reply is no", "My sources say no", "Outlook not so good", "Very doubtful"]
+	return random.choice(responses)
|
|
0420aa1bf7bb8027379de52de783da87ce253f62
|
add batch upload script
|
uploadBatch.py
|
uploadBatch.py
|
Python
| 0.000001
|
@@ -0,0 +1,2183 @@
+# This is a python script for uploading batch data to Genotet server.
+# The user may write a *.tsv file, with each line as:
+# file_path data_name file_type description
+# The command line would be:
+# python uploadBatch.py username *.tsv
+# And then enter your password for Genotet.
+
+from requests_toolbelt import MultipartEncoder
+import requests
+import sys
+import getpass
+import json
+
+
+def upload_file(file_path, data_name, file_type, description, cookies):
+    upload_url = 'http://localhost:3000/genotet/upload'
+    file_path_parts = file_path.split('\/')
+    file_name = file_path_parts[len(file_path_parts) - 1]
+    params = MultipartEncoder(
+        fields={'type': file_type,
+                'name': data_name,
+                'description': description,
+                'username': 'anonymous',
+                'file': (file_name, open(file_path, 'rb'), 'text/plain')})
+    headers = {'Content-Type': params.content_type}
+    cookie = {'genotet-session': cookies['genotet-session']}
+    response = requests.post(upload_url, data=params, headers=headers, cookies=cookie)
+    print response.status_code
+    return True
+
+
+def auth(username, password):
+    auth_url = 'http://localhost:3000/genotet/user'
+    params = {
+        'type': 'sign-in',
+        'username': username,
+        'password': password
+    }
+    params = {'data': json.dumps(params)}
+    response = requests.get(auth_url, params=params)
+    if response.status_code != 200:
+        return False
+    return response.cookies, True
+
+
+def main(argv):
+    if len(argv) < 3:
+        print 'input not enough'
+        return
+    username = argv[1]
+    password = getpass.getpass('Password:')
+    cookies, auth_result = auth(username, password)
+    if not auth_result:
+        print 'username/password not correct'
+        return
+    else:
+        print 'sign in success'
+    file_path = argv[2]
+    tsv_file = open(file_path, 'r')
+    for line in tsv_file:
+        parts = line.split(' ')
+        result = upload_file(parts[0], parts[1], parts[2], parts[3], cookies)
+        if not result:
+            print 'failed to upload ' + parts[0]
+            return
+
+if __name__ == '__main__':
+    main(sys.argv)
|
|
da373b924cf4dffe639e29543b5fc0e728be1ed9
|
Add orgviz.randomnodes
|
orgviz/randomnodes.py
|
orgviz/randomnodes.py
|
Python
| 0.001097
|
@@ -0,0 +1,1728 @@
+import random
+import datetime
+
+
+class RandomDatetime(object):
+
+    def __init__(self, datewidth=7):
+        self.datewidth = datewidth
+        self.now = datetime.datetime.now()
+
+    def datetime(self):
+        delta = datetime.timedelta(random.randrange(- self.datewidth,
+                                                    self.datewidth + 1))
+        return self.now + delta
+
+    def date(self):
+        return datetime.date(*self.datetime().timetuple()[:3])
+
+
+def node(level, heading, scheduled=None, deadline=None, closed=None,
+         clock=None):
+    datestr = lambda x: x.strftime('<%Y-%m-%d %a>')
+    yield '*' * level
+    yield ' '
+    yield heading
+    yield '\n'
+    if scheduled or deadline or closed:
+        yield ' ' * level
+        for (name, date) in [('CLOSED', closed),
+                             ('DEADLINE', deadline),
+                             ('SCHEDULED', scheduled)]:
+            if date:
+                yield ' '
+                yield name
+                yield ': '
+                yield datestr(date)
+    if scheduled or deadline or closed:
+        yield '\n'
+
+
+def makeorg(num):
+    heading_pops = ['aaa', 'bbb', 'ccc']
+    true_or_false = [True, False]
+    rd = RandomDatetime()
+    for i in range(num):
+        kwds = {}
+        if i == 0:
+            kwds['level'] = 1
+        else:
+            kwds['level'] = random.randrange(1, 4)
+        kwds['heading'] = random.choice(heading_pops)
+        for sdc in ['scheduled', 'deadline', 'closed']:
+            if random.choice(true_or_false):
+                kwds[sdc] = rd.date()
+        for s in node(**kwds):
+            yield s
+
+
+def writeorg(file, *args, **kwds):
+    file.writelines(makeorg(*args, **kwds))
+
+
+def run(num):
+    import sys
+    writeorg(sys.stdout, num)
|
|
a5bffdaa29d2f270a6f8781c34a2756a66a00a87
|
Bump version
|
flexget/_version.py
|
flexget/_version.py
|
"""
Current FlexGet version.
This is contained in a separate file so that it can be easily read by setup.py, and easily edited and committed by
release scripts in continuous integration. Should (almost) never be set manually.
The version should always be set to the <next release version>.dev
The jenkins release job will automatically strip the .dev for release,
and update the version again for continued development.
"""
__version__ = '2.19.5.dev'
|
Python
| 0
|
@@ -439,12 +439,12 @@
'2.
-19.5
+20.0
.dev
|
6b5c46238975eb63b36f43eb79002946a744fd68
|
Prepare v2.10.47.dev
|
flexget/_version.py
|
flexget/_version.py
|
"""
Current FlexGet version.
This is contained in a separate file so that it can be easily read by setup.py, and easily edited and committed by
release scripts in continuous integration. Should (almost) never be set manually.
The version should always be set to the <next release version>.dev
The jenkins release job will automatically strip the .dev for release,
and update the version again for continued development.
"""
__version__ = '2.10.46'
|
Python
| 0.000002
|
@@ -443,7 +443,11 @@
10.4
-6
+7.dev
'
|
ea41e4cdc515ca8514c3613a1f474fb3627b7dda
|
Remove autosynth / tweaks for 'README.rst' / 'setup.py'. (#5957)
|
tasks/synth.py
|
tasks/synth.py
|
# Copyright 2018 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""This script is used to synthesize generated parts of this library."""
import synthtool as s
import synthtool.gcp as gcp
import logging
logging.basicConfig(level=logging.DEBUG)
gapic = gcp.GAPICGenerator()
common = gcp.CommonTemplates()
for version in ['v2beta2', 'v2beta3']:
library = gapic.py_library(
'tasks', version,
config_path=f'artman_cloudtasks_{version}.yaml')
s.copy(library, excludes=['docs/conf.py', 'docs/index.rst'])
# Fix unindentation of bullet list second line
s.replace(
f'google/cloud/tasks_{version}/gapic/cloud_tasks_client.py',
'( \* .*\n )([^\s*])',
'\g<1> \g<2>')
s.replace(
f'google/cloud/tasks_{version}/gapic/cloud_tasks_client.py',
'(Google IAM .*?_) ',
'\g<1>_ ')
# Issues with Anonymous ('__') links. Change to named.
s.replace(
f"google/cloud/tasks_{version}/proto/*.py",
">`__",
">`_")
# Issue in v2beta2
s.replace(
f'google/cloud/tasks_v2beta2/gapic/cloud_tasks_client.py',
r'(Sample filter \\"app_engine_http_target: )\*\\".',
'\g<1>\\*\\".')
# Wrapped link fails due to space in link (v2beta2)
s.replace(
f"google/cloud/tasks_v2beta2/proto/queue_pb2.py",
'(uests in queue.yaml/xml) <\n\s+',
'\g<1>\n <')
# Set Release Status
release_status = 'Development Status :: 3 - Alpha'
s.replace('setup.py',
'(release_status = )(.*)$',
f"\\1'{release_status}'")
# Add Dependencies
s.replace('setup.py',
'dependencies = \[\n*(^.*,\n)+',
"\\g<0> 'grpc-google-iam-v1<0.12dev,>=0.11.4',\n")
# Fix the enable API link
s.replace(
'README.rst',
r'.. _Enable the Cloud Tasks API.: https://cloud.google.com/tasks',
'.. _Enable the Cloud Tasks API.: https://console.cloud.google.com/apis/'
'library/cloudtasks.googleapis.com')
|
Python
| 0
|
@@ -809,16 +809,107 @@
plates()
+
+excludes = [
+    'README.rst',
+    'setup.py',
+    'docs/conf.py',
+    'docs/index.rst',
+]
for ve
@@ -1090,42 +1090,16 @@
des=
-['docs/conf.py', 'docs/index.rst']
+excludes
)
@@ -1953,571 +1953,4 @@
<')
-
-# Set Release Status
-release_status = 'Development Status :: 3 - Alpha'
-s.replace('setup.py',
-          '(release_status = )(.*)$',
-          f"\\1'{release_status}'")
-
-# Add Dependencies
-s.replace('setup.py',
-          'dependencies = \[\n*(^.*,\n)+',
-          "\\g<0> 'grpc-google-iam-v1<0.12dev,>=0.11.4',\n")
-
-# Fix the enable API link
-s.replace(
-    'README.rst',
-    r'.. _Enable the Cloud Tasks API.: https://cloud.google.com/tasks',
-    '.. _Enable the Cloud Tasks API.: https://console.cloud.google.com/apis/'
-    'library/cloudtasks.googleapis.com')
|
761ec2bd6492b041eb658ee836a63ffb877469d5
|
Add management command to load all version fixtures
|
cbv/management/commands/load_all_django_versions.py
|
cbv/management/commands/load_all_django_versions.py
|
Python
| 0.000003
|
@@ -0,0 +1,834 @@
+import os
+import re
+
+from django.conf import settings
+from django.core.management import call_command, BaseCommand
+
+
+class Command(BaseCommand):
+    """Load the Django project fixtures and all version fixtures"""
+
+    def handle(self, **options):
+        fixtures_dir = os.path.join(settings.DIRNAME, 'cbv', 'fixtures')
+        self.stdout.write('Loading project.json')
+        call_command('loaddata', 'cbv/fixtures/project.json')
+        version_fixtures = [re.match(r'((?:\d+\.){2,3}json)', filename) for filename in os.listdir(fixtures_dir)]
+        for match in version_fixtures:
+            try:
+                fixture = match.group()
+            except AttributeError:
+                continue
+            self.stdout.write('Loading {}'.format(fixture))
+            call_command('loaddata', 'cbv/fixtures/{}'.format(fixture))
|
|
5fc15bdf5bc1582764319a326fc383384963bffe
|
Add `argumentCollection` suggestion for function calls, closes #29
|
src/basecompletions/basecompletions.py
|
src/basecompletions/basecompletions.py
|
import sublime
import json
from ..completions import CompletionList
from ..inline_documentation import Documentation
from .. import utils
COMPLETION_FILES = ["cfml_tags","cfml_functions","cfml_member_functions"]
DOC_STYLES = {
"side_color": "#4C9BB0",
"header_color": "#306B7B",
"header_bg_color": "#E4EEF1",
"text_color": "#272B33"
}
completions = {}
cgi = {}
def get_tags(view, prefix, position, info):
completion_list = completions["cfml_tags"]
return CompletionList(completion_list, 0, False)
def get_tag_attributes(view, prefix, position, info):
if not info["tag_name"]:
return None
if info["tag_in_script"] and not info["tag_name"].startswith("cf"):
info["tag_name"] = "cf" + info["tag_name"]
# tag attribute value completions
if info["tag_attribute_name"]:
if (info["tag_name"] in completions["cfml_tag_attribute_values"]
and info["tag_attribute_name"] in completions["cfml_tag_attribute_values"][info["tag_name"]]):
completion_list = completions["cfml_tag_attribute_values"][info["tag_name"]][info["tag_attribute_name"]]
return CompletionList(completion_list, 0, False)
return None
# tag attribute completions
if info["previous_char"] in [" ", "(", "\t", "\n"]:
completion_list = completions["cfml_tag_attributes"].get(info["tag_name"], None)
if completion_list:
return CompletionList(completion_list, 0, False)
return None
def get_script_completions(view, prefix, position, info):
completion_list = []
completion_list.extend(completions["cfml_functions"])
completion_list.extend(completions["cfml_cf_tags_in_script"])
completion_list.extend(completions["cfml_tags_in_script"])
return CompletionList(completion_list, 0, False)
def get_dot_completions(view, prefix, position, info):
if len(info["dot_context"]) == 1 and info["dot_context"][0].name == "cgi":
return CompletionList(completions["cgi"], 1, True)
completion_list = completions["cfml_member_functions"]
return CompletionList(completion_list, 0, False)
def get_inline_documentation(view, position):
if view.match_selector(position, "meta.property.constant"):
word = view.word(position)
dot_context = utils.get_dot_context(view, word.begin() - 1)
if len(dot_context) == 1 and dot_context[0].name == "cgi":
key = "cgi." + view.substr(word).lower()
if key in cgi:
doc = dict(DOC_STYLES)
doc.update(cgi[key])
return Documentation(doc, None, 1)
return None
def load_completions():
global completions, cgi
completions_data = {filename: load_json_data(filename) for filename in COMPLETION_FILES}
# tags
completions["cfml_tags"] = []
completions["cfml_tags_in_script"] = []
completions["cfml_cf_tags_in_script"] = []
completions["cfml_tag_attributes"] = {}
completions["cfml_tag_attribute_values"] = {}
for tag_name in sorted(completions_data["cfml_tags"].keys()):
if isinstance(completions_data["cfml_tags"][tag_name], list):
completions_data["cfml_tags"][tag_name] = {"attributes": completions_data["cfml_tags"][tag_name], "attribute_values": {}}
tag_attributes = completions_data["cfml_tags"][tag_name]["attributes"]
completions["cfml_tags"].append(make_tag_completion(tag_name, tag_attributes[0]))
completions["cfml_tags_in_script"].append(make_tag_completion(tag_name[2:], tag_attributes[0]))
completions["cfml_cf_tags_in_script"].append(make_cf_script_tag_completion(tag_name, tag_attributes[0]))
completions["cfml_tag_attributes"][tag_name] = [(a + '\trequired', a + '="$1"') for a in tag_attributes[0]]
completions["cfml_tag_attributes"][tag_name].extend([(a + '\toptional', a + '="$1"') for a in tag_attributes[1]])
# attribute values
tag_attribute_values = completions_data["cfml_tags"][tag_name]["attribute_values"]
completions["cfml_tag_attribute_values"][tag_name] = {}
for attribute_name in sorted(tag_attribute_values.keys()):
completions["cfml_tag_attribute_values"][tag_name][attribute_name] = [(v + '\t' + attribute_name, v) for v in tag_attribute_values[attribute_name]]
# functions
completions["cfml_functions"] = [(funct + '\tfn (cfml)', funct + completions_data["cfml_functions"][funct]) for funct in sorted(completions_data["cfml_functions"].keys())]
# member functions
mem_func_comp = []
for member_function_type in sorted(completions_data["cfml_member_functions"].keys()):
for funct in sorted(completions_data["cfml_member_functions"][member_function_type].keys()):
mem_func_comp.append( (funct + '\t' + member_function_type + '.fn (cfml)', funct + completions_data["cfml_member_functions"][member_function_type][funct]))
completions["cfml_member_functions"] = mem_func_comp
# CGI scope
cgi = load_json_data("cgi")
completions["cgi"] = [(scope_variable.split(".").pop().upper() + "\tCGI", scope_variable.split(".").pop().upper()) for scope_variable in sorted(cgi.keys())]
def load_json_data(filename):
json_data = sublime.load_resource("Packages/" + utils.get_plugin_name() + "/src/basecompletions/json/" + filename + ".json")
return json.loads(json_data)
def make_tag_completion(tag, required_attrs):
attrs = ''
for index, attr in enumerate(required_attrs, 1):
attrs += ' ' + attr + '="$' + str(index) + '"'
return (tag + '\ttag (cfml)', tag + attrs)
def make_cf_script_tag_completion(tag, required_attrs):
attrs = []
for index, attr in enumerate(required_attrs, 1):
attrs.append(' ' + attr + '="$' + str(index) + '"')
return (tag + '\ttag (cfml)', tag + "(" + ",".join(attrs) + "$0 )")
|
Python
| 0
|
@@ -1449,24 +1449,248 @@
n_list = []
+
+	if view.match_selector(position, "meta.function-call.parameters.cfml,meta.function-call.parameters.method.cfml"):
+		completion_list.append(("argumentCollection\tparameter struct", "argumentCollection = ${1:parameters}"))
+
	completion_
|
9e2fe5de082c736ec44dbf150d8350a0e164d2ae
|
Create beta_which_operator.py
|
Solutions/beta/beta_which_operator.py
|
Solutions/beta/beta_which_operator.py
|
Python
| 0.000065
|
@@ -0,0 +1,173 @@
+def whichOper(a, b, oper):
+    return {'a':lambda x,y: x+y,
+            's':lambda x,y: x-y,
+            'm':lambda x,y: x*y,
+            'd':lambda x,y: x/y}[oper[0]](a,b)
|
|
e3dcc7ef44bbc8772fd5ad4f0941e5d98bf1ccdd
|
add migration
|
scholarly_citation_finder/apps/tasks/migrations/0003_auto_20160224_1349.py
|
scholarly_citation_finder/apps/tasks/migrations/0003_auto_20160224_1349.py
|
Python
| 0.000001
|
@@ -0,0 +1,549 @@
+# -*- coding: utf-8 -*-
+from __future__ import unicode_literals
+
+from django.db import migrations, models
+
+
+class Migration(migrations.Migration):
+
+    dependencies = [
+        ('tasks', '0002_task_starttime'),
+    ]
+
+    operations = [
+        migrations.RemoveField(
+            model_name='task',
+            name='taskmeta',
+        ),
+        migrations.AddField(
+            model_name='task',
+            name='taskmeta_id',
+            field=models.CharField(default='', max_length=100),
+            preserve_default=False,
+        ),
+    ]
|
|
d60ffedf64269b85d40eda4caa01b548d27bd2a5
|
clean up imports
|
tabular_predDB/python_utils/convergence_test_utils.py
|
tabular_predDB/python_utils/convergence_test_utils.py
|
import numpy, pdb
import tabular_predDB.python_utils.data_utils as du
import tabular_predDB.python_utils.xnet_utils as xu
import tabular_predDB.LocalEngine as LE
import tabular_predDB.cython_code.State as State
from sklearn import metrics
def truth_from_permute_indices(data_inverse_permutation_indices, num_rows,num_cols,num_views, num_clusters):
# We assume num_rows is divisible by num_clusters and num_cols is divisible by num_views
num_cols_per_view = num_cols/num_views
view_assignments = []
for viewindx in range(num_views):
view_assignments = view_assignments + [viewindx]*num_cols_per_view
num_rows_per_cluster = num_rows/num_clusters
reference_list = []
for clusterindx in range(num_clusters):
reference_list = reference_list + [clusterindx]*num_rows_per_cluster
X_D_truth = []
for viewindx in range(num_views):
X_D_truth.append([a for (b,a) in sorted(zip(data_inverse_permutation_indices[viewindx], reference_list))])
return view_assignments, X_D_truth
def ARI_CrossCat(Xc, Xrv, XRc, XRrv):
''' Adjusted Rand Index (ARI) calculation for a CrossCat clustered table
To calculate ARI based on the CrossCat partition, each cell in the
table is considered as an instance to be assigned to a cluster. A cluster
is defined by both the view index AND the category index. In other words,
if, and only if, two cells, regardless of which columns and rows they belong
to, are lumped into the same view and category, the two cells are considered
to be in the same cluster.
For a table of size Nrow x Ncol
Xc: (1 x Ncol) array of view assignment for each column.
Note: It is assumed that the view indices are consecutive integers
starting from 0. Hence, the number of views is equal to highest
view index plus 1.
Xrv: (Nrow x Nview) array where each row is the assignment of categories for the
corresponding row in the data table. The i-th element in a row
corresponds to the category assignment of the i-th view of that row.
XRc and XRrv have the same format as Xr and Xrv respectively.
The ARI index is calculated from the comparison of the table clustering
define by (XRc, XRrv) and (Xc, Xrv).
'''
Xrv = Xrv.T
XRrv = XRrv.T
# Find the highest category index of all views
max_cat_index = numpy.max(Xrv)
# re-assign category indices so that they have different values in
# different views
Xrv = Xrv + numpy.arange(0,Xrv.shape[1])*(max_cat_index+1)
# similarly for the reference partition
max_cat_index = numpy.max(XRrv)
XRrv = XRrv + numpy.arange(0,XRrv.shape[1])*(max_cat_index+1)
# Table clustering assignment for the first partition
CellClusterAssgn = numpy.zeros((Xrv.shape[0], Xc.size))
for icol in range(Xc.size):
CellClusterAssgn[:,icol]=Xrv[:,Xc[icol]]
# Flatten the table to a 1-D array compatible with the ARI function
CellClusterAssgn = CellClusterAssgn.reshape(CellClusterAssgn.size)
# Table clustering assignment for the second partition
RefCellClusterAssgn = numpy.zeros((Xrv.shape[0], Xc.size))
for icol in range(Xc.size):
RefCellClusterAssgn[:,icol]=XRrv[:,XRc[icol]]
# Flatten the table
RefCellClusterAssgn = RefCellClusterAssgn.reshape(RefCellClusterAssgn.size)
# Compare the two partitions using ARI
ARI = metrics.adjusted_rand_score(RefCellClusterAssgn, CellClusterAssgn)
ARI_viewonly = metrics.adjusted_rand_score(Xc, XRc)
return ARI, ARI_viewonly
def multi_chain_ARI(X_L_list, X_D_List, view_assignment_truth, X_D_truth, return_list=False):
num_chains = len(X_L_list)
ari_table = numpy.zeros(num_chains)
ari_views = numpy.zeros(num_chains)
for chainindx in range(num_chains):
view_assignments = X_L_list[chainindx]['column_partition']['assignments']
curr_ari_table, curr_ari_views = ARI_CrossCat(numpy.asarray(view_assignments), numpy.asarray(X_D_List[chainindx]), numpy.asarray(view_assignment_truth), numpy.asarray(X_D_truth))
ari_table[chainindx] = curr_ari_table
ari_views[chainindx] = curr_ari_views
ari_table_mean = numpy.mean(ari_table)
ari_views_mean = numpy.mean(ari_views)
if return_list:
return ari_table, ari_views
else:
return ari_table_mean, ari_views_mean
|
Python
| 0.000001
|
@@ -9,207 +9,8 @@
umpy
-, pdb
-
-import tabular_predDB.python_utils.data_utils as du
-import tabular_predDB.python_utils.xnet_utils as xu
-import tabular_predDB.LocalEngine as LE
-import tabular_predDB.cython_code.State as State
fro
@@ -35,16 +35,17 @@
etrics
+
def trut
|
0711b2eee10e5e48186d78144697a35640a33cb1
|
Add a passthrough manager
|
mysql_fuzzycount/managers.py
|
mysql_fuzzycount/managers.py
|
Python
| 0.000002
|
@@ -0,0 +1,190 @@
+from model_utils.managers import PassThroughManager
+
+from mysql_fuzzycount.queryset import FuzzyCountQuerySet
+
+
+FuzzyCountManager = PassThroughManager.for_queryset_class(FuzzyCountQuerySet)
|
|
56b2897655940962a8cfa06cc8a9fcfe22262412
|
Create config_local.py
|
pgadmin4/config_local.py
|
pgadmin4/config_local.py
|
Python
| 0.000003
|
@@ -0,0 +1,2411 @@
+# -*- coding: utf-8 -*-
+
+##########################################################################
+#
+# pgAdmin 4 - PostgreSQL Tools
+#
+# Copyright (C) 2013 - 2016, The pgAdmin Development Team
+# This software is released under the PostgreSQL Licence
+#
+# config_local.py - Core application configuration settings
+#
+##########################################################################
+
+import os
+from distutils.util import strtobool
+from logging import *
+
+# Data directory for storage of config settings etc. This shouldn't normally
+# need to be changed - it's here as various other settings depend on it.
+DATA_DIR = os.getenv('PG_ADMIN_DATA_DIR', '/pgadmin/')
+
+
+##########################################################################
+# Log settings
+##########################################################################
+
+DEBUG = strtobool(os.getenv('DEBUG', "False"))
+
+# Log to stdout so that logging is handled by Docker logging drivers
+LOG_FILE = '/dev/stdout'
+
+##########################################################################
+# Server settings
+##########################################################################
+
+SERVER_MODE = False
+
+DEFAULT_SERVER = '0.0.0.0'
+DEFAULT_SERVER_PORT = int(os.getenv('PG_ADMIN_PORT', 5050))
+
+
+##########################################################################
+# User account and settings storage
+##########################################################################
+
+SQLITE_PATH = os.path.join(DATA_DIR, 'config', 'pgadmin4.db')
+
+SESSION_DB_PATH = '/dev/shm/pgAdmin4_session'
+
+##########################################################################
+# Upgrade checks
+##########################################################################
+
+# Disable upgrade checks; container should be immutable
+UPGRADE_CHECK_ENABLED = False
+
+##########################################################################
+# Storage Manager storage url config settings
+# If user sets STORAGE_DIR to empty it will show all volumes if platform
+# is Windows, '/' if it is Linux, Mac or any other unix type system.
+
+# For example:
+# 1. STORAGE_DIR = get_drive("C") or get_drive() # return C:/ by default
+# where C can be any drive character such as "D", "E", "G" etc
+# 2. Set path manually like
+# STORAGE_DIR = "/path/to/directory/"
+##########################################################################
+STORAGE_DIR = os.path.join(DATA_DIR, 'storage')
|
|
1de5b9746c33add889837e5e0feaf1796fb00eb8
|
add script to generate breakseq index
|
scripts/breakseq2_gen_bplib.py
|
scripts/breakseq2_gen_bplib.py
|
Python
| 0.000001
|
@@ -0,0 +1,782 @@
+#!/usr/bin/env python
+
+import argparse
+from breakseq2 import breakseq_index, _version
+
+if __name__ == "__main__":
+    parser = argparse.ArgumentParser(description="Generate breakpoint library FASTA from breakpoint GFF",
+                                     formatter_class=argparse.ArgumentDefaultsHelpFormatter)
+    breakseq_index.add_options(parser)
+    parser.add_argument("--reference", help="Reference FASTA", required=True)
+    parser.add_argument("--output", help="Output FASTA to generate. Leave unspecified for stdout")
+    parser.add_argument('--version', action='version', version='%(prog)s ' + _version.__version__)
+    args = parser.parse_args()
+
+    breakseq_index.generate_bplib(args.bplib_gff, args.reference, args.output, args.junction_length, args.format_version)
|
|
33f455cba56c7ae557cfe9f5494b6a045c68f1d2
|
add simple hello world
|
examples/hello.py
|
examples/hello.py
|
Python
| 0.999998
|
@@ -0,0 +1,220 @@
+from flower import run, schedule, tasklet
+
+def say(s):
+    for i in range(5):
+        schedule()
+        print(s)
+
+def main():
+    tasklet(say)("world")
+    say("hello")
+
+    run()
+
+if __name__ == '__main__':
+    main()
|
|
152dafeeb35647dbcfb25549f7f1e73a397428a0
|
Add urls for the demo
|
demo_zinnia_bitly/urls.py
|
demo_zinnia_bitly/urls.py
|
Python
| 0
|
@@ -0,0 +1,1577 @@
+"""Urls for the zinnia-bitly demo"""
+from django.conf import settings
+from django.contrib import admin
+from django.conf.urls import url
+from django.conf.urls import include
+from django.conf.urls import patterns
+from django.views.generic.base import RedirectView
+
+from zinnia.sitemaps import TagSitemap
+from zinnia.sitemaps import EntrySitemap
+from zinnia.sitemaps import CategorySitemap
+from zinnia.sitemaps import AuthorSitemap
+
+admin.autodiscover()
+
+urlpatterns = patterns(
+    '',
+    url(r'^$', RedirectView.as_view(url='/blog/')),
+    url(r'^blog/', include('zinnia.urls', namespace='zinnia')),
+    url(r'^comments/', include('django.contrib.comments.urls')),
+    url(r'^i18n/', include('django.conf.urls.i18n')),
+    url(r'^admin/', include(admin.site.urls)),
+)
+
+sitemaps = {
+    'tags': TagSitemap,
+    'blog': EntrySitemap,
+    'authors': AuthorSitemap,
+    'categories': CategorySitemap
+}
+
+urlpatterns += patterns(
+    'django.contrib.sitemaps.views',
+    url(r'^sitemap.xml$', 'index',
+        {'sitemaps': sitemaps}),
+    url(r'^sitemap-(?P<section>.+)\.xml$', 'sitemap',
+        {'sitemaps': sitemaps}),
+)
+
+urlpatterns += patterns(
+    '',
+    url(r'^400/$', 'django.views.defaults.bad_request'),
+    url(r'^403/$', 'django.views.defaults.permission_denied'),
+    url(r'^404/$', 'django.views.defaults.page_not_found'),
+    url(r'^500/$', 'django.views.defaults.server_error'),
+)
+
+if settings.DEBUG:
+    urlpatterns += patterns(
+        '',
+        url(r'^media/(?P<path>.*)$', 'django.views.static.serve',
+            {'document_root': settings.MEDIA_ROOT})
+    )
|
|
64b14b64c00bc6885acc1ff4d9b76898f66a8a86
|
add new package (#15737)
|
var/spack/repos/builtin/packages/opendx/package.py
|
var/spack/repos/builtin/packages/opendx/package.py
|
Python
| 0
|
@@ -0,0 +1,602 @@
+# Copyright 2013-2020 Lawrence Livermore National Security, LLC and other
+# Spack Project Developers. See the top-level COPYRIGHT file for details.
+#
+# SPDX-License-Identifier: (Apache-2.0 OR MIT)
+
+
+class Opendx(AutotoolsPackage):
+    """Open Visualization Data Explorer."""
+
+    homepage = "https://github.com/Mwoolsey/OpenDX"
+    git = "https://github.com/Mwoolsey/OpenDX.git"
+
+    version('master', branch='master')
+
+    depends_on('motif')  # lesstif also works, but exhibits odd behaviors
+    depends_on('gl')
+
+    @run_before('autoreconf')
+    def distclean(self):
+        make('distclean')
|
|
7c56318cb545011e64e3a491058054ad3d7cd9c0
|
Create new package. (#5987)
|
var/spack/repos/builtin/packages/r-aims/package.py
|
var/spack/repos/builtin/packages/r-aims/package.py
|
Python
| 0
|
@@ -0,0 +1,1979 @@
+##############################################################################
+# Copyright (c) 2013-2017, Lawrence Livermore National Security, LLC.
+# Produced at the Lawrence Livermore National Laboratory.
+#
+# This file is part of Spack.
+# Created by Todd Gamblin, tgamblin@llnl.gov, All rights reserved.
+# LLNL-CODE-647188
+#
+# For details, see https://github.com/llnl/spack
+# Please also see the NOTICE and LICENSE files for our notice and the LGPL.
+#
+# This program is free software; you can redistribute it and/or modify
+# it under the terms of the GNU Lesser General Public License (as
+# published by the Free Software Foundation) version 2.1, February 1999.
+#
+# This program is distributed in the hope that it will be useful, but
+# WITHOUT ANY WARRANTY; without even the IMPLIED WARRANTY OF
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the terms and
+# conditions of the GNU Lesser General Public License for more details.
+#
+# You should have received a copy of the GNU Lesser General Public
+# License along with this program; if not, write to the Free Software
+# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
+##############################################################################
+from spack import *
+
+
+class RAims(RPackage):
+    """This package contains the AIMS implementation. It contains
+    necessary functions to assign the five intrinsic molecular
+    subtypes (Luminal A, Luminal B, Her2-enriched, Basal-like,
+    Normal-like). Assignments could be done on individual samples
+    as well as on dataset of gene expression data."""
+
+    homepage = "http://bioconductor.org/packages/AIMS/"
+    url      = "https://git.bioconductor.org/packages/AIMS"
+
+    version('1.8.0', git='https://git.bioconductor.org/packages/AIMS', commit='86b866c20e191047492c51b43e3f73082c3f8357')
+
+    depends_on('r@3.4.0:3.4.9', when='@1.8.0')
+    depends_on('r-e1071', type=('build', 'run'))
+    depends_on('r-biobase', type=('build', 'run'))
|
|
eb429be1fdc7335bec5ba036fcece309778b23f0
|
Add an example that uses filterReactions AND pdep at the same time
|
examples/rmg/heptane-filterReactions/input.py
|
examples/rmg/heptane-filterReactions/input.py
|
Python
| 0
|
@@ -0,0 +1,1386 @@
+# Data sources
+database(
+    thermoLibraries = ['primaryThermoLibrary'],
+    reactionLibraries = [],
+    seedMechanisms = [],
+    kineticsDepositories = ['training'],
+    kineticsFamilies = 'default',
+    kineticsEstimator = 'rate rules',
+)
+
+# Constraints on generated species
+generatedSpeciesConstraints(
+    maximumCarbonAtoms = 7,
+)
+
+# List of species
+species(
+    label='n-heptane',
+    structure=SMILES("CCCCCCC"),
+)
+
+species(
+    label='Ar',
+    reactive=False,
+    structure=SMILES("[Ar]"),
+)
+
+
+simpleReactor(
+    temperature=(1600,'K'),
+    pressure=(400,'Pa'),
+    initialMoleFractions={
+        "n-heptane": 0.02,
+        "Ar": 0.98,
+    },
+    terminationConversion={
+        'n-heptane': 0.99,
+    },
+    terminationTime=(1e6,'s'),
+)
+
+simpleReactor(
+    temperature=(2000,'K'),
+    pressure=(400,'Pa'),
+    initialMoleFractions={
+        "n-heptane": 0.02,
+        "Ar": 0.98,
+    },
+    terminationConversion={
+        'n-heptane': 0.99,
+    },
+    terminationTime=(1e6,'s'),
+)
+
+simulator(
+    atol=1e-16,
+    rtol=1e-8,
+)
+
+model(
+    toleranceMoveToCore=0.01,
+    toleranceInterruptSimulation=0.01,
+    filterReactions=True,
+)
+
+pressureDependence(
+    method='modified strong collision',
+    maximumGrainSize=(0.5,'kcal/mol'),
+    minimumNumberOfGrains=250,
+    temperatures=(300,3000,'K',8),
+    pressures=(0.001,100,'bar',5),
+    interpolation=('Chebyshev', 6, 4),
+)
|
|
817f0e77802e09f1c101aa3773ad89341120e0f1
|
Fix the crash when there is no initial settings created
|
accounting/apps/connect/steps.py
|
accounting/apps/connect/steps.py
|
import logging
from django.core.urlresolvers import reverse
from django.core.exceptions import ValidationError
from accounting.apps.books.utils import organization_manager
from accounting.apps.reports.models import BusinessSettings
logger = logging.getLogger(__name__)
class StepOptions(object):
"""
Meta class options for a `BaseStep` subclass
"""
def __init__(self, meta):
self.name = getattr(meta, 'name', None)
assert isinstance(self.name, str), \
'`name` must be a string instance'
self.description = getattr(meta, 'description', "")
assert isinstance(self.description, str), \
'`description` must be a string instance'
class BaseStep(object):
"""
Abstract class to subclass to create a getting started step
"""
user = None
_completion = None
_options_class = StepOptions
class StepOptions:
name = "<Abstract>"
description = None
def __init__(self, user):
super().__init__()
self.opts = self._options_class(getattr(self, 'StepOptions', None))
self.user = user
def completed(self, request):
if self._completion is None:
self._completion = self.check_completion(request)
return self._completion
def is_completed(self):
"""pre computed value, to be called in templates"""
if self._completion is None:
logger.error("`completed` needs to be run before using "
"this method")
return False
return self._completion
def check_completion(self, request):
"""
Implement the logic of the step
and returns a boolean
"""
raise NotImplementedError
def get_action_url(self):
"""Returns the url to complete the step"""
pass
class CreateOrganizationStep(BaseStep):
"""
At least one organization has been created
"""
class StepOptions:
name = "Create an Organization"
description = "the organization is the foundation of the accounting " \
"system, tell Accountant-x more about it"
def check_completion(self, request):
orgas = organization_manager.get_user_organizations(request.user)
count = orgas.count()
return count > 0
def get_action_url(self):
return reverse('books:organization-create')
class ConfigureTaxRatesStep(BaseStep):
"""
At least one tax rate has been added (even if the rate is 0)
"""
class StepOptions:
name = "Configure Tax Rates"
description = "even if you are not subject to tax collecting rules " \
"you should create a 0% tax entry"
def check_completion(self, request):
orga = organization_manager.get_selected_organization(request)
if orga is None:
return False
count = orga.tax_rates.all().count()
return count > 0
def get_action_url(self):
return reverse('books:tax_rate-create')
class ConfigureBusinessSettingsStep(BaseStep):
"""
The associated business settings has been completed
"""
class StepOptions:
name = "Configure Business Settings"
description = "for now there is not much thing, but please create it"
def check_completion(self, request):
orga = organization_manager.get_selected_organization(request)
if orga is None:
return False
try:
settings = orga.business_settings
settings.full_clean()
except BusinessSettings.DoesNotExist:
return False
except ValidationError:
return False
return True
def get_action_url(self):
return reverse('reports:settings-business')
class ConfigureFinancialSettingsStep(BaseStep):
class StepOptions:
name = "Configure Financial Settings"
description = "tell Accountant-x what is your financial rulling rule"
def check_completion(self, request):
orga = organization_manager.get_selected_organization(request)
if orga is None:
return False
settings = orga.financial_settings
try:
settings.full_clean()
except ValidationError:
return False
return True
def get_action_url(self):
return reverse('reports:settings-financial')
class AddEmployeesStep(BaseStep):
class StepOptions:
name = "Add Employees"
description = "add at least one *employee*, even if you are giving " \
"yourself a salary that follows the profits"
def check_completion(self, request):
orga = organization_manager.get_selected_organization(request)
if orga is None:
return False
count = orga.employees.all().count()
return count > 0
def get_action_url(self):
return reverse('people:employee-create')
class ConfigurePayRunSettingsStep(BaseStep):
class StepOptions:
name = "Configure Pay Run Settings"
description = "tell to Accountant-x how you distribute salaries"
def check_completion(self, request):
orga = organization_manager.get_selected_organization(request)
if orga is None:
return False
settings = orga.payrun_settings
try:
settings.full_clean()
except ValidationError:
return False
return True
def get_action_url(self):
return reverse('reports:settings-payrun')
class AddFirstClientStep(BaseStep):
class StepOptions:
name = "Add the first Client"
description = "close to the first invoice"
def check_completion(self, request):
orga = organization_manager.get_selected_organization(request)
if orga is None:
return False
count = orga.clients.all().count()
return count > 0
def get_action_url(self):
return reverse('people:client-create')
class AddFirstInvoiceStep(BaseStep):
class StepOptions:
name = "Add the first Invoice"
description = "finally create it !"
def check_completion(self, request):
orga = organization_manager.get_selected_organization(request)
if orga is None:
return False
count = orga.invoices.all().count()
return count > 0
def get_action_url(self):
return reverse('books:invoice-create')
|
Python
| 0.000009
|
@@ -210,16 +210,22 @@
import
+(
Business
@@ -232,16 +232,60 @@
Settings
+,
+    FinancialSettings,
+    PayRunSettings)
logger
@@ -4193,32 +4193,49 @@
return False
+        try:
settings
@@ -4265,33 +4265,24 @@
ngs
- try:
sett
@@ -4265,36 +4265,32 @@
ngs
-
settings.full_cl
@@ -4287,32 +4287,104 @@
gs.full_clean()
+        except FinancialSettings.DoesNotExist:
+            return False
except V
@@ -5421,32 +5421,49 @@
return False
+        try:
settings
@@ -5491,36 +5491,23 @@
gs
- try:
-
settings
@@ -5512,32 +5512,101 @@
gs.full_clean()
+        except PayRunSettings.DoesNotExist:
+            return False
except V
|
e5a14054e1e9e95b04baf5ec7c92a2fdde51703b
|
Update __openerp__.py
|
scanterra_modifcations/__openerp__.py
|
scanterra_modifcations/__openerp__.py
|
# -*- coding: utf-8 -*-
{
'name': 'Scanterra Modifications',
'version': '8.0.1.0.0',
'category': 'Sales Management',
'sequence': 14,
'summary': 'Sales, Product, Category, Clasification',
'description': """
Scanterra Modifications
=======================
* Restrict tasks so that a task created by one user cannot be deleted by another user; that is, each user may only delete the tasks they created themselves.
* Automatically log a note when any of the following task fields is changed (task summary (title), deadline, initial planned hours, start date and end date); currently only state changes are logged automatically.
* Hide the probability field in the crm lead tree view
""",
'author': 'Ingenieria ADHOC',
'website': 'www.ingadhoc.com',
'depends': [
'project',
'project_issue',
'crm',
],
'data': [
'security/project_security.xml',
'crm_lead_view.xml',
'phonecall_view.xml',
'project_task_view.xml',
],
'demo': [
],
'test': [
],
'installable': True,
'auto_install': False,
'application': False,
}
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
|
Python
| 0.000024
|
@@ -80,17 +80,17 @@
'8.0.1.
-0
+1
.0',
|
559ba309a72d277c3b5a78614889d19b8866b7ea
|
add parallel by Alexandre Gramford (mne-python)
|
scikits/statsmodels/tools/parallel.py
|
scikits/statsmodels/tools/parallel.py
|
Python
| 0
|
@@ -0,0 +1,1281 @@
+"""Parallel util function
+"""
+
+# Author: Alexandre Gramfort <gramfort@nmr.mgh.harvard.edu>
+#
+# License: Simplified BSD
+
+
+def parallel_func(func, n_jobs, verbose=5):
+    """Return parallel instance with delayed function
+
+    Util function to use joblib only if available
+
+    Parameters
+    ----------
+    func: callable
+        A function
+    n_jobs: int
+        Number of jobs to run in parallel
+    verbose: int
+        Verbosity level
+
+    Returns
+    -------
+    parallel: instance of joblib.Parallel or list
+        The parallel object
+    my_func: callable
+        func if not parallel or delayed(func)
+    n_jobs: int
+        Number of jobs >= 0
+    """
+    try:
+        from sklearn.externals.joblib import Parallel, delayed
+        parallel = Parallel(n_jobs, verbose=verbose)
+        my_func = delayed(func)
+
+        if n_jobs == -1:
+            try:
+                import multiprocessing
+                n_jobs = multiprocessing.cpu_count()
+            except ImportError:
+                print "multiprocessing not installed. Cannot run in parallel."
+                n_jobs = 1
+
+    except ImportError:
+        print "joblib not installed. Cannot run in parallel."
+        n_jobs = 1
+        my_func = func
+        parallel = list
+    return parallel, my_func, n_jobs
|
|
2c6be657e0024a1a2e162a6a508d2d5716736121
|
add wrapper class for adiabatic approximation
|
galpy/actionAngle_src/actionAngleAdiabatic.py
|
galpy/actionAngle_src/actionAngleAdiabatic.py
|
Python
| 0
|
@@ -0,0 +1,2500 @@
+###############################################################################
+# actionAngle: a Python module to calculate actions, angles, and frequencies
+#
+#      class: actionAngleAdiabatic
+#
+#      wrapper around actionAngleAxi (adiabatic approximation) to do
+#      this for any (x,v)
+#
+#      methods:
+#             JR
+#             Jphi
+#             Jz
+#             angleR
+#             anglez
+#             TR
+#             Tphi
+#             Tz
+#             I
+#             calcRapRperi
+#             calcEL
+###############################################################################
+import math as m
+import numpy as nu
+from actionAngleAxi import actionAngleAxi
+from actionAngle import actionAngle
+class actionAngleAdiabatic():
+    """Action-angle formalism for axisymmetric potentials using the adiabatic approximation"""
+    def __init__(self,*args,**kwargs):
+        """
+        NAME:
+           __init__
+        PURPOSE:
+           initialize an actionAngleAdiabatic object
+        INPUT:
+           pot= potential or list of potentials (planarPotentials)
+        OUTPUT:
+        HISTORY:
+           2012-07-26 - Written - Bovy (IAS@MPIA)
+        """
+        if not kwargs.has_key('pot'):
+            raise IOError("Must specify pot= for actionAngleAxi")
+        self._pot= kwargs['pot']
+        return None
+
+    def __call__(self,*args,**kwargs):
+        """
+        NAME:
+           __call__
+        PURPOSE:
+           evaluate the actions (jr,lz,jz)
+        INPUT:
+           Either:
+              a) R,vR,vT,z,vz
+              b) Orbit instance: initial condition used if that's it, orbit(t)
+                 if there is a time given as well
+           scipy.integrate.quadrature keywords
+        OUTPUT:
+           (jr,lz,jz), where jr=[jr,jrerr], and jz=[jz,jzerr]
+        HISTORY:
+           2012-07-26 - Written - Bovy (IAS@MPIA)
+        """
+        #Set up the actionAngleAxi object
+        meta= actionAngle(*args)
+        if isinstance(self._pot,list):
+            thispot= [p.toPlanar() for p in self._pot]
+        else:
+            thispot= self._pot.toPlanar()
+        if isinstance(self._pot,list):
+            thisverticalpot= [p.toVertical(meta._R) for p in self._pot]
+        else:
+            thisverticalpot= self._pot.toVertical(meta._R)
+        aAAxi= actionAngleAxi(*args,pot=thispot,
+                              verticalPot=thisverticalpot)
+        return (aAAxi.JR(**kwargs),aAAxi._R*aAAxi._vT,aAAxi.Jz(**kwargs))
|
|
3a240005142da25aa49938a15d39ddf68dd7cead
|
Add functional test to verify presence of policy
|
nova/tests/functional/api/openstack/placement/test_verify_policy.py
|
nova/tests/functional/api/openstack/placement/test_verify_policy.py
|
Python
| 0.003609
|
@@ -0,0 +1,1967 @@
+# Licensed under the Apache License, Version 2.0 (the %22License%22); you may%0A# not use this file except in compliance with the License. You may obtain%0A# a copy of the License at%0A#%0A# http://www.apache.org/licenses/LICENSE-2.0%0A#%0A# Unless required by applicable law or agreed to in writing, software%0A# distributed under the License is distributed on an %22AS IS%22 BASIS, WITHOUT%0A# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the%0A# License for the specific language governing permissions and limitations%0A# under the License.%0A%0Afrom oslo_config import cfg%0A%0Afrom nova.api.openstack.placement import direct%0Afrom nova.api.openstack.placement import handler%0Afrom nova.tests.functional.api.openstack.placement import base%0A%0A%0ACONF = cfg.CONF%0A%0A%0Aclass TestVerifyPolicy(base.TestCase):%0A %22%22%22Verify that all defined placement routes have a policy.%22%22%22%0A%0A # Paths that don't need a policy check%0A EXCEPTIONS = %5B'/', ''%5D%0A%0A def _test_request_403(self, client, method, route):%0A headers = %7B%0A 'x-auth-token': 'user',%0A 'content-type': 'application/json'%0A %7D%0A request_method = getattr(client, method.lower())%0A # We send an empty request body on all requests. Because%0A # policy handling comes before other processing, the value%0A # of the body is irrelevant.%0A response = request_method(route, data='', headers=headers)%0A self.assertEqual(%0A 403, response.status_code,%0A 'method %25s on route %25s is open for user, status: %25s' %25%0A (method, route, response.status_code))%0A%0A def test_verify_policy(self):%0A with direct.PlacementDirect(CONF, latest_microversion=True) as client:%0A for route, methods in handler.ROUTE_DECLARATIONS.items():%0A if route in self.EXCEPTIONS:%0A continue%0A for method in methods:%0A self._test_request_403(client, method, route)%0A
|
|
b416de22866f6ebc05fcb256d5ab97f391481ddc
|
Create CSVStreamReader.py
|
CSVStreamReader.py
|
CSVStreamReader.py
|
Python
| 0.000004
|
@@ -0,0 +1,3611 @@
+%0A# The MIT License (MIT)%0A# Copyright (c) 2016 Chris Webb%0A# Permission is hereby granted, free of charge, to any person obtaining a copy%0A# of this software and associated documentation files (the %22Software%22), to deal%0A# in the Software without restriction, including without limitation the rights%0A# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell%0A# copies of the Software, and to permit persons to whom the Software is%0A# furnished to do so, subject to the following conditions:%0A# The above copyright notice and this permission notice shall be included in all%0A# copies or substantial portions of the Software.%0A# THE SOFTWARE IS PROVIDED %22AS IS%22, WITHOUT WARRANTY OF ANY KIND, EXPRESS OR%0A# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,%0A# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE%0A# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER%0A# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,%0A# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE%0A# SOFTWARE.%0A%0A%0Aclass CSVStreamReader:%0A%0A delimiter = '%5Ct'%0A text_field_identifier = '%22'%0A%0A def __init__(self, delimiter, textfieldIdentifier):%0A self.delimiter = delimiter%0A self.text_field_identifier = textfieldIdentifier%0A%0A def getTotalColumns(self, delimiter, textfieldIdentifier, stream, enc):%0A%0A curPosition = 0%0A if stream.seekable():%0A curPosition = stream.tell()%0A else:%0A return False%0A%0A textFieldDelimiterCount = 0%0A columnCount = 0%0A curChar = stream.read(1)%0A%0A while curChar != 0:%0A %0A if curChar == textfieldIdentifier:%0A textFieldDelimiterCount += 1%0A elif (textFieldDelimiterCount == 0) or (textFieldDelimiterCount %25 2 == 0):%0A if curChar == delimiter:%0A columnCount += 1%0A elif curChar == '%5Cn':%0A break%0A curChar = stream.read(1)%0A%0A columnCount += 1%0A if stream.seekable():%0A stream.seek(curPosition)%0A return columnCount%0A%0A def readLine (self, stream, startPosition):%0A%0A if not startPosition:%0A startPosition = 0%0A if startPosition %3E= stream.length:%0A return None%0A%0A count = 0%0A record = %5B%5D%0A curChar = None%0A column = 0%0A output = %5B%5D%0A endPosition = startPosition%0A curChar = stream.read(1)%0A%0A%0A while curChar != 0:%0A endPosition += 1%0A%0A if curChar == textfieldIdentifier:%0A if lastChar == curChar:%0A record.append(curChar)%0A count += 1%0A continue%0A%0A elif (count == 0) or (count %25 2 == 0):%0A if curChar == delimiter:%0A curColumn = column%0A output.push(record.join(%22%22))%0A column += 1%0A record = %5B%5D%0A continue%0A%0A elif curChar == '%5Cr':%0A continue%0A elif curChar == '%5Cn':%0A break%0A record.push(curChar)%0A lastChar = curChar%0A curChar = stream.read(1)%0A%0A output.push(record.join(%22%22))%0A return %7B %22output%22: output, %22endPosition%22: endPosition %7D%0A%0A def readLines(self, stream):%0A%0A pos = 0%0A output = %5B%5D%0A returnObj = self.readLine(input, pos)%0A%0A while returnObj:%0A pos = returnObj.endPosition + 1%0A output.push(returnObj.output)%0A%0A return output%0A
|
|
da09de30b376f1ab9e687e8064423499b4cf8d50
|
Add missing file
|
vispy/util/context.py
|
vispy/util/context.py
|
Python
| 0.000006
|
@@ -0,0 +1,3076 @@
+# -*- coding: utf-8 -*-%0A# Copyright (c) 2014, Vispy Development Team.%0A# Distributed under the (new) BSD License. See LICENSE.txt for more info.%0A%0A%22%22%22%0AFunctionality to deal with GL Contexts in vispy. This module is not in%0Aapp, because we want to make it possible to use parts of vispy without%0Arelying on app.%0A%0AThe GLContext object is more like a placeholder on which different parts%0Aof vispy (or other systems) can keep track of information related to%0Aan OpenGL context.%0A%22%22%22%0A%0Afrom copy import deepcopy%0Aimport weakref%0A%0A_default_dict = dict(red_size=8, green_size=8, blue_size=8, alpha_size=8,%0A depth_size=16, stencil_size=0, double_buffer=True,%0A stereo=False, samples=0)%0A%0A%0Adef get_default_config():%0A %22%22%22Get the default OpenGL context configuration%0A%0A Returns%0A -------%0A config : dict%0A Dictionary of config values.%0A %22%22%22%0A return deepcopy(_default_dict)%0A%0A%0Aclass GLContext(object):%0A %22%22%22An object encapsulating data necessary for a shared OpenGL context%0A%0A The data are backend dependent.%0A %22%22%22%0A %0A def __init__(self, config=None):%0A self._value = None # Used by vispy.app to store a ref%0A self._taken = None # Used by vispy.app to say what backend owns it%0A self._config = deepcopy(_default_dict)%0A self._config.update(config or %7B%7D)%0A # Check the config dict%0A for key, val in self._config.items():%0A if key not in _default_dict:%0A raise KeyError('Key %25r is not a valid GL config key.' %25 key)%0A if not isinstance(val, type(_default_dict%5Bkey%5D)):%0A raise TypeError('Context value of %25r has invalid type.' %25 key)%0A %0A def take(self, value, who, weak=False):%0A %22%22%22 Claim ownership of this context. Can only be done if the%0A context is not yet taken. The value should be a reference to%0A the actual GL context (which is stored on this object using a%0A weak reference). The string %60%60who%60%60 should specify who took it.%0A %22%22%22%0A if self.istaken:%0A raise RuntimeError('This GLContext is already taken by %25s.' %25 %0A self.istaken)%0A if not weak:%0A self._value_nonweak = value%0A self._taken = str(who)%0A self._value = weakref.ref(value)%0A %0A @property%0A def istaken(self):%0A %22%22%22 Whether the context is owned by a GUI system. If taken, this%0A returns the string name of the system that took it.%0A %22%22%22%0A return self._taken%0A %0A @property%0A def value(self):%0A %22%22%22 The value that the GUI system set when it took this coontext.%0A This is stored with a weakref, so it can be None if the value%0A has been cleaned up.%0A %22%22%22%0A if self._value:%0A return self._value()%0A %0A @property%0A def config(self):%0A %22%22%22 A dictionary describing the configuration of this GL context.%0A %22%22%22%0A return self._config%0A %0A def __repr__(self):%0A backend = self._backend or 'no'%0A return %22%3CGLContext of %25s backend at 0x%25x%3E%22 %25 (backend, id(self))%0A
|
|
30c2a0b681187180ca8228e0160962c6f25e794c
|
Update doc version.
|
docs/conf.py
|
docs/conf.py
|
# -*- coding: utf-8 -*-
import sys, os
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
# sys.path.insert(0, os.path.abspath('..'))
# -- General configuration -----------------------------------------------------
templates_path = ['_templates']
extensions = ['sphinx.ext.autodoc',]
source_suffix = '.rst'
master_doc = 'index'
project = u'Pelican'
copyright = u'2010, Alexis Metaireau and contributors'
exclude_patterns = ['_build']
pygments_style = 'sphinx'
# -- Options for HTML output ---------------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
sys.path.append(os.path.abspath('_themes'))
html_theme_path = ['_themes']
html_theme = 'flask_small'
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
html_theme_options = {
'index_logo': 'pelican.png',
'github_fork': 'ametaireau/pelican',
}
# Add any paths that contain custom themes here, relative to this directory.
#html_theme_path = []
# The name for this set of Sphinx documents. If None, it defaults to
# "<project> v<release> documentation".
#html_title = None
# A shorter title for the navigation bar. Default is the same as html_title.
#html_short_title = None
# The name of an image file (relative to this directory) to place at the top
# of the sidebar.
#html_logo = None
# The name of an image file (within the static path) to use as favicon of the
# docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32
# pixels large.
#html_favicon = None
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['_static']
# If not '', a 'Last updated on:' timestamp is inserted at every page bottom,
# using the given strftime format.
#html_last_updated_fmt = '%b %d, %Y'
# If true, SmartyPants will be used to convert quotes and dashes to
# typographically correct entities.
#html_use_smartypants = True
# Custom sidebar templates, maps document names to template names.
#html_sidebars = {}
# Additional templates that should be rendered to pages, maps page names to
# template names.
#html_additional_pages = {}
# If false, no module index is generated.
#html_domain_indices = True
# If false, no index is generated.
#html_use_index = True
# If true, the index is split into individual pages for each letter.
#html_split_index = False
# If true, links to the reST sources are added to the pages.
#html_show_sourcelink = True
# If true, "Created using Sphinx" is shown in the HTML footer. Default is True.
#html_show_sphinx = True
# If true, "(C) Copyright ..." is shown in the HTML footer. Default is True.
#html_show_copyright = True
# If true, an OpenSearch description file will be output, and all pages will
# contain a <link> tag referring to it. The value of this option must be the
# base URL from which the finished HTML is served.
#html_use_opensearch = ''
# This is the file name suffix for HTML files (e.g. ".xhtml").
#html_file_suffix = None
# Output file base name for HTML help builder.
htmlhelp_basename = 'Raclettedoc'
# -- Options for LaTeX output --------------------------------------------------
# The paper size ('letter' or 'a4').
#latex_paper_size = 'letter'
# The font size ('10pt', '11pt' or '12pt').
#latex_font_size = '10pt'
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title, author, documentclass [howto/manual]).
latex_documents = [
('index', 'Raclette.tex', u'Raclette Documentation',
u'Alexis Métaireau', 'manual'),
]
# The name of an image file (relative to this directory) to place at the top of
# the title page.
#latex_logo = None
# For "manual" documents, if this is true, then toplevel headings are parts,
# not chapters.
#latex_use_parts = False
# If true, show page references after internal links.
#latex_show_pagerefs = False
# If true, show URL addresses after external links.
#latex_show_urls = False
# Additional stuff for the LaTeX preamble.
#latex_preamble = ''
# Documents to append as an appendix to all manuals.
#latex_appendices = []
# If false, no module index is generated.
#latex_domain_indices = True
# -- Options for manual page output --------------------------------------------
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [
('index', 'raclette', u'Raclette Documentation',
[u'Alexis Métaireau'], 1)
]
|
Python
| 0
|
@@ -641,16 +641,48 @@
'sphinx'
+%0Aversion = %222%22%0Arelease = version
%0A%0A# -- O
|
783e7f644e2fc659d432d447bbbe6a01f2ac74c1
|
Fix #390 #450
|
{{cookiecutter.project_slug}}/{{cookiecutter.project_slug}}/users/admin.py
|
{{cookiecutter.project_slug}}/{{cookiecutter.project_slug}}/users/admin.py
|
# -*- coding: utf-8 -*-
from __future__ import absolute_import, unicode_literals
from django import forms
from django.contrib import admin
from django.contrib.auth.admin import UserAdmin as AuthUserAdmin
from django.contrib.auth.forms import UserChangeForm, UserCreationForm
from .models import User
class MyUserChangeForm(UserChangeForm):
class Meta(UserChangeForm.Meta):
model = User
class MyUserCreationForm(UserCreationForm):
error_message = UserCreationForm.error_messages.update({
'duplicate_username': 'This username has already been taken.'
})
class Meta(UserCreationForm.Meta):
model = User
def clean_username(self):
username = self.cleaned_data['username']
try:
User.objects.get(username=username)
except User.DoesNotExist:
return username
raise forms.ValidationError(self.error_messages['duplicate_username'])
@admin.register(User)
class UserAdmin(AuthUserAdmin):
form = MyUserChangeForm
add_form = MyUserCreationForm
|
Python
| 0.000004
|
@@ -269,17 +269,16 @@
ionForm%0A
-%0A
from .mo
@@ -712,17 +712,17 @@
ata%5B
-'
+%22
username
'%5D%0A
@@ -717,17 +717,17 @@
username
-'
+%22
%5D%0A
@@ -952,16 +952,18 @@
)%0Aclass
+My
UserAdmi
@@ -1026,24 +1026,212 @@
rm = MyUserCreationForm%0A
+ fieldsets = (%0A ('User Profile', %7B'fields': ('name',)%7D),%0A ) + AuthUserAdmin.fieldsets%0A list_display = ('username', 'name', 'is_superuser')%0A search_fields = %5B'name'%5D%0A
|
471c36f309360396fa173ec3b75d3783d791f5f5
|
string is powerful
|
src/strings.py
|
src/strings.py
|
Python
| 0.999999
|
@@ -0,0 +1,286 @@
+person = %22Harold Finch%22%0Aprint person%5B0%5D,person%5B1%5D; # H a%0Aprint person%5B0:6%5D # Harold%0Agreeting = %22Hello %22 + person; # Hello Harold Finch%0Aprint greeting%5B:6%5D + %22John Reese%22; # Hello John Reese%0Acontains = 'H' in person;%0Anotcontains = 'R' in person;%0Aprint contains, notcontains; # True, False
|
|
9d51e2ef626ce61dd3ae563681477b12a2352881
|
Add test_sprint
|
test_sprint.py
|
test_sprint.py
|
Python
| 0.000002
|
@@ -0,0 +1,73 @@
+def inc(val):%0A return val - 1%0A%0Adef test_inc():%0A assert inc(5) == 6%0A
|
|
3f078529e743db7450bdd3ad61983f1e4ee9fd35
|
handle 2006 elections
|
openelex/us/nc/datasource.py
|
openelex/us/nc/datasource.py
|
"""
North Carolina has CSV files containing precinct-level results for each county and all offices
for all years back to 2000, except for the 2000 primary. There is one zip file per election,
with additional text files for county and race-level summaries. For the 2000 primary, individual
Excel files are available for each office and party combination.
"""
from os.path import join
import json
import datetime
import urlparse
from openelex import PROJECT_ROOT
from openelex.base.datasource import BaseDatasource
class Datasource(BaseDatasource):
# PUBLIC INTERFACE
def mappings(self, year=None):
"""Return array of dicts containing source url and
standardized filename for raw results file, along
with other pieces of metadata
"""
mappings = []
for yr, elecs in self.elections(year).items():
mappings.extend(self._build_metadata(yr, elecs))
return mappings
def target_urls(self, year=None):
"Get list of source data urls, optionally filtered by year"
return [item['raw_url'] for item in self.mappings(year)]
def filename_url_pairs(self, year=None):
return [(item['generated_filename'], item['raw_url'])
for item in self.mappings(year)]
def mappings_for_url(self, url):
return [mapping for mapping in self.mappings() if mapping['raw_url'] == url]
# PRIVATE METHODS
def _build_metadata(self, year, elections):
meta = []
year_int = int(year)
for election in elections:
if election['slug'] == 'nc-2000-05-02-primary':
results = [x for x in self._url_paths() if x['date'] == election['start_date']]
for result in results:
generated_filename = self._generate_office_filename(election, result)
meta.append({
"generated_filename": generated_filename,
"raw_url": result['url'],
"ocd_id": 'ocd-division/country:us/state:nc',
"name": 'North Carolina',
"election": election['slug']
})
else:
results = [x for x in self._url_paths() if x['date'] == election['start_date']]
for result in results:
if result['date'] in ('2000-11-07', '2002-11-05', '2002-09-10', '2006-09-12', '2006-11-07', '2006-05-30', '2008-05-06'):
format = '.txt'
else:
format = '.csv'
generated_filename = self._generate_filename(election, format)
meta.append({
"generated_filename": generated_filename,
'raw_url': election['direct_links'][0],
'raw_extracted_filename': result['raw_extracted_filename'],
"ocd_id": 'ocd-division/country:us/state:nc',
"name": 'North Carolina',
"election": election['slug']
})
return meta
def _generate_filename(self, election, format):
if election['special']:
election_type = 'special__' + election['race_type'].replace("-","__") + '__precinct'
else:
election_type = election['race_type'].replace("-","__") + '__precinct'
bits = [
election['start_date'].replace('-',''),
self.state.lower(),
election_type
]
name = "__".join(bits) + format
return name
def _generate_office_filename(self, election, result):
if result['party'] == '':
bits = [
election['start_date'].replace('-',''),
self.state.lower(),
election['race_type'],
result['office'],
'precinct'
]
else:
bits = [
election['start_date'].replace('-',''),
self.state.lower(),
result['party'],
election['race_type'],
result['office'],
'precinct'
]
name = "__".join(bits)+'.xls'
return name
def _jurisdictions(self):
"""North Carolina counties"""
m = self.jurisdiction_mappings()
mappings = [x for x in m if x['county'] != ""]
return mappings
|
Python
| 0.000002
|
@@ -2411,16 +2411,30 @@
-09-10',
+ '2006-05-02',
'2006-0
|
8f6db5945348879a7340f8a4c7da6111a06cd062
|
Add new module to statically host an arbitrary directory
|
python/smqtk/web/search_app/modules/static_host.py
|
python/smqtk/web/search_app/modules/static_host.py
|
Python
| 0
|
@@ -0,0 +1,857 @@
+import flask%0A%0A%0A__author__ = 'paul.tunison@kitware.com'%0A%0A%0Aclass StaticDirectoryHost (flask.Blueprint):%0A %22%22%22%0A Module that will host a given directory to the given URL prefix (relative to%0A the parent module's prefix).%0A%0A Instances of this class will have nothing set to their static URL path, as a%0A blank string is used. Please reference the URL prefix value.%0A%0A %22%22%22%0A%0A def __init__(self, name, static_dir, url_prefix):%0A # make sure URL prefix starts with a slash%0A if not url_prefix.startswith('/'):%0A url_prefix = '/' + url_prefix%0A%0A super(StaticDirectoryHost, self).__init__(name, __name__,%0A static_folder=static_dir,%0A static_url_path=%22%22,%0A url_prefix=url_prefix)%0A
|
|
fdb8f36fd4eed11d5d757d8477b3c2b8619aae8a
|
Add management command to populate last_modified fields
|
corehq/apps/commtrack/management/commands/product_program_last_modified.py
|
corehq/apps/commtrack/management/commands/product_program_last_modified.py
|
Python
| 0.000001
|
@@ -0,0 +1,1793 @@
+from django.core.management.base import BaseCommand%0Afrom corehq.apps.commtrack.models import Product, Program%0Afrom dimagi.utils.couch.database import iter_docs%0Afrom datetime import datetime%0Aimport json%0A%0Aclass Command(BaseCommand):%0A help = 'Populate last_modified field for products and programs'%0A%0A def handle(self, *args, **options):%0A self.stdout.write(%22Processing products...%5Cn%22)%0A%0A relevant_ids = set(%5Br%5B'id'%5D for r in Product.get_db().view(%0A 'commtrack/products',%0A reduce=False,%0A ).all()%5D)%0A%0A to_save = %5B%5D%0A%0A for product in iter_docs(Product.get_db(), relevant_ids):%0A if 'last_modified' not in product or not product%5B'last_modified'%5D:%0A print product%5B'_id'%5D%0A product%5B'last_modified'%5D = json.dumps(datetime.now().isoformat())%0A to_save.append(product)%0A%0A if len(to_save) %3E 500:%0A Product.get_db().bulk_save(to_save)%0A to_save = %5B%5D%0A%0A if to_save:%0A Product.get_db().bulk_save(to_save)%0A%0A self.stdout.write(%22Processing programs...%5Cn%22)%0A%0A relevant_ids = set(%5Br%5B'id'%5D for r in Program.get_db().view(%0A 'commtrack/programs',%0A reduce=False,%0A ).all()%5D)%0A%0A to_save = %5B%5D%0A%0A for program in iter_docs(Program.get_db(), relevant_ids):%0A if 'last_modified' not in program or not program%5B'last_modified'%5D:%0A print program%5B'_id'%5D%0A program%5B'last_modified'%5D = json.dumps(datetime.now().isoformat())%0A to_save.append(program)%0A%0A if len(to_save) %3E 500:%0A Program.get_db().bulk_save(to_save)%0A to_save = %5B%5D%0A%0A if to_save:%0A Program.get_db().bulk_save(to_save)%0A
|
|
cf6d2e732ab4b7131312323632761561f2aa3a86
|
add field user_email to DynamicSaveInputs
|
webapp/apps/dynamic/migrations/0002_dynamicsaveinputs_user_email.py
|
webapp/apps/dynamic/migrations/0002_dynamicsaveinputs_user_email.py
|
Python
| 0.000001
|
@@ -0,0 +1,441 @@
+# -*- coding: utf-8 -*-%0Afrom __future__ import unicode_literals%0A%0Afrom django.db import models, migrations%0A%0A%0Aclass Migration(migrations.Migration):%0A%0A dependencies = %5B%0A ('dynamic', '0001_initial'),%0A %5D%0A%0A operations = %5B%0A migrations.AddField(%0A model_name='dynamicsaveinputs',%0A name='user_email',%0A field=models.CharField(default=None, max_length=50, null=True, blank=True),%0A ),%0A %5D%0A
|
|
6118b05f0efd1c2839eb8bc4de36723af1fcc364
|
Convert snake_case to camelCase or PascalCase (#7028) (#7034)
|
strings/snake_case_to_camel_pascal_case.py
|
strings/snake_case_to_camel_pascal_case.py
|
Python
| 0.997846
|
@@ -0,0 +1,1591 @@
+def snake_to_camel_case(input: str, use_pascal: bool = False) -%3E str:%0A %22%22%22%0A Transforms a snake_case given string to camelCase (or PascalCase if indicated)%0A (defaults to not use Pascal)%0A%0A %3E%3E%3E snake_to_camel_case(%22some_random_string%22)%0A 'someRandomString'%0A%0A %3E%3E%3E snake_to_camel_case(%22some_random_string%22, use_pascal=True)%0A 'SomeRandomString'%0A%0A %3E%3E%3E snake_to_camel_case(%22some_random_string_with_numbers_123%22)%0A 'someRandomStringWithNumbers123'%0A%0A %3E%3E%3E snake_to_camel_case(%22some_random_string_with_numbers_123%22, use_pascal=True)%0A 'SomeRandomStringWithNumbers123'%0A%0A %3E%3E%3E snake_to_camel_case(123)%0A Traceback (most recent call last):%0A ...%0A ValueError: Expected string as input, found %3Cclass 'int'%3E%0A%0A %3E%3E%3E snake_to_camel_case(%22some_string%22, use_pascal=%22True%22)%0A Traceback (most recent call last):%0A ...%0A ValueError: Expected boolean as use_pascal parameter, found %3Cclass 'str'%3E%0A %22%22%22%0A%0A if not isinstance(input, str):%0A raise ValueError(f%22Expected string as input, found %7Btype(input)%7D%22)%0A if not isinstance(use_pascal, bool):%0A raise ValueError(%0A f%22Expected boolean as use_pascal parameter, found %7Btype(use_pascal)%7D%22%0A )%0A%0A words = input.split(%22_%22)%0A%0A start_index = 0 if use_pascal else 1%0A%0A words_to_capitalize = words%5Bstart_index:%5D%0A%0A capitalized_words = %5Bword%5B0%5D.upper() + word%5B1:%5D for word in words_to_capitalize%5D%0A%0A initial_word = %22%22 if use_pascal else words%5B0%5D%0A%0A return %22%22.join(%5Binitial_word%5D + capitalized_words)%0A%0A%0Aif __name__ == %22__main__%22:%0A from doctest import testmod%0A%0A testmod()%0A
|
|
e581d41ded30aac220875c4e5f8fd41e56889742
|
Fix googlehome alarm sensor platform (#20742)
|
homeassistant/components/googlehome/sensor.py
|
homeassistant/components/googlehome/sensor.py
|
"""
Support for Google Home alarm sensor.
For more details about this platform, please refer to the documentation at
https://home-assistant.io/components/sensor.googlehome/
"""
import logging
from datetime import timedelta
from homeassistant.components.sensor import ENTITY_ID_FORMAT
from homeassistant.components.googlehome import (
CLIENT, DOMAIN as GOOGLEHOME_DOMAIN, NAME)
from homeassistant.const import DEVICE_CLASS_TIMESTAMP
from homeassistant.helpers.entity import Entity, async_generate_entity_id
import homeassistant.util.dt as dt_util
DEPENDENCIES = ['googlehome']
SCAN_INTERVAL = timedelta(seconds=10)
_LOGGER = logging.getLogger(__name__)
ICON = 'mdi:alarm'
SENSOR_TYPES = {
'timer': "Timer",
'alarm': "Alarm",
}
async def async_setup_platform(hass, config,
async_add_entities, discovery_info=None):
"""Set up the googlehome sensor platform."""
if discovery_info is None:
_LOGGER.warning(
"To use this you need to configure the 'googlehome' component")
devices = []
for condition in SENSOR_TYPES:
device = GoogleHomeAlarm(hass.data[CLIENT], condition,
discovery_info)
devices.append(device)
async_add_entities(devices, True)
class GoogleHomeAlarm(Entity):
"""Representation of a GoogleHomeAlarm."""
def __init__(self, client, condition, config):
"""Initialize the GoogleHomeAlarm sensor."""
self._host = config['host']
self._client = client
self._condition = condition
self._name = None
self._state = None
self._available = True
async def async_added_to_hass(self):
"""Subscribe GoogleHome events."""
await self._client.update_info(self._host)
data = self.hass.data[GOOGLEHOME_DOMAIN][self._host]
info = data.get('info', {})
if info is None:
return
self._name = "{} {}".format(info.get('name', NAME),
SENSOR_TYPES[self._condition])
self.entity_id = async_generate_entity_id(
ENTITY_ID_FORMAT, self._name, hass=self.hass)
async def async_update(self):
"""Update the data."""
await self._client.update_alarms(self._host)
data = self.hass.data[GOOGLEHOME_DOMAIN][self._host]
alarms = data.get('alarms')[self._condition]
if not alarms:
self._available = False
return
self._available = True
time_date = dt_util.utc_from_timestamp(min(element['fire_time']
for element in alarms)
/ 1000)
self._state = time_date.isoformat()
@property
def state(self):
"""Return the state."""
return self._state
@property
def name(self):
"""Return the name."""
return self._name
@property
def device_class(self):
"""Return the device class."""
return DEVICE_CLASS_TIMESTAMP
@property
def available(self):
"""Return the availability state."""
return self._available
@property
def icon(self):
"""Return the icon."""
return ICON
|
Python
| 0
|
@@ -222,69 +222,8 @@
ta%0A%0A
-from homeassistant.components.sensor import ENTITY_ID_FORMAT%0A
from
@@ -268,16 +268,16 @@
mport (%0A
+
CLIE
@@ -421,34 +421,8 @@
tity
-, async_generate_entity_id
%0Aimp
@@ -952,16 +952,192 @@
ponent%22)
+%0A return%0A%0A await hass.data%5BCLIENT%5D.update_info(discovery_info%5B'host'%5D)%0A data = hass.data%5BGOOGLEHOME_DOMAIN%5D%5Bdiscovery_info%5B'host'%5D%5D%0A info = data.get('info', %7B%7D)
%0A%0A de
@@ -1292,16 +1292,40 @@
ery_info
+, info.get('name', NAME)
)%0A
@@ -1517,16 +1517,22 @@
, config
+, name
):%0A
@@ -1768,285 +1768,8 @@
True
-%0A%0A async def async_added_to_hass(self):%0A %22%22%22Subscribe GoogleHome events.%22%22%22%0A await self._client.update_info(self._host)%0A data = self.hass.data%5BGOOGLEHOME_DOMAIN%5D%5Bself._host%5D%0A info = data.get('info', %7B%7D)%0A if info is None:%0A return
%0A
@@ -1805,206 +1805,43 @@
mat(
-info.get('
name
-'
,
-NAME),%0A SENSOR_TYPES%5Bself._condition%5D)%0A self.entity_id = async_generate_entity_id(%0A ENTITY_ID_FORMAT, self._name, hass=self.hass
+SENSOR_TYPES%5Bself._condition%5D
)%0A%0A
|
4a71e883a469f22995775cbc1eeb6489a2dd71d1
|
add integration test
|
tests/integration_tests/test_contrib_wandb.py
|
tests/integration_tests/test_contrib_wandb.py
|
Python
| 0.000001
|
@@ -0,0 +1,1432 @@
+import logging%0Aimport os%0Aimport shutil%0Aimport sys%0A%0Aimport ludwig.contrib%0Afrom tests.integration_tests.test_experiment import run_experiment%0Afrom tests.integration_tests.utils import image_feature%0Afrom tests.integration_tests.utils import category_feature%0Afrom tests.integration_tests.utils import generate_data%0A%0Aimport wandb%0A%0Alogger = logging.getLogger(__name__)%0Alogger.setLevel(logging.INFO)%0Alogging.getLogger(%22ludwig%22).setLevel(logging.INFO)%0A%0A%0Adef test_wandb_experiment(csv_filename):%0A # Test W&B integration%0A%0A # add wandb arg and detect flag%0A sys.argv.append('--wandb')%0A ludwig.contrib.contrib_import()%0A%0A # disable sync to cloud%0A os.environ%5B'WANDB_MODE'%5D = 'dryrun'%0A%0A # Image Inputs%0A image_dest_folder = os.path.join(os.getcwd(), 'generated_images')%0A%0A # Inputs & Outputs%0A input_features = %5Bimage_feature(folder=image_dest_folder)%5D%0A output_features = %5Bcategory_feature()%5D%0A rel_path = generate_data(input_features, output_features, csv_filename)%0A%0A # Run experiment%0A run_experiment(input_features, output_features, data_csv=rel_path)%0A%0A # Check a W&B run was created%0A assert wandb.run is not None%0A%0A # End session%0A wandb.join()%0A%0A # Delete the temporary data created%0A shutil.rmtree(image_dest_folder)%0A%0A%0Aif __name__ == '__main__':%0A %22%22%22%0A To run tests individually, run:%0A %60%60%60python -m pytest tests/integration_tests/test_contrib_wandb.py::test_name%60%60%60%0A %22%22%22%0A pass%0A
|
|
bbc4351a5611a035bbee1f18cb55b74d9583cdcd
|
Create a state for add an object
|
sara_flexbe_states/src/sara_flexbe_states/Wonderland_Add_Object.py
|
sara_flexbe_states/src/sara_flexbe_states/Wonderland_Add_Object.py
|
Python
| 0.000006
|
@@ -0,0 +1,1951 @@
+#!/usr/bin/env python%0A# encoding=utf8%0A%0Aimport json%0Aimport requests%0Afrom flexbe_core import EventState, Logger%0A%0A%0Aclass Wonderland_Add_Object(EventState):%0A%09'''%0A%09Add an object to Wonderland.%0A%09For the room, enter only ID or Name, not both.%0A%09Return the ID of the added human.%0A%0A%09%3E# name string name of the human%0A%09%3E# roomID string ID on the BDD or name of the room%0A%09%3E# x_pos int Position on X%0A%09%3E# y_pos int Position on Y%0A%09%3E# z_pos int Position on Z%0A%0A%09#%3E id int ID on the BDD of the human%0A%0A%09%3C= done data sent correctly%0A%09%3C= error error while data is reading%0A%09'''%0A%09%0A%09def __init__(self):%0A%09%09super(Wonderland_Add_Object, self).__init__(outcomes=%5B'done', 'error'%5D,%0A%09%09 output_keys=%5B'id'%5D,%0A%09%09 input_keys=%5B'name', 'roomID', 'x_pos', 'y_pos', 'z_pos'%5D)%0A%09%09# generate post key for authentication%0A%09%09self._header = %7B'api-key': 'asdf'%7D%0A%09%0A%09def execute(self, userdata):%0A%09%09# Generate URL to contact%0A%09%09%0A%09%09if isinstance(userdata.roomID, (int, long)):%0A%09%09%09dataPost = %7B'name': userdata.name, 'x': userdata.x_pos, 'y': userdata.y_pos, 'z': userdata.z_pos,%0A%09%09%09 'roomID': userdata.roomID%7D%0A%09%09else:%0A%09%09%09dataPost = %7B'name': userdata.name, 'x': userdata.x_pos, 'y': userdata.y_pos, 'z': userdata.z_pos,%0A%09%09%09 'roomName': userdata.roomID%7D%0A%09%09%0A%09%09# try the request%0A%09%09try:%0A%09%09%09response = requests.post(%22http://192.168.0.46:8000/api/object/%22, headers=self._header, data=dataPost)%0A%09%09except requests.exceptions.RequestException as e:%0A%09%09%09print e%0A%09%09%09return 'error'%0A%09%09%0A%09%09# read response%0A%09%09data_response = json.loads(response.content)%0A%09%09%0A%09%09# have a response%0A%09%09if not data_response%5B%22entity%22%5D:%0A%09%09%09return 'error'%0A%09%09%0A%09%09# have an id to read%0A%09%09if 'id' not in data_response%5B%22entity%22%5D:%0A%09%09%09# continue to Error%0A%09%09%09return 'error'%0A%09%09%0A%09%09# return the ID%0A%09%09userdata.id = data_response%5B%22entity%22%5D%5B'id'%5D%0A%09%09%0A%09%09return 'done'%0A
|
|
3ce9dcb1ae21ac35ddd97d648ae3ff4b5877adc5
|
add script for downloading and conversion of bhuman dataset after conversion the dataset is in the same format as the berlin-united one
|
Utils/py/BallDetection/RegressionNetwork/generate_image_db_bhuman.py
|
Utils/py/BallDetection/RegressionNetwork/generate_image_db_bhuman.py
|
Python
| 0
|
@@ -0,0 +1,2405 @@
+%22%22%22%0AConverts the b-human 2019 dataset to the naoth format so we can run performance comparisons%0A%22%22%22%0Aimport pickle%0Aimport numpy as np%0Aimport h5py%0Afrom pathlib import Path%0Afrom urllib.request import urlretrieve%0Afrom urllib.error import HTTPError, URLError%0Afrom utility_functions.loader import calculate_mean, subtract_mean%0A%0A%0Adef download_bhuman2019(origin, target):%0A def dl_progress(count, block_size, total_size):%0A print('%5Cr', 'Progress: %7B0:.2%25%7D'.format(min((count * block_size) / total_size, 1.0)), sep='', end='', flush=True)%0A%0A if not Path(target).exists():%0A target_folder = Path(target).parent%0A target_folder.mkdir(parents=True, exist_ok=True)%0A else:%0A return%0A%0A error_msg = 'URL fetch failure on %7B%7D : %7B%7D -- %7B%7D'%0A try:%0A try:%0A urlretrieve(origin, target, dl_progress)%0A print('%5CnFinished')%0A except HTTPError as e:%0A raise Exception(error_msg.format(origin, e.code, e.reason))%0A except URLError as e:%0A raise Exception(error_msg.format(origin, e.errno, e.reason))%0A except (Exception, KeyboardInterrupt):%0A if Path(target).exists():%0A Path(target).unlink()%0A raise%0A%0A%0Aif __name__ == '__main__':%0A download_bhuman2019(%22https://sibylle.informatik.uni-bremen.de/public/datasets/b-alls-2019/b-alls-2019.hdf5%22,%0A %22data/bhuman/b-alls-2019.hdf5%22)%0A download_bhuman2019(%22https://sibylle.informatik.uni-bremen.de/public/datasets/b-alls-2019/%22,%0A %22data/bhuman/readme.txt%22)%0A%0A # get data%0A f = h5py.File('data/bhuman/b-alls-2019.hdf5', 'r')%0A%0A negative_labels = np.array(f.get('negatives/labels'))%0A positive_labels = np.array(f.get('positives/labels'))%0A negative_data = np.array(f.get('negatives/data'))%0A positive_data = np.array(f.get('positives/data'))%0A%0A labels = np.append(negative_labels, positive_labels, axis=0)%0A%0A # swap dimensions to convert b-human format to berlin-united format%0A new_labels = np.copy(labels)%0A radii = labels%5B:, 0%5D%0A classes = labels%5B:, -1%5D%0A new_labels%5B:, 0%5D = classes%0A new_labels%5B:, -1%5D = radii%0A%0A images = np.append(negative_data, positive_data, axis=0)%0A mean = calculate_mean(images)%0A%0A mean_images = subtract_mean(images, mean)%0A with open(%22data/bhuman.pkl%22, %22wb%22) as f:%0A pickle.dump(mean, f)%0A pickle.dump(mean_images, f)%0A pickle.dump(new_labels, f)%0A
|
|
78bc96307fb52d95e36eab1da6fa57a66af736e8
|
Add script to delete couch phone numbers
|
corehq/apps/sms/management/commands/delete_messaging_couch_phone_numbers.py
|
corehq/apps/sms/management/commands/delete_messaging_couch_phone_numbers.py
|
Python
| 0
|
@@ -0,0 +1,2141 @@
+from corehq.apps.sms.mixin import VerifiedNumber%0Afrom corehq.apps.sms.models import PhoneNumber%0Afrom dimagi.utils.couch.database import iter_docs_with_retry, iter_bulk_delete_with_doc_type_verification%0Afrom django.core.management.base import BaseCommand%0Afrom optparse import make_option%0A%0A%0Aclass Command(BaseCommand):%0A args = %22%22%0A help = (%22Deletes all messaging phone numbers stored in couch%22)%0A option_list = BaseCommand.option_list + (%0A make_option(%22--delete-interval%22,%0A action=%22store%22,%0A dest=%22delete_interval%22,%0A type=%22int%22,%0A default=5,%0A help=%22The number of seconds to wait between each bulk delete.%22),%0A )%0A%0A def get_couch_ids(self):%0A result = VerifiedNumber.view(%0A 'phone_numbers/verified_number_by_domain',%0A include_docs=False,%0A reduce=False,%0A ).all()%0A return %5Brow%5B'id'%5D for row in result%5D%0A%0A def get_soft_deleted_couch_ids(self):%0A result = VerifiedNumber.view(%0A 'all_docs/by_doc_type',%0A startkey=%5B'VerifiedNumber-Deleted'%5D,%0A endkey=%5B'VerifiedNumber-Deleted', %7B%7D%5D,%0A include_docs=False,%0A reduce=False,%0A ).all()%0A return %5Brow%5B'id'%5D for row in result%5D%0A%0A def delete_models(self, delete_interval):%0A print 'Deleting VerifiedNumbers...'%0A count = iter_bulk_delete_with_doc_type_verification(%0A VerifiedNumber.get_db(),%0A self.get_couch_ids(),%0A 'VerifiedNumber',%0A wait_time=delete_interval,%0A max_fetch_attempts=5%0A )%0A print 'Deleted %25s documents' %25 count%0A%0A print 'Deleting Soft-Deleted VerifiedNumbers...'%0A count = iter_bulk_delete_with_doc_type_verification(%0A VerifiedNumber.get_db(),%0A self.get_soft_deleted_couch_ids(),%0A 'VerifiedNumber-Deleted',%0A wait_time=delete_interval,%0A max_fetch_attempts=5%0A )%0A print 'Deleted %25s documents' %25 count%0A%0A def handle(self, *args, **options):%0A self.delete_models(options%5B'delete_interval'%5D)%0A
|
|
20c6f14d4cc76771290f7ce6fc4f3dd5abed07b4
|
write symbol calculus with sympy.
|
cpp/src/DO/Sara/MultiViewGeometry/Estimators/five_point_algorithm.py
|
cpp/src/DO/Sara/MultiViewGeometry/Estimators/five_point_algorithm.py
|
Python
| 0
|
@@ -0,0 +1,418 @@
+from sympy import *%0A%0AX = Matrix(symbols(' '.join(%5B'X%7B%7D'.format(i) for i in range(9)%5D))).reshape(3, 3)%0AY = Matrix(symbols(' '.join(%5B'Y%7B%7D'.format(i) for i in range(9)%5D))).reshape(3, 3)%0AZ = Matrix(symbols(' '.join(%5B'Z%7B%7D'.format(i) for i in range(9)%5D))).reshape(3, 3)%0AW = Matrix(symbols(' '.join(%5B'W%7B%7D'.format(i) for i in range(9)%5D))).reshape(3, 3)%0A%0Ax, y, z = symbols('x y z')%0A%0A%0AE = x * X + y * Y + z * Z + W%0A%0Aa = det(E) %0A
|
|
25b0544c2f2c78dbc3bf971d955fda651d7ed5e9
|
fix is_open_for_signup missing param
|
{{cookiecutter.repo_name}}/{{cookiecutter.repo_name}}/users/adapter.py
|
{{cookiecutter.repo_name}}/{{cookiecutter.repo_name}}/users/adapter.py
|
# -*- coding: utf-8 -*-
from django.conf import settings
from allauth.account.adapter import DefaultAccountAdapter
from allauth.socialaccount.adapter import DefaultSocialAccountAdapter
class AccountAdapter(DefaultAccountAdapter):
def is_open_for_signup(self, request):
return getattr(settings, 'ACCOUNT_ALLOW_REGISTRATION', True)
class SocialAccountAdapter(DefaultSocialAccountAdapter):
def is_open_for_signup(self, request):
return getattr(settings, 'ACCOUNT_ALLOW_REGISTRATION', True)
|
Python
| 0.000021
|
@@ -428,32 +428,45 @@
up(self, request
+, sociallogin
):%0A retur
|
c8143c9f3eca422e48625700aeef11e528131caf
|
add zeroOutDisk.py
|
zeroOutDisk.py
|
zeroOutDisk.py
|
Python
| 0.000001
|
@@ -0,0 +1,245 @@
+#!/usr/bin/env python %0A%0Adef zeroOutDisk():%0A %22%22%22Fill selected device (/dev/) with zeros.%22%22%22%0A%09pass=1 %0A%09for int in range(wipe):%0A%09%09print 'Wiping drives pass %25s of $s%22)%25(pass, count))%0A%09%09os.system((%22dd if=/dev/zero of=%25s%22)%25(device))%0A%09%09pass+=1%0A%0A
|
|
8b26888dea4e825d06b6ebfed628a1762f6a5455
|
Solve 48.
|
048/solution.py
|
048/solution.py
|
Python
| 0.999984
|
@@ -0,0 +1,429 @@
+# coding: utf-8%0A%0A%22%22%22 Project Euler problem #48. %22%22%22%0A%0A%0Adef problem():%0A u%22%22%22 Solve the problem.%0A%0A The series, 11 + 22 + 33 + ... + 1010 = 10405071317.%0A%0A Find the last ten digits of the series, 11 + 22 + 33 + ... + 10001000.%0A%0A Answer: 9110846700%0A%0A %22%22%22%0A result = long(0)%0A for num in range(1, 1001):%0A result += long(num) ** num%0A%0A print str(result)%5B-10:%5D%0A%0A%0Aif __name__ == '__main__':%0A print problem()%0A
|
|
e0770c4a671c650f4569036350b4047fcf925506
|
Add a demo app to illustrate the result
|
AnalysisDemo.py
|
AnalysisDemo.py
|
Python
| 0
|
@@ -0,0 +1,1461 @@
+import wx%0A#import matplotlib%0A%0Aclass AnalysisDemo(wx.Frame):%0A def __init__(self, *args, **kw):%0A super(AnalysisDemo, self).__init__(*args, **kw)%0A self.initMain()%0A%0A def initMain(self):%0A pn = wx.Panel(self)%0A%0A self.showPackage = wx.RadioButton(pn, label='Organize in package')%0A self.showClass = wx.RadioButton(pn, label='Organize in class')%0A# self.canvas = matplotlib.figure.Figure()%0A self.canvas = wx.TextCtrl(pn, style=wx.TE_MULTILINE %7C wx.HSCROLL)%0A self.create = wx.Button(pn, label='Create Figure')%0A%0A self.create.Bind(wx.EVT_BUTTON, self.createFigure)%0A%0A optionBoxSizer = wx.BoxSizer(wx.VERTICAL)%0A optionBoxSizer.Add(self.showPackage, proportion=0, flag=wx.TOP, border=5)%0A optionBoxSizer.Add(self.showClass, proportion=0, flag=wx.TOP, border=5)%0A optionBoxSizer.Add(self.create, proportion=0, flag=wx.TOP, border=5)%0A%0A mainBoxSizer = wx.BoxSizer()%0A mainBoxSizer.Add(self.canvas, proportion=1, flag=wx.EXPAND %7C wx.ALL, border=5)%0A mainBoxSizer.Add(optionBoxSizer, proportion=0, flag=wx.EXPAND %7C wx.TOP %7C wx.BOTTOM %7C wx.RIGHT, border=5)%0A%0A pn.SetSizer(mainBoxSizer)%0A self.SetTitle('Analysis Demo')%0A self.SetSize((600,400))%0A self.Centre()%0A self.Show(True)%0A%0A def createFigure(self, event):%0A pass%0A%0Adef main():%0A app = wx.App()%0A AnalysisDemo(None)%0A app.MainLoop()%0A%0Aif __name__ == '__main__':%0A main()%0A
|
|
0d92a54f244b1d2ba3a0e6be5a2ff9ab0766171c
|
lab3 lab3
|
Lab3/palindrome.py
|
Lab3/palindrome.py
|
Python
| 0.999727
|
@@ -0,0 +1,147 @@
+check=input(%22input word for testing%22)%0Adef del_space(word):%0A word = word.replace(%22 %22,%22%22)%0A word = word.lower()%0A print(word)%0A return word%0A
|
|
b19d6a63d80919b3e7a2f3c40cd026085a526614
|
Create Rehash.py
|
LintCode/Rehash.py
|
LintCode/Rehash.py
|
Python
| 0
|
@@ -0,0 +1,2477 @@
+'''%0AMedium Rehashing Show result %0A%0A25%25 Accepted%0AThe size of the hash table is not determinate at the very beginning. If the total size of keys is too large (e.g. size %3E= capacity / 10), we should double the size of the hash table and rehash every keys. Say you have a hash table looks like below:%0A%0Asize=3, capacity=4%0A%0A%5Bnull, 21, 14, null%5D%0A %E2%86%93 %E2%86%93%0A 9 null%0A %E2%86%93%0A null%0AThe hash function is:%0A%0Aint hashcode(int key, int capacity) %7B%0A return key %25 capacity;%0A%7D%0Ahere we have three numbers, 9, 14 and 21, where 21 and 9 share the same position as they all have the same hashcode 1 (21 %25 4 = 9 %25 4 = 1). We store them in the hash table by linked list.%0A%0Arehashing this hash table, double the capacity, you will get:%0A%0Asize=3, capacity=8%0A%0Aindex: 0 1 2 3 4 5 6 7%0Ahash : %5Bnull, 9, null, null, null, 21, 14, null%5D%0AGiven the original hash table, return the new hash table after rehashing .%0A%0AHave you met this question in a real interview? Yes%0AExample%0AGiven %5Bnull, 21-%3E9-%3Enull, 14-%3Enull, null%5D,%0A%0Areturn %5Bnull, 9-%3Enull, null, null, null, 21-%3Enull, 14-%3Enull, null%5D%0A%0ANote%0AFor negative integer in hash table, the position can be calculated as follow:%0A%0AC++/Java: if you directly calculate -4 %25 3 you will get -1. You can use function: a %25 b = (a %25 b + b) %25 b to make it is a non negative integer.%0APython: you can directly use -1 %25 3, you will get 2 automatically.%0A'''%0A%0A%0A%0A%22%22%22%0ADefinition of ListNode%0Aclass ListNode(object):%0A%0A def __init__(self, val, next=None):%0A self.val = val%0A self.next = next%0A%22%22%22%0Aclass Solution:%0A %22%22%22%0A @param hashTable: A list of The first node of linked list%0A @return: A list of The first node of linked list which have twice size%0A %22%22%22%0A def rehashing(self, hashTable):%0A if not hashTable: return %0A %0A old_size = len(hashTable)%0A new_size = old_size * 2%0A newHashtable = %5BNone for i in xrange(new_size)%5D%0A %0A for start in hashTable:%0A while start != None:%0A index = start.val %25 new_size %0A temp = newHashtable%5Bindex%5D%0A if temp != None:%0A while temp and temp.next:%0A temp = temp.next %0A temp.next = ListNode(start.val)%0A else:%0A newHashtable%5Bindex%5D = ListNode(start.val)%0A %0A start = start.next %0A %0A return newHashtable%0A %0A
|
|
af4a5e92dc7ef8bffa96c6e556671e1c49116a70
|
Test commit
|
radproc.py
|
radproc.py
|
Python
| 0
|
@@ -0,0 +1,419 @@
+#-------------------------------------------------------------------------------%0D%0A# Name: module1%0D%0A# Purpose:%0D%0A#%0D%0A# Author: heistermann%0D%0A#%0D%0A# Created: 26.10.2011%0D%0A# Copyright: (c) heistermann 2011%0D%0A# Licence: %3Cyour licence%3E%0D%0A#-------------------------------------------------------------------------------%0D%0A#!/usr/bin/env python%0D%0A%0D%0Adef main():%0D%0A pass%0D%0A%0D%0Aif __name__ == '__main__':%0D%0A main()%0D%0A
|
|
217b95f7ecb42dd4a9a671703c86a2b83838bb28
|
rename to .py
|
python_solutions/PENULTIMATE_WORD/PENULTIMATE_WORD.py
|
python_solutions/PENULTIMATE_WORD/PENULTIMATE_WORD.py
|
Python
| 0.999999
|
@@ -0,0 +1,583 @@
+%22%22%22%0AWrite a program which finds the next-to-last word in a string.%0A%0AINPUT SAMPLE:%0A%0AYour program should accept as its first argument a path to a filename. Input%0Aexample is the following:%0A%0Asome line with text%0Aanother line%0A%0AEach line has more than one word.%0A%0AOUTPUT SAMPLE:%0A%0APrint the next-to-last word in the following way:%0A%0Awith%0Aanother%0A%22%22%22%0Afrom sys import argv%0A%0A%0Adef penultimate(input_file):%0A with open(input_file, 'r') as file:%0A for line in file:%0A words = line.rstrip().split()%0A print(words%5B-2%5D)%0A%0A%0Aif __name__ == '__main__':%0A penultimate(argv%5B1%5D)%0A
|
|
bc10094ef6250558d713f3636dbd01f295503f15
|
Create NovelAPI.py
|
NovelAPI.py
|
NovelAPI.py
|
Python
| 0
|
@@ -0,0 +1,2690 @@
+import aiohttp%0Afrom bs4 import BeautifulSoup%0A%0Aonlysession = aiohttp.ClientSession()%0Aclass NovelUpdatesAPI:%0A def __init__(self):%0A self.baseurl = 'http://www.novelupdates.com/'%0A%0A async def search_novel_updates(self, term: str):%0A term = term.replace(' ', '+')%0A params = %7B's': term, 'post_type': 'seriesplan'%7D%0A with onlysession as session:%0A async with session.get(self.baseurl, params=params) as response:%0A assert isinstance(response, aiohttp.ClientResponse)%0A assert response.status == 200%0A search = BeautifulSoup(await response.text(), 'lxml')%0A parsedsearch = search.find('a', class_='w-blog-entry-link').get('href')%0A return parsedsearch%0A%0A async def page_info_parser(self, term):%0A to_parse = await self.search_novel_updates(term)%0A with onlysession as session:%0A async with session.get(to_parse) as response:%0A assert isinstance(response, aiohttp.ClientResponse)%0A assert response.status == 200%0A parse_info = BeautifulSoup(await response.text(), 'lxml')%0A data%5B'title'%5D = parse_info.find(class_='seriestitle new')%0A data%5B'cover'%5D = parse_info.find('img').get('src')%0A data%5B'type'%5D = parse_info.find('a', class_='genre type').text()%0A data%5B'genre'%5D = parse_info.find_all('a', class_='genre').text()%0A data%5B'tags'%5D = parse_info.find_all('a', class_='genre odd').text()%0A data%5B'rating'%5D = parse_info.find('span', class_='votetext').text()%0A data%5B'language'%5D = parse_info.find('a', class_='genre lang').text()%0A data%5B'author'%5D = parse_info.find('a', class_='authtag').text()%0A data%5B'artist'%5D = parse_info.find('a', class_='artiststag').text()%0A data%5B'year'%5D = parse_info.find('div', id_='edityear').text()%0A data%5B'novel_status'%5D = parse_info.find('div', id_='editstatus').text()%0A data%5B'licensed'%5D = parse_info.find('div', id_='showlicensed').text()%0A data%5B'completely_translated'%5D = parse_info.find('div', id_='showtranslated').text()%0A data%5B'publisher'%5D = parse_info.find('a', class_='genre', id_='myopub').text()%0A data%5B'english_publisher'%5D = parse_info.find('span', class_='seriesna').text()%0A data%5B'frequency'%5D = parse_info.find('h5', class_='seriesother').text()%0A data%5B'description'%5D = parse_info.find('div', id_='editdescription').text().strip()%0A data%5B'aliases'%5D = parse_info.find('div', id_='editassociated').text()%0A data%5B'related'%5D = parse_info.find('h5', class_='seriesother').text()%0A%0A return data%0A
|
|
ea3f995775b42784d99ebc19effce949a700eb28
|
Update blocks.py
|
Urutu/cl/blocks.py
|
Urutu/cl/blocks.py
|
## OpenCL blocks are initialized here!
## Created by: Aditya Atluri
## Date: Mar 03 2014
def bx(blocks_dec, kernel):
if blocks_dec == False:
string = "int bx = get_group_id(0);\n"
kernel = kernel + string
blocks_dec = True
return kernel, blocks_dec
def by(blocks_dec, kernel):
if blocks_dec == False:
string = "int by = get_group_id(1);\n"
kernel = kernel + string
blocks_dec = True
return kernel, blocks_dec
def bz(blocks_dec, kernel):
if blocks_dec == False:
string = "int bz = get_group_id(2);\n"
kernel = kernel + string
blocks_dec = True
return kernel, blocks_dec
def blocks_decl(stmt, var_nam, var_val, blocks):
equ = stmt.index('=')
if var_nam.count('Bx') < 1:
pos = stmt.index('Bx')
pos_val = stmt[pos + 1 + equ]
var_nam.append(stmt[pos])
var_val.append(int(pos_val))
blocks[0] = int(pos_val)
if var_nam.count('By') < 1:
pos = stmt.index('By')
pos_val = stmt[pos + 1 + equ]
var_nam.append(stmt[pos])
var_val.append(int(pos_val))
blocks[1] = int(pos_val)
if var_nam.count('Bz') < 1:
pos = stmt.index('Bz')
pos_val = stmt[pos + 1 + equ]
var_nam.append(stmt[pos])
var_val.append(int(pos_val))
blocks[2] = int(pos_val)
return var_nam, var_val, blocks
|
Python
| 0.000001
|
@@ -157,28 +157,69 @@
nt bx =
+(
get_g
-roup_id
+lobal_id(0) - get_local_id(0)) / get_local_size
(0);%5Cn%22%0A
@@ -367,28 +367,69 @@
nt by =
+(
get_g
-roup_id
+lobal_id(1) - get_local_id(1)) / get_local_size
(1);%5Cn%22%0A
@@ -581,20 +581,61 @@
z =
+(
get_g
-roup_id
+lobal_id(2) - get_local_id(2)) / get_local_size
(2);
|
f9bad030fffbf6b50ec5833c4024656c725c7679
|
Rename and add item3.
|
Python/item3.py
|
Python/item3.py
|
Python
| 0
|
@@ -0,0 +1,416 @@
+#!/usr/local/bin/python%0A# -*- coding:utf-8 -*-%0Aimport sys%0A%0Adef to_unicode(unicode_or_str):%0A if isinstance(unicode_or_str,str):%0A v=unicode_or_str.decode('utf-8')%0A else:%0A v=unicode_or_str%0A return v%0A%0Adef to_str(unicode_or_str):%0A if isinstance(unicode_or_str,unicode):%0A v=unicode_or_str.encode('utf-8')%0A else:%0A v=unicode_or_str%0A return v%0A%0Astr='%E5%8D%81%E5%A4%A7'%0A#print to_unicode(str)%0Aprint sys.getdefaultencoding()%0A
|
|
a4fbaaa77673dfd68ea2a453a892ca7216ea6110
|
fix traceback in write
|
addons/sale_timesheet/models/sale_timesheet.py
|
addons/sale_timesheet/models/sale_timesheet.py
|
# -*- coding: utf-8 -*-
# Part of Odoo. See LICENSE file for full copyright and licensing details.
from openerp import models, api, fields
from openerp.tools.translate import _
from openerp.exceptions import UserError
class ResCompany(models.Model):
_inherit = 'res.company'
@api.model
def _get_uom_hours(self):
try:
return self.env.ref("product.product_uom_hour")
except ValueError:
return False
project_time_mode_id = fields.Many2one('product.uom', string='Timesheet UoM', default=_get_uom_hours)
class HrEmployee(models.Model):
_inherit = 'hr.employee'
# FIXME: this field should be in module hr_timesheet, not sale_timesheet
timesheet_cost = fields.Float(string='Timesheet Cost', default=0.0)
class ProductTemplate(models.Model):
_inherit = 'product.template'
track_service = fields.Selection(selection_add=[('timesheet', 'Timesheets on contract')])
@api.onchange('type', 'invoice_policy')
def onchange_type_timesheet(self):
if self.type == 'service' and self.invoice_policy == 'cost':
self.track_service = 'timesheet'
if self.type != 'service':
self.track_service = 'manual'
return {}
class AccountAnalyticLine(models.Model):
_inherit = 'account.analytic.line'
def _get_sale_order_line(self, vals=None):
result = dict(vals or {})
if self.is_timesheet:
if result.get('so_line'):
sol = self.env['sale.order.line'].browse([result['so_line']])
else:
sol = self.so_line
if not sol and self.account_id:
sol = self.env['sale.order.line'].search([
('order_id.project_id', '=', self.account_id.id),
('state', '=', 'sale'),
('product_id.track_service', '=', 'timesheet'),
('product_id.type', '=', 'service')],
limit=1)
if sol:
result.update({
'so_line': sol.id,
'product_id': sol.product_id.id,
})
result = self._get_timesheet_cost(result)
result = super(AccountAnalyticLine, self)._get_sale_order_line(vals=result)
return result
def _get_timesheet_cost(self, vals=None):
result = dict(vals or {})
if result.get('is_timesheet') or self.is_timesheet:
if result.get('amount'):
return result
unit_amount = result.get('unit_amount', 0.0) or self.unit_amount
user_id = result.get('user_id') or self.user_id.id
user = self.env['res.users'].browse([user_id])
emp = self.env['hr.employee'].search([('user_id', '=', user_id)], limit=1)
cost = emp and emp.timesheet_cost or 0.0
uom = (emp or user).company_id.project_time_mode_id
# Nominal employee cost = 1 * company project UoM (project_time_mode_id)
result.update(
amount=(-unit_amount * cost),
product_uom_id=uom.id
)
return result
@api.multi
def write(self, values):
values = self._get_timesheet_cost(vals=values)
return super(AccountAnalyticLine, self).write(values)
@api.model
def create(self, values):
values = self._get_timesheet_cost(vals=values)
return super(AccountAnalyticLine, self).create(values)
class SaleOrder(models.Model):
_inherit = 'sale.order'
timesheet_ids = fields.Many2many('account.analytic.line', compute='_compute_timesheet_ids', string='Timesheet activities associated to this sale')
timesheet_count = fields.Float(string='Timesheet activities', compute='_compute_timesheet_ids')
@api.multi
@api.depends('project_id.line_ids')
def _compute_timesheet_ids(self):
for order in self:
order.timesheet_ids = self.env['account.analytic.line'].search([('is_timesheet', '=', True), ('account_id', '=', order.project_id.id)]) if order.project_id else []
order.timesheet_count = round(sum([line.unit_amount for line in order.timesheet_ids]), 2)
@api.multi
@api.constrains('order_line')
def _check_multi_timesheet(self):
for order in self:
count = 0
for line in order.order_line:
if line.product_id.track_service == 'timesheet':
count += 1
if count > 1:
raise UserError(_("You can use only one product on timesheet within the same sale order. You should split your order to include only one contract based on time and material."))
return {}
@api.multi
def action_confirm(self):
result = super(SaleOrder, self).action_confirm()
for order in self:
if not order.project_id:
for line in order.order_line:
if line.product_id.track_service == 'timesheet':
order._create_analytic_account(prefix=order.product_id.default_code or None)
break
return result
@api.multi
def action_view_timesheet(self):
self.ensure_one()
imd = self.env['ir.model.data']
action = imd.xmlid_to_object('hr_timesheet.act_hr_timesheet_line_evry1_all_form')
list_view_id = imd.xmlid_to_res_id('hr_timesheet.hr_timesheet_line_tree')
form_view_id = imd.xmlid_to_res_id('hr_timesheet.hr_timesheet_line_form')
result = {
'name': action.name,
'help': action.help,
'type': action.type,
'views': [[list_view_id, 'tree'], [form_view_id, 'form']],
'target': action.target,
'context': action.context,
'res_model': action.res_model,
}
if self.timesheet_count > 0:
result['domain'] = "[('id','in',%s)]" % self.timesheet_ids.ids
else:
result = {'type': 'ir.actions.act_window_close'}
return result
class SaleOrderLine(models.Model):
_inherit = "sale.order.line"
@api.multi
def _compute_analytic(self, domain=None):
if not domain:
# To filter on analyic lines linked to an expense
domain = [('so_line', 'in', self.ids), '|', ('amount', '<=', 0.0), ('is_timesheet', '=', True)]
return super(SaleOrderLine, self)._compute_analytic(domain=domain)
@api.model
def _get_analytic_track_service(self):
return super(SaleOrderLine, self)._get_analytic_track_service() + ['timesheet']
|
Python
| 0
|
@@ -3168,32 +3168,62 @@
(self, values):%0A
+ for line in self:%0A
values =
@@ -3215,36 +3215,36 @@
values =
-self
+line
._get_timesheet_
@@ -3261,38 +3261,35 @@
values)%0A
-return
+
super(AccountAn
@@ -3300,20 +3300,20 @@
icLine,
-self
+line
).write(
@@ -3319,16 +3319,36 @@
(values)
+%0A return True
%0A%0A @a
|
2caf6ab1e43ad29864e4dc652a60e445adfa31bb
|
Add session merger tool
|
SessionTools/session_merger.py
|
SessionTools/session_merger.py
|
Python
| 0
|
@@ -0,0 +1,2459 @@
+import gzip%0Aimport os%0Aimport random%0Aimport time%0Aimport sys%0A%0AVERBOSE = True%0A%0Adef log(s):%0A if VERBOSE:%0A print (time.strftime(%22%25Y-%25m-%25d %25H:%25M:%25S%22) + %22 %22 + str(s))%0A%0Aif len(sys.argv) != 3:%0A print (%22Usage: python session_merger.py PathToInSessions PathToTargetSessions%22)%0A exit(1)%0A%0AinSessionsPath = sys.argv%5B1%5D%0Aprint(inSessionsPath)%0AoutSessionsPath = sys.argv%5B2%5D%0Aprint(outSessionsPath)%0A%0AinChecksumMap = %7B%7D%0AoutChecksumMap = %7B%7D%0A%0Adef countLinesInGzipFile(path):%0A nr_lines = 0%0A with gzip.open(path) as f:%0A for _ in f:%0A nr_lines = nr_lines + 1%0A return nr_lines%0A%0Adef checksumFiles(paths):%0A c = 0%0A checksumMap = %7B%7D%0A for path in paths:%0A c += 1%0A%0A if c %25 1000 == 0:%0A log ('checksumming' + c + %22:%22 + str(c/len(paths)) )%0A if os.path.isfile(path):%0A checksumMap%5Bpath%5D = countLinesInGzipFile(path)%0A else:%0A checksumMap%5Bpath%5D = 0%0A return checksumMap%0A%0AinPaths = %5B%5D%0Ai = 0%0A%0Alog('Enumerating and checksumming the input files')%0Afor root, subdirs, files in os.walk(inSessionsPath):%0A for ff in files:%0A i = i + 1%0A if i %25 1000 == 0:%0A log (i)%0A path = os.path.join(root, ff)%0A if (not path.endswith('.gz')):%0A continue%0A inPaths.append(path)%0A%0Alog('Paths to process: ' + str(len(inPaths)))%0A%0AoutPaths = %7B%7D%0Afor inPath in inPaths:%0A outPaths%5BinPath%5D = inPath.replace(inSessionsPath, outSessionsPath)%0A%0Alog('Computing file length checksums')%0AinChecksumMap = checksumFiles(inPaths)%0AoutChecksumMap = checksumFiles(outPaths.itervalues())%0A%0Ai = 0%0A%0Alog('Moving input files')%0Afor inPath in inPaths:%0A fin = gzip.open(inPath)%0A%0A outPath = outPaths%5BinPath%5D%0A if not os.path.exists(os.path.dirname(outPath)):%0A os.makedirs(os.path.dirname(outPath))%0A fout = gzip.open(outPath, 'ab')%0A%0A for ln in fin:%0A fout.write(ln)%0A%0A # check that the checksum fails when it should by introducing an extra line in a file%0A # if (outPath == 'session_merger_test_out/a/1/100.gz'):%0A # fout.write('checksum test%5Cn')%0A %0A fin.close()%0A fout.close()%0A%0Alog('Verifying the checksum of the output files')%0AnewOutChecksumMap = checksumFiles(outPaths.itervalues())%0A%0Afor inPath in inPaths:%0A inChecksum = inChecksumMap%5BinPath%5D%0A outChecksum = outChecksumMap%5BoutPaths%5BinPath%5D%5D%0A newOutChecksum = newOutChecksumMap%5BoutPaths%5BinPath%5D%5D%0A if (newOutChecksum != inChecksum + outChecksum):%0A log(%22Checksum doesn't match for file %22 + outPaths%5BinPath%5D + %22: expected %22 + str(inChecksum + outChecksum) + %22, got %22 + str(newOutChecksum))%0A else:%0A os.remove(inPath)%0A
|
|
e9861532a3aa420b68bc797ee54f7429ea73c0e7
|
fix #1
|
rest.py
|
rest.py
|
Python
| 0.000002
|
@@ -0,0 +1,180 @@
+from flask import Flask%0Aapp = Flask(__name__)%0A%0A@app.route(%22/%22)%0Adef ping():%0A return %22python server received 'ping' request!%22%0A%0Aif __name__ == %22__main__%22:%0A app.run(debug=True)%0A%0A
|
|
e40091a2105fc4e04bcce9b2c6ebcc7cf28f22ca
|
add module uwsgi
|
modules/uwsgi.py
|
modules/uwsgi.py
|
Python
| 0.000001
|
@@ -0,0 +1,993 @@
+%22%22%22uwsgi-for-Django module.%0A%0AWebsite: https://uwsgi-docs.readthedocs.io/en/latest/%0A%22%22%22%0Aimport os%0A%0Adef uwsgi(loader, *args):%0A loader.setup_virtualenv()%0A venv_dir = loader.get_virtualenv_dir()%0A binargs = %5Bos.path.join(venv_dir, 'bin', 'uwsgi')%5D + list(args)%0A os.execvp(binargs%5B0%5D, binargs)%0A%0A%0Adef uwsgi_run(loader, project=None, variant=None, *args): #pylint:disable=keyword-arg-before-vararg%0A project, variant = loader.setup_project_env(project, variant)%0A loader.setup_virtualenv()%0A loader.setup_shell_env()%0A config = loader.get_project_config()%0A%0A venv_dir = loader.get_virtualenv_dir()%0A binargs = %5Bos.path.join(venv_dir, 'bin', 'uwsgi'), '--master',%0A '--die-on-term'%5D%0A if not loader.is_production():%0A binargs.append('--honour-stdin')%0A%0A work_dir = config.get('work_dir', project)%0A work_dir = loader.expand_path(work_dir)%0A os.chdir(work_dir)%0A%0A binargs += list(args)%0A os.execvp(binargs%5B0%5D, binargs)%0A%0A%0Acommands = (uwsgi, uwsgi_run)%0A
|
|
a1d523c2daf563444761cb7315e18e1e0ee1b506
|
Add ARM program (forgot to add)
|
ppci/arch/arm/program.py
|
ppci/arch/arm/program.py
|
Python
| 0
|
@@ -0,0 +1,870 @@
+from ppci.programs import MachineProgram%0A%0A%0Aclass ArmProgram(MachineProgram):%0A %22%22%22 Machine code for most mobile devices and e.g. the Raspberry Pi.%0A %22%22%22%0A %0A def _check_items(self, items):%0A return items%0A %0A def _copy(self):%0A raise NotImplementedError()%0A %0A def _get_report(self, html):%0A obj = self._items%5B0%5D%0A lines = %5B%5D%0A lines.append(repr(obj))%0A for section in obj.sections:%0A lines.append(repr(section))%0A for symbol in obj.symbols:%0A lines.append(repr(symbol))%0A for reloc in obj.relocations:%0A lines.append(repr(reloc))%0A return '%5Cn'.join(lines)%0A %0A # todo: does this make sense for arm?%0A def as_object(self):%0A %22%22%22 Export as binary code object (bytes)%0A %22%22%22%0A obj = self._items%5B0%5D%0A return bytes(obj.get_section('code').data)%0A
|
|
10af3d7ee13afda37e8ecf76927bc2fedfe22b6f
|
add expand module
|
biothings_explorer/expand/__init__.py
|
biothings_explorer/expand/__init__.py
|
Python
| 0.000001
|
@@ -0,0 +1,2230 @@
+from collections import defaultdict%0A%0Afrom ..smartapi_kg import MetaKG%0Afrom ..call_apis import APIQueryDispatcher%0Afrom ..query.utils import annotateEdgesWithInput%0A%0A%0Aclass Expander:%0A def __init__(self):%0A self.kg = MetaKG()%0A self.kg.constructMetaKG(source=%22local%22)%0A%0A def __getEdges(self, semanticType):%0A %22%22%22%0A Get a list of smart-api edges based on semantic type.%0A%0A :param semanticType: Type of bioentities to expand%0A :returns: list of smartapi edges expanding the semantic type%0A %22%22%22%0A return self.kg.filter(%0A %7B%0A %22input_type%22: semanticType,%0A %22output_type%22: semanticType,%0A %22predicate%22: %22has_subclass%22,%0A %7D%0A )%0A%0A @staticmethod%0A def __parseResponse(res):%0A if not res:%0A return%0A result = %7B%7D%0A for rec in res:%0A if (%0A %22$output_id_mapping%22 in rec%0A and %22resolved_ids%22 in rec%5B%22$output_id_mapping%22%5D%0A ):%0A result%5B%0A rec%5B%22$output_id_mapping%22%5D%5B%22resolved_ids%22%5D%5B%22id%22%5D%5B%22identifier%22%5D%0A %5D = rec%5B%22$output_id_mapping%22%5D%5B%22resolved_ids%22%5D%0A return result%0A%0A @staticmethod%0A def __groupIDsbySemanticType(output_ids):%0A result = defaultdict(list)%0A for resolved_ids in output_ids:%0A result%5Bresolved_ids.get(%22type%22)%5D.append(resolved_ids)%0A return result%0A%0A def expand(self, inputs):%0A %22%22%22%0A Expand input biomedical objects to its children%0A :param semanticType: semantic type of the inputs%0A :param inputs: list of resolved identifiers%0A %22%22%22%0A grpedIDs = self.__groupIDsbySemanticType(inputs)%0A bte_edges = %5B%5D%0A for semanticType, resolvedIDs in grpedIDs.items():%0A smartapi_edges = self.__getEdges(semanticType)%0A if not smartapi_edges:%0A continue%0A tmp_edges = annotateEdgesWithInput(smartapi_edges, resolvedIDs)%0A if not tmp_edges:%0A continue%0A bte_edges += tmp_edges%0A if not bte_edges:%0A return%0A dp = APIQueryDispatcher(bte_edges)%0A res = dp.syncQuery()%0A return self.__parseResponse(res)%0A
|
|
b55e27e7420443b4d9a48da7c4e5501e1de66f44
|
Add some code
|
modules/descriptive_statistics/app.py
|
modules/descriptive_statistics/app.py
|
Python
| 0.000013
|
@@ -0,0 +1,109 @@
+class DescriptiveStatistics():%0A%09def __init__(self,data):%0A%09%09return self.main()%0A%0A%09def main(self):%0A%09%09return True
|
|
10c1b98f42a57c1f6672207d8de4a39e1482caec
|
Fix on_change_party in sale
|
sale.py
|
sale.py
|
# -*- coding: utf-8 -*-
"""
    sale

    :copyright: (c) 2015 by Openlabs Technologies & Consulting (P) Limited
    :license: see LICENSE for more details.
"""
from trytond.model import fields
from trytond.transaction import Transaction
from trytond.pool import Pool, PoolMeta
from trytond.pyson import Eval, Or, Bool

__all__ = ['Sale']
__metaclass__ = PoolMeta


class Sale:
    __name__ = 'sale.sale'

    channel = fields.Many2One(
        'sale.channel', 'Channel', required=True, domain=[
            ('id', 'in', Eval('context', {}).get('allowed_read_channels', [])),
        ],
        states={
            'readonly': Or(
                (Eval('id', default=0) > 0),
                Bool(Eval('lines', default=[])),
            )
        }, depends=['id']
    )
    channel_type = fields.Function(
        fields.Char('Channel Type'), 'on_change_with_channel'
    )

    @classmethod
    def __setup__(cls):
        super(Sale, cls).__setup__()
        cls._error_messages.update({
            'channel_missing': (
                'Go to user preferences and select a current_channel ("%s")'
            ),
            'channel_change_not_allowed': (
                'Cannot change channel'
            ),
            'not_create_channel': (
                'You cannot create order under this channel because you do not '
                'have required permissions'
            ),
        })

    @classmethod
    def default_channel(cls):
        User = Pool().get('res.user')
        user = User(Transaction().user)
        channel_id = Transaction().context.get('current_channel')

        if channel_id:
            return channel_id
        return user.current_channel and \
            user.current_channel.id  # pragma: nocover

    @staticmethod
    def default_company():
        Sale = Pool().get('sale.sale')
        Channel = Pool().get('sale.channel')

        channel_id = Sale.default_channel()
        if channel_id:
            return Channel(channel_id).company.id
        return Transaction().context.get('company')  # pragma: nocover

    @staticmethod
    def default_invoice_method():
        Sale = Pool().get('sale.sale')
        Channel = Pool().get('sale.channel')
        Config = Pool().get('sale.configuration')

        channel_id = Sale.default_channel()
        if not channel_id:  # pragma: nocover
            config = Config(1)
            return config.sale_invoice_method
        return Channel(channel_id).invoice_method

    @staticmethod
    def default_shipment_method():
        Sale = Pool().get('sale.sale')
        Channel = Pool().get('sale.channel')
        Config = Pool().get('sale.configuration')

        channel_id = Sale.default_channel()
        if not channel_id:  # pragma: nocover
            config = Config(1)
            return config.sale_invoice_method
        return Channel(channel_id).shipment_method

    @staticmethod
    def default_warehouse():
        Sale = Pool().get('sale.sale')
        Channel = Pool().get('sale.channel')
        Location = Pool().get('stock.location')

        channel_id = Sale.default_channel()
        if not channel_id:  # pragma: nocover
            return Location.search([('type', '=', 'warehouse')], limit=1)[0].id
        else:
            return Channel(channel_id).warehouse.id

    @staticmethod
    def default_price_list():
        Sale = Pool().get('sale.sale')
        Channel = Pool().get('sale.channel')

        channel_id = Sale.default_channel()
        if channel_id:
            return Channel(channel_id).price_list.id
        return None  # pragma: nocover

    @staticmethod
    def default_payment_term():
        Sale = Pool().get('sale.sale')
        Channel = Pool().get('sale.channel')

        channel_id = Sale.default_channel()
        if channel_id:
            return Channel(channel_id).payment_term.id
        return None  # pragma: nocover

    @fields.depends('channel', 'party')
    def on_change_channel(self):
        if not self.channel:
            return {}  # pragma: nocover
        res = {}
        for fname in ('company', 'warehouse', 'currency', 'payment_term'):
            fvalue = getattr(self.channel, fname)
            if fvalue:
                res[fname] = fvalue.id
        if (not self.party or not self.party.sale_price_list):
            res['price_list'] = self.channel.price_list.id  # pragma: nocover
        if self.channel.invoice_method:
            res['invoice_method'] = self.channel.invoice_method
        if self.channel.shipment_method:
            res['shipment_method'] = self.channel.shipment_method

        # Update AR record
        for key, value in res.iteritems():
            if '.' not in key:
                setattr(self, key, value)
        return res

    @fields.depends('channel')
    def on_change_party(self):  # pragma: nocover
        res = super(Sale, self).on_change_party()
        channel = self.channel

        if channel:
            if not res.get('price_list') and res.get('invoice_address'):
                res['price_list'] = channel.price_list.id
                res['price_list.rec_name'] = channel.price_list.rec_name
            if not res.get('payment_term') and res.get('invoice_address'):
                res['payment_term'] = channel.payment_term.id
                res['payment_term.rec_name'] = \
                    self.channel.payment_term.rec_name

        # Update AR record
        for key, value in res:
            setattr(self, key, value)
        return res

    @fields.depends('channel')
    def on_change_with_channel(self, name=None):
        """
        Returns the source of the channel
        """
        if self.channel:
            return self.channel.source
        return None

    def check_create_access(self):
        """
        Check sale creation in channel
        """
        User = Pool().get('res.user')
        user = User(Transaction().user)
        if user.id == 0:
            return  # pragma: nocover

        if self.channel not in user.allowed_create_channels:
            self.raise_user_error('not_create_channel')

    @classmethod
    def write(cls, sales, values, *args):
        """
        Check if channel in sale is is user's create_channel
        """
        if 'channel' in values:
            # Channel cannot be changed at any cost.
            cls.raise_user_error('channel_change_not_allowed')

        super(Sale, cls).write(sales, values, *args)

    @classmethod
    def create(cls, vlist):
        """
        Check if user is allowed to create sale in channel
        """
        User = Pool().get('res.user')
        user = User(Transaction().user)

        if 'channel' not in vlist and not cls.default_channel():
            cls.raise_user_error(
                'channel_missing', (user.rec_name,)
            )  # pragma: nocover

        sales = super(Sale, cls).create(vlist)

        for sale in sales:
            sale.check_create_access()
        return sales

    # TODO: On copying an order from a channel the user does not have
    # create access, default to the current channel of the user. If there
    # is no current channel, blow up
|
Python
| 0
|
@@ -5432,16 +5432,28 @@
e in res
+.iteritems()
:%0A
|
c505b95c3affe3805bd9274c3aedd1c6640c5ff5
|
Create solution.py
|
leetcode/medium/linked_list_cycle_ii/py/solution.py
|
leetcode/medium/linked_list_cycle_ii/py/solution.py
|
Python
| 0.000018
|
@@ -0,0 +1,781 @@
+# Definition for singly-linked list.%0A# class ListNode(object):%0A# def __init__(self, x):%0A# self.val = x%0A# self.next = None%0A%0Aclass Solution(object):%0A def detectCycle(self, head):%0A %22%22%22%0A :type head: ListNode%0A :rtype: ListNode%0A %22%22%22%0A if head == None:%0A return None%0A %0A slow = head%0A fast = head%0A %0A while fast != None and fast.next != None:%0A slow = slow.next%0A fast = fast.next.next%0A %0A if slow == fast:%0A slow = head%0A %0A while slow != fast:%0A slow = slow.next%0A fast = fast.next%0A %0A return slow%0A %0A return None%0A
|
|
2b7fffd3f6f358df5613b9aa304f9a70f9b04bc2
|
add bloom filter script
|
cloud-computing-concepts-part1/scripts/bloom-filter.py
|
cloud-computing-concepts-part1/scripts/bloom-filter.py
|
Python
| 0
|
@@ -0,0 +1,251 @@
+__author__ = 'grokrz'%0A%0Am = 32%0A%0A%0Adef hash_function(x, i, m):%0A return ((pow(x, 2) * pow(x, 3)) * i) %25 m%0A%0A%0Adef show_bits_set_to_1(val):%0A for i in range(1, 4):%0A print %22Bit set to 1: %22 + str(hash_function(val, i, m))%0A%0A%0Ashow_bits_set_to_1(2013)
|
|
c3a83ed6158fcd9335f9253417ca4b24e9ab7934
|
Add test for fqdn thread leak
|
tests/pytests/unit/modules/test_network.py
|
tests/pytests/unit/modules/test_network.py
|
Python
| 0.000002
|
@@ -0,0 +1,898 @@
+import threading%0A%0Aimport pytest%0Aimport salt.modules.network as networkmod%0Afrom tests.support.mock import patch%0A%0A%0A@pytest.fixture%0Adef configure_loader_modules():%0A return %7Bnetworkmod: %7B%7D%7D%0A%0A%0A@pytest.fixture%0Adef socket_errors():%0A # Not sure what kind of errors could be returned by getfqdn or%0A # gethostbyaddr, but we have reports that thread leaks are happening%0A with patch(%22socket.getfqdn%22, autospec=True, side_effect=Exception), patch(%0A %22socket.gethostbyaddr%22, autospec=True, side_effect=Exception%0A ):%0A yield%0A%0A%0A@pytest.mark.xfail%0Adef test_when_errors_happen_looking_up_fqdns_threads_should_not_leak(socket_errors):%0A before_threads = threading.active_count()%0A networkmod.fqdns()%0A after_threads = threading.active_count()%0A assert (%0A before_threads == after_threads%0A ), %22Difference in thread count means the thread pool is not correctly cleaning up.%22%0A
|
|
f58589f4bcb2aa233ebbd71831e31b1b9505e2c4
|
Create wiki_img_extractor.py
|
wiki_img_extractor.py
|
wiki_img_extractor.py
|
Python
| 0.000001
|
@@ -0,0 +1,1665 @@
+##/**%0A## * xa2.js%0A## * @author Akshay Dahiya - @xadahiya%0A## * @description Typingeek's tutor script%0A## */%0A%0Aimport wikipedia, requests, lxml.html%0A%0A###images and caption from wikipedia%0A##%0A##main image from table@class=%22infobox%22%0A## img/@src,img/@alt%0A## %0A##wikipedia images from div@class=%22thumb tright%22%0A## img@class = %22thumbimage%22/@src%0A## div@class = %22thumbcaption%22%0A## all text including link texts%0A## %0A%0Adef extract(query):%0A %0A search_result = wikipedia.search(query)%5B0%5D%0A page = wikipedia.page(search_result)%0A page_url = page.url%0A page_title = page.title%0A print page_title%0A %0A## doc = lxml.html.parse(page_url)%0A res = requests.get(page_url)%0A## doc = lxml.html.parse(res.content)%0A doc = lxml.html.fromstring(res.content)%0A %0A ## to get images%0A %0A ##Get main image and its alt%0A print %22Main Image and its alt%22%0A main_images = doc.xpath('//table%5B1%5D%5Bcontains(concat(%22 %22,@class,%22 %22),%22infobox%22)%5D/tr/td//img/@src')%0A main_images_alt = doc.xpath('//table%5B1%5D%5Bcontains(concat(%22 %22,@class,%22 %22),%22vcard%22)%5D/tr/td//img/@alt')%0A for link in main_images:%0A print %22https:%22 + link%0A for alt in main_images_alt:%0A print alt%0A %0A ##Get thumbimages url and caption%0A print %22%5CnThumbimage urls%22 %0A thumb_imgs = doc.xpath('//img%5B@class=%22thumbimage%22%5D/@src')%0A for link in thumb_imgs:%0A print %22https:%22 + link%0A## thumb_caption = doc.xpath('string(//div%5B@class=%22thumbcaption%22%5D)')%0A print %22%5CnThumbimage captions%22%0A thumb_caption = doc.xpath('//div%5B@class=%22thumbcaption%22%5D')%0A for a in thumb_caption:%0A print ' '.join(a.xpath('string()').split())%0A## print a.xpath('string()')%0A%0A
|
|
f7328b96275bad6c3d8d9a4f844d47a65fe2bf4b
|
Create test_wrap.py
|
wordwrap/test_wrap.py
|
wordwrap/test_wrap.py
|
Python
| 0.000007
|
@@ -0,0 +1,904 @@
+import unittest%0Afrom wrap import wrap%0A%0A%0Aclass TestWW(unittest.TestCase):%0A def test_empty_string(self):%0A self.assertEqual('', wrap('', 1))%0A%0A def test_string_smaller_than_col(self):%0A self.assertEqual('ab', wrap('ab', 3))%0A%0A def test_string_without_spaces(self):%0A self.assertEqual('ab%5Cncd', wrap('abcd', 2))%0A%0A def test_big_string_without_spaces(self):%0A self.assertEqual('ab%5Cncd%5Cnef%5Cngh', wrap('abcdefgh', 2))%0A%0A def test_string_with_space_at_column(self):%0A self.assertEqual('word%5Cnword', wrap('word word', 5))%0A%0A def test_after_word_boundary(self):%0A self.assertEqual('word%5Cnword', wrap('word word', 6))%0A%0A def test_three_words_after_first_space(self):%0A self.assertEqual('word%5Cnword%5Cnword', wrap('word word word', 6))%0A%0A def test_three_words_after_second_space(self):%0A self.assertEqual('word word%5Cnword', wrap('word word word', 10))%0A
|
|
7922f168c1f844d7f1b69dfb383918d051cc312f
|
test when no server is present on the connection
|
tests/twisted/search/no-server-property.py
|
tests/twisted/search/no-server-property.py
|
"""
Tests Contact Search channels to a simulated XEP-0055 service, without
passing the Server property
"""
import dbus
from twisted.words.xish import xpath
from gabbletest import exec_test, sync_stream, make_result_iq, acknowledge_iq, elem_iq, elem
from servicetest import EventPattern
from search_helper import call_create, answer_field_query
import constants as cs
import ns
JUD_SERVER = 'jud.localhost'
def test(q, bus, conn, stream):
conn.Connect()
_, iq_event, disco_event = q.expect_many(
EventPattern('dbus-signal', signal='StatusChanged',
args=[cs.CONN_STATUS_CONNECTED, cs.CSR_REQUESTED]),
EventPattern('stream-iq', to=None, query_ns='vcard-temp',
query_name='vCard'),
EventPattern('stream-iq', to='localhost', query_ns=ns.DISCO_ITEMS))
acknowledge_iq(stream, iq_event.stanza)
requests = dbus.Interface(conn, cs.CONN_IFACE_REQUESTS)
# no search server has been discovered yet. Requesting a search channel
# without specifying the Server will fail
call_create(q, requests, server=None)
e = q.expect('dbus-error', method='CreateChannel')
assert e.error.get_dbus_name() == cs.INVALID_ARGUMENT
# reply to IQ query
reply = make_result_iq(stream, disco_event.stanza)
query = xpath.queryForNodes('/iq/query', reply)[0]
item = query.addElement((None, 'item'))
item['jid'] = JUD_SERVER
stream.send(reply)
# wait for the disco#info query
event = q.expect('stream-iq', to=JUD_SERVER, query_ns=ns.DISCO_INFO)
reply = elem_iq(stream, 'result', id=event.stanza['id'], from_=JUD_SERVER)(
elem(ns.DISCO_INFO, 'query')(
elem('identity', category='directory', type='user', name='vCard User Search')(),
elem('feature', var=ns.SEARCH)()))
stream.send(reply)
# Make sure Gabble's received the reply
sync_stream(q, stream)
call_create(q, requests, server=None)
# JUD_SERVER is used as default
answer_field_query(q, stream, JUD_SERVER)
if __name__ == '__main__':
exec_test(test)
|
Python
| 0
|
@@ -409,20 +409,33 @@
t'%0A%0Adef
-test
+server_discovered
(q, bus,
@@ -2026,51 +2026,1102 @@
R)%0A%0A
-if __name__ == '__main__':%0A exec_test(test
+def no_server_discovered(q, bus, conn, stream):%0A conn.Connect()%0A%0A _, iq_event, disco_event = q.expect_many(%0A EventPattern('dbus-signal', signal='StatusChanged',%0A args=%5Bcs.CONN_STATUS_CONNECTED, cs.CSR_REQUESTED%5D),%0A EventPattern('stream-iq', to=None, query_ns='vcard-temp',%0A query_name='vCard'),%0A EventPattern('stream-iq', to='localhost', query_ns=ns.DISCO_ITEMS))%0A%0A acknowledge_iq(stream, iq_event.stanza)%0A%0A requests = dbus.Interface(conn, cs.CONN_IFACE_REQUESTS)%0A%0A # reply to IQ query. No search server is present%0A reply = make_result_iq(stream, disco_event.stanza)%0A stream.send(reply)%0A%0A # Make sure Gabble's received the reply%0A sync_stream(q, stream)%0A%0A # This server doesn't have a search server. We can't create Search channel%0A # without specifying a Server property%0A call_create(q, requests, server=None)%0A e = q.expect('dbus-error', method='CreateChannel')%0A assert e.error.get_dbus_name() == cs.INVALID_ARGUMENT%0A%0Aif __name__ == '__main__':%0A exec_test(server_discovered)%0A exec_test(no_server_discovered
)%0A
|
0a02faf18fd3f05156df1b59dce83cee49a149f5
|
set version in finstance
|
onadata/apps/fsforms/management/commands/save_version_in_finstance.py
|
onadata/apps/fsforms/management/commands/save_version_in_finstance.py
|
Python
| 0
|
@@ -0,0 +1,1057 @@
+from django.core.management.base import BaseCommand%0Afrom onadata.apps.fsforms.models import FInstance%0A%0Aclass Command(BaseCommand):%0A help = 'Set version in FInstance for given user'%0A %0A def add_arguments(self, parser):%0A parser.add_argument('username', type=str)%0A%0A def handle(self, *args, **options):%0A # xls_directory = %22/home/xls%22%0A batchsize = options.get(%22batchsize%22, 100)%0A username = options%5B'username'%5D%0A stop = False%0A offset = 0%0A while stop is not True:%0A limit = offset + batchsize%0A instances = FInstance.objects.filter(instance__xform__user__username=username, version='')%0A if instances:%0A for instance in instances:%0A instance.set_version()%0A %0A self.stdout.write(_(%22Updating instances from #%7B%7D to #%7B%7D%5Cn%22).format(%0A instances%5B0%5D.id,%0A instances%5B-1%5D.id))%0A %0A else:%0A stop = True%0A %0A offset += batchsize%0A%0A%0A
|
|
b7f9bbd7afd64c702a2ea296b9e47cb5f563a4a2
|
Create valid-square.py
|
Python/valid-square.py
|
Python/valid-square.py
|
Python
| 0.99833
|
@@ -0,0 +1,1089 @@
+# Time: O(1)%0A# Space: O(1)%0A%0A# Given the coordinates of four points in 2D space,%0A# return whether the four points could construct a square.%0A#%0A# The coordinate (x,y) of a point is represented by an integer array with two integers.%0A#%0A# Example:%0A# Input: p1 = %5B0,0%5D, p2 = %5B1,1%5D, p3 = %5B1,0%5D, p4 = %5B0,1%5D%0A# Output: True%0A# Note:%0A#%0A# All the input integers are in the range %5B-10000, 10000%5D.%0A# A valid square has four equal sides with positive length%0A# and four equal angles (90-degree angles).%0A# Input points have no order.%0A%0Aclass Solution(object):%0A def validSquare(self, p1, p2, p3, p4):%0A %22%22%22%0A :type p1: List%5Bint%5D%0A :type p2: List%5Bint%5D%0A :type p3: List%5Bint%5D%0A :type p4: List%5Bint%5D%0A :rtype: bool%0A %22%22%22%0A def dist(p1, p2):%0A return (p1%5B0%5D - p2%5B0%5D) ** 2 + (p1%5B1%5D - p2%5B1%5D) ** 2%0A%0A counter = collections.Counter(%5Bdist(p1, p2), dist(p1, p3),%5C%0A dist(p1, p4), dist(p2, p3),%5C%0A dist(p2, p4), dist(p3, p4)%5D)%0A return 0 not in counter and len(counter) == 2%0A
|
|
9925f3a677b7a855a2242176139bde4ab9d62ba0
|
Add script which will compute the number of 'bonnes boites' per rome
|
labonneboite/scripts/nb_hirings/rome_nb_bonne_boite.py
|
labonneboite/scripts/nb_hirings/rome_nb_bonne_boite.py
|
Python
| 0.834097
|
@@ -0,0 +1,273 @@
+import pandas as pd%0A%0Aif __name__ == '__main__':%0A%0A df = pd.read_csv('prediction_per_company_per_rome2019-11-08.csv')%0A df_rome_nb_bonne_boite = df.groupby(%5B'rome'%5D)%5B'is a bonne boite ?'%5D.sum()%0A df_rome_nb_bonne_boite.to_csv('nb_bonne_boite_per_rome2019-11-089.csv')%0A
|
|
8f488365c9a4f14bf96eab089d6ac869b675c1b4
|
Add system.version functional test
|
tests/functional/test_system.py
|
tests/functional/test_system.py
|
Python
| 0.000002
|
@@ -0,0 +1,989 @@
+import logging%0Aimport unittest%0A%0Aimport trovebox%0Afrom tests.functional import test_base%0A%0Aclass TestSystem(test_base.TestBase):%0A testcase_name = %22system%22%0A%0A def setUp(self):%0A %22%22%22%0A Override the default setUp, since we don't need a populated database%0A %22%22%22%0A logging.info(%22%5CnRunning %25s...%22, self.id())%0A%0A def test_system_version(self):%0A %22%22%22%0A Check that the API version string is returned correctly%0A %22%22%22%0A client = trovebox.Trovebox(config_file=self.config_file)%0A version = client.system.version()%0A self.assertEqual(version%5B%22api%22%5D, %22v%25s%22 %25 trovebox.LATEST_API_VERSION)%0A%0A @unittest.skip(%22Diagnostics don't work with the hosted site%22)%0A def test_system_diagnostics(self):%0A %22%22%22%0A Check that the system diagnostics can be performed%0A %22%22%22%0A client = trovebox.Trovebox(config_file=self.config_file)%0A diagnostics = client.system.diagnostics()%0A self.assertIn(diagnostics, %22database%22)%0A
|
|
234d53ed185976a65042c136426d7f05022a698d
|
add memnn test
|
tests/test_memnn.py
|
tests/test_memnn.py
|
Python
| 0.000001
|
@@ -0,0 +1,1496 @@
+from __future__ import print_function%0A%0Aimport unittest%0Afrom seya.layers.memnn2 import MemN2N%0Afrom keras.models import Sequential%0Afrom keras.layers.core import Lambda%0Afrom keras import backend as K%0A%0Aimport numpy as np%0A%0A%0Aclass TestMemNN(unittest.TestCase):%0A %22%22%22Test seya.layers.memnn layer%22%22%22%0A%0A def test_memnn(self):%0A def identity_init(shape, name=None):%0A dim = max(shape)%0A I = np.identity(dim)%5B:shape%5B0%5D, :shape%5B1%5D%5D%0A return K.variable(I, name=name)%0A%0A input_dim = 20%0A output_dim = 64%0A input_length = 9%0A memory_length = 7%0A%0A facts = Sequential()%0A facts.add(Lambda(lambda x: x, input_shape=(memory_length, input_dim),%0A output_shape=(memory_length, input_dim)))%0A question = Sequential()%0A question.add(Lambda(lambda x: x, input_shape=(1, input_dim),%0A output_shape=(1, input_dim)))%0A%0A memnn = MemN2N(%5Bfacts, question%5D, output_dim, input_dim,%0A input_length, memory_length,%0A output_shape=(output_dim,))%0A memnn.build()%0A%0A model = Sequential()%0A model.add(memnn)%0A model.compile(%22sgd%22, %22mse%22)%0A%0A inp = np.random.randint(0, input_dim,%0A (1, memory_length, input_length))%0A que = np.random.randint(0, input_dim, (1, 1, input_length))%0A print(model.predict(%5Binp, que%5D).shape)%0A%0Aif __name__ == '__main__':%0A unittest.main(verbosity=2)%0A
|
|
737ac4ddbc3d047fbf41e3d9f7cde20a53d8974a
|
add management command to decrypt eval data
|
evaluation/management/commands/decrypt_eval_data.py
|
evaluation/management/commands/decrypt_eval_data.py
|
Python
| 0.000002
|
@@ -0,0 +1,1368 @@
+from django.core.management.base import BaseCommand%0Aimport gnupg%0Aimport json%0Afrom django.conf import settings%0Aimport environ%0Aenv = environ.Env()%0A%0Afrom evaluation.models import EvalRow%0A%0Aclass Command(BaseCommand):%0A help='decrypts eval data. can only be run in local environments (import data from prod)'%0A%0A def handle(self, *args, **options):%0A if not settings.DEBUG:%0A raise RuntimeError(%22Don't run this in production!!! Import encrypted prod data to your local environment%22)%0A eval_key = env('CALLISTO_EVAL_PRIVATE_KEY')%0A decrypted_eval_data = %5B%5D%0A for row in EvalRow.objects.all():%0A decrypted_row = %7B'pk': row.pk,%0A 'user': row.user_identifier,%0A 'record': row.record_identifier,%0A 'action': row.action,%0A 'timestamp': row.timestamp.timestamp()%7D%0A gpg = gnupg.GPG()%0A gpg.import_keys(eval_key)%0A decrypted_eval_row = str(gpg.decrypt(row.row))%0A if decrypted_eval_row:%0A decrypted_row.update(json.loads(decrypted_eval_row))%0A decrypted_eval_data.append(decrypted_row)%0A with open('eval_data.json','w') as output:%0A json.dump(decrypted_eval_data, output)%0A self.stdout.write(%22Decrypted eval data written to eval_data.json%22)
|
|
b977b1e6732255732843aaaad3e5c1f8e2b4d0e0
|
add unit tests for openquake/utils/general.py
|
tests/utils_general_unittest.py
|
tests/utils_general_unittest.py
|
Python
| 0.000004
|
@@ -0,0 +1,901 @@
+# -*- coding: utf-8 -*-%0A# vim: tabstop=4 shiftwidth=4 softtabstop=4%0A%0A# Copyright (c) 2010-2011, GEM Foundation.%0A#%0A# OpenQuake is free software: you can redistribute it and/or modify%0A# it under the terms of the GNU Lesser General Public License version 3%0A# only, as published by the Free Software Foundation.%0A#%0A# OpenQuake is distributed in the hope that it will be useful,%0A# but WITHOUT ANY WARRANTY; without even the implied warranty of%0A# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the%0A# GNU Lesser General Public License version 3 for more details%0A# (a copy is included in the LICENSE file that accompanied this code).%0A#%0A# You should have received a copy of the GNU Lesser General Public License%0A# version 3 along with OpenQuake. If not, see%0A# %3Chttp://www.gnu.org/licenses/lgpl-3.0.txt%3E for a copy of the LGPLv3 License.%0A%0A%0A%22%22%22%0ATest related to code in openquake/utils/general.py%0A%22%22%22%0A%0A
|
|
e4e4a52318b857ce315a6f673e72b018e6501a83
|
Add a plotting example.
|
Lib/sandbox/pyem/examples/plotexamples.py
|
Lib/sandbox/pyem/examples/plotexamples.py
|
Python
| 0.999999
|
@@ -0,0 +1,1049 @@
+#! /usr/bin/env python%0A# Last Change: Mon Jun 11 03:00 PM 2007 J%0A%0A# This is a simple test to check whether plotting ellipsoides of confidence and%0A# isodensity contours match%0Aimport numpy as N%0Afrom numpy.testing import set_package_path, restore_path%0A%0Aimport pylab as P%0A%0Aset_package_path()%0Aimport pyem%0Arestore_path()%0A%0A# Generate a simple mixture model, plot its confidence ellipses + isodensity%0A# curves for both diagonal and full covariance matrices%0Ad = 3%0Ak = 3%0Adim = %5B0, 2%5D%0A# diag model%0Aw, mu, va = pyem.GM.gen_param(d, k)%0Adgm = pyem.GM.fromvalues(w, mu, va)%0A# full model%0Aw, mu, va = pyem.GM.gen_param(d, k, 'full', spread = 1)%0Afgm = pyem.GM.fromvalues(w, mu, va)%0A%0Adef plot_model(gm, dim):%0A X, Y, Z, V = gm.density_on_grid(dim = dim)%0A h = gm.plot(dim = dim)%0A %5Bi.set_linestyle('-.') for i in h%5D%0A P.contour(X, Y, Z, V)%0A data = gm.sample(200)%0A P.plot(data%5B:, dim%5B0%5D%5D, data%5B:,dim%5B1%5D%5D, '.')%0A%0A# Plot the contours and the ellipsoids of confidence%0AP.subplot(2, 1, 1)%0Aplot_model(dgm, dim)%0A%0AP.subplot(2, 1, 2)%0Aplot_model(fgm, dim)%0A%0AP.show()%0A
|
|
0caef8ed3bcf369ffd61f83b06f971b31ae0fb70
|
test unittest
|
bnw_core/test_delayed_global.py
|
bnw_core/test_delayed_global.py
|
Python
| 0.000001
|
@@ -0,0 +1,325 @@
+# coding: utf-8%0Afrom delayed_global import DelayedGlobal%0A%0Adef test_delayed_global():%0A a = DelayedGlobal()%0A b = dict(%7B100:200%7D)%0A%0A try:%0A c = a.get(100)%0A except AttributeError:%0A pass%0A else:%0A assert 0, %22Got result from empty DelayedGlobal%22%0A %0A a.register(b)%0A assert a.get(100) == 200%0A%0A
|
|
ae6f5556cc37d72fff49c76932b94ac8a65bcfbf
|
make an example python'
|
example.py
|
example.py
|
Python
| 0.001311
|
@@ -0,0 +1,1064 @@
+import time%0Aimport uuid%0Afrom datetime import datetime%0Afrom decimal import Decimal%0A%0Afrom json_util import dumps, loads%0A%0Ajson_ = %7B%22MyString%22: %22a%22,%0A %22num%22: 4,%0A %22MyBool%22: False,%0A %22my_dict%22: %7B%22my_date%22: datetime.utcnow()%7D,%0A %22MyNone%22: None,%0A %22MyZero%22: 0,%0A %22myDecimal%22: Decimal(%2219.2%22), # converts Decimal to float, load it as float%0A %22myLong%22: long(1938475658493),%0A %22MyNestedDict%22: %7B%0A %22my_other_nested%22: %7B%0A %22name%22: %22John%22,%0A %22surname%22: %22Lennon%22,%0A %22MyOtherNone%22: None,%0A %22floaty%22: float(29.4),%0A %22myList%22: %5B1, 3, 4, 5, 6, %22This Is Sparta!%22%5D,%0A %22mySet%22: %7B1, 3, 4, 5, 6%7D, # converts set to list, returns as list%0A %22myUUID%22: uuid.uuid4(), # converts uuid to string, loads it as string%0A %22time%22: time.time() # converts it to seconds python float, loads it as float%0A %7D%0A %7D%0A %7D%0A%0Adynamodb_json = dumps(json_)%0Aprint dynamodb_json%0Aprint loads(dynamodb_json)%0A
|
|
405d33789a70a04ebdcca491ca2a749e9e48ddfd
|
Add example
|
example.py
|
example.py
|
Python
| 0.000003
|
@@ -0,0 +1,1115 @@
+import pygame%0Aimport bluePoV%0A%0A# Varia el color regularmente%0Ax,y = (480,64)%0Apendiente = 4%0A%0A# Pygame inits & variables%0Apygame.init()%0Apygame.display.set_mode((x,y))%0A%0Adisp = pygame.display.get_surface()%0Aclock = pygame.time.Clock()%0A%0A# BluePoV init & variables%0Aprint (%22Port? (default /dev/ttyUSB0)%22)%0Aport = input()%0Aif not port:%0A port = %22/dev/ttyUSB0%22%0A%0Asckt = bluePoV.SerialSocket()%0Asckt.connect(port,115200)%0A%0Adriver = bluePoV.Driver(sckt,%5Bx,y%5D,depth=1)%0A%0A# Colores%0Ar = 0%0Ag = 255%0Ab = 0%0A%0A# # Pendientes%0ApR = pendiente%0ApG = 0%0ApB = 0%0A%0Awhile True:%0A for event in pygame.event.get():%0A if event.type == pygame.QUIT:%0A quit()%0A elif event.type == pygame.KEYDOWN:%0A if event.key == pygame.K_q:%0A quit()%0A%0A r += pR%0A g += pG%0A b += pB%0A%0A if 255 %3C r or r %3C 0 or 255 %3C g or g %3C 0 or 255 %3C b or b %3C 0:%0A r = 255 if r %3E= 255 else 0%0A g = 255 if g %3E= 255 else 0%0A b = 255 if b %3E= 255 else 0%0A%0A pTemp = pB%0A pB = -pG%0A pG = -pR%0A pR = -pTemp%0A%0A disp.fill(%5Br,g,b%5D)%0A%0A driver.blit(disp)%0A pygame.display.flip()%0A%0A%0A clock.tick(10)%0A
|
|
3a200bbc447ee05c650bbd592a331b2817c9a498
|
Update create_address_doc_from_address_field_in_company.py
|
erpnext/patches/v8_0/create_address_doc_from_address_field_in_company.py
|
erpnext/patches/v8_0/create_address_doc_from_address_field_in_company.py
|
# Copyright (c) 2017, Frappe and Contributors
# License: GNU General Public License v3. See license.txt

from __future__ import unicode_literals
import frappe

def execute():
	# new field address_html is created in place of address field for the company's address in PR #8754 (without patch)
	# so here is the patch for moving the address details in the address doc

	company_list = []
	if 'address' in frappe.db.get_table_columns('Company'):
		company_list = frappe.db.sql('''select name, address from `tabCompany` where address is not null''', as_dict=1)

	for company in company_list:
		add_list = company.address.split(" ")
		if ',' in company.address:
			add_list = company.address.rpartition(',')
		elif ' ' in company.address:
			add_list = company.address.rpartition(' ')
		else:
			add_list = [company.address, None, company.address]

		doc = frappe.get_doc({
			"doctype":"Address",
			"address_line1": add_list[0],
			"city": add_list[2],
			"links": [{
				"link_doctype": "Company",
				"link_name": company.name
			}]
		})
		doc.save()
|
Python
| 0.000004
|
@@ -508,16 +508,20 @@
ompany%60
+%0A%09%09%09
where ad
@@ -537,16 +537,34 @@
not null
+ and address != %22%22
''', as_
|
a6c8176e3f4602e846888293093fc64b7b20233b
|
Add cmd to force send of cancelled repeat records
|
corehq/motech/repeaters/management/commands/send_cancelled_records.py
|
corehq/motech/repeaters/management/commands/send_cancelled_records.py
|
Python
| 0
|
@@ -0,0 +1,3264 @@
+import csv%0Aimport datetime%0Aimport re%0Aimport time%0A%0Afrom django.core.management.base import BaseCommand%0A%0Afrom corehq.motech.repeaters.const import RECORD_CANCELLED_STATE%0Afrom corehq.motech.repeaters.dbaccessors import iter_repeat_records_by_domain%0A%0A%0Aclass Command(BaseCommand):%0A help = %22%22%22%0A Send cancelled repeat records. You may optionally specify a regex to%0A filter records using --include or --exclude, an a sleep time with --sleep%0A %22%22%22%0A%0A def add_arguments(self, parser):%0A parser.add_argument('domain')%0A parser.add_argument('repeater_id')%0A parser.add_argument(%0A '--include',%0A dest='include_regex',%0A help=(%22Regex that will be applied to a record's 'failure_reason' to %22%0A %22determine whether to include it.%22),%0A )%0A parser.add_argument(%0A '--exclude',%0A dest='exclude_regex',%0A help=(%22Regex that will be applied to a record's 'failure_reason' to %22%0A %22determine whether to exclude it.%22),%0A )%0A parser.add_argument(%0A '--sleep',%0A dest='sleep_time',%0A help=%22Time in seconds to sleep between each request.%22,%0A )%0A%0A def handle(self, domain, repeater_id, *args, **options):%0A sleep_time = options.get('sleep_time')%0A include_regex = options.get('include_regex')%0A exclude_regex = options.get('exclude_regex')%0A if include_regex and exclude_regex:%0A print %22You may not specify both include and exclude%22%0A%0A def meets_filter(record):%0A if include_regex:%0A if not record.failure_reason:%0A return False%0A return bool(re.search(include_regex, record.failure_reason))%0A elif exclude_regex:%0A if not record.failure_reason:%0A return True%0A return not bool(re.search(exclude_regex, record.failure_reason))%0A return True # No filter applied%0A%0A records = filter(%0A meets_filter,%0A iter_repeat_records_by_domain(domain, repeater_id=repeater_id, state=RECORD_CANCELLED_STATE)%0A )%0A%0A total_records = len(records)%0A print %22Found %7B%7D matching records. Requeue them?%22.format(total_records)%0A if not raw_input(%22(y/n)%22) == 'y':%0A print %22Aborting%22%0A return%0A%0A log = %5B('record_id', 'payload_id', 'state', 'failure_reason')%5D%0A for i, record in enumerate(records):%0A try:%0A record.fire(force_send=True)%0A except Exception as e:%0A print %22%7B%7D/%7B%7D: %7B%7D %7B%7D%22.format(i, total_records, 'EXCEPTION', repr(e))%0A log.append((record._id, record.payload_id, record.state, repr(e)))%0A else:%0A print %22%7B%7D/%7B%7D: %7B%7D%22.format(i, total_records, record.state)%0A log.append((record._id, record.payload_id, record.state, record.failure_reason))%0A if sleep_time:%0A time.sleep(float(sleep_time))%0A%0A filename = %22sent_repeat_records-%7B%7D.csv%22.format(datetime.datetime.utcnow().isoformat())%0A print %22Writing log of changes to %7B%7D%22.format(filename)%0A with open(filename, 'w') as f:%0A writer = csv.writer(f)%0A writer.writerows(log)%0A
|
|
bf66372b2b5b49ba4a93d8ac4f573ceb7857f5b8
|
Fix attach in case of multiple threads.
|
python/helpers/pydev/pydevd_attach_to_process/linux/lldb_threads_settrace.py
|
python/helpers/pydev/pydevd_attach_to_process/linux/lldb_threads_settrace.py
|
# This file is meant to be run inside lldb as a command after
# the attach_linux.dylib dll has already been loaded to settrace for all threads.
def __lldb_init_module(debugger, internal_dict):
    # Command Initialization code goes here
    print('Startup LLDB in Python!')
    try:
        show_debug_info = 0
        is_debug = 0

        target = debugger.GetSelectedTarget()
        if target:
            process = target.GetProcess()
            if process:
                for t in process:
                    # Get the first frame
                    frame = t.GetFrameAtIndex (t.GetNumFrames()-1)
                    if frame:
                        print('Will settrace in: %s' % (frame,))
                        frame.EvaluateExpression("expr (int) SetSysTraceFunc(%s, %s);" % (
                            show_debug_info, is_debug))
    except:
        import traceback;traceback.print_exc()
|
Python
| 0
|
@@ -266,16 +266,32 @@
ython!')
+%0A import lldb
%0A%0A tr
@@ -319,17 +319,17 @@
_info =
-0
+1
%0A
@@ -494,16 +494,21 @@
for t
+hread
in proc
@@ -578,55 +578,351 @@
-frame = t.GetFrameAtIndex (t.GetNum
+print('Thread %25s, suspended %25s%5Cn'%25(thread, thread.IsStopped()))%0A%0A process.SetSelectedThread(thread)%0A%0A if not thread.IsStopped():%0A error = process.Stop()%0A print(error)%0A%0A if thread:%0A frame = thread.GetSelected
Frame
-s
()
--1)%0A
+%0A
@@ -937,16 +937,18 @@
+
if frame
@@ -949,16 +949,20 @@
frame:%0A
+
@@ -1037,24 +1037,34 @@
+ res =
frame.Evalu
@@ -1082,13 +1082,8 @@
on(%22
-expr
(int
@@ -1111,15 +1111,18 @@
%25s)
-;
%22 %25 (%0A
+
@@ -1171,16 +1171,213 @@
s_debug)
+, lldb.eDynamicCanRunTarget)%0A error = res.GetError()%0A if error:%0A print(error)%0A thread.Resume(
)%0A ex
|
1f6595acc01c6dfda899886388b4309f3d8c855b
|
add index to fixed_ips
|
nova/db/sqlalchemy/migrate_repo/versions/139_add_indexes_to_fixed_ips.py
|
nova/db/sqlalchemy/migrate_repo/versions/139_add_indexes_to_fixed_ips.py
|
Python
| 0.000001
|
@@ -0,0 +1,1429 @@
+# vim: tabstop=4 shiftwidth=4 softtabstop=4%0A%0A# Copyright 2012 OpenStack LLC.%0A# All Rights Reserved.%0A#%0A# Licensed under the Apache License, Version 2.0 (the %22License%22); you may%0A# not use this file except in compliance with the License. You may obtain%0A# a copy of the License at%0A#%0A# http://www.apache.org/licenses/LICENSE-2.0%0A#%0A# Unless required by applicable law or agreed to in writing, software%0A# distributed under the License is distributed on an %22AS IS%22 BASIS, WITHOUT%0A# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the%0A# License for the specific language governing permissions and limitations%0A# under the License.%0A%0Afrom sqlalchemy import Index, MetaData, Table%0Afrom sqlalchemy.exc import IntegrityError%0A%0A%0Adef upgrade(migrate_engine):%0A meta = MetaData()%0A meta.bind = migrate_engine%0A%0A t = Table('fixed_ips', meta, autoload=True)%0A%0A # Based on fixed_ip_delete_associate%0A # from: nova/db/sqlalchemy/api.py%0A i = Index('fixed_ips_deleted_allocated_idx',%0A t.c.address, t.c.deleted, t.c.allocated)%0A try:%0A i.create(migrate_engine)%0A except IntegrityError:%0A pass%0A%0A%0Adef downgrade(migrate_engine):%0A meta = MetaData()%0A meta.bind = migrate_engine%0A%0A t = Table('fixed_ips', meta, autoload=True)%0A%0A i = Index('fixed_ips_deleted_allocated_idx',%0A t.c.address, t.c.deleted, t.c.allocated)%0A i.drop(migrate_engine)%0A
|
|
5e8ce7c9fb31d76ec5f372bb0b62e7b846304966
|
Create exerc-4.py
|
exerc-4.py
|
exerc-4.py
|
Python
| 0.000001
|
@@ -0,0 +1,503 @@
+arquivo = open(%22arquivo.txt%22,%22w%22)%0Afor i in range(1):%0A arquivo.write(%22Atividade 4 %22)%0Aarquivo.close()%0A%0A%0A%0Aarquivo = open(%22arquivo.txt%22,%22r%22)%0Afor linha in arquivo:%0A print(%22Texto: %22,linha)%0Aarquivo.close()%0A%0A%0A%0Aarquivo = open(%22arquivo.txt%22, %22r%22)%0Acopia = open(%22copia.txt%22, %22w%22)%0Awhile 1:%0A texto = arquivo.read(50)%0A if texto == %22%22:%0A break%0A copia.write(texto)%0Aarquivo.close()%0Acopia.close()%0A%0A%0A%0Acopia = open(%22copia.txt%22,%22r%22)%0Afor linha in copia:%0A print(%22Texto copiado: %22,linha)%0Aarquivo.close()%0A
|
|
98a6a1dfe9b692cfde47e25a504d2a9ee80bcf29
|
remove unnecessary import
|
planetstack/observer/event_manager.py
|
planetstack/observer/event_manager.py
|
import threading
import requests, json

from core.models import *
from planetstack.config import Config
from observer.deleters import deleters

import os
import base64
from fofum import Fofum
import json

# decorator that marks dispatachable event methods
def event(func):
    setattr(func, 'event', func.__name__)
    return func

class EventHandler:
    # This code is currently not in use.
    def __init__(self):
        pass

    @staticmethod
    def get_events():
        events = []
        for name in dir(EventHandler):
            attribute = getattr(EventHandler, name)
            if hasattr(attribute, 'event'):
                events.append(getattr(attribute, 'event'))
        return events

    def dispatch(self, event, *args, **kwds):
        if hasattr(self, event):
            return getattr(self, event)(*args, **kwds)

class EventSender:
    def __init__(self,user=None,clientid=None):
        try:
            clid = Config().feefie_client_id
            user = Config().feefie_client_user
        except:
            clid = 'planetstack_core_team'
            user = 'pl'

        self.fofum = Fofum(user=user)
        self.fofum.make(clid)

    def fire(self,**args):
        self.fofum.fire(json.dumps(args))

class EventListener:
    def __init__(self,wake_up=None):
        self.handler = EventHandler()
        self.wake_up = wake_up

    def handle_event(self, payload):
        payload_dict = json.loads(payload)

        try:
            deletion = payload_dict['deletion_flag']
            if (deletion):
                model = payload_dict['model']
                pk = payload_dict['pk']

                for deleter in deleters[model]:
                    deleter(pk)
        except:
            deletion = False

        if (not deletion and self.wake_up):
            self.wake_up()

    def run(self):
        # This is our unique client id, to be used when firing and receiving events
        # It needs to be generated once and placed in the config file
        try:
            clid = Config().feefie_client_id
            user = Config().feefie_client_user
        except:
            clid = 'planetstack_core_team'
            user = 'pl'

        f = Fofum(user=user)

        listener_thread = threading.Thread(target=f.listen_for_event,args=(clid,self.handle_event))
        listener_thread.start()
|
Python
| 0.000037
|
@@ -37,34 +37,8 @@
on%0A%0A
-from core.models import *%0A
from
|
a18ef3eb9128ee27d4f14e7952ba8545b510c4ac
|
add e2e test file
|
e2e_test.py
|
e2e_test.py
|
Python
| 0.000001
|
@@ -0,0 +1,778 @@
+# Copyright 2015, Google, Inc.%0A# Licensed under the Apache License, Version 2.0 (the %22License%22); you may not use%0A# this file except in compliance with the License. You may obtain a copy of the%0A# License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable%0A# law or agreed to in writing, software distributed under the License is distributed%0A# on an %22AS IS%22 BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express%0A# or implied. See the License for the specific language governing permissions and%0A# limitations under the License.%0A%0A%0Aimport urllib2%0Aimport logging%0A%0AHOST='http://fill-app.appspot.com'%0A%0A# %5BSTART e2e%5D%0Aresponse = urllib2.urlopen(%22%7B%7D/get_author/ulysses%22.format(HOST))%0Ahtml = response.read()%0Aassert(html == %22James Joyce%22)%0A# %5BEND e2e%5D%0A%0A%0A
|
|
5695a468f8619ea6bb7c9a01857a375f827de6a1
|
Add parser for 0.4.6 XML report.
|
libptp/tools/arachni/parser.py
|
libptp/tools/arachni/parser.py
|
Python
| 0
|
@@ -0,0 +1,1460 @@
+from libptp.exceptions import NotSupportedVersionError%0Afrom libptp.info import Info%0Afrom libptp.parser import AbstractParser%0A%0A%0Aclass ArachniXMLParser(AbstractParser):%0A%0A __tool__ = 'arachni'%0A __format__ = 'xml'%0A __version__ = %5B'0.4.6'%5D%0A%0A def __init__(self, *args, **kwargs):%0A AbstractParser.__init__(self, *args, **kwargs)%0A%0A @classmethod%0A def is_mine(cls, stream):%0A %22%22%22Check if it is a supported report.%22%22%22%0A if not cls.__tool__ in stream.tag:%0A return False%0A return True%0A%0A def parse_metadata(self, stream):%0A %22%22%22Parse the metadatas of the report.%22%22%22%0A # Find the version of Arachni.%0A version = stream.find('.//version')%0A # Reconstruct the metadata%0A # TODO: Retrieve the other metadata likes the date, etc.%0A metadata = %7Bversion.tag: version.text,%7D%0A if self.check_version(metadata):%0A return metadata%0A else:%0A raise NotSupportedVersionError(%0A 'PTP does NOT support this version of Arachni.')%0A%0A def parse_report(self, stream, scale):%0A %22%22%22Parse the report.%22%22%22%0A res = %5B%5D%0A vulns = stream.find('.//issues')%0A for vuln in vulns.findall('.//issue'):%0A info = Info(%0A # Convert the severity of the issue thanks to an unified%0A # ranking scale.%0A ranking=scale%5Bvuln.find('.//severity').text%5D,)%0A res.append(info)%0A return res%0A
|
|
8e9c2a3c31184e789bf2788f5fa0ab06e0db988f
|
Add utilities module
|
sufam/utils.py
|
sufam/utils.py
|
Python
| 0.000001
|
@@ -0,0 +1,372 @@
+import errno%0Aimport os%0Aimport shutil%0Aimport sys%0A%0A%0Adef mkdir_p(path):%0A try:%0A os.makedirs(path)%0A except OSError as exc:%0A if exc.errno == errno.EEXIST and os.path.isdir(path):%0A pass%0A else:%0A raise%0A%0A%0Adef rm_rf(path):%0A if os.path.isdir(path):%0A shutil.rmtree(path)%0A elif os.path.exists(path):%0A os.remove(path)%0A
|
|
469d2bde4420017d2361ebcebfe9506517d041ae
|
fix typo in ir_cron
|
bin/addons/base/ir/ir_cron.py
|
bin/addons/base/ir/ir_cron.py
|
##############################################################################
#
# Copyright (c) 2004-2006 TINY SPRL. (http://tiny.be) All Rights Reserved.
#                    Fabien Pinckaers <fp@tiny.Be>
#
# WARNING: This program as such is intended to be used by professional
# programmers who take the whole responsability of assessing all potential
# consequences resulting from its eventual inadequacies and bugs
# End users who are looking for a ready-to-use solution with commercial
# garantees and support are strongly adviced to contract a Free Software
# Service Company
#
# This program is Free Software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License
# as published by the Free Software Foundation; either version 2
# of the License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA  02111-1307, USA.
#
##############################################################################
#
# SPEC: Execute "model.function(*eval(args))" periodically
#   date        : date to execute the job or NULL if directly
#   delete_after: delete the ir.cron entry after execution
#   interval_*  : period
#   max_repeat  : number of execution or NULL if endlessly
#
# TODO:
#   Error treatment: exception, request, ... -> send request to uid
#

from mx import DateTime
import time

import netsvc
import tools
import pooler
from osv import fields,osv

next_wait = 60

_intervalTypes = {
	'work_days': lambda interal: DateTime.RelativeDateTime(days=interval),
	'days': lambda interval: DateTime.RelativeDateTime(days=interval),
	'hours': lambda interval: DateTime.RelativeDateTime(hours=interval),
	'weeks': lambda interval: DateTime.RelativeDateTime(days=7*interval),
	'months': lambda interval: DateTime.RelativeDateTime(months=interval),
	'minutes': lambda interval: DateTime.RelativeDateTime(minutes=interval),
}

class ir_cron(osv.osv, netsvc.Agent):
	_name = "ir.cron"
	_columns = {
		'name': fields.char('Name', size=60, required=True),
		'user_id': fields.many2one('res.users', 'User', required=True),
		'active': fields.boolean('Active'),
		'interval_number': fields.integer('Interval Number'),
		'interval_type': fields.selection( [('minutes', 'Minutes'),
			('hours', 'Hours'), ('days', 'Days'),('weeks', 'Weeks'), ('months', 'Months')], 'Interval Unit'),
		# number of time the function is called, a negative number
		# indicates that the function will always be called.
		'numbercall': fields.integer('Number of calls'),
		# Repeat missed cronjobs ?
		'doall' : fields.boolean('Repeat all missed'),
		'nextcall' : fields.datetime('Next call date', required=True),
		'model': fields.char('Model', size=64),
		'function': fields.char('Function', size=64),
		'args': fields.text('Arguments'),
		# 0 = Very Urgent, 10 = not urgent
		'priority': fields.integer('Priority (0=Very Urgent)')
	}
	_defaults = {
		'nextcall' : lambda *a: time.strftime('%Y-%m-%d %H:%M:%S'),
		'priority' : lambda *a: 5,
		'user_id' : lambda obj,cr,uid,context: uid,
		'interval_number' : lambda *a: 1,
		'interval_type' : lambda *a: 'months',
		'numbercall' : lambda *a: 1,
		'active' : lambda *a: 1,
		'doall' : lambda *a: 1
	}

	def _callback(self, cr, uid, model, func, args):
		args = (args or []) and eval(args)
		m=self.pool.get(model)
		if m and hasattr(func, m):
			f = getattr(m, func)
			f(cr, uid, *args)

	def _poolJobs(self, db_name, check=False):
		now = DateTime.now()
		#FIXME: multidb. Solution: a l'instanciation d'une nouvelle connection bd (ds pooler) fo que j'instancie
		# un nouveau pooljob avec comme parametre la bd
		try:
			cr = pooler.get_db(db_name).cursor()
		except:
			return False

		try:
			cr.execute('select * from ir_cron where numbercall<>0 and active and nextcall<=now() order by priority')
			for job in cr.dictfetchall():
				nextcall = DateTime.strptime(job['nextcall'], '%Y-%m-%d %H:%M:%S')
				numbercall = job['numbercall']

				ok = False
				while nextcall<now and numbercall:
					if numbercall > 0:
						numbercall -= 1
					if not ok or job['doall']:
						self._callback(cr, job['user_id'], job['model'], job['function'], job['args'])
					if numbercall:
						nextcall += _intervalTypes[job['interval_type']](job['interval_number'])
					ok = True

				addsql=''
				if not numbercall:
					addsql = ', active=False'
				cr.execute("update ir_cron set nextcall=%s, numbercall=%d"+addsql+" where id=%d", (nextcall.strftime('%Y-%m-%d %H:%M:%S'), numbercall, job['id']))
				cr.commit()
		finally:
			cr.close()

		#
		# Can be improved to do at the min(min(nextcalls), time()+next_wait)
		# But is this an improvement ?
		#
		if not check:
			self.setAlarm(self._poolJobs, int(time.time())+next_wait, [db_name])

#	def __init__(self):
#		super(ir_cron, self).__init__()

ir_cron()
|
Python
| 0.999802
|
@@ -3689,15 +3689,15 @@
ttr(
+m,
func
-, m
):%0A%09
|
7d23360e8df0659cf506cae3ecd72eeddfb0adf6
|
Fix local deploy (doesn't require it to be a git repo) and do proper fab_shared upload to s3.
|
fabfile.py
|
fabfile.py
|
#!/usr/bin/env python
import os

from fabric.api import env, abort, require, settings, runs_once, prompt, get
from fabric.contrib.console import confirm

from fab_shared import (_find_unit_root, _development, _production, _localhost,
        _clone, _make_release, TIME_NOW, _make_archive,
        _conditional_upload_to_s3, S3_KEY, local, put, run, sudo,
        EC2_CONNECTION, ELB_CONNECTION)
import time

env.unit = "chef"
env.user_data = "deployment/chef.user-data"
env.scm = "git@github.com:bueda/chef"
env.root_dir = _find_unit_root(os.path.abspath(os.path.dirname(__file__)))
env.scratch_path = '/tmp/%s-%s' % (env.unit, TIME_NOW)

def development():
    """
    [Env] Sets environment for development server.
    """
    _development()
    env.tagged = False
    env.security_groups = ["development", "ssh", "database-client"]
    env.key_name = "development"
    env.chef_configs = ["common", "common-web", "dev", "lda", "solr"]

def production():
    """
    [Env] Sets environment for production servers behind load balancer.
    """
    _production()
    env.tagged = False
    env.security_groups = ["production", "ssh", "database-client"]
    env.key_name = "production"
    env.chef_configs = ["common", "common-web", "production"]

def localhost():
    """
    [Env] Sets environment for this machine, without using SSH.
    """
    _localhost()
    env.tagged = False
    env.chef_configs = ["common", "common-web", "dev", "lda", "solr"]

def deploy(release=None):
    """
    Deploy a specific commit, tag or HEAD to all servers and/or S3.
    """
    require('hosts', provided_by = [development, production])
    require('unit')

    deploy_fabfile()

    _clone(release)
    _make_release(release)
    require('pretty_release')
    require('archive')

    if test(env.scratch_path):
        abort("Unit tests did not pass")

    require('pretty_release')
    _conditional_upload_to_s3()

    if confirm("Re-Chef?", default=True):
        rechef(release=env.release)

def deploy_fabfile():
    require('hosts', provided_by = [development, production])
    print "Deploying shared fabfile..."
    put('fab_shared.py', '/tmp', mode=0755)
    sudo('mv /tmp/fab_shared.py /root')
    # TODO this will never happen, since it's not tagged
    _conditional_upload_to_s3('fab_shared.py')

def rechef(release=None):
    """
    Run the latest commit of the Chef cookbook on all servers.
    """
    require('chef_configs', provided_by=[development, production])
    require('tagged')

    archive_path = '/tmp/chef-%s.tar.gz' % TIME_NOW
    if (not env.tagged and
            confirm("Re-chef with production cookbook?", default=True)):
        S3_KEY.key = '%(unit)s.tar.gz' % env
        S3_KEY.get_contents_to_filename('/tmp/%(unit)s.tar.gz' % env)
    else:
        if not release:
            env.release = prompt("Chef commit or tag?", default='HEAD')
        _clone()
        _make_archive()
        require('scratch_path')
        require('archive')
        local('mv %s/%s %s' % (env.scratch_path, env.archive, archive_path))

    put(archive_path, '/tmp', mode=0777)
    run('tar -xzf %s -C /tmp' % archive_path)

    _run_chef_solo('base')
    for config in env.chef_configs:
        _run_chef_solo(config)

    run('rm -rf /tmp/%(unit)s' % env)

def _run_chef_solo(config):
    env.config = config
    with settings(warn_only=True):
        result = sudo("""
            cd /tmp/%(unit)s;
            /var/lib/gems/1.8/bin/chef-solo \
                -j /tmp/%(unit)s/config/%(config)s.json \
                -c /tmp/%(unit)s/config/solo.rb
            """ % env)
    if result.failed:
        abort("Chef run failed, %s" % result)

@runs_once
def spawn_ec2_instance():
    """
    Create a new server instance, different for each environment.
    """
    require('ami', provided_by=[production, development])
    require('region', provided_by=[production, development])
    require('user_data', provided_by=[production, development])
    require('security_groups', provided_by=[production, development])
    require('key_name', provided_by=[production, development])

    print "Launching instance with image %s" % env.ami
    image = EC2_CONNECTION.get_image(env.ami)
    print "Found AMI image image %s" % image
    user_data_file = open(env.user_data, "rb").read()
    instance = image.run(security_groups=env.security_groups,
            user_data=user_data_file,
            key_name=env.key_name).instances[0]
    print "%s created" % instance
    time.sleep(5)
    while instance.update() != 'running':
        time.sleep(20)
        print "%s is %s" % (instance, instance.state)
    print "Public DNS: %s" % instance.dns_name
    instance.monitor()

    print "Waiting for Chef to finish bootstrapping the instance..."
    time.sleep(350)
    with settings(hosts=["%s:%d" % (instance.dns_name, env.ssh_port)]):
        get('/tmp/CHEF-STATUS', '/tmp/CHEF-STATUS')
    status = open("/tmp/CHEF-STATUS", "rb").read()
    if status[0] != "0":
        abort("Chef exited with non-zero status %s" % status)
    print("Chef bootstrapping completed successfully")

    with settings(hosts=["%s:%d" % (instance.dns_name, env.ssh_port)]):
        rechef()

    if (env.key_name == "production"
            and confirm("Attach to load balancer? Test before saying yes!",
                default=False)):
        status = ELB_CONNECTION.register_instances(env.load_balancer,
                [instance.id])
        print("Status of attaching %s to load balancer %s was %s"
                % (instance.id, env.load_balancer, status))

def test(dir=None):
    if not dir:
        dir = env.root_dir
    with settings(root_dir=dir):
        return local('rake' % env, capture=False).return_code
|
Python
| 0
|
@@ -315,16 +315,31 @@
d_to_s3,
+ _upload_to_s3,
S3_KEY,
@@ -2209,77 +2209,8 @@
-# TODO this will never happen, since it's not tagged%0A _conditional
_upl
|
efb6072e097a816bb46fdd83541c763e222816c9
|
Add initial tests for the concordance view
|
clic/dickens/test_concordance.py
|
clic/dickens/test_concordance.py
|
Python
| 0
|
@@ -0,0 +1,1257 @@
+import unittest%0Afrom concordance_new import Concordancer_New%0A%0A%0Aclass TestConcordancerNewChapterIndex(unittest.TestCase):%0A %0A def test_create_concordance(self):%0A %22%22%22%0A This is a very naive test to run whilst reviewing the create %0A concordance code. It's goal is simply to evaluate whether that%0A function is still up an running.%0A%0A For that purpose it uses a hard-coded example%0A %22%22%22%0A concordance = Concordancer_New()%0A fog = concordance.create_concordance(terms=%22fog%22, %0A%09%09%09%09%09 idxName=%22chapter-idx%22, %0A%09%09%09%09%09 Materials=%5B%22dickens%22%5D, %0A%09%09%09%09%09 selectWords=%22whole%22)%0A%0A%09assert len(fog) == 95 %09# 94 hits + one variable total_count in the list%0A%0A%0Aclass TestConcordancerNewQuoteIndex(unittest.TestCase):%0A%0A def test_create_concordance(self):%0A %22%22%22%0A This is another naive test focusing on searching in quotes %0A%0A It also uses a hard-coded example%0A %22%22%22%0A concordance = Concordancer_New()%0A maybe = concordance.create_concordance(terms=%22maybe%22, %0A%09%09%09%09%09 idxName=%22quote-idx%22, %0A%09%09%09%09%09 Materials=%5B%22dickens%22%5D, %0A%09%09%09%09%09 selectWords=%22whole%22)%0A%0A%09assert len(maybe) == 46 # 45 hits + one variable total_count in the list%0A%0A%0Aif __name__ == '__main__':%0A unittest.main()%0A
|
|
915f94433171c9ee73fa5cf5941c695590e6ee16
|
add fabric config file
|
fabfile.py
|
fabfile.py
|
Python
| 0.000001
|
@@ -0,0 +1,1550 @@
+import os, re%0Afrom datetime import datetime%0A%0Afrom fabric.api import *%0A%0A%0A#server user name%0Aenv.user = 'zhengnan'%0A%0A# sudo user%0Aenv.sudo_user = 'root'%0A%0A# server address%0Aenv.hosts = %5B'192.168.56.103'%5D%0A%0Adb_user = 'www-data'%0Adb_password = 'www-data'%0A%0A_TAR_FILE = 'dist-awesome.tar.gz'%0A%0Adef build():%0A includes = %5B'static', 'templates', 'favicon.ico', '*.py'%5D%0A excludes = %5B'test', '.*', '*.pyc', '*.pyo'%5D%0A local('rm -f dist/%25s' %25 _TAR_FILE)%0A %0A with lcd(os.path.join(os.path.abspath('.'), 'www')):%0A%09cmd = %5B'tar', '--dereference', '-czvf', '../dist/%25s' %25 _TAR_FILE%5D%0A%09cmd.extend(%5B'--exclude=%5C'%25s%5C'' %25 ex for ex in excludes%5D)%0A%09cmd.extend(includes)%0A%09local(' '.join(cmd))%0A%0A%0A_REMOTE_TMP_TAR = '/tmp/%25s' %25 _TAR_FILE%0A_REMOTE_BASE_DIR = '/srv/awesome'%0A%0Adef deploy():%0A newdir = 'www-%25s' %25 datetime.now().strftime('%25y-%25m-%25d_%25H.%25M.%25S')%0A # remove exist tar file%0A run('rm -f %25s' %25 _REMOTE_TMP_TAR)%0A # upload new tar file%0A put('dist/%25s' %25 _TAR_FILE, _REMOTE_TMP_TAR)%0A # make new dir%0A with cd(_REMOTE_BASE_DIR):%0A%09sudo('mkdir %25s' %25 newdir)%0A%0A # unzip tar to new directory%0A with cd('%25s/%25s' %25 _REMOTE_TMP_TAR):%0A%09sudo('tar -xzvf %25s' %25 _REMOTE_TMP_TAR)%0A%0A # re-set the soft link%0A with cd(_REMOTE_BASE_DIR):%0A sudo('rm -f www')%0A%09sudo('ln -s %25s www' %25 newdir)%0A%09sudo('chown www-data:www-data www')%0A%09sudo('chown -R www-data:www-data %25s' %25 newdir)%0A%0A # restart python and nginx service%0A with settings(warn_only=True):%0A%09sudo('supervisorctl stop awesome')%0A%09sudo('supervisorctl start awesome')%0A%09sudo('/etc/init.d/nginx reload')%0A %0A
|
|
9f5bc55f7cfc5b6d0ee3ee9d6ad8a5317e1fa62b
|
Move all constants here
|
consts.py
|
consts.py
|
Python
| 0.000003
|
@@ -0,0 +1,144 @@
+## Constants%0A%0A# Lat / long bounding box of City of Portland%0A# (-123.0, 44.0, -122.0, 45.0) ??%0APDX_BOUNDING_BOX = (-122.9, 45.35, -122.4, 45.7)%0A%0A
|
|
7185136cf4322397803eabe805ab4e818197edf8
|
Add wsgi startup script
|
core.wsgi
|
core.wsgi
|
Python
| 0.000001
|
@@ -0,0 +1,235 @@
+import os%0Aimport sys%0A%0Aroot = os.path.dirname(__file__)%0Asys.path.insert(0, root)%0A%0Afrom core import app as application%0Afrom core import index%0Afrom core.handlers.search import *%0Afrom core.handlers.regulation import *%0A%0Aindex.init_schema()%0A
|
|
a76f2bfea735f6b452785185ebc257d1da179ec4
|
Add tests (#5451)
|
core/platform/taskqueue/gae_taskqueue_services_test.py
|
core/platform/taskqueue/gae_taskqueue_services_test.py
|
Python
| 0
|
@@ -0,0 +1,1744 @@
+# coding: utf-8%0A#%0A# Copyright 2018 The Oppia Authors. All Rights Reserved.%0A#%0A# Licensed under the Apache License, Version 2.0 (the %22License%22);%0A# you may not use this file except in compliance with the License.%0A# You may obtain a copy of the License at%0A#%0A# http://www.apache.org/licenses/LICENSE-2.0%0A#%0A# Unless required by applicable law or agreed to in writing, software%0A# distributed under the License is distributed on an %22AS-IS%22 BASIS,%0A# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.%0A# See the License for the specific language governing permissions and%0A# limitations under the License.%0A%0A%22%22%22Tests for the GAE taskqueue API wrapper.%22%22%22%0A%0Aimport json%0Aimport operator%0A%0Afrom core.platform.taskqueue import gae_taskqueue_services as taskqueue_services%0Afrom core.tests import test_utils%0Aimport feconf%0A%0Afrom google.appengine.ext import deferred%0A%0A%0Aclass TaskQueueTests(test_utils.GenericTestBase):%0A %22%22%22Tests for taskqueue-related operations.%22%22%22%0A%0A def test_defer(self):%0A taskqueue_services.defer(%0A operator.add, taskqueue_services.QUEUE_NAME_DEFAULT, 1, 2)%0A%0A tasks = self.taskqueue_stub.get_filtered_tasks()%0A self.assertEqual(len(tasks), 1)%0A%0A result = deferred.run(tasks%5B0%5D.payload)%0A self.assertEqual(result, 3)%0A%0A def test_enqueue_email_task(self):%0A payload = %7B%0A 'param1': 1,%0A 'param2': 2,%0A %7D%0A%0A taskqueue_services.enqueue_email_task(%0A feconf.TASK_URL_FLAG_EXPLORATION_EMAILS, payload, 0)%0A tasks = self.taskqueue_stub.get_filtered_tasks(%0A queue_names=taskqueue_services.QUEUE_NAME_EMAILS)%0A self.assertEqual(len(tasks), 1)%0A self.assertEqual(tasks%5B0%5D.payload, json.dumps(payload))%0A
|
|
e0085ac404e3c811b48e60e0acf088889dc72248
|
Fix synth script
|
tasks/synth.py
|
tasks/synth.py
|
# Copyright 2018 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""This script is used to synthesize generated parts of this library."""
import synthtool as s
import synthtool.gcp as gcp
import logging
logging.basicConfig(level=logging.DEBUG)
gapic = gcp.GAPICGenerator()
common = gcp.CommonTemplates()
# tasks has two product names, and a poorly named artman yaml
v2beta2_library = gapic._generate_code(
'tasks', 'v2beta2', 'python',
config_path='artman_cloudtasks.yaml',
artman_output_name='cloud-tasks-v2beta2')
s.copy(v2beta2_library)
# Set Release Status
release_status = 'Development Status :: 3 - Alpha'
s.replace('setup.py',
'(release_status = )(.*)$',
f"\\1'{release_status}'")
# Add Dependencies
s.replace('setup.py',
'dependencies = \[\n*(^.*,\n)+',
"\\g<0> 'grpc-google-iam-v1<0.12dev,>=0.11.4',\n")
# Correct Naming of package
s.replace('**/*.rst',
'google-cloud-cloud-tasks',
'google-cloud-tasks')
s.replace('**/*.py',
'google-cloud-cloud-tasks',
'google-cloud-tasks')
s.replace('README.rst',
'/cloud-tasks',
'/tasks')
# Correct calls to routing_header
# https://github.com/googleapis/gapic-generator/issues/2016
s.replace(
"google/cloud/*/gapic/*_client.py",
"routing_header\(",
"routing_header.to_grpc_metadata(")
# metadata in tests in none but should be empty list.
# https://github.com/googleapis/gapic-generator/issues/2014
s.replace(
"google/cloud/*/gapic/*_client.py",
'def .*\(([^\)]+)\n.*metadata=None\):\n\s+"""(.*\n)*?\s+"""\n',
'\g<0>'
' if metadata is None:\n'
' metadata = []\n'
' metadata = list(metadata)\n')
# empty objects trying to get attrs
# https://github.com/googleapis/gapic-generator/issues/2015
s.replace(
"google/cloud/*/gapic/*_client.py",
"(^ )(routing_header = google.api_core.gapic_v1.routing_header"
".to_grpc_metadata\(\n)"
"(\s+)(\[\('[a-z\_]*?\.name', )([a-z\_]*?)(.name\)\], \)\n)"
"(\s+metadata.append\(routing_header\)\n)",
"\g<1>if hasattr(\g<5>, 'name'):\n"
"\g<1> \g<2>\g<3> \g<4>\g<5>\g<6> \g<7>"
)
# fix the combined shared/local modules.
# https://github.com/GoogleCloudPlatform/google-cloud-python/pull/5364
# https://github.com/googleapis/gapic-generator/issues/2058
s.replace(
"google/cloud/*/types.py",
"for module in \(\n(.*\n)*?\):\n( .*\n)+",
"""_shared_modules = [
http_pb2,
iam_policy_pb2,
policy_pb2,
any_pb2,
descriptor_pb2,
duration_pb2,
empty_pb2,
field_mask_pb2,
timestamp_pb2,
status_pb2,
]
_local_modules = [
cloudtasks_pb2,
queue_pb2,
target_pb2,
task_pb2,
]
for module in _shared_modules:
for name, message in get_messages(module).items():
setattr(sys.modules[__name__], name, message)
names.append(name)
for module in _local_modules:
for name, message in get_messages(module).items():
message.__module__ = 'google.cloud.tasks_v2beta2.types'
setattr(sys.modules[__name__], name, message)
names.append(name)
""")
|
Python
| 0.000009
|
@@ -901,22 +901,18 @@
pic.
-_generate_code
+py_library
(%0A
@@ -936,18 +936,8 @@
a2',
- 'python',
%0A
@@ -977,54 +977,8 @@
aml'
-,%0A artman_output_name='cloud-tasks-v2beta2'
)%0A%0As
|
f54168d54c36c6d1e7bac5432ac9f72bd0c19617
|
Version bump to 0.8.1
|
aliyun/__init__.py
|
aliyun/__init__.py
|
"""
Aliyun API
==========
The Aliyun API is well-documented at `dev.aliyun.com <http://dev.aliyun.com/thread.php?spm=0.0.0.0.MqTmNj&fid=8>`_.
Each service's API is very similar: There are regions, actions, and each action has many parameters.
It is an OAuth2 API, so you need to have an ID and a secret. You can get these from the Aliyun management console.
Authentication
==============
You will need security credentials for your Aliyun account. You can view and
create them in the `Aliyun management console <http://console.aliyun.com>`_. This
library will look for credentials in the following places:
1. Environment variables `ALI_ACCESS_KEY_ID` and `ALI_SECRET_ACCESS_KEY`
2. An ini-style configuration file at `~/.aliyun.cfg` with contents like:
::
[default]
access_key_id=xxxxxxxxxxxxx
secret_access_key=xxxxxxxxxxxxxxxxxxxxxxx
..
3. A system-wide version of that file at /etc/aliyun.cfg with similar contents.
We recommend using environment variables whenever possible.
Main Interfaces
===============
The main components of python-aliyun are ECS and SLB. Other Aliyun products will
be added as API support develops. Within each Aliyun product, we tried to
implement every API Action variation available. We used a boto-style design
where most API interaction is done with a connection object which marshalls
Python objects and API representations.
*ECS*:
You can create a new ECS connection and interact with ECS like this::
import aliyun.ecs.connection
conn = aliyun.ecs.connection.EcsConnection('cn-hangzhou')
print conn.get_all_instance_ids()
See more at :mod:`aliyun.ecs`
*SLB*:
Similarly for SLB, get the connection object like this::
import aliyun.slb.connection
conn = aliyun.slb.connection.SlbConnection('cn-hangzhou')
print conn.get_all_load_balancer_ids()
See more at :mod:`aliyun.slb`
ali command
===========
The ali commandline tool is mostly used for debugging the Aliyun API interactions.
It accepts arbitrary Key=Value pairs and passes them on to the API after wrapping them.
::
ali --region cn-hangzhou ecs Action=DescribeRegions
ali --region cn-hangzhou slb Action=DescribeLoadBalancers
"""
__version__ = "0.8"
|
Python
| 0
|
@@ -2204,10 +2204,12 @@
_ = %220.8
+.1
%22%0A
|