commit stringlengths 40 40 | subject stringlengths 1 1.49k | old_file stringlengths 4 311 | new_file stringlengths 4 311 | new_contents stringlengths 1 29.8k | old_contents stringlengths 0 9.9k | lang stringclasses 3 values | proba float64 0 1 |
|---|---|---|---|---|---|---|---|
959580ea313e4445374e8ee9f32e1a8822dd5beb | add setup script for install | setup.py | setup.py | from setuptools import setup
setup(name='hlm_gibbs',
      version='0.0.1',
      description='Fit spatial multilevel models and diagnose convergence',
      url='https://github.com/ljwolf/hlm_gibbs',
      author='Levi John Wolf',
      author_email='levi.john.wolf@gmail.com',
      license='3-Clause BSD',
      packages=['hlm_gibbs'],
      # Fix: a comma was missing between install_requires and zip_safe,
      # which made this file a SyntaxError.
      install_requires=['numpy', 'scipy', 'pysal', 'pandas', 'seaborn'],
      zip_safe=False)
| Python | 0 | |
dca7a5f766b7e2fd5cfc346cbc358faafa1ec9f1 | add setup.py file | setup.py | setup.py | try:
from setuptools import setup
except ImportError:
from distutils.core import setup
from distutils.extension import Extension
libname = "vgdl"

setup(
    name=libname,
    version="1.0",
    # Fix: typo in the description string ("top pf pygame" -> "top of pygame").
    description='A video game description language (VGDL) built on top of pygame',
    author='Tom Schaul',
    url='https://github.com/schaul/py-vgdl',
    packages=['vgdl'],
    install_requires=['pygame']
)
| Python | 0 | |
1618d8afeca1b667b4439d62b3727528dcba9159 | Add setup.py | setup.py | setup.py | from setuptools import setup
setup(
    name='django-filebased-email-backend-ng',
    # Packages are given as a one-element tuple; setuptools accepts any
    # iterable of package names here.
    packages=(
        'django_filebased_email_backend_ng',
    )
)
| Python | 0.000001 | |
95d1f63ce4d9698f8ab4b64757e3669c75accbbd | throw on some more setup.py pypi classifiers | setup.py | setup.py | from distutils.core import setup
# Read the long description up front so the file handle is closed promptly;
# the previous bare open('README.md').read() inside the call leaked the
# handle until garbage collection.
with open('README.md') as readme:
    long_description = readme.read()

setup(
    name='django-object-actions',
    version='0.0.1',
    author="The Texas Tribune",
    author_email="cchang@texastribune.org",
    maintainer="Chris Chang",
    # url
    packages=['django_object_actions'],
    include_package_data=True,  # automatically include things from MANIFEST
    license='Apache License, Version 2.0',
    description='A Django app for adding object tools to models',
    long_description=long_description,
    classifiers=[
        "Development Status :: 3 - Alpha",
        "Framework :: Django",
        "Intended Audience :: Developers",
        "License :: OSI Approved :: Apache Software License",
        "Operating System :: OS Independent",
        "Programming Language :: Python :: 2",
    ],
)
| from distutils.core import setup
setup(
    name='django-object-actions',
    version='0.0.1',
    author="The Texas Tribune",
    author_email="cchang@texastribune.org",
    maintainer="Chris Chang",
    # url
    packages=['django_object_actions'],
    include_package_data=True,  # automatically include things from MANIFEST
    license='Apache License, Version 2.0',
    description='A Django app for adding object tools to models',
    # NOTE(review): the bare open() leaks the file handle and raises if
    # README.md is absent from the sdist — confirm and consider a `with`
    # block.
    long_description=open('README.md').read(),
    classifiers=[
        "Development Status :: 3 - Alpha",
        "Framework :: Django",
    ],
)
| Python | 0 |
591b9be8d03cf2ecd12eed1bd36f9d762e91195c | Add setup.py for package installation | setup.py | setup.py | from setuptools import setup
setup(
    name='simplio',
    version='0.1',
    description='Simplest-case command-line input/output',
    long_description=(
        'Simplio is a Python function decorator that applies an input file '
        'object and an output file object as arguments to the decorated '
        'function. It determines this based on STDIN or the presence of '
        'command-line arguments.'),
    url='https://github.com/josephl/simplio',
    author='Joseph Lee',
    author_email='joe.lee.three.thousand@gmail.com',
    license='MIT',
    keywords='input output file io',
    # NOTE(review): no packages= or py_modules= is declared, so installing
    # this distribution installs no code — confirm and add the appropriate
    # py_modules/packages entry.
)
| Python | 0 | |
0abe1e173b73770b5f2ee81f57f21c41466e5c61 | Add setup script | setup.py | setup.py | #!/usr/bin/env python
import os.path
from setuptools import find_packages, setup
setup(
    name = 'technic-solder-client',
    version = '1.0',
    description = 'Python implementation of a Technic Solder client',
    author = 'Cadyyan',
    url = 'https://github.com/cadyyan/technic-solder-client',
    # Fix: this keyword was misspelled 'licensee', so the license metadata
    # was silently ignored by setuptools.
    license = 'MIT',
    packages = find_packages(),
    install_requires = [
        'tabulate',
    ],
    scripts = [
        os.path.join('bin', 'solder'),
    ],
)
| Python | 0.000001 | |
af49ecf6ce12b2fa909733c17569c7231c343190 | add simple sql shell | shell.py | shell.py | # simple interactive shell for MSSQL server
import pytds
import os
def main():
    """Run a minimal read-eval-print loop against an MSSQL server.

    Connection parameters come from the HOST, SQLUSER and SQLPASSWORD
    environment variables.  The loop exits on Ctrl-C or Ctrl-D.
    """
    conn = pytds.connect(dsn=os.getenv("HOST", "localhost"),
                         user=os.getenv("SQLUSER", "sa"),
                         password=os.getenv("SQLPASSWORD"))
    while True:
        try:
            sql = input("sql> ")
        except (KeyboardInterrupt, EOFError):
            # Fix: Ctrl-D raises EOFError, which previously crashed the
            # shell instead of exiting cleanly.
            return
        with conn.cursor() as cursor:
            try:
                cursor.execute(sql)
            except pytds.ProgrammingError as e:
                print("Error: " + str(e))
            else:
                # Server informational messages (PRINT statements etc.).
                for _, msg in cursor.messages:
                    print(msg.text)
                if cursor.description:
                    # Header row of column names, then the result rows.
                    print('\t'.join(col[0] for col in cursor.description))
                    print('-' * 80)
                    count = 0
                    for row in cursor:
                        print('\t'.join(str(col) for col in row))
                        count += 1
                    print('-' * 80)
                    print("Returned {} rows".format(count))
                    print()


main()
93e2d3d72099b854f854abc44a79b2c4edb74af8 | add basic file splitter | split.py | split.py | #!/usr/bin/python
# This Source Code Form is subject to the terms of the Mozilla Public
# License, v. 2.0. If a copy of the MPL was not distributed with this
# file, You can obtain one at http://mozilla.org/MPL/2.0/.
# Echo all output starting with the line after the line that starts with splitStart.
import sys

# Sentinel prefix: everything after the first line that starts with this
# marker is echoed to stdout.
splitStart = "QQQQQQQQQ"

foundLine = False
for l in sys.stdin:
    if foundLine:
        # NOTE: Python 2 print statement (the trailing comma suppresses the
        # extra newline); this script is not Python 3 compatible.
        print l,
        continue
    if l.startswith(splitStart):
        foundLine = True
| Python | 0 | |
3d44701308fe1c32d8ae2efab609d5e7bcd563c0 | Create ajastin.py | ajastin.py | ajastin.py | def downloader():
#import downloader
#downloader.main()
return 0
def lampotila():
    """Temperature-reading stub: report a fixed 15 degrees until the real
    lampotila module is wired in."""
    # import lampotila
    # lampotila.main()
    return 15
def main():
    """Scheduler loop: run downloader() at midnight and lampotila() every
    30 minutes, polling the wall clock every 10 seconds until Ctrl-C."""
    import time
    from datetime import datetime
    ret1 = 0
    try:
        while ret1 == 0:
            time.sleep(10)
            # The PID control function goes here (translated from Finnish:
            # "tähän tulee PID funktio").
            now = datetime.now()
            print("{:d}:{:d}:{:d}".format(now.hour, now.minute, now.second))
            if now.minute == 0 and now.hour == 0:
                downloader()
                # Wait until minute 0 passes so the download runs only once.
                while now.minute == 0:
                    time.sleep(1)
                    now = datetime.now()
            if now.minute % 30 == 0:
                lampotila()
                # Fix: the original guard `while now.minute % 30:` tested the
                # opposite condition and never waited, so lampotila() fired
                # repeatedly within the same minute.
                while now.minute % 30 == 0:
                    time.sleep(1)
                    # Fix: the original called datetime.now() without
                    # assigning it, so `now` never advanced here.
                    now = datetime.now()
    except KeyboardInterrupt:
        return


main()
| Python | 0.000115 | |
c45da8544bd3e4f85073e61cfba417862ce66fc2 | add 'Appeaser' strategy | axelrod/strategies/appeaser.py | axelrod/strategies/appeaser.py | from axelrod import Player
class Appeaser(Player):
    """
    A player who tries to guess what the opponent wants, switching his
    behaviour every time the opponent plays 'D'.
    """

    def strategy(self, opponent):
        """
        Start with 'C'; switch between 'C' and 'D' whenever the opponent
        plays 'D'.
        """
        if len(self.history) == 0:
            # Fix: on the very first move opponent.history is empty, so the
            # original crashed with an IndexError when it went on to read
            # opponent.history[-1].  Cooperate immediately instead.
            self.str = 'C'
            return self.str
        if opponent.history[-1] == 'D':
            # Toggle between cooperation and defection.
            if self.str == 'C':
                self.str = 'D'
            else:
                self.str = 'C'
        return self.str

    def __repr__(self):
        """
        The string method for the strategy.
        """
        return 'Appeaser'
| Python | 0.004212 | |
d29a94809f6f58e053a646d796fe9e55a51b334e | Initialize Ch. 1 caesarHacker | books/CrackingCodesWithPython/Chapter01/caesarHacker.py | books/CrackingCodesWithPython/Chapter01/caesarHacker.py | # Caesar Hacker improved
# Rewritten as function for importing
# SPOILERS: Chapter 6 (caesarHacker), Chapter 7 (functions)
import books.CrackingCodesWithPython.Chapter01.config
def hackCaesar(message):
    """Brute-force a Caesar cipher: print *message* decrypted under every
    possible key of the configured symbol set."""
    # Hoist the constant symbol table out of the loops.
    symbols = books.CrackingCodesWithPython.Chapter01.config.SYMBOLS
    num_symbols = len(symbols)
    for key in range(num_symbols):
        # Start each candidate decryption from an empty string.
        decrypted = ''
        for ch in message:
            if ch not in symbols:
                # Pass unknown characters through untouched.
                decrypted += ch
                continue
            shifted = symbols.find(ch) - key
            # Handle the wraparound below index 0.
            if shifted < 0:
                shifted += num_symbols
            decrypted += symbols[shifted]
        # Display every possible decryption.
        print('Key #%s: %s' % (key, decrypted))
    return None
| Python | 0.00232 | |
cfdbfd30f41ea4a0dc5fb693e896c6e24ae78e05 | Create pipeline.py | toxicity_ml/toxicBERT/pipeline.py | toxicity_ml/toxicBERT/pipeline.py | # coding=utf-8
# Copyright 2020 Google LLC
import tensorflow_model_analysis as tfma
from tfx.components import (Evaluator, ExampleValidator, ImportExampleGen,
ModelValidator, Pusher, ResolverNode, SchemaGen,
StatisticsGen, Trainer, Transform)
from tfx.proto import example_gen_pb2
from tfx.dsl.experimental import latest_blessed_model_resolver
from tfx.orchestration.kubeflow import kubeflow_dag_runner
from tfx.components.example_gen.csv_example_gen.component import CsvExampleGen
from tfx.utils.dsl_utils import external_input
def create_train_pipeline(pipeline_name: Text, pipeline_root: Text):
    '''
    Args:
      pipeline_name: name of the TFX pipeline being created.
      pipeline_root: root directory of the pipeline. Should be a valid GCS path
    Returns:
      A TFX pipeline object.
    '''
    # NOTE(review): several names used below are never imported or defined
    # in this module: Text, os, schema_gen, executor_spec, GenericExecutor,
    # trainer_pb2, Channel, Model, ModelBlessing, pusher_pb2, pipeline,
    # example_validator.  Confirm the missing imports before running.

    ## Parameters
    TRAINING_STEPS = 10000
    EVALUATION_STEPS = 1000

    ## GCS Location
    serving_model_dir = "/directory"

    ## Bring Data Into Pipeline
    # NOTE(review): this builds a SplitConfig *proto*, not an ExampleGen
    # component, yet `example_gen.outputs['examples']` is consumed below —
    # presumably an ImportExampleGen/CsvExampleGen wrapper is missing.
    example_gen = example_gen_pb2.Output(
        split_config=example_gen_pb2.SplitConfig(splits=[
            example_gen_pb2.SplitConfig.Split(name='train', hash_buckets=45),
            example_gen_pb2.SplitConfig.Split(name='eval', hash_buckets=5)
        ]))

    ## Computes Statistics for Validation
    statistics_gen = StatisticsGen(
        examples=example_gen.outputs['examples']
    )

    ## Performs Transforms
    transform = Transform(
        examples=example_gen.outputs['examples'],
        schema=schema_gen.outputs['schema'],
        module_file=os.path.abspath("transform.py")
    )

    ## Trainer Component
    trainer = Trainer(
        module_file=os.path.abspath("trainer.py"),
        custom_executor_spec=executor_spec.ExecutorClassSpec(GenericExecutor),
        examples=transform.outputs['transformed_examples'],
        transform_graph=transform.outputs['transform_graph'],
        schema=schema_gen.outputs['schema'],
        train_args=trainer_pb2.TrainArgs(num_steps=TRAINING_STEPS),
        eval_args=trainer_pb2.EvalArgs(num_steps=EVALUATION_STEPS)
    )

    ## Resolver Component: picks the latest blessed model as baseline.
    model_resolver = ResolverNode(
        instance_name='latest_blessed_model_resolver',
        resolver_class=latest_blessed_model_resolver.LatestBlessedModelResolver,
        model=Channel(type=Model),
        model_blessing=Channel(type=ModelBlessing)
    )

    ## Evaluator configuration: bless only if binary accuracy >= 0.5 and
    ## does not regress versus the baseline.
    eval_config = tfma.EvalConfig(
        model_specs=[
            tfma.ModelSpec(label_key='target')
        ],
        metrics_specs=[
            tfma.MetricsSpec(
                metrics=[
                    tfma.MetricConfig(class_name='ExampleCount')
                ],
                thresholds = {
                    'binary_accuracy': tfma.MetricThreshold(
                        value_threshold=tfma.GenericValueThreshold(
                            lower_bound={'value': 0.5}),
                        change_threshold=tfma.GenericChangeThreshold( direction=tfma.MetricDirection.HIGHER_IS_BETTER, absolute={'value': -1e-10}))
                }
            )
        ],
        slicing_specs=[tfma.SlicingSpec(),]
    )

    ## Evaluator Component
    evaluator = Evaluator(
        examples=example_gen.outputs['examples'],
        model=trainer.outputs['model'],
        baseline_model=model_resolver.outputs['model'],
        eval_config=eval_config
    )

    ## Pusher - Export for Model Serving
    # NOTE(review): `pusher` is constructed but never added to the component
    # list returned below — confirm whether pushing is intended.
    pusher = Pusher(
        model=trainer.outputs['model'],
        model_blessing=evaluator.outputs['blessing'],
        push_destination=pusher_pb2.PushDestination(
            filesystem=pusher_pb2.PushDestination.Filesystem(
                base_directory=serving_model_dir)))

    return pipeline.Pipeline(
        pipeline_name=pipeline_name,
        pipeline_root=pipeline_root,
        components=[
            example_gen, statistics_gen, schema_gen, example_validator, transform,
            trainer, model_resolver, evaluator
        ],
    )
def main(unused_argv):
    # Build a Kubeflow runner and execute the pipeline on it.
    # NOTE(review): `os`, `app`, `_pipeline_name` and `_pipeline_root` are
    # never defined in this module, and this calls `create_pipeline` while
    # the factory above is named `create_train_pipeline` — confirm.
    metadata_config = kubeflow_dag_runner.get_default_kubeflow_metadata_config()
    tfx_image = os.environ.get('KUBEFLOW_TFX_IMAGE', None)
    runner_config = kubeflow_dag_runner.KubeflowDagRunnerConfig(
        kubeflow_metadata_config=metadata_config,
        # Specify custom docker image to use.
        tfx_image=tfx_image)
    kubeflow_dag_runner.KubeflowDagRunner(config=runner_config).run(
        create_pipeline(
            pipeline_name=_pipeline_name,
            pipeline_root=_pipeline_root,
        ))


if __name__ == '__main__':
    app.run(main)
| Python | 0.000004 | |
03fce72b60eb8cad2368447cf23f72f8084f4a4b | Add py solution for 575. Distribute Candies | py/distribute-candies.py | py/distribute-candies.py | class Solution(object):
def distributeCandies(self, candies):
"""
:type candies: List[int]
:rtype: int
"""
return min(len(candies) / 2, len(set(candies)))
| Python | 0.000002 | |
d34dcf1179e6e5c2b864627266ae1788d10142aa | Add Chuanping Yu's solutions to Problem02 | Week01/Problem02/cyu_02.py | Week01/Problem02/cyu_02.py | #!/usr/bin/env python3
"""This script is written by Chuanping Yu, on Jul 24, 2017,
for the Assignment#1 in IDEaS workshop"""
#Problem 2
# Sum the even-valued Fibonacci terms that do not exceed four million.
F = 1
S = 0
FIB = [F, F]
while F <= 4000000:
    F = FIB[-2] + FIB[-1]
    FIB.append(F)
    if F <= 4000000 and F % 2 == 0:
        S = S + F
print(S)
| Python | 0 | |
40ca566b6cf45c1e62da98536e8ad35516c8326d | update pod versions in repo | scripts/update_pod_versions.py | scripts/update_pod_versions.py | import argparse
import logging
import os
import pprint
import re
import subprocess
import sys
import tempfile
from collections import defaultdict
from pkg_resources import packaging
PODSPEC_REPOSITORY = 'https://github.com/CocoaPods/Specs.git'
PODS = (
'FirebaseCore',
'FirebaseAdMob',
'FirebaseAnalytics',
'FirebaseAuth',
'FirebaseCrashlytics',
'FirebaseDatabase',
'FirebaseDynamicLinks',
'FirebaseFirestore',
'FirebaseFunctions',
'FirebaseInstallations',
'FirebaseInstanceID',
'FirebaseMessaging',
'FirebaseRemoteConfig',
'FirebaseStorage',
)
PODS2 = (
'FirebaseAuth',
)
def scan_pod_versions(local_repo_dir, pods=PODS):
    """Walk a local CocoaPods Specs checkout and collect every published
    version of the pods named in *pods*.

    The Specs repo lays podspecs out as .../<PodName>/<version>/<PodName>.podspec.json,
    so the version is the podspec's parent directory name.

    Returns a mapping {pod_name: [version, ...]}.
    """
    all_versions = defaultdict(list)
    logging.info('Scanning podspecs in Specs repo...')
    # Fix: removed the unused `specs_dir` local that was computed but never
    # used (the walk already starts at local_repo_dir).
    podspec_extension = ".podspec.json"
    for dirpath, _, filenames in os.walk(local_repo_dir):
        for f in filenames:
            if not f.endswith(podspec_extension):
                continue
            # Example: FirebaseAuth.podspec.json -> FirebaseAuth
            podname = f.split('.')[0]
            if podname not in pods:
                continue
            logging.debug('Found matching pod {0} in "Specs" at {1}'.format(
                podname, dirpath))
            version = os.path.basename(dirpath)
            all_versions[podname].append(version)
    return all_versions
def get_latest_pod_versions(pods=PODS):
    """Clone the CocoaPods Specs repo and return {pod: latest version}."""
    logging.info('Cloning podspecs git repo...')
    # Create a temporary directory context to clone the pod spec repo into;
    # it is cleaned up automatically when the context exits.
    with tempfile.TemporaryDirectory(suffix='pods') as local_specs_repo_dir:
        # Fix: the clone was commented out and the path hard-coded to a
        # local debug checkout ('/tmp/foo/Specs'); restore the real shallow
        # clone of the Specs repository.
        git_clone_cmd = ['git', 'clone', '-q', '--depth', '1',
                         PODSPEC_REPOSITORY, local_specs_repo_dir]
        subprocess.run(git_clone_cmd)
        all_versions = scan_pod_versions(local_specs_repo_dir, pods)

    latest_versions = {}
    for pod in all_versions:
        # all_versions maps pod name -> list of version strings, e.g.
        # { 'PodnameA': ['1.0.1', '2.0.4'], ... }.  Parse each as a
        # semantic version and keep only the newest.
        latest_version = max([packaging.version.parse(v)
                              for v in all_versions[pod]])
        latest_versions[pod] = latest_version.base_version

    print("Latest pod versions retrieved from cocoapods specs repo: \n")
    pprint.pprint(latest_versions)
    print()
    return latest_versions
def get_pod_files(dirs_and_files):
    """Expand a mixed list of file and directory paths into Podfile paths.

    Directories are searched recursively for files named 'Podfile'; file
    arguments are returned as-is (resolved to absolute paths); entries that
    do not exist are skipped.
    """
    pod_files = []
    for entry in dirs_and_files:
        abspath = os.path.abspath(entry)
        if not os.path.exists(abspath):
            continue
        if os.path.isfile(abspath):
            pod_files.append(abspath)
            continue
        if os.path.isdir(abspath):
            pod_files.extend(
                os.path.join(dirpath, name)
                for dirpath, _, filenames in os.walk(abspath)
                for name in filenames
                if name == 'Podfile')
    return pod_files
def update_pod_files(pod_files, pod_version_map, dryrun=True):
    # Rewrite pinned pod versions in each Podfile in place.  Lines must
    # look like:  pod 'FirebaseAuth', '7.0.0'
    pattern = re.compile("pod '(?P<pod_name>.+)', '(?P<version>.+)'\n")
    for pod_file in pod_files:
        to_update = False
        existing_lines = []
        with open(pod_file, "r") as podfile:
            existing_lines = podfile.readlines()
        if not existing_lines:
            continue
        if dryrun:
            print('Checking if update is required for {0}'.format(pod_file))
        else:
            logging.debug('Checking if update is required for {0}'.format(pod_file))
        for idx, line in enumerate(existing_lines):
            # lstrip so indented pod lines still match the anchored pattern.
            match = re.match(pattern, line.lstrip())
            if match:
                pod_name = match['pod_name']
                # Subspecs like 'Firebase/Auth' are keyed without the slash.
                pod_name_key = pod_name.replace('/', '')
                if pod_name_key in pod_version_map:
                    latest_version = pod_version_map[pod_name_key]
                    # NOTE(review): str.replace swaps the first occurrence of
                    # the version substring anywhere in the line — confirm a
                    # pod name can never contain the version text.
                    substituted_line = line.replace(match['version'], latest_version)
                    if substituted_line != line:
                        if dryrun:
                            print('Replacing\n{0}with\n{1}'.format(line, substituted_line))
                        else:
                            logging.info('Replacing\n{0}with\n{1}'.format(line, substituted_line))
                        existing_lines[idx] = substituted_line
                        to_update = True
        # Only rewrite the file outside dry-run mode and when something
        # actually changed.
        if not dryrun and to_update:
            print('Updating contents of {0}'.format(pod_file))
            with open(pod_file, "w") as podfile:
                podfile.writelines(existing_lines)
        print()
def main():
    """Entry point: locate Podfiles and rewrite their pinned pod versions."""
    args = parse_cmdline_args()
    if not args.files_or_dirs:
        # Default to the current working directory when no paths are given.
        args.files_or_dirs = [os.getcwd()]
    # Fix: a hard-coded debug map ({'FirebaseAuth': '8.0.0', ...}) had been
    # left in place of the real lookup; restore the Specs-repo scan.
    latest_versions_map = get_latest_pod_versions(PODS)
    pod_files = get_pod_files(args.files_or_dirs)
    update_pod_files(pod_files, latest_versions_map, args.dryrun)
def parse_cmdline_args():
    """Parse command-line arguments and configure logging.

    Returns:
        The parsed argparse namespace.

    Raises:
        ValueError: if --log_level is not a recognised level name.
    """
    parser = argparse.ArgumentParser(description='Update pod files with '
                                                 'latest pod versions')
    parser.add_argument('--dryrun', action='store_true',
                        help='Just print the replaced lines, '
                             'DO NOT overwrite any files')
    parser.add_argument('files_or_dirs', nargs='*', metavar='file',
                        help='List of pod files or directories containing podfiles')
    parser.add_argument("--log_level", default="info",
                        help="Logging level (debug, warning, info)")
    args = parser.parse_args()

    # Special handling for the log level argument.
    log_levels = {
        'critical': logging.CRITICAL,
        'error': logging.ERROR,
        'warning': logging.WARNING,
        'info': logging.INFO,
        'debug': logging.DEBUG,
    }
    level = log_levels.get(args.log_level.lower())
    if level is None:
        # Fix: the original message concatenated to "...aslog levels".
        raise ValueError('Please use one of the following as '
                         'log levels:\n{0}'.format(','.join(log_levels.keys())))
    logging.basicConfig(level=level)
    # Fix: dropped the unused module logger that was created and discarded.
    return args
if __name__ == '__main__':
    main()
# Fix: removed the commented-out IPython `embed()` debugging leftover.
| Python | 0 | |
5eb9a910096f3e0000499390541a83bc50fb73ce | add binheap | binheap.py | binheap.py | # -*- coding: utf-8 -*-
from __future__ import unicode_literals
class BinHeap(object):
    """A binary min-heap backed by a plain list.

    Fixes over the original implementation:
      * the multi-line swap in ``_bubble_up`` was syntactically broken (the
        tuple was split across lines with no continuation);
      * ``_bubble_up`` compared index 0 with its "parent" ``(0 - 1) // 2 == -1``,
        which aliased the *last* element of the list;
      * ``pop`` crashed on a single-element heap (it popped the only element
        and then assigned it to index 0 of the now-empty list);
      * ``_bubble_down`` indexed children without bounds checks, raising
        IndexError near the bottom of the heap;
      * the original child comparison also inverted min-heap order.
    """

    def __init__(self, iterable=None):
        # Heap invariant: self.list[i] <= self.list[2*i+1], self.list[2*i+2].
        self.list = []
        if iterable:
            for item in iterable:
                self.push(item)

    def push(self, value):
        """Add *value* to the heap."""
        self.list.append(value)
        self._bubble_up(len(self.list) - 1)

    def _bubble_up(self, index):
        # Sift the element at *index* up until its parent is not larger.
        while index > 0:
            parent = (index - 1) // 2
            if self.list[index] >= self.list[parent]:
                break
            self.list[index], self.list[parent] = (
                self.list[parent], self.list[index])
            index = parent

    def pop(self):
        """Remove and return the smallest value.

        Raises IndexError if the heap is empty.
        """
        return_val = self.list[0]
        last = self.list.pop()
        if self.list:
            # Move the last element to the root and restore the invariant.
            self.list[0] = last
            self._bubble_down(0)
        return return_val

    def _bubble_down(self, index):
        # Sift the element at *index* down, always swapping with the
        # smaller in-bounds child.
        size = len(self.list)
        while True:
            left = 2 * index + 1
            right = left + 1
            smallest = index
            if left < size and self.list[left] < self.list[smallest]:
                smallest = left
            if right < size and self.list[right] < self.list[smallest]:
                smallest = right
            if smallest == index:
                return
            self.list[index], self.list[smallest] = (
                self.list[smallest], self.list[index])
            index = smallest
4f0e9a14286f21d835e36e549ebee80419e46cec | test game | blocker.py | blocker.py | #!/usr/bin/env python
# NOTE: Python 2 print statement below — this script is not Python 3
# compatible.
class Blocker:
    def __init__(self):
        # Announce the game version on startup.
        print 'Blocker v1.0'
        return

    def run(self):
        # Game-loop placeholder; does nothing yet.
        return


game = Blocker()
game.run()
| Python | 0.000006 | |
769019be1331fa58e363fba37957ec90ab6f8163 | add code for more precise arbtirage math (WiP) | arbmath.py | arbmath.py | import decimal
from decimal import Decimal
# NOTE(review): work-in-progress code — `class ExchangeModel(object);` ends
# with a semicolon instead of a colon, so this file does not parse at all.
class ExchangeModel(object);
    def __init__(self, depths, tradeApi):
        self.depths = depths;
        self.tradeApi = tradeApi
        # NOTE(review): iterating a dict yields keys only, so the
        # `key, value` unpacking below fails — presumably `depths.items()`
        # was intended.  Symbols appear to be derived from 6-character pair
        # names such as 'btcusd'.
        self.symbols = [key[:3] for key, value in depths] + [key[3:] for key, value in depths]
        self.symbols = list(set(self.symbols))

    # returns (balance, remaining order)
    # NOTE(review): missing `self` parameter, and `k` (a fee factor?) is
    # never defined — confirm before use.
    def ModelL1Trade(balance, pair, type, price, amount):
        depth = self.depths[pair]
        remainingOrder = { 'pair': pair, 'type': type, 'price': price, 'amount': amount }
        remainder = remainingOrder['amount']
        traded = False
        if type == 'buy':
            if(not depth['ask']):
                return (balance, remainingOrder, traded)
            # Best (lowest) ask; trade only if it is at or below our limit.
            ask = depth['ask'][0]
            if ask['price'] > price:
                return (balance, remainingOrder, traded)
            tradedAmount = min(amount, ask['amount'])
            remainder = max(amount - ask['amount'], 0)
            ask['amount'] -= tradedAmount
            balance[pair[:3]] += tradedAmount * k
            balance[pair[3:]] -= tradedAmount * ask['price']
            traded = True
            # Drop an exhausted price level from the book.
            if ask['amount'] == Decimal('0'):
                self.depths[pair]['ask'] = self.depths[pair]['ask'][1:]
        elif type == 'sell':
            if not depth['bid']:
                return (balance, remainingOrder, traded)
            # Best (highest) bid; trade only if it is at or above our limit.
            bid = depth['bid'][0]
            if bid['price'] < price:
                return (balance, remainingOrder, traded)
            tradedAmount = min(amount, bid['amount'])
            remainder = max(amount - bid['amount'], 0)
            bid['amount'] -= tradedAmount
            balance[pair[:3]] -= tradedAmount
            balance[pair[3:]] += tradedAmount * bid['price'] * k
            traded = True
            if bid['amount'] == Decimal('0'):
                self.depths[pair]['bid'] = self.depths[pair]['bid'][1:]
        remainingOrder['amount'] = remainder
        return (balance, remainingOrder, traded)
def ModelTrade(balance, pair, type, price, amount):
if not (pair in depths):
return None
depth = depths[pair]
if type == 'buy':
ask = depth['ask']
def CalculateArb(direction, price1, price2, price3, k):
def CalculateElemArb(direction, books, pair1, pair2, pair3, tradeApi, balance):
# returns (list of orders that produces immediate profit, balance)
def CalculateArb(books, pair1, pair2, pair3, maxArbDepth, tradeApi, balance):
k = | Python | 0 | |
22ee1754a1409fb40bf2bb31cb565bfe914c9c38 | Create comparison charts from two summary.csv files | distribution/scripts/jmeter/create-comparison-charts.py | distribution/scripts/jmeter/create-comparison-charts.py | #!/usr/bin/env python3.6
# Copyright 2017 WSO2 Inc. (http://wso2.org)
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# ----------------------------------------------------------------------------
# Create comparison charts from two summary.csv files
# ----------------------------------------------------------------------------
import pandas as pd
import numpy as np
import seaborn as sns
import matplotlib.pyplot as plt
import matplotlib.ticker as tkr
import getopt, sys
def usage():
    """Print the command-line usage string for this script."""
    print(sys.argv[0] + " --summary1 <summary1.csv> --name1 <name1> --summary2 <summary2.csv> --name2 <name2>")
def main():
    """Parse command-line options into the module-level summary/name globals."""
    global summary1_file
    global summary2_file
    global name1
    global name2
    # Fix: initialise the globals so the validation that runs after main()
    # reports missing arguments instead of crashing with a NameError when an
    # option was not supplied.
    summary1_file = summary2_file = name1 = name2 = ''
    try:
        opts, args = getopt.getopt(sys.argv[1:], "h",
                                   ["help", "summary1=", "name1=", "summary2=", "name2="])
    except getopt.GetoptError as err:
        # print help information and exit:
        print(err)  # will print something like "option -a not recognized"
        usage()
        sys.exit(2)
    for o, a in opts:
        if o == "--summary1":
            summary1_file = a
        elif o == "--name1":
            name1 = a
        elif o == "--summary2":
            summary2_file = a
        elif o == "--name2":
            name2 = a
        elif o in ("-h", "--help"):
            usage()
            sys.exit()
        else:
            assert False, "unhandled option"
if __name__ == "__main__":
main()
if summary1_file == '' or summary2_file == '' or name1 == '' or name2 == '':
print("Please provide arguments")
usage()
sys.exit(1)
def add_suffix(string, suffix):
return string + " - " + suffix
print("Comparing " + name1 + " and " + name2)
df1 = pd.read_csv(summary1_file)
df2 = pd.read_csv(summary2_file)
keys=['Message Size (Bytes)', 'Sleep Time (ms)', 'Concurrent Users']
df = df1.merge(df2, on=keys, how='inner', suffixes=[add_suffix('', name1), add_suffix('', name2)])
sns.set_style("darkgrid")
unique_sleep_times=df['Sleep Time (ms)'].unique()
def save_multi_columns_categorical_charts(chart, sleep_time, columns, y, hue, title, kind='point'):
    # Render one factorplot per message size comparing the two summaries'
    # values of *columns* at the given backend delay, saved as
    # comparison_<chart>_<sleep_time>ms.png.
    print("Creating " + chart + " charts for " + str(sleep_time) + "ms backend delay")
    fig, ax = plt.subplots()
    df_results = df.loc[df['Sleep Time (ms)'] == sleep_time]
    all_columns=['Message Size (Bytes)','Concurrent Users']
    # Collect the suffixed column pair (one per summary) for every metric.
    for column in columns:
        all_columns.append(add_suffix(column, name1))
        all_columns.append(add_suffix(column, name2))
    df_results=df_results[all_columns]
    # Reshape to long format: one row per (size, users, metric-variant).
    df_results = df_results.set_index(['Message Size (Bytes)', 'Concurrent Users']).stack().reset_index().rename(columns={'level_2': hue, 0: y})
    g = sns.factorplot(x="Concurrent Users", y=y,
                       hue=hue, col="Message Size (Bytes)",
                       data=df_results, kind=kind,
                       size=5, aspect=1, col_wrap=2 ,legend=False);
    # Thousands separators on every y axis.
    for ax in g.axes.flatten():
        ax.yaxis.set_major_formatter(
            tkr.FuncFormatter(lambda y, p: "{:,}".format(y)))
    plt.subplots_adjust(top=0.9, left=0.1)
    g.fig.suptitle(title)
    plt.legend(frameon=True)
    plt.savefig("comparison_" + chart + "_" + str(sleep_time) + "ms.png")
    plt.clf()
    plt.close(fig)
for sleep_time in unique_sleep_times:
save_multi_columns_categorical_charts("thrpt", sleep_time, ['Throughput'],
"Throughput", "API Manager", "Throughput (Requests/sec) vs Concurrent Users for " + str(sleep_time) + "ms backend delay");
save_multi_columns_categorical_charts("avgt", sleep_time, ['Average (ms)'],
"Average Response Time", "API Manager", "Average Response Time (ms) vs Concurrent Users for " + str(sleep_time) + "ms backend delay");
save_multi_columns_categorical_charts("response_time_summary", sleep_time, ['Min (ms)','90th Percentile (ms)','95th Percentile (ms)','99th Percentile (ms)','Max (ms)'],
"Response Time", "API Manager", "Response Time Summary for " + str(sleep_time) + "ms backend delay", kind='bar');
save_multi_columns_categorical_charts("loadavg", sleep_time, ['API Manager Load Average - Last 1 minute','API Manager Load Average - Last 5 minutes','API Manager Load Average - Last 15 minutes'],
"Load Average", "API Manager", "Load Average with " + str(sleep_time) + "ms backend delay");
save_multi_columns_categorical_charts("network", sleep_time, ['Received (KB/sec)', 'Sent (KB/sec)'],
"Network Throughput (KB/sec)", "Network", "Network Throughput with " + str(sleep_time) + "ms backend delay");
save_multi_columns_categorical_charts("gc", sleep_time, ['API Manager GC Throughput (%)'],
"GC Throughput", "API Manager", "GC Throughput with " + str(sleep_time) + "ms backend delay")
print("Done")
| Python | 0.000005 | |
cd5e6a14bb0a67d6558b691f6b55f7918c4d4970 | Create new package (#6384) | var/spack/repos/builtin/packages/r-fnn/package.py | var/spack/repos/builtin/packages/r-fnn/package.py | ##############################################################################
# Copyright (c) 2013-2017, Lawrence Livermore National Security, LLC.
# Produced at the Lawrence Livermore National Laboratory.
#
# This file is part of Spack.
# Created by Todd Gamblin, tgamblin@llnl.gov, All rights reserved.
# LLNL-CODE-647188
#
# For details, see https://github.com/spack/spack
# Please also see the NOTICE and LICENSE files for our notice and the LGPL.
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU Lesser General Public License (as
# published by the Free Software Foundation) version 2.1, February 1999.
#
# This program is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the IMPLIED WARRANTY OF
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the terms and
# conditions of the GNU Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this program; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
##############################################################################
from spack import *
class RFnn(RPackage):
    """Cover-tree and kd-tree fast k-nearest neighbor search algorithms and
    related applications including KNN classification, regression and
    information measures are implemented."""

    homepage = "https://cran.r-project.org/web/packages/FNN/index.html"
    url = "https://cran.r-project.org/src/contrib/FNN_1.1.tar.gz"
    list_url = "https://cran.rstudio.com/src/contrib/Archive/FNN"

    # Checksums are MD5 digests of the CRAN release tarballs.
    version('1.1', '8ba8f5b8be271785593e13eae7b8c393')
    version('1.0', 'e9a47dc69d1ba55165be0877b8443fe0')
    version('0.6-4', '1c105df9763ceb7b13989cdbcb542fcc')
    version('0.6-3', 'f0f0184e50f9f30a36ed5cff24d6cff2')
    version('0.6-2', '20648ba934ea32b1b00dafb75e1a830c')

    # Pin to the R 3.4.x series this recipe was validated against.
    depends_on('r@3.4.0:3.4.9')
    depends_on('r-mvtnorm', type=('build', 'run'))
    depends_on('r-chemometrics', type=('build', 'run'))
| Python | 0 | |
f68175870692d128fb2a01795d20605bb2e17aa9 | Add initial functional tests | functional_tests/test_evexml.py | functional_tests/test_evexml.py | """Functional tests for the xml api part of aniauth project.
This is a temporary app as EVE Online's xml api is deprecated and will be
disabled March 2018.
"""
from django.contrib.staticfiles.testing import StaticLiveServerTestCase
from django.test import tag
from django.shortcuts import reverse
from selenium import webdriver
from selenium.webdriver.common.keys import Keys
MAX_WAIT = 10
@tag('functional')
class SubmissionTest(StaticLiveServerTestCase):
    """Tests for users who are submitting xml api key.
    """

    @classmethod
    def setUpClass(cls):
        # Fix: super().setUpClass() was invoked both before and after the
        # browser setup; run it exactly once.
        super(SubmissionTest, cls).setUpClass()
        cls.browser = webdriver.Chrome()
        cls.browser.maximize_window()
        cls.browser.implicitly_wait(MAX_WAIT)

    @classmethod
    def tearDownClass(cls):
        cls.browser.refresh()
        cls.browser.quit()
        super(SubmissionTest, cls).tearDownClass()

    def tearDown(self):
        self.browser.refresh()

    def test_user_can_see_apikey_form(self):
        """A user should be able to see the form for submitting api keys.
        """
        # They browse to the eve api keys page.
        url = self.live_server_url + reverse('eveapi')
        # Fix: the computed page URL was built but never used — the test
        # navigated to the site root instead of the eveapi page.
        self.browser.get(url)
        # They see input boxes for keyID and vCode; implicitly_wait makes
        # these lookups fail loudly if the form is absent.
        keyid_input = self.browser.find_element_by_name('keyID')
        vcode_input = self.browser.find_element_by_name('vCode')
6a7b32e271a264aad763fbd28749ac1258cf041f | Add dialplan filestring module | wirecurly/dialplan/filestring.py | wirecurly/dialplan/filestring.py | import logging
from wirecurly.exc import *
from wirecurly.dialplan.expression import *
import os
log = logging.getLogger(__name__)
__all__ = ['FileString']
class FileString(object):
    '''
    File-string object to use with the playback app in dialplan.
    '''

    def __init__(self, *argv):
        super(FileString, self).__init__()
        self.audios = []
        # Fix: the default sounds directory lacked its leading slash, which
        # produced a relative path instead of FreeSWITCH's standard
        # /usr/share/freeswitch/... location.
        self.path = '/usr/share/freeswitch/sounds/en/us/callie/'
        for audio in argv:
            self.addAudio(audio)

    def addAudio(self, audio):
        '''
        Add an audio file to this FileString object.
        '''
        self.audios.append(audio)

    def setPath(self, path):
        '''
        Set the directory prefix prepended to every audio file.
        '''
        self.path = path

    def toString(self):
        '''
        Return a file_string:// URI suitable for the playback app.
        '''
        return 'file_string://%s' % '!'.join(
            ['%s%s' % (self.path, a) for a in self.audios])
| Python | 0 | |
6ce84d454ef18f7b7dfc988195bfacb4e69e8c3f | add CRUD test cases for Snippet | hackathon_starter/hackathon/unittests/testsnippets.py | hackathon_starter/hackathon/unittests/testsnippets.py | from hackathon.models import Snippet
from rest_framework import status
from rest_framework.test import APITestCase
class SnippetViewTestCase(APITestCase):
    """CRUD tests for the /hackathon/snippets/ REST endpoints."""

    def setUp(self):
        # Two fixture snippets shared by every test.
        self.s1 = Snippet.objects.create(title='t1', code="""print("Hello, World.")""")
        self.s2 = Snippet.objects.create(title='t2', code="""print("Goodbye, World.")""")
        super(SnippetViewTestCase, self).setUp()

    def test_list(self):
        response = self.client.get('/hackathon/snippets/')
        self.assertEqual(response.status_code, status.HTTP_200_OK)
        self.assertEqual(len(response.data), 2)

    def test_detail(self):
        response = self.client.get('/hackathon/snippets/{}/'.format(self.s1.id))
        self.assertEqual(response.status_code, status.HTTP_200_OK)
        self.assertEqual(response.data['id'], self.s1.id)

    def test_create(self):
        payload = {'title': 't3', 'code': """print("Create, World.")"""}
        response = self.client.post('/hackathon/snippets/', payload)
        self.assertEqual(response.status_code, status.HTTP_201_CREATED)
        self.assertEqual(response.data['title'], 't3')
        self.assertEqual(response.data['code'], """print("Create, World.")""")

    def test_update(self):
        # PUT replaces the whole resource.
        payload = {'title': 't666', 'code': '2 + 2'}
        response = self.client.put('/hackathon/snippets/{}/'.format(self.s1.id), payload)
        self.assertEqual(response.status_code, status.HTTP_200_OK)
        self.assertEqual(response.data['title'], 't666')
        self.assertEqual(response.data['code'], '2 + 2')

    def test_partial_update(self):
        # PATCH updates only the supplied fields.
        payload = {'title': 't666'}
        response = self.client.patch('/hackathon/snippets/{}/'.format(self.s1.id), payload)
        self.assertEqual(response.status_code, status.HTTP_200_OK)
        self.assertEqual(response.data['title'], 't666')

    def test_delete(self):
        response = self.client.delete('/hackathon/snippets/{}/'.format(self.s1.id))
        self.assertEqual(response.status_code, status.HTTP_204_NO_CONTENT)
        self.assertEqual(Snippet.objects.count(), 1)
| Python | 0 | |
f59749db263291f481c4bdc9f6ede2f6de6cb6d4 | Create foundation for input file generation (csv for connectivity table, etc.) | create_input_files.py | create_input_files.py | import csv
import argparse
import itertools
from thermo_utils import csv_row_writer, read_csv_rows
# Read input/output arguments
parser = argparse.ArgumentParser()
parser.add_argument('-o','--output',required=True)
parser.add_argument('-d','--dof',required=True)
# parser.add_argument('-v','--version',required=False)
args = parser.parse_args()
# Write all rows to equations CSV file
# NOTE(review): `outRows` is not defined anywhere in this file, so this call
# raises NameError at runtime — the row-generation step is still unimplemented.
csv_row_writer(args.output,outRows)
print('Output file: %s' % args.output)
| Python | 0 | |
9f6df0b93a7a6911d9e7eee0e4fe87e34ea52832 | Create main entrypoint of cli | shub_cli/cli.py | shub_cli/cli.py | """
Scrapinghub CLI
Usage:
shub-cli jobs
shub-cli jobs [-t TAG1,TAG2] [-l LACK1,LACK2] [-s SPIDER] [-e STATE] [-c COUNT]
shub-cli job -id <id>
Options:
-t TAG1,TAG2 Description.
-l LACK1,LACK2 Description.
-s SPIDER Description.
-e STATE Description.
-c COUNT Description.
Examples:
shub-cli jobs
shub-cli jobs -c 100
shub-cli jobs -t fast,production -l consumed,dev -s spider1 state finished
shub-cli jobs tags consumed lacks teste spider my-spider state state count 1000
shub-cli job -id '10/10/1000'
Help:
For help using this tool, please open an issue on the Github repository:
https://github.com/victormartinez/shub_cli
"""
from docopt import docopt
from shub_cli import __version__ as VERSION
from shub.config import load_shub_config
from shub_cli.commands.job import Job
from shub_cli.commands.jobs import Jobs
from shub_cli.util.display import display, display_jobs
config = load_shub_config()
api_keys = config.apikeys
projects = config.projects
# 70953/91/7817
def main():
    """Main CLI entrypoint: parse docopt options and dispatch the command.

    Uses the `default` entries of the shub configuration for the API key
    and project, then runs either the single-job or job-list command.
    """
    default_api_key = api_keys['default']
    default_project = projects['default']
    options = dict(docopt(__doc__, version=VERSION))
    print('Connection: {}'.format(default_api_key))
    print('Project: {}'.format(default_project))
    # Truthiness replaces the non-idiomatic `== True` / `.keys()` membership;
    # docopt stores False/None for absent flags, so behavior is unchanged.
    if options.get('job'):
        if '-id' in options:
            job = Job(options, api_key=default_api_key, project=default_project)
            display(job.run())
        else:
            print('')
            print('Wrong command.')
    if options.get('jobs'):
        jobs = Jobs(options, api_key=default_api_key, project=default_project)
        display_jobs(jobs.run())
| Python | 0 | |
53c7233d0ecf7e3f807da9112d1c5eecb75c9ae2 | Add a new moderation-style cog | cogs/moderation.py | cogs/moderation.py | from discord.ext import commands
import discord
import datetime
class Moderation:
    """Moderation-style cog exposing user/server info commands."""
    def __init__(self, liara):
        # Bot instance; used for sending replies and requesting member chunks.
        self.liara = liara
    @commands.command(pass_context=True, no_pm=True)
    async def userinfo(self, ctx, user: discord.Member=None):
        """Post an embed describing a member (defaults to the invoker)."""
        if user is None:
            user = ctx.message.author
        # user-friendly status (custom emoji per presence state)
        if user.status == discord.Status.online:
            status = '<:online:212789758110334977>'
        elif user.status == discord.Status.idle:
            status = '<:away:212789859071426561>'
        elif user.status == discord.Status.do_not_disturb:
            status = '<:do_not_disturb:236744731088912384>'
        else:
            status = '<:offline:212790005943369728>'
        embed = discord.Embed()
        embed.title = '{} {}'.format(status, user)
        embed.description = '**Display name**: {0.display_name}\n**ID**: {0.id}\n[Avatar]({0.avatar_url})'.format(user)
        # How long ago the user joined this server / created their account.
        join_delta = datetime.datetime.now() - user.joined_at
        created_delta = datetime.datetime.now() - user.created_at
        embed.add_field(name='Join Dates', value='**This server**: {} ago ({})\n**Discord**: {} ago ({})'
                        .format(join_delta, user.joined_at, created_delta, user.created_at))
        roles = [x.mention for x in user.roles if not x.is_everyone]
        if roles:  # only show roles if the member has any
            if len(str(roles)) < 1025:  # deal with limits (embed field max is 1024 chars)
                embed.add_field(name='Roles', value=', '.join(roles))
        embed.set_thumbnail(url=user.avatar_url)
        try:
            await self.liara.say(embed=embed)
        except discord.HTTPException:
            # Bot lacks the Embed Links permission in this channel.
            await self.liara.say('Unable to post userinfo, please allow the Embed Links permission')
    @commands.command(pass_context=True)
    async def serverinfo(self, ctx):
        """Post an embed describing the current server."""
        server = ctx.message.server
        if server.large:
            # Large guilds need an explicit request for offline members.
            await self.liara.request_offline_members(server)
        embed = discord.Embed()
        embed.title = str(server)
        if server.icon_url is not None:
            embed.description = '**ID**: {0.id}\n[Icon URL]({0.icon_url})'.format(server)
            embed.set_thumbnail(url=server.icon_url)
        else:
            embed.description = '**ID**: {0.id}'.format(server)
        embed.add_field(name='Members', value=str(len(server.members)))
        roles = [x.mention for x in server.role_hierarchy if not x.is_everyone]
        if roles:  # only show roles if the server has any
            if len(str(roles)) < 1025:  # deal with limits (embed field max is 1024 chars)
                embed.add_field(name='Roles', value=', '.join(roles))
        channels = [x.mention for x in server.channels if x.type == discord.ChannelType.text]
        if len(str(channels)) < 1025:
            embed.add_field(name='Text channels', value=', '.join(channels))
        # Human-readable verification level; the joke string covers "high"+.
        if server.verification_level == discord.VerificationLevel.none:
            level = 'Off'
        elif server.verification_level == discord.VerificationLevel.low:
            level = 'Low'
        elif server.verification_level == discord.VerificationLevel.medium:
            level = 'Medium'
        else:
            level = '(╯°□°)╯︵ ┻━┻'
        embed.add_field(name='Other miscellaneous info', value='**AFK Channel**: {0.afk_channel}\n'
                                                               '**AFK Timeout**: {0.afk_timeout} seconds\n'
                                                               '**Owner**: {0.owner.mention}\n'
                                                               '**Verification level**: {1}'.format(server, level))
        embed.timestamp = server.created_at
        embed.set_footer(text='Created at')
        try:
            await self.liara.say(embed=embed)
        except discord.HTTPException:
            # Bot lacks the Embed Links permission in this channel.
            await self.liara.say('Unable to post serverinfo, please allow the Embed Links permission')
def setup(liara):
    # Extension entry point invoked by the bot's cog loader.
    liara.add_cog(Moderation(liara))
| Python | 0.000057 | |
6bce6ca2ae91b2eebad1d32ed970969ea5e423a2 | String reverse done | Text/reverse.py | Text/reverse.py | # -*- coding: cp1252 -*-
"""
Reverse a String Enter a string and the program
will reverse it and print it out.
"""
string = raw_input("Whatchu wanna say to me? ")
print "You say %s, I say %s" % (string, string[::-1])
| Python | 0.999408 | |
087829b024ea9c5b2028c3f13786578be6dfd702 | fix the bug of loading all cifar data | load_data.py | load_data.py | # encoding: utf-8
"""
@author: ouwj
@position: ouwj-win10
@file: load_data.py
@time: 2017/4/26 14:33
"""
from tensorflow.examples.tutorials.mnist import input_data
import numpy as np
def unpickle(file):
    """Deserialize one pickled batch file and return the stored dict."""
    import pickle
    with open(file, 'rb') as handle:
        contents = pickle.load(handle, encoding='bytes')
    return contents
def load_data(dataset='MNIST'):
    """Load 'MNIST' (tf Datasets object) or 'CIFAR' (test batch plus the five
    training batches stacked, pixel values scaled to [0, 1]).

    Any other dataset name falls through and returns None.
    """
    if dataset == 'MNIST':
        return input_data.read_data_sets('MNIST/')
    elif dataset == 'CIFAR':
        dirname = 'CIFAR/cifar-10-batches-py/'
        # print(unpickle(dirname+'test_batch'))
        # Division by 255.0 normalizes the uint8 pixel data.
        data = unpickle(dirname+'test_batch')[b'data'] / 255.0
        for i in range(1, 6):
            data = np.vstack((data, unpickle(dirname+'data_batch_'+str(i))[b'data'] / 255.0))
        return data
if __name__ == '__main__':
data = load_data('CIFAR')
print(data[0:5, :]) | # encoding: utf-8
"""
@author: ouwj
@position: ouwj-win10
@file: load_data.py
@time: 2017/4/26 14:33
"""
from tensorflow.examples.tutorials.mnist import input_data
import numpy as np
def unpickle(file):
import pickle
with open(file, 'rb') as fo:
dict = pickle.load(fo, encoding='bytes')
return dict
def load_data(dataset='MNIST'):
if dataset == 'MNIST':
return input_data.read_data_sets('MNIST/')
elif dataset == 'CIFAR':
dirname = 'CIFAR/cifar-10-batches-py/'
# print(unpickle(dirname+'test_batch'))
data = unpickle(dirname+'test_batch')[b'data'] / 255.0
# for i in range(1, 6):
# data = np.vstack((data, unpickle(dirname+'data_batch_'+str(i))[b'data'] / 255.0))
return data
if __name__ == '__main__':
data = load_data('CIFAR')
print(data[0:5, :]) | Python | 0.000001 |
c55d1a9709f26fee18ac880bf66dfac048d742a7 | change method argument to immutable | pykintone/application.py | pykintone/application.py | import requests
import json
from pykintone.account import kintoneService
import pykintone.result as pykr
class Application(object):
    """Client for one kintone application: record CRUD over the REST API.

    Accepts either raw record dicts (kintone field format) or kintoneModel
    instances for the write operations.
    """
    API_ROOT = "https://{0}.cybozu.com/k/v1/{1}"

    def __init__(self, account, app_id, api_token="", app_name="", requests_options=()):
        """
        :param account: kintone account (domain and credentials)
        :param app_id: id of the target application
        :param api_token: optional API token; takes precedence over login auth
        :param app_name: optional human-readable name (informational only)
        :param requests_options: extra kwargs forwarded to every requests call
        """
        self.account = account
        self.app_id = app_id
        self.api_token = api_token
        self.app_name = app_name
        # Immutable default sentinel; normalized to a dict for **kwargs use.
        self.requests_options = {} if len(requests_options) == 0 else requests_options

    def __make_headers(self, body=True):
        """Build request headers; API-token auth wins over login credentials."""
        header = {}
        header["Host"] = "{0}.cybozu.com:443".format(self.account.domain)
        if body:
            header["Content-Type"] = "application/json"

        def encode(user_id, password):
            import base64
            return base64.b64encode("{0}:{1}".format(user_id, password).encode(kintoneService.ENCODE))

        if self.account.basic_id:
            auth = encode(self.account.basic_id, self.account.basic_password)
            header["Authorization"] = "Basic {0}".format(auth)
        if self.api_token:
            header["X-Cybozu-API-Token"] = self.api_token
        elif self.account.login_id:
            auth = encode(self.account.login_id, self.account.login_password)
            header["X-Cybozu-Authorization"] = auth
        return header

    def __single(self):
        # Endpoint for single-record operations.
        return self.API_ROOT.format(self.account.domain, "record.json")

    def __multiple(self):
        # Endpoint for batch operations.
        return self.API_ROOT.format(self.account.domain, "records.json")

    def __is_record_id(self, field_name):
        return True if field_name in ["id", "$id"] else False

    def __is_revision(self, field_name):
        return True if field_name in ["revision", "$revision"] else False

    def get(self, record_id):
        """Fetch a single record by its id."""
        url = self.__single()
        headers = self.__make_headers(body=False)
        params = {
            "app": self.app_id,
            "id": record_id
        }
        r = requests.get(url, headers=headers, params=params, **self.requests_options)
        return pykr.SelectSingleResult(r)

    def select(self, query="", fields=()):
        """Query records, optionally restricting the returned fields."""
        url = self.__multiple()
        headers = self.__make_headers()
        headers["X-HTTP-Method-Override"] = "GET"  # use POST to GET (long queries)
        data = {
            "app": self.app_id,
            "totalCount": True
        }
        if query:
            data["query"] = query
        if len(fields) > 0:
            data["fields"] = fields
        r = requests.post(url, headers=headers, data=json.dumps(data), **self.requests_options)
        return pykr.SelectResult(r)

    def __get_model_type(self, instance):
        """Return the model class when *instance* is a kintoneModel, else None."""
        import pykintone.model as pykm
        if isinstance(instance, pykm.kintoneModel):
            return instance.__class__
        else:
            return None

    def __to_create_format(self, record_or_model):
        """Strip id/revision fields so the payload is valid for creation."""
        formatted = {}
        record = record_or_model
        if self.__get_model_type(record_or_model):
            record = record_or_model.to_record()
        for k in record:
            if self.__is_record_id(k) or self.__is_revision(k):
                continue
            else:
                formatted[k] = record[k]
        return formatted

    def create(self, record_or_model):
        """Create one record from a raw record dict or a kintoneModel."""
        url = self.__single()
        headers = self.__make_headers()
        _record = self.__to_create_format(record_or_model)
        data = {
            "app": self.app_id,
            "record": _record
        }
        resp = requests.post(url, headers=headers, data=json.dumps(data), **self.requests_options)
        r = pykr.CreateResult(resp)
        return r

    def batch_create(self, records_or_models):
        """Create multiple records in one request."""
        url = self.__multiple()
        headers = self.__make_headers()
        _records = [self.__to_create_format(r) for r in records_or_models]
        data = {
            "app": self.app_id,
            "records": _records
        }
        resp = requests.post(url, headers=headers, data=json.dumps(data), **self.requests_options)
        r = pykr.BatchCreateResult(resp)
        return r

    def __to_update_format(self, record_or_model):
        """Split a record into id/revision and the updatable field payload."""
        formatted = {"id": -1, "revision": -1, "record": {}}
        record = record_or_model
        if self.__get_model_type(record_or_model):
            record = record_or_model.to_record()
        for k in record:
            value = record[k]["value"]
            if self.__is_record_id(k) and value >= 0:
                formatted["id"] = value
            elif self.__is_revision(k) and value >= 0:
                formatted["revision"] = value
            else:
                formatted["record"][k] = record[k]
        return formatted

    def update(self, record_or_model):
        """Update one record (optimistic locking via its revision, if set)."""
        url = self.__single()
        headers = self.__make_headers()
        data = self.__to_update_format(record_or_model)
        data["app"] = self.app_id
        resp = requests.put(url, headers=headers, data=json.dumps(data), **self.requests_options)
        r = pykr.UpdateResult(resp)
        return r

    def batch_update(self, records_or_models):
        """Update multiple records in one request."""
        url = self.__multiple()
        headers = self.__make_headers()
        _records = [self.__to_update_format(r) for r in records_or_models]
        data = {
            "app": self.app_id,
            "records": _records
        }
        resp = requests.put(url, headers=headers, data=json.dumps(data), **self.requests_options)
        r = pykr.BatchUpdateResult(resp)
        return r

    def delete(self, record_ids_or_models, revisions=()):
        """Delete records given ids and/or models (with optional revisions)."""
        url = self.__multiple()
        headers = self.__make_headers()
        data = {
            "app": self.app_id,
        }
        ids = []
        revs = []
        # Normalize the revisions argument into a list of ints.
        if isinstance(revisions, (list, tuple)):
            if len(revisions) > 0:
                revs = [int(r) for r in revisions]
        else:
            revs = [int(revisions)]

        def to_key(id_or_m):
            # Models carry their own (record_id, revision); raw ids have no revision.
            if self.__get_model_type(id_or_m):
                return id_or_m.record_id, id_or_m.revision
            else:
                return int(id_or_m), -1

        def append_key(key):
            # Negative values mean "not set" and are skipped.
            for i, k in enumerate(key):
                if k >= 0:
                    if i == 0:
                        ids.append(k)
                    else:
                        revs.append(k)

        if isinstance(record_ids_or_models, (list, tuple)):
            for i in record_ids_or_models:
                append_key(to_key(i))
        else:
            append_key(to_key(record_ids_or_models))

        if len(revs) > 0:
            if len(revs) != len(ids):
                raise Exception("when deleting, the size of ids have to be equal to revisions.")
            else:
                data["ids"] = ids
                # Bug fix: send the normalized int list, not the raw `revisions`
                # argument (which may be a bare int, or an empty tuple when the
                # revisions were collected from models).
                data["revisions"] = revs
        else:
            data["ids"] = ids
        resp = requests.delete(url, headers=headers, data=json.dumps(data), **self.requests_options)
        r = pykr.Result(resp)
        return r

    def __str__(self):
        info = str(self.account)
        info += "\napp:\n"
        info += " id={0}, token={1}".format(self.app_id, self.api_token)
        return info
| Python | 0.000004 | |
54404541913185a54fea75353d9fffc72ddc2ff6 | Create discovery_diag.py | python/discovery_diag.py | python/discovery_diag.py | import requests
import json
requests.packages.urllib3.disable_warnings()
s = requests.Session()
def netmriLogin(temp, querystring):
    """GET the given NetMRI API path and return the response body as text."""
    base = "https://demo-netmri.infoblox.com/api/3.3"
    credentials = ("admin", "infioblox")
    response = s.request("GET", base + temp, params=querystring,
                         verify=False, auth=credentials)
    return response.text
# Fetch the DeviceIDs of device group 20, then dump each device's diagnostic
# text into a "<DeviceID>.txt" file.
t = netmriLogin(temp="/device_group_members/index", querystring={"GroupID":"20","select":"DeviceID"})
z = json.loads(t)
for entry in z['device_group_members']:
    print(entry['DeviceID'])
    filename = str(entry['DeviceID']) + ".txt"
    device = {"DeviceID": entry['DeviceID']}
    with open(filename, "w") as f:
        p = netmriLogin(temp="/devices/diagnostic", querystring=device)
        i = json.loads(p)
        print(type(i))
        print(i)
        f.write(i['text'])
| Python | 0 | |
b3c408845a6aba2e5bc15509f7d06800fb9e6c8b | multiples of 3 or 5 | 1-10/1.py | 1-10/1.py | def sum_of_multiples_of_three_or_five(n):
result = sum([x for x in range(1, n) if x % 3 == 0 or x % 5 == 0])
return result
def main():
n = 10**3
print(sum_of_multiples_of_three_or_five(n))
if __name__ == "__main__":
main()
| Python | 0.999834 | |
35f4f5bbea5b291b8204a2ca30acddebfad86d3e | Create 2004-4.py | 2004-4.py | 2004-4.py | times = input()
i = 0
while i < times:
length = input()
ascents = 0
descents = 0
plateaus = 0
maxA = 0
maxD = 0
maxP = 0
sequence = []
j = 0
while j < length:
currentNum = input()
sequence.append(currentNum)
if j != 0:
if currentNum < sequence[j-1]:
#descent
else:
#first time you can do nothing except reset max length to 1 below
maxA += 1
maxD += 1
maxP += 1
j += 1
i += 1
| Python | 0.000009 | |
f228b0d76a5c619e45d40d4d0da12059cb2668e9 | Create warlock.py | hsgame/cards/minions/warlock.py | hsgame/cards/minions/warlock.py | import hsgame.targeting
from hsgame.constants import CHARACTER_CLASS, CARD_RARITY, MINION_TYPE
from hsgame.game_objects import MinionCard, Minion, Card
#from hsgame.cards.battlecries import
__author__ = 'randomflyingtaco'
#let the train wreck begin
| Python | 0.000001 | |
97fcef753647bfbdab0381b30d1533bdce36aeb9 | fix admin | django-pyodbc/contrib/admin/models/models.py | django-pyodbc/contrib/admin/models/models.py | from django.db import models
from django.contrib.contenttypes.models import ContentType
from django.contrib.auth.models import User
from django.utils.translation import ugettext_lazy as _
from django.utils.encoding import smart_unicode
from django.utils.safestring import mark_safe
ADDITION = 1
CHANGE = 2
DELETION = 3
class LogEntryManager(models.Manager):
    def log_action(self, user_id, content_type_id, object_id, object_repr, action_flag, change_message=''):
        # Positional None, None fill the auto pk and the auto_now action_time;
        # object_repr is truncated to the 200-char column limit.
        e = self.model(None, None, user_id, content_type_id, smart_unicode(object_id), object_repr[:200], action_flag, change_message)
        e.save()
class LogEntry(models.Model):
    """One admin action (addition/change/deletion) recorded in django_admin_log."""
    action_time = models.DateTimeField(_('action time'), auto_now=True)
    user = models.ForeignKey(User)
    # Nullable so entries survive deletion of their content type.
    content_type = models.ForeignKey(ContentType, blank=True, null=True)
    object_id = models.TextField(_('object id'), blank=True, null=True)
    object_repr = models.CharField(_('object repr'), max_length=200)
    # One of the ADDITION/CHANGE/DELETION module-level constants.
    action_flag = models.PositiveSmallIntegerField(_('action flag'))
    change_message = models.TextField(_('change message'), blank=True)
    objects = LogEntryManager()
    class Meta:
        verbose_name = _('log entry')
        verbose_name_plural = _('log entries')
        db_table = 'django_admin_log'
        ordering = ('-action_time',)
    def __repr__(self):
        return smart_unicode(self.action_time)
    def is_addition(self):
        return self.action_flag == ADDITION
    def is_change(self):
        return self.action_flag == CHANGE
    def is_deletion(self):
        return self.action_flag == DELETION
    def get_edited_object(self):
        "Returns the edited object represented by this log entry"
        return self.content_type.get_object_for_this_type(pk=self.object_id)
    def get_admin_url(self):
        """
        Returns the admin URL to edit the object represented by this log entry.
        This is relative to the Django admin index page.
        """
        return mark_safe(u"%s/%s/%s/" % (self.content_type.app_label, self.content_type.model, self.object_id))
| Python | 0 | |
0ace48790374ea75ba2c6cbc51678e3240c22a88 | Create Differ.py | Differ.py | Differ.py |
file1 = raw_input('[file1:] ')
modified = open(file1,"r").readlines()[0]
file2 = raw_input('[file2:] ')
pi = open(file2, "r").readlines()[0] # [:len(modified)]
resultado = "".join( x for x,y in zip(modified, pi) if x != y)
resultado2 = "".join( x for x,y in zip(pi, modified) if x != y)
print "[Differ:]
print '\n-------------------------------------'
print "[file1] -> [file2]", resultado
print '-------------------------------------'
print "[file2] -> [file1]", resultado2
| Python | 0 | |
60de63d2fc53c020649bc21576765366f310cf56 | fix by adding migration | src/polls/migrations/0006_auto_20171114_1128.py | src/polls/migrations/0006_auto_20171114_1128.py | # -*- coding: utf-8 -*-
# Generated by Django 1.11.5 on 2017-11-14 10:28
from __future__ import unicode_literals
import django.contrib.postgres.fields.jsonb
import django.core.serializers.json
from django.db import migrations
class Migration(migrations.Migration):
    # Alters Poll.rules (JSONField): adds the DjangoJSONEncoder plus
    # French-language help_text/verbose_name (runtime strings, left verbatim).
    dependencies = [
        ('polls', '0005_poll_tags'),
    ]
    operations = [
        migrations.AlterField(
            model_name='poll',
            name='rules',
            field=django.contrib.postgres.fields.jsonb.JSONField(default=dict, encoder=django.core.serializers.json.DjangoJSONEncoder, help_text='Un object JSON décrivant les règles. Actuellement, sont reconnues `options`,`min_options` et `max_options', verbose_name='Les règles du vote'),
        ),
    ]
| Python | 0.000001 | |
a5d5dde8c523aa28452d790e7f0291c1cf52aacb | Make sure setUpModule is called by the test framework. We brought in pytest-2.4.0.dev8 for that specific functionality. However, one time we regressed, and our tests started misbehaving. So, this test is here to keep us honest. | tests/external/py2/testfixture_test.py | tests/external/py2/testfixture_test.py | #!/usr/bin/env python
# ----------------------------------------------------------------------
# Copyright (C) 2013 Numenta Inc. All rights reserved.
#
# The information and source code contained herein is the
# exclusive property of Numenta Inc. No part of this software
# may be used, reproduced, stored or distributed in any form,
# without explicit written authorization from Numenta Inc.
# ----------------------------------------------------------------------
"""
Unit tests for our dependencies in the pytest package; at the time of this
writing, we were using an unreleased version of pytest that added support for
the unittest setUpModule fixture and friends. Some of our tests rely on
setUpModule. Once, there was a conflict with pytest installation in our build
system, and an older version of pytest was installed that didn't support
setUpModule, which resulted in suble side-effects in some of these tests.
"""
import unittest2 as unittest
g_setUpModuleCalled = False
def setUpModule():
    # Module-level fixture: the test below verifies the runner invoked this.
    global g_setUpModuleCalled
    g_setUpModuleCalled = True
class TestPytest(unittest.TestCase):
    """Regression guard: fails if the test runner skipped setUpModule."""
    def testSetUpModuleCalled(self):
        self.assertTrue(g_setUpModuleCalled)
if __name__ == '__main__':
unittest.main()
| Python | 0 | |
b0c74bcf7dd4120684a944a7cd8cc005bee039f5 | Create BogoBogo.py | Challenge-175/01-Easy/BogoBogo.py | Challenge-175/01-Easy/BogoBogo.py | import random
def bogosort(n, m):
    """Shuffle *n* at random until it equals *m*; print and return the attempt count."""
    attempts = 0
    while n != m:
        n = ''.join(random.sample(n, len(n)))
        attempts += 1
    print(attempts, 'iterations')
    return attempts
def bogobogosort(n, m):
    # Bogobogosort: grow a matching prefix of n two characters at a time,
    # re-shuffling the whole string until each prefix matches m's prefix.
    i = 0 #number of iterations
    j = 2 #number of elements
    # The outer condition compares the j-prefix against all of m, so the loop
    # only ends once j has grown past len(m) with everything in place.
    while n[:j] != m:
        n = ''.join(random.sample(n,len(n)))
        while n[:j] != m[:j]:
            n = ''.join(random.sample(n,len(n)))
            i += 1
        # Dead branch: the inner while above exits only when n[:j] == m[:j],
        # so this "start over" reset can never fire.
        if n[:j] != m[:j]:
            j = 2 #Start over
        j += 1
    print(i, 'iterations')
    return i
print("BOGO SORT\n==============================")
for i in range(10):
bogosort("lolhe","hello")
print("\nBOGOBOGO SORT\n==============================")
for i in range(10):
bogobogosort("lolhe","hello")
| Python | 0.000001 | |
84b932df5520901645c6d999abddea1191654a34 | create skeleton of a proper in place quicksort | algorithms/sorting/quicksort_ip.py | algorithms/sorting/quicksort_ip.py | from random import randint
def partition(unsorted, start, end, pivot):
    """Partition unsorted[start..end] in place around the value at index *pivot*.

    Lomuto-style: the pivot value is swapped to the end, smaller elements are
    moved to the front, and the pivot's final index is returned.
    """
    unsorted[pivot], unsorted[end] = unsorted[end], unsorted[pivot]
    store = start
    for i in range(start, end):
        if unsorted[i] < unsorted[end]:
            unsorted[i], unsorted[store] = unsorted[store], unsorted[i]
            store += 1
    unsorted[store], unsorted[end] = unsorted[end], unsorted[store]
    return store


def choose_pivot(start, end):
    """Pick a random pivot index in [start, end] to avoid worst-case inputs."""
    return randint(start, end)


def quicksort(unsorted, start=0, end=None):
    """Sort *unsorted* in place between start and end (inclusive).

    *end* defaults to the last index. Returns the list for convenience
    (the sort itself happens in place).
    """
    if end is None:
        end = len(unsorted) - 1
    if start < end:
        mid = partition(unsorted, start, end, choose_pivot(start, end))
        quicksort(unsorted, start, mid - 1)
        quicksort(unsorted, mid + 1, end)
    return unsorted
if __name__ == '__main__':
    unsorted = [3,345,456,7,879,970,7,4,23,123,45,467,578,78,6,4,324,145,345,3456,567,5768,6589,69,69]
    # NOTE(review): `sorted` shadows the builtin; Python 2 print statements below.
    sorted = quicksort(unsorted)
    print '%r <-- unsorted' % unsorted
    print '%r <-- sorted' % sorted
| Python | 0.00004 | |
60002062970a2f83725355911dde73673c5875a5 | Add a snippet. | python/pyqt/pyqt5/button_clic_event_as_class.py | python/pyqt/pyqt5/button_clic_event_as_class.py | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
# Copyright (c) 2015 Jérémie DECOCK (http://www.jdhp.org)
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.
import sys
from PyQt5.QtWidgets import QApplication, QMainWindow, QPushButton
class Window(QMainWindow):
    """Minimal main window containing one button that prints on click."""
    def __init__(self):
        super().__init__()
        self.resize(250, 150)
        self.setWindowTitle('Hello')
        button = QPushButton('Hello', self)
        # Qt5 signal/slot syntax: route clicks to the on_clic handler.
        button.clicked.connect(self.on_clic)
        self.show()
    def on_clic(self):
        print("Hello!")
# Standard Qt bootstrap: create the application, build the window, run the
# event loop, and propagate its exit code to the shell.
app = QApplication(sys.argv)
window = Window()
exit_code = app.exec_()
sys.exit(exit_code)
| Python | 0.000002 | |
b7fa44bed363b32dfced05fce538502f7684bb6c | Add a first pass at Mercurial changegroup (hg push) integration. | api/integrations/hg/zulip-changegroup.py | api/integrations/hg/zulip-changegroup.py | #!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# Zulip hook for Mercurial changeset pushes.
# Copyright © 2012-2013 Zulip, Inc.
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.
#
#
# This hook is called when changesets are pushed to the master repository (ie
# `hg push`). See https://zulip.com/integrations for installation instructions.
import zulip
VERSION = "0.9"
def format_summary_line(web_url, user, base, tip, branch, node):
    """
    Build the first line of the message: who pushed how many commits to
    which branch, with the commit count linked to the changelog when a
    web URL is configured. Example:

    Jane Doe <jane@example.com> pushed 1 commit to master (170:e494a5be3393):
    """
    revcount = tip - base
    noun = "commit" + ("s" if revcount > 1 else "")
    if web_url:
        summary_url = "{base_url}/shortlog/{rev}?revcount={count}".format(
            base_url=web_url.rstrip("/"), rev=tip - 1, count=revcount)
        formatted_commit_count = "[{count} {noun}]({url})".format(
            count=revcount, noun=noun, url=summary_url)
    else:
        formatted_commit_count = "{count} {noun}".format(count=revcount, noun=noun)
    return u"**{user}** pushed {commits} to **{branch}** (`{tip}:{node}`):\n\n".format(
        user=user, commits=formatted_commit_count, branch=branch, tip=tip,
        node=node[:12])
def format_commit_lines(web_url, repo, base, tip):
    """
    Build one bullet line per commit in [base, tip): the first line of the
    commit message, linked to its diff when a web URL is configured.
    """
    rev_link_prefix = None
    if web_url:
        rev_link_prefix = web_url.rstrip("/") + "/rev/"
    lines = []
    for rev in range(base, tip):
        ctx = repo.changectx(repo.changelog.node(rev))
        first_line = ctx.description().split("\n")[0]
        if web_url:
            lines.append("* [{summary}]({url})".format(
                summary=first_line, url=rev_link_prefix + str(ctx)))
        else:
            lines.append("* {summary}".format(summary=first_line))
    return "\n".join(lines)
def send_zulip(email, api_key, stream, subject, content):
    """
    Send a message to Zulip using the provided credentials, which should be for
    a bot in most cases.
    """
    client = zulip.Client(email=email, api_key=api_key,
                          client="mercurial " + VERSION)
    client.send_message({
        "type": "stream",
        "to": stream,
        "subject": subject,
        "content": content,
    })
def get_config(ui, item):
    """Return the first [zulip] config value for *item*, or None when unset."""
    # configlist returns everything in lists.
    values = ui.configlist('zulip', item)
    if values:
        return values[0]
    return None
def hook(ui, repo, **kwargs):
    """
    Invoked by configuring a [hook] entry in .hg/hgrc.

    Handles only `changegroup` events: applies branch white/blacklists from
    the [zulip] config section, then posts a formatted push summary.
    Exits the process (exit 0/1) rather than returning on early-out paths.
    """
    hooktype = kwargs["hooktype"]
    node = kwargs["node"]
    ui.debug("Zulip: received {hooktype} event\n".format(hooktype=hooktype))
    if hooktype != "changegroup":
        ui.warn("Zulip: {hooktype} not supported\n".format(hooktype=hooktype))
        exit(1)
    ctx = repo.changectx(node)
    branch = ctx.branch()
    # If `branches` isn't specified, notify on all branches.
    branch_whitelist = get_config(ui, "branches")
    branch_blacklist = get_config(ui, "ignore_branches")
    if branch_whitelist:
        # Only send notifications on branches we are watching.
        watched_branches = [b.lower().strip() for b in branch_whitelist.split(",")]
        if branch.lower() not in watched_branches:
            ui.debug("Zulip: ignoring event for {branch}\n".format(branch=branch))
            exit(0)
    if branch_blacklist:
        # Don't send notifications for branches we've ignored.
        ignored_branches = [b.lower().strip() for b in branch_blacklist.split(",")]
        if branch.lower() in ignored_branches:
            ui.debug("Zulip: ignoring event for {branch}\n".format(branch=branch))
            exit(0)
    # The first and final commits in the changeset.
    base = repo[node].rev()
    tip = len(repo)
    email = get_config(ui, "email")
    api_key = get_config(ui, "api_key")
    if not (email and api_key):
        ui.warn("Zulip: missing email or api_key configurations\n")
        ui.warn("in the [zulip] section of your .hg/hgrc.\n")
        exit(1)
    stream = get_config(ui, "stream")
    # Give a default stream if one isn't provided.
    if not stream:
        stream = "commits"
    web_url = get_config(ui, "web_url")
    user = ctx.user()
    # Summary line plus one bullet per commit; topic is the branch name.
    content = format_summary_line(web_url, user, base, tip, branch, node)
    content += format_commit_lines(web_url, repo, base, tip)
    subject = branch
    ui.debug("Sending to Zulip:\n")
    ui.debug(content + "\n")
    send_zulip(email, api_key, stream, subject, content)
| Python | 0.000001 | |
6f5843fb04cfa2ed2082b340f282223ec374f9f6 | copy group descriptions to text table | alembic/versions/49ed2a435cf_group_description.py | alembic/versions/49ed2a435cf_group_description.py | revision = '49ed2a435cf'
down_revision = '5927719682b'
import uuid
from datetime import datetime
from alembic import op
import sqlalchemy as sa
from sqlalchemy import sql
import jinja2
def random_uuid():
    """Generate a fresh random UUID string (used as primary keys below)."""
    new_id = uuid.uuid4()
    return str(new_id)
def upgrade():
    # Lightweight table handles for raw inserts (no ORM models in migrations).
    text = sql.table('text',
        sql.column('id'),
        sql.column('ns'),
        sql.column('name'),
    )
    text_version = sql.table( 'text_version',
        sql.column('id'),
        sql.column('text_id'),
        sql.column('time'),
        sql.column('content'),
        sql.column('more_content'),
    )
    # Fixed timestamp stamped on every migrated text version ("time" here is
    # a local variable, not the stdlib module).
    time = datetime(2014, 9, 22, 11, 50, 0)
    conn = op.get_bind()
    query = (
        "SELECT short_name, description FROM mp_group "
        "WHERE year=2012 "
        "AND description IS NOT NULL"
    )
    data = list(conn.execute(query))
    # Copy each 2012 group description into the text/text_version tables
    # under the 'party' namespace, HTML-escaping the description body.
    for name, description in data:
        text_id = random_uuid()
        op.execute(text.insert().values({
            'id': text_id,
            'ns': 'party',
            'name': name,
        }))
        op.execute(text_version.insert().values({
            'id': random_uuid(),
            'text_id': text_id,
            'time': time,
            'content': '<p>' + jinja2.escape(description) + '</p>',
            'more_content': '',
        }))
def downgrade():
    # Remove versions first to respect the text_id reference, then the texts.
    op.execute(
        "DELETE FROM text_version "
        "WHERE text_id IN (SELECT id FROM text WHERE ns = 'party')"
    )
    op.execute("DELETE FROM text WHERE ns = 'party'")
| Python | 0 | |
f18fd5c4ad61adb56ac7524a006ce9977aa06a31 | Add worker to send queue mails | mailing/management/commands/send_queued_mails_worker.py | mailing/management/commands/send_queued_mails_worker.py | # -*- coding: utf-8 -*-
# Copyright (c) 2016 Aladom SAS & Hosting Dvpt SAS
from django.core.management.base import BaseCommand
from ...utils import send_queued_mails
import time
class Command(BaseCommand):
    help = """Send mails with `status` Mail.STATUS_PENDING and having
    `scheduled_on` set on a past date. In daemon mode."""
    def handle(self, *args, **options):
        # Daemon loop: flush the mail queue, then poll again every 15 seconds.
        while True:
            send_queued_mails()
            time.sleep(15)
| Python | 0 | |
5a7081c5c46a050566477adda19d30844192ceb2 | Add migration to add authtokens for existing users | src/mmw/apps/user/migrations/0002_auth_tokens.py | src/mmw/apps/user/migrations/0002_auth_tokens.py | # -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
from django.conf import settings
from django.contrib.auth.models import User
from rest_framework.authtoken.models import Token
def add_auth_tokens_to_users(apps, schema_editor):
    """Create a REST framework auth token for every user that already exists."""
    for existing_user in User.objects.all():
        Token.objects.create(user=existing_user)
class Migration(migrations.Migration):
    """Backfill DRF auth tokens for users created before tokens were issued."""

    # Requires the authtoken table and this app's initial migration.
    dependencies = [
        ('authtoken', '0001_initial'),
        ('user', '0001_initial')
    ]

    operations = [
        # Data-only migration; no reverse function is provided.
        migrations.RunPython(add_auth_tokens_to_users)
    ]
| Python | 0 | |
31b309c1f5981a10207e85950ef8139018afd37c | add roles urls | src/python/expedient/clearinghouse/roles/urls.py | src/python/expedient/clearinghouse/roles/urls.py | '''
Created on Jul 29, 2010
@author: jnaous
'''
from django.conf.urls.defaults import patterns, url
# Route for confirming a pending role request: captures the project id, the
# request id, and the allow / delegate decision bits (each a single digit).
urlpatterns = patterns("expedient.clearinghouse.roles.views",
    url(r"^confirm/(?P<proj_id>\d+)/(?P<req_id>\d+)/(?P<allow>\d)/(?P<delegate>\d)/$", "confirm_request", name="roles_confirm_request"),
)
| Python | 0.000001 | |
abb72a3a248efd1b244798f91cbca09af01ebb3e | Fix CloneManga modules. | dosagelib/plugins/clonemanga.py | dosagelib/plugins/clonemanga.py | # -*- coding: utf-8 -*-
# Copyright (C) 2004-2008 Tristan Seligmann and Jonathan Jacobs
# Copyright (C) 2012-2014 Bastian Kleineidam
# Copyright (C) 2015-2017 Tobias Gruetzmacher
from __future__ import absolute_import, division, print_function
from ..helpers import indirectStarter, xpath_class
from ..scraper import _ParserScraper
from ..util import getQueryParams
class CloneManga(_ParserScraper):
    """Shared scraper for the comics hosted on manga.clone-army.org."""

    baseUrl = 'http://manga.clone-army.org'
    # The strip image lives inside the "subsectionContainer" div.
    imageSearch = '//div[%s]//img' % xpath_class('subsectionContainer')
    # "<<" link navigates to the previous page.
    prevSearch = '//a[span[text()="<<"]]'
    # ">|" link jumps to the newest page; used by the indirect starter.
    latestSearch = '//a[span[text()=">|"]]'
    starter = indirectStarter
    help = 'Index format: n'

    def __init__(self, name, shortName, endOfLife=False):
        """Build the viewer URL template for the series *shortName*.

        ``endOfLife`` marks comics that are finished and will not update.
        """
        super(CloneManga, self).__init__('CloneManga/' + name)
        # '%%s' survives the outer %-formatting as the page-number placeholder.
        self.stripUrl = '%s/viewer.php?page=%%s&lang=&series=%s&HUDoff=' % (
            self.baseUrl, shortName)
        self.url = self.stripUrl % '1'
        self.endOfLife = endOfLife

    def namer(self, image_url, page_url):
        # Name files after the zero-padded page number from the query string.
        return '%03d' % int(getQueryParams(page_url)['page'][0])

    @classmethod
    def getmodules(cls):
        """Return one scraper instance per known clone-army series."""
        return (
            cls('ACaptainsWorries', 'captains_worries'),
            cls('AHimehornsDailyLife', 'himehorn'),
            cls('AprilAndMay', 'anm', endOfLife=True),
            cls('DollAndMaker', 'maria_doll', endOfLife=True),
            cls('Kanami', 'kanami', endOfLife=True),
            cls('MomokaCorner', 'momoka', endOfLife=True),
            cls('MyShutInVampirePrincess', 'snax'),
            cls('NanasEverydayLife', 'nana', endOfLife=True),
            cls('NNN', 'nnn', endOfLife=True),
            cls('PaperEleven', 'pxi', endOfLife=True),
            cls('PennyTribute', 'penny', endOfLife=True),
            cls('Tomoyo42sRoom', 't42r'),
        )
| # -*- coding: utf-8 -*-
# Copyright (C) 2004-2008 Tristan Seligmann and Jonathan Jacobs
# Copyright (C) 2012-2014 Bastian Kleineidam
# Copyright (C) 2015-2016 Tobias Gruetzmacher
from __future__ import absolute_import, division, print_function
from re import compile
from ..scraper import _BasicScraper
from ..util import tagre, getQueryParams
class CloneManga(_BasicScraper):
    """Regex-based scraper for manga.clone-army.org (pre-parser version)."""

    # Generic anchor whose href is captured; combined with nav-arrow images.
    _linkTag = tagre("a", "href", r'([^"]+)')
    prevSearch = compile(_linkTag + tagre("img", "src", r"previous\.gif"))
    nextSearch = compile(_linkTag + tagre("img", "src", r"next\.gif"))
    latestSearch = compile(_linkTag + tagre("img", "src", r"last\.gif"))
    help = 'Index format: n'

    def __init__(self, name, shortName, imageFolder=None, lastStrip=None):
        """Configure URLs for the series *shortName*.

        imageFolder -- server-side folder of the strip images; defaults to
                       *shortName*.
        lastStrip   -- page number of the final strip for ended comics; when
                       given, the scraper starts there instead of probing.
        """
        super(CloneManga, self).__init__('CloneManga/' + name)
        _url = 'http://manga.clone-army.org'
        self.url = '%s/%s.php' % (_url, shortName)
        if imageFolder is None:
            imageFolder = shortName
        self.stripUrl = self.url + '?page=%s'
        self.imageSearch = compile(tagre("img", "src", r'((?:%s/)?%s/[^"]+)' % (_url, imageFolder), after="center"))
        if lastStrip is None:
            self.starter = self._starter
        else:
            self.url = self.stripUrl % lastStrip

    def namer(self, image_url, page_url):
        # Name files after the zero-padded page number from the query string.
        return '%03d' % int(getQueryParams(page_url)['page'][0])

    def _starter(self):
        # first, try hopping to previous and next comic
        data = self.getPage(self.url)
        try:
            url = self.fetchUrl(self.url, data, self.prevSearch)
        except ValueError:
            # no previous link found, try hopping to last comic
            return self.fetchUrl(self.url, data, self.latestSearch)
        else:
            data = self.getPage(url)
            return self.fetchUrl(url, data, self.nextSearch)

    @classmethod
    def getmodules(cls):
        """Return one scraper instance per known clone-army series."""
        return [
            cls('AprilAndMay', 'anm', imageFolder='AAM'),
            cls('Kanami', 'kanami'),
            cls('MomokaCorner', 'momoka'),
            cls('NanasEverydayLife', 'nana', lastStrip='78'),
            cls('PaperEleven', 'pxi', imageFolder='papereleven', lastStrip='311'),
            cls('Tomoyo42sRoom', 't42r'),
            cls('PennyTribute', 'penny'),
        ]
| Python | 0 |
14e55d45428c617507c5c161f4d33154849f63a5 | Create Endings.py | Edabit/Endings.py | Edabit/Endings.py | #!/usr/bin/env python3
'''
Create a function that adds a string ending to each member in a list.
'''
def add_ending(lst, ending):
    """Return a new list with *ending* concatenated onto every member of *lst*."""
    return [''.join((member, ending)) for member in lst]
| Python | 0.000002 | |
ab53993b708b3f9cf3b5762664fef58bae99ea20 | Add some code to auto-remove Ltac | recursive_remove_ltac.py | recursive_remove_ltac.py | import re
__all__ = ["recursively_remove_ltac"]
# Matches an (optionally Local/Global) Ltac definition and captures its name.
LTAC_REG = re.compile(r'^\s*(?:Local\s+|Global\s+)?Ltac\s+([^\s]+)', re.MULTILINE)

def recursively_remove_ltac(statements, exclude_n=3):
    """Removes any Ltac statement which is not used later in
    statements. Does not remove any code in the last exclude_n
    statements.

    statements -- list of Coq statement strings, in file order
    exclude_n  -- number of trailing statements that are always kept
    Returns a new list in the original order.
    """
    # BUG FIX: reversed() returns an iterator, which cannot be sliced; the
    # original ``reversed(statements)[exclude_n:]`` raised TypeError.
    reversed_statements = list(reversed(statements))
    rtn = reversed_statements[:exclude_n]
    for statement in reversed_statements[exclude_n:]:
        match = LTAC_REG.search(statement)
        if match:
            ltac_name = match.groups()[0]
            # search for the name of the tactic, by itself.
            # BUG FIX: the pattern must be a raw string — in '\b%s\b' the \b
            # was a literal backspace, not a word boundary.  Also escape the
            # name in case it contains regex metacharacters.
            reg = re.compile(r'\b%s\b' % re.escape(ltac_name), re.MULTILINE)
            if any(reg.search(other_statement) for other_statement in rtn):
                rtn.append(statement)
        else:
            rtn.append(statement)
    return list(reversed(rtn))
| Python | 0.000001 | |
cd6eebfecab9b93863e7e20acec1ba0481f6b95f | Fix benchmark naming in reporting | tensorflow/python/eager/benchmarks_test_base.py | tensorflow/python/eager/benchmarks_test_base.py | # Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
r"""Benchmark base to run and report benchmark results."""
from __future__ import absolute_import as _absolute_import
from __future__ import division as _division
from __future__ import print_function as _print_function
from tensorflow.python.eager import test
class MicroBenchmarksBase(test.Benchmark):
  """Run and report benchmark results."""

  def run_report(self, run_benchmark, func, num_iters, execution_mode=None):
    """Run and report benchmark results."""
    total_time = run_benchmark(func, num_iters, execution_mode)
    # Mean latency per iteration, in microseconds.
    mean_us = total_time * 1e6 / num_iters
    extras = {
        "examples_per_sec": float("{0:.3f}".format(num_iters / total_time)),
        "us_per_example": float("{0:.3f}".format(total_time * 1e6 / num_iters))
    }
    # Report under an explicit name so results from different benchmark
    # methods do not collide.  NOTE(review): assumes _get_benchmark_name()
    # is provided by the test.Benchmark base class — confirm.
    benchmark_name = self._get_benchmark_name()
    self.report_benchmark(
        iters=num_iters, wall_time=mean_us, extras=extras, name=benchmark_name)
| # Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
r"""Benchmark base to run and report benchmark results."""
from __future__ import absolute_import as _absolute_import
from __future__ import division as _division
from __future__ import print_function as _print_function
from tensorflow.python.eager import test
class MicroBenchmarksBase(test.Benchmark):
  """Run and report benchmark results."""

  def run_report(self, run_benchmark, func, num_iters, execution_mode=None):
    """Run and report benchmark results."""
    total_time = run_benchmark(func, num_iters, execution_mode)
    # Mean latency per iteration, in microseconds.
    mean_us = total_time * 1e6 / num_iters
    extras = {
        "examples_per_sec": float("{0:.3f}".format(num_iters / total_time)),
        "us_per_example": float("{0:.3f}".format(total_time * 1e6 / num_iters))
    }
    # No explicit name: results are reported under the default benchmark name.
    self.report_benchmark(iters=num_iters, wall_time=mean_us, extras=extras)
| Python | 0.000478 |
dccb0f292c86da942c5e4493a5e117e5f3047a05 | add aiohttp exercise | aiohttp_ext.py | aiohttp_ext.py | import asyncio
from aiohttp import web
async def index(request):
    """Serve the index page (after a deliberate 0.5 s delay)."""
    await asyncio.sleep(0.5)
    return web.Response(body=b'<h1>Index</h1>', content_type='text/html')
async def hello(request):
    """Greet the name captured from the /hello/{name} URL segment."""
    await asyncio.sleep(0.5)
    text = '<h1>hello, %s</h1>' % request.match_info['name']
    return web.Response(body=text.encode('utf-8'), content_type='text/html')
async def init(loop):
    """Create the web application, register routes and bind 127.0.0.1:8000."""
    app = web.Application(loop=loop)
    app.router.add_route('GET', '/', index)
    app.router.add_route('GET', '/hello/{name}', hello)
    srv = await loop.create_server(app.make_handler(), '127.0.0.1', 8000)
    print('Server started at http://127.0.0.1:8000')
    return srv
# Bootstrap: create the server on the default event loop, then serve forever.
loop = asyncio.get_event_loop()
loop.run_until_complete(init(loop))
loop.run_forever() | Python | 0 | |
5788864141c2b635a3c0b8358d868fa7e2b5e789 | Create Pedido_Cadastrar.py | backend/Models/Turma/Pedido_Cadastrar.py | backend/Models/Turma/Pedido_Cadastrar.py | from Framework.Pedido import Pedido
from Framework.ErroNoHTTP import ErroNoHTTP
class PedidoCadastrar(Pedido):
    """Request object for registering a class (turma).

    Reads ``letra`` and the discipline id from the decoded request body and
    answers HTTP 400 when they are missing or the body is malformed.
    """

    def __init__(self, variaveis_do_ambiente):
        super(PedidoCadastrar, self).__init__(variaveis_do_ambiente)
        try:
            self.letra = self.corpo['letra']
            # NOTE(review): the key is spelled 'id_dsciplina' (missing an
            # 'i'); kept as-is because clients may already send it this
            # way — confirm before renaming.
            self.id_disciplina = self.corpo['id_dsciplina']
        except (KeyError, TypeError):
            # Was a bare ``except:``, which also swallowed SystemExit,
            # KeyboardInterrupt and programming errors; only a missing key
            # or a non-mapping body should map to a 400 response.
            raise ErroNoHTTP(400)

    def getLetra(self):
        """Return the class letter taken from the request body."""
        return self.letra

    def getId_disciplina(self):
        """Return the discipline id taken from the request body."""
        return self.id_disciplina
| Python | 0 | |
44e3876d76c7d7b3571c82030ff78260e4ec7e65 | Add PCA.py template | ML/PCA.py | ML/PCA.py | """
Exact principal component analysis (PCA)
"""
class PCA(object):
    """
    Exact principal component analysis (PCA)
    """

    def __init__(self):
        # Skeleton: no hyper-parameters to configure yet.
        return
def fit(self, X):
return | Python | 0 | |
cd2c959674043fcc3b6261129f57f266539a8658 | Add a Python snippet. | Python.py | Python.py | #!/usr/bin/env python
# coding: utf-8
"""Python snippet
"""
import os
import sys
if __name__ == '__main__':
    # Greet the name passed on the command line, or fall back to a
    # generic greeting when no argument was given.
    if len(sys.argv) > 1:
        print("Hello, %s!" % sys.argv[1])
    else:
        print("Hi there!")
| Python | 0.000043 | |
65449c60f357eeab5ddc9eb91a468ab1e3719de7 | Add dismiss_recommendation example (#35) | examples/v0/recommendations/dismiss_recommendation.py | examples/v0/recommendations/dismiss_recommendation.py | # Copyright 2019 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""This example dismisses a given recommendation.
To retrieve recommendations for text ads, run get_text_ad_recommendations.py.
"""
from __future__ import absolute_import
import argparse
import six
import sys
import google.ads.google_ads.client
def main(client, customer_id, recommendation_id):
    """Dismiss one recommendation and print the outcome.

    Exits the process with status 1 when the Google Ads API reports errors.
    """
    recommendation_service = client.get_service('RecommendationService')

    dismiss_recommendation_request = client.get_type(
        'DismissRecommendationRequest')
    dismiss_recommendation_operation = (dismiss_recommendation_request.
                                        DismissRecommendationOperation())
    # Build the recommendation's resource name from the customer and id.
    dismiss_recommendation_operation.resource_name = (
        recommendation_service.recommendation_path(
            customer_id, recommendation_id))

    try:
        dismissal_response = recommendation_service.dismiss_recommendation(
            customer_id,
            [dismiss_recommendation_operation])
    except google.ads.google_ads.errors.GoogleAdsException as ex:
        # Surface every error in the failure, including the field path that
        # triggered it, then abort.
        print('Request with ID "%s" failed with status "%s" and includes the '
              'following errors:' % (ex.request_id, ex.error.code().name))
        for error in ex.failure.errors:
            print('\tError with message "%s".' % error.message)
            if error.location:
                for field_path_element in error.location.field_path_elements:
                    print('\t\tOn field: %s' % field_path_element.field_name)
        sys.exit(1)

    print('Dismissed recommendation with resource name: "%s".'
          % dismissal_response.results[0].resource_name)
if __name__ == '__main__':
    # GoogleAdsClient will read the google-ads.yaml configuration file in the
    # home directory if none is specified.
    google_ads_client = (google.ads.google_ads.client.GoogleAdsClient
                         .load_from_storage())

    parser = argparse.ArgumentParser(
        description=('Dismisses a recommendation with the given ID.'))
    # The following argument(s) should be provided to run the example.
    parser.add_argument('-c', '--customer_id', type=six.text_type,
                        required=True, help='The Google Ads customer ID.')
    parser.add_argument('-r', '--recommendation_id', type=six.text_type,
                        required=True, help='The recommendation ID.')
    args = parser.parse_args()

    main(google_ads_client, args.customer_id, args.recommendation_id)
| Python | 0 | |
8d6ca433d33551cc1fe5c08edcf68ec65e5447b0 | Add solution to exercise 3.3. | exercises/chapter_03/exercise_03_03/exercies_03_03.py | exercises/chapter_03/exercise_03_03/exercies_03_03.py | # 3-3 Your Own List
transportation = ["mountainbike", "teleportation", "Citroën DS3"]
# One statement per list element; the trailing \n adds a blank line after each.
print("A " + transportation[0] + " is good when exercising in the woods.\n")
# NOTE(review): "trarsportation" is a typo in the printed output itself; left
# unchanged here because it is part of the program's runtime behaviour.
print("The ultimate form of trarsportation must be " + transportation[1] + ".\n")
print("Should I buy a " + transportation[2] + "?\n")
| Python | 0.000054 | |
d82ecab372ed22da0b00512294ee6cd3f5fcb012 | Add script to reindex datasets. | ckanofworms/scripts/reindex.py | ckanofworms/scripts/reindex.py | #! /usr/bin/env python
# -*- coding: utf-8 -*-
# CKAN-of-Worms -- A logger for errors found in CKAN datasets
# By: Emmanuel Raviart <emmanuel@raviart.com>
#
# Copyright (C) 2013 Etalab
# http://github.com/etalab/ckan-of-worms
#
# This file is part of CKAN-of-Worms.
#
# CKAN-of-Worms is free software; you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# CKAN-of-Worms is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
"""Reindex objects."""
import argparse
import logging
import os
import sys
import paste.deploy
from ckanofworms import contexts, environment, model
app_name = os.path.splitext(os.path.basename(__file__))[0]
log = logging.getLogger(app_name)
def main():
    """Parse CLI arguments and reindex the requested CKAN-of-Worms objects.

    NOTE(review): the -g/-o/-u flags are parsed but only datasets are
    actually reindexed below — confirm the remaining handlers are pending.
    """
    parser = argparse.ArgumentParser(description = __doc__)
    parser.add_argument('config', help = "CKAN-of-Worms configuration file")
    parser.add_argument('-a', '--all', action = 'store_true', default = False, help = "publish everything")
    parser.add_argument('-d', '--dataset', action = 'store_true', default = False, help = "publish datasets")
    parser.add_argument('-g', '--group', action = 'store_true', default = False, help = "publish groups")
    parser.add_argument('-o', '--organization', action = 'store_true', default = False, help = "publish organizations")
    parser.add_argument('-s', '--section', default = 'main',
        help = "Name of configuration section in configuration file")
    parser.add_argument('-u', '--user', action = 'store_true', default = False, help = "publish accounts")
    parser.add_argument('-v', '--verbose', action = 'store_true', default = False, help = "increase output verbosity")
    args = parser.parse_args()
    logging.basicConfig(level = logging.DEBUG if args.verbose else logging.WARNING, stream = sys.stdout)
    # Load the application environment from the chosen config-file section.
    site_conf = paste.deploy.appconfig('config:{0}#{1}'.format(os.path.abspath(args.config), args.section))
    environment.load_environment(site_conf.global_conf, site_conf.local_conf)

    ctx = contexts.null_ctx
    if args.all or args.dataset:
        # Recompute derived fields, then persist; save() reports whether the
        # document actually changed.
        for dataset in model.Dataset.find():
            dataset.compute_weight()
            dataset.compute_timestamp()
            if dataset.save(ctx, safe = False):
                log.info(u'Updated dataset: {}'.format(dataset.name))

    return 0
if __name__ == "__main__":
    # Propagate main()'s return code as the process exit status.
    sys.exit(main())
| Python | 0 | |
3a5df951bb9d12843d46107d0fbca9bd3d9105b3 | Change the name of GUI.py to main_window.py, and change the __init__ function, so that board object and the size of the window can be passed when a frame object is created. | src/main_window.py | src/main_window.py | import wx
import wx.lib.stattext as ST
import board
class My2048_wx(wx.Frame):
    """Main window of the 2048 game.

    The board state to display is supplied by the caller, so the frame no
    longer depends on a module-level ``board_object`` global.
    """

    def __init__(self, parent, id, title, size, board_object):
        """Create the frame and build all widgets.

        board_object -- a board.Board instance providing get_tiles().
        """
        super(My2048_wx, self).__init__(parent, title = title,
                                        size = size)
        # BUG FIX: the parameter used to be overwritten with the board.Board
        # class itself, which forced Construct() to rely on a module-level
        # global; keep the instance that was actually passed in.
        self.board_object = board_object
        self.Construct()

    def Construct(self):
        """Build the header, the SIZE x SIZE tile grid and the control keys."""
        SIZE = 4
        # panel_box is the container that holds all the widgets.
        panel_box = wx.BoxSizer(wx.VERTICAL)

        # header: game name, current score and best score.
        header = wx.BoxSizer(wx.VERTICAL)
        # upper_header lines up the game name, the current-score box and the
        # best-score box horizontally.
        upper_header = wx.BoxSizer(wx.HORIZONTAL)
        game_name = ST.GenStaticText(self, -1, label = '2048',
                                     size = (100, 30), style = wx.ALIGN_CENTRE)
        upper_header.Add(game_name, flag = wx.EXPAND|wx.RIGHT, border = 60)

        upper_header_score = wx.BoxSizer(wx.VERTICAL)
        score_str = ST.GenStaticText(self, -1, label = 'SCORE', size = (50, 20), style = wx.ALIGN_CENTRE)
        score_str.SetBackgroundColour((187, 173, 160))
        score = ST.GenStaticText(self, -1, label = '0', size = (50, 20), style = wx.ALIGN_CENTRE)
        score.SetForegroundColour('white')
        score.SetBackgroundColour((187, 173, 160))
        upper_header_score.AddMany([score_str, score])
        upper_header.Add(upper_header_score, flag = wx.EXPAND|wx.LEFT|wx.RIGHT, border = 10)

        upper_header_best = wx.GridSizer(2, 1)
        best_str = ST.GenStaticText(self, -1, label = 'BEST', size = (50, 20), style = wx.ALIGN_CENTRE)
        best_str.SetBackgroundColour((187, 173, 160))
        best = ST.GenStaticText(self, -1, label = '0', size = (50, 20), style = wx.ALIGN_CENTRE)
        best.SetForegroundColour('white')
        best.SetBackgroundColour((187, 173, 160))
        upper_header_best.AddMany([best_str, best])
        upper_header.Add(upper_header_best)
        header.Add(upper_header)

        # lower_header: a sub-title and a button to start a new game.
        lower_header = wx.BoxSizer(wx.HORIZONTAL)
        sub_title = ST.GenStaticText(self, -1, label = 'Join the numbers and get to the 2048 tile!')
        lower_header.Add(sub_title, flag = wx.EXPAND|wx.RIGHT, border = 5)
        new_game_button = wx.Button(self, -1, label = 'NEW GAME')
        lower_header.Add(new_game_button)
        header.Add(lower_header)
        panel_box.Add(header, flag = wx.EXPAND|wx.LEFT|wx.RIGHT|wx.TOP|wx.BOTTOM,
                      border = 10)

        # play_board holds the tile labels in a SIZE x SIZE grid.
        play_board = wx.GridSizer(SIZE, SIZE)
        # Read the tile values from the injected board (used to be a global).
        tile_list = self.board_object.get_tiles()
        # Flatten the grid into display strings, using '_' for empty tiles.
        text_list = []
        for i in range(0, SIZE):
            for j in range(0, SIZE):
                if tile_list[i][j] is None:
                    text_list.append('_')
                else:
                    text_list.append(str(tile_list[i][j]))
        # Create one label per tile; keep them in a list for later updates.
        label_list = []
        for i in range(0, SIZE * SIZE):
            label_list.append(ST.GenStaticText(self, -1, label = text_list[i],
                                               size = (60, 30), style = wx.ALIGN_CENTRE))
            label_list[i].SetBackgroundColour((238, 228, 218))
            play_board.Add(label_list[i], flag = wx.EXPAND|wx.RIGHT|wx.TOP, border = 10)
        panel_box.Add(play_board, flag = wx.EXPAND|wx.TOP|wx.LEFT,
                      border = 10)

        # Control keys (UP / LEFT RIGHT / DOWN) for moving and merging tiles.
        ctrl_keys = wx.BoxSizer(wx.VERTICAL)
        up_box = wx.BoxSizer()
        up_button = wx.Button(self, -1, label = 'UP', size = (60, 30))
        up_box.Add(up_button, flag = wx.EXPAND|wx.LEFT, border = 110)
        ctrl_keys.Add(up_box)
        left_right_box = wx.GridSizer(1, 2)
        left_button = wx.Button(self, -1, label = 'LEFT', size = (60, 30))
        right_button = wx.Button(self, -1, label = 'RIGHT', size = (60, 30))
        left_right_box.Add(left_button, flag = wx.LEFT, border = 80)
        left_right_box.Add(right_button, flag = wx.RIGHT)
        ctrl_keys.Add(left_right_box)
        down_box = wx.BoxSizer()
        down_button = wx.Button(self, -1, label = 'DOWN', size = (60, 30))
        down_box.Add(down_button, flag = wx.EXPAND|wx.LEFT, border = 110)
        ctrl_keys.Add(down_box)
        panel_box.Add(ctrl_keys, flag = wx.EXPAND|wx.ALIGN_CENTRE|wx.TOP, border = 10)

        self.SetSizer(panel_box)
        self.Show(True)
if __name__ == "__main__":
    app = wx.App()
    # A small 2x2 board is enough for quickly eyeballing the window layout.
    board_object = board.Board(2)
    frame = My2048_wx(None, -1, '2048', (380, 420), board_object)
    app.MainLoop()
| Python | 0 | |
ab99892d974503f2e0573a8937dc8f1b085b0014 | Add stringbuilder module | modules/pipestrconcat.py | modules/pipestrconcat.py | # pipestrconcat.py #aka stringbuilder
#
from pipe2py import util
def pipe_strconcat(context, _INPUT, conf, **kwargs):
    """This source builds a string and yields it forever.

    Keyword arguments:
    context -- pipeline context
    _INPUT -- not used
    conf:
        part -- parts

    Yields (_OUTPUT):
    string
    """
    pieces = []
    for part in conf['part']:
        if "subkey" in part:
            pass  # todo get from _INPUT e.g {u'type': u'text', u'subkey': u'severity'}
        else:
            pieces.append(util.get_value(part, kwargs))
    built = "".join(pieces)
    while True:
        yield built
| Python | 0.000001 | |
e7053da76c14f12bfc02992ab745aac193e7c869 | Create compareLists.py | compareLists.py | compareLists.py | def unique(a):
""" return the list with duplicate elements removed """
return list(set(a))
def intersect(a, b):
    """ return the intersection of two lists """
    common = set(a).intersection(b)
    return list(common)
def union(a, b):
    """ return the union of two lists """
    combined = set(a).union(b)
    return list(combined)
if __name__ == "__main__":
    # Quick manual demonstration with two overlapping sample lists
    # (Python 2 print statements).
    a = [0,1,2,0,1,2,3,4,5,6,7,8,9]
    b = [5,6,7,8,9,10,11,12,13,14]
    print unique(a)
    print intersect(a, b)
    print union(a, b)
| Python | 0.000001 | |
f347e84d4488d635d6b4a1eaf93855631f42c410 | Add simple ant system based solver | solvers/AntSystem.py | solvers/AntSystem.py | #!/usr/bin/env python
# encoding: utf-8
from random import shuffle, random
from itertools import permutations
from base_solver import BaseSolver
INF = float('inf')
class Ant(object):
    """A single ant: carries one candidate route over the mid nodes and its score."""

    route = []   # ordered mid-node names, excluding start and finish
    score = INF  # total route distance; INF until evaluate() is called

    def __init__(self, route):
        self.route = route

    def evaluate(self, task):
        """Score this route as the full start -> route -> finish distance."""
        start = task.start.name
        finish = task.finish.name
        route = [start, ] + self.route + [finish, ]
        self.score = task.get_path_distance(route)

    def update_trail(self, total_distance, arcs, start, finish):
        """Deposit pheromone on every arc of this ant's route.

        NOTE(review): ``power`` grows with this ant's own distance, so longer
        (worse) routes deposit MORE pheromone; ant systems usually reward
        shorter routes — confirm this weighting is intentional.
        """
        power = self.score / total_distance
        # update arcs on route
        for i in range(1, len(self.route)):
            arc = (self.route[i-1], self.route[i])
            arcs[arc] += power
        # remember to update beginning and end arcs
        arcs[(start, self.route[0])] += power
        arcs[(self.route[0], finish)] += power

    def run(self, arcs, start):
        """Walk a fresh route from *start*, picking arcs by pheromone share."""
        route = [start,]
        unused_nodes = set(self.route)
        # use shuffled arcs list to prevent privileged arcs
        # NOTE(review): shuffled_arcs is never used below — the selection
        # loop iterates ``arcs`` directly, so this shuffle has no effect.
        # (Also Python 2 only: dict.keys() must be a list for shuffle().)
        shuffled_arcs = arcs.keys()
        shuffle(shuffled_arcs)
        while unused_nodes:
            power_from_origin = 0.0
            tmp_arcs = {}
            # Collect candidate arcs leaving the current node toward
            # still-unvisited nodes, summing their pheromone.
            for arc, power in arcs.iteritems():
                if arc[0] == route[-1] and arc[1] in unused_nodes:
                    tmp_arcs[arc] = power
                    power_from_origin += power
            # Roulette-wheel selection proportional to pheromone power; the
            # loop variable ``arc`` holds the chosen arc after the break.
            n = random()
            for arc, power in tmp_arcs.items():
                if power_from_origin == 0:
                    break
                elif power / power_from_origin > n:
                    break
            route.append(arc[1])
            unused_nodes.remove(arc[1])
        # Strip the start node; self.route keeps only the mid nodes.
        self.route = route[1:]
class AntSystemSolver(BaseSolver):
    """Ant-system route solver: pheromone-guided randomised search."""

    deterministic = False

    # ant-system algorithm settings
    ants_count = 50
    vaporize_factor = 0.5

    # helpers (best solution found so far)
    best_route = []
    best_score = INF

    def run_search(self):
        """Main loop: evaluate ants, reinforce/evaporate trails, re-release ants.

        Returns (full route incl. start/finish, best score, cycles run).
        """
        # TODO - adjust settings according to the problem's complexity
        # generate some random solutions
        self.ants = self.generate_initial_ants(self.task)
        # prepare data for pheromone trails
        self.prepare_arcs()
        # check stop condition (run loop)
        self.cycles = 0
        while self.continue_():
            # evaluate each ant's solution
            self.evaluate_ants()
            # get all the best
            self.update_best_solutions()
            # update pheromone trail
            self.update_pheromone_trails()
            self.vaporize()
            # release the ants
            self.run_ants()
            self.cycles += 1
        route = ([self.task.start.name] + self.best_route +
                 [self.task.finish.name])
        return route, self.best_score, self.cycles

    def generate_initial_ants(self, task):
        """Return ``ants_count`` ants, each with a random mid-node permutation."""
        nodes = [node.name for node in task.mid_nodes]
        ants = []
        for i in range(self.ants_count):
            route = nodes[:]
            shuffle(route)
            ants.append(Ant(route))
        return ants

    def prepare_arcs(self):
        """Initialise zero pheromone on every directed arc between node pairs."""
        nodes = self.task.all_nodes.keys()
        self.arcs = {x: 0 for x in permutations(nodes, 2)}

    def continue_(self):
        # Fixed iteration budget: cycles 0..100 inclusive.
        return self.cycles <= 100

    def evaluate_ants(self):
        for ant in self.ants:
            ant.evaluate(self.task)

    def update_pheromone_trails(self):
        """Let every ant deposit pheromone, weighted by the swarm's total distance."""
        total_distance = 0
        for ant in self.ants:
            total_distance += ant.score
        start = self.task.start.name
        finish = self.task.finish.name
        for ant in self.ants:
            ant.update_trail(total_distance, self.arcs, start, finish)

    def vaporize(self):
        """Decay existing pheromone by ``vaporize_factor`` (Python 2 iteritems)."""
        for arc, power in self.arcs.iteritems():
            if power:
                self.arcs[arc] = self.get_vaporized_power(power)

    def get_vaporized_power(self, power):
        # Clamp at zero so trails never go negative.
        return max(0, power * self.vaporize_factor)

    def run_ants(self):
        start = self.task.start.name
        for ant in self.ants:
            ant.run(self.arcs, start)

    def update_best_solutions(self):
        """Remember the best (lowest-score) route seen so far."""
        for ant in self.ants:
            if ant.score < self.best_score:
                self.best_score = ant.score
                self.best_route = ant.route
| Python | 0.000004 | |
0ca69bd8c29d123702e1934863d5d8a8c0d1703b | Create parse.py | parse.py | parse.py | # Parse the Essential Script
def parse(source):
    """Tokenise and nest an Essential Script source string.

    First pass: split *source* into statements (lists of words), nesting
    '(...)' as ['args', ...] sub-lists and '[...]' as ['list', ...]
    sub-lists, honouring "..." and '...' quoting.  Second pass: nest the
    bodies of subroutine/if/for/while statements until a matching 'end'.
    Returns the top-level list of parsed statements.
    """
    parsedScript = [[]]
    word = ''
    prevChar = ''
    # NOTE(review): inArgs and inList are assigned but never used below.
    inArgs = False
    inList = False
    inString = False   # inside "double-quoted" text
    inQuote = False    # inside 'single-quoted' text
    for char in source:
        if char == '(' and not inString and not inQuote:
            # Open an argument group; flush the pending word into it.
            parsedScript.append([])
            parsedScript[-1].append('args')
            if word:
                parsedScript[-1].append(word)
                word = ''
        elif char in (';', '\n') and not inString and not inQuote:
            # Statement terminator: flush the word and start a new statement.
            if word:
                parsedScript[-1].append(word)
                word = ''
            parsedScript.append([])
        elif char == '[':
            # NOTE(review): unlike '(' this branch ignores inString/inQuote,
            # so a '[' inside quoted text still opens a list — confirm.
            parsedScript.append([])
            parsedScript[-1].append('list')
            if word:
                parsedScript[-1].append(word)
                word = ''
        elif char in (')', ']') and not inString and not inQuote:
            # Close the innermost group and attach it to its parent.
            if word:
                parsedScript[-1].append(word)
                word = ''
            temp = parsedScript.pop()
            parsedScript[-1].append(temp)
        elif char in (' ', '\t') and not inString and not inQuote:
            # Whitespace ends the current word.
            if word:
                parsedScript[-1].append(word)
                word = ''
        elif char == '\"' and not prevChar == '\\':
            # Unescaped double quote toggles string mode.
            inString = not inString
        elif char == '\'' and not prevChar == '\\':
            # Unescaped single quote toggles quote mode.
            inQuote = not inQuote
        elif char in ('+', '-', '*', '/'):
            # Arithmetic operators are their own tokens.
            if word:
                parsedScript[-1].append(word)
                word = ''
            parsedScript[-1].append(char)
        else:
            word += char
        prevChar = char
    # Flush any trailing word left at end of input.
    if word:
        parsedScript[-1].append(word)
        word = ''
    reparsedScript = [[]]
    # Parse multi-line code until 'end'
    for word in parsedScript:
        if word:
            if word[0] in ('subroutine', 'if', 'for', 'while'):
                # Open a new block; subsequent statements nest inside it.
                reparsedScript.append([])
                reparsedScript[-1].append(word)
            elif word[0] == 'end':
                # Close the current block and attach it to its parent.
                temp = reparsedScript.pop()
                reparsedScript[-1].append(temp)
            else:
                reparsedScript[-1].append(word)
    return reparsedScript[0]
| Python | 0.00002 | |
c4b7bd5b74aaba210a05f946d59c98894b60b21f | Add test for pixel CLI | tests/cli/test_pixel.py | tests/cli/test_pixel.py | """ Test ``yatsm line``
"""
import os
from click.testing import CliRunner
import pytest
from yatsm.cli.main import cli
@pytest.mark.skipif("DISPLAY" not in os.environ, reason="requires display")
def test_cli_pixel_pass_1(example_timeseries):
    """ Correctly run for one pixel
    """
    # Invoke ``yatsm pixel`` on pixel (1, 1) of the example dataset, plotting
    # band 5 as a time series with the ggplot style; matplotlib needs a
    # display, hence the skipif above.
    runner = CliRunner()
    result = runner.invoke(
        cli,
        ['-v', 'pixel',
         '--band', '5',
         '--plot', 'TS',
         '--style', 'ggplot',
         example_timeseries['config'], '1', '1'
        ])
    assert result.exit_code == 0
| Python | 0 | |
a0b71bb07956832fdaf5491ebd177418f0c7363d | add test to use two cameras and calculate distance from target based on FOV and IPD | Laptop/cvtest3.py | Laptop/cvtest3.py | import numpy as np
import cv2
import math
import time
# Two capture devices for stereo vision; indices 1 and 2 — presumably external
# USB cameras (0 is usually the built-in one) — TODO confirm on target machine.
capA = cv2.VideoCapture(1)
capB = cv2.VideoCapture(2)
#print capA.get(cv2.CAP_PROP_FRAME_WIDTH) 640
#print capA.get(cv2.CAP_PROP_FRAME_HEIGHT) 480
#capA.release()
#capB.release()
#time.sleep(100)
def findBiggestContour(contours):
    """Return the index of the contour with the largest area (0 when empty)."""
    best = 0
    for idx, candidate in enumerate(contours):
        if cv2.contourArea(candidate) > cv2.contourArea(contours[best]):
            best = idx
    return best
def findSecondBiggestContour(contours):
    """Return (index_of_largest, index_of_second_largest) by contour area.

    Both indices default to 0 when there are fewer than two contours.
    """
    largest = 0
    runner_up = 0
    for idx, candidate in enumerate(contours):
        area = cv2.contourArea(candidate)
        if area > cv2.contourArea(contours[largest]):
            runner_up = largest
            largest = idx
        elif area > cv2.contourArea(contours[runner_up]):
            runner_up = idx
        # otherwise: smaller than both current leaders — ignore it.
    return largest, runner_up
def findBigContours(contours):
    """Keep only contours whose solidity (area / convex-hull area) exceeds 0.75."""
    solid_contours = []
    for contour in contours:
        area = cv2.contourArea(contour)
        hull = cv2.convexHull(contour)
        hull_area = cv2.contourArea(hull)
        # Degenerate hull: skip to avoid dividing by zero.
        if hull_area == 0:
            continue
        solidity = float(area) / hull_area
        if solidity > .75:  # 450
            solid_contours.append(contour)
    return solid_contours
def findBestAR(contours):
    """Index of the contour whose bounding-box aspect ratio (w/h) is closest to 0.4."""
    idealAR = 0.4
    best_index = 0
    best_delta = 100
    for idx, contour in enumerate(contours):
        x, y, w, h = cv2.boundingRect(contour)
        delta = abs((w / float(h)) - idealAR)
        if delta < best_delta:
            best_index = idx
            best_delta = delta
    return best_index
def processCam(cap):
    """Grab one frame from *cap* and locate the LED target in it.

    Returns (annotated BGR image, bx, by) where (bx, by) is the centre of a
    box enclosing the two largest solid contours, or (-1, -1) when nothing
    was found.
    """
    bx = -1
    by = -1
    # Capture frame-by-frame
    ret, frame = cap.read()
    # Our operations on the frame come here
    #gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
    hsv = cv2.cvtColor(frame,cv2.COLOR_BGR2HSV)#HSV
    #white led ring: 45,2,240 - 130,40,255
    # HSV threshold tuned for the white LED ring.
    lower_lim = np.array([37,10,180])#80,23,235
    upper_lim = np.array([106,63,255])#102,167,255
    mask = cv2.inRange(hsv, lower_lim, upper_lim)
    img, contours, heirarchy = cv2.findContours(mask,cv2.RETR_TREE,cv2.CHAIN_APPROX_SIMPLE)
    # Rebuild the display image, since findContours modifies its input.
    img = cv2.inRange(hsv, lower_lim, upper_lim)
    img = cv2.cvtColor(img,cv2.COLOR_GRAY2BGR)
    contours = findBigContours(contours)
    # find big contours
    #biggestContourIndex = findBiggestContour(contours)
    biggestContourIndex, secondBiggestIndex = findSecondBiggestContour(contours)
    #bigContours = findBigContours(contours)
    #biggestContourIndex = findBestAR(bigContours)
    if(len(contours) != 0):
        # find box around contour and it's center
        recta = cv2.minAreaRect(contours[biggestContourIndex])
        rectb = cv2.minAreaRect(contours[secondBiggestIndex])
        boxa = cv2.boxPoints(recta)
        boxb = cv2.boxPoints(rectb)
        # Enclose both boxes in a single rotated rectangle.
        rect = cv2.minAreaRect(np.concatenate([boxa,boxb]))
        box = cv2.boxPoints(rect)
        # Centre = midpoint of two opposite corners.
        bx = int((box[0][0] + box[2][0])/2)
        by = int((box[0][1] + box[2][1])/2)
        #x,y,w,h = cv2.boundingRect(contours[biggestContourIndex])
        #if(h != 0):
        #    print("aspect ratio: " + str(h/float(w)))
        #print("center: " + str(bx) + ', ' + str(by))
        box = np.int0(box)
        img = cv2.drawContours(img,[box],0,(0,0,255),1)
        img = cv2.circle(img,(bx,by),4,(0,255,255),-1)
        # find centroid from moments
        #M = cv2.moments(contours[biggestContourIndex])
        #if(M['m00'] != 0):
        #    cx = int(M['m10']/M['m00'])
        #    cy = int(M['m01']/M['m00'])
        #    img = cv2.circle(img,(cx,cy),4,(255,255,0),-1)
    #img = cv2.drawContours(img, contours, biggestContourIndex, (255,255,0), 3)
    #img = cv2.drawContours(img, contours, secondBiggestIndex, (255,0,0), 3)
    # Draw every remaining contour, colour-coded by its area.
    for i in range(len(contours)):
        col = cv2.contourArea(contours[i]) / 20
        img = cv2.drawContours(img, contours, i, (0,255-col,col), 1)
    return img, bx, by
#print(str(len(contours)) + " " + str(secondBiggestIndex) + " " + str(biggestContourIndex))
def getBinocularDistance(xa, xb):
    """Triangulate the target distance (inches) from two camera images.

    xa -- target x position (pixels) in one camera's image
    xb -- target x position (pixels) in the other (left) camera's image

    Each pixel column is mapped to a ray angle in [fov, 180 - fov] degrees
    and then to that ray's slope; the two rays, separated by the camera
    baseline, intersect at the target.

    The caller must guarantee xa != xb, otherwise slopeA == slopeB and the
    final division raises ZeroDivisionError.
    """
    # Half of the inter-camera distance (inches).
    ipd = 2.1875
    # Ray angle (degrees) at pixel column 0; columns map linearly onto
    # [fov, 180 - fov].
    fov = 60.0
    # Horizontal camera resolution (pixels); assume max is 640.
    xRes = 640.0
    # BUG FIX: the original computed angleA/slopeA twice; the first pair of
    # assignments was dead code immediately overwritten by the second.
    normA = xa / xRes
    slopeA = math.tan((math.pi / 180.0) * (normA * 2 * (90.0 - fov) + fov))
    normB = xb / xRes
    slopeB = math.tan((math.pi / 180.0) * (normB * 2 * (90.0 - fov) + fov))
    # Intersection distance of the two rays along the cameras' shared axis.
    distance = (2 * ipd * slopeA * slopeB) / (slopeA - slopeB)
    return distance
# Main capture loop (Python 2 syntax): track the target in both cameras
# and print the estimated distance in feet whenever both cameras see it.
while(True):
    imgA, ax, ay = processCam(capA)
    imgB, bx, by = processCam(capB)
    #print str(ax) + " " + str(bx)
    # ax != bx also guards the division inside getBinocularDistance().
    if(ax > 0 and bx > 0 and ax != bx):
        #dist = 1
        # Convert inches to feet.
        print getBinocularDistance(ax,bx)/12
    # Display the resulting frames
    cv2.imshow('frameA',imgA)
    cv2.imshow('frameB',imgB)
    # Quit on the 'q' key.
    if cv2.waitKey(1) & 0xFF == ord('q'):
        break
# When everything done, release the capture
capA.release()
capB.release()
cv2.destroyAllWindows()
| Python | 0 | |
26dd65a282ada1e79309c4ff35cee4e49b086b66 | Create part3.py | part3.py | part3.py | import pygame
pygame.init()

# Window dimensions in pixels.
display_width = 800
display_height = 600

# RGB colour constants.
black = (0,0,0)
white = (255,255,255)
red = (255,0,0)

gameDisplay = pygame.display.set_mode((display_width,display_height))
pygame.display.set_caption('A bit Racey')
clock = pygame.time.Clock()
carImg = pygame.image.load('racecar.png')

def car(x,y):
    # Draw the car sprite with its top-left corner at (x, y).
    gameDisplay.blit(carImg,(x,y))

# Starting position: roughly centred horizontally, near the bottom.
x = (display_width * 0.45)
y = (display_height * 0.8)

x_change = 0

crashed = False

# Main loop: move the car left/right with the arrow keys until the
# window is closed.
while not crashed:

    for event in pygame.event.get():
        if event.type == pygame.QUIT:
            crashed = True

        if event.type == pygame.KEYDOWN:
            if event.key == pygame.K_LEFT:
                x_change = -5
            if event.key == pygame.K_RIGHT:
                x_change = 5

        if event.type == pygame.KEYUP:
            if event.key == pygame.K_LEFT or event.key == pygame.K_RIGHT:
                x_change = 0

    x += x_change
    gameDisplay.fill(white)
    car(x,y)

    pygame.display.update()
    clock.tick(60)  # cap the frame rate at 60 FPS

pygame.quit()
quit()
| Python | 0.000002 | |
98663d644b90e0e4c6188555501bcbc2b42d391a | Create part4.py | part4.py | part4.py | import pygame
pygame.init()

# Window dimensions in pixels.
display_width = 800
display_height = 600

# RGB colour constants.
black = (0,0,0)
white = (255,255,255)
red = (255,0,0)

# Width of the car sprite, used for the wall-collision check below.
car_width = 73

gameDisplay = pygame.display.set_mode((display_width,display_height))
pygame.display.set_caption('A bit Racey')
clock = pygame.time.Clock()
carImg = pygame.image.load('racecar.png')

def car(x,y):
    # Draw the car sprite with its top-left corner at (x, y).
    gameDisplay.blit(carImg,(x,y))

def game_loop():
    # Starting position: roughly centred horizontally, near the bottom.
    x = (display_width * 0.45)
    y = (display_height * 0.8)

    x_change = 0

    gameExit = False

    while not gameExit:

        for event in pygame.event.get():
            if event.type == pygame.QUIT:
                gameExit = True

            if event.type == pygame.KEYDOWN:
                if event.key == pygame.K_LEFT:
                    x_change = -5
                if event.key == pygame.K_RIGHT:
                    x_change = 5

            if event.type == pygame.KEYUP:
                if event.key == pygame.K_LEFT or event.key == pygame.K_RIGHT:
                    x_change = 0

        x += x_change
        gameDisplay.fill(white)
        car(x,y)

        # End the game when the car touches either side wall.
        if x > display_width - car_width or x < 0:
            gameExit = True

        pygame.display.update()
        clock.tick(60)  # cap the frame rate at 60 FPS

game_loop()
pygame.quit()
quit()
| Python | 0.000001 | |
4727d86e5207dac3f53018b4ff2d1d0ade97d4e6 | Add http_json external pillar (#32741) | salt/pillar/http_json.py | salt/pillar/http_json.py | # -*- coding: utf-8 -*-
"""
A module that adds data to the Pillar structure retrieved by an http request
Configuring the HTTP_JSON ext_pillar
====================================
Set the following Salt config to setup Foreman as external pillar source:
.. code-block:: json
ext_pillar:
- http_json:
url: http://example.com/api/minion_id
::TODO::
username: username
password: password
Module Documentation
====================
"""
from __future__ import absolute_import
# Import python libs
import logging
def ext_pillar(minion_id,
               pillar,  # pylint: disable=W0613
               url=None):
    """
    Read pillar data from HTTP response.

    :param url String to make request
    :returns dict with pillar data to add
    :returns empty if error
    """
    # Set up logging
    log = logging.getLogger(__name__)

    data = __salt__['http.query'](url=url, decode=True, decode_type='json')

    if 'dict' in data:
        return data['dict']

    # BUG FIX: dict.iteritems() does not exist on Python 3; use items().
    # Lazy %-formatting also keeps non-string values from raising TypeError
    # inside the log call, and adds the missing space before the URL.
    log.error('Error caught on query to %s\nMore Info:\n', url)

    for key, value in data.items():
        log.error('%s : %s', key, value)

    return {}
| Python | 0 | |
d2e5c2d20cf7e07f2dc8288d303e8f4088d5877a | Update module! | Modules/Update.py | Modules/Update.py | from ModuleInterface import ModuleInterface
from IRCResponse import IRCResponse, ResponseType
import GlobalVars
import re
import subprocess
class Module(ModuleInterface):
    triggers = ["update"]
    help = "update - pulls the latest code from GitHub"

    def onTrigger(self, Hubbot, message):
        """Fetch from GitHub, report the incoming commit subjects, then pull."""
        # Admin-only command.
        if message.User.Name not in GlobalVars.admins:
            return IRCResponse(ResponseType.Say, "Only my admins can update me!", message.ReplyTo)

        subprocess.call(["git", "fetch"])

        # Commit subjects appear indented by four spaces in `git whatchanged`.
        log = subprocess.check_output(["git", "whatchanged", "..origin/master"])
        changes = re.findall('\n\n\s{4}(.+?)\n\n', log)

        if not changes:
            return IRCResponse(ResponseType.Say, "The bot is already up to date.", message.ReplyTo)

        # Oldest commit first, pipe-separated.
        response = "New Commits: {}".format(" | ".join(reversed(changes)))
        subprocess.call(["git", "pull"])
        return IRCResponse(ResponseType.Say, response, message.ReplyTo)
555cfbb827532c54598cecde01ef4e6e5e07714d | Create a test for re-evaluating external tasks while a workflow is running. | test/worker_external_task_test.py | test/worker_external_task_test.py | # Copyright (c) 2015
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may not
# use this file except in compliance with the License. You may obtain a copy of
# the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations under
# the License.
import luigi
import unittest
from mock import Mock, patch
from helpers import with_config
# Shared external dependency: complete() reports False on the first two
# polls and True on the third, simulating a dependency that becomes
# satisfied while the workflow is already running.
mock_external_task = Mock(spec=luigi.ExternalTask)
mock_external_task.complete.side_effect = [False, False, True]
class TestTask(luigi.Task):
    """
    Requires a single file dependency.

    ``has_run`` records whether :meth:`run` was invoked, so the test can
    verify that the task eventually executed.
    """

    def __init__(self):
        super(TestTask, self).__init__()
        self.has_run = False

    def requires(self):
        # The shared module-level mock (incomplete twice, then complete).
        return mock_external_task

    def output(self):
        mock_target = Mock(spec=luigi.Target)
        # The return is False so that this task will be scheduled.
        mock_target.exists.return_value = False
        # BUG FIX: the target was created but never returned, so output()
        # silently yielded None instead of the mock.
        return mock_target

    def run(self):
        self.has_run = True
class WorkerExternalTaskTest(unittest.TestCase):

    @with_config({'core': {'retry-external-tasks': 'true'}})
    def test_external_dependency_satisified_later(self):
        """
        Test that an external dependency that is not `complete` when luigi is
        invoked, but becomes `complete` while the workflow is executing, is
        re-evaluated.
        """
        # Use unittest assertions instead of `assert x == True`, which is
        # non-idiomatic (E712) and silently skipped under `python -O`.
        self.assertTrue(luigi.configuration.get_config().getboolean(
            'core', 'retry-external-tasks', False))

        test_task = TestTask()
        luigi.build([test_task], local_scheduler=True)

        self.assertTrue(test_task.has_run)
        # complete() returned False twice before finally returning True.
        self.assertEqual(mock_external_task.complete.call_count, 3)
if __name__ == '__main__':
unittest.main()
| Python | 0 | |
6232295511ee780a6438c9cdfdcf576cb4f3d8e8 | Add python script to convert .OBJ data for import | obj2json.py | obj2json.py | # obj2json: Convert OBJ export from polyHedronisme to Slitherlink3D JSON data
import sys, json
# Module-level parser state, filled in while scanning the OBJ file.
faces = []        # list of faces; each face is a list of 0-based vertex indices
vertices = []     # list of [x, y, z] float coordinates
name = "unknown"  # polyhedron name, taken from the first "g"/"o" line seen
num_edges = 0     # running edge count (each face edge is shared by 2 faces)
class ParseError(SyntaxError):
    """Signals malformed or inconsistent OBJ input."""
def process(line):
    """Dispatch a single OBJ line to the matching input_* handler."""
    if line.startswith("#") or len(line) == 0:
        # Comments and blank lines carry no data.
        return
    if line.startswith("g") or line.startswith("o"):
        # "o" names an object, "g" names a polygon group.
        input_group(line)
    elif line.startswith("v "):
        # Plain vertices only -- distinguish "v" from "vt", "vn", "vp".
        input_vertex(line)
    elif line.startswith("f"):
        input_face(line)
    # Anything else (normals, materials, ...) is silently ignored; raising
    # warnings here is probably not worth it.
def input_group(line):
    """Record the group/object name from a "g"/"o" line, if one is given."""
    global name
    tokens = line.split()
    if len(tokens) > 1:
        name = tokens[1]
def input_vertex(line):
    """Parse a "v x y z" line and append the float coordinates to `vertices`."""
    global vertices  # Not strictly necessary, as currently implemented.
    tokens = line.split()
    if len(tokens) < 4:
        raise ParseError("Malformed vertex line: '%s'" % line)
    vertices.append([float(coord) for coord in tokens[1:]])
def input_face(line):
    """Parse an "f ..." line into 0-based vertex indices and append the face."""
    global faces, num_edges
    # Each whitespace-delimited cluster looks like "v", "v/vt" or "v/vt/vn";
    # only the vertex index (first field) matters here, and OBJ indices are
    # 1-based, hence the "- 1".
    vx_indices = []
    for index_group in line.split()[1:]:
        vx_indices.append(int(index_group.split('/')[0]) - 1)
    if len(vx_indices) < 3:
        raise ParseError("Invalid face line (not enough vertices): " + line)
    faces.append(vx_indices)
    # Each edge belongs to exactly two faces, so count half an edge per
    # face side.
    num_edges += len(vx_indices) / 2.0
    # TODO maybe: Catch cases where a vertex index is out of bounds.
def output():
    """Print the collected model as Slitherlink3D JSON on stdout."""
    # Could use indent=2 here but it's not what I want.
    document = {
        "id": name,
        "name": name,
        "nCells": len(faces),  # "cell" == "face"
        "nEdges": int(num_edges),
        "nVertices": len(vertices),
        # TODO: filter vertices and faces
        "vertices": vertices,
        "faces": faces,
        "puzzles": [],
    }
    print(json.dumps(document))
def main():
    """Convert the OBJ file named on the command line to JSON on stdout.

    Exits with status 1 on parse or I/O failure.
    """
    try:
        with open(sys.argv[1], "r") as f:
            for line in f:
                process(line.rstrip())
        # Sanity check via Euler's polyhedron formula: F + V = E + 2.
        if num_edges + 2 != len(faces) + len(vertices):
            raise ParseError("F + V != E + 2: %d + %d != %0.1f + 2" %
                             (len(faces), len(vertices), num_edges))
        output()
    except ParseError as e:
        print("Parse error: %s" % e.args)
        sys.exit(1)
    except IOError as e:
        print("Couldn't read file: %s" % e)
        sys.exit(1)
if __name__ == "__main__":
main()
| Python | 0.000001 | |
7edcf7e1aa4824dc18584b88f21b2dc4ff9cab98 | Use the requests.session() helper to get a Session() object. | rightscale/httpclient.py | rightscale/httpclient.py | from functools import partial
import requests
DEFAULT_ROOT_RES_PATH = '/'
class HTTPResponse(object):
    """
    Thin wrapper around :class:`requests.Response`.

    Splits the ``Content-Type`` header on ``;`` and exposes the stripped
    fields as the :attr:`content_type` list.  Every other attribute access
    is delegated to the wrapped response object.
    """
    def __init__(self, raw_response):
        self.raw_response = raw_response
        header = raw_response.headers.get('content-type', '')
        self.content_type = [part.strip() for part in header.split(';')]

    def __getattr__(self, name):
        # Fall through to the wrapped response for anything we don't
        # define ourselves (status_code, text, json(), ...).
        return getattr(self.raw_response, name)
class HTTPClient(object):
    """
    Convenience wrapper around Requests.

    :param str endpoint: URL for the API endpoint. E.g. ``https://blah.org``.

    :param dict extra_headers: When specified, these key-value pairs are added
        to the default HTTP headers passed in with each request.

    """
    def __init__(
            self,
            endpoint='',
            extra_headers=None,
            ):
        self.endpoint = endpoint

        # Use the documented constructor requests.Session(); the lowercase
        # requests.session() is only a legacy alias kept for compatibility.
        s = requests.Session()
        s.headers['Accept'] = 'application/json'
        if extra_headers:
            s.headers.update(extra_headers)
        self.s = s

        # convenience methods: client.get(...), client.post(...), etc.
        self.delete = partial(self.request, 'delete')
        self.get = partial(self.request, 'get')
        self.head = partial(self.request, 'head')
        self.post = partial(self.request, 'post')
        self.put = partial(self.request, 'put')

    def request(self, method, path='/', url=None, ignore_codes=[], **kwargs):
        """
        Performs HTTP request.

        :param str method: An HTTP method (e.g. 'get', 'post', 'PUT', etc...)

        :param str path: A path component of the target URL.  This will be
            appended to the value of ``self.endpoint``.  If both :attr:`path`
            and :attr:`url` are specified, the value in :attr:`url` is used and
            the :attr:`path` is ignored.

        :param str url: The target URL (e.g. ``http://server.tld/somepath/``).
            If both :attr:`path` and :attr:`url` are specified, the value in
            :attr:`url` is used and the :attr:`path` is ignored.

        :param ignore_codes: List of HTTP error codes (e.g. 404, 500) that
            should be ignored.  If an HTTP error occurs and it is *not* in
            :attr:`ignore_codes`, then an exception is raised.
        :type ignore_codes: list of int

        :param kwargs: Any other kwargs to pass to :meth:`requests.request()`.

        Returns a :class:`requests.Response` object.
        """
        _url = url if url else (self.endpoint + path)
        r = self.s.request(method, _url, **kwargs)
        if not r.ok and r.status_code not in ignore_codes:
            r.raise_for_status()
        return HTTPResponse(r)
| from functools import partial
import requests
DEFAULT_ROOT_RES_PATH = '/'
class HTTPResponse(object):
    """
    Wrapper around :class:`requests.Response`.

    Parses ``Content-Type`` header and makes it available as a list of fields
    in the :attr:`content_type` member.
    """
    def __init__(self, raw_response):
        self.raw_response = raw_response
        # Split "type/subtype; param=value" into stripped fields.
        content_type = raw_response.headers.get('content-type', '')
        ct_fields = [f.strip() for f in content_type.split(';')]
        self.content_type = ct_fields

    def __getattr__(self, name):
        # Delegate everything else to the wrapped requests.Response.
        return getattr(self.raw_response, name)
class HTTPClient(object):
    """
    Convenience wrapper around Requests.

    :param str endpoint: URL for the API endpoint. E.g. ``https://blah.org``.

    :param dict extra_headers: When specified, these key-value pairs are added
        to the default HTTP headers passed in with each request.

    """
    def __init__(
            self,
            endpoint='',
            extra_headers=None,
            ):
        self.endpoint = endpoint

        # One shared Session so connections, cookies and headers persist
        # across requests.
        s = requests.Session()
        s.headers['Accept'] = 'application/json'
        if extra_headers:
            s.headers.update(extra_headers)
        self.s = s

        # convenience methods: client.get(...), client.post(...), etc.
        self.delete = partial(self.request, 'delete')
        self.get = partial(self.request, 'get')
        self.head = partial(self.request, 'head')
        self.post = partial(self.request, 'post')
        self.put = partial(self.request, 'put')

    def request(self, method, path='/', url=None, ignore_codes=[], **kwargs):
        """
        Performs HTTP request.

        :param str method: An HTTP method (e.g. 'get', 'post', 'PUT', etc...)

        :param str path: A path component of the target URL.  This will be
            appended to the value of ``self.endpoint``.  If both :attr:`path`
            and :attr:`url` are specified, the value in :attr:`url` is used and
            the :attr:`path` is ignored.

        :param str url: The target URL (e.g. ``http://server.tld/somepath/``).
            If both :attr:`path` and :attr:`url` are specified, the value in
            :attr:`url` is used and the :attr:`path` is ignored.

        :param ignore_codes: List of HTTP error codes (e.g. 404, 500) that
            should be ignored.  If an HTTP error occurs and it is *not* in
            :attr:`ignore_codes`, then an exception is raised.
        :type ignore_codes: list of int

        :param kwargs: Any other kwargs to pass to :meth:`requests.request()`.

        Returns a :class:`requests.Response` object.
        """
        _url = url if url else (self.endpoint + path)
        r = self.s.request(method, _url, **kwargs)
        if not r.ok and r.status_code not in ignore_codes:
            r.raise_for_status()
        return HTTPResponse(r)
| Python | 0 |
d10505678fd5624e5e88f72ac7852109f149b264 | Add new kcov package (#14574) | var/spack/repos/builtin/packages/kcov/package.py | var/spack/repos/builtin/packages/kcov/package.py | # Copyright 2013-2020 Lawrence Livermore National Security, LLC and other
# Spack Project Developers. See the top-level COPYRIGHT file for details.
#
# SPDX-License-Identifier: (Apache-2.0 OR MIT)
from spack import *
class Kcov(CMakePackage):
    """Code coverage tool for compiled programs, Python and Bash which uses
    debugging information to collect and report data without special
    compilation options"""

    homepage = "http://simonkagstrom.github.io/kcov/index.html"
    url = "https://github.com/SimonKagstrom/kcov/archive/38.tar.gz"

    version('38', sha256='b37af60d81a9b1e3b140f9473bdcb7975af12040feb24cc666f9bb2bb0be68b4')

    depends_on('cmake@2.8.4:', type='build')
    depends_on('zlib')
    depends_on('curl')

    def cmake_args(self):
        # Necessary at least on macOS, fixes linking error to LLDB
        # https://github.com/Homebrew/homebrew-core/blob/master/Formula/kcov.rb
        return ['-DSPECIFY_RPATH=ON']

    @run_after('install')
    @on_package_attributes(run_tests=True)
    def test(self):
        # Post-install smoke test: just run the binary.
        # The help message exits with an exit code of 1
        kcov = Executable(self.prefix.bin.kcov)
        kcov('-h', ignore_errors=1)
| Python | 0 | |
303ff96b42b7eb41bb56f8a7f1d03a5319b6ba64 | Create configurator.py | configurator.py | configurator.py | #!/usr/bin/python3
import os
import sys
import json
from http.server import BaseHTTPRequestHandler, HTTPServer
from urllib.parse import urlparse, parse_qs
# Root of the configuration tree served and edited by this tool.
BASEDIR = "."
# Bind address and port for the embedded HTTP server.
LISTENIP = "0.0.0.0"
LISTENPORT = 3218
INDEX = """<!DOCTYPE html>
<html>
<head>
<title>HASS-PoC-Configurator</title>
<meta charset="UTF-8">
<script src="https://cdnjs.cloudflare.com/ajax/libs/jquery/1.12.1/jquery.min.js"></script>
<link rel="stylesheet" href="https://cdnjs.cloudflare.com/ajax/libs/jstree/3.2.1/themes/default/style.min.css" />
<script src="https://cdnjs.cloudflare.com/ajax/libs/jstree/3.2.1/jstree.min.js"></script>
<style type="text/css" media="screen">
body {
font-family: sans-serif;
}
#menu {
position:relative;
width: 19%;
float: left;
}
#buttons {
position: absolute;
top: 0;
right: 0;
}
#editor {
position: absolute;
top: 0;
right: 0;
bottom: 0;
left: 20%;
}
</style>
</head>
<body>
<div id="menu">
<div id="buttons">
<button id="savebutton" type="button" onclick="save()">Save</button><br />
</div>
<div id="tree"></div>
</div>
<div id="editor"></div>
</body>
<script>
$('#tree').jstree(
{
'core' : {
'data' : {
'url' : '/api/files'
}
}
});
$('#tree').on("select_node.jstree", function (e, data) { load(); });
function load() {
var n = $("#tree").jstree("get_selected");
if (n) {
$.get("api/file?filename=" + n[0], function( data ) {
editor.setValue(data);
//editor.getValue(); // or session.getValue
});
}
}
function save() {
var n = $("#tree").jstree("get_selected");
if (n) {
data = new Object();
data.filename = n[0];
data.text = editor.getValue()
$.post("api/save", data).done(
function( resp ) {
alert( resp );
}
);
}
}
</script>
<script src="https://cdnjs.cloudflare.com/ajax/libs/ace/1.2.6/ace.js" type="text/javascript" charset="utf-8"></script>
<script src="https://cdnjs.cloudflare.com/ajax/libs/ace/1.2.6/ext-modelist.js" type="text/javascript" charset="utf-8"></script>
<script>
var editor = ace.edit("editor");
editor.getSession().setMode("ace/mode/yaml");
editor.setOption("showInvisibles", true);
editor.setOption("useSoftTabs", true);
editor.$blockScrolling = Infinity;
</script>
</html>
"""
class Node:
    """One jsTree node describing a file or directory entry."""

    def __init__(self, id, text, parent):
        self.id = id
        self.text = text
        self.parent = parent
        # Existing files get the file icon; everything else is a folder.
        if os.path.isfile(os.path.join(parent, text)):
            self.icon = "jstree-file"
        else:
            self.icon = 'jstree-folder'
        # Only the root node ('.') starts expanded in the tree view.
        self.state = {'opened': self.id == '.'}

    def is_equal(self, node):
        """Two nodes represent the same tree entry when their ids match."""
        return self.id == node.id

    def as_json(self):
        """Return the dict representation expected by jsTree."""
        return {
            'id': self.id,
            'parent': self.parent,
            'text': self.text,
            'icon': self.icon,
            'state': self.state,
        }
def get_nodes_from_path(path):
    """Expand a file path into Node objects for itself and every ancestor.

    Only files ending in .yaml or .conf are kept; directory components are
    always kept.  The top-most component gets "#" as its parent, which is
    how jsTree marks root nodes.
    """
    nodes = []
    path_nodes = path.split(os.sep)
    for idx, node_name in enumerate(path_nodes):
        parent = None
        # Node id is the cumulative path down to this component.
        node_id = os.sep.join(path_nodes[0:idx+1])
        if idx != 0:
            parent = os.sep.join(path_nodes[0:idx])
            # Skip files that are not YAML/conf; directories fall through.
            if os.path.isfile(os.path.join(parent, node_name)) and (not node_name.endswith('.yaml') and not node_name.endswith('.conf')):
                continue
        else:
            parent = "#"
        nodes.append(Node(node_id, node_name, parent))
    return nodes
def getdirs(searchpath):
    """Walk `searchpath` and return the jsTree JSON for all config files.

    Skips the ./deps, ./.git and ./www trees and de-duplicates nodes that
    appear on multiple file paths.
    """
    unique_nodes = []
    for root, dirs, files in os.walk(searchpath, topdown=True):
        if './deps' not in root and './.git' not in root and './www' not in root:
            for name in files:
                path = os.path.join(root, name)
                nodes = get_nodes_from_path(path)
                for node in nodes:
                    # Keep each tree entry only once.
                    if not any(node.is_equal(unode) for unode in unique_nodes):
                        unique_nodes.append(node)
    return json.dumps([node.as_json() for node in unique_nodes])
class RequestHandler(BaseHTTPRequestHandler):
    """HTTP handler serving the single-page UI plus a tiny file API.

    GET  /            -> the inlined INDEX page
    GET  /api/files   -> jsTree JSON listing of BASEDIR
    GET  /api/file    -> raw contents of ?filename=<path under BASEDIR>
    POST (any path)   -> write posted `text` to `filename` under BASEDIR
    """

    def _resolve(self, filename):
        """Map a client-supplied filename to a path inside BASEDIR.

        SECURITY FIX: the filename used to be joined to BASEDIR unchecked,
        so absolute paths or '..' components could read/write any file on
        the host.  Returns the absolute path, or None when the name would
        escape BASEDIR.
        """
        base = os.path.abspath(BASEDIR)
        path = os.path.abspath(os.path.join(base, filename))
        if path == base or path.startswith(base + os.sep):
            return path
        return None

    def do_GET(self):
        req = urlparse(self.path)
        query = parse_qs(req.query)
        self.send_response(200)
        if req.path == '/api/files':
            self.send_header('Content-type', 'text/json')
            self.end_headers()
            self.wfile.write(bytes(getdirs(BASEDIR), "utf8"))
            return
        elif req.path == '/api/file':
            content = ""
            self.send_header('Content-type', 'text/text')
            self.end_headers()
            filename = query.get('filename', None)
            if filename:
                # Refuse paths that resolve outside BASEDIR.
                path = self._resolve(filename[0])
                if path and os.path.isfile(path):
                    with open(path) as fptr:
                        content += fptr.read()
            self.wfile.write(bytes(content, "utf8"))
            return
        # Default: serve the inlined single-page editor UI.
        self.send_header('Content-type', 'text/html')
        self.end_headers()
        self.wfile.write(bytes(INDEX, "utf8"))
        return

    def do_POST(self):
        postvars = {}
        response = "Failure"
        try:
            length = int(self.headers['content-length'])
            postvars = parse_qs(self.rfile.read(length).decode('utf-8'),
                                keep_blank_values=1)
        except Exception as err:
            print(err)
            response = "%s" % (str(err))

        if 'filename' in postvars.keys() and 'text' in postvars.keys():
            if postvars['filename'] and postvars['text']:
                try:
                    # Refuse paths that resolve outside BASEDIR.
                    path = self._resolve(postvars['filename'][0])
                    if path is None:
                        raise ValueError("Invalid filename")
                    with open(path, 'wb') as fptr:
                        fptr.write(bytes(postvars['text'][0], "utf-8"))
                    self.send_response(200)
                    self.end_headers()
                    self.wfile.write(bytes("File saved successfully", "utf8"))
                    return
                except Exception as err:
                    response = "%s" % (str(err))
                    print(err)
            else:
                response = "Missing filename or text"

        self.send_response(200)
        self.end_headers()
        self.wfile.write(bytes(response, "utf8"))
        return
def run():
    """Start the configurator HTTP server on LISTENIP:LISTENPORT and
    block forever serving requests."""
    print('starting server...')
    server_address = (LISTENIP, LISTENPORT)
    httpd = HTTPServer(server_address, RequestHandler)
    print('running server...')
    httpd.serve_forever()

# Script entry point: start serving immediately on import/execution.
run()
| Python | 0.000001 | |
a6e65ac7378b12cc6889199cac602a8fbee4b6e8 | add nagios check on autoplot metrics | nagios/check_autoplot.py | nagios/check_autoplot.py | """Check autoplot stats"""
from __future__ import print_function
import sys
import psycopg2
def main():
    """Query recent autoplot timing stats and emit a Nagios status line."""
    dbconn = psycopg2.connect(database='mesosite', host='iemdb',
                              user='nobody')
    cursor = dbconn.cursor()
    # Request count and mean render time over the last four hours.
    cursor.execute("""
    select count(*), avg(timing) from autoplot_timing
    where valid > now() - '4 hours'::interval
    """)
    row = cursor.fetchone()
    count = row[0]
    # avg() is NULL when there were no rows; report 0 in that case.
    speed = row[1] if row[1] is not None else 0
    msg = ("Autoplot cnt:%s speed:%.2f | COUNT=%s;; SPEED=%.3f;;"
           ) % (count, speed, count, speed)
    print(msg)
    sys.exit(0)
if __name__ == '__main__':
main()
| Python | 0 | |
81b713d69408f6b5712f67d7707bbb17f9588ef6 | Update __init__.py | tendrl/node_agent/manager/__init__.py | tendrl/node_agent/manager/__init__.py | import signal
import threading
from tendrl.commons.event import Event
from tendrl.commons import manager as commons_manager
from tendrl.commons.message import Message
from tendrl.commons import TendrlNS
from tendrl.node_agent.provisioner.gluster.manager import \
ProvisioningManager as GlusterProvisioningManager
from tendrl import node_agent
from tendrl.node_agent.message.handler import MessageHandler
from tendrl.node_agent import node_sync
from tendrl.integrations.gluster import GlusterIntegrationNS
class NodeAgentManager(commons_manager.Manager):
    """Manager wiring the node-agent's sync and message-handler threads."""

    def __init__(self):
        # Initialize the state sync thread which gets the underlying
        # node details and pushes the same to etcd
        super(NodeAgentManager, self).__init__(
            NS.state_sync_thread,
            message_handler_thread=NS.message_handler_thread
        )
        # One-shot detection of the platform and installed storage systems.
        node_sync.platform_detect.sync()
        node_sync.sds_detect.sync()
def main():
    """Initialise all tendrl namespaces, start the node-agent manager and
    block until a termination signal arrives."""
    # NS.node_agent contains the config object,
    # hence initialize it before any other NS
    node_agent.NodeAgentNS()
    # Init NS.tendrl
    TendrlNS()

    # Init NS.provisioning
    # TODO(team) remove NS.provisioner and use NS.provisioning.{ceph, gluster}
    # provisioning.ProvisioningNS()

    # Init NS.integrations.ceph
    # TODO(team) add all short circuited ceph(import/create) NS.tendrl.flows
    # to NS.integrations.ceph
    # ceph.CephIntegrationNS()

    # Init NS.integrations.gluster
    # TODO(team) add all short circuited ceph(import/create) NS.tendrl.flows
    # to NS.integrations.ceph
    GlusterIntegrationNS()

    # Compile all definitions
    NS.compiled_definitions = \
        NS.node_agent.objects.CompiledDefinitions()
    NS.compiled_definitions.merge_definitions([
        NS.tendrl.definitions, NS.node_agent.definitions,
        NS.integrations.gluster.definitions])
    NS.node_agent.compiled_definitions = NS.compiled_definitions

    # Every process needs to set a NS.type
    # Allowed types are "node", "integration", "monitoring"
    NS.type = "node"

    NS.first_node_inventory_sync = True
    NS.state_sync_thread = node_sync.NodeAgentSyncThread()

    # Persist the compiled state so other tendrl components can read it.
    NS.compiled_definitions.save()
    NS.node_context.save()

    NS.tendrl_context.save()
    NS.node_agent.definitions.save()
    # NS.integrations.ceph.definitions.save()
    NS.node_agent.config.save()
    NS.publisher_id = "node_agent"

    NS.message_handler_thread = MessageHandler()

    NS.gluster_provisioner = GlusterProvisioningManager(
        NS.tendrl.definitions.get_parsed_defs()["namespace.tendrl"][
            'gluster_provisioner']
    )

    if NS.config.data.get("with_internal_profiling", False):
        from tendrl.commons import profiler
        profiler.start()

    NS.gluster_sds_sync_running = False

    m = NodeAgentManager()
    m.start()

    complete = threading.Event()

    def shutdown(signum, frame):
        # SIGTERM/SIGINT: log, wake the main loop and stop worker threads.
        Event(
            Message(
                priority="debug",
                publisher=NS.publisher_id,
                payload={"message": "Signal handler: stopping"}
            )
        )
        complete.set()
        m.stop()
        if NS.gluster_sds_sync_running:
            NS.gluster_integrations_sync_thread.stop()

    def reload_config(signum, frame):
        # SIGHUP: log, re-read the configuration and persist it.
        Event(
            Message(
                priority="debug",
                publisher=NS.publisher_id,
                payload={"message": "Signal handler: SIGHUP"}
            )
        )
        NS.config = NS.config.__class__()
        NS.config.save()

    signal.signal(signal.SIGTERM, shutdown)
    signal.signal(signal.SIGINT, shutdown)
    signal.signal(signal.SIGHUP, reload_config)

    # Sleep in short intervals so the signal handlers run promptly.
    while not complete.is_set():
        complete.wait(timeout=1)
if __name__ == "__main__":
main()
| import signal
import threading
from tendrl.commons.event import Event
from tendrl.commons import manager as commons_manager
from tendrl.commons.message import Message
from tendrl.commons import TendrlNS
from tendrl.node_agent.provisioner.gluster.manager import \
ProvisioningManager as GlusterProvisioningManager
from tendrl import node_agent
from tendrl.node_agent.message.handler import MessageHandler
from tendrl.node_agent import node_sync
from tendrl.integrations.gluster import GlusterIntegrationNS
class NodeAgentManager(commons_manager.Manager):
    """Manager wiring the node-agent's sync and message-handler threads."""

    def __init__(self):
        # Initialize the state sync thread which gets the underlying
        # node details and pushes the same to etcd
        super(NodeAgentManager, self).__init__(
            NS.state_sync_thread,
            message_handler_thread=NS.message_handler_thread
        )
        # One-shot detection of the platform and installed storage systems.
        node_sync.platform_detect.sync()
        node_sync.sds_detect.sync()
def main():
    """Initialise all tendrl namespaces, start the node-agent manager and
    block until a termination signal arrives."""
    # NS.node_agent contains the config object,
    # hence initialize it before any other NS
    node_agent.NodeAgentNS()
    # Init NS.tendrl
    TendrlNS()

    # Init NS.provisioning
    # TODO(team) remove NS.provisioner and use NS.provisioning.{ceph, gluster}
    # provisioning.ProvisioningNS()

    # Init NS.integrations.ceph
    # TODO(team) add all short circuited ceph(import/create) NS.tendrl.flows
    # to NS.integrations.ceph
    # ceph.CephIntegrationNS()

    # Init NS.integrations.gluster
    # TODO(team) add all short circuited ceph(import/create) NS.tendrl.flows
    # to NS.integrations.ceph
    GlusterIntegrationNS()

    # Compile all definitions
    NS.compiled_definitions = \
        NS.node_agent.objects.CompiledDefinitions()
    NS.compiled_definitions.merge_definitions([
        NS.tendrl.definitions, NS.node_agent.definitions,
        NS.integrations.gluster.definitions])
    NS.node_agent.compiled_definitions = NS.compiled_definitions

    # Every process needs to set a NS.type
    # Allowed types are "node", "integration", "monitoring"
    NS.type = "node"

    NS.first_node_inventory_sync = True
    NS.state_sync_thread = node_sync.NodeAgentSyncThread()

    # Persist the compiled state so other tendrl components can read it.
    NS.compiled_definitions.save()
    NS.node_context.save()

    NS.tendrl_context.save()
    NS.node_agent.definitions.save()
    # NS.integrations.ceph.definitions.save()
    NS.node_agent.config.save()
    NS.publisher_id = "node_agent"

    NS.message_handler_thread = MessageHandler()

    NS.gluster_provisioner = GlusterProvisioningManager(
        NS.tendrl.definitions.get_parsed_defs()["namespace.tendrl"][
            'gluster_provisioner']
    )

    if NS.config.data.get("with_internal_profiling", False):
        from tendrl.commons import profiler
        profiler.start()

    NS.gluster_sds_sync_running = False

    m = NodeAgentManager()
    m.start()

    complete = threading.Event()

    def shutdown(signum, frame):
        # SIGTERM/SIGINT: log, wake the main loop and stop worker threads.
        Event(
            Message(
                priority="debug",
                publisher=NS.publisher_id,
                payload={"message": "Signal handler: stopping"}
            )
        )
        complete.set()
        m.stop()
        if NS.gluster_sds_sync_running:
            NS.gluster_integrations_sync_thread.stop()

    def reload_config(signum, frame):
        # SIGHUP: log, re-read the configuration and persist it.
        Event(
            Message(
                priority="debug",
                publisher=NS.publisher_id,
                payload={"message": "Signal handler: SIGHUP"}
            )
        )
        NS.config = NS.config.__class__()
        # BUG FIX: the reloaded configuration was never persisted, unlike
        # the startup path (NS.node_agent.config.save()) and the sibling
        # copy of this module which calls NS.config.save() here.
        NS.config.save()

    signal.signal(signal.SIGTERM, shutdown)
    signal.signal(signal.SIGINT, shutdown)
    signal.signal(signal.SIGHUP, reload_config)

    # Sleep in short intervals so the signal handlers run promptly.
    while not complete.is_set():
        complete.wait(timeout=1)
if __name__ == "__main__":
main()
| Python | 0.000072 |
b39eeea0b25e1e5bcec1d762a041e5ecf465885c | add solution for Reorder List | src/reorderList.py | src/reorderList.py | # Definition for singly-linked list.
# class ListNode:
# def __init__(self, x):
# self.val = x
# self.next = None
class Solution:
    # @param head, a ListNode
    # @return nothing
    def reorderList(self, head):
        """Reorder L0->L1->...->Ln in place into L0->Ln->L1->Ln-1->...

        Finds the middle with two pointers, reverses the back half, then
        interleaves the two halves.  O(n) time, O(1) extra space.
        """
        if head is None or head.next is None:
            return
        # `runner` advances two steps for every `mid` step.
        mid = head
        runner = head
        while runner.next and runner.next.next:
            mid = mid.next
            runner = runner.next.next
        back = mid.next
        mid.next = None
        back = self.reverseList(back)
        self.merge2Lists(head, back)

    def reverseList(self, head):
        """Reverse a singly linked list iteratively; return the new head."""
        prev = None
        node = head
        while node:
            nxt = node.next
            node.next = prev
            prev = node
            node = nxt
        return prev

    def merge2Lists(self, l1, l2):
        """Interleave the nodes of l2 into l1 (l2 must not be longer)."""
        while l2:
            rest1 = l1.next
            rest2 = l2.next
            l1.next = l2
            l2.next = rest1
            l1 = rest1
            l2 = rest2
| Python | 0 | |
68e056459dd3818ebb0c5dbdc8b4f1089bec9f07 | Add a few behavior tests for selection | tests/selection_test.py | tests/selection_test.py | import os
import pytest
import yaml
from photoshell.selection import Selection
@pytest.fixture
def sidecar(tmpdir):
    # Write a minimal sidecar YAML next to the (nonexistent) developed
    # photo so selection code has real metadata to load.
    tmpdir.join("test.sidecar").write(yaml.dump({
        'developed_path': os.path.join(tmpdir.strpath, "test.jpeg"),
        'datetime': '2014-10-10 00:00'
    }, default_flow_style=False))

    return os.path.join(tmpdir.strpath, "test.sidecar")


@pytest.fixture
def empty_selection():
    # A Selection with an empty query/tag and nothing loaded.
    s = Selection('', '')
    return s


@pytest.fixture
def selection(empty_selection):
    # Same as empty_selection but pre-populated with a single entry.
    empty_selection.images.append('image')
    empty_selection.photos.append('image')
    return empty_selection
def test_current_default_selection(selection):
    # A populated selection always exposes a current image.
    assert selection.current()


def test_current_is_none_if_selection_empty(empty_selection):
    assert empty_selection.current() is None


def test_current_photo_default_selection(selection):
    assert selection.current_photo()


def test_current_photo_is_none_if_selection_empty(empty_selection):
    assert empty_selection.current_photo() is None


def test_next_prev_does_nothing_single_photo(selection):
    # With only one image, navigation must stay on that image.
    assert selection.current() == selection.next()
    assert selection.current() == selection.prev()


def test_next_prev_wrap_around(selection):
    # Add a second entry, then check navigation wraps in both directions.
    selection.photos.append('photo2')
    selection.images.append('image2')

    assert selection.next() == 'image2'
    assert selection.next() == 'image'
    assert selection.prev() == 'image2'
    assert selection.prev() == 'image'
| Python | 0 | |
63a34000402f4253f16221b11d620e65e1786447 | add solution for Reverse Bits | src/reverseBits.py | src/reverseBits.py | class Solution:
# @param n, an integer
# @return an integer
def reverseBits(self, n):
return int(bin(n)[2:].zfill(32)[::-1], 2)
| Python | 0.000001 | |
f81a612eabf5972d15a5b3f11d12897530cbf155 | Add dump-tree command (wip) | cvsgit/command/dump-tree.py | cvsgit/command/dump-tree.py | """Command to dump the full state of the source tree at a certain
point in time."""
import re
import subprocess
from subprocess import PIPE
import sys
from cvsgit.cvs import split_cvs_source
from cvsgit.i18n import _
from cvsgit.main import Command, Conduit
from cvsgit.utils import Tempdir, stripnl
class dump_tree(Command):
__doc__ = _(
"""Dump the source tree state at a certain date
Usage: %prog <date>
Computes and dumps the state of the source tree as it was at the
given <date>.
""")
def initialize_options(self):
pass
def finalize_options(self):
if len(self.args) > 0:
self.usage_error(_('too many arguments'))
def run(self):
conduit = Conduit()
cvs = conduit.cvs
for changeset in cvs.changesets():
print changeset
if __name__ == '__main__':
dump_tree()
| Python | 0.000003 | |
ff2c4b68a5eace4451eeef4fd6ca84d37435c556 | Add fields to privatemessage for network invitations. | project/editorial/migrations/0087_auto_20180226_1409.py | project/editorial/migrations/0087_auto_20180226_1409.py | # -*- coding: utf-8 -*-
# Generated by Django 1.11.9 on 2018-02-26 22:09
from __future__ import unicode_literals
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
    # Adds PrivateMessage fields used to send and answer network
    # invitations.  Auto-generated by Django; do not hand-edit the field
    # definitions.

    dependencies = [
        ('editorial', '0086_auto_20180102_2145'),
    ]

    operations = [
        migrations.AddField(
            model_name='privatemessage',
            name='network_invitation',
            field=models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.CASCADE, to='editorial.Network'),
        ),
        migrations.AddField(
            model_name='privatemessage',
            name='network_invitation_response',
            # NullBooleanField: None = no response yet, True/False = answer.
            field=models.NullBooleanField(),
        ),
    ]
| Python | 0 | |
457e94d21e7bf237fc0b3e43e1154e948177a418 | Add ipadm_addrprop module (#19415) | lib/ansible/modules/network/illumos/ipadm_addrprop.py | lib/ansible/modules/network/illumos/ipadm_addrprop.py | #!/usr/bin/python
# -*- coding: utf-8 -*-
# (c) 2016, Adam Števko <adam.stevko@gmail.com>
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
#
ANSIBLE_METADATA = {'status': ['preview'],
'supported_by': 'community',
'version': '1.0'}
DOCUMENTATION = '''
---
module: ipadm_addrprop
short_description: Manage IP address properties on Solaris/illumos systems.
description:
- Modify IP address properties on Solaris/illumos systems.
version_added: "2.3"
author: Adam Števko (@xen0l)
options:
addrobj:
description:
- Specifies the address object we want to manage.
required: true
aliases: [nic, interface]
property:
description:
- Specifies the name of the address property we want to manage.
required: true
aliases: [name]
value:
description:
- Specifies the value we want to set for the address property.
required: false
temporary:
description:
- Specifies that the address property value is temporary.
Temporary values do not persist across reboots.
required: false
default: false
state:
description:
- Set or reset the property value.
required: false
default: present
choices: [ "present", "absent", "reset" ]
'''
EXAMPLES = '''
name: Mark address on addrobj as deprecated
ipadm_addrprop: property=deprecated value=on addrobj=e1000g0/v6
name: Set network prefix length for addrobj
ipadm_addrprop: addrobj=bge0/v4 name=prefixlen value=26
'''
RETURN = '''
property:
description: property name
returned: always
type: string
sample: deprecated
addrobj:
description: address object name
returned: always
type: string
sample: bge0/v4
state:
description: state of the target
returned: always
type: string
sample: present
temporary:
description: specifies if operation will persist across reboots
returned: always
type: boolean
sample: True
value:
description: property value
returned: when value is provided
type: string
sample: 26
'''
from ansible.module_utils.basic import AnsibleModule
class AddrProp(object):
    """Thin wrapper around the illumos `ipadm` CLI for address properties."""

    def __init__(self, module):
        self.module = module
        self.addrobj = module.params['addrobj']
        self.property = module.params['property']
        self.value = module.params['value']
        self.temporary = module.params['temporary']
        self.state = module.params['state']

    def property_exists(self):
        """Return True if the property is valid for the addrobj; otherwise fail the module."""
        cmd = [self.module.get_bin_path('ipadm')]
        cmd.append('show-addrprop')
        cmd.append('-p')
        cmd.append(self.property)
        cmd.append(self.addrobj)

        (rc, _, _) = self.module.run_command(cmd)

        if rc == 0:
            return True
        else:
            self.module.fail_json(msg='Unknown property "%s" on addrobj %s' %
                                  (self.property, self.addrobj),
                                  property=self.property,
                                  addrobj=self.addrobj)

    def property_is_modified(self):
        """Return True when the current value EQUALS the default.

        NOTE(review): the name is inverted -- this really answers
        "is the property NOT modified?".  main() compensates by negating
        the result, so fixing the name/logic requires changing both
        places together.
        """
        cmd = [self.module.get_bin_path('ipadm')]
        cmd.append('show-addrprop')
        cmd.append('-c')
        cmd.append('-o')
        cmd.append('current,default')
        cmd.append('-p')
        cmd.append(self.property)
        cmd.append(self.addrobj)

        (rc, out, _) = self.module.run_command(cmd)

        out = out.rstrip()
        # Parsable (-c) output is "current:default".
        (value, default) = out.split(':')

        if rc == 0 and value == default:
            return True
        else:
            return False

    def property_is_set(self):
        """Return True when the current value equals the requested value."""
        cmd = [self.module.get_bin_path('ipadm')]
        cmd.append('show-addrprop')
        cmd.append('-c')
        cmd.append('-o')
        cmd.append('current')
        cmd.append('-p')
        cmd.append(self.property)
        cmd.append(self.addrobj)

        (rc, out, _) = self.module.run_command(cmd)

        out = out.rstrip()

        if rc == 0 and self.value == out:
            return True
        else:
            return False

    def set_property(self):
        """Run `ipadm set-addrprop`; returns (rc, stdout, stderr)."""
        cmd = [self.module.get_bin_path('ipadm')]
        cmd.append('set-addrprop')

        if self.temporary:
            cmd.append('-t')

        cmd.append('-p')
        cmd.append(self.property + '=' + self.value)
        cmd.append(self.addrobj)

        return self.module.run_command(cmd)

    def reset_property(self):
        """Run `ipadm reset-addrprop`; returns (rc, stdout, stderr)."""
        cmd = [self.module.get_bin_path('ipadm')]
        cmd.append('reset-addrprop')

        if self.temporary:
            cmd.append('-t')

        cmd.append('-p')
        cmd.append(self.property)
        cmd.append(self.addrobj)

        return self.module.run_command(cmd)
def main():
    """Entry point: enforce the requested address-property state via ipadm."""
    module = AnsibleModule(
        argument_spec=dict(
            # BUG FIX: aliases was ['nic, interface'] -- a single malformed
            # string instead of two alias names.  Also dropped the redundant
            # default=None, which conflicts with required=True.
            addrobj=dict(required=True, aliases=['nic', 'interface']),
            property=dict(required=True, aliases=['name']),
            value=dict(required=False),
            temporary=dict(default=False, type='bool'),
            state=dict(
                default='present', choices=['absent', 'present', 'reset']),
        ),
        supports_check_mode=True
    )

    addrprop = AddrProp(module)

    rc = None
    out = ''
    err = ''
    result = {}
    result['property'] = addrprop.property
    result['addrobj'] = addrprop.addrobj
    result['state'] = addrprop.state
    result['temporary'] = addrprop.temporary
    if addrprop.value:
        result['value'] = addrprop.value

    if addrprop.state == 'absent' or addrprop.state == 'reset':
        if addrprop.property_exists():
            # property_is_modified() returns True when current == default,
            # so reset only when it returns False (value differs).
            if not addrprop.property_is_modified():
                if module.check_mode:
                    module.exit_json(changed=True)

                (rc, out, err) = addrprop.reset_property()
                if rc != 0:
                    module.fail_json(property=addrprop.property,
                                     addrobj=addrprop.addrobj,
                                     msg=err,
                                     rc=rc)
    elif addrprop.state == 'present':
        if addrprop.value is None:
            module.fail_json(msg='Value is mandatory with state "present"')

        if addrprop.property_exists():
            if not addrprop.property_is_set():
                if module.check_mode:
                    module.exit_json(changed=True)

                (rc, out, err) = addrprop.set_property()
                if rc != 0:
                    module.fail_json(property=addrprop.property,
                                     addrobj=addrprop.addrobj,
                                     msg=err,
                                     rc=rc)

    # rc stays None when no command was run, i.e. nothing changed.
    if rc is None:
        result['changed'] = False
    else:
        result['changed'] = True

    if out:
        result['stdout'] = out
    if err:
        result['stderr'] = err

    module.exit_json(**result)

if __name__ == '__main__':
    main()
| Python | 0 | |
136dd3f0b5dd9d8eecb6e7bc20c25d4d2c131ad6 | add new tool to list shared libraries deps | cerbero/tools/depstracker.py | cerbero/tools/depstracker.py | # cerbero - a multi-platform build system for Open Source software
# Copyright (C) 2013 Andoni Morales Alastruey <ylatuya@gmail.com>
#
# This library is free software; you can redistribute it and/or
# modify it under the terms of the GNU Library General Public
# License as published by the Free Software Foundation; either
# version 2 of the License, or (at your option) any later version.
#
# This library is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# Library General Public License for more details.
#
# You should have received a copy of the GNU Library General Public
# License along with this library; if not, write to the
# Free Software Foundation, Inc., 59 Temple Place - Suite 330,
# Boston, MA 02111-1307, USA.
import os
from cerbero.config import Platform
from cerbero.utils import shell
class RecursiveLister():
    """Base class that orders a binary's shared-library deps depth-first.

    Subclasses provide list_file_deps() for a concrete platform tool
    (objdump, otool, ...).
    """

    def list_file_deps(self, prefix, path):
        """Return the direct dependencies of *path*; implemented by subclasses."""
        # BUG FIX: was `raise NotImplemented()` -- NotImplemented is a plain
        # constant, not callable, so that line raised TypeError instead of
        # the intended NotImplementedError.
        raise NotImplementedError()

    def find_deps(self, prefix, lib, state=None, ordered=None):
        """Depth-first walk of *lib*'s dependency graph.

        Returns the libraries in dependency order (dependencies first,
        *lib* last).  *state* tracks 'in-progress'/'processed' nodes to
        break cycles.
        """
        # BUG FIX: the defaults were the mutable literals {} / [] which are
        # created once and shared between calls; use None sentinels instead.
        if state is None:
            state = {}
        if ordered is None:
            ordered = []
        if state.get(lib, 'clean') == 'processed':
            return
        if state.get(lib, 'clean') == 'in-progress':
            return
        state[lib] = 'in-progress'
        lib_deps = self.list_file_deps(prefix, lib)
        for libdep in lib_deps:
            self.find_deps(prefix, libdep, state, ordered)
        state[lib] = 'processed'
        ordered.append(lib)
        return ordered

    def list_deps(self, prefix, path):
        """Return the ordered dependency list of *path* (symlinks resolved)."""
        return self.find_deps(prefix, os.path.realpath(path), {}, [])
class ObjdumpLister(RecursiveLister):
    # Windows: extract DLL dependencies from `objdump -x` output.

    def list_file_deps(self, prefix, path):
        files = shell.check_call('objdump -x %s' % path).split('\n')
        # Lines look like "  DLL Name: foo.dll"; take the name token and
        # drop the trailing carriage-return/newline remnant.
        files = [x.split(' ')[2][:-1] for x in files if 'DLL ' in x]
        files = [os.path.join(prefix, 'bin', x) for x in files if \
                 x.lower().endswith('dll')]
        # Only keep DLLs that actually exist inside the prefix.
        return [os.path.realpath(x) for x in files if os.path.exists(x)]


class OtoolLister(RecursiveLister):
    # macOS: extract library dependencies (restricted to *prefix*) from
    # `otool -L` output; first line is the file itself and is skipped.

    def list_file_deps(self, prefix, path):
        files = shell.check_call('otool -L %s' % path).split('\n')[1:]
        return [x.split(' ')[0][1:] for x in files if prefix in x]


class LddLister():
    # Linux: no recursion needed -- `ldd` already reports transitive deps.

    def list_deps(self, prefix, path):
        files = shell.check_call('ldd %s' % path).split('\n')
        return [x.split(' ')[2] for x in files if prefix in x]
class DepsTracker():
    """Lists shared-library dependencies of a file, relative to a prefix."""

    BACKENDS = {
        Platform.WINDOWS: ObjdumpLister,
        Platform.LINUX: LddLister,
        Platform.DARWIN: OtoolLister}

    def __init__(self, platform, prefix):
        self.libs_deps = {}
        self.prefix = prefix
        # BUG FIX: was `self.prefix[:-1] != '/'`, which compares everything
        # *but* the last character against '/'; the intent is to ensure the
        # prefix ends with a slash, so check the last character.
        if self.prefix[-1] != '/':
            self.prefix += '/'
        self.lister = self.BACKENDS[platform]()

    def list_deps(self, path):
        """Return deps of *path* (plus symlink targets), prefix-relative."""
        deps = self.lister.list_deps(self.prefix, path)
        rdeps = []
        for d in deps:
            if os.path.islink(d):
                rdeps.append(os.path.realpath(d))
        return [x.replace(self.prefix, '') for x in deps + rdeps]
| Python | 0 | |
e23b53a6326dbdb9df1e0f8d6711be1a9563c885 | Add tests for processes | tests/test_processes.py | tests/test_processes.py | from wallace import processes, networks, agents, db
class TestProcesses(object):
    """Behavioural tests for wallace.processes on small in-memory networks."""

    def setup(self):
        # Fresh database for every test.
        self.db = db.init_db(drop_all=True)

    def teardown(self):
        self.db.rollback()
        self.db.close()

    def test_random_walk_from_source(self):
        # Chain agent1 -> agent2 -> agent3 with a source feeding agent1;
        # after three steps the source's message must reach agent3 intact.
        net = networks.Network(self.db)
        agent1 = net.add_agent()
        agent2 = net.add_agent()
        agent3 = net.add_agent()
        agent1.connect_to(agent2)
        agent2.connect_to(agent3)
        self.db.add_all([agent1, agent2, agent3])
        self.db.commit()

        source = agents.RandomBinaryStringSource()
        net.add_local_source(source, agent1)

        process = processes.RandomWalkFromSource(net)
        process.step()
        agent1.receive_all()
        msg = agent1.ome.contents
        process.step()
        agent2.receive_all()
        process.step()
        agent3.receive_all()

        assert msg == agent3.ome.contents

    def test_moran_process_cultural(self):
        # Create a fully-connected network.
        net = networks.Network(self.db)
        agent1 = net.add_agent()
        agent2 = net.add_agent()
        agent3 = net.add_agent()
        agent1.connect_to(agent2)
        agent1.connect_to(agent3)
        agent2.connect_to(agent1)
        agent2.connect_to(agent3)
        agent3.connect_to(agent1)
        agent3.connect_to(agent2)
        self.db.add_all([agent1, agent2, agent3])
        self.db.commit()

        # Add a global source and broadcast to all the agents.
        source = agents.RandomBinaryStringSource()
        net.add_global_source(source)
        source.broadcast()
        self.db.commit()
        for agent in net.agents:
            agent.receive_all()

        # Run a Moran process for 100 steps.
        # NOTE(review): fixation after 100 steps is overwhelmingly likely
        # but the process is stochastic, so this can flake in principle.
        process = processes.MoranProcessCultural(net)
        for i in range(100):
            process.step()
            for agent in net.agents:
                agent.receive_all()

        # Ensure that the process had reached fixation.
        assert agent1.ome.contents == agent2.ome.contents
        assert agent2.ome.contents == agent3.ome.contents
        assert agent3.ome.contents == agent1.ome.contents

    def test_moran_process_sexual(self):
        # Create a fully-connected network.
        net = networks.Network(self.db)
        agent1 = net.add_agent()
        agent2 = net.add_agent()
        agent3 = net.add_agent()
        agent1.connect_to(agent2)
        agent1.connect_to(agent3)
        agent2.connect_to(agent1)
        agent2.connect_to(agent3)
        agent3.connect_to(agent1)
        agent3.connect_to(agent2)
        self.db.add_all([agent1, agent2, agent3])
        self.db.commit()

        # Add a global source and broadcast to all the agents.
        source = agents.RandomBinaryStringSource()
        net.add_global_source(source)
        source.broadcast()
        self.db.commit()
        for agent in net.agents:
            agent.receive_all()

        # Remember the initial memes before the process replaces agents.
        all_contents = [agent1.ome.contents,
                        agent2.ome.contents,
                        agent3.ome.contents]

        # Run a Moran process for 100 steps.
        process = processes.MoranProcessSexual(net)
        for i in range(100):
            process.step()
            for agent in net.agents:
                agent.receive_all()

        # Ensure that the process had reached fixation: in the sexual
        # variant the original agents die and are replaced by offspring
        # carrying one of the original memes.
        assert agent1.status == "dead"
        assert agent2.status == "dead"
        assert agent3.status == "dead"

        for agent in net.agents:
            assert agent.ome.contents in all_contents
| Python | 0.000001 | |
73084b964f964c05cb948be3acaa6ba68d62dc30 | test plotting particles | ws/CSUIBotClass2014/test/test_plot_particles.py | ws/CSUIBotClass2014/test/test_plot_particles.py | #!/usr/bin/python
# @author: vektor dewanto
# @obj: demonstrate how to plot particles in an occupancy grid map, _although_, for now, all positions are valid
import matplotlib.pyplot as plt
import numpy as np
import math
import matplotlib.cm as cmx
from matplotlib import colors
# Construct the occupancy grid map (1 = occupied wall cell, 0 = free space).
grid_map = {'size': (10,10), 'res': 1.0}
grid = [1,1,1,1,1,1,1,1,1,1,\
        1,0,0,1,0,1,0,0,0,1,\
        1,0,0,1,0,1,0,0,0,1,\
        1,0,0,0,0,1,0,1,1,1,\
        1,1,1,1,0,0,0,0,0,1,\
        1,0,0,1,0,0,0,0,0,1,\
        1,0,0,0,0,0,0,0,0,1,\
        1,0,0,1,0,0,0,0,0,1,\
        1,0,0,1,0,0,0,0,1,1,\
        1,1,1,1,1,1,1,1,1,1]
assert len(grid)==grid_map['size'][0]*grid_map['size'][1], 'grid size is mismatched'
grid = np.asarray(grid)
grid = grid.reshape(grid_map['size'][0], grid_map['size'][1])
grid_map['grid'] = grid

# Plot the map
plt.subplot(1,1,1)
plt.pcolormesh(grid_map['grid'], edgecolors='k', linewidths=0.1, cmap=colors.ListedColormap(['w','b']))
plt.title('The occupancy grid map with particles')

# At t=0, initiate X with n_particle particles drawn from a uniform distribution (since this is a global loc. problem)
# For now, we donot check whether the particle is on an occupied grid
n_particle = 100;
X_tmp = np.random.uniform(0.0, 10.0, n_particle)
Y_tmp = np.random.uniform(0.0, 10.0, n_particle)
THETA_tmp = np.random.uniform(0.0, math.pi*2.0, n_particle)
XYTHETA_tmp = zip(X_tmp, Y_tmp, THETA_tmp)
W = [1.0/n_particle] * n_particle# uniform
# Each particle is ((x, y, theta), weight).
# NOTE(review): under Python 3 zip() returns a one-shot iterator, so X would
# be exhausted by the scatter call below and the bearing loop would draw
# nothing -- this script assumes Python 2 list semantics.
X = zip(XYTHETA_tmp, W)

# Plot positions, the color corresponds to the weight
ax = plt.axes()
ax.scatter([e[0][0] for e in X], [e[0][1] for e in X], c=[e[1] for e in X], marker='o', s=20, cmap=cmx.jet)

# Plot bearings
for e in X:
    x = e[0][0]
    y = e[0][1]
    theta = e[0][2]
    # convert polar to cartesian coord
    r = 0.1
    dx = r * math.cos(theta)
    dy = r * math.sin(theta)
    ax.arrow(x, y, dx, dy, head_width=0.05, head_length=0.1, fc='k', ec='k')
plt.show()
| Python | 0 | |
6342c6cab9b5dd0b34ca5de575ef82592474e1d5 | add mvnsite.py to build site without javadocs or test run | bin/mvnsite.py | bin/mvnsite.py | #!/usr/bin/env python
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from subprocess import call
import os
import sys

args = sys.argv[1:]

# Build the Maven site without javadocs and without running tests, forwarding
# any extra CLI arguments straight through to Maven:
#   mvn site:site -Dmaven.javadoc.skip=true -DskipTests
# BUG FIX: the Maven launcher was hard-coded to "mvn.bat", which only exists
# on Windows; pick the right launcher for the current OS and propagate
# Maven's exit code so build scripts can detect failure.
mvn = "mvn.bat" if os.name == "nt" else "mvn"
sys.exit(call([mvn, "site:site", "-Dmaven.javadoc.skip=true", "-DskipTests"] + args))
| Python | 0 | |
598d937f3f180e22a1b4793644ffdb1b9a26f261 | update crawler_main.py | crawler_main.py | crawler_main.py | """
crawler study code
Author: smilexie1113@gmail.com
"""
import urllib.request
import os
import re
from collections import deque
# Prefix that marks the return value of get_one_page() as an error message.
ERROR_RETURN = "ERROR: "

def retrun_is_error(return_str):
    """Return True if *return_str* is an error string from get_one_page()."""
    # Idiom: str.startswith instead of manual prefix slicing/comparison.
    return return_str.startswith(ERROR_RETURN)
def python_cnt(text):
    """Count occurrences of the substring "python" in *text* (case-sensitive)."""
    # Renamed the parameter from `str`, which shadowed the builtin type.
    return text.count("python")
def get_one_page(url):
    """Fetch *url* and return its HTML body as a str.

    On any failure (network error, non-HTML content type, undecodable
    body) a string starting with ERROR_RETURN is returned instead of
    raising.
    """
    try:
        urlfd = urllib.request.urlopen(url, timeout = 2)
    except Exception as ex:
        return ERROR_RETURN + ("URL " + "\"" + url + "\"" + " open failed. " + str(ex))
    # NOTE(review): getheader() may return None when the response has no
    # Content-Type header, which would raise TypeError here.
    if "html" not in urlfd.getheader("Content-Type"):
        return ERROR_RETURN + ("URL " + "\"" + url + "\"" + "is not html page.")
    try:
        html_str = urlfd.read().decode("utf-8")
    except:
        return ERROR_RETURN + ("Fail to decode URL " + "\"" + url + "\"" + ".")
    return html_str
if __name__ == "__main__":
    start_url = "http://news.dbanotes.net/"
    to_be_visited = deque()  # BFS frontier of URLs still to fetch
    visited = set()          # URLs already fetched successfully
    cnt = 0                  # pages grabbed so far
    py_str_cnt = 0           # total "python" occurrences seen
    to_be_visited.append(start_url)
    while to_be_visited:
        url = to_be_visited.popleft()
        print(str(cnt) + "page(s) has been grabbed." + "URL " + "\"" + url + "\"" + " is being grabbed.")
        html_str = get_one_page(url)
        if retrun_is_error(html_str):
            print(html_str)
            continue
        cnt += 1
        visited |= {url}
        py_cnt_tmp = python_cnt(html_str)
        if py_cnt_tmp != 0:
            py_str_cnt += py_cnt_tmp
            print("Find %d \"python\" , total count %d" % (py_cnt_tmp, py_str_cnt))
        #todo: parse the html_str
        link_pattern = re.compile('href=\"(.+?)\"') #regular expression
        for tmp_url in link_pattern.findall(html_str):
            # NOTE(review): only `visited` is checked here, so the same URL
            # can be queued many times before it is first fetched.
            if "http" in tmp_url and tmp_url not in visited:
                to_be_visited.append(tmp_url)
| """
crawler study code
Author: smilexie1113@gmail.com
"""
import urllib.request
import os
import re
from collections import deque
from filecmp import cmp
# Prefix that marks the return value of get_one_page() as an error message.
ERROR_RETURN = "ERROR:"

def retrun_is_error(return_str):
    """Return True if *return_str* is an error string from get_one_page()."""
    # Idiom: str.startswith instead of manual prefix slicing/comparison.
    return return_str.startswith(ERROR_RETURN)

def python_cnt(text):
    """Count occurrences of "python" in *text* (param renamed from `str`,
    which shadowed the builtin type)."""
    return text.count("python")
def get_one_page(url):
try:
urlfd = urllib.request.urlopen(url, timeout = 2)
except Exception as ex:
return ERROR_RETURN + ("URL " + "\"" + url + "\"" + " open failed. " + str(ex))
if "html" not in urlfd.getheader("Content-Type"):
return ERROR_RETURN + ("URL " + "\"" + url + "\"" + "is not html page.")
try:
html_str = urlfd.read().decode("utf-8")
except:
return ERROR_RETURN + ("Fail to decode URL " + "\"" + url + "\"" + ".")
return html_str
if __name__ == "__main__":
start_url = "http://news.dbanotes.net/"
to_be_visited = deque()
visited = set()
cnt = 0
py_str_cnt = 0
to_be_visited.append(start_url)
while to_be_visited:
url = to_be_visited.popleft()
print(str(cnt) + "page(s) has been grabbed." + "URL " + "\"" + url + "\"" + " is being grabbed.")
html_str = get_one_page(url)
if retrun_is_error(html_str):
print(html_str)
continue
cnt += 1
visited |= {url}
py_cnt_tmp = python_cnt(html_str)
if py_cnt_tmp != 0:
py_str_cnt += py_cnt_tmp
print("Find %d \"python\" , total count %d" % (py_cnt_tmp, py_str_cnt))
#todo: parse the html_str
link_pattern = re.compile('href=\"(.+?)\"') #links' regular expression
for tmp_url in link_pattern.findall(html_str):
if "http" in tmp_url and tmp_url not in visited:
to_be_visited.append(tmp_url)
| Python | 0.000001 |
e904341eb7b426ea583e345689249d7f13451dc9 | Add biome types. | biome_types.py | biome_types.py | biome_types = {
-1: "Will be computed",
0: "Ocean",
1: "Plains",
2: "Desert",
3: "Extreme Hills",
4: "Forest",
5: "Taiga",
6: "Swampland",
7: "River",
8: "Hell",
9: "Sky",
10: "FrozenOcean",
11: "FrozenRiver",
12: "Ice Plains",
13: "Ice Mountains",
14: "MushroomIsland",
15: "MushroomIslandShore",
16: "Beach",
17: "DesertHills",
18: "ForestHills",
19: "TaigaHills",
20: "Extreme Hills Edge",
21: "Jungle",
22: "JungleHills",
}
| Python | 0 | |
0a0b322ca7d42d28ba495b7786cd2bd92c0bfd34 | Add test_register.py | tests/test_assembler/test_register.py | tests/test_assembler/test_register.py | 'Test of videocore.Register'
from nose.tools import raises
from videocore.assembler import Register, AssembleError, REGISTERS
def test_register_names():
for name in REGISTERS:
assert name == REGISTERS[name].name
assert name == str(REGISTERS[name])
@raises(AssembleError)
def test_pack_of_accumulator():
REGISTERS['r0'].pack('nop')
@raises(AssembleError)
def test_pack_of_regfileB():
REGISTERS['rb0'].pack('nop')
@raises(AssembleError)
def test_unpack_of_regfileB():
REGISTERS['rb0'].unpack('nop')
| Python | 0.000003 | |
12266ffcb7fcb809ec0e0a3102077581e64eb9e0 | Update migrations | server/adventures/migrations/0002_auto_20160909_1901.py | server/adventures/migrations/0002_auto_20160909_1901.py | # -*- coding: utf-8 -*-
# Generated by Django 1.10 on 2016-09-09 19:01
from __future__ import unicode_literals
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
    # Adds the Setting model and links Adventure to Publisher and Setting.
    # Auto-generated by Django; avoid hand-editing the field definitions.

    dependencies = [
        ('adventures', '0001_initial'),
    ]

    operations = [
        migrations.CreateModel(
            name='Setting',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('name', models.TextField()),
            ],
        ),
        migrations.AddField(
            model_name='adventure',
            name='publisher',
            # default='' only back-fills existing rows during the migration
            # and is then dropped (preserve_default=False).
            field=models.ForeignKey(default='', on_delete=django.db.models.deletion.CASCADE, to='adventures.Publisher'),
            preserve_default=False,
        ),
        migrations.AlterField(
            model_name='adventure',
            name='edition',
            field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='adventures.Edition'),
        ),
        migrations.AddField(
            model_name='adventure',
            name='setting',
            field=models.ForeignKey(default='', on_delete=django.db.models.deletion.CASCADE, to='adventures.Setting'),
            preserve_default=False,
        ),
    ]
| Python | 0.000001 | |
6dc1abdb92d1226071ea84e6352389ad01d21fe6 | Create sequencer_scripting.py | examples/sequencer_scripting.py | examples/sequencer_scripting.py | import unreal_engine as ue
from unreal_engine.classes import MovieSceneAudioTrack, LevelSequenceFactoryNew, MovieSceneSkeletalAnimationTrack, Character, SkeletalMesh, MovieScene3DTransformTrack, CineCameraActor
import time
from unreal_engine.structs import FloatRange, FloatRangeBound
from unreal_engine import FTransform, FVector
# create a new level sequence asset
factory = LevelSequenceFactoryNew()
seq = factory.factory_create_new('/Game/MovieMaster' + str(int(time.time())))
# add an audio track (without sound section ;) to the sequence
audio = seq.sequencer_add_master_track(MovieSceneAudioTrack)
# get a reference to the editor world (to spawn actors)
world = ue.get_editor_world()
# spawn a new character and modify it (post_edit_change will allow the editor/sequencer to be notified of actor updates)
character = world.actor_spawn(Character)
# notify modifications are about to happen...
character.modify()
character.Mesh.SkeletalMesh = ue.load_object(SkeletalMesh, '/Game/InfinityBladeAdversaries/Enemy/Enemy_Bear/Enemy_Bear.Enemy_Bear')
# finalize the actor
character.post_edit_change()
# add to the sequencer as a possessable (shortcut method returning the guid as string)
guid = seq.sequencer_add_actor(character)
# add an animation track mapped to the just added actor
anim = seq.sequencer_add_track(MovieSceneSkeletalAnimationTrack, guid)
# create 3 animations sections (assign AnimSequence field to set the animation to play)
anim_sequence = anim.sequencer_track_add_section()
anim_sequence.StartTime = 1
anim_sequence.EndTime = 3
anim_sequence.RowIndex = 0
anim_sequence2 = anim.sequencer_track_add_section()
anim_sequence2.RowIndex = 1
anim_sequence2.StartTime = 2
anim_sequence2.EndTime = 5
anim_sequence3 = anim.sequencer_track_add_section()
anim_sequence3.RowIndex = 1
anim_sequence3.SlotName = 'Hello'
# BUG FIX: the original assigned to 'StartTIme' (typo), so the section's
# real StartTime property was never set.
anim_sequence3.StartTime = 0
anim_sequence3.EndTime = 30
# add a transform track/section in one shot to the actor
transform = seq.sequencer_add_track(MovieScene3DTransformTrack, guid).sequencer_track_add_section()
transform.StartTime = 0
transform.EndTime = 5
# add keyframes to the transform section
transform.sequencer_section_add_key(0, FTransform(FVector(0, 0, 17 * 100)))
transform.sequencer_section_add_key(1, FTransform(FVector(0, 0, 22 * 100)))
transform.sequencer_section_add_key(2, FTransform(FVector(0, 0, 26 * 100)))
transform.sequencer_section_add_key(2.5, FTransform(FVector(0, 0, 30 * 100)))
# set playback range
float_range = FloatRange(LowerBound=FloatRangeBound(Value=0), UpperBound=FloatRangeBound(Value=10))
seq.MovieScene.PlaybackRange = float_range
# add camera cut track (can be only one)
camera_cut_track = seq.sequencer_add_camera_cut_track()
# add two camera views
camera1 = camera_cut_track.sequencer_track_add_section()
camera2 = camera_cut_track.sequencer_track_add_section()
# spawn 2 cine cameras in the stage and posses them with the sequencer
cine_camera = world.actor_spawn(CineCameraActor)
camera_guid = seq.sequencer_add_actor(cine_camera)
cine_camera2 = world.actor_spawn(CineCameraActor)
camera2_guid = seq.sequencer_add_actor(cine_camera2)
# assign the two cameras to the camera cut sections (via guid)
camera1.CameraGuid = ue.string_to_guid(camera_guid)
camera2.CameraGuid = ue.string_to_guid(camera2_guid)
# set cameras time slots
camera1.StartTime = 0
camera1.EndTime = 3.5
camera2.StartTime = 3.5
camera2.EndTime = 5
# notify the sequence editor that something heavily changed (True will focus to the sequence editor)
seq.sequencer_changed(True)
| Python | 0.000001 | |
360d4cd867f1ddd56a8487bea776e454d5954caf | Add textcat from config example | examples/textcat_from_config.py | examples/textcat_from_config.py | import random
from typing import Optional
from pathlib import Path
import thinc
from thinc.api import Config, fix_random_seed
from wasabi import msg
import typer
import numpy as np
import csv
from ml_datasets import loaders
from ml_datasets.util import get_file
from ml_datasets._registry import register_loader
from syntok.tokenizer import Tokenizer
# Partial config with some parameters for Embed and Softmax unspecified
CONFIG = """
[hyper_params]
width = 64
[model]
@layers = "chain.v0"
[model.*.list2ragged]
@layers = "list2ragged.v0"
[model.*.with_array]
@layers = "with_array.v0"
[model.*.with_array.layer]
@layers = "Embed.v0"
nO = ${hyper_params:width}
[model.*.meanpool]
@layers = "MeanPool.v0"
[model.*.softmax]
@layers = "Softmax.v0"
nI = ${hyper_params:width}
[optimizer]
@optimizers = "Adam.v1"
learn_rate = 0.001
[training]
batch_size = 8
n_iter = 10
"""
def main(
    config_path: Optional[Path] = None,
    n_examples: Optional[int] = 2000,
    dataset: Optional[str] = "dbpedia_ontology",
):
    """Train a small thinc text classifier on *dataset* and report accuracy.

    *config_path* overrides the in-file CONFIG string; *n_examples* caps
    each data split.
    """
    fix_random_seed(0)

    # Load data
    supported_datasets = ["dbpedia_ontology", "imdb"]
    if dataset not in supported_datasets:
        msg.fail("Supported datasets:" + ", ".join(supported_datasets), exits=1)
    msg.text(f"Loading dataset '{dataset}'...")
    dataset_loader = loaders.get(dataset)
    train_data, dev_data = dataset_loader(limit=n_examples)
    train_texts, train_cats = zip(*train_data)
    dev_texts, dev_cats = zip(*dev_data)
    unique_cats = list(np.unique(np.concatenate((train_cats, dev_cats))))
    nr_class = len(unique_cats)
    msg.text(f"  {len(train_data)} training instances")
    msg.text(f"  {len(dev_data)} dev instances")
    msg.text(f"  {nr_class} classes")

    # One-hot encode the gold labels for both splits.
    train_y = np.zeros((len(train_cats), nr_class), dtype="f")
    for i, cat in enumerate(train_cats):
        train_y[i][unique_cats.index(cat)] = 1
    dev_y = np.zeros((len(dev_cats), nr_class), dtype="f")
    for i, cat in enumerate(dev_cats):
        dev_y[i][unique_cats.index(cat)] = 1

    # Tokenize texts
    train_tokenized = tokenize_texts(train_texts)
    dev_tokenized = tokenize_texts(dev_texts)

    # Generate simple vocab mapping, <unk> is 0
    vocab = {}
    count_id = 1
    for text in train_tokenized:
        for token in text:
            if token not in vocab:
                vocab[token] = count_id
                count_id += 1

    # Map texts using vocab
    train_X = []
    for text in train_tokenized:
        train_X.append(np.array([vocab.get(t, 0) for t in text]))
    dev_X = []
    for text in dev_tokenized:
        dev_X.append(np.array([vocab.get(t, 0) for t in text]))

    # You can edit the CONFIG string within the file, or copy it out to
    # a separate file and pass in the path.
    if config_path is None:
        config = Config().from_str(CONFIG)
    else:
        config = Config().from_disk(config_path)

    # Set the remaining config parameters based on the loaded dataset
    config["model"]["*"]["with_array"]["layer"]["nV"] = len(vocab)
    config["model"]["*"]["softmax"]["nO"] = nr_class

    # Load the config
    loaded_config = thinc.registry.make_from_config(config)

    # Here we have the model and optimizer, built for us by the registry.
    model = loaded_config["model"]
    optimizer = loaded_config["optimizer"]

    # Get training parameters from config
    batch_size = config["training"]["batch_size"]
    n_iter = config["training"]["n_iter"]

    # Train
    msg.text("Training...")
    row_widths = (4, 8, 8)
    msg.row(("Iter", "Loss", f"Accuracy"), widths=row_widths)
    msg.row("-"*width for width in row_widths)
    zipped = list(zip(train_X, train_y))
    for n in range(n_iter):
        loss = 0.0
        random.shuffle(zipped)
        for i in range(0, len(zipped), batch_size):
            X, Y = zip(*zipped[i : i + batch_size])
            Yh, backprop = model.begin_update(X)
            d_loss = []
            # NOTE(review): this inner loop rebinds `i`, shadowing the batch
            # index above; harmless (the outer range iterator is unaffected)
            # but confusing to readers.
            for i in range(len(Yh)):
                d_loss.append(Yh[i] - Y[i])
                loss += ((Yh[i] - Y[i]) ** 2).sum()
            backprop(np.array(d_loss))
            model.finish_update(optimizer)
        score = evaluate_textcat(model, dev_X, dev_y, batch_size)
        msg.row((n, f"{loss:.2f}", f"{score:.3f}"), widths=row_widths)
def evaluate_textcat(model, dev_X, dev_Y, batch_size):
    """Return the model's label accuracy on the dev set, predicting in batches."""
    n_correct = 0.0
    n_total = 0.0
    for start in range(0, len(dev_X), batch_size):
        batch_pred = model.predict(dev_X[start : start + batch_size])
        batch_gold = dev_Y[start : start + batch_size]
        # A prediction counts as correct when its argmax matches the gold's.
        for pred, gold in zip(batch_pred, batch_gold):
            n_correct += pred.argmax(axis=0) == gold.argmax(axis=0)
            n_total += 1
    return n_correct / n_total
def tokenize_texts(texts):
    """Tokenize each text with syntok, returning one list of token strings per text."""
    tokenizer = Tokenizer()
    tokenized = []
    for text in texts:
        tokenized.append([tok.value for tok in tokenizer.tokenize(text)])
    return tokenized
# Dataset loader for DBPedia Ontology from https://course.fast.ai/datasets
DBPEDIA_ONTOLOGY_URL = "https://s3.amazonaws.com/fast-ai-nlp/dbpedia_csv.tgz"

@register_loader("dbpedia_ontology")
def dbpedia_ontology(loc=None, limit=0):
    """Download (if needed) and load the DBPedia ontology train/test splits.

    Returns a (train, test) pair of shuffled (text, label) example lists;
    *limit* caps each split independently (0 = no cap).
    """
    if loc is None:
        loc = get_file("dbpedia_csv", DBPEDIA_ONTOLOGY_URL, untar=True, unzip=True)
    train_loc = Path(loc) / "train.csv"
    test_loc = Path(loc) / "test.csv"
    return read_dbpedia_ontology(train_loc, limit=limit), read_dbpedia_ontology(test_loc, limit=limit)
def read_dbpedia_ontology(data_file, limit=0):
    """Parse a DBPedia ontology CSV into shuffled (text, label) examples.

    Each CSV row is (label, title, body); the example text is the title
    and body joined by a newline.  When *limit* >= 1, at most *limit*
    shuffled examples are returned.
    """
    examples = []
    with open(data_file, newline='') as handle:
        for row in csv.reader(handle):
            label, title, body = row[0], row[1], row[2]
            examples.append((title + "\n" + body, label))
    random.shuffle(examples)
    return examples[:limit] if limit >= 1 else examples
if __name__ == "__main__":
typer.run(main)
| Python | 0 | |
df9b7cd8d1b34f8c29c372589ad9efd3a5435d0f | Implement TwitchWordsCounterBot class. | twitchbot/twitch_words_counter_bot.py | twitchbot/twitch_words_counter_bot.py | import irc.bot
import irc.strings
from .words_counter import WordsCounter
class TwitchWordsCounterBot(irc.bot.SingleServerIRCBot):
    """IRC bot that joins one Twitch channel and counts words in its chat."""

    def __init__(self, channel, nickname, password, server, port=6667):
        # Twitch authenticates via the IRC server password (the OAuth token).
        irc.bot.SingleServerIRCBot.__init__(self, [(server, port, password)], nickname, nickname)
        self.server = server
        self.channel = channel
        self.words_counter = WordsCounter()

    def start(self):
        print("Connecting to the server '%s'..." % self.server)
        super(TwitchWordsCounterBot, self).start()

    def on_welcome(self, c, e):
        # Server accepted the connection; now join the target channel.
        print("Connected to the server '%s'." % self.server)
        print("Joining to the channel '%s'..." % self.channel)
        c.join(self.channel)

    def _on_join(self, c, e):
        super(TwitchWordsCounterBot, self)._on_join(c, e)
        print("Joined to the channel '%s'!" % self.channel)

    def _on_disconnect(self, c, e):
        super(TwitchWordsCounterBot, self)._on_disconnect(c, e)
        print("Disconnected from the server '%s'." % self.server)
        print(e)

    def on_pubmsg(self, c, e):
        # Feed the text of every public chat message to the word counter.
        message = e.arguments[0]
        self.words_counter.count_words(message)
        print(self.words_counter)
| Python | 0 | |
6136eef341f1ac5ce0be278c3ab78192192d0efa | check if OS is UNIX-y | posix.py | posix.py | #!/bin/py
from sys import platform
def osCheck():
    """Report whether the interpreter runs on a UNIX-like OS (macOS/Linux).

    Prints the platform name on a match and returns True; returns False
    otherwise.  The original condition `"darwin" or "linux" in ...` was a
    bug: a non-empty string literal is always truthy, so the check matched
    on every operating system.
    """
    name = platform.lower()
    if "darwin" in name or "linux" in name:
        print(platform)
        return True
    return False

osCheck()
| Python | 0.999519 | |
2a0724922bde4cdd5219c721cdfd5460a2e5f3ed | Create Timely_Tweeter.py | Timely_Tweeter.py | Timely_Tweeter.py | #-=- Coding: Python UTF-8 -=-
import tweepy, time, sys
argfile = str(sys.argv[1])  # Path to a text file with one tweet per line.

# Twitter account info -- place keys and tokens between the quotes.
CONSUMER_KEY = ''  # The Consumer Key (API Key)
CONSUMER_SECRET = ''  # The Consumer Secret (API Secret)
ACCESS_KEY = ''  # The Access Token
ACCESS_SECRET = ''  # The Access Token Secret
# Seconds to wait between tweets.  The original left this assignment empty,
# which is a SyntaxError; 900 (15 minutes) is a conservative default.
SLEEPY_TIME = 900

# Now it checks in with Twitter and gets authenticated.
auth = tweepy.OAuthHandler(CONSUMER_KEY, CONSUMER_SECRET)
auth.set_access_token(ACCESS_KEY, ACCESS_SECRET)
api = tweepy.API(auth)

# Read all tweets up front; the with-statement closes the file even on error.
with open(argfile, 'r') as tweet_file:
    lines = tweet_file.readlines()

for line in lines:
    api.update_status(line)
    time.sleep(SLEEPY_TIME)  # Throttle between tweets.
| Python | 0.000001 | |
9c0750ef401870e0187e3b7f0e4e39cf3d7e3944 | Make sure the profile data is unmarshallable as profile data. | test/test_benchmarks.py | test/test_benchmarks.py | # -*- coding: utf-8 -*-
# Licensed under a 3-clause BSD style license - see LICENSE.rst
from __future__ import (absolute_import, division, print_function,
unicode_literals)
import os
import pstats
import pytest
import six
from asv import benchmarks
from asv import config
from asv import environment
# Fixture paths: a valid benchmark tree and a deliberately malformed one,
# both resolved relative to this test module.
BENCHMARK_DIR = os.path.join(os.path.dirname(__file__), 'benchmark')
INVALID_BENCHMARK_DIR = os.path.join(
    os.path.dirname(__file__), 'benchmark.invalid')
# Minimal asv project configuration used as the base for every test case.
ASV_CONF_JSON = {
    'benchmark_dir': BENCHMARK_DIR,
    'repo': 'https://github.com/spacetelescope/asv.git',
    'project': 'asv'
}
def test_find_benchmarks(tmpdir):
    """Discovery honours regex filters; running yields the expected results."""
    tmpdir = six.text_type(tmpdir)
    os.chdir(tmpdir)

    conf = config.Config.from_json(
        dict(ASV_CONF_JSON, env_dir=os.path.join(tmpdir, "env")))

    # Regex filtering narrows the discovered set; no regex finds all seven.
    assert len(benchmarks.Benchmarks(conf, regex='secondary')) == 3
    assert len(benchmarks.Benchmarks(conf, regex='example')) == 3
    assert len(benchmarks.Benchmarks(conf, regex='time_example_benchmark_1')) == 1
    assert len(benchmarks.Benchmarks(conf)) == 7

    envs = list(environment.get_environments(
        conf.env_dir, conf.pythons, conf.matrix))

    times = benchmarks.Benchmarks(conf).run_benchmarks(envs[0], profile=True)
    assert len(times) == 7
    assert times[
        'time_examples.TimeSuite.time_example_benchmark_1']['result'] is not None
    # Benchmarks that raise exceptions report a result of None.
    assert times['time_secondary.TimeSecondary.time_exception']['result'] is None
    assert times['subdir.time_subdir.time_foo']['result'] is not None
    assert times['mem_examples.mem_list']['result'] > 2000
    assert times['time_secondary.track_value']['result'] == 42.0
    assert 'profile' in times['time_secondary.track_value']

    # The captured profile bytes must be loadable as real pstats data.
    profile_path = os.path.join(tmpdir, 'test.profile')
    with open(profile_path, 'wb') as out:
        out.write(times['time_secondary.track_value']['profile'])
    pstats.Stats(profile_path)
def test_invalid_benchmark_tree(tmpdir):
    """A malformed benchmark directory is rejected at discovery time."""
    tmpdir = six.text_type(tmpdir)
    os.chdir(tmpdir)

    settings = dict(ASV_CONF_JSON)
    settings['benchmark_dir'] = INVALID_BENCHMARK_DIR
    settings['env_dir'] = os.path.join(tmpdir, "env")
    conf = config.Config.from_json(settings)

    with pytest.raises(ValueError):
        benchmarks.Benchmarks(conf)
| # -*- coding: utf-8 -*-
# Licensed under a 3-clause BSD style license - see LICENSE.rst
from __future__ import (absolute_import, division, print_function,
unicode_literals)
import os
import pytest
import six
from asv import benchmarks
from asv import config
from asv import environment
# Fixture paths: a valid benchmark tree and a deliberately malformed one,
# both resolved relative to this test module.
BENCHMARK_DIR = os.path.join(os.path.dirname(__file__), 'benchmark')
INVALID_BENCHMARK_DIR = os.path.join(
    os.path.dirname(__file__), 'benchmark.invalid')
# Minimal asv project configuration used as the base for every test case.
ASV_CONF_JSON = {
    'benchmark_dir': BENCHMARK_DIR,
    'repo': 'https://github.com/spacetelescope/asv.git',
    'project': 'asv'
}
def test_find_benchmarks(tmpdir):
    """Discovery honours regex filters; running yields the expected results."""
    tmpdir = six.text_type(tmpdir)
    os.chdir(tmpdir)

    conf = config.Config.from_json(
        dict(ASV_CONF_JSON, env_dir=os.path.join(tmpdir, "env")))

    # Regex filtering narrows the discovered set; no regex finds all seven.
    assert len(benchmarks.Benchmarks(conf, regex='secondary')) == 3
    assert len(benchmarks.Benchmarks(conf, regex='example')) == 3
    assert len(benchmarks.Benchmarks(conf, regex='time_example_benchmark_1')) == 1
    assert len(benchmarks.Benchmarks(conf)) == 7

    envs = list(environment.get_environments(
        conf.env_dir, conf.pythons, conf.matrix))

    times = benchmarks.Benchmarks(conf).run_benchmarks(envs[0], profile=True)
    assert len(times) == 7
    assert times[
        'time_examples.TimeSuite.time_example_benchmark_1']['result'] is not None
    # Benchmarks that raise exceptions report a result of None.
    assert times['time_secondary.TimeSecondary.time_exception']['result'] is None
    assert times['subdir.time_subdir.time_foo']['result'] is not None
    assert times['mem_examples.mem_list']['result'] > 2000
    assert times['time_secondary.track_value']['result'] == 42.0
    assert 'profile' in times['time_secondary.track_value']
def test_invalid_benchmark_tree(tmpdir):
    """A malformed benchmark directory is rejected at discovery time."""
    tmpdir = six.text_type(tmpdir)
    os.chdir(tmpdir)

    settings = dict(ASV_CONF_JSON)
    settings['benchmark_dir'] = INVALID_BENCHMARK_DIR
    settings['env_dir'] = os.path.join(tmpdir, "env")
    conf = config.Config.from_json(settings)

    with pytest.raises(ValueError):
        benchmarks.Benchmarks(conf)
| Python | 0.000002 |
d1ecc996269a801c65d3b88791f7f5546c8af1b8 | add setup.py | setup.py | setup.py | from setuptools import setup
# Packaging metadata for the daria pytorch-trainer distribution, kept in a
# dict so the fields are easy to inspect before being handed to setuptools.
_METADATA = dict(
    name='daria',
    version='0.0.1',
    description='pytorch trainer',
    author='odanado',
    author_email='odan3240@gmail.com',
    url='https://github.com/odanado/daria',
    license='MIT License',
    packages=['daria'],
    tests_require=['mock'],
    test_suite='tests',
)

setup(**_METADATA)
| Python | 0.000001 | |
db92ed5e523eafb7ccba553f1ee25365cc254798 | add setup.py | setup.py | setup.py | #!/usr/bin/env python
#encoding: utf-8
#
# Programa epbdcalc: Cálculo de la eficiencia energética ISO/DIS 52000-1:2015
#
# Copyright (C) 2015 Rafael Villar Burke <pachi@ietcc.csic.es>
# Daniel Jiménez González <danielj@ietcc.csic.es>
#
# This program is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License
# as published by the Free Software Foundation; either version 2
# of the License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
# 02110-1301, USA.
"""epbdcalc - Cálculo de la eficiencia energética según ISO/DIS 52000-1:2015
Based on the pypa setuptools based setup module.
See:
https://packaging.python.org/en/latest/distributing.html
https://github.com/pypa/sampleproject
"""
import codecs
import os.path
import re
from setuptools import setup, find_packages
def find_version(*file_paths, **kwargs):
    """Extract ``__version__`` from a source file next to this script.

    *file_paths* components are joined onto this file's directory; the
    file encoding may be overridden via the ``encoding`` keyword
    (defaults to utf8).  Raises RuntimeError when no
    ``__version__ = '...'`` line is present.
    """
    target = os.path.join(os.path.dirname(__file__), *file_paths)
    encoding = kwargs.get("encoding", "utf8")
    with codecs.open(target, encoding=encoding) as handle:
        contents = handle.read()
    match = re.search(r"^__version__ = ['\"]([^'\"]*)['\"]", contents, re.M)
    if not match:
        raise RuntimeError("Unable to find version string.")
    return match.group(1)
# The long description shown on PyPI is assembled from the README plus the
# changelog (NEWS.txt), both read from the directory containing this script.
here = os.path.abspath(os.path.dirname(__file__))
README = codecs.open(os.path.join(here, 'README.rst'), encoding='utf-8').read()
NEWS = codecs.open(os.path.join(here, 'NEWS.txt'), encoding='utf-8').read()
# Package metadata and build configuration; find_version() pulls the version
# out of pyepbd/__init__.py so it is defined in exactly one place.
setup(
    name="pyepbd",
    author="Rafael Villar Burke, Daniel Jiménez González",
    author_email="pachi@ietcc.csic.es",
    version=find_version("pyepbd", "__init__.py"),
    description="Cálculo de la eficiencia energética según ISO/DIS 52000-1:2015",
    long_description=README + "\n\n" + NEWS,
    url="https://github.com/pachi/epbdcalc",
    license="MIT",
    # See https://pypi.python.org/pypi?%3Aaction=list_classifiers
    classifiers=[
        # How mature is this project? Common values are
        #   3 - Alpha
        #   4 - Beta
        #   5 - Production/Stable
        'Development Status :: 5 - Production/Stable',
        # Indicate who your project is intended for
        'Intended Audience :: Science/Research',
        'Topic :: Scientific/Engineering',
        # Pick your license as you wish (should match "license" above)
        'License :: OSI Approved :: MIT License',
        # Specify the Python versions you support here. In particular, ensure
        # that you indicate whether you support Python 2, Python 3 or both.
        'Programming Language :: Python :: 2',
        'Programming Language :: Python :: Implementation :: CPython',
        # Environment
        'Environment :: Console',
        'Operating System :: OS Independent'
    ],
    keywords=[u"energía", u"edificación", u"CTE", u"energy", u"buildings"],
    # You can just specify the packages manually here if your project is
    # simple. Or you can use find_packages().
    packages=find_packages(),
    include_package_data = True,
    # List run-time dependencies here. These will be installed by pip when
    # your project is installed. For an analysis of "install_requires" vs pip's
    # requirements files see:
    # https://packaging.python.org/en/latest/requirements.html
    install_requires=['pandas >= 0.15', 'numpy >= 1.7'],
    # dependencies for the setup script to run
    setup_requires=['pytest-runner'],
    # dependencies for the test command to run
    tests_require=['pytest', 'pytest-cov'],
    # List additional groups of dependencies here (e.g. development
    # dependencies). You can install these using the following syntax,
    # for example:
    # $ pip install -e .[dev,test]
    extras_require={
        'test': ['pytest', 'pytest-cov'],
    },
    # To provide executable scripts, use entry points in preference to the
    # "scripts" keyword. Entry points provide cross-platform support and allow
    # pip to create the appropriate form of executable for the target platform.
    entry_points={
        'console_scripts': [
            'epbdcalc=pyepbd.cli:main',
        ],},
)
| Python | 0.000003 | |
38bf3ce6db844999fe5903dad91e991c6fea57c7 | Add setup | setup.py | setup.py | #!/usr/bin/env python
from setuptools import setup, find_packages
setupconf = dict(
name = 'contract',
version = '0.3',
license = 'BSD',
url = 'https://github.com/Deepwalker/contract/',
author = 'Barbuza, Deepwalker',
author_email = 'krivushinme@gmail.com',
description = ('Validation and parsing library'),
long_description = "Place README here",
packages = find_packages(),
classifiers = [
'Intended Audience :: Developers',
'License :: OSI Approved :: BSD License',
'Operating System :: OS Independent',
'Programming Language :: Python',
],
)
if __name__ == '__main__':
setup(**setupconf)
| Python | 0.000001 | |
ea7d55fa309d592669e86dae826b7cc08323de16 | update setup.py version to 0.2 | setup.py | setup.py | from distutils.core import setup
# Build/install metadata for the mpmath arbitrary-precision library (v0.2).
setup(name='mpmath',
      description = 'Python library for arbitrary-precision floating-point arithmetic',
      version='0.2',
      url='http://mpmath.googlecode.com',
      author='Fredrik Johansson',
      author_email='fredrik.johansson@gmail.com',
      license = 'BSD',
      packages=['mpmath'],
      )
| from distutils.core import setup
# Build/install metadata for the mpmath arbitrary-precision library (v0.1).
setup(name='mpmath',
      description = 'Python library for arbitrary-precision floating-point arithmetic',
      version='0.1',
      url='http://mpmath.googlecode.com',
      author='Fredrik Johansson',
      author_email='fredrik.johansson@gmail.com',
      license = 'BSD',
      packages=['mpmath'],
      )
| Python | 0 |
12ece36bf0355ad619635675b419d9d0e7163cf4 | Add setup.py file | setup.py | setup.py | #!/usr/bin/env python
from setuptools import setup, find_packages
# Packaging metadata for django-cache-relation; find_packages() discovers
# every package directory under the project root automatically.
setup(
    name='django-cache-relation',
    description="Non-magical object caching for Django.",
    version='0.1',
    url='http://code.playfire.com/',
    author='Playfire.com',
    author_email='tech@playfire.com',
    license='BSD',
    packages=find_packages(),
)
| Python | 0.000001 | |
30d3f42b4910b84b2a3419e43ea6e5e6da2ab7a0 | Add setup | setup.py | setup.py | from setuptools import setup
# Packaging metadata for the enzynet package.
# NOTE(review): author_email still holds template placeholders
# (<author1-lastname>, <author2-firstname>) -- fill in real addresses
# before publishing.
setup(name = 'enzynet',
      description = 'EnzyNet: enzyme classification using 3D convolutional neural networks on spatial representation',
      author = 'Afshine Amidi and Shervine Amidi',
      author_email = '<author1-lastname>@mit.edu, <author2-firstname>@stanford.edu',
      license = 'MIT',
      packages = ['enzynet'])
| Python | 0.000001 |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.