source string | points list | n_points int64 | path string | repo string |
|---|---|---|---|---|
from aocd import get_data
def simulate(a, part2=False):
p = a.copy()
ip = 0
i = 0
while True:
if not 0 <= ip < len(p):
return i
prev_ip = ip
ip = ip + p[ip]
p[prev_ip] += 1 if not part2 or p[prev_ip] < 3 else -1
i += 1
def part1(a):
return simulate(a)
def part2(a):
return simulate(a, True)
if __name__ == '__main__':
data = get_data(day=5, year=2017)
inp = list(map(int, data.splitlines()))
print(part1(inp))
print(part2(inp))
| [
{
"point_num": 1,
"id": "no_function_exceeds_5_params",
"question": "Does every function in this file take 5 or fewer parameters (excluding self/cls)?",
"answer": true
},
{
"point_num": 2,
"id": "every_function_under_20_lines",
"question": "Is every function in this file shorter than... | 3 | 2017/05.py | bernikr/advent-of-code |
from ._util import get_app_id, create_app, get_app_info
from ._constants import *
def get_id(c):
"""
Find app-id by project name. Add id to config.
"""
app_name = c.config.project.name
app_id = get_app_id(c, app_name)
if not isinstance(app_id, str):
return app_id
c.config.data.app_id = app_id
print(f'{GREEN}Found app-id: {c.config.data.app_id} for {app_name}{COL_END}')
return True
def create(c):
"""
Create main project app by name. Add id to config.
"""
app_name = c.config.project.name
app_id = c.config.data.get('app_id')
c.config.data.app_id = create_app(c, app_name, app_id)
# create tmp dir
c.run(f'mkdir -p /home/{c.config.project.user}/apps/{c.config.project.name}/tmp')
return True
def get_info(c):
"""
Find main project app-info by app-id. Add info + project paths to config.
"""
app_id = c.config.data.get('app_id')
if app_id:
c.config.data.app_info = get_app_info(c, app_id)
c.config.data.app_path = f'/home/{c.config.project.user}/apps/{c.config.project.name}'.lower()
c.config.data.log_path = f'/home/{c.config.project.user}/logs/apps/{c.config.project.name}'.lower()
c.config.data.src_path = f'{c.config.data.app_path}/{c.config.project.source}'
c.config.data.env_path = f'{c.config.data.app_path}/env'
c.config.data.backup_path = f'{c.config.data.app_path}/backup'
return True
| [
{
"point_num": 1,
"id": "every_function_under_20_lines",
"question": "Is every function in this file shorter than 20 lines?",
"answer": true
},
{
"point_num": 2,
"id": "all_function_names_snake_case",
"question": "Are all function names in this file written in snake_case?",
"answ... | 3 | tasks/application.py | webtweakers/deploy |
# Copyright (c) 2017 Sony Corporation. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
import numpy as np
def is_float(x):
# x is string
try:
float(x)
return True
except ValueError:
return False
def compute_full_path(root_path, file_path):
full_path = os.path.join(root_path, file_path)
full_path = full_path.replace('\\', os.path.sep)
full_path = full_path.replace('/', os.path.sep)
full_path = full_path.replace(os.path.sep + '.' + os.path.sep, os.path.sep)
return full_path
def let_data_to_variable(variable, data, ctx=None, data_name=None, variable_name=None):
try:
if data.dtype <= np.float64:
variable.data.cast(data.dtype)[...] = data
else:
variable.d = data
except:
if variable.shape != data.shape:
logger.critical('Shape does not match between data{} and variable{} ({} != {}).'.format(
' "' + data_name + '"' if data_name else '',
' "' + variable_name + '"' if variable_name else '',
data.shape, variable.shape))
raise
variable.need_grad = False
# Copy to device
if ctx:
try:
variable.data.cast(variable.data.dtype, ctx)
except:
if ctx.array_class != 'CpuArray':
# Fallback to cpu
ctx.array_class = 'CpuArray'
variable.data.cast(variable.data.dtype, ctx)
else:
raise
| [
{
"point_num": 1,
"id": "all_params_annotated",
"question": "Does every function parameter in this file have a type annotation (excluding self/cls)?",
"answer": false
},
{
"point_num": 2,
"id": "every_function_under_20_lines",
"question": "Is every function in this file shorter than ... | 3 | python/src/nnabla/utils/cli/utility.py | syoyo/nnabla |
import flask
from flask import Flask
from .database import Database
app = Flask(__name__)
DATABASE = '/tmp/kittens.db'
def get_db():
db = getattr(flask.g, '_database', None)
if not db:
db = flask.g._database = Database(DATABASE)
return db
@app.teardown_appcontext
def close_db(exception):
db = getattr(flask.g, '_database', None)
if db:
db.close()
import kittenkollector.api
import kittenkollector.views
| [
{
"point_num": 1,
"id": "every_function_under_20_lines",
"question": "Is every function in this file shorter than 20 lines?",
"answer": true
},
{
"point_num": 2,
"id": "every_function_has_docstring",
"question": "Does every function in this file have a docstring?",
"answer": fals... | 3 | kittenkollector/__init__.py | jedevc/hack-the-midlands |
# coding: utf-8
"""
Kubernetes
No description provided (generated by Swagger Codegen https://github.com/swagger-api/swagger-codegen)
OpenAPI spec version: v1.13.1
Generated by: https://github.com/swagger-api/swagger-codegen.git
"""
from __future__ import absolute_import
import os
import sys
import unittest
import kubernetes.client
from kubernetes.client.rest import ApiException
from kubernetes.client.models.v1beta1_custom_resource_definition_version import V1beta1CustomResourceDefinitionVersion
class TestV1beta1CustomResourceDefinitionVersion(unittest.TestCase):
""" V1beta1CustomResourceDefinitionVersion unit test stubs """
def setUp(self):
pass
def tearDown(self):
pass
def testV1beta1CustomResourceDefinitionVersion(self):
"""
Test V1beta1CustomResourceDefinitionVersion
"""
# FIXME: construct object with mandatory attributes with example values
#model = kubernetes.client.models.v1beta1_custom_resource_definition_version.V1beta1CustomResourceDefinitionVersion()
pass
if __name__ == '__main__':
unittest.main()
| [
{
"point_num": 1,
"id": "all_function_names_snake_case",
"question": "Are all function names in this file written in snake_case?",
"answer": false
},
{
"point_num": 2,
"id": "more_functions_than_classes",
"question": "Does this file define more functions than classes?",
"answer":... | 3 | kubernetes/test/test_v1beta1_custom_resource_definition_version.py | iguazio/python |
import functools
from torch.nn import functional as F
from torchsparse.sparse_tensor import *
__all__ = ['spact', 'sprelu', 'spleaky_relu']
def spact(inputs, act_funct=F.relu):
features = inputs.F
coords = inputs.C
cur_stride = inputs.s
output_features = act_funct(features)
output_tensor = SparseTensor(output_features, coords, cur_stride)
output_tensor.coord_maps = inputs.coord_maps
output_tensor.kernel_maps = inputs.kernel_maps
return output_tensor
def sprelu(inputs, inplace=True):
return spact(inputs, functools.partial(F.relu, inplace=inplace))
def spleaky_relu(inputs, negative_slope=0.1, inplace=True):
return spact(
inputs,
functools.partial(F.leaky_relu,
inplace=inplace,
negative_slope=negative_slope))
| [
{
"point_num": 1,
"id": "all_function_names_snake_case",
"question": "Are all function names in this file written in snake_case?",
"answer": true
},
{
"point_num": 2,
"id": "no_function_exceeds_5_params",
"question": "Does every function in this file take 5 or fewer parameters (exclu... | 3 | torchsparse/nn/functional/activation.py | ashawkey/torchsparse |
"""
@name: PyHouse_Install/src/Install/private.py
@author: D. Brian Kimmel
@contact: D.BrianKimmel@gmail.com
@copyright: (c) 2016-2016 by D. Brian Kimmel
@license: MIT License
@note: Created May 13, 2016
@Summary: Create .private
Create the /etc/pyhouse/.private.yaml file that will hold the secret information used by the pyhouse system.
HOSTNAME: hostname
MQTT: true
NODE_RED: false
"""
import yaml
Y_FILE = '/etc/pyhouse/.private.yaml'
class Private(object):
def __init__(self):
self.hostname = None
class API(object):
"""
"""
def __init__(self):
self.m_private = Private()
self.read_yaml()
def read_yaml(self):
l_file = open(Y_FILE)
# use safe_load instead load
self.m_private = yaml.safe_load(l_file)
l_file.close()
def write_yaml(self):
l_file = open('newtree.yaml', "w")
yaml.dump(self.m_private, l_file)
l_file.close()
if __name__ == '__main--':
API()
# ## END DBK
| [
{
"point_num": 1,
"id": "all_function_names_snake_case",
"question": "Are all function names in this file written in snake_case?",
"answer": true
},
{
"point_num": 2,
"id": "every_class_has_docstring",
"question": "Does every class in this file have a docstring?",
"answer": false... | 3 | src/Install/private.py | DBrianKimmel/PyHouse_Install |
from crispy_forms.helper import FormHelper
from crispy_forms.layout import Layout, HTML, Submit
from django import forms
from django.contrib.auth.mixins import LoginRequiredMixin
from django.urls import reverse_lazy
from django.views.generic import UpdateView
from YtManagerApp.models import UserSettings
class SettingsForm(forms.ModelForm):
class Meta:
model = UserSettings
exclude = ['user']
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
self.helper = FormHelper()
self.helper.form_class = 'form-horizontal'
self.helper.label_class = 'col-lg-3'
self.helper.field_class = 'col-lg-9'
self.helper.layout = Layout(
'mark_deleted_as_watched',
'delete_watched',
HTML('<h2>Download settings</h2>'),
'auto_download',
'download_path',
'download_file_pattern',
'download_format',
'download_order',
'download_global_limit',
'download_subscription_limit',
HTML('<h2>Subtitles download settings</h2>'),
'download_subtitles',
'download_subtitles_langs',
'download_subtitles_all',
'download_autogenerated_subtitles',
'download_subtitles_format',
Submit('submit', value='Save')
)
class SettingsView(LoginRequiredMixin, UpdateView):
form_class = SettingsForm
model = UserSettings
template_name = 'YtManagerApp/settings.html'
success_url = reverse_lazy('home')
def get_object(self, queryset=None):
obj, _ = self.model.objects.get_or_create(user=self.request.user)
return obj
| [
{
"point_num": 1,
"id": "every_function_has_docstring",
"question": "Does every function in this file have a docstring?",
"answer": false
},
{
"point_num": 2,
"id": "more_functions_than_classes",
"question": "Does this file define more functions than classes?",
"answer": false
... | 3 | app/YtManagerApp/views/settings.py | Netrecov/ytsm |
import math
from ga import GA, Individual, IndividualMeta, Selector
from ga_codec import CodecPlugin
from ga_cm import CmPlugin
from ga_iter import StopIterPlugin
from ga_generate import GeneratePlugin
from ga_selector import Selector
def get_fitness(individual: Individual):
val = individual.phenotype.phenotype
return -math.log(abs(val[0] - 19.1))
def run_simple_ga():
individual_meta = IndividualMeta()
individual_meta.range_list = [(19, 20)]
individual_meta.bit_count = [100]
codec_plugin = CodecPlugin()
cm_plugin = CmPlugin(0.1, 0.1)
stop_iter_plugin = StopIterPlugin(100)
generate_plugin = GeneratePlugin(100, individual_meta)
selector = Selector(get_fitness)
ga = GA(
codec_plugin,
cm_plugin,
stop_iter_plugin,
"",
generate_plugin
)
ga.setup_population() # 初始化种群
ga.use_selector(selector)
for idx, _ in enumerate(ga):
pass
if __name__ == "__main__":
run_simple_ga()
| [
{
"point_num": 1,
"id": "every_function_under_20_lines",
"question": "Is every function in this file shorter than 20 lines?",
"answer": false
},
{
"point_num": 2,
"id": "every_function_has_docstring",
"question": "Does every function in this file have a docstring?",
"answer": fal... | 3 | example/simple_ga/simple_ga.py | lipopo/ga |
import torch
from lab.configs import BaseConfigs
class DeviceInfo:
def __init__(self, *,
use_cuda: bool,
cuda_device: int):
self.use_cuda = use_cuda
self.cuda_device = cuda_device
self.cuda_count = torch.cuda.device_count()
self.is_cuda = self.use_cuda and torch.cuda.is_available()
if not self.is_cuda:
self.device = torch.device('cpu')
else:
if self.cuda_device < self.cuda_count:
self.device = torch.device('cuda', self.cuda_device)
else:
self.device = torch.device('cuda', self.cuda_count - 1)
def __str__(self):
if not self.is_cuda:
return "CPU"
if self.cuda_device < self.cuda_count:
return f"GPU:{self.cuda_device} - {torch.cuda.get_device_name(self.cuda_device)}"
else:
return (f"GPU:{self.cuda_count - 1}({self.cuda_device}) "
f"- {torch.cuda.get_device_name(self.cuda_count - 1)}")
class DeviceConfigs(BaseConfigs):
cuda_device: int = 0
use_cuda: bool = True
device_info: DeviceInfo
device: torch.device
@DeviceConfigs.calc(DeviceConfigs.device)
def _device(c: DeviceConfigs):
return c.device_info.device
DeviceConfigs.set_hyperparams(DeviceConfigs.cuda_device, DeviceConfigs.use_cuda,
is_hyperparam=False)
@DeviceConfigs.calc(DeviceConfigs.device_info)
def _device_info(c: DeviceConfigs):
return DeviceInfo(use_cuda=c.use_cuda,
cuda_device=c.cuda_device)
| [
{
"point_num": 1,
"id": "has_multiple_inheritance",
"question": "Does any class in this file use multiple inheritance?",
"answer": false
},
{
"point_num": 2,
"id": "has_nested_function_def",
"question": "Does this file contain any function defined inside another function?",
"answ... | 3 | lab/helpers/pytorch/device.py | vidhiJain/lab |
from unittest.mock import MagicMock, patch, call
from tagtrain import data
from . import fake
from tagtrain.tagtrain.tt_remove import Remove
@patch('tagtrain.data.by_owner.remove_user_from_group')
def test_unknown_group(remove_user_from_group):
remove_user_from_group.side_effect = data.Group.DoesNotExist()
app, reply, message, match = fake.create_all()
Remove(app).run(reply, message, match)
remove_user_from_group.assert_called_once_with('AuthorName', 'GroupName', 'MemberName')
reply.append.assert_called_once_with('Group `GroupName` does not exist. Skipping.')
@patch('tagtrain.data.by_owner.remove_user_from_group')
def test_unknown_member(remove_user_from_group):
remove_user_from_group.side_effect = data.Member.DoesNotExist()
app, reply, message, match = fake.create_all()
Remove(app).run(reply, message, match)
remove_user_from_group.assert_called_once_with('AuthorName', 'GroupName', 'MemberName')
reply.append.assert_called_once_with('`MemberName` is not a Member of Group `GroupName`. Skipping.')
@patch('tagtrain.data.by_owner.remove_user_from_group')
def test_good(remove_user_from_group):
remove_user_from_group.return_value = fake.create_group(name='GroupName', member_count=99)
app, reply, message, match = fake.create_all()
Remove(app).run(reply, message, match)
remove_user_from_group.assert_called_once_with('AuthorName', 'GroupName', 'MemberName')
reply.append.assert_called_once_with('`MemberName` removed from Group `GroupName`, 99 total Members.')
@patch('tagtrain.data.by_owner.remove_user_from_group')
def test_good_no_members(remove_user_from_group):
remove_user_from_group.return_value = fake.create_group(name='GroupName', member_count=0)
app, reply, message, match = fake.create_all()
Remove(app).run(reply, message, match)
remove_user_from_group.assert_called_once_with('AuthorName', 'GroupName', 'MemberName')
reply.append.assert_called_once_with('`MemberName` removed from Group `GroupName`, 0 total Members.')
| [
{
"point_num": 1,
"id": "every_function_under_20_lines",
"question": "Is every function in this file shorter than 20 lines?",
"answer": true
},
{
"point_num": 2,
"id": "all_function_names_snake_case",
"question": "Are all function names in this file written in snake_case?",
"answ... | 3 | tests/tagtrain/test_remove.py | c17r/TagTrain |
libpath = ""
def _set_lib_path(p_path):
global libpath
if libpath != "":
libpath += " -L%s" % p_path
else:
libpath += "-L%s" % p_path
def _get_lib_path():
return libpath
libs = ""
def _add_lib(p_lib):
global libs
if libs != "":
libs += " -l%s" % p_lib
else:
libs += "-l%s" % p_lib
def _get_libs():
return libs | [
{
"point_num": 1,
"id": "every_function_under_20_lines",
"question": "Is every function in this file shorter than 20 lines?",
"answer": true
},
{
"point_num": 2,
"id": "every_function_has_docstring",
"question": "Does every function in this file have a docstring?",
"answer": fals... | 3 | Config/libraries.py | HashzSoftware/YamlMake |
# coding: utf-8
"""
Algorithmia Management APIs
APIs for managing actions on the Algorithmia platform # noqa: E501
OpenAPI spec version: 1.0.1
Contact: support@algorithmia.com
Generated by: https://openapi-generator.tech
"""
from __future__ import absolute_import
import unittest
import algorithmia_api_client
from algorithmia_api_client.models.details import Details # noqa: E501
from algorithmia_api_client.rest import ApiException
class TestDetails(unittest.TestCase):
"""Details unit test stubs"""
def setUp(self):
pass
def tearDown(self):
pass
def testDetails(self):
"""Test Details"""
# FIXME: construct object with mandatory attributes with example values
# model = algorithmia_api_client.models.details.Details() # noqa: E501
pass
if __name__ == '__main__':
unittest.main()
| [
{
"point_num": 1,
"id": "every_function_under_20_lines",
"question": "Is every function in this file shorter than 20 lines?",
"answer": true
},
{
"point_num": 2,
"id": "more_functions_than_classes",
"question": "Does this file define more functions than classes?",
"answer": true
... | 3 | env/Lib/site-packages/test/test_details.py | Vivek-Kamboj/Sargam |
import sys
from collections import defaultdict
def letter_counts(code):
counts = defaultdict(lambda: 0)
for c in code:
counts[c] += 1
return dict(counts)
def answer(path):
with open(path) as f:
codes = f.read().strip().split("\n")
n2, n3 = 0, 0
for code in codes:
counts = letter_counts(code)
values = set(counts.values())
if 2 in values:
n2 += 1
if 3 in values:
n3 += 1
return n2 * n3
if __name__ == "__main__":
print(answer(sys.argv[1]))
| [
{
"point_num": 1,
"id": "every_function_under_20_lines",
"question": "Is every function in this file shorter than 20 lines?",
"answer": true
},
{
"point_num": 2,
"id": "has_nested_function_def",
"question": "Does this file contain any function defined inside another function?",
"... | 3 | 2018/day02.py | tcbegley/advent-of-code |
import json
from sanic import Sanic, text
from sanic.log import LOGGING_CONFIG_DEFAULTS, logger
LOGGING_CONFIG = {**LOGGING_CONFIG_DEFAULTS}
LOGGING_CONFIG["formatters"]["generic"]["format"] = "%(message)s"
LOGGING_CONFIG["loggers"]["sanic.root"]["level"] = "DEBUG"
app = Sanic("FakeServer", log_config=LOGGING_CONFIG)
@app.get("/")
async def handler(request):
return text(request.ip)
@app.before_server_start
async def app_info_dump(app: Sanic, _):
app_data = {
"access_log": app.config.ACCESS_LOG,
"auto_reload": app.auto_reload,
"debug": app.debug,
"noisy_exceptions": app.config.NOISY_EXCEPTIONS,
}
logger.info(json.dumps(app_data))
@app.after_server_start
async def shutdown(app: Sanic, _):
app.stop()
def create_app():
return app
def create_app_with_args(args):
try:
print(f"foo={args.foo}")
except AttributeError:
print(f"module={args.module}")
return app
| [
{
"point_num": 1,
"id": "all_function_names_snake_case",
"question": "Are all function names in this file written in snake_case?",
"answer": true
},
{
"point_num": 2,
"id": "every_function_under_20_lines",
"question": "Is every function in this file shorter than 20 lines?",
"answ... | 3 | tests/fake/server.py | Lin0818/sanic |
import logging
import base64
from abc import ABC, abstractmethod
logger = logging.getLogger(__name__)
class EncoderStrategy(ABC):
"""
interface that defines the way the encoding is done
"""
@abstractmethod
def encode(self, string):
"""
encode
"""
pass
@abstractmethod
def decode(self, string):
"""
decode
"""
pass
class Base64Strategy(EncoderStrategy):
def encode(self, str):
logger.info(f"\n encoding Base64: {str}")
message_bytes = str.encode('ascii')
base64_bytes = base64.b64encode(message_bytes)
base64_message = base64_bytes.decode('ascii')
logger.info(f"\n encoded value: {base64_message}")
def decode(self, str):
logger.info(f"\n decoding Base64 :{str}")
message_bytes = str.encode('ascii')
base64_bytes = base64.b64decode(message_bytes)
base64_message = base64_bytes.decode('ascii')
logger.info(f"\n decoded value : {base64_message}")
class ASCCIStrategy(EncoderStrategy):
def encode(self, str):
logger.info(f"\n encoding ascci: {str}")
message_bytes = str.encode('ascii')
logger.info(f"\n encoded value: {message_bytes}")
def decode(self, str):
logger.info(f"\n decoding ascci: {str}")
message_bytes = str.decode('ascii')
logger.info(f"\n encoded value: {message_bytes}")
| [
{
"point_num": 1,
"id": "all_function_names_snake_case",
"question": "Are all function names in this file written in snake_case?",
"answer": true
},
{
"point_num": 2,
"id": "every_function_under_20_lines",
"question": "Is every function in this file shorter than 20 lines?",
"answ... | 3 | EncoderStrategy.py | Samielleuch/PentboxClone |
# Copyright (c) 2016 Mirantis Inc.
#
# Licensed under the Apache License, Version 2.0 (the License);
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an AS IS BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and#
# limitations under the License.
import logging
from cloudferry.lib.os.storage import cinder_db
from cloudferry.lib.utils import mysql_connector
LOG = logging.getLogger(__name__)
def mark_volumes_deleted(config, volume_ids):
def get_opt(name):
return (getattr(config.src_storage, name) or
getattr(config.src_mysql, name))
db_config = {opt: get_opt(opt) for opt in ('db_user', 'db_password',
'db_host', 'db_port',
'db_connection')}
db_name = config.src_storage.db_name
conn = mysql_connector.MysqlConnector(db_config, db_name)
src_db = cinder_db.CinderDBBroker(conn)
result = []
with conn.transaction():
for volume_id in volume_ids:
volume = src_db.get_cinder_volume(volume_id)
if volume is None:
LOG.error("Volume '%s' not found.", volume_id)
result.append((volume_id, None, 'not found'))
continue
if volume.deleted:
LOG.warning("Volume '%s' is already deleted.", volume_id)
result.append((volume.id, volume.deleted_at, 'skipped'))
continue
LOG.debug("Mark volume '%s' as deleted.", volume_id)
volume = src_db.delete_volume(volume_id)
result.append((volume.id, volume.deleted_at, 'deleted'))
return result
| [
{
"point_num": 1,
"id": "no_function_exceeds_5_params",
"question": "Does every function in this file take 5 or fewer parameters (excluding self/cls)?",
"answer": true
},
{
"point_num": 2,
"id": "all_params_annotated",
"question": "Does every function parameter in this file have a ty... | 3 | cloudferry/tools/mark_volumes_deleted.py | SVilgelm/CloudFerry |
# Copyright 2019-2021 ETH Zurich and the DaCe authors. All rights reserved.
""" A test for the MapToForLoop transformation. """
import dace
import numpy as np
from dace.transformation.dataflow import MapExpansion, MapToForLoop
@dace.program
def map2for(A: dace.float64[20, 20, 20]):
for k in range(1, 19):
for i, j in dace.map[0:20, 0:20]:
with dace.tasklet:
inp << A[i, j, k]
inp2 << A[i, j, k - 1]
out >> A[i, j, k + 1]
out = inp + inp2
def test_map2for_overlap():
A = np.random.rand(20, 20, 20)
expected = np.copy(A)
for k in range(1, 19):
expected[:, :, k + 1] = expected[:, :, k] + expected[:, :, k - 1]
sdfg = map2for.to_sdfg()
assert sdfg.apply_transformations([MapExpansion, MapToForLoop]) == 2
sdfg(A=A)
assert np.allclose(A, expected)
if __name__ == '__main__':
test_map2for_overlap() | [
{
"point_num": 1,
"id": "has_nested_function_def",
"question": "Does this file contain any function defined inside another function?",
"answer": false
},
{
"point_num": 2,
"id": "every_function_has_docstring",
"question": "Does every function in this file have a docstring?",
"ans... | 3 | tests/transformations/maptoforloop_test.py | Walon1998/dace |
import os
import pandas as pd
from string import Template
import wget
csv_file_path = "https://docs.google.com/spreadsheets/d/1AlflVlTg1KmajQrWBOUBT2XeoAUqfjB9SCQfDIPvSXo/export?format=csv&gid=565678921"
project_card_path = "assets/templates/project_card.html"
projects_page_path = "assets/templates/template_projects.md"
def populate_project_card(title, description, leader):
with open(str(project_card_path), 'r') as card:
card_tpl = Template(card.read())
card_html = card_tpl.substitute(projectTitle=title,
projectDescription=description,
projectLeader=leader)
card.close()
return card_html
def populate_projects_page(html):
with open(str(projects_page_path), 'r') as prj:
prj_tpl = Template(prj.read())
prj_html = prj_tpl.substitute(projectCards=html,
link="/projects/")
prj.close()
return prj_html
def main():
# Download CSV file
filename = wget.download(csv_file_path)
# Read CSV file
df = pd.read_csv(filename)
df = df[df["Leader:"].notna()]
prj_card = ""
for pj_index, prj_row in df.iterrows():
prj_title = prj_row["Project title:"]
prj_descr = prj_row["Project description:"]
prj_leader = prj_row["Leader:"]
prj_card += populate_project_card(prj_title, prj_descr, prj_leader)
prj_page = populate_projects_page(prj_card)
with open("projects.md", "wb") as f:
f.write(prj_page.encode("utf-8"))
os.remove(filename)
if __name__ == "__main__":
main() | [
{
"point_num": 1,
"id": "every_function_has_docstring",
"question": "Does every function in this file have a docstring?",
"answer": false
},
{
"point_num": 2,
"id": "all_function_names_snake_case",
"question": "Are all function names in this file written in snake_case?",
"answer"... | 3 | populate_projects.py | anibalsolon/brainhack-donostia.github.io |
# Copyright (c) 2012-2018 SoftBank Robotics. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the COPYING file.
import qisys.sh
import qitoolchain.qipackage
import qisrc.svn
class SvnPackage(qitoolchain.qipackage.QiPackage): # pylint: disable=too-many-instance-attributes
""" A ``QiPackage`` managed by subversion """
def __init__(self, name):
super(SvnPackage, self).__init__(name)
self.url = None
self.revision = None
@property
def svn(self):
return qisrc.svn.Svn(self.path)
def update(self):
""" Run ``svn update`` with the appropriate revision """
cmd = ["update"]
if self.revision:
cmd.extend(["--revision", self.revision])
self.svn.call(*cmd)
def checkout(self):
""" Run ``svn checkout`` to create the package files """
qisys.sh.mkdir(self.path, recursive=True)
self.svn.call("checkout", self.url, ".", "--quiet")
def commit_all(self):
""" Commit all changes made to this package files """
self.svn.commit_all("Update %s" % self.name)
| [
{
"point_num": 1,
"id": "has_multiple_inheritance",
"question": "Does any class in this file use multiple inheritance?",
"answer": false
},
{
"point_num": 2,
"id": "all_function_names_snake_case",
"question": "Are all function names in this file written in snake_case?",
"answer":... | 3 | python/qitoolchain/svn_package.py | vbarbaresi/qibuild |
import numpy as np
NW = (52.58363, 13.2035)
SE = (52.42755, 13.62648)
NE = (NW[0], SE[1])
SW = (SE[0], NW[1])
def flatten_list(irregularly_nested_list):
"""Generator which recursively flattens list of lists
:param irregularly_nested_list: iterable object containing iterable and non-iterable objects as elements
"""
for el in irregularly_nested_list:
if isinstance(el, list):
for sub_el in flatten_list(el):
yield sub_el
else:
yield el
def create_grid_of_berlin(cnt_x=60, cnt_y=40):
x = np.linspace(NW[0], SE[0], cnt_x)
y = np.linspace(NW[1], SE[1], cnt_y)
return x, y
| [
{
"point_num": 1,
"id": "no_function_exceeds_5_params",
"question": "Does every function in this file take 5 or fewer parameters (excluding self/cls)?",
"answer": true
},
{
"point_num": 2,
"id": "has_nested_function_def",
"question": "Does this file contain any function defined insid... | 3 | backend_app/util.py | GGCarrotsBerlin/test |
from torch.distributions import constraints
from torch.distributions.transforms import AbsTransform
from pyro.distributions.torch import TransformedDistribution
class ReflectedDistribution(TransformedDistribution):
"""
Equivalent to ``TransformedDistribution(base_dist, AbsTransform())``,
but additionally supports :meth:`log_prob` .
:param ~torch.distributions.Distribution base_dist: The distribution to
reflect.
"""
support = constraints.positive
def __init__(self, base_dist, validate_args=None):
if base_dist.event_shape:
raise ValueError("Only univariate distributions can be reflected.")
super().__init__(base_dist, AbsTransform(), validate_args)
def expand(self, batch_shape, _instance=None):
new = self._get_checked_instance(type(self), _instance)
return super().expand(batch_shape, _instance=new)
def log_prob(self, value):
if self._validate_args:
self._validate_sample(value)
dim = max(len(self.batch_shape), value.dim())
plus_minus = value.new_tensor([1., -1.]).reshape((2,) + (1,) * dim)
return self.base_dist.log_prob(plus_minus * value).logsumexp(0)
| [
{
"point_num": 1,
"id": "all_function_names_snake_case",
"question": "Are all function names in this file written in snake_case?",
"answer": true
},
{
"point_num": 2,
"id": "no_function_exceeds_5_params",
"question": "Does every function in this file take 5 or fewer parameters (exclu... | 3 | pyro/distributions/reflected.py | ajrcampbell/pyro |
import torch
import torch.nn as nn
import torch.nn.functional as F
class GraphClassifier(nn.Module):
def __init__(self, hidden_dim: int, num_classes: int, pooling_op: str):
super(GraphClassifier, self).__init__()
# TODO: Define the graph classifier
# graph classifier can be an MLP
self.linear = nn.Linear(hidden_dim, hidden_dim)
self.graph_classifier = nn.Linear(hidden_dim, num_classes)
self.pooling_op = pooling_op
def forward(self, x: torch.Tensor) -> torch.Tensor:
"""Given node features x, applies two operations:
1. Pools the node representations using the pooling operation specified
2. Applies a classifier to the pooled representation
"""
# TODO: Implement the forward pass for the graph classifier
pooled_rep = self.pool(x, self.pooling_op)
out = self.linear(pooled_rep)
classifier_logits = self.graph_classifier(out)
return classifier_logits
def pool(self, x: torch.Tensor, operation: str = "last") -> torch.Tensor:
"""Given node features x, applies a pooling operation to return a
single aggregated feature vector.
Args:
x (torch.Tensor): [The node features]
operation (str, optional): [description]. Defaults to "last".
Raises:
NotImplementedError: [description]
Returns:
torch.Tensor: [A single feature vector for the graph]
"""
if operation == "mean":
return x.mean(dim=0)
elif operation == "max":
return x.max(dim=0)[0]
elif operation == "last":
return x[-1]
else:
raise NotImplementedError()
| [
{
"point_num": 1,
"id": "has_multiple_inheritance",
"question": "Does any class in this file use multiple inheritance?",
"answer": false
},
{
"point_num": 2,
"id": "more_functions_than_classes",
"question": "Does this file define more functions than classes?",
"answer": true
},... | 3 | src/modeling/tasks/graph_classification.py | Srijanb97/gcn_assignment |
#Copyright (c) 2020 Jan Kiefer
#THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
#IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
#FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
#AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
#LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
#OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
#SOFTWARE.
import time
from .position import Position
from .waypoint import Waypoint
class Path:
def __init__(self, start, duration, received_at):
self.start = start
self.duration = duration
self.waypoints = []
self.received_at = received_at
@staticmethod
def from_utfmsg(utfmsg):
if utfmsg == None:
return None
array = utfmsg.get_int_list_arg(0)
if array == None or len(array) < 5:
return None
path = Path(Position(array[1], array[2], array[3]), array[4] / 1000, time.time())
for x in range(array[0]):
wp = utfmsg.get_int_list_arg(x + 1)
path.add_waypoint(Waypoint(Position(wp[0], wp[1], wp[2]), wp[3] / 1000))
return path
def add_waypoint(self, waypoint):
self.waypoints.append(waypoint)
def age(self):
return time.time() - self.received_at | [
{
"point_num": 1,
"id": "all_function_names_snake_case",
"question": "Are all function names in this file written in snake_case?",
"answer": true
},
{
"point_num": 2,
"id": "every_function_under_20_lines",
"question": "Is every function in this file shorter than 20 lines?",
"answ... | 3 | freggersbot/data/path.py | Jan000/Python-Freggers-Bot |
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import datetime
import iso8601
from cinder.message import defined_messages
from cinder.tests.unit import fake_constants as fake
FAKE_UUID = fake.OBJECT_ID
def stub_message(id, **kwargs):
message = {
'id': id,
'event_id': defined_messages.UNABLE_TO_ALLOCATE,
'message_level': "ERROR",
'request_id': FAKE_UUID,
'updated_at': datetime.datetime(1900, 1, 1, 1, 1, 1,
tzinfo=iso8601.iso8601.Utc()),
'created_at': datetime.datetime(1900, 1, 1, 1, 1, 1,
tzinfo=iso8601.iso8601.Utc()),
'expires_at': datetime.datetime(1900, 1, 1, 1, 1, 1,
tzinfo=iso8601.iso8601.Utc()),
}
message.update(kwargs)
return message
def stub_message_get(self, context, message_id):
return stub_message(message_id)
| [
{
"point_num": 1,
"id": "every_function_has_docstring",
"question": "Does every function in this file have a docstring?",
"answer": false
},
{
"point_num": 2,
"id": "all_params_annotated",
"question": "Does every function parameter in this file have a type annotation (excluding self/... | 3 | cinder/tests/unit/api/v3/stubs.py | bswartz/cinder |
from django.db.models.signals import post_save, post_delete
from django.dispatch import receiver
from django.contrib.auth.models import User
from .models import Profile
from django.core.mail import send_mail
from django.conf import settings
def createProfile(sender, instance, created, **kwargs):
if created:
user = instance
profile = Profile.objects.create(
user=user,
username=user.username,
email=user.email,
name=user.first_name,
)
subject = 'Welcome to MsDevelopers'
message = 'We are glad you are here!'
send_mail(
subject,
message,
settings.EMAIL_HOST_USER,
[profile.email],
fail_silently = False
)
def updateUser(sender, instance, created, **kwargs):
profile = instance
user = profile.user
if created == False:
user.first_name = profile.name
user.username = profile.username
user.email = profile.email
user.save()
def deleteUser(sender, instance, **kwargs):
try:
user = instance.user
user.delete()
except:
pass
post_save.connect(createProfile, sender=User)
post_save.connect(updateUser, sender=Profile)
post_delete.connect(deleteUser, sender=Profile) | [
{
"point_num": 1,
"id": "all_function_names_snake_case",
"question": "Are all function names in this file written in snake_case?",
"answer": false
},
{
"point_num": 2,
"id": "every_function_under_20_lines",
"question": "Is every function in this file shorter than 20 lines?",
"ans... | 3 | users/signals.py | kiman121/ms-developers |
import numpy as np
from pddlgym.core import get_successor_states, InvalidAction
from pddlgym.inference import check_goal
def get_all_reachable(s, A, env, reach=None):
reach = {} if not reach else reach
reach[s] = {}
for a in A:
try:
succ = get_successor_states(s,
a,
env.domain,
raise_error_on_invalid_action=True,
return_probs=True)
except InvalidAction:
succ = {s: 1.0}
reach[s][a] = {s_: prob for s_, prob in succ.items()}
for s_ in succ:
if s_ not in reach:
reach.update(get_all_reachable(s_, A, env, reach))
return reach
def vi(S, succ_states, A, V_i, G_i, goal, env, gamma, epsilon):
V = np.zeros(len(V_i))
P = np.zeros(len(V_i))
pi = np.full(len(V_i), None)
print(len(S), len(V_i), len(G_i), len(P))
print(G_i)
P[G_i] = 1
i = 0
diff = np.inf
while True:
print('Iteration', i, diff)
V_ = np.copy(V)
P_ = np.copy(P)
for s in S:
if check_goal(s, goal):
continue
Q = np.zeros(len(A))
Q_p = np.zeros(len(A))
cost = 1
for i_a, a in enumerate(A):
succ = succ_states[s, a]
probs = np.fromiter(iter(succ.values()), dtype=float)
succ_i = [V_i[succ_s] for succ_s in succ_states[s, a]]
Q[i_a] = cost + np.dot(probs, gamma * V_[succ_i])
Q_p[i_a] = np.dot(probs, P_[succ_i])
V[V_i[s]] = np.min(Q)
P[V_i[s]] = np.max(Q_p)
pi[V_i[s]] = A[np.argmin(Q)]
diff = np.linalg.norm(V_ - V, np.inf)
if diff < epsilon:
break
i += 1
return V, pi
| [
{
"point_num": 1,
"id": "all_params_annotated",
"question": "Does every function parameter in this file have a type annotation (excluding self/cls)?",
"answer": false
},
{
"point_num": 2,
"id": "every_function_has_docstring",
"question": "Does every function in this file have a docst... | 3 | mdp.py | GCrispino/vi-pddlgym |
# coding: utf-8
from ac_engine.actions.abstract import AbstractAction
class AbstractStatistics(AbstractAction):
EXCLUSION_SET = ()
@property
def data_container_class(self):
return None
def prepare_data(self):
for offer in self.offers:
if offer:
self.offers_data.append(offer.to_array(self.EXCLUSION_SET))
self.offers_price.append(offer.price)
self.offers_data_keys = self.offers[0].get_param_names(self.EXCLUSION_SET)
def calculate(self):
from django.conf import settings
from ac_computing.algorithm.Statistics import Statistics
os = Statistics(
self.offers_data,
self.offers_data_keys,
self.offers_price
)
self.correlations = os.get_correlations()
self.statistics = os.get_statistics(settings.HINT_CORRELATIONS_RESULT_NUMBER)
def execute(self):
self.prepare_data()
self.calculate()
self.result = {
'statistics': self.statistics
}
@property
def serialize(self):
return {
"price": {
"min": round(float(self.result['statistics']['min']), 2),
"max": round(float(self.result['statistics']['max']), 2),
"mean": round(float(self.result['statistics']['mean']), 2),
"median": round(float(self.result['statistics']['median'][0]), 2)
if type(self.result['statistics']['median']) is list
else round(float(self.result['statistics']['median']), 2),
"deviation": round(float(self.result['statistics']['stdDev']), 2)
},
"correlations":
[{'name': c[0], 'value': round(float(c[1]), 2)} for c in self.result['statistics']['correlations']]
}
| [
{
"point_num": 1,
"id": "has_multiple_inheritance",
"question": "Does any class in this file use multiple inheritance?",
"answer": false
},
{
"point_num": 2,
"id": "all_function_names_snake_case",
"question": "Are all function names in this file written in snake_case?",
"answer":... | 3 | app/ac_engine/actions/statistics.py | marwahaha/allecena |
from flask import Flask, request, jsonify
from flask import templating
from . import helper
from . import db
from .config import Config
app = Flask(__name__)
Config = Config()
@app.route("/")
def index():
return templating.render_template("index.html", maxlength=Config.url_length)
@app.route("/r/<token>")
def token(token):
if token == "":
return "Please enter a token"
url = db.get_url(token)
if not url:
return templating.render_template("error.html")
return templating.render_template("redirect.html", url=url)
@app.route("/create_url", methods=["POST"])
def create_url():
request.get_json(force=True)
json = request.json
if json["token"] == "":
token = helper.generate_random(Config.url_length)
elif json["token"] != None:
token = json["token"]
else:
token = helper.generate_random(Config.url_length)
url = json["url"]
if not url:
return jsonify(
{"succes": False, "error": "no Url specified"}
)
if len(token) > Config.url_length:
return jsonify(
{"succes": False, "error": "token too long"}
)
info = db.create_short(token, url)
if type(info) == str:
return jsonify(
{"succes": False, "error": info}
)
return jsonify(
{"succes": True, "token": token}
)
if __name__ == "__main__":
app.run("127.0.0.1", "5000")
| [
{
"point_num": 1,
"id": "all_function_names_snake_case",
"question": "Are all function names in this file written in snake_case?",
"answer": true
},
{
"point_num": 2,
"id": "every_function_has_docstring",
"question": "Does every function in this file have a docstring?",
"answer":... | 3 | URL_SHORTENER/app/__init__.py | NeonCrafter13/url_shortener |
# 多线程编程
# GIL锁在单cpu上只能执行一个,效率低
# 多进程可以在多个cpu上执行
# 多线程和多线程都能够并发为什么在IO操作是不使用多进程
# 进程切换比较耗时
# 1.对于耗cpu,多进程优于多线程
# 2.对于IO操作,多线程由于多进程
from concurrent.futures import ProcessPoolExecutor, ThreadPoolExecutor
import os
import time
# pid = os.fork() # 同时会存在两个进程,会拷贝父进程中的代码和数据到子进程中
#
# print("boby")
#
# if pid == 0:
# print("子进程 {} , 父进程 {}.".format(os.getpid(), os.getppid()))
# else:
# print("父进程 {}.".format(pid))
# 多进程
import multiprocessing
def get_html(n):
time.sleep(n)
return n
class MyProcess(multiprocessing.Process):
def run(self):
pass
if __name__ == '__main__':
process = multiprocessing.Process(target=get_html, args=(2,))
print(process.pid)
process.start()
process.join()
print(process.pid)
print("multiprocessing is end")
pool = multiprocessing.Pool(multiprocessing.cpu_count())
# result = pool.apply_async(get_html, (2,))
# pool.close()
# print(result.get())
for result in pool.imap(get_html, [1, 5, 3]):
print("{} sleep success".format(result))
for result in pool.imap_unordered(get_html, [1, 5, 3]):
print("{} sleep success".format(result))
| [
{
"point_num": 1,
"id": "every_function_has_docstring",
"question": "Does every function in this file have a docstring?",
"answer": false
},
{
"point_num": 2,
"id": "has_nested_function_def",
"question": "Does this file contain any function defined inside another function?",
"ans... | 3 | pythonBase/advancePyton/chapter11/process_test.py | cangchengkun/pythonbase |
##
# Copyright (c) 2007-2016 Apple Inc. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
##
from caldavclientlibrary.protocol.http.session import Session
from caldavclientlibrary.protocol.webdav.head import Head
import unittest
class TestRequest(unittest.TestCase):
def test_Method(self):
server = Session("www.example.com")
request = Head(server, "/")
self.assertEqual(request.getMethod(), "HEAD")
class TestRequestHeaders(unittest.TestCase):
def test_NoSpecialHeaders(self):
server = Session("www.example.com")
request = Head(server, "/")
hdrs = request.generateRequestHeader()
self.assertFalse("If-None-Match:" in hdrs)
self.assertFalse("If-Match:" in hdrs)
def test_IfMatchHeader(self):
server = Session("www.example.com")
request = Head(server, "/")
request.setData(None, etag="\"12345\"")
hdrs = request.generateRequestHeader()
self.assertFalse("If-None-Match:" in hdrs)
self.assertTrue("If-Match: \"12345\"" in hdrs)
class TestRequestBody(unittest.TestCase):
pass
class TestResponse(unittest.TestCase):
pass
class TestResponseHeaders(unittest.TestCase):
pass
class TestResponseBody(unittest.TestCase):
pass
| [
{
"point_num": 1,
"id": "more_functions_than_classes",
"question": "Does this file define more functions than classes?",
"answer": false
},
{
"point_num": 2,
"id": "every_function_under_20_lines",
"question": "Is every function in this file shorter than 20 lines?",
"answer": true... | 3 | caldavclientlibrary/protocol/webdav/tests/test_head.py | LaudateCorpus1/ccs-caldavclientlibrary |
from django.contrib.auth.hashers import UNUSABLE_PASSWORD_PREFIX
from django.test import TestCase
from django_fire.hashers import (
FIRED_PASSWORD_PREFIX,
is_password_fired, make_fired_password,
)
class TestHashers(TestCase):
def test_is_password_fired(self):
# unusable password made by django_fire should be True
self.assertTrue(is_password_fired(FIRED_PASSWORD_PREFIX))
# unusable password not made by django_fire should be False
self.assertFalse(is_password_fired(UNUSABLE_PASSWORD_PREFIX))
def test_make_fired_password(self):
fired_password = make_fired_password()
# fired password should be UNUSABLE PASSWORD as django
self.assertTrue(fired_password.startswith(UNUSABLE_PASSWORD_PREFIX))
# fired password should be FIRED PASSWORD as django_fire
self.assertTrue(fired_password.startswith(FIRED_PASSWORD_PREFIX))
# django_fire should be able to check fired-password
self.assertTrue(is_password_fired(fired_password))
| [
{
"point_num": 1,
"id": "every_function_has_docstring",
"question": "Does every function in this file have a docstring?",
"answer": false
},
{
"point_num": 2,
"id": "every_function_under_20_lines",
"question": "Is every function in this file shorter than 20 lines?",
"answer": tru... | 3 | django_fire/tests/test_hashers.py | lordpeara/django-fire |
#!python
#!/usr/bin/env python
from kivy.app import App
from kivy.uix.bubble import Bubble
from kivy.animation import Animation
from kivy.uix.floatlayout import FloatLayout
from kivy.lang import Builder
from kivy.factory import Factory
from kivy.clock import Clock
from actilectrum.gui.kivy.i18n import _
Builder.load_string('''
<MenuItem@Button>
background_normal: ''
background_color: (0.192, .498, 0.745, 1)
height: '48dp'
size_hint: 1, None
<ContextMenu>
size_hint: 1, None
height: '60dp'
pos: (0, 0)
show_arrow: False
arrow_pos: 'top_mid'
padding: 0
orientation: 'horizontal'
background_color: (0.1, 0.1, 0.1, 1)
background_image: ''
BoxLayout:
size_hint: 1, 1
height: '54dp'
padding: '0dp', '0dp'
spacing: '3dp'
orientation: 'horizontal'
id: buttons
''')
class MenuItem(Factory.Button):
pass
class ContextMenu(Bubble):
def __init__(self, obj, action_list):
Bubble.__init__(self)
self.obj = obj
for k, v in action_list:
l = MenuItem()
l.text = _(k)
def func(f=v):
Clock.schedule_once(lambda dt: f(obj), 0.15)
l.on_release = func
self.ids.buttons.add_widget(l)
def hide(self):
if self.parent:
self.parent.hide_menu()
| [
{
"point_num": 1,
"id": "all_params_annotated",
"question": "Does every function parameter in this file have a type annotation (excluding self/cls)?",
"answer": false
},
{
"point_num": 2,
"id": "no_function_exceeds_5_params",
"question": "Does every function in this file take 5 or fe... | 3 | actilectrum/gui/kivy/uix/context_menu.py | Actinium-project/Actilectrum |
from typing import Dict
from msrc.appconfig import from_argv
import pytest
from common import AllTypes, all_args, all_values, En
def test_argv():
loaded = from_argv(AllTypes, all_args)
assert loaded == all_values
def test_argv_unknown():
unknown = ["--unknown"]
loaded = from_argv(AllTypes, all_args+unknown)
expected: Dict[str, object] = dict(all_values.items())
expected["_unknown_args"] = unknown
assert loaded == expected
def test_argv_aliases():
loaded = from_argv(
AllTypes,
["--integer", "34", "-o", "1", "2"],
{"o": "nested.options"})
assert loaded == dict(
integer=34,
nested=dict(options=(En.Option1, En.Option2))
)
def test_first_arg_not_class():
with pytest.raises(TypeError):
from_argv("something", []) # type: ignore
def test_first_arg_unsupported():
with pytest.raises(ValueError):
from_argv(int, [])
| [
{
"point_num": 1,
"id": "every_function_under_20_lines",
"question": "Is every function in this file shorter than 20 lines?",
"answer": true
},
{
"point_num": 2,
"id": "all_function_names_snake_case",
"question": "Are all function names in this file written in snake_case?",
"answ... | 3 | msrc-appconfig/tests/from_argv_test.py | microsoft/msrc-appconf |
def insertion_sort(array):
N = len(array)
# Outer for-loop, such that for each iteration, k elements from 0, ... k
# are to be assumed to be sorted or about to be sorted.
for k in range(1, N):
for j in range(k, 0, -1):
if (array[j - 1] > array[j]):
array[j - 1], array[j] = array[j], array[j - 1]
else:
# As soon as we don't need to swap, the (k + 1)st in correct
# location. It's because of induction case that we can do this.
break
return array
def insertion_sort_optimized(array):
N = len(array)
for k in range(1, N):
temp = array[k]
for j in range(k, 0, -1):
if (array[j - 1] > temp):
# Shift or move value at j - 1 into position j.
array[j] = array[j - 1]
else:
array[j] = temp
break
if (array[0] > temp):
# Only executed if temp < array[0]
array[0] = temp
return array | [
{
"point_num": 1,
"id": "every_function_has_docstring",
"question": "Does every function in this file have a docstring?",
"answer": false
},
{
"point_num": 2,
"id": "every_function_under_20_lines",
"question": "Is every function in this file shorter than 20 lines?",
"answer": fal... | 3 | Voltron/Voltron/Algorithms/Sorting/insertion_sort.py | ernestyalumni/HrdwCCppCUDA |
__title__ = "simulation"
__author__ = "murlux"
__copyright__ = "Copyright 2019, " + __author__
__credits__ = (__author__, )
__license__ = "MIT"
__email__ = "murlux@protonmail.com"
# Local imorts
from playground.enums import RunMode
from playground.simulation.backtesting import BackTestingOperation
from playground.simulation.forwardtesting import ForwardTestingOperation
OP_TYPES = {
RunMode.BACKTEST: BackTestingOperation,
RunMode.FORDWARDTEST: ForwardTestingOperation,
}
class SimulatedOperationType:
"""An object representing the Simulated Operation."""
def __init__(self, ):
"""
Simply initiate the SimulatedOpType.
"""
return
@staticmethod
def from_config(config, ):
"""
Simply return the corresponding type's SimulatedOp class based on config
param: config: A SimulatedOperationConfig
type: config: SimulatedOperationConfig
"""
return OP_TYPES[config.RunMode].from_config(config) | [
{
"point_num": 1,
"id": "every_function_under_20_lines",
"question": "Is every function in this file shorter than 20 lines?",
"answer": true
},
{
"point_num": 2,
"id": "more_functions_than_classes",
"question": "Does this file define more functions than classes?",
"answer": true
... | 3 | playground/simulation/operations/types.py | murlokito/playground |
# -*- coding: utf-8 -*-
from enum import Enum
class LowerCaseNameEnum(Enum):
def str(self):
return self.name.lower()
class Case(LowerCaseNameEnum):
GENITIVE = 0
DATIVE = 1
ACCUSATIVE = 2
INSTRUMENTAL = 3
PREPOSITIONAL = 4
class Gender(LowerCaseNameEnum):
MALE = 0
FEMALE = 1
ANDROGYNOUS = 2
class NamePart(LowerCaseNameEnum):
LASTNAME = 0
FIRSTNAME = 1
MIDDLENAME = 2
| [
{
"point_num": 1,
"id": "more_functions_than_classes",
"question": "Does this file define more functions than classes?",
"answer": false
},
{
"point_num": 2,
"id": "has_multiple_inheritance",
"question": "Does any class in this file use multiple inheritance?",
"answer": false
}... | 3 | pytrovich/enums.py | alexeyev/pytrovich |
import re
def isNice(a):
if str(a).find("ab") > -1:
return False
if str(a).find("cd") > -1:
return False
if str(a).find("xy") > -1:
return False
if str(a).find("pq") > -1:
return False
ca = str(a).count("a")
ce = str(a).count("e")
ci = str(a).count("i")
co = str(a).count("o")
cu = str(a).count("u")
count = ca + ce + cu + ci + co
if count < 3:
return False
pattern = re.compile(r"([a-z])\1")
match = re.search(pattern, str(a))
if match:
return True
return False
def newIsNice(a):
pattern1 = re.compile(r"([a-z]{2}).*?\1")
pattern2 = re.compile(r"([a-z]).\1")
match = re.search(pattern1, str(a))
if match:
match2 = re.search(pattern2, str(a))
if match2:
return True
return False
w = open("day05.txt").read().split('\n')
nice = 0
newNice = 0
for i in range(len(w) - 1):
# print(w[i], isNice(w[i]), newIsNice(w[i]))
if isNice(w[i]):
nice += 1
if newIsNice(w[i]):
newNice += 1
print(nice)
print(newNice)
| [
{
"point_num": 1,
"id": "every_function_has_docstring",
"question": "Does every function in this file have a docstring?",
"answer": false
},
{
"point_num": 2,
"id": "has_nested_function_def",
"question": "Does this file contain any function defined inside another function?",
"ans... | 3 | day05/day05.py | binarygondola/adventofcode-2015 |
# Python program for implementation of MergeSort
def mergeSort(arr):
if len(arr) >1:
mid = len(arr)//2 # Finding the mid of the array
L = arr[:mid] # Dividing the array elements
R = arr[mid:] # into 2 halves
mergeSort(L) # Sorting the first half
mergeSort(R) # Sorting the second half
i = j = k = 0
# Copy data to temp arrays L[] and R[]
while i < len(L) and j < len(R):
if L[i] < R[j]:
arr[k] = L[i]
i+= 1
else:
arr[k] = R[j]
j+= 1
k+= 1
# Checking if any element was left
while i < len(L):
arr[k] = L[i]
i+= 1
k+= 1
while j < len(R):
arr[k] = R[j]
j+= 1
k+= 1
# Code to print the list
def printList(arr):
for i in range(len(arr)):
print(arr[i], end =" ")
print()
# driver code to test the above code
if __name__ == '__main__':
arr = [12, 11, 13, 5, 6, 7]
print ("Given array is", end ="\n")
printList(arr)
mergeSort(arr)
print("Sorted array is: ", end ="\n")
printList(arr)
| [
{
"point_num": 1,
"id": "every_function_has_docstring",
"question": "Does every function in this file have a docstring?",
"answer": false
},
{
"point_num": 2,
"id": "every_function_under_20_lines",
"question": "Is every function in this file shorter than 20 lines?",
"answer": fal... | 3 | Python/merge_sort.py | enrinal/-HACKTOBERFEST2K20 |
import pytest
from unittest import mock
from mitmproxy.test import tflow
from mitmproxy import io
from mitmproxy import exceptions
from mitmproxy.addons import clientplayback
from mitmproxy.test import taddons
def tdump(path, flows):
w = io.FlowWriter(open(path, "wb"))
for i in flows:
w.add(i)
class MockThread():
def is_alive(self):
return False
class TestClientPlayback:
def test_playback(self):
cp = clientplayback.ClientPlayback()
with taddons.context() as tctx:
assert cp.count() == 0
f = tflow.tflow(resp=True)
cp.load([f])
assert cp.count() == 1
RP = "mitmproxy.proxy.protocol.http_replay.RequestReplayThread"
with mock.patch(RP) as rp:
assert not cp.current_thread
cp.tick()
assert rp.called
assert cp.current_thread
cp.flows = None
cp.current_thread = None
cp.tick()
assert tctx.master.has_event("processing_complete")
cp.current_thread = MockThread()
cp.tick()
assert cp.current_thread is None
def test_configure(self, tmpdir):
cp = clientplayback.ClientPlayback()
with taddons.context() as tctx:
path = str(tmpdir.join("flows"))
tdump(path, [tflow.tflow()])
tctx.configure(cp, client_replay=[path])
tctx.configure(cp, client_replay=[])
tctx.configure(cp)
with pytest.raises(exceptions.OptionsError):
tctx.configure(cp, client_replay=["nonexistent"])
| [
{
"point_num": 1,
"id": "all_params_annotated",
"question": "Does every function parameter in this file have a type annotation (excluding self/cls)?",
"answer": false
},
{
"point_num": 2,
"id": "every_class_has_docstring",
"question": "Does every class in this file have a docstring?"... | 3 | test/mitmproxy/addons/test_clientplayback.py | nikofil/mitmproxy |
""" Tests the creation of tables, and the methods of the sql class
"""
from pyrate.repositories.sql import Table
from utilities import setup_database
class TestSql:
""" Tests the Sql class
"""
def test_get_list_of_columns(self, setup_database):
db = setup_database
rows = [{'unit': 'days',
'description': 'At berth/anchor',
'name': 's_berth_day'},
{'unit': 'SOG / kts',
'description': 'Average at sea',
'name': 's_av_sea'}]
with db:
actual = db.clean._get_list_of_columns(rows[0])
assert isinstance(actual, str)
assert actual.endswith(')')
assert actual[0] == '('
actual_contents = actual.strip('()').split(',')
expected = ['description','name','unit']
for expected_column in expected:
assert expected_column in actual_contents
def test_get_list_of_columns_lowerconversion(self, setup_database):
db = setup_database
rows = [{'uNit': 'days',
'Description': 'At berth/anchor',
'namE': 's_berth_day'},
{'unit': 'SOG / kts',
'description': 'Average at sea',
'name': 's_av_sea'}]
with db:
actual = db.clean._get_list_of_columns(rows[0])
assert isinstance(actual, str)
assert actual.endswith(')')
assert actual[0] == '('
actual_contents = actual.strip('()').split(',')
expected = ['description','name','unit']
for expected_column in expected:
assert expected_column in actual_contents
| [
{
"point_num": 1,
"id": "more_functions_than_classes",
"question": "Does this file define more functions than classes?",
"answer": true
},
{
"point_num": 2,
"id": "every_function_under_20_lines",
"question": "Is every function in this file shorter than 20 lines?",
"answer": true
... | 3 | tests/test_sql.py | vprakash-ucl/pyrate |
# coding: utf-8
import numpy as np
from PIL import Image, ImageDraw
from math import sin,cos
from numpngw import write_apng
W,H = 1024,1024
COLOR_BLACK = (0x00, 0x00, 0x00, 0x00)
COLOR_WHITE = (0xF0, 0xF0, 0xE0)
COLOR_BLUE = (0x0D, 0x36, 0xFF)
COLOR_BLYNK = (0x2E, 0xFF, 0xB9)
COLOR_RED = (0xFF, 0x10, 0x08)
COLOR_MAGENTA = (0xA7, 0x00, 0xFF)
def fr(color):
im = Image.new('RGBA', (W,H))
surface = ImageDraw.Draw(im)
surface.ellipse((10,10,W-20,H-20), fill=color)
del surface
return np.array(im.resize((16,16), Image.ANTIALIAS))
def fade(color):
frames = []
for i in range(25):
frames.append(fr(color + tuple([int(i*(255.0/25))])))
for i in range(25):
frames.append(fr(color + tuple([255-int(i*(255.0/25))])))
return frames
path = "./docs/images/states/"
write_apng(path + "0.png", map(fr,[COLOR_BLUE, COLOR_BLACK]), delay=[50, 500])
write_apng(path + "1.png", map(fr,[COLOR_BLUE, COLOR_BLACK]), delay=[200, 200])
write_apng(path + "2.png", map(fr,[COLOR_BLYNK, COLOR_BLACK]), delay=[50, 500])
write_apng(path + "3.png", map(fr,[COLOR_BLYNK, COLOR_BLACK]), delay=[100, 100])
write_apng(path + "4.png", fade(COLOR_BLYNK), delay=100)
write_apng(path + "5.png", map(fr,[COLOR_MAGENTA, COLOR_BLACK]), delay=[50, 50])
write_apng(path + "6.png", map(fr,[COLOR_RED, COLOR_BLACK, COLOR_RED, COLOR_BLACK]), delay=[80, 100, 80, 1000])
write_apng(path + "7.png", fade(COLOR_WHITE), delay=50)
write_apng(path + "8.png", map(fr,[COLOR_WHITE, COLOR_BLACK]), delay=[100, 100]) | [
{
"point_num": 1,
"id": "every_function_under_20_lines",
"question": "Is every function in this file shorter than 20 lines?",
"answer": true
},
{
"point_num": 2,
"id": "has_nested_function_def",
"question": "Does this file contain any function defined inside another function?",
"... | 3 | extras/gen-states.py | kayatmin/blynk-library |
import numpy as np
import os
import sys
# To import from sibling directory ../utils
sys.path.append(os.path.dirname(os.path.abspath(__file__)) + "/..")
from data_loader.load_utils import load_obj
from data_loader.load_utils import try_to_load_as_pickled_object
from sklearn.model_selection import train_test_split
from data_loader.process_files import process_all_files
class DataGenerator:
def __init__(self, config):
self.config = config
# load data here
#input = try_to_load_as_pickled_object('./data/patches.pkl')
#y = try_to_load_as_pickled_object('./data/labels_patches.pkl')
print("\nloading the data")
input, y = process_all_files([0,1000,2000,3000,4000,5000,6000,7000,8000,9000])
print("\ndata loaded")
self.input, self.input_dev, self.y, self.y_dev = train_test_split(input,
y,
test_size=self.config.val_split)
def next_batch(self, batch_size):
idx = np.random.choice(len(self.input), batch_size)
yield self.input[idx], self.y[idx]
def next_batch_dev(self, batch_size):
idx = np.random.choice(len(self.input_dev), batch_size)
yield self.input_dev[idx], self.y_dev[idx]
| [
{
"point_num": 1,
"id": "every_function_has_docstring",
"question": "Does every function in this file have a docstring?",
"answer": false
},
{
"point_num": 2,
"id": "no_function_exceeds_5_params",
"question": "Does every function in this file take 5 or fewer parameters (excluding sel... | 3 | data_loader/baseline_generator.py | mmr12/DeepLearning18 |
import torchvision
from torchvision.models import resnet as vrn
import torch.utils.model_zoo as model_zoo
from .utils import register
class ResNet(vrn.ResNet):
'Deep Residual Network - https://arxiv.org/abs/1512.03385'
def __init__(self, layers=[3, 4, 6, 3], bottleneck=vrn.Bottleneck, outputs=[5], groups=1, width_per_group=64, url=None):
self.stride = 128
self.bottleneck = bottleneck
self.outputs = outputs
self.url = url
kwargs = {'block': bottleneck, 'layers': layers, 'groups': groups, 'width_per_group': width_per_group}
super().__init__(**kwargs)
self.unused_modules = ['fc']
def initialize(self):
if self.url:
self.load_state_dict(model_zoo.load_url(self.url))
def forward(self, x):
x = self.conv1(x)
x = self.bn1(x)
x = self.relu(x)
x = self.maxpool(x)
outputs = []
for i, layer in enumerate([self.layer1, self.layer2, self.layer3, self.layer4]):
level = i + 2
if level > max(self.outputs):
break
x = layer(x)
if level in self.outputs:
outputs.append(x)
return outputs
@register
def ResNet18C4():
return ResNet(layers=[2, 2, 2, 2], bottleneck=vrn.BasicBlock, outputs=[4], url=vrn.model_urls['resnet18'])
@register
def ResNet34C4():
return ResNet(layers=[3, 4, 6, 3], bottleneck=vrn.BasicBlock, outputs=[4], url=vrn.model_urls['resnet34'])
| [
{
"point_num": 1,
"id": "all_params_annotated",
"question": "Does every function parameter in this file have a type annotation (excluding self/cls)?",
"answer": false
},
{
"point_num": 2,
"id": "every_function_under_20_lines",
"question": "Is every function in this file shorter than ... | 3 | odtk/backbones/resnet.py | Mo5mami/retinanet-examples |
# -*- coding: utf-8 -*-
from keras_bert import Tokenizer
class TokenizerReturningSpace(Tokenizer):
"""
"""
def _tokenize(self, text):
R = []
for c in text:
if c in self._token_dict:
R.append(c)
elif self._is_space(c):
R.append('[unused1]')
else:
R.append('[UNK]')
return R
class EnglishTokenizer(Tokenizer):
"""
"""
pass
| [
{
"point_num": 1,
"id": "more_functions_than_classes",
"question": "Does this file define more functions than classes?",
"answer": false
},
{
"point_num": 2,
"id": "every_class_has_docstring",
"question": "Does every class in this file have a docstring?",
"answer": true
},
{
... | 3 | nlp_tasks/bert_keras/tokenizer.py | l294265421/AC-MIMLLN |
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
class SubCommand(object):
name = NotImplementedError("Please add 'name' member in your SubCommand")
help = NotImplementedError("Please add 'help' member in your SubCommand")
def addParser(self, parser):
raise NotImplementedError("Please implement 'addParser' method in your SubCommand")
def execute(self):
raise NotImplementedError("Please implement 'execute' method in your SubCommand")
| [
{
"point_num": 1,
"id": "has_nested_function_def",
"question": "Does this file contain any function defined inside another function?",
"answer": false
},
{
"point_num": 2,
"id": "has_multiple_inheritance",
"question": "Does any class in this file use multiple inheritance?",
"answ... | 3 | src/ppb/cli/sub_cmd/_sub_command.py | Stibbons/python-project-bootstrap |
import pytest
from selen.client.driver import Driver, DriverInstance
from selen.client.helper import Tools
@pytest.hookimpl(hookwrapper=True, tryfirst=True)
def pytest_runtest_makereport(item):
outcome = yield
rep = outcome.get_result()
setattr(item, "rep_" + rep.when, rep)
return rep
@pytest.fixture
def driver(request):
chrome_driver = Driver.chrome()
driver_instance = DriverInstance(driver=chrome_driver)
driver_instance.set_driver()
driver = driver_instance.get_driver()
driver.set_page_load_timeout(10)
driver.set_script_timeout(30)
yield driver
if request.node.rep_call.failed:
Driver.create_screenshot(driver, Tools.time_stamp(request.function.__name__ + '--'), 'allure')
driver.quit()
| [
{
"point_num": 1,
"id": "has_nested_function_def",
"question": "Does this file contain any function defined inside another function?",
"answer": false
},
{
"point_num": 2,
"id": "every_function_under_20_lines",
"question": "Is every function in this file shorter than 20 lines?",
... | 3 | tests/conftest.py | SatoDeNoor/selen |
import re
import httpx
from bs4 import BeautifulSoup
from convert_to_json import hash_id
from cleanup import delete_from_index
from connection import app_search, engine_name
from upload_to_appsearch import upload_dict
womakers_location_page = httpx.get("https://womakerscode.org/locations")
soup = BeautifulSoup(womakers_location_page, "html.parser")
base_locations_wrapper = soup.select("div.container-cities")
def get_locations():
locations = []
for location in base_locations_wrapper:
name = location.select("h3")[0].text.strip()
url = location.select("a")[0]["href"]
city, region = location.select("strong")[0].text.strip().split("/")
city = str(city or name).strip()
asset = {
"name": f"WoMakersCode - {name}",
"url": url,
"organization_logo": "https://d33wubrfki0l68.cloudfront.net/16a1903b64a4d7e982440dec57fe954c2273d95b/90edb/assets/images/womakerscode-icone.png",
"city": f"{city}, {region}".strip(),
"diversity_focus": ["Women in Tech"],
"technology_focus": ["General Technology"],
"parent_organization": "WoMakersCode",
"global_org_url_from_parent_organization": "https://womakerscode.org",
}
asset["id"] = hash_id(name + url)
locations.append(asset)
return locations
def run():
delete_from_index("WoMakersCode")
upload_dict(get_locations())
if __name__ == "__main__":
# test()
run()
# print(soup.prettify())
| [
{
"point_num": 1,
"id": "all_return_types_annotated",
"question": "Does every function in this file have a return type annotation?",
"answer": false
},
{
"point_num": 2,
"id": "every_function_has_docstring",
"question": "Does every function in this file have a docstring?",
"answe... | 3 | upload/scrapers/womakerscode.py | kjaymiller/diversity-orgs-tech-appsearch-demo |
import re
import time
from pyquery import PyQuery as pq
from policy_crawl.common.fetch import get,post
from policy_crawl.common.save import save
from policy_crawl.common.logger import alllog,errorlog
def parse_detail(html,url):
alllog.logger.info("云南省教育厅: %s"%url)
doc=pq(html)
data={}
data["title"]=doc(".passage-contents h4").text()
data["content"]=doc(".TRS_Editor").text().replace("\n","")
data["content_url"]=[item.attr("href") for item in doc(".TRS_Editor a").items()]
try:
# data["publish_time"]=re.findall("(\d{4}年\d{1,2}月\d{1,2}日)",html)[0]
# data["publish_time"]=re.findall("(\d{4}/\d{1,2}/\d{1,2})",html)[0]
data["publish_time"]=re.findall("(\d{4}-\d{1,2}-\d{1,2})",html)[0]
except:
data["publish_time"]=""
errorlog.logger.error("url:%s 未找到publish_time"%url)
data["classification"]="云南省教育厅"
data["url"]=url
print(data)
save(data)
def parse_index(html):
doc=pq(html)
items=doc(".title-list li a").items()
for item in items:
url=item.attr("href")
if "http" not in url:
url="https://jyt.yn.gov.cn" + url.replace("./","/")
try:
html=get(url)
except:
errorlog.logger.error("url错误:%s"%url)
parse_detail(html,url)
time.sleep(1)
def main():
for i in range(1,12):
print(i)
if i==1:
url="https://jyt.yn.gov.cn/web/fada9b8aa1794bc99629d553c96fbe97/index.html"
else:
url="https://jyt.yn.gov.cn/web/fada9b8aa1794bc99629d553c96fbe97/index_"+str(i)+".html"
html=get(url)
parse_index(html)
if __name__ == '__main__':
main() | [
{
"point_num": 1,
"id": "all_function_names_snake_case",
"question": "Are all function names in this file written in snake_case?",
"answer": true
},
{
"point_num": 2,
"id": "every_function_has_docstring",
"question": "Does every function in this file have a docstring?",
"answer":... | 3 | spiders/moe/all/yunnan.py | JJYYYY/policy_crawl |
import numpy as np
JPL_OBLIQUITY = np.deg2rad(84381.448 / 3600.0)
def icrf_to_jpl_ecliptic(x, y, z, vx, vy, vz):
return _apply_x_rotation(JPL_OBLIQUITY, x, y, z, vx, vy, vz)
def jpl_ecliptic_to_icrf(x, y, z, vx, vy, vz):
return _apply_x_rotation(-JPL_OBLIQUITY, x, y, z, vx, vy, vz)
def _apply_x_rotation(phi, x0, y0, z0, vx0, vy0, vz0):
x = x0
y = y0 * np.cos(phi) + z0 * np.sin(phi)
z = -y0 * np.sin(phi) + z0 * np.cos(phi)
vx = vx0
vy = vy0 * np.cos(phi) + vz0 * np.sin(phi)
vz = -vy0 * np.sin(phi) + vz0 * np.cos(phi)
return [x, y, z, vx, vy, vz]
| [
{
"point_num": 1,
"id": "all_params_annotated",
"question": "Does every function parameter in this file have a type annotation (excluding self/cls)?",
"answer": false
},
{
"point_num": 2,
"id": "no_function_exceeds_5_params",
"question": "Does every function in this file take 5 or fe... | 3 | adam/astro_utils.py | moeyensj/adam_home |
import datetime
import pytest
from sepaxml import SepaDD
from sepaxml.validation import ValidationError
def test_name_too_long():
sdd = SepaDD({
"name": "TestCreditor",
"BIC": "BANKNL2A",
"IBAN": "NL50BANK1234567890",
"batch": True,
"creditor_id": "000000",
"currency": "EUR"
})
payment1 = {
"name": "Test von Testenstein Test von Testenstein Test von Testenstein",
"IBAN": "NL50BANK1234567890",
"BIC": "BANKNL2A",
"amount": 1012,
"type": "FRST",
"collection_date": datetime.date.today(),
"mandate_id": "1234",
"mandate_date": datetime.date.today(),
"description": "Test transaction1"
}
sdd.add_payment(payment1)
with pytest.raises(ValidationError):
sdd.export()
sdd.export(validate=False)
def test_invalid_mandate():
sdd = SepaDD({
"name": "TestCreditor",
"BIC": "BANKNL2A",
"IBAN": "NL50BANK1234567890",
"batch": True,
"creditor_id": "000000",
"currency": "EUR"
})
payment1 = {
"name": "Test von Testenstein",
"IBAN": "NL50BANK1234567890",
"BIC": "BANKNL2A",
"amount": 1012,
"type": "FRST",
"collection_date": datetime.date.today(),
"mandate_id": "1234ÄOÜ",
"mandate_date": datetime.date.today(),
"description": "Test transaction1"
}
sdd.add_payment(payment1)
with pytest.raises(ValidationError):
sdd.export()
sdd.export(validate=False)
| [
{
"point_num": 1,
"id": "has_nested_function_def",
"question": "Does this file contain any function defined inside another function?",
"answer": false
},
{
"point_num": 2,
"id": "every_function_has_docstring",
"question": "Does every function in this file have a docstring?",
"ans... | 3 | tests/debit/test_validation.py | CaptainConsternant/python-sepaxml |
# This code is from
# Multi-Task Learning as Multi-Objective Optimization
# Ozan Sener, Vladlen Koltun
# Neural Information Processing Systems (NeurIPS) 2018
# https://github.com/intel-isl/MultiObjectiveOptimization
import numpy as np
from .min_norm_solvers_numpy import MinNormSolver
def moo_mtl_search(multi_obj_fg, x=None,
max_iters=200, n_dim=20, step_size=1):
"""
MOO-MTL
"""
# x = np.random.uniform(-0.5,0.5,n_dim)
x = np.random.randn(n_dim) if x is None else x
fs = []
for t in range(max_iters):
f, f_dx = multi_obj_fg(x)
weights = get_d_moomtl(f_dx)
x = x - step_size * np.dot(weights.T, f_dx).flatten()
fs.append(f)
res = {'ls': np.stack(fs)}
return x, res
def get_d_moomtl(grads):
"""
calculate the gradient direction for MOO-MTL
"""
nobj, dim = grads.shape
if nobj <= 1:
return np.array([1.])
# # use cvxopt to solve QP
# P = np.dot(grads , grads.T)
#
# q = np.zeros(nobj)
#
# G = - np.eye(nobj)
# h = np.zeros(nobj)
#
#
# A = np.ones(nobj).reshape(1,2)
# b = np.ones(1)
#
# cvxopt.solvers.options['show_progress'] = False
# sol = cvxopt_solve_qp(P, q, G, h, A, b)
# print(f'grad.shape: {grads.shape}')
# use MinNormSolver to solve QP
sol, nd = MinNormSolver.find_min_norm_element(grads)
return sol
| [
{
"point_num": 1,
"id": "every_function_under_20_lines",
"question": "Is every function in this file shorter than 20 lines?",
"answer": false
},
{
"point_num": 2,
"id": "every_function_has_docstring",
"question": "Does every function in this file have a docstring?",
"answer": tru... | 3 | toy_experiments/solvers/moo_mtl.py | kelvin95/EPOSearch |
# don't mind me, I'm a quick and dirty python script
# I extract false positive rate from the output files of kbf
def lire(filemane):
TP, TN, FP, FN = 0, 0, 0, 0
with open(filemane, "r") as fichier:
for ligne in fichier:
cols = ligne.split()
if cols[1] == "0":
if cols[2] == "0":
TN += 1
else:
FN += 1
else:
if cols[2] == "0":
FP += 1
else:
TP += 1
return TP, TN, FP, FN
def analyser(filename):
TP, TN, FP, FN = lire(filename)
return filename, (FP / (FP + TN)) * 100
def main():
print(analyser("test_classic.txt"))
print(analyser("test_kbf1.txt"))
print(analyser("test_kbf2.txt"))
| [
{
"point_num": 1,
"id": "all_function_names_snake_case",
"question": "Are all function names in this file written in snake_case?",
"answer": true
},
{
"point_num": 2,
"id": "every_function_under_20_lines",
"question": "Is every function in this file shorter than 20 lines?",
"answ... | 3 | analyse.py | lrobidou/kbf |
import logging
from env import ENDPOINT, ACCESS_ID, ACCESS_KEY, USERNAME, PASSWORD
from tuya_iot import (
TuyaOpenAPI,
AuthType,
TuyaOpenMQ,
TuyaDeviceManager,
TuyaHomeManager,
TuyaDeviceListener,
TuyaDevice,
TuyaTokenInfo,
TUYA_LOGGER
)
TUYA_LOGGER.setLevel(logging.DEBUG)
# Init
openapi = TuyaOpenAPI(ENDPOINT, ACCESS_ID, ACCESS_KEY, AuthType.CUSTOM)
openapi.connect(USERNAME, PASSWORD)
openmq = TuyaOpenMQ(openapi)
openmq.start()
print("device test-> ", openapi.token_info.uid)
# Get device list
# assetManager = TuyaAssetManager(openapi)
# devIds = assetManager.getDeviceList(ASSET_ID)
# Update device status
deviceManager = TuyaDeviceManager(openapi, openmq)
homeManager = TuyaHomeManager(openapi, openmq, deviceManager)
homeManager.update_device_cache()
# # deviceManager.updateDeviceCaches(devIds)
# device = deviceManager.deviceMap.get(DEVICE_ID)
class tuyaDeviceListener(TuyaDeviceListener):
def update_device(self, device: TuyaDevice):
print("_update-->", device)
def add_device(self, device: TuyaDevice):
print("_add-->", device)
def remove_device(self, device_id: str):
pass
deviceManager.add_device_listener(tuyaDeviceListener())
# Turn on the light
# deviceManager.sendCommands(device.id, [{'code': 'switch_led', 'value': True}])
# time.sleep(1)
# print('status: ', device.status)
# # Turn off the light
# deviceManager.sendCommands(device.id, [{'code': 'switch_led', 'value': False}])
# time.sleep(1)
# print('status: ', device.status)
flag = True
while True:
input()
# flag = not flag
# commands = {'commands': [{'code': 'switch_led', 'value': flag}]}
response = openapi.post(
"/v1.0/iot-03/users/token/{}".format(openapi.token_info.refresh_token)
)
openapi.token_info = TuyaTokenInfo(response)
# openapi.post('/v1.0/iot-03/devices/{}/commands'.format(DEVICE_ID), commands)
| [
{
"point_num": 1,
"id": "no_function_exceeds_5_params",
"question": "Does every function in this file take 5 or fewer parameters (excluding self/cls)?",
"answer": true
},
{
"point_num": 2,
"id": "all_function_names_snake_case",
"question": "Are all function names in this file written... | 3 | example/device.py | andrzejressel/tuya-iot-python-sdk |
#tests
from calculus import finitesum
def test2_1():
exp=finitesum('2x^3y^4z^7')
assert(exp.derive('y')=='8x^3y^3z^7')
def test2_2():
exp2=finitesum('2x^2yz^3')
assert(exp2.derive('y')=='2x^2z^3')
def test2_3():
exp3=finitesum('y^3+2y')
assert(exp3.derive('y')=='2+3y^2')
def test2_4():
exp4=finitesum('0+x')
assert(exp4.simplify()=='x')
def test2_5():
exp5=finitesum('-y^6x^7')
assert(exp5.derive('y')=='-6y^5x^7')
def test2_6():
exp6=finitesum('y^-4')
assert(exp6.derive('y')=='-4y^-5')
def test2_7():
exp7=finitesum('y^-4')
assert(exp7.simplify()=='y^-4')
def test2_8():
exp8=finitesum('2x^-7-2y^-5')
assert(exp8.derive('y')=='10y^-6')
def test2_9():
exp9=finitesum('0')
assert(exp9.simplify()=='0')
| [
{
"point_num": 1,
"id": "every_function_has_docstring",
"question": "Does every function in this file have a docstring?",
"answer": false
},
{
"point_num": 2,
"id": "all_function_names_snake_case",
"question": "Are all function names in this file written in snake_case?",
"answer"... | 3 | tests/test_2.py | georgercarder/calculus |
from flask import Flask
from flask.ext.sqlalchemy import SQLAlchemy
import os, socket
app = Flask(__name__)
app.config['SQLALCHEMY_DATABASE_URI'] = os.environ['DATABASE_URI']
db = SQLAlchemy(app)
hostname = socket.gethostname()
@app.route('/')
def index():
return 'Hello, from sunny %s!\n' % hostname
@app.route('/db')
def dbtest():
try:
db.create_all()
except Exception as e:
return e.message + '\n'
return 'Database Connected from %s!\n' % hostname
if __name__ == '__main__':
app.run()
| [
{
"point_num": 1,
"id": "every_function_has_docstring",
"question": "Does every function in this file have a docstring?",
"answer": false
},
{
"point_num": 2,
"id": "has_nested_function_def",
"question": "Does this file contain any function defined inside another function?",
"ans... | 3 | demo/app/demo.py | xod442/ansible-stack2 |
from programy.parser.template.nodes.resetlearn import TemplateResetLearnNode
from programy.parser.template.nodes.base import TemplateNode
from programytest.parser.template.base import TemplateTestsBaseClass
class MockTemplateResetLearnNode(TemplateResetLearnNode):
def __init__(self):
TemplateResetLearnNode.__init__(self)
def resolve_to_string(self, bot, clientid):
raise Exception("This is an error")
class TemplateResetLearnNodeTests(TemplateTestsBaseClass):
def test_node(self):
root = TemplateResetLearnNode()
self.assertIsNotNone(root)
self.assertEquals("", root.resolve(self.bot, self.clientid))
def test_node_exception_handling(self):
root = TemplateNode()
node = MockTemplateResetLearnNode()
root.append(node)
result = root.resolve(self.bot, self.clientid)
self.assertIsNotNone(result)
self.assertEquals("", result)
def test_to_string(self):
node = MockTemplateResetLearnNode()
self.assertEquals("RESETLEARN", node.to_string())
def test_to_xml(self):
node = TemplateResetLearnNode()
self.assertEquals("<resetlearn />", node.to_xml(self.bot, self.clientid)) | [
{
"point_num": 1,
"id": "every_function_under_20_lines",
"question": "Is every function in this file shorter than 20 lines?",
"answer": true
},
{
"point_num": 2,
"id": "more_functions_than_classes",
"question": "Does this file define more functions than classes?",
"answer": true
... | 3 | test/programytest/parser/template/node_tests/test_resetlearn.py | ItsPhant/program-y |
import asyncio
import aiohttp
import requests
from top_articles.models import Story
from django.core.exceptions import ObjectDoesNotExist
def check_db_story_ids(articlesID_list):
new_articleID_list = []
for id in articlesID_list:
try:
Story.objects.get(id=id)
except ObjectDoesNotExist:
new_articleID_list.append(id)
return new_articleID_list
def get_article_urls(url):
"""
Fetch all ids of top trending articles
args: None
return:None
"""
articlesID_list = requests.get(
url).json()
url_list = []
print("article length",len(articlesID_list))
newarticlesID_list = check_db_story_ids(articlesID_list)
for id in newarticlesID_list:
url ="https://hacker-news.firebaseio.com/v0/item/%s.json?print=pretty" % id
url_list.append(url)
return url_list, articlesID_list, newarticlesID_list
async def fetch_url(session, url):
async with session.get(url, timeout=60 * 60) as response:
return await response.json()
async def fetch_all_urls(session, urls, loop):
results = await asyncio.gather(*[fetch_url(session, url) for url in urls],
return_exceptions=True)
return results
def fetch_articles(urls):
if len(urls) > 1:
loop = asyncio.get_event_loop()
connector = aiohttp.TCPConnector(limit=100)
with aiohttp.ClientSession(loop=loop, connector=connector) as session:
articles = loop.run_until_complete(fetch_all_urls(session, urls, loop))
raw_result = articles
return raw_result
else:
return None
def main(url):
urls_list, articlesID_list, newarticlesID_list = get_article_urls(url)
print(urls_list, articlesID_list, newarticlesID_list)
result_dict = fetch_articles(urls_list)
return result_dict, articlesID_list, newarticlesID_list
| [
{
"point_num": 1,
"id": "every_function_under_20_lines",
"question": "Is every function in this file shorter than 20 lines?",
"answer": true
},
{
"point_num": 2,
"id": "no_function_exceeds_5_params",
"question": "Does every function in this file take 5 or fewer parameters (excluding ... | 3 | top_articles/cron_articles.py | Ramesh7128/hacker-news-clone |
# Owner(s): ["oncall: fx"]
import torch
import torch.fx.experimental.fx_acc.acc_ops as acc_ops
import torch.nn as nn
from torch.testing._internal.common_fx2trt import AccTestCase
from parameterized import parameterized
class TestGetitemConverter(AccTestCase):
@parameterized.expand(
[
("slice_batch_dim", slice(None, None, None)),
("slice_basic", (slice(None, None, None), slice(0, 3, 2))),
("slice_full", (slice(None, None, None), slice(0, 10, 3))),
("ellipsis", (slice(None, None, None), ..., slice(0, 3, 2))),
(
"slice_all_none",
(slice(None, None, None), slice(None, None, None)),
),
(
"slice_start_none",
(slice(None, None, None), slice(None, 2, 1)),
),
("slice_end_none", (slice(None, None, None), slice(1, None, 1))),
(
"slice_step_none",
(slice(None, None, None), slice(0, 3, None)),
),
("slice_neg_idx", (slice(None, None, None), -1)),
("slice_neg_slice", (slice(None, None, None), slice(-8, -2, 3))),
("multi_dim", (slice(None, None, None), 0, 1)),
(
"slice_multi_dim",
(slice(None, None, None), slice(0, 3, 2), slice(1, -1, 3)),
),
(
"none",
(slice(None, None, None), None, slice(1, -1, 3), 1),
),
]
)
def test_getitem(self, _, idx):
class Getitem(nn.Module):
def __init__(self, idx):
super().__init__()
self.idx = idx
def forward(self, x):
x = x + x
return x[self.idx]
inputs = [torch.randn(2, 10, 10, 10)]
self.run_test(Getitem(idx), inputs, expected_ops={acc_ops.getitem})
| [
{
"point_num": 1,
"id": "all_params_annotated",
"question": "Does every function parameter in this file have a type annotation (excluding self/cls)?",
"answer": false
},
{
"point_num": 2,
"id": "no_function_exceeds_5_params",
"question": "Does every function in this file take 5 or fe... | 3 | test/fx2trt/converters/acc_op/test_getitem.py | xiaohanhuang/pytorch |
class Helpers:
_cache = {}
@classmethod
def cached(cls, key, scope=None, func=None):
if scope is not None:
if scope not in cls._cache:
cls._cache[scope] = {}
if key in cls._cache[scope]:
return cls._cache[scope][key]
else:
result = None if func is None else func()
cls._cache[scope][key] = result
return result
else:
if key in cls._cache:
return cls._cache[key]
else:
result = None if func is None else func()
cls._cache[key] = result
return result
@classmethod
def cache(cls, key, scope=None, object=None):
if scope is not None:
if scope not in cls._cache:
cls._cache[scope] = {}
cls._cache[scope][key] = object
else:
cls._cache[key] = object
| [
{
"point_num": 1,
"id": "no_function_exceeds_5_params",
"question": "Does every function in this file take 5 or fewer parameters (excluding self/cls)?",
"answer": true
},
{
"point_num": 2,
"id": "every_function_under_20_lines",
"question": "Is every function in this file shorter than... | 3 | stackl/helpers.py | ArtOfCode-/stackl |
"""About and help services.
(help browser anyone?)
"""
import importlib
import importlib_metadata
from gi.repository import Gtk
from gaphor.abc import ActionProvider, Service
from gaphor.core import action
class HelpService(Service, ActionProvider):
def __init__(self, session):
self.session = session
def shutdown(self):
pass
@property
def window(self):
return self.session.get_service("main_window").window
@action(name="app.about")
def about(self):
builder = Gtk.Builder()
with importlib.resources.path(
"gaphor.services.helpservice", "about.ui"
) as glade_file:
builder.add_objects_from_file(str(glade_file), ("about",))
about = builder.get_object("about")
about.set_version(importlib_metadata.version("gaphor"))
about.set_modal(True)
about.set_transient_for(self.window)
about.show()
@action(name="app.shortcuts")
def shortcuts(self):
builder = Gtk.Builder()
with importlib.resources.path(
"gaphor.services.helpservice", "shortcuts.ui"
) as glade_file:
builder.add_objects_from_file(str(glade_file), ("shortcuts-gaphor",))
shortcuts = builder.get_object("shortcuts-gaphor")
shortcuts.set_modal(True)
shortcuts.set_transient_for(self.window)
shortcuts.show()
return shortcuts
| [
{
"point_num": 1,
"id": "every_function_under_20_lines",
"question": "Is every function in this file shorter than 20 lines?",
"answer": true
},
{
"point_num": 2,
"id": "more_functions_than_classes",
"question": "Does this file define more functions than classes?",
"answer": true
... | 3 | gaphor/services/helpservice/__init__.py | mrmonkington/gaphor |
#!/usr/bin/env python
# Copyright 2017 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
import argparse
import json
import shutil
import sys
import merge_api
def noop_merge(output_json, jsons_to_merge):
"""Use the first supplied JSON as the output JSON.
Primarily intended for unsharded tasks.
Args:
output_json: A path to a JSON file to which the results should be written.
jsons_to_merge: A list of paths to JSON files.
"""
if len(jsons_to_merge) > 1:
print >> sys.stderr, (
'Multiple JSONs provided: %s' % ','.join(jsons_to_merge))
return 1
if jsons_to_merge:
shutil.copyfile(jsons_to_merge[0], output_json)
else:
with open(output_json, 'w') as f:
json.dump({}, f)
return 0
def main(raw_args):
parser = merge_api.ArgumentParser()
args = parser.parse_args(raw_args)
return noop_merge(args.output_json, args.jsons_to_merge)
if __name__ == '__main__':
sys.exit(main(sys.argv[1:]))
| [
{
"point_num": 1,
"id": "every_function_has_docstring",
"question": "Does every function in this file have a docstring?",
"answer": false
},
{
"point_num": 2,
"id": "has_nested_function_def",
"question": "Does this file contain any function defined inside another function?",
"ans... | 3 | testing/merge_scripts/noop_merge.py | zealoussnow/chromium |
# -*- coding: utf-8 -*-
# ----------------------------------------------------------------------
# AS discovery job
# ----------------------------------------------------------------------
# Copyright (C) 2007-2018 The NOC Project
# See LICENSE for details
# ----------------------------------------------------------------------
# Python modules
from __future__ import absolute_import
# NOC modules
from ..base import MODiscoveryJob
from noc.peer.models.asn import AS
from .prefix import PrefixCheck
from noc.core.span import Span
class ASDiscoveryJob(MODiscoveryJob):
model = AS
def handler(self, whois_route=None, **kwargs):
if whois_route:
self.set_artefact("whois_route", whois_route)
with Span(sample=0):
PrefixCheck(self).run()
def can_run(self):
return True
def get_interval(self):
return None
def get_failed_interval(self):
return None
def update_alarms(self):
"""
Disable umbrella alarms creation
:return:
"""
pass
| [
{
"point_num": 1,
"id": "has_nested_function_def",
"question": "Does this file contain any function defined inside another function?",
"answer": false
},
{
"point_num": 2,
"id": "every_function_has_docstring",
"question": "Does every function in this file have a docstring?",
"ans... | 3 | services/discovery/jobs/as/job.py | xUndero/noc |
directions = ['up', 'down', 'left', 'right']
def get_air_distance_between_two_points(point1, point2):
x1 = point1['x']
y1 = point1['y']
x2 = point2['x']
y2 = point2['y']
distance = pow(pow((x2 - x1), 2) + pow((y2 - y1), 2), 0.5)
return distance
def not_deadly_location_on_board(goal, deadly_locations, width, height):
if goal[0] < 0 or goal[0] >= width or goal[1] < 0 or goal[1] >= height:
return False
if deadly_locations.__contains__(goal):
return False
return True
def get_neighbours(tile):
pass
def find_shortest_path(start, goal):
pass
def list_of_reachable_tiles(start, deadly_locations, width, height):
visited = []
queue = [start]
while queue:
cur = queue.pop(0)
visited.append(cur)
for d in directions:
cur_neighbour = next_field_with_tupel(d, cur)
if not visited.__contains__(cur_neighbour) and not queue.__contains__(cur_neighbour):
if not_deadly_location_on_board(cur_neighbour, deadly_locations, width, height):
queue.append(cur_neighbour)
return visited | [
{
"point_num": 1,
"id": "all_function_names_snake_case",
"question": "Are all function names in this file written in snake_case?",
"answer": true
},
{
"point_num": 2,
"id": "no_function_exceeds_5_params",
"question": "Does every function in this file take 5 or fewer parameters (exclu... | 3 | app/path_finder.py | RavioliGitHub/starter-snake-python1 |
# -*- coding: utf-8 -*-
import torch.nn as nn
from torch.nn import functional as F
import pytorch_ssim
class MSE_Loss(nn.Module):
def __init__(self):
super(MSE_Loss, self).__init__()
def forward(self, input, target):
return F.mse_loss(input, target, reduction='mean')
class SSIM_Loss(nn.Module):
def __init__(self):
super(SSIM_Loss, self).__init__()
self.ssim_loss = pytorch_ssim.SSIM()
def forward(self, input, target):
return -self.ssim_loss(input, target)
| [
{
"point_num": 1,
"id": "all_params_annotated",
"question": "Does every function parameter in this file have a type annotation (excluding self/cls)?",
"answer": false
},
{
"point_num": 2,
"id": "every_class_has_docstring",
"question": "Does every class in this file have a docstring?"... | 3 | LDCT_Denoising/Neural_Network/Loss_Func.py | BennyZhang-Codes/LDCT-denoising-with-DL-Methods-and-Dicom-Viewer-by-Benny |
import time
import sys
class ShowProcess():
# """
# 显示处理进度的类
# 调用该类相关函数即可实现处理进度的显示
# """
i = 0 # 当前的处理进度
max_steps = 0 # 总共需要处理的次数
max_arrow = 50 #进度条的长度
infoDone = 'done'
# 初始化函数,需要知道总共的处理次数
def __init__(self, max_steps, infoDone = 'Done'):
self.max_steps = max_steps
self.i = 0
self.infoDone = infoDone
# 显示函数,根据当前的处理进度i显示进度
# 效果为[>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>]100.00%
def show_process(self, i=None):
if i is not None:
self.i = i
else:
self.i += 1
num_arrow = int(self.i * self.max_arrow / self.max_steps) #计算显示多少个'>'
num_line = self.max_arrow - num_arrow #计算显示多少个'-'
percent = self.i * 100.0 / self.max_steps #计算完成进度,格式为xx.xx%
process_bar = '[' + '>' * num_arrow + '-' * num_line + ']'\
+ '%.2f' % percent + '%' + '\r' #带输出的字符串,'\r'表示不换行回到最左边
sys.stdout.write(process_bar) #这两句打印字符到终端
sys.stdout.flush()
if self.i >= self.max_steps:
self.close()
def close(self):
print('')
print(self.infoDone)
self.i = 0
# ————————————————
# 版权声明:本文为CSDN博主「持久决心」的原创文章,遵循CC 4.0 by-sa版权协议,转载请附上原文出处链接及本声明。
# 原文链接:https://blog.csdn.net/u013832707/article/details/73608504
| [
{
"point_num": 1,
"id": "more_functions_than_classes",
"question": "Does this file define more functions than classes?",
"answer": true
},
{
"point_num": 2,
"id": "has_multiple_inheritance",
"question": "Does any class in this file use multiple inheritance?",
"answer": false
},... | 3 | ShowProcess.py | 4a5g0030/line_follow |
# Copyright (c) 2018, NVIDIA CORPORATION.
import os.path
import numpy as np
import pyarrow as pa
import pytest
from numba import cuda
from cudf import DataFrame, Series
from cudf.comm.gpuarrow import GpuArrowReader
from cudf.testing._utils import assert_eq
def read_data():
import pandas as pd
basedir = os.path.dirname(__file__)
datapath = os.path.join(basedir, "data", "ipums.pkl")
try:
df = pd.read_pickle(datapath)
except Exception as excpr:
if type(excpr).__name__ == "FileNotFoundError":
pytest.skip(".pkl file is not found")
else:
print(type(excpr).__name__)
names = []
arrays = []
for k in df.columns:
arrays.append(pa.Array.from_pandas(df[k]))
names.append(k)
batch = pa.RecordBatch.from_arrays(arrays, names)
schema = batch.schema.serialize().to_pybytes()
schema = np.ndarray(
shape=len(schema), dtype=np.byte, buffer=bytearray(schema)
)
data = batch.serialize().to_pybytes()
data = np.ndarray(shape=len(data), dtype=np.byte, buffer=bytearray(data))
darr = cuda.to_device(data)
return df, schema, darr
def test_fillna():
_, schema, darr = read_data()
gar = GpuArrowReader(schema, darr)
masked_col = gar[8]
sr = Series(data=masked_col.data)
dense = sr.nans_to_nulls().fillna(123)
np.testing.assert_equal(123, dense.to_numpy())
assert len(dense) == len(sr)
assert dense.null_count == 0
def test_to_dense_array():
data = np.random.random(8)
mask = np.asarray([0b11010110], dtype=np.byte)
sr = Series.from_masked_array(data=data, mask=mask, null_count=3)
assert sr.has_nulls
assert sr.null_count != len(sr)
filled = sr.to_numpy(na_value=np.nan)
dense = sr.dropna().to_numpy()
assert dense.size < filled.size
assert filled.size == len(sr)
def test_reading_arrow_sparse_data():
pdf, schema, darr = read_data()
gar = GpuArrowReader(schema, darr)
gdf = DataFrame(gar.to_dict())
assert_eq(pdf, gdf)
| [
{
"point_num": 1,
"id": "every_function_under_20_lines",
"question": "Is every function in this file shorter than 20 lines?",
"answer": false
},
{
"point_num": 2,
"id": "all_function_names_snake_case",
"question": "Are all function names in this file written in snake_case?",
"ans... | 3 | python/cudf/cudf/tests/test_sparse_df.py | Ahsantw/cudf |
#!/usr/bin/env python
# coding: utf-8
# In[4]:
def heapify(arr, n, i):
largest = i # Initialize largest as root
l = 2 * i + 1 # left = 2*i + 1
r = 2 * i + 2 # right = 2*i + 2
if l < n and arr[i] < arr[l]:
largest = l
if r < n and arr[largest] < arr[r]:
largest = r
if largest != i:
arr[i],arr[largest] = arr[largest],arr[i] # swap
# Heapify the root.
heapify(arr, n, largest)
# The main function to sort an array of given size
def heapSort(arr):
n = len(arr)
for i in range(n // 2 - 1, -1, -1):
heapify(arr, n, i)
# One by one extract elements
for i in range(n-1, 0, -1):
arr[i], arr[0] = arr[0], arr[i] # swap
heapify(arr, i, 0)
# Driver code to test above
arr = [ 12, 11, 13, 5, 6, 7]
heapSort(arr)
n = len(arr)
print ("Sorted array is")
for i in range(n):
print ("%d" %arr[i]),
# This code is contributed by Mohit Kumra
# In[ ]:
| [
{
"point_num": 1,
"id": "all_params_annotated",
"question": "Does every function parameter in this file have a type annotation (excluding self/cls)?",
"answer": false
},
{
"point_num": 2,
"id": "no_function_exceeds_5_params",
"question": "Does every function in this file take 5 or fe... | 3 | heap sort.py | angelopassaro/Hacktoberfest-1 |
# -*- coding:utf-8 -*-
from __future__ import absolute_import, division, print_function, unicode_literals
import pytest
from botocore.stub import Stubber
from treehugger.kms import kms_agent
from treehugger.remote import s3_client
@pytest.fixture(scope='function', autouse=True)
def kms_stub():
kms_agent.reset()
with Stubber(kms_agent.kms_client) as stubber:
yield stubber
stubber.assert_no_pending_responses()
@pytest.fixture(scope='function', autouse=True)
def s3_stub():
with Stubber(s3_client) as stubber:
yield stubber
stubber.assert_no_pending_responses()
| [
{
"point_num": 1,
"id": "has_nested_function_def",
"question": "Does this file contain any function defined inside another function?",
"answer": false
},
{
"point_num": 2,
"id": "every_function_under_20_lines",
"question": "Is every function in this file shorter than 20 lines?",
... | 3 | tests/conftest.py | adamchainz/treehugger |
#!/usr/bin/env python3
# Copyright (c) 2014-2016 The Bitcoin Core developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
"""Test mempool limiting together/eviction with the wallet."""
from test_framework.test_framework import SchleemsTestFramework
from test_framework.util import *
class MempoolLimitTest(SchleemsTestFramework):
def set_test_params(self):
self.setup_clean_chain = True
self.num_nodes = 1
self.extra_args = [["-maxmempool=5", "-spendzeroconfchange=0"]]
def run_test(self):
txouts = gen_return_txouts()
relayfee = self.nodes[0].getnetworkinfo()['relayfee']
txids = []
utxos = create_confirmed_utxos(relayfee, self.nodes[0], 91)
#create a mempool tx that will be evicted
us0 = utxos.pop()
inputs = [{ "txid" : us0["txid"], "vout" : us0["vout"]}]
outputs = {self.nodes[0].getnewaddress() : 0.0001}
tx = self.nodes[0].createrawtransaction(inputs, outputs)
self.nodes[0].settxfee(relayfee) # specifically fund this tx with low fee
txF = self.nodes[0].fundrawtransaction(tx)
self.nodes[0].settxfee(0) # return to automatic fee selection
txFS = self.nodes[0].signrawtransaction(txF['hex'])
txid = self.nodes[0].sendrawtransaction(txFS['hex'])
relayfee = self.nodes[0].getnetworkinfo()['relayfee']
base_fee = relayfee*100
for i in range (3):
txids.append([])
txids[i] = create_lots_of_big_transactions(self.nodes[0], txouts, utxos[30*i:30*i+30], 30, (i+1)*base_fee)
# by now, the tx should be evicted, check confirmation state
assert(txid not in self.nodes[0].getrawmempool())
txdata = self.nodes[0].gettransaction(txid)
assert(txdata['confirmations'] == 0) #confirmation should still be 0
if __name__ == '__main__':
MempoolLimitTest().main()
| [
{
"point_num": 1,
"id": "every_function_under_20_lines",
"question": "Is every function in this file shorter than 20 lines?",
"answer": false
},
{
"point_num": 2,
"id": "more_functions_than_classes",
"question": "Does this file define more functions than classes?",
"answer": true... | 3 | test/functional/mempool_limit.py | BlenderSleuth/schleems |
#!/usr/bin/env python3
"""
Test for local-subnet identifier
"""
import unittest
import netifaces
from base_test import PschedTestBase
from pscheduler.limitprocessor.identifier.localsubnet import *
DATA = {
}
class TestLimitprocessorIdentifierLocalSubnet(PschedTestBase):
"""
Test the Identifier
"""
def test_data_is_valid(self):
"""Limit Processor / Identifier Local Subnet / Data Validation"""
self.assertEqual(data_is_valid(DATA), (True, "OK"))
self.assertEqual(data_is_valid({ "abc": 123 }),
(False, 'Data is not an object or not empty.'))
def test_identifier(self):
"""Limit Processor / Identifier Local Subnet / Identifier"""
test_ifaces = {
"lo0": {
netifaces.AF_INET: [
{'addr': '127.0.0.1', 'netmask': '255.0.0.0', 'peer': '127.0.0.1'}
],
netifaces.AF_INET6: [
{'addr': '::1', 'netmask': 'ffff:ffff:ffff:ffff:ffff:ffff:ffff:ffff/128', 'peer': '::1', 'flags': 0},
{'addr': 'fe80::1%lo0', 'netmask': 'ffff:ffff:ffff:ffff::/64', 'flags': 0}
]
}
}
ident = IdentifierLocalSubnet(DATA, test_ifaces=test_ifaces)
self.assertEqual(
ident.evaluate({ "requester": "127.0.0.5" }),
True)
self.assertEqual(
ident.evaluate({ "requester": "fe80::1" }),
True)
self.assertEqual(
ident.evaluate({ "requester": "192.0.2.9" }),
False)
self.assertEqual(
ident.evaluate({ "requester": "2001:db8::1" }),
False)
if __name__ == '__main__':
unittest.main()
| [
{
"point_num": 1,
"id": "more_functions_than_classes",
"question": "Does this file define more functions than classes?",
"answer": true
},
{
"point_num": 2,
"id": "has_multiple_inheritance",
"question": "Does any class in this file use multiple inheritance?",
"answer": false
},... | 3 | python-pscheduler/pscheduler/tests/limitprocessor_identifier_localsubnet_test.py | krihal/pscheduler |
from collections import OrderedDict
from typing import Dict, Generic, Mapping, TypeVar
CacheKey = TypeVar("CacheKey")
CacheValue = TypeVar("CacheValue")
class LRUCache(Generic[CacheKey, CacheValue], OrderedDict):
"""
A dictionary-like container that stores a given maximum items.
If an additional item is added when the LRUCache is full, the least
recently used key is discarded to make room for the new item.
"""
def __init__(self, cache_size: int) -> None:
self.cache_size = cache_size
super(LRUCache, self).__init__()
def __setitem__(self, key: CacheKey, value: CacheValue) -> None:
"""Store a new views, potentially discarding an old value."""
if key not in self:
if len(self) >= self.cache_size:
self.popitem(last=False)
OrderedDict.__setitem__(self, key, value)
def __getitem__(self: Dict[CacheKey, CacheValue], key: CacheKey) -> CacheValue:
"""Gets the item, but also makes it most recent."""
value: CacheValue = OrderedDict.__getitem__(self, key)
OrderedDict.__delitem__(self, key)
OrderedDict.__setitem__(self, key, value)
return value
| [
{
"point_num": 1,
"id": "all_return_types_annotated",
"question": "Does every function in this file have a return type annotation?",
"answer": true
},
{
"point_num": 2,
"id": "more_functions_than_classes",
"question": "Does this file define more functions than classes?",
"answer"... | 3 | rich/_lru_cache.py | hultner-technologies/rich |
import cgi
import datetime
import urllib
import urlparse
from django.conf import settings
from django.template import defaultfilters
from django.utils.html import strip_tags
from jingo import register
import jinja2
from .urlresolvers import reverse
# Yanking filters from Django.
register.filter(strip_tags)
register.filter(defaultfilters.timesince)
register.filter(defaultfilters.truncatewords)
@register.function
def thisyear():
"""The current year."""
return jinja2.Markup(datetime.date.today().year)
@register.function
def url(viewname, *args, **kwargs):
"""Helper for Django's ``reverse`` in templates."""
return reverse(viewname, args=args, kwargs=kwargs)
@register.filter
def urlparams(url_, hash=None, **query):
"""
Add a fragment and/or query paramaters to a URL.
New query params will be appended to exising parameters, except duplicate
names, which will be replaced.
"""
url = urlparse.urlparse(url_)
fragment = hash if hash is not None else url.fragment
# Use dict(parse_qsl) so we don't get lists of values.
q = url.query
query_dict = dict(urlparse.parse_qsl(smart_str(q))) if q else {}
query_dict.update((k, v) for k, v in query.items())
query_string = _urlencode([(k, v) for k, v in query_dict.items()
if v is not None])
new = urlparse.ParseResult(url.scheme, url.netloc, url.path, url.params,
query_string, fragment)
return new.geturl()
def _urlencode(items):
"""A Unicode-safe URLencoder."""
try:
return urllib.urlencode(items)
except UnicodeEncodeError:
return urllib.urlencode([(k, smart_str(v)) for k, v in items])
@register.filter
def urlencode(txt):
"""Url encode a path."""
return urllib.quote_plus(txt)
| [
{
"point_num": 1,
"id": "every_function_has_docstring",
"question": "Does every function in this file have a docstring?",
"answer": true
},
{
"point_num": 2,
"id": "all_params_annotated",
"question": "Does every function parameter in this file have a type annotation (excluding self/c... | 3 | apps/commons/helpers.py | gene1wood/bugbro |
#Timers
# Execute code at timed intervals
import time
from threading import Timer
def display(msg):
print(msg + ' ' + time.strftime('%H:%M:%S'))
#Basic timer
def run_once():
display('Run Once : ')
t = Timer(5, display, ['Timeout:'])
t.start()
run_once()
print('Waiting ...')
#Interval Timer
# Wrap it into class
class RepeatTimer(Timer):
def run(self):
while not self.finished.wait(self.interval):
self.function(*self.args, **self.kwargs)
print('Done')
timer = RepeatTimer(1, display, ['Repeating '])
timer.start()
print('Treading Started ')
time.sleep(10) # suspend execution
print('Threading finished ')
timer.cancel() | [
{
"point_num": 1,
"id": "has_multiple_inheritance",
"question": "Does any class in this file use multiple inheritance?",
"answer": false
},
{
"point_num": 2,
"id": "more_functions_than_classes",
"question": "Does this file define more functions than classes?",
"answer": true
},... | 3 | Timers/timers__.py | nageshnnazare/Python-Advanced-Concepts |
# 1.装包
# 2.导包
from django.conf import settings
from itsdangerous import TimedJSONWebSignatureSerializer as Serializer
# 3.实例化
# 4.加密解密
class SecretOauth(object):
# 加密
def dumps(self, data):
s = Serializer(secret_key=settings.SECRET_KEY, expires_in=3600)
result = s.dumps(data)
return result.decode()
# 解密
def loads(self, data):
s = Serializer(secret_key=settings.SECRET_KEY, expires_in=3600)
result = s.loads(data)
return result
| [
{
"point_num": 1,
"id": "every_function_has_docstring",
"question": "Does every function in this file have a docstring?",
"answer": false
},
{
"point_num": 2,
"id": "more_functions_than_classes",
"question": "Does this file define more functions than classes?",
"answer": true
}... | 3 | meiduo_mall/utils/secret.py | liusudo123/meiduo_project |
#!/usr/bin/env python2
# -*- coding: utf-8 -*-
from hermes_python.hermes import Hermes
INTENT_HOW_ARE_YOU = "mikpan:how_are_you"
INTENT_GOOD = "bezzam:feeling_good"
INTENT_BAD = "bezzam:feeling_bad"
INTENT_ALRIGHT = "bezzam:feeling_alright"
INTENT_FILTER_FEELING = [INTENT_GOOD, INTENT_BAD, INTENT_ALRIGHT]
def main():
with Hermes("localhost:1883") as h:
h.subscribe_intent(INTENT_HOW_ARE_YOU, how_are_you_callback) \
.subscribe_intent(INTENT_GOOD, feeling_good_callback) \
.subscribe_intent(INTENT_BAD, feeling_bad_callback) \
.subscribe_intent(INTENT_ALRIGHT, feeling_alright_callback) \
.start()
def how_are_you_callback(hermes, intent_message):
session_id = intent_message.session_id
response = "I'm doing great. How about you?"
hermes.publish_continue_session(session_id, response, INTENT_FILTER_FEELING)
def feeling_good_callback(hermes, intent_message):
session_id = intent_message.session_id
response = "That's awesome! I'm happy to hear that."
hermes.publish_end_session(session_id, response)
def feeling_bad_callback(hermes, intent_message):
session_id = intent_message.session_id
response = "Sorry to hear that. I hope you feel better soon."
hermes.publish_end_session(session_id, response)
def feeling_alright_callback(hermes, intent_message):
session_id = intent_message.session_id
response = "That's cool."
hermes.publish_end_session(session_id, response)
if __name__ == "__main__":
main()
| [
{
"point_num": 1,
"id": "all_params_annotated",
"question": "Does every function parameter in this file have a type annotation (excluding self/cls)?",
"answer": false
},
{
"point_num": 2,
"id": "every_function_under_20_lines",
"question": "Is every function in this file shorter than ... | 3 | V2_action-how-are-you.py | mikpan/amld19-snips-workshop |
##############################################################################
#
# Copyright (c) 2009 Zope Foundation and Contributors.
# All Rights Reserved.
#
# This software is subject to the provisions of the Zope Public License,
# Version 2.1 (ZPL). A copy of the ZPL should accompany this distribution.
# THIS SOFTWARE IS PROVIDED "AS IS" AND ANY AND ALL EXPRESS OR IMPLIED
# WARRANTIES ARE DISCLAIMED, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
# WARRANTIES OF TITLE, MERCHANTABILITY, AGAINST INFRINGEMENT, AND FITNESS
# FOR A PARTICULAR PURPOSE.
#
##############################################################################
#
# Forked from zope.component.nextutility in 3.10.x.
#
from zope.interface import implementer
from zope.interface.interfaces import IComponentLookup
from zope.interface.interfaces import IComponents
@implementer(IComponents)
class SiteManagerStub(object):
__bases__ = ()
def __init__(self):
self._utils = {}
def setNext(self, next):
self.__bases__ = (next, )
def provideUtility(self, iface, util, name=''):
self._utils[(iface, name)] = util
def queryUtility(self, iface, name='', default=None):
return self._utils.get((iface, name), default)
def testingNextUtility(utility, nextutility, interface, name='',
sitemanager=None, nextsitemanager=None):
if sitemanager is None:
sitemanager = SiteManagerStub()
if nextsitemanager is None:
nextsitemanager = SiteManagerStub()
sitemanager.setNext(nextsitemanager)
sitemanager.provideUtility(interface, utility, name)
utility.__conform__ = (
lambda iface:
iface.isOrExtends(IComponentLookup) and sitemanager or None
)
nextsitemanager.provideUtility(interface, nextutility, name)
nextutility.__conform__ = (
lambda iface:
iface.isOrExtends(IComponentLookup) and nextsitemanager or None
)
| [
{
"point_num": 1,
"id": "every_function_under_20_lines",
"question": "Is every function in this file shorter than 20 lines?",
"answer": true
},
{
"point_num": 2,
"id": "all_function_names_snake_case",
"question": "Are all function names in this file written in snake_case?",
"answ... | 3 | src/zope/authentication/tests/utils.py | zopefoundation/zope.authentication |
from crypt import crypt
import sqlalchemy.exc
from typing import List
from error import DoesNotExist
from storage import UnixPasswordStorage
from password import UnixPassword
from .sqlite_api import (
Database,
DatabaseApi
)
from .password_schema import Password
def _fmt_password(password: Password) -> UnixPassword:
return UnixPassword(
name=password.name,
encrypted_password=password.encrypted_password,
days_since_epoch_last_change=password.days_since_epoch_last_change,
days_min=password.days_min,
days_max=password.days_max,
days_warn=password.days_warn,
days_inactive=password.days_inactive,
days_since_epoch_expires=password.days_since_epoch_expires
)
class UnixPasswordStorageSqlite(UnixPasswordStorage):
def __init__(self, db: Database):
self._db = DatabaseApi(db)
def update(self, user: str, new_password: str):
self._try_update(user, new_password)
def _try_update(self, name: str, new_password: str):
try:
password = self._db.get_one(Password, filters=(Password.name == name,))
password.encrypted_password = crypt(new_password)
self._db.update()
except sqlalchemy.exc.NoResultFound:
raise DoesNotExist("User {name} does not exist".format(name=name))
def get_by_name(self, name: str) -> UnixPassword:
try:
password = self._db.get_one(Password, filters=(Password.name == name,))
return _fmt_password(password)
except sqlalchemy.exc.NoResultFound:
raise DoesNotExist("User {name} does not exist".format(name=name))
def get_all(self) -> List[UnixPassword]:
passwords = self._db.get(Password)
return [_fmt_password(sdw) for sdw in passwords]
| [
{
"point_num": 1,
"id": "no_function_exceeds_5_params",
"question": "Does every function in this file take 5 or fewer parameters (excluding self/cls)?",
"answer": true
},
{
"point_num": 2,
"id": "all_params_annotated",
"question": "Does every function parameter in this file have a ty... | 3 | src/unix_accounts/storage_sqlite/password_api.py | 1nfiniteloop/unix-accounts |
from typing import Union
import numpy as np
from numba import njit
from jesse.helpers import get_candle_source, slice_candles
def supersmoother(candles: np.ndarray, period: int = 14, source_type: str = "close", sequential: bool = False) -> Union[
float, np.ndarray]:
"""
Super Smoother Filter 2pole Butterworth
This indicator was described by John F. Ehlers
:param candles: np.ndarray
:param period: int - default=14
:param source_type: str - default: "close"
:param sequential: bool - default=False
:return: float | np.ndarray
"""
candles = slice_candles(candles, sequential)
# Accept normal array too.
if len(candles.shape) == 1:
source = candles
else:
source = get_candle_source(candles, source_type=source_type)
res = supersmoother_fast(source, period)
return res if sequential else res[-1]
@njit
def supersmoother_fast(source, period):
a = np.exp(-1.414 * np.pi / period)
b = 2 * a * np.cos(1.414 * np.pi / period)
newseries = np.copy(source)
for i in range(2, source.shape[0]):
newseries[i] = (1 + a ** 2 - b) / 2 * (source[i] + source[i - 1]) \
+ b * newseries[i - 1] - a ** 2 * newseries[i - 2]
return newseries
| [
{
"point_num": 1,
"id": "no_function_exceeds_5_params",
"question": "Does every function in this file take 5 or fewer parameters (excluding self/cls)?",
"answer": true
},
{
"point_num": 2,
"id": "every_function_under_20_lines",
"question": "Is every function in this file shorter than... | 3 | jesse/indicators/supersmoother.py | noenfugler/jesse |
"""
SRTpy -- SRT (https://etk.srail.co.kr) wrapper for Python.
==========================================================
: copyright: (c) 2017 by Heena Kwag.
: URL: <http://github.com/dotaitch/SRTpy>
: license: BSD, see LICENSE for more details.
"""
import random
import requests
from xml.etree import ElementTree as ET
from .constants import *
def get_key_by_value(value, data):
for k, v in data.items():
if v == value:
return k
def get_key_by_value_list(value, data):
for k, v in data.items():
if value in v:
return k
def get_namespace(root):
tag = root.tag
ns = tag[tag.find('{')+1:tag.find('}')]
return ns
def find_col_elem(root, k):
ns = get_namespace(root)
tag = './/{{{0}}}Col[@id="{1}"]'.format(ns, k)
return root.find(tag)
def find_col_elem_text(root, k):
elem = find_col_elem(root, k)
return elem.text if elem is not None else None
def find_other_elem(root, k, flag):
ns = get_namespace(root)
tag = './/{{{0}}}{1}'.format(ns, k)
if flag == 1:
return root.find(tag)
else:
return root.findall(tag)
def request(url, data, filepath):
tree = ET.parse(filepath)
root = tree.getroot()
ns = get_namespace(root)
ET.register_namespace('', ns)
for k, v in data.items():
elem = find_col_elem(root, k)
elem.text = v
tree = ET.tostring(root, 'utf-8')
user_agent = random.choice(USER_AGENTS)
headers = {
'Content-Type': 'application/xml',
'User-Agent': user_agent,
}
response = requests.post(url, data=tree, headers=headers).content
return ET.fromstring(response)
| [
{
"point_num": 1,
"id": "all_function_names_snake_case",
"question": "Are all function names in this file written in snake_case?",
"answer": true
},
{
"point_num": 2,
"id": "every_function_has_docstring",
"question": "Does every function in this file have a docstring?",
"answer":... | 3 | SRTpy/utils.py | ChangWanHong/SRTpy |
from __future__ import print_function
from orphics import maps,io,cosmology
from pixell import enmap
import numpy as np
import os,sys
from soapack import interfaces as sints
def get_coadd(imaps,wts,axis):
# sum(w*m)/sum(w)
twt = np.sum(wts,axis=axis)
retmap = np.sum(wts*imaps,axis=axis)/twt
retmap[~np.isfinite(retmap)] = 0
return retmap,twt
def get_npol(array):
if array=='545' or array=='857':return 1
else: return 3
mask = sints.get_act_mr3_crosslinked_mask('deep56',version='180323')
dm = sints.PlanckHybrid(region=mask)
bin_edges = np.arange(30,6000,40)
p1ds = {}
for array in dm.arrays:
splits = dm.get_splits(array,srcfree=False)[0]
ivars = dm.get_splits_ivar(array)[0]
coadd,wt = get_coadd(splits,ivars,axis=0)
npol = get_npol(array)
for i in range(npol):
cents,p1d = maps.binned_power(coadd[i],bin_edges=bin_edges,mask=mask)
p1ds[array+str(i)] = p1d.copy()
mivar = wt[i].mean()
print(array,mivar)
for i in range(3):
pl = io.Plotter(xyscale='linlog',xlabel='l',ylabel='C')
for array in dm.arrays:
npol = get_npol(array)
if i<npol:
pl.add(cents,p1ds[array+str(i)],label=array)
pl.done("powers%d.png" % i)
| [
{
"point_num": 1,
"id": "no_function_exceeds_5_params",
"question": "Does every function in this file take 5 or fewer parameters (excluding self/cls)?",
"answer": true
},
{
"point_num": 2,
"id": "every_function_under_20_lines",
"question": "Is every function in this file shorter than... | 3 | bin/planck/verify_projection.py | ACTCollaboration/tilec |
from flask_jwt import JWT, jwt_required, current_identity
from werkzeug.security import safe_str_cmp
class User(object):
def __init__(self, id, username, password):
self.id = id
self.username = username
self.password = password
def __str__(self):
return "User(id='%s')" % self.id
#temp usertest
users = [
User(1, 'user1', '123'),
User(2, 'admin', 'password'),
]
username_table = {u.username: u for u in users}
userid_table = {u.id: u for u in users}
def authenticate(username, password):
user = username_table.get(username, None)
if user and safe_str_cmp(user.password.encode('utf-8'), password.encode('utf-8')):
return user
def is_authenticate(username, password):
''' TEMP '''
global users
for user in users:
if user.username == username and user.password == password:
return True
return False
def identity(payload):
user_id = payload['identity']
return userid_table.get(user_id, None)
| [
{
"point_num": 1,
"id": "all_function_names_snake_case",
"question": "Are all function names in this file written in snake_case?",
"answer": true
},
{
"point_num": 2,
"id": "all_params_annotated",
"question": "Does every function parameter in this file have a type annotation (excludi... | 3 | application/user.py | AxelGard/secure-castle |
#!../bin/python3
# -*- coding:utf-8 -*-
"""
Copyright 2021 Jerome DE LUCCHI
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
import os
import sys
import json
from env import _SERVER_DIR
sys.path.insert(0, _SERVER_DIR)
from api import db
__DATAMODEL_DIR = os.path.join(os.path.abspath('..'), 'datamodel')
__DATAMODEL_NODE_MODE_FILE = os.path.join(__DATAMODEL_DIR, 'node_mode.template.mapping')
__ES_ADDR = db.ES_PROTOCOL + """://""" + str(db.ES_HOSTNAME) + """:""" + str(db.ES_PORT)
__CREATE_INDEX_TEMPLATE = """curl -s -XPUT -H \"Content-Type: Application/Json\" """ + __ES_ADDR + """/_template/blast_node_mode -d@""" + __DATAMODEL_NODE_MODE_FILE
__NODE_MODES = [
{"name": "maintenance"},
{"name": "pause"},
{"name": "running"}
]
def defineIndexTemplate():
try:
if json.load(os.popen(__CREATE_INDEX_TEMPLATE))["acknowledged"]:
return True
except KeyError:
return False
def provisionDefault():
try:
for mode in __NODE_MODES:
__ES_PROVISION_DEFAULT = """curl -s -XPOST -H \"Content-Type: Application/Json\" """ + __ES_ADDR + """/blast_node_mode/_doc -d \'""" + json.dumps(mode) + """\'"""
if not json.load(os.popen(__ES_PROVISION_DEFAULT))["result"] == "created":
return False
return True
except KeyError:
return False
def main():
if defineIndexTemplate():
if provisionDefault():
sys.exit(0)
if __name__ == "__main__":
main()
| [
{
"point_num": 1,
"id": "all_function_names_snake_case",
"question": "Are all function names in this file written in snake_case?",
"answer": false
},
{
"point_num": 2,
"id": "every_function_under_20_lines",
"question": "Is every function in this file shorter than 20 lines?",
"ans... | 3 | backend/builder/build_db_node_mode.py | blast-eu-com/blast.eu.com |
# Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
# SPDX-License-Identifier: MIT-0
import json
import boto3
import logger
from crhelper import CfnResource
helper = CfnResource()
try:
dynamodb = boto3.resource('dynamodb')
except Exception as e:
helper.init_failure(e)
@helper.create
@helper.update
def do_action(event, _):
""" Called as part of bootstrap template.
Inserts/Updates Settings table based upon the resources deployed inside bootstrap template
We use these settings inside tenant template
Args:
event ([type]): [description]
_ ([type]): [description]
"""
logger.info("Updating settings")
settings_table_name = event['ResourceProperties']['SettingsTableName']
cognitoUserPoolId = event['ResourceProperties']['cognitoUserPoolId']
cognitoUserPoolClientId = event['ResourceProperties']['cognitoUserPoolClientId']
table_system_settings = dynamodb.Table(settings_table_name)
response = table_system_settings.put_item(
Item={
'settingName': 'userPoolId-pooled',
'settingValue' : cognitoUserPoolId
}
)
response = table_system_settings.put_item(
Item={
'settingName': 'appClientId-pooled',
'settingValue' : cognitoUserPoolClientId
}
)
@helper.delete
def do_nothing(_, __):
pass
def handler(event, context):
logger.info(event)
helper(event, context)
| [
{
"point_num": 1,
"id": "every_function_under_20_lines",
"question": "Is every function in this file shorter than 20 lines?",
"answer": false
},
{
"point_num": 2,
"id": "no_function_exceeds_5_params",
"question": "Does every function in this file take 5 or fewer parameters (excluding... | 3 | server/custom_resources/update_settings_table.py | snetty/aws-saas-factory-ref-solution-serverless-saas |
# SPDX-License-Identifier: Apache-2.0
#
# Copyright (C) 2015, ARM Limited and contributors.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import logging
import os
from test import LisaTest
TESTS_DIRECTORY = os.path.dirname(os.path.realpath(__file__))
TESTS_CONF = os.path.join(TESTS_DIRECTORY, "rfc.config")
class RFC(LisaTest):
"""Tests for the Energy-Aware Scheduler"""
test_conf = TESTS_CONF
experiments_conf = TESTS_CONF
@classmethod
def setUpClass(cls, *args, **kwargs):
super(RFC, cls).runExperiments(args, kwargs)
def test_run(self):
"""A dummy test just to run configured workloads"""
pass
# vim :set tabstop=4 shiftwidth=4 expandtab
| [
{
"point_num": 1,
"id": "has_nested_function_def",
"question": "Does this file contain any function defined inside another function?",
"answer": false
},
{
"point_num": 2,
"id": "has_multiple_inheritance",
"question": "Does any class in this file use multiple inheritance?",
"answ... | 3 | tests/eas/rfc.py | MIPS/external-lisa |
from .base_model import BaseModel
from . import networks
from .cycle_gan_model import CycleGANModel
class TestModel(BaseModel):
def name(self):
return 'TestModel'
@staticmethod
def modify_commandline_options(parser, is_train=True):
assert not is_train, 'TestModel cannot be used in train mode'
parser = CycleGANModel.modify_commandline_options(parser, is_train=False)
parser.set_defaults(dataset_mode='single')
parser.add_argument('--model_suffix', type=str, default='',
help='In checkpoints_dir, [epoch]_net_G[model_suffix].pth will'
' be loaded as the generator of TestModel')
return parser
def initialize(self, opt):
assert (not opt.isTrain)
BaseModel.initialize(self, opt)
# specify the training losses you want to print out. The program will call base_model.get_current_losses
self.loss_names = []
# specify the images you want to save/display. The program will call base_model.get_current_visuals
self.visual_names = ['real_A', 'fake_B']
# specify the models you want to save to the disk. The program will call base_model.save_networks and base_model.load_networks
self.model_names = ['G' + opt.model_suffix]
self.netG = networks.define_G(opt.input_nc, opt.output_nc, opt.ngf, opt.netG,
opt.norm, not opt.no_dropout, opt.init_type, opt.init_gain, self.gpu_ids)
# assigns the model to self.netG_[suffix] so that it can be loaded
# please see BaseModel.load_networks
setattr(self, 'netG' + opt.model_suffix, self.netG)
def set_input(self, input):
# we need to use single_dataset mode
self.real_A = input['A'].to(self.device)
self.image_paths = input['A_paths']
def forward(self):
self.fake_B = self.netG(self.real_A)
| [
{
"point_num": 1,
"id": "every_function_under_20_lines",
"question": "Is every function in this file shorter than 20 lines?",
"answer": true
},
{
"point_num": 2,
"id": "no_function_exceeds_5_params",
"question": "Does every function in this file take 5 or fewer parameters (excluding ... | 3 | models/test_model.py | yunyanxing/pairwise_xray_augmentation |
import numpy as np
#This is crafted especially for normal distribution for MLE.
class GradientDescentOptimizer:
def __init__(self, X, tolerance, learning_rate):
self.learning_rate = learning_rate
self.tolerance = tolerance
self.X = X
if(len(X.shape) == 1):
self.number_of_points = X.shape[0]
self.number_of_variables = 2
else:
self.number_of_points, self.number_of_variables = X.shape
# self.number_of_variables -= 1 #we subtract the extra bias term
print(self.number_of_points, self.number_of_variables, "hello")
def optimize(self):
self.theta = np.array([np.random.randint(1, 10) for _ in range(self.number_of_variables)]) #we choose a random value for theta
self.theta = np.resize(self.theta, new_shape=(self.number_of_variables, 1))
self.theta = self.theta.astype(float)
prev_value = 1
current_value = 2
print("theta assigned", self.theta)
print("X", self.X)
while abs(prev_value - current_value) >= self.tolerance:
gradient = self.theta.copy()
for i in range(2):
if i == 0:
gradient[i][0] = self.learning_rate * (1.0 / self.number_of_points) * np.sum((self.X - self.theta[0]))
else :
gradient[i][0] = self.learning_rate * (1.0 / self.number_of_points) * np.sum((-1.0 / (2.0 * self.theta[1])) + ((self.X - self.theta[0]) ** 2 / (2.0 * (self.theta[1]) ** 2)))
# print("gradient ", gradient)
if self.theta[1] + gradient[1][0] < 0:
break
self.theta = self.theta + gradient
prev_value = current_value
current_value = np.sum(-np.log(np.sqrt(2 * np.pi)) - np.log(np.sqrt(self.theta[1])) - ((self.X - self.theta[0]) ** 2 / (2 * self.theta[1])))
print("loss function " + str(current_value))
print("theta ", self.theta)
return self.theta | [
{
"point_num": 1,
"id": "every_function_under_20_lines",
"question": "Is every function in this file shorter than 20 lines?",
"answer": false
},
{
"point_num": 2,
"id": "no_function_exceeds_5_params",
"question": "Does every function in this file take 5 or fewer parameters (excluding... | 3 | utilities/mini_batch_gradient_descent.py | Puneethnaik/Generative-Adversarial-Networks |
import numpy as np
def regularization_cost(kwargs):
thetas = kwargs.get('thetas', np.array([0]))
return kwargs.get('reg_p', 0) * thetas.T.dot(thetas)
def regularization_cost_2(thetas, kwargs):
return kwargs.get('reg_p', 0) * thetas.T.dot(thetas)
def calc_cost_linear(m, **kwargs):
return kwargs['ers'].T.dot(kwargs['ers'])/(2*m) + regularization_cost(kwargs)
def calc_cost_logistic(m, **kwargs):
targets = kwargs['ts']
predictions = kwargs['ps']
return (-targets.T.dot(np.log(predictions)) - (1-targets).T.dot(np.log(1-predictions))) / m + regularization_cost(kwargs)
def calc_cost_multiclass_logistic(m, **kwargs):
targets = kwargs['ts']
predictions = kwargs['ps']
costs = []
for i in range(len(targets)):
thetas = kwargs.get('thetas', np.array([0] * len(targets))).T[i]
cost = (-targets[i].T.dot(np.log(predictions[i])) - (1-targets[i]).T.dot(np.log(1-predictions[i]))) / m + regularization_cost_2(thetas, kwargs)
costs.append(cost)
return costs
| [
{
"point_num": 1,
"id": "all_params_annotated",
"question": "Does every function parameter in this file have a type annotation (excluding self/cls)?",
"answer": false
},
{
"point_num": 2,
"id": "all_function_names_snake_case",
"question": "Are all function names in this file written ... | 3 | Coursera/costs.py | nalkhish/MachineLearning |
# Copyright 2020 Zachary Frost
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
"""Define AssignAction class."""
from py2glsl.shader.statement.action.base import ShaderActionBase
from py2glsl.shader.statement.action.types import ActionVar
class AssignAction(ShaderActionBase):
"""AssignAction"""
def __init__(self, source: ActionVar, target: ActionVar):
self.__source = source
self.__target = target
@property
def source(self):
"""source"""
return ShaderActionBase._eval_action_var(self.__source)
@property
def target(self):
"""target"""
return ShaderActionBase._eval_action_var(self.__target)
def generate(self):
return f' {self.target} = {self.source};'
| [
{
"point_num": 1,
"id": "has_nested_function_def",
"question": "Does this file contain any function defined inside another function?",
"answer": false
},
{
"point_num": 2,
"id": "every_function_has_docstring",
"question": "Does every function in this file have a docstring?",
"ans... | 3 | src/py2glsl/shader/statement/action/assign.py | zfzackfrost/py2glsl |
# qubit number=4
# total number=10
import pyquil
from pyquil.api import local_forest_runtime, QVMConnection
from pyquil import Program, get_qc
from pyquil.gates import *
import numpy as np
conn = QVMConnection()
def make_circuit()-> Program:
prog = Program() # circuit begin
prog += H(0) # number=1
prog += Z(3) # number=7
prog += H(1) # number=2
prog += H(2) # number=3
prog += H(3) # number=4
prog += SWAP(3,0) # number=5
prog += SWAP(3,0) # number=6
prog += SWAP(3,0) # number=8
prog += SWAP(3,0) # number=9
# circuit end
return prog
def summrise_results(bitstrings) -> dict:
d = {}
for l in bitstrings:
if d.get(l) is None:
d[l] = 1
else:
d[l] = d[l] + 1
return d
if __name__ == '__main__':
prog = make_circuit()
qvm = get_qc('4q-qvm')
results = qvm.run_and_measure(prog,1024)
bitstrings = np.vstack([results[i] for i in qvm.qubits()]).T
bitstrings = [''.join(map(str, l)) for l in bitstrings]
writefile = open("../data/startPyquil95.csv","w")
print(summrise_results(bitstrings),file=writefile)
writefile.close()
| [
{
"point_num": 1,
"id": "all_return_types_annotated",
"question": "Does every function in this file have a return type annotation?",
"answer": true
},
{
"point_num": 2,
"id": "every_function_under_20_lines",
"question": "Is every function in this file shorter than 20 lines?",
"an... | 3 | data/p4VQE/R4/benchmark/startPyquil95.py | UCLA-SEAL/QDiff |
from __future__ import unicode_literals, print_function, division
from io import open
import unicodedata
import string
import re
import random
from random import shuffle
import torch
import torch.nn as nn
from torch.autograd import Variable
from torch import optim
import torch.nn.functional as F
import sys
import os
import time
import math
import pickle
# Provide the predefined digit sequence tasks
def interleaved(sequence):
if len(sequence) <= 1:
return list(sequence)
else:
return [sequence[0], sequence[-1]] + interleaved(sequence[1:-1])
def transform(sequence, task):
if task == "auto":
return sequence
if task == "rev":
return sequence[::-1]
if task == "sort":
return sorted(sequence)
if task == "interleave":
return interleaved(sequence)
| [
{
"point_num": 1,
"id": "every_function_under_20_lines",
"question": "Is every function in this file shorter than 20 lines?",
"answer": true
},
{
"point_num": 2,
"id": "all_params_annotated",
"question": "Does every function parameter in this file have a type annotation (excluding se... | 3 | tasks.py | tommccoy1/tpdn |
import json
import numpy as np
import math
import numbers
def is_nan(x):
return (x is np.nan or x != x)
def convert_simple_numpy_type(obj):
if isinstance(obj, (np.int_, np.intc, np.intp, np.int8,
np.int16, np.int32, np.int64, np.uint8,
np.uint16, np.uint32, np.uint64)):
return int(obj)
elif isinstance(obj, (np.float_, np.float16, np.float32,
np.float64)):
return float(obj)
# elif isinstance(obj,(np.ndarray,)): #### This is the fix
# return obj.tolist()
return None
class NumpyJSONEncoder(json.JSONEncoder):
""" Special json encoder for numpy types """
def default(self, obj):
res = convert_simple_numpy_type(obj)
if res is not None:
return res
return json.JSONEncoder.default(self, obj)
def convert_nan_inf(params):
if type(params) is dict:
for key, value in params.items():
params[key] = convert_nan_inf(value)
elif type(params) is list:
for idx, value in enumerate(params):
params[idx] = convert_nan_inf(value)
else:
if isinstance(params, numbers.Number) and math.isinf(params):
params = None
if is_nan(params):
params = None
return params
def json_dumps_np(data, allow_nan=False):
if not allow_nan:
data = convert_nan_inf(data)
return json.dumps(data, cls=NumpyJSONEncoder, allow_nan=allow_nan)
| [
{
"point_num": 1,
"id": "all_function_names_snake_case",
"question": "Are all function names in this file written in snake_case?",
"answer": true
},
{
"point_num": 2,
"id": "has_multiple_inheritance",
"question": "Does any class in this file use multiple inheritance?",
"answer": ... | 3 | a2ml/api/utils/json_utils.py | augerai/a2ml |
from typing import Type, List
from jivago.inject import typing_meta_helper
from jivago.lang.annotations import Override
from jivago.lang.stream import Stream
from jivago.serialization.deserialization_strategy import DeserializationStrategy, T
TYPES_WHICH_DESERIALIZE_TO_LISTS = ('List', 'Iterable', 'Collection')
class TypedListDeserializationStrategy(DeserializationStrategy):
def __init__(self, deserializer: "Deserializer"):
self.deserializer = deserializer
@Override
def can_handle_deserialization(self, declared_type: type) -> bool:
return typing_meta_helper.is_typing_meta_collection(declared_type, TYPES_WHICH_DESERIALIZE_TO_LISTS)
@Override
def deserialize(self, obj: list, declared_type: Type[List[T]]) -> list:
list_content_type = declared_type.__args__[0]
return Stream(obj).map(lambda x: self.deserializer.deserialize(x, list_content_type)).toList()
| [
{
"point_num": 1,
"id": "has_multiple_inheritance",
"question": "Does any class in this file use multiple inheritance?",
"answer": false
},
{
"point_num": 2,
"id": "all_params_annotated",
"question": "Does every function parameter in this file have a type annotation (excluding self/c... | 3 | jivago/serialization/deserialization/typed_list_deserialization_strategy.py | keotl/jivago |
from regression_tests import *
class TestBase(Test):
def test_c_contains_for_or_while_loop(self):
assert self.out_c.contains(r'(for|while) \(')
def test_c_contains_no_gotos(self):
assert not self.out_c.contains(r'goto .*;')
def test_c_contains_all_strings(self):
assert self.out_c.has_string_literal('%d')
assert self.out_c.has_string_literal('test')
class TestRunBase(TestBase):
def test_c_produces_correct_output_when_run(self):
self.assert_c_produces_output_when_run(
input='3',
expected_output='testtest',
expected_return_code=0
)
def test_c_contains_just_main(self):
assert self.out_c.has_just_funcs('main')
class Test_2018_x64Pe(TestBase):
settings_2018 = TestSettings(
input=files_in_dir('2018-09-17', matching=r'.*\.exe'),
)
class Test_2018(TestRunBase):
settings_2018 = TestSettings(
input=files_in_dir('2018-09-17', excluding=r'.*\.exe'),
)
class Test_2017(TestRunBase):
settings = TestSettings(
input=files_in_dir('2017-11-14'),
)
| [
{
"point_num": 1,
"id": "all_function_names_snake_case",
"question": "Are all function names in this file written in snake_case?",
"answer": true
},
{
"point_num": 2,
"id": "every_function_under_20_lines",
"question": "Is every function in this file shorter than 20 lines?",
"answ... | 3 | integration/for-loop/test.py | xbabka01/retdec-regression-tests |
import torch
import numpy
# codes of this function are borrowed from https://github.com/yanx27/Pointnet_Pointnet2_pytorch/blob/master/models/pointnet2_utils.py
def index_points(device, points, idx):
"""
Input:
points: input points data, [B, N, C]
idx: sample index data, [B, S]
Return:
new_points:, indexed points data, [B, S, C]
"""
B = points.shape[0]
view_shape = list(idx.shape)
view_shape[1:] = [1] * (len(view_shape) - 1)
repeat_shape = list(idx.shape)
repeat_shape[0] = 1
# batch_indices = torch.arange(B, dtype=torch.long).to(device).view(view_shape).repeat(repeat_shape)
batch_indices = torch.arange(B, dtype=torch.long).cuda().view(view_shape).repeat(repeat_shape)
new_points = points[batch_indices, idx, :]
return new_points
def knn_l2(device, net, k, u):
'''
Input:
k: int32, number of k in k-nn search
net: (batch_size, npoint, c) float32 array, points
u: int32, block size
Output:
idx: (batch_size, npoint, k) int32 array, indices to input points
'''
INF = 1e8
batch_size = net.size(0)
npoint = net.size(1)
n_channel = net.size(2)
square = torch.pow(torch.norm(net, dim=2,keepdim=True),2)
def u_block(batch_size, npoint, u):
block = numpy.zeros([batch_size, npoint, npoint])
n = npoint // u
for i in range(n):
block[:, (i*u):(i*u+u), (i*u):(i*u+u)] = numpy.ones([batch_size, u, u]) * (-INF)
return block
# minus_distance = 2 * torch.matmul(net, net.transpose(2,1)) - square - square.transpose(2,1) + torch.Tensor(u_block(batch_size, npoint, u)).to(device)
minus_distance = 2 * torch.matmul(net, net.transpose(2,1)) - square - square.transpose(2,1) + torch.Tensor(u_block(batch_size, npoint, u)).cuda()
_, indices = torch.topk(minus_distance, k, largest=True, sorted=False)
return indices
| [
{
"point_num": 1,
"id": "every_function_under_20_lines",
"question": "Is every function in this file shorter than 20 lines?",
"answer": false
},
{
"point_num": 2,
"id": "all_function_names_snake_case",
"question": "Are all function names in this file written in snake_case?",
"ans... | 3 | Intra_MLP.py | suyukun666/UFO |
"""Integration tests for Glesys"""
from unittest import TestCase
import pytest
from lexicon.tests.providers.integration_tests import IntegrationTestsV1
# Hook into testing framework by inheriting unittest.TestCase and reuse
# the tests which *each and every* implementation of the interface must
# pass, by inheritance from define_tests.TheTests
# TODO: migrate to IntegrationTestsV2 and its extended test suite
class GlesysProviderTests(TestCase, IntegrationTestsV1):
"""TestCase for Glesys"""
provider_name = 'glesys'
domain = "capsulecd.com"
def _filter_headers(self):
return ['Authorization']
# TODO: enable the skipped tests
@pytest.mark.skip(reason="new test, missing recording")
def test_provider_when_calling_update_record_should_modify_record_name_specified(self):
return
| [
{
"point_num": 1,
"id": "every_function_has_docstring",
"question": "Does every function in this file have a docstring?",
"answer": false
},
{
"point_num": 2,
"id": "more_functions_than_classes",
"question": "Does this file define more functions than classes?",
"answer": true
}... | 3 | lexicon/tests/providers/test_glesys.py | HelixEducation/lexicon |
from __future__ import unicode_literals
from dvc.command.base import CmdBase
class CmdCheckout(CmdBase):
def run(self):
if not self.args.targets:
self.project.checkout(force=self.args.force)
else:
for target in self.args.targets:
self.project.checkout(
target=target, with_deps=self.args.with_deps, force=self.args.force
)
return 0
def add_parser(subparsers, parent_parser):
CHECKOUT_HELP = "Checkout data files from cache."
checkout_parser = subparsers.add_parser(
"checkout",
parents=[parent_parser],
description=CHECKOUT_HELP,
help=CHECKOUT_HELP,
)
checkout_parser.add_argument(
"-d",
"--with-deps",
action="store_true",
default=False,
help="Checkout all dependencies of the specified target.",
)
checkout_parser.add_argument(
"-f",
"--force",
action="store_true",
default=False,
help="Do not prompt when removing working directory files.",
)
checkout_parser.add_argument("targets", nargs="*", help="DVC files.")
checkout_parser.set_defaults(func=CmdCheckout)
| [
{
"point_num": 1,
"id": "has_multiple_inheritance",
"question": "Does any class in this file use multiple inheritance?",
"answer": false
},
{
"point_num": 2,
"id": "more_functions_than_classes",
"question": "Does this file define more functions than classes?",
"answer": true
},... | 3 | dvc/command/checkout.py | yfarjoun/dvc |
"""SCons.Tool.suncc
Tool-specific initialization for Sun Solaris (Forte) CC and cc.
There normally shouldn't be any need to import this module directly.
It will usually be imported through the generic SCons.Tool.Tool()
selection method.
"""
#
# Copyright (c) 2001, 2002, 2003, 2004, 2005, 2006, 2007, 2008, 2009, 2010, 2011, 2012, 2013 The SCons Foundation
#
# Permission is hereby granted, free of charge, to any person obtaining
# a copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish,
# distribute, sublicense, and/or sell copies of the Software, and to
# permit persons to whom the Software is furnished to do so, subject to
# the following conditions:
#
# The above copyright notice and this permission notice shall be included
# in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY
# KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE
# WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE
# LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
# OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
# WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
#
__revision__ = "src/engine/SCons/Tool/suncc.py 2013/03/03 09:48:35 garyo"
import SCons.Util
import cc
def generate(env):
"""
Add Builders and construction variables for Forte C and C++ compilers
to an Environment.
"""
cc.generate(env)
env['CXX'] = 'CC'
env['SHCCFLAGS'] = SCons.Util.CLVar('$CCFLAGS -KPIC')
env['SHOBJPREFIX'] = 'so_'
env['SHOBJSUFFIX'] = '.o'
def exists(env):
return env.Detect('CC')
# Local Variables:
# tab-width:4
# indent-tabs-mode:nil
# End:
# vim: set expandtab tabstop=4 shiftwidth=4:
| [
{
"point_num": 1,
"id": "every_function_has_docstring",
"question": "Does every function in this file have a docstring?",
"answer": false
},
{
"point_num": 2,
"id": "every_function_under_20_lines",
"question": "Is every function in this file shorter than 20 lines?",
"answer": tru... | 3 | pdb2pqr-1.9.0/scons/scons-local-2.3.0/SCons/Tool/suncc.py | Acpharis/protein_prep |
class Graph(object):
def __init__(self):
self.children = {} # dictionnary giving the list of childrens for each node.
self.q = [] # configuration associated to each node.
self.connex = [] # ID of the connex component the node is belonging to.
self.nconnex = 0 # number of connex components.
self.existing_connex = [] # List of existing connex component ID.
def add_node(self, q=None, new_connex=False):
'''
Create the memory to store a new edge. Initialize all components to None.
Create an empty list of children.
'''
idx = len(self.children)
self.children[idx] = []
self.q.append(q)
self.connex.append(None)
if new_connex:
self.new_connex(idx)
return idx
def add_edge(self, first, second, orientation=0):
'''
Add edge from first to second. Also add edge from second to first if orientation
is null.
'''
assert(first in self.children and second in self.children)
if orientation >= 0:
self.children[first].append(second)
if orientation <= 0:
self.children[second].append(first)
def new_connex(self, idx):
'''
Create a new connex component for node <idx>
'''
self.connex[idx] = self.nconnex
self.existing_connex.append(self.nconnex)
self.nconnex += 1
def rename_connex(self, past, future):
'''
Change the index of the all the nodes belonging to a connex component.
Useful when merging two connex components.
'''
try:
self.existing_connex.remove(past)
self.connex = [c if c != past else future for c in self.connex]
except:
pass
def connexIndexes(self, connex):
'''Return the list of all node indexes belonging to connex component <connex>.'''
return [i for i, c in enumerate(self.connex) if c == connex]
| [
{
"point_num": 1,
"id": "no_function_exceeds_5_params",
"question": "Does every function in this file take 5 or fewer parameters (excluding self/cls)?",
"answer": true
},
{
"point_num": 2,
"id": "all_function_names_snake_case",
"question": "Are all function names in this file written... | 3 | doc/d-practical-exercises/src/graph.py | thanhndv212/pinocchio |
# -*- coding: utf-8 -*-
from datetime import datetime
from flask import g, jsonify, request
from flask_httpauth import HTTPBasicAuth
from app import app, db
from app.models import User, Post
auth = HTTPBasicAuth()
@auth.verify_password
def verify_password(username, password):
user = db.session.query(User).filter_by(username=username).first()
if not user or not user.check_password(password):
return False
g.user = user
return True
@app.route('/curl/post', methods=['POST'])
@auth.login_required
def curl_post():
try:
content = request.json.get('content')
title = request.json.get('title')
post = Post(user=g.user, title=title, content=content, pub_date=datetime.now())
post.save()
return jsonify({'data': 'Hello, %s!' % g.user.username})
except:
return jsonify({'data': 'Something Went Wrong.'})
| [
{
"point_num": 1,
"id": "has_nested_function_def",
"question": "Does this file contain any function defined inside another function?",
"answer": false
},
{
"point_num": 2,
"id": "every_function_has_docstring",
"question": "Does every function in this file have a docstring?",
"ans... | 3 | app/views/curl.py | daghan/MarkDownBlog |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.