source string | points list | n_points int64 | path string | repo string |
|---|---|---|---|---|
from todo.api import get_tasks, create_task, finish_task, delete_task
def test_list_tasks(test_app):
# make sure there are no existing tasks
assert get_tasks() == []
create_task('buy milk')
create_task('buy cookies')
assert len(get_tasks()) == 2
def test_create_task(test_app):
create_task('Get milk')
existing_tasks = get_tasks()
assert len(existing_tasks) == 1
first_task = existing_tasks[0]
assert first_task.body == 'Get milk'
assert first_task.done is False
def test_finish_task(test_app):
create_task('Get milk')
assert len(get_tasks()) == 1
get_milk = get_tasks().pop()
get_milk_id = get_milk.id
finish_task(get_milk_id)
get_milk = get_tasks().pop()
assert get_milk.done is True
def test_delete_task(test_app):
create_task('Get milk')
assert len(get_tasks()) == 1
get_milk = get_tasks().pop()
get_milk_id = get_milk.id
delete_task(get_milk_id)
assert len(get_tasks()) == 0
| [
{
"point_num": 1,
"id": "no_function_exceeds_5_params",
"question": "Does every function in this file take 5 or fewer parameters (excluding self/cls)?",
"answer": true
},
{
"point_num": 2,
"id": "every_function_under_20_lines",
"question": "Is every function in this file shorter than... | 3 | tests/test_app.py | samdis/TodoApp |
import requests
import socket
import threading
requests.packages.urllib3.disable_warnings() # noqa
class ScanPort:
def __init__(self, target, start_port=None, end_port=None):
self.target = target
self.from_port = start_port
self.to_port = end_port
self.ports = []
def scanner(self, target, port):
try:
sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
sock.settimeout(0.3)
sock.connect((target, port))
self.ports.append(port)
except Exception as f: # noqa
pass
def __call__(self, *args, **kwargs):
if self.from_port is None:
self.from_port = 1
if self.to_port is None:
self.to_port = 10000
try:
num = 1
for port in range(self.from_port, self.to_port+1):
num += 1
t1 = threading.Thread(target=self.scanner, args=[self.target, port])
t1.start()
if num >= 50:
for stop_thread in range(1, 100):
t1.join()
num = 0
except Exception as e: # noqa
pass
def scanport(target, start_port=None, end_port=None):
return ScanPort(target, start_port, end_port)()
| [
{
"point_num": 1,
"id": "has_multiple_inheritance",
"question": "Does any class in this file use multiple inheritance?",
"answer": false
},
{
"point_num": 2,
"id": "no_function_exceeds_5_params",
"question": "Does every function in this file take 5 or fewer parameters (excluding self... | 3 | webpt/port_scanner.py | cool-RR/webpt |
from typing import Callable
class Solution:
def setZeroes(self, matrix: list[list[int]]) -> None:
"""Do not return anything, modify matrix in-place instead."""
first_column_zero = False
for row in matrix:
for j, cell in enumerate(row):
if cell != 0:
continue
row[0] = 0
if j == 0:
first_column_zero = True
else:
matrix[0][j] = 0
for i, row in enumerate(matrix[1:], start=1):
for j, cell in enumerate(row[1:], start=1):
if row[0] == 0:
row[j] = 0
if matrix[0][j] == 0:
matrix[i][j] = 0
# first row check
if matrix[0][0] == 0:
first_row = matrix[0]
for i in range(len(first_row)):
first_row[i] = 0
# first column check
if first_column_zero:
for i in range(len(matrix)):
matrix[i][0] = 0
tests = [
(
([[1, 1, 1], [1, 0, 1], [1, 1, 1]],),
[[1, 0, 1], [0, 0, 0], [1, 0, 1]],
),
(
([[0, 1, 2, 0], [3, 4, 5, 2], [1, 3, 1, 5]],),
[[0, 0, 0, 0], [0, 4, 5, 0], [0, 3, 1, 0]],
),
(
([[1, 2, 3, 4], [5, 0, 7, 8], [0, 10, 11, 12], [13, 14, 15, 0]],),
[[0, 0, 3, 0], [0, 0, 0, 0], [0, 0, 0, 0], [0, 0, 0, 0]],
),
]
def validator(
setZeroes: Callable[[list[list[int]]], None],
inputs: tuple[list[list[int]]],
expected: list[list[int]]
) -> None:
matrix, = inputs
setZeroes(matrix)
assert matrix == expected, (matrix, expected)
| [
{
"point_num": 1,
"id": "all_params_annotated",
"question": "Does every function parameter in this file have a type annotation (excluding self/cls)?",
"answer": true
},
{
"point_num": 2,
"id": "all_return_types_annotated",
"question": "Does every function in this file have a return t... | 3 | set_matrix_zeroes.py | tusharsadhwani/leetcode |
class BaseRecordingProvider:
def __init__(self, event):
self.event = event
super().__init__()
def get_recording(self, submission):
"""
Returns a dictionary {"iframe": …, "csp_header": …}
Both the iframe and the csp_header should be strings.
"""
raise NotImplementedError
| [
{
"point_num": 1,
"id": "more_functions_than_classes",
"question": "Does this file define more functions than classes?",
"answer": true
},
{
"point_num": 2,
"id": "has_multiple_inheritance",
"question": "Does any class in this file use multiple inheritance?",
"answer": false
},... | 3 | src/pretalx/agenda/recording.py | realitygaps/pretalx |
from problems.problem import Problem
def generate_pythagorean_triples(ub: int) -> []:
# https://en.wikipedia.org/wiki/Pythagorean_triple
result = []
for a in range(1, ub):
aa = a * a
b = a + 1
c = b + 1
while c <= ub:
cc = aa + b * b
while c * c < cc:
c += 1
if c * c == cc and c <= ub:
result.append([a + b + c, a, b, c])
b += 1
return result
class Problem039(Problem):
def calculate_answer(self) -> int:
# p = perimeter
# a < b < c = hypotenuse
answer = 0
max_perimeter = 1000
solution_counts = [0 for i in range(max_perimeter + 1)]
triangles = generate_pythagorean_triples(max_perimeter // 2 + 1)
max_solutions = 0
for triangle in triangles:
p = triangle[0]
if p <= max_perimeter:
solution_counts[p] += 1
solutions = solution_counts[p]
if (solutions > max_solutions):
max_solutions = solutions
answer = p
self.print_detail(f"p = {answer}; solutions = {solutions}")
return answer
| [
{
"point_num": 1,
"id": "every_function_under_20_lines",
"question": "Is every function in this file shorter than 20 lines?",
"answer": true
},
{
"point_num": 2,
"id": "all_return_types_annotated",
"question": "Does every function in this file have a return type annotation?",
"an... | 3 | ProjectEulerPython/problems/problem_039.py | geo-desic/project-euler |
from django import forms
from django.utils.safestring import mark_safe
from django.conf import settings
import json
class ImgerWidget(forms.Widget):
def __init__(self, attrs=None, **kwargs):
self.imger_settings = attrs['imger_settings']
super(ImgerWidget, self).__init__(**kwargs)
class Media:
js = (
'imger/js/jquery-1.11.1.min.js',
'imger/js/jquery.nouislider.js',
'imger/js/form2js.js',
'imger/js/canvas-to-blob.min.js',
'imger/js/imger-compress.js',
'imger/js/imger-ui.js',
'imger/js/imger-init.js'
)
css = {
'all': (
'imger/css/bootstrap.css',
'imger/css/bootstrap-theme.css',
'imger/css/imger.css',
'imger/css/jquery.nouislider.css'
)
}
def render(self, name, value, attrs=None):
imger_settings = self.imger_settings
imger_json = json.dumps(imger_settings)
static_url = settings.STATIC_URL
if value is None:
currently = r''
current_link = r'Nothing yet'
else:
currently = r'%s' % (value)
current_link = r'<a href="%s%s">%s</a>' % (
settings.MEDIA_URL,
value,
value
)
if not static_url.endswith('/'):
static_url = r'%s/' % (static_url)
return mark_safe("<p>Currently: %s<br/>Change: <span><button data-static_url=\"%s\" data-imger='%s' class=\"ImgerBrowseBTN\" type=\"button\">Browse</button> <span class=\"ImgerBrowseLabel\">No image selected...</span><input value=\"%s\" class=\"ImgerDataURL\" name=\"%s\" type=\"hidden\" /></span></p>" % (current_link, static_url, imger_json, currently, name))
| [
{
"point_num": 1,
"id": "has_nested_function_def",
"question": "Does this file contain any function defined inside another function?",
"answer": false
},
{
"point_num": 2,
"id": "no_function_exceeds_5_params",
"question": "Does every function in this file take 5 or fewer parameters (... | 3 | imger/widgets.py | 4shaw/django-imger |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import os
import re
from setuptools import setup, find_packages
def get_version():
filepath = os.path.join(
os.path.dirname(__file__),
"interpro7dw",
"__init__.py"
)
with open(filepath) as fh:
text = fh.read()
m = re.search(r'^__version__\s*=\s*[\'"]([^\'"]+)[\'"]', text, re.M)
return m.group(1)
def get_requirements():
filepath = os.path.join(os.path.dirname(__file__), "requirements.txt")
with open(filepath) as fh:
requirements = fh.read().splitlines()
return requirements
setup(
name="interpro7dw",
version=get_version(),
description="",
packages=find_packages(),
include_package_data=True,
zip_safe=False,
install_requires=get_requirements(),
entry_points={
"console_scripts": [
"interprodw-build = interpro7dw.cli:build",
"interprodw-dropdb = interpro7dw.cli:drop_database",
]
}
)
| [
{
"point_num": 1,
"id": "every_function_has_docstring",
"question": "Does every function in this file have a docstring?",
"answer": false
},
{
"point_num": 2,
"id": "every_function_under_20_lines",
"question": "Is every function in this file shorter than 20 lines?",
"answer": tru... | 3 | setup.py | matthiasblum/i7dw |
import sys
import numpy as np
import cv2
import time
import argparse
import yolov2tiny
def resize_input(im):
imsz = cv2.resize(im, (416, 416))
imsz = imsz / 255.0
imsz = imsz[:, :, ::-1]
return np.asarray(imsz, dtype=np.float32)
def image_object_detection(in_image, out_image, debug):
frame = cv2.imread(in_image)
y2t = yolov2tiny.YOLO2_TINY([1, 416, 416, 3], "./y2t_weights.onnx", debug)
t_end2end = time.time()
_frame = resize_input(frame)
_frame = np.expand_dims(_frame, axis=0)
t_inference = time.time()
tout = y2t.inference(_frame)
t_inference = time.time() - t_inference
tout = np.squeeze(tout)
frame = yolov2tiny.postprocessing(
tout, cv2.resize(frame, (416, 416), interpolation=cv2.INTER_CUBIC)
)
t_end2end = time.time() - t_end2end
cv2.imwrite(out_image, frame)
print("DNN inference elapsed time: %.3f" % t_inference)
print("End-to-end elapsed time : %.3f" % t_end2end)
def main():
parser = argparse.ArgumentParser()
parser.add_argument("IN_IMAGE", help="path to the input jpg")
parser.add_argument("OUT_IMAGE", help="path to the output jpg")
parser.add_argument("--debug", action="store_true", help="turn on debug flag")
args = parser.parse_args()
image_object_detection(args.IN_IMAGE, args.OUT_IMAGE, args.debug)
if __name__ == "__main__":
main()
| [
{
"point_num": 1,
"id": "every_function_has_docstring",
"question": "Does every function in this file have a docstring?",
"answer": false
},
{
"point_num": 2,
"id": "no_function_exceeds_5_params",
"question": "Does every function in this file take 5 or fewer parameters (excluding sel... | 3 | proj4/src/__init__.py | hsyis/object-detection-yolo2-tiny |
import os
import mitogen
import mitogen.lxc
try:
any
except NameError:
from mitogen.core import any
import unittest2
import testlib
def has_subseq(seq, subseq):
return any(seq[x:x+len(subseq)] == subseq for x in range(0, len(seq)))
class ConstructorTest(testlib.RouterMixin, testlib.TestCase):
lxc_attach_path = testlib.data_path('stubs/stub-lxc-attach.py')
def test_okay(self):
context = self.router.lxc(
container='container_name',
lxc_attach_path=self.lxc_attach_path,
)
argv = eval(context.call(os.getenv, 'ORIGINAL_ARGV'))
self.assertEquals(argv[0], self.lxc_attach_path)
self.assertTrue('--clear-env' in argv)
self.assertTrue(has_subseq(argv, ['--name', 'container_name']))
def test_eof(self):
e = self.assertRaises(mitogen.parent.EofError,
lambda: self.router.lxc(
container='container_name',
lxc_attach_path='true',
)
)
self.assertTrue(str(e).endswith(mitogen.lxc.Connection.eof_error_hint))
if __name__ == '__main__':
unittest2.main()
| [
{
"point_num": 1,
"id": "every_function_has_docstring",
"question": "Does every function in this file have a docstring?",
"answer": false
},
{
"point_num": 2,
"id": "more_functions_than_classes",
"question": "Does this file define more functions than classes?",
"answer": true
}... | 3 | tests/lxc_test.py | webcoast-dk/mitogen |
from prettytable import PrettyTable
class StackTable(PrettyTable):
def __init__(self, multicloud_stack):
super().__init__()
self.field_names = ["Attribute", "Value"]
self.align["Attribute"] = "r"
self.align["Value"] = "l"
self.add_row(["Name", multicloud_stack.stack_name])
self.add_row(["Desired count", multicloud_stack.count])
self.add_row(["Count parameter", multicloud_stack.count_parameter])
weights = []
for cloud_name, weight in multicloud_stack.weights.items():
weight = round(weight * 100, 1)
weights.append(f"{cloud_name} ({weight}%)")
self.add_row(["Weights", "\n".join(weights)])
class StacksTable(PrettyTable):
def __init__(self, multicloud_stack_list):
super().__init__()
self.field_names = [
"Stack name",
"Desired count",
"Count parameter",
"Clouds",
]
self.align = "l"
for multicloud_stack in multicloud_stack_list["stacks"]:
self.add_row(
[
multicloud_stack.stack_name,
multicloud_stack.count,
multicloud_stack.count_parameter,
", ".join(multicloud_stack.weights.keys()),
]
)
| [
{
"point_num": 1,
"id": "every_class_has_docstring",
"question": "Does every class in this file have a docstring?",
"answer": false
},
{
"point_num": 2,
"id": "has_multiple_inheritance",
"question": "Does any class in this file use multiple inheritance?",
"answer": false
},
{... | 3 | src/heatspreader/shell/views.py | acticloud/heat-spreader |
import re
with open('input.txt') as file:
CMDS = []
for line in file:
cmd, v = line.strip().split(' ')
CMDS.append((cmd, int(v)))
def go(cmds):
visited = []
acc = 0
index = 0
while True:
if index in visited:
return acc
visited.append(index)
try:
cmd, v = cmds[index]
except IndexError:
print(f"Index out of range. Index: {index}, list length: {len(cmds)}, acc: {acc}")
raise
if cmd == 'acc':
acc += v
elif cmd == 'jmp':
index += v - 1
index += 1
v = go(CMDS)
print(v)
def brute_force():
i = -1
while True:
ccc = CMDS.copy()
# change one command
while True:
i += 1
cmd, v = ccc[i]
if cmd == 'nop':
ccc[i] = ('jmp', v)
break
elif cmd == 'jmp':
ccc[i] = ('nop', v)
break
go(ccc)
brute_force() | [
{
"point_num": 1,
"id": "has_nested_function_def",
"question": "Does this file contain any function defined inside another function?",
"answer": false
},
{
"point_num": 2,
"id": "every_function_under_20_lines",
"question": "Is every function in this file shorter than 20 lines?",
... | 3 | 2020/Day 8/8.py | Brollof/Advent-of-Code |
from typing import Any, Dict, Optional, Union
from sqlalchemy.orm import Session
from app.core.security import get_password_hash, verify_password
from app.crud.base import CRUDBase
from app.models.user import User
from app.schemas.user import UserCreate, UserUpdate
class CRUDUser(CRUDBase[User, UserCreate, UserUpdate]):
def get_by_email(self, db: Session, *, email: str) -> Optional[User]:
return db.query(User).filter(User.email == email).first()
def create(self, db: Session, *, obj_in: UserCreate) -> User:
db_obj = User(email=obj_in.email,
hashed_password=get_password_hash(obj_in.password),
full_name=obj_in.full_name,
is_superuser=obj_in.is_superuser)
db.add(db_obj)
db.commit()
db.refresh(db_obj)
return db_obj
def update(self, db: Session, *, db_obj: User,
obj_in: Union[UserUpdate, Dict[str, Any]]) -> User:
if isinstance(obj_in, dict):
update_data = obj_in
else:
update_data = obj_in.dict(exclude_unset=True)
if update_data["password"]:
hashed_password = get_password_hash(update_data["password"])
del update_data["password"]
update_data["hashed_password"] = hashed_password
return super().update(db, db_obj=db_obj, obj_in=update_data)
def authenticate(self, db: Session, *, email: str,
password: str) -> Optional[User]:
user = self.get_by_email(db, email=email)
if not user:
return None
if not verify_password(password, user.hashed_password):
return None
return user
def is_active(self, user: User) -> bool:
return user.is_active
def is_superuser(self, user: User) -> bool:
return user.is_superuser
user = CRUDUser(User)
| [
{
"point_num": 1,
"id": "all_params_annotated",
"question": "Does every function parameter in this file have a type annotation (excluding self/cls)?",
"answer": true
},
{
"point_num": 2,
"id": "every_function_under_20_lines",
"question": "Is every function in this file shorter than 2... | 3 | backend/app/crud/crud_user.py | ralphribeiro/debito_automatico |
### This gears will pre-compute (encode) all sentences using BERT tokenizer for QA
tokenizer = None
def loadTokeniser():
global tokenizer
from transformers import BertTokenizerFast
tokenizer = BertTokenizerFast.from_pretrained("bert-large-uncased-whole-word-masking-finetuned-squad")
# tokenizer = AutoTokenizer.from_pretrained("emilyalsentzer/Bio_ClinicalBERT")
return tokenizer
def remove_prefix(text, prefix):
return text[text.startswith(prefix) and len(prefix):]
def parse_sentence(record):
import redisAI
import numpy as np
global tokenizer
if not tokenizer:
tokenizer=loadTokeniser()
hash_tag="{%s}" % hashtag()
for idx, value in sorted(record['value'].items(), key=lambda item: int(item[0])):
tokens = tokenizer.encode(value, add_special_tokens=False, max_length=511, truncation=True, return_tensors="np")
tokens = np.append(tokens,tokenizer.sep_token_id).astype(np.int64)
tensor=redisAI.createTensorFromBlob('INT64', tokens.shape, tokens.tobytes())
key_prefix='sentence:'
sentence_key=remove_prefix(record['key'],key_prefix)
token_key = f"tokenized:bert:qa:{sentence_key}:{idx}"
# execute('SET', token_key, tokens)
redisAI.setTensorInKey(token_key, tensor)
execute('SADD',f'processed_docs_stage3_tokenized{hash_tag}', token_key)
gb = GB()
gb.foreach(parse_sentence)
gb.count()
gb.run('sentence:*',keyTypes=['hash']) | [
{
"point_num": 1,
"id": "all_function_names_snake_case",
"question": "Are all function names in this file written in snake_case?",
"answer": false
},
{
"point_num": 2,
"id": "no_function_exceeds_5_params",
"question": "Does every function in this file take 5 or fewer parameters (excl... | 3 | the-pattern-api/qasearch/tokeniser_gears_redisai.py | redis-developer/the-pattern |
"""
Module: 'math' on esp32 1.10.0
"""
# MCU: (sysname='esp32', nodename='esp32', release='1.10.0', version='v1.10 on 2019-01-25', machine='ESP32 module with ESP32')
# Stubber: 1.3.2
def acos():
pass
def acosh():
pass
def asin():
pass
def asinh():
pass
def atan():
pass
def atan2():
pass
def atanh():
pass
def ceil():
pass
def copysign():
pass
def cos():
pass
def cosh():
pass
def degrees():
pass
e = 2.718282
def erf():
pass
def erfc():
pass
def exp():
pass
def expm1():
pass
def fabs():
pass
def floor():
pass
def fmod():
pass
def frexp():
pass
def gamma():
pass
def isfinite():
pass
def isinf():
pass
def isnan():
pass
def ldexp():
pass
def lgamma():
pass
def log():
pass
def log10():
pass
def log2():
pass
def modf():
pass
pi = 3.141593
def pow():
pass
def radians():
pass
def sin():
pass
def sinh():
pass
def sqrt():
pass
def tan():
pass
def tanh():
pass
def trunc():
pass
| [
{
"point_num": 1,
"id": "every_function_has_docstring",
"question": "Does every function in this file have a docstring?",
"answer": false
},
{
"point_num": 2,
"id": "has_nested_function_def",
"question": "Does this file contain any function defined inside another function?",
"ans... | 3 | stubs/micropython-esp32-1_10/math.py | RonaldHiemstra/micropython-stubs |
from sys import stderr
class Logger:
ERROR=0
QUIET=0
BASIC=1
WARNING=2
DETAIL=3
DEBUG=4
io_level = 0
@classmethod
def set_io_level(cls, lev):
cls.io_level = lev
@classmethod
def basic(cls, message):
if cls.io_level >= cls.BASIC:
print(message)
@classmethod
def warning(cls, message):
if cls.io_level >= cls.WARNING:
print("Warning! " + message)
@classmethod
def error(cls, message):
print("Error! " + message, file = stderr)
@classmethod
def detail(cls, message):
if cls.io_level >= cls.DETAIL:
print(message)
@classmethod
def debug(cls, message):
if cls.io_level >= cls.DEBUG:
print(message) | [
{
"point_num": 1,
"id": "all_function_names_snake_case",
"question": "Are all function names in this file written in snake_case?",
"answer": true
},
{
"point_num": 2,
"id": "every_function_has_docstring",
"question": "Does every function in this file have a docstring?",
"answer":... | 3 | xmot/logger.py | velatkilic/mot |
from abc import abstractmethod
from utils.utils import init_sess
class Gan:
def __init__(self):
self.oracle = None
self.generator = None
self.discriminator = None
self.gen_data_loader = None
self.dis_data_loader = None
self.oracle_data_loader = None
self.sess = init_sess()
self.metrics = list()
self.epoch = 0
self.pre_epoch_num = 80
self.adversarial_epoch_num = 100
self.log = None
self.reward = None
def set_oracle(self, oracle):
self.oracle = oracle
def set_generator(self, generator):
self.generator = generator
def set_discriminator(self, discriminator):
self.discriminator = discriminator
def set_data_loader(self, gen_loader, dis_loader, oracle_loader):
self.gen_data_loader = gen_loader
self.dis_data_loader = dis_loader
self.oracle_data_loader = oracle_loader
def set_sess(self, sess):
self.sess = sess
def add_metric(self, metric):
self.metrics.append(metric)
def add_epoch(self):
self.epoch += 1
def reset_epoch(self):
# current not in use
return
self.epoch = 0
def evaluate(self):
from time import time
log = "epoch:" + str(self.epoch) + '\t'
scores = list()
scores.append(self.epoch)
for metric in self.metrics:
tic = time()
score = metric.get_score()
log += metric.get_name() + ":" + str(score) + '\t'
toc = time()
print('time elapsed of ' + metric.get_name() + ': ' + str(toc - tic))
scores.append(score)
print(log)
return scores
def check_valid(self):
# TODO
pass
@abstractmethod
def train_oracle(self):
pass
def train_cfg(self):
pass
def train_real(self):
pass
| [
{
"point_num": 1,
"id": "has_nested_function_def",
"question": "Does this file contain any function defined inside another function?",
"answer": false
},
{
"point_num": 2,
"id": "all_function_names_snake_case",
"question": "Are all function names in this file written in snake_case?",... | 3 | models/Gan.py | debashishc/texygan-analysis |
import pytest
import os
import sys
sys.path.append (os.getcwd () + "/src")
from config import Config
class TestConfig:
@pytest.mark.parametrize ("string, pickit_type, include, exclude, include_type",
[
("0",0, [], [], "OR"), ("1",1, [], [], "OR"), ("2",2, [], [], "OR"),
("1, (AMAZONSKILLER, ASSASINSKILLER, BARBARIANSKILLER, DRUIDSKILLER, NECROMANCERSKILLER, PALADINSKILLER, SORCERESSSKILLER, (LIFE, 3_MAXIMUM_DAMAGE, ATTACK_RATING))",
1,
[["AMAZONSKILLER"], ["ASSASINSKILLER"], ["BARBARIANSKILLER"], ["DRUIDSKILLER"], ["NECROMANCERSKILLER"], ["PALADINSKILLER"], ["SORCERESSSKILLER"], ["LIFE", "3_MAXIMUM_DAMAGE", "ATTACK_RATING"]],
[],
"OR"
),
(
"1, AND(15_INCREASED_ATTACK_SPEED, ENHANCED_DAMAGE)", 1, [["15_INCREASED_ATTACK_SPEED"], ["ENHANCED_DAMAGE"]], [],"AND"),
("1, SOCKETED_4, ETHEREAL", 1, [["SOCKETED_4"]], [["ETHEREAL"]], "OR"),
("1, 2_ASSASIN_SKILLS", 1, [["2_ASSASIN_SKILLS"]], [], "OR"),
("1, ENHANCED_DEFENSE, ETHEREAL", 1, [["ENHANCED_DEFENSE"]], [["ETHEREAL"]], "OR"),
("1, AND(LIGHTNING_RESIST, FIRE_RESIST, COLD_RESIST), ETHEREAL", 1, [["LIGHTNING_RESIST"], ["FIRE_RESIST"], ["COLD_RESIST"]], [["ETHEREAL"]], "AND"),
("1, AND(ENHANCED_DEFENSE, SOCKETED), (SOCKETED_1, SOCKETED_2)", 1, [["ENHANCED_DEFENSE"], ["SOCKETED"]], [["SOCKETED_1"], ["SOCKETED_2"]],"AND")
])
def test_string_to_item_prop (self, string, pickit_type, include, exclude, include_type):
cfg = Config()
Itemprops = cfg.string_to_item_prop (string)
assert (Itemprops.pickit_type == pickit_type)
assert (Itemprops.include == include)
assert (Itemprops.exclude == exclude)
assert (Itemprops.include_type == include_type)
def test_123 (self):
print (os.getcwd())
print (sys.path) | [
{
"point_num": 1,
"id": "all_return_types_annotated",
"question": "Does every function in this file have a return type annotation?",
"answer": false
},
{
"point_num": 2,
"id": "all_params_annotated",
"question": "Does every function parameter in this file have a type annotation (excl... | 3 | test/config_test.py | Cho0joy/botty |
from pydantic import BaseModel
from .utils import BaseEvent
class MainPublisherEvent(BaseEvent):
pass
class CheckStatus(MainPublisherEvent):
channel: str
class WaitLiveVideo(MainPublisherEvent):
pass
class WaitStream(MainPublisherEvent):
time: int
class DownloaderEvent(BaseEvent):
pass
class StartDownloading(DownloaderEvent):
id: str
class PlaylistUpdate(DownloaderEvent):
total_size: int
to_load: int
class DownloadedChunk(DownloaderEvent):
pass
class StopDownloading(DownloaderEvent):
pass
class DownloadingProgress(BaseModel): # type: ignore
total_segments: int = 0
total_downloaded_segments: int = 0
last_chunk_size: int = 0
downloaded_segments: int = 0
def chunk_loaded(self) -> None:
self.downloaded_segments += 1
self.total_downloaded_segments += 1
class ExceptionEvent(BaseEvent):
message: str
| [
{
"point_num": 1,
"id": "has_multiple_inheritance",
"question": "Does any class in this file use multiple inheritance?",
"answer": false
},
{
"point_num": 2,
"id": "more_functions_than_classes",
"question": "Does this file define more functions than classes?",
"answer": false
}... | 3 | twlived/events.py | tausackhn/twlived |
import pytest
@pytest.mark.slow
def test_long_computation():
...
@pytest.mark.timeout(10, method="thread")
def test_topology_sort():
...
def test_foo():
pass
| [
{
"point_num": 1,
"id": "has_nested_function_def",
"question": "Does this file contain any function defined inside another function?",
"answer": false
},
{
"point_num": 2,
"id": "every_function_has_docstring",
"question": "Does every function in this file have a docstring?",
"ans... | 3 | Code Bundle/Chapter03/tests/test_slow.py | ghanigreen/pytest_code |
import os
def main():
try:
while True:
while True:
mode = input('Mode: ').lower()
if 'search'.startswith(mode):
mode = False
break
elif 'destroy'.startswith(mode):
mode = True
break
print('"search" or "destroy"')
path = input('Path: ')
extention = input('Extention: ')
for path_name in search(path, extention, mode):
print('Found:', path_name)
except:
pass
def search(path, extention, destroy):
assert os.path.isdir(path)
path_list = list()
for name in os.listdir(path):
path_name = os.path.join(path, name)
try:
if os.path.isdir(path_name):
path_list += search(path_name, extention, destroy)
elif os.path.isfile(path_name):
if path_name.endswith(extention) or not extention:
if destroy:
os.remove(path_name)
else:
path_list.append(path_name)
except:
print('Error:', path_name)
return path_list
if __name__ == '__main__':
main()
| [
{
"point_num": 1,
"id": "any_function_over_40_lines",
"question": "Is any function in this file longer than 40 lines?",
"answer": false
},
{
"point_num": 2,
"id": "no_function_exceeds_5_params",
"question": "Does every function in this file take 5 or fewer parameters (excluding self/... | 3 | tests/data23/recipe-511429.py | JohannesBuchner/pystrict3 |
import ast, sys
ast.FunctionDef
class AstTransformer():
# Processes an individual AST node attribute
def _processAttribute(self, attrib):
return self.visit(attrib) if isinstance(attrib, ast.AST) else attrib
# Processes an AST node attribute that might be a list
def _processAttributeList(self, attrib):
return list([self._processAttribute(a) for a in attrib]) if isinstance(attrib, list) else self._processAttribute(attrib)
def visit(self, node):
transformed = {'type': node.__class__.__name__}
for key, value in node.__dict__.items():
transformed[key] = self._processAttributeList(value)
return transformed
| [
{
"point_num": 1,
"id": "all_function_names_snake_case",
"question": "Are all function names in this file written in snake_case?",
"answer": false
},
{
"point_num": 2,
"id": "more_functions_than_classes",
"question": "Does this file define more functions than classes?",
"answer":... | 3 | server/languages/python/module/AstTransformer.py | adamrehn/language-toolbox |
class QuizBrain:
def __init__(self, question_list):
self.question_number = 0
self.question_list = question_list
self.score = 0
def next_question(self):
current_q = self.question_list[self.question_number]
self.question_number += 1
user_answer = input(f"Q.{self.question_number}: {current_q.question} (True/False)? ")
self.check_answer(user_answer, current_q.correct_answer)
def still_has_questions(self):
return self.question_number < len(self.question_list)
def check_answer(self, user_answer, correct_answer):
if user_answer.lower() == correct_answer.lower():
self.score += 1
print("Correct! ✅")
else:
print("Wrong Answer! ❌")
print(f"The correct answer was: {correct_answer}")
print(f"Your current score is {self.score}/{self.question_number}\n")
def final_score(self):
print("-----------------------\nYou've completed the quiz!")
print(f"Your final score was {self.score}/{self.question_number}\n") | [
{
"point_num": 1,
"id": "more_functions_than_classes",
"question": "Does this file define more functions than classes?",
"answer": true
},
{
"point_num": 2,
"id": "all_params_annotated",
"question": "Does every function parameter in this file have a type annotation (excluding self/cl... | 3 | 100_days_of_code/Intermediate/day_17/quiz_brain.py | Tiago-S-Ribeiro/Python-Pro-Bootcamp |
from flask import current_app, render_template
from flask_restful import Resource, reqparse
from flask_mail import Message
from utils.authorizations import admin_required
from models.user import UserModel
class Email(Resource):
NO_REPLY = "noreply@codeforpdx.org" # Should this be dwellingly address?
parser = reqparse.RequestParser()
parser.add_argument("user_id", required=True)
parser.add_argument("subject", required=True)
parser.add_argument("body", required=True)
@admin_required
def post(self):
data = Email.parser.parse_args()
user = UserModel.find_by_id(data.user_id)
message = Message(data.subject, sender=Email.NO_REPLY, body=data.body)
message.recipients = [user.email]
current_app.mail.send(message)
return {"message": "Message sent"}
@staticmethod
def send_reset_password_msg(user):
token = user.reset_password_token()
msg = Message(
"Reset password for Dwellingly",
sender=Email.NO_REPLY,
recipients=[user.email],
)
msg.body = render_template("emails/reset_msg.txt", user=user, token=token)
msg.html = render_template("emails/reset_msg.html", user=user, token=token)
current_app.mail.send(msg)
@staticmethod
def send_user_invite_msg(user):
token = user.reset_password_token()
msg = Message(
"Create Your Dwellingly Account",
sender=Email.NO_REPLY,
recipients=[user.email],
)
msg.body = render_template("emails/invite_user_msg.txt", user=user, token=token)
msg.html = render_template(
"emails/invite_user_msg.html", user=user, token=token
)
current_app.mail.send(msg)
| [
{
"point_num": 1,
"id": "more_functions_than_classes",
"question": "Does this file define more functions than classes?",
"answer": true
},
{
"point_num": 2,
"id": "every_function_under_20_lines",
"question": "Is every function in this file shorter than 20 lines?",
"answer": true
... | 3 | resources/email.py | donovan-PNW/dwellinglybackend |
from abstract.expresion import *
from tools.tabla_tipos import *
from abstract.retorno import *
class tableId(expresion):
def __init__(self, valor, line, column, tipo, num_nodo):
super().__init__(line, column)
self.valor = valor
self.tipo = tipo
#Nodo AST
self.nodo = nodo_AST('ID', num_nodo)
self.nodo.hijos.append(nodo_AST(str(valor), num_nodo+1))
#Gramatica
self.grammar_ = '<TR><TD> ID ::= ' + str(valor) +' </TD><TD> ID = new ID(' + str(valor) + '); </TD></TR>'
def ejecutar(self):
return retorno(self.valor, self.tipo) | [
{
"point_num": 1,
"id": "every_function_has_docstring",
"question": "Does every function in this file have a docstring?",
"answer": false
},
{
"point_num": 2,
"id": "more_functions_than_classes",
"question": "Does this file define more functions than classes?",
"answer": true
}... | 3 | parser/team23/expresion/tableId.py | 18SebastianVC/tytus |
class Graph:
graph_dict={}
def addEdge(self,node,neighbour):
if node not in self.graph_dict:
self.graph_dict[node]=[neighbour]
else:
self.graph_dict[node].append(neighbour)
def show_edges(self):
for node in self.graph_dict:
neighbours = ""
for neighbour in self.graph_dict[node]:
neighbours += "{} ".format(neighbour)
print("{} --> {}".format(node, neighbours))
def find_path(self,start,end,path=[]):
path = path + [start]
if start==end:
return path
for node in self.graph_dict[start]:
if node not in path:
newPath=self.find_path(node,end,path)
if newPath:
return newPath
return None
g= Graph()
g.addEdge('1', '2')
g.addEdge('1', '3')
g.addEdge('1', '4')
g.addEdge('2', '1')
g.addEdge('2', '4')
g.addEdge('3', '1')
g.addEdge('3', '4')
g.addEdge('4', '1')
g.addEdge('4', '2')
g.addEdge('4', '3')
g.show_edges()
print(g.find_path('3', '2'))
| [
{
"point_num": 1,
"id": "more_functions_than_classes",
"question": "Does this file define more functions than classes?",
"answer": true
},
{
"point_num": 2,
"id": "no_function_exceeds_5_params",
"question": "Does every function in this file take 5 or fewer parameters (excluding self/... | 3 | main/codeSamples/DataStructures/graph.py | JKUATSES/dataStructuresAlgorithms |
# -*- coding: utf-8 -*-
# This file is part of the Ingram Micro Cloud Blue Connect connect-cli.
# Copyright (c) 2021 Ingram Micro. All Rights Reserved.
import os
import sys
def unimport():
for m in ('connect.cli.plugins.play.commands', 'connect.cli.ccli'):
if m in sys.modules:
del sys.modules[m]
def test_play_commands(fs, mocker):
os.environ['CCLI_SCRIPTS'] = os.path.join(os.path.dirname(__file__), 'scripts')
unimport()
from connect.cli.ccli import main
mocker.patch('connect.cli.plugins.play.commands.PlayOptions.context_file', None)
mocker.patch('sys.argv', ['cmd', 'play', 'script1'])
main()
def test_play_commands_rel(fs, mocker):
os.environ['CCLI_SCRIPTS'] = 'tests/plugins/play/scripts'
unimport()
from connect.cli.ccli import main
mocker.patch('connect.cli.plugins.play.commands.PlayOptions.context_file', None)
mocker.patch('sys.argv', ['cmd', 'play', 'script1'])
main()
| [
{
"point_num": 1,
"id": "every_function_under_20_lines",
"question": "Is every function in this file shorter than 20 lines?",
"answer": true
},
{
"point_num": 2,
"id": "no_function_exceeds_5_params",
"question": "Does every function in this file take 5 or fewer parameters (excluding ... | 3 | tests/plugins/play/test_play_commands.py | cloudblue/product-sync |
import typing
__all__ = ['remove_suffix', 'remove_prefix', 'row_pad_prefix']
def remove_suffix(string: str, suffix: str):
if string.endswith(suffix):
return string[:len(string) - len(suffix)]
return string
def remove_prefix(string: str, prefix: str):
if string.startswith(prefix):
return string[len(prefix):]
return string
def row_pad_prefix(strings: str or typing.List[str], prefix: str):
if isinstance(strings, str):
strings = strings.split('\n')
return '\n'.join([prefix + string for string in strings])
| [
{
"point_num": 1,
"id": "all_function_names_snake_case",
"question": "Are all function names in this file written in snake_case?",
"answer": true
},
{
"point_num": 2,
"id": "every_function_has_docstring",
"question": "Does every function in this file have a docstring?",
"answer":... | 3 | third_party/pytools/pytools/pyutils/misc/string/helpers.py | Kipsora/docker-curator |
"""Platzigram middleware catalog."""
# Django
from django.shortcuts import redirect
from django.urls import reverse
class ProfileCompletionMiddleware:
"""Profile completion middleware.
Ensure every user that is interacting with the platform
have their profile picture and biography.
"""
def __init__(self, get_response):
"""Middleware initialization."""
self.get_response = get_response
def __call__(self, request):
"""Code to be executed for each request before the view is called."""
if not request.user.is_anonymous:
if not request.user.is_staff:
profile = request.user.profile
if not profile.picture or not profile.biography:
if request.path not in [reverse('users:update'), reverse('users:logout')]:
return redirect('users:update')
response = self.get_response(request)
return response | [
{
"point_num": 1,
"id": "has_multiple_inheritance",
"question": "Does any class in this file use multiple inheritance?",
"answer": false
},
{
"point_num": 2,
"id": "more_functions_than_classes",
"question": "Does this file define more functions than classes?",
"answer": true
},... | 3 | platzigram/middleware.py | eicarranza/platzigram |
from typing import List
from rich import box
from rich.panel import Panel
from rich.table import Table
from vaccibot.models import AppointmentMatch
# ----- Data ----- #
COLUMNS_SETTINGS = {
"CENTER": dict(
justify="left",
header_style="bold",
style="bold",
), # no_wrap=True),
"CITY": dict(justify="center", header_style="magenta", style="magenta", no_wrap=True),
"DISTANCE (KM)": dict(
justify="center", header_style="medium_turquoise", style="medium_turquoise", no_wrap=True
),
"NEXT APPOINTMENT": dict(justify="right", header_style="bold green3", style="bold green3", no_wrap=True),
"AVAILABLE VACCINES": dict(
justify="right", header_style="bold dark_orange3", style="bold dark_orange3", no_wrap=True
),
"URL": dict(
justify="right", header_style="bold cornflower_blue", style="bold cornflower_blue", no_wrap=True
),
}
# ----- Helpers ----- #
def _default_table() -> Table:
"""Create the default structure for the Tasks Table, hard coded columns and no rows added."""
table = Table(box=box.SIMPLE_HEAVY)
for header, header_col_settings in COLUMNS_SETTINGS.items():
table.add_column(header, **header_col_settings)
return table
def make_department_table(appointments: List[AppointmentMatch]) -> Table:
table = _default_table()
for appointment in appointments:
table.add_row(
appointment.center_name,
appointment.center_city,
str(appointment.distance_km),
appointment.next_appointment_time,
",".join(v for v in appointment.vaccines),
str(appointment.url),
)
return table
| [
{
"point_num": 1,
"id": "every_function_has_docstring",
"question": "Does every function in this file have a docstring?",
"answer": false
},
{
"point_num": 2,
"id": "every_function_under_20_lines",
"question": "Is every function in this file shorter than 20 lines?",
"answer": tru... | 3 | vaccibot/render.py | fsoubelet/vaccibot |
import argparse
import os
import torch
from tinynn.converter import TFLiteConverter
from tinynn.util.converter_util import export_converter_files, parse_config
CURRENT_PATH = os.path.abspath(os.path.dirname(__file__))
def export_files():
from models.cifar10.mobilenet import DEFAULT_STATE_DICT, Mobilenet
model = Mobilenet()
model.load_state_dict(torch.load(DEFAULT_STATE_DICT))
model.cpu()
model.eval()
dummy_input = torch.rand((1, 3, 224, 224))
export_dir = 'out'
export_name = 'mbv1_224'
export_converter_files(model, dummy_input, export_dir, export_name)
json_file = os.path.join(CURRENT_PATH, export_dir, f'{export_name}.json')
return json_file
def main_worker(args):
json_file = args.path
if json_file is None:
json_file = export_files()
# We will try to parse the config and prepare the inputs for you.
# If you want to use your own inputs, just assign it to `generated_inputs` here.
torch_model_path, tflite_model_path, input_transpose, generated_inputs, output_transpose = parse_config(json_file)
# When converting quantized models, please ensure the quantization backend is set.
torch.backends.quantized.engine = 'qnnpack'
with torch.no_grad():
model = torch.jit.load(torch_model_path)
model.cpu()
model.eval()
# Pay attention to the arguments `input_transpose` and `output_transpose` in the next line.
# By default, we will perform nchw -> nhwc transpose every 4D input and output tensor.
# If you don't want to do this, please pass in False for them.
converter = TFLiteConverter(model, generated_inputs, tflite_model_path, input_transpose, output_transpose)
converter.convert()
if __name__ == '__main__':
parser = argparse.ArgumentParser()
parser.add_argument('--path', metavar='DIR', default=None, help='path to the config (.json)')
args = parser.parse_args()
main_worker(args)
| [
{
"point_num": 1,
"id": "every_function_has_docstring",
"question": "Does every function in this file have a docstring?",
"answer": false
},
{
"point_num": 2,
"id": "has_nested_function_def",
"question": "Does this file contain any function defined inside another function?",
"ans... | 3 | examples/converter/convert_from_json.py | steven0129/TinyNeuralNetwork |
import pytest
import numpy as np
import random
from cem.backend import backend, NumpyBackend
try:
from cem.backend import CuPyBackend
import cupy as cp
skip_cupy_test = False
except ImportError:
skip_cupy_test = True
def test_numpy_backend():
X = random.randint(0, 10) * 10
Y = random.randint(0, 10) * 10
zeros = backend.zeros((X, Y))
ones = backend.ones((X, Y))
assert isinstance(backend, NumpyBackend)
assert isinstance(zeros, np.ndarray)
assert isinstance(ones, np.ndarray)
assert backend.int == np.int64
assert backend.float == np.float64
assert zeros.shape == (X, Y)
assert ones.shape == (X, Y)
assert backend.sin(ones).any() == np.sin(ones).any()
assert backend.cos(ones).any() == np.cos(ones).any()
@pytest.mark.skipif(skip_cupy_test, reason='CuPy is not installed.')
def test_cupy_backend():
backend.set_backend('cupy')
X = random.randint(0, 10) * 10
Y = random.randint(0, 10) * 10
zeros = backend.zeros((X, Y))
ones = backend.ones((X, Y))
assert isinstance(backend, CuPyBackend)
assert isinstance(zeros, cp.ndarray)
assert isinstance(ones, cp.ndarray)
assert backend.int == cp.int64
assert backend.float == cp.float64
assert zeros.shape == (X, Y)
assert ones.shape == (X, Y)
assert backend.sin(ones).all() == cp.sin(ones).all()
assert backend.cos(ones).all() == cp.cos(ones).all()
@pytest.mark.skipif(skip_cupy_test, reason='CuPy is not installed.')
def test_set_backend():
backend.set_backend('numpy')
assert isinstance(backend, NumpyBackend)
backend.set_backend('cupy')
assert isinstance(backend, CuPyBackend)
| [
{
"point_num": 1,
"id": "every_function_has_docstring",
"question": "Does every function in this file have a docstring?",
"answer": false
},
{
"point_num": 2,
"id": "all_function_names_snake_case",
"question": "Are all function names in this file written in snake_case?",
"answer"... | 3 | tests/test_backend.py | dantehustg/cem |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
def assign(service,arg):
if service == "qibocms":
return True, arg
def audit(arg):
payload = "f/job.php?job=getzone&typeid=zone&fup=..\..\do\js&id=514125&webdb[web_open]=1&webdb[cache_time_js]=-1&pre=qb_label%20where%20lid=-1%20UNION%20SELECT%201,2,3,4,5,6,0,md5(233),9,10,11,12,13,14,15,16,17,18,19%23"
url = arg + payload
code, head, res, errcode,finalurl = curl.curl('"%s"' % url)
if code == 200:
if 'e165421110ba03099a1c0393373c5b43' in res:
security_hole(url)
if __name__ == "__main__":
from dummy import *
audit(assign('qibocms', 'http://www.bangniban.cc/')[1])
| [
{
"point_num": 1,
"id": "all_params_annotated",
"question": "Does every function parameter in this file have a type annotation (excluding self/cls)?",
"answer": false
},
{
"point_num": 2,
"id": "every_function_has_docstring",
"question": "Does every function in this file have a docst... | 3 | Bugscan_exploits-master/exp_list/exp-356.py | csadsl/poc_exp |
import sys
import chess
import re
def print_game(game):
'''
print Game data and moves
'''
game = " ".join(game)
game = re.sub("\d+\.", " ", game).strip()
moves = re.split("\s+", game)
board = chess.Bitboard()
end = moves[-1]
moves = moves[:-1]
after_str = str(board)
for move in moves:
mv = board.push_san(move)
after_str = str(board)
# Piece that moved
#piece = board.piece_at(mv.to_square)
sys.stdout.write("%s\t%s\t%s\t%s\n"%(after_str, board.is_game_over(), board.is_checkmate(), board.is_stalemate()))
def main(argv):
'''
Process pgn file - ignores meta data.
For each move in the game prints a line -
piece which moved\tboard status before move\tboard status after move
'''
game_start = False
for line in sys.stdin:
line = line.strip()
if line and not line.startswith("["):
if not game_start:
game_start = True
game = []
game.append(line)
elif game_start:
try:
print_game(game)
except:
sys.stderr.write(str(game)+"\n")
game_start = False
if __name__ == '__main__':
sys.stderr.write("Chess data extractor arguments: %s\n"%sys.argv)
main(sys.argv) | [
{
"point_num": 1,
"id": "every_function_has_docstring",
"question": "Does every function in this file have a docstring?",
"answer": true
},
{
"point_num": 2,
"id": "every_function_under_20_lines",
"question": "Is every function in this file shorter than 20 lines?",
"answer": fals... | 3 | chess/checkmateclassifier/process_data.py | nivm/learningchess |
#
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""Rename ``concurrency`` column in ``dag`` table to`` max_active_tasks``
Revision ID: 30867afad44a
Revises: e9304a3141f0
Create Date: 2021-06-04 22:11:19.849981
"""
import sqlalchemy as sa
from alembic import op
# revision identifiers, used by Alembic.
revision = '30867afad44a'
down_revision = 'e9304a3141f0'
branch_labels = None
depends_on = None
airflow_version = '2.2.0'
def upgrade():
"""Apply Rename ``concurrency`` column in ``dag`` table to`` max_active_tasks``"""
conn = op.get_bind()
is_sqlite = bool(conn.dialect.name == "sqlite")
if is_sqlite:
op.execute("PRAGMA foreign_keys=off")
with op.batch_alter_table('dag') as batch_op:
batch_op.alter_column(
'concurrency',
new_column_name='max_active_tasks',
type_=sa.Integer(),
nullable=False,
)
if is_sqlite:
op.execute("PRAGMA foreign_keys=on")
def downgrade():
"""Unapply Rename ``concurrency`` column in ``dag`` table to`` max_active_tasks``"""
with op.batch_alter_table('dag') as batch_op:
batch_op.alter_column(
'max_active_tasks',
new_column_name='concurrency',
type_=sa.Integer(),
nullable=False,
)
| [
{
"point_num": 1,
"id": "every_function_has_docstring",
"question": "Does every function in this file have a docstring?",
"answer": true
},
{
"point_num": 2,
"id": "all_return_types_annotated",
"question": "Does every function in this file have a return type annotation?",
"answer... | 3 | airflow/migrations/versions/0090_30867afad44a_rename_concurrency_column_in_dag_table_.py | npodewitz/airflow |
from injector import inject
from backend_application.repository import DatabaseRepository
class ProjectService:
@inject
def __init__(self, repository: DatabaseRepository):
self.repository = repository
"""
returns project relevant data, like application
domains, projects per application domain etc...
"""
def get_portfolios(self):
return self.repository.get_portfolios()
def get_projects_by_portfolio(self, portfolio_key: str):
return self.repository.get_projects_by_portfolio(portfolio_key)
| [
{
"point_num": 1,
"id": "all_function_names_snake_case",
"question": "Are all function names in this file written in snake_case?",
"answer": true
},
{
"point_num": 2,
"id": "has_multiple_inheritance",
"question": "Does any class in this file use multiple inheritance?",
"answer": ... | 3 | visualization/backend/backend_application/service/project_service.py | INSO-TUWien/portfoliometrix |
#!/usr/bin/env python3
# Copyright (c) 2016-2019 The Bitcoin Core developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
"""Test using named arguments for RPCs."""
from test_framework.test_framework import GuldenTestFramework
from test_framework.util import (
assert_equal,
assert_raises_rpc_error,
)
class NamedArgumentTest(GuldenTestFramework):
def set_test_params(self):
self.num_nodes = 1
def run_test(self):
node = self.nodes[0]
h = node.help(command='getblockchaininfo')
assert h.startswith('getblockchaininfo\n')
assert_raises_rpc_error(-8, 'Unknown named parameter', node.help, random='getblockchaininfo')
h = node.getblockhash(height=0)
node.getblock(blockhash=h)
assert_equal(node.echo(), [])
assert_equal(node.echo(arg0=0,arg9=9), [0] + [None]*8 + [9])
assert_equal(node.echo(arg1=1), [None, 1])
assert_equal(node.echo(arg9=None), [None]*10)
assert_equal(node.echo(arg0=0,arg3=3,arg9=9), [0] + [None]*2 + [3] + [None]*5 + [9])
if __name__ == '__main__':
NamedArgumentTest().main()
| [
{
"point_num": 1,
"id": "every_function_has_docstring",
"question": "Does every function in this file have a docstring?",
"answer": false
},
{
"point_num": 2,
"id": "more_functions_than_classes",
"question": "Does this file define more functions than classes?",
"answer": true
}... | 3 | test/functional/rpc_named_arguments.py | orobio/gulden-official |
from ward import raises, test
from learning.entities import Todo
from learning.errors import TitleLengthError
@test("create todo")
def _():
todo = Todo("test todo")
assert todo.title == "test todo"
assert not todo.id
assert not todo.done
assert not todo.created_at
@test("title length is less than 0")
def _():
with raises(TitleLengthError) as exc:
Todo("")
assert str(exc.raised) == "title length is not between 1 and 256, length: 0"
for title, id in [("a" * 257, "single byte"), ("あ" * 257, "multi byte")]:
@test(f"title length is greater than 256 [{id}]")
def _():
with raises(TitleLengthError) as exc:
Todo(title)
assert str(exc.raised) == "title length is not between 1 and 256, length: 257"
| [
{
"point_num": 1,
"id": "has_nested_function_def",
"question": "Does this file contain any function defined inside another function?",
"answer": false
},
{
"point_num": 2,
"id": "every_function_has_docstring",
"question": "Does every function in this file have a docstring?",
"ans... | 3 | ward_tests/test_entities.py | koichiro8/learning |
from django.core.management.base import BaseCommand
from ksiazkaadresowa.models import Person
class Command(BaseCommand):
help = 'Moj tekst pomocy'
def add_arguments(self, parser):
parser.add_argument(
'--file',
dest='file',
nargs='?',
help='Log File',
)
parser.add_argument(
'--format',
nargs='?',
dest='format',
help='Log File Format',
)
def handle(self, *args, **options):
filename = options['file']
format = options['format']
content = []
with open(filename) as file:
for line in file:
line = self.parse_line(line)
content.append(line)
print('\n'.join(content))
return
for p in Person.objects.all():
p.first_name = p.first_name.title()
p.last_name = p.last_name.title()
p.save()
def parse_line(self, line):
return line.upper()
| [
{
"point_num": 1,
"id": "more_functions_than_classes",
"question": "Does this file define more functions than classes?",
"answer": true
},
{
"point_num": 2,
"id": "all_function_names_snake_case",
"question": "Are all function names in this file written in snake_case?",
"answer": ... | 3 | django/solution/untitled/ksiazkaadresowa/management/commands/clean.py | giserh/book-python |
from django.test import TestCase
from django.contrib.auth import get_user_model
class ModelTests(TestCase):
def test_create_user_with_email_successful(self):
email = "test@test.com"
password = "Testpass_12345"
user = get_user_model().objects.create_user(
email_address=email,
password=password
)
self.assertEqual(user.email, email)
self.assertTrue(user.check_password(password))
def test_new_user_email_normalized(self):
email = "test@TEST.COM"
user = get_user_model().objects.create_user(email, 'test123')
self.assertEqual(user.email, email.lower())
def test_new_user_invalid_email(self):
with self.assertRaises(ValueError):
get_user_model().objects.create_user(
email_address=None,
password="password"
)
def test_create_new_superuser(self):
user = get_user_model().objects.create_superuser(
"test@test.com",
"test123"
)
self.assertTrue(user.is_superuser)
self.assertTrue(user.is_staff)
| [
{
"point_num": 1,
"id": "more_functions_than_classes",
"question": "Does this file define more functions than classes?",
"answer": true
},
{
"point_num": 2,
"id": "every_function_has_docstring",
"question": "Does every function in this file have a docstring?",
"answer": false
}... | 3 | app/core/tests/test_models.py | YevheniiM/RecipeDjangoApp |
import os
import sys
from pathlib import Path
sys.path.insert(1, '../Phase1')
sys.path.insert(2, '../Phase2')
import misc
import numpy as np
class Feedback:
def __init__(self):
self.task5_result = None
self.reduced_pickle_file_folder = os.path.join(Path(os.path.dirname(__file__)).parent,
'Phase2', 'pickle_files')
self.set_task5_result()
self.dataset = list()
self.X = None
self.y = None
self.dataset=list()
def generate_input_data_set(self, rorir_map, dataset_features):
for image_id, label in rorir_map.items():
image_id = os.path.basename(image_id)
if label==0 or label==1:
feat = dataset_features[image_id].tolist()
feat+=[label]
self.dataset.append(np.array(feat))
return
def set_task5_result(self):
self.task5_result = misc.load_from_pickle(self.reduced_pickle_file_folder, 'Task_5_Result')
def generate_input_data(self, rorir_map, dataset_features):
X = []
y = []
for image_id, label in rorir_map.items():
image_id = os.path.basename(image_id)
if label == 0 or label == 1:
X.append(dataset_features[image_id])
y+=[rorir_map[image_id]]
X = np.array(X)
y = np.array(y)
self.X=X
self.y=y
return
def euclidean_distance(self, dist1, dist2):
return (sum([(a - b) ** 2 for a, b in zip(dist1, dist2)])) ** 0.5
def save_result(self, result):
reduced_pickle_file_folder = os.path.join(Path(os.path.dirname(__file__)).parent,
'Phase2', 'pickle_files')
misc.save2pickle(result, reduced_pickle_file_folder, 'Task_5_Result') | [
{
"point_num": 1,
"id": "more_functions_than_classes",
"question": "Does this file define more functions than classes?",
"answer": true
},
{
"point_num": 2,
"id": "every_function_under_20_lines",
"question": "Is every function in this file shorter than 20 lines?",
"answer": true
... | 3 | Phase3/Feedback.py | Surya97/MWDB-project |
from firebase_admin import auth
from package_tests.models import User
# Stubs
stub_firebase_token = 'stub_firebase_token'
stub_firebase_uid = 'stub_firebase_uid'
stub_email = 'daniel@danieljs.tech'
stub_username = 'dspacejs'
# Mock classes
class MockUserRecord(object):
email = None
display_name = None
uid = None
def __init__(self, email, display_name, uid):
self.email = email
self.display_name = display_name
self.uid = uid
# Mock methods
def mock_firebase_verify_id_token(encoded_token, app, check_revoked):
if encoded_token == stub_firebase_token:
return {
'uid': stub_firebase_uid,
}
elif encoded_token is None:
raise ValueError('Token is not valid.')
elif encoded_token == '':
raise ValueError('Token is not valid.')
raise auth.AuthError(code='USER_NOT_FOUND', message='User not found.')
def mock_firebase_get_user(firebase_uid):
if firebase_uid == stub_firebase_uid:
return MockUserRecord(
email=stub_email,
display_name=stub_username,
uid=stub_firebase_uid,
)
raise auth.AuthError(code='USER_NOT_FOUND', message='User not found.')
def create_user(firebase_uid=stub_firebase_uid, email='email@gmail.com'):
user = User.objects.create(email=email, firebase_uid=firebase_uid)
user.set_password('supersekret')
user.save()
return user
def setup_mocks():
auth.verify_id_token = mock_firebase_verify_id_token
auth.get_user = mock_firebase_get_user
| [
{
"point_num": 1,
"id": "every_function_under_20_lines",
"question": "Is every function in this file shorter than 20 lines?",
"answer": true
},
{
"point_num": 2,
"id": "all_params_annotated",
"question": "Does every function parameter in this file have a type annotation (excluding se... | 3 | package_tests/tests/helpers.py | oleo65/graphene-django-firebase-auth |
import pandas as pd
import hiquant as hq
class StrategyLongHold( hq.BasicStrategy ):
symbol_max_value = {}
symbol_cost = {}
def __init__(self, fund):
super().__init__(fund, __file__)
self.max_stocks = 10
self.max_weight = 1.0
fund.set_name('耐心持有策略')
def schedule_task(self, trader):
trader.run_daily(self.trade, None, time='09:30')
trader.run_on_bar_update(self.trade, None)
#def select_targets(self):
# return pd.read_csv('stockpool/t0_white_horse_20.csv', dtype=str)['symbol'].tolist()
def get_trade_decision(self, symbol, market, portfolio, max_value_pos_stock):
max_stocks = min(self.max_stocks, len(self.targets))
init_value_per_stock = portfolio.init_value / max_stocks * self.max_weight
if symbol in portfolio.positions:
return symbol, 0, 0, ''
else:
return symbol, 1, max_value_pos_stock, ''
def get_signal_comment(self, symbol, signal):
return '耐心持有'
def init(fund):
strategy = StrategyLongHold(fund)
if __name__ == '__main__':
backtest_args = dict(
#start_cash= 1000000.00,
date_start= hq.date_from_str('6 months ago'),
#date_end= hq.date_from_str('yesterday'),
#out_file= 'output/demo.png',
#parallel= True,
compare_index= 'sh000300',
)
hq.backtest_strategy( StrategyLongHold, **backtest_args )
| [
{
"point_num": 1,
"id": "no_function_exceeds_5_params",
"question": "Does every function in this file take 5 or fewer parameters (excluding self/cls)?",
"answer": true
},
{
"point_num": 2,
"id": "all_function_names_snake_case",
"question": "Are all function names in this file written... | 3 | strategy/000_long_hold.py | floatinghotpot/hiquant |
class Jumble(object):
def __init__(self):
self.dict = self.make_dict()
def make_dict(self):
dic = {}
f = open('/usr/share/dict/words', 'r')
for word in f:
word = word.strip().lower()
sort = ''.join(sorted(word))
dic[sort] = word
return dic
def unjumble(self, lst):
for word in lst:
word = word.strip().lower()
sorted_word = "".join(sorted(word))
if sorted_word in self.dict:
self.dict[sorted_word]
else:
return None
if __name__ == "__main__":
f_list = ['prouot', 'laurr', 'jobum', 'lethem']
s_list = ['siconu', 'tefon', 'tarfd', 'laisa']
t_list = ['sokik', 'niumem', 'tenjuk', 'doore']
unjumble = Jumble()
print(unjumble.unjumble(f_list))
print(unjumble.unjumble(s_list))
print(unjumble.unjumble(t_list)) | [
{
"point_num": 1,
"id": "every_function_has_docstring",
"question": "Does every function in this file have a docstring?",
"answer": false
},
{
"point_num": 2,
"id": "every_function_under_20_lines",
"question": "Is every function in this file shorter than 20 lines?",
"answer": tru... | 3 | Code/word_jumble.py | Nyapal/CS1.3 |
from Crypto.PublicKey import RSA
from Crypto.Signature import pkcs1_15
from Crypto.Cipher import PKCS1_OAEP, Salsa20
from Crypto.Hash import SHA256
from Crypto.Cipher import DES
from Crypto.Cipher import AES
import zlib
def encrypt(message):
encrypt_key = RSA.generate(2048)
encrypted_message = PKCS1_OAEP.new(encrypt_key).encrypt(message)
return encrypt_key, encrypted_message
def decrypt(encrypt_key, message):
return PKCS1_OAEP.new(encrypt_key).decrypt(message)
def sign(encrypted_2_message, private):
hash_encrypted_2_message = SHA256.new(encrypted_2_message)
signature = pkcs1_15.new(private).sign(hash_encrypted_2_message)
return signature
def check_sign(encrypted_2_message, public_key, sign):
"""Проверка подписи от пользоваетеля регистратором"""
hash_encrypted_message = SHA256.new(encrypted_2_message)
try:
pkcs1_15.new(public_key).verify(hash_encrypted_message, sign)
except:
return False
return True
# Делает пользователь
# message = b'test'
#
# encrypt_key, encrypted_message = encrypt(message)
# private_key_user, public_key_user, sign_user = sign(encrypted_message)
#
# # Делает регистратор
# check_sign(encrypted_message, public_key_user, sign_user)
# private_key_registrator, public_key_registrator, sign_registrator = sign(encrypted_message)
#
# # Делает пользователь
# # message = PKCS1_OAEP.new(encrypt_key).decrypt(encrypted_message)
#
# # Делает учитыватель
# check_sign(encrypted_message, public_key_user, sign_user)
# check_sign(encrypted_message, public_key_registrator, sign_registrator)
#
# # Учитыватель получает секретный ключ для расшифровки от пользователя
# decrypt(encrypt_key, encrypted_message)
# print(message)
| [
{
"point_num": 1,
"id": "all_params_annotated",
"question": "Does every function parameter in this file have a type annotation (excluding self/cls)?",
"answer": false
},
{
"point_num": 2,
"id": "all_function_names_snake_case",
"question": "Are all function names in this file written ... | 3 | source/auth/client_protocol.py | raldenprog/electronic_vote |
# -*- coding: utf-8 -*-
"""
Created on Tue May 26 13:17:06 2020
@author: Chandan
"""
import engine as en
from engine import Engine, wait_for_command
import query
import process
ass = Engine()
ass.engine_rate(210)
def listen():
command = en.wait_for_command()
try:
result = None
if query.close(command.lower()):
flag_close = False
ass.speak("are you sure sir? like, its ok ill wait for your command")
confirm = en.wait_for_command()
if query.confirm(confirm.lower()):
ass.speak("fine sir! please call me if you need any assisstent")
result = "close"
if "ok" == command.lower():
ass.speak("i am always hear for you sir")
return result
elif query.read(command.lower()):
flag_read = True
ass.speak("for you sir... always")
run_jarvis()
else:
return None
except:
return None
def run_jarvis():
result = None
query = wait_for_command()
result = process.take_jarvis_query(query.lower())
if result!=None:
ass.speak(result)
ass.speak("any thing else sir")
query = en.wait_for_command()
if "no" == query.lower():
ass.speak("ok sir")
if "yes" == query.lower():
ass.speak("cammand me sir")
run_jarvis()
ass.engine_rate(210)
if __name__ == "__main__":
ass.speak("Initializing jarvis...")
while True:
res = listen()
if res == "close":
break
| [
{
"point_num": 1,
"id": "every_function_under_20_lines",
"question": "Is every function in this file shorter than 20 lines?",
"answer": false
},
{
"point_num": 2,
"id": "every_function_has_docstring",
"question": "Does every function in this file have a docstring?",
"answer": fal... | 3 | main.py | chandannaidu/virtual-assistant-jarvis- |
def table(name=None, primary_key="id", column_map=None):
"""数据据保存的表名"""
def decorate(clazz):
setattr(clazz, "__table_name__", clazz.__name__ if name is None else name)
setattr(clazz, "__primary_key__", primary_key)
setattr(clazz, "__column_map__", None if column_map is None else column_map)
return clazz
return decorate
| [
{
"point_num": 1,
"id": "all_params_annotated",
"question": "Does every function parameter in this file have a type annotation (excluding self/cls)?",
"answer": false
},
{
"point_num": 2,
"id": "has_nested_function_def",
"question": "Does this file contain any function defined inside... | 3 | db_hammer/entity.py | liuzhuogood/db-hammer |
"""
Copyright (c) 2017 Dependable Systems Laboratory, EPFL
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.
"""
import collections
import functools
class memoize:
"""
Memoize dectorator.
Caches a function's return value each time it is called. If called later
with the same arguments, the cache value is returned (not reevaluated).
Taken from https://wiki.python.org/moin/PythonDecoratorLibrary#Memoize
"""
def __init__(self, func):
self._func = func
self._cache = {}
def __call__(self, *args):
if not isinstance(args, collections.abc.Hashable):
return self._func(args)
if args in self._cache:
return self._cache[args]
value = self._func(*args)
self._cache[args] = value
return value
def __repr__(self):
# Return the function's docstring
return self._func.__doc__
def __get__(self, obj, objtype):
# Support instance methods
return functools.partial(self.__call__, obj)
| [
{
"point_num": 1,
"id": "no_function_exceeds_5_params",
"question": "Does every function in this file take 5 or fewer parameters (excluding self/cls)?",
"answer": true
},
{
"point_num": 2,
"id": "every_function_under_20_lines",
"question": "Is every function in this file shorter than... | 3 | s2e_env/utils/memoize.py | michaelbrownuc/s2e-env |
"""Add required grants
Revision ID: e9fbe7694450
Revises: c0b039d92792
Create Date: 2021-04-19 12:59:52.861502
"""
from alembic import op
import sqlalchemy as sa
from app import app
# revision identifiers, used by Alembic.
revision = 'e9fbe7694450'
down_revision = 'c0b039d92792'
branch_labels = None
depends_on = None
def upgrade():
if app.config['ENV'] in ('staging', 'production'):
op.execute("""
GRANT ALL ON TABLE "user" TO steuerlotse;
GRANT ALL ON SEQUENCE user_id_seq TO steuerlotse;
""")
def downgrade():
pass
| [
{
"point_num": 1,
"id": "every_function_under_20_lines",
"question": "Is every function in this file shorter than 20 lines?",
"answer": true
},
{
"point_num": 2,
"id": "every_function_has_docstring",
"question": "Does every function in this file have a docstring?",
"answer": fals... | 3 | webapp/migrations/versions/e9fbe7694450_add_required_grants.py | ramboldio/steuerlotse |
from .parser import EnsemblParser
import biothings.hub.dataload.uploader as uploader
from biothings.utils.common import dump2gridfs
class EnsemblGeneUploader(uploader.MergerSourceUploader):
name = "ensembl_gene"
main_source = "ensembl"
__metadata__ = {"mapper" : 'ensembl2entrez'}
def load_data(self, data_folder):
ep = EnsemblParser(data_folder,load_ensembl2entrez=False)
ensembl_genes = ep.load_ensembl_main()
return ensembl_genes
def get_mapping_to_entrez(self, data_folder):
ep = EnsemblParser(data_folder)
ep._load_ensembl2entrez_li()
return ep.ensembl2entrez_li
def post_update_data(self,*args,**kwargs):
self.logger.info('Uploading "mapping2entrezgene" to GridFS...')
x2entrezgene_list = self.get_mapping_to_entrez(self.data_folder)
dump2gridfs(x2entrezgene_list, self.name + '__2entrezgene_list.pyobj', self.db)
@classmethod
def get_mapping(klass):
mapping = {
"taxid": {"type": "integer"},
"symbol": {"type": "keyword",
"normalizer" : "keyword_lowercase_normalizer",
"boost": 5.0,
'copy_to': ['all'],},
"name": {"type": "text",
"boost": 0.8, # downgrade name field a little bit
'copy_to': ['all'],},
}
return mapping
| [
{
"point_num": 1,
"id": "more_functions_than_classes",
"question": "Does this file define more functions than classes?",
"answer": true
},
{
"point_num": 2,
"id": "no_function_exceeds_5_params",
"question": "Does every function in this file take 5 or fewer parameters (excluding self/... | 3 | src/hub/dataload/sources/ensembl/gene_upload.py | inambioinfo/mygene.info |
# -*- coding: UTF-8 -*-
import sys
from correctores.common.corrector_variables import corrector_variables
########################################################################
#### Esto es lo que hay que cambiar en cada problema: ####
#### - epsilon: para comparar floats y complex, si lo necesitas ####
#### - genera_casos: devuelve una lista de casos de prueba ####
########################################################################
def epsilon():
return 1E-9
def genera_casos():
# Generar los casos de prueba que se quieren comprobar
return [([], [ ('cantidad', 10), ('adicional', 5), ('total', 15)])]
#################################
#### Esto no hay que tocarlo ####
#################################
def ejecutor_caso(g,l):
# Aqui se pega el codigo del alumno
codigo = """
@@@CODE@@@
"""
exec(codigo,g,l)
# 'l' contendrá los valores de salida
if __name__ == "__main__":
corrector_variables(sys.argv[1], genera_casos(), epsilon(), ejecutor_caso, globals())
| [
{
"point_num": 1,
"id": "every_function_under_20_lines",
"question": "Is every function in this file shorter than 20 lines?",
"answer": true
},
{
"point_num": 2,
"id": "every_function_has_docstring",
"question": "Does every function in this file have a docstring?",
"answer": fals... | 3 | temas/Python3/correctores/asignaciones/asig1.py | emartinm/TutorialesInteractivos |
# -*- coding: utf-8 -*-
# Part of Odoo. See LICENSE file for full copyright and licensing details.
from odoo.tests import common
class TestMrpMulticompany(common.TransactionCase):
def setUp(self):
super(TestMrpMulticompany, self).setUp()
group_user = self.env.ref('base.group_user')
group_stock_manager = self.env.ref('stock.group_stock_manager')
company_2 = self.env.ref('stock.res_company_1')
self.multicompany_user_id = self.env['res.users'].create({
'name': 'multicomp',
'login': 'multicomp',
'groups_id': [(6, 0, [group_user.id, group_stock_manager.id])],
'company_id': company_2.id,
'company_ids': [(6, 0, [company_2.id])]
})
def test_00_multicompany_user(self):
"""check no error on getting default mrp.production values in multicompany setting"""
StockLocation = self.env['stock.location'].sudo(self.multicompany_user_id)
fields = ['location_src_id', 'location_dest_id']
defaults = StockLocation.default_get(['location_id', 'location_dest_id', 'type'])
for field in fields:
if defaults.get(field):
try:
StockLocation.check_access_rule([defaults[field]], 'read')
except Exception as exc:
assert False, "unreadable location %s: %s" % (field, exc)
| [
{
"point_num": 1,
"id": "has_nested_function_def",
"question": "Does this file contain any function defined inside another function?",
"answer": false
},
{
"point_num": 2,
"id": "every_function_has_docstring",
"question": "Does every function in this file have a docstring?",
"ans... | 3 | addons/mrp/tests/test_multicompany.py | jjiege/odoo |
# RPS bot
import random
name = 'adaptivebot'
class RPSBot(object):
name = name
def __init__(self):
self.winners = {"R": "P", "P": "S", "S": "R"}
def get_hint(self, other_past, my_past):
is_other_constant = len(set([other_claim for other_claim, other_move in other_past[-2:]])) == 1
return self.winners[other_past[-1][0]] if is_other_constant else random.choice(list(self.winners.keys()))
def get_move(self, other_past, my_past, other_next, my_next):
is_other_honest = all([other_claim == other_move for other_claim, other_move in other_past[-2:]])
return self.winners[other_next] if is_other_honest else my_next | [
{
"point_num": 1,
"id": "every_function_has_docstring",
"question": "Does every function in this file have a docstring?",
"answer": false
},
{
"point_num": 2,
"id": "more_functions_than_classes",
"question": "Does this file define more functions than classes?",
"answer": true
}... | 3 | adaptivebot.py | coolioasjulio/Rock-Paper-Scissors-Royale |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
from __future__ import print_function
import requests
import json
class KodiController(object):
__serverconf = None
def __init__(self, serverconf):
self.__serverconf = serverconf
def post(self,command):
url = "http://" + self.__serverconf["hostname"] + ":" + str(self.__serverconf["port"]) + "/jsonrpc"
headers = {"Content-Type": "application/json"}
r = requests.post(
url,
data=json.dumps(command),
headers=headers,
auth=(self.__serverconf["user"], self.__serverconf["password"]))
return r.json()
| [
{
"point_num": 1,
"id": "has_multiple_inheritance",
"question": "Does any class in this file use multiple inheritance?",
"answer": false
},
{
"point_num": 2,
"id": "every_function_has_docstring",
"question": "Does every function in this file have a docstring?",
"answer": false
... | 3 | scripts/kodicontroller.py | koji88/kodi-gpio-controller |
from stability.Utility import limitByRate, mapInput, limit
from stability.filters import LowPassFilter
class Actuator:
# class for the control surface actuator
def __init__(self, max, servo_to_hinge, rate):
self.max = max # max min limit of the FINAL control surface deflection in degrees (e.g. 30 degrees of aileron deflection is +-15 deg)
self.in_to_servo = 1000/175 # ratio of input channel to the corresponding servo deflection in degrees (in our case 1000 equals ~170-175 deg)
self.servo_to_hinge = servo_to_hinge # ratio of the servo deflection in degrees to the corresponding control surface deflection, depends on the mechanical arrangement
self.rate = rate # target deflection speed of the control surface in deg / s
self.output = 0 # set to zero to initialise
self.antiAliasing = LowPassFilter(1, 0.05) # anti-wonkifying smoothing of the input signal to the servo
def step(self, stick_input, sas_input, dt): #stick_input is in 0 to 1000 whereas sas_input is stability augmentation input in degrees corresponding to the surface deflection
max_1 = self.max * self.servo_to_hinge * self.in_to_servo
stick_input_1 = mapInput(stick_input, 0, 1000, 0, max_1)
rate_1 = self.rate * self.servo_to_hinge * self.in_to_servo
sas_to_in = sas_input * self.servo_to_hinge * self.in_to_servo
input_1 = self.antiAliasing.step(limit(stick_input_1 + sas_to_in, max_1, 0), dt)
self.output = limitByRate(input_1, self.output, rate_1, dt)
return self.output
| [
{
"point_num": 1,
"id": "every_function_under_20_lines",
"question": "Is every function in this file shorter than 20 lines?",
"answer": true
},
{
"point_num": 2,
"id": "every_function_has_docstring",
"question": "Does every function in this file have a docstring?",
"answer": fals... | 3 | pycom/lib/stability/Actuator.py | o-gent/aero_one |
class FittingAngleUsage(Enum, IComparable, IFormattable, IConvertible):
"""
An enumerated type representing the options for how to limit the angle values applicable to fitting content.
enum FittingAngleUsage,values: UseAnAngleIncrement (1),UseAnyAngle (0),UseSpecificAngles (2)
"""
def __eq__(self, *args):
""" x.__eq__(y) <==> x==yx.__eq__(y) <==> x==yx.__eq__(y) <==> x==y """
pass
def __format__(self, *args):
""" __format__(formattable: IFormattable,format: str) -> str """
pass
def __ge__(self, *args):
pass
def __gt__(self, *args):
pass
def __init__(self, *args):
""" x.__init__(...) initializes x; see x.__class__.__doc__ for signaturex.__init__(...) initializes x; see x.__class__.__doc__ for signaturex.__init__(...) initializes x; see x.__class__.__doc__ for signature """
pass
def __le__(self, *args):
pass
def __lt__(self, *args):
pass
def __ne__(self, *args):
pass
def __reduce_ex__(self, *args):
pass
def __str__(self, *args):
pass
UseAnAngleIncrement = None
UseAnyAngle = None
UseSpecificAngles = None
value__ = None
| [
{
"point_num": 1,
"id": "more_functions_than_classes",
"question": "Does this file define more functions than classes?",
"answer": true
},
{
"point_num": 2,
"id": "every_function_has_docstring",
"question": "Does every function in this file have a docstring?",
"answer": false
}... | 3 | release/stubs.min/Autodesk/Revit/DB/__init___parts/FittingAngleUsage.py | YKato521/ironpython-stubs |
import os
import time
import click
from . import procs
class Interpreter:
def __init__(self, ctx, verbose):
self.ctx = ctx
self.verbose = verbose
self.lines = []
self.in_comment = False
def feed(self, line):
if len(self.lines) > 0:
# End of multi-line comment
if self.lines[0].startswith('#==') and line.endswith('==#'):
self.lines = []
self.in_comment = False
return False
return True
start_time = time.time()
# Handle exit command or EOF
if line == 'exit':
self.ctx.exit()
# Blank lines
elif line.strip() == '':
pass
# Print debug information
elif line == 'debug':
click.echo('Configuration values:')
for key, val in self.ctx.obj.config.items():
click.echo(f' {key} = {repr(val)}')
# cd
elif line.startswith('cd '):
try:
dirname = line[3:].strip()
os.chdir(os.path.expanduser(dirname))
except OSError as e:
click.echo(e, err=True)
# Start of multiline comments
elif line.startswith('#=='):
self.lines.append(line)
self.in_comment = True
self.ctx.obj.previous_cmd_duration = 0
return True
# Single-line comments
elif line.strip()[0] == '#':
pass
# Normal commands
else:
try:
with self.ctx:
procs.run_line(line, echo_args=self.verbose)
except FileNotFoundError as e:
click.echo(f'Command not found: {e.filename}', err=True)
self.lines = []
self.ctx.obj.previous_cmd_duration = time.time() - start_time
return False
| [
{
"point_num": 1,
"id": "all_params_annotated",
"question": "Does every function parameter in this file have a type annotation (excluding self/cls)?",
"answer": false
},
{
"point_num": 2,
"id": "has_multiple_inheritance",
"question": "Does any class in this file use multiple inherita... | 3 | dish/interpreter.py | dullbananas/dish |
import configparser
import functools
from os import path
from pathlib import Path
class Config():
"""Config wrapper that reads global config and user config."""
PROJECT_ROOT = path.join(path.dirname(path.realpath(__file__)), '..')
CONFIG_INI = path.join(PROJECT_ROOT, 'config.ini')
HOME_DIR = Path.home()
CONN_INI = path.join(HOME_DIR, '.snowflake-cicd.ini')
def __init__(self):
pass
def __lazy_init(func):
"""Reads and parses global config file and user's conn file."""
@functools.wraps(func)
def wrap(self, *args, **kwargs):
if not hasattr(self, '_config'):
assert path.exists(Config.CONFIG_INI), f"Missing config file at path {Config.CONFIG_INI}"
self._config = configparser.ConfigParser()
self._config.read(Config.CONFIG_INI)
if not hasattr(self, '_conn'):
assert path.exists(Config.CONN_INI), f"Missing connection settings file at path {Config.CONN_INI}"
self._conn = configparser.ConfigParser()
self._conn.read(Config.CONN_INI)
return func(self, *args, **kwargs)
return wrap
@__lazy_init
def read_config(self, key, section='default', default=None) -> str:
"""Reads [section] key from user's conn file or use global file
if the key is missing."""
return self._conn[section].get(key,
self._config[section].get(key, default))
@__lazy_init
def read_user_config(self, key, section='default', default=None) -> str:
"""Reads [section] from user .snowflake-cicd.ini file."""
return self._conn[section].get(key, default)
@__lazy_init
def sql(self, query_id) -> str:
"""Returns value from config section 'queries'."""
return self._config['queries'].get(query_id)
config = Config()
| [
{
"point_num": 1,
"id": "all_params_annotated",
"question": "Does every function parameter in this file have a type annotation (excluding self/cls)?",
"answer": false
},
{
"point_num": 2,
"id": "more_functions_than_classes",
"question": "Does this file define more functions than clas... | 3 | src/cicd/utils/config.py | szn/snowflake-cicd |
import kfserving
from typing import Optional
from adserver.base.model import CEModel
from alibi_detect.utils.saving import load_detector, Data
class AlibiDetectModel(CEModel): # pylint:disable=c-extension-no-member
def __init__(self, name: str, storage_uri: str, model: Optional[Data] = None):
"""
Outlier Detection Model
Parameters
----------
name
The name of the model
storage_uri
The URI location of the model
"""
super().__init__(name)
self.name = name
self.storage_uri = storage_uri
self.ready = False
self.model: Optional[Data] = model
def load(self):
"""
Load the model from storage
"""
model_folder = kfserving.Storage.download(self.storage_uri)
self.model: Data = load_detector(model_folder)
self.ready = True
| [
{
"point_num": 1,
"id": "every_function_under_20_lines",
"question": "Is every function in this file shorter than 20 lines?",
"answer": true
},
{
"point_num": 2,
"id": "every_function_has_docstring",
"question": "Does every function in this file have a docstring?",
"answer": true... | 3 | components/alibi-detect-server/adserver/base/alibi_model.py | glindsell/seldon-core |
""" Simple utilities for figures"""
import numpy as np
import matplotlib as mpl
def log_me(val, err):
"""
Generate log and error from linear input
Args:
val (float):
err (float):
Returns:
float, (float/None):
Returns none if the err is negative
"""
if err < 0.:
xerr = None
else:
xerr = np.array([[np.log10(val) - np.log10(val - err)],
[-np.log10(val) + np.log10(val + err)]])
return np.log10(val), xerr
def set_fontsize(ax,fsz):
"""
Set the fontsize throughout an Axis
Args:
ax (Matplotlib Axis):
fsz (float): Font size
Returns:
"""
for item in ([ax.title, ax.xaxis.label, ax.yaxis.label] +
ax.get_xticklabels() + ax.get_yticklabels()):
item.set_fontsize(fsz)
def set_bokeh_fontsize(p, fsz):
"""
Adjust font size for Bokeh axes
Args:
p (Bokeh plot class):
sz (int): Font size
"""
p.xaxis.axis_label_text_font_size = '{:d}pt'.format(fsz)
p.xaxis.major_label_text_font_size = "{:d}pt".format(fsz)
#
p.yaxis.axis_label_text_font_size = '{:d}pt'.format(fsz)
p.yaxis.major_label_text_font_size = "{:d}pt".format(fsz)
def set_mplrc():
"""
Font fussing for matplotlib
Returns:
"""
mpl.rcParams['mathtext.default'] = 'it'
mpl.rcParams['font.size'] = 12
mpl.rc('font',family='Times New Roman')
mpl.rcParams['text.latex.preamble'] = [r'\boldmath']
mpl.rc('text', usetex=True)
| [
{
"point_num": 1,
"id": "has_nested_function_def",
"question": "Does this file contain any function defined inside another function?",
"answer": false
},
{
"point_num": 2,
"id": "no_function_exceeds_5_params",
"question": "Does every function in this file take 5 or fewer parameters (... | 3 | frb/figures/utils.py | Lachimax/FRB |
from ..resources.I18nResources import I18nLanguageListResponse, I18nRegionListResponse
from googleapiclient.discovery import Resource
class I18n:
def __init__(self, client: Resource) -> None:
self.client = client
def list_languages(self):
req = self.client.i18nLanguages().list(part='snippet')
return I18nLanguageListResponse._from_response_dict(req.execute())
def list_regions(self):
req = self.client.i18nRegions().list(part='snippet')
return I18nRegionListResponse._from_response_dict(req.execute())
...
| [
{
"point_num": 1,
"id": "every_function_has_docstring",
"question": "Does every function in this file have a docstring?",
"answer": false
},
{
"point_num": 2,
"id": "has_multiple_inheritance",
"question": "Does any class in this file use multiple inheritance?",
"answer": false
... | 3 | src/ytwrapper/apis/I18ns.py | Robert-Phan/yt-wrapper |
# parser.py
#
# Author: Jan Piotr Buchmann <jan.buchmann@sydney.edu.au>
# Description:
#
# Version: 0.0
import sys
from . import sequence
class FastaParser:
def __init__(self):
self.sequences = {}
self.doFhClose = False
self.src = sys.stdin
def parse(self, src=None, fil=None, stream=False):
print ("Parsing file %s" % fil)
if fil != None:
src = open(fil, 'r')
self.doFhClose = True
if src == None:
print('Fasta parser. Error. No source to parse')
seq = ''
header = ''
for i in src:
if i[0] == '>':
if len(seq) > 0:
self.add_sequence(sequence.FastaSequence(header, seq), stream)
seq = ''
header = i[1:].strip()
print ("found header in parser.py %s" % header)
else:
seq += i.strip()
self.add_sequence(sequence.FastaSequence(header, seq), stream)
if self.doFhClose == True:
src.close()
def write_file(self, fname):
fh = open(fname, 'w')
for i in self.sequences:
fh.write(self.sequences[i].get_sequence())
fh.close()
return fname
def add_sequence(self, seq, stream):
if stream == True:
print(seq.get_sequence())
self.sequences[seq.header] = seq
def reset(self):
self.sequences = {}
| [
{
"point_num": 1,
"id": "all_function_names_snake_case",
"question": "Are all function names in this file written in snake_case?",
"answer": true
},
{
"point_num": 2,
"id": "all_params_annotated",
"question": "Does every function parameter in this file have a type annotation (excludi... | 3 | lib/fasta/parser.py | NCBI-Hackathons/VirusML |
"""
-----
soup:
-----
find
find_all
select
find_by_class
find_by_id
find_by_tag
find_by_text
--------
selenium:
--------
find_element_by_id(id_)
find_elements_by_id(id_)
find_element_by_class_name(name)
find_elements_by_class_name(name)
find_element_by_css_selector(css_selector)
find_elements_by_css_selector(css_selector)
find_element_by_xpath(xpath)
find_elements_by_xpath(xpath)
find_element_by_link_text(text)
find_elements_by_link_text(text)
"""
class stepHelper:
def get_soup_steps_helper():
helper = {
'find_element_by_id': '.find(id="{param}")',
'find_elements_by_id': '.find_all(id="{param}")',
'find_element_by_class': '.find(class_="{param}")',
'find_elements_by_class': '.find_all(class_="{param}")',
'select_one': '.select("{param}")[0]',
'select_all': '.select("{param}")',
'ext_str_get_text': '.get_text()'
}
return helper
def get_selenium_steps_helper():
helper = {
"find_element_by_id": '.find_element_by_id("{param}")',
"find_elements_by_id": '.find_elements_by_id("{param}")',
"find_element_by_class": '.find_element_by_class_name("{param}")',
"find_elements_by_class": '.find_elements_by_class_name("{param}")',
"find_element_by_css_selector": '.find_element_by_css_selector("{param}")',
"find_elements_by_css_selector": '.find_elements_by_css_selector("{param}")',
"click": '.click()',
"ext_str_get_text": '.text',
"ext_str_get_attribute": '.get_attribute("{param}")'
}
return helper
| [
{
"point_num": 1,
"id": "has_multiple_inheritance",
"question": "Does any class in this file use multiple inheritance?",
"answer": false
},
{
"point_num": 2,
"id": "has_nested_function_def",
"question": "Does this file contain any function defined inside another function?",
"answ... | 3 | login/lib/step_helper.py | sifeng86/crawla-the-spider-tool |
import unittest
from unittest.mock import mock_open, patch, MagicMock
from unittest import mock
from src.zad3.friendships_storage import FriendshipsStorage
from src.zad3.friendships import Friendships
class TestFriendshipsStorage(unittest.TestCase):
def test_friendships_storage_add_raises_typeError_with_not_friendships(self):
friendships_storage = FriendshipsStorage()
friendships_storage.add = MagicMock(side_effect=TypeError)
self.assertRaises(TypeError, friendships_storage.add, "Andrzej")
friendships_storage.add.assert_called_with("Andrzej")
def test_friendships_storage_add_note_success(self):
friendships = Friendships()
friendships.make_friends("anmdrej", "marek")
friendships_storage = FriendshipsStorage()
friendships_storage.add = MagicMock(return_value="Added Friendships succesfully")
friendships_storage.add(friendships)
friendships_storage.add.assert_called_once_with(friendships)
def test_friendships_storage_clear(self):
friendships_storage = FriendshipsStorage()
friendships_storage.clear = MagicMock(return_value="Cleared all Friendships")
self.assertEqual(friendships_storage.clear(), "Cleared all Friendships")
friendships_storage.clear.assert_any_call()
def test_friendships_storage_get_friends_of(self):
friendships_storage = FriendshipsStorage()
friendships_storage.get_friends_of = MagicMock(return_value=["Michał", "Adrian"])
self.assertEqual(friendships_storage.get_friends_of("Andrzej"), ["Michał", "Adrian"])
friendships_storage.get_friends_of.assert_called_with("Andrzej")
def test_friendships_storage_get_friends_of_non_existent(self):
friendships_storage = FriendshipsStorage()
friendships_storage.get_friends_of = MagicMock(side_effect=ValueError)
self.assertRaises(ValueError, friendships_storage.get_friends_of, "Andrzej")
if __name__ == '__main__':
unittest.main()
| [
{
"point_num": 1,
"id": "every_function_has_docstring",
"question": "Does every function in this file have a docstring?",
"answer": false
},
{
"point_num": 2,
"id": "has_multiple_inheritance",
"question": "Does any class in this file use multiple inheritance?",
"answer": false
... | 3 | tests/test_friendships_storage.py | TestowanieAutomatyczneUG/laboratorium-11-maciejSzcz |
"""empty message
Revision ID: eb02de174736
Revises: c0de0819f9f0
Create Date: 2020-02-04 18:29:57.302993
"""
from alembic import op
import sqlalchemy as sa
# revision identifiers, used by Alembic.
revision = 'eb02de174736'
down_revision = 'c0de0819f9f0'
branch_labels = None
depends_on = None
def upgrade():
# ### commands auto generated by Alembic - please adjust! ###
op.alter_column('Shows', 'name',
existing_type=sa.VARCHAR(),
nullable=False)
op.create_foreign_key(None, 'Shows', 'Venue', ['name'], ['name'])
# ### end Alembic commands ###
def downgrade():
# ### commands auto generated by Alembic - please adjust! ###
op.drop_constraint(None, 'Shows', type_='foreignkey')
op.alter_column('Shows', 'name',
existing_type=sa.VARCHAR(),
nullable=True)
# ### end Alembic commands ###
| [
{
"point_num": 1,
"id": "every_function_has_docstring",
"question": "Does every function in this file have a docstring?",
"answer": false
},
{
"point_num": 2,
"id": "has_nested_function_def",
"question": "Does this file contain any function defined inside another function?",
"ans... | 3 | starter_code/migrations/versions/eb02de174736_.py | nkatwesigye/project_furry |
from __future__ import absolute_import, division, print_function, unicode_literals
from echomesh.util.string import Flag
from echomesh.util.TestCase import TestCase
class FlagTest(TestCase):
def test_empty(self):
self.assertEqual(Flag.split_flag(''), (u'', True))
def test_single_dash(self):
self.assertEqual(Flag.split_flag('-'), (u'', True))
def test_single_flag(self):
self.assertEqual(Flag.split_flag('-x'), (u'x', True))
def test_double_dash(self):
self.assertEqual(Flag.split_flag('--hello'), (u'hello', True))
def test_double_dash_equal(self):
self.assertEqual(Flag.split_flag('--hello='), (u'hello', True))
def test_full_flag(self):
self.assertEqual(Flag.split_flag('--hello=world'), (u'hello', u'world'))
def test_complex_case(self):
self.assertEqual(
Flag.split_flag_args(['hello', '--foo', '--bar=baz', '--bing', 'world']),
({u'bing': True, u'foo': True, u'bar': u'baz'}, [u'hello', u'world']))
| [
{
"point_num": 1,
"id": "every_function_has_docstring",
"question": "Does every function in this file have a docstring?",
"answer": false
},
{
"point_num": 2,
"id": "has_multiple_inheritance",
"question": "Does any class in this file use multiple inheritance?",
"answer": false
... | 3 | code/python/echomesh/util/string/Flag_test.py | silky/echomesh |
# -*- coding: utf-8 -*-
class MetaSingleton(type):
def __call__(cls, *args, **kwargs):
if not cls.__dict__.get("_instance"):
cls._instance = cls.__new__(cls, *args)
cls._instance.__init__(*args, **kwargs)
return cls._instance
class Singleton(object):
__metaclass__ = MetaSingleton
@classmethod
def Instance(cls):
return cls()
@classmethod
def CleanInstance(cls):
cls._instance = None | [
{
"point_num": 1,
"id": "every_class_has_docstring",
"question": "Does every class in this file have a docstring?",
"answer": false
},
{
"point_num": 2,
"id": "has_multiple_inheritance",
"question": "Does any class in this file use multiple inheritance?",
"answer": false
},
{... | 3 | common/singleton.py | hellosword/MDNote |
class Node:
def __init__(self,data):
self.data = data
self.next = None
class LinkedList:
def __init__(self):
self.head = None
def display(self):
temp = self.head
ll = []
while(temp):
ll.append(temp.data)
temp = temp.next
print(ll)
def insertAtPos(self, val, pos):
target = Node(val)
while(pos == 0):
target.next = self.head
self.head = target
return
def getPrev(pos):
temp = self.head
count = 1
while(count < pos):
temp = temp.next
count += 1
return temp
pre = getPrev(pos)
nextNode = pre.next
pre.next = target
target.next = nextNode
def deleteNode(self, key):
temp = self.head
#LL in empty
if(temp is None):
return
# CASE : Head Node Deletion
if(temp.data == key):
self.head = temp.next
temp = None
#Case : Delete in the middle
while(temp.next.data != key):
temp = temp.next
targetNode = temp.next
temp.next = targetNode.next
targetNode.next = None
if __name__ == "__main__":
print("Linked List Representation")
ll = LinkedList()
ll.head = Node(4)
second_node = Node(10)
ll.head.next = second_node
third_node = Node(6)
second_node.next = third_node
ll.insertAtPos(5,0)
ll.deleteNode(6)
ll.display()
| [
{
"point_num": 1,
"id": "every_function_under_20_lines",
"question": "Is every function in this file shorter than 20 lines?",
"answer": false
},
{
"point_num": 2,
"id": "no_function_exceeds_5_params",
"question": "Does every function in this file take 5 or fewer parameters (excluding... | 3 | Leetcode-python/5_LinkedList/ll.py | gnsalok/algo-ds-python |
from django.db import models
from meiduoshop.utils.models import BaseModel
# Create your models here.
class ContentCategory(BaseModel):
"""广告内容类别"""
name = models.CharField(max_length=50, verbose_name='名称')
key = models.CharField(max_length=50, verbose_name='类别键名')
class Meta:
db_table = 'tb_content_category'
verbose_name = '广告内容类别'
verbose_name_plural = verbose_name
def __str__(self):
return self.name
class Content(BaseModel):
"""广告内容"""
category = models.ForeignKey(ContentCategory, on_delete=models.PROTECT, verbose_name='类别')
title = models.CharField(max_length=100, verbose_name='标题')
url = models.CharField(max_length=300, verbose_name='内容链接')
image = models.ImageField(null=True, blank=True, verbose_name='图片')
text = models.TextField(null=True, blank=True, verbose_name='内容')
sequence = models.IntegerField(verbose_name='排序')
status = models.BooleanField(default=True, verbose_name='是否展示')
class Meta:
db_table = 'tb_content'
verbose_name = '广告内容'
verbose_name_plural = verbose_name
def __str__(self):
return self.category.name + ': ' + self.title
| [
{
"point_num": 1,
"id": "every_function_has_docstring",
"question": "Does every function in this file have a docstring?",
"answer": false
},
{
"point_num": 2,
"id": "every_class_has_docstring",
"question": "Does every class in this file have a docstring?",
"answer": false
},
... | 3 | meiduoshop/meiduoshop/apps/contents/models.py | juntao66/meiduoshopping |
# coding: utf-8
"""
Kubernetes
No description provided (generated by Swagger Codegen https://github.com/swagger-api/swagger-codegen)
OpenAPI spec version: v1.13.5
Generated by: https://github.com/swagger-api/swagger-codegen.git
"""
from __future__ import absolute_import
import os
import sys
import unittest
import kubernetes.client
from kubernetes.client.rest import ApiException
from kubernetes.client.models.v1beta1_cron_job import V1beta1CronJob
class TestV1beta1CronJob(unittest.TestCase):
""" V1beta1CronJob unit test stubs """
def setUp(self):
pass
def tearDown(self):
pass
def testV1beta1CronJob(self):
"""
Test V1beta1CronJob
"""
# FIXME: construct object with mandatory attributes with example values
#model = kubernetes.client.models.v1beta1_cron_job.V1beta1CronJob()
pass
if __name__ == '__main__':
unittest.main()
| [
{
"point_num": 1,
"id": "all_function_names_snake_case",
"question": "Are all function names in this file written in snake_case?",
"answer": false
},
{
"point_num": 2,
"id": "more_functions_than_classes",
"question": "Does this file define more functions than classes?",
"answer":... | 3 | kubernetes/test/test_v1beta1_cron_job.py | Scalr/kubernetes-client-python |
from eventsourcing.domain import Aggregate, event
from uuid import uuid5, NAMESPACE_URL
class Account(Aggregate):
"""A simple-as-can-be bank account"""
@event('Created')
def __init__(self):
self.balance = 0
@event('Credited')
def credit(self, amount: int):
self.balance += amount
@event('Debited')
def debit(self, amount: int):
self.balance -= amount
class Ledger(Aggregate):
"""A simple-as-can-be Ledger to track net movements across all accounts"""
def __init__(self, name):
self.name = name
self.transaction_count = 0
self.balance = 0
@classmethod
def create_id(cls, name):
"""Enable predictable IDs so that a Ledger can be retrieved
using its name - even if its ID isn't known
"""
return uuid5(NAMESPACE_URL, f'/ledgers/{name}')
@event('TransactionAdded')
def add_transaction(self, amount: int):
self.transaction_count += 1
self.balance += amount
def get_balance(self):
return self.balance
def get_transaction_count(self):
return self.transaction_count
| [
{
"point_num": 1,
"id": "more_functions_than_classes",
"question": "Does this file define more functions than classes?",
"answer": true
},
{
"point_num": 2,
"id": "all_params_annotated",
"question": "Does every function parameter in this file have a type annotation (excluding self/cl... | 3 | event_sourced_bank/domain_model.py | sfinnie/event_sourced_bank |
import biathlonresults as api
def test_cups():
res = api.cups(1819)
assert isinstance(res, list)
assert len(res) == 37
def test_cup_results():
res = api.cup_results("BT1819SWRLCP__SMTS")
assert isinstance(res, dict)
assert isinstance(res["Rows"], list)
assert res["Rows"][0]["Name"] == "BOE Johannes Thingnes"
def test_athletes():
res = api.athletes("boe", "johannes")
assert isinstance(res, dict)
assert isinstance(res["Athletes"], list)
assert "boe" in res["Athletes"][0]["FamilyName"].lower()
assert "johannes" in res["Athletes"][0]["GivenName"].lower()
def test_cisbios():
res = api.cisbios("BTNOR11605199301")
assert isinstance(res, dict)
assert res["FullName"] == "Johannes Thingnes BOE"
def test_all_results():
# Raphael Poiree
res = api.all_results("BTFRA10908197401")
assert isinstance(res, dict)
assert isinstance(res["Results"], list)
assert res["Results"][0]["SO"] == 2
assert len(res["Results"]) == 329
def test_events():
res = api.events(1819, 1)
assert isinstance(res, list)
assert len(res) == 10
assert res[0]["Level"] == 1
assert res[-1]["ShortDescription"] == "Oslo Holmenkollen"
def test_competitions():
# Pokljuka 1819
res = api.competitions("BT1819SWRLCP01")
assert isinstance(res, list)
assert len(res) == 8
assert res[-1]["ShortDescription"] == "Women 10km Pursuit"
def test_results():
# Pokljuka 1819 W PU
res = api.results("BT1819SWRLCP01SWPU")
assert isinstance(res, dict)
assert isinstance(res["Results"], list)
assert len(res["Results"]) == 60
assert res["Results"][0]["ResultOrder"] == 1
assert res["Results"][0]["Name"] == "MAKARAINEN Kaisa"
def test_stats():
# podiums men stat
res = api.stats("WCPOD_M", "WCPOD", "ATH", "M")
assert isinstance(res, dict)
assert isinstance(res["Rows"], list)
# in case someone breaks Bjoerndalen's record
assert int(res["Rows"][0]["Value"]) >= 199
| [
{
"point_num": 1,
"id": "has_nested_function_def",
"question": "Does this file contain any function defined inside another function?",
"answer": false
},
{
"point_num": 2,
"id": "every_function_under_20_lines",
"question": "Is every function in this file shorter than 20 lines?",
... | 3 | tests/test_api.py | prtkv/biathlonresults |
"""
Reference: https://github.com/mfaruqui/eval-word-vectors
"""
import math
import numpy
from operator import itemgetter
from numpy.linalg import norm
EPSILON = 1e-6
def euclidean(vec1, vec2):
diff = vec1 - vec2
return math.sqrt(diff.dot(diff))
def cosine_sim(vec1, vec2):
vec1 += EPSILON * numpy.ones(len(vec1))
vec2 += EPSILON * numpy.ones(len(vec1))
return vec1.dot(vec2)/(norm(vec1)*norm(vec2))
def assign_ranks(item_dict):
ranked_dict = {}
sorted_list = [(key, val) for (key, val) in sorted(item_dict.items(),
key=itemgetter(1),
reverse=True)]
for i, (key, val) in enumerate(sorted_list):
same_val_indices = []
for j, (key2, val2) in enumerate(sorted_list):
if val2 == val:
same_val_indices.append(j+1)
if len(same_val_indices) == 1:
ranked_dict[key] = i+1
else:
ranked_dict[key] = 1.*sum(same_val_indices)/len(same_val_indices)
return ranked_dict
def correlation(dict1, dict2):
avg1 = 1.*sum([val for key, val in dict1.iteritems()])/len(dict1)
avg2 = 1.*sum([val for key, val in dict2.iteritems()])/len(dict2)
numr, den1, den2 = (0., 0., 0.)
for val1, val2 in zip(dict1.itervalues(), dict2.itervalues()):
numr += (val1 - avg1) * (val2 - avg2)
den1 += (val1 - avg1) ** 2
den2 += (val2 - avg2) ** 2
return numr / math.sqrt(den1 * den2)
def spearmans_rho(ranked_dict1, ranked_dict2):
assert len(ranked_dict1) == len(ranked_dict2)
if len(ranked_dict1) == 0 or len(ranked_dict2) == 0:
return 0.
x_avg = 1.*sum([val for val in ranked_dict1.values()])/len(ranked_dict1)
y_avg = 1.*sum([val for val in ranked_dict2.values()])/len(ranked_dict2)
num, d_x, d_y = (0., 0., 0.)
for key in ranked_dict1.keys():
xi = ranked_dict1[key]
yi = ranked_dict2[key]
num += (xi-x_avg)*(yi-y_avg)
d_x += (xi-x_avg)**2
d_y += (yi-y_avg)**2
return num/(math.sqrt(d_x*d_y))
| [
{
"point_num": 1,
"id": "every_function_under_20_lines",
"question": "Is every function in this file shorter than 20 lines?",
"answer": true
},
{
"point_num": 2,
"id": "all_params_annotated",
"question": "Does every function parameter in this file have a type annotation (excluding se... | 3 | eval/ranking.py | blackredscarf/pytorch-SkipGram |
import asyncio
import decimal
import unittest
class DecimalContextTest(unittest.TestCase):
def test_asyncio_task_decimal_context(self):
async def fractions(t, precision, x, y):
with decimal.localcontext() as ctx:
ctx.prec = precision
a = decimal.Decimal(x) / decimal.Decimal(y)
await asyncio.sleep(t)
b = decimal.Decimal(x) / decimal.Decimal(y ** 2)
return a, b
async def main():
r1, r2 = await asyncio.gather(
fractions(0.1, 3, 1, 3), fractions(0.2, 6, 1, 3))
return r1, r2
r1, r2 = asyncio.run(main())
self.assertEqual(str(r1[0]), '0.333')
self.assertEqual(str(r1[1]), '0.111')
self.assertEqual(str(r2[0]), '0.333333')
self.assertEqual(str(r2[1]), '0.111111')
| [
{
"point_num": 1,
"id": "has_nested_function_def",
"question": "Does this file contain any function defined inside another function?",
"answer": true
},
{
"point_num": 2,
"id": "all_params_annotated",
"question": "Does every function parameter in this file have a type annotation (exc... | 3 | Tools/python37/Lib/test/test_asyncio/test_context.py | xxroot/android_universal |
import re
from urlresolver import common
from urlresolver.plugins.lib import helpers
from urlresolver.resolver import UrlResolver, ResolverError
class RacatyResolver(UrlResolver):
name = "racaty"
domains = ['racaty.com']
pattern = '(?://|\.)(racaty\.com)/([0-9a-zA-Z]+)'
def __init__(self):
self.net = common.Net()
def get_media_url(self, host, media_id):
web_url = self.get_url(host, media_id)
headers = {'User-Agent': common.FF_USER_AGENT}
response = self.net.http_GET(web_url, headers=headers)
html = response.content
data = helpers.get_hidden(html)
headers['Cookie'] = response.get_headers(as_dict=True).get('Set-Cookie', '')
html = self.net.http_POST(response.get_url(), headers=headers, form_data=data).content
url = re.search('w-100 mt-4">\s+<a href="([^"]+)">', html)
if url:
return url.group(1).replace(' ', '%20')
raise ResolverError('File Not Found or removed')
def get_url(self, host, media_id):
return self._default_get_url(host, media_id, template='https://{host}/{media_id}')
@classmethod
def _is_enabled(cls):
return True
| [
{
"point_num": 1,
"id": "all_params_annotated",
"question": "Does every function parameter in this file have a type annotation (excluding self/cls)?",
"answer": false
},
{
"point_num": 2,
"id": "every_function_has_docstring",
"question": "Does every function in this file have a docst... | 3 | libs/urlresolver_plugins/racaty.py | manishrawat4u/plugin.video.bloimediaplayer |
from .node import Node
class LinkedList:
"""
create a linked list
"""
def __init__(self, iterable=[]):
"""Constructor for the LinkedList object"""
self.head = None
self._size = 0
if type(iterable) is not list:
raise TypeError('Invalid iterable')
for item in iterable:
self.insert(item)
def __repr__(self):
return f'<head> is {self.head.val}'
def __str__(self):
return self.__repr__
def __len__(self):
"""Return the size of the linked list"""
return self._size
def insert(self, val):
"""Add a value to the head of the linked list"""
self.head = Node(val, self.head)
self._size += 1
def find(self, val):
"""Find an item in the linked list"""
current = self.head
while current:
if current.val == val:
return True
current = current._next
return False
def append(self, val):
"""Append an item to the end of the linked list"""
if self.head is None:
self.insert(val)
else:
current = self.head
while current:
if current._next is None:
current._next = Node(val)
self._size += 1
break
current = current._next
| [
{
"point_num": 1,
"id": "every_function_has_docstring",
"question": "Does every function in this file have a docstring?",
"answer": false
},
{
"point_num": 2,
"id": "no_function_exceeds_5_params",
"question": "Does every function in this file take 5 or fewer parameters (excluding sel... | 3 | interviews/is_palindrome/linked_list.py | zarkle/data-structures-and-algorithms |
#coding=utf-8
import requests
import tempfile
import os
import re
import random
import time
import datetime
#from bs4 import BeautifulSoup
from collections import defaultdict
from flask import Flask, request, abort
import sqlite3
from linebot import LineBotApi, WebhookHandler
from linebot.exceptions import InvalidSignatureError
from linebot.exceptions import LineBotApiError
from linebot.models import *
from bs4 import BeautifulSoup
app = Flask(__name__)
#bear
'''
line_bot_api = LineBotApi('cRMF7+DwEKVhAwHC0EB7oBQqtEZ09thaY6hLFnIrKy25hE385Al8RoxMxTN+VCsdYOIjGDwpLqXoxR60gdIvithhKyaSXgUBYL2V8k+/aDS//tdw4wk578xqutoHJEdLJxL9GkRKFZ0M8VbSHiDC5gdB04t89/1O/w1cDnyilFU=')
handler = WebhookHandler('9b57c56f50bb020a8eda208c96b2c59c')
'''
#nini
line_bot_api = LineBotApi('/gYOVdyGfdrePeWzKes5zWkvwlJ42148waHkXu4/V4wdwlfgFHh1TcBNKvLYjvLUf2jkJ0SJ3TPK0eZ8s+wIec+kPjmrN9H7S5c+EonV4T5GK5/eo+uZLL1vcp1VgmCU0EL6kk2nmiKd7Ce7nVdSkAdB04t89/1O/w1cDnyilFU=')
handler = WebhookHandler('d6c704bba551d5e05c71b8694416e89a')
@app.route("/callback", methods=['POST'])
def callback():
# get X-Line-Signature header value
signature = request.headers['X-Line-Signature']
# get request body as text
body = request.get_data(as_text=True)
# print("body:",body)
app.logger.info("Request body: " + body)
# handle webhook body
try:
handler.handle(body, signature)
except InvalidSignatureError:
abort(400)
return 'OK'
@handler.add(MemberJoinEvent)
def handle_memberJoined(event):
print(event)
if __name__ == '__main__':
# app.run(host='0.0.0.0',port=5000)
app.run(port=5001)
#app.run(host='0.0.0.0',port=5000,ssl_context=context)
# app.run()
| [
{
"point_num": 1,
"id": "every_function_has_docstring",
"question": "Does every function in this file have a docstring?",
"answer": false
},
{
"point_num": 2,
"id": "has_nested_function_def",
"question": "Does this file contain any function defined inside another function?",
"ans... | 3 | test.py | adgj5472/python-linebot-sdk |
import math
class CustomType(type):
def __new__(mcls, name, bases, class_dict):
print(f'Using custom metaclass {mcls} to create class {name}...')
cls_obj = super().__new__(mcls, name, bases, class_dict)
cls_obj.circ = lambda self: 2 * math.pi * self.r
return cls_obj
class Circle(metaclass=CustomType):
def __init__(self, x, y, r):
self.x = x
self.y = y
self.r = r
def area(self):
return math.pi * self.r ** 2
# Using custom metaclass <class '__main__.CustomType'> to create class Circle...
c = Circle(0, 0, 1)
print(c.area())
print(c.circ()) | [
{
"point_num": 1,
"id": "every_function_under_20_lines",
"question": "Is every function in this file shorter than 20 lines?",
"answer": true
},
{
"point_num": 2,
"id": "all_function_names_snake_case",
"question": "Are all function names in this file written in snake_case?",
"answ... | 3 | Back-End/Python/Basics/Part -4- OOP/07 - Metaprogramming/04_metaclass.py | ASHISHKUMAR2411/Programming-CookBook |
import pytest
from adventofcode.year_2016.day_09_2016 import part_one
from adventofcode.year_2016.day_09_2016 import part_two
@pytest.mark.parametrize(
["line", "expected"],
[
("ADVENT", 6),
("A(1x5)BC", 7),
("(3x3)XYZ", 9),
("A(2x2)BCD(2x2)EFG", 11),
("(6x1)(1x3)A", 6),
("X(8x2)(3x3)ABCY", 18),
],
)
def test_part_one(line, expected):
assert expected == part_one([line])
@pytest.mark.parametrize(
["line", "expected"],
[
("(3x3)XYZ", 9),
("X(8x2)(3x3)ABCY", 20),
("(27x12)(20x12)(13x14)(7x10)(1x12)A", 241920),
("(25x3)(3x3)ABC(2x3)XY(5x2)PQRSTX(18x9)(3x2)TWO(5x7)SEVEN", 445),
],
)
def test_part_two(line, expected):
assert expected == part_two([line])
| [
{
"point_num": 1,
"id": "no_function_exceeds_5_params",
"question": "Does every function in this file take 5 or fewer parameters (excluding self/cls)?",
"answer": true
},
{
"point_num": 2,
"id": "all_params_annotated",
"question": "Does every function parameter in this file have a ty... | 3 | tests/adventofcode/year_2016/test_day_09_2016.py | Frazzer951/Advent-Of-Code |
'''
Description:
email: 359066432@qq.com
Author: lhj
software: vscode
Date: 2021-09-19 17:28:48
platform: windows 10
LastEditors: lhj
LastEditTime: 2021-09-20 20:01:05
'''
from dataclasses import dataclass
@dataclass
class UserBriefInfo(object):
user_id:str
user_name:str
@classmethod
def from_user(cls,user):
return cls(user_id=user.id,user_name=user.username)
@property
def cache_permission_key(self):
return f"user:{self.user_name}:permissions"
@property
def cache_roles_key(self):
return f"user:{self.user_name}:roles" | [
{
"point_num": 1,
"id": "all_function_names_snake_case",
"question": "Are all function names in this file written in snake_case?",
"answer": true
},
{
"point_num": 2,
"id": "has_multiple_inheritance",
"question": "Does any class in this file use multiple inheritance?",
"answer": ... | 3 | TianJiPlanBackend/core/utils.py | weridolin/tianji-plan |
# ------------------------------------------------------------------------------------------------------
# Copyright (c) Leo Hanisch. All rights reserved.
# Licensed under the BSD 3-Clause License. See LICENSE.txt in the project root for license information.
# ------------------------------------------------------------------------------------------------------
# pylint: disable=too-many-instance-attributes
import logging
from copy import deepcopy
import numpy as np
from .whale import Whale
from ..util.base_visualizer import BaseVisualizer
from ..util.problem_base import ProblemBase
LOGGER = logging.getLogger(__name__)
class WOAProblem(ProblemBase):
def __init__(self, **kwargs):
"""
Initialize a new whale optimization algorithm problem.
"""
super().__init__(**kwargs)
self.__iteration_number = kwargs['iteration_number']
self.__whales = [
Whale(**kwargs, bit_generator=self._random)
for _ in range(kwargs['whales'])
]
self._visualizer = BaseVisualizer(**kwargs)
# Initialize visualizer for plotting
positions = [whale.position for whale in self.__whales]
self._visualizer.add_data(positions=positions)
def solve(self) -> Whale:
global_best_whale = None
# And also update global_best_whale
for _ in range(self.__iteration_number):
# Update global best
global_best_whale = np.amin(self.__whales)
random_whales = deepcopy(self._random.choice(self.__whales, size=len(self.__whales)))
for whale, random_whale in zip(self.__whales, random_whales):
whale.step(global_best_whale, random_whale)
# Add data for plot
self._visualizer.add_data(positions=[whale.position for whale in self.__whales])
global_best_whale = np.amin(self.__whales)
LOGGER.info('Last best solution="%s" at position="%s"', global_best_whale.value, global_best_whale.position)
return global_best_whale
| [
{
"point_num": 1,
"id": "every_function_under_20_lines",
"question": "Is every function in this file shorter than 20 lines?",
"answer": true
},
{
"point_num": 2,
"id": "every_function_has_docstring",
"question": "Does every function in this file have a docstring?",
"answer": fals... | 3 | swarmlib/woa/woa_problem.py | HaaLeo/ant-colony-optimization |
import pytest
import logging
import sys
LOG_LEVELS = ("DEBUG", "INFO", "WARNING", "ERROR", "CRITICAL")
logger = logging.getLogger()
logger.setLevel(logging.NOTSET)
logger.propagate = True
stdout_handler = logging.StreamHandler(sys.stdout)
logger.addHandler(stdout_handler)
logging.getLogger("faker").setLevel(logging.ERROR)
@pytest.fixture
def error_fixture():
assert 0
def test_i_ok():
print("ok")
def test_ii_fail():
assert 0
def test_iii_error(error_fixture):
pass
def test_iv_skip():
pytest.skip("skipping this test")
def test_v_xfail():
pytest.xfail("xfailing this test")
def test_vi_fail_compare_dicts_for_pytest_icdiff():
listofStrings = ["Hello", "hi", "there", "at", "this"]
listofInts = [7, 10, 45, 23, 77]
assert len(listofStrings) == len(listofInts)
assert listofStrings == listofInts
@pytest.mark.xfail(reason="always xfail")
def test_vi_xpass():
pass
| [
{
"point_num": 1,
"id": "every_function_has_docstring",
"question": "Does every function in this file have a docstring?",
"answer": false
},
{
"point_num": 2,
"id": "all_function_names_snake_case",
"question": "Are all function names in this file written in snake_case?",
"answer"... | 3 | tests/test_pytest_fold_2.py | jeffwright13/pytest-fold |
import Anreal
class RendererBuildDesc(Anreal.BuildDesc) :
def SetDependency(self) :
self.DependencyList.append("Core")
self.DependencyList.append("RHI")
def SetOther(self) :
self.ModuleName = "Renderer"
def GetBuildDesc() :
return RendererBuildDesc()
| [
{
"point_num": 1,
"id": "all_function_names_snake_case",
"question": "Are all function names in this file written in snake_case?",
"answer": false
},
{
"point_num": 2,
"id": "every_function_has_docstring",
"question": "Does every function in this file have a docstring?",
"answer"... | 3 | Engine/Source/Runtime/Renderer/RendererBuild.py | zxwnstn/AnrealEngine |
class Node:
def __init__(self, value):
self.value = value
self.next = None
class LinkedList:
def __init__(self, init_list: list = None):
self.head = None
if init_list:
for value in init_list:
self.append(value)
def append(self, value):
if self.head is None:
self.head = Node(value)
return
# Move to the tail (the last node)
node = self.head
while node.next:
node = node.next
node.next = Node(value)
return
list_with_loop = LinkedList([2, -1, 3, 0, 5])
# Creating a loop where the last node points back to the second node
loop_start = list_with_loop.head.next
node = list_with_loop.head
while node.next:
node = node.next
node.next = loop_start
def iscircular(linked_list):
"""
Determine whether the Linked List is circular or not
Args:
linked_list(obj): Linked List to be checked
Returns:
bool: Return True if the linked list is circular, return False otherwise
"""
if linked_list.head is None:
return False
slow = linked_list.head
fast = linked_list.head
# doing just while fast, you can reach the end and hit an attribution error
while fast and fast.next:
# slow pointer moves one node
slow = slow.next
# fast pointer moves two nodes
fast = fast.next.next
if slow == fast:
return True
# If we get to a node where fast doesn't have a next node or doesn't exist itself,
# the list has an end and isn't circular
return False
| [
{
"point_num": 1,
"id": "all_params_annotated",
"question": "Does every function parameter in this file have a type annotation (excluding self/cls)?",
"answer": false
},
{
"point_num": 2,
"id": "no_function_exceeds_5_params",
"question": "Does every function in this file take 5 or fe... | 3 | 3. data_structures/linked_list/linked_lists_circular.py | sourcery-ai-bot/udacity-datastructures-algorithms |
def get_sidereal_time(time: float, date: (int, int, int), longitude: float) -> float:
year, month, day = date
# Calculate the Julian Day
A = int(year/100)
B = 2 - A + int(A/4)
jd = int(365.25*(year + 4716)) + int(30.6001*(month + 1)) + day + B - 1524.5
# Calculate Greenwich Sidereal Time
T = (jd + time/24.0 - 2451545.0)/36525.0
qo = 280.46061837 + 360.98564736629 * (jd -2451545.0) + 0.000387933 * T**2 - T**3/38710000
# Calculate Local Sidereal Time
q = qo + longitude
return q
def get_local_hour_angle(lst: float, right_ascension: float) -> float:
return lst - right_ascension
def get_coordinates(latitude: float, declination: float, lha: float) -> (float, float):
pass
if __name__ == "__main__":
currtime = 23.87778
currdate = (2020, 3, 8)
currlong = -114.093810
ljd = get_sidereal_time(currtime, currdate, currlong)
print(f"Local Sidereal Time: {ljd}") | [
{
"point_num": 1,
"id": "all_function_names_snake_case",
"question": "Are all function names in this file written in snake_case?",
"answer": true
},
{
"point_num": 2,
"id": "no_function_exceeds_5_params",
"question": "Does every function in this file take 5 or fewer parameters (exclu... | 3 | skyfall.py | jopetty/skyfall |
from typing import List, Union, Tuple
from lab.logger.colors import StyleCode
class Destination:
def log(self, parts: List[Union[str, Tuple[str, StyleCode]]], *,
is_new_line=True):
raise NotImplementedError()
def new_line(self):
raise NotImplementedError()
| [
{
"point_num": 1,
"id": "every_function_has_docstring",
"question": "Does every function in this file have a docstring?",
"answer": false
},
{
"point_num": 2,
"id": "more_functions_than_classes",
"question": "Does this file define more functions than classes?",
"answer": true
}... | 3 | lab/logger/destinations/__init__.py | gear/lab |
# This file is autogenerated. Do not edit it manually.
# If you want change the content of this file, edit
#
# spec/fixtures/responses/whois.museum/status_registered
#
# and regenerate the tests with the following script
#
# $ scripts/generate_tests.py
#
from nose.tools import *
from dateutil.parser import parse as time_parse
import yawhois
class TestWhoisMuseumStatusRegistered(object):
def setUp(self):
fixture_path = "spec/fixtures/responses/whois.museum/status_registered.txt"
host = "whois.museum"
part = yawhois.record.Part(open(fixture_path, "r").read(), host)
self.record = yawhois.record.Record(None, [part])
def test_status(self):
eq_(self.record.status, 'registered')
def test_available(self):
eq_(self.record.available, False)
def test_nameservers(self):
eq_(self.record.nameservers.__class__.__name__, 'list')
eq_(len(self.record.nameservers), 2)
eq_(self.record.nameservers[0].__class__.__name__, 'Nameserver')
eq_(self.record.nameservers[0].name, "nic.museum")
eq_(self.record.nameservers[0].ipv4, "130.242.24.5")
eq_(self.record.nameservers[0].ipv6, None)
eq_(self.record.nameservers[1].__class__.__name__, 'Nameserver')
eq_(self.record.nameservers[1].name, "nic.frd.se")
eq_(self.record.nameservers[1].ipv4, None)
eq_(self.record.nameservers[1].ipv6, None)
def test_registered(self):
eq_(self.record.registered, True)
def test_created_on(self):
eq_(self.record.created_on.__class__.__name__, 'datetime')
eq_(self.record.created_on, time_parse('2001-11-10 15:23:42 UTC'))
def test_updated_on(self):
eq_(self.record.updated_on.__class__.__name__, 'datetime')
eq_(self.record.updated_on, time_parse('2002-04-04 17:48:43 UTC'))
def test_expires_on(self):
eq_(self.record.expires_on.__class__.__name__, 'datetime')
eq_(self.record.expires_on, time_parse('2003-07-31 11:00:00 UTC'))
| [
{
"point_num": 1,
"id": "has_nested_function_def",
"question": "Does this file contain any function defined inside another function?",
"answer": false
},
{
"point_num": 2,
"id": "every_function_under_20_lines",
"question": "Is every function in this file shorter than 20 lines?",
... | 3 | test/record/parser/test_response_whois_museum_status_registered.py | huyphan/pyyawhois |
import os
class UserHandler:
def __init__(self, user):
self.user = user
def check_user_dir(self):
path = f"home/{self.user}"
if not os.path.exists(path):
os.mkdir(path)
return f"/{path}"
else:
return f"/{path}"
| [
{
"point_num": 1,
"id": "every_function_under_20_lines",
"question": "Is every function in this file shorter than 20 lines?",
"answer": true
},
{
"point_num": 2,
"id": "every_function_has_docstring",
"question": "Does every function in this file have a docstring?",
"answer": fals... | 3 | src/users.py | novus-alex/PyShell |
import pandas as pd
import numpy as np
import gc
import os
from pathlib import Path
p = Path(__file__).parents[1]
ROOT_DIR=os.path.abspath(os.path.join(p, '..', 'data/raw/'))
def convert(data, num_users, num_movies):
''' Making a User-Movie-Matrix'''
new_data=[]
for id_user in range(1, num_users+1):
id_movie=data[:,1][data[:,0]==id_user]
id_rating=data[:,2][data[:,0]==id_user]
ratings=np.zeros(num_movies, dtype=np.uint32)
ratings[id_movie-1]=id_rating
if sum(ratings)==0:
continue
new_data.append(ratings)
del id_movie
del id_rating
del ratings
return new_data
def get_dataset_1M():
''' For each train.dat and test.dat making a User-Movie-Matrix'''
gc.enable()
training_set=pd.read_csv(ROOT_DIR+'/ml-1m/train.dat', sep='::', header=None, engine='python', encoding='latin-1')
training_set=np.array(training_set, dtype=np.uint32)
test_set=pd.read_csv(ROOT_DIR+'/ml-1m/test.dat', sep='::', header=None, engine='python', encoding='latin-1')
test_set=np.array(test_set, dtype=np.uint32)
num_users=int(max(max(training_set[:,0]), max(test_set[:,0])))
num_movies=int(max(max(training_set[:,1]), max(test_set[:,1])))
training_set=convert(training_set,num_users, num_movies)
test_set=convert(test_set,num_users, num_movies)
return training_set, test_set
def _get_dataset():
return get_dataset_1M()
| [
{
"point_num": 1,
"id": "every_function_has_docstring",
"question": "Does every function in this file have a docstring?",
"answer": false
},
{
"point_num": 2,
"id": "no_function_exceeds_5_params",
"question": "Does every function in this file take 5 or fewer parameters (excluding sel... | 3 | src/data/preprocess_data.py | artem-oppermann/Deep-Autoencoders-For-Collaborative-Filtering |
# This file is part of BenchExec, a framework for reliable benchmarking:
# https://github.com/sosy-lab/benchexec
#
# SPDX-FileCopyrightText: 2007-2020 Dirk Beyer <https://www.sosy-lab.org>
#
# SPDX-License-Identifier: Apache-2.0
import benchexec.util as util
import benchexec.tools.template
class Tool(benchexec.tools.template.BaseTool):
def executable(self):
return util.find_executable("ufo.sh")
def name(self):
return "Ufo"
def determine_result(self, returncode, returnsignal, output, isTimeout):
output = "\n".join(output)
if returnsignal == 9 or returnsignal == (128 + 9):
if isTimeout:
status = "TIMEOUT"
else:
status = "KILLED BY SIGNAL 9"
elif returncode == 1 and "program correct: ERROR unreachable" in output:
status = "SAFE"
elif returncode != 0:
status = f"ERROR ({returncode})"
elif "ERROR reachable" in output:
status = "UNSAFE"
elif "program correct: ERROR unreachable" in output:
status = "SAFE"
else:
status = "FAILURE"
return status
| [
{
"point_num": 1,
"id": "more_functions_than_classes",
"question": "Does this file define more functions than classes?",
"answer": true
},
{
"point_num": 2,
"id": "no_function_exceeds_5_params",
"question": "Does every function in this file take 5 or fewer parameters (excluding self/... | 3 | benchexec/tools/ufo.py | SvenUmbricht/benchexec |
from flask import Blueprint, request
from app.spiders.core import *
from app.utils import build_result
from app.constants import code
core = Blueprint('core', __name__)
@core.route('/login', methods=['POST'])
def login():
data = request.form
username = data.get('username')
password = data.get('password')
return core_login(username, password)
@core.route('/book_borrow_info', methods=['GET'])
def book_borrow_info():
token = request.args.get('token')
return get_book_borrow_info(token)
@core.route('/trans_list', methods=['GET'])
def trans_list():
token = request.args.get('token')
return get_trans_list(token)
@core.route('/tel_book', methods=['GET'])
def tel_book():
department_id = request.args.get('department_id')
return get_tel_book(department_id)
| [
{
"point_num": 1,
"id": "every_function_under_20_lines",
"question": "Is every function in this file shorter than 20 lines?",
"answer": true
},
{
"point_num": 2,
"id": "has_nested_function_def",
"question": "Does this file contain any function defined inside another function?",
"... | 3 | app/api/core.py | Aneureka/njuapi |
from abc import ABC
from enum import Enum
from erica.domain.Shared.BaseDomainModel import BasePayload
class StateAbbreviation(str, Enum):
bw = "bw"
by = "by"
be = "be"
bb = "bb"
hb = "hb"
hh = "hh"
he = "he"
mv = "mv"
nd = "nd"
nw = "nw"
rp = "rp"
sl = "sl"
sn = "sn"
st = "st"
sh = "sh"
th = "th"
# To find the correct values case insensitively
@classmethod
def _missing_(cls, value):
for member in cls:
if member.lower() == value.lower():
return member
class CheckTaxNumberPayload(BasePayload, ABC):
state_abbreviation: StateAbbreviation
tax_number: str
| [
{
"point_num": 1,
"id": "has_multiple_inheritance",
"question": "Does any class in this file use multiple inheritance?",
"answer": true
},
{
"point_num": 2,
"id": "more_functions_than_classes",
"question": "Does this file define more functions than classes?",
"answer": false
},... | 3 | erica/domain/tax_number_validation/check_tax_number.py | digitalservice4germany/erica |
import codecs
import yaml
from .filebased import FileBasedSource
__all__ = (
'YamlFileSource',
)
class YamlFileSource(FileBasedSource):
def __init__(self, *args, **kwargs):
self.encoding = kwargs.pop('encoding', 'utf-8')
super(YamlFileSource, self).__init__(*args, **kwargs)
def get_settings_from_file(self, file_path, settings, manager=None):
content = codecs.open(file_path, 'r', self.encoding).read().strip()
if not content:
return None
content = yaml.safe_load(content)
if not content:
return None
if not isinstance(content, dict):
raise TypeError('YAML files must contain only mappings')
for setting in settings:
if setting.name in content:
setting.value = content[setting.name]
return settings
| [
{
"point_num": 1,
"id": "every_function_has_docstring",
"question": "Does every function in this file have a docstring?",
"answer": false
},
{
"point_num": 2,
"id": "has_multiple_inheritance",
"question": "Does any class in this file use multiple inheritance?",
"answer": false
... | 3 | src/djangoreactredux/djrenv/lib/python3.5/site-packages/setoptconf/source/yamlfile.py | m2jobe/c_x |
import pytest
import tempfile
import zipfile
import zipfile_deflate64
from pathlib import Path
from skultrafast.quickcontrol import QC1DSpec, QC2DSpec, parse_str, QCFile
from skultrafast.data_io import get_example_path, get_twodim_dataset
def test_parse():
assert (parse_str('-8000.000000') == -8000.0)
assert (parse_str('75') == 75)
assert (parse_str('TRUE') == True)
assert (parse_str('FALSE') == False)
flist = '-8000.000000,-7950.000000,-7900.000000,-7850.000000'
res = parse_str(flist)
assert (isinstance(res, list))
assert (res[0] == -8000)
assert (len(res) == 4)
@pytest.fixture(scope='session')
def datadir(tmp_path_factory):
p = get_example_path('quickcontrol')
tmp = tmp_path_factory.mktemp("data")
zipfile.ZipFile(p).extractall(tmp)
return tmp
@pytest.fixture(scope='session')
def datadir2d(tmp_path_factory):
p = get_twodim_dataset()
return p
def test_info(datadir):
qc = QCFile(fname=datadir / '20201029#07')
def test_1d(datadir):
qc = QC1DSpec(fname=datadir / '20201029#07')
assert (qc.par_data.shape == qc.per_data.shape)
assert (qc.par_data.shape[1] == len(qc.t))
assert (qc.par_data.shape[2] == 128)
ds = qc.make_pol_ds()
ds.plot.spec(1)
def test_2d(datadir2d):
infos = list(Path(datadir2d).glob('*320.info'))
ds = QC2DSpec(infos[0])
ds.make_ds()
| [
{
"point_num": 1,
"id": "all_function_names_snake_case",
"question": "Are all function names in this file written in snake_case?",
"answer": true
},
{
"point_num": 2,
"id": "all_params_annotated",
"question": "Does every function parameter in this file have a type annotation (excludi... | 3 | skultrafast/tests/test_quickcontrol.py | Tillsten/skultrafast |
"""Utility functions"""
import json
import os
import datetime
import logging
PROJECT_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
BASE_DIR = os.path.dirname(PROJECT_DIR)
def load_config_json(name):
json_path = os.path.abspath(os.path.join(BASE_DIR, 'microservice')) + '/' + name + '.json'
with open(json_path) as data_file:
info = json.load(data_file)
return info
def delete_old_files(path="/opt/composite/tmp_imgs/"):
"""Delete old image files if necessary (>5min old)"""
flist = os.listdir(path)
now = datetime.datetime.now().timestamp()
if len(flist) > 0:
for file in flist:
tmp = os.path.getmtime(f'{path}{file}')
logging.info( f'[Utilities] Removing {path}{file} {tmp}')
if (now - tmp) > (60 * 5): # 5 min delay on erasure
print(f'Erasing {path}{file}')
os.remove(f'{path}{file}')
return None
else:
return None | [
{
"point_num": 1,
"id": "has_nested_function_def",
"question": "Does this file contain any function defined inside another function?",
"answer": false
},
{
"point_num": 2,
"id": "every_function_has_docstring",
"question": "Does every function in this file have a docstring?",
"ans... | 3 | composite/utils/files.py | Skydipper/Composite |
from sqlalchemy.orm.exc import NoResultFound
from zeeguu_core.model import User, Language, UserWord, Text, Bookmark
def own_or_crowdsourced_translation(user, word: str, from_lang_code: str, context: str):
own_past_translation = get_own_past_translation(user, word, from_lang_code, context)
if own_past_translation:
translations = [{'translation': own_past_translation,
'service_name': 'Own Last Translation',
'quality': 100}]
return translations
others_past_translation = get_others_past_translation(word, from_lang_code, context)
if others_past_translation:
translations = [{'translation': others_past_translation,
'service_name': 'Contributed Translation',
'quality': 100}]
return translations
return None
def get_others_past_translation(word: str, from_lang_code: str, context: str):
return _get_past_translation(word, from_lang_code, context)
def get_own_past_translation(user, word: str, from_lang_code: str, context: str):
return _get_past_translation(word, from_lang_code, context, user)
def _get_past_translation(word: str, from_lang_code: str, context: str, user: User = None):
try:
from_language = Language.find(from_lang_code)
origin_word = UserWord.find(word, from_language)
text = Text.query.filter_by(content=context).one()
query = Bookmark.query.filter_by(origin_id=origin_word.id, text_id=text.id)
if user:
query = query.filter_by(user_id=user.id)
# prioritize older users
query.order_by(Bookmark.user_id.asc())
return query.first().translation.word
except Exception as e:
print(e)
return None
| [
{
"point_num": 1,
"id": "has_nested_function_def",
"question": "Does this file contain any function defined inside another function?",
"answer": false
},
{
"point_num": 2,
"id": "all_params_annotated",
"question": "Does every function parameter in this file have a type annotation (ex... | 3 | zeeguu_core/crowd_translations/__init__.py | C0DK/Zeeguu-Core |
import logging
import time
from threading import Event
from watchdog.observers import Observer
from .OutputEventHandler import OutputEventHandler
class FileSystemObserver(object):
def __init__(self, test_output_dir):
self.test_output_dir = test_output_dir
# Start observing output dir
self.done_event = Event()
self.event_handler = OutputEventHandler(self.done_event)
self.observer = Observer()
self.observer.schedule(self.event_handler, self.test_output_dir, recursive=True)
self.observer.start()
def get_output_dir(self):
return self.test_output_dir
def restart_observer_if_needed(self):
if self.observer.is_alive():
return
self.observer = Observer()
self.done_event.clear()
self.observer.schedule(self.event_handler, self.test_output_dir, recursive=True)
self.observer.start()
def wait_for_output(self, timeout_seconds, output_validator, max_files):
logging.info('Waiting up to %d seconds for %d test outputs...', timeout_seconds, max_files)
self.restart_observer_if_needed()
wait_start_time = time.perf_counter()
for i in range(0, max_files):
# Note: The timing on Event.wait() is inaccurate
self.done_event.wait(timeout_seconds)
self.done_event.clear()
current_time = time.perf_counter()
if timeout_seconds < (current_time - wait_start_time) or output_validator.validate():
break
self.observer.stop()
self.observer.join()
| [
{
"point_num": 1,
"id": "has_multiple_inheritance",
"question": "Does any class in this file use multiple inheritance?",
"answer": false
},
{
"point_num": 2,
"id": "more_functions_than_classes",
"question": "Does this file define more functions than classes?",
"answer": true
},... | 3 | docker/test/integration/minifi/core/FileSystemObserver.py | galshi/nifi-minifi-cpp |
#! /usr/bin/env python
# encoding: utf-8
# WARNING! Do not edit! https://waf.io/book/index.html#_obtaining_the_waf_file
class MandatoryOptions(object):
def __init__(self,options):
self.options=options
def __getattr__(self,name):
call=getattr(self.options,name)
def require(*args,**kwargs):
value=call(*args,**kwargs)
if not value:
raise RuntimeError("WTF Dude")
return value
return require
| [
{
"point_num": 1,
"id": "more_functions_than_classes",
"question": "Does this file define more functions than classes?",
"answer": true
},
{
"point_num": 2,
"id": "has_multiple_inheritance",
"question": "Does any class in this file use multiple inheritance?",
"answer": false
},... | 3 | .waf-1.9.8-6657823688b736c1d1a4e2c4e8e198b4/waflib/extras/wurf/mandatory_options.py | looopTools/sw9-source |
import crypto_tools
import crypto_native
def openssl_aes_128_little_doc():
return "short example of encryption and decryption using openssl"
def openssl_aes_128_full_doc():
return """
openssl_aes_128_full_doc
"""
def openssl_aes_128_pre_processing(key, iv):
if len(key) > 16:
raise ValueError(f"Too big key. Max len required: 16")
else:
crypto_tools.supl_to_mult(key, 16)
if len(iv) > 16:
raise ValueError(f"Too big initialization vector. Max len required: 16")
else:
crypto_tools.supl_to_mult(iv, 16)
def openssl_aes_128_processing(data, key, iv, mode, encrypt):
openssl_aes_128_pre_processing(key, iv)
crypto_native.openssl_api_init()
result = crypto_native.openssl_api_aes_128(data, key, iv, mode, encrypt)
return result
@crypto_tools.file_manipulation()
def openssl_aes_128(data):
if data.__class__ == str:
data = bytearray(data, "utf-8")
key = crypto_tools.cterm('input', 'Enter key(str): ', 'ans')
if key.__class__ == str:
key = bytearray(key.encode())
iv = crypto_tools.cterm('input', 'Enter initialization vector(str): ', 'ans')
if iv.__class__ == str:
iv = bytearray(iv.encode())
mode = crypto_tools.cterm(
'input',
'Enter mode(CBC(Cipher Block Chaining)|CFB(Cipher Feedback)): ',
'ans'
)
if mode not in ["CBC", "CFB"]:
raise ValueError(f"Incorrect mode: {mode}")
encrypt = crypto_tools.cterm('input',
'You want encrypt or decrypt: ', 'ans')
if encrypt not in ["decrypt", "encrypt"]:
raise ValueError("Incorrect type")
res_data = openssl_aes_128_processing(data, key, iv, mode, encrypt)
if encrypt == "encrypt":
result_str = res_data
else:
result_str = res_data.decode()
return result_str
openssl_aes_128.little_doc = openssl_aes_128_little_doc
openssl_aes_128.full_doc = openssl_aes_128_full_doc
| [
{
"point_num": 1,
"id": "no_function_exceeds_5_params",
"question": "Does every function in this file take 5 or fewer parameters (excluding self/cls)?",
"answer": true
},
{
"point_num": 2,
"id": "all_params_annotated",
"question": "Does every function parameter in this file have a ty... | 3 | algo/openssl_aes_128.py | dkushche/Crypto |
def indeterminados_posicion(*args):
# print(args)
for arg in args:
print(arg)
indeterminados_posicion(5,"hola a todos", [1,2,4])
def indeterminados_nombre(**kwargs):
# print(kwargs)
for kwarg in kwargs:
print("clave: {}, valor: {}".format(kwarg,kwargs[kwarg]))
indeterminados_nombre(n=5,c="hola a todos", l=[1,2,4])
def super_funcion(*args, **kwargs):
t = 0
for arg in args:
t += arg
print("Sumatorio indeterminado", t)
for kwarg in kwargs:
print("clave: {}, valor: {}".format(kwarg,kwargs[kwarg]))
super_funcion(10,50,-1,1.56, nombre="Hector",edad=22)
| [
{
"point_num": 1,
"id": "all_function_names_snake_case",
"question": "Are all function names in this file written in snake_case?",
"answer": true
},
{
"point_num": 2,
"id": "every_function_has_docstring",
"question": "Does every function in this file have a docstring?",
"answer":... | 3 | curso_hector/08-funciones.py/parametros_indeterminados.py | corahama/python |
"""
Flake8 plugin to encourage correct string literal concatenation.
Forbid implicitly concatenated string literals on one line such as those
introduced by Black.
Forbid all explicitly concatenated strings, in favour of implicit concatenation.
"""
from __future__ import generator_stop
import ast
import tokenize
from typing import Iterable, List, Tuple
import attr
import more_itertools
__all__ = ["__version__", "Checker"]
__version__ = "0.1.0"
_ERROR = Tuple[int, int, str, None]
def _implicit(file_tokens: Iterable[tokenize.TokenInfo]) -> Iterable[_ERROR]:
return (
(
*a.end,
"ISC001 implicitly concatenated string literals on one line"
if a.end[0] == b.start[0]
else "ISC002 implicitly concatenated string literals "
"over continuation line",
None,
)
for (a, b) in more_itertools.pairwise(file_tokens)
if a.type == b.type == tokenize.STRING
)
def _explicit(root_node: ast.AST) -> Iterable[_ERROR]:
return (
(
node.lineno,
node.col_offset,
"ISC003 explicitly concatenated string should be implicitly concatenated",
None,
)
for node in ast.walk(root_node)
if isinstance(node, ast.BinOp)
and isinstance(node.op, ast.Add)
and all(
isinstance(operand, (ast.Str, ast.Bytes, ast.JoinedStr))
for operand in [node.left, node.right]
)
)
@attr.s(frozen=True, auto_attribs=True)
class Checker:
name = __name__
version = __version__
tree: ast.AST
file_tokens: List[tokenize.TokenInfo]
def run(self) -> Iterable[_ERROR]:
yield from _implicit(self.file_tokens)
yield from _explicit(self.tree)
| [
{
"point_num": 1,
"id": "all_function_names_snake_case",
"question": "Are all function names in this file written in snake_case?",
"answer": true
},
{
"point_num": 2,
"id": "all_return_types_annotated",
"question": "Does every function in this file have a return type annotation?",
... | 3 | flake8_implicit_str_concat.py | graingert/flake8-implicit-str-concat |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.