source string | points list | n_points int64 | path string | repo string |
|---|---|---|---|---|
import cv2
def SIFT(imgname1, imgname2):
    """Match SIFT keypoints between two image files and return the match drawing.

    Detects SIFT keypoints/descriptors in both images, matches them with a
    FLANN KD-tree matcher, filters with Lowe's ratio test (0.70), and returns
    the image produced by cv2.drawMatchesKnn.
    """
    # cv2.SIFT_create() is the modern API (OpenCV >= 4.4, patent expired);
    # fall back to the old contrib location for older installations.
    try:
        sift = cv2.SIFT_create()
    except AttributeError:
        sift = cv2.xfeatures2d.SIFT_create()
    img1 = cv2.imread(imgname1)
    img2 = cv2.imread(imgname2)
    kp1, des1 = sift.detectAndCompute(img1, None)
    kp2, des2 = sift.detectAndCompute(img2, None)
    FLANN_INDEX_KDTREE = 0  # FLANN algorithm id: KD-tree index
    index_params = dict(algorithm=FLANN_INDEX_KDTREE, trees=5)
    search_params = dict(checks=50)
    flann = cv2.FlannBasedMatcher(index_params, search_params)
    matches = flann.knnMatch(des1, des2, k=2)
    # Lowe's ratio test: keep a match only when clearly better than the runner-up.
    good = [[m] for m, n in matches if m.distance < 0.70 * n.distance]
    img3 = cv2.drawMatchesKnn(img1, kp1, img2, kp2, good, None, flags=2)
    return img3
def ORB(imgname1, imgname2):
    """Match ORB keypoints between two image files and return the match drawing.

    Detects ORB keypoints/descriptors in both images, matches with a
    brute-force matcher, filters with a 0.8 ratio test, and returns the
    image produced by cv2.drawMatchesKnn.
    """
    orb = cv2.ORB_create()
    img1 = cv2.imread(imgname1)
    img2 = cv2.imread(imgname2)
    kp1, des1 = orb.detectAndCompute(img1, None)
    kp2, des2 = orb.detectAndCompute(img2, None)
    # BUG FIX: ORB descriptors are binary, so they must be compared with
    # Hamming distance; BFMatcher's default norm (NORM_L2) is meant for
    # float descriptors such as SIFT/SURF.
    bf = cv2.BFMatcher(cv2.NORM_HAMMING)
    matches = bf.knnMatch(des1, des2, k=2)
    # ratio test: keep matches clearly better than the second-best candidate
    good = [[m] for m, n in matches if m.distance < 0.8 * n.distance]
    img3 = cv2.drawMatchesKnn(img1, kp1, img2, kp2, good, None, flags=2)
    return img3
if __name__ == '__main__':
    # Demo: run both matchers on the same image pair, display the results,
    # and save them next to the script.
    imgname1 = '1.jpg'
    imgname2 = '2.jpg'
    img3 = SIFT(imgname1, imgname2)
    img4 = ORB(imgname1, imgname2)
    cv2.imshow("SIFT", img3)
    cv2.imwrite("sift.jpg",img3)
    cv2.imshow("ORB", img4)
    cv2.imwrite("orb.jpg",img4)
    cv2.waitKey(0)  # block until a key is pressed, then close windows
    cv2.destroyAllWindows()
| [
{
"point_num": 1,
"id": "all_params_annotated",
"question": "Does every function parameter in this file have a type annotation (excluding self/cls)?",
"answer": false
},
{
"point_num": 2,
"id": "every_function_under_20_lines",
"question": "Is every function in this file shorter than ... | 3 | MultiMediaAnalyse/Task2/main.py | wsh-nie/Assignments |
# Automatically generated by pb2py
# fmt: off
from .. import protobuf as p
if __debug__:
try:
from typing import Dict, List # noqa: F401
from typing_extensions import Literal # noqa: F401
except ImportError:
pass
class CosiCommit(p.MessageType):
    """Generated protobuf message (wire type 71) carrying a derivation path and a data payload."""
    MESSAGE_WIRE_TYPE = 71

    def __init__(
        self,
        address_n: List[int] = None,
        data: bytes = None,
    ) -> None:
        # normalize None to an empty path so callers can always iterate it
        self.address_n = address_n if address_n is not None else []
        self.data = data

    @classmethod
    def get_fields(cls) -> Dict:
        """Map field numbers to (name, wire type, flags) for (de)serialization."""
        return {
            1: ('address_n', p.UVarintType, p.FLAG_REPEATED),
            2: ('data', p.BytesType, 0),
        }
| [
{
"point_num": 1,
"id": "has_multiple_inheritance",
"question": "Does any class in this file use multiple inheritance?",
"answer": false
},
{
"point_num": 2,
"id": "every_function_under_20_lines",
"question": "Is every function in this file shorter than 20 lines?",
"answer": true... | 3 | monero_glue/messages/CosiCommit.py | ph4r05/monero-agent |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
import re
from pypinyin import phonetic_symbol
from pypinyin.constants import RE_TONE2
# 用于向后兼容,TODO: 废弃
from pypinyin.seg.simpleseg import simple_seg # noqa
def _replace_tone2_style_dict_to_default(string):
    """Replace TONE2-style phonetic symbols in *string* with their default-style equivalents."""
    # Drop the trailing '$' anchor so the pattern matches anywhere in the string.
    pattern = re.compile(RE_TONE2.pattern.replace('$', ''))
    reverse_map = phonetic_symbol.phonetic_symbol_reverse
    # Substitute each match via the reverse map; unknown symbols pass through unchanged.
    return pattern.sub(lambda m: reverse_map.get(m.group(0)) or m.group(0), string)
def _remove_dup_items(lst):
new_lst = []
for item in lst:
if item not in new_lst:
new_lst.append(item)
return new_lst
| [
{
"point_num": 1,
"id": "all_function_names_snake_case",
"question": "Are all function names in this file written in snake_case?",
"answer": true
},
{
"point_num": 2,
"id": "has_nested_function_def",
"question": "Does this file contain any function defined inside another function?",
... | 3 | pypinyin/utils.py | jamessa/python-pinyin |
def cuda(x):
    """Move *x* to the GPU: handles a list of items, a dict of items, or a single item with a .cuda() method."""
    if isinstance(x, dict):
        return {key: value.cuda() for key, value in x.items()}
    if isinstance(x, list):
        return [item.cuda() for item in x]
    return x.cuda()
def cpu(x):
    """Move *x* to the CPU: handles a list of items, a dict of items, or a single item with a .cpu() method."""
    if isinstance(x, dict):
        return {key: value.cpu() for key, value in x.items()}
    if isinstance(x, list):
        return [item.cpu() for item in x]
    return x.cpu()
| [
{
"point_num": 1,
"id": "every_function_has_docstring",
"question": "Does every function in this file have a docstring?",
"answer": false
},
{
"point_num": 2,
"id": "every_function_under_20_lines",
"question": "Is every function in this file shorter than 20 lines?",
"answer": tru... | 3 | ivory/torch/utils.py | daizutabi/ivory |
"""Barebones reader plugin example, using imageio.imread"""
from napari_plugin_engine import napari_hook_implementation
from imageio import formats, imread
readable_extensions = tuple(set(x for f in formats for x in f.extensions))
@napari_hook_implementation
def napari_get_reader(path):
    """A basic implementation of the napari_get_reader hook specification.

    Returns ``reader_function`` when *path* has an extension imageio can
    read, otherwise None to signal that this plugin cannot handle the file.
    """
    # if we know we cannot read the file, we immediately return None.
    if not path.endswith(readable_extensions):
        return None
    # otherwise we return the *function* that can read ``path``.
    return reader_function
def reader_function(path):
    """Take a path and returns a list of LayerData tuples."""
    data = imread(path)
    # Readers are expected to return data as a list of tuples, where each tuple
    # is (data, [meta_dict, [layer_type]]); metadata and layer type are omitted
    # here, so napari applies its defaults.
    return [(data,)]
| [
{
"point_num": 1,
"id": "every_function_has_docstring",
"question": "Does every function in this file have a docstring?",
"answer": true
},
{
"point_num": 2,
"id": "has_nested_function_def",
"question": "Does this file contain any function defined inside another function?",
"answ... | 3 | examples/reader_plugin.py | mkitti/napari |
def coding_problem_10():
    """
    Implement a job scheduler which takes in a function f and an integer n, and calls f after n milliseconds.
    Example:
    >>> coding_problem_10()
    Before
    Hello from thread
    After
    """
    from threading import Thread
    import time

    def delayed_execution(f, ms):
        # BUG FIX: time.sleep takes seconds, so convert the millisecond delay.
        time.sleep(ms / 1000)
        return f()

    def hello(name):
        print(f'Hello {name}')

    # schedule the greeting 10 ms in the future (the 0.02 s sleep below
    # guarantees it fires between "Before" and "After")
    job = Thread(target=delayed_execution, args=(lambda: hello('from thread'), 10))
    job.start()
    print('Before')
    time.sleep(0.02)
    print('After')
if __name__ == '__main__':
    # Run the doctest embedded in the function docstring above.
    import doctest
    doctest.testmod(verbose=True)
{
"point_num": 1,
"id": "every_function_under_20_lines",
"question": "Is every function in this file shorter than 20 lines?",
"answer": false
},
{
"point_num": 2,
"id": "has_nested_function_def",
"question": "Does this file contain any function defined inside another function?",
... | 3 | problems/10/solution_10.py | r1cc4rdo/daily_coding_problem |
# -*- coding: utf-8 -*-
from django.db import models, migrations
from pybb.models import create_or_check_slug
def fill_slugs(apps, schema_editor):
    """Forward data migration: generate a slug for every existing Category, Forum and Topic row."""
    # use the historical model state, not the current models module
    Category = apps.get_model("pybb", "Category")
    Forum = apps.get_model("pybb", "Forum")
    Topic = apps.get_model("pybb", "Topic")
    for category in Category.objects.all():
        category.slug = create_or_check_slug(instance=category, model=Category)
        category.save()
    # extra_filters scopes the slug check to the parent category —
    # TODO confirm create_or_check_slug uniqueness semantics
    for forum in Forum.objects.all():
        extra_filters = {"category": forum.category}
        forum.slug = create_or_check_slug(instance=forum, model=Forum, **extra_filters)
        forum.save()
    # likewise, topic slugs are checked within their parent forum
    for topic in Topic.objects.all():
        extra_filters = {"forum": topic.forum}
        topic.slug = create_or_check_slug(instance=topic, model=Topic, **extra_filters)
        topic.save()
def clear_slugs(apps, schema_editor):
    """Reverse data migration: blank out the slug field on all pybb models."""
    for model_name in ("Category", "Forum", "Topic"):
        apps.get_model("pybb", model_name).objects.all().update(slug="")
class Migration(migrations.Migration):
    """Data migration populating the slug fields introduced in 0002_slugs_optional."""
    dependencies = [
        ("pybb", "0002_slugs_optional"),
    ]
    operations = [
        # forward: fill_slugs; reverse: clear_slugs
        migrations.RunPython(fill_slugs, clear_slugs),
    ]
| [
{
"point_num": 1,
"id": "no_function_exceeds_5_params",
"question": "Does every function in this file take 5 or fewer parameters (excluding self/cls)?",
"answer": true
},
{
"point_num": 2,
"id": "has_multiple_inheritance",
"question": "Does any class in this file use multiple inherit... | 3 | pybb/migrations/0003_slugs_fill.py | shubhanshu02/pybbm |
from openbiolink.graph_creation.file_processor.fileProcessor import FileProcessor
from openbiolink.graph_creation.metadata_infile import InMetaOntoUberonPartOf
from openbiolink.graph_creation.types.infileType import InfileType
from openbiolink.graph_creation.types.readerType import ReaderType
class OntoUberonPartOfProcessor(FileProcessor):
    """File processor for 'part of' relations from the Uberon ontology reader."""
    IN_META_CLASS = InMetaOntoUberonPartOf

    def __init__(self):
        # column selection and mapping separator come from the in-file metadata class
        self.use_cols = self.IN_META_CLASS.USE_COLS
        super().__init__(self.use_cols, readerType=ReaderType.READER_ONTO_UBERON,
                         infileType=InfileType.IN_ONTO_UBERON_PART_OF, mapping_sep=self.IN_META_CLASS.MAPPING_SEP)

    def individual_postprocessing(self, data):
        """Keep only rows where both ID and PART_OF are UBERON: or CL: terms."""
        # bgee is only mapping on CL and UBERON terms
        data = data[data['ID'].str.startswith('UBERON:') | data['ID'].str.startswith('CL:') ]
        data = data[data['PART_OF'].str.startswith('UBERON:') | data['PART_OF'].str.startswith('CL:') ]
        return data
{
"point_num": 1,
"id": "every_function_under_20_lines",
"question": "Is every function in this file shorter than 20 lines?",
"answer": true
},
{
"point_num": 2,
"id": "every_function_has_docstring",
"question": "Does every function in this file have a docstring?",
"answer": fals... | 3 | src/openbiolink/graph_creation/file_processor/onto/ontoUberonPartOfProcessor.py | cthoyt/OpenBioLink |
import time
class StopWatch(object):
    """Simple millisecond stopwatch built on time.time()."""

    before = 0  # start timestamp in ms (class-level default)
    after = 0   # stop timestamp in ms (class-level default)

    def start(self):
        """Record the current time (ms) as the start point."""
        self.before = time.time() * 1000

    def stop(self):
        """Record the current time (ms) as the stop point."""
        self.after = time.time() * 1000

    def reset(self):
        """Clear the recorded start point (the stop point is left untouched)."""
        self.before = 0

    def currentValue(self):
        """Milliseconds elapsed since start(), without stopping the watch."""
        return time.time() * 1000 - self.before

    def storedValue(self):
        """Milliseconds between the recorded start() and stop() calls."""
        return self.after - self.before
| [
{
"point_num": 1,
"id": "has_multiple_inheritance",
"question": "Does any class in this file use multiple inheritance?",
"answer": false
},
{
"point_num": 2,
"id": "every_function_has_docstring",
"question": "Does every function in this file have a docstring?",
"answer": false
... | 3 | Outros/StopWatch.py | BlackDereker/Universidade |
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
# Copyright (C) 2020 MBI-Division-B
# MIT License, refer to LICENSE file
# Author: Luca Barbera / Email: barbera@mbi-berlin.de
from tango import AttrWriteType, DevState, DebugIt, ErrorIt, InfoIt, DeviceProxy
from tango.server import Device, attribute, command, device_property
class EnvironmentAM2315MuxSensor(Device):
    """Tango device exposing temperature and humidity read through a
    controller device (CtrlDevice) on a given multiplexer channel."""

    # Tango name of the controller device that performs the actual bus reads.
    CtrlDevice = device_property(
        dtype="str",
        # NOTE(review): "memeber" looks like a typo for "member" in this
        # placeholder default — confirm before changing, deployed configs may rely on it.
        default_value="domain/family/memeber",
    )
    # Multiplexer channel this sensor instance reads from.
    Channel = device_property(
        dtype="int",
        default_value=0,
    )
    temperature = attribute(label='Temperature',
                            access=AttrWriteType.READ,
                            dtype=float,
                            format='3.1f',
                            unit='C')
    humidity = attribute(label='Humidity',
                         access=AttrWriteType.READ,
                         dtype=float,
                         format='3.1f',
                         unit='%')

    def init_device(self):
        """Connect to the controller proxy; state ON on success, OFF on failure."""
        Device.init_device(self)
        self.set_state(DevState.INIT)
        try:
            self.ctrl = DeviceProxy(self.CtrlDevice)
            self.info_stream("Connection established.")
            self.set_state(DevState.ON)
        except Exception:
            self.error_stream('Connection could not be established.')
            self.set_state(DevState.OFF)
        # cached readings served by read_temperature / read_humidity
        self._temp = 0
        self._humid = 0

    def always_executed_hook(self):
        """Refresh the cached readings before every client call; keep old values on failure."""
        try:
            # _read_data measures both humidity and temperature
            self._temp, self._humid = self.ctrl.read_data(self.Channel)
        except Exception:
            self.error_stream('Data could not be read')

    def read_temperature(self):
        # serve the value cached by always_executed_hook
        return self._temp

    def read_humidity(self):
        # serve the value cached by always_executed_hook
        return self._humid
if __name__ == "__main__":
EnvironmentAM2315MuxSensor.run_server()
| [
{
"point_num": 1,
"id": "has_multiple_inheritance",
"question": "Does any class in this file use multiple inheritance?",
"answer": false
},
{
"point_num": 2,
"id": "more_functions_than_classes",
"question": "Does this file define more functions than classes?",
"answer": true
},... | 3 | EnvironmentAM2315MuxSensor.py | MBI-Div-B/pytango-EnvironmentAM2315Mux |
# -*- coding: utf-8 -*-
from django import forms
from django.forms.widgets import RadioSelect
from .models import Rating
class FlagForm(forms.ModelForm):
    """Form for flagging content through a Rating; requires *user* and *object* kwargs."""

    def __init__(self, *args, **kwargs):
        # caller must supply the flagging user and the flagged object
        self.user = kwargs.pop('user')
        self.object = kwargs.pop('object')
        super().__init__(*args, **kwargs)
        if not self.instance or self.instance.marked_flag == Rating.FLAG_NONE:
            # not flagged yet: drop FLAG_NONE from choices, force a choice,
            # and preselect FLAG_SPAM as the most common case
            self.fields['marked_flag'].choices = Rating.FLAG_CHOICES[1:]
            self.fields['marked_flag'].required = True
            self.initial['marked_flag'] = Rating.FLAG_SPAM
        if self.data.get(self.add_prefix('marked_flag')) == Rating.FLAG_OTHER:
            # an "other" flag must come with an explanation
            self.fields['comment'].required = True

    class Meta:
        model = Rating
        fields = ('marked_flag', 'comment')
        widgets = {
            'marked_flag': RadioSelect(),
        }

    def clean(self):
        """Discard the comment when the flag is being cleared."""
        cleaned_data = super().clean()
        if cleaned_data.get('marked_flag') == Rating.FLAG_NONE:
            cleaned_data['comment'] = ''
        return cleaned_data

    def save(self):
        """Persist through the Rating manager's rate() instead of the default ModelForm save."""
        return Rating.objects.rate(
            self.object,
            self.user,
            marked_flag=self.cleaned_data['marked_flag'],
            comment=self.cleaned_data['comment']
        )
| [
{
"point_num": 1,
"id": "has_nested_function_def",
"question": "Does this file contain any function defined inside another function?",
"answer": false
},
{
"point_num": 2,
"id": "every_function_has_docstring",
"question": "Does every function in this file have a docstring?",
"ans... | 3 | rating/forms.py | lesspointless/Shakal-NG |
import os.path
import sys
def get_test_file_path(file):
    """Return the path of *file* inside the test_data directory next to this module."""
    test_data_dir = os.path.join(os.path.dirname(__file__), 'test_data')
    return os.path.join(test_data_dir, file)
def get_text_from_test_data(file):
    """Read and return the UTF-8 text content of *file* from the test_data directory."""
    with open(get_test_file_path(file), mode='r', encoding="utf-8") as handle:
        return handle.read()
def silence_stderr(f):
    """Decorator: redirect sys.stderr to os.devnull while *f* runs, then restore it.

    The wrapped function's return value is passed through unchanged.
    """
    import functools

    # functools.wraps preserves f's name/docstring on the wrapper —
    # without it, decorated functions all reported as the wrapper.
    @functools.wraps(f)
    def wrapper(*args, **kwargs):
        real_stderr = sys.stderr
        sys.stderr = open(os.devnull, 'w')
        try:
            return f(*args, **kwargs)
        finally:
            # close the devnull handle before restoring the real stream
            sys.stderr.close()
            sys.stderr = real_stderr
    return wrapper
{
"point_num": 1,
"id": "every_function_has_docstring",
"question": "Does every function in this file have a docstring?",
"answer": false
},
{
"point_num": 2,
"id": "has_nested_function_def",
"question": "Does this file contain any function defined inside another function?",
"ans... | 3 | text_summary/project_folder/textrank/test/utils.py | pdmorale/Summarization |
"""
Author: Peratham Wiriyathammabhum
"""
import numpy as np
import pandas as pd
import scipy as sp
import matplotlib.pyplot as plt
import scipy.sparse.linalg as linalg
def naivebayes(X):
    """
    Perform spectral clustering on an input row matrix X.
    mode \in {'affinity','neighborhood','gaussian'}
    See: http://www.math.ucsd.edu/~fan/research/revised.html
    http://www.math.ucsd.edu/~fan/research/cbms.pdf

    NOTE(review): despite the function name, this implements spectral
    clustering, and it references names undefined in this file
    (laplacian_graph, kmeans, knn, eta, sigma, k, max_iters) — calling it
    as-is raises NameError. Looks copied from a spectral-clustering module.
    """
    ni, nd = X.shape
    L = laplacian_graph(X, mode='affinity', knn=knn, eta=eta, sigma=sigma)
    # k eigenpairs with smallest real part of the graph Laplacian
    vals, vecs = linalg.eigs(L, k=k, which='SR')
    # ind = np.argsort(vals, axis=0)
    # vals = vals[ind]
    # vecs = vecs[:, ind]
    mu = kmeans(vecs, k=k, thres=10**-5, max_iters=max_iters)
    # assign each embedded row to its nearest centroid
    dist = ((vecs[:,None,:] - mu[None,:,:])**2).sum(axis=2)
    cidx = np.argmin(dist, axis=1)
    return mu, cidx
def tfidf():
    """Placeholder for tf-idf weighting; not implemented yet."""
    return None
def main(opts):
    """Driver: load 20newsgroups, vectorize, cluster.

    NOTE(review): incomplete — twenty_train, X, spectral_clustering, mode,
    knn, eta, sigma and max_iters are undefined here; running this raises
    NameError. The tf-idf and plotting steps are unimplemented stubs.
    """
    k = opts['k']
    # load data
    categories = ['alt.atheism', 'soc.religion.christian', 'comp.graphics', 'sci.med']
    from sklearn.datasets import fetch_20newsgroups
    from sklearn.feature_extraction.text import CountVectorizer
    count_vect = CountVectorizer()
    X_train_counts = count_vect.fit_transform(twenty_train.data)
    # tf-idf
    # clustering
    _, cidx = spectral_clustering(X, mode=mode, k=k, knn=knn, eta=eta, sigma=sigma, max_iters=max_iters)
    # plot
    return
if __name__ == '__main__':
    # CLI entry point: only the cluster count is configurable.
    import argparse
    parser = argparse.ArgumentParser(description='run naivebayes.')
    parser.add_argument('--k', dest='k',
                        help='number of clusters',
                        default=2, type=int)
    args = parser.parse_args()
    opts = vars(args)
    main(opts)
| [
{
"point_num": 1,
"id": "has_nested_function_def",
"question": "Does this file contain any function defined inside another function?",
"answer": false
},
{
"point_num": 2,
"id": "all_function_names_snake_case",
"question": "Are all function names in this file written in snake_case?",... | 3 | text/naivebayes.py | perathambkk/ml-techniques |
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
import json
import os
ANSIBLE_SSH_PORT = '2222'
def get_args():
    """Parse the dynamic-inventory CLI flags Ansible passes (--list / --host)."""
    from argparse import ArgumentParser
    cli = ArgumentParser()
    cli.add_argument('--list', action='store_true')
    cli.add_argument('--host')
    return cli.parse_args()
def wd_to_script_dir():
    """Change the working directory to the directory containing this script."""
    import os
    script_path = os.path.abspath(__file__)
    os.chdir(os.path.dirname(script_path))
def terraform_output(key):
    """Run `terraform output -json <key>` and return the parsed JSON value."""
    raw = os.popen('terraform output -json ' + key).read()
    return json.loads(raw)
def main():
    """Emit an Ansible dynamic inventory built from terraform outputs."""
    args = get_args()
    # run terraform from the script's directory so it finds the state — TODO confirm
    wd_to_script_dir()
    primary_managers = terraform_output('swarm-primary-managers')
    secondary_managers = terraform_output('swarm-secondary-managers')
    workers = terraform_output('swarm-workers')
    ssh_public_key = terraform_output('ssh-public-key')
    if args.list:
        # --list: print group names mapped to their host names
        inventory = {
            'swarm-primary-managers': list(primary_managers.keys()),
            'swarm-secondary-managers': list(secondary_managers.keys()),
            'swarm-workers': list(workers.keys())
        }
        print(json.dumps(inventory))
    if args.host:
        # --host: print connection variables for a single host
        hosts = {**primary_managers, **secondary_managers, **workers}
        print(json.dumps({
            'ansible_host': hosts[args.host],
            'ansible_port': ANSIBLE_SSH_PORT,
            'ssh_public_key': ssh_public_key
        }))
if __name__ == "__main__":
main()
| [
{
"point_num": 1,
"id": "every_function_has_docstring",
"question": "Does every function in this file have a docstring?",
"answer": false
},
{
"point_num": 2,
"id": "every_function_under_20_lines",
"question": "Is every function in this file shorter than 20 lines?",
"answer": fal... | 3 | terraform/terraform-swarm-inventory.py | swipswaps/hcloud_swarm |
# coding: utf-8
"""
Isilon SDK
Isilon SDK - Language bindings for the OneFS API # noqa: E501
OpenAPI spec version: 6
Contact: sdk@isilon.com
Generated by: https://github.com/swagger-api/swagger-codegen.git
"""
from __future__ import absolute_import
import unittest
import isi_sdk_8_1_1
from isi_sdk_8_1_1.models.settings_access_time_settings import SettingsAccessTimeSettings # noqa: E501
from isi_sdk_8_1_1.rest import ApiException
class TestSettingsAccessTimeSettings(unittest.TestCase):
    """SettingsAccessTimeSettings unit test stubs (swagger-codegen generated)."""

    def setUp(self):
        # no fixtures required for the stub
        pass

    def tearDown(self):
        pass

    def testSettingsAccessTimeSettings(self):
        """Test SettingsAccessTimeSettings"""
        # FIXME: construct object with mandatory attributes with example values
        # model = isi_sdk_8_1_1.models.settings_access_time_settings.SettingsAccessTimeSettings()  # noqa: E501
        pass
if __name__ == '__main__':
unittest.main()
| [
{
"point_num": 1,
"id": "more_functions_than_classes",
"question": "Does this file define more functions than classes?",
"answer": true
},
{
"point_num": 2,
"id": "every_function_under_20_lines",
"question": "Is every function in this file shorter than 20 lines?",
"answer": true
... | 3 | isi_sdk_8_1_1/test/test_settings_access_time_settings.py | mohitjain97/isilon_sdk_python |
from abc import abstractmethod
import rastervision as rv
from rastervision.core import (Config, ConfigBuilder)
class AugmentorConfig(Config):
    """Configuration for an Augmentor, looked up through the rv.AUGMENTOR registry."""

    def __init__(self, augmentor_type):
        # registry key identifying the concrete augmentor implementation
        self.augmentor_type = augmentor_type

    @abstractmethod
    def create_augmentor(self):
        """Create the Augmentor that this configuration represents"""
        pass

    def to_builder(self, augmentor_type):
        # NOTE(review): the augmentor_type parameter is ignored — the builder
        # is always resolved from self.augmentor_type. Confirm intent.
        return rv._registry.get_config_builder(rv.AUGMENTOR,
                                               self.augmentor_type)(self)

    @staticmethod
    def builder(augmentor_type):
        """Return a fresh ConfigBuilder registered for *augmentor_type*."""
        return rv._registry.get_config_builder(rv.AUGMENTOR, augmentor_type)()

    @staticmethod
    def from_proto(msg):
        """Creates an AugmentorConfig from the specified protobuf message."""
        return rv._registry.get_config_builder(rv.AUGMENTOR, msg.augmentor_type)() \
            .from_proto(msg) \
            .build()

    def update_for_command(self, command_type, experiment_config, context=[]):
        # NOTE(review): mutable default ([]) is benign only because context is
        # never mutated here; prefer context=None in a behavior-change pass.
        # Generally augmentors do not have an affect on the IO.
        return (self, rv.core.CommandIODefinition())
class AugmentorConfigBuilder(ConfigBuilder):
    """Base builder for AugmentorConfig; concrete augmentor builders extend this."""
    pass
| [
{
"point_num": 1,
"id": "no_function_exceeds_5_params",
"question": "Does every function in this file take 5 or fewer parameters (excluding self/cls)?",
"answer": true
},
{
"point_num": 2,
"id": "has_multiple_inheritance",
"question": "Does any class in this file use multiple inherit... | 3 | rastervision/augmentor/augmentor_config.py | Yochengliu/raster-vision |
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
@author: Pieter Huycke
email: pieter.huycke@ugent.be
GitHub: phuycke
"""
#%%
def message_check(message: str = "") -> str:
    """Return *message* truncated to at most 160 characters (the SMS limit).

    BUG FIX: the original default `message = str` bound the str *type* as the
    default value (a mis-typed annotation); a proper annotation with an empty
    string default is used instead. Slicing handles short and long messages
    uniformly, including the exact-160 boundary.
    """
    return message[:160]
longer = message_check("In certain chat programs or messaging applications, there is a limit on the number of characters that you can send in a message. Write a function that takes as input the message (a string) and checks whether the number of characters is less than 160 (or not). If the length of the message is less than 160, the message should be returned. If the length of the message is greater than 160, a string consisting of only the first 160 characters should be returned.")
print(longer)
shorter = message_check("In certain chat programs, there is a limit on the number of characters that you can send in a message.")
print(shorter)
#%%
def first_20_words(message: str = "") -> None:
    """Print *message* when it has at most 20 space-separated words.

    Raises:
        ValueError: if the message contains more than 20 words.

    BUG FIX: the original default `message = str` bound the str *type* as the
    default value (a mis-typed annotation).
    """
    words = message.split(" ")
    if len(words) > 20:
        raise ValueError("\nError\nThe message can only contain 20 words.")
    print("\nMessage passed:\n{}".format(message))
first_20_words("There is a limit on the number of characters that you can send in a message.")
first_20_words(longer)
| [
{
"point_num": 1,
"id": "has_nested_function_def",
"question": "Does this file contain any function defined inside another function?",
"answer": false
},
{
"point_num": 2,
"id": "every_function_has_docstring",
"question": "Does every function in this file have a docstring?",
"ans... | 3 | Part 3/Chapter 06/Exercises/exercise_15.py | phuycke/Practice-of-computing-using-Python |
from tir import Webapp
import unittest
class GFEA069(unittest.TestCase):
    """TIR UI test for the Protheus GFEA069 routine ('Importar Tarifas' screens)."""

    @classmethod
    def setUpClass(inst):
        # open the SIGAGFE module and navigate to program GFEA069
        inst.oHelper = Webapp()
        inst.oHelper.Setup("SIGAGFE", "29/12/2020", "T1", "D MG 01", "78")
        inst.oHelper.Program("GFEA069")

    def test_GFEA069_CT001(self):
        """Walk through the import, model and print dialogs and assert success."""
        self.oHelper.SetButton('Importar Tarifas')
        self.oHelper.SetButton('Cancelar')
        self.oHelper.SetButton('Outras Ações','Modelo importação')
        self.oHelper.SetButton('OK')
        self.oHelper.SetButton('Cancelar')
        self.oHelper.SetButton('Outras Ações','Imprimir Browse')
        self.oHelper.SetButton('Imprimir')
        self.oHelper.SetButton('Sair')
        self.oHelper.AssertTrue()

    @classmethod
    def tearDownClass(inst):
        # release the TIR browser/session
        inst.oHelper.TearDown()
if __name__ == "__main__":
unittest.main() | [
{
"point_num": 1,
"id": "every_function_has_docstring",
"question": "Does every function in this file have a docstring?",
"answer": false
},
{
"point_num": 2,
"id": "all_function_names_snake_case",
"question": "Are all function names in this file written in snake_case?",
"answer"... | 3 | Protheus_WebApp/Modules/SIGAGFE/GFEA069TESTCASE.py | 98llm/tir-script-samples |
# Last Updated: 2.2
from datetime import datetime
from util.diagMessage import DiagMessage
# Logger class
# Buffers and writes messages to a file
class Logger:
    """Buffers log lines and flushes them to a file in batches.

    Lines are timestamped on append; the buffer is written out once it
    reaches BUFFER_MAX entries (or when a flush is forced).
    """

    BUFFER_MAX = 10            # buffered lines before an automatic flush
    DEFAULT_FN = "../log.txt"  # default log file path
    # BUG FIX: _write previously referenced an undefined global `debug`,
    # raising NameError on the first flush; a class flag replaces it.
    DEBUG = False

    def __init__(self, fn = DEFAULT_FN, log = True):
        """Create a logger.

        Params: fn  - file name to use or leave default
                log - flag to keep a log file or not
        """
        self.keep_log = log
        self.fn = fn
        self.log_buffer = []
        if self.keep_log:
            self.log(DiagMessage("LOG0000I"))

    def log(self, diag, flush=False):
        """Append *diag* to the buffer, flushing when full or when *flush* is set.

        When logging is disabled, the message is printed instead (unless this
        is a flush-only call).
        """
        if self.keep_log:
            self.log_buffer.append(str(datetime.now()) + " - " + diag.msg)
            if len(self.log_buffer) >= self.BUFFER_MAX or flush:
                self._write()
        elif not flush:
            print(diag.msg)

    def _write(self):
        """Append the buffered lines to self.fn and clear the buffer."""
        if self.DEBUG:
            print("Writing log...")
        with open(self.fn,'a') as logfile:
            for line in self.log_buffer:
                try:
                    logfile.write(line)
                except TypeError:
                    logfile.write(str(datetime.now())+" - LOG ERR")
                except UnicodeEncodeError:
                    # fall back to a replaced-bytes repr for unencodable text
                    logfile.write(str(line.encode("utf-8","replace")))
                logfile.write("\n")
        del self.log_buffer[:]
| [
{
"point_num": 1,
"id": "every_function_has_docstring",
"question": "Does every function in this file have a docstring?",
"answer": false
},
{
"point_num": 2,
"id": "all_function_names_snake_case",
"question": "Are all function names in this file written in snake_case?",
"answer"... | 3 | duckbot/util/logger.py | NicholasMarasco/duckbot |
import os.path
import os
import shutil
from dbt.task.base_task import BaseTask
class CleanTask(BaseTask):
    """dbt clean task: delete the configured clean_targets, refusing protected paths."""

    def __is_project_path(self, path):
        """Return True when *path* lies OUTSIDE the current project directory."""
        proj_path = os.path.abspath('.')
        return not os.path.commonprefix(
            [proj_path, os.path.abspath(path)]
        ) == proj_path

    def __is_protected_path(self, path):
        """Return True for paths that must never be deleted: source/test dirs, '.', or anything outside the project."""
        abs_path = os.path.abspath(path)
        protected_paths = self.config.source_paths + \
            self.config.test_paths + ['.']
        # BUG FIX: previously built a list containing the os.path.abspath
        # *function object* ([os.path.abspath for p in ...]), so the set
        # membership test below could never match a real path.
        protected_abs_paths = [os.path.abspath(p) for p in protected_paths]
        return abs_path in set(protected_abs_paths) or \
            self.__is_project_path(abs_path)

    def run(self):
        """Remove each unprotected clean target (errors during removal are ignored)."""
        for path in self.config.clean_targets:
            if not self.__is_protected_path(path):
                shutil.rmtree(path, True)
| [
{
"point_num": 1,
"id": "has_multiple_inheritance",
"question": "Does any class in this file use multiple inheritance?",
"answer": false
},
{
"point_num": 2,
"id": "all_function_names_snake_case",
"question": "Are all function names in this file written in snake_case?",
"answer":... | 3 | dbt/task/clean.py | cwkrebs/dbt |
import hashlib
import pytest
from dagster import String
from dagster.core.errors import DagsterInvalidDefinitionError
from dagster.core.types.config_schema import dagster_type_loader
def test_dagster_type_loader_one():
    """A (context, value) loader function is accepted by the decorator."""
    @dagster_type_loader(String)
    def _foo(_, hello):
        return hello
def test_dagster_type_loader_missing_context():
    """Omitting the context parameter raises DagsterInvalidDefinitionError."""
    with pytest.raises(DagsterInvalidDefinitionError):
        @dagster_type_loader(String)
        def _foo(hello):
            return hello
def test_dagster_type_loader_missing_variable():
    """Omitting the value parameter raises DagsterInvalidDefinitionError."""
    with pytest.raises(DagsterInvalidDefinitionError):
        @dagster_type_loader(String)
        def _foo(_):
            return 1
def test_dagster_type_loader_default_version():
    """Loaders default to no version, and the computed input version is None."""
    @dagster_type_loader(String)
    def _foo(_, hello):
        return hello

    assert _foo.loader_version == None
    assert _foo.compute_loaded_input_version({}) == None
def test_dagster_type_loader_provided_version():
    """loader_version + external_version_fn produce sha1(loader_version + external version)."""
    def _get_ext_version(dict_param):
        return dict_param["version"]

    @dagster_type_loader(String, loader_version="5", external_version_fn=_get_ext_version)
    def _foo(_, hello):
        return hello

    dict_param = {"version": "42"}
    assert _foo.loader_version == "5"
    # "5" + "42" -> b"542" hashed with sha1
    assert _foo.compute_loaded_input_version(dict_param) == hashlib.sha1(b"542").hexdigest()
| [
{
"point_num": 1,
"id": "all_params_annotated",
"question": "Does every function parameter in this file have a type annotation (excluding self/cls)?",
"answer": false
},
{
"point_num": 2,
"id": "has_nested_function_def",
"question": "Does this file contain any function defined inside... | 3 | python_modules/dagster/dagster_tests/core_tests/runtime_types_tests/config_schema_tests/test_config_schema.py | dbatten5/dagster |
from copy import deepcopy
from typing import Type
from pydfs_lineup_optimizer.constants import PlayerRank
from pydfs_lineup_optimizer.sites.fanduel.classic.importer import FanDuelCSVImporter
def build_fanduel_single_game_importer(mvp=True, star=False, pro=False) -> Type[FanDuelCSVImporter]:
    """Build a FanDuel single-game CSV importer class.

    For every imported player, appends a boosted copy for each enabled
    special slot: MVP (2x fppg), STAR (1.5x) and PRO (1.2x). The returned
    class is a FanDuelCSVImporter subclass.
    """
    # (enabled flag, points multiplier, rank) for each special slot;
    # order matches the original mvp -> star -> pro append order.
    slot_specs = ((mvp, 2, PlayerRank.MVP),
                  (star, 1.5, PlayerRank.STAR),
                  (pro, 1.2, PlayerRank.PRO))

    class FanDuelSingleGameCSVImporter(FanDuelCSVImporter):  # pragma: nocover
        def import_players(self):
            players = super().import_players()
            extra_players = []
            for player in players:
                for enabled, multiplier, rank in slot_specs:
                    if not enabled:
                        continue
                    boosted = deepcopy(player)
                    boosted.fppg *= multiplier
                    boosted.rank = rank
                    extra_players.append(boosted)
            players.extend(extra_players)
            return players

    return FanDuelSingleGameCSVImporter
| [
{
"point_num": 1,
"id": "has_multiple_inheritance",
"question": "Does any class in this file use multiple inheritance?",
"answer": false
},
{
"point_num": 2,
"id": "every_function_has_docstring",
"question": "Does every function in this file have a docstring?",
"answer": false
... | 3 | pydfs_lineup_optimizer/sites/fanduel/single_game/importer.py | BenikaH/pydfs-lineup-optimizer |
import pytest
from MidiCompose.logic.rhythm.beat import Beat
from MidiCompose.logic.rhythm.measure import Measure
from MidiCompose.logic.rhythm.part import Part
@pytest.fixture
def part_1():
    """Fixture: a Part of two Measures, each holding two four-step Beats."""
    m1 = Measure([Beat([1,2,1,2]),
                  Beat([1,0,0,1])])
    m2 = Measure([Beat([2,2,1,1]),
                  Beat([2,2,2,2])])
    part = Part([m1,m2])
    return part
def test_empty_constructor():
    """A default Part contains one measure with one beat and no note-ons."""
    p = Part()
    assert p.n_measures == 1
    assert p.n_beats == 1
    assert p.n_note_on == 0
def test_n_note_on(part_1):
    """The part_1 fixture counts six note-on events in total."""
    assert part_1.n_note_on == 6
def test_iterator(part_1):
    """Iterating a Part yields its Measure objects."""
    for m in part_1:
        assert type(m) == Measure
| [
{
"point_num": 1,
"id": "all_function_names_snake_case",
"question": "Are all function names in this file written in snake_case?",
"answer": true
},
{
"point_num": 2,
"id": "has_nested_function_def",
"question": "Does this file contain any function defined inside another function?",
... | 3 | tests/test_logic/test_rhythm/test_Part.py | aParthemer/MidiCompose |
# Beispielprogramm für das Buch "Python Challenge"
#
# Copyright 2020 by Michael Inden
import pytest
from ch05_datastructures.solutions.ex11_list_add import list_add_improved, list_add_with_iter, list_add, \
list_add_inverse
@pytest.mark.parametrize("values1, values2, expected",
                         [([1, 2, 3], [4, 5, 6], [5, 7, 9]),
                          ([9, 2, 7], [1, 3, 5], [1, 0, 6, 2])])
def test_list_add(values1, values2, expected):
    """Digit-list addition, most significant digit first (carry may grow the list)."""
    result = list_add(values1, values2)
    assert expected == result
@pytest.mark.parametrize("values1, values2, expected",
                         [([1, 2, 3], [4, 5, 6], [5, 7, 9]),
                          ([9, 2, 7], [1, 3, 5], [1, 0, 6, 2])])
def test_list_add_with_iter(values1, values2, expected):
    """Iterator-based variant must match the reference list_add behavior."""
    result = list_add_with_iter(values1, values2)
    assert expected == result
@pytest.mark.parametrize("values1, values2, expected",
                         [([1, 2, 3], [4, 5, 6], [5, 7, 9]),
                          ([9, 2, 7], [1, 3, 5], [1, 0, 6, 2])])
def test_list_add_improved(values1, values2, expected):
    """Improved variant must match the reference list_add behavior."""
    result = list_add_improved(values1, values2)
    assert expected == result
@pytest.mark.parametrize("values1, values2, expected",
                         [([3, 2, 1], [6, 5, 4], [9, 7, 5]),
                          ([7, 2, 9], [5, 3, 1], [2, 6, 0, 1])])
def test_list_add_inverse(values1, values2, expected):
    """Digit-list addition with least significant digit first."""
    result = list_add_inverse(values1, values2)
    assert expected == result
| [
{
"point_num": 1,
"id": "all_function_names_snake_case",
"question": "Are all function names in this file written in snake_case?",
"answer": true
},
{
"point_num": 2,
"id": "no_function_exceeds_5_params",
"question": "Does every function in this file take 5 or fewer parameters (exclu... | 3 | Python/zzz_training_challenge/Python_Challenge/solutions/tests/ch05_datastructures/ex11_list_add_test.py | Kreijeck/learning |
from flask import Flask, Response
from camera import Camera
import cv2
app = Flask(__name__)
camera = Camera().start()
def gen(camera):
    """Yield an endless multipart MJPEG stream of JPEG-encoded frames from *camera*."""
    while True:
        frame = camera.read()
        _, jpeg = cv2.imencode('.jpg', frame)
        # one multipart chunk per frame, delimited by the 'frame' boundary
        yield (b'--frame\r\n'
               b'Content-Type: image/jpeg\r\n\r\n' + jpeg.tobytes() + b'\r\n')
@app.route('/stream')
def stream():
    """HTTP endpoint streaming the camera as multipart/x-mixed-replace MJPEG."""
    return Response(gen(camera),
                    mimetype='multipart/x-mixed-replace; boundary=frame')
if __name__ == '__main__':
app.run(host='0.0.0.0', debug=False, use_reloader=False)
| [
{
"point_num": 1,
"id": "every_function_has_docstring",
"question": "Does every function in this file have a docstring?",
"answer": false
},
{
"point_num": 2,
"id": "has_nested_function_def",
"question": "Does this file contain any function defined inside another function?",
"ans... | 3 | chapter4/main.py | takapat/training-kit-3 |
import sympy as sp
from dataclasses import dataclass
import numpy.typing as npt
from typing import Optional
@dataclass
class Metric:
    """Generic Metric class used to represent Metrics in General Relativity"""

    components: npt.ArrayLike  # metric tensor components
    # coordinate symbols; defaults to (t, x, y, z)
    symbols: Optional[sp.symbols] = sp.symbols("t x y z")

    def __abs__(self):
        # NOTE(review): not implemented yet.
        pass

    def __str__(self):
        # BUG FIX: this previously returned a (components, symbols) tuple,
        # which made str(metric) / print(metric) raise TypeError because
        # __str__ must return a str.
        return f"Metric(components={self.components}, symbols={self.symbols})"
| [
{
"point_num": 1,
"id": "more_functions_than_classes",
"question": "Does this file define more functions than classes?",
"answer": true
},
{
"point_num": 2,
"id": "every_function_has_docstring",
"question": "Does every function in this file have a docstring?",
"answer": false
}... | 3 | src/PyOGRe/Metric.py | JaredWogan/PyOGRe |
import asyncio
from aiotasks import build_manager
loop = asyncio.get_event_loop()
loop.set_debug(True)
manager = build_manager(loop=loop)
@manager.task()
async def task_01(num):
    # Demo task: announce start, idle two seconds on the shared loop,
    # then announce completion and return a sentinel result.
    print("Task 01 starting: {}".format(num))
    # NOTE(review): the `loop` argument to asyncio.sleep was removed in
    # Python 3.10 -- confirm the Python version this example targets.
    await asyncio.sleep(2, loop=loop)
    print("Task 01 stopping")
    return "a"
async def main_async():
    # Start the task manager, launch task_01 and wait (context-managed) for
    # its result, then give pending work up to 5s to drain and shut down.
    manager.run()
    async with task_01.wait(1) as f:
        print(f)
    await manager.wait(5)
    manager.stop()
if __name__ == '__main__':
loop.run_until_complete(main_async())
| [
{
"point_num": 1,
"id": "every_function_under_20_lines",
"question": "Is every function in this file shorter than 20 lines?",
"answer": true
},
{
"point_num": 2,
"id": "has_nested_function_def",
"question": "Does this file contain any function defined inside another function?",
"... | 3 | examples_old/run_executable_redis/basic_wait.py | shepilov-vladislav/aiotasks |
import torch.nn as nn
import torch
class ProtoNetBig(nn.Module):
    """Densely connected MLP encoder (DenseNet-style skip connections).

    Each hidden layer after the first receives the concatenation of all
    previous hidden activations; the final linear layer maps the full
    concatenation to a z_dim-dimensional embedding.
    """

    def __init__(self, x_dim=23433, hid_dim=(2000, 1000, 500, 250), z_dim=100):
        """
        Args:
            x_dim: input feature dimension.
            hid_dim: sizes of the four hidden layers (indexable sequence).
                Fix: the default was a mutable list shared across calls; a
                tuple keeps the same values without that hazard.
            z_dim: output embedding dimension.
        """
        super(ProtoNetBig, self).__init__()
        self.linear0 = nn.Linear(x_dim, hid_dim[0])
        self.bn1 = nn.BatchNorm1d(hid_dim[0])
        self.linear1 = nn.Linear(hid_dim[0], hid_dim[1])
        self.bn2 = nn.BatchNorm1d(hid_dim[1])
        # Layers 2..4 consume the concatenation of all earlier activations.
        self.linear2 = nn.Linear(hid_dim[1] + hid_dim[0], hid_dim[2])
        self.bn3 = nn.BatchNorm1d(hid_dim[2])
        self.linear3 = nn.Linear(hid_dim[1] + hid_dim[0] + hid_dim[2], hid_dim[3])
        self.bn4 = nn.BatchNorm1d(hid_dim[3])
        self.linear4 = nn.Linear(hid_dim[1] + hid_dim[0] + hid_dim[2] + hid_dim[3], z_dim)
        self.relu = nn.ReLU(inplace=True)
        self.dropout = nn.Dropout(inplace=True)

    def forward(self, x):
        """Encode a batch of shape (batch, x_dim) into (batch, z_dim)."""
        out = self.dropout(self.bn1(self.relu(self.linear0(x))))
        out1 = self.dropout(self.bn2(self.relu(self.linear1(out))))
        out2 = torch.cat([out, out1], 1)
        out3 = self.dropout(self.bn3(self.relu(self.linear2(out2))))
        out4 = torch.cat([out, out1, out3], 1)
        out5 = self.dropout(self.bn4(self.relu(self.linear3(out4))))
        out6 = torch.cat([out, out1, out3, out5], 1)
        return self.linear4(out6)
{
"point_num": 1,
"id": "more_functions_than_classes",
"question": "Does this file define more functions than classes?",
"answer": true
},
{
"point_num": 2,
"id": "no_function_exceeds_5_params",
"question": "Does every function in this file take 5 or fewer parameters (excluding self/... | 3 | model/protonet.py | ashblib/protocell |
# coding: utf-8
"""
UltraCart Rest API V2
UltraCart REST API Version 2
OpenAPI spec version: 2.0.0
Contact: support@ultracart.com
Generated by: https://github.com/swagger-api/swagger-codegen.git
"""
from __future__ import absolute_import
import os
import sys
import unittest
import ultracart
from ultracart.rest import ApiException
from ultracart.models.coupon_percent_off_retail_price_items import CouponPercentOffRetailPriceItems
class TestCouponPercentOffRetailPriceItems(unittest.TestCase):
    """ CouponPercentOffRetailPriceItems unit test stubs """

    def setUp(self):
        # Generated stub: no fixtures required.
        pass

    def tearDown(self):
        # Generated stub: nothing to clean up.
        pass

    def testCouponPercentOffRetailPriceItems(self):
        """
        Test CouponPercentOffRetailPriceItems
        """
        # FIXME: construct object with mandatory attributes with example values
        #model = ultracart.models.coupon_percent_off_retail_price_items.CouponPercentOffRetailPriceItems()
        pass
if __name__ == '__main__':
unittest.main()
| [
{
"point_num": 1,
"id": "has_multiple_inheritance",
"question": "Does any class in this file use multiple inheritance?",
"answer": false
},
{
"point_num": 2,
"id": "all_function_names_snake_case",
"question": "Are all function names in this file written in snake_case?",
"answer":... | 3 | test/test_coupon_percent_off_retail_price_items.py | gstingy/uc_python_api |
# Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
# See https://llvm.org/LICENSE.txt for license information.
# SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
# Also available under a BSD-style license. See LICENSE.
import copy
from typing import Any
import torch
from torch_mlir_e2e_test.torchscript.framework import TestConfig, Trace, TraceItem
class TorchScriptTestConfig(TestConfig):
    """TestConfig that runs the torch.nn.Module through TorchScript"""

    def __init__(self):
        super().__init__()

    def compile(self, program: torch.nn.Module) -> torch.jit.ScriptModule:
        """Script the module with the TorchScript compiler."""
        return torch.jit.script(program)

    def run(self, artifact: torch.jit.ScriptModule, trace: Trace) -> Trace:
        """Replay every item of *trace* against the scripted module."""
        # TODO: Deepcopy the torch.jit.ScriptModule, so that if the program is
        # stateful then it does not mutate the original compiled program.
        def resolve(symbol):
            # Walk dotted attribute paths such as "submodule.forward".
            target = artifact
            for name in symbol.split('.'):
                target = getattr(target, name)
            return target

        return [
            TraceItem(symbol=item.symbol,
                      inputs=item.inputs,
                      output=resolve(item.symbol)(*item.inputs))
            for item in trace
        ]
| [
{
"point_num": 1,
"id": "all_function_names_snake_case",
"question": "Are all function names in this file written in snake_case?",
"answer": true
},
{
"point_num": 2,
"id": "every_function_under_20_lines",
"question": "Is every function in this file shorter than 20 lines?",
"answ... | 3 | python/torch_mlir_e2e_test/torchscript/configs/torchscript.py | sogartar/torch-mlir |
from django.shortcuts import render
from django.http import HttpResponse
from .forms import CarForm
from .models import NewCar
# Creates form
def index(request):
    """Render the new-car form; on a valid POST create the car and show the
    congratulations page, otherwise re-render the form with its errors."""
    form = CarForm()  # unbound form for the initial GET
    cars = NewCar.objects.all()
    if request.method == "POST":
        form = CarForm(request.POST)
        if form.is_valid():
            # Use the validated cleaned_data rather than raw POST strings.
            NewCar.objects.create(make=form.cleaned_data['make'],
                                  model=form.cleaned_data['model'],
                                  year=form.cleaned_data['year'],
                                  mpg=form.cleaned_data['mpg'])
            # this renders the congratulations page
            return render(request, 'carApp/congratulations.html')
    # Fixes: the context previously passed the CarForm *class* (and its
    # class-level errors) instead of the bound/unbound instance, so templates
    # never saw user input or validation errors; an invalid POST also fell
    # through without returning a response (HTTP 500 in Django).
    context = {
        "form": form,
        "model": cars,
        "errors": form.errors,
    }
    return render(request, 'carApp/index.html', context)
# access to the congratulations.html
def congrats(request):
    # Static success page; no template context required.
    return render(request,'carApp/congrats.html',)
{
"point_num": 1,
"id": "every_function_under_20_lines",
"question": "Is every function in this file shorter than 20 lines?",
"answer": true
},
{
"point_num": 2,
"id": "every_function_has_docstring",
"question": "Does every function in this file have a docstring?",
"answer": fals... | 3 | car/carApp/views.py | cs-fullstack-2019-spring/django-validation-cw-PorcheWooten |
import argparse
import logging
import os
from faim_robocopy.starter import run_robocopy_gui
logging.basicConfig(
level=logging.INFO,
format='[%(asctime)s] (%(name)s) [%(levelname)s]: %(message)s',
datefmt='%d.%m.%Y %H:%M:%S')
def parse():
    '''parse command line arguments.

    Returns a dict of parsed options (currently only "debug").
    '''
    parser = argparse.ArgumentParser('FAIM-Robocopy')
    parser.add_argument(
        '--debug',
        help='enable debug log messages',
        action='store_true',
        default=False)
    return vars(parser.parse_args())
def main():
    '''run FAIM-robocopy.

    Checks for latest version of FAIM-robocopy and updates/restarts if
    necessary.
    '''
    try:
        run_robocopy_gui(**parse())
    except Exception as err:
        # Last-resort handler: log the full traceback so GUI crashes are
        # recorded rather than silently ending the process.
        logging.getLogger(__name__).error('Unexpected error: %s', str(err), exc_info=True)
if __name__ == '__main__':
main()
| [
{
"point_num": 1,
"id": "every_function_has_docstring",
"question": "Does every function in this file have a docstring?",
"answer": true
},
{
"point_num": 2,
"id": "has_nested_function_def",
"question": "Does this file contain any function defined inside another function?",
"answ... | 3 | FAIM-robocopy.pyw | fmi-basel/python-scripts |
from jd.api.base import RestApi
class KplOpenSkuQueryinfoRequest(RestApi):
    """Request wrapper for the jd.kpl.open.sku.queryinfo REST endpoint."""

    def __init__(self, domain='gw.api.360buy.com', port=80):
        super().__init__(domain, port)
        # SKU query payload; populated by the caller before sending.
        self.skuQuery = None

    def getapiname(self):
        """Return the wire name of the API this request targets."""
        return 'jd.kpl.open.sku.queryinfo'
| [
{
"point_num": 1,
"id": "more_functions_than_classes",
"question": "Does this file define more functions than classes?",
"answer": true
},
{
"point_num": 2,
"id": "every_function_has_docstring",
"question": "Does every function in this file have a docstring?",
"answer": false
}... | 3 | jd/api/rest/KplOpenSkuQueryinfoRequest.py | fengjinqi/linjuanbang |
# Copyright (c) 2009-2014 Upi Tamminen <desaster@gmail.com>
# See the COPYRIGHT file for more information
from __future__ import annotations
from twisted.conch.ssh.common import getNS
from cowrie.ssh import userauth
# object is added for Python 2.7 compatibility (#1198) - as is super with args
class ProxySSHAuthServer(userauth.HoneyPotSSHUserAuthServer):
    """Front-end auth server that records the attempted password so it can
    be replayed against the proxied backend."""

    def __init__(self):
        super().__init__()
        # Last password offered by the client; forwarded to the backend.
        self.triedPassword = None

    def auth_password(self, packet):
        """
        Overridden to get password
        """
        # Skip the leading flag byte; the NS that follows is the password.
        self.triedPassword = getNS(packet[1:])[0]
        return super().auth_password(packet)

    def _cbFinishedAuth(self, result):
        """
        We only want to return a success to the user, no service needs to be set.
        Those will be proxied back to the backend.
        """
        # 52 is SSH_MSG_USERAUTH_SUCCESS (RFC 4252); empty payload.
        self.transport.sendPacket(52, b"")
        self.transport.frontendAuthenticated = True

        # TODO store this somewhere else, and do not call from here
        if self.transport.sshParse.client:
            self.transport.sshParse.client.authenticateBackend(
                self.user, self.triedPassword
            )
| [
{
"point_num": 1,
"id": "has_multiple_inheritance",
"question": "Does any class in this file use multiple inheritance?",
"answer": false
},
{
"point_num": 2,
"id": "every_function_has_docstring",
"question": "Does every function in this file have a docstring?",
"answer": false
... | 3 | src/cowrie/ssh_proxy/userauth.py | uwacyber/cowrie |
"""
Official page for Nigeria COVID figures:
https://covid19.ncdc.gov.ng/
"""
import logging
import os
import re
from bs4 import BeautifulSoup
import requests
from .country_scraper import CountryScraper
logger = logging.getLogger(__name__)
class Nga(CountryScraper):
    """Scraper for Nigeria's official NCDC COVID-19 dashboard."""

    def fetch(self):
        """Download the dashboard HTML, cache it, and return the cache path."""
        response = requests.get('https://covid19.ncdc.gov.ng/')
        return self.save_to_raw_cache(response.text, 'html')

    def extract(self, source_file):
        """Parse the cached HTML table into a CSV file; return its path."""
        scrape_date = self.runtimestamp
        with open(source_file) as fh:
            soup = BeautifulSoup(fh.read(), 'html.parser')
            header_row = [h.text for h in soup.table.thead.find_all('th')]
            header_row.extend(['date', 'scrape_date'])
            rows = []
            for tr in soup.table.tbody.find_all('tr'):
                # Strip whitespace and thousands separators from each cell.
                record = [td.text.strip().replace(',', '') for td in tr.find_all('td')]
                record.extend(['', scrape_date])
                rows.append(record)
        outfile = self.processed_filepath_from_raw(source_file, 'csv')
        self.write_csv([header_row] + rows, outfile)
        return outfile
| [
{
"point_num": 1,
"id": "every_function_under_20_lines",
"question": "Is every function in this file shorter than 20 lines?",
"answer": false
},
{
"point_num": 2,
"id": "every_function_has_docstring",
"question": "Does every function in this file have a docstring?",
"answer": fal... | 3 | covid_world_scraper/nga.py | biglocalnews/covid-world-scraper |
import sys
from pathlib import Path
def mainwin32():
    """Ensure the PowerShell profile defines an `activate` alias for the
    local venv; create the profile (and parent directories) if needed.

    Idempotent: running it twice appends the alias only once.
    """
    if len(sys.argv) < 2:
        print(f'to use run: python set_activate_alias.py $profile')
        return

    profile = Path(sys.argv[1])
    # Create the parent directory and the profile file if they do not exist
    # (pathlib idioms replace the manual exists()/open-append dance).
    profile.parent.mkdir(parents=True, exist_ok=True)
    profile.touch(exist_ok=True)

    insert = r"Set-Alias -Name activate -Value .\venv\Scripts\activate"
    if insert in profile.read_text():
        print(f'alias already set in "{profile}".')
        return

    # append the alias to the profile
    with open(profile, "a") as f:
        f.write(insert + "\n")
def main():
    """Dispatch to the platform-specific setup; only Windows is supported."""
    if sys.platform == "win32":
        mainwin32()
    else:
        # Fix: corrected "plafrom" typo in the user-facing message.
        print("platform not supported")
if __name__ == "__main__":
main() | [
{
"point_num": 1,
"id": "has_nested_function_def",
"question": "Does this file contain any function defined inside another function?",
"answer": false
},
{
"point_num": 2,
"id": "every_function_has_docstring",
"question": "Does every function in this file have a docstring?",
"ans... | 3 | rapidenv/misc/set_activate_alias.py | innoviz-sw-infra/rapid-env |
#!/usr/bin/env python
"""
Example of ttree usage to generate recursive tree of directories.
It could be useful to implement Directory Tree data structure
2016 samuelsh
"""
import ttree
import random
from hashlib import blake2b
from string import digits, ascii_letters
MAX_FILES_PER_DIR = 10
def get_random_string(length):
    """Return a random alphanumeric string of the given length."""
    alphabet = digits + ascii_letters
    return ''.join(random.choice(alphabet) for _ in range(length))
def build_recursive_tree(tree, base, depth, width):
    """
    Args:
        tree: Tree
        base: Node
        depth: int
        width: int

    Returns:
    """
    # NOTE(review): each pass of this loop adds `width` children under `base`
    # and then recurses into *every* current child with the already-decremented
    # depth, so the node count grows much faster than a plain width**depth
    # tree -- confirm this fan-out is intentional for the example.
    while depth >= 0:
        depth -= 1
        for _ in range(width):
            directory = Directory()
            tree.create_node(
                directory.name,
                # node id is the blake2b hash of its name
                blake2b(directory.name.encode()).hexdigest(),
                parent=base.id,
                data=directory
            )
        dirs_nodes = tree.children(base.id)
        for dir in dirs_nodes:
            newbase = tree.get(dir.id)
            build_recursive_tree(tree, newbase, depth, width)
class Directory:
    """A randomly named directory holding MAX_FILES_PER_DIR random files."""

    def __init__(self):
        self._name = get_random_string(64)
        # Each directory contains MAX_FILES_PER_DIR files
        self._files = [File() for _ in range(MAX_FILES_PER_DIR)]

    @property
    def name(self):
        """Random 64-character directory name."""
        return self._name

    @property
    def files(self):
        """Files contained in this directory."""
        return self._files
class File:
    """A file with a random 64-character name."""

    def __init__(self):
        self._name = get_random_string(64)

    @property
    def name(self):
        """Random file name."""
        return self._name
tree = ttree.Tree()
base = tree.create_node('Root', 'root')
build_recursive_tree(tree, base, 2, 10)
tree.print()
| [
{
"point_num": 1,
"id": "all_params_annotated",
"question": "Does every function parameter in this file have a type annotation (excluding self/cls)?",
"answer": false
},
{
"point_num": 2,
"id": "every_class_has_docstring",
"question": "Does every class in this file have a docstring?"... | 3 | examples/recursive_dirtree_generator.py | vovanbo/treelib |
"""empty message
Revision ID: 2cc127c4ea12
Revises:
Create Date: 2020-05-12 20:18:08.387848
"""
from alembic import op
import sqlalchemy as sa
# revision identifiers, used by Alembic.
revision = '2cc127c4ea12'
down_revision = None
branch_labels = None
depends_on = None
def upgrade():
    """Create the users table and the posts table (posts.user_id -> users.id)."""
    # ### commands auto generated by Alembic - please adjust! ###
    op.create_table('users',
    sa.Column('id', sa.Integer(), nullable=False),
    sa.Column('username', sa.String(length=20), nullable=False),
    sa.Column('email', sa.String(length=120), nullable=False),
    sa.Column('image_file', sa.String(length=255), nullable=False),
    sa.Column('password', sa.String(), nullable=False),
    sa.PrimaryKeyConstraint('id'),
    sa.UniqueConstraint('email'),
    sa.UniqueConstraint('username')
    )
    op.create_table('posts',
    sa.Column('id', sa.Integer(), nullable=False),
    sa.Column('title', sa.String(length=100), nullable=False),
    sa.Column('date_posted', sa.DateTime(), nullable=False),
    sa.Column('content', sa.Text(), nullable=False),
    sa.Column('user_id', sa.Integer(), nullable=False),
    sa.ForeignKeyConstraint(['user_id'], ['users.id'], ),
    sa.PrimaryKeyConstraint('id')
    )
    # ### end Alembic commands ###
def downgrade():
    """Drop posts first (it holds the FK to users), then users."""
    # ### commands auto generated by Alembic - please adjust! ###
    op.drop_table('posts')
    op.drop_table('users')
    # ### end Alembic commands ###
| [
{
"point_num": 1,
"id": "every_function_has_docstring",
"question": "Does every function in this file have a docstring?",
"answer": false
},
{
"point_num": 2,
"id": "every_function_under_20_lines",
"question": "Is every function in this file shorter than 20 lines?",
"answer": fal... | 3 | migrations/versions/2cc127c4ea12_.py | Atienodolphine01/Flaskblog |
'''
Contains PidFd() class
Reads the /proc/<pid>/fd directory and resolves symbolic links
NB requires sudo
'''
from contextlib import suppress
from logging import getLogger
from os import listdir, readlink, path as ospath
from .readfile import ReadFile
LOGGER = getLogger(__name__)
class PidFd(ReadFile):
    '''
    PidFd handling
    '''

    FILENAME = ospath.join('proc', '%s', 'fd')
    KEY = 'pidfd'

    def read(self):
        '''
        The <pid>/fd is a directory
        so we make the dictionary here in case the pid goes away
        '''
        LOGGER.debug("Read")
        fds = {}
        with suppress(FileNotFoundError):
            for entry in listdir(self.filename):
                if not entry:
                    continue
                try:
                    target = readlink(ospath.join(self.filename, entry))
                except OSError:
                    # fd vanished between listdir and readlink; skip it
                    continue
                fds[int(entry)] = target
        self.data = fds

    def normalize(self):
        '''
        Returns data
        '''
        LOGGER.debug("Normalize")
        return self.data.copy()
| [
{
"point_num": 1,
"id": "every_function_under_20_lines",
"question": "Is every function in this file shorter than 20 lines?",
"answer": true
},
{
"point_num": 2,
"id": "more_functions_than_classes",
"question": "Does this file define more functions than classes?",
"answer": true
... | 3 | lnxproc/pidfd.py | eccles/lnxproc |
import pytest
from unittest.mock import MagicMock
from django_ontruck.notifiers import AsyncNotifier, Notifier
from ..test_app.notifiers import DummySegmentNotifier, DummySegmentWithIdentityNotifier
@pytest.fixture
def mock_user():
    """Minimal user double exposing only the `uuid` attribute notifiers read."""
    return MagicMock(uuid='uuid')
def test_segment_notifier(mocker, mock_user):
    # Synchronous mode (delayed=False) yields a plain Notifier that sends inline.
    notifier = DummySegmentNotifier(user=mock_user, delayed=False)
    assert isinstance(notifier, Notifier)
    notifier.send()
def test_segment_async_notifier(mocker, mock_user):
    # Default mode is delayed, producing an AsyncNotifier.
    notifier = DummySegmentNotifier(user=mock_user)
    assert isinstance(notifier, AsyncNotifier)
    notifier.send()
def test_segment_with_identity_notifier(mocker, mock_user):
    # The identity-enriched variant must still behave as a synchronous Notifier.
    notifier = DummySegmentWithIdentityNotifier(user=mock_user, delayed=False)
    assert isinstance(notifier, Notifier)
    notifier.send()
| [
{
"point_num": 1,
"id": "every_function_under_20_lines",
"question": "Is every function in this file shorter than 20 lines?",
"answer": true
},
{
"point_num": 2,
"id": "every_function_has_docstring",
"question": "Does every function in this file have a docstring?",
"answer": fals... | 3 | tests/notifiers/test_segment_notifier.py | ontruck/django-ontruck |
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
#
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
from aliyunsdkcore.request import RpcRequest
class GetThumbnailsRequest(RpcRequest):
    """RPC request for CloudPhoto GetThumbnails (API version 2017-07-11)."""

    def __init__(self):
        RpcRequest.__init__(self, 'CloudPhoto', '2017-07-11', 'GetThumbnails', 'cloudphoto')
        self.set_protocol_type('https')

    def get_LibraryId(self):
        return self.get_query_params().get('LibraryId')

    def set_LibraryId(self, LibraryId):
        self.add_query_param('LibraryId', LibraryId)

    def get_PhotoIds(self):
        return self.get_query_params().get('PhotoIds')

    def set_PhotoIds(self, PhotoIds):
        # Flatten the list into repeated PhotoId.N query parameters (1-based),
        # skipping None entries.
        for index, photo_id in enumerate(PhotoIds, start=1):
            if photo_id is not None:
                self.add_query_param('PhotoId.' + str(index), photo_id)

    def get_StoreName(self):
        return self.get_query_params().get('StoreName')

    def set_StoreName(self, StoreName):
        self.add_query_param('StoreName', StoreName)

    def get_ZoomType(self):
        return self.get_query_params().get('ZoomType')

    def set_ZoomType(self, ZoomType):
        self.add_query_param('ZoomType', ZoomType)
{
"point_num": 1,
"id": "has_multiple_inheritance",
"question": "Does any class in this file use multiple inheritance?",
"answer": false
},
{
"point_num": 2,
"id": "no_function_exceeds_5_params",
"question": "Does every function in this file take 5 or fewer parameters (excluding self... | 3 | aliyun-python-sdk-cloudphoto/aliyunsdkcloudphoto/request/v20170711/GetThumbnailsRequest.py | xiaozhao1/aliyun-openapi-python-sdk |
from locust import HttpLocust, TaskSet, between
import json
def login(l):
    """POST a fixed-credential auth-session request to the PLAYGROUND realm."""
    payload = {
        "data": {
            "type": "auth-session",
            "attributes": {
                "username": "test@test.com.br",
                "password": "1234567a"
            }
        }
    }
    l.client.post(
        "/api/v1/realms/PLAYGROUND/auth-session",
        data=json.dumps(payload),
        headers={'content-type': 'application/vnd.api+json'}
    )
class UserBehavior(TaskSet):
    # Weighted task map: login is the only task.
    tasks = {login: 1}

    def on_start(self):
        # Authenticate once when the simulated user spawns.
        login(self)

    def on_stop(self):
        # No teardown required.
        pass
class WebsiteUser(HttpLocust):
    # Simulated user running UserBehavior with a 5-9 second wait between tasks.
    task_set = UserBehavior
    wait_time = between(5.0, 9.0)
| [
{
"point_num": 1,
"id": "every_class_has_docstring",
"question": "Does every class in this file have a docstring?",
"answer": false
},
{
"point_num": 2,
"id": "has_multiple_inheritance",
"question": "Does any class in this file use multiple inheritance?",
"answer": false
},
{... | 3 | tests/locust.py | geru-br/keyloop |
# Matthieu Brucher
# Last Change : 2007-08-26 19:43
import numpy
class BacktrackingSearch(object):
    """
    The backtracking algorithm for enforcing Armijo rule
    """

    def __init__(self, rho=0.1, alpha_step=1., alpha_factor=0.5, **kwargs):
        """
        Can have :
          - a coefficient for the Armijo rule (rho = 0.1)
          - an alpha factor to modulate the step (alpha_step = 1.)
          - an alpha factor < 1 that will decrease the step size until the rule is valid (alpha_factor = 0.5)
        """
        self.rho = rho
        self.stepSize = alpha_step
        self.stepFactor = alpha_factor

    def __call__(self, origin, function, state, **kwargs):
        """
        Tries to find an acceptable candidate
        """
        direction = state['direction']
        alpha = state.get('initial_alpha_step', self.stepSize)
        baseline = function(origin)
        gradient = state['gradient']
        # Shrink the step until the Armijo sufficient-decrease condition holds.
        while True:
            candidate = origin + alpha * direction
            if function(candidate) <= baseline + self.rho * alpha * numpy.dot(gradient, direction):
                state['alpha_step'] = alpha
                return candidate
            alpha *= self.stepFactor
| [
{
"point_num": 1,
"id": "no_function_exceeds_5_params",
"question": "Does every function in this file take 5 or fewer parameters (excluding self/cls)?",
"answer": true
},
{
"point_num": 2,
"id": "every_function_has_docstring",
"question": "Does every function in this file have a docs... | 3 | PyDSTool/Toolbox/optimizers/line_search/backtracking_search.py | yuanz271/PyDSTool |
from gilded_rose import GildedRose, Sulfuras
def test_update_sell_in():
    # Sulfuras is legendary: update_quality must leave sell_in untouched.
    sulfuras_item = Sulfuras("Sulfuras, Hand of Ragnaros", 2, 10)
    items = [sulfuras_item]
    GildedRose(items).update_quality()
    assert sulfuras_item.sell_in == 2
def test_update_quality():
    # Sulfuras quality is fixed at 80 and never changes.
    sulfuras_item = Sulfuras("Sulfuras, Hand of Ragnaros", 2, 80)
    items = [sulfuras_item]
    GildedRose(items).update_quality()
    assert sulfuras_item.quality == 80
| [
{
"point_num": 1,
"id": "has_nested_function_def",
"question": "Does this file contain any function defined inside another function?",
"answer": false
},
{
"point_num": 2,
"id": "every_function_has_docstring",
"question": "Does every function in this file have a docstring?",
"ans... | 3 | ddd_driven/test_sulfuras.py | Neppord/bdd-ddd-gilded-rose |
from unittest import TestCase
from unittest.mock import patch
with patch('serial.Serial'):
from controls.valloxcontrol import ValloxControl
from valloxserial import vallox_serial
class TestValloxControl(TestCase):
    """Unit tests for ValloxControl speed handling (serial port mocked out)."""

    @patch('serial.Serial')
    def setUp(self, _):
        # Patch the serial port so constructing ValloxControl opens no device.
        self.vc = ValloxControl()

    @patch.object(vallox_serial, 'set_speed')
    def test_control_same_speed(self, set_speed):
        # Requesting the current speed must not touch the serial layer.
        self.vc.state = 1
        self.vc.control(1)
        self.assertFalse(set_speed.called)

    @patch.object(vallox_serial, 'set_speed')
    def test_control_change_speed(self, set_speed):
        # A new speed value must be forwarded to the serial layer.
        self.vc.state = 1
        self.vc.control(2)
        set_speed.assert_called_with(2)

    def test_control_data(self):
        # NOTE(review): the expected frame appears to be fixed header bytes
        # plus the command/value, terminated by an additive 8-bit checksum of
        # the preceding bytes -- verify against the Vallox serial protocol.
        data = self.vc.serial.get_control_data('host', 'speed', 2)
        expected = b'\x01\x22\x11\x29\x03'
        checksum = 0
        for byte in expected:
            checksum += byte
        checksum = checksum & 0xff
        expected += bytes((checksum,))
        self.assertEqual(expected, data)
| [
{
"point_num": 1,
"id": "all_function_names_snake_case",
"question": "Are all function names in this file written in snake_case?",
"answer": false
},
{
"point_num": 2,
"id": "all_params_annotated",
"question": "Does every function parameter in this file have a type annotation (exclud... | 3 | tests/test_controls/test_valloxcontrol.py | jussike/kuappi |
# encoding: utf-8
import logging
from marrow.mailer import Mailer
class MailHandler(logging.Handler):
    """A class which sends records out via e-mail.

    This handler should be configured using the same configuration
    directives that Marrow Mailer itself understands.

    Be careful how many notifications get sent.

    It is suggested to use background delivery using the 'dynamic' manager.
    """

    def __init__(self, *args, **config):
        """Initialize the instance, optionally configuring TurboMail itself.

        If no additional arguments are supplied to the handler, re-use any
        existing running TurboMail configuration.

        To get around limitations of the INI parser, you can pass in a tuple
        of name, value pairs to populate the dictionary. (Use `{}` dict
        notation in production, though.)
        """
        logging.Handler.__init__(self)
        # Positional args arrive as alternating name/value pairs; fold them
        # into the keyword configuration. (The redundant initial
        # `self.config = dict()` dead store was removed -- config is always
        # assigned below.)
        if args:
            config.update(dict(zip(*[iter(args)] * 2)))
        self.mailer = Mailer(config).start()
        # If we get a configuration that doesn't explicitly start TurboMail
        # we use the configuration to populate the Message instance.
        self.config = config

    def emit(self, record):
        """Emit a record."""
        try:
            self.mailer.new(plain=self.format(record)).send()
        except (KeyboardInterrupt, SystemExit):
            raise
        except Exception:
            # Fix: narrowed the bare `except:` so non-error exits such as
            # GeneratorExit are not swallowed; delegate the rest to the
            # standard logging error handler.
            self.handleError(record)
logging.MailHandler = MailHandler
| [
{
"point_num": 1,
"id": "every_function_has_docstring",
"question": "Does every function in this file have a docstring?",
"answer": true
},
{
"point_num": 2,
"id": "every_function_under_20_lines",
"question": "Is every function in this file shorter than 20 lines?",
"answer": fals... | 3 | marrow/mailer/logger.py | digiturtle/mailer |
# -*- encoding: utf-8 -*-
"""
Flask Boilerplate
Author: AppSeed.us - App Generator
"""
from flask import json
from app import app, db
from .common import *
# build a Json response
def response(data):
    # Serialize *data* and wrap it in a 200 application/json Flask response.
    return app.response_class(response=json.dumps(data),
                              status=200,
                              mimetype='application/json')
def g_db_commit():
    # Flush the current SQLAlchemy session to the database.
    db.session.commit()
def g_db_add(obj):
    # Stage *obj* for insertion; no-op on falsy input. Commit separately.
    if obj:
        db.session.add(obj)
def g_db_del(obj):
    # Stage *obj* for deletion; no-op on falsy input. Commit separately.
    if obj:
        db.session.delete(obj)
| [
{
"point_num": 1,
"id": "no_function_exceeds_5_params",
"question": "Does every function in this file take 5 or fewer parameters (excluding self/cls)?",
"answer": true
},
{
"point_num": 2,
"id": "all_function_names_snake_case",
"question": "Are all function names in this file written... | 3 | app/util.py | mkhumtai/6CCS3PRJ |
import matplotlib.pyplot as plt, streamlit as st
from typing import Iterable, Union
from sklearn.metrics import classification_report
from sklearn.metrics import roc_curve, auc, RocCurveDisplay
def train(estimator: object, X: Iterable[Union[int, float]], y: Iterable):
    """
    Train custom classifier model.

    Parameters:
        estimator: Unfitted estimator.
        X: Input training data.
        y: Labels for training data.

    Returns:
        Fitted estimator model.
    """
    fitted = estimator.fit(X=X, y=y)
    return fitted
def classify(estimator: object, X: Iterable[Union[int, float]]):
    """
    Predict with custom classifier model.

    Parameters:
        estimator: Fitted estimator.
        X: Input test data.

    Returns:
        Predicted labels.
    """
    predictions = estimator.predict(X=X)
    return predictions
def regress(estimator: object, X: Iterable[Union[int, float]], y: Iterable):
    """
    Predict with custom regressor model.

    Parameters:
        estimator: Fitted estimator.
        X: Input test data.
        y: Labels for test data.

    Returns:
        Predicted labels.
    """
    # TODO: not implemented yet -- currently a stub that returns None.
    pass
def evaluate(estimator: object, X: Iterable[Union[int, float]], y: Iterable):
    """
    Evaluate a fitted classifier: render a classification report and ROC
    curve in the Streamlit app.

    Parameters:
        estimator: Fitted estimator.
        X: Input test data.
        y: Labels for test data.

    Returns:
        None; output is rendered via Streamlit.
    """
    pred = estimator.predict(X=X)

    # classification report
    report = classification_report(y_true=y, y_pred=pred)
    st.write('Classification Report')
    st.write(report)

    # ROC curve
    fpr, tpr, thresholds = roc_curve(y, pred)
    roc_auc = auc(fpr, tpr)
    # Fix: RocCurveDisplay's constructor returns a display object, not a
    # 3-tuple -- the old `_, _, figure = RocCurveDisplay(...)` raised
    # TypeError. Also pass the estimator's class *name* (a str) rather than
    # the class object, then plot and hand the figure to Streamlit.
    display = RocCurveDisplay(
        fpr=fpr,
        tpr=tpr,
        roc_auc=roc_auc,
        estimator_name=type(estimator).__name__
    )
    display.plot()
    st.pyplot(fig=display.figure_)
| [
{
"point_num": 1,
"id": "every_function_under_20_lines",
"question": "Is every function in this file shorter than 20 lines?",
"answer": false
},
{
"point_num": 2,
"id": "every_function_has_docstring",
"question": "Does every function in this file have a docstring?",
"answer": tru... | 3 | ml.py | Fennec2000GH/Poly-Finance |
from utils import create_newfig, create_moving_polygon, create_still_polygon, run_or_export
func_code = 'aq'
func_name = 'test_one_moving_one_stationary_distlimit_touch_at_limit'
def setup_fig01():
    """Build figure {func_code}01: one moving and one stationary polygon."""
    fig, ax, renderer = create_newfig(f'{func_code}01', ylim=(-1, 7))
    create_moving_polygon(fig, ax, renderer, ((0, 0), (0, 1), (1, 1), (1, 0)), (4, 3), 'none')
    create_still_polygon(fig, ax, renderer, ((3, 5, 'topleft'), (4, 5), (4, 4), (3, 4)), 'none')
    return fig, ax, f'{func_code}01_{func_name}'
def setup_fig02():
    """Build figure {func_code}02: one moving and one stationary polygon."""
    fig, ax, renderer = create_newfig(f'{func_code}02', xlim=(-1, 8), ylim=(-1, 7))
    create_moving_polygon(fig, ax, renderer, ((4, 4), (5, 6), (4, 3)), (2, -1.5), 'topright')
    create_still_polygon(fig, ax, renderer, ((1, 3), (2, 3.5), (7, 1), (6, 0)), 'top')
    return fig, ax, f'{func_code}02_{func_name}'
def setup_fig03():
    """Build figure {func_code}03: one moving and one stationary polygon."""
    fig, ax, renderer = create_newfig(f'{func_code}03')
    create_moving_polygon(fig, ax, renderer, ((6, 3), (6, 2), (5, 1), (4, 3)), (-3, 0), 'topright')
    create_still_polygon(fig, ax, renderer, ((0, 3, 'none'), (1, 3), (2, 1), (0, 1, 'none')))
    return fig, ax, f'{func_code}03_{func_name}'
def setup_fig04():
    """Build figure {func_code}04: one moving and one stationary polygon."""
    fig, ax, renderer = create_newfig(f'{func_code}04')
    create_moving_polygon(fig, ax, renderer, ((5, 0, 'none'), (6, 1), (2, 1)), (0, 2), 'topright')
    create_still_polygon(fig, ax, renderer, ((3, 4, 'top'), (4, 4), (4, 3), (3, 3)), 'none')
    return fig, ax, f'{func_code}04_{func_name}'
run_or_export(setup_fig01, setup_fig02, setup_fig03, setup_fig04) | [
{
"point_num": 1,
"id": "every_function_under_20_lines",
"question": "Is every function in this file shorter than 20 lines?",
"answer": true
},
{
"point_num": 2,
"id": "has_nested_function_def",
"question": "Does this file contain any function defined inside another function?",
"... | 3 | imgs/test_geometry/test_extrapolated_intersection/aq_test_one_moving_one_stationary_distlimit_touch_at_limit.py | devilcry11/pygorithm |
# -*- coding: utf-8 -*-
from abc import ABCMeta, abstractmethod
from pyfr.backends.base.kernels import BaseKernelProvider, MPIKernel
class BasePackingKernels(BaseKernelProvider, metaclass=ABCMeta):
    """Base provider for MPI pack/send/recv/unpack kernels."""

    def _sendrecv(self, mv, mpipreqfn, pid, tag):
        # If we are an exchange view then extract the exchange matrix
        if isinstance(mv, self.backend.xchg_view_cls):
            xchgmat = mv.xchgmat
        else:
            xchgmat = mv

        # Create a persistent MPI request to send/recv the matrix
        preq = mpipreqfn(xchgmat.hdata, pid, tag)

        class SendRecvPackKernel(MPIKernel):
            def run(self, queue):
                # Start the request and append us to the list of requests
                preq.Start()
                queue.mpi_reqs.append(preq)

        return SendRecvPackKernel()

    @abstractmethod
    def pack(self, mv):
        # Implemented by concrete backends.
        pass

    def send_pack(self, mv, pid, tag):
        # Persistent-send of the packed buffer to MPI rank *pid*.
        from mpi4py import MPI

        return self._sendrecv(mv, MPI.COMM_WORLD.Send_init, pid, tag)

    def recv_pack(self, mv, pid, tag):
        # Persistent-receive into the packed buffer from MPI rank *pid*.
        from mpi4py import MPI

        return self._sendrecv(mv, MPI.COMM_WORLD.Recv_init, pid, tag)

    @abstractmethod
    def unpack(self, mv):
        # Implemented by concrete backends.
        pass
| [
{
"point_num": 1,
"id": "every_function_has_docstring",
"question": "Does every function in this file have a docstring?",
"answer": false
},
{
"point_num": 2,
"id": "has_multiple_inheritance",
"question": "Does any class in this file use multiple inheritance?",
"answer": false
... | 3 | pyfr/backends/base/packing.py | tjcorona/PyFR |
#! /usr/bin/env python
# encoding: utf-8
# WARNING! Do not edit! https://waf.io/book/index.html#_obtaining_the_waf_file
import sys
from waflib.Tools import ccroot,ar,gxx
from waflib.Configure import conf
@conf
def find_icpc(conf):
    """Locate the Intel C++ compiler (icpc) and record it as CXX."""
    cxx = conf.find_program('icpc', var='CXX')
    conf.get_cc_version(cxx, icc=True)
    conf.env.CXX_NAME = 'icc'
def configure(conf):
    """Configure a C++ build using icpc, reusing the g++ flag machinery."""
    conf.find_icpc()
    conf.find_ar()
    conf.gxx_common_flags()
    conf.gxx_modifier_platform()
    conf.cxx_load_tools()
    conf.cxx_add_flags()
    conf.link_add_flags()
| [
{
"point_num": 1,
"id": "every_function_under_20_lines",
"question": "Is every function in this file shorter than 20 lines?",
"answer": true
},
{
"point_num": 2,
"id": "has_nested_function_def",
"question": "Does this file contain any function defined inside another function?",
"... | 3 | src/third_party/angle/third_party/glmark2/src/waflib/Tools/icpc.py | goochen/naiveproxy |
"""
Unit tests for the sas_gen
"""
import os.path
import warnings
warnings.simplefilter("ignore")
import unittest
import numpy as np
from sas.sascalc.calculator import sas_gen
def find(filename):
    """Return the path of *filename* inside this test module's data directory."""
    base_dir = os.path.dirname(__file__)
    return os.path.join(base_dir, 'data', filename)
class sas_gen_test(unittest.TestCase):
    """Unit tests for the sas_gen file readers and the GenSAS calculator."""

    def setUp(self):
        # One loader per supported input format.
        self.sldloader = sas_gen.SLDReader()
        self.pdbloader = sas_gen.PDBReader()
        self.omfloader = sas_gen.OMFReader()

    def test_sldreader(self):
        """
        Test .sld file loaded
        """
        f = self.sldloader.read(find("sld_file.sld"))
        self.assertEqual(f.pos_x[0], -40.5)
        self.assertEqual(f.pos_y[0], -13.5)
        self.assertEqual(f.pos_z[0], -13.5)

    def test_pdbreader(self):
        """
        Test .pdb file loaded
        """
        f = self.pdbloader.read(find("c60.pdb"))
        self.assertEqual(f.pos_x[0], -0.733)
        self.assertEqual(f.pos_y[0], -1.008)
        self.assertEqual(f.pos_z[0], 3.326)

    def test_omfreader(self):
        """
        Test .omf file loaded
        """
        f = self.omfloader.read(find("A_Raw_Example-1.omf"))
        output = sas_gen.OMF2SLD()
        output.set_data(f)
        # First voxel of this fixture has zero magnetisation and sits at the origin.
        self.assertEqual(f.mx[0], 0)
        self.assertEqual(f.my[0], 0)
        self.assertEqual(f.mz[0], 0)
        self.assertEqual(output.pos_x[0], 0.0)
        self.assertEqual(output.pos_y[0], 0.0)
        self.assertEqual(output.pos_z[0], 0.0)

    def test_calculator(self):
        """
        Test that the calculator calculates.
        """
        f = self.omfloader.read(find("A_Raw_Example-1.omf"))
        omf2sld = sas_gen.OMF2SLD()
        omf2sld.set_data(f)
        model = sas_gen.GenSAS()
        model.set_sld_data(omf2sld.output)
        # Evaluate I(qx, qy) on a small grid; only checks it runs cleanly.
        x = np.linspace(0, 0.1, 11)[1:]
        model.runXY([x, x])
if __name__ == '__main__':
unittest.main()
| [
{
"point_num": 1,
"id": "has_multiple_inheritance",
"question": "Does any class in this file use multiple inheritance?",
"answer": false
},
{
"point_num": 2,
"id": "all_function_names_snake_case",
"question": "Are all function names in this file written in snake_case?",
"answer":... | 3 | test/sascalculator/utest_sas_gen.py | llimeht/sasview |
#!/usr/bin/env python
# vim: tabstop=4 shiftwidth=4 softtabstop=4
#
#
# Copyright (c) 2018 Wind River Systems, Inc.
#
# SPDX-License-Identifier: Apache-2.0
#
#
from sm_client.common import base
class SmcNode(base.Resource):
    """API resource representing a single service-management node."""

    def __repr__(self):
        return "<SmcNode %s>" % self._info
class SmcNodeManager(base.Manager):
    """Manager that lists and fetches SmcNode resources over the REST API."""

    resource_class = SmcNode

    @staticmethod
    def _path(id=None):
        """Return the collection URL, or the single-node URL when *id* is given."""
        return '/v1/nodes/%s' % id if id else '/v1/nodes'

    def list(self):
        """Return all nodes."""
        return self._list(self._path(), "nodes")

    def get(self, nodes_id):
        """Return the node with *nodes_id*, or None when not found."""
        result = self._list(self._path(nodes_id))
        if len(result) > 0:
            return result[0]
        else:
            return None
| [
{
"point_num": 1,
"id": "every_function_under_20_lines",
"question": "Is every function in this file shorter than 20 lines?",
"answer": true
},
{
"point_num": 2,
"id": "all_function_names_snake_case",
"question": "Are all function names in this file written in snake_case?",
"answ... | 3 | service-mgmt-client/sm-client/sm_client/v1/smc_service_node.py | starlingx-staging/stx-ha |
import cv2
import dropbox
import time
import random
start_time = time.time()
def take_snapshot():
    """Capture a single frame from the default camera and save it as a PNG.

    Returns the generated image filename.
    """
    number = random.randint(0, 100)
    # Initialize the capture device (index 0 = default camera).
    videoCaptureObject = cv2.VideoCapture(0)
    try:
        # Read one frame while the camera is on.
        ret, frame = videoCaptureObject.read()
        # NOTE(review): ret is not checked; a failed read would still attempt
        # to write an empty image -- consider handling ret == False.
        img_name = "img" + str(number) + ".png"
        # cv2.imwrite() persists the frame to storage.
        cv2.imwrite(img_name, frame)
        print("snapshot taken")
        return img_name
    finally:
        # BUG FIX: the original returned before releasing the camera, so the
        # device handle (and any cv2 windows) leaked on every call. The dead
        # local assignment `start_time = time.time` was also removed.
        videoCaptureObject.release()
        cv2.destroyAllWindows()
def upload_file(img_name):
    """Upload *img_name* into the /testFolder directory of a Dropbox account."""
    # NOTE(security): this access token is hard-coded in source control; it
    # should be revoked and loaded from an environment variable instead.
    access_token = "riFu6Ybhc9AAAAAAAAAALaZlr0KQZp4W5yPr5fRlLudO7HyuxSz5BpczxsAwjvTN"
    file = img_name
    file_from = file
    file_to = "/testFolder/" + (img_name)
    dbx = dropbox.Dropbox(access_token)
    with open(file_from, 'rb') as f:
        # Overwrite any existing file of the same name in Dropbox.
        dbx.files_upload(f.read(), file_to, mode=dropbox.files.WriteMode.overwrite)
    print("file uploaded")
def main():
    """Poll forever, taking and uploading a snapshot every 5 seconds."""
    last_capture = time.time()
    while True:
        # BUG FIX: the original compared against the module-level start_time,
        # which was never reset, so after the first 5 seconds a snapshot was
        # taken on every single loop iteration.
        if time.time() - last_capture >= 5:
            name = take_snapshot()
            upload_file(name)
            last_capture = time.time()
        # Sleep briefly so the poll loop does not spin at 100% CPU.
        time.sleep(0.1)

main()
| [
{
"point_num": 1,
"id": "every_function_has_docstring",
"question": "Does every function in this file have a docstring?",
"answer": false
},
{
"point_num": 2,
"id": "all_function_names_snake_case",
"question": "Are all function names in this file written in snake_case?",
"answer"... | 3 | capture_and_uploadImage.py | khushmax/automation |
from audio import Stream, AudioSettings
class PhraseRecognizer(object):
    """Base class for speech-to-text recognizers operating on an audio stream."""

    def __init__(self, config, audio_settings: AudioSettings):
        self._config = config
        self._audio_settings = audio_settings

    def get_config(self):
        """Return the recognizer-specific configuration object."""
        return self._config

    def get_audio_settings(self) -> AudioSettings:
        """Return the audio settings this recognizer expects."""
        return self._audio_settings

    async def recognize(self, stream: Stream, recv_callback):
        """Recognize speech on *stream*; must be overridden by subclasses."""
        # Idiom fix: NotImplementedError is the conventional signal for an
        # abstract method; it is still an Exception subclass, so any existing
        # `except Exception` callers keep working.
        raise NotImplementedError('Not implemented "recognize"')
class HotwordRecognizer(object):
    """Base class for wake-word detectors."""

    def __init__(self, config):
        self._config = config

    def get_audio_settings(self) -> AudioSettings:
        """Return the audio settings required by the detector."""
        # Idiom fix: raise NotImplementedError instead of a bare Exception.
        raise NotImplementedError('Not implemented "get_audio_settings"')

    def start(self):
        """Optional hook called before detection begins; default is a no-op."""
        pass

    def is_hotword(self, raw_frames) -> bool:
        """Return True when *raw_frames* contains the wake word."""
        raise NotImplementedError('Not implemented "is_hotword"')
class VADRecognizer(object):
    """Base class for voice-activity detectors."""

    def __init__(self, config):
        self._config = config

    def get_audio_settings(self) -> AudioSettings:
        """Return the audio settings required by the detector."""
        # Idiom fix: raise NotImplementedError instead of a bare Exception.
        raise NotImplementedError('Not implemented "get_audio_settings"')

    def is_speech(self, raw_frames) -> bool:
        """Return True when *raw_frames* contains speech."""
        raise NotImplementedError('Not implemented "is_speech"')
class PhraseRecognizerConfig(object):
    """Configuration that knows how to build its PhraseRecognizer."""

    def create_phrase_recognizer(self) -> PhraseRecognizer:
        # Idiom fix: NotImplementedError marks the abstract factory method.
        raise NotImplementedError('Not implemented "create_phrase_recognizer"')
class HotwordRecognizerConfig(object):
    """Configuration that knows how to build its HotwordRecognizer."""

    def create_hotword_recognizer(self) -> HotwordRecognizer:
        # Idiom fix: NotImplementedError marks the abstract factory method.
        raise NotImplementedError('Not implemented "create_hotword_recognizer"')
class VADRecognizerConfig(object):
    """Configuration that knows how to build its VADRecognizer."""

    def create_vad_recognizer(self) -> VADRecognizer:
        # Idiom fix: NotImplementedError marks the abstract factory method.
        raise NotImplementedError('Not implemented "create_vad_recognizer"')
| [
{
"point_num": 1,
"id": "every_class_has_docstring",
"question": "Does every class in this file have a docstring?",
"answer": false
},
{
"point_num": 2,
"id": "all_params_annotated",
"question": "Does every function parameter in this file have a type annotation (excluding self/cls)?"... | 3 | recognition/base.py | ReanGD/smart-home |
import pygame
from pygame.sprite import Sprite
class UFO(Sprite):
    """A class to represent a single alien in the fleet."""

    def __init__(self, ai_settings, screen):
        """Initialize the alien and set its starting position."""
        super(UFO, self).__init__()
        self.screen = screen
        self.ai_settings = ai_settings
        # Load the alien image and set its rect attribute.
        # Index 0 is the live sprite, index 1 the "destroyed/points" graphic.
        self.UFO = ['images/UFO.gif', 'images/PTS.png']
        self.image = pygame.image.load(self.UFO[0])
        self.rect = self.image.get_rect()
        self.screen_rect = screen.get_rect()
        # Start each new alien at the top left of the screen.
        # (The original first assigned rect.x/rect.y from the image size and
        # immediately overwrote them -- that dead code has been removed.)
        self.rect.x = self.screen_rect.left
        self.rect.y = self.screen_rect.top
        # Store the alien's exact horizontal position as a float for
        # sub-pixel movement.
        self.x = float(self.rect.x)
        self.hit = False

    def explosion(self):
        """Mark the alien as hit so update() swaps in the points graphic."""
        self.hit = True
        print("Yellow destroyed")

    def animate(self):
        # BUG FIX: the original did `self.animate = 0`, which rebound the
        # attribute and made this method uncallable a second time. Track the
        # counter under a separate attribute instead.
        self.animate_frame = 0

    def update(self):
        """Move the alien right or left."""
        if self.hit:
            self.image = pygame.image.load(self.UFO[1])
            # BUG FIX: the original used `self.hit == False` (a comparison
            # with no effect), so the flag was never cleared and the image
            # was reloaded on every frame.
            self.hit = False
        self.x += (self.ai_settings.alien_speed_factor *
                   self.ai_settings.fleet_direction)
        self.rect.x = self.x

    def blitme(self):
        """Draw the alien at its current location."""
        self.screen.blit(self.image, self.rect)
{
"point_num": 1,
"id": "every_function_has_docstring",
"question": "Does every function in this file have a docstring?",
"answer": false
},
{
"point_num": 2,
"id": "more_functions_than_classes",
"question": "Does this file define more functions than classes?",
"answer": true
}... | 3 | ufo.py | Steven-Kha/Space-Invasion |
def BinaryTree(r):
    """Create a new binary tree rooted at *r* with empty subtrees."""
    tree = [r]
    tree.append([])
    tree.append([])
    return tree
def insertLeft(root, newBranch):
    """Insert a new left node, pushing any existing left subtree down."""
    old_left = root.pop(1)
    # A non-empty node list has three elements; [] means no child yet.
    if len(old_left) > 1:
        node = [newBranch, old_left, []]
    else:
        node = [newBranch, [], []]
    root.insert(1, node)
    return root
def insertRight(root, newBranch):
    """Insert a new right node, pushing any existing right subtree down."""
    old_right = root.pop(2)
    # A non-empty node list has three elements; [] means no child yet.
    if len(old_right) > 1:
        node = [newBranch, [], old_right]
    else:
        node = [newBranch, [], []]
    root.insert(2, node)
    return root
def getRootVal(root):
    """Get the root's value."""
    # Index 0 holds the node's payload.
    return root[0]
def setRootVal(root, newVal):
    """Set the root value."""
    # Index 0 holds the node's payload.
    root[0] = newVal
def getLeftChild(root):
    """Get the left child."""
    # Index 1 holds the left subtree (empty list when absent).
    return root[1]
def getRightChild(root):
    """Get the right child."""
    # Index 2 holds the right subtree (empty list when absent).
    return root[2]
{
"point_num": 1,
"id": "all_params_annotated",
"question": "Does every function parameter in this file have a type annotation (excluding self/cls)?",
"answer": false
},
{
"point_num": 2,
"id": "every_function_has_docstring",
"question": "Does every function in this file have a docst... | 3 | tree.py | carlb15/Python |
#! /usr/bin/env python
# Thomas Nagy, 2011
# Try to cancel the tasks that cannot run with the option -k when an error occurs:
# 1 direct file dependencies
# 2 tasks listed in the before/after/ext_in/ext_out attributes
from waflib import Task, Runner
Task.CANCELED = 4
def cancel_next(self, tsk):
    """Record *tsk* and its outputs as canceled when it did not run cleanly."""
    if not isinstance(tsk, Task.TaskBase):
        return
    if tsk.hasrun >= Task.SKIPPED:
        # normal execution, no need to do anything here
        return
    try:
        canceled_tasks, canceled_nodes = self.canceled_tasks, self.canceled_nodes
    except AttributeError:
        # Lazily create the bookkeeping sets on the first failure.
        canceled_tasks = self.canceled_tasks = set()
        canceled_nodes = self.canceled_nodes = set()
    try:
        canceled_nodes.update(tsk.outputs)
    except AttributeError:
        pass
    try:
        canceled_tasks.add(tsk)
    except AttributeError:
        pass
def get_out(self):
    """Replacement for Runner.Parallel.get_out that also cancels dependents."""
    tsk = self.out.get()
    if not self.stop:
        self.add_more_tasks(tsk)
    self.count -= 1
    self.dirty = True
    self.cancel_next(tsk)  # new code
def error_handler(self, tsk):
    """Replacement error handler: record the failed task and cancel dependents."""
    if not self.bld.keep:
        self.stop = True
    self.error.append(tsk)
    self.cancel_next(tsk)  # new code
Runner.Parallel.cancel_next = cancel_next
Runner.Parallel.get_out = get_out
Runner.Parallel.error_handler = error_handler
def get_next_task(self):
    """Fetch the next task, canceling it when any prerequisite was canceled."""
    tsk = self.get_next_task_smart_continue()
    if not tsk:
        return tsk
    try:
        canceled_tasks, canceled_nodes = self.canceled_tasks, self.canceled_nodes
    except AttributeError:
        # Nothing has been canceled yet.
        pass
    else:
        # look in the tasks that this one is waiting on
        # if one of them was canceled, cancel this one too
        for x in tsk.run_after:
            if x in canceled_tasks:
                tsk.hasrun = Task.CANCELED
                self.cancel_next(tsk)
                break
        else:
            # so far so good, now consider the nodes
            for x in getattr(tsk, 'inputs', []) + getattr(tsk, 'deps', []):
                if x in canceled_nodes:
                    tsk.hasrun = Task.CANCELED
                    self.cancel_next(tsk)
                    break
    return tsk
Runner.Parallel.get_next_task_smart_continue = Runner.Parallel.get_next_task
Runner.Parallel.get_next_task = get_next_task
| [
{
"point_num": 1,
"id": "every_function_has_docstring",
"question": "Does every function in this file have a docstring?",
"answer": false
},
{
"point_num": 2,
"id": "all_function_names_snake_case",
"question": "Are all function names in this file written in snake_case?",
"answer"... | 3 | .mywaflib/waflib/extras/smart_continue.py | tobiasraabe/crypto |
from collections import namedtuple
import numpy as np
import talib
from numba import njit
from jesse.helpers import get_candle_source
from jesse.helpers import slice_candles
DamianiVolatmeter = namedtuple('DamianiVolatmeter', ['vol', 'anti'])
def damiani_volatmeter(candles: np.ndarray, vis_atr: int = 13, vis_std: int = 20, sed_atr: int = 40, sed_std: int = 100,
                       threshold: float = 1.4, source_type: str = "close",
                       sequential: bool = False) -> DamianiVolatmeter:
    """
    Damiani Volatmeter
    :param candles: np.ndarray
    :param vis_atr: int - default=13
    :param vis_std: int - default=20
    :param sed_atr: int - default=40
    :param sed_std: int - default=100
    :param threshold: float - default=1.4
    :param source_type: str - default: "close"
    :param sequential: bool - default=False
    :return: float | np.ndarray
    """
    candles = slice_candles(candles, sequential)
    source = get_candle_source(candles, source_type=source_type)
    # talib.ATR takes (high, low, close); those live in candle columns 3, 4
    # and 2 respectively.
    atrvis = talib.ATR(candles[:, 3], candles[:, 4], candles[:, 2], timeperiod=vis_atr)
    atrsed = talib.ATR(candles[:, 3], candles[:, 4], candles[:, 2], timeperiod=sed_atr)
    vol, t = damiani_volatmeter_fast(source, sed_std, atrvis, atrsed, vis_std, threshold)
    # Return the full series when sequential, otherwise only the latest values.
    if sequential:
        return DamianiVolatmeter(vol, t)
    else:
        return DamianiVolatmeter(vol[-1], t[-1])
@njit
def damiani_volatmeter_fast(source, sed_std, atrvis, atrsed, vis_std,
                            threshold):  # Function is compiled to machine code when called the first time
    """Numba-compiled core loop computing the volatility and anti-threshold series."""
    lag_s = 0.5
    vol = np.full_like(source, 0)
    t = np.full_like(source, 0)
    for i in range(source.shape[0]):
        # Skip the warm-up window until sed_std samples are available.
        if not (i < sed_std):
            vol[i] = atrvis[i] / atrsed[i] + lag_s * (vol[i - 1] - vol[i - 3])
            anti_thres = np.std(source[i - vis_std:i]) / np.std(source[i - sed_std:i])
            t[i] = threshold - anti_thres
    return vol, t
| [
{
"point_num": 1,
"id": "no_function_exceeds_5_params",
"question": "Does every function in this file take 5 or fewer parameters (excluding self/cls)?",
"answer": false
},
{
"point_num": 2,
"id": "all_params_annotated",
"question": "Does every function parameter in this file have a t... | 3 | jesse/indicators/damiani_volatmeter.py | noenfugler/jesse |
import pygame
from jumper.entities.bullet import Bullet
from math import pi, sin, cos
class BBWaveBullet(Bullet):
    """A bullet that travels along *angle* while oscillating in a sine wave."""

    def __init__(self, environment, position, angle, speed=1000):
        super().__init__(environment, position)
        self.speed = speed        # pixels per second
        self.angle = angle        # base travel direction, in degrees
        self.wave_acc = 0.0       # phase accumulator driving the sine wobble
        self.set_size(20, 20)

    def update(self, delta):
        """Advance the bullet by *delta* seconds; destroy it when off-screen."""
        if not self.is_alive:
            return
        self.wave_acc = (self.wave_acc + self.speed * delta) % 360
        # The travel direction wobbles +/-40 degrees around self.angle.
        wave_angle = sin(self.wave_acc / 180 * pi) * 40
        x = self.position.x + self.speed * delta * cos((self.angle + wave_angle) / 180 * pi)
        y = self.position.y + self.speed * delta * sin((self.angle + wave_angle) / 180 * pi)
        camera = self.environment.camera
        (bound_x, bound_y) = self.environment.scene.get_bound()
        if (x < 0 or x > bound_x) or (y > bound_y + camera.pos() or y < 0):
            # NOTE(review): "destory" is presumably the (misspelled) destroy
            # method inherited from Bullet -- confirm in the base class.
            self.destory()
        else:
            self.set_position(x, y)

    def render_bullet(self, surface, position):
        """Draw the bullet as concentric cyan circles fading toward the center."""
        (w, h) = self.get_size().int()
        (x, y) = int(position[0] + w / 2), int(position[1] + h / 2)
        r = int(w / 2)
        for i in range(0, r, 2):
            radius = r - i
            c = 255 - 10 * i
            pygame.draw.circle(surface, (0, c, c), (x, y), radius)

    def get_color(self):
        """Flat color used when the bullet is drawn without the gradient."""
        return (0, 255, 255)
| [
{
"point_num": 1,
"id": "all_function_names_snake_case",
"question": "Are all function names in this file written in snake_case?",
"answer": true
},
{
"point_num": 2,
"id": "more_functions_than_classes",
"question": "Does this file define more functions than classes?",
"answer": ... | 3 | jumper/entities/bullets/bb_wave_bullet.py | ccmikechen/Jumper-Game |
# задание 13
from sys import maxsize
class Group:
    """A contact-list group record with optional name, header, footer and id."""

    def __init__(self, name=None, header=None, footer=None, id=None):
        """Store the group fields; any of them may be left as None."""
        self.name = name
        self.header = header
        self.footer = footer
        self.id = id

    def __repr__(self):
        """Debug representation in the form id:name:header:footer."""
        return "%s:%s:%s:%s" % (self.id, self.name, self.header, self.footer)

    def __eq__(self, other):
        """Groups are equal when names match and ids match or are unknown."""
        if self.id is not None and other.id is not None and self.id != other.id:
            return False
        return self.name == other.name

    def id_or_max(self):
        """Sort key: the numeric id, or sys.maxsize when the id is unset."""
        return int(self.id) if self.id else maxsize
{
"point_num": 1,
"id": "has_multiple_inheritance",
"question": "Does any class in this file use multiple inheritance?",
"answer": false
},
{
"point_num": 2,
"id": "all_function_names_snake_case",
"question": "Are all function names in this file written in snake_case?",
"answer":... | 3 | model/group.py | DariaMagarshak/python_training |
import pytest
from barista.models import Match
def test_both_trigger_and_triggers():
    """Supplying both 'trigger' and 'triggers' must be rejected."""
    with pytest.raises(ValueError):
        Match.parse_obj(
            {
                "replace": "asd",
                "trigger": "asd",
                "triggers": ["asd", "abc"],
            }
        )
def test_neither_trigger_or_triggers():
    """Omitting both 'trigger' and 'triggers' must be rejected."""
    with pytest.raises(ValueError):
        Match.parse_obj({"replace": "asd"})
def test_trigger():
    """A single 'trigger' string is valid."""
    Match.parse_obj(
        {
            "replace": "asd",
            "trigger": "ads",
        }
    )
def test_triggers():
    """A list of 'triggers' is valid."""
    Match.parse_obj(
        {
            "replace": "asd",
            "triggers": ["ads", "ads2"],
        }
    )
| [
{
"point_num": 1,
"id": "every_function_has_docstring",
"question": "Does every function in this file have a docstring?",
"answer": false
},
{
"point_num": 2,
"id": "every_function_under_20_lines",
"question": "Is every function in this file shorter than 20 lines?",
"answer": tru... | 3 | tests/test_verify.py | JeppeKlitgaard/barista |
from Test import Test, Test as test
'''
Complete the solution so that it returns true if the first argument(string) passed in ends with the 2nd argument (also a string).
Examples:
solution('abc', 'bc') # returns true
solution('abc', 'd') # returns false
'''
def solution(string, ending):
    """Return True if *string* ends with *ending* (an empty ending matches)."""
    # Idiom fix: drop the redundant `True if ... else False` wrapper. The
    # empty-ending guard is still needed because string[-0:] is the whole
    # string, not "".
    return len(ending) == 0 or string[-len(ending):] == ending
# Top solution
def solution(string, ending):
    """Return True if *string* ends with *ending*, via str.endswith."""
    return string.endswith(ending)
test.assert_equals(solution('abcde', 'cde'), True)
test.assert_equals(solution('abcde', 'abc'), False)
test.assert_equals(solution('abcde', ''), True) | [
{
"point_num": 1,
"id": "every_function_has_docstring",
"question": "Does every function in this file have a docstring?",
"answer": false
},
{
"point_num": 2,
"id": "all_params_annotated",
"question": "Does every function parameter in this file have a type annotation (excluding self/... | 3 | codewars/7 kyu/string-ends-with.py | sirken/coding-practice |
# -*- coding:utf-8 -*-
# Author: hankcs
# Date: 2021-05-20 16:25
from typing import Union, List, Callable
from elit.common.dataset import TransformableDataset
from elit.utils.io_util import read_cells
STS_B_TRAIN = 'http://ixa2.si.ehu.es/stswiki/images/4/48/Stsbenchmark.tar.gz#sts-train.csv'
STS_B_DEV = 'http://ixa2.si.ehu.es/stswiki/images/4/48/Stsbenchmark.tar.gz#sts-dev.csv'
STS_B_TEST = 'http://ixa2.si.ehu.es/stswiki/images/4/48/Stsbenchmark.tar.gz#sts-test.csv'
class SemanticTextualSimilarityDataset(TransformableDataset):
    """Dataset of sentence pairs with a gold similarity score (e.g. STS-B)."""

    def __init__(self,
                 data: Union[str, List],
                 sent_a_col,
                 sent_b_col,
                 similarity_col,
                 delimiter='auto',
                 transform: Union[Callable, List] = None,
                 cache=None,
                 generate_idx=None) -> None:
        # Column indices/names into each row of the delimited file; stored
        # before super().__init__ because loading may start there.
        self.delimiter = delimiter
        self.similarity_col = similarity_col
        self.sent_b_col = sent_b_col
        self.sent_a_col = sent_a_col
        super().__init__(data, transform, cache, generate_idx)

    def load_file(self, filepath: str):
        """Yield {'sent_a', 'sent_b', 'similarity'} dicts read from *filepath*."""
        for i, cells in enumerate(read_cells(filepath, strip=True, delimiter=self.delimiter)):
            yield {
                'sent_a': cells[self.sent_a_col],
                'sent_b': cells[self.sent_b_col],
                'similarity': float(cells[self.similarity_col])
            }
| [
{
"point_num": 1,
"id": "more_functions_than_classes",
"question": "Does this file define more functions than classes?",
"answer": true
},
{
"point_num": 2,
"id": "every_function_under_20_lines",
"question": "Is every function in this file shorter than 20 lines?",
"answer": true
... | 3 | elit/datasets/sts/stsb.py | emorynlp/stem-cell-hypothesis |
#
# This file is part of PCAP BGP Parser (pbgpp)
#
# Copyright 2016-2017 DE-CIX Management GmbH
# Author: Tobias Hannaske <tobias.hannaske@de-cix.net>
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from pbgpp.Output.Filter import BGPFilter
class MACDestinationFilter(BGPFilter):
    """Pre-parsing filter matching packets by destination MAC address."""

    def __init__(self, values=None):
        # BUG FIX: the original used a mutable default argument (values=[]),
        # which is shared between every instance created without arguments.
        BGPFilter.__init__(self, values if values is not None else [])

    def apply(self, pcap_information):
        """Return True when the packet's destination MAC matches a filter value.

        !!! Attention: This is a pre-parsing filter!
        It must be applied BEFORE parsing — BGP messages don't have to be
        parsed when applying it directly after reading the PCAP packet header.
        """
        try:
            # Hoisted out of the loop: the destination string does not change
            # between filter values.
            destination = pcap_information.get_mac().get_destination_string()
            for v in self.values:
                if destination == v:
                    return True
                # A leading "~" negates the comparison.
                if v[0:1] == "~" and destination != v[1:]:
                    return True
            # Searched value was not found
            return False
        except Exception:
            # On error the filtering was not successful (due to wrong fields, etc.)
            return False

    @staticmethod
    def clear_input(values):
        """Normalize MAC strings: strip ':' and '-' separators and lowercase."""
        return [v.replace(":", "").replace("-", "").lower() for v in values]
| [
{
"point_num": 1,
"id": "all_function_names_snake_case",
"question": "Are all function names in this file written in snake_case?",
"answer": true
},
{
"point_num": 2,
"id": "has_multiple_inheritance",
"question": "Does any class in this file use multiple inheritance?",
"answer": ... | 3 | pbgpp/Output/Filters/MACDestinationFilter.py | antoine-blin/pbgp-parser |
import unittest
import requests_mock
from podman import PodmanClient, tests
from podman.domain.volumes import Volume, VolumesManager
FIRST_VOLUME = {
"CreatedAt": "1985-04-12T23:20:50.52Z",
"Driver": "default",
"Labels": {"BackupRequired": True},
"Mountpoint": "/var/database",
"Name": "dbase",
"Scope": "local",
}
class VolumeTestCase(unittest.TestCase):
    """Unit tests for the podman Volume model and VolumesManager."""

    def setUp(self) -> None:
        super().setUp()
        # The client talks to a mocked libpod socket.
        self.client = PodmanClient(base_url=tests.BASE_SOCK)

    def tearDown(self) -> None:
        super().tearDown()
        self.client.close()

    def test_id(self):
        """Volume.id should mirror the 'Name' attribute."""
        actual = Volume(attrs={"Name": "dbase"})
        self.assertEqual(actual.id, "dbase")

    @requests_mock.Mocker()
    def test_remove(self, mock):
        """remove(force=True) should issue DELETE /volumes/dbase?force=True."""
        adapter = mock.delete(tests.LIBPOD_URL + "/volumes/dbase?force=True", status_code=204)
        vol_manager = VolumesManager(self.client.api)
        volume = vol_manager.prepare_model(attrs=FIRST_VOLUME)
        volume.remove(force=True)
        self.assertTrue(adapter.called_once)
if __name__ == '__main__':
unittest.main()
| [
{
"point_num": 1,
"id": "more_functions_than_classes",
"question": "Does this file define more functions than classes?",
"answer": true
},
{
"point_num": 2,
"id": "all_function_names_snake_case",
"question": "Are all function names in this file written in snake_case?",
"answer": ... | 3 | podman/tests/unit/test_volume.py | kevinwylder/podman-py |
def path_hack():
    """Prepend this file's parent directory to sys.path for sibling imports."""
    import os, sys, inspect
    currentdir = os.path.dirname(os.path.abspath(inspect.getfile(inspect.currentframe())))
    parentdir = os.path.dirname(currentdir)
    sys.path.insert(0, parentdir)
    # print('path added:', sys.path[0])
path_hack()
import traceback
import sys
import urllib.request
from urllib.request import urlopen
import json
from apis import utilities
try:
from apis import my_token
API_TUTOR_TOKEN = my_token.API_TUTOR_TOKEN
except:
title = 'IMPORTANT: You Need an Access Token!'
error_message = '\n\n\n' + '*' * len(title) + '\n' + \
title + '\n' + '*' * len(title) + \
'\nPlease download the the my_token.py file and save it in your apis directory.\n\n'
raise Exception(error_message)
def get_token(url):
    """Fetch an access token from *url* using the shared API tutor token.

    Raises Exception with a user-friendly message on HTTP errors.
    """
    try:
        response = urlopen(url + '?auth_manager_token=' + API_TUTOR_TOKEN)
        data = response.read()
        results = data.decode('utf-8', 'ignore')
        return json.loads(results)['token']
    except urllib.error.HTTPError as e:
        # give a good error message:
        error = utilities.get_error_message(e, url)
        raise Exception(error)
{
"point_num": 1,
"id": "has_nested_function_def",
"question": "Does this file contain any function defined inside another function?",
"answer": false
},
{
"point_num": 2,
"id": "every_function_under_20_lines",
"question": "Is every function in this file shorter than 20 lines?",
... | 3 | apis/authentication.py | n-ryan/spotify-genius |
from tamarco.resources.basic.metrics.reporters.base import CarbonBaseHandler
def test_base_handler_creation_no_metric_prefix():
    """Without a prefix argument the handler emits bare metric names."""
    base_handler = CarbonBaseHandler()
    assert base_handler.metric_prefix == ""
def test_base_handler_creation_metric_prefix():
    """A prefix gets a trailing dot appended for later concatenation."""
    base_handler = CarbonBaseHandler("test")
    assert base_handler.metric_prefix == "test."
def test_carbon_base_format(sample_meters):
    """format_metrics output must follow the carbon plain-text protocol."""
    base_handler = CarbonBaseHandler("test")
    metrics_str = base_handler.format_metrics(meters=sample_meters)
    check_metric_str(metrics_str)
def check_metric_str(metrics_str):
    """Assert that *metrics_str* follows the expected carbon plain-text layout."""
    lines = metrics_str.split("\n")[:-1]
    parsed = [line.split(" ") for line in lines]
    # Every line must be "<name> <value> <timestamp>".
    assert all(len(fields) == 3 for fields in parsed)
    names = [fields[0] for fields in parsed]
    assert all("test" in name for name in names)
    assert all("__" in name for name in names)
    # The sample meters arrive in a fixed order.
    assert "cat_counter" in names[0]
    assert "cat_weight" in names[1]
    assert all("meow_time" in name for name in names[2:])
    values = [fields[1] for fields in parsed]
    timestamps = [fields[2] for fields in parsed]
    # Values and timestamps must parse as floats.
    assert [float(value) for value in values]
    assert [float(ts) for ts in timestamps]
| [
{
"point_num": 1,
"id": "every_function_has_docstring",
"question": "Does every function in this file have a docstring?",
"answer": false
},
{
"point_num": 2,
"id": "all_function_names_snake_case",
"question": "Are all function names in this file written in snake_case?",
"answer"... | 3 | tests/unit/resources/basic/metrics/reporters/test_base.py | System73/tamarco |
import requests
obis_baseurl = "https://api.obis.org/v3/"
class NoResultException(Exception):
    """Raised when a response does not have the expected content type."""
    pass
def obis_GET(url, args, ctype, **kwargs):
    """GET *url* with query *args*, enforce content type *ctype*, return JSON."""
    out = requests.get(url, params=args, **kwargs)
    out.raise_for_status()
    stopifnot(out.headers['content-type'], ctype)
    return out.json()
def obis_write_disk(url, path, ctype, **kwargs):
    """Stream *url* to *path* on disk in 1 KiB chunks and return *path*."""
    out = requests.get(url, stream=True, **kwargs)
    out.raise_for_status()
    with open(path, 'wb') as f:
        for chunk in out.iter_content(chunk_size=1024):
            # Skip keep-alive chunks, which arrive empty.
            if chunk:
                f.write(chunk)
    return path
def stopifnot(x, ctype):
    """Raise NoResultException unless *x* equals the expected content type."""
    if x != ctype:
        raise NoResultException("content-type did not equal " + str(ctype))
def stop(x):
    """Raise a ValueError with message *x* (R-style helper)."""
    raise ValueError(x)
def handle_arrstr(x):
    """Coerce *x* to a comma-separated string.

    None passes through as None, a string is returned unchanged, and any
    other iterable of strings is joined with commas.
    """
    # Idiom fix: use `is None` / isinstance() instead of comparing
    # x.__class__.__name__ against type-name strings.
    if x is None:
        return None
    if isinstance(x, str):
        return x
    return ','.join(x)
{
"point_num": 1,
"id": "every_function_has_docstring",
"question": "Does every function in this file have a docstring?",
"answer": false
},
{
"point_num": 2,
"id": "has_multiple_inheritance",
"question": "Does any class in this file use multiple inheritance?",
"answer": false
... | 3 | pyobis/obisutils.py | MathewBiddle/pyobis |
import logging
import requests
import urllib
class AzkabanScheduler:
    """Registers cron schedules for Azkaban flows via the /schedule AJAX API."""

    def __init__(self, url: str, port: str, environment: str, session_id: str):
        self._url = f"https://{url}:{port}/schedule"
        self._environment = environment
        self._session_id = session_id
        self._logger = logging.getLogger(__name__)
        self._logger.setLevel(logging.getLevelName("INFO"))

    def schedule_flows(self, project: str, schedules: dict):
        """Schedule every flow that has a cron entry for this environment.

        *schedules* maps flow name -> {environment name: cron expression}.
        """
        for flow in schedules.keys():
            self._logger.info(f"Scheduling {project}/{flow}")
            if self._environment in schedules[flow]:
                cron_expression = schedules[flow][self._environment]
                self._logger.info(f"Scheduling {project}/{flow}/{self._environment}, schedule is {cron_expression}")
                self.schedule(project, flow, cron_expression)
            else:
                self._logger.info(f"Scheduling {project}/{flow} no entry for {self._environment}")

    def schedule(self, project, flow, cron_expression):
        """Register *cron_expression* for project/flow with the Azkaban server."""
        data = {
            "session.id": self._session_id,
            "ajax": "scheduleCronFlow",
            "projectName": project,
            "flow": flow,
            "cronExpression": cron_expression
        }
        headers = {
            "Content-Type": "application/x-www-form-urlencoded"
        }
        query = urllib.parse.urlencode(data).encode('utf-8')
        self._logger.info(f"Attempting to post schedule. Data is '{data}', query is '{query}'")
        # NOTE(security): verify=False disables TLS certificate verification;
        # confirm this is intentional for the (self-signed) target endpoint.
        response = requests.get(self._url, params=query, verify=False, headers=headers)
        self._logger.info(f"Request response '{response.text}' with a status of '{response.status_code}'")
        response.raise_for_status()
| [
{
"point_num": 1,
"id": "every_function_has_docstring",
"question": "Does every function in this file have a docstring?",
"answer": false
},
{
"point_num": 2,
"id": "no_function_exceeds_5_params",
"question": "Does every function in this file take 5 or fewer parameters (excluding sel... | 3 | azkaban_zip_uploader/scheduler.py | dwp/aws-azkaban |
# vim: tabstop=4 shiftwidth=4 softtabstop=4
# (c) Copyright 2013 Hewlett-Packard Development Company, L.P.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""Generic exec utility that allows us to set the
execute and root_helper attributes for putils.
Some projects need their own execute wrapper
and root_helper settings, so this provides that hook.
"""
from cinder.openstack.common import processutils as putils
class Executor(object):
    """Holds the execute callable and root helper used by brick utilities."""

    def __init__(self, root_helper, execute=putils.execute,
                 *args, **kwargs):
        self.set_execute(execute)
        self.set_root_helper(root_helper)

    def set_execute(self, execute):
        # The callable used to run commands (defaults to processutils.execute).
        self._execute = execute

    def set_root_helper(self, helper):
        # Helper command (e.g. a sudo wrapper) used for privileged execution.
        self._root_helper = helper
| [
{
"point_num": 1,
"id": "all_params_annotated",
"question": "Does every function parameter in this file have a type annotation (excluding self/cls)?",
"answer": false
},
{
"point_num": 2,
"id": "all_function_names_snake_case",
"question": "Are all function names in this file written ... | 3 | cinder/brick/executor.py | cloudbau/cinder |
from sys import stdout, stderr
from subprocess import check_call
from os import path, remove
from base64 import b64encode
from cryptography.hazmat.primitives.serialization import Encoding, PublicFormat, PrivateFormat, NoEncryption
from cryptography.hazmat.primitives.asymmetric import rsa
from cryptography.hazmat.backends import default_backend
PUBLIC_EXPONENT = 65537
KEY_SIZE = 2048
def _base64_string(bytes):
return b64encode(bytes).decode()
def create_key():
    """Generate a 2048-bit RSA keypair.

    Returns a dict with base64-encoded 'private-key' (PEM container) and
    'public-key' (OpenSSH format) entries.
    """
    # Create RSA TLS keypair
    key = rsa.generate_private_key(backend=default_backend(), public_exponent=PUBLIC_EXPONENT, key_size=KEY_SIZE)
    # get private key in PEM container format
    key_pem = key.private_bytes(encoding=Encoding.PEM, format=PrivateFormat.TraditionalOpenSSL, encryption_algorithm=NoEncryption())
    # get public key in OpenSSH format
    public_key = key.public_key().public_bytes(Encoding.OpenSSH, PublicFormat.OpenSSH)
    # Return
    return {
        'private-key': _base64_string(key_pem),
        'public-key': _base64_string(public_key),
    }
if __name__ == "__main__":
print(create_key())
| [
{
"point_num": 1,
"id": "every_function_under_20_lines",
"question": "Is every function in this file shorter than 20 lines?",
"answer": true
},
{
"point_num": 2,
"id": "every_function_has_docstring",
"question": "Does every function in this file have a docstring?",
"answer": fals... | 3 | terraform/nginx/shared/tls-function/tls.py | cicdenv/cicdenv |
import wx
import os
import imagebrowser
import Utils
#------------------------------------------------------------------------------------------------
class SetGraphicDialog( wx.Dialog ):
    """Dialog that lets the user pick the graphic shown in results.

    An empty value means the default graphic is used.
    """

    def __init__( self, parent, id = wx.ID_ANY, graphic = None ):
        """*graphic* pre-populates the path field when given."""
        super().__init__( parent, id, "Set Graphic",
                        style=wx.DEFAULT_DIALOG_STYLE|wx.TAB_TRAVERSAL )
        mainSizer = wx.BoxSizer( wx.VERTICAL )
        label = wx.StaticText( self, label = _('Graphic to be Displayed in Results (set to blank to use default graphic):') )
        mainSizer.Add( label, flag = wx.ALL, border = 4 )
        # Horizontal row: "Choose..." button followed by the editable path.
        bhh = wx.BoxSizer( wx.HORIZONTAL )
        self.chooseButton = wx.Button( self, label = _('Choose...') )
        bhh.Add( self.chooseButton, flag = wx.ALL, border = 4 )
        self.Bind( wx.EVT_BUTTON, self.onChoose, self.chooseButton )
        self.graphic = wx.TextCtrl( self, size=(600,-1) )
        if graphic:
            self.graphic.SetValue( graphic )
        bhh.Add( self.graphic, flag = wx.ALL | wx.EXPAND, border = 4 )
        mainSizer.Add( bhh )
        btnSizer = self.CreateStdDialogButtonSizer( wx.OK|wx.CANCEL )
        if btnSizer:
            mainSizer.Add( btnSizer, flag = wx.EXPAND|wx.ALL, border = 4 )
        self.SetSizerAndFit( mainSizer )

    def GetValue( self ):
        """Return the currently entered graphic path (may be empty)."""
        return self.graphic.GetValue()

    def onChoose( self, event ):
        """Open the image browser pre-seeded with the current path."""
        imgPath = self.GetValue()
        set_dir = os.path.dirname(imgPath)
        with imagebrowser.ImageDialog( self, set_dir = set_dir ) as dlg:
            dlg.ChangeFileTypes([
                ('All Formats (GIF, PNG, JPEG)', '*.gif|*.png|*.jpg'),
                ('GIF (*.gif)', '*.gif'),
                ('PNG (*.png)', '*.png'),
                ('JPEG (*.jpg)', '*.jpg')
            ])
            # Pre-select the existing file only if it still exists on disk.
            if os.path.isfile(imgPath):
                dlg.SetFileName( imgPath )
            if dlg.ShowModal() == wx.ID_OK:
                imgPath = dlg.GetFile()
                self.graphic.SetValue( imgPath )
# Manual smoke test: show the dialog inside a bare frame.
if __name__ == '__main__':
    app = wx.App(False)
    mainWin = wx.Frame(None, title="SprintMan" )
    mainWin.Show()
    setGraphicDialog = SetGraphicDialog( mainWin, -1, "Set Graphic Dialog Test" )
    setGraphicDialog.ShowModal()
    app.MainLoop()
| [
{
"point_num": 1,
"id": "all_params_annotated",
"question": "Does every function parameter in this file have a type annotation (excluding self/cls)?",
"answer": false
},
{
"point_num": 2,
"id": "all_function_names_snake_case",
"question": "Are all function names in this file written ... | 3 | SprintMgr/SetGraphic.py | esitarski/CrossMgr |
from pathlib import Path
import unittest
from saw_client import *
from saw_client.llvm import Contract, array, array_ty, void, i32
class ArraySwapContract(Contract):
    """SAW contract for a C function swapping the two cells of an i32[2]."""

    def specification(self):
        a0 = self.fresh_var(i32, "a0")
        a1 = self.fresh_var(i32, "a1")
        # Precondition: a 2-element i32 array holding [a0, a1].
        a = self.alloc(array_ty(2, i32),
                       points_to=array(a0, a1))
        self.execute_func(a)
        # Postcondition: the elements have been exchanged in place.
        self.points_to(a[0], a1)
        self.points_to(a[1], a0)
        self.returns(void)
class LLVMArraySwapTest(unittest.TestCase):
    def test_llvm_array_swap(self):
        """Verify 'array_swap' against ArraySwapContract via a SAW server."""
        connect(reset_server=True)
        # Attach the interactive log viewer only when run as a script.
        if __name__ == "__main__": view(LogResults())
        bcname = str(Path('tests','saw','test-files', 'llvm_array_swap.bc'))
        mod = llvm_load_module(bcname)
        result = llvm_verify(mod, 'array_swap', ArraySwapContract())
        self.assertIs(result.is_success(), True)
# Allow running this test module directly.
if __name__ == "__main__":
    unittest.main()
| [
{
"point_num": 1,
"id": "more_functions_than_classes",
"question": "Does this file define more functions than classes?",
"answer": false
},
{
"point_num": 2,
"id": "every_function_under_20_lines",
"question": "Is every function in this file shorter than 20 lines?",
"answer": true... | 3 | saw-remote-api/python/tests/saw/test_llvm_array_swap.py | msaaltink/saw-script |
from typing import List, Literal, Union, Callable, Tuple
from dataclasses import dataclass, replace
from .config_types import (
TWInterface,
TWSettingStorage, TWSettingBool, TWSetting,
WrapType, Fields, AnkiModel, LabelText, WhichField, Tags, Falsifiable,
)
ScriptKeys = Literal[
'enabled',
'name',
'version',
'description',
'conditions',
'code',
]
def __list_to_tw_bool(prototype, vals: List[ScriptKeys]):
    """Return a copy of *prototype* with every key named in *vals* set True."""
    flags = {key: True for key in vals}
    return replace(prototype, **flags)
def make_interface(
    # name for the type of the interface
    tag: str,
    prototype: WrapType,
    getter: Callable[[str, TWSettingStorage], TWSetting],
    # result is used for storing
    setter: Callable[[str, TWSetting], Union[bool, TWSetting]],
    wrapper: Callable[[str, TWSettingStorage, AnkiModel, Fields, WhichField, slice, Tags], Tuple[Fields, Tags]],
    label: Falsifiable(Callable[[str, TWSettingStorage], LabelText]),
    reset: Falsifiable(Callable[[str, TWSettingStorage], TWSetting]),
    deletable: Falsifiable(Callable[[str, TWSettingStorage], bool]),
    # flags for keys that are read-only
    readonly: TWSettingBool,
    # flags for keys that are stored in the `storage` field
    store: TWSettingBool,
) -> TWInterface:
    """Bundle the per-setting callbacks into a TWInterface record.

    Pure constructor: every argument is passed through positionally,
    unchanged, in declaration order.
    """
    return TWInterface(
        tag,
        prototype,
        getter,
        setter,
        wrapper,
        label,
        reset,
        deletable,
        readonly,
        store,
    )
| [
{
"point_num": 1,
"id": "every_function_has_docstring",
"question": "Does every function in this file have a docstring?",
"answer": false
},
{
"point_num": 2,
"id": "has_nested_function_def",
"question": "Does this file contain any function defined inside another function?",
"ans... | 3 | src/lib/interface.py | hgiesel/anki_text_wrapper |
#!/usr/bin/env python3
'''
Return a list of items without any elements with the same value next to each other and preserve the original order of elements.
'''
def unique_in_order(a):
    """Collapse runs of consecutive equal items into a single item."""
    collapsed = []
    for item in a:
        if not collapsed or item != collapsed[-1]:
            collapsed.append(item)
    return collapsed
#Alternative Solutions
def unique_in_order(iterable):
    """Return the items of *iterable* with consecutive duplicates removed."""
    out = []
    previous = object()  # sentinel: never equal to a real element
    for item in iterable:
        if item != previous:
            out.append(item)
        previous = item
    return out
def unique_in_order(iterable):
    """Return elements without consecutive duplicates, preserving order.

    BUG FIX: the original seeded ``prev`` with None, which silently dropped
    a leading None element; a unique sentinel object avoids that.
    """
    result = []
    prev = object()  # never compares equal to any real element
    for char in iterable[0:]:
        if char != prev:
            result.append(char)
        prev = char
    return result
| [
{
"point_num": 1,
"id": "no_function_exceeds_5_params",
"question": "Does every function in this file take 5 or fewer parameters (excluding self/cls)?",
"answer": true
},
{
"point_num": 2,
"id": "all_function_names_snake_case",
"question": "Are all function names in this file written... | 3 | CodeWars/2019/UniqueInOrder-6k.py | JLJTECH/TutorialTesting |
import logging
import sys
try:
import obd
except ImportError:
import subprocess
subprocess.run([
'pip',
'install',
'git+https://github.com/SLR8/py-obd'
])
import obd
# connection = obd.OBD()
# command = obd.commands.SPEED
# response = connection.query(command)
# print(response.value)
# Executes commands passed from the client
# All commands should start with '$'
def executor(command_name):
    """Resolve *command_name* to an OBD command, query it, return the response.

    Returns None when the command name is unknown.
    """
    command = name_to_command(command_name)
    if command is None:
        logging.error('Cannot execute unknown command: %s', command_name)
        return None
    logging.info(command)
    # BUG FIX: the module-level ``connection`` was commented out, so this
    # function raised NameError; open the connection here instead.
    connection = obd.OBD()
    return connection.query(command)
def has_command(command):
    """Return True if *command* is a known OBD command object."""
    return obd.commands.has_command(command)
def has_name(name):
    """Return True if *name* is a known OBD command name."""
    return obd.commands.has_name(name)
def name_to_command(command_name):
    """Look up the OBD command object for *command_name*.

    Returns None (after logging an error) when the name is unknown.
    Note: the name must already be upper-case, matching obd.commands keys.
    """
    if has_name(command_name):
        # Dropped the debug print("Command exists") in favour of silence;
        # callers learn the outcome from the return value.
        return obd.commands[command_name]
    logging.error('The command does not exist')
    return None
if __name__ == "__main__":
    # First CLI argument is the OBD command name to execute.
    executor(sys.argv[1])
{
"point_num": 1,
"id": "every_function_has_docstring",
"question": "Does every function in this file have a docstring?",
"answer": false
},
{
"point_num": 2,
"id": "all_params_annotated",
"question": "Does every function parameter in this file have a type annotation (excluding self/... | 3 | src/executor.py | SLR8/core |
from django.test import TestCase, Client
from django.contrib.auth import get_user_model
from django.urls import reverse
class AdminSiteTests(TestCase):
    """Django admin integration tests for the custom user model."""

    def setUp(self):
        """Create a logged-in superuser plus a regular user fixture."""
        self.client = Client()
        self.admin_user = get_user_model().objects.create_superuser(
            email='admin@gmail.com',
            password='1qazxsw2'
        )
        self.client.force_login(self.admin_user)
        self.user = get_user_model().objects.create_user(
            email='test@gmail.com',
            password='1qazxsw2',
            name='Test name'
        )

    def test_users_listed(self):
        """Test that users are listed in user_page"""
        url = reverse('admin:core_user_changelist')
        res = self.client.get(url)
        self.assertContains(res, self.user.name)
        self.assertContains(res, self.user.email)

    def test_user_change_page(self):
        """Test that the user edit page works"""
        url = reverse('admin:core_user_change', args=[self.user.id])
        res = self.client.get(url)
        self.assertEqual(res.status_code, 200)

    def test_create_user_page(self):
        """Test that creates user page works"""
        url = reverse('admin:core_user_add')
        res = self.client.get(url)
        self.assertEqual(res.status_code, 200)
| [
{
"point_num": 1,
"id": "every_function_under_20_lines",
"question": "Is every function in this file shorter than 20 lines?",
"answer": true
},
{
"point_num": 2,
"id": "more_functions_than_classes",
"question": "Does this file define more functions than classes?",
"answer": true
... | 3 | app/core/tests/test_admin.py | NikolasK88/recipe-app-api |
import pytest
from datasets import Dataset
from pathlib import Path
from typing import Dict
from copy import deepcopy
from tio import Task
__all__ = [
"DummyTask"
]
EXPECTED_DUMMY_DATA = {
"idx" : [0, 1, 2],
"input" : ["The comment section is ", "The butcher of ", "Get "],
"output": ["out of control.", "Blevkin.", "Some."]
}
@pytest.fixture()
def dummy_data():
    """Yield a fresh deep copy of EXPECTED_DUMMY_DATA so tests may mutate it."""
    yield deepcopy(EXPECTED_DUMMY_DATA)
@Task.register('dummy')
class DummyTask(Task):
    """Minimal in-memory Task used as a test fixture.

    Both splits serve deep copies of the same EXPECTED_DUMMY_DATA payload.
    """
    SPLIT_MAPPING = {
        "train": deepcopy(EXPECTED_DUMMY_DATA),
        "val": deepcopy(EXPECTED_DUMMY_DATA)
    }

    @staticmethod
    def map_to_standard_entries(sample: Dict) -> Dict:
        # Expose the raw input/output columns under the canonical names.
        sample['input_sequence'] = sample['input']
        sample['target'] = sample['output']
        return sample

    def _load_samples(self, split: str) -> Dataset:
        # NOTE(review): *split* is ignored and the same data is always
        # returned — presumably intentional for a dummy fixture.
        return Dataset.from_dict(EXPECTED_DUMMY_DATA)
| [
{
"point_num": 1,
"id": "every_function_has_docstring",
"question": "Does every function in this file have a docstring?",
"answer": false
},
{
"point_num": 2,
"id": "has_multiple_inheritance",
"question": "Does any class in this file use multiple inheritance?",
"answer": false
... | 3 | test_fixtures/dummy_objects.py | gabeorlanski/taskio |
import os
import torch
def accuracy(output, target, topk=(1,)):
    """Computes the precision@k for the specified values of k.

    Args:
        output: (batch, num_classes) score tensor.
        target: (batch,) ground-truth class indices.
        topk: iterable of k values to evaluate.

    Returns:
        List of scalar tensors, precision@k in percent, one per k.
    """
    maxk = max(topk)
    batch_size = target.size(0)
    # Top-maxk predicted class indices: (batch, maxk) -> transpose to (maxk, batch).
    _, pred = output.topk(maxk, 1, True, True)
    pred = pred.t()
    # Broadcast the target row against each of the top-k prediction rows.
    correct = pred.eq(target.view(1, -1).expand_as(pred))
    res = []
    for k in topk:
        # BUG FIX: use reshape() — correct[:k] is non-contiguous, so view()
        # raises a RuntimeError on modern PyTorch.
        correct_k = correct[:k].reshape(-1).float().sum(0)
        res.append(correct_k.mul_(100.0 / batch_size))
    return res
def adjust_learning_rate(optimizer, epoch, learning_rate, end_epoch):
    """Multiply every param group's LR by 0.2 at ~1/3 and ~2/3 of training.

    (The original docstring claimed "decayed by 10 every 30 epochs", which
    does not match the code below.)
    """
    if epoch in [round(end_epoch * 0.333), round(end_epoch * 0.666)]:
        for param_group in optimizer.param_groups:
            param_group['lr'] *= 0.2
        # NOTE(review): this rebinding only updates the local variable for the
        # print below; the caller's learning_rate is not changed.
        learning_rate = learning_rate * 0.2
        print('Adjust_learning_rate ' + str(epoch))
        print('New_LearningRate: {}'.format(learning_rate))
class AverageMeter(object):
    """Computes and stores the average and current value"""

    def __init__(self):
        self.reset()

    def reset(self):
        # val: most recent observation; sum/count feed the running average.
        self.val = 0
        self.avg = 0
        self.sum = 0
        self.count = 0

    def update(self, val, n=1):
        """Record *val* observed *n* times and refresh the running average."""
        self.val = val
        self.sum += val * n
        self.count += n
        self.avg = self.sum / self.count
def save_checkpoint(state, at_type=''):
    """Persist a training checkpoint under ./model/.

    The filename encodes the attention type, the epoch and the prec1 score.
    """
    os.makedirs('./model', exist_ok=True)
    epoch = state['epoch']
    prec1 = round(float(state['prec1']), 4)
    save_dir = './model/' + at_type + '_' + str(epoch) + '_' + str(prec1)
    torch.save(state, save_dir)
    print(save_dir)
| [
{
"point_num": 1,
"id": "every_function_has_docstring",
"question": "Does every function in this file have a docstring?",
"answer": false
},
{
"point_num": 2,
"id": "more_functions_than_classes",
"question": "Does this file define more functions than classes?",
"answer": true
}... | 3 | Code/util.py | bryant1410/Emotion-FAN |
# Copyright (c) Kuba Szczodrzyński 2021-5-6.
import os
from .primes import PRIMES
class DiffieHellman:
    """Finite-field Diffie-Hellman key exchange (generator g = 2).

    Group primes come from the PRIMES table, indexed by group number.
    """

    _prime: int
    _private_key: int
    _public_key: int
    _shared_key: int

    @staticmethod
    def _to_bytes(a: int) -> bytes:
        """Minimal-length big-endian byte encoding of *a*."""
        length = (a.bit_length() + 7) // 8
        return a.to_bytes(length, byteorder="big")

    def __init__(self, group: int = 14, key_bits: int = 540) -> None:
        """Load the group prime and generate an initial private key."""
        self._prime = int.from_bytes(PRIMES[group], byteorder="big")
        self.generate_private_key(key_bits)

    def generate_private_key(self, key_bits: int = 540) -> bytes:
        """Create a fresh random private key; return its byte encoding."""
        self.set_private_key(os.urandom(key_bits // 8 + 8))
        return self.get_private_key()

    def set_private_key(self, key: bytes) -> None:
        """Install *key* and derive the matching public key (2^priv mod p)."""
        self._private_key = int.from_bytes(key, byteorder="big")
        self._public_key = pow(2, self._private_key, self._prime)

    def generate_shared_key(self, other_public_key: bytes) -> bytes:
        """Combine the peer's public key with our private key."""
        peer = int.from_bytes(other_public_key, "big")
        self._shared_key = pow(peer, self._private_key, self._prime)
        return self.get_shared_key()

    def get_private_key(self) -> bytes:
        return self._to_bytes(self._private_key)

    def get_public_key(self) -> bytes:
        return self._to_bytes(self._public_key)

    def get_shared_key(self) -> bytes:
        return self._to_bytes(self._shared_key)
| [
{
"point_num": 1,
"id": "all_return_types_annotated",
"question": "Does every function in this file have a return type annotation?",
"answer": true
},
{
"point_num": 2,
"id": "no_function_exceeds_5_params",
"question": "Does every function in this file take 5 or fewer parameters (exc... | 3 | diffiehellman/diffie_hellman.py | TOPDapp/py-diffie-hellman |
import pytest
from django.urls import reverse
from soduko.users.models import User
pytestmark = pytest.mark.django_db
class TestUserAdmin:
    """Smoke tests for the User model's Django admin pages."""

    def test_changelist(self, admin_client):
        url = reverse("admin:users_user_changelist")
        response = admin_client.get(url)
        assert response.status_code == 200

    def test_search(self, admin_client):
        url = reverse("admin:users_user_changelist")
        response = admin_client.get(url, data={"q": "test"})
        assert response.status_code == 200

    def test_add(self, admin_client):
        url = reverse("admin:users_user_add")
        response = admin_client.get(url)
        assert response.status_code == 200
        response = admin_client.post(
            url,
            data={
                "username": "test",
                "password1": "My_R@ndom-P@ssw0rd",
                "password2": "My_R@ndom-P@ssw0rd",
            },
        )
        # A successful add redirects (302) and persists the user.
        assert response.status_code == 302
        assert User.objects.filter(username="test").exists()

    def test_view_user(self, admin_client):
        user = User.objects.get(username="admin")
        url = reverse("admin:users_user_change", kwargs={"object_id": user.pk})
        response = admin_client.get(url)
        assert response.status_code == 200
| [
{
"point_num": 1,
"id": "has_multiple_inheritance",
"question": "Does any class in this file use multiple inheritance?",
"answer": false
},
{
"point_num": 2,
"id": "all_function_names_snake_case",
"question": "Are all function names in this file written in snake_case?",
"answer":... | 3 | soduko/users/tests/test_admin.py | idannik/soduko_django |
import uuid
from turkit2.common import TextClassification
from turkit2.qualifications import Unique, Locale, AcceptRate
from utils import get_client
client = get_client()
quals = [Locale(), AcceptRate()]
task = TextClassification(client, 'Test3', '0.01', 'test test', 600, 6000, ['positive', 'negative'], question='Which class does this text match, positive or negative?', qualifications=quals)
documents = [f'test{i}' for i in range(5)]
def proc(text):
    """Submit *text* to the MTurk task and print each (answer, assignment)."""
    for answer, assignment in task.ask(verbosity=100, text=text):
        print(answer)
        print(assignment)
def main():
    """Run the classification task once per document, sequentially."""
    # Removed the unused ``tasks = []`` accumulator from the original.
    for text in documents:
        proc(text)


main()
| [
{
"point_num": 1,
"id": "has_nested_function_def",
"question": "Does this file contain any function defined inside another function?",
"answer": false
},
{
"point_num": 2,
"id": "every_function_has_docstring",
"question": "Does every function in this file have a docstring?",
"ans... | 3 | tests/text_classification_nonsync.py | anthliu/turkit2 |
"""Error handling for the tilesets CLI"""
class TilesetsError(Exception):
    """Base Tilesets error

    Deriving errors from this base isolates module development
    problems from Python usage problems.
    """

    # Process exit status the CLI uses when this error is raised.
    exit_code = 1

    def __init__(self, message):
        """Error constructor

        Parameters
        ----------
        message: str
            Error description
        """
        self.message = message
class TilesetNameError(TilesetsError):
    """Not a valid tileset id.

    Inherits message handling and exit_code from TilesetsError.
    """
| [
{
"point_num": 1,
"id": "has_multiple_inheritance",
"question": "Does any class in this file use multiple inheritance?",
"answer": false
},
{
"point_num": 2,
"id": "every_class_has_docstring",
"question": "Does every class in this file have a docstring?",
"answer": true
},
{
... | 3 | mapbox_tilesets/errors.py | bozdoz/tilesets-cli |
# -*- coding: utf-8 -*-
# Copyright 2021 Cohesity Inc.
class PVCInfo_PVCSpec_Resources_RequestsEntry(object):
    """Implementation of the 'PVCInfo_PVCSpec_Resources_RequestsEntry' model.

    Generated model: a simple key/value pair from a PVC resource-requests map.

    Attributes:
        key (string): TODO: Type description here.
        value (string): TODO: Type description here.
    """

    # Create a mapping from Model property names to API property names
    _names = {
        "key": 'key',
        "value": 'value'
    }

    def __init__(self,
                 key=None,
                 value=None):
        """Constructor for the PVCInfo_PVCSpec_Resources_RequestsEntry class"""
        # Initialize members of the class
        self.key = key
        self.value = value

    @classmethod
    def from_dictionary(cls,
                        dictionary):
        """Creates an instance of this model from a dictionary

        Args:
            dictionary (dictionary): A dictionary representation of the object as
            obtained from the deserialization of the server's response. The keys
            MUST match property names in the API description.

        Returns:
            object: An instance of this structure class.
        """
        if dictionary is None:
            return None

        # Extract variables from the dictionary
        key = dictionary.get('key', None)
        value = dictionary.get('value', None)

        # Return an object of this model
        return cls(key,
                   value)
| [
{
"point_num": 1,
"id": "all_params_annotated",
"question": "Does every function parameter in this file have a type annotation (excluding self/cls)?",
"answer": false
},
{
"point_num": 2,
"id": "every_function_under_20_lines",
"question": "Is every function in this file shorter than ... | 3 | cohesity_management_sdk/models/pvc_info_pvc_spec_resources_requests_entry.py | cohesity/management-sdk-python |
import pandas as pd
from test_importer import Importer
from functools import lru_cache
class Analyzer:
    """Query papers by keyword over a dataframe built from the database."""

    def __init__(self, *args, **kwargs):
        self._importer = Importer(database_name=kwargs["database_name"])
        self.data = self._importer.create_dataframe()

    # Without maxsize the cache keeps 128 items. NOTE(review): lru_cache on a
    # method keys on ``self`` and keeps instances alive for the cache's life.
    @lru_cache
    def filter(self, keyword):
        """Return the rows whose keyword list contains *keyword*."""
        _match = self.data[self.data["keywords"].apply(lambda x: keyword in x).values]
        return _match

    def return_country(self, keyword):
        """Return the countries of the papers that matched *keyword*."""
        # BUG FIX: the original signature omitted ``self``, so instance calls
        # bound ``keyword`` to the instance and ``self`` was unresolved.
        _match = self.filter(keyword)
        return _match['country']

    def return_created_date(self, keyword):
        """Return the created dates of the papers that matched *keyword*."""
        # BUG FIX: the original omitted both ``self`` and ``keyword`` and
        # referenced them anyway, raising NameError when called.
        _match = self.filter(keyword)
        return _match['created_date']
| [
{
"point_num": 1,
"id": "all_function_names_snake_case",
"question": "Are all function names in this file written in snake_case?",
"answer": true
},
{
"point_num": 2,
"id": "every_function_has_docstring",
"question": "Does every function in this file have a docstring?",
"answer":... | 3 | test/test_analyzer.py | Quiltomics/godseye |
import threading
try:
from .grab import Image
except:pass
def grab_bytes():
    # Serialise a freshly grabbed Image (screen capture, per the .grab
    # module — TODO confirm) to raw bytes.
    return Image().asbytes
def send(s,a):
    # Post one frame to *a* via *s*, tagged with the b's' screenshot prefix.
    s.post(b's'+grab_bytes(),a)
def show_bytes(r):
    """Display an image frame received from the wire; ignore other frames.

    BUG FIX: frames are ``bytes`` (send() prefixes b's'), but the original
    compared against the str 's', which raises TypeError on Python 3.
    """
    if not r.startswith(b's'):
        return
    Image(r[1:]).show()
def conf(s, a):
    """Continuously stream frames to *a* on a background thread."""
    def _conf():
        while True:
            send(s, a)
    # FIX: daemon thread, so the endless streaming loop cannot keep the
    # process alive after the main thread exits.
    threading.Thread(target=_conf, daemon=True).start()
| [
{
"point_num": 1,
"id": "all_function_names_snake_case",
"question": "Are all function names in this file written in snake_case?",
"answer": true
},
{
"point_num": 2,
"id": "all_params_annotated",
"question": "Does every function parameter in this file have a type annotation (excludi... | 3 | openctrl/display.py | openctrl-python/openctrl |
# Time: ctor: O(n), n is number of words in the dictionary.
# lookup: O(1)
# Space: O(k), k is number of unique words.
class ValidWordAbbr(object):
def __init__(self, dictionary):
"""
initialize your data structure here.
:type dictionary: List[str]
"""
self.lookup_ = collections.defaultdict(set)
for word in dictionary:
abbr = self.abbreviation(word)
self.lookup_[abbr].add(word)
def isUnique(self, word):
"""
check if a word is unique.
:type word: str
:rtype: bool
"""
abbr = self.abbreviation(word)
return self.lookup_[abbr] <= {word}
def abbreviation(self, word):
if len(word) <= 2:
return word
return word[0] + str(len(word)-2) + word[-1]
# Your ValidWordAbbr object will be instantiated and called as such:
# vwa = ValidWordAbbr(dictionary)
# vwa.isUnique("word")
# vwa.isUnique("anotherWord")
| [
{
"point_num": 1,
"id": "every_function_has_docstring",
"question": "Does every function in this file have a docstring?",
"answer": false
},
{
"point_num": 2,
"id": "no_function_exceeds_5_params",
"question": "Does every function in this file take 5 or fewer parameters (excluding sel... | 3 | Python/unique-word-abbreviation.py | bssrdf/LeetCode-5 |
# coding: utf-8
"""
Translator Knowledge Beacon Aggregator API
This is the Translator Knowledge Beacon Aggregator web service application programming interface (API) that provides integrated access to a pool of knowledge sources publishing concepts and relations through the Translator Knowledge Beacon API. This API is similar to that of the latter mentioned API with the addition of some extra informative endpoints plus session identifier and beacon indices. These latter identifiers are locally assigned numeric indices provided to track the use of specific registered beacons within the aggregator API itself. # noqa: E501
OpenAPI spec version: 1.1.1
Contact: richard@starinformatics.com
Generated by: https://openapi-generator.tech
"""
from __future__ import absolute_import
import unittest
import openapi_client
from openapi_client.models.client_statement_object import ClientStatementObject # noqa: E501
from openapi_client.rest import ApiException
class TestClientStatementObject(unittest.TestCase):
    """ClientStatementObject unit test stubs"""
    # NOTE: generated scaffolding — the test body is an unimplemented stub.

    def setUp(self):
        pass

    def tearDown(self):
        pass

    def testClientStatementObject(self):
        """Test ClientStatementObject"""
        # FIXME: construct object with mandatory attributes with example values
        # model = openapi_client.models.client_statement_object.ClientStatementObject()  # noqa: E501
        pass
# Allow running this test module directly.
if __name__ == '__main__':
    unittest.main()
| [
{
"point_num": 1,
"id": "every_function_under_20_lines",
"question": "Is every function in this file shorter than 20 lines?",
"answer": true
},
{
"point_num": 2,
"id": "has_multiple_inheritance",
"question": "Does any class in this file use multiple inheritance?",
"answer": false... | 3 | client/test/test_client_statement_object.py | NCATS-Tangerine/kba-reasoner |
import torch
from torch import nn
from torch.distributions import MultivariateNormal
class Normal(nn.Module):
    """Learnable multivariate normal prior.

    ``means`` is the mean vector; ``std`` is (despite the name) the full
    covariance matrix, initialised to the identity. Both are trainable.
    """

    def __init__(self, num_vars=100):
        super(Normal, self).__init__()
        self.num_vars = num_vars
        self.means = nn.Parameter(torch.zeros(num_vars))
        # NOTE(review): this parameter is a covariance matrix, not a stddev.
        self.std = nn.Parameter(torch.eye(num_vars))

    def log_prob(self, x):
        """Log-density of *x* under the current parameters."""
        distr = MultivariateNormal(self.means, self.std)
        return distr.log_prob(x)

    def sample(self, num_samples):
        """Draw samples of shape (num_samples, num_vars)."""
        distr = MultivariateNormal(self.means, self.std)
        # BUG FIX: Distribution.sample_n() was deprecated and removed from
        # PyTorch; pass an explicit sample shape to sample() instead.
        return distr.sample((num_samples,))
| [
{
"point_num": 1,
"id": "no_function_exceeds_5_params",
"question": "Does every function in this file take 5 or fewer parameters (excluding self/cls)?",
"answer": true
},
{
"point_num": 2,
"id": "all_function_names_snake_case",
"question": "Are all function names in this file written... | 3 | core/learnable_priors/normal_prior.py | insilicomedicine/TRIP |
# NOTE: Must be before we import or call anything that may be synchronous.
from gevent import monkey
monkey.patch_all()
import sys
import os
sys.path.append(os.path.join(os.path.dirname(__file__), "../"))
import logging
from Crypto import Random
from util.log import logfile_path
from util.workers import get_worker_count, get_worker_connections_count
logconfig = logfile_path(debug=False)
bind = "unix:/tmp/gunicorn_secscan.sock"
workers = get_worker_count("secscan", 2, minimum=2, maximum=4)
worker_class = "gevent"
worker_connections = get_worker_connections_count("secscan")
pythonpath = "."
preload_app = True
def post_fork(server, worker):
    """Gunicorn hook: runs in each worker process right after fork."""
    # Reset the Random library to ensure it won't raise the "PID check failed."
    # error after gunicorn forks.
    Random.atfork()
def when_ready(server):
    """Gunicorn hook: log the worker configuration once the master is ready."""
    logger = logging.getLogger(__name__)
    logger.debug(
        "Starting secscan gunicorn with %s workers and %s worker class", workers, worker_class
    )
| [
{
"point_num": 1,
"id": "no_function_exceeds_5_params",
"question": "Does every function in this file take 5 or fewer parameters (excluding self/cls)?",
"answer": true
},
{
"point_num": 2,
"id": "every_function_has_docstring",
"question": "Does every function in this file have a docs... | 3 | conf/gunicorn_secscan.py | sferich888/quay |
# coding: utf-8
"""
Kubernetes
No description provided (generated by Swagger Codegen https://github.com/swagger-api/swagger-codegen)
OpenAPI spec version: v1.11.3
Generated by: https://github.com/swagger-api/swagger-codegen.git
"""
from __future__ import absolute_import
import os
import sys
import unittest
import kubernetes.client
from kubernetes.client.rest import ApiException
from kubernetes.client.apis.logs_api import LogsApi
class TestLogsApi(unittest.TestCase):
    """ LogsApi unit test stubs """
    # NOTE: generated scaffolding — each test is an unimplemented stub.

    def setUp(self):
        self.api = kubernetes.client.apis.logs_api.LogsApi()

    def tearDown(self):
        pass

    def test_log_file_handler(self):
        """
        Test case for log_file_handler
        """
        pass

    def test_log_file_list_handler(self):
        """
        Test case for log_file_list_handler
        """
        pass
# Allow running this test module directly.
if __name__ == '__main__':
    unittest.main()
| [
{
"point_num": 1,
"id": "every_function_under_20_lines",
"question": "Is every function in this file shorter than 20 lines?",
"answer": true
},
{
"point_num": 2,
"id": "all_function_names_snake_case",
"question": "Are all function names in this file written in snake_case?",
"answ... | 3 | kubernetes/test/test_logs_api.py | woqer/python |
from dataclasses import dataclass
from typing import Generator
@dataclass
class Execution:
    """A recorded invocation: an action class plus the parameters it got."""
    action: type    # presumably an Action subclass — TODO confirm
    params: object  # opaque parameter object passed to the action hooks
class Action:
    """Base class for actions; subclasses override the classmethod hooks."""

    @classmethod
    def can_execute(cls, p):
        # Default: always executable.
        return True

    @classmethod
    def execute(cls, p):
        # Default: no-op.
        pass

    @classmethod
    def record_undo(cls, p) -> Generator[Execution, None, None]:
        # The unreachable ``yield`` after ``return`` marks this as a generator
        # function, so the default is an *empty* generator rather than None.
        return
        yield
| [
{
"point_num": 1,
"id": "has_multiple_inheritance",
"question": "Does any class in this file use multiple inheritance?",
"answer": false
},
{
"point_num": 2,
"id": "every_class_has_docstring",
"question": "Does every class in this file have a docstring?",
"answer": false
},
{... | 3 | vfio_isolate/action/action.py | spheenik/vfio-isolate |
# coding: utf-8
"""
Kubernetes
No description provided (generated by Swagger Codegen https://github.com/swagger-api/swagger-codegen)
OpenAPI spec version: v1.9.4
Generated by: https://github.com/swagger-api/swagger-codegen.git
"""
from __future__ import absolute_import
import os
import sys
import unittest
import client
from client.rest import ApiException
from client.apis.apiregistration_v1beta1_api import ApiregistrationV1beta1Api
class TestApiregistrationV1beta1Api(unittest.TestCase):
    """ ApiregistrationV1beta1Api unit test stubs """
    # NOTE: generated scaffolding — every test below is an unimplemented stub.

    def setUp(self):
        self.api = client.apis.apiregistration_v1beta1_api.ApiregistrationV1beta1Api()

    def tearDown(self):
        pass

    def test_create_api_service(self):
        """
        Test case for create_api_service
        """
        pass

    def test_delete_api_service(self):
        """
        Test case for delete_api_service
        """
        pass

    def test_delete_collection_api_service(self):
        """
        Test case for delete_collection_api_service
        """
        pass

    def test_get_api_resources(self):
        """
        Test case for get_api_resources
        """
        pass

    def test_list_api_service(self):
        """
        Test case for list_api_service
        """
        pass

    def test_patch_api_service(self):
        """
        Test case for patch_api_service
        """
        pass

    def test_read_api_service(self):
        """
        Test case for read_api_service
        """
        pass

    def test_replace_api_service(self):
        """
        Test case for replace_api_service
        """
        pass

    def test_replace_api_service_status(self):
        """
        Test case for replace_api_service_status
        """
        pass
# Allow running this test module directly.
if __name__ == '__main__':
    unittest.main()
| [
{
"point_num": 1,
"id": "all_function_names_snake_case",
"question": "Are all function names in this file written in snake_case?",
"answer": false
},
{
"point_num": 2,
"id": "more_functions_than_classes",
"question": "Does this file define more functions than classes?",
"answer":... | 3 | kubernetes/test/test_apiregistration_v1beta1_api.py | craigtracey/python |
from collections import defaultdict
from apps.configuration.models import Environment
class MapConfigValues(object):
    """Resolve config values with environment-priority fallback.

    The requested environment is consulted first, then the remaining
    environments in ascending ``priority`` order.
    """

    def __init__(self, values, env_id):
        environments = Environment.query.all()
        sort_environments = sorted([x for x in environments if x.id != env_id], key=lambda x: x.priority)
        self.env_ids = [env_id] + [x.id for x in sort_environments]
        self.env_id = env_id
        self.map_values = defaultdict(dict)
        # IDIOM FIX: build the mapping with a plain loop instead of abusing a
        # list comprehension (with __setitem__) for its side effects.
        for v in values:
            self.map_values[v.key_id][v.env_id] = v.value

    def _iter_value(self, map_value):
        """Return the first *truthy* value along the environment chain."""
        for env_id in self.env_ids:
            value = map_value.get(env_id)
            if value:
                return value

    def get(self, key_id):
        """Pop and resolve the value for *key_id*; None if absent/unresolved."""
        map_value = self.map_values.pop(key_id, None)
        if map_value:
            return self._iter_value(map_value)
| [
{
"point_num": 1,
"id": "more_functions_than_classes",
"question": "Does this file define more functions than classes?",
"answer": true
},
{
"point_num": 2,
"id": "all_function_names_snake_case",
"question": "Are all function names in this file written in snake_case?",
"answer": ... | 3 | spug_api/apps/apis/utils.py | showsmall/spug |
import utils
class Model:
    """N-gram model mapping each (n-1)-gram to its most probable next char.

    The model file holds one n-gram per line; each line parses into
    (chars, minus_log_p) via utils.parse_model_file_line.
    """

    def __init__(self, file_path):
        with open(file_path, 'r', encoding="utf8") as model_file:
            self.model_tree = {}
            for line in model_file:
                chars, minus_log_p = utils.parse_model_file_line(line)
                n_1_gram = ''.join(chars[:-1])
                last_char = chars[-1]
                if n_1_gram not in self.model_tree:
                    self.model_tree[n_1_gram] = {}
                self.model_tree[n_1_gram][last_char] = minus_log_p
        # Collapse each candidate dict to the single character with the
        # smallest -log(p), i.e. the highest-probability continuation.
        for n_1_gram in self.model_tree:
            min_n_char, min_value = next(iter(self.model_tree[n_1_gram].items()))
            for n_char, value in self.model_tree[n_1_gram].items():
                if value < min_value:
                    min_n_char, min_value = n_char, value
            self.model_tree[n_1_gram] = min_n_char

    def __getitem__(self, n_1_gram):
        # Raises KeyError for unseen (n-1)-grams.
        return self.model_tree[n_1_gram]
{
"point_num": 1,
"id": "more_functions_than_classes",
"question": "Does this file define more functions than classes?",
"answer": true
},
{
"point_num": 2,
"id": "every_function_under_20_lines",
"question": "Is every function in this file shorter than 20 lines?",
"answer": false... | 3 | model.py | OrBin/N-Gram-Language-Model |
from pandas import DataFrame, Series
from sklearn.svm import SVR
from bender.trained_model.interface import TrainedRegressionModel
class SupportVectorRegression(TrainedRegressionModel):
    """Wraps a fitted sklearn SVR together with its input feature names."""

    model: SVR
    input_features: list[str]

    def __init__(self, model: SVR, input_features: list[str]) -> None:
        self.model = model
        self.input_features = input_features

    def _predict_on_valid(self, data: DataFrame) -> Series:
        """Predict on the validation frame using the wrapped SVR."""
        return self.model.predict(data)
| [
{
"point_num": 1,
"id": "no_function_exceeds_5_params",
"question": "Does every function in this file take 5 or fewer parameters (excluding self/cls)?",
"answer": true
},
{
"point_num": 2,
"id": "all_return_types_annotated",
"question": "Does every function in this file have a return... | 3 | bender/trained_model/support_vector.py | otovo/bender |
from typing import Optional
from thyme.types.blockchain_format.coin import Coin
from thyme.types.blockchain_format.program import Program
from thyme.types.blockchain_format.sized_bytes import bytes32
from thyme.wallet.puzzles.load_clvm import load_clvm
MOD = load_clvm("genesis-by-coin-id-with-0.clvm", package_or_requirement=__name__)
def create_genesis_or_zero_coin_checker(genesis_coin_id: bytes32) -> Program:
    """
    Given a specific genesis coin id, create a `genesis_coin_mod` that allows
    both that coin id to issue a cc, or anyone to create a cc with amount 0.
    """
    return MOD.curry(genesis_coin_id)
def genesis_coin_id_for_genesis_coin_checker(
    genesis_coin_checker: Program,
) -> Optional[bytes32]:
    """
    Given a `genesis_coin_checker` program, pull out the genesis coin id.

    Returns None when uncurrying fails or the program was not curried from
    this module's MOD.
    """
    r = genesis_coin_checker.uncurry()
    if r is None:
        return r
    f, args = r
    # Only programs built from this exact module carry a genesis coin id.
    if f != MOD:
        return None
    return args.first().as_atom()
def lineage_proof_for_genesis(parent_coin: Coin) -> Program:
    """Lineage proof for a genesis parent (trailing selector 0)."""
    proof = (0, [parent_coin.as_list(), 0])
    return Program.to(proof)
def lineage_proof_for_zero(parent_coin: Coin) -> Program:
    """Lineage proof for a zero-amount parent (trailing selector 1)."""
    proof = (0, [parent_coin.as_list(), 1])
    return Program.to(proof)
def lineage_proof_for_coin(parent_coin: Coin) -> Program:
    """Pick the right lineage proof based on the parent coin's amount."""
    # Non-zero amount means the parent must be the genesis coin;
    # a zero-amount parent gets the zero-coin proof instead.
    if parent_coin.amount:
        return lineage_proof_for_genesis(parent_coin)
    return lineage_proof_for_zero(parent_coin)
| [
{
"point_num": 1,
"id": "all_params_annotated",
"question": "Does every function parameter in this file have a type annotation (excluding self/cls)?",
"answer": true
},
{
"point_num": 2,
"id": "all_return_types_annotated",
"question": "Does every function in this file have a return t... | 3 | thyme/wallet/puzzles/genesis_by_coin_id_with_0.py | yuanliuus/thyme-blockchain |
from django.conf import settings
from django.core import signing
from django.core.signing import BadSignature, SignatureExpired, TimestampSigner
class Registrations:
    """Helpers for creating and checking signed, time-limited tokens."""

    @staticmethod
    def generate_registration_token(email):
        """Sign *email* into a timestamped registration token."""
        payload = signing.dumps({'email': email})
        return TimestampSigner().sign(payload)

    @staticmethod
    def validate_registration_token(token):
        """Return the email inside a valid, unexpired token, else None."""
        signer = TimestampSigner()
        try:
            unsigned = signer.unsign(token, max_age=settings.REGISTRATION_CONFIRMATION_TIMEOUT)
            # signing.loads may also raise BadSignature on a tampered payload,
            # so it stays inside the try.
            return signing.loads(unsigned)['email']
        except (BadSignature, SignatureExpired):
            return None

    @staticmethod
    def generate_student_confirmation_token(student_username, course, member):
        """Sign a student confirmation payload into a timestamped token."""
        payload = {
            'student_username': student_username.lower(),
            'course': course,
            'member': member
        }
        return TimestampSigner().sign(signing.dumps(payload))

    @staticmethod
    def validate_student_confirmation_token(token):
        """Return the confirmation payload for a valid token, else None."""
        signer = TimestampSigner()
        try:
            unsigned = signer.unsign(token, max_age=settings.STUDENT_CONFIRMATION_TIMEOUT)
            return signing.loads(unsigned)
        except (BadSignature, SignatureExpired):
            return None
| [
{
"point_num": 1,
"id": "all_function_names_snake_case",
"question": "Are all function names in this file written in snake_case?",
"answer": true
},
{
"point_num": 2,
"id": "all_params_annotated",
"question": "Does every function parameter in this file have a type annotation (excludi... | 3 | lego/apps/users/registrations.py | andrinelo/lego |
#!/usr/bin/env python3
# encoding: utf-8
import dis
class MyObject:
    """Example for dis."""

    # Class-level attribute; appears in the class-body disassembly.
    CLASS_ATTRIBUTE = 'some value'

    def __str__(self):
        # Human-readable form built from the instance's stored name.
        return 'MyObject({})'.format(self.name)

    def __init__(self, name):
        # Store the name that __str__ reads back.
        self.name = name


# Disassemble the bytecode of every method defined on the class.
# NOTE: the disassembly itself is this script's output, so the class body
# above must not be altered.
dis.dis(MyObject)
| [
{
"point_num": 1,
"id": "every_function_has_docstring",
"question": "Does every function in this file have a docstring?",
"answer": false
},
{
"point_num": 2,
"id": "has_multiple_inheritance",
"question": "Does any class in this file use multiple inheritance?",
"answer": false
... | 3 | PyMOTW/source/dis/dis_class.py | axetang/AxePython |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.