source string | points list | n_points int64 | path string | repo string |
|---|---|---|---|---|
def load_from_backup():
    """Restore application state from the backup store.

    Placeholder: not yet implemented — currently a no-op.
    """
    # TODO: implement the restore logic
    pass
def save_to_backup():
    """Persist application state to the backup store.

    Placeholder: not yet implemented — currently a no-op.
    """
    # TODO: implement the save logic
    pass
if __name__ == "__main__":
pass | [
{
"point_num": 1,
"id": "every_function_has_docstring",
"question": "Does every function in this file have a docstring?",
"answer": true
},
{
"point_num": 2,
"id": "has_nested_function_def",
"question": "Does this file contain any function defined inside another function?",
"answ... | 3 | flask-microservice/api/util/backup_handlers.py | sashaobucina/coronatracker |
from sqlalchemy.orm import Query
from .paginator import Paginator
class BaseQuery(Query):
    """The default query object used for models. This can be subclassed and
    replaced for individual models by setting the :attr:`~SQLAlchemy.query_cls`
    attribute.
    This is a subclass of a standard SQLAlchemy
    :class:`~sqlalchemy.orm.query.Query` class and has all the methods of a
    standard query as well.
    """

    def get_or_error(self, uid, error):
        """Like :meth:`get` but raises an error if not found instead of
        returning `None`.

        ``error`` may be an exception instance (raised as-is) or a callable
        whose return value is returned/raised by the caller's convention.
        """
        record = self.get(uid)
        if record is not None:
            return record
        if isinstance(error, Exception):
            raise error
        return error()

    def first_or_error(self, error):
        """Like :meth:`first` but raises an error if not found instead of
        returning `None`.
        """
        record = self.first()
        if record is not None:
            return record
        if isinstance(error, Exception):
            raise error
        return error()

    def paginate(self, **kwargs):
        """Paginate this results.
        Returns an :class:`Paginator` object.
        """
        return Paginator(self, **kwargs)
| [
{
"point_num": 1,
"id": "all_params_annotated",
"question": "Does every function parameter in this file have a type annotation (excluding self/cls)?",
"answer": false
},
{
"point_num": 2,
"id": "every_function_has_docstring",
"question": "Does every function in this file have a docst... | 3 | sqla_wrapper/base_query.py | ramuta/sqla-wrapper |
import asyncio
async def get_html(url):
    """Simulate fetching *url*: wait two seconds, then return fake HTML."""
    print(f"get {url} ing")
    await asyncio.sleep(2)
    html = f"<h1>This is a test for {url}</h1>"
    return html
def callback_func(task):
    """Done-callback for a Task: print its type, and 'done' once finished."""
    print(type(task))
    if not task.done():
        return
    print(f"done")  # print(task.result())
async def main():
    """Schedule one fetch task per URL, attach callbacks, gather results."""
    urls = [
        "https://www.baidu.com", "https://www.asp.net",
        "https://www.python.org", "https://www.sogou.com"
    ]
    # Wrap every coroutine in a Task via asyncio.create_task.
    pending = [asyncio.create_task(get_html(u)) for u in urls]
    # Attach a completion callback to each task.
    for task in pending:
        task.add_done_callback(callback_func)
    # Run all tasks concurrently and collect their results in order.
    gathered = await asyncio.gather(*pending)
    print(gathered)
if __name__ == "__main__":
import time
start_time = time.time()
asyncio.run(main())
print(time.time() - start_time)
# Task所有方法:['add_done_callback', 'all_tasks', 'cancel', 'cancelled', 'current_task', 'done', 'exception', 'get_loop', 'get_stack', 'print_stack', 'remove_done_callback', 'result', 'set_exception', 'set_result'] | [
{
"point_num": 1,
"id": "every_function_has_docstring",
"question": "Does every function in this file have a docstring?",
"answer": false
},
{
"point_num": 2,
"id": "every_function_under_20_lines",
"question": "Is every function in this file shorter than 20 lines?",
"answer": tru... | 3 | python/5.concurrent/ZCoroutine/z_new_code/2.call_back.py | lotapp/BaseCode |
def is_leap(year):
    """Return True if *year* is a leap year in the Gregorian calendar.

    A year is a leap year when divisible by 4, except century years,
    which must also be divisible by 400.
    """
    # Flattened form of the classic three-level nested-if leap test.
    return year % 4 == 0 and (year % 100 != 0 or year % 400 == 0)


# Defining the function returning the number of days in the specified month
def days_in_month(year, month):
    """Return the number of days in *month* of *year*.

    Returns the string "Invalid month" (not an exception) for month
    values outside 1..12 — kept for backward compatibility with callers.
    """
    if month < 1 or month > 12:
        return "Invalid month"
    # February gains a 29th day in leap years.
    if month == 2 and is_leap(year):
        return 29
    # Day counts for January..December in a common year.
    month_days = [31, 28, 31, 30, 31, 30, 31, 31, 30, 31, 30, 31]
    return month_days[month - 1]
#🚨 Do NOT change any of the code below
# Driver: read a year and month from the user, then report the day count.
year = int(input("Enter a year: "))
month = int(input("Enter a month: "))
days = days_in_month(year, month)
print(days)
| [
{
"point_num": 1,
"id": "no_function_exceeds_5_params",
"question": "Does every function in this file take 5 or fewer parameters (excluding self/cls)?",
"answer": true
},
{
"point_num": 2,
"id": "every_function_under_20_lines",
"question": "Is every function in this file shorter than... | 3 | Day 10/day-10-1-exercise/main.py | Jean-Bi/100DaysOfCodePython |
from random import randrange
from time import time
def bubble_sort(arr):
    """Sort *arr* in place (ascending) with classic bubble sort; returns arr."""
    size = len(arr)
    for left in range(size):
        # Sweep right-to-left, carrying the smallest element toward `left`.
        for right in range(size - 1, left, -1):
            if arr[right] < arr[right - 1]:
                arr[right - 1], arr[right] = arr[right], arr[right - 1]
    return arr
def opt_bubble_sort(arr):
    """Sort *arr* in place (ascending) with cocktail/shaker sort; returns arr.

    Alternates a forward pass (bubbles maxima right) and a backward pass
    (bubbles minima left), stopping as soon as a full pass makes no swap.

    Bug fixed: the original backward pass used ``range(len(arr)-1, 0)`` —
    an empty range (missing the ``-1`` step) — so it never executed, and
    its comparison was also inverted.
    """
    while True:
        swapped = False
        # Forward pass: push the largest unsorted element to the end.
        for i in range(len(arr) - 1):
            if arr[i] > arr[i + 1]:
                arr[i], arr[i + 1] = arr[i + 1], arr[i]
                swapped = True
        if not swapped:
            break
        swapped = False
        # Backward pass: push the smallest unsorted element to the front.
        for j in range(len(arr) - 2, -1, -1):
            if arr[j] > arr[j + 1]:
                arr[j], arr[j + 1] = arr[j + 1], arr[j]
                swapped = True
        if not swapped:
            break
    return arr
# Measure how long sorting algorithm `f` takes on a random array.
def check_time_in_random_arr(f):
    """Return wall-clock seconds `f` spends on a random 1100-element list."""
    data = [randrange(100) for _ in range(1100)]
    started = time()
    f(data)
    return time() - started
# Measure how long sorting algorithm `f` takes on an already-sorted array.
def check_time(f):
    """Return wall-clock seconds `f` spends on a pre-sorted 1100-element list."""
    data = list(range(1100))
    started = time()
    f(data)
    return time() - started
bubble_sort_time = check_time(bubble_sort)
opt_bubble_sort_time = check_time(opt_bubble_sort)
bubble_sort_time2 = check_time_in_random_arr(bubble_sort)
opt_bubble_sort_time2 = check_time_in_random_arr(opt_bubble_sort)
print('''
Время работы в уже отсортированном массиве:\n
Обычный пузырёк: {}\n
Модифицированный {}\n
Время работы в случайном массиве: \n
Обычный пузырёк: {}\n
Модифицированный: {}'''.format(bubble_sort_time, opt_bubble_sort_time, bubble_sort_time2, opt_bubble_sort_time2))
| [
{
"point_num": 1,
"id": "every_function_has_docstring",
"question": "Does every function in this file have a docstring?",
"answer": false
},
{
"point_num": 2,
"id": "every_function_under_20_lines",
"question": "Is every function in this file shorter than 20 lines?",
"answer": tru... | 3 | 09.py | Michanix/Algorithms-Intro-Course |
import backtrader as bt
from backtrader.indicators import ExponentialMovingAverage as EMA
class Pullbacks(bt.Indicator):
    """
    An indicator to detect pullbacks to EMA
    Params :
    - ema_period : int
        EMA period, default is 50
    - period : int
        Period for pullbacks calculation, default is 3
    Outputs :
    - pullbacks : int
        1 if upwards pullback, -1 if downwards, else 0
    """
    params = (('ema_period', 50),('period',3))
    lines = ('pullbacks',)
    def __init__(self):
        # Cache the primary feed's high/low series and its EMA line.
        self.high = self.datas[0].high
        self.low = self.datas[0].low
        self.ema = EMA(self.datas[0], period = self.p.ema_period)
    def next(self):
        # Count, over the previous `period` bars, how many bars closed
        # entirely below the EMA (high < EMA) or entirely above it (low > EMA).
        under, above = 0, 0
        for i in range(-self.p.period, 0):
            if self.high[i] < self.ema[i]:
                under += 1
            if self.low[i] > self.ema[i]:
                above += 1
        # Signal when the current bar crosses back through the EMA after
        # `period` consecutive bars entirely on one side.
        # NOTE(review): the docstring says 1 = upwards pullback, but the
        # below-then-cross-up case emits -1 here — confirm the sign convention.
        if under == self.p.period and self.high[0] > self.ema[0]:
            self.l.pullbacks[0] = -1
        elif above == self.p.period and self.low[0] < self.ema[0]:
            self.l.pullbacks[0] = 1
        else:
            self.l.pullbacks[0] = 0
{
"point_num": 1,
"id": "every_function_under_20_lines",
"question": "Is every function in this file shorter than 20 lines?",
"answer": true
},
{
"point_num": 2,
"id": "has_multiple_inheritance",
"question": "Does any class in this file use multiple inheritance?",
"answer": false... | 3 | bot/models/Indicators/Pullbacks.py | estebanthi/BinanceTradingBotV4 |
# -*- coding: utf-8 -*-
#
# This file is part of Invenio.
# Copyright (C) 2015-2019 CERN.
#
# Invenio is free software; you can redistribute it and/or modify it
# under the terms of the MIT License; see LICENSE file for more details.
"""Default rendering returning a default web page."""
from __future__ import absolute_import, print_function
from flask import render_template
previewable_extensions = []
def can_preview(file):
    """Return if file type can be previewed.

    This is the default fallback extension: it accepts any file
    unconditionally (the *file* argument is intentionally ignored).
    """
    return True
def preview(file):
    """Return the appropriate template and passes the file and embed flag."""
    # Render the generic fallback page; `file` is exposed to the template.
    return render_template("invenio_previewer/default.html", file=file)
| [
{
"point_num": 1,
"id": "has_nested_function_def",
"question": "Does this file contain any function defined inside another function?",
"answer": false
},
{
"point_num": 2,
"id": "every_function_has_docstring",
"question": "Does every function in this file have a docstring?",
"ans... | 3 | invenio_previewer/extensions/default.py | invenio-toaster/invenio-previewer |
import re
from .delta import Inf, d_expr_dimension
from .linear import Linear
from .lyndon import to_lyndon_basis
from .util import get_one_item
def word_expr_weight(expr):
    # Weight = length of one (arbitrary) word from the expression.
    # NOTE(review): assumes every word in `expr` has the same length —
    # otherwise the result depends on which item get_one_item picks; confirm.
    return len(get_one_item(expr.items())[0])
def word_expr_max_char(expr):
    """Return the largest letter (int) appearing in any word of *expr*."""
    return max(max(word) for word, _ in expr.items())
def words_with_n_distinct_chars(expr, min_distinct):
    # Keep only the words that use at least `min_distinct` different letters.
    return expr.filtered_obj(lambda word: len(set(word)) >= min_distinct)
def word_substitute(
    word, # Tuple[int]
    index_map, # int -> int
):
    """Return *word* with every letter mapped through *index_map*.

    Letters missing from *index_map* are left unchanged.
    """
    return tuple(index_map.get(letter, letter) for letter in word)
# For each word, replaces each letter c with index_map[c]
def word_expr_substitute(
    expr, # Linear[word], word is Tuple[int]
    index_map, # int -> int
):
    """Apply word_substitute to every word of *expr*, keeping coefficients."""
    ret = Linear()
    for word, coeff in expr.items():
        word_new = word_substitute(word, index_map)
        # Words containing Inf after substitution are treated as vanishing
        # and are dropped from the result.
        if not Inf in word_new:
            ret += Linear({word_new: coeff})
    return ret
def _word_to_template_impl(word, index_map):
next_index = 0 if len(index_map) == 0 else max(index_map.values()) + 1
for c in word:
if not c in index_map:
index_map[c] = next_index
next_index += 1
return word_substitute(word, index_map)
# Converts word to a standard form modulo substitutions
def word_to_template(word):
    # Fresh index map per call, so templates are comparable across words.
    return _word_to_template_impl(word, {})
def word_expr_to_template(expr, index_map=None):
    """Canonicalize every word in *expr* using one shared, growing index map."""
    # `None` default avoids the shared-mutable-default pitfall; callers may
    # pass an existing map to template several expressions consistently.
    if index_map is None:
        index_map = {}
    return expr.mapped_obj(lambda w: _word_to_template_impl(w, index_map))
| [
{
"point_num": 1,
"id": "has_nested_function_def",
"question": "Does this file contain any function defined inside another function?",
"answer": false
},
{
"point_num": 2,
"id": "all_params_annotated",
"question": "Does every function parameter in this file have a type annotation (ex... | 3 | python/polypy/lib/word_algebra.py | amatveyakin/polykit |
import torch.nn as nn
import torch
import torch.cuda
from onmt.utils.logging import init_logger
class MatrixTree(nn.Module):
    """Implementation of the matrix-tree theorem for computing marginals
    of non-projective dependency parsing. This attention layer is used
    in the paper "Learning Structured Text Representations."
    :cite:`DBLP:journals/corr/LiuL17d`
    """

    def __init__(self, eps=1e-5):
        """
        Args:
            eps: small constant added to the exponentiated scores so the
                Laplacian stays numerically invertible.
        """
        # Initialize the nn.Module machinery before setting attributes.
        super(MatrixTree, self).__init__()
        self.eps = eps

    def forward(self, input):
        """Compute arc marginals for each item of the batch.

        Args:
            input: score tensor of shape (batch, n, n); the diagonal is
                used as root scores (assumed from the indexing below —
                confirm against the caller).

        Returns:
            Tensor of the same shape holding the marginals.

        Fixed: the identity mask was built with a hard-coded ``.cuda()``,
        which crashed on CPU tensors; it now follows ``input``'s device,
        and is built once instead of per batch item.
        """
        laplacian = input.exp() + self.eps
        output = input.clone()
        # Identity mask on the same device as the input (hoisted out of the loop).
        eye = torch.eye(input.size(1), device=input.device)
        for b in range(input.size(0)):
            lap = laplacian[b].masked_fill(eye.ne(0), 0)
            lap = -lap + torch.diag(lap.sum(0))
            # store roots on diagonal
            lap[0] = input[b].diag().exp()
            inv_laplacian = lap.inverse()
            factor = (
                inv_laplacian.diag().unsqueeze(1).expand_as(input[b]).transpose(0, 1)
            )
            term1 = input[b].exp().mul(factor).clone()
            term2 = input[b].exp().mul(inv_laplacian.transpose(0, 1)).clone()
            term1[:, 0] = 0
            term2[0] = 0
            output[b] = term1 - term2
            roots_output = input[b].diag().exp().mul(inv_laplacian.transpose(0, 1)[0])
            output[b] = output[b] + torch.diag(roots_output)
        return output
if __name__ == "__main__":
logger = init_logger("StructuredAttention.log")
dtree = MatrixTree()
q = torch.rand(1, 5, 5).cuda()
marg = dtree.forward(q)
logger.info(marg.sum(1))
| [
{
"point_num": 1,
"id": "every_function_has_docstring",
"question": "Does every function in this file have a docstring?",
"answer": false
},
{
"point_num": 2,
"id": "every_function_under_20_lines",
"question": "Is every function in this file shorter than 20 lines?",
"answer": fal... | 3 | onmt/modules/structured_attention.py | philhchen/OpenNMT-evidential-softmax |
# --------------------------------------------------------
# Fast R-CNN
# Copyright (c) 2015 Microsoft
# Licensed under The MIT License [see LICENSE for details]
# Written by Ross Girshick
# --------------------------------------------------------
"""Blob helper functions."""
import numpy as np
# from scipy.misc import imread, imresize
import cv2
try:
xrange # Python 2
except NameError:
xrange = range # Python 3
def im_list_to_blob(ims):
    """Convert a list of images into a network input.

    Assumes images are already prepared (means subtracted, BGR order, ...).
    Images may differ in size; each is copied into the top-left corner of a
    zero-padded slot sized to the largest height/width in the list.
    """
    # Largest height/width (and depth) across all images.
    max_shape = np.array([im.shape for im in ims]).max(axis=0)
    num_images = len(ims)
    # 4-D batch: (image index, height, width, 3 channels), zero-padded.
    blob = np.zeros((num_images, max_shape[0], max_shape[1], 3),
                    dtype=np.float32)
    for idx, im in enumerate(ims):
        blob[idx, 0:im.shape[0], 0:im.shape[1], :] = im
    return blob
def prep_im_for_blob(im, pixel_means, target_size, max_size):
    """Mean subtract and scale an image for use in a blob."""
    im = im.astype(np.float32, copy=False)
    # Subtract the per-channel pixel means.
    im -= pixel_means
    # im = im[:, :, ::-1]
    # Record the image dimensions (H, W, C).
    im_shape = im.shape
    # Shortest and longest of the two spatial sides.
    im_size_min = np.min(im_shape[0:2])
    im_size_max = np.max(im_shape[0:2])
    # target_size is the desired length of the short side.
    im_scale = float(target_size) / float(im_size_min)
    # Prevent the biggest axis from being more than MAX_SIZE
    # if np.round(im_scale * im_size_max) > max_size:
    # im_scale = float(max_size) / float(im_size_max)
    # im = imresize(im, im_scale)
    # Both axes are scaled by the same factor im_scale.
    # NOTE(review): `max_size` is currently unused — the long-side cap above
    # is commented out, so very large images are not clipped; confirm intent.
    im = cv2.resize(im, None, None, fx=im_scale, fy=im_scale,
                    interpolation=cv2.INTER_LINEAR)
    # Return the resized image and the scale factor that was applied.
    return im, im_scale
| [
{
"point_num": 1,
"id": "all_params_annotated",
"question": "Does every function parameter in this file have a type annotation (excluding self/cls)?",
"answer": false
},
{
"point_num": 2,
"id": "every_function_under_20_lines",
"question": "Is every function in this file shorter than ... | 3 | lib/model/utils/blob.py | K2OKOH/da-faster-RCNN-ChineseComment |
import numpy as np
import sklearn.metrics as sk
SUPPORTED_METRICS = ['accuracy', 'auc', 'rmse']
def error_check(flat_true_values, pred_values):
    """Raise ValueError unless predictions and truth have the same length."""
    if len(flat_true_values) != len(pred_values):
        raise ValueError("preds and true values need to have same shape")


def accuracy(flat_true_values, pred_values):
    """Fraction of binary (0/1) true values predicted correctly at a 0.5 cut.

    Returns np.nan for empty input.
    """
    error_check(flat_true_values, pred_values)
    if len(flat_true_values) == 0:
        return np.nan
    hits = 0
    for truth, pred in zip(flat_true_values, pred_values):
        if (pred >= 0.5 and truth == 1) or (pred < 0.5 and truth == 0):
            hits += 1
    # Denominator counts only entries with a genuine binary label.
    binary_total = len([v for v in flat_true_values if (v == 0 or v == 1)])
    return hits / binary_total
def auc(flat_true_values, pred_values):
    """ROC AUC over the binary-labelled entries; np.nan when undefined."""
    error_check(flat_true_values, pred_values)
    # multiprior handling, remove phantom nondata
    if len(flat_true_values) == 0:
        return np.nan
    # Drop entries whose label is not 0/1 or whose prediction lies outside
    # [0, 1]; np.delete shifts later indices, hence the manual i bookkeeping.
    i = 0
    while i < len(flat_true_values):
        if (flat_true_values[i] != 1 and flat_true_values[i] != 0) or (pred_values[i] < 0 or pred_values[i] > 1):
            flat_true_values = np.delete(flat_true_values, i)
            pred_values = np.delete(pred_values, i)
            i -= 1
        i += 1
    # AUC is undefined when only one class remains after filtering.
    if len(set(flat_true_values)) == 1:
        return np.nan
    auc = sk.roc_auc_score(flat_true_values, pred_values)
    return auc
def rmse(flat_true_values, pred_values):
    """Root-mean-square error over entries whose true value is not -1.

    A true value of -1 marks missing data and is excluded; returns np.nan
    for empty input.
    """
    # represent correct as 1, incorrect as 0 for RMSE calculation
    if len(flat_true_values) == 0:
        return np.nan
    error_check(flat_true_values, pred_values)
    total, observed = 0, 0
    for truth, pred in zip(flat_true_values, pred_values):
        if truth != -1:
            total += ((truth - pred) ** 2)
            observed += 1
    return (total / observed) ** 0.5
| [
{
"point_num": 1,
"id": "all_function_names_snake_case",
"question": "Are all function names in this file written in snake_case?",
"answer": true
},
{
"point_num": 2,
"id": "every_function_has_docstring",
"question": "Does every function in this file have a docstring?",
"answer":... | 3 | source-py/pyBKT/util/metrics.py | shaoliangliang1996/pyBKT |
from django.db.models.signals import pre_save, post_save
from django.core.signals import request_finished
from django.dispatch import receiver
from .my_singal import action
def pre_save_model(sender, **kwargs):
    """pre_save receiver: print the sending model class and signal kwargs."""
    print(sender)
    print(kwargs)
def post_save_func(sender, **kwargs):
    """post_save receiver: log the sender and the signal kwargs."""
    # Log it (original comment: 记个日志).
    print('发送者',sender)
    print(kwargs)
pre_save.connect(pre_save_model)
post_save.connect(post_save_func)
# @receiver(request_finished)
def test_finished_func(sender, **kwargs):
    """request_finished receiver: print a marker when a request completes."""
    print("被调用")
request_finished.connect(test_finished_func)
# Receiver for the custom signal (original comment: 自定义的信号).
def my_design(sender, **kwargs):
    """Handler for the custom `action` signal imported from my_singal."""
    print("自定义信号被调用")
    print(sender)
    print(kwargs)
action.connect(my_design) | [
{
"point_num": 1,
"id": "all_params_annotated",
"question": "Does every function parameter in this file have a type annotation (excluding self/cls)?",
"answer": false
},
{
"point_num": 2,
"id": "every_function_has_docstring",
"question": "Does every function in this file have a docst... | 3 | t9/__init__.py | whoareyou0401/mytest |
from django.db import models
# Create your models here.
class Product(models.Model):
    """A catalogue product with brand, pricing data and a selection flag."""
    product_brand = models.CharField(max_length=50)
    product_article_code = models.CharField(max_length=20)
    product_code= models.IntegerField()
    product_name= models.CharField(max_length=150)
    product_unit_packaging_number = models.IntegerField(default=1)
    product_price = models.DecimalField(max_digits=8,decimal_places=2)
    product_selected = models.BooleanField(default=False)

    def __str__(self):
        """Human-readable representation used by the admin and shell."""
        return self.product_name

    def product_was_selected(self):
        """Return whether this product is currently selected."""
        return self.product_selected

    def get_price(self):
        """Return the unit price as a Decimal."""
        return self.product_price

    @staticmethod
    def get_selected_product():
        """Return a queryset of all currently selected products.

        Fixed: the original method lacked both `self` and a decorator, so
        calling it on an instance raised TypeError; @staticmethod keeps
        `Product.get_selected_product()` working and fixes instance calls.
        """
        return Product.objects.filter(product_selected=True)
| [
{
"point_num": 1,
"id": "has_multiple_inheritance",
"question": "Does any class in this file use multiple inheritance?",
"answer": false
},
{
"point_num": 2,
"id": "more_functions_than_classes",
"question": "Does this file define more functions than classes?",
"answer": true
},... | 3 | products/models.py | benoitboyer/DjangoBio |
def is_alpha(c):
    """Return True when *c* is a single ASCII letter (A-Z or a-z)."""
    # Upper-casing folds a-z onto A-Z, so one range test suffices.
    return ord('A') <= ord(c.upper()) <= ord('Z')
def is_ascii(c):
    """Return True when *c* lies in the 7-bit ASCII range (0-127)."""
    return 0 <= ord(c) <= 127
def is_ascii_extended(c):
    """Return True when *c* is in the extended-ASCII range (128-255)."""
    return 128 <= ord(c) <= 255
| [
{
"point_num": 1,
"id": "all_params_annotated",
"question": "Does every function parameter in this file have a type annotation (excluding self/cls)?",
"answer": false
},
{
"point_num": 2,
"id": "all_return_types_annotated",
"question": "Does every function in this file have a return ... | 3 | utils/text/general.py | goztrk/django-htk |
import time
import datetime as datetime
class BusData:
    """A single departure-board entry for a bus service."""
    def __init__(self, number, destination, timeLiteral, operator):
        # Route number, destination, raw time text and operator as scraped.
        self.number = number
        self.destination = destination
        self.timeLiteral = timeLiteral
        self.operator = operator
        # Absolute departure time parsed from the literal.
        # NOTE(review): `prepare_departure_time` is called unqualified — this
        # only resolves if it is a module-level function, not a method; confirm.
        self.time = prepare_departure_time(timeLiteral)
def prepare_departure_time(timeLiteral):
    """Convert a departure-time literal into an absolute datetime.

    Accepts 'HH:MM' (today at that wall-clock time), 'Due' (now), or any
    string containing a minute count, e.g. '5 mins' (now + 5 minutes).

    Fixed: the 'HH:MM' branch previously added a time.struct_time to a
    date, which raises TypeError; it now combines today's date with the
    parsed wall-clock time.
    """
    now = datetime.datetime.today()
    if isTimeFormat(timeLiteral):
        wall_clock = datetime.datetime.strptime(timeLiteral, '%H:%M').time()
        return datetime.datetime.combine(now.date(), wall_clock)
    if timeLiteral == 'Due':
        dueMinutes = 0
    else:
        # First integer embedded in the text, e.g. "5 mins" -> 5.
        dueMinutes = [int(word) for word in timeLiteral.split() if word.isdigit()][0]
    return now + datetime.timedelta(minutes = dueMinutes)


def isTimeFormat(input):
    """Return True when *input* parses as 'HH:MM' wall-clock text."""
    try:
        time.strptime(input, '%H:%M')
        return True
    except ValueError:
        return False
{
"point_num": 1,
"id": "has_multiple_inheritance",
"question": "Does any class in this file use multiple inheritance?",
"answer": false
},
{
"point_num": 2,
"id": "all_function_names_snake_case",
"question": "Are all function names in this file written in snake_case?",
"answer":... | 3 | departure/provider/nexus/bus_data_model.py | Woll78/departure-python |
import os.path
import pypandoc
from mkdocs.config import config_options
from mkdocs.plugins import BasePlugin
class BibTexPlugin(BasePlugin):
    """
    Allows the use of bibtex in markdown content for MKDocs.
    Options:
    bib_file (string): path to a single bibtex file for entries, relative to mkdocs.yml.
    csl_file (string, optional): path to a CLS file, relative to mkdocs.yml.
    """
    config_scheme = [
        ("bib_file", config_options.Type(str, required=True)), # TODO: multiple files.
        ("csl_file", config_options.Type(str, required=False)),
        ("pandoc_output_format", config_options.Type(str, required=False)),
    ]
    def on_config(self, config):
        """Get path on load of config."""
        config_path = os.path.dirname(config.config_file_path)
        # Resolve configured paths relative to the mkdocs.yml location.
        self.csl_path = get_path(self.config.get("csl_file", None), config_path)
        self.bib_path = get_path(self.config["bib_file"], config_path)
        # Output flavour handed to pandoc; defaults to strict markdown.
        self.pandoc_output_format = self.config.get("pandoc_output_format", "markdown_strict")
        return config
    def on_page_markdown(self, markdown, page, config, files):
        """Expand citations by round-tripping the page through pandoc."""
        to = self. pandoc_output_format # "markdown_strict", "gfm", "markdown-citations".
        input_format = "md"
        extra_args = []
        # Add bibtex files.
        # TODO: multiple bib files. Pandoc supports multiple "--bibliography" args,
        # but I don't know yet how to get a list from the config.
        extra_args.extend(["--bibliography", self.bib_path])
        # Add CSL files.
        if self.csl_path is not None:
            extra_args.extend(["--csl", self.csl_path])
        # Call Pandoc.
        markdown = pypandoc.convert_text(markdown, to, input_format, extra_args)
        return str(markdown)
def get_path(path, base_path):
    """Resolve *path* against *base_path*.

    None passes through unchanged; absolute paths are kept as-is;
    relative paths are joined to *base_path* and made absolute.
    """
    if path is None:
        return None
    if os.path.isabs(path):
        return path
    return os.path.abspath(os.path.join(base_path, path))
| [
{
"point_num": 1,
"id": "all_params_annotated",
"question": "Does every function parameter in this file have a type annotation (excluding self/cls)?",
"answer": false
},
{
"point_num": 2,
"id": "more_functions_than_classes",
"question": "Does this file define more functions than clas... | 3 | mkdocs_bibtex/plugin.py | alexvoronov/mkdocs-bibtex |
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from datetime import datetime
from django.contrib import admin
from notification.models import MobileDevice
class MobileDeviceAdmin(admin.ModelAdmin):
    """Admin configuration for MobileDevice rows with audit bookkeeping."""
    list_display = ['id', 'user', 'app', 'token', 'device_id', 'active']
    list_filter = ['app', 'device_id', 'active']
    search_fields = ['user__profile__name', 'user__username', 'app', 'token', 'device_id']
    def delete_model(self, request, obj):
        """Stamp soft-delete metadata before delegating to the default delete.

        NOTE(review): super().delete_model() normally removes the row, so the
        stamped fields only persist if the model's delete() soft-deletes —
        confirm against the MobileDevice model.
        """
        obj.deleted = True
        obj.deleted_on = datetime.now()
        obj.changed_by = request.user
        super(MobileDeviceAdmin, self).delete_model(request, obj)
    def has_delete_permission(self, request, obj=None):
        """Disallow deleting rows that are already marked deleted."""
        if obj and obj.deleted:
            return False
        return True
    def save_model(self, request, obj, form, change):
        """Record which admin user made the change, then save normally."""
        obj.changed_by = request.user
        super(MobileDeviceAdmin, self).save_model(request, obj, form, change)
    def get_readonly_fields(self, request, obj=None):
        """Audit fields are never editable through the admin."""
        return ['deleted_on', 'changed_by', 'created_on', 'updated_on', 'deleted']
admin.site.register(MobileDevice, MobileDeviceAdmin)
| [
{
"point_num": 1,
"id": "all_function_names_snake_case",
"question": "Are all function names in this file written in snake_case?",
"answer": true
},
{
"point_num": 2,
"id": "all_params_annotated",
"question": "Does every function parameter in this file have a type annotation (excludi... | 3 | web/transiq/notification/admin.py | manibhushan05/transiq |
class call_if(object):
    """Decorator: call the wrapped method only when the named attribute of
    its first argument (typically ``self``) is truthy; otherwise return None.
    """

    def __init__(self, cond):
        # Name of the attribute to test on the first positional argument.
        self.condition = cond

    def __call__(self, func):
        def guarded(*args, **kwargs):
            if not getattr(args[0], self.condition):
                return None
            return func(*args, **kwargs)
        return guarded
{
"point_num": 1,
"id": "more_functions_than_classes",
"question": "Does this file define more functions than classes?",
"answer": true
},
{
"point_num": 2,
"id": "every_function_under_20_lines",
"question": "Is every function in this file shorter than 20 lines?",
"answer": true
... | 3 | toast/decorators/__init__.py | joshuaskelly/Toast |
import re
from functools import reduce
def completely_invalid_sum(defs, ticket):
    """Check *ticket* against the field range *defs*.

    Returns (is_invalid, error_sum): a value is invalid when it fits
    neither range of any field; error_sum adds up all invalid values.
    """
    any_bad = False
    bad_total = 0
    for val in ticket:
        fits_some_field = any(
            x1 <= val <= x2 or y1 <= val <= y2
            for ((x1, x2), (y1, y2)) in defs
        )
        if not fits_some_field:
            bad_total += val
            any_bad = True
    return any_bad, bad_total
def field_sets(defs, ticket):
    """For each ticket value, the set of field indices whose ranges admit it."""
    candidates = []
    for val in ticket:
        admitted = {
            idx
            for idx, ((x1, x2), (y1, y2)) in enumerate(defs)
            if x1 <= val <= x2 or y1 <= val <= y2
        }
        # A pre-filtered ticket must fit at least one field per value.
        assert len(admitted) > 0
        candidates.append(admitted)
    return candidates
if __name__ == "__main__":
with open("16.txt") as f:
defs_raw, raw_ticket, nearby = f.read().split("\n\n")
nearby = [[int(i) for i in l.split(",")] for l in nearby.strip().split("\n")[1:]]
defs = []
for f in defs_raw.strip().split("\n"):
m = re.match(".*: (\d+)-(\d+) or (\d+)-(\d+)", f)
if not m:
continue
defs.append(((int(m[1]), int(m[2])), (int(m[3]), int(m[4]))))
print(f"Part 1: {sum(completely_invalid_sum(defs, t)[1] for t in nearby)}")
nearby = [t for t in nearby if not completely_invalid_sum(defs, t)[0]]
valid_fields = [set(range(len(defs))) for _ in range(len(defs))]
for t in nearby:
for idx, fields in enumerate(field_sets(defs, t)):
valid_fields[idx] &= fields
field_map = dict()
while len(field_map) != len(defs):
idx, singular = next(
(idx, s) for idx, s in enumerate(valid_fields) if len(s) == 1
)
val = list(singular)[0]
for s in valid_fields:
s.discard(val)
field_map[val] = idx
ticket = [int(i) for i in raw_ticket.split("\n")[1].split(",")]
part2 = reduce((lambda x, y: x * y), [ticket[field_map[i]] for i in range(6)])
print(f"Part 2: {part2}")
| [
{
"point_num": 1,
"id": "every_function_under_20_lines",
"question": "Is every function in this file shorter than 20 lines?",
"answer": true
},
{
"point_num": 2,
"id": "all_params_annotated",
"question": "Does every function parameter in this file have a type annotation (excluding se... | 3 | Day10-19/16.py | bcongdon/advent_of_code_2020 |
import numpy as np
# Conversion factor between a Gaussian sigma and its FWHM: 2*sqrt(2*ln 2).
fwhm_m = 2 * np.sqrt(2 * np.log(2))
def fwhm(sigma):
    """
    Get full width at half maximum (FWHM) for a provided sigma /
    standard deviation, assuming a Gaussian distribution.
    """
    return sigma * fwhm_m
def gaussian(x_mean, x_std, shape):
    """Draw samples of the given *shape* from N(x_mean, x_std**2)."""
    return np.random.normal(x_mean, x_std, shape)
def truncated_gaussian(x_mean, x_std, x_min, shape):
    """
    Sample from a normal distribution, but enforces a minimum value.
    """
    samples = gaussian(x_mean, x_std, shape)
    return np.maximum(samples, x_min)
def chi2(x_mean, chi2_df, shape):
    """
    Chi-squared distribution centered at a specific mean.
    Parameters
    ----------
    x_mean : float
    chi2_df : int
        Degrees of freedom for chi-squared
    shape : list
        Shape of output noise array
    Returns
    -------
    dist : ndarray
        Array of chi-squared noise
    """
    draws = np.random.chisquare(df=chi2_df, size=shape)
    # Rescale: a chi-squared variate has mean chi2_df, so this centers on x_mean.
    return draws * x_mean / chi2_df
{
"point_num": 1,
"id": "every_function_under_20_lines",
"question": "Is every function in this file shorter than 20 lines?",
"answer": true
},
{
"point_num": 2,
"id": "no_function_exceeds_5_params",
"question": "Does every function in this file take 5 or fewer parameters (excluding ... | 3 | setigen/distributions.py | bbrzycki/setigen |
import sqlite3
import requests
from bs4 import BeautifulSoup
from datetime import datetime
conn = None
conn = sqlite3.connect("db/db_scrapper.db")
def showAll():
    """Print every row of the LOG_TEST table to stdout."""
    cur = conn.cursor()
    cur.execute("SELECT * FROM LOG_TEST")
    rows = cur.fetchall()
    for row in rows:
        print(row)
def returnLast():
    """Return [(row_count + 1,)] — the id the next inserted row would get.

    NOTE(review): this assumes ids are gap-free; deletions would make the
    guess wrong — confirm whether callers rely on it.
    """
    cur = conn.cursor()
    cur.execute("SELECT count(*) + 1 FROM LOG_TEST")
    row = cur.fetchall()
    return row
def insertRow(val1):
    """Insert a log row with text *val1* and the current timestamp.

    Returns the rowid of the inserted row.
    """
    dateTimeObj = datetime.now()
    cur = conn.cursor()
    # Parameterized query — avoids SQL injection through val1.
    query = 'INSERT INTO LOG_TEST(LOG_TEXT,LOG_DATE) VALUES (?, ?)'
    cur.execute(query,(val1,dateTimeObj))
    conn.commit()
    return cur.lastrowid
def rqsts():
    """Fetch the series page and print its parsed HTML soup."""
    URL = 'https://serieslan.com/las-aventuras-de-tom-sawyer'
    page = requests.get(URL)
    soup = BeautifulSoup(page.content,'html.parser')
    print(soup)
if __name__ == "__main__":
rqsts()
""" getlast = insertRow("R1000")
print(getlast)
showAll() """
| [
{
"point_num": 1,
"id": "every_function_under_20_lines",
"question": "Is every function in this file shorter than 20 lines?",
"answer": true
},
{
"point_num": 2,
"id": "every_function_has_docstring",
"question": "Does every function in this file have a docstring?",
"answer": fals... | 3 | init.py | PabloCBX/Scrapper-Retail-CL |
import torch
from torch.optim.lr_scheduler import MultiStepLR
from theconf import Config as C
def adjust_learning_rate_resnet(optimizer):
    """
    Sets the learning rate to the initial LR decayed by 10 on every predefined epochs
    Ref: AutoAugment
    """
    # Milestones depend on the configured total epoch count; only the two
    # schedules used in the paper (90 and 270 total epochs) are supported.
    if C.get()['epoch'] == 90:
        return MultiStepLR_HotFix(optimizer, [30, 60, 80])
    elif C.get()['epoch'] == 270: # autoaugment
        return MultiStepLR_HotFix(optimizer, [90, 180, 240])
    else:
        raise ValueError('invalid epoch=%d for resnet scheduler' % C.get()['epoch'])
class MultiStepLR_HotFix(MultiStepLR):
    """MultiStepLR that re-exposes `milestones` as a plain list.

    The base scheduler converts milestones internally (presumably to a
    Counter in some torch versions — confirm); this keeps the list the
    caller passed in available on the instance.
    """

    def __init__(self, optimizer, milestones, gamma=0.1, last_epoch=-1):
        super(MultiStepLR_HotFix, self).__init__(optimizer, milestones, gamma, last_epoch)
        # Overwrite after the base init so the attribute stays a list.
        self.milestones = list(milestones)
| [
{
"point_num": 1,
"id": "all_params_annotated",
"question": "Does every function parameter in this file have a type annotation (excluding self/cls)?",
"answer": false
},
{
"point_num": 2,
"id": "more_functions_than_classes",
"question": "Does this file define more functions than clas... | 3 | FastAutoAugment/lr_scheduler.py | zherlock030/fast-autoaugment |
from flask import Blueprint, render_template
from macronizer_cores import db
# create error blueprint
errors = Blueprint('errors', __name__)
# SECTION - routes
# NOTE - app_errorhandler() is a method inherited from Blueprint that is equivalent to errorhandler() inherited from flask
@errors.app_errorhandler(404)
def page_not_found(e):
    '''Handle 404 error'''
    # Render the custom not-found page along with the 404 status code.
    return render_template('errors/404.html'), 404
@errors.app_errorhandler(500)
def internal_server_error(e):
    '''Handle 500 error'''
    # Roll back any half-finished transaction so the session is usable again.
    db.session.rollback()
    return render_template('errors/500.html'), 500
{
"point_num": 1,
"id": "every_function_under_20_lines",
"question": "Is every function in this file shorter than 20 lines?",
"answer": true
},
{
"point_num": 2,
"id": "every_function_has_docstring",
"question": "Does every function in this file have a docstring?",
"answer": true... | 3 | macronizer_cores/errors/routes.py | triethuynh2301/macronizer-project |
from django.conf import settings
from django.utils import timezone
from datetime import timedelta
from paypal.standard.models import ST_PP_COMPLETED
from paypal.standard.ipn.signals import (
valid_ipn_received,
invalid_ipn_received)
def show_me_the_money(sender, **kwargs):
    """Receiver for valid_ipn_received: intended to extend a job's expiry
    once a PayPal payment completes.

    Fixed: removed a leftover `import pdb; pdb.set_trace()` breakpoint that
    would freeze the worker on every valid IPN in production.
    """
    instance = sender
    # Intended behaviour, currently disabled (Job is not imported here):
    # if instance.payment_status == ST_PP_COMPLETED:
    #     job = Job.objects.get(id=instance.job_id)
    #     expiration = timezone.now() + timedelta(days=settings.PREMIUM_DAYS)
    #     job.expired_date = expiration.date()
    #     job.save()
def do_not_show_me_the_money(sender, **kwargs):
    """Receiver for invalid_ipn_received: currently a no-op.

    Fixed: removed a leftover `import pdb; pdb.set_trace()` breakpoint
    that would hang the process whenever an invalid IPN arrived.
    """
    # TODO: log or alert on invalid IPNs instead of silently ignoring them.
    pass
valid_ipn_received.connect(show_me_the_money)
invalid_ipn_received.connect(do_not_show_me_the_money) | [
{
"point_num": 1,
"id": "every_function_under_20_lines",
"question": "Is every function in this file shorter than 20 lines?",
"answer": true
},
{
"point_num": 2,
"id": "has_nested_function_def",
"question": "Does this file contain any function defined inside another function?",
"... | 3 | work/jobs/hooks.py | ralphleyga/django-jobportal |
import os
from itertools import count
from pathlib import Path
from typing import cast
from fastapi import FastAPI
from piccolo.columns import Integer, Text
from piccolo.conf.apps import AppConfig, AppRegistry
from piccolo.engine import SQLiteEngine, engine_finder
from piccolo.table import Table
from pytest import fixture
from fastapi_pagination import LimitOffsetPage, Page, add_pagination
from fastapi_pagination.ext.piccolo import paginate
from ..base import BasePaginationTestCase
from ..utils import faker
_counter = count().__next__
os.environ["PICCOLO_CONF"] = __name__
class User(Table, tablename="users"):
    """Piccolo table backing the pagination tests."""
    # Monotonically increasing ids drawn from the module-level counter.
    id = Integer(default=_counter, primary_key=True)
    name = Text(required=False, null=True)
@fixture(
    scope="session",
    params=[True, False],
    ids=["model", "query"],
)
def query(request):
    # Parametrised to run the suite twice: paginating a Table class
    # ("model") and an explicit select query ("query").
    if request.param:
        return User
    else:
        return User.select()
DB = SQLiteEngine()
APP_REGISTRY = AppRegistry()
APP_CONFIG = AppConfig(
app_name="example",
migrations_folder_path=None,
table_classes=[User],
)
@fixture(scope="session")
def database_url():
return "piccolo.sqlite"
@fixture(scope="session")
async def engine(database_url):
engine: SQLiteEngine = cast(SQLiteEngine, engine_finder())
p = Path(engine.path)
if p.exists():
os.remove(p)
await engine.prep_database()
await User.create_table().run()
@fixture(scope="session")
def app(query, engine, model_cls):
app = FastAPI()
@app.get("/default", response_model=Page[model_cls])
@app.get("/limit-offset", response_model=LimitOffsetPage[model_cls])
async def route():
return await paginate(query)
return add_pagination(app)
class TestPiccolo(BasePaginationTestCase):
    """Runs the shared pagination test-suite against the Piccolo backend."""
    @fixture(scope="class")
    async def entities(self, query, client):
        """Seed 100 random users and return them for expected-value checks."""
        await User.insert(*(User(name=faker.name()) for _ in range(100))).run()
        return await User.select()
| [
{
"point_num": 1,
"id": "every_class_has_docstring",
"question": "Does every class in this file have a docstring?",
"answer": false
},
{
"point_num": 2,
"id": "has_nested_function_def",
"question": "Does this file contain any function defined inside another function?",
"answer": ... | 3 | tests/ext/test_piccolo.py | liu-junyong/fastapi-pagination |
from tortoise import Tortoise
from loguru import logger
from app.core.config import DB_TYPE, DB_USER, DB_PASSWORD, DB_HOST, DB_PORT, DATABASE
DB_URL = f'{DB_TYPE}://{DB_USER}:{DB_PASSWORD}@{DB_HOST}:{DB_PORT}/{DATABASE}'
async def init():
    """Initialise the Tortoise ORM connection and create any missing tables."""
    # Plain strings: the f-prefixes carried no placeholders (ruff F541).
    logger.info('Connecting to database')
    await Tortoise.init(
        db_url=DB_URL,
        modules={
            'db': ['app.db.category', 'app.db.brand', 'app.db.store', 'app.db.product']
        },
    )
    logger.info('Connection established')
    # Safe on every start-up: only creates schemas that do not exist yet.
    await Tortoise.generate_schemas()
    logger.info('Schema generated')
async def disconnect():
    """Close every open Tortoise ORM connection on shutdown."""
    logger.info('Closing connection to database')
    await Tortoise.close_connections()
    logger.info('Connection closed')
| [
{
"point_num": 1,
"id": "every_function_under_20_lines",
"question": "Is every function in this file shorter than 20 lines?",
"answer": true
},
{
"point_num": 2,
"id": "has_nested_function_def",
"question": "Does this file contain any function defined inside another function?",
"... | 3 | app/db/database.py | Huangkai1008/market-admin |
from autoPyTorch.training.base_training import BaseBatchLossComputationTechnique
import numpy as np
from torch.autograd import Variable
import ConfigSpace
import torch
class Mixup(BaseBatchLossComputationTechnique):
    """Mixup batch augmentation: blends pairs of training examples and their
    losses with a Beta-distributed mixing weight (Zhang et al., mixup)."""

    def set_up(self, pipeline_config, hyperparameter_config, logger):
        super(Mixup, self).set_up(pipeline_config, hyperparameter_config, logger)
        # Beta-distribution concentration; 0 disables mixing entirely.
        self.alpha = hyperparameter_config["alpha"]

    def prepare_batch_data(self, X_batch, y_batch):
        """Mix the batch with a random permutation of itself and remember the
        target pair plus the mixing coefficient for the loss computation."""
        self.lam = np.random.beta(self.alpha, self.alpha) if self.alpha > 0 else 1
        n = X_batch.size()[0]
        perm = torch.randperm(n).cuda() if X_batch.is_cuda else torch.randperm(n)
        self.mixed_x = self.lam * X_batch + (1 - self.lam) * X_batch[perm, :]
        self.y_a, self.y_b = y_batch, y_batch[perm]

    def compute_batch_loss(self, loss_function, y_batch_pred):
        """Return the convex combination of the losses against both targets."""
        result = self.lam * loss_function(y_batch_pred, Variable(self.y_a)) \
            + (1 - self.lam) * loss_function(y_batch_pred, Variable(self.y_b))
        # Drop per-batch state so stale tensors cannot be reused accidentally.
        self.lam = self.mixed_x = self.y_a = self.y_b = None
        return result

    @staticmethod
    def get_hyperparameter_search_space(**pipeline_config):
        """Search space: one uniform float ``alpha`` in [0, 1], default 1."""
        cs = ConfigSpace.ConfigurationSpace()
        cs.add_hyperparameter(ConfigSpace.hyperparameters.UniformFloatHyperparameter("alpha", lower=0, upper=1, default_value=1))
        return cs
{
"point_num": 1,
"id": "no_function_exceeds_5_params",
"question": "Does every function in this file take 5 or fewer parameters (excluding self/cls)?",
"answer": true
},
{
"point_num": 2,
"id": "every_function_under_20_lines",
"question": "Is every function in this file shorter than... | 3 | autoPyTorch/training/mixup.py | thomascherickal/Auto-PyTorch |
import sys
sys.path.append("..")
from CGPython import CodeGenerationTransformer
from CGPython.Commands import ModifyMethodCommand
from ast import ClassDef, FunctionDef, Name
class TrueStaticTransformer(CodeGenerationTransformer):
    """Turn methods that never use their first argument into static methods.

    Finds every non-dunder instance method that is not already a
    ``staticmethod``/``classmethod`` and whose body never references its
    first argument (typically ``self``), then decorates it with
    ``@staticmethod`` and removes that argument.
    """
    def Transform(self):
        def func(cmd:ModifyMethodCommand, name:str):
            # The rewrite applied to each matching method.
            return cmd.DecoratedBy("staticmethod").RemoveArg(name)
        # CGPython fluent query over the module AST; each .Where() encodes one
        # of the selection criteria described in the class docstring.
        (self.Engine.Select(ClassDef())
            .Select(FunctionDef())
            .Using(lambda x: ([dec.id for dec in x.node.decorator_list if isinstance(dec,Name)],"decorators"))
            .Where(lambda x:x.node.name[:2]!="__")
            .Where(lambda x:not "staticmethod" in x.Get("decorators"))
            .Where(lambda x:not "classmethod" in x.Get("decorators"))
            .Where(lambda x:len(x.node.args.args)>0)
            .Using(lambda x: (x.node.args.args[0].arg,"name"))
            # keep only methods whose body never references that first argument
            .Where(lambda x: len(x.Select(Name())
                .Where(lambda x: x.parent.Get('name') == x.node.id).targets)==0)
            .Execute(func)
        )
{
"point_num": 1,
"id": "every_function_under_20_lines",
"question": "Is every function in this file shorter than 20 lines?",
"answer": true
},
{
"point_num": 2,
"id": "has_multiple_inheritance",
"question": "Does any class in this file use multiple inheritance?",
"answer": false... | 3 | src/Transformers/TrueStaticTransformer.py | BlackBeard98/Code-Generation |
import cv2
import numpy as np
from tensorflow.keras.models import load_model
from tensorflow.keras.preprocessing.image import img_to_array
class OCR():
    """Digit recogniser wrapping a pre-trained Keras CNN ("digits.h5")."""

    def __init__(self):
        self.loaded_model = None
        self.load_models()

    def load_models(self):
        """Load the serialized digit-classification model from disk."""
        self.loaded_model = load_model("digits.h5")

    def prediction(self, image):
        """Return the digit class predicted for a single image."""
        # Normalise to the 28x28 float, batch-of-one shape the CNN expects.
        prepared = img_to_array(cv2.resize(image, (28, 28)).astype("float") / 255.0)
        batch = np.expand_dims(prepared, axis=0)
        return self.loaded_model.predict(batch, verbose=0).argmax(axis=1)[0]
| [
{
"point_num": 1,
"id": "more_functions_than_classes",
"question": "Does this file define more functions than classes?",
"answer": true
},
{
"point_num": 2,
"id": "every_function_has_docstring",
"question": "Does every function in this file have a docstring?",
"answer": false
}... | 3 | Sudoku Solver/Recognizer.py | Ch-V3nU/Projects |
from django.views.generic.base import TemplateView
from django.contrib.auth.decorators import login_required
from django.utils.decorators import method_decorator
# decorators = [login_required, ]
# @method_decorator(decorators, name='dispatch')
class BenchmarkViewAppCGOne(TemplateView):
    """Benchmarking landing page for the app-x3-Z63 demo."""

    template_name = "benchmarking/app-x3-Z63/benchmarking-home.html"

    def get_context_data(self, **kwargs):
        # No extra context yet; kept as an explicit extension point.
        return super().get_context_data(**kwargs)
# @method_decorator(decorators, name='dispatch')
class BenchmarkEndView(TemplateView):
    """Static end-of-demo page."""
    template_name = "benchmarking/end-of-demo.html"
| [
{
"point_num": 1,
"id": "every_class_has_docstring",
"question": "Does every class in this file have a docstring?",
"answer": false
},
{
"point_num": 2,
"id": "has_multiple_inheritance",
"question": "Does any class in this file use multiple inheritance?",
"answer": false
},
{... | 3 | smpc_demo_platform/benchmarking/views.py | Safe-DEED/mpc-mock-up |
#!/usr/bin/env python3
import asyncio
import websockets
import json
import random
import time
import numpy as np
URI = "wss://api-proxy.auckland-cer.cloud.edu.au/dynamic_network_graph"
#URI = "ws://api-proxy.auckland-cer.cloud.edu.au:6789"
#URI = "ws://localhost:6789"
SESSION_ID = "STRESS_TEST"
connections = []
async def read_all(websocket):
    """Drain any pending incoming messages without blocking.

    A zero timeout makes wait_for() raise as soon as no message is
    immediately available, which ends the drain loop.
    """
    try:
        while True:
            await asyncio.wait_for(websocket.recv(), 0)
    except Exception:
        # Narrowed from a bare `except:` so task cancellation
        # (CancelledError, a BaseException since 3.8) still propagates.
        return
async def test():
    """One synthetic client: connect, join the session, push one random
    entry, and return the elapsed wall-clock seconds."""
    start = time.time()
    websocket = await websockets.connect(URI)
    # Kept open and registered globally; drained and reused by main().
    connections.append(websocket)
    await websocket.send(json.dumps({
        "action": "connect",
        "session_id": SESSION_ID
    }))
    await websocket.send(json.dumps({
        "session_id": SESSION_ID,
        "action": "upsert_entry",
        "entry": {
            "id": random.randint(0, 100),
            "donor": random.randint(0, 100),
            "resourceType": "$",
            "recipient": random.randint(0, 100)
        }
    }))
    return time.time() - start
async def run_n_tests(n):
    """Launch *n* clients concurrently; return their individual timings."""
    return await asyncio.gather(*(test() for _ in range(n)))
async def main():
    """Ramp up load: repeated rounds of 15 concurrent clients, CSV output."""
    print("n_clients,t,wall_time")
    start = time.time()
    for i in range(100):
        result = await run_n_tests(15)
        result = np.mean(result)  # mean connect+send latency for this round
        print(f"{len(connections)},{result},{time.time() - start}")
        # Drain server broadcasts so the open sockets' buffers do not fill.
        for ws in connections:
            await read_all(ws)
# asyncio.run() replaces the get_event_loop().run_until_complete() pattern,
# which is deprecated for new-loop use since Python 3.10.
asyncio.run(main())
{
"point_num": 1,
"id": "every_function_has_docstring",
"question": "Does every function in this file have a docstring?",
"answer": false
},
{
"point_num": 2,
"id": "all_function_names_snake_case",
"question": "Are all function names in this file written in snake_case?",
"answer"... | 3 | stress_test.py | UoA-eResearch/dynamic_network_graph |
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
#
# http://www.apache.org/licenses/LICENSE-2.0
#
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
from aliyunsdkcore.request import RpcRequest
from aliyunsdklinkwan.endpoint import endpoint_data
class GetLocalConfigSyncTaskRequest(RpcRequest):
    """RPC request for the LinkWAN ``GetLocalConfigSyncTask`` API (2019-03-01)."""
    def __init__(self):
        RpcRequest.__init__(self, 'LinkWAN', '2019-03-01', 'GetLocalConfigSyncTask','linkwan')
        self.set_method('POST')
        # Endpoint resolution attributes are optional on older core SDKs.
        if hasattr(self, "endpoint_map"):
            setattr(self, "endpoint_map", endpoint_data.getEndpointMap())
        if hasattr(self, "endpoint_regional"):
            setattr(self, "endpoint_regional", endpoint_data.getEndpointRegional())

    def get_TaskId(self):
        # Accessor for the TaskId query parameter.
        return self.get_query_params().get('TaskId')

    def set_TaskId(self,TaskId):
        self.add_query_param('TaskId',TaskId)
{
"point_num": 1,
"id": "has_multiple_inheritance",
"question": "Does any class in this file use multiple inheritance?",
"answer": false
},
{
"point_num": 2,
"id": "every_function_under_20_lines",
"question": "Is every function in this file shorter than 20 lines?",
"answer": true... | 3 | aliyun-python-sdk-linkwan/aliyunsdklinkwan/request/v20190301/GetLocalConfigSyncTaskRequest.py | yndu13/aliyun-openapi-python-sdk |
# Copyright 2019, The Emissions API Developers
# https://emissions-api.org
# This software is available under the terms of an MIT license.
# See LICENSE fore more information.
class RESTParamError(ValueError):
    """Raised for malformed REST parameters, e.g. by
    :func:`~emissionsapi.utils.polygon_to_wkt`.
    """
    pass
def bounding_box_to_wkt(lon1, lat1, lon2, lat2):
    """Return a WKT POLYGON for the axis-aligned box whose opposite corners
    are (lon1, lat1) and (lon2, lat2); the ring is closed on the first corner."""
    ring = f'{lon1} {lat1},{lon1} {lat2},{lon2} {lat2},{lon2} {lat1},{lon1} {lat1}'
    return f'POLYGON(({ring}))'
def polygon_to_wkt(polygon):
    """Converts a list of points to a WKT string defining a polygon.

    :param polygon: List of values with every pair of values representing a
        consecutive vertex of the polygon.
    :type polygon: list
    :raises RESTParamError: if the number of values is odd, or fewer than
        four points (including the closing one) result.
    :return: WKT defining the polygon.
    :rtype: str
    """
    # check if element number is even
    if len(polygon) % 2 != 0:
        raise RESTParamError('Number of elements has to be even')
    # Work on a copy: the previous implementation extended the caller's
    # list in place when closing the polygon, a surprising side effect.
    polygon = list(polygon)
    # close polygon by appending the first lon/lat pair if necessary
    if polygon[-2:] != polygon[:2]:
        polygon.extend(polygon[0:2])
    # at least 3 (+1 to close the polygon) coordinate points are required
    if len(polygon) < 8:
        raise RESTParamError('At least 4 points are needed to define a '
                             'polygon')
    # build 'lon lat' strings, one per vertex, and join them with ','
    points = [f'{polygon[i]} {polygon[i + 1]}' for i in range(0, len(polygon), 2)]
    return f'POLYGON(({",".join(points)}))'
| [
{
"point_num": 1,
"id": "has_multiple_inheritance",
"question": "Does any class in this file use multiple inheritance?",
"answer": false
},
{
"point_num": 2,
"id": "all_params_annotated",
"question": "Does every function parameter in this file have a type annotation (excluding self/c... | 3 | emissionsapi/utils.py | shaardie/emissions-api |
from . import resource
from botocore.exceptions import ClientError
class Elb(resource.Resource):
    """CloudWatch-monitored resource wrapper for ELB load balancers."""
    # Metric name -> Japanese display label (runtime strings, do not alter).
    METRICS = {
        "Latency": "レイテンシー",
        "RequestCount": "リクエストカウント",
        "HealthyHostCount": "正常EC2数",
        "UnHealthyHostCount": "危険EC2数",
        "HTTPCode_ELB_4XX": "HTTPレスポンスコード(4xx)",
        "HTTPCode_ELB_5XX": "HTTPレスポンスコード(5xx)",
    }

    def __init__(self, region: str, resource_id: str):
        super().__init__(region, resource_id)
        # Populated later by describe()/serialization code; unknown here.
        self.dns_name = None
        self.scheme = None

    def serialize(self, aws=None):
        res = super().serialize(aws)
        return res

    def describe(self, aws):
        # Local import -- presumably avoids a circular import; confirm.
        from backend.externals.elb import Elb, Elbv2
        # Try the v2 (ALB/NLB) API first, fall back to classic ELB on failure.
        try:
            return Elbv2(aws, self.region).describe_load_balancer(self.resource_id)
        except ClientError:
            return Elb(aws, self.region).describe_load_balancer(self.resource_id)

    @staticmethod
    def get_id_name():
        return "LoadBalancerName"

    @staticmethod
    def get_service_name():
        return "ELB"

    @staticmethod
    def get_instance_resource_name():
        return 'elasticloadbalancing:loadbalancer'

    @staticmethod
    def convert_instance_arn(arn) -> str:
        """Extract the load-balancer name from an ELB/ALB/NLB ARN."""
        arn_parts = arn.split(":")  # ["arn", "aws", "service", "region", "account_id", "id"]
        resource_id = arn_parts[-1]
        resource_id_parts = resource_id.split("/")
        # v2 ARNs look like "loadbalancer/app/<name>/<hash>"; classic is "<prefix>/<name>".
        return resource_id_parts[2] if resource_id_parts[1] in ["app", "net"] else resource_id_parts[1]

    @staticmethod
    def get_namespace():
        return "AWS/ELB"

    @staticmethod
    def get_metrics():
        return Elb.METRICS.keys()

    @staticmethod
    def get_metrics_japanese(metrics: str):
        """Return the Japanese label for *metrics*, or the name unchanged."""
        return Elb.METRICS.get(metrics, metrics)
| [
{
"point_num": 1,
"id": "no_function_exceeds_5_params",
"question": "Does every function in this file take 5 or fewer parameters (excluding self/cls)?",
"answer": true
},
{
"point_num": 2,
"id": "all_function_names_snake_case",
"question": "Are all function names in this file written... | 3 | backend/models/resource/elb.py | crosspower/naruko |
#!/usr/bin/env python
# -*- coding: utf-8
import os
def has_utility(cmd):
    """Return True when *cmd* names an executable reachable through $PATH."""
    for directory in os.environ['PATH'].split(os.pathsep):
        if os.access(os.path.join(directory, cmd), os.X_OK):
            return True
    return False
def is_macos():
    """Return True when running on Darwin (macOS)."""
    return os.uname().sysname == 'Darwin'
class Driver(object):
    """Base description of a minikube VM driver."""

    arch = "amd64"  # every supported driver targets amd64

    @property
    def name(self):
        # Abstract: concrete drivers must provide their minikube driver name.
        raise NotImplementedError("Subclass must set name")

    @property
    def arguments(self):
        """Command-line arguments selecting this driver."""
        return "--vm-driver", self.name
class LinuxDriver(Driver):
    # Drivers available on a Linux host. NOTE: the class attribute
    # intentionally shadows the `os` module name at class scope.
    os = "linux"
class MacDriver(Driver):
    # Drivers available on a macOS host.
    os = "darwin"
| [
{
"point_num": 1,
"id": "more_functions_than_classes",
"question": "Does this file define more functions than classes?",
"answer": true
},
{
"point_num": 2,
"id": "all_function_names_snake_case",
"question": "Are all function names in this file written in snake_case?",
"answer": ... | 3 | minikube/drivers/common.py | j-boivie/fiaas-deploy-daemon |
#!/usr/bin/env python
# -*- coding: utf-8; -*-
# Copyright (c) 2022 Oracle and/or its affiliates.
# Licensed under the Universal Permissive License v 1.0 as shown at https://oss.oracle.com/licenses/upl/
from abc import abstractmethod
from typing import Dict
class Backend:
    """Interface for backend implementations that execute remote runs."""
    @abstractmethod
    def run(self) -> Dict:
        """
        Initiate a run.

        Returns
        -------
        None
        """
        pass  # pragma: no cover

    def delete(self) -> None:
        """
        Delete a remote run.

        Returns
        -------
        None
        """
        pass  # pragma: no cover

    def watch(self) -> None:
        """
        Stream logs from a remote run.

        Returns
        -------
        None
        """
        pass  # pragma: no cover

    def cancel(self) -> None:
        """
        Cancel a remote run.

        Returns
        -------
        None
        """
        pass  # pragma: no cover
| [
{
"point_num": 1,
"id": "more_functions_than_classes",
"question": "Does this file define more functions than classes?",
"answer": true
},
{
"point_num": 2,
"id": "all_return_types_annotated",
"question": "Does every function in this file have a return type annotation?",
"answer"... | 3 | ads/opctl/backend/base.py | oracle/accelerated-data-science |
"""
---------
loader.py
---------
A minimal code to store data in MongoDB
"""
import csv
import json
from datetime import datetime
from pymongo import MongoClient
def load_orders():
    """Load orders sample data (customers + ordered items) into local MongoDB.

    NOTE(review): reads 'customers.csv' / 'items_ordered.csv' relative to the
    current working directory -- confirm the script is run from its folder.
    """
    client = MongoClient('localhost', 27017)
    orders = client["orders"]
    # insert customers data
    customers = orders["customers"]
    with open('customers.csv') as csvfile:
        customers_data = list(csv.DictReader(csvfile))
    _ = customers.insert_many(customers_data)
    # insert items data
    items_ordered = orders["items_ordered"]
    with open('items_ordered.csv') as csvfile:
        items_ordered_data = list(csv.DictReader(csvfile))
    _ = items_ordered.insert_many(items_ordered_data)
def load_airbnb():
    """Load AirBnB sample data into local MongoDB.

    Converts MongoDB extended-JSON wrappers ({"$date": ms},
    {"$numberDecimal": str}) into plain Python values before inserting.
    """
    client = MongoClient('localhost', 27017)
    airbnb = client["airbnb"]
    sample_data = airbnb["sample_data"]
    with open("airbnb.json", "r") as f_in:
        data = json.load(f_in)
    for d in data:
        for key, val in d.items():
            if isinstance(val, dict):
                if "$date" in val.keys():
                    # extended JSON stores dates as epoch milliseconds
                    d[key] = datetime.fromtimestamp(val["$date"] / 1000)
                elif "$numberDecimal" in val.keys():
                    d[key] = val["$numberDecimal"]
        try:
            # insert_one() replaces Collection.insert(), removed in PyMongo 4.
            sample_data.insert_one(d)
        except Exception:
            # Best-effort load: skip documents that fail (e.g. duplicates).
            # Narrowed from bare `except:` so SystemExit/KeyboardInterrupt
            # are no longer swallowed.
            pass
def main():
    """The main script: load both sample datasets into MongoDB."""
    load_airbnb()
    load_orders()
if __name__ == "__main__":
    main()
    print("Done!")
| [
{
"point_num": 1,
"id": "has_nested_function_def",
"question": "Does this file contain any function defined inside another function?",
"answer": false
},
{
"point_num": 2,
"id": "every_function_under_20_lines",
"question": "Is every function in this file shorter than 20 lines?",
... | 3 | mongodb/assets/loader.py | Code360In/katacoda-scenarios-34 |
from unittest import TestCase
from Implementations.FastIntegersFromGit import FastIntegersFromGit
from Implementations.helpers.Helper import ListToPolynomial, toNumbers
from Implementations.FasterSubsetSum.RandomizedVariableLayers import RandomizedVariableExponentialLayers
from benchmarks.test_distributions import Distributions as dist
class Test(TestCase):
    """Checks RandomizedVariableExponentialLayers against small known sumsets
    and the reference FastIntegersFromGit implementation."""
    @classmethod
    def setUp(cls):
        # NOTE(review): declared @classmethod, so the solver is stored on the
        # class; unittest still calls it before each test.
        cls.fasterSubset = RandomizedVariableExponentialLayers(False, 3, 'variable layers', 1)
    def test_faster_sumset_base_returns_correct_sumset(self):
        # All achievable subset sums up to T=11 for these values.
        vals = [1, 15, 3, 8, 120, 290, 530, 420, 152, 320, 150, 190]
        T = 11
        sums = self.fasterSubset.fasterSubsetSum(vals, T, 0.2)
        self.assertListEqual(sums, [0, 1, 3, 4, 8, 9, 11])
    def test_color_coding_base_returns_correct_sumset(self):
        # Same expectation, exercising the color_coding stage directly.
        vals = [1, 15, 3, 8, 120, 290, 530, 420, 152, 320, 150, 190]
        T = 11
        characteristic = ListToPolynomial(vals)
        sums = self.fasterSubset.color_coding(characteristic, T, len(vals), 0.2)
        self.assertListEqual(toNumbers(sums), [0, 1, 3, 4, 8, 9, 11])
    def test_me(self):
        """Cross-check against the reference solver on a clustered instance."""
        delta = 0.0001
        i = 20
        a, T = dist.clusteredDistributionEven(i)
        fast = self.fasterSubset.fasterSubsetSum(a, T, delta)
        expertSolution = FastIntegersFromGit().run(a, T)
        self.assertListEqual(fast, expertSolution)
| [
{
"point_num": 1,
"id": "every_function_has_docstring",
"question": "Does every function in this file have a docstring?",
"answer": false
},
{
"point_num": 2,
"id": "all_function_names_snake_case",
"question": "Are all function names in this file written in snake_case?",
"answer"... | 3 | tests/FasterSubsetSumTests/test_randomizedVariableLayers.py | joakiti/Benchmark-SubsetSums |
import requests
from . import FeedSource, _request_headers
# pylint: disable=no-member
class WorldCoinIndex(FeedSource):
    """Price feed pulling weighted-average market rates from WorldCoinIndex."""

    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)
        self.timeout = getattr(self, 'timeout', 15)
        if not hasattr(self, 'api_key'):
            raise Exception("WorldCoinIndex FeedSource requires 'api_key'.")

    def _fetch(self):
        feed = {}
        url = "https://www.worldcoinindex.com/apiservice/v2getmarkets?key={apikey}&fiat={base}"
        for base in self.bases:
            response = requests.get(url=url.format(apikey=self.api_key, base=base),
                                    headers=_request_headers, timeout=self.timeout)
            for market in response.json()['Markets']:
                for ticker in market:
                    (quote, returned_base) = ticker['Label'].split('/')
                    if base == returned_base and quote in self.quotes:
                        # Volume_24h / Price -- presumably converts the 24h
                        # volume into quote-asset units; confirm with the API.
                        self.add_rate(feed, base, quote, ticker['Price'], ticker['Volume_24h'] / ticker['Price'])
        return feed
| [
{
"point_num": 1,
"id": "has_multiple_inheritance",
"question": "Does any class in this file use multiple inheritance?",
"answer": false
},
{
"point_num": 2,
"id": "more_functions_than_classes",
"question": "Does this file define more functions than classes?",
"answer": true
},... | 3 | bitshares_pricefeed/sources/worldcoinindex.py | bitshares/nbs-pricefeed |
from abc import abstractmethod
from wai.common.adams.imaging.locateobjects import LocatedObject
from ....core.component import ProcessorComponent
from ....core.stream import ThenFunction, DoneFunction
from ....core.stream.util import RequiresNoFinalisation
from ....domain.image.object_detection import ImageObjectDetectionDomainSpecifier
ObjectDetectionInstance = ImageObjectDetectionDomainSpecifier.instance_type()
class Coercion(
        RequiresNoFinalisation,
        ProcessorComponent[ObjectDetectionInstance, ObjectDetectionInstance]
):
    """
    Base class for all coercions.
    """
    def process_element(
            self,
            element: ObjectDetectionInstance,
            then: ThenFunction[ObjectDetectionInstance],
            done: DoneFunction
    ):
        """Coerce every located object in the instance, then forward it."""
        _, located_objects = element
        if located_objects is not None:
            for obj in located_objects:
                self._process_located_object(obj)
        then(element)

    @abstractmethod
    def _process_located_object(self, located_object: LocatedObject):
        """
        Handles the processing of individual located objects.

        :param located_object: The located object to coerce.
        """
        pass
| [
{
"point_num": 1,
"id": "all_params_annotated",
"question": "Does every function parameter in this file have a type annotation (excluding self/cls)?",
"answer": true
},
{
"point_num": 2,
"id": "more_functions_than_classes",
"question": "Does this file define more functions than class... | 3 | src/wai/annotations/isp/coercions/component/_Coercion.py | waikato-ufdl/wai-annotations-core |
import time
import threading
import random
from queue import Queue
from pool_workers import Pool
# Our logic to be performed Asynchronously.
def our_process(a):
    """Simulated unit of work: sleep a random 0-3 s, then report completion."""
    t = threading.current_thread()
    # just to simulate how much time this logic is going to take to be done.
    time.sleep(random.uniform(0, 3))
    # Thread.name replaces getName(), deprecated since Python 3.10.
    print(f'{t.name} is finished the task {a} ...')
# Our function to handle exceptions thrown by the 'our_process' logic.
# (The "execption" typo in the name is kept: the Pool below passes it to a
# keyword argument of the same spelling.)
def execption_handler(thread_name, exception):
    print(f'{thread_name}: {exception}')
# Create a queue and a 2-worker pool that drains it.
q = Queue()
p = Pool(name='Pool_1', queue=q, max_workers=2, wait_queue=False, execption_handler=execption_handler)
# Add some tasks to the queue.
for i in range(10):
    # task is a tuple of a function, args and kwargs.
    our_task = (our_process, (i,), {})
    q.put(our_task)
try:
    # start the Pool
    p.start()
    # Re-enter the main thread periodically so KeyboardInterrupt is noticed.
    while p.is_alive():
        p.join(0.5)
except (KeyboardInterrupt, SystemExit):
    # shutdown the pool by aborting its Workers/threads.
    p.shutdown()
"""output result
Worker_1_Pool_1 is finished the task 1 ...
Worker_1_Pool_1 is finished the task 2 ...
Worker_0_Pool_1 is finished the task 0 ...
Worker_0_Pool_1 is finished the task 4 ...
Worker_0_Pool_1 is finished the task 5 ...
Worker_1_Pool_1 is finished the task 3 ...
Worker_0_Pool_1 is finished the task 6 ...
Worker_1_Pool_1 is finished the task 7 ...
Worker_0_Pool_1 is finished the task 8 ...
Worker_0_Pool_1: The Queue is empty.
Worker_1_Pool_1 is finished the task 9 ...
Worker_1_Pool_1: The Queue is empty.
Worker_0_Pool_1 is stopped
Worker_1_Pool_1 is stopped
Pool_1 is shutted down
""" | [
{
"point_num": 1,
"id": "all_params_annotated",
"question": "Does every function parameter in this file have a type annotation (excluding self/cls)?",
"answer": false
},
{
"point_num": 2,
"id": "every_function_under_20_lines",
"question": "Is every function in this file shorter than ... | 3 | examples/example_1.py | medram/Pool_Workers |
#-----------------------------------------------------------------------------
# Copyright (c) 2005-2021, PyInstaller Development Team.
#
# Distributed under the terms of the GNU General Public License (version 2
# or later) with exception for distributing the bootloader.
#
# The full license is in the file COPYING.txt, distributed with this software.
#
# SPDX-License-Identifier: (GPL-2.0-or-later WITH Bootloader-exception)
#-----------------------------------------------------------------------------
import pytest
from PyInstaller import compat
from PyInstaller._shared_with_waf import _pyi_machine
def test_exec_command_subprocess_wrong_encoding_reports_nicely(capsys):
    """Undecodable subprocess output must raise UnicodeDecodeError AND print
    a helpful hint naming the offending bytes."""
    # Ensure a nice error message is printed if decoding the output of the subprocess fails.
    # As `exec_python()` is used for running the program, we can use a small Python script.
    prog = """import sys; sys.stdout.buffer.write(b'dfadfadf\\xa0:::::')"""
    with pytest.raises(UnicodeDecodeError):
        compat.exec_python('-c', prog)
    out, err = capsys.readouterr()
    assert 'bytes around the offending' in err
# List every known platform.machine() or waf's ctx.env.DEST_CPU (in the bootloader/wscript file) output on Linux here.
# NOTE: the parameter named `input` shadows the builtin; pytest binds
# parametrize entries by argname, so renaming would break the decorator.
@pytest.mark.parametrize(
    "input, output", [
        ("x86_64", "intel"),
        ("x64", "intel"),
        ("i686", "intel"),
        ("i386", "intel"),
        ("x86", "intel"),
        ("armv5", "arm"),
        ("armv7h", "arm"),
        ("armv7a", "arm"),
        ("arm", "arm"),
        ("aarch64", "arm"),
        ("ppc64le", "ppc"),
        ("ppc64", "ppc"),
        ("ppc32le", "ppc"),
        ("powerpc", "ppc"),
        ("s390x", "s390x"),
        ("something-alien", "unknown"),
    ]
)
def test_linux_machine(input, output):
    """Each Linux machine string maps to its normalized architecture family."""
    assert _pyi_machine(input, "Linux") == output
def test_non_linux_machine():
    """_pyi_machine() is Linux-specific: every other OS must yield None."""
    for system in ("Darwin", "Windows", "FreeBSD"):
        assert _pyi_machine("foo", system) is None
| [
{
"point_num": 1,
"id": "all_params_annotated",
"question": "Does every function parameter in this file have a type annotation (excluding self/cls)?",
"answer": false
},
{
"point_num": 2,
"id": "no_function_exceeds_5_params",
"question": "Does every function in this file take 5 or fe... | 3 | tests/unit/test_compat.py | mathiascode/pyinstaller |
import unittest
from CSVReader import CSVReader, class_factory
class MyTestCase(unittest.TestCase):
    """Tests CSVReader.return_data_as_object against class_factory output."""
    def setUp(self):
        # NOTE(review): absolute '/src/...' path assumes a container layout.
        self.csv_reader = CSVReader('/src/Unit Test Addition.csv')
    def test_return_data_as_object(self):
        """Every returned object should carry the dynamically built class name."""
        num = self.csv_reader.return_data_as_object('number')
        self.assertIsInstance(num, list)
        test_class = class_factory('number', self.csv_reader.data[0])
        for number in num:
            self.assertEqual(number.__name__, test_class.__name__)
if __name__ == '__main__':
    unittest.main()
| [
{
"point_num": 1,
"id": "has_multiple_inheritance",
"question": "Does any class in this file use multiple inheritance?",
"answer": false
},
{
"point_num": 2,
"id": "has_nested_function_def",
"question": "Does this file contain any function defined inside another function?",
"answ... | 3 | src/CSVTest.py | cadibemma/Calculator |
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
from caffe2.python import core
import caffe2.python.hypothesis_test_util as hu
import caffe2.python.serialized_test.serialized_test_util as serial
from hypothesis import assume, given, settings, HealthCheck
import hypothesis.strategies as st
import numpy as np
import unittest
@st.composite
def _glu_old_input(draw):
    """Hypothesis strategy: a random float32 array plus a split axis whose
    dimension is even (Glu halves the tensor along that axis)."""
    dims = draw(st.lists(st.integers(min_value=1, max_value=5), min_size=1, max_size=3))
    axis = draw(st.integers(min_value=0, max_value=len(dims)))
    # The axis dimension must be divisible by two
    axis_dim = 2 * draw(st.integers(min_value=1, max_value=2))
    dims.insert(axis, axis_dim)
    X = draw(hu.arrays(dims, np.float32, None))
    return (X, axis)
class TestGlu(serial.SerializedTestCase):
    """Reference check for the Caffe2 Glu (gated linear unit) operator."""
    @given(
        X_axis=_glu_old_input(),
        **hu.gcs
    )
    @settings(deadline=10000)
    def test_glu_old(self, X_axis, gc, dc):
        X, axis = X_axis
        def glu_ref(X):
            # GLU: split in half along `axis`; gate the first half with the
            # sigmoid of the second half.
            x1, x2 = np.split(X, [X.shape[axis] // 2], axis=axis)
            Y = x1 * (1. / (1. + np.exp(-x2)))
            return [Y]
        op = core.CreateOperator("Glu", ["X"], ["Y"], dim=axis)
        self.assertReferenceChecks(gc, op, [X], glu_ref)
if __name__ == "__main__":
    unittest.main()
| [
{
"point_num": 1,
"id": "no_function_exceeds_5_params",
"question": "Does every function in this file take 5 or fewer parameters (excluding self/cls)?",
"answer": true
},
{
"point_num": 2,
"id": "more_functions_than_classes",
"question": "Does this file define more functions than cla... | 3 | pytorch-frontend/caffe2/python/operator_test/glu_op_test.py | AndreasKaratzas/stonne |
import datetime
import os
from pathlib import Path
import attr
import orjson
from dis_snek.mixins.serialization import DictSerializationMixin
from storage.genius import Genius
from storage.nerf import Nerf
@attr.s(slots=True)
class Container(DictSerializationMixin):
    """Top-level persisted bot state: one attrs field per feature store."""
    nerf: Nerf = attr.ib(factory=dict, converter=Nerf.from_dict)
    genius: Genius = attr.ib(factory=dict, converter=Genius.from_dict)
class JsonStorage:
    """JSON-file persistence for Container with rotating, timestamped backups."""
    def __init__(self, filename: str, backup_folder: str, max_backups=5):
        self.filename = Path(filename)
        self.backup_folder = Path(backup_folder)
        self.max_backups = max_backups  # backups beyond this are pruned, oldest first
        self.container = None
        self._init_data()

    def _init_data(self):
        """Load the container from disk, or start empty if the file is absent."""
        if self.filename.is_file():
            with open(self.filename, "r") as file:
                data = orjson.loads(file.read())
            self.container = Container.from_dict(data)
        else:
            self.container = Container()
        self.backup_folder.mkdir(exist_ok=True)

    def save(self):
        """Write the current container to the primary file."""
        self._save_file(self.filename)

    def backup(self):
        """Write a timestamped backup, then prune the oldest beyond max_backups."""
        backup_filename = f"backup-{datetime.datetime.now().timestamp()}.json"
        backup_path = self.backup_folder.joinpath(backup_filename)
        self._save_file(backup_path)
        # Sort oldest-first by file creation time; drop one if over the limit.
        backup_files = sorted(os.listdir(self.backup_folder), key=lambda file: os.path.getctime(self.backup_folder.joinpath(file).absolute()))
        if len(backup_files) > self.max_backups:
            os.remove(self.backup_folder.joinpath(backup_files[0]).absolute())
        print("Backup done")

    def _save_file(self, path):
        """Serialize the container as JSON to *path* (non-string keys allowed)."""
        with open(path, "wb") as file:
            data = orjson.dumps(self.container.to_dict(), option=orjson.OPT_NON_STR_KEYS)
            file.write(data)
| [
{
"point_num": 1,
"id": "no_function_exceeds_5_params",
"question": "Does every function in this file take 5 or fewer parameters (excluding self/cls)?",
"answer": true
},
{
"point_num": 2,
"id": "all_params_annotated",
"question": "Does every function parameter in this file have a ty... | 3 | storage/storage.py | np-overflow/bytehackz-discord-bot |
from scripts.plugin_base import ArtefactPlugin
from scripts.ilapfuncs import logfunc, tsv
from scripts import artifact_report
class AdbHostsPlugin(ArtefactPlugin):
    """Parses the Android ``adb_keys`` file and reports the user@host pairs
    that have been authorised for ADB access.
    """
    def __init__(self):
        super().__init__()
        self.author = 'Unknown'
        self.author_email = ''
        self.author_url = ''
        self.category = 'ADB Hosts'
        self.name = 'ADB Hosts'
        self.description = ''
        self.artefact_reference = ''  # Description on what the artefact is.
        self.path_filters = ['**/data/misc/adb/adb_keys']  # Collection of regex search filters to locate an artefact.
        self.icon = 'terminal'  # feathricon for report.

    def _processor(self) -> bool:
        file_found = str(self.files_found[0])
        # Each adb_keys line is "<base64 key> <user>@<host>"; keep [user, host].
        with open(file_found, 'r') as f:
            data_list = [line.split(" ")[1].rstrip('\n').split('@', 1) for line in f]
        if len(data_list) > 0:
            data_headers = ('Username', 'Hostname')
            artifact_report.GenerateHtmlReport(self, file_found, data_headers, data_list)
            tsv(self.report_folder, data_headers, data_list, self.full_name())
        else:
            # Plain string: the f-prefix carried no placeholders (ruff F541).
            logfunc('No ADB Hosts file available')
        return True
| [
{
"point_num": 1,
"id": "has_multiple_inheritance",
"question": "Does any class in this file use multiple inheritance?",
"answer": false
},
{
"point_num": 2,
"id": "every_function_under_20_lines",
"question": "Is every function in this file shorter than 20 lines?",
"answer": true... | 3 | scripts/artifacts/adb_hosts.py | JamieSharpe/ALEAPP |
# coding: utf-8
"""
Galaxy 3.2 API (wip)
Galaxy 3.2 API (wip) # noqa: E501
The version of the OpenAPI document: 1.2.0
Generated by: https://openapi-generator.tech
"""
from __future__ import absolute_import
import unittest
import openapi_client
from openapi_client.models.tags_page import TagsPage # noqa: E501
from openapi_client.rest import ApiException
class TestTagsPage(unittest.TestCase):
    """Unit-test stubs for the generated TagsPage model."""

    def setUp(self):
        """No fixtures are required yet."""

    def tearDown(self):
        """Nothing to clean up."""

    def testTagsPage(self):
        """Test TagsPage (stub).

        FIXME: construct the object with mandatory attributes using example
        values, e.g. ``openapi_client.models.tags_page.TagsPage()``.
        """
if __name__ == '__main__':
    # Allow running this test module directly (outside a test runner).
    unittest.main()
| [
{
"point_num": 1,
"id": "all_function_names_snake_case",
"question": "Are all function names in this file written in snake_case?",
"answer": false
},
{
"point_num": 2,
"id": "more_functions_than_classes",
"question": "Does this file define more functions than classes?",
"answer":... | 3 | client_apis/python/test/test_tags_page.py | alikins/galaxy-api-swaggerhub |
from cms import models
def test_create_no_media(db):
    """
    An info panel can be created with only a title and text (no media).
    """
    models.InfoPanel.objects.create(
        title="No Media",
        text="The quick brown fox jumped over the lazy dog.",
    )
def test_ordering(info_panel_factory):
    """
    Panels should be ordered by their ``order`` attribute.
    """
    # Create out of order on purpose; creation order must not matter.
    middle = info_panel_factory(order=2)
    last = info_panel_factory(order=3)
    first = info_panel_factory(order=1)

    expected = [first, middle, last]
    assert list(models.InfoPanel.objects.all()) == expected
def test_repr():
    """
    The representation of the panel should contain the information
    necessary to reconstruct it.
    """
    panel = models.InfoPanel(title="Test Panel")

    expected = "InfoPanel(id={}, title={})".format(repr(panel.id), repr(panel.title))
    assert repr(panel) == expected
def test_str():
    """
    Converting an info panel to a string should return the panel's title.
    """
    instance = models.InfoPanel(title="Test Panel")
    assert str(instance) == instance.title
| [
{
"point_num": 1,
"id": "every_function_has_docstring",
"question": "Does every function in this file have a docstring?",
"answer": true
},
{
"point_num": 2,
"id": "has_nested_function_def",
"question": "Does this file contain any function defined inside another function?",
"answ... | 3 | darksite/cms/test/models/test_info_panel_model.py | UNCDarkside/DarksiteAPI |
#!python3
#encoding:utf-8
from abc import ABCMeta, abstractmethod
import AGitHubUser
import BasicAuthenticationUser
class TwoFactorAuthenticationUser(BasicAuthenticationUser.BasicAuthenticationUser):
    """Basic-auth GitHub user that additionally supplies a two-factor OTP header."""

    def __init__(self, username, password, secret):
        super().__init__(username, password)
        self.__secret = secret  # secret used to derive one-time passwords

    def __GetOtp(self):
        # Compute the OTP from self.__secret.
        # TODO: not implemented yet — currently always returns None.
        return None

    # Read-only property exposing the current one-time password.
    OneTimePassword = property(__GetOtp)

    def CreateHeaders(self):
        """Headers to attach to GitHub API requests (X-GitHub-OTP)."""
        return {"X-GitHub-OTP": self.OneTimePassword}
| [
{
"point_num": 1,
"id": "all_params_annotated",
"question": "Does every function parameter in this file have a type annotation (excluding self/cls)?",
"answer": false
},
{
"point_num": 2,
"id": "every_function_has_docstring",
"question": "Does every function in this file have a docst... | 3 | TwoFactorAuthenticationUser.py | ytyaru/GitHubUser.201704101437 |
#!/usr/bin/env python3
# Copyright (c) 2019 The Bitcoin Core Developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
"""Test dclrcoind aborts if can't disconnect a block.
- Start a single node and generate 3 blocks.
- Delete the undo data.
- Mine a fork that requires disconnecting the tip.
- Verify that dclrcoind AbortNode's.
"""
from test_framework.test_framework import BitcoinTestFramework
from test_framework.util import wait_until, get_datadir_path, connect_nodes
import os
class AbortNodeTest(BitcoinTestFramework):
    """Verify the node aborts when it cannot disconnect a block (missing undo data)."""

    def set_test_params(self):
        # Two fresh-chain nodes; long RPC timeout because we wait for a crash.
        self.setup_clean_chain = True
        self.num_nodes = 2
        self.rpc_timeout = 240

    def setup_network(self):
        self.setup_nodes()
        # We'll connect the nodes later

    def run_test(self):
        self.nodes[0].generate(3)
        datadir = get_datadir_path(self.options.tmpdir, 0)

        # Deleting the undo file will result in reorg failure
        os.unlink(os.path.join(datadir, self.chain, 'blocks', 'rev00000.dat'))

        # Connecting to a node with a more work chain will trigger a reorg
        # attempt.
        self.nodes[1].generate(3)
        with self.nodes[0].assert_debug_log(["Failed to disconnect block"]):
            connect_nodes(self.nodes[0], 1)
            self.nodes[1].generate(1)

            # Check that node0 aborted
            self.log.info("Waiting for crash")
            wait_until(lambda: self.nodes[0].is_node_stopped(), timeout=200)
        self.log.info("Node crashed - now verifying restart fails")
        # Restart must also fail: the chainstate is still unrecoverable.
        self.nodes[0].assert_start_raises_init_error()
if __name__ == '__main__':
    # Standard functional-test entry point.
    AbortNodeTest().main()
| [
{
"point_num": 1,
"id": "every_function_under_20_lines",
"question": "Is every function in this file shorter than 20 lines?",
"answer": true
},
{
"point_num": 2,
"id": "has_multiple_inheritance",
"question": "Does any class in this file use multiple inheritance?",
"answer": false... | 3 | test/functional/feature_abortnode.py | DclrCoin/dclrcoin |
from django.test import TestCase
from .models import Editor,Pics,tags,Category,Location
class EditorTestClass(TestCase):
    """Tests for the Editor model."""

    # Set up method
    def setUp(self):
        self.james = Editor(first_name = 'James', last_name ='Muriuki', email ='james@moringaschool.com')

    # Testing instance
    def test_instance(self):
        self.assertTrue(isinstance(self.james,Editor))

    # Testing Save Method
    def test_save_method(self):
        # save_editor() is expected to persist the instance so the table is non-empty.
        self.james.save_editor()
        editors = Editor.objects.all()
        self.assertTrue(len(editors) > 0)
class PicsTestClass(TestCase):
    """Tests for the Pics model."""

    # Set up method
    def setUp(self):
        # NOTE(review): editor/category/location/tags are passed as plain strings
        # here; if these fields are relations, saving this instance may fail —
        # confirm against the model definitions.
        self.shoot = Pics(title = 'Learn', post = 'lets learn today', editor = 'shoot', category = 'study', location = 'Juja', tags = '#tusome', pub_date = '2019-12-16', cover = 'cover.png')

    # Testing instance
    def test_instance(self):
        self.assertTrue(isinstance(self.shoot,Pics))
| [
{
"point_num": 1,
"id": "every_function_has_docstring",
"question": "Does every function in this file have a docstring?",
"answer": false
},
{
"point_num": 2,
"id": "every_class_has_docstring",
"question": "Does every class in this file have a docstring?",
"answer": false
},
... | 3 | pics/tests.py | AmosMwangi/bavilion |
# Scraper for California's First District Court of Appeal
# CourtID: calctapp_1st
# Court Short Name: Cal. Ct. App.
from juriscraper.opinions.united_states.state import cal
class Site(cal.Site):
    """Scraper for California's First District Court of Appeal (division A)."""

    def __init__(self, *args, **kwargs):
        super(Site, self).__init__(*args, **kwargs)
        self.court_id = self.__module__
        self.court_code = "A"  # court code used when building the URL
        self.division = "1st App. Dist."
        # build_url() is inherited from the shared cal.Site base class.
        self.url = self.build_url()

    def _get_divisions(self):
        # One identical division entry per scraped case.
        return [self.division] * len(self.case_names)
| [
{
"point_num": 1,
"id": "every_function_under_20_lines",
"question": "Is every function in this file shorter than 20 lines?",
"answer": true
},
{
"point_num": 2,
"id": "has_multiple_inheritance",
"question": "Does any class in this file use multiple inheritance?",
"answer": false... | 3 | juriscraper/opinions/united_states/state/calctapp_1st.py | EvandoBlanco/juriscraper |
def solution(number):  # O(N)
    """
    Write a function to compute the fibonacci sequence value to the requested iteration.
    >>> solution(3)
    2
    >>> solution(10)
    55
    >>> solution(20)
    6765
    """
    if number < 0:
        raise ValueError('number must be non-negative')
    # FIX: the memoised recursive version hit Python's recursion limit for
    # large inputs; an iterative pairwise walk is O(N) time, O(1) space.
    previous, current = 0, 1  # fib(0), fib(1)
    for _ in range(number):
        previous, current = current, previous + current
    return previous
if __name__ == '__main__':
    # Run the docstring examples as a self-test when executed directly.
    import doctest
    doctest.testmod()
| [
{
"point_num": 1,
"id": "has_nested_function_def",
"question": "Does this file contain any function defined inside another function?",
"answer": true
},
{
"point_num": 2,
"id": "every_function_under_20_lines",
"question": "Is every function in this file shorter than 20 lines?",
"... | 3 | python/recursion/fibonacci.py | suddi/coding-challenges |
from __future__ import print_function
import pytest
import six
import sys
from abc import ABCMeta, abstractmethod
from inspect import isabstract
class Foo(object):
    """A plain concrete class — nothing abstract about it."""
class Abstract:
    """Py2-style ABC: ``__metaclass__`` is ignored on Python 3, so this class
    is only genuinely abstract under Python 2."""

    __metaclass__ = ABCMeta

    @abstractmethod
    def foo(self):
        """Abstract on Py2; a decorated no-op on Py3."""
# six.add_metaclass applies ABCMeta on both Py2 and Py3, so this class is
# genuinely abstract (instantiation raises TypeError) on either version.
@six.add_metaclass(ABCMeta)
class AbstractSix:
    @abstractmethod
    def foo(self):
        pass
@pytest.mark.skipif(
    sys.version_info > (2, 7), reason="__metaclass__ is not read for Python 3.x"
)
def test_isabstract():
    # Only meaningful on Python 2, where `__metaclass__` makes Abstract abstract.
    assert not isabstract(Foo)
    assert isabstract(Abstract)
    assert isabstract(AbstractSix)
| [
{
"point_num": 1,
"id": "has_multiple_inheritance",
"question": "Does any class in this file use multiple inheritance?",
"answer": false
},
{
"point_num": 2,
"id": "more_functions_than_classes",
"question": "Does this file define more functions than classes?",
"answer": false
}... | 3 | python/test_abstract_2.py | berquist/eg |
from tests.integration.util import (
create_client,
CREDENTIALS,
SANDBOX_INSTITUTION,
)
# Module-level token shared by the tests below; populated by setup_module().
access_token = None
def setup_module(module):
    """Create a sandbox Item with the identity product and stash its token."""
    client = create_client()
    response = client.Item.create(
        CREDENTIALS, SANDBOX_INSTITUTION, ['identity'])
    global access_token
    access_token = response['access_token']
def teardown_module(module):
    """Remove the sandbox Item created in setup_module."""
    client = create_client()
    client.Item.remove(access_token)
def test_get():
    """Identity.get on the sandbox Item should return identity data."""
    client = create_client()
    response = client.Identity.get(access_token)
    assert response['identity'] is not None
| [
{
"point_num": 1,
"id": "all_function_names_snake_case",
"question": "Are all function names in this file written in snake_case?",
"answer": true
},
{
"point_num": 2,
"id": "every_function_under_20_lines",
"question": "Is every function in this file shorter than 20 lines?",
"answ... | 3 | tests/integration/test_identity.py | mattiskan/plaid-python |
class ClassE:
    """ClassE: a class whose constructor takes no keyword arguments and which
    also provides a keyword-free class-method factory."""

    def __init__(self):
        """Create an instance; no arguments are required."""

    @classmethod
    def from_string(cls):
        """Alternate constructor mirroring the no-argument ``__init__``."""
        return cls()
| [
{
"point_num": 1,
"id": "all_return_types_annotated",
"question": "Does every function in this file have a return type annotation?",
"answer": false
},
{
"point_num": 2,
"id": "every_function_has_docstring",
"question": "Does every function in this file have a docstring?",
"answe... | 3 | integration_tests/test-packages/python/testpkguno/testpkguno/ClassE.py | franklinen/doppel-cli |
from typing import Tuple
import pytest
from flake8_annotations.error_codes import Error
from testing.helpers import check_is_empty, check_is_not_empty, check_source
from testing.test_cases.overload_decorator_test_cases import (
OverloadDecoratorTestCase,
overload_decorator_test_cases,
)
class TestOverloadDecoratorErrorSuppression:
    """Test suppression of errors for the closing def of a `typing.overload` series."""

    @pytest.fixture(
        params=overload_decorator_test_cases.items(), ids=overload_decorator_test_cases.keys()
    )
    def yielded_errors(
        self, request  # noqa: ANN001
    ) -> Tuple[str, OverloadDecoratorTestCase, Tuple[Error]]:
        """
        Build a fixture for the errors emitted from parsing `@overload` decorated test code.
        Fixture provides a tuple of: test case name, its corresponding
        `OverloadDecoratorTestCase` instance, and a tuple of the errors yielded by the
        checker, which should be empty if the test case's `should_yield_error` is `False`.
        To support decorator aliases, the `overload_decorators` param is optionally specified by the
        test case. If none is explicitly set, the decorator list defaults to the checker's default.
        """
        # Parametrised over every (name, case) pair in the shared test-case mapping.
        test_case_name, test_case = request.param
        return (
            test_case_name,
            test_case,
            tuple(check_source(test_case.src, overload_decorators=test_case.overload_decorators)),
        )

    def test_overload_decorator_error_suppression(
        self, yielded_errors: Tuple[str, OverloadDecoratorTestCase, Tuple[Error]]
    ) -> None:
        """Test that no errors are yielded for the closing def of a `typing.overload` series."""
        test_case_name, test_case, errors = yielded_errors
        # Include the case name so a failure identifies the offending fixture.
        failure_msg = f"Check failed for case '{test_case_name}'"
        if test_case.should_yield_error:
            check_is_not_empty(errors, msg=failure_msg)
        else:
            check_is_empty(errors, msg=failure_msg)
| [
{
"point_num": 1,
"id": "all_return_types_annotated",
"question": "Does every function in this file have a return type annotation?",
"answer": true
},
{
"point_num": 2,
"id": "every_function_under_20_lines",
"question": "Is every function in this file shorter than 20 lines?",
"an... | 3 | testing/test_overload_decorator.py | python-discord/flake8-annotations |
"""
Read from the MLB Gameday API.
Base URL: https://statsapi.mlb.com/docs/#operation/stats
Hitter stat URL: https://statsapi.mlb.com/api/v1/stats?stats=season&group=hitting
"""
from typing import Dict, List
from schema.player import Player
from schema.team import Team
import requests
import utils
def get_top_hitter_stats() -> List[Player]:
    """
    Pull from the MLB Gameday API for hitter stats.
    todo: figure out how to get all players and not just the top 50
    """
    url = utils.HITTER_STATS_URL
    response = requests.get(url)
    response_json: Dict = response.json()
    # The season-hitting payload nests per-player rows under stats[0].splits.
    splits_list = response_json['stats'][0]['splits']
    players = []
    for split in splits_list:
        players.append(Player.from_splits_json(split))
    return players
def get_team_info() -> List[Team]:
    """
    Pull from the MLB Gameday API for teams. This will give you a comprehensive
    list of all teams, hopefully we can use that to pull all stats for all
    players on all teams.
    """
    url = utils.TEAM_INFO_URL
    response = requests.get(url)
    response_json: Dict = response.json()
    # Teams come back as a flat list under the 'teams' key.
    teams = response_json['teams']
    parsed_teams = []
    for team in teams:
        parsed_teams.append(Team.from_json(team))
    return parsed_teams
def get_hitter_stats_for_team_id(team_id: int, season: int, game_type: str) -> List[Player]:
    """
    Get hitter stats for the provided team, season, and game type.
    todo: this should def be combined with get_top_hitter_stats()
    """
    url = utils.hitter_url_for_team(team_id, season, game_type)
    response = requests.get(url)
    response_json: Dict = response.json()
    # Same response shape as the league-wide endpoint: stats[0].splits.
    splits_list = response_json['stats'][0]['splits']
    players = []
    for split in splits_list:
        players.append(Player.from_splits_json(split))
    return players
def test():
    """Temporary smoke-test hook; just prints a marker."""
    marker = "here"
    print(marker)
{
"point_num": 1,
"id": "no_function_exceeds_5_params",
"question": "Does every function in this file take 5 or fewer parameters (excluding self/cls)?",
"answer": true
},
{
"point_num": 2,
"id": "every_function_has_docstring",
"question": "Does every function in this file have a docs... | 3 | mlb-ml/api/gameday_api_handler.py | alhart2015/mlb-ml |
from .models import Shop
import logging
import requests
import time
logger = logging.getLogger('storelocator')
def update_shops():
    """Geocode shops that still lack coordinates and persist the results."""
    limit = 2500  # cap per run (quota-friendly)
    for shop in Shop.objects.filter(latitude=None, longitude=None)[:limit]:
        location = "%s %s %s" % (shop.city, shop.postalcode, shop.street)
        try:
            # NOTE(review): the local name `json` shadows the stdlib module
            # name (not imported in this file, so harmless here).
            json = reverse_geocoding(location)
        except requests.HTTPError:
            # This shop could not be geocoded; skip it and keep going.
            continue
        geo = json['results'][0]['geometry']['location']
        shop.latitude = geo['lat']
        shop.longitude = geo['lng']
        shop.save()
        logger.debug('Saved lat & lon for: %s' % shop)
def reverse_geocoding(location):
    """Resolve `location` to a Google geocoding response (dict).

    Retries up to three times when the API reports OVER_QUERY_LIMIT (sleeping
    2s between tries). Raises requests.HTTPError on ZERO_RESULTS or when all
    attempts are exhausted.
    """
    url = "http://maps.googleapis.com/maps/api/geocode/json"
    qs = "?address=%s&components=country:Germany&sensor=false" % location
    combined = url+qs
    max_attempts = 3

    for _ in range(max_attempts):
        response = requests.get(combined).json()
        status = response.get('status')
        if status == "OVER_QUERY_LIMIT":
            logger.debug('API Limit reached. Sleeping 2 seconds')
            time.sleep(2)
            continue
        if status == "ZERO_RESULTS":
            logger.debug("Zero results: %s" % location)
            raise requests.HTTPError()
        return response

    logger.debug("Can't fetch geocoding for: %s" % location)
    raise requests.HTTPError()
| [
{
"point_num": 1,
"id": "every_function_has_docstring",
"question": "Does every function in this file have a docstring?",
"answer": false
},
{
"point_num": 2,
"id": "has_nested_function_def",
"question": "Does this file contain any function defined inside another function?",
"ans... | 3 | storelocator/updaters/google.py | moccu/django-storelocator |
import itertools
import random
import networkx as nx
import sys
import pandas as pd
sys.setrecursionlimit(2000)
def prettyGenome(arr):
    """Format a signed permutation as e.g. '(+1 -2 +3)'."""
    signed = ['{0:+d}'.format(value) for value in arr]
    return '({})'.format(' '.join(signed))
def GreedySorting(genome):
    """Greedy sorting by reversals (Rosalind BA6A).

    For i = 1..n, reverses (and negates) the segment that brings element i to
    position i, counting a sign flip as its own step. `genome` is modified in
    place; returns the list of intermediate permutations formatted by
    prettyGenome.
    """
    length = len(genome)
    res = []
    for i in range(1, length+1):
        try:
            pos = genome.index(i)
        except ValueError:  # FIX: bare `except:` caught far too much (e.g. KeyboardInterrupt)
            pos = genome.index(-i)
        if pos==i-1 and genome[pos] > 0:
            continue  # already in place with the correct sign
        if i==1:
            part = genome[pos::-1]
        else:
            part = genome[pos:i-2:-1]
        part = [-_ for _ in part]  # a reversal also negates every element
        genome[i-1:pos+1] = part
        res.append(prettyGenome(genome))
        if genome[i-1] < 0:
            # Flipping the sign counts as an extra reversal step.
            genome[i-1] *= -1
            res.append(prettyGenome(genome))
    return res
def main(infile, outfile):
    """Read one '(+1 -2 ...)' permutation from infile, run GreedySorting,
    and write the newline-joined steps to outfile."""
    # Read the input, but do something non-trivial instead of count the lines in the file
    inp = [line.rstrip('\n') for line in infile]
    print(inp)
    # Strip the surrounding parentheses, then parse the signed integers.
    output = GreedySorting([int(a) for a in inp[0][1:-1].split(' ')])
    output = '\n'.join(output)
    print(output)
    # Write the output.
    outfile.write(output)
{
"point_num": 1,
"id": "all_params_annotated",
"question": "Does every function parameter in this file have a type annotation (excluding self/cls)?",
"answer": false
},
{
"point_num": 2,
"id": "all_function_names_snake_case",
"question": "Are all function names in this file written ... | 3 | solutions/ba6a.py | RafikFarhad/Bioinformatics_Codes |
#!usr/bin/emv python3
# -*- coding: utf-8 -*-
# A metaclass creates classes, so it must derive from `type`.
class ListMetaclass(type):
    """Metaclass that injects an `add` method (an alias for append) into
    every class it creates."""

    def __new__(cls, name, bases, attrs):
        # Attach `add` to the attribute dict before the class object exists.
        def _add(self, value):
            self.append(value)

        attrs['add'] = _add
        return type.__new__(cls, name, bases, attrs)
# Tell Python to build this class with ListMetaclass.
class MyList(list, metaclass=ListMetaclass):
    """A list subclass built by ListMetaclass, so it gains the injected `add`."""
    pass
L = MyList()
L.add(1)
L.add(2)
L.add(3)
L.add('END')
# The injected add() delegates to list.append, so L is [1, 2, 3, 'END'].
print(L)
| [
{
"point_num": 1,
"id": "every_class_has_docstring",
"question": "Does every class in this file have a docstring?",
"answer": false
},
{
"point_num": 2,
"id": "more_functions_than_classes",
"question": "Does this file define more functions than classes?",
"answer": false
},
{... | 3 | py_codes/037metaclass.py | fusugz/lifeisshort |
# coding: utf-8
#
# Copyright 2020 The Oppia Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS-IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Python file with invalid syntax, used by scripts/linters/
python_linter_test.py. This file doesnot import from __future__.
"""
class FakeClass:
    """This is a fake docstring for valid syntax purposes."""

    # NOTE: this file is a fixture consumed by scripts/linters tests; keep its
    # structure (including the deliberately missing __future__ import) intact.
    def __init__(self, fake_arg):
        self.fake_arg = fake_arg

    def fake_method(self, name):
        """This doesn't do anything.
        Args:
            name: str. Means nothing.
        Yields:
            tuple(str, str). The argument passed in but twice in a tuple.
        """
        yield (name, name)
| [
{
"point_num": 1,
"id": "has_multiple_inheritance",
"question": "Does any class in this file use multiple inheritance?",
"answer": false
},
{
"point_num": 2,
"id": "more_functions_than_classes",
"question": "Does this file define more functions than classes?",
"answer": true
},... | 3 | scripts/linters/test_files/invalid_python_three.py | yash10019coder/oppia |
import os
from minifw import config_default
class Dict(dict):
    """dict subclass supporting attribute-style access (d.key <-> d['key']).

    Optional `names`/`values` sequences are zipped into initial entries,
    alongside any keyword arguments.
    """

    def __init__(self, names=(), values=(), **kwargs):
        super().__init__(**kwargs)
        for key, value in zip(names, values):
            self[key] = value

    def __getattr__(self, key):
        # Only called when normal attribute lookup fails.
        try:
            return self[key]
        except KeyError:
            raise AttributeError('Dict object has no attribute {}'.format(key))

    def __setattr__(self, key, value):
        # Every attribute assignment becomes an item assignment.
        self[key] = value
def merge(defaults, override):
    """Recursively overlay `override` onto `defaults`.

    Only keys present in `defaults` appear in the result; dict-valued
    defaults are merged depth-first, other values are replaced outright.
    """
    result = dict()
    for key, default_value in defaults.items():
        if key not in override:
            result[key] = default_value
        elif isinstance(default_value, dict):
            result[key] = merge(default_value, override[key])
        else:
            result[key] = override[key]
    return result
def to_dict(d):
    """Recursively convert plain dicts into attribute-accessible Dict objects."""
    converted = Dict()
    for key, value in d.items():
        if isinstance(value, dict):
            converted[key] = to_dict(value)
        else:
            converted[key] = value
    return converted
configs = config_default.configs

# (head, tail) of the CWD; the tail is used as the package name when looking
# for a project-local override module.
project_dir = os.path.split(os.getcwd())
try:
    my_module = __import__('{}.config_override'.format(project_dir[1]), globals(), locals(), ['configs'])
except ImportError:
    # No project-specific override module; fall back to the defaults only.
    print('import error')
else:
    configs = merge(configs, my_module.configs)

configs = to_dict(configs)

# FIX: __all__ must contain attribute *names* (strings), not the objects
# themselves — a non-string entry breaks `from minifw.config import *`.
__all__ = ('configs',)
{
"point_num": 1,
"id": "no_function_exceeds_5_params",
"question": "Does every function in this file take 5 or fewer parameters (excluding self/cls)?",
"answer": true
},
{
"point_num": 2,
"id": "all_params_annotated",
"question": "Does every function parameter in this file have a ty... | 3 | minifw/config.py | ResolveWang/minifw |
import numpy as np
def random(texture, num):
    """Draw `num` rows from `texture` uniformly at random, with replacement.

    Sampling with replacement stays stable even when `texture` has fewer
    than `num` rows (replace=False would forbid duplicates but raises when
    the sample exceeds the population).
    """
    chosen = np.random.choice(texture.shape[0], num)
    return texture[chosen]
def stat(texture, num):
    # TODO: STAT-based sampling is not implemented yet (returns None).
    pass
def hybrid(texture, num):
    # TODO: HybridIA sampling is not implemented yet (returns None).
    pass
# Dispatch table: sampling-strategy name -> implementation.
method = {'random': random, 'STAT': stat, 'HybridIA': hybrid}
| [
{
"point_num": 1,
"id": "no_function_exceeds_5_params",
"question": "Does every function in this file take 5 or fewer parameters (excluding self/cls)?",
"answer": true
},
{
"point_num": 2,
"id": "every_function_has_docstring",
"question": "Does every function in this file have a docs... | 3 | extract.py | akusumoto/sample_dash |
from typing import Dict, Optional
from ciphey.iface import Checker, Config, ParamSpec, registry
@registry.register
class HumanChecker(Checker[str]):
    """Interactive checker that asks the operator to confirm a candidate result."""

    @staticmethod
    def getParams() -> Optional[Dict[str, ParamSpec]]:
        # No configurable parameters (implicitly returns None).
        pass

    def check(self, text: str) -> Optional[str]:
        # Pause the progress spinner so the prompt stays readable.
        with self._config().pause_spinner_handle():
            response = input(f"Result {text.__repr__()} (y/N): ").lower()
            if response == "y":
                return ""
            elif response in ("n", ""):
                return None
            else:
                # Unrecognised answer: prompt again (recursive).
                return self.check(text)

    def getExpectedRuntime(self, text: str) -> float:
        return 1  # About a second

    def __init__(self, config: Config):
        super().__init__(config)
| [
{
"point_num": 1,
"id": "all_function_names_snake_case",
"question": "Are all function names in this file written in snake_case?",
"answer": false
},
{
"point_num": 2,
"id": "every_function_under_20_lines",
"question": "Is every function in this file shorter than 20 lines?",
"ans... | 3 | ciphey/basemods/Checkers/human.py | blackcat-917/Ciphey |
from http import HTTPStatus
from src.app.post.enum import PostStatus
from src.app.post.model import PostModel
from src.common.authorization import Authorizer
from src.common.decorator import api_response
from src.common.exceptions import (ExceptionHandler, ItemNotFoundException)
class GetService(object):
    """Fetch a single post by id; admins see any status, others only PUBLISHED."""

    def __init__(self, path_param, user_group):
        self.path_param = path_param  # post id (hash key) taken from the URL path
        self.user_group = user_group  # caller's group, used for authorisation

    @api_response()
    def execute(self):
        """Return the post, or HTTP 404 when it is missing or not visible."""
        try:
            if Authorizer.is_admin(user_group=self.user_group):
                return self._get_post_object()
            return self._find_filtered_result()
        except ItemNotFoundException as ex:
            ExceptionHandler.handel_exception(exception=ex)
            return HTTPStatus.NOT_FOUND
        except PostModel.DoesNotExist as ex:
            ExceptionHandler.handel_exception(exception=ex)
            return HTTPStatus.NOT_FOUND

    def _get_post_object(self, query_filter=None):
        # Return the first item matching the (optionally filtered) query.
        for item in PostModel.query(self.path_param, filter_condition=query_filter):
            return item
        raise ItemNotFoundException

    def _find_filtered_result(self):
        # Non-admin callers may only read published posts.
        return self._get_post_object(
            query_filter=PostModel.status.is_in(PostStatus.PUBLISHED.value))
| [
{
"point_num": 1,
"id": "no_function_exceeds_5_params",
"question": "Does every function in this file take 5 or fewer parameters (excluding self/cls)?",
"answer": true
},
{
"point_num": 2,
"id": "all_params_annotated",
"question": "Does every function parameter in this file have a ty... | 3 | src/app/post/get.py | Thiqah-Lab/aws-serverless-skeleton |
import pytest
from peer_lending.users.models import User
from peer_lending.users.tests.factories import UserFactory
@pytest.fixture(autouse=True)
def media_storage(settings, tmpdir):
    # Redirect MEDIA_ROOT to a per-test temporary directory for every test.
    settings.MEDIA_ROOT = tmpdir.strpath
@pytest.fixture
def user() -> User:
    """A User instance built by the factory."""
    return UserFactory()
| [
{
"point_num": 1,
"id": "all_return_types_annotated",
"question": "Does every function in this file have a return type annotation?",
"answer": false
},
{
"point_num": 2,
"id": "every_function_has_docstring",
"question": "Does every function in this file have a docstring?",
"answe... | 3 | peer_lending/conftest.py | jamesreinhold/peerlending_starter |
from idunn.blocks.services_and_information import InternetAccessBlock
def test_internet_access_block():
    """A bare 'wifi' property is not enough: no block should be built."""
    internet_access_block = InternetAccessBlock.from_es({"properties": {"wifi": "no"}}, lang="en")
    assert internet_access_block is None
def test_internet_access_block_ok():
    """An 'internet_access' of 'wlan' should yield a wifi=True block."""
    internet_access_block = InternetAccessBlock.from_es(
        {"properties": {"internet_access": "wlan"}}, lang="en"
    )
    assert internet_access_block == InternetAccessBlock(wifi=True)
| [
{
"point_num": 1,
"id": "every_function_under_20_lines",
"question": "Is every function in this file shorter than 20 lines?",
"answer": true
},
{
"point_num": 2,
"id": "has_nested_function_def",
"question": "Does this file contain any function defined inside another function?",
"... | 3 | tests/test_internet_access.py | QwantResearch/idunn |
from flask import Flask, render_template, url_for, request
from flask_sqlalchemy import SQLAlchemy
app = Flask(__name__)

# FIX: configuration must be set before the extension is initialised, and the
# option name uses underscores throughout (it previously contained a space,
# so the setting was silently ignored).
app.config['SQLALCHEMY_TRACK_MODIFICATIONS'] = False
app.config['SQLALCHEMY_DATABASE_URI'] = 'sqlite:///db.sqlite3'
db = SQLAlchemy(app)
class User(db.Model):
    # NOTE(review): password is stored as plain text with no hashing — confirm
    # this is demo-only code before any real use.
    id = db.Column(db.Integer, primary_key=True)
    username = db.Column(db.String(30))
    password = db.Column(db.String(30))
@app.route('/', methods=['POST', 'GET'])
def login():
    """Render the index page; on POST, persist the submitted credentials.

    FIX: the original read request.form on GET requests too (raising a
    KeyError/400 on a plain page load) and added raw strings to the session
    instead of a User model instance (SQLAlchemy requires mapped objects).
    WARNING: credentials are stored in plain text — hash passwords before
    real use.
    """
    if request.method == 'POST':
        user = User(
            username=request.form['username'],
            password=request.form['password'],
        )
        db.session.add(user)
        db.session.commit()
    return render_template("index.html")
@app.route('/secret')
def secret():
    """Serve the secret page."""
    template_name = "secret.html"
    return render_template(template_name)
if __name__ == "__main__":
app.run(debug=True) | [
{
"point_num": 1,
"id": "any_function_over_40_lines",
"question": "Is any function in this file longer than 40 lines?",
"answer": false
},
{
"point_num": 2,
"id": "every_function_under_20_lines",
"question": "Is every function in this file shorter than 20 lines?",
"answer": true
... | 3 | app.py | PrateekBing/fake-instagram-page |
from conftest import run_validator_for_test_file
def test_always_ok_for_empty_file():
    """An empty file yields no errors at any complexity limit."""
    errors = run_validator_for_test_file('empty.py')
    assert not errors
    errors = run_validator_for_test_file('empty.py', max_annotations_complexity=1)
    assert not errors
def test_ok_for_unannotated_file():
    """Files without annotations cannot exceed any complexity limit."""
    errors = run_validator_for_test_file('unannotated.py', max_annotations_complexity=1)
    assert not errors
def test_ok_for_dynamic_annotations_file():
    """Dynamic annotations are counted: 1 error by default, 2 at limit 1."""
    errors = run_validator_for_test_file('dynamic_annotations.py')
    assert len(errors) == 1
    errors = run_validator_for_test_file('dynamic_annotations.py', max_annotations_complexity=1)
    assert len(errors) == 2
def test_ok_for_string_annotations_file():
    """String (forward-ref) annotations are measured like real ones."""
    errors = run_validator_for_test_file('string_annotations.py')
    assert len(errors) == 1
    errors = run_validator_for_test_file('string_annotations.py', max_annotations_complexity=1)
    assert len(errors) == 2
def test_validates_annotations_complexity_for_annassigments():
    """Annotated assignments (AnnAssign nodes) are validated too."""
    errors = run_validator_for_test_file('var_annotation.py')
    assert len(errors) == 1
def test_ok_for_empty_tuple():
    """An empty Tuple annotation has complexity 2: flagged only at limit 1."""
    errors = run_validator_for_test_file('empty_tuple.py')
    assert not errors
    errors = run_validator_for_test_file('empty_tuple.py', max_annotations_complexity=1)
    assert len(errors) == 1
    errors = run_validator_for_test_file('empty_tuple.py', max_annotations_complexity=2)
    assert not errors
def test_not_raises_errors_for_weird_annotations():
    """Unusual annotation shapes must not crash the validator."""
    errors = run_validator_for_test_file('weird_annotations.py')
    assert not errors
def test_ok_for_empty_string():
    """Empty-string annotations count as complexity 2: two hits at limit 1."""
    errors = run_validator_for_test_file('empty_string.py')
    assert not errors
    errors = run_validator_for_test_file('empty_string.py', max_annotations_complexity=1)
    assert len(errors) == 2
    errors = run_validator_for_test_file('empty_string.py', max_annotations_complexity=2)
    assert not errors
| [
{
"point_num": 1,
"id": "every_function_has_docstring",
"question": "Does every function in this file have a docstring?",
"answer": false
},
{
"point_num": 2,
"id": "every_function_under_20_lines",
"question": "Is every function in this file shorter than 20 lines?",
"answer": tru... | 3 | tests/test_annotations_complexity.py | michael-k/flake8-annotations-complexity |
from . import GIT
from . import functions
from . import root
from pathlib import Path
import datetime
import os
# Labels used in the temporary commit messages created by save().
NONE, STAGED, CHANGED, UNTRACKED = 'none', 'staged', 'changed', 'untracked'
# Marker prefixing every temporary commit created by save().
PREFIX = '_gitz_'
# File (committed into the stash) recording the commit id to restore.
SAVE_FILE = Path('._gitz_save_.txt')
@root.run_in_root
def save(untracked=False, stash=True):
    """Record the working tree as temporary commits (staged / changed /
    optionally untracked), optionally stash a marker, then roll back.

    Returns (commit_id, commit_message) of the saved state.
    """
    timestamp = datetime.datetime.now().strftime('%c')

    def commit(flag, name):
        # Best-effort commit; ignore failures (e.g. nothing to commit).
        try:
            GIT.commit(flag, '%s%s: %s' % (PREFIX, name, timestamp))
        except Exception:
            pass

    commit('-m', STAGED)
    commit('-am', CHANGED)
    if untracked:
        GIT.add('.')
        commit('-m', UNTRACKED)
    state = functions.commit_id()
    if stash:
        # Stash a marker file holding the saved commit id so that
        # restore('pop') can find it later.
        with SAVE_FILE.open('w') as fp:
            fp.write(state)
        GIT.add(str(SAVE_FILE))
        GIT.stash()
    restore(state, clean=False)
    return state, functions.message(state)
@root.run_in_root
def restore(state, clean=True):
    """Hard-reset to a state produced by save().

    `state` may be a commit id, or the literal 'pop' to pop the stash
    created by save() and read the id from SAVE_FILE.
    Returns (commit_id, commit_message) of the restored state.
    """
    if state == 'pop':
        GIT.stash('pop')
        if not SAVE_FILE.exists():
            # Not a gitz stash: put it back and refuse.
            GIT.stash()
            raise ValueError('Stash was not built with gitz-save')
        with SAVE_FILE.open() as fp:
            state = fp.read().strip()
        os.remove(str(SAVE_FILE))
    GIT.reset('--hard', state)
    if clean:
        GIT.clean('-f')
    # Unwind any temporary _gitz_ commits left on top of the real history.
    msg = functions.message('HEAD')
    while msg.startswith(PREFIX):
        msg = functions.message('HEAD~')
        if msg.startswith(PREFIX):
            GIT.reset('--mixed', 'HEAD~')
        else:
            GIT.reset('--soft', 'HEAD~')
    return state, functions.message(state)
| [
{
"point_num": 1,
"id": "has_nested_function_def",
"question": "Does this file contain any function defined inside another function?",
"answer": true
},
{
"point_num": 2,
"id": "every_function_has_docstring",
"question": "Does every function in this file have a docstring?",
"answ... | 3 | gitz/git/save.py | rec/gitz |
"""
exceptions
Created by: Martin Sicho
On: 7/23/20, 10:08 AM
"""
import json
import traceback
class GenUIException(Exception):
    """Base exception wrapping an originating exception; both can be
    serialised (types, messages, tracebacks) to JSON via asJSON()."""

    def __init__(self, original, *args, **kwargs):
        super().__init__(*args)
        # The wrapped root-cause exception; may be None.
        self.original = original

    def getData(self):
        """Extra payload hook for subclasses; empty by default."""
        return ''

    def __repr__(self):
        return self.asJSON()

    def asJSON(self):
        """Serialise this exception and its wrapped original as JSON."""
        original = self.original
        payload = {
            "original": str(type(original)) if original else '',
            "current": str(type(self)),
            "reprOrig": repr(original) if original else '',
            "tracebacks": {
                "original": traceback.extract_tb(original.__traceback__).format() if original else '',
                "current": traceback.extract_tb(self.__traceback__).format(),
            },
            "messages": {
                "original": list(original.args) if original else [],
                "current": list(self.args),
            },
            "data": self.getData(),
        }
        return json.dumps(payload)
| [
{
"point_num": 1,
"id": "every_function_under_20_lines",
"question": "Is every function in this file shorter than 20 lines?",
"answer": true
},
{
"point_num": 2,
"id": "has_nested_function_def",
"question": "Does this file contain any function defined inside another function?",
"... | 3 | src/genui/utils/exceptions.py | Tontolda/genui |
"""
Code originates from: https://machinelearningmastery.com/a-gentle-introduction-to-normality-tests-in-python/
"""
from scipy.stats import shapiro, normaltest, anderson
"""
Shapiro-Wilk Test of Normality
The Shapiro-Wilk Test is more appropriate for small sample sizes (< 50 samples), but can also handle sample sizes as large as 2000.
The Shapiro-Wilk test is used as a numerical means of assessing normality.
"""
def run_shapiro_wilk_normality_test(data, alpha=0.05, print_results=True):
    """Shapiro-Wilk test of normality (best for sample sizes < ~2000).

    FIX: the pass/fail verdict was printed even when print_results=False;
    all console output is now gated on the flag.

    Returns (statistic, p_value).
    """
    stat, p = shapiro(data)
    if print_results:
        print('Statistics=%.3f, p=%.3f' % (stat, p))
        if p > alpha:
            print('Sample looks Gaussian (fail to reject H0) at significance level ', alpha)
        else:
            print('Sample does not look Gaussian (reject H0) at significance level ', alpha)
    return stat, p
def run_dagostino_pearson_test(data, alpha=0.05, print_results=True):
    """Run D'Agostino-Pearson's K^2 normality test.

    Improvements: ``alpha`` now defaults to 0.05, matching
    run_shapiro_wilk_normality_test (backward compatible — existing
    positional callers are unaffected), and all printing is gated behind
    ``print_results``.

    Args:
        data: sample values (needs >= 8 observations for normaltest).
        alpha: significance level for the printed interpretation.
        print_results: when True, print the statistic, p-value and verdict.

    Returns:
        (statistic, p_value) as returned by scipy.stats.normaltest.
    """
    stat, p = normaltest(data)
    if print_results:
        print('Statistics=%.3f, p=%.3f' % (stat, p))
        if p > alpha:
            print('Sample looks Gaussian (fail to reject H0) at significance level ', alpha)
        else:
            print('Sample does not look Gaussian (reject H0) at significance level ', alpha)
    return stat, p
def run_anderson_darling(data, print_results=True):
    """Run the Anderson-Darling normality test.

    Fix: the statistic used to be printed unconditionally, even with
    print_results=False; all diagnostic output is now gated behind the flag.

    Args:
        data: sample values.
        print_results: when True, print the statistic and, for each
            tabulated significance level, whether H0 is rejected.

    Returns:
        The scipy AndersonResult (statistic, critical_values,
        significance_level).
    """
    result = anderson(data)
    if print_results:
        print('Statistic: %.3f' % result.statistic)
        for i in range(len(result.critical_values)):
            sl, cv = result.significance_level[i], result.critical_values[i]
            if result.statistic < result.critical_values[i]:
                print('%.3f: %.3f, data looks normal (fail to reject H0)' % (sl, cv))
            else:
                print('%.3f: %.3f, data does not look normal (reject H0)' % (sl, cv))
    return result
| [
{
"point_num": 1,
"id": "all_params_annotated",
"question": "Does every function parameter in this file have a type annotation (excluding self/cls)?",
"answer": false
},
{
"point_num": 2,
"id": "no_function_exceeds_5_params",
"question": "Does every function in this file take 5 or fe... | 3 | backend/stat/normality_tests.py | Julian-Theis/stat-kiste |
import os
import time
from multiprocessing import Pool # 首字母大写
def test(name):
    """Pool worker: print the child process identity (name, PID, parent PID),
    then sleep 1 second to simulate work."""
    print("[子进程-%s]PID=%d,PPID=%d" % (name, os.getpid(), os.getppid()))
    time.sleep(1)
def main():
    """Run `test` 10 times across a pool of (at most) 5 worker processes."""
    print("[父进程]PID=%d,PPID=%d" % (os.getpid(), os.getppid()))
    p = Pool(5)  # at most 5 workers (defaults to CPU count if omitted)
    for i in range(10):
        # Asynchronous dispatch; apply() would block per task and is not
        # recommended unless synchronous behaviour is really needed.
        p.apply_async(test, args=(i, ))
    p.close()  # stop accepting new tasks
    p.join()   # wait for all workers to finish and reclaim resources
    print("over")
if __name__ == '__main__':
main() | [
{
"point_num": 1,
"id": "every_function_has_docstring",
"question": "Does every function in this file have a docstring?",
"answer": false
},
{
"point_num": 2,
"id": "every_function_under_20_lines",
"question": "Is every function in this file shorter than 20 lines?",
"answer": tru... | 3 | python/5.concurrent/PythonProcess/1.Process_Pool_SubProcess/2.pool.py | dunitian/BaseCode |
# Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
# SPDX-License-Identifier: MIT-0
__copyright__ = ('Copyright Amazon.com, Inc. or its affiliates. '
'All Rights Reserved.')
__version__ = '2.6.0'
__license__ = 'MIT-0'
__author__ = 'Akihiro Nakajima'
__url__ = 'https://github.com/aws-samples/siem-on-amazon-opensearch-service'
from functools import cached_property
from aws_lambda_powertools import Logger
from siem import FileFormatBase, utils
logger = Logger(child=True)
class FileFormatCsv(FileFormatBase):
    """Parser for whitespace-separated CSV-style logs: the first line is a
    header; every later line becomes a dict keyed by the header fields."""

    @cached_property
    def log_count(self):
        # Total number of lines, including the header line.
        # NOTE(review): this consumes self.rawdata; assumes FileFormatBase
        # rewinds the stream before later reads — confirm.
        # _log_count = len(self.rawdata.readlines())
        return sum(1 for line in self.rawdata)

    @property
    def ignore_header_line_number(self):
        # to exclude CSV Header
        return 1

    @cached_property
    def _csv_header(self):
        # Field names: first line of the stream, split on whitespace.
        return self.rawdata.readlines()[0].strip().split()

    def extract_log(self, start, end, logmeta={}):
        # Yield (raw_line, parsed_dict, logmeta) for 1-based lines start..end.
        # NOTE(review): the mutable default logmeta={} is shared across calls;
        # safe only while callers never mutate it.
        start_index = start - 1
        end_index = end
        for logdata in self.rawdata.readlines()[start_index:end_index]:
            lograw = logdata.strip()
            logdict = self.convert_lograw_to_dict(lograw)
            yield (lograw, logdict, logmeta)

    def convert_lograw_to_dict(self, lograw, logconfig=None):
        # Zip one whitespace-split line onto the header fields, then sanitise
        # the key names for indexing (utils.convert_keyname_to_safe_field).
        logdict = dict(zip(self._csv_header, lograw.split()))
        logdict = utils.convert_keyname_to_safe_field(logdict)
        return logdict
| [
{
"point_num": 1,
"id": "more_functions_than_classes",
"question": "Does this file define more functions than classes?",
"answer": true
},
{
"point_num": 2,
"id": "all_params_annotated",
"question": "Does every function parameter in this file have a type annotation (excluding self/cl... | 3 | source/lambda/es_loader/siem/fileformat_csv.py | acsrujan/siem-on-amazon-opensearch-service |
import unittest
import os
import json
from app import *
class RegApiTest(unittest.TestCase):
    """Smoke tests for the regression API: every training endpoint should
    answer HTTP 200."""

    def setUp(self):
        # Factory for fresh Flask test clients, one per request below.
        self.app = api.app
        self.client = self.app.test_client

    def test_start(self):
        """Root endpoint returns the welcome message."""
        result = self.client().post('/')
        self.assertEqual(result.status_code, 200)
        print(result.data)
        self.assertEqual(result.json['result'], 'Welcome to Regression api!')

    def test_train_svr(self):
        # /train_svr — presumably support-vector regression (per endpoint name).
        result = self.client().post('/train_svr')
        self.assertEqual(result.status_code, 200)

    def test_train_rfr(self):
        # /train_rfr — presumably random-forest regression.
        result = self.client().post('/train_rfr')
        self.assertEqual(result.status_code, 200)

    def test_train_lr(self):
        # /train_lr — presumably linear regression.
        result = self.client().post('/train_lr')
        self.assertEqual(result.status_code, 200)

    def test_train_br(self):
        # /train_br — presumably Bayesian ridge regression.
        result = self.client().post('/train_br')
        self.assertEqual(result.status_code, 200)

    def test_train_dtr(self):
        # /train_dtr — presumably decision-tree regression.
        result = self.client().post('/train_dtr')
        self.assertEqual(result.status_code, 200)
| [
{
"point_num": 1,
"id": "all_function_names_snake_case",
"question": "Are all function names in this file written in snake_case?",
"answer": false
},
{
"point_num": 2,
"id": "more_functions_than_classes",
"question": "Does this file define more functions than classes?",
"answer":... | 3 | test_api.py | sushmaakoju/regression-api |
from unittest import TestCase
from unittest.mock import patch
from django_mock_queries.query import MockModel
from bson import ObjectId
from mlplaygrounds.datasets.serializers.models import MLModelLiteSerializer
class TestMLModelLiteSerializer(TestCase):
    """Tests that MLModelLiteSerializer emits only the 'lite' fields
    (uid, name, algorithm) for single and many=True serialization."""

    def setUp(self):
        # Mock model carrying both serialized and non-serialized fields.
        self.valid_instance = MockModel(uid=ObjectId(),
                                        name='test model',
                                        user_id='test_user',
                                        dataset_id='test_dataset',
                                        algorithm='testalg')
        # uid is expected in its string form; user_id/dataset_id are dropped.
        self.expected_data = {
            'uid': str(self.valid_instance.uid),
            'name': self.valid_instance.name,
            'algorithm': self.valid_instance.algorithm,
        }

    def test_serialize_instance(self):
        """A single instance serializes to the expected lite dict."""
        serialized_data = MLModelLiteSerializer(self.valid_instance).data
        self.assertDictEqual(serialized_data, self.expected_data)

    def test_serialize_many(self):
        """many=True serializes a list of instances element-wise."""
        expected_list = [self.expected_data, self.expected_data]
        serialized_data = MLModelLiteSerializer(
            [self.valid_instance, self.valid_instance], many=True).data
        self.assertListEqual(serialized_data, expected_list)
| [
{
"point_num": 1,
"id": "every_function_under_20_lines",
"question": "Is every function in this file shorter than 20 lines?",
"answer": true
},
{
"point_num": 2,
"id": "has_multiple_inheritance",
"question": "Does any class in this file use multiple inheritance?",
"answer": false... | 3 | mlplaygrounds/datasets/tests/test_serializers/test_mlmodel_lite_serializer.py | rennym19/ml-playgrounds |
from sys import maxsize
class Group:
    """A contact-group record with id-tolerant, name-based equality."""

    def __init__(self, name=None, header=None, footer=None, id=None):
        self.name = name
        self.header = header
        self.footer = footer
        self.id = id

    def __repr__(self):
        return "{}:{}".format(self.id, self.name)

    def __eq__(self, other):
        # Names must match; ids must match only when both sides have one.
        ids_compatible = self.id is None or other.id is None or self.id == other.id
        return ids_compatible and self.name == other.name

    def id_or_max(self):
        """Sort key: the numeric id when present, else sys.maxsize."""
        return int(self.id) if self.id else maxsize
| [
{
"point_num": 1,
"id": "all_params_annotated",
"question": "Does every function parameter in this file have a type annotation (excluding self/cls)?",
"answer": false
},
{
"point_num": 2,
"id": "no_function_exceeds_5_params",
"question": "Does every function in this file take 5 or fe... | 3 | model/group_data.py | AlexeyKozlov/python_training-master |
#!/usr/bin/env python
#
# Copyright (c) 2018 Alexandru Catrina <alex@codeissues.net>
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.
import logging
class Log(object):
    """Log wrapper. That's it...

    Class-level facade over the stdlib ``logging`` module: call
    ``configure`` once, then the level-named classmethods anywhere.
    """

    # Keyword arguments handed to logging.basicConfig by configure().
    config = {
        r"format": r"%(asctime)-15s %(message)s"
    }

    @classmethod
    def configure(cls, verbose=False):
        # DEBUG when verbose, INFO otherwise; applies globally via basicConfig.
        cls.config["level"] = logging.DEBUG if verbose else logging.INFO
        logging.basicConfig(**cls.config)

    @classmethod
    def info(cls, message: str):
        logging.info(message)

    @classmethod
    def debug(cls, message: str):
        logging.debug(message)

    @classmethod
    def warn(cls, message: str):
        logging.warning(message)

    @classmethod
    def error(cls, message: str):
        logging.error(message)

    @classmethod
    def fatal(cls, message: str):
        # Log the error, then abort: SystemExit propagates to the caller.
        cls.error(message)
        raise SystemExit(message)
| [
{
"point_num": 1,
"id": "no_function_exceeds_5_params",
"question": "Does every function in this file take 5 or fewer parameters (excluding self/cls)?",
"answer": true
},
{
"point_num": 2,
"id": "every_function_has_docstring",
"question": "Does every function in this file have a docs... | 3 | python3/hap/log.py | lexndru/hap |
import numpy as np
import pandas as pd
import copy
import re
class PreProcess(object):
    """Prepares a feature DataFrame (or dict) for modelling by normalising
    and one-hot encoding the known categorical columns."""

    def __init__(self):
        # Holds the frame/dict being processed by preprocess().
        self.df = None

    def _standardize_string(self, a_str):
        """Lower-case *a_str*, collapsing whitespace runs and non-word
        character runs to underscores; non-string input yields ''.

        Fix: the original also tested ``isinstance(a_str, unicode)``, which
        is undefined on Python 3 and raised NameError for any non-str input.
        """
        if isinstance(a_str, str):
            a_str = re.sub(r'\s+', '_', a_str)
            a_str = re.sub(r'\W+', '_', a_str)
            return a_str.lower()
        return ''

    # Maps a categorical column name to the normaliser applied to each value
    # before one-hot encoding. Entries are plain functions, invoked as
    # ``categorizer(self, x)`` in _categorize_features.
    feature2categorizer = {
        "market_id": _standardize_string,
        # "store_id",
        'store_primary_category': _standardize_string,
        'order_protocol': _standardize_string
    }

    def _categorize_features(self):
        """One-hot encode the known categorical columns of ``self.df``.

        Dict inputs (the online/serving path) are left untouched.
        """
        if type(self.df) is dict:
            pass
        else:
            columns_to_dummify = []
            for feature, categorizer in self.feature2categorizer.items():
                if feature in self.df:
                    # Normalise values first, then mark for dummification.
                    self.df.loc[:, feature] = self.df[feature].apply(
                        lambda x: categorizer(self, x))
                    columns_to_dummify.append(feature)
            self.df = pd.get_dummies(
                self.df,
                columns=columns_to_dummify).copy(deep=True)

    def preprocess(self, df):
        """Return a model-ready version of *df*.

        Dicts are deep-copied and returned as-is; DataFrames are encoded
        via _categorize_features.

        Raises:
            Exception: if *df* is None or empty.
        """
        if df is None or len(df) == 0:
            raise Exception("Dataframe in Preprocessing is not initilized")
        if type(df) is dict:
            self.df = copy.deepcopy(df)
        else:
            self.df = df  # offline training path: keep the caller's reference
        self._categorize_features()
        return self.df
| [
{
"point_num": 1,
"id": "every_function_under_20_lines",
"question": "Is every function in this file shorter than 20 lines?",
"answer": true
},
{
"point_num": 2,
"id": "all_function_names_snake_case",
"question": "Are all function names in this file written in snake_case?",
"answ... | 3 | src/DoorDash/src/process.py | zhouwubai/kaggle |
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
author: Chris Brasnett, University of Bristol, christopher.brasnett@bristol.ac.uk
"""
import numpy as np
from QIIDderivative import derivative
def nominator(F_x, F_y, F_z, F_xx, F_xy, F_yy, F_yz, F_zz, F_xz):
    """Numerator of the Gaussian-curvature formula for an implicit surface:
    the determinant of the Hessian of F bordered by its gradient."""
    bordered_hessian = np.array([
        [F_xx, F_xy, F_xz, F_x],
        [F_xy, F_yy, F_yz, F_y],
        [F_xz, F_yz, F_zz, F_z],
        [F_x,  F_y,  F_z,  0],
    ])
    return np.linalg.det(bordered_hessian)
def denominator(F_x, F_y, F_z):
    """Denominator of the Gaussian-curvature formula: |grad F|**4."""
    gradient_magnitude = np.linalg.norm(np.array([F_x, F_y, F_z]))
    return gradient_magnitude ** 4
def main(x, y, z, lamb):
    """Gaussian curvature K of the implicit QII-D surface at (x, y, z).

    K = -nominator/denominator, built from the partial derivatives
    returned by derivative() in the order
    (F_x, F_y, F_z, F_xx, F_xy, F_yy, F_yz, F_zz, F_xz).
    """
    partials = derivative(x, y, z, lamb)
    numer = nominator(*partials[:9])
    denom = denominator(*partials[:3])
    return -(numer / denom)
{
"point_num": 1,
"id": "all_function_names_snake_case",
"question": "Are all function names in this file written in snake_case?",
"answer": true
},
{
"point_num": 2,
"id": "every_function_under_20_lines",
"question": "Is every function in this file shorter than 20 lines?",
"answ... | 3 | QIIDcurvature.py | csbrasnett/lipid-md |
from handler.handler import Handler
from handler.char_handler import CharHandler
from handler.dot_handler import DotHandler
from handler.star_handler import StarHandler
from handler.abstract_handler import AbstractHandler
from match import Match
def make_pattern():
    """Build a small handler chain (char -> star -> char) and run the
    client code against it.

    Fix: removed the unused locals ``c``, ``d`` and ``t`` (a CharHandler,
    DotHandler and CharHandler that were created and never linked).

    NOTE(review): the pattern/target printed here are informational only —
    client_code() uses its own hard-coded values.
    """
    head = CharHandler()
    head.set_next(StarHandler()).set_next(CharHandler())
    print(head)
    user_pattern = "c.t"
    user_target_string = "act"
    print("pattern:{}, target_string:{}".format(user_pattern, user_target_string))
    res = client_code(head)
    print("final res:", res)
def client_code(handler: Handler) -> None:
    """
    The client code is usually suited to work with a single handler. In most
    cases, it is not even aware that the handler is part of a chain.
    """
    # NOTE(review): the pattern/target are hard-coded here and
    # pattern_pos/target_string_pos are effectively unused beyond the
    # constant 0 passed into handle().
    user_pattern = "c*t"
    user_target_string = "xxxxcat"
    pattern_pos = 0
    target_string_pos = 0
    # Try a match starting at each position; handle() returning -1 appears
    # to mean "no match starting here", so we slide one position right.
    for index, ele in enumerate(user_target_string):
        print(f"\nClient: Who wants a {ele}?")
        result = handler.handle(pattern_pos, user_pattern, index, user_target_string)
        print(result)
        if result == -1 and index < len(user_target_string):
            continue
        else:
            break
    return result
if __name__ == "__main__":
#x = Match("c.t")
#x.find_first_ln("ffffffffffffffack")
make_pattern()
| [
{
"point_num": 1,
"id": "has_nested_function_def",
"question": "Does this file contain any function defined inside another function?",
"answer": false
},
{
"point_num": 2,
"id": "every_function_has_docstring",
"question": "Does every function in this file have a docstring?",
"ans... | 3 | client.py | melrick8196/string-matcher |
# auth0login/auth0backend.py
from urllib import request
from jose import jwt
from social_core.backends.oauth import BaseOAuth2
from accounts.models import UserProfile
class Auth0(BaseOAuth2):
    """Auth0 OAuth authentication backend"""
    name = 'auth0'
    SCOPE_SEPARATOR = ' '
    ACCESS_TOKEN_METHOD = 'POST'
    REDIRECT_STATE = False
    # Response fields copied into social-auth's extra_data for the user.
    EXTRA_DATA = [
        ('picture', 'picture'),
        ('email', 'email')
    ]

    def authorization_url(self):
        """Tenant-specific Auth0 authorize endpoint."""
        return 'https://' + self.setting('DOMAIN') + '/authorize'

    def access_token_url(self):
        """Tenant-specific Auth0 token endpoint."""
        return 'https://' + self.setting('DOMAIN') + '/oauth/token'

    def get_user_id(self, details, response):
        """Return current user id."""
        # NOTE(review): leftover debug prints; consider the logging module.
        print("is this using user ID?")
        print("user id: {}".format(details['user_id']))
        return details['user_id']

    def get_user_details(self, response):
        """Validate the id_token against the tenant's JWKS and map its
        claims onto the social-auth user-details dict."""
        # Obtain JWT and the keys to validate the signature
        id_token = response.get('id_token')
        jwks = request.urlopen('https://' + self.setting('DOMAIN') + '/.well-known/jwks.json')
        issuer = 'https://' + self.setting('DOMAIN') + '/'
        audience = self.setting('KEY')  # CLIENT_ID
        payload = jwt.decode(id_token, jwks.read(), algorithms=['RS256'], audience=audience, issuer=issuer)
        # 'sub' is Auth0's stable subject identifier, used as user_id above.
        return {'username': payload['nickname'],
                'first_name': payload['name'],
                'picture': payload['picture'],
                'user_id': payload['sub'],
                'email': payload['email']}
| [
{
"point_num": 1,
"id": "every_function_has_docstring",
"question": "Does every function in this file have a docstring?",
"answer": false
},
{
"point_num": 2,
"id": "every_function_under_20_lines",
"question": "Is every function in this file shorter than 20 lines?",
"answer": tru... | 3 | auth0login/auth0backend.py | chop-dbhi/biorepo-portal |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# filters.py
#
# Authors:
# - Mamadou CISSE <mciissee.@gmail.com>
#
from django.contrib.auth import get_user_model
from django_filters import rest_framework as filters
from django.db.models import Q
User = get_user_model()
class UserFilter(filters.FilterSet):
    """Filter set for user querysets.

    Adds a computed 'is_admin' boolean filter on top of the model's own
    'is_editor' field: admin means staff OR superuser.
    """
    is_admin = filters.BooleanFilter(label='Admin', method='filter_is_admin')

    class Meta:
        model = User
        fields = {
            'is_editor': ['exact'],
        }

    def filter_is_admin(self, queryset, name, value):
        """Keep staff-or-superusers when *value* is truthy; otherwise keep
        only users who are neither."""
        if value:
            return queryset.filter(
                Q(is_staff=True) | Q(is_superuser=True)
            )
        return queryset.filter(
            Q(is_staff=False) & Q(is_superuser=False)
        )
| [
{
"point_num": 1,
"id": "more_functions_than_classes",
"question": "Does this file define more functions than classes?",
"answer": false
},
{
"point_num": 2,
"id": "every_class_has_docstring",
"question": "Does every class in this file have a docstring?",
"answer": false
},
{... | 3 | apps/pl_users/filters.py | PremierLangage/platon_assets |
import os
import time
from Data.parameters import Data
from filenames import file_extention
from get_dir import pwd
from reuse_func import GetData
class BlockwiseCsv():
    """Selenium helper that downloads the block-wise teacher-attendance CSV
    and checks that the file landed in the download directory."""

    def __init__(self, driver, year, month):
        self.driver = driver
        self.year = year.strip()
        self.month = month.strip()

    def click_download_icon_of_blocks(self):
        """Navigate to the blocks report, trigger the CSV download and
        return True once the expected file exists on disk."""
        cal = GetData()
        files = file_extention()
        cal.click_on_state(self.driver)
        cal.page_loading(self.driver)
        self.driver.find_element_by_id(Data.SAR_Blocks_btn).click()
        cal.page_loading(self.driver)
        # Fixed sleeps give the page and the browser download time to settle.
        time.sleep(5)
        self.driver.find_element_by_id(Data.Download).click()
        time.sleep(5)
        p = pwd()
        # Expected name: <download dir>/<report prefix><current date>.csv
        self.filename = p.get_download_dir() +'/'+files.teacher_block_download()+cal.get_current_date()+".csv"
        print(self.filename)
        return os.path.isfile(self.filename)

    def remove_csv(self):
        """Delete the CSV downloaded by click_download_icon_of_blocks()."""
        os.remove(self.filename)
| [
{
"point_num": 1,
"id": "all_function_names_snake_case",
"question": "Are all function names in this file written in snake_case?",
"answer": true
},
{
"point_num": 2,
"id": "every_function_under_20_lines",
"question": "Is every function in this file shorter than 20 lines?",
"answ... | 3 | tests/src/Teacher_Attendance/download_blockwise_csv.py | sreenivas8084/cQube |
from more.jinja2 import Jinja2App
class App(Jinja2App):
    """Jinja2-enabled Morepath app used as a fixture for template tests."""
    pass
@App.path(path="persons/{name}")
class Person:
def __init__(self, name):
self.name = name
@App.template_directory()
def get_template_dir():
    """Template directory registered for App."""
    return "templates"
@App.html(model=Person, template="person_inherit.jinja2")
def person_default(self, request):
    """HTML view for Person: expose the name to the inheriting template."""
    return {"name": self.name}
class SubApp(App):
    """Child app used to exercise template-directory overriding."""
    pass
@SubApp.template_directory()
def get_template_dir_override():
    """Template directory registered for SubApp (overrides the parent's)."""
    return "templates_override"
| [
{
"point_num": 1,
"id": "has_nested_function_def",
"question": "Does this file contain any function defined inside another function?",
"answer": false
},
{
"point_num": 2,
"id": "more_functions_than_classes",
"question": "Does this file define more functions than classes?",
"answ... | 3 | more/jinja2/tests/fixtures/override_template_inheritance.py | sgaist/more.jinja2 |
from google.appengine.ext import webapp
from google.appengine.ext.webapp.util import run_wsgi_app
from google.appengine.ext import db
from google.appengine.ext.webapp import template
from google.appengine.ext.db import djangoforms
from google.appengine.api import users
import hfwwgDB
class SightingForm(djangoforms.ModelForm):
    """Auto-generated form for the Sighting model; 'which_user' is excluded
    from the rendered form fields."""
    class Meta:
        model = hfwwgDB.Sighting
        exclude = ['which_user']
class SightingInputPage(webapp.RequestHandler):
    """GET handler that assembles the sighting-report page from templates."""
    def get(self):
        # Page = header + form opening + generated form fields + form
        # closing (with submit caption) + footer.
        html = template.render('templates/header.html', {'title': 'Report a Possible Sighting'})
        html = html + template.render('templates/form_start.html', {})
        html = html + str(SightingForm(auto_id=False))
        html = html + template.render('templates/form_end.html', {'sub_title': 'Submit Sighting'})
        html = html + template.render('templates/footer.html', {'links': ''})
        self.response.out.write(html)
app = webapp.WSGIApplication([('/.*', SightingInputPage)], debug=True)
def main():
    """Entry point: serve the catch-all WSGI app."""
    run_wsgi_app(app)
if __name__ == '__main__':
main()
| [
{
"point_num": 1,
"id": "every_function_has_docstring",
"question": "Does every function in this file have a docstring?",
"answer": false
},
{
"point_num": 2,
"id": "every_function_under_20_lines",
"question": "Is every function in this file shorter than 20 lines?",
"answer": tru... | 3 | hfpy_code/chapter10/page372.py | leobarros/use_cabeca_python |
# delwin 2016
from __future__ import unicode_literals
from django.conf import settings
from django.db import models
from allauth.account.signals import user_logged_in, user_signed_up
import stripe
stripe.api_key = settings.STRIPE_SECRET_KEY
# Create your models here.
class profile(models.Model):
    """Basic per-user profile: display name plus free-text description."""
    name = models.CharField(max_length=120)
    user = models.OneToOneField(settings.AUTH_USER_MODEL, null=True, blank=True)
    descriptions = models.TextField(default = 'description default text')

    def __unicode__(self):
        # Python 2 string representation (predates Python 3's __str__).
        return self.name
class userStripe(models.Model):
    """Links a Django user to their Stripe customer id."""
    user = models.OneToOneField(settings.AUTH_USER_MODEL)
    stripe_id = models.CharField(max_length=200, null=True, blank=True)

    def __unicode__(self):
        # Prefer the Stripe id; fall back to the username before one exists.
        if self.stripe_id:
            return str(self.stripe_id)
        else:
            return self.user.username
def stripeCallback(sender, request, user, **kwargs):
    """user_logged_in signal handler: ensure the user has a Stripe
    customer record, creating one via the Stripe API when missing."""
    user_stripe_account, created = userStripe.objects.get_or_create(user=user)
    if created:
        print('created for %s'%(user.username))
    if user_stripe_account.stripe_id is None or user_stripe_account.stripe_id =='':
        # No Stripe customer yet: create one remotely and persist its id.
        new_stripe_id = stripe.Customer.create(email=user.email)
        user_stripe_account.stripe_id = new_stripe_id['id']
        user_stripe_account.save()
def profileCallback(sender, request, user, **kwargs):
    """user_signed_up signal handler: create the user's profile, seeding
    its name with the username."""
    userProfile, is_created = profile.objects.get_or_create(user=user)
    if is_created:
        userProfile.name = user.username
        userProfile.save()
user_logged_in.connect(stripeCallback)
user_signed_up.connect(profileCallback)
| [
{
"point_num": 1,
"id": "every_function_under_20_lines",
"question": "Is every function in this file shorter than 20 lines?",
"answer": true
},
{
"point_num": 2,
"id": "all_params_annotated",
"question": "Does every function parameter in this file have a type annotation (excluding se... | 3 | MODELS/sih/models.py | mladenangel/myprojects |
import pickle
def save_obj(obj, name):
    """Pickle *obj* to '<name>.pkl' (protocol 2 for Python 2 compatibility)."""
    target_path = name + '.pkl'
    with open(target_path, 'wb') as handle:
        pickle.dump(obj, handle, protocol=2)
def load_obj(name):
    """Unpickle and return the object stored at '<name>.pkl'."""
    with open(name + '.pkl', 'rb') as handle:
        return pickle.load(handle)
acro = load_obj("acronymsDict")
# Spit out
# for a in acro.keys():
# print(a + " : " + acro[a])
acroLines = open("acroClean.txt","r").readlines()
acronymsDict = dict()
for a in acroLines:
k,v = a.split(" : ")
k,v = k.strip().lower(),v.strip().lower()
acronymsDict[k] = v
print(len(acronymsDict))
save_obj(acronymsDict,"acronymsDict")
print(acronymsDict["plz"])
| [
{
"point_num": 1,
"id": "all_params_annotated",
"question": "Does every function parameter in this file have a type annotation (excluding self/cls)?",
"answer": false
},
{
"point_num": 2,
"id": "has_nested_function_def",
"question": "Does this file contain any function defined inside... | 3 | models/models_code/Acronyms/Clean.py | tpsatish95/SocialTextFilter |
# Helper class that stores all relevant information of a document
class document:
    """Record for one document: bibliographic metadata plus mutable
    clustering state (colour, neighbouring clusters, traversal flag)."""

    def __init__(self, id, externalid=0, title="", author="", publishingYear=0,
                 journal="", terms=None, uri=""):
        # Fix: ``terms`` formerly defaulted to a shared mutable list ([]);
        # None is backward compatible and avoids the mutable-default pitfall.
        self.id = id
        self.externalid = externalid
        self.title = title
        self.author = author
        self.publishingYear = str(publishingYear).strip()
        self.journal = journal
        # Keep only truthy terms (drops '' and None entries).
        self.terms = list(filter(None, terms if terms is not None else []))
        self.uri = uri.rstrip()
        self.color = -2        # cluster colour; -2 means "unassigned"
        self.ende = False      # traversal/end flag used by the clustering code
        self.nbClusters = []   # colours of neighbouring clusters (deduplicated)
        self.fulltext = ""

    def getTerms(self):
        """Return the document's (filtered) term list."""
        return self.terms

    def appendNeighborCluster(self, color):
        """Record *color* as a neighbouring cluster, ignoring duplicates."""
        if color not in self.nbClusters:
            self.nbClusters.append(color)

    def extendNeighborCluster(self, colorList):
        """Merge every colour in *colorList* into the neighbour list."""
        for i in colorList:
            self.appendNeighborCluster(i)

    def returnNeighborCluster(self):
        """Return the list of neighbouring cluster colours."""
        return self.nbClusters

    def setColor(self, color):
        self.color = color

    def getColor(self):
        return self.color

    def setEnde(self, end):
        self.ende = end

    def getEnde(self):
        return self.ende
| [
{
"point_num": 1,
"id": "all_function_names_snake_case",
"question": "Are all function names in this file written in snake_case?",
"answer": false
},
{
"point_num": 2,
"id": "no_function_exceeds_5_params",
"question": "Does every function in this file take 5 or fewer parameters (excl... | 3 | src/DocClustering/data/document.py | jd-s/DocClustering |
#!/usr/bin/env python
# Copyright (c) 2021, Farid Rashidi Mehrabadi All rights reserved.
# ======================================================================================
# Author : Farid Rashidi Mehrabadi (farid.rashidimehrabadi@nih.gov)
# Last Update: Aug 14, 2020
# Description: cleaning
# ======================================================================================
import glob
def _is_ok(name):
file = open(name)
body = file.read()
file.close()
a = body.count("&& echo Done! )")
b = body.count("Done!\n")
if a == 0 and b == 1:
return True
else:
return a == b
def after01(config):
    """Print, per pipeline step, how many log files under
    <tmpdir>/log/<step>/*.o do NOT look completed (see _is_ok).

    RNA runs cover the full indexing/expression/velocity step list; DNA
    runs use the reduced list.
    """
    if config["isrna"]:
        steps = [
            "s01indexing",
            "s02mapping",
            "s03indexing",
            "s04mapping",
            "s05calling",
            "s06jointcalling",
            "s07merging",
            "s08annotating",
            "s09expressing",
            "s10velocitying",
        ]
    else:
        steps = [
            "s02mapping",
            "s04mapping",
            "s05calling",
            "s06jointcalling",
            "s07merging",
            "s08annotating",
        ]
    conds = {}
    for cond in steps:
        x = 0  # number of incomplete/failed logs for this step
        for file in glob.glob(f"{config['tmpdir']}/log/{cond}/*.o"):
            if not _is_ok(file):
                x += 1
        conds[cond] = x
    print(conds)
| [
{
"point_num": 1,
"id": "has_nested_function_def",
"question": "Does this file contain any function defined inside another function?",
"answer": false
},
{
"point_num": 2,
"id": "every_function_has_docstring",
"question": "Does every function in this file have a docstring?",
"ans... | 3 | trisicell/commands/mcalling/z01status.py | faridrashidi/trisicell |
import torch
from mmdet3d.models.builder import build_voxel_encoder
def test_pillar_feature_net():
    """PillarFeatureNet should reduce (voxels, points, features) input to
    one 64-dim vector per voxel."""
    pillar_feature_net_cfg = dict(
        type='PillarFeatureNet',
        in_channels=5,
        feat_channels=[64],
        with_distance=False,
        voxel_size=(0.2, 0.2, 8),
        point_cloud_range=(-51.2, -51.2, -5.0, 51.2, 51.2, 3.0),
        norm_cfg=dict(type='BN1d', eps=1e-3, momentum=0.01))
    pillar_feature_net = build_voxel_encoder(pillar_feature_net_cfg)
    # 97297 voxels, up to 20 points each, 5 features per point.
    features = torch.rand([97297, 20, 5])
    num_voxels = torch.randint(1, 100, [97297])
    coors = torch.randint(0, 100, [97297, 4])
    features = pillar_feature_net(features, num_voxels, coors)
    assert features.shape == torch.Size([97297, 64])
def test_hard_simple_VFE():
    """HardSimpleVFE should average each voxel's points into one 5-feature
    vector per voxel."""
    hard_simple_VFE_cfg = dict(type='HardSimpleVFE', num_features=5)
    hard_simple_VFE = build_voxel_encoder(hard_simple_VFE_cfg)
    features = torch.rand([240000, 10, 5])
    num_voxels = torch.randint(1, 10, [240000])
    outputs = hard_simple_VFE(features, num_voxels, None)
    assert outputs.shape == torch.Size([240000, 5])
| [
{
"point_num": 1,
"id": "every_function_under_20_lines",
"question": "Is every function in this file shorter than 20 lines?",
"answer": true
},
{
"point_num": 2,
"id": "has_nested_function_def",
"question": "Does this file contain any function defined inside another function?",
"... | 3 | tests/test_voxel_encoders.py | BB88Lee/mmdetection3d |
#!/usr/bin/env python3
import sys
from collections import defaultdict
def other(pair, x):
    """Return the end of *pair* that is not *x* (assumes x is one end)."""
    first, second = pair
    return first if x == second else second
def search(m, avail, cur):
    """Maximum total strength of a bridge starting at port value *cur*.

    m maps a port value to the components exposing it; avail is the set of
    unused components, mutated in place for backtracking (remove before
    recursing, re-add after).
    """
    top = 0
    for choice in m[cur]:
        if choice not in avail: continue
        avail.remove(choice)
        # Continue from the component's other port; strength adds both ends.
        val = search(m, avail, other(choice, cur)) + choice[0] + choice[1]
        top = max(top, val)
        avail.add(choice)
    return top
def main(args):
    """Read 'a/b' components from stdin and print the strongest bridge
    strength starting from port 0 (Advent of Code 2017 day 24, part 1)."""
    data = [tuple(map(int, s.strip().split("/"))) for s in sys.stdin]
    print(len(data), len(set(data)))  # sanity check for duplicate components
    avail = set(data)
    # Index every component under both of its port values.
    m = defaultdict(list)
    for a in avail:
        m[a[0]] += [a]
        m[a[1]] += [a]
    print(search(m, avail, 0))
if __name__ == '__main__':
sys.exit(main(sys.argv))
| [
{
"point_num": 1,
"id": "every_function_under_20_lines",
"question": "Is every function in this file shorter than 20 lines?",
"answer": true
},
{
"point_num": 2,
"id": "all_function_names_snake_case",
"question": "Are all function names in this file written in snake_case?",
"answ... | 3 | 2017/24a.py | msullivan/advent-of-code |
import sqlite3
import pandas as pd
conn = sqlite3.connect('demo_data.sqlite3')
curs = conn.cursor()
create_demo_table = """
CREATE TABLE demo (
s varchar(5),
x int,
y int
);"""
curs.execute(create_demo_table)
conn.commit()
curs.execute("""INSERT INTO demo (
s, x, y) VALUES""" + str(('g', 3, 9)))
conn.commit()
curs.execute("""INSERT INTO demo (
s, x, y) VALUES""" + str(('v', 5, 7)))
conn.commit()
curs.execute("""INSERT INTO demo (
s, x, y) VALUES""" + str(('f', 8, 7)))
conn.commit()
# Queries for SC questions
# Count how many rows you have - it should be 3!
def row_count():
    """Print the total number of rows in demo (expected: 3)."""
    print(pd.read_sql_query("""SELECT COUNT(*) as row_count
    FROM demo;""", conn))
# row_count
# 0 3
# How many rows are there where both x and y are at least 5?
def row_xy5():
    """Print how many rows have both x and y at least 5 (expected: 2)."""
    print(pd.read_sql_query("""SELECT COUNT(*) as row_count
    FROM demo
    WHERE x >= 5
    AND y >= 5;""", conn))
# row_count
# 0 2
# How many unique values of y are there (hint - COUNT() can accept
# a keyword DISTINCT)?
def y_values():
    """Print the number of distinct y values in demo (expected: 2)."""
    print(pd.read_sql_query("""SELECT COUNT(distinct y) as y_values
    FROM demo;""", conn))
# y_values
# 0 2
| [
{
"point_num": 1,
"id": "every_function_has_docstring",
"question": "Does every function in this file have a docstring?",
"answer": false
},
{
"point_num": 2,
"id": "all_function_names_snake_case",
"question": "Are all function names in this file written in snake_case?",
"answer"... | 3 | demo_data.py | krsmith/DS-Unit-3-Sprint-2-SQL-and-Databases |
from datetime import datetime
import unittest
from unittest.mock import MagicMock
import numpy as np
from pyhsi.cameras import BaslerCamera
class MockGrab:
    """Stand-in for a camera grab result: fixed pixel data, always succeeds."""

    def __init__(self, data):
        # Mimics the real grab result's .Array pixel attribute.
        self.Array = data

    def GrabSucceeded(self):
        """Always report a successful grab."""
        return True

    def Release(self):
        """No-op; the real implementation releases camera buffers here."""
        pass
class TestBaslerCamera(unittest.TestCase):
    """Unit tests for BaslerCamera using a mocked pylon device and stage."""

    def setUp(self):
        # Camera under test wired to mock hardware; the stage only needs
        # a default_velocity attribute for capture().
        self.mock_device = MagicMock()
        self.mock_stage = MagicMock()
        self.mock_stage.default_velocity = 20
        self.cam = BaslerCamera(device=self.mock_device)

    def test_capture(self):
        """Frames grabbed while the stage moves are assembled into a cube."""
        # Device yields two frames; the stage reports moving, moving, stopped.
        self.mock_device.RetrieveResult = MagicMock(side_effect=[
            MockGrab([[0, 12], [3, 100]]),
            MockGrab([[9, 8], [31, 5]])
        ])
        self.mock_stage.is_moving = MagicMock(side_effect=[True, True, False])
        data = self.cam.capture(self.mock_stage, [0, 100])
        # Expected cube reflects capture()'s per-frame reordering/flip.
        target = np.array([[[12, 100], [0, 3]], [[8, 5], [9, 31]]])
        np.testing.assert_array_equal(data, target)

    def test_file_name_basic(self):
        """Plain file names just gain the .hdr extension."""
        fn = "test_sample"
        out = self.cam._process_file_name(fn, datetime(2020, 6, 20),
                                          0, 100, 10, (227, 300, 400))
        self.assertEqual(out, "test_sample.hdr")

    def test_file_name_fields(self):
        """{date}/{time}/{exp}/{frames} placeholders are substituted."""
        fn = "sample_{date}_{time}_exp={exp}_{frames}_frames"
        out = self.cam._process_file_name(fn, datetime(2020, 6, 20, 13, 40),
                                          0, 100, 10, (227, 300, 400))
        target = "sample_2020-06-20_13:40:00_exp=4000_227_frames.hdr"
        self.assertEqual(out, target)
if __name__ == "__main__":
unittest.main()
| [
{
"point_num": 1,
"id": "every_function_has_docstring",
"question": "Does every function in this file have a docstring?",
"answer": false
},
{
"point_num": 2,
"id": "every_function_under_20_lines",
"question": "Is every function in this file shorter than 20 lines?",
"answer": tru... | 3 | test/test_cameras.py | rddunphy/pyHSI |
#!/usr/bin/env python3
# Copyright (c) 2016-2019 The CounosH Core developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
"""Test RPC commands for signing and verifying messages."""
from test_framework.test_framework import CounosHTestFramework
from test_framework.util import assert_equal
class SignMessagesTest(CounosHTestFramework):
    """Exercise the signmessagewithprivkey / signmessage / verifymessage RPCs."""

    def set_test_params(self):
        self.setup_clean_chain = True
        self.num_nodes = 1
        # Legacy (P2PKH) addresses: message signing is only defined for them.
        self.extra_args = [["-addresstype=legacy"]]

    def skip_test_if_missing_module(self):
        self.skip_if_no_wallet()

    def run_test(self):
        message = 'This is just a test message'
        self.log.info('test signing with priv_key')
        # Known key/address pair with a pre-computed expected signature.
        priv_key = 'cUeKHd5orzT3mz8P9pxyREHfsWtVfgsfDjiZZBcjUBAaGk1BTj7N'
        address = 'mpLQjfK79b7CCV4VMJWEWAj5Mpx8Up5zxB'
        expected_signature = 'INbVnW4e6PeRmsv2Qgu8NuopvrVjkcxob+sX8OcZG0SALhWybUjzMLPdAsXI46YZGb0KQTRii+wWIQzRpG/U+S0='
        signature = self.nodes[0].signmessagewithprivkey(priv_key, message)
        assert_equal(expected_signature, signature)
        assert self.nodes[0].verifymessage(address, signature, message)
        self.log.info('test signing with an address with wallet')
        address = self.nodes[0].getnewaddress()
        signature = self.nodes[0].signmessage(address, message)
        assert self.nodes[0].verifymessage(address, signature, message)
        self.log.info('test verifying with another address should not work')
        # A signature binds to both the message and the signing address.
        other_address = self.nodes[0].getnewaddress()
        other_signature = self.nodes[0].signmessage(other_address, message)
        assert not self.nodes[0].verifymessage(other_address, signature, message)
        assert not self.nodes[0].verifymessage(address, other_signature, message)
if __name__ == '__main__':
SignMessagesTest().main()
| [
{
"point_num": 1,
"id": "more_functions_than_classes",
"question": "Does this file define more functions than classes?",
"answer": true
},
{
"point_num": 2,
"id": "every_function_has_docstring",
"question": "Does every function in this file have a docstring?",
"answer": false
}... | 3 | test/functional/rpc_signmessage.py | CounosH/cch |
class RSVPRouter(object):
    """
    A router to control all database operations on models in the
    rsvp application.

    Models whose app label appears in ``apps`` are read from and written
    to the ``rsvp_db`` connection; any other model falls through to the
    next router in the chain (signalled by returning None).
    """
    # App labels handled by this router.
    apps = ["rsvp"]
    # Database alias used for the apps above.
    using = "rsvp_db"

    def _db_for_app(self, model):
        # Shared lookup used by both read and write routing to avoid
        # duplicating the same membership test.
        if model._meta.app_label in self.apps:
            return self.using
        return None

    def db_for_read(self, model, **hints):
        """Route reads for rsvp models to the rsvp database."""
        return self._db_for_app(model)

    def db_for_write(self, model, **hints):
        """Route writes for rsvp models to the rsvp database."""
        return self._db_for_app(model)

    def allow_relation(self, obj1, obj2, **hints):
        """
        Allow relations if a model in the app is involved.
        """
        if obj1._meta.app_label in self.apps or obj2._meta.app_label in self.apps:
            return True
        return None

    def allow_syncdb(self, db, model):
        """Make sure the apps we care about appear in the db"""
        # South's own bookkeeping tables may live in any database.
        if model._meta.app_label in ['south']:
            return True
        if db == self.using:
            return model._meta.app_label in self.apps
        elif model._meta.app_label in self.apps:
            return False
        return None

    def allow_migrate(self, db, model):
        """Only migrate rsvp models on the rsvp database (legacy signature)."""
        if db == self.using:
            return model._meta.app_label in self.apps
        elif model._meta.app_label in self.apps:
            return False
        return None
| [
{
"point_num": 1,
"id": "no_function_exceeds_5_params",
"question": "Does every function in this file take 5 or fewer parameters (excluding self/cls)?",
"answer": true
},
{
"point_num": 2,
"id": "any_function_over_40_lines",
"question": "Is any function in this file longer than 40 li... | 3 | kyleandemily/rsvp/db_router.py | ehayne/KyleAndEmily |
import random, copy
import cv2 as cv
from .augmenter import Augmenter
class Rotator(Augmenter):
    '''
    Augmenter that rotates the SampleImages randomly based on
    the min_angle and max_angle parameters.
    '''

    def __init__(self, min_angle, max_angle, **kwargs):
        super().__init__(**kwargs)
        self.min_angle = min_angle
        self.max_angle = max_angle

    def augment(self, sample):
        # Draw a rotation angle uniformly from [min_angle, max_angle].
        height, width, _channels = sample.image.shape
        chosen_angle = random.uniform(self.min_angle, self.max_angle)
        matrix = cv.getRotationMatrix2D(sample.roi_center, chosen_angle, 1)
        rotated_image = cv.warpAffine(sample.image, matrix, (width, height))
        # Shallow-copy the sample so only the image reference is replaced.
        result = copy.copy(sample)
        result.image = rotated_image
        return result
| [
{
"point_num": 1,
"id": "every_function_under_20_lines",
"question": "Is every function in this file shorter than 20 lines?",
"answer": true
},
{
"point_num": 2,
"id": "all_params_annotated",
"question": "Does every function parameter in this file have a type annotation (excluding se... | 3 | jakso_ml/training_data/rotator.py | JaksoSoftware/jakso-ml |
import numpy as np
import time
import cv2
import mss
def shot(height, width, top=0, left=0):
    """Grab a region of the screen and return it as a BGR numpy array.

    Args:
        height: Height in pixels of the captured region.
        width: Width in pixels of the captured region.
        top: Y offset of the region's top edge (default 0 keeps the
            original top-left-corner behavior).
        left: X offset of the region's left edge (default 0).

    Returns:
        A (height, width, 3) array; the alpha channel of the captured
        frame is dropped.
    """
    with mss.mss() as sct:
        frame = np.array(
            sct.grab(
                {'top': top,
                 'left': left,
                 'width': width,
                 'height': height}
            )
        )
        # Keep only the first three (BGR) channels.
        return frame[:, :, :3]
def record_screen(signal, fname, width, height, frame_rate=24.0):
    """Capture the screen to a video file until ``signal`` is cleared.

    Args:
        signal: Shared flag object (e.g. multiprocessing.Value); frames
            are captured while ``signal.value == 1``.
        fname: Output video file path.
        width: Frame width in pixels.
        height: Frame height in pixels.
        frame_rate: Frames per second written to the file (default 24.0).
    """
    video = cv2.VideoWriter(fname, cv2.VideoWriter_fourcc(*'MJPG'), frame_rate, (width, height), True)
    try:
        while signal.value == 1:
            video.write(shot(height, width))
    finally:
        # Always release the writer so the video file is finalized even
        # if screen capture raises mid-loop (original leaked it).
        video.release()
| [
{
"point_num": 1,
"id": "all_params_annotated",
"question": "Does every function parameter in this file have a type annotation (excluding self/cls)?",
"answer": false
},
{
"point_num": 2,
"id": "has_nested_function_def",
"question": "Does this file contain any function defined inside... | 3 | utils/record_screen.py | Sindy98/spc2 |
#Perform Edge Detection using Roberts Cross Gradient & Sobel Operators over an Image
import cv2
import math
import numpy as np
def robertCrossGradient(image):
    """Perform Roberts cross-gradient edge detection on a BGR image.

    Roberts cross kernels:
        Gx:  0  1      Gy:  1  0
            -1  0           0 -1

    Args:
        image: BGR image as returned by cv2.imread.

    Returns:
        Grayscale uint8 image of gradient magnitudes (clamped to 255).
        The last row and column keep the grayscale values, as in the
        original implementation.
    """
    gray = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)  # Converting Image to Gray Scale
    resultant_image = gray.copy()
    # Compute in signed integers: uint8 subtraction wraps around
    # (e.g. 10 - 20 == 246), which corrupted the gradients.
    signed = gray.astype(np.int32)
    for i in range(0, gray.shape[0] - 1):
        for j in range(0, gray.shape[1] - 1):
            gx = signed[i, j + 1] - signed[i + 1, j]
            gy = signed[i, j] - signed[i + 1, j + 1]
            # Magnitude can reach ~360; clamp into the uint8 range.
            resultant_image[i, j] = min(255, int(math.sqrt(gx * gx + gy * gy)))
    return resultant_image
def sobelOperator(image):
    """Perform Sobel edge detection on a BGR image.

    Sobel kernels:
        Gx: -1 -2 -1     Gy: -1  0  1
             0  0  0         -2  0  2
             1  2  1         -1  0  1

    Args:
        image: BGR image as returned by cv2.imread.

    Returns:
        Grayscale uint8 image of gradient magnitudes (clamped to 255).
    """
    gray = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)  # Converting Image to Gray Scale
    resultant_image = gray.copy()
    rows, cols = gray.shape
    # Zero-pad one pixel on each side so the 3x3 kernel applies at the
    # borders; compute in signed ints to avoid uint8 wraparound in the
    # weighted sums.
    padded = np.zeros((rows + 2, cols + 2), dtype=np.int32)
    padded[1:rows + 1, 1:cols + 1] = gray
    for i in range(1, rows + 1):
        for j in range(1, cols + 1):
            # Original code mistakenly subtracted image[i+1, j-1] (a
            # repeat of the first term) instead of image[i-1, j+1] in fx.
            fx = (padded[i + 1, j - 1] + 2 * padded[i + 1, j] + padded[i + 1, j + 1]
                  - padded[i - 1, j - 1] - 2 * padded[i - 1, j] - padded[i - 1, j + 1])
            fy = (padded[i - 1, j + 1] + 2 * padded[i, j + 1] + padded[i + 1, j + 1]
                  - padded[i - 1, j - 1] - 2 * padded[i, j - 1] - padded[i + 1, j - 1])
            # Magnitude can far exceed 255; clamp before storing as uint8.
            resultant_image[i - 1, j - 1] = min(255, int(math.sqrt(fx * fx + fy * fy)))
    return resultant_image
# Demo: run Sobel edge detection on a sample image and display it.
img = cv2.imread('image5.jpg')
if img is None:
    # cv2.imread returns None (instead of raising) on a missing or
    # unreadable file, which would otherwise surface as a cryptic
    # cvtColor error inside sobelOperator.
    raise FileNotFoundError('could not read image5.jpg')
output = sobelOperator(img)
cv2.imshow('image', output)
cv2.waitKey(0)
cv2.destroyAllWindows()
| [
{
"point_num": 1,
"id": "every_function_under_20_lines",
"question": "Is every function in this file shorter than 20 lines?",
"answer": false
},
{
"point_num": 2,
"id": "every_function_has_docstring",
"question": "Does every function in this file have a docstring?",
"answer": fal... | 3 | edgeDetection.py | krishna1401/Digital-Image-Processing |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.