source string | points list | n_points int64 | path string | repo string |
|---|---|---|---|---|
class Solution(object):
def backspaceCompare(self, S1, S2):
r1 = len(S1) - 1
r2 = len (S2) - 1
while r1 >= 0 or r2 >= 0:
char1 = char2 = ""
if r1 >= 0:
char1, r1 = self.getChar(S1, r1)
if r2 >= 0:
char2, r2 = self.getChar(S2, r2)
print(char1, r1, char2, r2)
if char1 != char2:
return False
return True
def getChar(self, s , r):
char, count = '', 0
while r >= 0 and not char:
if s[r] == '#':
count += 1
elif count == 0:
char = s[r]
else:
count -= 1
r -= 1
return char, r
S = "ab#c"
T = "add##c"
print(Solution().backspaceCompare(S, T)) | [
{
"point_num": 1,
"id": "more_functions_than_classes",
"question": "Does this file define more functions than classes?",
"answer": true
},
{
"point_num": 2,
"id": "every_function_under_20_lines",
"question": "Is every function in this file shorter than 20 lines?",
"answer": true
... | 3 | stack/0844_backspace_string_compare/0844_backspace_string_compare.py | zdyxry/LeetCode |
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
import numpy as np
import tvm
from tvm import nd, relay
from tvm.runtime import container as _container
def test_adt_constructor():
arr = nd.array([1, 2, 3])
fields = [arr, arr]
y = _container.ADT(0, [arr, arr])
assert len(y) == 2
assert isinstance(y, _container.ADT)
y[0:1][-1] == arr
assert y.tag == 0
assert isinstance(arr, nd.NDArray)
def test_tuple_object():
x = relay.var(
'x',
type_annotation=relay.ty.TupleType([
relay.ty.TensorType((), 'int32'),
relay.ty.TensorType((), 'int32')
]))
fn = relay.Function([x], relay.expr.TupleGetItem(x, 0))
mod = tvm.IRModule.from_expr(fn)
exe = relay.create_executor(
kind="vm", mod=mod, ctx=nd.cpu(), target="llvm")
f = exe.evaluate()
value_tuple = _container.tuple_object(
[nd.array(np.array(11)),
nd.array(np.array(12))])
# pass an ADT object to evaluate
out = f(value_tuple)
tvm.testing.assert_allclose(out.asnumpy(), np.array(11))
if __name__ == "__main__":
test_adt_constructor()
test_tuple_object()
| [
{
"point_num": 1,
"id": "has_nested_function_def",
"question": "Does this file contain any function defined inside another function?",
"answer": false
},
{
"point_num": 2,
"id": "every_function_under_20_lines",
"question": "Is every function in this file shorter than 20 lines?",
... | 3 | tests/python/unittest/test_container.py | jheo4/incubator-tvm |
"""new fields in user moodel
Revision ID: f1578ff17ae1
Revises: bda639e5aafd
Create Date: 2021-01-11 10:01:54.417977
"""
from alembic import op
import sqlalchemy as sa
# revision identifiers, used by Alembic.
revision = 'f1578ff17ae1'
down_revision = 'bda639e5aafd'
branch_labels = None
depends_on = None
def upgrade():
# ### commands auto generated by Alembic - please adjust! ###
op.add_column('user', sa.Column('about_me', sa.String(length=140), nullable=True))
op.add_column('user', sa.Column('last_seen', sa.DateTime(), nullable=True))
# ### end Alembic commands ###
def downgrade():
# ### commands auto generated by Alembic - please adjust! ###
op.drop_column('user', 'last_seen')
op.drop_column('user', 'about_me')
# ### end Alembic commands ###
| [
{
"point_num": 1,
"id": "has_nested_function_def",
"question": "Does this file contain any function defined inside another function?",
"answer": false
},
{
"point_num": 2,
"id": "every_function_has_docstring",
"question": "Does every function in this file have a docstring?",
"ans... | 3 | migrations/versions/f1578ff17ae1_new_fields_in_user_moodel.py | aboladebaba/flaskTut |
import tensorflow as tf
class MockModel(tf.keras.Model):
"""
A Mock keras model to test basic tester functionality.
This model only has one variable: a weight matrix of shape 2x1.
This model accepts 2-dimensional input data and outputs 1-d data
"""
def __init__(self):
super(MockModel, self).__init__()
self.weight = tf.Variable(tf.ones((2, 1)), dtype=tf.float32)
def call(self, input_data):
return tf.linalg.matmul(input_data, self.weight)
| [
{
"point_num": 1,
"id": "more_functions_than_classes",
"question": "Does this file define more functions than classes?",
"answer": true
},
{
"point_num": 2,
"id": "has_multiple_inheritance",
"question": "Does any class in this file use multiple inheritance?",
"answer": false
},... | 3 | test/utils.py | TTitcombe/tfShell2 |
# Python program for implementation of heap Sort
# To heapify subtree rooted at index i.
# n is size of heap
def heapify(arr, n, i):
largest = i # Initialize largest as root
l = 2 * i + 1 # left = 2*i + 1
r = 2 * i + 2 # right = 2*i + 2
# See if left child of root exists and is
# greater than root
if l < n and arr[largest] < arr[l]:
largest = l
# See if right child of root exists and is
# greater than root
if r < n and arr[largest] < arr[r]:
largest = r
# Change root, if needed
if largest != i:
arr[i], arr[largest] = arr[largest], arr[i] # swap
# Heapify the root.
heapify(arr, n, largest)
# The main function to sort an array of given size
def heapSort(arr):
n = len(arr)
# Build a maxheap.
for i in range(n//2 - 1, -1, -1):
heapify(arr, n, i)
# One by one extract elements
for i in range(n-1, 0, -1):
arr[i], arr[0] = arr[0], arr[i] # swap
heapify(arr, i, 0)
# Driver code
arr = [12, 11, 13, 5, 6, 7]
heapSort(arr)
n = len(arr)
print("Sorted array is")
for i in range(n):
print("%d" % arr[i]),
| [
{
"point_num": 1,
"id": "every_function_has_docstring",
"question": "Does every function in this file have a docstring?",
"answer": false
},
{
"point_num": 2,
"id": "no_function_exceeds_5_params",
"question": "Does every function in this file take 5 or fewer parameters (excluding sel... | 3 | Python/HeapSort.py | Mario263/Hacktoberfest_2021 |
'''
Created on Sep 3, 2012
@author: Daniel J. Rivers
'''
from DataAccess.TableData import TableData
from DataAccess.TableHandler import TableHandler
class EpisodeHandler( TableHandler ):
pass
class Episode( TableData ):
def __init__( self ):
self.columnNames = [ ( "SEASON_ID", "INTEGER" ), ( "EPISODE_NUM", "INTEGER" ), ( "FILE", "TEXT" ), ( "TOD", "TEXT" ) ]
self.tableName = "EPISODE"
self.where = 1
| [
{
"point_num": 1,
"id": "has_multiple_inheritance",
"question": "Does any class in this file use multiple inheritance?",
"answer": false
},
{
"point_num": 2,
"id": "every_class_has_docstring",
"question": "Does every class in this file have a docstring?",
"answer": false
},
{... | 3 | FileInventory/DataAccess/Tables/EpisodeHandler.py | ErebusMaligan/python |
from djangobench.utils import run_benchmark
def benchmark():
global Book
Book.objects.all().delete()
def setup():
global Book
from query_delete_related.models import Book, Chapter
b1 = Book.objects.create(title='hi')
b2 = Book.objects.create(title='hi')
b3 = Book.objects.create(title='hi')
for i in range(0, 5):
Chapter.objects.create(book=b1, title='chapter%d' % i)
Chapter.objects.create(book=b2, title='chapter%d' % i)
Chapter.objects.create(book=b3, title='chapter%d' % i)
run_benchmark(
benchmark,
meta={
'description': 'Delete an object via QuerySet.delete(), '
'objects deleted have related objects.',
},
setup=setup
)
| [
{
"point_num": 1,
"id": "has_nested_function_def",
"question": "Does this file contain any function defined inside another function?",
"answer": false
},
{
"point_num": 2,
"id": "every_function_under_20_lines",
"question": "Is every function in this file shorter than 20 lines?",
... | 3 | djangobench/benchmarks/query_delete_related/benchmark.py | smithdc1/djangobench |
import numpy as np
__all__ = ["binomial"]
def binomial(chr1, chr2):
"""
Picks one allele or the other with 50% success
:type chr1: Sequence
:type chr2: Sequence
"""
if len(chr1) != len(chr2):
raise ValueError("Incompatible chromosome lengths")
choice_mask = np.random.binomial(1, 0.5, len(chr1))
return [a if ch else b for (ch, a, b) in zip(choice_mask, chr1, chr2)]
def breakpoint(chr1, chr2, rate):
raise NotImplemented
if __name__ == "__main__":
raise RuntimeError
| [
{
"point_num": 1,
"id": "no_function_exceeds_5_params",
"question": "Does every function in this file take 5 or fewer parameters (excluding self/cls)?",
"answer": true
},
{
"point_num": 2,
"id": "every_function_under_20_lines",
"question": "Is every function in this file shorter than... | 3 | genetic/recombination/_recombination.py | skoblov-lab/genetic |
import sys
if sys.version_info[0] >= 3:
INT_TYPES = (int,)
else:
INT_TYPES = (int, long)
def pow2_check (n):
return n > 0 and (n & (n - 1)) == 0
def pow2_round_down (n, p):
assert pow2_check(p)
return n & ~(p - 1)
def pow2_round_up (n, p):
assert pow2_check(p)
return (n + p - 1) & ~(p - 1)
def hex (n, prefix='0x', suffix=''):
return '{}{:X}{}'.format(prefix, n, suffix)
def log2_ceil (n):
p = 0
while n > (1 << p): p += 1
return p
def hex_items (l, sep = ', ', prefix='', suffix='', item_prefix='0x', item_suffix=''):
return ''.join((prefix, sep.join(hex(n, prefix=item_prefix, suffix=item_suffix) for n in l), suffix))
def u8_hex (n):
return '{:02X}'.format(n)
def u16_hex (n):
return '{:04X}'.format(n)
def u32_hex (n):
return '{:04X}'.format(n)
def u64_hex (n):
return '{:04X}'.format(n)
def u8_in_range (n):
return n >= 0 and n < 0x100
def u8_trunc (n):
return n & 0xFF
def u8_add (a, b):
return (a + b) & 0xFF
| [
{
"point_num": 1,
"id": "no_function_exceeds_5_params",
"question": "Does every function in this file take 5 or fewer parameters (excluding self/cls)?",
"answer": false
},
{
"point_num": 2,
"id": "all_params_annotated",
"question": "Does every function parameter in this file have a t... | 3 | zlx/int.py | icostin/zlx-py |
from __future__ import absolute_import, division, print_function
import torch
import pyro
import pyro.distributions as dist
import pyro.poutine as poutine
from pyro.infer import EmpiricalMarginal, TracePredictive
from pyro.infer.mcmc import MCMC, NUTS
from tests.common import assert_equal
def model(num_trials):
phi_prior = dist.Uniform(num_trials.new_tensor(0.), num_trials.new_tensor(1.))\
.expand_by([num_trials.shape[0]])
success_prob = pyro.sample("phi", phi_prior)
return pyro.sample("obs", dist.Binomial(num_trials, success_prob))
def test_posterior_predictive():
true_probs = torch.ones(5) * 0.7
num_trials = torch.ones(5) * 1000
num_success = dist.Binomial(num_trials, true_probs).sample()
conditioned_model = poutine.condition(model, data={"obs": num_success})
nuts_kernel = NUTS(conditioned_model, adapt_step_size=True)
mcmc_run = MCMC(nuts_kernel, num_samples=1000, warmup_steps=200).run(num_trials)
posterior_predictive = TracePredictive(model, mcmc_run, num_samples=10000).run(num_trials)
marginal_return_vals = EmpiricalMarginal(posterior_predictive)
assert_equal(marginal_return_vals.mean, torch.ones(5) * 700, prec=30)
| [
{
"point_num": 1,
"id": "has_nested_function_def",
"question": "Does this file contain any function defined inside another function?",
"answer": false
},
{
"point_num": 2,
"id": "every_function_under_20_lines",
"question": "Is every function in this file shorter than 20 lines?",
... | 3 | tests/infer/test_abstract_infer.py | neerajprad/pyro |
"""
To understand why this file is here, please read:
http://cookiecutter-django.readthedocs.io/en/latest/faq.html#why-is-there-a-django-contrib-sites-directory-in-cookiecutter-django
"""
from django.conf import settings
from django.db import migrations
def update_site_forward(apps, schema_editor):
"""Set site domain and name."""
Site = apps.get_model("sites", "Site")
Site.objects.update_or_create(
id=settings.SITE_ID,
defaults={
"domain": "djsniper.com",
"name": "djsniper",
},
)
def update_site_backward(apps, schema_editor):
"""Revert site domain and name to default."""
Site = apps.get_model("sites", "Site")
Site.objects.update_or_create(
id=settings.SITE_ID, defaults={"domain": "example.com", "name": "example.com"}
)
class Migration(migrations.Migration):
dependencies = [("sites", "0002_alter_domain_unique")]
operations = [migrations.RunPython(update_site_forward, update_site_backward)]
| [
{
"point_num": 1,
"id": "all_params_annotated",
"question": "Does every function parameter in this file have a type annotation (excluding self/cls)?",
"answer": false
},
{
"point_num": 2,
"id": "no_function_exceeds_5_params",
"question": "Does every function in this file take 5 or fe... | 3 | djsniper/contrib/sites/migrations/0003_set_site_domain_and_name.py | justdjango/django-nft-sniper |
import numpy as np
import torch
from torch import nn, optim
from torch.utils.data import DataLoader
from torchvision import datasets
from tqdm import tqdm
from pixelsnail import PixelSNAIL
def train(epoch, loader, model, optimizer, device):
loader = tqdm(loader)
criterion = nn.CrossEntropyLoss()
for i, (img, label) in enumerate(loader):
model.zero_grad()
img = img.to(device)
out = model(img)
loss = criterion(out, img)
loss.backward()
optimizer.step()
_, pred = out.max(1)
correct = (pred == img).float()
accuracy = correct.sum() / img.numel()
loader.set_description(
(f'epoch: {epoch + 1}; loss: {loss.item():.5f}; ' f'acc: {accuracy:.5f}')
)
class PixelTransform:
def __init__(self):
pass
def __call__(self, input):
ar = np.array(input)
return torch.from_numpy(ar).long()
if __name__ == '__main__':
device = 'cuda'
epoch = 10
dataset = datasets.MNIST('.', transform=PixelTransform(), download=True)
loader = DataLoader(dataset, batch_size=32, shuffle=True, num_workers=4)
model = PixelSNAIL([28, 28], 256, 128, 5, 2, 4, 128)
model = model.to(device)
optimizer = optim.Adam(model.parameters(), lr=1e-3)
for i in range(10):
train(i, loader, model, optimizer, device)
torch.save(model.state_dict(), f'checkpoint/mnist_{str(i + 1).zfill(3)}.pt')
| [
{
"point_num": 1,
"id": "all_params_annotated",
"question": "Does every function parameter in this file have a type annotation (excluding self/cls)?",
"answer": false
},
{
"point_num": 2,
"id": "has_multiple_inheritance",
"question": "Does any class in this file use multiple inherita... | 3 | pixelsnail_mnist.py | sajjad2014/vq-vae-2-pytorch |
# coding: utf-8
"""
Keras target formatters.
"""
__all__ = ["KerasModelFormatter", "TFKerasModelFormatter"]
from law.target.formatter import Formatter
from law.target.file import get_path
class ModelFormatter(Formatter):
@classmethod
def accepts(cls, path):
return get_path(path).endswith(".h5")
@classmethod
def dump(cls, path, model, *args, **kwargs):
model.save(path, *args, **kwargs)
class KerasModelFormatter(ModelFormatter):
name = "keras"
@classmethod
def load(cls, path, *args, **kwargs):
from keras.models import load_model
return load_model(path, *args, **kwargs)
class TFKerasModelFormatter(ModelFormatter):
name = "tf_keras"
@classmethod
def load(cls, path, *args, **kwargs):
from tensorflow import keras
return keras.models.load_model(path, *args, **kwargs)
| [
{
"point_num": 1,
"id": "every_function_has_docstring",
"question": "Does every function in this file have a docstring?",
"answer": false
},
{
"point_num": 2,
"id": "all_params_annotated",
"question": "Does every function parameter in this file have a type annotation (excluding self/... | 3 | law/contrib/keras/formatter.py | b-fontana/law |
from math import sin, cos, radians
def func_args_unpack(func, args):
return func(*args)
def get_len(iterable, total):
try:
length = iterable.__len__()
except AttributeError:
length = total
return length
def cpu_bench(number):
product = 1.0
for elem in range(number):
angle = radians(elem)
product *= sin(angle)**2 + cos(angle)**2
return product
def fibonacci(number):
if number <= 1:
return number
else:
return fibonacci(number-2) + fibonacci(number-1)
def iterate_by_pack(iterable, pack_size: int = 1):
if pack_size < 1:
raise ValueError("pack_size must be greater than 0")
iterator = iter(iterable)
sentinel = object()
item = None
while item is not sentinel:
pack = []
for _ in range(pack_size):
item = next(iterator, sentinel)
if item is sentinel:
break
pack.append(item)
if pack:
yield pack
def get_packs_count(array, pack_size):
total, extra = divmod(len(array), pack_size)
if extra:
total += 1
return total
| [
{
"point_num": 1,
"id": "all_function_names_snake_case",
"question": "Are all function names in this file written in snake_case?",
"answer": true
},
{
"point_num": 2,
"id": "all_params_annotated",
"question": "Does every function parameter in this file have a type annotation (excludi... | 3 | parallelbar/tools.py | dubovikmaster/parallelbar |
import os
import json
import requests
from datetime import datetime, date
class Common:
def __init__(self):
pass
def make_json_request(self,uri:str,body:dict = None):
if body is None:
response = requests.get(uri)
return response.json()
else:
response = requests.post(url=uri,data = body)
return response.json
def get_secret_info(self,key:str):
try:
jsonpath = os.path.abspath('./parameters/secrets.json')
with open(jsonpath,'r') as json_file:
data = json.load(json_file)
return data[key]
except:
raise Exception(f'Failure on Config load')
pass
def get_day_of_year(self):
return datetime.now().timetuple().tm_yday
def get_current_time(self):
return datetime.now().strftime("%H:%M:%S")
def post_discord_webhook(self,message,username=None) -> None:
webhook = {"content": message}
if username is not None:
webhook['username'] = username
webhook_url = self.get_secret_info("discord")['webhook']
requests.post(url=webhook_url,data=webhook)
| [
{
"point_num": 1,
"id": "all_function_names_snake_case",
"question": "Are all function names in this file written in snake_case?",
"answer": true
},
{
"point_num": 2,
"id": "no_function_exceeds_5_params",
"question": "Does every function in this file take 5 or fewer parameters (exclu... | 3 | app/modules/common.py | Rabbit994/NoVA-Stonk-Bot |
from sqlalchemy import and_, exists, func
from sqlalchemy.orm import Session
from src.customer.domain.entities import Customer
class SQLACustomerRepository:
def __init__(self, sqla_session: Session):
self.sqla_session = sqla_session
def get_by_id(self, customer_id):
return (
self.sqla_session.query(Customer)
.filter(Customer.id == customer_id)
.one_or_none()
)
def get_by_email(self, email):
return (
self.sqla_session.query(Customer)
.filter(func.lower(Customer.email) == func.lower(email))
.one_or_none()
)
def has_customer_with_email(self, email, customer_id=None):
if customer_id:
stmt = exists().where(
and_(
func.lower(Customer.email) == func.lower(email),
Customer.id != customer_id,
)
)
else:
email_equal = func.lower(Customer.email) == func.lower(email)
stmt = exists().where(email_equal)
return self.sqla_session.query(stmt).scalar()
def save(self, customer):
self.sqla_session.add(customer)
self.sqla_session.commit()
def delete(self, customer):
self.sqla_session.delete(customer)
self.sqla_session.commit()
| [
{
"point_num": 1,
"id": "no_function_exceeds_5_params",
"question": "Does every function in this file take 5 or fewer parameters (excluding self/cls)?",
"answer": true
},
{
"point_num": 2,
"id": "every_function_under_20_lines",
"question": "Is every function in this file shorter than... | 3 | src/customer/repository.py | csmaniottojr/wishlist_api |
import xml.etree.ElementTree as ET
import requests
import random
from functions.config import config
def dl_connect():
configuration = config()
link = configuration['Datalogger']['ip'] + configuration['Datalogger']['key']
try:
# When the session is expired, this link returns an error
# To fix it, BTicino requires us to connect to the home page ('/')
# and only after that we can connect to the link with the PUB key
conn = requests.get(link)
except Exception:
conn = requests.get(configuration['Datalogger']['ip'])
conn = requests.get(link)
key = str(conn.text)
key = key[5:29]
i = ConvertiChiave(key)
d = configuration['Datalogger']['password']
l = 1
o = Calcola(d,i)
W1 = o[0:24]
W2 = o[24:48]
link = configuration['Datalogger']['ip'] + configuration['Datalogger']['key_send'] + "?W1=" + W1 + "&W2=" + W2
conn = requests.get(link)
return conn
def ConvertiChiave(e):
t = ""
n = e[0]
n = int(n)
i = e[n-1]
i = int(i)
for d in range(0, 24):
if not(24 > i + d):
l = 24 - d
break
t += e[i+d]
for d in range(0, l):
t += e[d]
return t
def Calcola(e, t):
l=''
i=[]
d=[]
n=[]
for o in range(0, 12):
n = n + [int(t[2*o:2*o+2], 16)]
for o in range(0, len(e)):
i = i + [ord(e[o])]
for o in range(len(e), 12):
i = i + [random.randint(0, 40)]
for o in range(0, 12):
d = d + [int(i[o]) * int(n[o])]
s = hex(d[o]).lstrip('0x')
while len(s) < 4:
s = "0" + s
l += s
return l
| [
{
"point_num": 1,
"id": "no_function_exceeds_5_params",
"question": "Does every function in this file take 5 or fewer parameters (excluding self/cls)?",
"answer": true
},
{
"point_num": 2,
"id": "every_function_has_docstring",
"question": "Does every function in this file have a docs... | 3 | functions/electricity/datalogger_connecter.py | FrancescoRisso/Domotico-casa |
# Copyright (c) 2001-2005 Twisted Matrix Laboratories.
# See LICENSE for details.
from twisted.trial import unittest
from twisted.words.xish import xmlstream
class XmlStreamTest(unittest.TestCase):
def setUp(self):
self.errorOccurred = False
self.streamStarted = False
self.streamEnded = False
self.outlist = []
self.xmlstream = xmlstream.XmlStream()
self.xmlstream.transport = self
self.xmlstream.transport.write = self.outlist.append
# Auxilary methods
def loseConnection(self):
self.xmlstream.connectionLost("no reason")
def streamStartEvent(self, rootelem):
self.streamStarted = True
def streamErrorEvent(self, errelem):
self.errorOccurred = True
def streamEndEvent(self, _):
self.streamEnded = True
def testBasicOp(self):
xs = self.xmlstream
xs.addObserver(xmlstream.STREAM_START_EVENT,
self.streamStartEvent)
xs.addObserver(xmlstream.STREAM_ERROR_EVENT,
self.streamErrorEvent)
xs.addObserver(xmlstream.STREAM_END_EVENT,
self.streamEndEvent)
# Go...
xs.connectionMade()
xs.send("<root>")
self.assertEquals(self.outlist[0], "<root>")
xs.dataReceived("<root>")
self.assertEquals(self.streamStarted, True)
self.assertEquals(self.errorOccurred, False)
self.assertEquals(self.streamEnded, False)
xs.dataReceived("<child><unclosed></child>")
self.assertEquals(self.errorOccurred, True)
self.assertEquals(self.streamEnded, True)
| [
{
"point_num": 1,
"id": "more_functions_than_classes",
"question": "Does this file define more functions than classes?",
"answer": true
},
{
"point_num": 2,
"id": "no_function_exceeds_5_params",
"question": "Does every function in this file take 5 or fewer parameters (excluding self/... | 3 | python2.7/site-packages/twisted/words/test/test_xmlstream.py | 84KaliPleXon3/sslstrip-hsts-openwrt |
"""auto generate book_transaction table
Revision ID: 2e2c34db1cf5
Revises: 856336fb4dfc
Create Date: 2022-02-19 21:11:52.730614
"""
from alembic import op
import sqlalchemy as sa
# revision identifiers, used by Alembic.
revision = '2e2c34db1cf5'
down_revision = '856336fb4dfc'
branch_labels = None
depends_on = None
def upgrade():
# ### commands auto generated by Alembic - please adjust! ###
op.create_table('book_transaction',
sa.Column('book_transaction_id', sa.Integer(), nullable=False),
sa.Column('borrowed_by', sa.Integer(), nullable=False),
sa.Column('issued_date', sa.TIMESTAMP(timezone=True), server_default=sa.text('now()'), nullable=False),
sa.Column('due_date', sa.TIMESTAMP(timezone=True), server_default=sa.text("NOW() + INTERVAL '5 day'"), nullable=True),
sa.Column('book_fine', sa.Float(), nullable=True),
sa.Column('remarks', sa.String(), nullable=True),
sa.Column('created_at', sa.TIMESTAMP(timezone=True), server_default=sa.text('now()'), nullable=False),
sa.ForeignKeyConstraint(['borrowed_by'], ['user_profile.user_profile_id'], ondelete='CASCADE'),
sa.PrimaryKeyConstraint('book_transaction_id')
)
op.create_index(op.f('ix_book_transaction_book_transaction_id'), 'book_transaction', ['book_transaction_id'], unique=False)
# ### end Alembic commands ###
def downgrade():
# ### commands auto generated by Alembic - please adjust! ###
op.drop_index(op.f('ix_book_transaction_book_transaction_id'), table_name='book_transaction')
op.drop_table('book_transaction')
# ### end Alembic commands ###
| [
{
"point_num": 1,
"id": "every_function_has_docstring",
"question": "Does every function in this file have a docstring?",
"answer": false
},
{
"point_num": 2,
"id": "has_nested_function_def",
"question": "Does this file contain any function defined inside another function?",
"ans... | 3 | alembic/versions/2e2c34db1cf5_auto_generate_book_transaction_table.py | KamalDGRT/libms |
"""Tool for displaying a selection of colours."""
import math
import pathlib
from PIL import Image, ImageDraw, ImageFont
_font_path = str(pathlib.Path(__file__).parent.absolute() / 'res' / 'font.ttf')
FONT = ImageFont.truetype(_font_path, size=20)
class ColourDisplayer:
"""Tool for displaying a selection of colours."""
def __init__(self, colours: list[tuple[int, int, int]]):
"""Store the colours."""
self.colours = colours
self.im = None
self.draw = None
def display(self) -> Image.Image:
"""Draw the colours."""
columns = round((0.3 * len(self.colours)) ** 0.5)
rows = math.ceil(len(self.colours) / columns)
self.im = Image.new('RGB', (columns * 100, rows * 30))
self.draw = ImageDraw.Draw(self.im)
row = column = 0
for colour in self.colours:
self.draw_colour(colour, row, column)
column += 1
if column >= columns:
column = 0
row += 1
return self.im
def draw_colour(self, colour: tuple[int, int, int], row: int, column: int):
"""Draw a colour on the image."""
text = '#{0:0>2x}{1:0>2x}{2:0>2x}'.format(*colour).upper()
if sum(colour) / 3 > 128:
text_colour = (0, 0, 0)
else:
text_colour = (255, 255, 255)
x_start = column * 100
y_start = row * 30
self.draw.rectangle(
(x_start, y_start, x_start + 100, y_start + 30), fill=colour
)
self.draw.text(
(x_start + 8, y_start + 3), text, fill=text_colour, font=FONT
)
| [
{
"point_num": 1,
"id": "all_params_annotated",
"question": "Does every function parameter in this file have a type annotation (excluding self/cls)?",
"answer": true
},
{
"point_num": 2,
"id": "more_functions_than_classes",
"question": "Does this file define more functions than class... | 3 | src/colour_displayer.py | Artemis21/image-analysis |
class Status:
OK = "OK"
ERROR = "ERROR"
class Response(dict):
def __init__(self, status, data):
super().__init__()
self["status"] = status
self["data"] = data
| [
{
"point_num": 1,
"id": "more_functions_than_classes",
"question": "Does this file define more functions than classes?",
"answer": false
},
{
"point_num": 2,
"id": "has_multiple_inheritance",
"question": "Does any class in this file use multiple inheritance?",
"answer": false
}... | 3 | app/model/response.py | Djaler/VkGraph |
from storm.variables import Variable
from spans import *
__all__ = [
"RangeVariable",
"IntRangeVariable",
"FloatRangeVariable",
"DateRangeVariable",
"DateTimeRangeVariable"
]
class RangeVariable(Variable):
"""
Extension of standard variable class to handle conversion to and from Psycopg2
range types
"""
def parse_set(self, value, from_db):
if not isinstance(value, self.range_type):
raise ValueError(
"Expected '{range_type.__name__}' '{value!r}' given".format(
range_type=self.range_type,
value=value))
return value
def parse_get(self, value, to_db):
return value
# Define the builtin range properties and variables
class IntRangeVariable(RangeVariable):
range_type = intrange
class FloatRangeVariable(RangeVariable):
range_type = floatrange
class DateRangeVariable(RangeVariable):
range_type = daterange
class DateTimeRangeVariable(RangeVariable):
range_type = datetimerange
| [
{
"point_num": 1,
"id": "every_class_has_docstring",
"question": "Does every class in this file have a docstring?",
"answer": false
},
{
"point_num": 2,
"id": "has_multiple_inheritance",
"question": "Does any class in this file use multiple inheritance?",
"answer": false
},
{... | 3 | stormspans/variables.py | runfalk/stormspans |
# Copyright 2020 MONAI Consortium
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
import shutil
import tempfile
import unittest
import numpy as np
from skimage import io
from monai.data import write_png
class TestPngWrite(unittest.TestCase):
def test_write_gray(self):
out_dir = tempfile.mkdtemp()
image_name = os.path.join(out_dir, "test.png")
img = np.random.rand(2, 3, 1)
img_save_val = 255 * img
# saving with io.imsave (h, w, 1) will only give us (h,w) while reading it back.
img_save_val = img_save_val[:, :, 0].astype(np.uint8)
write_png(img, image_name, scale=True)
out = io.imread(image_name)
np.testing.assert_allclose(out, img_save_val)
shutil.rmtree(out_dir)
def test_write_rgb(self):
out_dir = tempfile.mkdtemp()
image_name = os.path.join(out_dir, "test.png")
img = np.random.rand(2, 3, 3)
img_save_val = (255 * img).astype(np.uint8)
write_png(img, image_name, scale=True)
out = io.imread(image_name)
np.testing.assert_allclose(out, img_save_val)
shutil.rmtree(out_dir)
def test_write_output_shape(self):
out_dir = tempfile.mkdtemp()
image_name = os.path.join(out_dir, "test.png")
img = np.random.rand(2, 2, 3)
write_png(img, image_name, (4, 4), scale=True)
out = io.imread(image_name)
np.testing.assert_allclose(out.shape, (4, 4, 3))
shutil.rmtree(out_dir)
if __name__ == "__main__":
unittest.main()
| [
{
"point_num": 1,
"id": "more_functions_than_classes",
"question": "Does this file define more functions than classes?",
"answer": true
},
{
"point_num": 2,
"id": "has_nested_function_def",
"question": "Does this file contain any function defined inside another function?",
"answe... | 3 | tests/test_png_rw.py | crtrentz/MONAI |
#!/usr/bin/env python3
import os
import errno
import logging
from src.issue import Issue
from settings import jira_to_zen_backlog_map, jira_to_zen_sprint_map, zen_to_jira_map, urls
logger = logging.getLogger(__name__)
def check_for_git_config(git_config_file):
"""
User must have ~/.gitconfig in home directory in order to use this function.
"""
logging.info('Checking whether .gitconfig exists on local system')
user_home = os.path.expanduser('~')
if not os.path.isfile(os.path.join(user_home, git_config_file)):
raise FileNotFoundError(errno.ENOENT, os.strerror(errno.ENOENT), git_config_file)
def _get_repo_url(repo_name, org_name):
"""
Return URL using GitHub API.
Example:
If repo_name = 'bar' and org_name is 'foo', this returns
'https://api.github.com/repos/foo/bar'
"""
base_url = urls['github_api']
return f'{base_url}{org_name}/{repo_name}'
def get_zenhub_pipeline(i: 'Issue'):
"""Return the corresponding ZenHub pipeline for a Jira issue using the mapping in settings.py"""
if i.sprint_id is None: # issue is in the backlog
return jira_to_zen_backlog_map[i.status]
else:
return jira_to_zen_sprint_map[i.status]
def get_jira_status(i: 'Issue'):
"""Return the corresponding Jira status for a ZenHub issue using the mapping in settings.py"""
return zen_to_jira_map[i.pipeline]
class CustomFieldNames:
"""A class to hold field ids with names that aren't self-explanatory"""
sprint = 'customfield_10010'
story_points = 'customfield_10014'
| [
{
"point_num": 1,
"id": "all_function_names_snake_case",
"question": "Are all function names in this file written in snake_case?",
"answer": true
},
{
"point_num": 2,
"id": "no_function_exceeds_5_params",
"question": "Does every function in this file take 5 or fewer parameters (exclu... | 3 | src/utilities.py | ucsc-cgp/sync_agile_boards |
import sys, copy
from itertools import *
from StringIO import StringIO
import benchbase
from benchbase import with_attributes, with_text, onlylib, serialized
############################################################
# Benchmarks
############################################################
class XSLTBenchMark(benchbase.TreeBenchMark):
    """XSLT benchmarks; restricted to lxml.etree via @onlylib('lxe')."""

    @onlylib('lxe')
    def bench_xslt_extensions_old(self, root):
        # Benchmarks XSLT calling back into a Python extension function
        # ('testns:child') for every matched element; runs the transform 10x.
        tree = self.etree.XML("""\
<xsl:stylesheet version="1.0"
xmlns:l="test"
xmlns:testns="testns"
xmlns:xsl="http://www.w3.org/1999/XSL/Transform">
<l:data>TEST</l:data>
<xsl:template match="/">
<l:result>
<xsl:for-each select="*/*">
<xsl:copy-of select="testns:child(.)"/>
</xsl:for-each>
</l:result>
</xsl:template>
</xsl:stylesheet>
""")
        def return_child(_, elements):
            # Extension function body: first grandchild of the context node.
            return elements[0][0]
        extensions = {('testns', 'child') : return_child}
        transform = self.etree.XSLT(tree, extensions)
        for i in range(10):
            transform(root)

    @onlylib('lxe')
    def bench_xslt_document(self, root):
        # Benchmarks XSLT's document('') function, which re-reads the
        # stylesheet itself to extract the <l:data> text node.
        transform = self.etree.XSLT(self.etree.XML("""\
<xsl:stylesheet version="1.0"
xmlns:l="test"
xmlns:xsl="http://www.w3.org/1999/XSL/Transform">
<l:data>TEST</l:data>
<xsl:template match="/">
<l:result>
<xsl:for-each select="*/*">
<l:test><xsl:copy-of select="document('')//l:data/text()"/></l:test>
</xsl:for-each>
</l:result>
</xsl:template>
</xsl:stylesheet>
"""))
        transform(root)
if __name__ == '__main__':
    # Delegate argument parsing and benchmark execution to the shared driver.
    benchbase.main(XSLTBenchMark)
| [
{
"point_num": 1,
"id": "every_function_has_docstring",
"question": "Does every function in this file have a docstring?",
"answer": false
},
{
"point_num": 2,
"id": "no_function_exceeds_5_params",
"question": "Does every function in this file take 5 or fewer parameters (excluding sel... | 3 | desktop/core/ext-py/lxml/benchmark/bench_xslt.py | t3hi3x/hue |
#!/usr/bin/python3
def check_brackets_match(text):
    """
    Checks, whether brackets in the given string are in correct sequence.
    Any opening bracket should have closing bracket of the same type.
    Bracket pairs should not overlap, though they could be nested.
    Returns true if bracket sequence is correct, false otherwise.
    """
    assert isinstance(text, str)
    opening = '([{<'
    closing = ')]}>'
    # Stack of the closing brackets we still owe; its len() replaces the
    # redundant hand-maintained stack_length counter of the original.
    expected_closers = []
    for char in text:
        if char in opening:
            expected_closers.append(closing[opening.index(char)])
        elif char in closing:
            # A closer with an empty stack, or the wrong closer, is a mismatch.
            if not expected_closers or char != expected_closers.pop():
                return False
    # Correct only when every opener was closed.
    return not expected_closers
def check_brackets_number(text):
    """
    This function only checks if the number of opening brackets matches
    the number of closing brackets of the same type.
    Returns true if the number of opening and closing brackets of each type
    is the same, false otherwise.
    """
    assert isinstance(text, str)
    return all(text.count(op) == text.count(cl) for op, cl in zip('([{<', ')]}>'))
| [
{
"point_num": 1,
"id": "every_function_under_20_lines",
"question": "Is every function in this file shorter than 20 lines?",
"answer": false
},
{
"point_num": 2,
"id": "every_function_has_docstring",
"question": "Does every function in this file have a docstring?",
"answer": tru... | 3 | Python scripts/match_brackets.py | 0xd2e/python_playground |
import unittest
from policy_sentry.writing.minimize import minimize_statement_actions
from policy_sentry.querying.all import get_all_actions
class MinimizeWildcardActionsTestCase(unittest.TestCase):
    """Tests that minimize_statement_actions collapses actions to wildcards."""

    def test_minimize_statement_actions(self):
        # CamelCase action names should minimize to lowercase wildcard prefixes.
        actions_to_minimize = [
            "kms:CreateGrant",
            "kms:CreateCustomKeyStore",
            "ec2:AuthorizeSecurityGroupEgress",
            "ec2:AuthorizeSecurityGroupIngress",
        ]
        desired_result = ["ec2:authorizes*", "kms:createc*", "kms:createg*"]
        all_actions = get_all_actions(lowercase=True)
        minchars = None  # use the function's default minimum prefix length
        self.maxDiff = None
        # minimized_actions_list = minimize_statement_actions(desired_actions, all_actions, minchars)
        self.assertListEqual(
            sorted(
                minimize_statement_actions(actions_to_minimize, all_actions, minchars)
            ),
            sorted(desired_result),
        )

    def test_minimize_statement_actions_funky_case(self):
        # Same expectation when the input is already all-lowercase.
        actions_to_minimize = [
            "kms:creategrant",
            "kms:createcustomkeystore",
            "ec2:authorizesecuritygroupegress",
            "ec2:authorizesecuritygroupingress",
        ]
        desired_result = ["ec2:authorizes*", "kms:createc*", "kms:createg*"]
        all_actions = get_all_actions(lowercase=True)
        minchars = None
        self.maxDiff = None
        # minimized_actions_list = minimize_statement_actions(desired_actions, all_actions, minchars)
        self.assertListEqual(
            sorted(
                minimize_statement_actions(actions_to_minimize, all_actions, minchars)
            ),
            sorted(desired_result),
        )
| [
{
"point_num": 1,
"id": "every_function_under_20_lines",
"question": "Is every function in this file shorter than 20 lines?",
"answer": true
},
{
"point_num": 2,
"id": "has_multiple_inheritance",
"question": "Does any class in this file use multiple inheritance?",
"answer": false... | 3 | test/writing/test_minimize.py | backwardn/policy_sentry |
"""Gaussian Elimination"""
import numpy as np
def gaussian_elimination(matrix: np.ndarray) -> np.ndarray:
    """Reduce *matrix* to row echelon (upper triangular) form.

    The original was an unimplemented stub that returned its input untouched.
    This performs forward elimination without pivoting on a float copy, so the
    caller's array is never modified.

    :param matrix: 2-D array, typically an augmented system matrix
    :return: a new float ndarray of the same shape in row echelon form
    """
    echelon = matrix.astype(float, copy=True)
    n_rows, n_cols = echelon.shape
    for pivot in range(min(n_rows, n_cols)):
        if echelon[pivot, pivot] == 0:
            # Cannot eliminate with a zero pivot (no pivoting strategy here).
            continue
        for row in range(pivot + 1, n_rows):
            factor = echelon[row, pivot] / echelon[pivot, pivot]
            echelon[row, pivot:] -= factor * echelon[pivot, pivot:]
    return echelon
def main():
    """Demo entry point: run Gaussian elimination on a sample system."""
    sample = np.array([[4, 8, -4, 4],
                       [3, 8, 5, -11],
                       [-2, 1, 12, -17]])
    print(gaussian_elimination(sample))
if __name__ == "__main__":
    # Run the demo only when executed as a script, not on import.
    main()
| [
{
"point_num": 1,
"id": "every_function_under_20_lines",
"question": "Is every function in this file shorter than 20 lines?",
"answer": true
},
{
"point_num": 2,
"id": "has_nested_function_def",
"question": "Does this file contain any function defined inside another function?",
"... | 3 | systems_of_linear_equations/gaussian_elimination.py | jrpespinas/numerical-analysis |
from flask import Blueprint, render_template, redirect, url_for, flash, jsonify
from sqlalchemy import exc
from application import db
from application.routes.leads.models import Lead
from application.routes.leads.forms import AddLeadForm
leads = Blueprint("leads", __name__)
@leads.route("/")
def index():
    """Render the lead listing page with every stored lead."""
    all_leads = Lead.query.all()
    return render_template("leads_index.html", leads=all_leads)
@leads.route("/add", methods=['GET', 'POST'])
def add():
    """Create a new Lead from the submitted form, guarding against duplicates.

    Renders the form on GET or on validation/commit failure; redirects to the
    index on success.  (Removed a leftover `print(form)` debug statement.)
    """
    form = AddLeadForm()
    if form.validate_on_submit():
        item = Lead(**form.to_dict())
        db.session.add(item)
        try:
            db.session.commit()
        except exc.IntegrityError as e:
            # Unique constraint (email) violated: a lead already exists.
            flash("Lead already exists for this email.")
            print(e)
        except exc.SQLAlchemyError as e:
            flash("An unknown error occurred while adding Lead.")
            print(e)
        else:
            return redirect(url_for("leads.index"))
    elif form.errors:
        flash(form.errors)
    return render_template("leads_add.html", form=form)
@leads.route("/json/names")
def json_names():
    """Return every lead as a JSON array of display labels."""
    labels = []
    for lead in Lead.query.all():
        labels.append("#%d - %s <%s> {%s}" % (int(lead.id), lead.name, lead.email, lead.company))
    return jsonify(tuple(labels))
| [
{
"point_num": 1,
"id": "every_function_under_20_lines",
"question": "Is every function in this file shorter than 20 lines?",
"answer": false
},
{
"point_num": 2,
"id": "every_function_has_docstring",
"question": "Does every function in this file have a docstring?",
"answer": fal... | 3 | application/routes/leads/views.py | dejbug/full-stack-python-test-1 |
# Authors: Alexandre Gramfort <alexandre.gramfort@telecom-paristech.fr>
# Eric Larson <larson.eric.d@gmail.com>
# Joan Massich <mailsik@gmail.com>
# Guillaume Favelier <guillaume.favelier@gmail.com>
#
# License: Simplified BSD
import pytest
import warnings
def has_vtki():
    """Check that vtki is installed."""
    try:
        import vtki  # noqa: F401
    except ImportError:
        return False
    return True
def has_mayavi():
    """Check that mayavi is installed."""
    try:
        # traits emits warnings on import; silence them for the probe.
        with warnings.catch_warnings(record=True):
            from mayavi import mlab  # noqa F401
    except ImportError:
        return False
    return True
# Reusable pytest markers: skip tests when the optional 3D backends are absent.
skips_if_not_mayavi = pytest.mark.skipif(not(has_mayavi()),
                                         reason='requires mayavi')
skips_if_not_vtki = pytest.mark.skipif(not(has_vtki()),
                                       reason='requires vtki')
| [
{
"point_num": 1,
"id": "every_function_has_docstring",
"question": "Does every function in this file have a docstring?",
"answer": true
},
{
"point_num": 2,
"id": "every_function_under_20_lines",
"question": "Is every function in this file shorter than 20 lines?",
"answer": true... | 3 | mne/viz/backends/tests/_utils.py | kalenkovich/mne-python |
from sanskrit_data.db.interfaces import DbInterface, get_random_string, users_db, ullekhanam_db
from sanskrit_data.schema.common import JsonObject
class InMemoryDb(DbInterface):
    """A dict-backed, non-persistent DbInterface implementation (e.g. for tests)."""

    def __init__(self, db_name_frontend, external_file_store=None):
        super(InMemoryDb, self).__init__(external_file_store=external_file_store, db_name_frontend=db_name_frontend)
        # Maps document "_id" -> document dict.
        self.db = {}

    # noinspection PyShadowingBuiltins
    def find_by_id(self, id):
        """Return the document with the given _id, or None when absent."""
        return self.db.get(id, None)

    def find(self, find_filter):
        """Yield every stored document that matches find_filter."""
        # The original enumerated the dict but never used the index.
        for key in self.db:
            if JsonObject.make_from_dict(self.db[key]).match_filter(find_filter=find_filter):
                yield self.db[key]

    def update_doc(self, doc):
        """Insert or replace doc, assigning a random _id when missing; return doc."""
        if "_id" not in doc:
            doc["_id"] = get_random_string(8)
        self.db[doc["_id"]] = doc
        return doc

    def delete_doc(self, doc_id):
        """Remove the document with the given id (KeyError when absent)."""
        self.db.pop(doc_id)
class BookPortionsInMemory(InMemoryDb, ullekhanam_db.BookPortionsInterface):
    """In-memory variant of the book-portions database interface."""
    def __init__(self, db_name_frontend, external_file_store=None):
        super(BookPortionsInMemory, self).__init__(db_name_frontend=db_name_frontend,
                                                   external_file_store=external_file_store)
class UsersInMemory(InMemoryDb, users_db.UsersInterface):
    """In-memory variant of the users database interface."""
    def __init__(self, db_name_frontend, external_file_store=None):
        super(UsersInMemory, self).__init__(db_name_frontend=db_name_frontend,
                                            external_file_store=external_file_store)
| [
{
"point_num": 1,
"id": "more_functions_than_classes",
"question": "Does this file define more functions than classes?",
"answer": true
},
{
"point_num": 2,
"id": "every_class_has_docstring",
"question": "Does every class in this file have a docstring?",
"answer": false
},
{
... | 3 | sanskrit_data/db/in_memory.py | sanskrit-coders/sanskrit_data |
import pytest
from theheck.types import Command
from theheck.rules.brew_uninstall import get_new_command, match
@pytest.fixture
def output():
    """Sample `brew uninstall` output reporting versions still installed."""
    return ("Uninstalling /usr/local/Cellar/tbb/4.4-20160916... (118 files, 1.9M)\n"
            "tbb 4.4-20160526, 4.4-20160722 are still installed.\n"
            "Remove all versions with `brew uninstall --force tbb`.\n")
@pytest.fixture
def new_command(formula):
    """Expected corrected command that forces removal of every version."""
    return f'brew uninstall --force {formula}'
@pytest.mark.parametrize('script', ['brew uninstall tbb', 'brew rm tbb', 'brew remove tbb'])
def test_match(output, script):
    """Every uninstall/rm/remove alias should trigger the rule."""
    command = Command(script, output)
    assert match(command)
@pytest.mark.parametrize('script', ['brew remove gnuplot'])
def test_not_match(script):
    """Output without leftover versions must not trigger the rule."""
    clean_output = 'Uninstalling /usr/local/Cellar/gnuplot/5.0.4_1... (44 files, 2.3M)\n'
    assert not match(Command(script, clean_output))
@pytest.mark.parametrize('script, formula, ', [('brew uninstall tbb', 'tbb')])
def test_get_new_command(output, new_command, script, formula):
    """The rule should rewrite the command to the --force form."""
    result = get_new_command(Command(script, output))
    assert result == new_command
| [
{
"point_num": 1,
"id": "no_function_exceeds_5_params",
"question": "Does every function in this file take 5 or fewer parameters (excluding self/cls)?",
"answer": true
},
{
"point_num": 2,
"id": "has_nested_function_def",
"question": "Does this file contain any function defined insid... | 3 | tests/rules/test_brew_uninstall.py | jlandrum/theheck |
class Mancala:
    """Toy game object whose methods log when called (e.g. from a host engine)."""

    def __init__(self):
        """Start with a zeroed state counter."""
        print("(python) Mancala::init")
        self.state = 0

    def get_state(self):
        """Bump the state counter and return a printable description of it."""
        print("(python) Mancala::get_state")
        self.state = self.state + 1
        return "(python) current state: {!r}".format(self.state)

    def play_position(self, value):
        """Overwrite the state with the played position value."""
        print("(python) Mancala::play_position(value={!r})".format(value))
        self.state = value
| [
{
"point_num": 1,
"id": "more_functions_than_classes",
"question": "Does this file define more functions than classes?",
"answer": true
},
{
"point_num": 2,
"id": "all_function_names_snake_case",
"question": "Are all function names in this file written in snake_case?",
"answer": ... | 3 | src/py_mancala/mancala.py | jgsogo/godot-python |
# Copyright 2021 The Narrenschiff Authors
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import unittest
import logging
from narrenschiff.log import NarrenschiffLogger
class NarrenschiffLoggerTestCase(unittest.TestCase):
    """Tests for NarrenschiffLogger verbosity mapping and attribute delegation."""

    def setUp(self):
        self.logger = NarrenschiffLogger()

    def test_log_level(self):
        # Verbosity 5 should set the underlying logger to LOG_LEVEL[5].
        self.logger.set_verbosity(5)
        self.assertEqual(
            self.logger.logger.level,
            self.logger.LOG_LEVEL.get(5)
        )

    def test_wrong_log_level_does_not_change_default(self):
        # 0 is outside the valid verbosity range; the level stays at INFO.
        self.logger.set_verbosity(0)
        self.assertEqual(
            self.logger.logger.level,
            logging.INFO
        )

    def test_narrenschiff_logger_info_translates_into_logger_jnfo_method(self):
        # Attribute access on the wrapper delegates to the stdlib logger.
        logger = logging.getLogger('narrenschiff')
        self.assertEqual(self.logger.info, logger.info)

    def test_getattr_returns_none_for_non_existent_attributes(self):
        self.assertEqual(None, self.logger.dummy)

    def tearDown(self):
        # Reset the shared logger's level so state does not leak across tests.
        self.logger.logger.level = logging.INFO
| [
{
"point_num": 1,
"id": "every_function_has_docstring",
"question": "Does every function in this file have a docstring?",
"answer": false
},
{
"point_num": 2,
"id": "every_function_under_20_lines",
"question": "Is every function in this file shorter than 20 lines?",
"answer": tru... | 3 | tests/test_log.py | petarGitNik/narrenschiff |
from django.shortcuts import render, redirect
from django.contrib import messages
from django.contrib.auth.decorators import login_required
from .forms import UserRegisterForm, UserUpdateForm, ProfileUpdateForm
def register(request):
    """Render the sign-up form; create the account and redirect to login on valid POST."""
    if request.method != "POST":
        form = UserRegisterForm()
    else:
        form = UserRegisterForm(request.POST)
        if form.is_valid():
            form.save()
            messages.success(request, f'Your account has been created. Log In')
            return redirect('login')
    # GET, or POST with validation errors: show the (possibly bound) form.
    return render(request, 'users/register.html', {'form': form})
@login_required()
def profile(request):
    """Show and update the logged-in user's account details."""
    if request.method == 'POST':
        u_form = UserUpdateForm(request.POST, instance=request.user)
        if u_form.is_valid():
            # NOTE(review): the form instance already is request.user, so
            # commit=False + assigning .user looks redundant/suspect — confirm
            # against UserUpdateForm before simplifying.
            u_profile = u_form.save(commit=False)
            u_profile.user = request.user
            u_profile.save()
            messages.success(request, f'Your account has been updated.')
            return redirect('profile')
    else:
        u_form = UserUpdateForm(instance=request.user)
    context = {
        'u_form': u_form
    }
    return render(request, 'users/profile.html', context)
@login_required()
def preferences(request):
    """Show and update the logged-in user's profile preferences."""
    if request.method == 'POST':
        p_form = ProfileUpdateForm(request.POST, instance=request.user.profile)
        if p_form.is_valid():
            p_form.save()
            messages.success(request, f'Your preferences have been updated.')
            return redirect('preferences')
    else:
        p_form = ProfileUpdateForm(instance=request.user.profile)
    # GET, or POST with validation errors: render the (possibly bound) form.
    return render(request, 'users/preferences.html', {'p_form': p_form})
| [
{
"point_num": 1,
"id": "all_params_annotated",
"question": "Does every function parameter in this file have a type annotation (excluding self/cls)?",
"answer": false
},
{
"point_num": 2,
"id": "all_function_names_snake_case",
"question": "Are all function names in this file written ... | 3 | users/views.py | Sammeeey/quick_mart |
def lend_money(debts, person, amount):
    """Record that *person* borrowed *amount*, then print the ledger.

    Appends the amount to the person's list of loans, creating the list on
    first loan.  Replaces the original's type-mixing sentinel (``get(person,
    0)`` compared against a list) with the setdefault idiom.
    """
    debts.setdefault(person, []).append(amount)
    print(debts)
def amount_owed_by(debts, person):
    """Return the total amount one person owes (0 for unknown people)."""
    return sum(debts.get(person, [0]))
def total_amount_owed(debts):
    """Return the grand total owed across every person in the ledger."""
    return sum(amount for amounts in debts.values() for amount in amounts)
{
"point_num": 1,
"id": "has_nested_function_def",
"question": "Does this file contain any function defined inside another function?",
"answer": false
},
{
"point_num": 2,
"id": "all_function_names_snake_case",
"question": "Are all function names in this file written in snake_case?",... | 3 | set3/p3_4.py | Felpezs/IPL_2021 |
import logging
import math
import torch
from torch.optim import Optimizer
from torch.optim.lr_scheduler import _LRScheduler
class WarmupLinearScheduleNonZero(_LRScheduler):
    """ Linear warmup and then linear decay.
    Linearly increases learning rate from 0 to max_lr over `warmup_steps` training steps.
    Linearly decreases learning rate linearly to min_lr over remaining `t_total - warmup_steps` steps.
    """
    def __init__(self, optimizer, warmup_steps, t_total, min_lr=1e-5, last_epoch=-1):
        self.warmup_steps = warmup_steps
        self.t_total = t_total
        self.min_lr = min_lr
        super(WarmupLinearScheduleNonZero, self).__init__(optimizer, last_epoch=last_epoch)

    def get_lr(self):
        """Return the per-group learning rates for the current step."""
        current_step = self.last_epoch
        if current_step < self.warmup_steps:
            # Ramp linearly from 0 to 1 across the warmup window.
            scale = current_step / max(1, self.warmup_steps)
        else:
            # Decay linearly from 1 down to 0 over the remaining steps.
            remaining = self.t_total - current_step
            scale = max(0, remaining / max(1.0, self.t_total - self.warmup_steps))
        # Clamp so the learning rate never falls below min_lr.
        return [max(base_lr * scale, self.min_lr) for base_lr in self.base_lrs]
{
"point_num": 1,
"id": "every_function_under_20_lines",
"question": "Is every function in this file shorter than 20 lines?",
"answer": true
},
{
"point_num": 2,
"id": "no_function_exceeds_5_params",
"question": "Does every function in this file take 5 or fewer parameters (excluding ... | 3 | utils/optim_utils.py | hudaAlamri/visdial-bert |
import braintree
from django.shortcuts import render, redirect, get_object_or_404
from django.conf import settings
from orders.models import Order
gateway = braintree.BraintreeGateway(settings.BRAINTREE_CONF)
def payment_process(request):
    """Charge the session's pending order through Braintree.

    On POST, submits the Drop-in UI nonce for settlement, marking the order
    paid and recording the transaction id on success.  On GET, renders the
    payment form with a freshly generated client token.
    """
    order_id = request.session.get('order_id')
    order = get_object_or_404(Order, id=order_id)
    total_cost = order.get_total_cost()
    if request.method == 'POST':
        # Nonce produced client-side by the Braintree Drop-in UI.
        nonce = request.POST.get('payment_method_nonce', None)
        result = gateway.transaction.sale({
            'amount': f'{total_cost:.2f}',
            'payment_method_nonce': nonce,
            'options': {
                'submit_for_settlement': True
            }
        })
        if result.is_success:
            order.paid = True
            # Keep the gateway transaction id for later reconciliation.
            order.braintree_id = result.transaction.id
            order.save()
            return redirect('payment:done')
        else:
            return redirect('payment:canceled')
    else:
        client_token = gateway.client_token.generate()
        return render(request,
                      'views/process.html',
                      {'order': order,
                       'client_token': client_token})
def payment_done(request):
    """Render the payment-success page."""
    return render(request, 'views/done.html')
def payment_canceled(request):
    """Render the payment-canceled page."""
    return render(request, 'views/canceled.html')
| [
{
"point_num": 1,
"id": "every_function_under_20_lines",
"question": "Is every function in this file shorter than 20 lines?",
"answer": false
},
{
"point_num": 2,
"id": "every_function_has_docstring",
"question": "Does every function in this file have a docstring?",
"answer": fal... | 3 | payment/views.py | kbilak/Personal-E-commerce |
#!/usr/bin/env python
import argparse
from datetime import date
import hashlib
import logging
import sys
import textwrap
from classes.resource import Resource
from classes.dbmanager import ResourceStorage
from classes.reporter import HtmlReport
import helpers
def get_reports_path(path):
    """Return `path` extended with today's month/day, e.g. 'base/7/14/'."""
    today = date.today()
    return f"{path}/{today.month}/{today.day}/"
def check_differences(resources, report):
    """Fetch each resource, record changed ones in `report`, and return them.

    A resource counts as changed when the SHA-256 of its freshly fetched
    content differs from the stored hash; such resources are updated in place
    with the new content.  The report is saved before returning.
    """
    report.add_urls(resources)
    changed_resources = []
    for resource in resources:
        actual_content = helpers.fetch_resource(resource.url)
        if actual_content:  # skip resources whose fetch failed/returned nothing
            if (hashlib.sha256(actual_content).hexdigest() != resource.content.hash):
                report.add(resource, actual_content)
                resource.update(actual_content)
                changed_resources.append(resource)
    report.save()
    return changed_resources
if __name__ == "__main__":
    # CLI entry point: parse args, load the resource DB, and write an HTML
    # diff report under a date-stamped directory.
    parser = argparse.ArgumentParser(
        prog="diffcheck.py",
        formatter_class=argparse.RawDescriptionHelpFormatter,
        description=textwrap.dedent("""\
            Resource Difference Checker
            See https://github.com/bayotop/resdiffcheck for more information.
            """))
    parser.add_argument("db", help="database with resources to check")
    parser.add_argument("report_dir", help="target directory for reports (without trailing /)")
    parser.add_argument("-l", "--logfile", default="process.log", help="default ./process.log")
    args = parser.parse_args()
    logging.basicConfig(filename=args.logfile,level=logging.DEBUG)
    storage = ResourceStorage(args.db)
    if not storage.load():
        sys.exit()
    report = HtmlReport(get_reports_path(args.report_dir), "diff.html")
    changed_resources = check_differences(storage.getall(), report)
    # Persist the refreshed content/hashes only when something changed.
    if changed_resources:
        storage.add_multiple(changed_resources)
{
"point_num": 1,
"id": "every_function_under_20_lines",
"question": "Is every function in this file shorter than 20 lines?",
"answer": true
},
{
"point_num": 2,
"id": "every_function_has_docstring",
"question": "Does every function in this file have a docstring?",
"answer": fals... | 3 | resdiffcheck/diffcheck.py | bayotop/resdiffcheck |
from browser import document, alert
import sys
from pprint import pprint
class redirect:
    """File-like object that appends written text to the page's #output element."""
    def write(self, text):
        # Instances are assigned to sys.stdout/stderr, so write() is called as
        # a bound method: the original's `(text, text2)` parameters were really
        # `(self, text)` under misleading names.
        document["output"].innerHTML += text
# Route print() output and tracebacks into the page instead of the console.
sys.stdout = redirect()
sys.stderr = redirect()
d = document["output"]
d.clear()
d.innerHTML = "Hello"
print("Hello again")
def hello(ev):
    """Click handler: pop up a greeting dialog."""
    alert("Hello !")
document["button1"].bind("click", hello)
{
"point_num": 1,
"id": "has_multiple_inheritance",
"question": "Does any class in this file use multiple inheritance?",
"answer": false
},
{
"point_num": 2,
"id": "all_params_annotated",
"question": "Does every function parameter in this file have a type annotation (excluding self/c... | 3 | data/tests/redirect.py | citizendatascience/ErysNotes |
from cms.plugin_base import CMSPluginBase
from cms.plugin_pool import plugin_pool
from cms.models.pluginmodel import CMSPlugin
from django.utils.translation import ugettext_lazy as _
from .models import FAQSinglePluginModel, FAQCategoryPluginModel, FAQ
class FAQTOCPlugin(CMSPluginBase):
    """Renders a table of contents built client-side from FAQs on the page."""
    model = CMSPlugin
    name = _('FAQ Table of Contents (auto-generated from FAQs on page)')
    render_template = 'faq/FAQ_TOC.html'
    cache = True
    module = _('FAQs')
class FAQAllPlugin(CMSPluginBase):
    """Renders every published (non-draft) FAQ."""
    model = CMSPlugin
    name = _('List of All FAQs')
    render_template = 'faq/FAQs.html'
    cache = True
    module = _('FAQs')

    def render(self, context, instance, placeholder):
        # Only published entries; drafts stay hidden.
        listing = FAQ.objects.filter(draft=False)
        context.update({
            'faq_list': listing,
        })
        return context
class FAQCategoryPlugin(CMSPluginBase):
    """Renders the published FAQs belonging to the category configured on the plugin."""
    model = FAQCategoryPluginModel
    name = _('List of FAQs by Category')
    render_template = 'faq/FAQs.html'
    cache = True
    module = _('FAQs')

    def render(self, context, instance, placeholder):
        # Restrict to the plugin instance's category, published entries only.
        listing = FAQ.objects.filter(category=instance.category,draft=False)
        context.update({
            'faq_list': listing,
        })
        return context
class SingleQuestionPlugin(CMSPluginBase):
    """Renders one FAQ item chosen on the plugin instance."""
    model = FAQSinglePluginModel
    name = _('Individual FAQ Item')
    render_template = 'faq/single_FAQ.html'
    cache = True
    module = _('FAQs')

    def render(self, context, instance, placeholder):
        # Wrap the single question in a list so the shared template can iterate.
        context.update({
            'faq_list': [instance.question,],
        })
        return context
# Make all FAQ plugins available in the django CMS plugin picker.
plugin_pool.register_plugin(FAQTOCPlugin)
plugin_pool.register_plugin(FAQAllPlugin)
plugin_pool.register_plugin(FAQCategoryPlugin)
plugin_pool.register_plugin(SingleQuestionPlugin)
| [
{
"point_num": 1,
"id": "all_params_annotated",
"question": "Does every function parameter in this file have a type annotation (excluding self/cls)?",
"answer": false
},
{
"point_num": 2,
"id": "every_function_under_20_lines",
"question": "Is every function in this file shorter than ... | 3 | danceschool/faq/cms_plugins.py | benjwrdill/django-danceschool |
# coding=utf-8
r"""
This code was generated by
\ / _ _ _| _ _
| (_)\/(_)(_|\/| |(/_ v1.0.0
/ /
"""
from tests import IntegrationTestCase
from tests.holodeck import Request
from twilio.base.exceptions import TwilioException
from twilio.http.response import Response
class ExportTestCase(IntegrationTestCase):
    """Holodeck-mocked integration tests for the BulkExports Export resource."""

    def test_fetch_request(self):
        # A 500 mock forces the client to raise; we then assert the request
        # that was actually issued.
        self.holodeck.mock(Response(500, ''))
        with self.assertRaises(TwilioException):
            self.client.bulkexports.v1.exports("resource_type").fetch()
        self.holodeck.assert_has_request(Request(
            'get',
            'https://bulkexports.twilio.com/v1/Exports/resource_type',
        ))

    def test_fetch_response(self):
        # Mock a successful payload and verify the client parses it.
        self.holodeck.mock(Response(
            200,
            '''
            {
                "resource_type": "Calls",
                "url": "https://bulkexports.twilio.com/v1/Exports/Calls",
                "links": {
                    "days": "https://bulkexports.twilio.com/v1/Exports/Calls/Days"
                }
            }
            '''
        ))
        actual = self.client.bulkexports.v1.exports("resource_type").fetch()
        self.assertIsNotNone(actual)
| [
{
"point_num": 1,
"id": "more_functions_than_classes",
"question": "Does this file define more functions than classes?",
"answer": true
},
{
"point_num": 2,
"id": "every_function_under_20_lines",
"question": "Is every function in this file shorter than 20 lines?",
"answer": true
... | 3 | tests/integration/bulkexports/v1/test_export.py | ashish-s/twilio-python |
import re
import os
import logging
import locale
import json
import datetime, time
from django import forms
from django.conf import settings
from django.db.models import CharField
from django.core.exceptions import ValidationError
from django.forms.utils import flatatt
from django.utils.safestring import mark_safe
logger = logging.getLogger(__name__)
class BusinessHoursField(forms.CharField):
    """CharField whose default widget is the pretty-JSON BusinessHoursInput."""
    def __init__(self, *args, **kwargs):
        # Respect an explicitly supplied widget; otherwise install ours and
        # consume the custom `expanded` kwarg so CharField never sees it.
        # NOTE(review): if a caller passes both `widget` and `expanded`,
        # `expanded` leaks through to CharField — confirm intended usage.
        if 'widget' not in kwargs:
            kwargs['widget'] = BusinessHoursInput(
                expanded=kwargs.pop('expanded', False),
            )
        super().__init__(*args, **kwargs)
class BusinessHoursInput(forms.widgets.TextInput):
    """TextInput that renders its JSON value as a browsable PrettyJSON tree."""
    class Media:
        css = {
            'all': (settings.STATIC_URL + 'contrib/vendors/pretty-json/pretty-json.css',)
        }
        js = (settings.STATIC_URL + 'contrib/packages/json.js',)

    def __init__(self, *args, **kwargs):
        # `expanded` controls whether the JSON tree starts fully expanded.
        self.expanded = kwargs.pop('expanded', False)
        super().__init__(*args, **kwargs)

    def render(self, name, value, attrs=None, **kwargs):
        """Return the PrettyJSON HTML/JS snippet for this widget."""
        # The original used a mutable default (`attrs={}`) and then mutated it
        # below, sharing state across calls; normalize None to a fresh dict.
        attrs = {} if attrs is None else attrs
        if 'id' not in attrs:
            attrs['id'] = "id_%s" % name
        obj = json.dumps(value, ensure_ascii=False)
        return '''<div id="%(id)s" ></div><script type="text/javascript">
            var node = new PrettyJSON.view.Node({
                el: $('#%(id)s'),
                data: JSON.parse(%(obj)s),
            });
            %(expandAll)s
        </script>
        ''' % {
            'id' : attrs['id'],
            'obj': obj,
            'expandAll': 'node.expandAll();' if self.expanded else '',
        }
| [
{
"point_num": 1,
"id": "has_multiple_inheritance",
"question": "Does any class in this file use multiple inheritance?",
"answer": false
},
{
"point_num": 2,
"id": "more_functions_than_classes",
"question": "Does this file define more functions than classes?",
"answer": false
}... | 3 | workon/forms/business_hours.py | devittek/django-workon |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""Tests for `jr_tools` package."""
import unittest
from click.testing import CliRunner
from jr_tools import cli
class JasperReportsToolsTestCase(unittest.TestCase):
    """Tests for `jr_tools` package."""

    def setUp(self):
        """Set up test fixtures, if any."""

    def tearDown(self):
        """Tear down test fixtures, if any."""

    def test_000_something(self):
        """Test something."""

    def test_command_line_interface(self):
        """Test the CLI."""
        runner = CliRunner()
        # Bare invocation should succeed and list the run_report command.
        result = runner.invoke(cli.main)
        assert result.exit_code == 0
        assert 'run_report' in result.output
        # The run_report subcommand's --help should also succeed.
        help_result = runner.invoke(cli.main, ['run_report', '--help'])
        assert help_result.exit_code == 0
| [
{
"point_num": 1,
"id": "all_function_names_snake_case",
"question": "Are all function names in this file written in snake_case?",
"answer": false
},
{
"point_num": 2,
"id": "every_function_has_docstring",
"question": "Does every function in this file have a docstring?",
"answer"... | 3 | tests/test_jr_tools.py | erickgnavar/jr_tools |
from functools import reduce
import keyfunctions.globals as consts
def create_key(values, depth):
    """
    This function returns a Z-order key for any number of dimensions.
    :param values: a list of floats - one of dimension - each of them with value between 0 and 1 [0,1)
    :param depth: an strictly positive int
    :return: a string of depth size
    """
    # Scale each float in [0,1) to a depth-bit integer.
    mul = 1 << depth
    val_casted = [int(value * mul) for value in values]
    key = ""
    for i in range(0, depth):
        # Extract bit i of every dimension and OR them into one value, with
        # dimension d's bit placed at position d (bit interleaving).
        mask = 1 << i
        last = reduce(
            lambda x, y: x | y,
            [((value & mask) >> i) << dimension for dimension, value in enumerate(val_casted)])
        # We want to use printable chars: from char '!' to '~' (we skipped space).
        # Low-order bits are prepended, so the most significant bits end up first.
        key = chr(last + consts.PRINTABLE_OFFSET) + key
    return key
def create_element_rand(element_id):
    """
    This function simply returns a 32 bit hash of the element id.
    The result value should be used a random priority.
    :param element_id: The element unique identifier
    :return: an random integer
    """
    import mmh3
    import struct
    if isinstance(element_id, int):
        try:
            obj = struct.pack('i', element_id)
        except struct.error:
            # Values outside int32 range: pack as 64-bit, mirroring the dead
            # Python-2 `long` branch (`long` is a NameError on Python 3).
            obj = struct.pack('q', element_id)
    elif isinstance(element_id, str):
        obj = element_id
    else:
        raise TypeError('Unknown type: pack it yourself with struct')
    return int(mmh3.hash(obj))
| [
{
"point_num": 1,
"id": "every_function_has_docstring",
"question": "Does every function in this file have a docstring?",
"answer": true
},
{
"point_num": 2,
"id": "all_params_annotated",
"question": "Does every function parameter in this file have a type annotation (excluding self/c... | 3 | keyfunctions/core.py | cugni/keyfunctions-python |
from abc import ABC
from dataclasses import dataclass, asdict
from typing import Set, Dict
@dataclass
class Mixin(ABC):
    """Abstract marker base for dataclass mixins."""
    pass


@dataclass
class AsDictMixin(Mixin):
    """Mixin adding dict serialization with explicit field exclusion."""

    def as_dict(self, exclude: Set["str"]) -> Dict:
        """Serialize to a dict, removing each key named in *exclude*.

        Raises KeyError if an excluded name is not a field of the dataclass.
        """
        data = asdict(self)
        for field_name in exclude:
            del data[field_name]
        return data
| [
{
"point_num": 1,
"id": "every_class_has_docstring",
"question": "Does every class in this file have a docstring?",
"answer": false
},
{
"point_num": 2,
"id": "more_functions_than_classes",
"question": "Does this file define more functions than classes?",
"answer": false
},
{... | 3 | memrise/core/mixins.py | kolesnikov-bn/django-memrise-scraper |
### Cooperation between multiple functions
## Variable scope
rent = 3000
variable_cost = 0
def cost():
    """Prompt for this month's utilities and food spend and record the total.

    Writes the result into the module-global ``variable_cost`` so that
    ``sum_cost`` can use it.
    """
    global variable_cost  # write the module-level variable, not a local
    utilities = int(input('请输入本月的水电费用'))
    food_cost = int(input('请输入本月的食材费用'))
    variable_cost = utilities + food_cost
    print('本月的变动成本费用是' + str(variable_cost))
def sum_cost():
    """Print the month's total cost: rent plus the variable cost set by cost()."""
    total = rent + variable_cost  # renamed from `sum`, which shadowed the builtin
    print('本月的总成本是' + str(total))
# Collect this month's variable cost first, then report the total.
cost()
sum_cost()
| [
{
"point_num": 1,
"id": "every_function_has_docstring",
"question": "Does every function in this file have a docstring?",
"answer": false
},
{
"point_num": 2,
"id": "every_function_under_20_lines",
"question": "Is every function in this file shorter than 20 lines?",
"answer": tru... | 3 | code/day09/demo04.py | picktsh/python |
from http import HTTPStatus
from api import app, database
from flask import jsonify, request, abort, make_response
def check_and_insert(company):
    """
    Inserts the supplied company record if it doesn't already exist.

    Aborts with HTTP 409 (Conflict) when a document with the same ``_id``
    is already present in the companies collection.
    """
    # Get the companies collection from the database
    company_collection = database()
    # Check if a company document already exists with the id provided and throw HTTP error if so
    company_in_db = company_collection.find_one({'_id': company['_id']}, {'_id': 0})
    if company_in_db:
        conflict_msg = f"Company with id '{company['_id']}' already exists in the database."
        abort(make_response(jsonify(message=conflict_msg), HTTPStatus.CONFLICT))
    # Insert the posted company json body as a document in the database
    company_collection.insert_one(company)
@app.route('/companies', methods=['POST'])
def create_company():
    """
    Creates a new company.

    Accepts either a single company object (detected by a top-level ``_id``)
    or a JSON array of company objects; each is rejected with 409 when its
    ``_id`` already exists.
    """
    # Get the posted data
    companies_json = request.get_json()
    # Check if the json only contains a single company document
    if '_id' in companies_json:
        check_and_insert(companies_json)
    # Iterate and insert each company document if there are multiple documents
    else:
        for company in companies_json:
            check_and_insert(company)
    # Return the created company as JSON
    return jsonify(companies_json), HTTPStatus.CREATED
| [
{
"point_num": 1,
"id": "every_function_under_20_lines",
"question": "Is every function in this file shorter than 20 lines?",
"answer": true
},
{
"point_num": 2,
"id": "every_function_has_docstring",
"question": "Does every function in this file have a docstring?",
"answer": true... | 3 | api/companies/create_company.py | taha27/restipy |
import os
import pickle
import time
_KEEPTIME = 300 # 5 minutes
class CacheItem(object):
    """A single cached HTTP response: its ETag, body, and cache timestamp."""

    def __init__(self, etag, content, cached_at):
        # Store the response metadata exactly as supplied by the caller.
        self.etag = etag
        self.content = content
        self.cached_at = cached_at
class URLCache(object):
    """
    URLCache is a simple pickle cache, intended to be used as an HTTP
    response cache.
    """

    def __init__(self, path=None):
        """
        Initialize a URLCache, loading entries from @path, if provided.
        """
        # NOTE(review): callers appear to always pass a path; with the
        # default of None, os.path.isfile(None) raises TypeError — confirm.
        self._path = path
        self._cache = {}
        if os.path.isfile(self._path):
            with open(self._path, "r+b") as f:
                try:
                    self._cache = pickle.load(f)
                except EOFError:
                    # Empty or truncated cache file: start fresh.
                    self._cache = {}
        if not os.path.exists(os.path.dirname(self._path)):
            os.makedirs(os.path.dirname(self._path))

    def get(self, url):
        """
        Return the CacheItem for @url, or None if missing or expired.
        Expired entries are evicted as a side effect.
        """
        try:
            item = self._cache[url]
            if item.cached_at + _KEEPTIME <= time.time():
                # BUG FIX: the original `del (self._cache, url)` deleted the
                # _cache attribute and the local `url` name instead of
                # evicting the stale entry from the dict.
                del self._cache[url]
                return None
            return item
        except KeyError:
            return None

    def set(self, url, etag, content):
        """Store @content for @url, stamped with the current time."""
        self._cache[url] = CacheItem(etag, content, time.time())

    def save(self):
        """Persist the in-memory cache to the backing pickle file."""
        with open(self._path, "w+b") as f:
            pickle.dump(self._cache, f)
| [
{
"point_num": 1,
"id": "no_function_exceeds_5_params",
"question": "Does every function in this file take 5 or fewer parameters (excluding self/cls)?",
"answer": true
},
{
"point_num": 2,
"id": "has_multiple_inheritance",
"question": "Does any class in this file use multiple inherit... | 3 | insights/client/url_cache.py | dpensi/insights-core |
from brownie import *
from helpers.constants import AddressZero
from helpers.registry import registry
from dotmap import DotMap
def connect_gnosis_safe(address):
    # Wrap an already-deployed GnosisSafe contract at `address` using the
    # ABI stored in the registry artifacts (no deployment is performed).
    return Contract.from_abi(
        "GnosisSafe", address, registry.gnosis_safe.artifacts.GnosisSafe["abi"],
    )
class GnosisSafeSystem:
    """Thin wrapper around the GnosisSafe master copy and proxy factory."""

    def __init__(self):
        # Bind the deployed master copy and proxy factory contracts using
        # addresses and ABIs from the registry.
        self.masterCopy = Contract.from_abi(
            "GnosisSafe",
            web3.toChecksumAddress(registry.gnosis_safe.addresses.masterCopy),
            registry.gnosis_safe.artifacts.GnosisSafe["abi"],
        )
        self.proxyFactory = Contract.from_abi(
            "ProxyFactory",
            web3.toChecksumAddress(registry.gnosis_safe.addresses.proxyFactory),
            registry.gnosis_safe.artifacts.ProxyFactory["abi"],
        )

    def deployGnosisSafe(self, params, signer):
        # Deploy a new safe proxy and return it wrapped with the safe ABI.
        # `params` carries the GnosisSafe.setup(...) arguments.
        encodedParams = self.masterCopy.setup.encode_input(
            params.owners,
            params.threshold,
            params.to,
            params.data,
            params.fallbackHandler,
            params.paymentToken,
            params.payment,
            params.paymentReceiver,
        )
        tx = self.proxyFactory.createProxy(
            self.masterCopy, encodedParams, {"from": signer}
        )
        # The new proxy address is emitted in the ProxyCreation event.
        return Contract.from_abi(
            "GnosisSafe",
            tx.events["ProxyCreation"][0]["proxy"],
            registry.gnosis_safe.artifacts.GnosisSafe["abi"],
        )
| [
{
"point_num": 1,
"id": "all_params_annotated",
"question": "Does every function parameter in this file have a type annotation (excluding self/cls)?",
"answer": false
},
{
"point_num": 2,
"id": "no_function_exceeds_5_params",
"question": "Does every function in this file take 5 or fe... | 3 | scripts/systems/gnosis_safe_system.py | EchoDao-BSC/badger-system |
def mergesort(items):
    """Return a list with the elements of `items` in ascending order."""
    if len(items) <= 1:
        # Zero or one elements: already sorted.
        return items
    pivot = len(items) // 2
    # Sort each half independently, then merge the sorted halves.
    sorted_left = mergesort(items[:pivot])
    sorted_right = mergesort(items[pivot:])
    return merge(sorted_left, sorted_right)


def merge(left, right):
    """Merge two sorted lists into one sorted list (ties favor `left`)."""
    merged = []
    i = j = 0
    while i < len(left) and j < len(right):
        # Take from the left side on ties to keep the merge stable.
        if left[i] <= right[j]:
            merged.append(left[i])
            i += 1
        else:
            merged.append(right[j])
            j += 1
    # One side is exhausted; append the remainder of the other.
    merged.extend(left[i:])
    merged.extend(right[j:])
    return merged
# Quick manual checks: print each input list alongside its sorted result.
test_list_1 = [8, 3, 1, 7, 0, 10, 2]
test_list_2 = [1, 0]
test_list_3 = [97, 98, 99]
print('{} to {}'.format(test_list_1, mergesort(test_list_1)))
print('{} to {}'.format(test_list_2, mergesort(test_list_2)))
print('{} to {}'.format(test_list_3, mergesort(test_list_3)))
{
"point_num": 1,
"id": "every_function_has_docstring",
"question": "Does every function in this file have a docstring?",
"answer": false
},
{
"point_num": 2,
"id": "has_nested_function_def",
"question": "Does this file contain any function defined inside another function?",
"ans... | 3 | Searching and Sorting/Merge sort/Python/merge_sort.py | Subrato-oid/cs-algorithms |
#!/bin/env/python3
# -*- encoding: utf-8 -*-
import os
__version__ = '0.1.0dev'
__license__ = 'BSD3'
__author__ = 'Kyle B. Westfall'
__maintainer__ = 'Kyle B. Westfall'
__email__ = 'westfall@ucolick.org'
__copyright__ = '(c) 2018, Kyle B. Westfall'
def enyo_source_dir():
    """Return the root path to the DAP source directory."""
    import pkg_resources
    data_dir = pkg_resources.resource_filename('enyo', 'data')
    # The package root is the parent of the bundled data directory.
    root, _ = os.path.split(data_dir)
    return root
os.environ['ENYO_DIR'] = enyo_source_dir()
def short_warning(message, category, filename, lineno, file=None, line=None):
    """
    Return the format for a short warning message.
    """
    # Only the category name and the message are shown; the remaining
    # arguments are accepted for warnings-API compatibility and ignored.
    return ' {}: {}\n'.format(category.__name__, message)
import warnings
warnings.formatwarning = short_warning
| [
{
"point_num": 1,
"id": "has_nested_function_def",
"question": "Does this file contain any function defined inside another function?",
"answer": false
},
{
"point_num": 2,
"id": "every_function_has_docstring",
"question": "Does every function in this file have a docstring?",
"ans... | 3 | enyo/__init__.py | Keck-FOBOS/enyo |
import torch
import torch.nn as nn
import torch.nn.init as init
class Net(nn.Module):
    """Sub-pixel CNN for single-channel super-resolution (ESPCN-style)."""

    def __init__(self, upscale_factor):
        super(Net, self).__init__()
        # Feature extraction: 1 -> 64 -> 64 -> 32 channels, then a
        # projection to upscale_factor**2 channels for pixel shuffling.
        self.relu = nn.ReLU()
        self.conv1 = nn.Conv2d(1, 64, (5, 5), (1, 1), (2, 2))
        self.conv2 = nn.Conv2d(64, 64, (3, 3), (1, 1), (1, 1))
        self.conv3 = nn.Conv2d(64, 32, (3, 3), (1, 1), (1, 1))
        self.conv4 = nn.Conv2d(32, upscale_factor ** 2, (3, 3), (1, 1), (1, 1))
        self.pixel_shuffle = nn.PixelShuffle(upscale_factor)
        self._initialize_weights()

    def forward(self, x):
        # Three ReLU conv stages followed by sub-pixel upsampling.
        out = self.relu(self.conv1(x))
        out = self.relu(self.conv2(out))
        out = self.relu(self.conv3(out))
        return self.pixel_shuffle(self.conv4(out))

    def _initialize_weights(self):
        # Orthogonal init with ReLU gain on the hidden convs; the output
        # conv uses the default gain.
        for conv in (self.conv1, self.conv2, self.conv3):
            init.orthogonal_(conv.weight, init.calculate_gain('relu'))
        init.orthogonal_(self.conv4.weight)
| [
{
"point_num": 1,
"id": "all_function_names_snake_case",
"question": "Are all function names in this file written in snake_case?",
"answer": true
},
{
"point_num": 2,
"id": "has_multiple_inheritance",
"question": "Does any class in this file use multiple inheritance?",
"answer": ... | 3 | super_resolution/model.py | janilbols-w/examples |
import shutil
import os
import string
class ArrangeScripts:
    """Organize the entries of a folder into alphabetical bucket folders."""

    def __init__(self, path_to_folder):
        # Bucket folder names and the letter ranges they cover.
        self.folders = ['a_e', 'f_j', 'k_o', 'p_t', 'u_z']
        ranges = [('abcde', 'a_e'), ('fghij', 'f_j'), ('klmno', 'k_o'),
                  ('pqrst', 'p_t'), ('uvwxyz', 'u_z')]
        # Map every lowercase letter to its bucket folder name.
        self.folder_mapping = {letter: bucket
                               for letters, bucket in ranges
                               for letter in letters}
        self.path_to_folder = path_to_folder

    def create_folders(self):
        """Create any bucket folders that do not exist yet."""
        for bucket in self.folders:
            bucket_path = os.path.join(self.path_to_folder, bucket)
            if not os.path.isdir(bucket_path):
                os.mkdir(bucket_path)

    def organize_folder(self):
        """Move every entry into the bucket matching its first letter."""
        self.create_folders()
        for entry in os.listdir(self.path_to_folder):
            if entry in self.folders:
                # Never move the bucket folders themselves.
                continue
            src = os.path.join(self.path_to_folder, entry)
            bucket = self.folder_mapping[entry.lower()[0]]
            dst = os.path.join(self.path_to_folder, bucket)
            shutil.move(src, dst, copy_function=shutil.copytree)
def process_folders():
    """Prompt for a folder path and organize its contents into buckets."""
    # get folder path
    user_input = input('Enter path to folder which needs to be organized: ')
    arrange = ArrangeScripts(user_input)
    arrange.organize_folder()
if __name__ == "__main__":
try:
process_folders()
except Exception as e:
print(e.__class__, "occurred.")
| [
{
"point_num": 1,
"id": "every_function_has_docstring",
"question": "Does every function in this file have a docstring?",
"answer": false
},
{
"point_num": 2,
"id": "has_nested_function_def",
"question": "Does this file contain any function defined inside another function?",
"ans... | 3 | folders_arranger/folders_arranger.py | devded/Automation-scripts |
"""
This Python script is designed to perform unit testing of Wordhoard's
Synonyms module.
"""
__author__ = 'John Bumgarner'
__date__ = 'September 20, 2020'
__status__ = 'Quality Assurance'
__license__ = 'MIT'
__copyright__ = "Copyright (C) 2021 John Bumgarner"
import unittest
import warnings
from wordhoard import Synonyms
class TestSynonymFunction(unittest.TestCase):
    """Unit tests for the wordhoard Synonyms module (requires network access)."""

    def test_synonym_always_pass(self):
        """
        This test is designed to pass, because the word "good" has known synonyms
        and the default output format is a list
        :return:
        """
        # this warning filter suppresses ResourceWarnings related to unclosed sockets
        warnings.filterwarnings(action="ignore", category=ResourceWarning)
        self.assertIsInstance(Synonyms('good').find_synonyms(), list)

    def test_synonym_always_fail(self):
        """
        This test is designed to fail, because the word "good" has known synonyms
        :return:
        """
        # this warning filter suppresses ResourceWarnings related to unclosed sockets
        warnings.filterwarnings(action="ignore", category=ResourceWarning)
        self.assertIsNone(Synonyms('good').find_synonyms())
unittest.main()
| [
{
"point_num": 1,
"id": "more_functions_than_classes",
"question": "Does this file define more functions than classes?",
"answer": true
},
{
"point_num": 2,
"id": "every_function_has_docstring",
"question": "Does every function in this file have a docstring?",
"answer": true
},... | 3 | test cases/unittest_synonym_module.py | johnbumgarner/wordhoard |
# Definition for singly-linked list.
class ListNode:
    """A node of a singly-linked list."""

    def __init__(self, x):
        self.val = x
        self.next = None


class Solution:
    def swapPairs(self, head: ListNode) -> ListNode:
        """Swap every two adjacent nodes and return the new head (LC 24)."""
        if not head or not head.next:
            # Zero or one node: nothing to swap.
            return head
        dummy = ListNode(0)
        dummy.next = head
        prev, cur = dummy, head
        while cur and cur.next:
            # Relink prev -> cur.next -> cur, detaching cur from its pair.
            prev.next = cur.next
            cur.next = prev.next.next
            prev.next.next = cur
            # Advance past the swapped pair.
            prev, cur = cur, cur.next
        return dummy.next
{
"point_num": 1,
"id": "every_class_has_docstring",
"question": "Does every class in this file have a docstring?",
"answer": false
},
{
"point_num": 2,
"id": "has_multiple_inheritance",
"question": "Does any class in this file use multiple inheritance?",
"answer": false
},
{... | 3 | 24.py | RafaelHuang87/Leet-Code-Practice |
from .base_element import BaseElement
from ....utilities.datamodel import Element
class TimeFrame(BaseElement):
    """Read-only wrapper exposing the timing fields stored on an element."""

    def __init__(self, element: Element):
        super().__init__(element)

    @property
    def start(self):
        # 'start' value as stored on the underlying element.
        return self._element['start']

    @property
    def duration(self):
        # 'duration' value as stored on the underlying element.
        return self._element['duration']

    @property
    def offset(self):
        # 'offset' value as stored on the underlying element.
        return self._element['offset']

    @property
    def scale(self):
        # 'scale' value as stored on the underlying element.
        return self._element['scale']
| [
{
"point_num": 1,
"id": "has_multiple_inheritance",
"question": "Does any class in this file use multiple inheritance?",
"answer": false
},
{
"point_num": 2,
"id": "every_function_has_docstring",
"question": "Does every function in this file have a docstring?",
"answer": false
... | 3 | source1/dmx/sfm/time_frame.py | half5life/SourceIO |
'''
Check that various monitors work correctly.
'''
from brian2 import *
from brian2.tests.features import FeatureTest, InaccuracyError
class SpikeMonitorTest(FeatureTest):
    """Feature test: record spikes from a current-driven LIF population."""

    category = "Monitors"
    name = "SpikeMonitor"
    tags = ["NeuronGroup", "run",
            "SpikeMonitor"]
    def run(self):
        # 100 LIF neurons with drive currents ramping from 0 to 2; only
        # neurons driven above threshold will spike.
        N = 100
        tau = 10*ms
        eqs = '''
        dv/dt = (I-v)/tau : 1
        I : 1
        '''
        self.G = G = NeuronGroup(N, eqs, threshold='v>1', reset='v=0')
        G.I = linspace(0, 2, N)
        self.M = M = SpikeMonitor(G)
        run(100*ms)
    def results(self):
        # Spike indices and times recorded during the run.
        return {'i': self.M.i[:], 't': self.M.t[:]}
    compare = FeatureTest.compare_arrays


class StateMonitorTest(FeatureTest):
    """Feature test: record the membrane variable of every neuron."""

    category = "Monitors"
    name = "StateMonitor"
    tags = ["NeuronGroup", "run",
            "StateMonitor"]
    def run(self):
        # 10 LIF neurons, all driven above threshold (I in [1.1, 2]).
        N = 10
        tau = 10*ms
        eqs = '''
        dv/dt = (I-v)/tau : 1
        I : 1
        '''
        self.G = G = NeuronGroup(N, eqs, threshold='v>1', reset='v=0.1')
        G.v = 0.1
        G.I = linspace(1.1, 2, N)
        self.M = M = StateMonitor(G, 'v', record=True)
        run(100*ms)
    def results(self):
        # Full v-trace for every recorded neuron.
        return self.M.v[:]
    compare = FeatureTest.compare_arrays
| [
{
"point_num": 1,
"id": "every_function_under_20_lines",
"question": "Is every function in this file shorter than 20 lines?",
"answer": true
},
{
"point_num": 2,
"id": "all_return_types_annotated",
"question": "Does every function in this file have a return type annotation?",
"an... | 3 | brian2/tests/features/monitors.py | CharleeSF/brian2 |
from flask import Flask, request
from hbase_manager import HBaseRecord
import json
from flask_cors import CORS
app = Flask(__name__)
CORS(app)
@app.route('/')
@app.route('/test')
@app.route('/hello')
def hello():
    """Trivial health-check endpoint served on several aliases."""
    greeting = 'Hello World!'
    return greeting
@app.route('/get_places', methods=['GET', 'POST'])
def get_places():
    """Return the places visited by entity `eid` between two timestamps.

    Accepts the parameters eid/start_time/end_time from the query string
    (GET), a JSON body, or form data (POST); responds with a JSON list.
    """
    if request.method == 'GET':
        data = request.args
    else:
        # POST: parameters arrive as JSON or as form-encoded fields.
        if request.headers.get('Content-Type') == 'application/json':
            data = json.loads(request.data.decode())
        else:
            data = request.form
    eid = data.get('eid')
    start_time = data.get('start_time')
    end_time = data.get('end_time')
    if not eid or not start_time or not end_time:
        # Missing parameter(s): 400 with a (Chinese) "bad parameters" message.
        return '参数格式错误', 400
    # Query the HBase-backed store configured on the app.
    records = app.config.get('hbase').get_places(eid, start_time, end_time)
    if not records:
        return '[]', 200, {'Content-Type': 'application/json'}
    result = []
    for record in records:
        # Row key format: "<eid>##<timestamp>##<place_id>".
        eid, timestamp, place_id = record[0].split('##')
        result.append({
            'eid': eid,
            'timestamp': timestamp,
            'place_id': place_id,
            'place_name': record[1],
            'longitude': record[2],
            'latitude': record[3]
        })
    return json.dumps(result), 200, {'Content-Type': 'application/json'}
if __name__ == '__main__':
app.config['hbase'] = HBaseRecord('http://cc-keybrl-node0:2048')
app.run(host='0.0.0.0', port=8080, debug=False)
| [
{
"point_num": 1,
"id": "every_function_has_docstring",
"question": "Does every function in this file have a docstring?",
"answer": false
},
{
"point_num": 2,
"id": "has_nested_function_def",
"question": "Does this file contain any function defined inside another function?",
"ans... | 3 | projects/g3h2-cc/src/get_places_web_api.py | keybrl/xdu-coursework |
#
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
#
from typing import Text, List
from ai_flow.graph.ai_node import AINode
from ai_flow.common.properties import ExecuteProperties
from ai_flow.executor.executor import BaseExecutor
from ai_flow.graph.channel import Channel, NoneChannel
class ExecutableNode(AINode):
    """AINode that carries an executor and exposes its output channels."""

    def __init__(self,
                 executor: BaseExecutor,
                 name: Text = None,
                 instance_id=None,
                 properties: ExecuteProperties = None,
                 output_num=1
                 ) -> None:
        """Create the node and attach the executor that will run it."""
        super().__init__(properties=properties,
                         name=name,
                         instance_id=instance_id,
                         output_num=output_num)
        self.executor = executor

    def outputs(self) -> List[Channel]:
        """Return one Channel per output port, or a NoneChannel if none."""
        if self.output_num > 0:
            result = []
            for i in range(self.output_num):
                result.append(Channel(node_id=self.instance_id, port=i))
            return result
        else:
            # No outputs declared: expose a single NoneChannel placeholder.
            return [NoneChannel(self.instance_id)]
| [
{
"point_num": 1,
"id": "no_function_exceeds_5_params",
"question": "Does every function in this file take 5 or fewer parameters (excluding self/cls)?",
"answer": true
},
{
"point_num": 2,
"id": "every_function_under_20_lines",
"question": "Is every function in this file shorter than... | 3 | flink-ai-flow/ai_flow/graph/ai_nodes/executable.py | MarvinMiao/flink-ai-extended |
import unittest
import pandas as pd
from scripts import FilePaths
from scripts import data_factory as factory
class TestDataFactory(unittest.TestCase):
    """Checks that data_factory loads the same records from every format."""

    def setUp(self):
        # Reference data: 100 random US patents loaded from the pickle.
        self.__df = pd.read_pickle(FilePaths.us_patents_random_100_pickle_name)
        self.__df = self.__df.reset_index()

    def test_reads_xls(self):
        df = factory.get('tests/data/USPTO-random-100.xls')
        self.assertListEqual(list(self.__df['abstract']), list(df['abstract']))

    def test_reads_xlsx(self):
        df = factory.get('tests/data/USPTO-random-100.xlsx')
        self.assertListEqual(list(self.__df['abstract']), list(df['abstract']))

    @unittest.skip('Unicode char fails under windows; see task #172 bug')
    def test_reads_csv(self):
        df = factory.get('tests/data/USPTO-random-100.csv')
        self.assertListEqual(list(self.__df['abstract']), list(df['abstract']))

    def test_reads_pickles(self):
        df = factory.get('tests/data/USPTO-random-100.pkl.bz2')
        # FIX: assertEquals is a deprecated alias (removed in Python 3.12);
        # use assertEqual instead.
        self.assertEqual(len(df['abstract']), 100)
if __name__ == '__main__':
unittest.main()
| [
{
"point_num": 1,
"id": "every_function_has_docstring",
"question": "Does every function in this file have a docstring?",
"answer": false
},
{
"point_num": 2,
"id": "more_functions_than_classes",
"question": "Does this file define more functions than classes?",
"answer": true
}... | 3 | tests/test_data_factory.py | ExesiosPB/libm |
import smtplib
from django.core.mail.backends.locmem import EmailBackend as LocMemEmailBackend
class FakeConnection(object):
    """Stand-in connection object that refuses to be pickled."""

    def __getstate__(self):
        # Pickling a live connection makes no sense; fail loudly.
        raise TypeError("Connections can't be pickled")
class TestMailerEmailBackend(object):
    """In-memory email backend that records sent messages in `outbox`."""

    # Class-level outbox shared by all instances so tests can inspect it.
    outbox = []

    def __init__(self, **kwargs):
        self.connection = FakeConnection()
        # Each new backend starts with the shared outbox emptied in place.
        del self.outbox[:]

    def open(self):
        """No-op: there is no real connection to open."""
        pass

    def close(self):
        """No-op: there is no real connection to close."""
        pass

    def send_messages(self, email_messages):
        # Tag each message so tests can tell this backend handled it.
        for message in email_messages:
            message.extra_headers['X-Sent-By'] = 'django-mailer-tests'
        self.outbox.extend(email_messages)
class FailingMailerEmailBackend(LocMemEmailBackend):
    """Backend that always fails to send, for exercising error paths."""

    def send_messages(self, email_messages):
        # Simulate an SMTP sender-refused error on every send attempt.
        raise smtplib.SMTPSenderRefused(1, "foo", "foo@foo.com")
| [
{
"point_num": 1,
"id": "has_multiple_inheritance",
"question": "Does any class in this file use multiple inheritance?",
"answer": false
},
{
"point_num": 2,
"id": "every_function_has_docstring",
"question": "Does every function in this file have a docstring?",
"answer": false
... | 3 | tests/__init__.py | jaap3/django-mailer |
class Person:
    """A person with a first and last name."""

    def __init__(self, fname, lname):
        self.firstname = fname  # attributes
        self.lastname = lname

    def printname(self):
        """Print the person's full name."""
        print(self.firstname, self.lastname)


class Student(Person):
    """A student; inherits name handling from Person."""

    def __init__(self, fname, lname):
        # BUG FIX: the original called Person.__init__ AND super().__init__,
        # initializing the base class twice; a single super() call suffices.
        super().__init__(fname, lname)
# Use the Person/Student classes to create objects and call printname().
x=Person("Pushkar", "Baviskar") #x is an object of class Person
x1=Student("Manasi", "Pushkar")
x.printname() #call the printname method on the Person instance
# x1 exercises the inherited method on the Student subclass
x1.printname()
# NOTE(review): hard-coded Windows path with unescaped backslashes — works
# here only because none form escape sequences; prefer a raw string.
f=open("E:\Github profile\PythonProgramming\BasicPythonPrograms\Pushkar.txt","r")
print(f.read())
# read() above consumed the file, so this readline() prints an empty line.
print(f.readline())
f.close()
| [
{
"point_num": 1,
"id": "every_class_has_docstring",
"question": "Does every class in this file have a docstring?",
"answer": false
},
{
"point_num": 2,
"id": "all_return_types_annotated",
"question": "Does every function in this file have a return type annotation?",
"answer": fa... | 3 | BasicPythonPrograms/PythonInheritance.py | Pushkar745/PythonProgramming |
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
__author__ = "Marc-Olivier Buob, Maxime Raynal"
__maintainer__ = "Marc-Olivier Buob, Maxime Raynal"
__email__ = "{marc-olivier.buob,maxime.raynal}@nokia.com"
__copyright__ = "Copyright (C) 2020, Nokia"
__license__ = "BSD-3"
from collections import defaultdict
from pybgl.graph import Graph
from pybgl.incidence_automaton import (
IncidenceAutomaton, finals, initial, remove_vertex, vertices
)
from pybgl.depth_first_search import depth_first_search_graph
from pybgl.property_map import make_assoc_property_map
from pybgl.reverse import reverse_graph
def find_reachable_vertices(g: Graph, sources: set) -> set:
    """
    Returns the set of vertices of a graph which are reachable
    from a set of source vertices.
    Args:
        g: Graph, an instance of `Graph`
        sources: set, a set of integers representing the source vertices
    Returns:
        The set of vertices that are reachable from the source vertices
    """
    color_by_vertex = defaultdict(int)
    # Run a DFS from every source; every visited vertex gets a color entry.
    depth_first_search_graph(
        g, sources,
        pmap_vcolor=make_assoc_property_map(color_by_vertex)
    )
    return set(color_by_vertex.keys())
def prune_incidence_automaton(g: IncidenceAutomaton):
    """
    Prunes the vertices of an IncidenceAutomaton that cannot be reached
    from the initial state, or that cannot reach a final state.
    Args:
        g: IncidenceAutomaton, an instance of IncidenceAutomaton
    """
    # Vertices reachable from the initial state, following edges forward.
    keep = find_reachable_vertices(g, {initial(g)})
    # Reverse the automaton to find vertices that can reach a final state.
    reverse_graph(g)
    keep &= find_reachable_vertices(g, finals(g))
    reverse_graph(g)  # restore the original edge orientation
    # Drop every vertex that fails either reachability test.
    for vertex in set(vertices(g)) - keep:
        remove_vertex(vertex, g)
| [
{
"point_num": 1,
"id": "every_function_has_docstring",
"question": "Does every function in this file have a docstring?",
"answer": true
},
{
"point_num": 2,
"id": "every_function_under_20_lines",
"question": "Is every function in this file shorter than 20 lines?",
"answer": true... | 3 | pybgl/prune_incidence_automaton.py | nokia/PyBGL |
import uuid
from datetime import datetime
from sqlalchemy.orm.scoping import scoped_session
import factory
import factory.fuzzy
from app.extensions import db
# import SQLAlchemy model
GUID = factory.LazyFunction(uuid.uuid4)
TODAY = factory.LazyFunction(datetime.now)
# Registry of every factory subclass, populated automatically on definition.
FACTORY_LIST = []


class FactoryRegistry:
    """Mixin that records each subclass in FACTORY_LIST when it is defined."""

    def __init_subclass__(cls, **kwargs):
        super().__init_subclass__(**kwargs)
        # Register the freshly defined subclass.
        FACTORY_LIST.append(cls)
class BaseFactory(factory.alchemy.SQLAlchemyModelFactory, FactoryRegistry):
    """Abstract base for model factories; registered via FactoryRegistry."""

    class Meta:
        # Abstract: not instantiated directly; shares the app db session
        # and flushes (rather than commits) after each creation.
        abstract = True
        sqlalchemy_session = db.session
        sqlalchemy_session_persistence = 'flush'
| [
{
"point_num": 1,
"id": "more_functions_than_classes",
"question": "Does this file define more functions than classes?",
"answer": false
},
{
"point_num": 2,
"id": "has_multiple_inheritance",
"question": "Does any class in this file use multiple inheritance?",
"answer": true
},... | 3 | services/nris-api/backend/tests/factories.py | parc-jason/mds |
import pytest
from django.conf import settings
from django.test import RequestFactory
from newsapp.users.tests.factories import UserFactory
@pytest.fixture(autouse=True)
def media_storage(settings, tmpdir):
    # Redirect MEDIA_ROOT to a per-test temp dir so tests never touch real media.
    settings.MEDIA_ROOT = tmpdir.strpath
@pytest.fixture
def user() -> settings.AUTH_USER_MODEL:
    # A freshly created user instance built by the UserFactory.
    return UserFactory()
@pytest.fixture
def request_factory() -> RequestFactory:
    # Django RequestFactory for building synthetic requests in view tests.
    return RequestFactory()
| [
{
"point_num": 1,
"id": "has_nested_function_def",
"question": "Does this file contain any function defined inside another function?",
"answer": false
},
{
"point_num": 2,
"id": "every_function_has_docstring",
"question": "Does every function in this file have a docstring?",
"ans... | 3 | newsapp/conftest.py | darmhoo/newsapp |
import logging
from typing import Set
import falcon
from common.consts import HTTP_WRITE_METHODS
from common.falcon_utils import auth_token
from common.util import is_public
from ui import BackendController
class ContentTypeValidator:
    """Falcon middleware enforcing a resource's expected request encoding."""

    def process_resource(self, req: falcon.Request, _resp: falcon.Response, resource, _params):
        if req.method not in HTTP_WRITE_METHODS:
            # Read-style requests carry no body worth validating.
            return
        expected = getattr(resource, 'content_type', 'application/x-www-form-urlencoded')
        if expected and expected not in req.content_type:
            raise falcon.HTTPUnsupportedMediaType(description="This API only supports requests encoded as '" + expected + "'")
class LoginValidator:
    """Falcon middleware that redirects unauthenticated requests to login."""

    def __init__(self, backend: BackendController, login_path: str, public_paths: Set[str] = None):
        self.login_path = login_path
        # The login page itself must always be publicly reachable.
        self.public_paths = public_paths if public_paths else set()
        self.public_paths.add(login_path)
        self._backend = backend

    def process_resource(self, req: falcon.Request, resp: falcon.Response, _resource, _params):
        if is_public(req.path, self.public_paths):
            logging.debug("This is a public resource which does not need a valid token")
            return
        token = auth_token(req)
        if not token:
            # No token present: bounce the client to the login page (303).
            raise falcon.HTTPSeeOther(self.login_path)
        # Attach the authenticated user's info to the response object.
        resp.auth_user = self._backend.user_info(auth_token=token)
| [
{
"point_num": 1,
"id": "every_class_has_docstring",
"question": "Does every class in this file have a docstring?",
"answer": false
},
{
"point_num": 2,
"id": "all_params_annotated",
"question": "Does every function parameter in this file have a type annotation (excluding self/cls)?"... | 3 | ui/middleware.py | ove/ove-asset-manager |
#!/usr/bin/env python
#
# ======================================================================
#
# Brad T. Aagaard, U.S. Geological Survey
# Charles A. Williams, GNS Science
# Matthew G. Knepley, University of Chicago
#
# This code was developed as part of the Computational Infrastructure
# for Geodynamics (http://geodynamics.org).
#
# Copyright (c) 2010-2017 University of California, Davis
#
# See COPYING for license information.
#
# ======================================================================
#
## @file tests/pytests/materials/TestGenMaxwellPlaneStrain.py
## @brief Unit testing of GenMaxwellPlaneStrain object.
import unittest
from pylith.materials.GenMaxwellPlaneStrain import GenMaxwellPlaneStrain
# ----------------------------------------------------------------------
class TestGenMaxwellPlaneStrain(unittest.TestCase):
    """
    Unit testing of GenMaxwellPlaneStrain object.
    """

    def setUp(self):
        """
        Setup test subject.
        """
        self.material = GenMaxwellPlaneStrain()
        return

    def test_constructor(self):
        """
        Test constructor.
        """
        # A plane-strain material is two-dimensional.
        self.assertEqual(2, self.material.dimension())
        return

    def test_useElasticBehavior(self):
        """
        Test useElasticBehavior().
        """
        self.material.useElasticBehavior(False)
        return

    def testHasStateVars(self):
        """Test hasStateVars()."""
        # FIX: failUnless is a deprecated alias (removed in Python 3.12).
        self.assertTrue(self.material.hasStateVars())
        return

    def testTensorSize(self):
        """Test tensorSize()."""
        self.assertEqual(3, self.material.tensorSize())
        return

    def testNeedNewJacobian(self):
        """
        Test needNewJacobian().
        """
        # Default should be False. (failIf/failUnless replaced by the
        # modern assertFalse/assertTrue.)
        self.assertFalse(self.material.needNewJacobian())
        # Changing time step should require new Jacobian.
        self.material.timeStep(1.0)
        self.material.timeStep(2.0)
        self.assertTrue(self.material.needNewJacobian())
        return

    def test_factory(self):
        """
        Test factory method.
        """
        from pylith.materials.GenMaxwellPlaneStrain import material
        m = material()
        return
| [
{
"point_num": 1,
"id": "has_multiple_inheritance",
"question": "Does any class in this file use multiple inheritance?",
"answer": false
},
{
"point_num": 2,
"id": "every_function_under_20_lines",
"question": "Is every function in this file shorter than 20 lines?",
"answer": true... | 3 | tests/pytests/materials/obsolete/TestGenMaxwellPlaneStrain.py | Grant-Block/pylith |
# -*- coding: utf-8 -*-
"""Models helper
These are helper functions for models.
"""
import torch.optim as optim
import torch.nn as nn
from configs.supported_info import SUPPORTED_OPTIMIZER, SUPPORTED_CRITERION
def get_optimizer(cfg: object, network: object) -> object:
    """Get optimizer function

    Builds the optimizer named in the config for the given network.
    Args:
        cfg: Config of optimizer (provides name, lr, decay).
        network: Network of model.
    Returns:
        Optimizer object, or None when no optimizer name is configured.
    Raises:
        NotImplementedError: If the optimizer you want to use is not supported.
    """
    optimizer_name = cfg.name
    if not optimizer_name:
        return None
    if optimizer_name not in SUPPORTED_OPTIMIZER:
        raise NotImplementedError('The optimizer is not supported.')
    # NOTE(review): only "adam" is handled here; a supported name other
    # than "adam" would fall through and return None — confirm intended.
    if optimizer_name == "adam":
        return optim.Adam(network.parameters(),
                          lr=cfg.lr,
                          weight_decay=cfg.decay)
def get_criterion(cfg: object) -> object:
    """Get criterion function

    Builds the loss function named in the config.
    Args:
        cfg: Config of criterion (provides name).
    Returns:
        Criterion object, or None when no criterion name is configured.
    Raises:
        NotImplementedError: If the criterion you want to use is not supported.
    """
    criterion_name = cfg.name
    if not criterion_name:
        return None
    if criterion_name not in SUPPORTED_CRITERION:
        raise NotImplementedError('The loss function is not supported.')
    # Only these two losses are constructed here; other supported names
    # (if any exist in SUPPORTED_CRITERION) would return None implicitly.
    if criterion_name == "cross_entropy":
        return nn.CrossEntropyLoss()
    elif criterion_name == "nll_loss":
        return nn.NLLLoss()
{
"point_num": 1,
"id": "all_return_types_annotated",
"question": "Does every function in this file have a return type annotation?",
"answer": true
},
{
"point_num": 2,
"id": "every_function_has_docstring",
"question": "Does every function in this file have a docstring?",
"answer... | 3 | models/helper.py | kobakobashu/posenet-python |
#! /usr/bin/env python3
#-*- coding: utf-8 -*-
##
# Cryptokitty Create tables
#
##
import pymysql,traceback
import logging
from contextlib import closing
from tokens import Tokens
logging.basicConfig(filename="createdb.log", level=logging.DEBUG)
create_user_table_string = """
CREATE TABLE IF NOT EXISTS Cryptokitties.User (
telegram_id INT(15) NOT NULL,
generation_index INT(5) NULL,
cooldown_index INT(5) NULL,
offset_start INT(5) NULL,
offset_end INT(5) NULL,
alert VARCHAR(50) NULL,
PRIMARY KEY (telegram_id)
)
"""
create_attributes_table_string = """
CREATE TABLE IF NOT EXISTS Cryptokitties.Attribute (
telegram_id INT(15),
attribute_name VARCHAR(50)
)
"""
class CreateDb():
    """Creates the Cryptokitties tables (User, Attribute) in MySQL."""

    def __init__(self):
        token_list = Tokens().mysql()
        self.conn = pymysql.connect(**token_list)
        # Autocommit so DDL takes effect without explicit commit calls.
        self.conn.autocommit(True)

    def create_user_table(self):
        """Create the User table if it does not already exist."""
        try:
            with closing(self.conn.cursor()) as cur:
                cur.execute(create_user_table_string)  # Change cryptokitties to whatever you call your db in tokens.
        except Exception:
            catcherror = traceback.format_exc()
            self.write_error(catcherror)
        else:
            self.write_error('User Table Sucessfully created')

    def create_attributes_table(self):
        """Create the Attribute table if it does not already exist."""
        try:
            with closing(self.conn.cursor()) as cur:
                cur.execute(create_attributes_table_string)
            # BUG FIX: the original called `conn.close()` on an undefined
            # name, raising NameError and logging a spurious failure.
            self.conn.close()
        except Exception:
            catcherror = traceback.format_exc()
            self.write_error(catcherror)
        else:
            self.write_error('Attributes table successfully created')

    def write_error(self, error):
        """Append a message to the createdb log."""
        logging.debug(error)
if __name__ == '__main__':
CreateDb().create_user_table()
CreateDb().create_attributes_table()
| [
{
"point_num": 1,
"id": "every_function_has_docstring",
"question": "Does every function in this file have a docstring?",
"answer": false
},
{
"point_num": 2,
"id": "every_function_under_20_lines",
"question": "Is every function in this file shorter than 20 lines?",
"answer": tru... | 3 | src/createdb.py | xlanor/CryptoKitties |
# The tests for the CI server - ci.sagrid.ac.za
# Run with testinfra --host=ansible@ci.sagrid.ac.za
# - make sure you have ssh credentials.
# - make sure the test starts with "test"
# Most tests require access to sensitive information, so use the
# --sudo option
def test_ssh_protocol(host):
    """openssh-server is installed; any Protocol directive is commented out."""
    file = host.file('/etc/ssh/sshd_config')
    assert host.package('openssh-server').is_installed
    # The default is 2, so this can be commented out
    assert file.contains('^[\\#]Protocol 2$')


def test_ssh_ciphers(host):
    """Only CTR-mode AES ciphers are enabled; arcfour must be absent."""
    file = host.file('/etc/ssh/sshd_config')
    assert file.contains('^Ciphers aes128-ctr,aes192-ctr,aes256-ctr$')
    assert not file.contains('^Ciphers.*arcfour.*$')


def test_jenkins_service(host):
    """The jenkins service is running and enabled at boot."""
    service = host.service('jenkins')
    assert service.is_running
    assert service.is_enabled


def test_track_and_trace(host):
    """Apache rewrite rules reject HTTP TRACE/TRACK for the Jenkins vhost."""
    config_file = host.file('/etc/httpd/conf.d/jenkins.conf')
    assert config_file.contains('RewriteEngine On')
    assert config_file.contains('RewriteCond \%{REQUEST_METHOD} \^\(TRACE\|TRACK\)')
    assert config_file.contains('RewriteRule \.\* - \[F\]')
| [
{
"point_num": 1,
"id": "every_function_has_docstring",
"question": "Does every function in this file have a docstring?",
"answer": false
},
{
"point_num": 2,
"id": "all_function_names_snake_case",
"question": "Are all function names in this file written in snake_case?",
"answer"... | 3 | Testing/TestInfra/ci.py | ezeasorekene/DevOps |
import os
from ..sprite import Sprite
import pygame
class FirebatFire(Sprite):
    """
    Represents a firebat fire
    """
    def __init__(
            self,
            position,
            max_health: float = 100,
            path: str = 'advancing_hero/images/sprites/boss_enemies/fire/',
    ) -> None:
        # Load the sprite frames from `path`, then double the image size
        # and center the rect on the requested position.
        super().__init__(path=os.path.abspath(path),
                         position=position,
                         max_health=max_health)
        self.image = pygame.transform.scale2x(self.image)
        self.rect = self.image.get_rect()
        self.speed = 5
        self.position = position
        self.rect.centerx = position[0]
        self.rect.centery = position[1]
        self.mask = pygame.mask.from_surface(self.image)
        self.damage = 10
    def update(self, player, stage):
        # frame_counter/image_frame/image_list presumably maintained by the
        # Sprite base class — TODO confirm.
        super().update()
        # Despawn after 120 frames of lifetime.
        if self.frame_counter >= 120:
            self.kill()
        # Advance the animation every 5th frame.
        if self.frame_counter % 5 == 0:
            self.image_frame = (self.image_frame + 1) % len(self.image_list)
            self.update_image(self.image_frame)
        self.player_collision(player)
    def update_image(self, index):
        # Swap in frame `index`, rescaled, while preserving the center point.
        temp_rect = self.rect
        self.image = self.image_list[index]
        self.image = pygame.transform.scale2x(self.image)
        self.rect = self.image.get_rect()
        self.rect.centerx = temp_rect.centerx
        self.rect.centery = temp_rect.centery
    def player_collision(self, player):
        # Damage the player on rect overlap, then remove this projectile.
        if self.rect.colliderect(player.rect):
            player.hurt(self.damage)
            self.kill()
    def hurt(self, damage):
        # The fire itself cannot be damaged.
        return False
{
"point_num": 1,
"id": "has_multiple_inheritance",
"question": "Does any class in this file use multiple inheritance?",
"answer": false
},
{
"point_num": 2,
"id": "all_function_names_snake_case",
"question": "Are all function names in this file written in snake_case?",
"answer":... | 3 | advancing_hero/sprites/boss_enemies/firebat_fire.py | EnzoVargasM/advancing-hero |
import sqlite3

# Module-level connection shared by both helpers below.
con = sqlite3.connect("ogrenciler.db")
cursor = con.cursor()


def tabloolustur():
    """Create the `ogrenciler` (students) table if it does not exist."""
    cursor.execute(
        "CREATE TABLE IF NOT EXISTS ogrenciler"
        "(ad TEXT,soyad TEXT,numara INT,ogrenci_notu INT)")


def degerekle():
    """Insert the sample student row, commit, and close the connection."""
    # Parameterized insert (qmark placeholders) instead of values embedded
    # in the SQL string.
    cursor.execute(
        "INSERT INTO ogrenciler VALUES(?,?,?,?)",
        ('Gulay Busenur', 'Elmas', '2014010213007', '78'))
    con.commit()
    # NOTE: closing here means degerekle() can only be called once, and must
    # run after tabloolustur() — exactly the order used below.
    con.close()


tabloolustur()
degerekle()
| [
{
"point_num": 1,
"id": "all_return_types_annotated",
"question": "Does every function in this file have a return type annotation?",
"answer": false
},
{
"point_num": 2,
"id": "every_function_has_docstring",
"question": "Does every function in this file have a docstring?",
"answe... | 3 | Sqlite_Database.py | elmasbusenur/Sqlite_Database |
# (c) Copyright 2017-2018 SUSE LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import datetime
from flask import abort
from flask.json import JSONEncoder
import requests
import socket
import sys
if sys.version_info.major < 3:
from xmlrpclib import DateTime
else:
from xmlrpc.client import DateTime
TIMEOUT = 2
class CustomJSONEncoder(JSONEncoder):
    """Flask JSON encoder extended to serialize XML-RPC DateTime objects
    (as ISO-8601 strings) and arbitrary iterables (as lists)."""
    def default(self, obj):
        # The try covers both branches: strptime raises TypeError when
        # DateTime.value is not a string, and iter() raises TypeError for
        # non-iterables; either way we fall through to the base encoder.
        try:
            if isinstance(obj, DateTime):
                # XML-RPC DateTime.value looks like "YYYYMMDDTHH:MM:SS".
                return datetime.datetime.strptime(
                    obj.value, "%Y%m%dT%H:%M:%S").isoformat()
            iterable = iter(obj)
        except TypeError:
            pass
        else:
            return list(iterable)
        return JSONEncoder.default(self, obj)
# Forward the url to the given destination
def forward(url, request):
    """Proxy the incoming Flask `request` to `url` and relay the response.

    Returns a (body, status_code, headers) tuple suitable as a Flask view
    return value.
    """
    prepared = requests.Request(method=request.method,
                                url=url,
                                params=request.args,
                                headers=request.headers,
                                data=request.data).prepare()
    response = requests.Session().send(prepared)
    return (response.text, response.status_code, response.headers.items())
def ping(host, port):
    """Abort the request with 404 unless a TCP connection to (host, port)
    succeeds within TIMEOUT seconds."""
    s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
    try:
        s.settimeout(TIMEOUT)
        s.connect((host, port))
    except Exception:
        abort(404)
    finally:
        # BUG FIX: the original never closed the socket, leaking a file
        # descriptor per call on both success and failure. try/finally is
        # used (not `with`) because this file still supports Python 2,
        # where sockets are not context managers.
        s.close()
| [
{
"point_num": 1,
"id": "no_function_exceeds_5_params",
"question": "Does every function in this file take 5 or fewer parameters (excluding self/cls)?",
"answer": true
},
{
"point_num": 2,
"id": "all_params_annotated",
"question": "Does every function parameter in this file have a ty... | 3 | ardana_installer_server/util.py | rsalevsky/ardana-installer-server |
import os
from cryptography.hazmat.primitives import serialization
from cryptography.hazmat.primitives.asymmetric import rsa
from cryptography.hazmat.backends import default_backend
from dcr.scenario_utils.common_utils import random_alphanum
class OpenSshKey(object):
    """An immutable OpenSSH public/private key pair held as raw bytes."""

    def __init__(self, public_key: bytes, private_key: bytes):
        # Stored privately; exposed read-only via the properties below.
        self._public_key = public_key
        self._private_key = private_key

    @property
    def private_key(self) -> bytes:
        """The private key bytes as given to the constructor."""
        return self._private_key

    @property
    def public_key(self) -> bytes:
        """The public key bytes as given to the constructor."""
        return self._public_key
class OpenSshKeyFactory(object):
    """Creates fresh 2048-bit RSA key pairs in OpenSSH/PEM encodings."""

    @staticmethod
    def create() -> OpenSshKey:
        """Generate a new RSA key pair and wrap it in an OpenSshKey."""
        private = rsa.generate_private_key(
            public_exponent=65537,
            key_size=2048,
            backend=default_backend(),
        )
        # Public half in OpenSSH wire format, private half as PKCS8 PEM
        # without passphrase encryption.
        public_bytes = private.public_key().public_bytes(
            serialization.Encoding.OpenSSH,
            serialization.PublicFormat.OpenSSH,
        )
        private_bytes = private.private_bytes(
            serialization.Encoding.PEM,
            serialization.PrivateFormat.PKCS8,
            serialization.NoEncryption(),
        )
        return OpenSshKey(public_bytes, private_bytes)
def generate_ssh_key_pair(key_prefix='dcr_id_rsa'):
    """Generate a new SSH key pair and write the private key to a uniquely
    named file in the current directory.

    :param key_prefix: prefix for the private key file name
    :return: (public_key_string, absolute_private_key_file_path)
    """
    # New SSH public/private keys
    ssh_keys = OpenSshKeyFactory().create()
    private_key_file_name = '{0}_{1}'.format(key_prefix, random_alphanum(10))
    with open(private_key_file_name, 'wb') as fh:
        fh.write(ssh_keys.private_key)
    # SECURITY FIX: ssh refuses private keys readable by group/other;
    # restrict the file to owner read/write.
    os.chmod(private_key_file_name, 0o600)
    private_key_file = os.path.abspath(private_key_file_name)
    return ssh_keys.public_key.decode('utf-8'), private_key_file
{
"point_num": 1,
"id": "has_multiple_inheritance",
"question": "Does any class in this file use multiple inheritance?",
"answer": false
},
{
"point_num": 2,
"id": "no_function_exceeds_5_params",
"question": "Does every function in this file take 5 or fewer parameters (excluding self... | 3 | dcr/scenario_utils/crypto.py | ce-bu/WALinuxAgent |
from PySock import client
def abc(data, con):
    """Print a message received on the "test" channel and send a reply
    back on the same channel through `con` (the PySock client)."""
    sender = data['sender_name']
    print(f"Message from {sender} : {data['data']}")
    con.SEND("test", "Hurrah! it's working.")
def client_msg(data):
    """Print a direct message delivered on the DSP_MSG channel."""
    sender = data['sender_name']
    print(f"Message from : {sender} => {data['data']}")
# Connect as "swat" with debug logging and register the "test" channel.
c = client(client_name = "swat", debug = True)
c.CLIENT("localhost",8888)
c.CREATE_CHANNEL("test")
# Poll both channels forever. `abc` needs the client to send a reply, so
# the client object itself is passed through `args`.
while True:
    c.LISTEN( channel = "test", function = abc, args = (c,) )
    c.LISTEN( channel = "DSP_MSG", function = client_msg)
{
"point_num": 1,
"id": "all_params_annotated",
"question": "Does every function parameter in this file have a type annotation (excluding self/cls)?",
"answer": false
},
{
"point_num": 2,
"id": "has_nested_function_def",
"question": "Does this file contain any function defined inside... | 3 | shiSock-0.3.0/test/test_one_unsecure/t2.py | AnanyaRamanA/shiSock |
####### Special object
class Person(object):
    """A named person with an age, exposing Java-style accessors."""

    def __init__(self, name, age):
        self.name = name
        self.age = age

    def getName(self):
        """Return a short self-introduction string."""
        return 'My name is ' + self.name

    def getAge(self):
        """Return the age given to the constructor."""
        return self.age
{
"point_num": 1,
"id": "more_functions_than_classes",
"question": "Does this file define more functions than classes?",
"answer": true
},
{
"point_num": 2,
"id": "every_function_has_docstring",
"question": "Does every function in this file have a docstring?",
"answer": false
}... | 3 | examples/attributes/Person.py | irmen/Pyro3 |
"""added public id for users
Revision ID: 7cf6acee3bbb
Revises: fb69e94ff942
Create Date: 2019-02-15 19:55:34.010287
"""
from alembic import op
import sqlalchemy as sa
# revision identifiers, used by Alembic.
revision = '7cf6acee3bbb'
down_revision = 'fb69e94ff942'
branch_labels = None
depends_on = None
def upgrade():
    """Add a nullable `public_id` column to `users` with a unique constraint."""
    # ### commands auto generated by Alembic - please adjust! ###
    op.add_column('users', sa.Column('public_id', sa.String(length=50), nullable=True))
    # NOTE(review): constraint name is None — auto-naming requires a naming
    # convention on the metadata; confirm downgrade can find it by name.
    op.create_unique_constraint(None, 'users', ['public_id'])
    # ### end Alembic commands ###
def downgrade():
    """Drop the `public_id` unique constraint and column from `users`."""
    # ### commands auto generated by Alembic - please adjust! ###
    # NOTE(review): dropping a constraint with name=None generally fails on
    # most backends unless a naming convention resolves it — TODO confirm.
    op.drop_constraint(None, 'users', type_='unique')
    op.drop_column('users', 'public_id')
    # ### end Alembic commands ###
| [
{
"point_num": 1,
"id": "has_nested_function_def",
"question": "Does this file contain any function defined inside another function?",
"answer": false
},
{
"point_num": 2,
"id": "all_return_types_annotated",
"question": "Does every function in this file have a return type annotation?... | 3 | migrations/versions/7cf6acee3bbb_.py | mmb186/MobileApp_API |
from django.db import models
CHOICES = (
('Gray', 'Серый'),
('Black', 'Чёрный'),
('White', 'Белый'),
('Ginger', 'Рыжий'),
('Mixed', 'Смешанный'),
)
class Owner(models.Model):
    # A cat owner, displayed as "First Last".
    first_name = models.CharField(max_length=128)
    last_name = models.CharField(max_length=128)
    def __str__(self):
        return f'{self.first_name} {self.last_name}'
class Achievement(models.Model):
    # A named achievement a cat can earn (linked via AchievementCat).
    name = models.CharField(max_length=64)
    def __str__(self):
        return self.name
class Cat(models.Model):
    # A cat with a color restricted to CHOICES and an optional owner.
    name = models.CharField(max_length=16)
    color = models.CharField(
        max_length=16,
        choices=CHOICES
    )
    birth_year = models.IntegerField()
    # Optional owner; deleting the owner cascades and deletes the cat.
    owner = models.ForeignKey(
        to=Owner,
        related_name='cats',
        on_delete=models.CASCADE,
        blank=True,
        null=True
    )
    # Many-to-many through the explicit AchievementCat join model.
    achievements = models.ManyToManyField(
        to=Achievement,
        through='AchievementCat',
    )
    def __str__(self):
        return self.name
class AchievementCat(models.Model):
    # Join table linking a Cat to an Achievement; rows disappear when
    # either side is deleted (CASCADE).
    achievement = models.ForeignKey(
        to=Achievement,
        on_delete=models.CASCADE,
    )
    cat = models.ForeignKey(
        to=Cat,
        on_delete=models.CASCADE
    )
    def __str__(self):
        return f'{self.achievement} {self.cat}'
| [
{
"point_num": 1,
"id": "has_multiple_inheritance",
"question": "Does any class in this file use multiple inheritance?",
"answer": false
},
{
"point_num": 2,
"id": "all_function_names_snake_case",
"question": "Are all function names in this file written in snake_case?",
"answer":... | 3 | cats/models.py | a-s-frolov/kittygram_plus |
import unittest
import mppysam.mp_helper as mpp
def add(a=0, b=0):
    """Return the sum of *a* and *b* wrapped in a 1-tuple."""
    total = a + b
    return (total,)
class TestApplyWithAdd(unittest.TestCase):
    """Test apply() with a simple add function."""
    def test_returns_empty_list_if_empty_args(self):
        # No argument tuples -> no calls -> empty result list.
        args_list = []
        self.assertEqual(mpp.apply(add, args_list, processes=1), [])
    def test_returns_one_output_if_one_call(self):
        # add returns (3,); apply is expected to unwrap the 1-tuple.
        args_list = [(1, 2)]
        self.assertEqual(mpp.apply(add, args_list, processes=1), [3])
    def test_returns_multiple_outputs_if_multiple_calls(self):
        # Results must come back in input order.
        args_list = [(1, 2), (7, 0), (5, -1)]
        self.assertEqual(mpp.apply(add, args_list, processes=1), [3, 7, 4])
    def test_returns_multiple_outputs_if_multiple_calls_2_processes(self):
        # Ordering must also hold with more than one worker process.
        args_list = [(1, 2), (7, 0), (5, -1)]
        self.assertEqual(mpp.apply(add, args_list, processes=2), [3, 7, 4])
# Allow running this test module directly with `python`.
if __name__ == "__main__":
    unittest.main()
| [
{
"point_num": 1,
"id": "has_multiple_inheritance",
"question": "Does any class in this file use multiple inheritance?",
"answer": false
},
{
"point_num": 2,
"id": "all_function_names_snake_case",
"question": "Are all function names in this file written in snake_case?",
"answer":... | 3 | mppysam/tests/test_mp_helper.py | jamesbaye/mppysam |
# coding: utf-8
"""
Kubernetes
No description provided (generated by Swagger Codegen https://github.com/swagger-api/swagger-codegen) # noqa: E501
OpenAPI spec version: v1.10.6
Generated by: https://github.com/swagger-api/swagger-codegen.git
"""
from __future__ import absolute_import
import unittest
import aiokubernetes
from aiokubernetes.models.v1beta1_custom_resource_subresources import V1beta1CustomResourceSubresources # noqa: E501
from aiokubernetes.rest import ApiException
class TestV1beta1CustomResourceSubresources(unittest.TestCase):
    """V1beta1CustomResourceSubresources unit test stubs"""
    def setUp(self):
        # Generated stub: no fixtures needed yet.
        pass
    def tearDown(self):
        pass
    def testV1beta1CustomResourceSubresources(self):
        """Test V1beta1CustomResourceSubresources"""
        # FIXME: construct object with mandatory attributes with example values
        # model = aiokubernetes.models.v1beta1_custom_resource_subresources.V1beta1CustomResourceSubresources() # noqa: E501
        pass
# Allow running this generated test module directly with `python`.
if __name__ == '__main__':
    unittest.main()
| [
{
"point_num": 1,
"id": "all_function_names_snake_case",
"question": "Are all function names in this file written in snake_case?",
"answer": false
},
{
"point_num": 2,
"id": "has_multiple_inheritance",
"question": "Does any class in this file use multiple inheritance?",
"answer":... | 3 | test/test_v1beta1_custom_resource_subresources.py | tantioch/aiokubernetes |
from __future__ import absolute_import, division, print_function
from getpass import getpass
def get_input(prompt=''):
    """Read one line from stdin, on both Python 2 and Python 3.

    Python 2 provides `raw_input`; on Python 3 that name raises NameError,
    so the builtin `input` is used instead.
    """
    try:
        return raw_input(prompt)
    except NameError:
        return input(prompt)
def get_credentials():
    """Prompt for and return a username and password."""
    username = get_input('Enter Username: ')
    password = None
    # Loop until a non-empty password is typed twice identically.
    # NOTE: an empty password that matches its confirmation re-prompts
    # silently (no mismatch message) because '' is falsy.
    while not password:
        password = getpass()
        password_verify = getpass('Retype your password: ')
        if password != password_verify:
            print('Passwords do not match. Try again.')
            password = None
    return username, password
| [
{
"point_num": 1,
"id": "has_nested_function_def",
"question": "Does this file contain any function defined inside another function?",
"answer": false
},
{
"point_num": 2,
"id": "every_function_under_20_lines",
"question": "Is every function in this file shorter than 20 lines?",
... | 3 | mytools.py | richard-ziga/csv2show2csv |
#!/usr/bin/env python3
# Automatically generated file by swagger_to. DO NOT EDIT OR APPEND ANYTHING!
"""Implements the client for test."""
# pylint: skip-file
# pydocstyle: add-ignore=D105,D107,D401
import contextlib
import json
from typing import Any, BinaryIO, Dict, List, MutableMapping, Optional
import requests
import requests.auth
class RemoteCaller:
    """Executes the remote calls to the server."""
    def __init__(self, url_prefix: str, auth: Optional[requests.auth.AuthBase] = None) -> None:
        # Base URL (scheme/host/prefix) every request URL is joined onto.
        self.url_prefix = url_prefix
        # Optional requests auth object applied to every call.
        self.auth = auth
    def test_me(
            self,
            query_some_parameter: str,
            path_some_parameter: str) -> bytes:
        """
        Is a test endpoint.

        :param query_some_parameter: sent as the `some_parameter` query arg
        :param path_some_parameter: appended to the /products/ URL path

        :return: a confirmation
        """
        url = "".join([
            self.url_prefix,
            '/products/',
            str(path_some_parameter)])
        params = {}  # type: Dict[str, str]
        params['some_parameter'] = query_some_parameter
        resp = requests.request(
            method='get',
            url=url,
            params=params,
            auth=self.auth)
        # Raise on HTTP errors, then return the raw body; closing() makes
        # sure the connection is released back to the pool.
        with contextlib.closing(resp):
            resp.raise_for_status()
            return resp.content
# Automatically generated file by swagger_to. DO NOT EDIT OR APPEND ANYTHING!
| [
{
"point_num": 1,
"id": "more_functions_than_classes",
"question": "Does this file define more functions than classes?",
"answer": true
},
{
"point_num": 2,
"id": "no_function_exceeds_5_params",
"question": "Does every function in this file take 5 or fewer parameters (excluding self/... | 3 | tests/cases/py_client/parameter_name_conflict/client.py | koji8y/swagger-to |
import tensorflow as tf
from onnx_tf.handlers.backend_handler import BackendHandler
from onnx_tf.handlers.handler import onnx_op
@onnx_op("SequenceErase")
class SequenceErase(BackendHandler):
    """Backend handler for ONNX SequenceErase: remove the tensor at a given
    position from a tensor sequence."""
    @classmethod
    def chk_pos_in_bounds(cls, input_seq, pos):
        """
        Check the position is in-bounds with respect to the sequence.
        Accepted range for 'position' is in [-n, n - 1], where n is the
        number of tensors in 'input_sequence'.

        :param input_seq: input sequence
        :param pos: position of the output tensor

        :return: True if position is in-bounds
        """
        # Sequence length as a scalar tensor, cast to pos's dtype so the
        # comparisons below are type-compatible.
        seq_length = tf.shape(input_seq.to_sparse(), out_type=pos.dtype)[0]
        cond1 = tf.greater_equal(pos, tf.negative(seq_length))
        cond2 = tf.less_equal(pos, seq_length - 1)
        # pos >= -n and pos < n
        return tf.reduce_all(tf.logical_and(cond1, cond2))
    @classmethod
    def version_11(cls, node, **kwargs):
        """Build the TF ops for SequenceErase (opset 11)."""
        tensor_dict = kwargs["tensor_dict"]
        input_sequence = tensor_dict[node.inputs[0]]
        seq_length = tf.shape(input_sequence.to_sparse())[0]
        # The position input is optional; default is the last element.
        position = tensor_dict[node.inputs[1]] if len(
            node.inputs) == 2 else seq_length - 1
        # check whether position is in-bounds and assert if not
        result = cls.chk_pos_in_bounds(input_sequence, position)
        assert_pos = tf.Assert(tf.equal(result, True), [result])
        with tf.control_dependencies([assert_pos]):
            # Output = everything before `position` + everything after it.
            s1 = input_sequence[:position]
            s2 = input_sequence[position + 1:]
            return [tf.concat([s1, s2], axis=0)]
| [
{
"point_num": 1,
"id": "more_functions_than_classes",
"question": "Does this file define more functions than classes?",
"answer": true
},
{
"point_num": 2,
"id": "has_multiple_inheritance",
"question": "Does any class in this file use multiple inheritance?",
"answer": false
},... | 3 | onnx_tf/handlers/backend/sequence_erase.py | malisit/onnx-tensorflow |
# -*- coding: utf-8 -*-
# pylint: disable=unused-argument
"""Tests for the `CifBaseParser`."""
from aiida_codtools.calculations.cif_filter import CifFilterCalculation
def test_cif_filter(aiida_profile_clean, fixture_localhost, fixture_calc_job_node, generate_parser):
    """Test a default `cif_filter` calculation."""
    calc_entry_point = 'codtools.cif_filter'
    parser_entry_point = 'codtools.cif_base'
    # Build a calc-job node from the 'default' test fixture and parse it.
    calc_node = fixture_calc_job_node(calc_entry_point, fixture_localhost, 'default')
    cif_parser = generate_parser(parser_entry_point)
    results, _ = cif_parser.parse_from_node(calc_node, store_provenance=False)
    # A successful run produces a `cif` output and no error exit status.
    assert calc_node.exit_status in (None, 0)
    assert 'cif' in results
def test_cif_filter_invalid_cif(aiida_profile_clean, fixture_localhost, fixture_calc_job_node, generate_parser):
    """Test that invalid CIF written to stdout will result in `ERROR_PARSING_CIF_DATA`."""
    calc_entry_point = 'codtools.cif_filter'
    parser_entry_point = 'codtools.cif_base'
    calc_node = fixture_calc_job_node(calc_entry_point, fixture_localhost, 'invalid_cif')
    cif_parser = generate_parser(parser_entry_point)
    _, calcfunction = cif_parser.parse_from_node(calc_node, store_provenance=False)
    # The parse finishes, but with the ERROR_PARSING_CIF_DATA exit code.
    assert calcfunction.is_finished
    assert not calcfunction.is_finished_ok
    assert calcfunction.exit_status == CifFilterCalculation.exit_codes.ERROR_PARSING_CIF_DATA.status  # pylint: disable=no-member
    assert calcfunction.exit_message == CifFilterCalculation.exit_codes.ERROR_PARSING_CIF_DATA.message  # pylint: disable=no-member
| [
{
"point_num": 1,
"id": "every_function_has_docstring",
"question": "Does every function in this file have a docstring?",
"answer": true
},
{
"point_num": 2,
"id": "all_params_annotated",
"question": "Does every function parameter in this file have a type annotation (excluding self/c... | 3 | tests/parsers/test_cif_filter.py | aiidateam/aiida-codtools |
from cwltool.main import main
from .util import get_data
def test_missing_cwl_version():
    """No cwlVersion in the workflow."""
    # cwltool's main() exits with status 1 for an unusable workflow file.
    assert main([get_data('tests/wf/missing_cwlVersion.cwl')]) == 1
def test_incorrect_cwl_version():
    """Using cwlVersion: v0.1 in the workflow."""
    # An unsupported cwlVersion must also produce exit status 1.
    assert main([get_data('tests/wf/wrong_cwlVersion.cwl')]) == 1
| [
{
"point_num": 1,
"id": "all_return_types_annotated",
"question": "Does every function in this file have a return type annotation?",
"answer": false
},
{
"point_num": 2,
"id": "every_function_has_docstring",
"question": "Does every function in this file have a docstring?",
"answe... | 3 | tests/test_cwl_version.py | jayvdb/cwltool |
# Copyright (c) 2019 - now, Eggroll Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
#
import functools
import inspect
from time import time, perf_counter

from eggroll.utils.log_utils import get_logger
L = get_logger(filename='profile')
def _method_profile_logger(func):
    """Decorator that logs each call of *func* as a JSON ``func_profile``
    metric line (qualname, caller file:line, CPU time, wall time).
    """
    @functools.wraps(func)  # preserve name/doc of the wrapped function
    def wrapper(*args, **kwargs):
        start_wall_time = time()
        start_cpu_time = perf_counter()
        result = func(*args, **kwargs)
        end_wall_time = time()
        end_cpu_time = perf_counter()
        # Caller introspection is best-effort: fall back to "unknown"
        # instead of letting a frame-inspection failure propagate.
        try:
            outerframes = inspect.getouterframes(inspect.currentframe(), 2)
            real_caller = outerframes[1]
            caller = f'{real_caller.filename.rsplit("/", 1)[-1]}:{real_caller.lineno}'
        except Exception:
            caller = 'unknown'
        L.trace(f'{{"metric_type": "func_profile", '
                f'"qualname": "{func.__qualname__}", '
                f'"caller": "{caller}", '
                f'"cpu_time": {end_cpu_time - start_cpu_time}, '
                f'"wall_time": {end_wall_time - start_wall_time}}}')
        # BUG FIX: the original returned None whenever introspection raised
        # (the `return result` sat inside the try); the wrapped function's
        # result is now always returned.
        return result
    return wrapper
| [
{
"point_num": 1,
"id": "has_nested_function_def",
"question": "Does this file contain any function defined inside another function?",
"answer": true
},
{
"point_num": 2,
"id": "every_function_has_docstring",
"question": "Does every function in this file have a docstring?",
"answ... | 3 | python/eggroll/core/aspects.py | liszekei/eggroll |
# Advent of Code 2021 - Day: 24
# Imports (Always imports data based on the folder and file name)
from aocd import data, submit
def solve(lines):
    """Derive the max/min valid model numbers from the MONAD program text in
    `lines` (one instruction per line) and submit both answers via aocd."""
    # We need to simply find all the pairs of numbers, i.e. the numbers on lines 6 and 16 and store them.
    pairs = [(int(lines[i * 18 + 5][6:]), int(lines[i * 18 + 15][6:])) for i in range(14)]
    # Once getting the pairs we will need a stack and a map to store the pairs, as well constraints.
    stack = []
    constraints = {}
    # Enumerate helps because we can get the index of the pair at the same time.
    for i, (a, b) in enumerate(pairs):
        # If (line 6) is positive we need to add line 16 and index to stack, else pop the last element from the stack and add it to constraints.
        if a > 0:
            stack.append((i, b))
        else:
            k, bk = stack.pop()
            constraints[i] = (k, bk + a)
    # At this point the constraints are stored at the relevant index for which they affect and can be used to find the minimum or maximum element at that index in the answer.
    max_ans = {}
    min_ans = {}
    for i, (k, d) in constraints.items():
        # digit[i] = digit[k] + d, with every digit clamped to 1..9.
        max_ans[i] = min(9, 9 + d)
        max_ans[k] = min(9, 9 - d)
        min_ans[i] = max(1, 1 + d)
        min_ans[k] = max(1, 1 - d)
    p1 = "".join(str(max_ans[i]) for i in range(14))
    p2 = "".join(str(min_ans[i]) for i in range(14))
    print("Star 1:", p1)
    print("Star 2:", p2)
    # NOTE: submits to adventofcode.com — requires an aocd session token.
    submit(p1, part="a", day=24, year=2021)
    submit(p2, part="b", day=24, year=2021)
# Solution
def main():
    """Entry point: solve using the puzzle input fetched by aocd."""
    solve(data.splitlines())
# Call the main function.
if __name__ == '__main__':
    main()
{
"point_num": 1,
"id": "has_nested_function_def",
"question": "Does this file contain any function defined inside another function?",
"answer": false
},
{
"point_num": 2,
"id": "every_function_has_docstring",
"question": "Does every function in this file have a docstring?",
"ans... | 3 | Solutions/2021/24.py | Azurealistic/Winter |
from __future__ import annotations
import itertools
import unittest
from rules_python.python.runfiles import runfiles
import sudoku_solver
class TestSudokuSolver(unittest.TestCase):
    """End-to-end tests: parse a puzzle file, solve it, and compare every
    cell against the known-correct grid."""

    def test_solve_sudoku(self) -> None:
        # BUG FIX: this test was originally also named
        # `_assert_solved_instance` (so the later helper definition shadowed
        # it and it never ran) and it called the non-existent
        # `self.assert_solved_instance` (missing leading underscore).
        self._assert_solved_instance(
            "example_sudoku.txt",
            [
                [ 6, 4, 2, 7, 9, 1, 3, 8, 5 ],
                [ 1, 3, 7, 5, 8, 6, 2, 4, 9 ],
                [ 8, 9, 5, 4, 3, 2, 1, 7, 6 ],
                [ 4, 6, 1, 8, 7, 5, 9, 3, 2 ],
                [ 3, 5, 8, 2, 6, 9, 4, 1, 7 ],
                [ 7, 2, 9, 1, 4, 3, 6, 5, 8 ],
                [ 9, 1, 3, 6, 5, 7, 8, 2, 4 ],
                [ 5, 8, 6, 3, 2, 4, 7, 9, 1 ],
                [ 2, 7, 4, 9, 1, 8, 5, 6, 3 ],
            ],
        )

    def test_solve_killer_sudoku(self) -> None:
        self._assert_solved_instance(
            "example_killer_sudoku.txt",
            [
                [ 1, 6, 5, 8, 7, 2, 3, 9, 4, ],
                [ 4, 7, 2, 5, 3, 9, 6, 1, 8, ],
                [ 9, 3, 8, 1, 4, 6, 5, 7, 2, ],
                [ 5, 9, 1, 4, 2, 7, 8, 3, 6, ],
                [ 6, 8, 7, 3, 1, 5, 4, 2, 9, ],
                [ 2, 4, 3, 9, 6, 8, 1, 5, 7, ],
                [ 3, 5, 9, 2, 8, 4, 7, 6, 1, ],
                [ 8, 1, 6, 7, 9, 3, 2, 4, 5, ],
                [ 7, 2, 4, 6, 5, 1, 9, 8, 3, ],
            ],
        )

    def _assert_solved_instance(self, input: str, expected: list[list[int]]) -> None:
        """Solve the runfiles-relative instance `input` and assert that every
        (1-based) (row, col) cell equals `expected[row - 1][col - 1]`."""
        with open(runfiles.Create().Rlocation(f"sudoku_solver/{input}"), "r") as f:
            problem = sudoku_solver.Problem.parse(f)
        solution = problem.solve()
        for row, col in itertools.product(*(2 * [range(1, 10)])):
            assert solution[(row, col)] == expected[row - 1][col - 1]
# Allow running this test module directly with `python`.
if __name__ == '__main__':
    unittest.main()
| [
{
"point_num": 1,
"id": "all_function_names_snake_case",
"question": "Are all function names in this file written in snake_case?",
"answer": true
},
{
"point_num": 2,
"id": "more_functions_than_classes",
"question": "Does this file define more functions than classes?",
"answer": ... | 3 | test_sudoku_solver.py | MichaelRHead/sudoku_solver |
import db
from src.modeling.model_manager.files import download_file
from src.modeling.reporting.feature_importance import display_importances
def get_column(column, id):
    """Return `column` of the model_manager row with primary key `id`.

    NOTE(review): SECURITY — the query is built with an f-string, so both
    `column` and `id` are interpolated unescaped (SQL injection if either
    is ever user-controlled). At minimum `id` should be bound via the
    driver's placeholder API — TODO confirm `db.execute` supports
    parameterized queries.
    """
    query = f"SELECT {column} FROM model_manager WHERE id = {id}"
    data = db.execute(query).fetchone()[0]
    return data
def get_y_test(id):
    """Download and return the stored y_test artifact for run `id`."""
    return download_file(get_column("y_test_filename", id))
def get_extra_features_test(id):
    """Download and return the stored extra-features test artifact for run `id`."""
    return download_file(get_column("extra_features_test_filename", id))
def get_y_pred(id):
    """Download and return the stored y_pred artifact for run `id`."""
    return download_file(get_column("y_pred_filename", id))
def get_model(id):
    """Download and return the stored model artifact for run `id`."""
    return download_file(get_column("model_filename", id))
def show_feature_importance(id):
    """Parse the stored `feature_importance` column for run `id` and render
    it with display_importances.

    NOTE(review): the column appears to hold the str() of a list of
    (importance, name) tuples, e.g. "[(0.5, 'f1'), ...]" — TODO confirm.
    The manual parsing below leaves the quote characters embedded in the
    name field; ast.literal_eval would be more robust.
    """
    feature_importance = get_column("feature_importance", id)
    tuples = []
    # Strip spaces and the outer "[(" / ")]", then split on the "),("
    # boundaries between tuples.
    for x in feature_importance.replace(" ","")[2:-2].split("),("):
        x = x.split(",")
        x[0]=float(x[0])
        tuples.append(tuple(x))
    display_importances(tuples)
| [
{
"point_num": 1,
"id": "no_function_exceeds_5_params",
"question": "Does every function in this file take 5 or fewer parameters (excluding self/cls)?",
"answer": true
},
{
"point_num": 2,
"id": "all_function_names_snake_case",
"question": "Are all function names in this file written... | 3 | src/modeling/model_manager/load.py | NovaSBE-DSKC/retention-evaluation |
import unittest
import pytest
import numpy as np
from spikeinterface.core.tests.testing_tools import generate_recording
from spikeinterface.toolkit.utils import (get_random_data_chunks,
get_closest_channels, get_noise_levels)
def test_get_random_data_chunks():
    """50 chunks x 500 samples x 2 segments -> 50000 frames, 1 channel."""
    rec = generate_recording(num_channels=1, sampling_frequency=1000., durations=[10., 20.])
    chunks = get_random_data_chunks(rec, num_chunks_per_segment=50, chunk_size=500, seed=0)
    assert chunks.shape == (50000, 1)
def test_get_closest_channels():
    """Smoke test: runs both with and without a num_channels limit."""
    rec = generate_recording(num_channels=32, sampling_frequency=1000., durations=[0.1])
    closest_channels_inds, distances = get_closest_channels(rec)
    closest_channels_inds, distances = get_closest_channels(rec, num_channels=4)
def test_get_noise_levels():
    """Smoke test: compute noise levels on a generated recording."""
    rec = generate_recording(num_channels=2, sampling_frequency=1000., durations=[60.])
    noise_levels = get_noise_levels(rec)
    print(noise_levels)
# Run a single test directly; the others are left commented for manual use.
if __name__ == '__main__':
    test_get_random_data_chunks()
    # test_get_closest_channels()
    # test_get_noise_levels()
| [
{
"point_num": 1,
"id": "every_function_under_20_lines",
"question": "Is every function in this file shorter than 20 lines?",
"answer": true
},
{
"point_num": 2,
"id": "has_nested_function_def",
"question": "Does this file contain any function defined inside another function?",
"... | 3 | spikeinterface/toolkit/tests/test_utils.py | marcbue/spikeinterface |
#-*- coding: utf-8 -*-
#Vstream https://github.com/Kodi-vStream/venom-xbmc-addons
from resources.lib.handler.requestHandler import cRequestHandler
from resources.lib.parser import cParser
from resources.hosters.hoster import iHoster
class cHoster(iHoster):
    """vStream hoster plugin for FilePup: scrapes the direct MP4 source URL
    out of the page's HTML player config."""
    def __init__(self):
        self.__sDisplayName = 'FilePup'
        self.__sFileName = self.__sDisplayName
        self.__sHD = ''
    def getDisplayName(self):
        return self.__sDisplayName
    def setDisplayName(self, sDisplayName):
        # Kodi color markup: caller's label + hoster name + HD tag.
        self.__sDisplayName = sDisplayName + ' [COLOR skyblue]' + self.__sDisplayName + '[/COLOR] [COLOR khaki]' + self.__sHD + '[/COLOR]'
    def setFileName(self, sFileName):
        self.__sFileName = sFileName
    def getFileName(self):
        return self.__sFileName
    def getPluginIdentifier(self):
        return 'filepup'
    def setHD(self, sHD):
        # HD flag is intentionally ignored for this hoster.
        self.__sHD = ''
    def getHD(self):
        return self.__sHD
    def isDownloadable(self):
        return True
    def isJDownloaderable(self):
        return True
    def getPattern(self):
        return ''
    def __getIdFromUrl(self, sUrl):
        # Not used by this hoster.
        return ''
    def setUrl(self, sUrl):
        self.__sUrl = str(sUrl)
    def checkUrl(self, sUrl):
        return True
    def __getUrl(self, media_id):
        # Not used by this hoster.
        return
    def getMediaLink(self):
        return self.__getMediaLinkForGuest()
    def __getMediaLinkForGuest(self):
        # Fetch the page at self.__sUrl and regex out the mp4 player source.
        # Returns (True, url) on success, (False, False) otherwise.
        url = self.__sUrl
        oRequestHandler = cRequestHandler(url)
        #oRequestHandler.addParameters('login', '1')
        sHtmlContent = oRequestHandler.request()
        oParser = cParser()
        # Matches: type: "video/mp4", src: "<direct url>"
        sPattern = 'type: "video\/mp4", *src: "([^<>"{}]+?)"'
        aResult = oParser.parse(sHtmlContent, sPattern)
        if (aResult[0] == True):
            return True, aResult[1][0]
        return False, False
| [
{
"point_num": 1,
"id": "every_function_under_20_lines",
"question": "Is every function in this file shorter than 20 lines?",
"answer": true
},
{
"point_num": 2,
"id": "more_functions_than_classes",
"question": "Does this file define more functions than classes?",
"answer": true
... | 3 | plugin.video.vstream/resources/hosters/filepup.py | akuala/REPO.KUALA |
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""Database context manager for placement database connection."""
from oslo_db.sqlalchemy import enginefacade
from oslo_log import log as logging
from placement.util import run_once
LOG = logging.getLogger(__name__)
placement_context_manager = enginefacade.transaction_context()
def _get_db_conf(conf_group):
conf_dict = dict(conf_group.items())
# Remove the 'sync_on_startup' conf setting, enginefacade does not use it.
# Use pop since it might not be present in testing situations and we
# don't want to care here.
conf_dict.pop('sync_on_startup', None)
return conf_dict
@run_once("TransactionFactory already started, not reconfiguring.",
          LOG.warning)
def configure(conf):
    """Configure the global transaction context from conf.placement_database.

    Guarded by run_once: a second call only logs the warning above instead
    of reconfiguring the already-started TransactionFactory.
    """
    placement_context_manager.configure(
        **_get_db_conf(conf.placement_database))
def get_placement_engine():
    """Return the writer engine of the global placement transaction context."""
    writer = placement_context_manager.writer
    return writer.get_engine()
@enginefacade.transaction_context_provider
class DbContext(object):
    # The decorator presumably attaches the session/connection attributes
    # enginefacade's reader/writer decorators expect — confirm with oslo_db.
    """Stub class for db session handling outside of web requests."""
| [
{
"point_num": 1,
"id": "every_function_has_docstring",
"question": "Does every function in this file have a docstring?",
"answer": false
},
{
"point_num": 2,
"id": "has_nested_function_def",
"question": "Does this file contain any function defined inside another function?",
"ans... | 3 | openstack-placement-1.0.0/placement/db_api.py | scottwedge/OpenStack-Stein |
import pytest
from data.map import Map
from data import constants
def test_set_get_map():
    """set_map/get_map must round-trip the tile list unchanged."""
    game_map = Map()  # renamed from `map`, which shadowed the builtin
    game_map.set_map([
        [(0, 0), constants.DEFAULT_WALL, 0],
        [(0, 1), constants.DEFAULT_WALL, 90],
        [(0, 2), constants.DEFAULT_WALL, 180],
    ])
    expected = [
        [(0, 0), constants.DEFAULT_WALL, 0],
        [(0, 1), constants.DEFAULT_WALL, 90],
        [(0, 2), constants.DEFAULT_WALL, 180],
    ]
    assert game_map.get_map() == expected
def test_set_get_mapxy():
    """set_mapx/set_mapy must round-trip through get_mapx/get_mapy."""
    game_map = Map()  # renamed from `map`, which shadowed the builtin
    game_map.set_mapx(20)
    game_map.set_mapy(15)
    assert game_map.get_mapx() == 20
    assert game_map.get_mapy() == 15
# pytest.main(["-v", "--tb=no", "test_map.py"]) | [
{
"point_num": 1,
"id": "every_function_under_20_lines",
"question": "Is every function in this file shorter than 20 lines?",
"answer": true
},
{
"point_num": 2,
"id": "has_nested_function_def",
"question": "Does this file contain any function defined inside another function?",
"... | 3 | lightbike/tests/test_map.py | ethancharles02/cse210-project |
#fake database to get the pygame running
import random
questions = ["Question 1?", "Question 2?", "Question 3?", "Question 4?"]
answers = ["Answer 1", "Answer 2", "Answer 3", "Answer 4"]


def get_question():
    """Return one of the canned questions, chosen uniformly at random."""
    return random.choice(questions)


def get_answer():
    """Return one of the canned answers, chosen uniformly at random."""
    return random.choice(answers)
{
"point_num": 1,
"id": "all_return_types_annotated",
"question": "Does every function in this file have a return type annotation?",
"answer": false
},
{
"point_num": 2,
"id": "every_function_has_docstring",
"question": "Does every function in this file have a docstring?",
"answe... | 3 | quiz/fake_db.py | KelstonClub/quiz |
from django.core.exceptions import ObjectDoesNotExist
from rest_framework.serializers import PrimaryKeyRelatedField, RelatedField
class UniqueRelatedField(RelatedField):
    """
    Like rest_framework's PrimaryKeyRelatedField, but selecting by any unique
    field instead of the primary key.
    """
    default_error_messages = PrimaryKeyRelatedField.default_error_messages.copy()

    def __init__(self, field_name, serializer_field=None, **kwargs):
        super().__init__(**kwargs)
        # Name of the unique model field used for lookups.
        self.related_field_name = field_name
        # Optional serializer field used to (de)serialize the lookup value.
        self.serializer_field = serializer_field

    def to_internal_value(self, data):
        """Resolve *data* to a model instance via the configured unique field."""
        if self.serializer_field is not None:
            data = self.serializer_field.to_internal_value(data)
        lookup = {self.related_field_name: data}
        try:
            return self.get_queryset().get(**lookup)
        except ObjectDoesNotExist:
            self.fail('does_not_exist', pk_value=data)
        except (TypeError, ValueError):
            self.fail('incorrect_type', data_type=type(data).__name__)

    def to_representation(self, value):
        """Serialize the related instance back to its unique-field value."""
        raw = getattr(value, self.related_field_name)
        if self.serializer_field is None:
            return raw
        return self.serializer_field.to_representation(raw)
| [
{
"point_num": 1,
"id": "more_functions_than_classes",
"question": "Does this file define more functions than classes?",
"answer": true
},
{
"point_num": 2,
"id": "all_params_annotated",
"question": "Does every function parameter in this file have a type annotation (excluding self/cl... | 3 | apps/api/serializers.py | azengard/reseller-api |
from django import forms
from django.forms.models import inlineformset_factory
from django.utils.translation import pgettext_lazy
from crispy_forms.layout import (
Row,
Column,
Layout,
Field,
MultiField,
Fieldset
)
from projectjose.core.forms import (
BaseForm,
AjaxSelect2ChoiceField,
PrependedText,
Checkbox,
Fieldset as ProdabreFieldset
)
from projectjose.core.utils.text import icon_label
from projectjose.social.models import Link
__all__ = [
'LinkForm',
]
# ---------------------------------------------------------------------------- #
# Product Form
# ---------------------------------------------------------------------------- #
class LinkForm(BaseForm):
    """Crispy-forms backed form for creating/editing social ``Link`` objects."""

    class Meta:
        model = Link
        fields = [
            'key',
            'name',
            'url',
        ]
        labels = {
            'key': 'Llave',
            'name': 'Nombre',
            'url': 'link de la red social',
        }

    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)
        # One icon per field, rendered as a prepended add-on in field order.
        field_icons = (
            ('key', 'barcode'),
            ('name', 'ticket-alt'),
            ('url', 'pen-square'),
        )
        prepended = [
            PrependedText(field, icon_label(icon=icon))
            for field, icon in field_icons
        ]
        self.helper.layout = Layout(
            ProdabreFieldset('Red Social', *prepended),
        )
| [
{
"point_num": 1,
"id": "every_class_has_docstring",
"question": "Does every class in this file have a docstring?",
"answer": false
},
{
"point_num": 2,
"id": "has_multiple_inheritance",
"question": "Does any class in this file use multiple inheritance?",
"answer": false
},
{... | 3 | projectjose/dashboard/social/forms.py | Chaoslecion123/blog_jose |
import py, os
class NullPyPathLocal(py.path.local):
    """A py.path.local whose files are backed by the OS null device.

    ``join`` keeps producing NullPyPathLocal instances so the "null"
    behaviour propagates to child paths.
    """

    def join(self, *args):
        joined = py.path.local.join(self, *args)
        return self.__class__(joined)

    def open(self, mode):
        # Reads see an empty file; writes are discarded.
        return open(os.devnull, mode)

    def __repr__(self):
        return '%s [fake]' % py.path.local.__repr__(self)
| [
{
"point_num": 1,
"id": "every_function_has_docstring",
"question": "Does every function in this file have a docstring?",
"answer": false
},
{
"point_num": 2,
"id": "has_nested_function_def",
"question": "Does this file contain any function defined inside another function?",
"ans... | 3 | rpython/tool/nullpath.py | nanjekyejoannah/pypy |
### Drawing line using Digital Differential Analyzer Line Drawing Algorithm in Computer Graphics.
from OpenGL.GL import *
from OpenGL.GLUT import *
from OpenGL.GLU import *
# Logical grid dimensions; Line() uses these to normalise coordinates to NDC.
w, h = 25,25
# NOTE(review): `m` is only referenced by commented-out code in drawDDA.
m=0
def ROUND(a):
    """Round *a* to the nearest integer (add 0.5, then truncate toward zero)."""
    shifted = a + 0.5
    return int(shifted)
def drawDDA(x1,y1,x2,y2):
    """Rasterise the segment (x1,y1)-(x2,y2) with the DDA algorithm.

    Steps one unit along the dominant axis per iteration, drawing a short
    sub-segment with Line() at each step.
    """
    x, y = x1, y1
    span_x, span_y = x2 - x1, y2 - y1
    # Step count is the span of the dominant axis.
    length = max(abs(span_x), abs(span_y))
    if length == 0:
        # Degenerate segment (coincident endpoints): original code divided
        # by zero here; nothing to draw.
        return
    dx = span_x / float(length)
    dy = span_y / float(length)
    for _ in range(int(length)):
        prev_x, prev_y = x, y
        x += dx
        y += dy
        # Debug print removed: this runs every frame via the GLUT idle callback.
        Line(prev_x, prev_y, x, y)
# ---Section 1---
def Line(x1,y1,x2,y2):
    """Draw one GL line segment, mapping grid coords (0..w, 0..h) to NDC [-1, 1]."""
    ndc_x1 = 2 * x1 / w - 1
    ndc_y1 = 2 * y1 / h - 1
    ndc_x2 = 2 * x2 / w - 1
    ndc_y2 = 2 * y2 / h - 1
    glBegin(GL_LINES)
    glVertex2f(ndc_x1, ndc_y1)
    glVertex2f(ndc_x2, ndc_y2)
    glEnd()
def iterate():
    """Reset the viewport and install a 0..500 orthographic projection.

    NOTE(review): currently never called — the call in showScreen() is
    commented out, so Line()'s own NDC mapping does all coordinate work.
    """
    glViewport(0, 0, 500,500)
    glMatrixMode(GL_PROJECTION)
    glLoadIdentity()
    glOrtho(0.0, 500, 0.0, 500, 0.0, 1.0)
    glMatrixMode (GL_MODELVIEW)
    glLoadIdentity()
# ---Section 2---
def showScreen():
    """GLUT display/idle callback: clear, reset the modelview, draw the DDA line."""
    glClear(GL_COLOR_BUFFER_BIT | GL_DEPTH_BUFFER_BIT) # Remove everything from screen (i.e. displays all white)
    glLoadIdentity() # Reset all graphic/shape's position
    # iterate()
    glColor3f(1.0, 0.0, 3.0) # Set the color to pink
    drawDDA(2,5,10,20)
    glutSwapBuffers()
#---Section 3---
# GLUT boilerplate: create the window, register callbacks, enter the event loop.
glutInit()
glutInitDisplayMode(GLUT_RGBA) # Set the display mode to be colored
glutInitWindowSize(500, 500) # Set the w and h of your window
glutInitWindowPosition(0, 0) # Set the position at which this windows should appear
wind = glutCreateWindow("DDA Line Drawing Algorithm") # Set a window title
glutDisplayFunc(showScreen)
glutIdleFunc(showScreen) # Keeps the window open
glutMainLoop() # Keeps the above created window displaying/running in a loop
| [
{
"point_num": 1,
"id": "every_function_under_20_lines",
"question": "Is every function in this file shorter than 20 lines?",
"answer": true
},
{
"point_num": 2,
"id": "all_params_annotated",
"question": "Does every function parameter in this file have a type annotation (excluding se... | 3 | Computer Engineering/Third Year/Computer Graphics/Python/digital differential analyzer.py | jatin-eleven/Somaiya-University |
from tkinter import *
from tkinter.ttk import * # styling library
from random import randint
# Main window configuration.
root = Tk()
root.title("GUESS ME")
root.geometry("350x100")
root.configure(background='#AEB6BF')
#Style
style = Style()
style.theme_use('classic')
# Give labels and buttons the same background colour as the window.
for elem in ['TLabel', 'TButton']:
    style.configure(elem, background='#AEB6BF')
class ValueSmallError(Exception):
    """Raised when the guessed number is smaller than the answer."""
    pass
class ValueLargeError(Exception):
    """Raised when the guessed number is larger than the answer."""
    pass
ans = randint(1,100)
def guess():
    """Submit-button callback: compare the entered number with the answer.

    Feedback is shown as a Label in grid row 3. Non-numeric input is
    reported instead of crashing the callback (previously int() was called
    outside the try block and raised an unhandled ValueError).
    """
    try:
        num = int(num1.get())  # may raise ValueError on non-numeric input
        if num > ans:
            raise ValueLargeError
        elif num < ans:
            raise ValueSmallError
        else:
            Label(root,text = "Congratulations, You won !!").grid(column=0,row=3)
    except ValueLargeError:
        Label(root,text = "Your no is large, guess again").grid(column=0,row=3)
    except ValueSmallError:
        Label(root,text = "Your no is small, guess again").grid(column=0,row=3)
    except ValueError:
        Label(root,text = "Please enter a whole number (1-100)").grid(column=0,row=3)
    return
# Static labels, the entry field, and the submit button.
Label(root,text = "\t\t*** GUESS ME ***").grid(column=0,row=0)
Label(root,text = "\nGuess the number(1-100)").grid(column=0,row=1)
num1 = Entry(root)
num1.grid(column=1,row=1)
# Keep the widget reference: .grid() returns None, so chaining it left
# `btn1` bound to None in the original code.
btn1 = Button(root,text = "Submit",command = guess)
btn1.grid(column=1,row=3)
root.mainloop()
| [
{
"point_num": 1,
"id": "has_multiple_inheritance",
"question": "Does any class in this file use multiple inheritance?",
"answer": false
},
{
"point_num": 2,
"id": "more_functions_than_classes",
"question": "Does this file define more functions than classes?",
"answer": false
}... | 3 | guess.py | kmranrg/GuessMe |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.