source string | points list | n_points int64 | path string | repo string |
|---|---|---|---|---|
from pytest import fixture
from ..f9 import F9
@fixture(scope="module")
def f9() -> F9:
    """Module-scoped F9 benchmark instance shared by all tests in this file."""
    return F9()
def test_evaluate_solution(f9: F9, helpers):
    """Delegate the shared single-solution evaluation checks to the helpers fixture."""
    helpers.test_evaluate_solution(f9)
def test_evaluate_population(f9: F9, helpers):
    """Delegate the shared population evaluation checks to the helpers fixture."""
    helpers.test_evaluate_population(f9)
def test_dsm(f9: F9, helpers):
    """Delegate the shared dependency-structure-matrix checks to the helpers fixture."""
    helpers.test_dsm(f9)
| [
{
"point_num": 1,
"id": "no_function_exceeds_5_params",
"question": "Does every function in this file take 5 or fewer parameters (excluding self/cls)?",
"answer": true
},
{
"point_num": 2,
"id": "has_nested_function_def",
"question": "Does this file contain any function defined insid... | 3 | evobench/continuous/cec2013lsgo/test/test_f9.py | piotr-rarus/evobench |
from resources import relu, learnFunc, dot
class HiddenBlock:
    """One fully-connected hidden layer with ReLU activation.

    `weights` is a list of per-neuron weight vectors; a single shared
    `bias` is added to every neuron's pre-activation.
    """
    def __init__(self, weights, bias):
        self.weights = weights
        self.bias = bias
    def feedForward(self, hidden_inputs):
        # One relu(w . inputs + bias) per output neuron.
        output = [
            relu(
                dot(hidden_inputs, weights) + self.bias
            )
            for weights in self.weights]
        return output
    def train(self, hidden_inputs, hidden_errors):
        """One gradient step; builds the error terms for the previous layer."""
        # Average the incoming error signals into a single scalar.
        error = sum(hidden_errors) / len(hidden_errors)
        predictions = self.feedForward(hidden_inputs)
        # Error pushed back to the previous layer: error * relu'(pred) * weight
        # for every (neuron, input) pair. relu(..., deriv=True) presumably
        # returns the derivative — TODO confirm against resources.relu.
        prevErrors = []
        for y in range(len(self.weights)):
            for x in range(len(self.weights[0])):
                prevError = error*relu(predictions[y], deriv=True)*self.weights[y][x]
                prevErrors.append(prevError)
        # Per-weight gradient-descent update; learnFunc supplies the rate.
        for y in range(len(self.weights)):
            for x in range(len(self.weights[0])):
                update = error*relu(predictions[y], deriv=True)*hidden_inputs[x]
                learn_rate = learnFunc(update)
                self.weights[y][x] -= learn_rate*update
        # Shared-bias update: mean of the per-neuron error terms.
        biasUpdate = 0
        for x in range(len(self.weights)):
            biasUpdate += error*relu(predictions[x], deriv=True)/len(predictions)
        learn_rate = learnFunc(biasUpdate)
        self.bias -= learn_rate*biasUpdate
return prevErrors | [
{
"point_num": 1,
"id": "more_functions_than_classes",
"question": "Does this file define more functions than classes?",
"answer": true
},
{
"point_num": 2,
"id": "no_function_exceeds_5_params",
"question": "Does every function in this file take 5 or fewer parameters (excluding self/... | 3 | python/old/hiddenBlock.py | BenOsborn/Cerci |
import pytest
import yaml
import sqlalchemy as sa
from retry import retry
@pytest.fixture(scope='session')
def docker_compose_file(tmpdir_factory, mysql_service_def, postgres_service_def, oracle_service_def):
    """Write a docker-compose.yml combining the three database service defs.

    :return: filesystem path of the generated compose file
    """
    compose_file = tmpdir_factory.mktemp('docker_files').join('docker-compose.yml')
    compose_conf = {
        'version': '2',
        'services': {
            'mysql': mysql_service_def,
            'postgres': postgres_service_def,
            'oracle': oracle_service_def
        }
    }
    with compose_file.open('w') as f:
        yaml.dump(compose_conf, stream=f)
    return compose_file.strpath
def engine_helper(dialect: str, user: str, password: str, host: str, port: int, database: str):
    """
    Build a SQLAlchemy engine and verify the database accepts connections.

    The initial connection is retried (2s delay, up to 10 tries) so this
    works against containers that are still starting up.

    :param dialect: SQLAlchemy dialect name, e.g. 'mysql' or 'postgresql'
    :param user: database user name
    :param password: password for `user`
    :param host: database host name or IP address
    :param port: TCP port the database listens on
    :param database: database/schema name to connect to
    :return: a verified `sqlalchemy.engine.Engine`
    """
    engine = sa.create_engine(
        '{dialect}://{user}:{password}@{host}:{port}/{database}'.format(
            dialect=dialect,
            user=user,
            password=password,
            host=host,
            port=port,
            database=database
        ),
        echo=True
    )
    @retry(delay=2, tries=10, exceptions=(
        sa.exc.OperationalError,
        sa.exc.DatabaseError,
        sa.exc.InterfaceError,
    ))
    def verify_connection():
        # Open and close one throwaway connection; @retry re-runs this
        # until the database is reachable (or tries are exhausted).
        conn = engine.connect()
        conn.close()
        return engine
    return verify_connection()
| [
{
"point_num": 1,
"id": "no_function_exceeds_5_params",
"question": "Does every function in this file take 5 or fewer parameters (excluding self/cls)?",
"answer": false
},
{
"point_num": 2,
"id": "all_params_annotated",
"question": "Does every function parameter in this file have a t... | 3 | tests/sql_sync/fixtures/db_fixtures_helper.py | jzcruiser/doltpy |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
from collections import Iterable
def flatten(input_arr, output_arr=None):
    """Recursively flatten an arbitrarily nested iterable into one flat list.

    :param input_arr: arbitrarily nested iterable
    :param output_arr: accumulator list; a fresh list is created when omitted
    :return: flat list containing the leaf elements in order
    """
    # collections.Iterable was removed in Python 3.10; collections.abc is
    # the supported location on every Python 3 version.
    from collections.abc import Iterable
    if output_arr is None:
        output_arr = []
    for t in input_arr:
        # str/bytes are iterable but must be treated as atomic leaves:
        # a 1-char string yields itself, so recursing on it never ends.
        if isinstance(t, Iterable) and not isinstance(t, (str, bytes)):
            flatten(t, output_arr)
        else:
            output_arr.append(t)
    return output_arr
def flatten_iter(iterable):
    """Lazily yield the leaf elements of an arbitrarily nested iterable."""
    # collections.Iterable was removed in Python 3.10; use collections.abc.
    from collections.abc import Iterable
    for t in iterable:
        # Treat str/bytes as leaves — recursing into a 1-char string
        # would loop forever.
        if isinstance(t, Iterable) and not isinstance(t, (str, bytes)):
            yield from flatten_iter(t)
        else:
            yield t
| [
{
"point_num": 1,
"id": "every_function_has_docstring",
"question": "Does every function in this file have a docstring?",
"answer": false
},
{
"point_num": 2,
"id": "no_function_exceeds_5_params",
"question": "Does every function in this file take 5 or fewer parameters (excluding sel... | 3 | algorithm/github_al/flatten.py | freedomDR/coding |
#Shows data from the first 1000 blocks
import random
import os
import subprocess
import json
#Set this to your raven-cli program
cli = "raven-cli"
#mode = "-testnet"
mode = ""
rpc_port = 8746
#Set this information in your raven.conf file (in datadir, not testnet3)
rpc_user = 'rpcuser'
rpc_pass = 'rpcpass555'
def rpc_call(params):
    """Run a raven-cli command and return its raw stdout bytes.

    :param params: the CLI sub-command string to execute
    :return: stdout captured from the raven-cli process
    """
    # Drop empty components: with mainnet, `mode` is "" and the old code
    # passed a literal empty-string argument that the CLI rejects.
    args = [arg for arg in (cli, mode, params) if arg]
    process = subprocess.Popen(args, stdout=subprocess.PIPE)
    out, err = process.communicate()  # err is None: stderr is not captured
    return out
def get_blockinfo(num):
    """Fetch and decode the block at height `num` via the RPC connection."""
    connection = get_rpc_connection()
    # Local renamed from `hash` so the builtin isn't shadowed.
    block_hash = connection.getblockhash(num)
    return connection.getblock(block_hash)
def get_rpc_connection():
    """Build an authenticated JSON-RPC proxy to the local raven daemon."""
    from bitcoinrpc.authproxy import AuthServiceProxy, JSONRPCException
    # Credentials and port come from the module-level config above.
    url = "http://{}:{}@127.0.0.1:{}".format(rpc_user, rpc_pass, rpc_port)
    return AuthServiceProxy(url)
# Walk block heights 1..999 and print a short summary of each block.
for i in range(1,1000):
    dta = get_blockinfo(i)
    print("Block #" + str(i))
    print(dta.get('hash'))        # block hash (hex string)
    print(dta.get('difficulty'))  # network difficulty at this height
    print(dta.get('time'))        # block timestamp (unix epoch)
    print("")
| [
{
"point_num": 1,
"id": "every_function_under_20_lines",
"question": "Is every function in this file shorter than 20 lines?",
"answer": true
},
{
"point_num": 2,
"id": "all_function_names_snake_case",
"question": "Are all function names in this file written in snake_case?",
"answ... | 3 | assets/tools/blockfacts.py | Clotonervo/TestCoin |
from app import db
from hashlib import md5
class User(db.Model):
    """ User table model.
    """
    __tablename__ = "users"
    user_id = db.Column(db.Integer, primary_key=True)
    username = db.Column(db.String, unique=True)
    password = db.Column(db.String)
    email = db.Column(db.String, unique=True)
    phone = db.Column(db.String, unique=True)
    name = db.Column(db.String)
    def avatar(self, size):
        """ Generate a user's avatar.

        Uses the gravatar service to produce a unique avatar per user,
        keyed on the MD5 of the lowercased e-mail address.

        :param size: Avatar size in pixels
        :type size: Integer
        :return: URL of the avatar image
        :rtype: URL
        """
        digest = md5(self.email.lower().encode('utf-8')).hexdigest()
        return 'https://www.gravatar.com/avatar/{}?d=identicon&s={}'.format(
            digest, size)
    # The three properties below satisfy the Flask-Login user contract.
    @property
    def is_authenticated(self):
        return True
    @property
    def is_active(self):
        return True
    @property
    def is_anonymous(self):
        return False
    def get_id(self):
        # Flask-Login requires a string id.
        return str(self.user_id)
    def __init__(self, username, password, email, name, phone):
        self.username = username
        self.email = email
        self.password = password
        self.name = name
        self.phone = phone
    def __repr__(self):
        return "<User %r>" % self.username
| [
{
"point_num": 1,
"id": "no_function_exceeds_5_params",
"question": "Does every function in this file take 5 or fewer parameters (excluding self/cls)?",
"answer": true
},
{
"point_num": 2,
"id": "all_function_names_snake_case",
"question": "Are all function names in this file written... | 3 | app/models/user.py | VitorBonella/ProjetoIntegrado1-2021-1 |
# full assembly of the sub-parts to form the complete net
import torch.nn.functional as F
from .unet_parts import *
class UNet(nn.Module):
    """U-Net: 4 down-sampling and 4 up-sampling stages with skip
    connections, an output conv, and a sigmoid head."""
    def __init__(self, n_channels, n_classes):
        super(UNet, self).__init__()
        self.inc = inconv(n_channels, 64)
        self.down1 = down(64, 128)
        self.down2 = down(128, 256)
        self.down3 = down(256, 512)
        self.down4 = down(512, 512)
        # Up-stage input widths include the concatenated skip channels.
        self.up1 = up(1024, 256)
        self.up2 = up(512, 128)
        self.up3 = up(256, 64)
        self.up4 = up(128, 64)
        self.outc = outconv(64, n_classes)
        self.sig = nn.Sigmoid()
    def forward(self, x):
        # x assumed (batch, n_channels, H, W) — TODO confirm with unet_parts.
        x1 = self.inc(x)
        x2 = self.down1(x1)
        x3 = self.down2(x2)
        x4 = self.down3(x3)
        x5 = self.down4(x4)
        # Each up-stage fuses the matching encoder output (skip connection).
        x = self.up1(x5, x4)
        x = self.up2(x, x3)
        x = self.up3(x, x2)
        x = self.up4(x, x1)
        x = self.outc(x)
        x = self.sig(x)
        return x
| [
{
"point_num": 1,
"id": "no_function_exceeds_5_params",
"question": "Does every function in this file take 5 or fewer parameters (excluding self/cls)?",
"answer": true
},
{
"point_num": 2,
"id": "has_multiple_inheritance",
"question": "Does any class in this file use multiple inherit... | 3 | hand_net/unet/unet_model.py | clearsky767/examples |
from django.contrib.auth.models import AnonymousUser
from django.http import HttpRequest
from django.template import engines
from django.test import TestCase
from wagtail.models import PAGE_TEMPLATE_VAR, Page, Site
from wagtail.test.utils import WagtailTestUtils
class TestCoreJinja(TestCase, WagtailTestUtils):
    """Tests for the wagtailuserbar() Jinja2 global."""
    def setUp(self):
        self.engine = engines["jinja2"]
        self.user = self.create_superuser(
            username="test", email="test@email.com", password="password"
        )
        self.homepage = Page.objects.get(id=2)
    def render(self, string, context=None, request_context=True):
        """Render a template string with the Jinja2 engine.

        NOTE(review): request_context is accepted but unused here.
        """
        if context is None:
            context = {}
        template = self.engine.from_string(string)
        return template.render(context)
    def dummy_request(self, user=None):
        """Build a minimal HttpRequest against the default site."""
        site = Site.objects.get(is_default_site=True)
        request = HttpRequest()
        request.META["HTTP_HOST"] = site.hostname
        request.META["SERVER_PORT"] = site.port
        request.user = user or AnonymousUser()
        return request
    def test_userbar(self):
        """The userbar is rendered for a logged-in superuser."""
        content = self.render(
            "{{ wagtailuserbar() }}",
            {
                PAGE_TEMPLATE_VAR: self.homepage,
                "request": self.dummy_request(self.user),
            },
        )
        self.assertIn("<!-- Wagtail user bar embed code -->", content)
    def test_userbar_anonymous_user(self):
        """Anonymous users get no userbar output at all."""
        content = self.render(
            "{{ wagtailuserbar() }}",
            {PAGE_TEMPLATE_VAR: self.homepage, "request": self.dummy_request()},
        )
        # Make sure nothing was rendered
        self.assertEqual(content, "")
| [
{
"point_num": 1,
"id": "all_function_names_snake_case",
"question": "Are all function names in this file written in snake_case?",
"answer": false
},
{
"point_num": 2,
"id": "no_function_exceeds_5_params",
"question": "Does every function in this file take 5 or fewer parameters (excl... | 3 | wagtail/admin/tests/test_jinja2.py | stevedya/wagtail |
# automatically generated by the FlatBuffers compiler, do not modify
# namespace: ppx
import flatbuffers
from flatbuffers.compat import import_numpy
np = import_numpy()
class TagResult(object):
__slots__ = ['_tab']
@classmethod
def GetRootAs(cls, buf, offset=0):
n = flatbuffers.encode.Get(flatbuffers.packer.uoffset, buf, offset)
x = TagResult()
x.Init(buf, n + offset)
return x
@classmethod
def GetRootAsTagResult(cls, buf, offset=0):
"""This method is deprecated. Please switch to GetRootAs."""
return cls.GetRootAs(buf, offset)
@classmethod
def TagResultBufferHasIdentifier(cls, buf, offset, size_prefixed=False):
return flatbuffers.util.BufferHasIdentifier(buf, offset, b"\x50\x50\x58\x46", size_prefixed=size_prefixed)
# TagResult
def Init(self, buf, pos):
self._tab = flatbuffers.table.Table(buf, pos)
def Start(builder): builder.StartObject(0)
def TagResultStart(builder):
"""This method is deprecated. Please switch to Start."""
return Start(builder)
def End(builder): return builder.EndObject()
def TagResultEnd(builder):
"""This method is deprecated. Please switch to End."""
return End(builder) | [
{
"point_num": 1,
"id": "all_function_names_snake_case",
"question": "Are all function names in this file written in snake_case?",
"answer": false
},
{
"point_num": 2,
"id": "more_functions_than_classes",
"question": "Does this file define more functions than classes?",
"answer":... | 3 | python/ppx/TagResult.py | etalumis/ppx |
from __future__ import print_function
import sys
import string
from random import choice, randint
from perfect_hash import generate_hash
def flush_dot():
    """Emit one progress dot to stdout immediately (no newline)."""
    print('.', end='', flush=True)
def random_key():
    """Return a random string of 1-4 printable characters."""
    length = randint(1, 4)
    chars = [choice(string.printable) for _ in range(length)]
    return ''.join(chars)
def main():
    """Stress-test generate_hash with random key sets of growing size."""
    for N in range(1, 100):
        print(N)
        for _ in range(100):
            # Draw random keys until we have exactly N distinct ones.
            keys = set()
            while len(keys) < N:
                keys.add(random_key())
            keys = list(keys)
            generate_hash(keys)
if __name__ == '__main__':
main()
| [
{
"point_num": 1,
"id": "every_function_under_20_lines",
"question": "Is every function in this file shorter than 20 lines?",
"answer": true
},
{
"point_num": 2,
"id": "all_function_names_snake_case",
"question": "Are all function names in this file written in snake_case?",
"answ... | 3 | stress.py | pombredanne/perfect-hash |
#!/usr/bin/env python
import os
import re
import warnings
import selenosis
# A dict for mapping test case classes to their import paths, to allow passing
# TestCaseClass.test_function as shorthand to runtests.py
TEST_CASE_MODULE_PATHS = {
'TestAdminWidgets': 'nested_admin.tests.admin_widgets.tests',
'TestWidgetMediaOrder': 'nested_admin.tests.admin_widgets.tests',
'TestGenericInlineAdmin': 'nested_admin.tests.gfk.tests',
'VisualComparisonTestCase': 'nested_admin.tests.one_deep.tests',
'TestDeepNesting': 'nested_admin.tests.three_deep.tests',
'TestStackedInlineAdmin': 'nested_admin.tests.two_deep.tests',
'TestTabularInlineAdmin': 'nested_admin.tests.two_deep.tests',
'TestSortablesWithExtra': 'nested_admin.tests.two_deep.tests',
'TestIdenticalPrefixes': 'nested_admin.tests.identical_prefixes.tests',
'PolymorphicStdTestCase': 'nested_admin.tests.nested_polymorphic.test_polymorphic_std.tests',
}
def expand_test_module(module):
    """Expand a bare `TestCase[.test_fn]` label into its full dotted path.

    Labels that contain path separators or extra dots are returned as-is
    (normalized); unknown class names are also passed through.
    """
    normalized = os.path.normpath(module)
    match = re.search(r'^([^/.]+)(\.[^./]+)?$', normalized)
    if match is None:
        return normalized
    cls, test_fn = match.groups()
    test_fn = test_fn or ''
    if cls in TEST_CASE_MODULE_PATHS:
        return "%s.%s%s" % (TEST_CASE_MODULE_PATHS[cls], cls, test_fn)
    return normalized
class RunTests(selenosis.RunTests):
    """selenosis test runner that expands shorthand TestCase labels first."""
    def execute(self, flags, test_labels):
        # Expand e.g. "TestDeepNesting.test_x" to its full dotted module path.
        test_labels = [expand_test_module(m) for m in test_labels]
        super(RunTests, self).execute(flags, test_labels)
def main():
    """Run the nested_admin test suite with warnings promoted to errors."""
    warnings.simplefilter("error", Warning)
    # This screenshot-name warning is expected and harmless; keep it quiet.
    warnings.filterwarnings(
        "ignore",
        "name used for saved screenshot does not match file type",
        UserWarning)
    runtests = RunTests("nested_admin.tests.settings", "nested_admin.tests")
    runtests()
if __name__ == '__main__':
main()
| [
{
"point_num": 1,
"id": "every_function_under_20_lines",
"question": "Is every function in this file shorter than 20 lines?",
"answer": true
},
{
"point_num": 2,
"id": "all_function_names_snake_case",
"question": "Are all function names in this file written in snake_case?",
"answ... | 3 | runtests.py | m4x4n0/django-nested-admin |
import hashlib
class Plugin:
def __init__(self, parser, sqlitecur):
parser.registerCommand([("hash", "Calculates all hashes", self._allHash)])
for h in hashlib.algorithms_available:
parser.registerCommand([("hash",), (h, "Calculates the %s" % h, self._hashCurry(h))])
def _allHash(self, params, fromUser):
if len(params) == 0:
return ("No data supplied", 1)
retLines = []
paramstr = " ".join(params).encode()
for h in hashlib.algorithms_available:
n = hashlib.new(h)
n.update(paramstr)
retLines.append("%s\t%s" % (h, n.hexdigest()))
return ("\n".join(retLines), 1)
def _hashCurry(self, hashfunction):
def hashIt(params, fromUser):
if len(params) == 0:
return ("No data supplied", 1)
n = hashlib.new(hashfunction)
n.update(' '.join(params).encode())
return (n.hexdigest(), 1)
return hashIt
| [
{
"point_num": 1,
"id": "all_params_annotated",
"question": "Does every function parameter in this file have a type annotation (excluding self/cls)?",
"answer": false
},
{
"point_num": 2,
"id": "no_function_exceeds_5_params",
"question": "Does every function in this file take 5 or fe... | 3 | plugins/hashit.py | Amarandus/xmppsh |
#!/usr/bin/python
# -*-coding=utf-8
from __future__ import print_function, division
import unittest
from onvif import ONVIFCamera, ONVIFError
CAM_HOST = '10.1.3.10'
CAM_PORT = 80
CAM_USER = 'root'
CAM_PASS = 'password'
DEBUG = False
def log(ret):
    """Print `ret` only when the module-level DEBUG flag is enabled."""
    if DEBUG:
        print(ret)
class TestDevice(unittest.TestCase):
    """Smoke tests for the ONVIF device-management service on a live camera."""
    # Class level cam. Run this test more efficiently..
    cam = ONVIFCamera(CAM_HOST, CAM_PORT, CAM_USER, CAM_PASS)
    # ***************** Test Capabilities ***************************
    def test_GetWsdlUrl(self):
        """Fetch the WSDL URL; the call just must not raise."""
        ret = self.cam.devicemgmt.GetWsdlUrl()
    def test_GetHostname(self):
        ''' Get the hostname from a device '''
        self.cam.devicemgmt.GetHostname()
    def test_GetServiceCapabilities(self):
        '''Returns the capabilities of the device service.'''
        ret = self.cam.devicemgmt.GetServiceCapabilities()
    def test_GetDNS(self):
        ''' Gets the DNS setting from a device '''
        ret = self.cam.devicemgmt.GetDNS()
        self.assertTrue(hasattr(ret, 'FromDHCP'))
        # Manual DNS entries only exist when DHCP is not supplying them.
        if not ret.FromDHCP and len(ret.DNSManual) > 0:
            log(ret.DNSManual[0].Type)
            log(ret.DNSManual[0].IPv4Address)
    def test_GetNTP(self):
        ''' Get the NTP settings from a device '''
        ret = self.cam.devicemgmt.GetNTP()
        if ret.FromDHCP == False:
            self.assertTrue(hasattr(ret, 'NTPManual'))
            log(ret.NTPManual)
if __name__ == '__main__':
unittest.main()
| [
{
"point_num": 1,
"id": "every_function_under_20_lines",
"question": "Is every function in this file shorter than 20 lines?",
"answer": true
},
{
"point_num": 2,
"id": "all_function_names_snake_case",
"question": "Are all function names in this file written in snake_case?",
"answ... | 3 | tests/test.py | icetana-james/python-onvif-zeep |
class EditorBrowsableState(Enum,IComparable,IFormattable,IConvertible):
"""
Specifies the browsable state of a property or method from within an editor.
enum EditorBrowsableState,values: Advanced (2),Always (0),Never (1)
"""
def __eq__(self,*args):
""" x.__eq__(y) <==> x==yx.__eq__(y) <==> x==yx.__eq__(y) <==> x==y """
pass
def __format__(self,*args):
""" __format__(formattable: IFormattable,format: str) -> str """
pass
def __ge__(self,*args):
pass
def __gt__(self,*args):
pass
def __init__(self,*args):
""" x.__init__(...) initializes x; see x.__class__.__doc__ for signaturex.__init__(...) initializes x; see x.__class__.__doc__ for signaturex.__init__(...) initializes x; see x.__class__.__doc__ for signature """
pass
def __le__(self,*args):
pass
def __lt__(self,*args):
pass
def __ne__(self,*args):
pass
def __reduce_ex__(self,*args):
pass
def __str__(self,*args):
pass
Advanced=None
Always=None
Never=None
value__=None
| [
{
"point_num": 1,
"id": "more_functions_than_classes",
"question": "Does this file define more functions than classes?",
"answer": true
},
{
"point_num": 2,
"id": "has_multiple_inheritance",
"question": "Does any class in this file use multiple inheritance?",
"answer": true
},
... | 3 | stubs.min/System/ComponentModel/__init___parts/EditorBrowsableState.py | ricardyn/ironpython-stubs |
import numpy as np
from PIL import Image
from copy import deepcopy
INPUT_SHAPE = (84, 84)
def init_state():
    """Return an all-zero state of 4 stacked 84x84 frames (channels-first)."""
    return np.zeros((4, 84, 84))
def append_frame(state, frame):
    """Shift the frame stack one step and append `frame` as the newest.

    :param state: array of shape (num_frames, H, W); left unmodified
    :param frame: array of shape (H, W)
    :return: new array holding state[1:] followed by frame
    """
    # Allocate an uninitialized buffer instead of deep-copying `state`:
    # every slot is overwritten below, so the copied values were wasted work.
    new_state = np.empty_like(state)
    new_state[:-1] = state[1:]  # drop the oldest frame
    new_state[-1] = frame       # newest frame goes last
    return new_state
def process_observation(observation):
    """Downsample a raw frame to an 84x84 grayscale uint8 image.

    :param observation: H x W x C image array (presumably uint8 RGB —
        TODO confirm against the environment)
    :return: uint8-valued array of shape INPUT_SHAPE scaled to [0, 1] floats
    """
    assert observation.ndim == 3
    img = Image.fromarray(observation)
    img = img.resize(INPUT_SHAPE).convert('L')  # resize and convert to grayscale
    processed_observation = np.array(img)
    assert processed_observation.shape == INPUT_SHAPE
    return processed_observation.astype('float32') / 255.  # saves storage in experience memory
def process_state_batch(batch):
    """Rescale a batch of uint8 states to float32 values in [0, 1]."""
    arr = np.asarray(batch)
    return arr.astype('float32') / 255.
def clip_rewards(reward):
    """Clip a reward (scalar or array) into [-1, 1] — standard DQN clipping."""
    return np.minimum(np.maximum(reward, -1), 1)
| [
{
"point_num": 1,
"id": "every_function_under_20_lines",
"question": "Is every function in this file shorter than 20 lines?",
"answer": true
},
{
"point_num": 2,
"id": "no_function_exceeds_5_params",
"question": "Does every function in this file take 5 or fewer parameters (excluding ... | 3 | Pacman/processor.py | dorlivne/PoPS |
## Compiled from NodeLoads.ipynb on Sun Dec 10 12:51:11 2017
## DO NOT EDIT THIS FILE. YOUR CHANGES WILL BE LOST!!
## In [1]:
import numpy as np
from salib import extend
## In [9]:
class NodeLoad(object):
    """A nodal load (FX, FY, MZ) stored as a 3x1 column matrix."""

    def __init__(self, fx=0., fy=0., mz=0.):
        # Scalar call builds the column vector; otherwise `fx` is an
        # existing 3x1 matrix to copy (used by __mul__).
        if np.isscalar(fx):
            self.forces = np.matrix([fx, fy, mz], dtype=np.float64).T
        else:
            self.forces = fx.copy()

    def __mul__(self, scale):
        """Return this load scaled by `scale`; identity scale short-circuits."""
        return self if scale == 1.0 else self.__class__(self.forces * scale)

    __rmul__ = __mul__

    def __repr__(self):
        components = list(np.array(self.forces.T)[0])
        return "{}({},{},{})".format(self.__class__.__name__, *components)

    def __getitem__(self, ix):
        """Component access: 0 -> fx, 1 -> fy, 2 -> mz."""
        return self.forces[ix, 0]
## In [11]:
def makeNodeLoad(data):
    """Build a NodeLoad from a mapping with optional FX/FY/MZ keys (default 0)."""
    return NodeLoad(data.get('FX', 0), data.get('FY', 0), data.get('MZ', 0))
## In [13]:
id(NodeLoad)
## In [17]:
@extend
class NodeLoad:
    """Extension: named component accessors (fx, fy, mz) for NodeLoad."""
    @property
    def fx(self):
        # x-direction force: row 0 of the 3x1 forces matrix.
        return self.forces[0,0]
    @fx.setter
    def fx(self,v):
        self.forces[0,0] = v
    @property
    def fy(self):
        # y-direction force: row 1.
        return self.forces[1,0]
    @fy.setter
    def fy(self,v):
        self.forces[1,0] = v
    @property
    def mz(self):
        # Moment about z: row 2.
        return self.forces[2,0]
    @mz.setter
    def mz(self,v):
        self.forces[2,0] = v
## In [ ]:
| [
{
"point_num": 1,
"id": "every_class_has_docstring",
"question": "Does every class in this file have a docstring?",
"answer": false
},
{
"point_num": 2,
"id": "all_params_annotated",
"question": "Does every function parameter in this file have a type annotation (excluding self/cls)?"... | 3 | matrix-methods/frame2d/Frame2D/NodeLoads.py | nholtz/structural-analysis |
from myqueue import CheckableQueue
def bfs(start, bases, players):
    """Breadth-first search over connected bases.

    Returns the path (list of base indices) from `start` to the nearest
    base occupied by one of `players`, or None when unreachable.

    :param start: the starting base (an element of `bases`)
    :param bases: list of base dicts with a 'Connections' index list
    :param players: players whose bases count as goals
    """
    # a FIFO open_set
    open_set = CheckableQueue()
    # an empty set to maintain visited nodes
    closed_set = set()
    # a dictionary to maintain meta information (used for path formation)
    meta = dict() # key -> (parent state, action to reach child)
    # initialize: states are indices into `bases`
    start = bases.index(start)
    meta[start] = None
    open_set.put(start)
    while not open_set.empty():
        parent_state = open_set.get()
        if is_goal(bases[parent_state], players):
            return construct_path(parent_state, meta)
        for child_state in bases[parent_state]["Connections"]:
            if child_state in closed_set:
                continue
            # Only enqueue states not already waiting in the frontier.
            if child_state not in open_set:
                meta[child_state] = parent_state
                open_set.put(child_state)
        closed_set.add(parent_state)
    # Implicitly returns None when no goal base is reachable.
def construct_path(state, meta):
    """Follow parent links in `meta` from `state` back to the start.

    :return: list of states ordered start -> `state`
    """
    path = [state]
    parent = meta.get(state)
    while parent is not None:
        path.append(parent)
        parent = meta.get(parent)
    path.reverse()
    return path
def is_goal(state, players):
    """True when the base `state` is occupied by any of `players`."""
    occupant = state["Occupying_player"]
    return any(occupant == player for player in players)
| [
{
"point_num": 1,
"id": "all_params_annotated",
"question": "Does every function parameter in this file have a type annotation (excluding self/cls)?",
"answer": false
},
{
"point_num": 2,
"id": "all_function_names_snake_case",
"question": "Are all function names in this file written ... | 3 | AIs/Computer-py/mysearches.py | saeidh12/razmai-server |
'''
This file is a part of Test Mile Arjuna
Copyright 2018 Test Mile Software Testing Pvt Ltd
Website: www.TestMile.com
Email: support [at] testmile.com
Creator: Rahul Verma
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
'''
from arjuna.tpi.markup import *
from arjuna.tpi.markup_helpers import *
@test_function
def assert_truth(my):
    """Positive case: assert_true/assert_false accept matching booleans."""
    my.steps.assert_true("Should Pass for True value.", True)
    my.steps.assert_false("Should Pass for False value.", False)
@test_function
def assert_true_fail_for_false(my):
    """Negative case: assert_true must fail when given False."""
    my.steps.assert_true("Should Fail for False value.", False)
@test_function
def assert_false_fail_for_true(my):
    """Negative case: assert_false must fail when given True."""
    my.steps.assert_false("Should Fail for True value.", True)
@test_function
def assert_true_non_boolean_raises_exception(my):
    """Error case: a non-boolean argument to assert_true must error out."""
    my.steps.assert_true("Should throw error for non-boolean value", "testing")
@test_function
def assert_false_non_boolean_raises_exception(my):
    """Error case: a non-boolean argument to assert_false must error out."""
    my.steps.assert_false("Should throw error for non-boolean value", "testing")
@test_function
def assert_true_errs_for_none(my):
    """Error case: None is not a boolean, so assert_true must error out."""
    my.steps.assert_true("Should throw error for non-boolean value.", None)
@test_function
def assert_false_errs_for_none(my):
    """Error case: None is not a boolean, so assert_false must error out."""
    my.steps.assert_false("Should throw error for non-boolean value.", None)
{
"point_num": 1,
"id": "all_params_annotated",
"question": "Does every function parameter in this file have a type annotation (excluding self/cls)?",
"answer": false
},
{
"point_num": 2,
"id": "all_function_names_snake_case",
"question": "Are all function names in this file written ... | 3 | arjuna-samples/workspace/arjex/tests/modules/s01unitee_engine/ep03_what_i_assert/ex02_assert_true_false.py | test-mile/arjuna |
# Copyright 2018 Mathias Burger <mathias.burger@gmail.com>
#
# SPDX-License-Identifier: MIT
from typing import Callable
import gimp
from pgimp.gimp.parameter import get_json
def open_xcf(filename):
    """
    Load an XCF file non-interactively (run-mode 0) via the GIMP PDB.

    :type filename: str
    :rtype: gimp.Image
    """
    return gimp.pdb.gimp_xcf_load(0, filename, filename)
def save_xcf(image, filename):
    """
    Save an image to an XCF file non-interactively (run-mode 0).

    :type image: gimp.Image
    :type filename: str
    """
    gimp.pdb.gimp_xcf_save(0, image, None, filename, filename)
def close_image(image):
    """
    Delete the image from GIMP's memory, releasing its resources.

    :type image: gimp.Image
    :return:
    """
    gimp.pdb.gimp_image_delete(image)
class XcfFile:
    """Context manager that opens an XCF file and optionally saves on exit."""
    def __init__(self, file, save=False):
        """
        :type file: str
        :type save: bool
        """
        self._file = file
        self._save = save
        self._image = None
    def __enter__(self):
        """
        :rtype: gimp.Image
        """
        self._image = open_xcf(self._file)
        return self._image
    def __exit__(self, exc_type, exc_val, exc_tb):
        # Save when requested, then always free the image. Returning False
        # lets any exception from the with-block propagate.
        if self._save:
            save_xcf(self._image, self._file)
        close_image(self._image)
        return False
def for_each_file(callback, save=False):
    # type: (Callable[[gimp.Image, str], None], bool) -> None
    """Invoke callback(image, file) for every file passed via '__files__'."""
    files = get_json('__files__')
    for file in files:
        with XcfFile(file, save=save) as image:
            callback(image, file)
| [
{
"point_num": 1,
"id": "every_function_under_20_lines",
"question": "Is every function in this file shorter than 20 lines?",
"answer": true
},
{
"point_num": 2,
"id": "all_params_annotated",
"question": "Does every function parameter in this file have a type annotation (excluding se... | 3 | pgimp/gimp/file.py | netogallo/pgimp |
"""added user is_active
Revision ID: 0502a89b8394
Revises: 8f9f223976a8
Create Date: 2021-01-27 12:30:38.668195
"""
from alembic import op
import sqlalchemy as sa
# revision identifiers, used by Alembic.
revision = '0502a89b8394'
down_revision = '8f9f223976a8'
branch_labels = None
depends_on = None
def upgrade():
    """Add the nullable boolean `is_active` column to the `users` table."""
    # ### commands auto generated by Alembic - please adjust! ###
    op.add_column('users', sa.Column('is_active', sa.Boolean(), nullable=True))
    # ### end Alembic commands ###
def downgrade():
    """Drop the `is_active` column added by this revision."""
    # ### commands auto generated by Alembic - please adjust! ###
    op.drop_column('users', 'is_active')
    # ### end Alembic commands ###
| [
{
"point_num": 1,
"id": "has_nested_function_def",
"question": "Does this file contain any function defined inside another function?",
"answer": false
},
{
"point_num": 2,
"id": "every_function_has_docstring",
"question": "Does every function in this file have a docstring?",
"ans... | 3 | API/alembic/versions/0502a89b8394_added_user_is_active.py | sourcery-ai-bot/PageMail |
#!/usr/bin/env python 3
# -*- coding: utf-8 -*-
#
# Copyright (c) 2021 PanXu, Inc. All Rights Reserved
#
"""
使用 bert 的 init weights
Authors: PanXu
Date: 2021/11/08 08:44:00
"""
from torch.nn import Module
from torch import nn
from transformers import BertConfig
class BertInitWeights:
    """
    BERT weight initialization.

    Reference: BertPreTrainedModel._init_weights
    """
    def __init__(self, bert_config: BertConfig):
        self.config = bert_config
    def __call__(self, module: Module) -> None:
        """
        Initialize one module in place (apply via Module.apply).

        Reference: BertPreTrainedModel._init_weights
        :param module: the module to initialize
        :return: None
        """
        # Linear/Embedding weights: normal init with the configured std.
        if isinstance(module, (nn.Linear, nn.Embedding)):
            module.weight.data.normal_(mean=0.0, std=self.config.initializer_range)
        elif isinstance(module, nn.LayerNorm):
            # LayerNorm starts as the identity transform: weight=1, bias=0.
            module.bias.data.zero_()
            module.weight.data.fill_(1.0)
        # Linear biases are zeroed regardless of the weight init above.
        if isinstance(module, nn.Linear) and module.bias is not None:
            module.bias.data.zero_()
| [
{
"point_num": 1,
"id": "more_functions_than_classes",
"question": "Does this file define more functions than classes?",
"answer": true
},
{
"point_num": 2,
"id": "every_function_has_docstring",
"question": "Does every function in this file have a docstring?",
"answer": false
}... | 3 | easytext/utils/nn/bert_init_weights.py | cjopengler/easytext |
import pytest
import re
from pycolog.log_entry import LogEntry
@pytest.mark.parametrize('chars,expected', [
    (4, 'S...'),
    (8, 'Short...'),
    (9, 'ShortLine'),
    (12, 'ShortLine'),
])
def test_truncate(chars, expected):
    """truncate() ellipsizes only when the line exceeds `chars`."""
    assert LogEntry('ShortLine').truncate(chars) == expected
def test_fields_as_attributes():
    """Named regex groups are exposed as attributes on the entry."""
    subject = LogEntry(
        'Space separated log line',
        line_format=re.compile(r'^(?P<a>\w+)\s\w+\s(?P<b>\w+)\s(?P<c>\w+)'))
    assert subject.a == 'Space'
    assert subject.b == 'log'
    assert subject.c == 'line'
def test_fields_as_items():
    """Named regex groups are also reachable via item access."""
    subject = LogEntry(
        'Space separated log line',
        line_format=re.compile(r'^(?P<a>\w+)\s\w+\s(?P<b>\w+)\s(?P<c>\w+)'))
    assert subject['a'] == 'Space'
    assert subject['b'] == 'log'
    assert subject['c'] == 'line'
def test_formatted_field():
    """A per-field callback transforms the raw captured value."""
    subject = LogEntry(
        'Space separated log line',
        line_format=re.compile(r'^(?P<a>\w+)\s\w+\s(?P<b>\w+)\s(?P<c>\w+)'),
        fields={'a': {'callback': lambda s: s[::-1], 'argument': 's'}}
    )
    assert subject.a == 'ecapS'
def test_formatted_field_additional_args():
    """Extra kwargs are forwarded to the field callback alongside the value."""
    subject = LogEntry(
        'Space separated log line',
        line_format=re.compile(r'^\w+\s(?P<a>\w+)\s\w+\s\w+'),
        fields={'a': {
            'callback': lambda start, stop, step, value: value[start:stop:step],
            'kwargs': {'start': 1, 'stop': 6, 'step': 2},
            'argument': 'value'}
        }
    )
    assert subject.a == 'eaa'
| [
{
"point_num": 1,
"id": "every_function_under_20_lines",
"question": "Is every function in this file shorter than 20 lines?",
"answer": true
},
{
"point_num": 2,
"id": "has_nested_function_def",
"question": "Does this file contain any function defined inside another function?",
"... | 3 | tests/test_log_entry.py | leupibr/pycolog |
class ConfigurationError(Exception):
    """
    Raised when an object is misconfigured — e.g. missing, invalid,
    or unknown properties.
    """

    def __init__(self, message):
        super().__init__()
        # Stored explicitly: the base Exception is built without args,
        # and __str__ below reads this attribute.
        self.message = message

    def __str__(self):
        return "%r" % (self.message,)
| [
{
"point_num": 1,
"id": "more_functions_than_classes",
"question": "Does this file define more functions than classes?",
"answer": true
},
{
"point_num": 2,
"id": "has_multiple_inheritance",
"question": "Does any class in this file use multiple inheritance?",
"answer": false
},... | 3 | torch_nlp_utils/common/checks.py | Nemexur/torch_data_utils |
import sys
# sys.path.append(
# '/home/rpl/Documents/rasmus/crazyswarm/ros_ws/src/crazyswarm/scripts/perceived-safety-study'
# )
# sys.path.append(
# '/home/rpl/Documents/rasmus/crazyswarm/ros_ws/src/crazyswarm/scripts/perceived-safety-study/utils'
# )
sys.path.append(
"/home/rpl/Documents/rasmus/crazyswarm/ros_ws/src/crazyswarm/scripts/perceived-safety-study"
)
sys.path.append(
"/home/rpl/Documents/rasmus/crazyswarm/ros_ws/src/crazyswarm/scripts/perceived-safety-study/utils"
)
from globalVariables import NUM_TRAJECTORIES_TO_TUNE_CBF
from Participant import Participant
from GaussianProcess import GaussianProcess
from globalVariables import PATH_TO_ROOT
def updateSafetyFunction():
    """Update the sf2 safety function for the participant id given in argv[1]."""
    gp = GaussianProcess(
        pID=int(sys.argv[1]),
        safetyFunction="sf2",
        csvFileName="sf2 - input.csv",
        savedTrajectoriesDir=f"{PATH_TO_ROOT}/preStudy/savedTrajectories")
    gp.startProcess()
def plotSafetyFunction():
    """Render the current sf2 prediction in 3D (uses placeholder pID 0)."""
    gp = GaussianProcess(
        pID=0,
        safetyFunction="sf2",
        csvFileName="sf2 - input.csv",
        savedTrajectoriesDir=f"{PATH_TO_ROOT}/preStudy/savedTrajectories")
    gp.plotCurrentPredictionAs3d()
if __name__ == "__main__":
    # "plot" renders the current prediction; any other argument (expected:
    # a participant id) updates the safety function instead.
    if len(sys.argv) > 1 and sys.argv[1] == "plot":
        plotSafetyFunction()
    else:
        updateSafetyFunction()
{
"point_num": 1,
"id": "has_nested_function_def",
"question": "Does this file contain any function defined inside another function?",
"answer": false
},
{
"point_num": 2,
"id": "every_function_under_20_lines",
"question": "Is every function in this file shorter than 20 lines?",
... | 3 | ros_ws/src/crazyswarm/scripts/perceived-safety-study/preStudy/main.py | rasmus-rudling/degree-thesis |
# coding: utf-8
"""
OpenAPI Petstore
This spec is mainly for testing Petstore server and contains fake endpoints, models. Please do not use this for any other purpose. Special characters: \" \\ # noqa: E501
The version of the OpenAPI document: 1.0.0
Generated by: https://openapi-generator.tech
"""
from __future__ import absolute_import
import unittest
import datetime
import petstore_api
from petstore_api.models.model200_response import Model200Response # noqa: E501
from petstore_api.rest import ApiException
class TestModel200Response(unittest.TestCase):
    """Model200Response unit test stubs (generated by openapi-generator)."""
    def setUp(self):
        pass
    def tearDown(self):
        pass
    def make_instance(self, include_optional):
        """Build a Model200Response test instance.

        When ``include_optional`` is False only the required parameters
        are passed; when True both required and optional parameters
        (``name`` and the reserved-word field ``_class``) are set.
        """
        # model = petstore_api.models.model200_response.Model200Response()  # noqa: E501
        if include_optional :
            return Model200Response(
                name = 56,
                _class = ''
            )
        else :
            return Model200Response(
        )
    def testModel200Response(self):
        """Smoke-test construction with and without optional fields."""
        inst_req_only = self.make_instance(include_optional=False)
        inst_req_and_optional = self.make_instance(include_optional=True)
if __name__ == '__main__':
    unittest.main()
| [
{
"point_num": 1,
"id": "every_function_under_20_lines",
"question": "Is every function in this file shorter than 20 lines?",
"answer": true
},
{
"point_num": 2,
"id": "has_multiple_inheritance",
"question": "Does any class in this file use multiple inheritance?",
"answer": false... | 3 | samples/openapi3/client/petstore/python-legacy/test/test_model200_response.py | JigarJoshi/openapi-generator |
import curses
from curses import wrapper, textpad
# stdscr = curses.initscr()
def messages_screen(y, x, stdscr):
    """Create and return the scrollable pad that displays received messages.

    Draws a rectangle border on ``stdscr`` sized to match the input box
    drawn by ``test_textpad`` and returns a curses pad inside it.

    Bug fix: the original used ``/`` on ints, which yields floats on
    Python 3 and makes the curses calls raise TypeError; ``//`` keeps
    the Python-2 behaviour while also working on Python 3.  The unused
    local ``x2`` was removed.
    """
    pad = curses.newpad(8, x // 3 + x + x // 2 - 2)
    ncols = x + x // 2   # interior width of the message area
    ulx = x // 3         # upper-left x of the message area
    textpad.rectangle(stdscr, y + 3, ulx - 1, 15, ulx + ncols)
    pad.scrollok(1)      # let the pad scroll as messages accumulate
    return pad
def test_textpad(x, y, stdscr, insert_mode=False):
    """Draw a one-line input box, let the user type, and return the text.

    Editing is terminated with Ctrl-G (curses Textbox convention).

    Bug fix: integer division (``//``) so curses receives ints on
    Python 3 (the original ``/`` produced floats there).  The prompt is
    now cleared with a blank string of exactly the prompt's length
    instead of a hand-counted run of spaces.
    """
    ncols, nlines = x + x // 2, 1
    uly, ulx = y, x // 3
    win = curses.newwin(nlines, ncols, uly, ulx)
    textpad.rectangle(stdscr, uly - 1, ulx - 1, uly + nlines, ulx + ncols)
    stdscr.refresh()
    box = textpad.Textbox(win, insert_mode)
    contents = box.edit()  # blocks until the user finishes editing
    prompt = 'Press any key to write again.'
    stdscr.addstr(uly - 2, ulx, prompt)
    stdscr.addstr(uly - 2, ulx, ' ' * len(prompt))  # erase the prompt again
    return contents
def main(stdscr):
    """Chat UI loop: banner, message pad, and a repeating input box.

    Bug fixes: ``//`` replaces ``/`` so curses receives ints on
    Python 3, and the dead ``x, y = stdscr.getmaxyx()`` store (whose
    values were immediately overwritten, in swapped order) was removed.
    """
    stdscr.clear()
    # getmaxyx() returns (rows, cols); derive layout anchors from it.
    y = stdscr.getmaxyx()[0] // 7
    x = stdscr.getmaxyx()[1] // 2 - 2
    stdscr.addstr(y, x, "Welcome")
    pad = messages_screen(y, x, stdscr)
    while True:
        # Read one message from the input box near the bottom of the screen.
        msg = test_textpad(x, y * 7 - 2, stdscr, True)
        pad.addstr(msg + "\n")
        pad.refresh(0, 0, 7, 15, 28, x // 3 + x + x // 2 - 2)
        stdscr.refresh()
        stdscr.getkey()  # wait for a key before offering the box again
wrapper(main)
| [
{
"point_num": 1,
"id": "every_function_has_docstring",
"question": "Does every function in this file have a docstring?",
"answer": false
},
{
"point_num": 2,
"id": "every_function_under_20_lines",
"question": "Is every function in this file shorter than 20 lines?",
"answer": fal... | 3 | cursor.py | LowLevel96/MQTT_Chat |
import discord
from discord.ext import commands
from discord.utils import get
class c316(commands.Cog, name="c316"):
    # Cog that posts the "Alexander the Impenetrable" custom card as an embed.
    def __init__(self, bot: commands.Bot):
        self.bot = bot
    @commands.command(name='Alexander_the_Impenetrable', aliases=['c316'])
    async def example_embed(self, ctx):
        # Build the card embed (thumbnail plus stat fields) and send it
        # to the channel the command was invoked in.
        embed = discord.Embed(title='Alexander the Impenetrable',
                              color=0x00008B)
        embed.set_thumbnail(url='https://www.duelingbook.com/images/custom-pics/2300000/2367727.jpg')
        embed.add_field(name='Status (Archetype)', value='Casual:3/Tournament:3', inline=True)
        embed.add_field(name='Type (Attribute)', value='Machine/Link/Effect (DARK)', inline=False)
        embed.add_field(name='Link Rating (ATK/Link Arrows)', value='3 (3200/↙️⬇️➡️)', inline=False)
        embed.add_field(name='Monster Effect', value='2+ Effect Monsters\nCannot attack. This effect cannot be negated. Once per turn, when a card or effect is activated that targets a monster(s) you control (Quick Effect): You can discard 1 card; negate the activation.', inline=False)
        embed.set_footer(text='Set Code: ANCF')
        await ctx.send(embed=embed)
# Standard discord.py extension hook: register the cog with the bot.
def setup(bot: commands.Bot):
    bot.add_cog(c316(bot))
{
"point_num": 1,
"id": "every_function_under_20_lines",
"question": "Is every function in this file shorter than 20 lines?",
"answer": true
},
{
"point_num": 2,
"id": "more_functions_than_classes",
"question": "Does this file define more functions than classes?",
"answer": true
... | 3 | upcfcardsearch/c316.py | ProfessorSean/Kasutamaiza |
# Tai Sakuma <tai.sakuma@gmail.com>
from __future__ import print_function
import os
import errno
import logging
import pytest
try:
import unittest.mock as mock
except ImportError:
import mock
from alphatwirl import mkdir_p
##__________________________________________________________________||
@pytest.fixture()
def mock_makedirs(monkeypatch):
    # Replace os.makedirs with a Mock so no directory is really created
    # and calls can be asserted on.
    ret = mock.Mock()
    monkeypatch.setattr(os, 'makedirs', ret)
    return ret
@pytest.fixture()
def mock_isdir(monkeypatch):
    # Replace os.path.isdir so tests control the "already exists" check.
    ret = mock.Mock()
    monkeypatch.setattr(os.path, 'isdir', ret)
    return ret
##__________________________________________________________________||
def test_emtpy(mock_makedirs):
    # An empty path is a no-op: os.makedirs must not be called at all.
    mkdir_p('')
    assert [ ] == mock_makedirs.call_args_list
def test_success(mock_makedirs):
    # Normal case: the path is forwarded to os.makedirs exactly once.
    mkdir_p('a/b')
    assert [mock.call('a/b')] == mock_makedirs.call_args_list
def test_already_exist(mock_makedirs, mock_isdir, caplog):
    # EEXIST on an existing directory is swallowed and logged at a level
    # just below DEBUG (hence the at_level(DEBUG - 1) capture).
    mock_isdir.return_value = True
    mock_makedirs.side_effect = OSError(errno.EEXIST, 'already exist')
    with caplog.at_level(logging.DEBUG - 1):
        mkdir_p('a/b')
    assert [mock.call('a/b')] == mock_makedirs.call_args_list
    assert len(caplog.records) == 1
    assert caplog.records[0].levelno == logging.DEBUG - 1
    assert 'tried' in caplog.records[0].msg
def test_raise(mock_makedirs, mock_isdir, caplog):
    # Any other OSError (the path exists but is not a directory, or the
    # errno differs) must propagate to the caller.
    mock_isdir.return_value = False
    mock_makedirs.side_effect = OSError
    with pytest.raises(OSError):
        mkdir_p('a/b')
    assert [mock.call('a/b')] == mock_makedirs.call_args_list
##__________________________________________________________________||
| [
{
"point_num": 1,
"id": "no_function_exceeds_5_params",
"question": "Does every function in this file take 5 or fewer parameters (excluding self/cls)?",
"answer": true
},
{
"point_num": 2,
"id": "all_params_annotated",
"question": "Does every function parameter in this file have a ty... | 3 | tests/unit/misc/test_mkdir_p.py | shane-breeze/AlphaTwirl |
import numpy as np
from lmfit import minimize, Parameters, Parameter, report_fit
from lmfit_testutils import assert_paramval, assert_paramattr
def test_basic():
    """Fit a decaying sine wave with lmfit.minimize and sanity-check the result."""
    # create data to be fitted: known curve plus Gaussian noise
    x = np.linspace(0, 15, 301)
    data = (5. * np.sin(2 * x - 0.1) * np.exp(-x*x*0.025) +
            np.random.normal(size=len(x), scale=0.2) )
    # define objective function: returns the array to be minimized
    def fcn2min(params, x, data):
        """ model decaying sine wave, subtract data"""
        amp = params['amp']
        shift = params['shift']
        omega = params['omega']
        decay = params['decay']
        model = amp * np.sin(x * omega + shift) * np.exp(-x*x*decay)
        return model - data
    # create a set of Parameters (initial guesses deliberately off)
    params = Parameters()
    params.add('amp', value= 10, min=0)
    params.add('decay', value= 0.1)
    params.add('shift', value= 0.0, min=-np.pi/2., max=np.pi/2)
    params.add('omega', value= 3.0)
    # do fit, here with leastsq model
    result = minimize(fcn2min, params, args=(x, data))
    # calculate final result
    final = data + result.residual
    # report_fit(result)
    # Loose bounds: the fit converged in a sane number of evaluations and
    # recovered parameters near the true amp=5, omega=2 used above.
    assert(result.nfev > 5)
    assert(result.nfev < 500)
    assert(result.chisqr > 1)
    assert(result.nvarys == 4)
    assert_paramval(result.params['amp'], 5.03, tol=0.05)
    assert_paramval(result.params['omega'], 2.0, tol=0.05)
if __name__ == '__main__':
    test_basic()
| [
{
"point_num": 1,
"id": "every_function_has_docstring",
"question": "Does every function in this file have a docstring?",
"answer": false
},
{
"point_num": 2,
"id": "all_params_annotated",
"question": "Does every function parameter in this file have a type annotation (excluding self/... | 3 | tests/test_basicfit.py | FaustinCarter/lmfit-py |
#Copyright ReportLab Europe Ltd. 2000-2018
#see license.txt for license details
__doc__="""The Reportlab PDF generation library."""
Version = "3.5.59"
__version__=Version
__date__='20210104'
import sys, os
__min_python_version__ = (3,6)
# Accept exactly Python 2.7 or >= 3.6; anything else is unsupported.
if sys.version_info[0:2]!=(2, 7) and sys.version_info< __min_python_version__:
    raise ImportError("""reportlab requires Python 2.7+ or %s.%s+; other versions are unsupported.
If you want to try with other python versions edit line 10 of reportlab/__init__
to remove this error.""" % (__min_python_version__))
#define these early in reportlab's life
isPy3 = sys.version_info[0]==3
if isPy3:
    # Python 3: recreate the removed py2 builtins cmp/xrange.
    def cmp(a,b):
        # Three-way compare, matching the Python 2 builtin's contract.
        return -1 if a<b else (1 if a>b else 0)
    xrange = range
    ascii = ascii
    def _fake_import(fn,name):
        # Load the file at ``fn`` as module ``name`` via importlib.
        from importlib import machinery
        m = machinery.SourceFileLoader(name,fn)
        try:
            sys.modules[name] = m.load_module(name)
        except FileNotFoundError:
            raise ImportError('file %s not found' % ascii(fn))
else:
    # Python 2: take ascii from future_builtins; keep py2 builtins as-is.
    from future_builtins import ascii
    xrange = xrange
    cmp = cmp
    def _fake_import(fn,name):
        # Load the file at ``fn`` as module ``name`` via the py2 imp module.
        if os.path.isfile(fn):
            import imp
            with open(fn,'rb') as f:
                sys.modules[name] = imp.load_source(name,fn,f)
#try to use dynamic modifications from
#reportlab.local_rl_mods.py
#reportlab_mods.py or ~/.reportlab_mods
try:
    import reportlab.local_rl_mods
except ImportError:
    pass
if not isPy3:
    # py2 has no PermissionError; alias it so the except clause below parses.
    PermissionError = ImportError
try:
    import reportlab_mods #application specific modifications can be anywhere on python path
except ImportError:
    # Fall back to a user-level ~/.reportlab_mods file, silently ignoring
    # its absence or unreadability.
    try:
        _fake_import(os.path.expanduser(os.path.join('~','.reportlab_mods')),'reportlab_mods')
    except (ImportError,KeyError,PermissionError):
        pass
| [
{
"point_num": 1,
"id": "no_function_exceeds_5_params",
"question": "Does every function in this file take 5 or fewer parameters (excluding self/cls)?",
"answer": true
},
{
"point_num": 2,
"id": "all_params_annotated",
"question": "Does every function parameter in this file have a ty... | 3 | venv/lib/python2.7/site-packages/reportlab/__init__.py | Christian-Castro/castro_odoo8 |
class DoublyLinkedListNode:
    # Node of a doubly linked list: a payload plus links in both directions.
    def __init__(self, node_data):
        self.data = node_data  # payload value
        self.next = None       # next node (towards the tail), or None
        self.prev = None       # previous node (towards the head), or None
def to_str(head):
    """Return the list's values as ', '-joined text (easier test output)."""
    parts = []
    node = head
    while node is not None:
        parts.append(str(node.data))
        node = node.next
    return ', '.join(parts)
def sortedInsert(head, data):
    """Insert ``data`` into an ascending doubly linked list; return the head.

    Bug fix: the original only maintained the ``next`` pointers, so every
    ``prev`` link around the insertion point (including ``head.prev`` when
    inserting a new head) was left stale.  This version keeps both
    directions consistent.
    """
    node = DoublyLinkedListNode(data)
    if head is None:
        return node
    current = head
    prev = None
    # Walk to the first node whose value is >= data.
    while current and current.data < data:
        prev = current
        current = current.next
    # Splice the new node in, wiring both next and prev links.
    node.next = current
    node.prev = prev
    if current:
        current.prev = node
    if prev:
        prev.next = node
        return head
    return node  # inserted before the old head
if __name__ == '__main__':
    # Build the list 0, 2, 4, ..., 20, then exercise sortedInsert at the
    # head (-2), in the middle (5, 6, 7) and at the tail (99), printing
    # the list after each insertion.
    h = DoublyLinkedListNode(0)
    h_current = h
    for x in range(1, 11):
        n = DoublyLinkedListNode(x*2)
        n.prev = h_current
        h_current.next = n
        h_current = n
    print(to_str(h))
    h = sortedInsert(h, -2)
    print(to_str(h))
    h = sortedInsert(h, 5)
    print(to_str(h))
    h = sortedInsert(h, 6)
    print(to_str(h))
    h = sortedInsert(h, 7)
    print(to_str(h))
    h = sortedInsert(h, 99)
    print(to_str(h))
| [
{
"point_num": 1,
"id": "all_params_annotated",
"question": "Does every function parameter in this file have a type annotation (excluding self/cls)?",
"answer": false
},
{
"point_num": 2,
"id": "all_function_names_snake_case",
"question": "Are all function names in this file written ... | 3 | data_structures__linked_lists/solution_14.py | rikkt0r/hackerrank_python |
# Copyright 2019 Alibaba Cloud Inc. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# -*- coding: utf-8 -*-
class RequestContext:
    """Mutable state bag passed along the request handler chain.

    Each handler may read or overwrite any field as the request and its
    response travel through the chain.
    """

    def __init__(self, api_request=None, http_request=None, api_response=None, http_response=None,
                 exception=None, retry_flag=True, retry_backoff=0, config=None, client=None,
                 result=None):
        # Store every constructor argument as a same-named attribute.
        fields = {
            'api_request': api_request,
            'http_request': http_request,
            'api_response': api_response,
            'http_response': http_response,
            'exception': exception,
            'result': result,
            'config': config,
            'client': client,
            'retry_flag': retry_flag,
            'retry_backoff': retry_backoff,
        }
        for attr, value in fields.items():
            setattr(self, attr, value)
class RequestHandler:
    """Base class for chain handlers; subclasses override one or both hooks."""

    def handle_request(self, context):
        """Pre-flight hook; the base implementation is a no-op."""

    def handle_response(self, context):
        """Post-flight hook; the base implementation is a no-op."""
| [
{
"point_num": 1,
"id": "more_functions_than_classes",
"question": "Does this file define more functions than classes?",
"answer": true
},
{
"point_num": 2,
"id": "every_function_under_20_lines",
"question": "Is every function in this file shorter than 20 lines?",
"answer": true
... | 3 | alibabacloud/handlers/__init__.py | wallisyan/alibabacloud-python-sdk-v2 |
# based on https://ruder.io/optimizing-gradient-descent/#adam
# and https://github.com/eriklindernoren/ML-From-Scratch/blob/master/mlfromscratch/deep_learning/optimizers.py#L106
import numpy as np
class Adam:
    """Adam - Adaptive Moment Estimation.

    Keeps exponentially decaying averages of past gradients (1st moment)
    and past squared gradients (2nd moment), with bias correction.

    Parameters:
    -----------
    learning_rate: float = 0.001
        The step length used when following the negative gradient.
    beta_1: float = 0.9
        The exponential decay rate for the 1st moment estimates.
    beta_2: float = 0.999
        The exponential decay rate for the 2nd moment estimates.
    epsilon: float = 1e-07
        A small floating point value to avoid zero denominator.
    """

    def __init__(self, learning_rate: float = 0.001, beta_1: float = 0.9, beta_2: float = 0.999, epsilon: float = 1e-7) -> None:
        self.learning_rate = learning_rate
        self.epsilon = epsilon
        self.beta_1 = beta_1
        self.beta_2 = beta_2
        self.t = 0     # update counter, used for bias correction
        self.m = None  # decaying average of past gradients
        self.v = None  # decaying average of past squared gradients

    def update(self, w: np.ndarray, grad_wrt_w: np.ndarray) -> np.ndarray:
        """Return the weights after one Adam step along -grad_wrt_w."""
        self.t += 1
        if self.m is None:
            # Lazily initialise the moment buffers to the gradient's shape.
            shape = np.shape(grad_wrt_w)
            self.m = np.zeros(shape)
            self.v = np.zeros(shape)
        b1, b2 = self.beta_1, self.beta_2
        self.m = b1 * self.m + (1 - b1) * grad_wrt_w
        self.v = b2 * self.v + (1 - b2) * grad_wrt_w ** 2
        # Bias-corrected first and second moment estimates.
        m_hat = self.m / (1 - b1 ** self.t)
        v_hat = self.v / (1 - b2 ** self.t)
        return w - self.learning_rate * m_hat / (np.sqrt(v_hat) + self.epsilon)
| [
{
"point_num": 1,
"id": "more_functions_than_classes",
"question": "Does this file define more functions than classes?",
"answer": true
},
{
"point_num": 2,
"id": "every_function_under_20_lines",
"question": "Is every function in this file shorter than 20 lines?",
"answer": true
... | 3 | Optimizers/adam/code/adam.py | TannerGilbert/Machine-Learning-Explained |
import requests
import sqlalchemy
import xmltodict
from sqlalchemy import create_engine, MetaData
from collections import defaultdict
import datetime
from utils import *
class Capture(object):
    """Fetch XML data from a URL and insert it into a Postgres schema.

    NOTE(review): the connection string hard-codes credentials and host,
    and load_db_schema reflects the fixed schema 'camara_v1' regardless of
    the ``schema`` argument — both look like configuration that leaked into
    code; confirm before relying on ``schema``/``database`` elsewhere.
    """
    def __init__(self,
                 schema,
                 database='projetocurio'
                 ):
        self.schema = schema      # schema name used when inserting rows
        self.database = database  # NOTE(review): not used by connect_to_db
        self.engine = self.connect_to_db()
        self.meta = self.load_db_schema()
        self.url = None
        self.data = None          # raw XML text, then the parsed dict
    def connect_to_db(self):
        # SECURITY: credentials/host are hard-coded here; move them to
        # configuration or environment variables.
        return create_engine('postgresql://uploaddata:VgyBhu876%%%@104.155.150.247:5432/projetocurio')
    def load_db_schema(self):
        # Reflect the existing tables. The schema is fixed to 'camara_v1',
        # not self.schema — presumably intentional, TODO confirm.
        metadata = MetaData()
        metadata.reflect(self.engine, schema='camara_v1')
        return metadata
    def request(self, url):
        # Store the response body on HTTP 200, None on any other status.
        data = requests.get(url)
        if data.status_code == 200:
            self.data = data.text
        else:
            self.data = None
    def xml_to_dict(self):
        # Parse the fetched XML text in place into a nested dict.
        self.data = xmltodict.parse(self.data)
    def to_default_dict(self, list_of_dic):
        # Wrap each dict so missing keys read as None instead of raising.
        return [defaultdict(lambda: None, dic) for dic in force_list(list_of_dic)]
    def capture_data(self, url):
        # Convenience: fetch then parse.
        self.request(url)
        self.xml_to_dict()
    def insert_data(self, list_of_dic, table):
        # Insert one row per dict into <schema>.<table>.
        table_string = self.schema + '.' + table
        with self.engine.connect() as conn:
            print('inserting data')
            for dic in list_of_dic:
                conn.execute(self.meta.tables[table_string].insert(), dic)
            print('closing connection')
{
"point_num": 1,
"id": "every_function_under_20_lines",
"question": "Is every function in this file shorter than 20 lines?",
"answer": true
},
{
"point_num": 2,
"id": "more_functions_than_classes",
"question": "Does this file define more functions than classes?",
"answer": true
... | 3 | bigua/API/capture.py | AliferSales/bigua |
#!/usr/bin/env python
import os
from setuptools import setup
from setuptools.command.test import test
import codecs
def root_dir():
    """Return the directory containing this file, or '.' when it is empty."""
    return os.path.dirname(__file__) or '.'
class pytest_test(test):
    # setuptools `test` command that delegates to pytest instead of the
    # default unittest runner.
    def finalize_options(self):
        test.finalize_options(self)
        self.test_args = []
        self.test_suite = True
    def run_tests(self):
        # Imported lazily so `setup.py` itself works without pytest installed.
        import pytest
        pytest.main([])
# Package metadata for django-treebeard; `test` is remapped to pytest above.
setup_args = dict(
    name='django-treebeard',
    version='3.0',
    url='https://tabo.pe/projects/django-treebeard/',
    author='Gustavo Picon',
    author_email='tabo@tabo.pe',
    license='Apache License 2.0',
    packages=['treebeard', 'treebeard.templatetags', 'treebeard.tests'],
    package_dir={'treebeard': 'treebeard'},
    package_data={
        'treebeard': ['templates/admin/*.html', 'static/treebeard/*']},
    description='Efficient tree implementations for Django 1.6+',
    long_description=codecs.open(root_dir() + '/README.rst', encoding='utf-8').read(),
    cmdclass={'test': pytest_test},
    install_requires=['Django>=1.6'],
    tests_require=['pytest'],
    classifiers=[
        'Development Status :: 5 - Production/Stable',
        'Intended Audience :: Developers',
        'License :: OSI Approved :: Apache Software License',
        'Environment :: Web Environment',
        'Framework :: Django',
        'Programming Language :: Python',
        'Programming Language :: Python :: 2.7',
        'Programming Language :: Python :: 3.4',
        'Operating System :: OS Independent',
        'Topic :: Software Development :: Libraries',
        'Topic :: Utilities'])
# Only run setup() when executed as a script, so the metadata is importable.
if __name__ == '__main__':
    setup(**setup_args)
| [
{
"point_num": 1,
"id": "has_multiple_inheritance",
"question": "Does any class in this file use multiple inheritance?",
"answer": false
},
{
"point_num": 2,
"id": "more_functions_than_classes",
"question": "Does this file define more functions than classes?",
"answer": true
},... | 3 | setup.py | kezabelle/django-treebeard |
import logging
import threading
import time
# Credits to https://gist.github.com/cypreess/5481681
class PeriodicThread(object):
    """
    Python periodic Thread using Timer with instant cancellation

    Repeatedly runs ``callback(*args, **kwargs)`` every ``period`` seconds
    until cancel() is called.
    """

    def __init__(self, callback=None, period=1, name=None, *args, **kwargs):
        self.name = name
        self.args = args
        self.kwargs = kwargs
        self.callback = callback
        self.period = period
        self.stop = False
        self.current_timer = None
        # Guards the stop flag and rescheduling so cancel() cannot race _run().
        self.schedule_lock = threading.Lock()

    def start(self):
        """
        Mimics Thread standard start method
        """
        self.schedule_timer()

    def run(self):
        """
        By default run callback. Override it if you want to use inheritance
        """
        if self.callback is not None:
            self.callback(*self.args, **self.kwargs)

    def _run(self):
        """
        Run desired callback and then reschedule Timer (if thread is not stopped)
        """
        try:
            self.run()
        except Exception as e:
            logging.exception("Exception in running periodic thread")
        finally:
            with self.schedule_lock:
                if not self.stop:
                    self.schedule_timer()

    def schedule_timer(self):
        """
        Schedules next Timer run

        Bug fix: the original forwarded ``*self.args, **self.kwargs`` to
        threading.Timer, where they were misinterpreted as Timer's own
        ``args``/``kwargs`` parameters and broke any non-empty callback
        args.  run() already applies them to the callback, so the Timer
        only needs the interval and the target.
        """
        self.current_timer = threading.Timer(self.period, self._run)
        if self.name:
            self.current_timer.name = self.name
        self.current_timer.start()

    def cancel(self):
        """
        Mimics Timer standard cancel method
        """
        with self.schedule_lock:
            self.stop = True
            if self.current_timer is not None:
                self.current_timer.cancel()

    def join(self):
        """
        Mimics Thread standard join method
        """
        self.current_timer.join()
| [
{
"point_num": 1,
"id": "every_function_under_20_lines",
"question": "Is every function in this file shorter than 20 lines?",
"answer": true
},
{
"point_num": 2,
"id": "has_multiple_inheritance",
"question": "Does any class in this file use multiple inheritance?",
"answer": false... | 3 | pilapse/scheduler.py | yix/pilapse |
import abc
from typing import List
class IPCWrapper(abc.ABC):
    """
    This public class defines the mosec IPC wrapper plugin interface.
    The wrapper has to at least implement `put` and `get` method.

    Implementations must make `get` the inverse of `put`: the IDs returned
    by `put` are later passed back to `get` to recover the original data.
    """
    @abc.abstractmethod
    def put(self, data: List[bytes]) -> List[bytes]:
        """Put bytes to somewhere to get ids, which are sent via protocol.
        Args:
            data (List[bytes]): List of bytes data.
        Returns:
            List[bytes]: List of bytes ID.
        """
    @abc.abstractmethod
    def get(self, ids: List[bytes]) -> List[bytes]:
        """Get bytes from somewhere by ids, which are received via protocol.
        Args:
            ids (List[bytes]): List of bytes ID.
        Returns:
            List[bytes]: List of bytes data.
        """
| [
{
"point_num": 1,
"id": "has_multiple_inheritance",
"question": "Does any class in this file use multiple inheritance?",
"answer": false
},
{
"point_num": 2,
"id": "every_function_under_20_lines",
"question": "Is every function in this file shorter than 20 lines?",
"answer": true... | 3 | mosec/ipc.py | mosecorg/mosec |
"""added purchase_date to the Stock model
Revision ID: 4f494d6d1000
Revises: df472f9e977f
Create Date: 2022-02-13 06:35:05.343296
"""
from alembic import op
import sqlalchemy as sa
# revision identifiers, used by Alembic.
revision = '4f494d6d1000'
down_revision = 'df472f9e977f'
branch_labels = None
depends_on = None
def upgrade():
    # ### commands auto generated by Alembic - please adjust! ###
    # Adds the nullable purchase_date timestamp column to `stocks`.
    op.add_column('stocks', sa.Column('purchase_date', sa.DateTime(), nullable=True))
    # ### end Alembic commands ###
def downgrade():
    # ### commands auto generated by Alembic - please adjust! ###
    # Reverts the upgrade by dropping the column again.
    op.drop_column('stocks', 'purchase_date')
    # ### end Alembic commands ###
| [
{
"point_num": 1,
"id": "all_return_types_annotated",
"question": "Does every function in this file have a return type annotation?",
"answer": false
},
{
"point_num": 2,
"id": "has_nested_function_def",
"question": "Does this file contain any function defined inside another function?... | 3 | migrations/versions/4f494d6d1000_added_purchase_date_to_the_stock_model.py | YellowFlash2012/stock-portfolio-io |
# Big O complexity
# O(log2(n))
# works only on sorted array
# recursive
def binary_search_recursive(arr, arg, left, right):
if right >= left:
middle = left + (right - left) // 2
if arr[middle] == arg:
return middle
elif arr[middle] > arg:
return binary_search_recursive(arr, arg, left, middle - 1)
else:
return binary_search_recursive(arr, arg, middle + 1, right)
else:
return -1
# iterative
def binary_search(arr, arg, left, right):
while left <= right:
mid = left + (right - left) // 2
if arr[mid] == arg:
return mid
elif arr[mid] < arg:
left = mid + 1
else:
right = mid - 1
return -1
| [
{
"point_num": 1,
"id": "has_nested_function_def",
"question": "Does this file contain any function defined inside another function?",
"answer": false
},
{
"point_num": 2,
"id": "every_function_under_20_lines",
"question": "Is every function in this file shorter than 20 lines?",
... | 3 | computer_science/algoritms/search/binarysearch.py | kuzxnia/algoritms |
"""Initial Migration
Revision ID: 58556190cb24
Revises: 4a167476d5f7
Create Date: 2021-08-20 12:30:57.334610
"""
from alembic import op
import sqlalchemy as sa
# revision identifiers, used by Alembic.
revision = '58556190cb24'
down_revision = '4a167476d5f7'
branch_labels = None
depends_on = None
def upgrade():
    # ### commands auto generated by Alembic - please adjust! ###
    # Renames pass_secure to password via add+drop; note existing hashes
    # are NOT migrated into the new column.
    op.add_column('users', sa.Column('password', sa.String(length=255), nullable=True))
    op.drop_column('users', 'pass_secure')
    # ### end Alembic commands ###
def downgrade():
    # ### commands auto generated by Alembic - please adjust! ###
    # Restores the old column and drops the new one (again without data).
    op.add_column('users', sa.Column('pass_secure', sa.VARCHAR(length=255), autoincrement=False, nullable=True))
    op.drop_column('users', 'password')
    # ### end Alembic commands ###
| [
{
"point_num": 1,
"id": "every_function_has_docstring",
"question": "Does every function in this file have a docstring?",
"answer": false
},
{
"point_num": 2,
"id": "has_nested_function_def",
"question": "Does this file contain any function defined inside another function?",
"ans... | 3 | migrations/versions/58556190cb24_initial_migration.py | samwel-chege/Pitches |
from sklearn.base import BaseEstimator
from collections import defaultdict
from lcs import lat_lng
from fastdtw import fastdtw
from haversine import haversine
import numpy as np
class KNN(BaseEstimator):
    """k-nearest-neighbours classifier for GPS trajectories.

    The distance between two instances is the FastDTW alignment cost of
    their lat/lng sequences, with haversine (great-circle) distance
    between individual points.
    """
    def __init__(self, n_neighbors=5):
        self.n_neighbors = n_neighbors

    def fit(self, data, target):
        # Lazy learner: just memorise the training set.
        self.X = data
        self.y = target
        return self

    def _distance(self, instance1, instance2):
        # Dynamic-time-warping distance between the two coordinate
        # sequences (replaces the earlier manhattan/euclidean attempts).
        inst1 = np.array(lat_lng(instance1))
        inst2 = np.array(lat_lng(instance2))
        distance, path = fastdtw(inst1, inst2, dist=haversine)
        return distance

    def _instance_prediction(self, test):
        # Sort all training instances by distance to the test instance.
        distances = sorted((self._distance(x, test), y) for x, y in zip(self.X, self.y))
        # Keep the k nearest and group their weights per class.
        instances = self._classes_instances(distances[:self.n_neighbors])
        instances_by_class = defaultdict(list)
        for d, c in instances:
            instances_by_class[c].append(d)
        # Bug fix: dict.iteritems() is Python-2 only; items() works on
        # both Python 2 and 3.
        counts = [(sum(value), key) for key, value in instances_by_class.items()]
        # The class with the largest summed weight wins.
        majority = max(counts)
        return majority[1]

    def _classes_instances(self, distances):
        # Uniform weight 1 per neighbour (inverse-distance weighting is
        # intentionally disabled — see history in VCS).
        cl_inst = [(1, y) for x, y in distances]
        return cl_inst

    def predict(self, X):
        # Predict each instance independently.
        return [self._instance_prediction(x) for x in X]

    def score(self, X, y):
        # Accuracy. NOTE(review): relies on true division (Python 3 or
        # `from __future__ import division` on Python 2).
        return sum(1 for p, t in zip(self.predict(X), y) if p == t) / len(y)
| [
{
"point_num": 1,
"id": "more_functions_than_classes",
"question": "Does this file define more functions than classes?",
"answer": true
},
{
"point_num": 2,
"id": "all_function_names_snake_case",
"question": "Are all function names in this file written in snake_case?",
"answer": ... | 3 | Data_Visualization-TimeSeries_Classification/KNN.py | wyolland/DataMining |
import os
import datetime
import pytest
from smart_getenv import getenv
from kirby.api.queue import Queue
def test_it_can_create_a_queue():
    # In testing mode the queue is in-memory: append then read back.
    q = Queue("my-queue", testing=True)
    q.append("hello")
    assert q.last() == "hello"
@pytest.mark.integration
@pytest.mark.skipif(
    not os.getenv("KAFKA_BOOTSTRAP_SERVERS"),
    reason="missing KAFKA_BOOTSTRAP_SERVERS environment",
)
def test_it_can_create_a_queue_integration(kafka_topic_factory):
    # Against a real Kafka topic: only messages whose submitted timestamp
    # falls inside [start, start + offset) must be returned by between().
    start = datetime.datetime(year=2019, month=7, day=18, hour=15, minute=39)
    offset = datetime.timedelta(seconds=5)
    with kafka_topic_factory("kirby-test-integration"):
        q = Queue(
            "kirby-test-integration",
            init_time=start - 4 * offset,
            use_tls=getenv("KAFKA_USE_TLS", type=bool, default=True),
        )
        q.append("too early", submitted=start - 2 * offset)
        q.append("hello world", submitted=start)
        q.append("too late", submitted=start + 2 * offset)
        messages = q.between(start, start + offset)
        assert messages == ["hello world"]
| [
{
"point_num": 1,
"id": "has_nested_function_def",
"question": "Does this file contain any function defined inside another function?",
"answer": false
},
{
"point_num": 2,
"id": "every_function_has_docstring",
"question": "Does every function in this file have a docstring?",
"ans... | 3 | tests/tests_api/test_api_queue.py | minh-adimian/kirby |
# -*- coding: utf-8 -*-
"""
Created on Tue May 4 13:36:39 2021
@author: MorganaGiorgio
"""
from Learner import *
class Greedy_Learner(Learner):
    # Greedy bandit learner: after trying every arm once, it always pulls
    # the arm with the highest empirical mean reward (no exploration).
    def __init__(self, n_arms):
        super().__init__(n_arms)
        self.expected_rewards = np.zeros(n_arms)
    def pull_arm(self):
        # At the start, pull each arm exactly once (round-robin warm-up).
        if(self.t < self.n_arms):
            return self.t
        # Once t >= n_arms, pull an arm with maximal expected reward,
        # breaking ties uniformly at random.
        idxs= np.argwhere(self.expected_rewards== self.expected_rewards.max()).reshape(-1)
        pulled_arm= np.random.choice(idxs)
        return pulled_arm
    def update(self, pulled_arm, reward):
        self.t+=1
        self.update_observations(pulled_arm, reward)
        # Running mean of the rewards for this arm.
        # NOTE(review): divides by the global round count t rather than the
        # per-arm pull count — TODO confirm this matches the intended model.
        self.expected_rewards[pulled_arm]= (self.expected_rewards[pulled_arm]*(self.t-1)+ reward) /self.t
| [
{
"point_num": 1,
"id": "every_function_has_docstring",
"question": "Does every function in this file have a docstring?",
"answer": false
},
{
"point_num": 2,
"id": "every_function_under_20_lines",
"question": "Is every function in this file shorter than 20 lines?",
"answer": tru... | 3 | PricingAdvertising/pricingEnviroment/Greedy_Learner.py | rossigiorgio/DIAProject |
import torch.nn as nn
from utils import StochasticDepth
from layers import FeedForward
from CvT import ConvAttention
class CvTBlock(nn.Module):
    """One CvT transformer block: conv-attention then MLP, each wrapped in
    pre-LayerNorm, a residual connection, and stochastic depth."""
    def __init__(self, embed_dim: int, out_dim: int,
                 num_heads: int, mlp_ratio: int = 4,
                 qkv_bias: bool = False, drop: float = 0.,
                 attn_drop: float = 0., drop_path: float = 0.,
                 act_layer: nn.Module = nn.GELU,
                 norm_layer: nn.Module = nn.LayerNorm,
                 **kwargs):
        super(CvTBlock, self).__init__()
        self.norm_one = norm_layer(embed_dim)
        # Convolutional attention; extra kwargs are forwarded unchanged.
        self.attn = ConvAttention(
            embed_dim=embed_dim, out_dim=out_dim,
            num_heads=num_heads, qkv_bias=qkv_bias,
            attn_drop=attn_drop, drop=drop,
            **kwargs
        )
        # Stochastic depth: randomly drops the residual branch in training.
        self.drop_path = StochasticDepth(drop_path)
        self.norm_two = norm_layer(out_dim)
        # Feed-forward with hidden width = out_dim * mlp_ratio.
        self.feed_forward = FeedForward(
            in_features=out_dim, factor=mlp_ratio,
            act_layer=act_layer, drop=drop
        )
    def forward(self, x, h, w):
        # h/w are the token grid dimensions required by the conv attention.
        x = x + self.drop_path(self.attn(self.norm_one(x), h, w))
        x = x + self.drop_path(self.feed_forward(self.norm_two(x)))
        return x
| [
{
"point_num": 1,
"id": "no_function_exceeds_5_params",
"question": "Does every function in this file take 5 or fewer parameters (excluding self/cls)?",
"answer": false
},
{
"point_num": 2,
"id": "all_params_annotated",
"question": "Does every function parameter in this file have a t... | 3 | CvT/transformer_block.py | Justin900429/vision-transformer |
# Complete the function below calculate your age after different numbers of years.
def age_now(x):
    """Print the user's current age."""
    message = f"I am currently {x} years old.\n"
    print(message)
def age_1(y):
    """Print the age the user will be next year."""
    message = f"Next year I'll be {y+1} years old.\n"
    print(message)
def age_10(z):
    """Print the age the user will be in ten years."""
    message = f"In 10 years, I'll be {z+10}!\n"
    print(message)
def age_50(q):
    """Print the age the user will be in fifty years."""
    message = f"In 50 years, I'll be {q+50}! Wow!\n"
    print(message)
if __name__ == '__main__':
    age = 15 # change this number to your current age
    '''
    after you have this working by defining "age" above, you can comment that line out and add a new line
    # below it to get the user's age as an input.
    '''
    # The interactive prompt below overrides the hard-coded value above.
    age = int(input("How old are you?"))
    age_now(age) # run function age_now with argument age
    age_1(age) # run function age_1 with argument age
    age_10(age) # run function age_10 with argument age
    age_50(age) # run function age_50 with argument age
| [
{
"point_num": 1,
"id": "every_function_has_docstring",
"question": "Does every function in this file have a docstring?",
"answer": false
},
{
"point_num": 2,
"id": "all_params_annotated",
"question": "Does every function parameter in this file have a type annotation (excluding self/... | 3 | my_code.py | Athenian-ComputerScience-Fall2020/my-age-Jackypop101 |
#!/usr/bin/env python
# Copyright 2012 La Honda Research Center, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""clean_file_locks.py - Cleans stale interprocess locks
This rountine can be used to find and delete stale lock files from
nova's interprocess synchroization. It can be used safely while services
are running.
"""
import logging
import optparse
from nova import flags
from nova import log
from nova import utils
LOG = log.getLogger('nova.utils')
FLAGS = flags.FLAGS
def parse_options():
"""process command line options."""
parser = optparse.OptionParser('usage: %prog [options]')
parser.add_option('--verbose', action='store_true',
help='List lock files found and deleted')
options, args = parser.parse_args()
return options, args
def main():
"""Main loop."""
options, args = parse_options()
verbose = options.verbose
if verbose:
LOG.logger.setLevel(logging.DEBUG)
else:
LOG.logger.setLevel(logging.INFO)
LOG.info('Cleaning stale locks from %s' % FLAGS.lock_path)
utils.cleanup_file_locks()
LOG.info('Finished')
if __name__ == '__main__':
main()
| [
{
"point_num": 1,
"id": "has_nested_function_def",
"question": "Does this file contain any function defined inside another function?",
"answer": false
},
{
"point_num": 2,
"id": "every_function_has_docstring",
"question": "Does every function in this file have a docstring?",
"ans... | 3 | tools/clean_file_locks.py | bopopescu/extra-specs-1 |
class Task(object):
def __init__(self,name):
self.name = name
pass
def run(self):
pass
| [
{
"point_num": 1,
"id": "every_function_has_docstring",
"question": "Does every function in this file have a docstring?",
"answer": false
},
{
"point_num": 2,
"id": "more_functions_than_classes",
"question": "Does this file define more functions than classes?",
"answer": true
}... | 3 | python/GtBurst/Task.py | fermi-lat/pyBurstAnalysisGUI |
import pandas as pd
import json
class Element:
def __init__(self, name, abrivation, atomic_number,
atomic_mass, period, group):
self.name = name
self.abrivation = abrivation
self.atomic_number = atomic_number
self.atomic_mass = atomic_mass
self.period = period #row
self.group = group #column
self.protons = self.atomic_number
self.neutrons = self.atomic_mass - self.protons
def __repr__(self):
return f"{self.abrivation}\n{self.atomic_number}\n{self.atomic_mass}"
df = pd.read_csv('elements.csv', header=None).dropna(axis = 0)
df[0] = df[0].str.strip("'")
df[1] = df[1].str.strip("'")
periodic = {}
for i in range(len(df)):
element = Element(name = df[0][i],
abrivation = df[1][i],
atomic_number = df[2][i],
atomic_mass = df[3][i],
period = df[4][i],
group = df[5][i])
periodic[element.abrivation] = {'name': element.name,
'atomic_number': element.atomic_number,
'atomic_mass': element.atomic_mass,
'period':element.period,
'group': element.group}
with open('periodic.json', 'w', encoding='utf-8') as f:
json.dump(periodic, f, ensure_ascii=False, indent=4)
| [
{
"point_num": 1,
"id": "more_functions_than_classes",
"question": "Does this file define more functions than classes?",
"answer": true
},
{
"point_num": 2,
"id": "no_function_exceeds_5_params",
"question": "Does every function in this file take 5 or fewer parameters (excluding self/... | 3 | periodic_table.py | mkirby1995/PyISRU |
"""Definitions for the primitive `array_scan`."""
import numpy as np
from . import primitives as P
def pyimpl_array_scan(fn, init, array, axis):
"""Implement `array_scan`."""
# This is inclusive scan because it's easier to implement
# We will have to discuss what semantics we want later
def f(ary):
val = init
it = np.nditer([ary, None])
for x, y in it:
val = fn(val, x)
y[...] = val
return it.operands[1]
return np.apply_along_axis(f, axis, array)
def debugvm_array_scan(vm, fn, init, array, axis):
"""Implement `array_scan` for the debug VM."""
def fn_(a, b):
return vm.call(fn, [a, b])
return pyimpl_array_scan(fn_, init, array, axis)
__operation_defaults__ = {
"name": "array_scan",
"registered_name": "array_scan",
"mapping": P.array_scan,
"python_implementation": pyimpl_array_scan,
}
__primitive_defaults__ = {
"name": "array_scan",
"registered_name": "array_scan",
"type": "backend",
"python_implementation": pyimpl_array_scan,
"debugvm_implementation": debugvm_array_scan,
"inferrer_constructor": None,
"grad_transform": None,
}
| [
{
"point_num": 1,
"id": "all_function_names_snake_case",
"question": "Are all function names in this file written in snake_case?",
"answer": true
},
{
"point_num": 2,
"id": "every_function_has_docstring",
"question": "Does every function in this file have a docstring?",
"answer":... | 3 | myia/operations/prim_array_scan.py | strint/myia |
# *- coding: utf-8 -*
# Created by: ZhaoDongshuang
# Created on: 2018/1/27
import unittest
from others.survey import AnonymousSurvey
class TestAnonymousSurvey(unittest.TestCase):
def setUp(self):
question = "What language did you first learn to speak?"
self.my_survey = AnonymousSurvey(question)
self.responses = ['English', 'Spanish', 'Mandarin']
def test_store_single_response(self):
self.my_survey.store_response(self.responses[0])
self.assertIn(self.responses[0], self.my_survey.responses)
def test_store_three_responses(self):
for response in self.responses:
self.my_survey.store_response(response)
for response in self.responses:
self.assertIn(response, self.my_survey.responses)
if __name__ == '__main__':
unittest.main()
| [
{
"point_num": 1,
"id": "every_function_under_20_lines",
"question": "Is every function in this file shorter than 20 lines?",
"answer": true
},
{
"point_num": 2,
"id": "more_functions_than_classes",
"question": "Does this file define more functions than classes?",
"answer": true
... | 3 | others/test_survey.py | imtoby/LearnPythonRecord |
from flask import render_template,url_for,flash,redirect,request
from . import auth
from flask_login import login_user,login_required,logout_user
from .forms import RegForm,LoginForm
from ..models import User
from .. import db
from ..email import mail_message
@auth.route('/login', methods = ['GET','POST'])
def login():
form = LoginForm()
if form.validate_on_submit():
user = User.query.filter_by(email = form.email.data).first()
if user != None and user.verify_password(form.password.data):
login_user(user,form.remember.data)
return redirect(request.args.get('next') or url_for('main.index'))
flash('Invalid username or Password')
return render_template('auth/login.html', loginform = form)
@auth.route('/logout')
@login_required
def logout():
logout_user()
return redirect(url_for("main.index"))
@auth.route('/signup', methods = ["GET","POST"])
def signup():
form = RegForm()
if form.validate_on_submit():
user = User(email = form.email.data, username = form.username.data, password = form.password.data)
user.save_u()
mail_message("Welcome to the Blog","email/welcome_user",user.email,user=user)
return redirect(url_for('auth.login'))
return render_template('auth/register.html', r_form = form) | [
{
"point_num": 1,
"id": "all_function_names_snake_case",
"question": "Are all function names in this file written in snake_case?",
"answer": true
},
{
"point_num": 2,
"id": "every_function_has_docstring",
"question": "Does every function in this file have a docstring?",
"answer":... | 3 | app/auth/views.py | clarametto/Salb-Blog |
"""Collection of Jax network layers, wrapped to fit Ivy syntax and
signature.
"""
# global
import jax.numpy as jnp
import jax.lax as jlax
# local
from ivy.functional.backends.jax import JaxArray
def conv1d(
x: JaxArray,
filters: JaxArray,
strides: int,
padding: str,
data_format: str = "NWC",
dilations: int = 1,
) -> JaxArray:
strides = (strides,) if isinstance(strides, int) else strides
dilations = (dilations,) if isinstance(dilations, int) else dilations
return jlax.conv_general_dilated(
x, filters, strides, padding, None, dilations, (data_format, "WIO", data_format)
)
def conv1d_transpose(*_):
raise Exception("Convolutions not yet implemented for jax library")
def conv2d(x, filters, strides, padding, data_format="NHWC", dilations=1):
strides = [strides] * 2 if isinstance(strides, int) else strides
dilations = [dilations] * 2 if isinstance(dilations, int) else dilations
return jlax.conv_general_dilated(
x,
filters,
strides,
padding,
None,
dilations,
(data_format, "HWIO", data_format),
)
def depthwise_conv2d(*_):
raise Exception("Convolutions not yet implemented for jax library")
def conv2d_transpose(*_):
raise Exception("Convolutions not yet implemented for jax library")
def conv3d(*_):
raise Exception("Convolutions not yet implemented for jax library")
def conv3d_transpose(*_):
raise Exception("Convolutions not yet implemented for jax library")
| [
{
"point_num": 1,
"id": "every_function_under_20_lines",
"question": "Is every function in this file shorter than 20 lines?",
"answer": true
},
{
"point_num": 2,
"id": "all_function_names_snake_case",
"question": "Are all function names in this file written in snake_case?",
"answ... | 3 | ivy/functional/backends/jax/layers.py | thatguuyG/ivy |
import os
import sys
import time
import datetime
import locale
import getpass
import platform
import subprocess
class Gk:
def __init__(self):
self.os = os.name
self.sys_lang = locale.getdefaultlocale()
self.python_ver = sys.version.split()[0]
self.user_name = getpass.getuser()
self.version = "1.0"
def python_ver(self):
return self.python_ver
def sys_lang(self):
return self.sys_lang
def os(self):
return self.os
def clear(self):
if self.os == "nt":
os.system("cls")
else:
os.system("clear")
def fileexits(self, file):
if os.path.isfile(file):
return True
else:
return False
def bit(self):
if platform.machine().endswith('64'):
return 64
elif platform.machine().endswith("32"):
return 32
def direxits(self, dir):
if os.path.isdir(dir):
return True
else:
return False
def gk_info(self):
return f"""
Gk3 (Geliştirici Kiti 3) Hakkında
Gk python geliştiriciler kullan daha hızlı ve kolay kod yazmasını sağlamak için yazılmış bir kütüphanedir
yaratıcı: Kerem Ata
lisans: MİT
versiyon: {self.version}
python_versiyonu
"""
def username(self):
return self.user_name
def get_command_output(self,command):
p = subprocess.run(command, capture_output=True, text=True, shell=True)
return p.stdout
| [
{
"point_num": 1,
"id": "has_multiple_inheritance",
"question": "Does any class in this file use multiple inheritance?",
"answer": false
},
{
"point_num": 2,
"id": "every_function_has_docstring",
"question": "Does every function in this file have a docstring?",
"answer": false
... | 3 | gk.py | kerem3338/Gk |
from getpass import getuser
from os.path import abspath, join, exists
from uuid import uuid4
from seisflows.tools import unix
from seisflows.tools.config import ParameterError, SeisflowsParameters, SeisflowsPaths, custom_import
PAR = SeisflowsParameters()
PATH = SeisflowsPaths()
class tiger_md(custom_import('system', 'slurm_md')):
""" Specially designed system interface for tiger.princeton.edu
See parent class for more information.
"""
def check(self):
""" Checks parameters and paths
"""
if 'UUID' not in PAR:
setattr(PAR, 'UUID', str(uuid4()))
if 'SCRATCH' not in PATH:
setattr(PATH, 'SCRATCH', join('/scratch/gpfs', getuser(), 'seisflows', PAR.UUID))
if 'LOCAL' not in PATH:
setattr(PATH, 'LOCAL', '')
super(tiger_md, self).check()
def submit(self, *args, **kwargs):
""" Submits job
"""
if not exists(PATH.SUBMIT + '/' + 'scratch'):
unix.ln(PATH.SCRATCH, PATH.SUBMIT + '/' + 'scratch')
super(tiger_md, self).submit(*args, **kwargs)
| [
{
"point_num": 1,
"id": "has_multiple_inheritance",
"question": "Does any class in this file use multiple inheritance?",
"answer": false
},
{
"point_num": 2,
"id": "every_function_has_docstring",
"question": "Does every function in this file have a docstring?",
"answer": true
}... | 3 | seisflows/system/tiger_md.py | chukren/seisflows |
from . import *
import os
class TestConfigCommand(TestBase):
def test_dump(self):
dump = self._arduino.config.dump()["result"]
self.assertIsInstance(dump, dict)
self.assertIn("directories", dump)
def test_init(self):
config_path = self._arduino.config.init(".")["result"].split(": ")[1]
self.assertTrue(os.path.isfile(config_path))
os.remove(config_path)
if __name__ == '__main__':
unittest.main()
| [
{
"point_num": 1,
"id": "every_function_has_docstring",
"question": "Does every function in this file have a docstring?",
"answer": false
},
{
"point_num": 2,
"id": "has_nested_function_def",
"question": "Does this file contain any function defined inside another function?",
"ans... | 3 | tests/test_config.py | Renaud11232/pyduinocli |
from __future__ import absolute_import, division, print_function
import torch
from torch.autograd import Variable
from pyro.distributions import MultivariateNormal, SparseMultivariateNormal
from tests.common import assert_equal
def test_scale_tril():
loc = Variable(torch.Tensor([1, 2, 1, 2, 0]))
D = Variable(torch.Tensor([1, 2, 3, 4, 5]))
W = Variable(torch.Tensor([[1, -1, 2, 3, 4], [2, 3, 1, 2, 4]]))
cov = D.diag() + W.t().matmul(W)
mvn = MultivariateNormal(loc, cov)
sparse_mvn = SparseMultivariateNormal(loc, D, W)
assert_equal(mvn.scale_tril, sparse_mvn.scale_tril)
def test_log_prob():
loc = Variable(torch.Tensor([2, 1, 1, 2, 2]))
D = Variable(torch.Tensor([1, 2, 3, 1, 3]))
W = Variable(torch.Tensor([[1, -1, 2, 2, 4], [2, 1, 1, 2, 6]]))
x = Variable(torch.Tensor([2, 3, 4, 1, 7]))
cov = D.diag() + W.t().matmul(W)
mvn = MultivariateNormal(loc, cov)
sparse_mvn = SparseMultivariateNormal(loc, D, W)
assert_equal(mvn.log_prob(x), sparse_mvn.log_prob(x))
def test_variance():
loc = Variable(torch.Tensor([1, 1, 1, 2, 0]))
D = Variable(torch.Tensor([1, 2, 2, 4, 5]))
W = Variable(torch.Tensor([[3, -1, 3, 3, 4], [2, 3, 1, 3, 4]]))
cov = D.diag() + W.t().matmul(W)
mvn = MultivariateNormal(loc, cov)
sparse_mvn = SparseMultivariateNormal(loc, D, W)
assert_equal(mvn.variance, sparse_mvn.variance)
| [
{
"point_num": 1,
"id": "every_function_under_20_lines",
"question": "Is every function in this file shorter than 20 lines?",
"answer": true
},
{
"point_num": 2,
"id": "all_function_names_snake_case",
"question": "Are all function names in this file written in snake_case?",
"answ... | 3 | tests/distributions/test_sparse_multivariate_normal.py | cnheider/pyro |
from madmom.io.audio import load_audio_file
import numpy
import librosa
def load_wav(wav_in, stereo=False):
x, fs = load_audio_file(wav_in, sample_rate=44100)
if not stereo:
# stereo to mono if necessary
if len(x.shape) > 1 and x.shape[1] == 2:
x = x.sum(axis=1) / 2
# cast to float
x = x.astype(numpy.single)
# normalize between -1.0 and 1.0
x /= numpy.max(numpy.abs(x))
return x
def overlay_clicks(x, beats):
clicks = librosa.clicks(beats, sr=44100, length=len(x))
if len(x.shape) > 1 and x.shape[1] == 2:
clicks = numpy.column_stack((clicks, clicks)) # convert to stereo
return (x + clicks).astype(numpy.single)
def find_closest(A, target):
idx = A.searchsorted(target)
idx = numpy.clip(idx, 1, len(A) - 1)
left = A[idx - 1]
right = A[idx]
idx -= target - left < right - target
return idx
| [
{
"point_num": 1,
"id": "every_function_under_20_lines",
"question": "Is every function in this file shorter than 20 lines?",
"answer": true
},
{
"point_num": 2,
"id": "every_function_has_docstring",
"question": "Does every function in this file have a docstring?",
"answer": fals... | 3 | headbang/util.py | sevagh/headbang.py |
from pysnmp.proto.secmod.rfc3414.priv import base
from pysnmp.proto import errind, error
class NoPriv(base.AbstractEncryptionService):
serviceID = (1, 3, 6, 1, 6, 3, 10, 1, 2, 1) # usmNoPrivProtocol
def hashPassphrase(self, authProtocol, privKey):
return
def localizeKey(self, authProtocol, privKey, snmpEngineID):
return
def encryptData(self, encryptKey, privParameters, dataToEncrypt):
raise error.StatusInformation(errorIndication=errind.noEncryption)
def decryptData(self, decryptKey, privParameters, encryptedData):
raise error.StatusInformation(errorIndication=errind.noEncryption)
| [
{
"point_num": 1,
"id": "no_function_exceeds_5_params",
"question": "Does every function in this file take 5 or fewer parameters (excluding self/cls)?",
"answer": true
},
{
"point_num": 2,
"id": "all_function_names_snake_case",
"question": "Are all function names in this file written... | 3 | scalyr_agent/third_party/pysnmp/proto/secmod/rfc3414/priv/nopriv.py | code-sauce/scalyr-agent-2 |
"""wiki版本追踪
Revision ID: ac28bef87cb9
Revises: c5d936a18918
Create Date: 2020-09-05 15:09:30.698554
"""
import ormtypes
from alembic import op
import sqlalchemy as sa
from sqlalchemy.dialects import mysql
# revision identifiers, used by Alembic.
revision = 'ac28bef87cb9'
down_revision = 'c5d936a18918'
branch_labels = None
depends_on = None
def upgrade():
# ### commands auto generated by Alembic - please adjust! ###
op.drop_constraint('wikipage_ibfk_2', 'wikipage', type_='foreignkey')
op.drop_column('wikipage', 'navigation_id')
op.add_column('wikipage_version', sa.Column('base', mysql.INTEGER(), nullable=True))
op.add_column('wikipage_version', sa.Column('navigation_id', sa.Integer(), nullable=True))
op.create_foreign_key(None, 'wikipage_version', 'wiki_navigation_item', ['navigation_id'], ['id'], ondelete='CASCADE')
# ### end Alembic commands ###
def downgrade():
# ### commands auto generated by Alembic - please adjust! ###
op.drop_constraint(None, 'wikipage_version', type_='foreignkey')
op.drop_column('wikipage_version', 'navigation_id')
op.drop_column('wikipage_version', 'base')
op.add_column('wikipage', sa.Column('navigation_id', mysql.INTEGER(), autoincrement=False, nullable=True))
op.create_foreign_key('wikipage_ibfk_2', 'wikipage', 'wiki_navigation_item', ['navigation_id'], ['id'], ondelete='CASCADE')
# ### end Alembic commands ###
| [
{
"point_num": 1,
"id": "has_nested_function_def",
"question": "Does this file contain any function defined inside another function?",
"answer": false
},
{
"point_num": 2,
"id": "every_function_under_20_lines",
"question": "Is every function in this file shorter than 20 lines?",
... | 3 | migrations/versions/ac28bef87cb9_wiki版本追踪.py | Officeyutong/HelloJudge2 |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import simplejson as json
from alipay.aop.api.constant.ParamConstants import *
class KoubeiSalesKbassetStuffProduceqrcodeBatchqueryModel(object):
def __init__(self):
self._batch_id = None
self._page_size = None
self._produce_order_id = None
@property
def batch_id(self):
return self._batch_id
@batch_id.setter
def batch_id(self, value):
self._batch_id = value
@property
def page_size(self):
return self._page_size
@page_size.setter
def page_size(self, value):
self._page_size = value
@property
def produce_order_id(self):
return self._produce_order_id
@produce_order_id.setter
def produce_order_id(self, value):
self._produce_order_id = value
def to_alipay_dict(self):
params = dict()
if self.batch_id:
if hasattr(self.batch_id, 'to_alipay_dict'):
params['batch_id'] = self.batch_id.to_alipay_dict()
else:
params['batch_id'] = self.batch_id
if self.page_size:
if hasattr(self.page_size, 'to_alipay_dict'):
params['page_size'] = self.page_size.to_alipay_dict()
else:
params['page_size'] = self.page_size
if self.produce_order_id:
if hasattr(self.produce_order_id, 'to_alipay_dict'):
params['produce_order_id'] = self.produce_order_id.to_alipay_dict()
else:
params['produce_order_id'] = self.produce_order_id
return params
@staticmethod
def from_alipay_dict(d):
if not d:
return None
o = KoubeiSalesKbassetStuffProduceqrcodeBatchqueryModel()
if 'batch_id' in d:
o.batch_id = d['batch_id']
if 'page_size' in d:
o.page_size = d['page_size']
if 'produce_order_id' in d:
o.produce_order_id = d['produce_order_id']
return o
| [
{
"point_num": 1,
"id": "all_function_names_snake_case",
"question": "Are all function names in this file written in snake_case?",
"answer": true
},
{
"point_num": 2,
"id": "all_params_annotated",
"question": "Does every function parameter in this file have a type annotation (excludi... | 3 | alipay/aop/api/domain/KoubeiSalesKbassetStuffProduceqrcodeBatchqueryModel.py | articuly/alipay-sdk-python-all |
""" Closuers
Free variables and closures
Remember: Functions defined inside another function can access the outer (nonLocal) variables
"""
def outer():
x = 'python'
def inner():
print("{0} rocks!".format(x))
| [
{
"point_num": 1,
"id": "has_nested_function_def",
"question": "Does this file contain any function defined inside another function?",
"answer": true
},
{
"point_num": 2,
"id": "every_function_under_20_lines",
"question": "Is every function in this file shorter than 20 lines?",
"... | 3 | .history/my_classes/ScopesClosuresAndDecorators/Closures_20210711165644.py | minefarmer/deep-Dive-1 |
from ..typecheck import *
from .import adapter
import sublime
import re
class PHP(adapter.AdapterConfiguration):
type = 'php'
docs = 'https://github.com/xdebug/vscode-php-debug#installation'
async def start(self, log, configuration):
node = await adapter.get_and_warn_require_node(self.type, log)
install_path = adapter.vscode.install_path(self.type)
command = [
node,
f'{install_path}/extension/out/phpDebug.js'
]
return adapter.StdioTransport(log, command)
async def install(self, log):
url = 'https://github.com/xdebug/vscode-php-debug/releases/latest/download/php-debug.vsix'
await adapter.vscode.install(self.type, url, log)
async def installed_status(self, log):
return await adapter.git.installed_status('xdebug', 'vscode-php-debug', self.installed_version)
@property
def installed_version(self) -> Optional[str]:
return adapter.vscode.installed_version(self.type)
@property
def configuration_snippets(self) -> Optional[list]:
return adapter.vscode.configuration_snippets(self.type)
@property
def configuration_schema(self) -> Optional[dict]:
return adapter.vscode.configuration_schema(self.type)
def on_hover_provider(self, view, point):
seperators = "./\\()\"'-:,.;<>~!@#%^&*|+=[]{}`~?."
word = view.expand_by_class(point, sublime.CLASS_WORD_START | sublime.CLASS_WORD_END, separators=seperators)
word_string = word and view.substr(word)
if not word_string:
return None
match = re.search("\\$[a-zA-Z0-9_]*", word_string)
if not match:
return None
word_string = match.group()
return (match.group(), word)
| [
{
"point_num": 1,
"id": "all_params_annotated",
"question": "Does every function parameter in this file have a type annotation (excluding self/cls)?",
"answer": false
},
{
"point_num": 2,
"id": "all_function_names_snake_case",
"question": "Are all function names in this file written ... | 3 | modules/adapters/php.py | mateka/sublime_debugger |
#!/usr/bin/env python3
# 2018 Luther Thompson
# This code is public domain under CC0. See the file COPYING for details.
import unittest
import newtuple
class TupleTest:
def testSetItem(self):
self.assertEqual(self.call('setItem', (0, 1, 2), 1, 666), (0, 666, 2))
def testSetItemSlice(self):
self.assertEqual(
self.call('setItem', (0, 1, 2, 3), slice(1, 3), (10, 20, 100)),
(0, 10, 20, 100, 3),
)
def testSetItemSliceStep(self):
self.assertEqual(
self.call('setItem', (0, 1, 2, 3, 4), slice(1, 5, 2), (10, 30)),
(0, 10, 2, 30, 4),
)
def testDelItem(self):
self.assertEqual(self.call('delItem', (0, 1, 2), 1), (0, 2))
def testDelItemSlice(self):
self.assertEqual(self.call('delItem', (0, 1, 2, 3), slice(1, 3)), (0, 3))
def testDelItemSliceStep(self):
self.assertEqual(
self.call('delItem', (0, 1, 2, 3, 4), slice(1, 5, 2)), (0, 2, 4),
)
def testAppend(self):
self.assertEqual(self.call('append', (0, 1), 55), (0, 1, 55))
def testInsert(self):
self.assertEqual(self.call('insert', (0, 1), 1, 55), (0, 55, 1))
def testPop(self):
self.assertEqual(self.call('pop', (0, 1, 2), 1), ((0, 2), 1))
def testRemove(self):
self.assertEqual(self.call('remove', (0, 100, 2, 100), 100), (0, 2, 100))
def makeTestCase(name, call):
return type(name, (unittest.TestCase, TupleTest), {'call': call})
@staticmethod
def callMethod(name, tup, *args):
return getattr(newtuple.Tuple(tup), name)(*args)
@staticmethod
def callFunction(name, tup, *args):
return getattr(newtuple, name)(tup, *args)
ClassTest = makeTestCase('Classtest', callMethod)
GenericTest = makeTestCase('GenericTest', callFunction)
if __name__ == '__main__':
unittest.main()
| [
{
"point_num": 1,
"id": "has_multiple_inheritance",
"question": "Does any class in this file use multiple inheritance?",
"answer": false
},
{
"point_num": 2,
"id": "more_functions_than_classes",
"question": "Does this file define more functions than classes?",
"answer": true
},... | 3 | tests.py | luther9/newtuple-py |
from ._abstract import AbstractScraper
from ._utils import get_minutes, normalize_string
class TudoGostoso(AbstractScraper):
@classmethod
def host(cls):
return 'tudogostoso.com.br'
def title(self):
return normalize_string(self.soup.find('h1').get_text())
def total_time(self):
return get_minutes(self.soup.find('time', {'class': 'dt-duration'}))
def ingredients(self):
ingredients_html = self.soup.findAll('span', {'class': "p-ingredient"})
return [
normalize_string(ingredient.get_text())
for ingredient in ingredients_html
]
def instructions(self):
instructions_html = self.soup.findAll('div', {'class': "instructions e-instructions"})
return '\n'.join(
normalize_string(instruction.get_text())
for instruction in instructions_html
)
| [
{
"point_num": 1,
"id": "more_functions_than_classes",
"question": "Does this file define more functions than classes?",
"answer": true
},
{
"point_num": 2,
"id": "all_function_names_snake_case",
"question": "Are all function names in this file written in snake_case?",
"answer": ... | 3 | recipe_scrapers/tudogostoso.py | jerjou/recipe-scrapers |
"""Test timestamp."""
# --- import -------------------------------------------------------------------------------------
import WrightTools as wt
# --- test ---------------------------------------------------------------------------------------
def test_now():
wt.kit.TimeStamp() # exception will be raised upon failure
def test_utc():
wt.kit.timestamp_from_RFC3339("2017-11-13 16:09:17Z") # exception will be raised upon failure
def test_date():
ts = wt.kit.timestamp_from_RFC3339("2017-11-13 16:09:17-6")
assert len(ts.date) == 10
def test_hms():
ts = wt.kit.timestamp_from_RFC3339("2017-11-13 16:33:44-6")
assert len(ts.hms) == 8
def test_human():
ts = wt.kit.TimeStamp()
assert len(ts.human) == 19
def test_RFC3339():
ts = wt.kit.TimeStamp()
assert ts.RFC3339
assert wt.kit.timestamp_from_RFC3339(ts.RFC3339) == ts
def test_RFC5322():
ts = wt.kit.TimeStamp()
assert ts.RFC5322
def test_path():
ts = wt.kit.TimeStamp()
assert ts.path
| [
{
"point_num": 1,
"id": "all_function_names_snake_case",
"question": "Are all function names in this file written in snake_case?",
"answer": false
},
{
"point_num": 2,
"id": "every_function_has_docstring",
"question": "Does every function in this file have a docstring?",
"answer"... | 3 | tests/kit/timestamp.py | untzag/WrightTools |
import pytest
import urllib.parse
from helper import chromedriver
from browsermobproxy import Server, Client
from selenium import webdriver
@pytest.fixture
def proxy_server(request):
server = Server("browsermob-proxy/bin/browsermob-proxy")
server.start()
client = Client("localhost:8080")
server.create_proxy()
request.addfinalizer(server.stop)
client.new_har()
return client
@pytest.fixture
def browser(request, proxy_server):
options = webdriver.ChromeOptions()
# Избавляемся от ошибок сертификатов
# https://stackoverflow.com/questions/24507078/how-to-deal-with-certificates-using-selenium
options.add_argument('--ignore-certificate-errors')
# Устанавливаем прокси сервер
proxy_url = urllib.parse.urlparse(proxy_server.proxy).path
options.add_argument('--proxy-server=%s' % proxy_url)
driver = webdriver.Chrome(options=options, executable_path=chromedriver())
driver.proxy = proxy_server
driver.implicitly_wait(5)
request.addfinalizer(driver.quit)
return driver
def test_proxy(browser):
browser.get('https://yandex.ru/')
browser.get('https://demo.opencart.com/')
browser.get('https://demo.opencart.com/admin')
browser.find_element_by_id("input-username").send_keys("admin")
browser.find_element_by_id("input-password").send_keys("admin")
browser.find_element_by_tag_name("form").submit()
har = browser.proxy.har['log']
for el in har:
print(el)
| [
{
"point_num": 1,
"id": "all_params_annotated",
"question": "Does every function parameter in this file have a type annotation (excluding self/cls)?",
"answer": false
},
{
"point_num": 2,
"id": "all_function_names_snake_case",
"question": "Are all function names in this file written ... | 3 | 4_proxy/test_proxy_example.py | pavlovprojects/python_qa_logging |
from __future__ import absolute_import
from typing import Callable, TypeVar
from returns.future import FutureResult
from returns.interfaces.specific.future_result import FutureResultLikeN
from returns.primitives.hkt import Kinded, KindN, kinded
_FirstType = TypeVar(u'_FirstType')
_SecondType = TypeVar(u'_SecondType')
_ThirdType = TypeVar(u'_ThirdType')
_UpdatedType = TypeVar(u'_UpdatedType')
_FutureResultKind = TypeVar(u'_FutureResultKind', bound=FutureResultLikeN)
def bind_future_result(
function,
):
u"""
Compose a container and async function returning ``FutureResult``.
In other words, it modifies the function
signature from:
``a -> FutureResult[b, c]``
to:
``Container[a, c] -> Container[b, c]``
This is how it should be used:
.. code:: python
>>> import anyio
>>> from returns.pointfree import bind_future_result
>>> from returns.future import FutureResult
>>> from returns.io import IOSuccess, IOFailure
>>> def example(argument: int) -> FutureResult[int, str]:
... return FutureResult.from_value(argument + 1)
>>> assert anyio.run(
... bind_future_result(example)(
... FutureResult.from_value(1),
... ).awaitable,
... ) == IOSuccess(2)
>>> assert anyio.run(
... bind_future_result(example)(
... FutureResult.from_failure('a'),
... ).awaitable,
... ) == IOFailure('a')
.. currentmodule: returns.primitives.interfaces.specific.future_result
Note, that this function works
for all containers with ``.bind_async_future`` method.
See :class:`~FutureResultLikeN` for more info.
"""
@kinded
def factory(
container,
):
return container.bind_future_result(function)
return factory
| [
{
"point_num": 1,
"id": "any_function_over_40_lines",
"question": "Is any function in this file longer than 40 lines?",
"answer": true
},
{
"point_num": 2,
"id": "has_nested_function_def",
"question": "Does this file contain any function defined inside another function?",
"answer... | 3 | returns/pointfree/bind_future_result.py | internetimagery/returns |
#!/usr/bin/env python
# Copyright 2020 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
'''Unit tests for the 'grit newgrd' tool.'''
from __future__ import print_function
import os
import sys
if __name__ == '__main__':
sys.path.append(os.path.join(os.path.dirname(__file__), '../..'))
import unittest
from grit import util
from grit.tool import newgrd
class DummyOpts(object):
"""Options needed by NewGrd."""
class NewgrdUnittest(unittest.TestCase):
def testNewFile(self):
"""Create a new file."""
tool = newgrd.NewGrd()
with util.TempDir({}) as output_dir:
output_file = os.path.join(output_dir.GetPath(), 'new.grd')
self.assertIsNone(tool.Run(DummyOpts(), [output_file]))
self.assertTrue(os.path.exists(output_file))
def testMissingFile(self):
"""Verify failure w/out file output."""
tool = newgrd.NewGrd()
ret = tool.Run(DummyOpts(), [])
self.assertIsNotNone(ret)
self.assertGreater(ret, 0)
def testTooManyArgs(self):
"""Verify failure w/too many outputs."""
tool = newgrd.NewGrd()
ret = tool.Run(DummyOpts(), ['a', 'b'])
self.assertIsNotNone(ret)
self.assertGreater(ret, 0)
if __name__ == '__main__':
unittest.main()
| [
{
"point_num": 1,
"id": "every_class_has_docstring",
"question": "Does every class in this file have a docstring?",
"answer": false
},
{
"point_num": 2,
"id": "all_function_names_snake_case",
"question": "Are all function names in this file written in snake_case?",
"answer": fals... | 3 | src/tools/grit/grit/tool/newgrd_unittest.py | Mr-Sheep/naiveproxy |
from bson import ObjectId
from mini_gplus.models import Post
from mini_gplus.utils.profiling import timer
from .cache import r
RPost = "post"
def set_in_post_cache(post: Post):
r.hset(RPost, str(post.id), post.to_json())
@timer
def get_in_post_cache(oid: ObjectId):
r_post = r.hget(RPost, str(oid))
if not r_post:
post = Post.objects.get(id=oid)
set_in_post_cache(post)
return post
return Post.from_json(r_post.decode('utf-8'))
def exists_in_post_cache(oid: ObjectId):
    """Return whether a post with id `oid` is present in the Redis cache."""
    return r.hexists(RPost, str(oid))
| [
{
"point_num": 1,
"id": "all_params_annotated",
"question": "Does every function parameter in this file have a type annotation (excluding self/cls)?",
"answer": true
},
{
"point_num": 2,
"id": "every_function_has_docstring",
"question": "Does every function in this file have a docstr... | 3 | mini_gplus/daos/post_cache.py | KTachibanaM/pill-city |
import random
import torch
from torch.utils.tensorboard import SummaryWriter
from flowtron_plotting_utils import plot_alignment_to_numpy
from flowtron_plotting_utils import plot_gate_outputs_to_numpy
class FlowtronLogger(SummaryWriter):
    """TensorBoard logger for Flowtron training and validation metrics."""

    def __init__(self, logdir):
        super(FlowtronLogger, self).__init__(logdir)

    def log_training(self, loss, learning_rate, iteration):
        """Log the training loss and current learning rate at `iteration`."""
        self.add_scalar("training/loss", loss, iteration)
        self.add_scalar("learning_rate", learning_rate, iteration)

    def log_validation(self, loss, loss_nll, loss_gate, attns, gate_pred,
                       gate_out, iteration):
        """Log validation losses plus attention/gate images for one random sample."""
        self.add_scalar("validation/loss", loss, iteration)
        self.add_scalar("validation/loss_nll", loss_nll, iteration)
        self.add_scalar("validation/loss_gate", loss_gate, iteration)
        # Pick one random sample from the batch to visualize.
        idx = random.randint(0, len(gate_out) - 1)
        for i in range(len(attns)):
            self.add_image(
                'attention_weights_{}'.format(i),
                plot_alignment_to_numpy(attns[i][idx].data.cpu().numpy().T),
                iteration,
                dataformats='HWC')
        if gate_pred is not None:
            # assumes gate_pred is (T, B, 1); after transpose/index it is (B, T) -- TODO confirm
            gate_pred = gate_pred.transpose(0, 1)[:, :, 0]
            self.add_image(
                "gate",
                plot_gate_outputs_to_numpy(
                    gate_out[idx].data.cpu().numpy(),
                    torch.sigmoid(gate_pred[idx]).data.cpu().numpy()),
                iteration, dataformats='HWC')
| [
{
"point_num": 1,
"id": "has_multiple_inheritance",
"question": "Does any class in this file use multiple inheritance?",
"answer": false
},
{
"point_num": 2,
"id": "more_functions_than_classes",
"question": "Does this file define more functions than classes?",
"answer": true
},... | 3 | flowtron_logger.py | hit-thusz-RookieCJ/flowtron |
class Busca:
    """Search utilities over a sequence: linear scan and binary search."""

    def __init__(self, sequencia, elemento):
        self.sequencia = sequencia  # sequence to search (sorted for binary_search)
        self.elemento = elemento    # target element

    def busca_indice_elemento(self):
        """Return the index of the element in the sequence, or -1 if absent."""
        # enumerate yields the item directly; the original re-indexed the
        # sequence and left the enumerated value unused.
        for k, item in enumerate(self.sequencia):
            if item == self.elemento:
                return k
        return -1

    def busca_sequencial(self):
        """Return True if the element is in the sequence, False otherwise."""
        # Membership test is the idiomatic (and C-speed) linear search;
        # it uses the same == comparison as the original index loop.
        return self.elemento in self.sequencia

    def binary_search(self):
        """Return the index of the element via binary search, or -1 if absent.

        Assumes self.sequencia is sorted ascending -- TODO confirm with callers.
        """
        primeiro = 0
        ultimo = len(self.sequencia) - 1
        while primeiro <= ultimo:
            meio = (primeiro + ultimo) // 2
            if self.sequencia[meio] == self.elemento:
                return meio
            elif self.elemento < self.sequencia[meio]:
                ultimo = meio - 1
            else:
                primeiro = meio + 1
        return -1
def binary_search_recursive(lista, elemento, min=0, max=None):
    """Recursively binary-search *lista* (sorted ascending) for *elemento*.

    Returns the index of a match, or False when the element is absent.
    NOTE(review): a found index of 0 is falsy, so callers must compare the
    result with `is False` rather than truth-testing it. `min`/`max` shadow
    builtins but are kept for interface compatibility.
    """
    if max is None:  # identity test per PEP 8, not `== None`
        max = len(lista) - 1
    if max < min:
        return False
    meio = min + (max - min) // 2  # overflow-safe midpoint
    if lista[meio] > elemento:
        return binary_search_recursive(lista, elemento, min, meio - 1)
    elif lista[meio] < elemento:
        return binary_search_recursive(lista, elemento, meio + 1, max)
    else:
        return meio
| [
{
"point_num": 1,
"id": "all_params_annotated",
"question": "Does every function parameter in this file have a type annotation (excluding self/cls)?",
"answer": false
},
{
"point_num": 2,
"id": "all_function_names_snake_case",
"question": "Are all function names in this file written ... | 3 | busca.py | carlos-moreno/algorithms |
"""
This class models the redirect page i.e the view buses page of the redBus Main Page
URL: selenium-tutorial-redirect
The page consists of a header, footer and some text
"""
from .Base_Page import Base_Page
from .redBus_header_object import redBus_Header_Object
from .redBus_footer_object import redBus_Footer_Object
import conf.locators_conf as locators
#import conf.ticket_booking_conf as
from utils.Wrapit import Wrapit
class redBus_View_Buses_Page(Base_Page,redBus_Header_Object,redBus_Footer_Object):
    "Page Object for the redirect page of redBus main page i.e view buses page"

    #locators
    #govt_buses_option = locators.govt_buses_option
    source_locator = locators.source_location_in_view_buses_page
    destination_locator = locators.destination_location_in_view_buses_page

    def start(self):
        "Use this method to go to specific URL -- if needed"
        # NOTE(review): hard-coded search URL (Bangalore -> Coimbatore, fixed date).
        url = 'https://www.redbus.in/search?fromCityName=Bangalore%20(All%20Locations)&toCityName=Coimbatore%20(All%20Locations)&fromCityId=122&toCityId=141&busType=Any&opId=0&onward=06-Feb-2020'
        self.open(url)

    @Wrapit._exceptionHandler
    def check_source_and_destination(self,source,destination):
        "Check if the source and destination matches with the from and to selections in redBus Main Page"
        # Both locator elements must be present before comparing their text.
        result_flag = self.check_element_present(self.source_locator)
        result_flag &= self.check_element_present(self.destination_locator)
        if (result_flag is True) :
            # Case-insensitive comparison of displayed route vs expected route.
            if((self.get_text(self.source_locator).casefold() == source.casefold()) and (self.get_text(self.destination_locator).casefold() == destination.casefold())) :
                result_flag = True
        # NOTE(review): when the text does NOT match, result_flag stays True
        # (elements were present) -- the inner branch never sets it False.
        # Looks like a latent bug; confirm intended behavior.
        self.conditional_write(result_flag,
            positive='Correct source and destination locations found on the redirect page!',
            negative='Incorrect source and destinations found on the redirect page!!',
            level='debug')
        return result_flag
| [
{
"point_num": 1,
"id": "more_functions_than_classes",
"question": "Does this file define more functions than classes?",
"answer": true
},
{
"point_num": 2,
"id": "every_function_has_docstring",
"question": "Does every function in this file have a docstring?",
"answer": true
},... | 3 | page_objects/redBus_view_buses_page.py | PreedhiVivek/qxf2-page-object-model |
import numpy as np
class LDA:
    """Linear Discriminant Analysis via the eigendecomposition of SW^-1 SB."""

    def __init__(self, n_components):
        # Number of discriminant axes to keep after fitting.
        self.n_components = n_components
        self.linear_discriminants = None

    def fit(self, X, y):
        """Learn the top `n_components` linear discriminants from (X, y)."""
        dim = X.shape[1]
        overall_mean = X.mean(axis=0)

        # Scatter matrices:
        #   within-class  SW = sum_c (X_c - mean_c)^T (X_c - mean_c)
        #   between-class SB = sum_c n_c (mean_c - overall)(mean_c - overall)^T
        scatter_within = np.zeros((dim, dim))
        scatter_between = np.zeros((dim, dim))
        for label in np.unique(y):
            members = X[y == label]
            class_mean = members.mean(axis=0)
            centered = members - class_mean
            scatter_within += centered.T.dot(centered)
            offset = (class_mean - overall_mean).reshape(dim, 1)
            scatter_between += members.shape[0] * offset.dot(offset.T)

        # Solve by eigendecomposing SW^-1 SB.
        evals, evecs = np.linalg.eig(np.linalg.inv(scatter_within).dot(scatter_between))
        # Row-major eigenvectors, ordered by descending |eigenvalue|;
        # keep only the leading n_components rows.
        order = np.argsort(abs(evals))[::-1]
        self.linear_discriminants = evecs.T[order][: self.n_components]

    def transform(self, X):
        """Project X onto the learned discriminant axes."""
        return np.dot(X, self.linear_discriminants.T)
| [
{
"point_num": 1,
"id": "more_functions_than_classes",
"question": "Does this file define more functions than classes?",
"answer": true
},
{
"point_num": 2,
"id": "every_function_under_20_lines",
"question": "Is every function in this file shorter than 20 lines?",
"answer": false... | 3 | mlfromscratch/lda.py | Guangrui-best/ML_from_scratch |
"""
Convert characters (chr) to integer (int) labels and vice versa.
REVIEW: index 0 bug, also see:
https://github.com/baidu-research/warp-ctc/tree/master/tensorflow_binding
`ctc_loss`_ maps labels from 0=<unused>, 1=<space>, 2=a, ..., 27=z, 28=<blank>
See: https://www.tensorflow.org/api_docs/python/tf/nn/ctc_loss
"""
__MAP = r' abcdefghijklmnopqrstuvwxyz'  # 27 characters including <space>.

# char -> int (1-based) and int -> char lookup tables.
# Index 0 is reserved and decodes to '' (in case the net decodes a 0 on step 0);
# index num_classes() - 1 is the CTC <blank> label, which also decodes to ''.
__CTOI = {c: i + 1 for i, c in enumerate(__MAP)}
__ITOC = {i + 1: c for i, c in enumerate(__MAP)}
__ITOC[0] = ''
__ITOC[len(__MAP) + 1] = ''  # <blank>; previously itoc(28) raised KeyError


def ctoi(char):
    """
    Convert character label to integer.

    Args:
        char (str): Single character label (case-insensitive).

    Returns:
        int: Integer representation.

    Raises:
        ValueError: If `char` is not exactly one supported character.
    """
    # Length check FIRST: a multi-character string such as 'ab' is a
    # substring of __MAP and would otherwise slip past the membership test.
    if not len(char) == 1:
        raise ValueError('"{}" is not a valid character.'.format(char))
    # Lowercase before the membership test so uppercase input is accepted,
    # matching the .lower() used for the lookup below.
    if char.lower() not in __MAP:
        raise ValueError('Invalid input character \'{}\'.'.format(char))
    return __CTOI[char.lower()]


def itoc(integer):
    """
    Convert integer label to character.

    Args:
        integer (int): Integer label in [0, num_classes()).

    Returns:
        str: Character representation ('' for the reserved 0 and <blank>).

    Raises:
        ValueError: If `integer` is out of range.
    """
    if not 0 <= integer < num_classes():
        raise ValueError('Integer label ({}) out of range.'.format(integer))
    return __ITOC[integer]


def num_classes():
    """
    Return number of different classes, +1 for the <blank> label.

    Returns:
        int: Number of labels +1.
    """
    return len(__MAP) + 2
| [
{
"point_num": 1,
"id": "all_function_names_snake_case",
"question": "Are all function names in this file written in snake_case?",
"answer": true
},
{
"point_num": 2,
"id": "every_function_under_20_lines",
"question": "Is every function in this file shorter than 20 lines?",
"answ... | 3 | asr/labels.py | aflyingwolf/ctc-asr |
from pythonforandroid.recipe import CompiledComponentsPythonRecipe, Recipe
from multiprocessing import cpu_count
class ThisRecipe(CompiledComponentsPythonRecipe):
    """python-for-android build recipe for scikit-learn 0.23.2."""

    site_packages_name = 'scikit-learn'
    version = '0.23.2'
    url = f'https://github.com/{site_packages_name}/{site_packages_name}/archive/{version}.zip'
    depends = ['setuptools', 'scipy', 'joblib', 'threadpoolctl']
    call_hostpython_via_targetpython = False
    need_stl_shared = True
    patches = ['cross-compile.patch']

    def build_compiled_components(self, arch):
        """Build with parallel jobs, then reset the extra args."""
        self.setup_extra_args = ['-j', str(cpu_count())]
        super().build_compiled_components(arch)
        self.setup_extra_args = []

    def rebuild_compiled_components(self, arch, env):
        """Rebuild with parallel jobs, then reset the extra args."""
        self.setup_extra_args = ['-j', str(cpu_count())]
        super().rebuild_compiled_components(arch, env)
        self.setup_extra_args = []

    def strip_ccache(self, env):
        """Remove a leading `ccache` from every command stored in `env`."""
        for key, value in env.items():
            parts = value.split(' ')
            if 'ccache' in parts[0]:
                env[key] = ' '.join(parts[1:])

    def get_recipe_env(self, arch):
        """Extend the build env: drop ccache, add scipy to PYTHONPATH, link shared STL."""
        env = super().get_recipe_env(arch)
        self.strip_ccache(env)
        scipy_build_dir = Recipe.get_recipe('scipy', self.ctx).get_build_dir(arch.arch)
        env['PYTHONPATH'] += f':{scipy_build_dir}'
        env['CXX'] += f' -Wl,-l{self.stl_lib_name}'
        return env
recipe = ThisRecipe()
| [
{
"point_num": 1,
"id": "every_function_under_20_lines",
"question": "Is every function in this file shorter than 20 lines?",
"answer": true
},
{
"point_num": 2,
"id": "no_function_exceeds_5_params",
"question": "Does every function in this file take 5 or fewer parameters (excluding ... | 3 | pythonforandroid/recipes/scikit-learn/__init__.py | adin234/python-for-android |
import os
import unittest
from time import sleep
from cache_gs import CacheGS
from cache_gs.utils.filesystem import remove_tree
class TestRealSQLiteCache(unittest.TestCase):
    """Integration tests for CacheGS backed by a real SQLite file cache."""

    @classmethod
    def setUpClass(cls):
        # Build a cache dir, plant a value with a ~1ms TTL, then wait so it
        # is already expired when test_z_purge runs.
        cls.cache_file = '.cache'
        if not os.path.isdir(cls.cache_file):
            os.mkdir(cls.cache_file)
        cls.cache = CacheGS('sqlite://' + cls.cache_file)
        cls.cache._cache.conn.set_trace_callback(print)  # echo SQL for debugging
        cls.cache.set_value('sec', 'purged', '1234', 0.001)
        sleep(1)
        return super().setUpClass()

    @classmethod
    def tearDownClass(cls):
        del (cls.cache)
        remove_tree(cls.cache_file)

    def test_init(self):
        """The fixture built a CacheGS instance."""
        self.assertIsInstance(self.cache, CacheGS)

    def test_get_set_delete(self):
        """Round-trip: set a value, read it back, delete it."""
        self.assertTrue(self.cache.set_value(
            'sec', 'key', '1234', ttl=100000))
        value = self.cache.get_value('sec', 'key')
        self.assertEqual(value, '1234')
        self.assertTrue(self.cache.delete_value('sec', 'key'))

    def test_z_purge(self):
        # 'z' prefix forces this to run last (alphabetical test order), so
        # the expired entry from setUpClass is still there to purge.
        self.assertGreater(self.cache.purge_expired(), 0)
        self.assertEqual(self.cache.purge_expired(), 0)
| [
{
"point_num": 1,
"id": "every_function_under_20_lines",
"question": "Is every function in this file shorter than 20 lines?",
"answer": true
},
{
"point_num": 2,
"id": "every_function_has_docstring",
"question": "Does every function in this file have a docstring?",
"answer": fals... | 3 | tests/real_tests/test_real_sqlite_cache.py | guionardo/py-cache-guiosoft |
import uuid
from sqlalchemy import BigInteger, Column, Text
from sqlalchemy.ext.declarative import declarative_base
Base = declarative_base()
class TestRdsTable(Base):  # pylint: disable=too-few-public-methods
    """Class representing a model of database for ORM"""

    # Random table name so concurrent test runs don't collide.
    # NOTE(review): uuid4() strings contain hyphens and may start with a
    # digit -- presumably quoted by SQLAlchemy; confirm on the target engine.
    __tablename__ = str(uuid.uuid4())
    id = Column(BigInteger, primary_key=True, autoincrement=True)
    content = Column(Text)

    def __init__(self, content):
        self.content = content

    def __repr__(self):
        return "<TestRds('%s','%s')>" % (self.id, self.content)
| [
{
"point_num": 1,
"id": "every_function_has_docstring",
"question": "Does every function in this file have a docstring?",
"answer": false
},
{
"point_num": 2,
"id": "more_functions_than_classes",
"question": "Does this file define more functions than classes?",
"answer": true
}... | 3 | csm_test_utils/rds_backup/generation/sqla/db_model.py | opentelekomcloud-infra/csm-test-utils |
from django.db import models
from aziende.models import *
# Create your models here.
class Anagrafica(models.Model):
    """Personal record (first/last name, tax code) linked to companies."""

    nome = models.CharField(null=False, max_length=128)
    cognome = models.CharField(null=False, max_length=128)
    codice_fiscale = models.CharField(null=False, max_length=16)  # Italian tax code
    azienda = models.ManyToManyField(Azienda, through='AnagraficaAzienda')

    # NOTE(review): __unicode__ is only consulted on Python 2 / old Django;
    # __str__ would be needed on Python 3 -- confirm target version.
    def __unicode__(self):
        return "{} {}".format(self.cognome, self.nome)

    class Meta:
        verbose_name_plural = 'Anagrafica'
class AnagraficaAzienda(models.Model):
    """Through-model: an employment relationship between a person and a company."""

    # NOTE(review): ForeignKey without on_delete only works on Django < 2.0.
    anagrafica = models.ForeignKey(Anagrafica)
    azienda = models.ForeignKey(Azienda)
    dal = models.DateField()  # relationship start date
    al = models.DateField()   # relationship end date
    orario = models.CharField(null=False, max_length=128)  # working hours

    class Meta:
        verbose_name_plural = 'Rapporti di lavoro'
{
"point_num": 1,
"id": "has_multiple_inheritance",
"question": "Does any class in this file use multiple inheritance?",
"answer": false
},
{
"point_num": 2,
"id": "every_class_has_docstring",
"question": "Does every class in this file have a docstring?",
"answer": false
},
{... | 3 | personale/models.py | luca772005/studio |
from github.interfaces import Type
class LicenseRule(Type):
    """
    Represents a license rule.
    """

    # No instance attributes beyond what Type provides.
    __slots__ = ()

    # Fields shown in repr() output.
    _repr_fields = [
        "key",
    ]

    # Fields requested from the GraphQL API for this type.
    _graphql_fields = [
        "description",
        "key",
        "label",
    ]

    @property
    def description(self):
        """
        A description of the license rule.

        :type: :class:`str`
        """
        return self._get_field("description")

    @property
    def key(self):
        """
        The machine-readable key of the license rule.

        :type: :class:`str`
        """
        return self._get_field("key")

    @property
    def label(self):
        """
        The human-readable label of the license rule.

        :type: :class:`str`
        """
        return self._get_field("label")
__all__ = [
"LicenseRule",
]
| [
{
"point_num": 1,
"id": "all_function_names_snake_case",
"question": "Are all function names in this file written in snake_case?",
"answer": true
},
{
"point_num": 2,
"id": "every_function_under_20_lines",
"question": "Is every function in this file shorter than 20 lines?",
"answ... | 3 | github/content/licenserule.py | ShineyDev/github |
import cv2
import numpy as np
from sklearn.cluster import DBSCAN as skDBSCAN
def DBSCAN(src, eps, min_samples):
    """Cluster the pixels of a BGR image with DBSCAN in LAB color space.

    Returns (maps, labels): `maps` is an HxW array of per-pixel cluster ids
    (noise shifted from -1 to 0); `labels` is the flat per-pixel id vector.
    """
    # Convert to LAB and flatten to an (H*W, channels) feature matrix.
    arr = cv2.cvtColor(src, cv2.COLOR_BGR2LAB).reshape(-1, src.shape[2])
    clustering = skDBSCAN(eps=eps, min_samples=min_samples).fit(arr)
    labels = clustering.labels_ + 1  # shift so noise (-1) becomes 0
    maps = labels.reshape(src.shape[:2])
    return maps, labels
def drawDBSCAN(src, maps, labels):
    """Render DBSCAN clusters by painting each cluster with its mean source color."""
    colors = []
    for lb in set(labels):
        # Mask the pixels of this cluster and take their mean color in src.
        mask = np.where(maps == lb, 255, 0).astype(np.uint8)
        color = list(map(int, list(cv2.mean(src, mask)[:src.shape[2]])))
        colors.append(np.array(color, dtype=np.uint8))
    colors = np.asarray(colors)
    # NOTE(review): colors[labels] assumes label values are consecutive ids
    # 0..k matching the order set(labels) was iterated -- confirm; gaps in
    # label values would mis-color or raise IndexError.
    dst = colors[labels].astype(np.uint8).reshape(src.shape)
    return dst
| [
{
"point_num": 1,
"id": "every_function_has_docstring",
"question": "Does every function in this file have a docstring?",
"answer": false
},
{
"point_num": 2,
"id": "all_params_annotated",
"question": "Does every function parameter in this file have a type annotation (excluding self/... | 3 | cv2u/core/cluster.py | 076923/cv2-utils |
import abc
from collections import OrderedDict
class ControlArchitecture(abc.ABC):
    """Base class for robot control architectures (state machine + managers)."""

    def __init__(self, robot):
        self._robot = robot
        self._state = 0       # current state id
        self._prev_state = 0  # state id before the last transition
        self._state_machine = OrderedDict()
        self._trajectory_managers = OrderedDict()
        # NOTE(review): 'manangers' is a typo; kept as-is because subclasses
        # may reference this attribute name.
        self._hierarchy_manangers = OrderedDict()
        self._reaction_force_managers = OrderedDict()

    @property
    def state(self):
        """Current state id."""
        return self._state

    @property
    def prev_state(self):
        """State id before the most recent transition."""
        return self._prev_state

    @abc.abstractmethod
    def get_command(self):
        """Return the control command for the current tick (subclass hook)."""
        pass
| [
{
"point_num": 1,
"id": "every_function_under_20_lines",
"question": "Is every function in this file shorter than 20 lines?",
"answer": true
},
{
"point_num": 2,
"id": "has_multiple_inheritance",
"question": "Does any class in this file use multiple inheritance?",
"answer": false... | 3 | pnc/control_architecture.py | junhyeokahn/ASE389 |
"""Converts Jupyter Notebooks to Jekyll compliant blog posts"""
from datetime import datetime
import re, os, logging
from nbdev import export2html
from nbdev.export2html import Config, Path, _re_digits, _to_html, _re_block_notes
from fast_template import rename_for_jekyll
warnings = set()
# Modify the naming process such that destination files get named properly for Jekyll _posts
def _nb2htmlfname(nb_path, dest=None):
    """Monkey-patch target: name the exported HTML file per Jekyll conventions."""
    # rename_for_jekyll records any forced renames in the module-level set.
    fname = rename_for_jekyll(nb_path, warnings=warnings)
    if dest is None: dest = Config().doc_path
    return Path(dest)/fname
# Add embedded links for youtube and twitter
def add_embedded_links(cell):
    "Convert block quotes to embedded links in `cell`"
    _styles = ['youtube', 'twitter']

    def _inner(m):
        # Unknown styles are rebuilt as a plain block quote; known styles
        # become Jekyll {% include %} embeds with the text rendered to HTML.
        title,text = m.groups()
        if title.lower() not in _styles: return f"> {m.groups()[0]}: {m.groups()[1]}"
        return '{% include '+title.lower()+".html content=\'`"+_to_html(text)+"`\' %}"

    if cell['cell_type'] == 'markdown':
        cell['source'] = _re_block_notes.sub(_inner, cell['source'])
    return cell
## apply monkey patches so nbdev names and embeds posts the Jekyll way
export2html._nb2htmlfname = _nb2htmlfname
export2html.process_cell.append(add_embedded_links)
export2html.notebook2html(fname='_notebooks/*.ipynb', dest='_posts/')

# Report renames AFTER conversion: `warnings` is only populated while
# notebook2html runs (via the patched _nb2htmlfname). The original printed
# this before conversion, when the set was still empty.
# TODO: Open a GitHub Issue in addition to printing warnings
for original, new in warnings:
    print(f'{original} has been renamed to {new} to be complaint with Jekyll naming conventions.\n')
| [
{
"point_num": 1,
"id": "all_function_names_snake_case",
"question": "Are all function names in this file written in snake_case?",
"answer": true
},
{
"point_num": 2,
"id": "has_nested_function_def",
"question": "Does this file contain any function defined inside another function?",
... | 3 | _action_files/nb2post.py | inc0/fastpages |
from unittest import TestCase
from rockaway.models import Track
class TestTrackBasics(TestCase):
    """Construction and attribute-access behavior of Track."""

    def test_track_create_no_args(self):
        # A bare Track has neither a DB row nor a backing file.
        track = Track()
        self.assertFalse(track.hasDbEntry())
        self.assertFalse(track.hasFile())

    def test_track_create(self):
        args = {"Title": "Rockaway Beach",
                "Artist": "The Ramones",  # FIXME--This and album will not just be strings
                "Album": "Rocket to Russia",
                "Year": 1977,
                "Genre": "Punk Rock",
                "Time": 126000}
        track = Track(**args)
        self.assertEqual(track.Title, args["Title"])
        self.assertEqual(track.Year, 1977)
        # Alternate ways of looking up attributes: the lowercase alias and
        # dict-style indexing should match the canonical attribute.
        self.assertEqual(track.genre, track.Genre)
        self.assertEqual(track.Time, track["Time"])
| [
{
"point_num": 1,
"id": "every_function_has_docstring",
"question": "Does every function in this file have a docstring?",
"answer": false
},
{
"point_num": 2,
"id": "has_nested_function_def",
"question": "Does this file contain any function defined inside another function?",
"ans... | 3 | test/test_tracks.py | dpitch40/rockawayplayer |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
Provides values which would be available from /proc which
are not fulfilled by other modules.
"""
from __future__ import print_function
from __future__ import unicode_literals
import functools
import sys
from types import ModuleType
import gdb
import pwndbg.memoize
import pwndbg.qemu
class module(ModuleType):
    """Module replacement exposing process info (pid/tid/alive/exe) as properties."""

    @property
    def pid(self):
        """Pid of the debuggee, 0 when no inferior is selected."""
        # QEMU usermode emualtion always returns 42000 for some reason.
        # In any case, we can't use the info.
        if pwndbg.qemu.is_qemu_usermode():
            return pwndbg.qemu.pid()
        i = gdb.selected_inferior()
        if i is not None:
            return i.pid
        return 0

    @property
    def tid(self):
        """Thread id of the selected thread; falls back to the pid."""
        if pwndbg.qemu.is_qemu_usermode():
            return pwndbg.qemu.pid()
        i = gdb.selected_thread()
        if i is not None:
            return i.ptid[1]
        return self.pid

    @property
    def alive(self):
        """True when a debuggee thread is currently selected."""
        return gdb.selected_thread() is not None

    @property
    def exe(self):
        """Path of the main executable, or AT_EXECFN from auxv when running."""
        for obj in gdb.objfiles():
            if obj.filename:
                return obj.filename
            break  # only the first objfile is ever inspected
        if self.alive:
            # NOTE(review): pwndbg.auxv is not imported at the top of this
            # file -- presumably resolved via the pwndbg package; confirm.
            auxv = pwndbg.auxv.get()
            return auxv['AT_EXECFN']

    def OnlyWhenRunning(self, func):
        """Decorator: run `func` only while the debuggee is alive (else return None)."""
        @functools.wraps(func)
        def wrapper(*a, **kw):
            if self.alive:
                return func(*a, **kw)
        return wrapper
# To prevent garbage collection
tether = sys.modules[__name__]
sys.modules[__name__] = module(__name__, '')
| [
{
"point_num": 1,
"id": "all_function_names_snake_case",
"question": "Are all function names in this file written in snake_case?",
"answer": false
},
{
"point_num": 2,
"id": "more_functions_than_classes",
"question": "Does this file define more functions than classes?",
"answer":... | 3 | pwndbg/proc.py | jakuta-tech/pwndbg |
## Copyright 2015-2019 Ilgar Lunin, Pedro Cabrera
## Licensed under the Apache License, Version 2.0 (the "License");
## you may not use this file except in compliance with the License.
## You may obtain a copy of the License at
## http://www.apache.org/licenses/LICENSE-2.0
## Unless required by applicable law or agreed to in writing, software
## distributed under the License is distributed on an "AS IS" BASIS,
## WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
## See the License for the specific language governing permissions and
## limitations under the License.
from PyFlow.Core import NodeBase
from PyFlow.Core.Common import *
from PyFlow.Core.GraphManager import GraphManagerSingleton
from PyFlow.Core.NodeBase import NodePinsSuggestionsHelper
from PyFlow.Packages.PyFlowBase.Nodes import FLOW_CONTROL_COLOR
class cliexit(NodeBase):
    """Node that requests termination of the CLI program loop when executed."""

    def __init__(self, name):
        super(cliexit, self).__init__(name)
        # Single exec input; firing it triggers compute().
        self.inp0 = self.createInputPin(DEFAULT_IN_EXEC_NAME, 'ExecPin', None, self.compute)

    @staticmethod
    def pinTypeHints():
        """Declare the single exec input pin for the node editor."""
        helper = NodePinsSuggestionsHelper()
        helper.addInputDataType('ExecPin')
        helper.addInputStruct(PinStructure.Single)
        return helper

    @staticmethod
    def category():
        return 'CLI'

    @staticmethod
    def keywords():
        return []

    @staticmethod
    def description():
        return 'Stops cli program loop'

    def compute(self, *args, **kwargs):
        """Flag the graph manager so the CLI loop exits."""
        man = GraphManagerSingleton().get()
        man.terminationRequested = True
| [
{
"point_num": 1,
"id": "more_functions_than_classes",
"question": "Does this file define more functions than classes?",
"answer": true
},
{
"point_num": 2,
"id": "every_function_under_20_lines",
"question": "Is every function in this file shorter than 20 lines?",
"answer": true
... | 3 | PyFlow/Packages/PyFlowBase/Nodes/cliexit.py | liaokongVFX/PyFlow |
from rest_framework import serializers
from profiles_api import models
class HelloSerializer(serializers.Serializer):
    """Serializes a name field for testing our ApiView"""

    # Short free-text name, capped at 10 characters.
    name = serializers.CharField(max_length=10)
class UserProfileSerializer(serializers.ModelSerializer):
    """Serializes a user profile object"""

    class Meta:
        model = models.UserProfile
        fields = ('id', 'email', 'name', 'password')
        # Password is accepted on write but never returned in responses.
        extra_kwargs = {
            'password': {
                'write_only': True,
                'style': {'input_type': 'password'}
            }
        }

    def create(self, validated_data):
        """Create and return a new user"""
        # create_user hashes the password instead of storing it raw.
        user = models.UserProfile.objects.create_user(
            email=validated_data['email'],
            name=validated_data['name'],
            password=validated_data['password']
        )
        return user
class ProfileFeedItemSerializer(serializers.ModelSerializer):
    """Serializes profile feed items"""

    class Meta:
        model = models.ProfileFeedItem
        fields = ('id', 'user_profile', 'status_text', 'created_on')
        # The owning profile is set server-side, never by the client.
        extra_kwargs = {'user_profile': {'read_only': True}}
| [
{
"point_num": 1,
"id": "has_multiple_inheritance",
"question": "Does any class in this file use multiple inheritance?",
"answer": false
},
{
"point_num": 2,
"id": "every_class_has_docstring",
"question": "Does every class in this file have a docstring?",
"answer": false
},
{... | 3 | profiles_api/serializers.py | MariyaShalju/profiles-rest-api |
import pytest
from tyfbaf import constants
@pytest.fixture
def ugly_hack():
    """Ugly hack to disable autouse fixture in conftest.py...

    Resets SERVER_NAME so its default can be asserted.
    """
    constants.SERVER_NAME = ""
def test_server_name_default(ugly_hack):
    # ugly_hack resets SERVER_NAME before the assertion runs.
    assert constants.SERVER_NAME == ""
def test_port_default():
    """Default port is 6405."""
    assert constants.PORT == 6405
def test_base_headers_default():
    """Default headers send and accept JSON."""
    assert constants.BASE_HEADERS == {
        "Content-Type": "application/json",
        "Accept": "application/json",
    }
def test_current_token_default():
    """Default token is the empty string."""
    assert constants.CURRENT_TOKEN == ""
| [
{
"point_num": 1,
"id": "every_function_has_docstring",
"question": "Does every function in this file have a docstring?",
"answer": false
},
{
"point_num": 2,
"id": "all_function_names_snake_case",
"question": "Are all function names in this file written in snake_case?",
"answer"... | 3 | tests/test_constants.py | yveso/tyfbaf |
import logging
import discord
from discord.ext import commands
# from util.logger import Logger
log = logging.getLogger("discordbot")
def delete_original():
    """
    Decorator that deletes the original
    Discord message upon command execution.

    :return: boolean
    """
    async def predicate(ctx):
        if ctx.invoked_with != "help":  # Don't try to delete if help command
            # Only guild text channels; other channel types are skipped.
            if isinstance(ctx.message.channel, discord.TextChannel):
                try:
                    await ctx.message.delete()
                except discord.errors.NotFound as e:
                    # NOTE(review): the message being already gone is a benign
                    # race -- 'fatal' seems a severe log level; confirm intent.
                    log.fatal(f"Unable to delete message.\n\t{e}")
        # Always allow the command to proceed regardless of deletion outcome.
        return True
    return commands.check(predicate)
| [
{
"point_num": 1,
"id": "has_nested_function_def",
"question": "Does this file contain any function defined inside another function?",
"answer": true
},
{
"point_num": 2,
"id": "every_function_under_20_lines",
"question": "Is every function in this file shorter than 20 lines?",
"... | 3 | util/decorators.py | MiningMark48/Discord-Bot-Template |
import sys
import re
from mrjob.job import MRJob
class MR32B(MRJob):
    """MapReduce word count over the issue column (4th CSV field)."""

    SORT_VALUES = True

    def mapper(self, _, line):
        """Emit (word, 1) for every lowercase word in the issue text."""
        issue = line.strip().split(',')[3]
        words = re.findall(r'[a-z]+', issue.lower())
        for word in words:
            # Hadoop counters are incremented via "reporter:" lines on stderr.
            sys.stderr.write("reporter:counter:Issue,word,1\n")
            sys.stderr.write("reporter:counter:Mapper,Calls,1\n")
            yield word, 1

    def reducer(self, key, counts):
        """Sum the per-word counts."""
        sys.stderr.write("reporter:counter:Reducer,Calls,1\n")
        yield key, sum(counts)
if __name__ == '__main__':
MR32B.run() | [
{
"point_num": 1,
"id": "has_multiple_inheritance",
"question": "Does any class in this file use multiple inheritance?",
"answer": false
},
{
"point_num": 2,
"id": "every_function_under_20_lines",
"question": "Is every function in this file shorter than 20 lines?",
"answer": true... | 3 | HW3/mrjob3_2B.py | maynard242/Machine-Learning-At-Scale |
# lógica para o usuário fazer apostas
import sys
class UserAccount:
    """A named account holding a credit balance."""

    def __init__(self, name, balance=0.0):
        self.name = name
        self.balance = balance

    def get_balance(self):
        """Return the current balance."""
        return self.balance

    def insert_credits(self, amount):
        """Add `amount` to the balance."""
        self.balance += amount

    def check_credits(self):
        """Return the current balance (alias of get_balance)."""
        return self.balance

    def withdraw_credits(self, amount):
        """Withdraw `amount`; terminates the program when funds are insufficient."""
        if amount > self.balance:
            print("Error. Insuficiente credits: {}".format(self.balance))
            # Exit with a non-zero status: the original exit(0) reported
            # success to the shell on the error path.
            sys.exit(1)
        else:
            self.balance -= amount
            print("Withdraw: {}. Current balance: {}".format(amount, self.balance))
class MakeBet:
    """Places bets against a snapshot of a user's balance."""

    def __init__(self, user):
        # NOTE(review): this copies the balance at construction time; later
        # deposits/withdrawals on the account are not reflected -- confirm.
        self.balance = user.get_balance()

    def make_bet(self, bet, team):
        """Validate and announce a bet; exits the process on insufficient funds."""
        if bet > self.balance:
            print("Insuficient founds. You have {}".format(self.balance))
            sys.exit(0)
        else:
            # NOTE(review): the bet amount is never deducted from the balance.
            print("Bet of {} made with sucess on team {}".format(bet, team))
{
"point_num": 1,
"id": "has_multiple_inheritance",
"question": "Does any class in this file use multiple inheritance?",
"answer": false
},
{
"point_num": 2,
"id": "all_function_names_snake_case",
"question": "Are all function names in this file written in snake_case?",
"answer":... | 3 | Betting.py | viniciusbenite/FootballBets |
#!/usr/bin/env python
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import six
from cliff.formatters import value
from cliff.tests import test_columns
def test_value_formatter():
    """emit_one writes one value per line, without escaping."""
    sf = value.ValueFormatter()
    c = ('a', 'b', 'c', 'd')
    d = ('A', 'B', 'C', '"no escape me"')
    expected = 'A\nB\nC\n"no escape me"\n'
    output = six.StringIO()
    sf.emit_one(c, d, output, None)
    actual = output.getvalue()
    assert expected == actual
def test_value_formatter_formattable_column():
    """Formattable columns are rendered via their machine-readable form."""
    sf = value.ValueFormatter()
    c = ('a', 'b', 'c', 'd')
    d = ('A', 'B', 'C', test_columns.FauxColumn(['the', 'value']))
    expected = "A\nB\nC\n['the', 'value']\n"
    output = six.StringIO()
    sf.emit_one(c, d, output, None)
    actual = output.getvalue()
    assert expected == actual
def test_value_list_formatter():
    """emit_list writes one space-separated row per item."""
    sf = value.ValueFormatter()
    c = ('a', 'b', 'c')
    d1 = ('A', 'B', 'C')
    d2 = ('D', 'E', 'F')
    data = [d1, d2]
    expected = 'A B C\nD E F\n'
    output = six.StringIO()
    sf.emit_list(c, data, output, None)
    actual = output.getvalue()
    assert expected == actual
def test_value_list_formatter_formattable_column():
    """Formattable columns render machine-readably in list output too."""
    sf = value.ValueFormatter()
    c = ('a', 'b', 'c')
    d1 = ('A', 'B', test_columns.FauxColumn(['the', 'value']))
    data = [d1]
    expected = "A B ['the', 'value']\n"
    output = six.StringIO()
    sf.emit_list(c, data, output, None)
    actual = output.getvalue()
    assert expected == actual
| [
{
"point_num": 1,
"id": "every_function_under_20_lines",
"question": "Is every function in this file shorter than 20 lines?",
"answer": true
},
{
"point_num": 2,
"id": "all_function_names_snake_case",
"question": "Are all function names in this file written in snake_case?",
"answ... | 3 | cliff/tests/test_formatters_value.py | serrollc/cliff |
from datetime import datetime
from dagster import Out, job, op
from dagster.utils import script_relative_path
from dagster_pandas import PandasColumn, create_dagster_pandas_dataframe_type
from dagster_pandas.constraints import (
ColumnConstraint,
ColumnConstraintViolationException,
ColumnDTypeInSetConstraint,
)
from pandas import DataFrame, read_csv
# start_custom_col
class DivisibleByFiveConstraint(ColumnConstraint):
    """Column constraint: every value in the column must be divisible by 5."""

    def __init__(self):
        message = "Value must be divisible by 5"
        super(DivisibleByFiveConstraint, self).__init__(
            error_description=message, markdown_description=message
        )

    def validate(self, dataframe, column_name):
        """Raise ColumnConstraintViolationException listing the offending rows."""
        rows_with_unexpected_buckets = dataframe[dataframe[column_name].apply(lambda x: x % 5 != 0)]
        if not rows_with_unexpected_buckets.empty:
            raise ColumnConstraintViolationException(
                constraint_name=self.name,
                constraint_description=self.error_description,
                column_name=column_name,
                offending_rows=rows_with_unexpected_buckets,
            )
CustomTripDataFrame = create_dagster_pandas_dataframe_type(
name="CustomTripDataFrame",
columns=[
PandasColumn(
"amount_paid",
constraints=[ColumnDTypeInSetConstraint({"int64"}), DivisibleByFiveConstraint()],
)
],
)
# end_custom_col
@op(out=Out(CustomTripDataFrame))
def load_custom_trip_dataframe() -> DataFrame:
    """Load the e-bike trips CSV; output is validated as CustomTripDataFrame."""
    return read_csv(
        script_relative_path("./ebike_trips.csv"),
        parse_dates=["start_time", "end_time"],
        date_parser=lambda x: datetime.strptime(x, "%Y-%m-%d %H:%M:%S.%f"),
        dtype={"color": "category"},
    )
@job
def custom_column_constraint_trip():
    """Job wiring: load (and thereby validate) the trip dataframe."""
    load_custom_trip_dataframe()
| [
{
"point_num": 1,
"id": "every_function_has_docstring",
"question": "Does every function in this file have a docstring?",
"answer": false
},
{
"point_num": 2,
"id": "every_function_under_20_lines",
"question": "Is every function in this file shorter than 20 lines?",
"answer": tru... | 3 | examples/docs_snippets/docs_snippets/legacy/dagster_pandas_guide/custom_column_constraint.py | rpatil524/dagster |
from settings import settings
from office365.runtime.auth.authentication_context import AuthenticationContext
from office365.sharepoint.caml_query import CamlQuery
from office365.sharepoint.client_context import ClientContext
list_title = "Survey"
view_title = "All Responses"
def print_list_views(ctx):
    """Read list view by title example"""
    target_list = ctx.web.lists.get_by_title(list_title)
    all_views = target_list.views
    ctx.load(all_views)
    ctx.execute_query()
    for current in all_views:
        title = current.properties["Title"]
        # Round-trip again via get_by_title to demonstrate title-based lookup.
        fetched = all_views.get_by_title(title)
        ctx.load(fetched)
        ctx.execute_query()
        print("View title: {0}".format(fetched.properties["Title"]))
def print_view_items(ctx):
    """Example demonstrates how to retrieve View items"""
    target_list = ctx.web.lists.get_by_title(list_title)

    # Step 1: fetch the view's CAML query text.
    view = target_list.views.get_by_title(view_title)
    ctx.load(view, ["ViewQuery"])
    ctx.execute_query()

    # Step 2: fetch the view's field collection.
    view_fields = view.view_fields
    ctx.load(view_fields)
    ctx.execute_query()

    # Step 3: run the view's query against the list to get its items.
    qry = CamlQuery()
    qry.ViewXml = "<View><Where>{0}</Where></View>".format(view.properties["ViewQuery"])
    items = target_list.get_items(qry)
    ctx.load(items)
    ctx.execute_query()
    for item in items:
        print("Item title: {0}".format(item.properties["Title"]))
if __name__ == '__main__':
    # Authenticate with the username/password from settings, then run the view examples.
    ctx_auth = AuthenticationContext(url=settings['url'])
    if ctx_auth.acquire_token_for_user(username=settings['user_credentials']['username'],
                                       password=settings['user_credentials']['password']):
        ctx = ClientContext(settings['url'], ctx_auth)
        # print_list_views(ctx)
        print_view_items(ctx)
    else:
        print(ctx_auth.get_last_error())
| [
{
"point_num": 1,
"id": "every_function_has_docstring",
"question": "Does every function in this file have a docstring?",
"answer": true
},
{
"point_num": 2,
"id": "all_return_types_annotated",
"question": "Does every function in this file have a return type annotation?",
"answer... | 3 | examples/sharepoint/view_operations.py | andebor/Office365-REST-Python-Client |
# Copyright 2013-2022 Lawrence Livermore National Security, LLC and other
# Spack Project Developers. See the top-level COPYRIGHT file for details.
#
# SPDX-License-Identifier: (Apache-2.0 OR MIT)
"""Debug signal handler: prints a stack trace and enters interpreter.
``register_interrupt_handler()`` enables a ctrl-C handler that prints
a stack trace and drops the user into an interpreter.
"""
import sys
import time
import spack.util.spack_json as sjson
class Timer(object):
    """
    Simple timer for timing phases of a solve or install
    """
    def __init__(self):
        self.start = time.time()  # wall-clock start of the whole timer
        self.last = self.start    # end time of the most recently recorded phase
        self.phases = {}          # phase name -> duration in seconds
        self.end = None           # set by stop(); None while still running

    def phase(self, name):
        """Record the time since the previous phase (or start) under ``name``.

        Re-using a name overwrites the earlier measurement.
        """
        now = time.time()
        self.phases[name] = now - self.last
        self.last = now

    @property
    def total(self):
        """Return the total time: frozen at stop() if it was called, else live."""
        if self.end:
            return self.end - self.start
        return time.time() - self.start

    def stop(self):
        """
        Stop the timer to record a total time, if desired.
        """
        self.end = time.time()

    def write_json(self, out=sys.stdout):
        """
        Write a json object with times to file
        """
        phases = [{"name": p, "seconds": s} for p, s in self.phases.items()]
        times = {"phases": phases, "total": {"seconds": self.total}}
        out.write(sjson.dump(times))

    def write_tty(self, out=sys.stdout):
        """Write phase and total times in human-readable form.

        Uses ``self.total`` (not a fresh time.time() reading) so the reported
        total agrees with write_json() after stop() has been called.
        """
        out.write("Time:\n")
        for phase, t in self.phases.items():
            out.write("    %-15s%.4f\n" % (phase + ":", t))
        out.write("Total: %.4f\n" % self.total)
| [
{
"point_num": 1,
"id": "no_function_exceeds_5_params",
"question": "Does every function in this file take 5 or fewer parameters (excluding self/cls)?",
"answer": true
},
{
"point_num": 2,
"id": "more_functions_than_classes",
"question": "Does this file define more functions than cla... | 3 | lib/spack/spack/util/timer.py | player1537-forks/spack |
"""
Test cast_class in tagulous.models.tagged
"""
import inspect
import pickle
from pytest import fixture
from tagulous.models.cast import cast_instance, get_cast_class
from tests.tagulous_tests_app.cast import NewBase, OldBase, Target
EXPECTED_CAST_NAME = "TagulousCastTaggedTarget"
@fixture
def Cast():
    """Build a cast class for the test, removing it from the module afterwards."""
    cast_cls = get_cast_class(Target, NewBase)
    yield cast_cls
    delattr(inspect.getmodule(Target), cast_cls.__name__)
@fixture
def instance():
    """Yield a Target instance cast onto NewBase; clean the generated class up after."""
    value = 1
    target = Target(value)
    cast_instance(target, NewBase)
    yield target
    delattr(inspect.getmodule(Target), target.__class__.__name__)
def test_get_cast_class__name(Cast):
    # The generated class carries the expected synthesized name.
    assert EXPECTED_CAST_NAME == Cast.__name__
def test_get_cast_class__mro(Cast):
    # New base comes first in the MRO; each parent keeps its own bases.
    new_base, old_target = Cast.__bases__
    assert (new_base, old_target) == (NewBase, Target)
    assert new_base.__bases__ == (object,)
    assert old_target.__bases__ == (OldBase,)
def test_get_cast_class__existing_found(Cast):
    # Requesting the same combination again must return the cached class object.
    assert get_cast_class(Target, NewBase) is Cast
def test_get_cast_class__module(Cast):
    # The cast class is placed in the same module as the original target class.
    assert inspect.getmodule(Cast) == inspect.getmodule(Target)
def test_get_cast_class__pickle(Cast):
    # A pickle round-trip must preserve instance state.
    original = Cast(1)
    restored = pickle.loads(pickle.dumps(original))
    assert restored.v == 1
def test_cast_instance__class_name(instance):
    # After casting, the instance's class is the synthesized cast class.
    assert type(instance).__name__ == EXPECTED_CAST_NAME
def test_cast_instance__mro(instance):
    # The cast instance's class has the same base layout as get_cast_class output.
    cast_cls = type(instance)
    assert cast_cls.__bases__ == (NewBase, Target)
    assert cast_cls.__bases__[0].__bases__ == (object,)
    assert cast_cls.__bases__[1].__bases__ == (OldBase,)
def test_cast_instance__value_unchanged(instance):
    # Casting rewrites the class, not the stored attribute values.
    assert instance.v == 1
| [
{
"point_num": 1,
"id": "all_params_annotated",
"question": "Does every function parameter in this file have a type annotation (excluding self/cls)?",
"answer": false
},
{
"point_num": 2,
"id": "all_function_names_snake_case",
"question": "Are all function names in this file written ... | 3 | tests/test_models_cast.py | jdolter/django-tagulous |
from configparser import ConfigParser, NoOptionError
from io import StringIO
from typing import List
from ochrona.const import TOX_LINKED_REQUIREMENTS, INVALID_TOX_LINES, TOX_INI
from ochrona.parser.requirements import RequirementsFile
class ToxFile:
    @staticmethod
    def parse(file_path: str) -> List[str]:
        """
        Parses a tox.ini into a list of requirements.

        :param file_path: tox.ini path
        :return: list<str> list of dependencies ['dependency==semvar']
        """
        dependencies: List[str] = []
        with open(file_path) as tox:
            parser = ConfigParser()
            parser.read_file(tox)
            for section in parser.sections():
                try:
                    deps = parser.get(section=section, option="deps")
                except NoOptionError:
                    # Section has no deps option; nothing to parse here.
                    continue
                for line in deps.splitlines():
                    if line.startswith(TOX_LINKED_REQUIREMENTS):
                        # deps delegate to a requirements file; parse that instead.
                        path = ToxFile._tox_path(file_path)
                        req_file_name = line.replace(TOX_LINKED_REQUIREMENTS, "")
                        return RequirementsFile.parse(f"{path}{req_file_name}")
                    if any(line.startswith(i) for i in INVALID_TOX_LINES):
                        continue  # not a valid dependency line
                    if ":" in line:
                        # requirement is specified with an environment prefix
                        dependencies.append(line.split(":")[-1].strip())
                    elif line != "":
                        # plain requirement
                        dependencies.append(line)
        return dependencies

    @staticmethod
    def _tox_path(tox_file_path: str) -> str:
        """Return the directory portion of the path (strip the tox.ini filename)."""
        return tox_file_path.replace(TOX_INI, "")
| [
{
"point_num": 1,
"id": "every_function_under_20_lines",
"question": "Is every function in this file shorter than 20 lines?",
"answer": false
},
{
"point_num": 2,
"id": "more_functions_than_classes",
"question": "Does this file define more functions than classes?",
"answer": true... | 3 | ochrona/parser/tox.py | ttw225/ochrona-cli |
import torch
import torch.nn as nn
from .decoder import Decoder
from .encoder import Encoder
from .phrase_encoder import PhraseModel
from .refiner import Refiner
from graph.weights_initializer import weights_init
class Model(nn.Module):
    """Note generator combining an encoder/decoder pair, a phrase encoder and a refiner."""

    def __init__(self):
        super().__init__()
        channels = [64, 128, 256, 512, 1024]
        self.encoder = Encoder(channels)
        self.decoder = Decoder(list(reversed(channels)))
        self.phrase_encoder = PhraseModel(channels)
        self.refiner = Refiner()
        self.apply(weights_init)

    def forward(self, note, pre_note, phrase, position, is_train=True):
        # Phrase features are needed in both modes; compute them first.
        phrase_feature = self.phrase_encoder(phrase)
        if is_train:
            z = self.encoder(note)
            pre_z = self.encoder(pre_note)
            gen_note = self.decoder(z, pre_z, phrase_feature, position)
            return self.refiner(gen_note), z, pre_z, phrase_feature
        # Inference: `note` is fed to the decoder directly (no encoding pass).
        pre_z = self.encoder(pre_note)
        gen_note = self.decoder(note, pre_z, phrase_feature, position)
        return self.refiner(gen_note)
| [
{
"point_num": 1,
"id": "no_function_exceeds_5_params",
"question": "Does every function in this file take 5 or fewer parameters (excluding self/cls)?",
"answer": true
},
{
"point_num": 2,
"id": "more_functions_than_classes",
"question": "Does this file define more functions than cla... | 3 | graph/model.py | KMU-AELAB-MusicProject/MusicGeneration_VAE-torch |
from AlbotOnline.Snake import SnakeGame
import AlbotOnline.JsonProtocol as Prot
import random as rand
# Connect to the Albot client on the port the user types in.
game = SnakeGame.SnakeGame(Port=int(input("Port:")))
maxDepth = 2  # lookahead depth for the search below
def stateToScore(state):
    """Map a terminal/ongoing game state to a leaf score (higher favors us)."""
    scores = {
        Prot.STATES.ongoing: 0.5,
        Prot.STATES.draw: 0.25,
        Prot.STATES.playerWon: 1,
        Prot.STATES.enemyWon: 0,
    }
    # Unrecognized states fall through to None, as before.
    return scores.get(state)
def search(board, depth):
    """Expectimax search over our moves vs. uniformly random enemy moves.

    Returns a (score, move) pair; at the top depth, ties are broken randomly.
    """
    state = game.evaluateBoard(board)
    if depth == 0 or state != Prot.STATES.ongoing:
        return stateToScore(state), "None"
    pMoves, eMoves = game.getPossibleMoves(board)
    # Expected score of each of our moves, averaged over enemy replies.
    results = {p: 0 for p in pMoves}
    for p in pMoves:
        for e in eMoves:
            simBoard = game.simulateMove(board, p, e)
            score, _ = search(simBoard, depth - 1)
            results[p] += score / len(eMoves)
    # Pick the best move over ALL legal moves (the old code hardcoded
    # range(1, 3) and crashed with fewer than three options).
    move = max(pMoves, key=lambda p: results[p])
    maxScore = results[move]
    if depth == maxDepth:
        print("******************s")
        ranked = [[p, results[p]] for p in pMoves]
        ranked.sort(key=lambda r: r[1], reverse=True)
        print("Sorted", ranked)
        # Break ties among the best-scoring moves at random.
        best = [p for p in ranked if p[1] >= ranked[0][1]]
        pick = rand.choice(best)
        return pick[1], pick[0]
    return maxScore, move
# Play games forever: search for a move each turn, restart when a game ends.
while(True):
    while(game.awaitNextGameState() == "ongoing"):
        board = game.currentBoard
        score, move = search(board, maxDepth)
        if(len(board.blocked) > 0):
            print(board.blocked[0])
        print(score, move)
        board.printBoard("My current board")
        game.makeMove(move)
    game.restartGame()
{
"point_num": 1,
"id": "every_function_has_docstring",
"question": "Does every function in this file have a docstring?",
"answer": false
},
{
"point_num": 2,
"id": "all_params_annotated",
"question": "Does every function parameter in this file have a type annotation (excluding self/... | 3 | AlbotOnline/Tests/SnakeTest.py | Albot-Online/Albot-Python-Library |
from metadata.metadata import METRIC_PERIOD
class ModelHolder:
    """Container bundling a model's name, data, config, metric period and id."""

    def __init__(self, model_name, model_config=None, model_data=None, period=METRIC_PERIOD.HISTORICAL.value, id=''):
        self._model_name = model_name
        self._model_data = {} if model_data is None else model_data
        self._period = period
        self._model_config = {} if model_config is None else model_config
        self._id = id

    def getModelByKey(self, key):
        """Return the model-data value for key, or None when absent."""
        return self._model_data.get(key)

    def setModelKV(self, key, value):
        # NOTE: setdefault semantics — an already-present key is NOT overwritten.
        self._model_data.setdefault(key, value)

    def getModelConfigByKey(self, key):
        """Return the config value for key, or None when absent."""
        return self._model_config.get(key)

    @property
    def period(self):
        return self._period

    @property
    def model_name(self):
        return self._model_name

    @property
    def hasModel(self):
        """True once any model data has been stored."""
        return bool(self._model_data)

    @property
    def id(self):
        return self._id

    def __getitem__(self, item):
        return self.getModelByKey(item)

    def __setitem__(self, key, value):
        self.setModelKV(key, value)

    def __str__(self):
        parts = [
            'model_name: ', self.model_name,
            ', modeldata: ', str(self._model_data),
            ', modelconfig: ', str(self._model_config),
            ', period: ', self.period,
        ]
        return ''.join(parts)
| [
{
"point_num": 1,
"id": "has_multiple_inheritance",
"question": "Does any class in this file use multiple inheritance?",
"answer": false
},
{
"point_num": 2,
"id": "more_functions_than_classes",
"question": "Does this file define more functions than classes?",
"answer": true
},... | 3 | src/models/modelclass.py | bowbahdoe/foremast-brain |
from my_app import db
class Console(db.Model):
    """SQLAlchemy model for a game console record."""
    id = db.Column(db.Integer, primary_key=True)
    # Display name of the console.
    name = db.Column(db.String(100))
    # Year of the console — presumably release year; TODO confirm intended meaning.
    year = db.Column(db.Integer)
    # Price; asdecimal=True makes values come back as decimal.Decimal.
    price = db.Column(db.Float(asdecimal=True))
    # Free-form status string (semantics defined by callers).
    status = db.Column(db.String(100))
    # Count of games associated with this console — TODO confirm against callers.
    numberGames = db.Column(db.Integer)
    def __init__(self,name,year,price,status,numberGames):
        """Initialize all user-supplied fields (id is assigned by the database)."""
        self.name = name
        self.year = year
        self.price = price
        self.status = status
        self.numberGames = numberGames
    def __repr__(self):
        return 'Console {0}'.format(self.id)
{
"point_num": 1,
"id": "every_function_under_20_lines",
"question": "Is every function in this file shorter than 20 lines?",
"answer": true
},
{
"point_num": 2,
"id": "no_function_exceeds_5_params",
"question": "Does every function in this file take 5 or fewer parameters (excluding ... | 3 | IntegracaoFlask_Android/consoles_app/my_app/console/models.py | Luis-Gritz/ExercicioApiFlask |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.