source string | points list | n_points int64 | path string | repo string |
|---|---|---|---|---|
# ๊ณ์ฐ๊ธฐ ์์ . ์ค๋ฒ๋ผ์ด๋์ ํ์ฉ.
class Cal(object):
_history = []
def __init__(self, v1, v2):
if isinstance(v1, int):
self.v1 = v1
if isinstance(v2, int):
self.v2 = v2
def add(self):
result = self.v1+self.v2
Cal._history.append("add : %d+%d=%d" % (self.v1, self.v2, result))
return result
def subtract(self):
result = self.v1-self.v2
Cal._history.append("subtract : %d-%d=%d" % (self.v1, self.v2, result))
return result
def setV1(self, v):
if isinstance(v, int):
self.v1 = v
def getV1(self):
return self.v1
@classmethod
def history(cls):
for item in Cal._history:
print(item)
# ๋ถ๋ชจํด๋์ค info ๋ฉ์๋ : ์
๋ ฅ๋ ๋ณ์์ ์ ๋ณด๋ฅผ ์๋ ค์ฃผ๋ ๋ฉ์๋.
def info(self):
return "Cal => v1 : %d, v2 : %d" % (self.v1, self.v2)
class CalMultiply(Cal):
def multiply(self):
result = self.v1*self.v2
Cal._history.append("multiply : %d*%d=%d" % (self.v1, self.v2, result))
return result
# ์ค๋ฒ๋ผ์ด๋ฉ info ๋ฉ์๋1
def info(self):
return "CalMultiply => %s" % super().info() # ์ฌ๊ธฐ์ super๋ Cal ์ info๋ฉ์๋
class CalDivide(CalMultiply):
def divide(self):
result = self.v1/self.v2
Cal._history.append("divide : %d/%d=%d" % (self.v1, self.v2, result))
return result
# ์ค๋ฒ๋ผ์ด๋ฉ info ๋ฉ์๋2
def info(self):
return "CalDivide => %s" % super().info() # ์ฌ๊ธฐ์ super๋ CalMultiply ์ info๋ฉ์๋
c0 = Cal(30, 60)
print(c0.info())
c1 = CalMultiply(10,10)
print(c1.info())
c2 = CalDivide(20,10)
print(c2.info())
| [
{
"point_num": 1,
"id": "no_function_exceeds_5_params",
"question": "Does every function in this file take 5 or fewer parameters (excluding self/cls)?",
"answer": true
},
{
"point_num": 2,
"id": "every_class_has_docstring",
"question": "Does every class in this file have a docstring?... | 3 | opentutorials_python2/opentutorials_python2/19_Override/2_Override_deepen.py | dongrami0425/Python_OpenCV-Study |
import os
from sanic import Sanic, response as res
from sanic.exceptions import NotFound
from sanic.websocket import ConnectionClosed
import json
from database import get_messages, post_message
# initiate the sanic app
app = Sanic('app')
# list of connected clients
clients = set()
# function that sends a websocket message to all connected clients
async def broadcast(message):
# must iterate a copy of the clients set
# because the loop gets inconsistent if removing
# an element while iterating
for client in [*clients]: # copy list with spread syntax
try:
await client.send(message)
except ConnectionClosed:
# remove client from list if disconnected
clients.remove(client)
@app.websocket('/ws')
async def websockets(req, ws):
# add connected client to list
clients.add(ws)
while True:
# wait to receive message from client
data = await ws.recv()
data = json.loads(data) # parse json
# save message to db
data['id'] = await post_message(data)
print(data)
data = json.dumps(data) # stringify dict
# broadcast message to all clients
await broadcast(data)
@app.get('/rest/messages')
async def messages(req):
return res.json(await get_messages())
# enable frontend to be served from root
app.static('/', './dist')
@app.exception(NotFound)
async def ignore_404s(request, exception):
return await res.file('./dist/index.html')
# start the server with the PORT from an environment variable
if __name__ == '__main__':
app.run(host='0.0.0.0', port=int(os.environ.get("PORT", 5000)))
| [
{
"point_num": 1,
"id": "all_params_annotated",
"question": "Does every function parameter in this file have a type annotation (excluding self/cls)?",
"answer": false
},
{
"point_num": 2,
"id": "all_function_names_snake_case",
"question": "Are all function names in this file written ... | 3 | main.py | andreaskvam/python-chat |
#!/usr/bin/env python3
#
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""List LoRaWAN End Devices in the LNS database."""
import argparse
from empower.cli import command
def pa_cmd(args, cmd):
"""List lEndDevs parser method.
usage: empower-ctl.py list-lenddevs <options>
optional arguments:
-h, --help show this help message and exit
-g DEVEUI, --devEUI DEVEUI
show results for a specified devEUI id only
-v, --verbose verbose
"""
usage = "%s <options>" % command.USAGE.format(cmd)
desc = command.DESCS[cmd]
parser = argparse.ArgumentParser(usage=usage, description=desc)
# required = parser.add_argument_group('required named arguments')
parser.add_argument(
'-g', '--devEUI', help='show results for a specified devEUI id only',
default=None, type=str, dest="devEUI")
parser.add_argument(
'-v', '--verbose', help='verbose', action="store_true",
default=False, dest="config")
(args, leftovers) = parser.parse_known_args(args)
return args, leftovers
def do_cmd(gargs, args, _):
"""List lEndDevs registered in the LNS."""
url = '/api/v1/lns/lenddevs/'
_, data = command.connect(gargs, ('GET', url), 200)
for entry in data:
if not args.devEUI:
print(entry)
elif entry['DevEUI'] == args.devEUI:
print(entry)
| [
{
"point_num": 1,
"id": "all_params_annotated",
"question": "Does every function parameter in this file have a type annotation (excluding self/cls)?",
"answer": false
},
{
"point_num": 2,
"id": "every_function_has_docstring",
"question": "Does every function in this file have a docst... | 3 | empower/cli/lomm_lns_commands/list_lenddevs.py | ericbrinckhaus/empower-runtime-modified |
class Person:
somePublicProp = 32
# Costruttore; il primo parametro deve essere il self (this in C++)
def __init__(self, name, age):
self.name = name
self.age = age
p1 = Person("Davide", 23)
print(p1.name)
print(p1.age)
# Per cancellare l'oggetto p1
del p1
class Student(Person):
def __init__(self, name, age, id):
super().__init__(name, age)
self.id = id
s1 = Student("Davide", 23, 1000)
print(s1.name, s1.age, s1.id, s1.somePublicProp)
# iterator definito in una classe
class MyNumbers:
# Costruttore dell'iteratore (all'inizio del for)
def __iter__(self):
self.a = 1
return self
# Ad ogni passata del for
def __next__(self):
if self.a <= 20:
x = self.a
self.a += 1
return x
else:
# raise serve a scatenare un eccezione
raise StopIteration
myclass = MyNumbers()
myiter = iter(myclass)
for x in myiter:
print(x)
| [
{
"point_num": 1,
"id": "every_class_has_docstring",
"question": "Does every class in this file have a docstring?",
"answer": false
},
{
"point_num": 2,
"id": "all_function_names_snake_case",
"question": "Are all function names in this file written in snake_case?",
"answer": true... | 3 | oop.py | LoZioo/PythonExamples |
from conans import ConanFile, CMake, tools
import os
class TestPackageConan(ConanFile):
settings = "os", "arch", "compiler", "build_type"
generators = "cmake", "cmake_find_package_multi"
def build_requirements(self):
if self.settings.os == "Macos" and self.settings.arch == "armv8":
# Workaround for CMake bug with error message:
# Attempting to use @rpath without CMAKE_SHARED_LIBRARY_RUNTIME_C_FLAG being
# set. This could be because you are using a Mac OS X version less than 10.5
# or because CMake's platform configuration is corrupt.
# FIXME: Remove once CMake on macOS/M1 CI runners is upgraded.
self.build_requires("cmake/3.22.3")
def build(self):
cmake = CMake(self)
cmake.configure()
cmake.build()
def test(self):
if not tools.cross_building(self):
bin_path = os.path.join("bin", "test_package")
self.run(bin_path, run_environment=True)
| [
{
"point_num": 1,
"id": "all_function_names_snake_case",
"question": "Are all function names in this file written in snake_case?",
"answer": true
},
{
"point_num": 2,
"id": "more_functions_than_classes",
"question": "Does this file define more functions than classes?",
"answer": ... | 3 | recipes/librasterlite/all/test_package/conanfile.py | dpronin/conan-center-index |
import pytest
import numpy as np
from numpy.testing import assert_allclose
from astropy.stats.lombscargle.implementations.mle import design_matrix, periodic_fit
@pytest.fixture
def t():
rand = np.random.RandomState(42)
return 10 * rand.rand(10)
@pytest.mark.parametrize('freq', [1.0, 2])
@pytest.mark.parametrize('dy', [None, 2.0])
@pytest.mark.parametrize('bias', [True, False])
def test_design_matrix(t, freq, dy, bias):
X = design_matrix(t, freq, dy, bias=bias)
assert X.shape == (t.shape[0], 2 + bool(bias))
if bias:
assert_allclose(X[:, 0], 1. / (dy or 1.0))
assert_allclose(X[:, -2], np.sin(2 * np.pi * freq * t) / (dy or 1.0))
assert_allclose(X[:, -1], np.cos(2 * np.pi * freq * t) / (dy or 1.0))
@pytest.mark.parametrize('nterms', range(4))
def test_multiterm_design_matrix(t, nterms):
dy = 2.0
freq = 1.5
X = design_matrix(t, freq, dy=dy, bias=True, nterms=nterms)
assert X.shape == (t.shape[0], 1 + 2 * nterms)
assert_allclose(X[:, 0], 1. / dy)
for i in range(1, nterms + 1):
assert_allclose(X[:, 2 * i - 1], np.sin(2 * np.pi * i * freq * t) / dy)
assert_allclose(X[:, 2 * i], np.cos(2 * np.pi * i * freq * t) / dy)
@pytest.mark.parametrize('nterms', range(1, 4))
@pytest.mark.parametrize('freq', [1, 2])
@pytest.mark.parametrize('fit_mean', [True, False])
def test_exact_mle_fit(nterms, freq, fit_mean):
rand = np.random.RandomState(42)
t = 10 * rand.rand(30)
theta = -1 + rand.rand(2 * nterms + 1)
y = np.zeros(t.shape)
if fit_mean:
y = theta[0] * np.ones(t.shape)
for i in range(1, nterms + 1):
y += theta[2 * i - 1] * np.sin(2 * np.pi * i * freq * t)
y += theta[2 * i] * np.cos(2 * np.pi * i * freq * t)
y_fit = periodic_fit(t, y, dy=1, frequency=freq, t_fit=t, nterms=nterms,
center_data=False, fit_mean=fit_mean)
assert_allclose(y, y_fit)
| [
{
"point_num": 1,
"id": "every_function_has_docstring",
"question": "Does every function in this file have a docstring?",
"answer": false
},
{
"point_num": 2,
"id": "all_params_annotated",
"question": "Does every function parameter in this file have a type annotation (excluding self/... | 3 | astropy/stats/lombscargle/implementations/tests/test_mle.py | b1quint/astropy |
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
# (c) Camille Scott, 2019
# File : conftest.py
# License: MIT
# Author : Camille Scott <camille.scott.w@gmail.com>
# Date : 12.12.2019
import os
import shutil
import pytest
from .utils import hmmscan_cmd, run_shell_cmd
TEST_DIR = os.path.dirname(__file__)
DATA_DIR = os.path.join(TEST_DIR, 'test-data')
def get_data(name):
return os.path.join(DATA_DIR, name)
@pytest.fixture
def datadir(tmpdir, request):
'''
Fixture responsible for locating the test data directory and copying it
into a temporary directory.
'''
def getter(filename, as_str=True):
filepath = tmpdir.join(filename)
shutil.copyfile(os.path.join(DATA_DIR, filename),
filepath)
if as_str:
return str(filepath)
return filepath
return getter
@pytest.fixture(scope='session')
def pfam(tmpdir_factory):
h3f = get_data('Pfam-A.hmm.h3f')
h3i = get_data('Pfam-A.hmm.h3i')
h3m = get_data('Pfam-A.hmm.h3m')
h3p = get_data('Pfam-A.hmm.h3p')
return h3f[:-4], (h3f, h3i, h3m, h3p)
@pytest.fixture(scope='session')
def query_x_pfam(pfam, tmpdir_factory):
pfam, _ = pfam
query = get_data('query.100.pep.fa')
exp_output = tmpdir_factory.mktemp('query_x_pfam').join('exp.tbl')
exp_cmd = hmmscan_cmd(query, pfam, exp_output)
run_shell_cmd(' '.join(map(str, exp_cmd)))
return exp_output
| [
{
"point_num": 1,
"id": "every_function_under_20_lines",
"question": "Is every function in this file shorter than 20 lines?",
"answer": true
},
{
"point_num": 2,
"id": "all_function_names_snake_case",
"question": "Are all function names in this file written in snake_case?",
"answ... | 3 | tests/conftest.py | camillescott/fuckitall |
from env_checker_error import EnvCheckerError
class ProcessRunningError(Exception):
def __init__(self, process_name):
self.process_name = process_name
def resolve(self):
raise EnvCheckerError(
"`%s` cannot be running while Ice is being run" %
self.process_name)
| [
{
"point_num": 1,
"id": "has_multiple_inheritance",
"question": "Does any class in this file use multiple inheritance?",
"answer": false
},
{
"point_num": 2,
"id": "every_function_has_docstring",
"question": "Does every function in this file have a docstring?",
"answer": false
... | 3 | ice/error/process_running_error.py | reavessm/Ice |
class UnpackException(Exception):
"""Base class for some exceptions raised while unpacking.
NOTE: unpack may raise exception other than subclass of
UnpackException. If you want to catch all error, catch
Exception instead.
"""
class BufferFull(UnpackException):
pass
class OutOfData(UnpackException):
pass
class FormatError(ValueError, UnpackException):
"""Invalid msgpack format"""
class StackError(ValueError, UnpackException):
"""Too nested"""
# Deprecated. Use ValueError instead
UnpackValueError = ValueError
class ExtraData(UnpackValueError):
"""ExtraData is raised when there is trailing data.
This exception is raised while only one-shot (not streaming)
unpack.
"""
def __init__(self, unpacked, extra):
self.unpacked = unpacked
self.extra = extra
def __str__(self):
return "unpack(b) received extra data."
# Deprecated. Use Exception instead to catch all exception during packing.
PackException = Exception
PackValueError = ValueError
PackOverflowError = OverflowError
| [
{
"point_num": 1,
"id": "every_class_has_docstring",
"question": "Does every class in this file have a docstring?",
"answer": false
},
{
"point_num": 2,
"id": "every_function_has_docstring",
"question": "Does every function in this file have a docstring?",
"answer": false
},
... | 3 | TimeWrapper_JE/venv/Lib/site-packages/pip/_vendor/msgpack/exceptions.py | JE-Chen/je_old_repo |
import pathlib
import numpy as np
from context import RESOURCE_PATH
import rmsd
def test_kabash_fit_pdb():
filename_p = pathlib.PurePath(RESOURCE_PATH, "ci2_1r+t.pdb")
filename_q = pathlib.PurePath(RESOURCE_PATH, "ci2_1.pdb")
p_atoms, p_coord = rmsd.get_coordinates_pdb(filename_p)
q_atoms, q_coord = rmsd.get_coordinates_pdb(filename_q)
new_p_coord = rmsd.kabsch_fit(p_coord, q_coord)
np.testing.assert_array_almost_equal(q_coord[0], new_p_coord[0], decimal=2)
def test_kabash_weighted_fit_pdb():
filename_1 = pathlib.PurePath(RESOURCE_PATH, "ci2_12.pdb")
filename_2 = pathlib.PurePath(RESOURCE_PATH, "ci2_2.pdb")
p_atoms, p_coord = rmsd.get_coordinates_pdb(filename_1)
q_atoms, q_coord = rmsd.get_coordinates_pdb(filename_2)
weights = np.zeros(len(p_coord))
residue13_start = 200
residue24_start = 383
weights[residue13_start:residue24_start] = 1.0
new_p_coord = rmsd.kabsch_fit(p_coord, q_coord, weights)
np.testing.assert_array_almost_equal(q_coord[300], new_p_coord[300], decimal=2)
| [
{
"point_num": 1,
"id": "has_nested_function_def",
"question": "Does this file contain any function defined inside another function?",
"answer": false
},
{
"point_num": 2,
"id": "every_function_has_docstring",
"question": "Does every function in this file have a docstring?",
"ans... | 3 | tests/test_kabsch_weighted.py | hengwei-chan/rmsd-to-calculate-structural-difference-between-2-cmps |
import os
from sqlalchemy import create_engine
from sqlalchemy.orm import sessionmaker
from .base import Base
class MapperBase():
user = os.getenv("MYSQL_USER")
key = os.getenv("MYSQL_KEY")
host = os.getenv("MYSQL_HOST")
port = os.getenv("MYSQL_PORT")
def __init__(self, database):
self.db = database
if database == 'test':
self.url = 'sqlite:///:memory:'
else:
self.url = \
'mysql+mysqlconnector://{}:{}@{}:{}/{}'.format(
self.user,
self.key,
self.host,
self.port,
self.db,
)
self.engine = create_engine(
self.url,
connect_args={'use_pure': True}
)
self.session = sessionmaker(bind=self.engine)
self.base = Base
def get_base(self):
return self.base
def get_engine(self):
return self.engine
def get_session(self):
return self.session()
| [
{
"point_num": 1,
"id": "has_multiple_inheritance",
"question": "Does any class in this file use multiple inheritance?",
"answer": false
},
{
"point_num": 2,
"id": "every_function_under_20_lines",
"question": "Is every function in this file shorter than 20 lines?",
"answer": fals... | 3 | api/insights/insights/infrastructure/mysql/orm/mapper_base.py | manisharmagarg/qymatix |
"""
Pylibui test suite.
"""
from pylibui.controls import Slider
from tests.utils import WindowTestCase
class SliderTest(WindowTestCase):
def setUp(self):
super().setUp()
self.slider = Slider(0, 100)
def test_value_initial_value(self):
"""Tests the sliders's `value` initial value is the first parameter
passed to constructor."""
slider = Slider(10, 110)
self.assertEqual(slider.value, 10)
def test_value_can_be_changed(self):
"""Tests the slider's `value` attribute can be changed."""
value = 30
self.slider.value = value
self.assertEqual(self.slider.value, value)
| [
{
"point_num": 1,
"id": "has_multiple_inheritance",
"question": "Does any class in this file use multiple inheritance?",
"answer": false
},
{
"point_num": 2,
"id": "every_function_has_docstring",
"question": "Does every function in this file have a docstring?",
"answer": false
... | 3 | tests/test_slider.py | Yardanico/pylibui-cffi |
from __future__ import unicode_literals
from django.contrib.auth.models import AbstractBaseUser
from django.db import models
class TestUser(AbstractBaseUser):
identifier = models.CharField(max_length=40, unique=True, db_index=True)
uid_number = models.IntegerField()
USERNAME_FIELD = 'identifier'
def get_full_name(self):
return self.identifier
def get_short_name(self):
return self.identifier
def get_first_name(self):
return 'Alice'
def set_first_name(self, value):
raise Exception('Oops...')
first_name = property(get_first_name, set_first_name)
| [
{
"point_num": 1,
"id": "all_function_names_snake_case",
"question": "Are all function names in this file written in snake_case?",
"answer": true
},
{
"point_num": 2,
"id": "has_multiple_inheritance",
"question": "Does any class in this file use multiple inheritance?",
"answer": ... | 3 | tests/models.py | ThibaultVigier/django-auth-ldap |
import cv2
class VideoCamera(object):
def __init__(self):
# Using OpenCV to capture from device 0. If you have trouble capturing
# from a webcam, comment the line below out and use a video file
# instead.
# self.video = cv2.VideoCapture(0)
self.image = None
self.cache = None
# If you decide to use video.mp4, you must have this file in the folder
# as the main.py.
# self.video = cv2.VideoCapture('video.mp4')
def __del__(self):
self.video.release()
def get_frame(self):
try:
self.image = cv2.imread("static/Stream.jpg")
# We are using Motion JPEG, but OpenCV defaults to capture raw images,
# so we must encode it into JPEG in order to correctly display the
# video stream.
ret, jpeg = cv2.imencode('.jpg', self.image)
temp = jpeg.tobytes()
if temp is not None:
self.cache = temp
return temp
else:
return cache
except:
pass | [
{
"point_num": 1,
"id": "more_functions_than_classes",
"question": "Does this file define more functions than classes?",
"answer": true
},
{
"point_num": 2,
"id": "every_function_under_20_lines",
"question": "Is every function in this file shorter than 20 lines?",
"answer": true
... | 3 | camera.py | fanzhe98/FaceRecoCamera |
import unittest
import cpmpy as cp
from cpmpy.expressions import *
from cpmpy.expressions.core import Operator
class TestSum(unittest.TestCase):
def setUp(self):
self.iv = cp.intvar(0, 10)
def test_add_int(self):
expr = self.iv + 4
self.assertIsInstance(expr, Operator)
self.assertEqual(expr.name, 'sum')
def test_addsub_int(self):
expr = self.iv + 3 - 1
self.assertIsInstance(expr, Operator)
self.assertEqual(expr.name, 'sum')
self.assertEqual(len(expr.args), 3)
def test_subadd_int(self):
expr = self.iv -10 + 3
self.assertIsInstance(expr, Operator)
self.assertEqual(expr.name, 'sum')
self.assertEqual(len(expr.args), 3)
def test_add_iv(self):
expr = self.iv + cp.intvar(2,4)
self.assertIsInstance(expr, Operator)
self.assertEqual(expr.name, 'sum')
def test_addsub_iv_int(self):
expr = self.iv + cp.intvar(2,4) - 1
self.assertIsInstance(expr, Operator)
self.assertEqual(expr.name, 'sum')
self.assertEqual(len(expr.args), 3)
def test_subadd_iv_int(self):
expr = self.iv - cp.intvar(2,4) + 1
self.assertIsInstance(expr, Operator)
self.assertEqual(expr.name, 'sum')
self.assertEqual(len(expr.args), 3)
| [
{
"point_num": 1,
"id": "every_function_has_docstring",
"question": "Does every function in this file have a docstring?",
"answer": false
},
{
"point_num": 2,
"id": "every_function_under_20_lines",
"question": "Is every function in this file shorter than 20 lines?",
"answer": tru... | 3 | tests/test_expressions.py | hakank/cpmpy |
import os
from ibm_watson import LanguageTranslatorV3
from ibm_cloud_sdk_core.authenticators import IAMAuthenticator
from dotenv import load_dotenv
load_dotenv()
apikey = os.environ['apikey']
url = os.environ['url']
authenticator = IAMAuthenticator(apikey)
translator_instance = LanguageTranslatorV3(
version='2018-05-01',
authenticator=authenticator
)
translator_instance.set_service_url(url)
def english_french(english_text):
translation = translator_instance.translate(
text= english_text,
model_id='en-fr').get_result()
french_text = translation['translations'][0]['translation']
return french_text
def french_english(french_text):
translation = translator_instance.translate(
text=french_text,
model_id='fr-en').get_result()
english_text = translation['translations'][0]['translation']
return english_text
| [
{
"point_num": 1,
"id": "every_function_under_20_lines",
"question": "Is every function in this file shorter than 20 lines?",
"answer": true
},
{
"point_num": 2,
"id": "has_nested_function_def",
"question": "Does this file contain any function defined inside another function?",
"... | 3 | final_project/machinetranslation/translator.py | SlenderShield/xzceb-flask_eng_fr |
from . import common
from audio_toolbox import sox
class PitchDeformer:
SUFFIX = '.pitch@n'
def __init__(self, input_files_key, output_files_key, semitones):
self.input_files_key = input_files_key
self.output_files_key = output_files_key
self.semitones = semitones
def execute(self, context):
input_files = context[self.input_files_key]
output_files = context[self.output_files_key] = []
for input_file in input_files:
output_pattern = common.append_suffix_to_filename(
input_file, PitchDeformer.SUFFIX)
for index, semitone in enumerate(self.semitones):
# start indexing with 1 to be compatible with sox
output_file = output_pattern.replace('@n', str(index + 1))
output_files.append(output_file)
sox.adjust_pitch(input_file, output_file, semitone=semitone)
| [
{
"point_num": 1,
"id": "no_function_exceeds_5_params",
"question": "Does every function in this file take 5 or fewer parameters (excluding self/cls)?",
"answer": true
},
{
"point_num": 2,
"id": "all_params_annotated",
"question": "Does every function parameter in this file have a ty... | 3 | jobs/pitch_deformer.py | nSimonFR/spoken_language_dataset |
#!/usr/bin/env python
import gtk
import Editor
def main(filenames=[]):
"""
start the editor, with a new empty document
or load all *filenames* as tabs
returns the tab object
"""
Editor.register_stock_icons()
editor = Editor.EditorWindow()
tabs = map(editor.load_document, filenames)
if len(filenames) == 0:
editor.welcome()
return tabs
def run():
"""
handle all initialisation and start main() and gtk.main()
"""
try: # this works only on linux
from ctypes import cdll
libc = cdll.LoadLibrary("libc.so.6")
libc.prctl(15, 'odMLEditor', 0, 0, 0)
except:
pass
from optparse import OptionParser
parser = OptionParser()
(options, args) = parser.parse_args()
main(filenames=args)
gtk.main()
if __name__=="__main__":
run()
| [
{
"point_num": 1,
"id": "every_function_under_20_lines",
"question": "Is every function in this file shorter than 20 lines?",
"answer": true
},
{
"point_num": 2,
"id": "every_function_has_docstring",
"question": "Does every function in this file have a docstring?",
"answer": true... | 3 | odml/gui/__main__.py | carloscanova/python-odml |
from django.http import HttpResponse, HttpResponseRedirect
from django.shortcuts import get_object_or_404, render
from django.urls import reverse
from django.template import loader
from django.contrib import messages
from django.views import generic
from django.views.generic.base import TemplateView
from django.utils import timezone
from django.contrib.auth.decorators import login_required
from django.contrib.auth.mixins import LoginRequiredMixin
from .models import Door, Area, AccessRule, User
class IndexView(TemplateView):
template_name = 'SecuriTree/index.html'
class HomeView(LoginRequiredMixin,TemplateView):
template_name = 'SecuriTree/home.html'
class HierarchyView(LoginRequiredMixin,generic.ListView):
model = Area
template_name = 'SecuriTree/hierarchy.html'
context_object_name = 'area_list'
def get_queryset(self):
return Area.objects.filter(parent_area__isnull=True).order_by('id')
class DoorManageView(LoginRequiredMixin,TemplateView):
template_name = 'SecuriTree/manage_doors.html'
class DoorsView(LoginRequiredMixin,generic.ListView):
model = Door
template_name = 'SecuriTree/all_doors.html'
context_object_name = 'door_list'
def get_queryset(self):
return Door.objects.all()
@login_required
def door_form(request):
r_action = request.GET['action']
if r_action == 'unlock':
action = 'unlock'
else:
action = 'lock'
return render(request, 'SecuriTree/door_form.html', {'action':action})
@login_required
def door_status(request):
door_id = request.POST['doorid']
status = request.POST['status']
door = get_object_or_404(Door, pk=door_id)
# door = Door.objects.filter(pk = door_id).first()
door.status = status;
door.save()
if status == 'closed':
msg = 'Door ' + door.id + ' successfully locked.'
else:
msg = 'Door ' + door.id + ' successfully unlocked.'
messages.success(request, msg)
return HttpResponseRedirect(request.META.get('HTTP_REFERER'))
| [
{
"point_num": 1,
"id": "has_multiple_inheritance",
"question": "Does any class in this file use multiple inheritance?",
"answer": true
},
{
"point_num": 2,
"id": "all_function_names_snake_case",
"question": "Are all function names in this file written in snake_case?",
"answer": ... | 3 | SecuriTree/views.py | davymaish/django-SecuriTree |
# Licensed under a 3-clause BSD style license - see LICENSE.rst
import gzip
import sys
from astropy.extern.six import StringIO
from astropy.extern.six.moves import urllib
from astropy.io import fits
__all__ = ['chunk_report','chunk_read']
def chunk_report(bytes_so_far, chunk_size, total_size):
if total_size > 0:
percent = float(bytes_so_far) / total_size
percent = round(percent*100, 2)
sys.stdout.write("Downloaded %12.2g of %12.2g Mb (%6.2f%%)\r" %
(bytes_so_far / 1024.**2, total_size / 1024.**2, percent))
else:
sys.stdout.write("Downloaded %10.2g Mb\r" %
(bytes_so_far / 1024.**2))
def chunk_read(response, chunk_size=1024, report_hook=None):
content_length = response.info().get('Content-Length')
if content_length is None:
total_size = 0
else:
total_size = content_length.strip()
total_size = int(total_size)
bytes_so_far = 0
result_string = b""
# sys.stdout.write("Beginning download.\n")
while True:
chunk = response.read(chunk_size)
result_string += chunk
bytes_so_far += len(chunk)
if not chunk:
if report_hook:
sys.stdout.write('\n')
break
if report_hook:
report_hook(bytes_so_far, chunk_size, total_size)
return result_string
def retrieve(url, outfile, opener=None, overwrite=False):
"""
"retrieve" (i.e., download to file) a URL.
"""
if opener is None:
opener = urllib.build_opener()
page = opener.open(url)
results = chunk_read(page, report_hook=chunk_report)
S = StringIO(results)
try:
fitsfile = fits.open(S,ignore_missing_end=True)
except IOError:
S.seek(0)
G = gzip.GzipFile(fileobj=S)
fitsfile = fits.open(G,ignore_missing_end=True)
fitsfile.writeto(outfile, clobber=overwrite)
| [
{
"point_num": 1,
"id": "every_function_has_docstring",
"question": "Does every function in this file have a docstring?",
"answer": false
},
{
"point_num": 2,
"id": "all_function_names_snake_case",
"question": "Are all function names in this file written in snake_case?",
"answer"... | 3 | astroquery/utils/progressbar.py | wschoenell/astroquery |
import sys
class SimpleProgressBar:
def __init__(self):
self.displayed = False
def update(self, percent):
self.displayed = True
bar_size = 40
percent *= 100.0
if percent > 100:
percent = 100.0
dots = int(bar_size * percent / 100)
plus = percent / 100 * bar_size - dots
if plus > 0.8:
plus = '='
elif plus > 0.4:
plus = '-'
else:
plus = ''
percent = int(percent)
bar = '=' * dots + plus
bar = '{0:>3}%[{1:<40}]'.format(percent, bar)
sys.stdout.write('\r'+bar)
sys.stdout.flush()
def done(self):
if self.displayed:
print
self.displayed = False
| [
{
"point_num": 1,
"id": "every_function_has_docstring",
"question": "Does every function in this file have a docstring?",
"answer": false
},
{
"point_num": 2,
"id": "all_function_names_snake_case",
"question": "Are all function names in this file written in snake_case?",
"answer"... | 3 | lixian_progress.py | 1py/xunlei-lixian |
from keras.models import load_model
import h5py
import numpy as np
import matplotlib.pyplot as plt
import cv2
from keras.optimizers import Adam
import os
from keras import backend as K
import tensorflow as tf
def PSNR(y_true, y_pred):
max_pixel = 1.0
return 10.0 * tf_log10((max_pixel ** 2) / (K.mean(K.square(y_pred - y_true))))
def tf_log10(x):
numerator = tf.log(x)
denominator = tf.log(tf.constant(10, dtype=numerator.dtype))
return numerator / denominator
data = h5py.File("D:/water_large_sorted.h5")
x = data["lowres"][:48]
##x2 = []
##x = np.squeeze(x)
##for i in range(len(x)):
## temp = cv2.resize(x[i], (16, 16), interpolation = cv2.INTER_CUBIC)
## temp = temp.reshape(16, 16, 1)
## x2.append(temp)
##x2 = np.asarray(x2)
modelname = input()
model = load_model(modelname, custom_objects = {'PSNR' : PSNR})
result = model.predict(x, verbose = 1)
result = result.reshape(48, 32, 32)
y = data["highres"][:48]
y = y.reshape(48, 32, 32)
for i in range(48):
plt.imshow(y[i])
plt.savefig("D:/SRCFD/truth/truth_fsrcnn {}.png".format(i))
for i in range(48):
plt.imshow(result[i])
plt.savefig("D:/SRCFD/neuralnetoutput/neural net output_fsrcnn {}.png".format(i))
import glob
import moviepy.editor as mpy
import time
time = ((time.asctime().replace(" ", " ")).replace(" ", "_")).replace(":", "-")
file_list = glob.glob('D:/SRCFD/neuralnetoutput/*.png')
list.sort(file_list, key=lambda x: int(x.split(' ')[3].split('.png')[0]))
print(file_list)
clip = mpy.ImageSequenceClip(file_list, fps=24)
clip.write_gif('neuralnet_fsrcnn {}.gif'.format(time), fps=24)
file_list = glob.glob('D:/SRCFD/truth/*.png')
list.sort(file_list, key=lambda x: int(x.split(' ')[1].split('.png')[0]))
print(file_list)
clip = mpy.ImageSequenceClip(file_list, fps=24)
clip.write_gif('truth_fsrcnn.gif'.format(time), fps=24)
| [
{
"point_num": 1,
"id": "every_function_has_docstring",
"question": "Does every function in this file have a docstring?",
"answer": false
},
{
"point_num": 2,
"id": "all_params_annotated",
"question": "Does every function parameter in this file have a type annotation (excluding self/... | 3 | neural_net/fsrcnn_predict.py | matthewli125/SRCFD |
import chex
import jax
import jax.numpy as np
import numpy as onp
import objax
import pytest
from rbig_jax.transforms.conv import Conv1x1Householder
seed = 123
rng = onp.random.RandomState(123)
generator = objax.random.Generator(123)
@pytest.mark.parametrize("n_channels", [1, 3, 12])
@pytest.mark.parametrize("hw", [(1, 1), (5, 5), (12, 12)])
@pytest.mark.parametrize("n_samples", [1, 5, 10])
@pytest.mark.parametrize("n_reflections", [1, 2, 10])
def test_conv1x1ortho_shape(n_channels, hw, n_samples, n_reflections):
x = objax.random.normal((n_samples, hw[0], hw[1], n_channels), generator=generator)
# print(x.shape)
# create layer
model = Conv1x1Householder(n_channels=n_channels, n_reflections=n_reflections)
# forward transformation
z, log_abs_det = model(x)
# print(z.shape, log_abs_det.shape)
# checks
chex.assert_equal_shape([z, x])
chex.assert_shape(np.atleast_1d(log_abs_det), (n_samples,))
# forward transformation
z = model.transform(x)
# checks
chex.assert_equal_shape([z, x])
# inverse transformation
x_approx = model.inverse(z)
# checks
chex.assert_equal_shape([x_approx, x])
@pytest.mark.parametrize("n_channels", [1, 3, 12])
@pytest.mark.parametrize("hw", [(1, 1), (5, 5), (12, 12)])
@pytest.mark.parametrize("n_samples", [1, 5, 10])
@pytest.mark.parametrize("n_reflections", [1, 2, 10])
def test_conv1x1ortho_approx(n_channels, hw, n_samples, n_reflections):
x = objax.random.normal((n_samples, hw[0], hw[1], n_channels), generator=generator)
# create layer
model = Conv1x1Householder(n_channels=n_channels, n_reflections=n_reflections)
# forward transformation
z, log_abs_det = model(x)
# inverse transformation
x_approx = model.inverse(z)
# checks
chex.assert_tree_all_close(x, x_approx, atol=1e-5)
| [
{
"point_num": 1,
"id": "every_function_under_20_lines",
"question": "Is every function in this file shorter than 20 lines?",
"answer": false
},
{
"point_num": 2,
"id": "all_params_annotated",
"question": "Does every function parameter in this file have a type annotation (excluding s... | 3 | tests/transforms/test_conv1x1ortho.py | alexhepburn/rbig_jax |
import os
import re
# room, sector, checksum
p = re.compile("([\w-]+)-(\d+)\[(\w+)\]")
def checksum(room_name):
counts = {}
for char in room_name.replace('-', ''):
if char in counts:
counts[char] += 1
else:
counts[char] = 1
result = []
for item in sorted(counts.items(), key=lambda pair: (-pair[1], pair[0])):
result.append(item[0])
return ''.join(result)[:5]
def valid_sector_sum(lines):
sector_sum = 0
for line in lines:
m = p.match(line)
room_name, sector, cs = m.groups()
if cs == checksum(room_name):
sector_sum += int(sector)
return sector_sum
def solve_part_1():
input_file = os.path.join(os.path.dirname(__file__), 'input.txt')
return valid_sector_sum(open(input_file).readlines())
def decipher(room_name, sector):
alphabet = "abcdefghijklmnopqrstuvwxyz"
result = []
for char in room_name:
if char == '-':
result.append(' ')
else:
index = alphabet.index(char)
index += sector
index = index % len(alphabet)
result.append(alphabet[index])
return ''.join(result)
def solve_part_2():
input_file = os.path.join(os.path.dirname(__file__), 'input.txt')
for line in open(input_file).readlines():
m = p.match(line)
room_name, sector, cs = m.groups()
if cs == checksum(room_name):
if decipher(room_name, int(sector)) == 'northpole object storage':
return int(sector)
if __name__ == '__main__':
solve_part_2()
| [
{
"point_num": 1,
"id": "all_params_annotated",
"question": "Does every function parameter in this file have a type annotation (excluding self/cls)?",
"answer": false
},
{
"point_num": 2,
"id": "all_function_names_snake_case",
"question": "Are all function names in this file written ... | 3 | day4/day4.py | chrisb87/advent_of_code_2016 |
import click
from cmsis_svd.parser import SVDParser
MCU_OPTIONS = [
'STM32F0xx',
]
MCU2VENDOR_FILE = {
'STM32F0xx': ('STMicro', 'STM32F0xx.svd'),
}
ALL = 'show_all'
def show_register(register):
fields = []
for field in register.fields:
upper_index = field.bit_offset + field.bit_width - 1
lower_index = field.bit_offset
if upper_index == lower_index:
index_s = str(upper_index)
else:
index_s = f'{upper_index}:{lower_index}'
fields.append(f'{field.name}[{index_s}]')
print(f'{register.name: <5} 0x{register.address_offset:04x}: {",".join(fields)}')
def show_peripheral(peripheral):
print(peripheral.name)
for register in peripheral.registers:
show_register(register)
print()
@click.command()
@click.option('--mcu', type=click.Choice(MCU_OPTIONS), required=True,
help='MCU Name')
@click.option('--mcu-peripheral', help='Peripheral Specified')
def main(mcu, mcu_peripheral=None):
"""Given a chip and peripheral, prints the registers.
"""
parser = SVDParser.for_packaged_svd(*MCU2VENDOR_FILE[mcu])
address2peripheral = {}
for peripheral in parser.get_device().peripherals:
address2peripheral[peripheral.base_address] = peripheral
for _, peripheral in sorted(address2peripheral.items()):
print(f'{peripheral.name: <16} @ 0x{peripheral.base_address:08x} ({peripheral.address_block.size: >4})')
if mcu_peripheral:
for peripheral in parser.get_device().peripherals:
if peripheral.name == mcu_peripheral or mcu_peripheral == ALL:
show_peripheral(peripheral)
if __name__ == '__main__':
main()
| [
{
"point_num": 1,
"id": "no_function_exceeds_5_params",
"question": "Does every function in this file take 5 or fewer parameters (excluding self/cls)?",
"answer": true
},
{
"point_num": 2,
"id": "all_function_names_snake_case",
"question": "Are all function names in this file written... | 3 | cmsis-svd-parsing/main.py | michael-christen/prototypes |
from helpers.executor import Executor
from helpers.util import *
import itertools
from itertools import *
import re
from re import *
import numpy as np
from typing import Any, Callable, Generator, Sequence
day, year = None, None # TODO: Update day and year for current day
split_seq = '\n'
class Solution(Executor):
def solve(self, r: Sequence[str], print: Callable[..., None]) -> Generator[Any, None, None]:
yield self._solve_part1(r, print)
yield self._solve_part2(r, print)
def _solve_part1(self, r: Sequence[str], print: Callable[..., None]) -> Any:
# TODO
return None
def _solve_part2(self, r: Sequence[str], print: Callable[..., None]) -> Any:
# TODO
return None
if __name__ == '__main__':
solution = Solution(year, day)
solution.execute(split_seq, use_cached_test_cases=True)
| [
{
"point_num": 1,
"id": "more_functions_than_classes",
"question": "Does this file define more functions than classes?",
"answer": true
},
{
"point_num": 2,
"id": "no_function_exceeds_5_params",
"question": "Does every function in this file take 5 or fewer parameters (excluding self/... | 3 | src/solutions/template.py | etillison3350/advent-of-code-2020 |
# add path to the main package and test battery.py
if __name__ == '__main__':
from __access import ADD_PATH
ADD_PATH()
import unittest
import psutil
from battery import Battery
class TestBattery(unittest.TestCase):
""" Test battry module """
def test_Battery_constructor(self):
if not (has_battery := psutil.sensors_battery()):
with self.assertRaises(Exception):
Battery()
else:
self.assertTrue(has_battery.percent > 0)
def test_create_details_text(self):
if not psutil.sensors_battery():
pass
else:
self.assertTrue(isinstance(Batter().create_details_text(), str))
if __name__ == '__main__':
unittest.main() | [
{
"point_num": 1,
"id": "has_multiple_inheritance",
"question": "Does any class in this file use multiple inheritance?",
"answer": false
},
{
"point_num": 2,
"id": "every_function_has_docstring",
"question": "Does every function in this file have a docstring?",
"answer": false
... | 3 | tests/test_battery.py | bexxmodd/vizex |
def add(a, b):
print(f"ADDING {a} + {b}")
return a + b
def subtract(a, b):
print(f"SUBTRACTING {a} - {b}")
return a - b
def multiply(a, b):
print(f"MULTIPLYING {a} * {b}")
return a * b
def divide(a, b):
print(f"DIVIDING {a} / {b}")
return a / b
print("Let's do some math with just functions!")
age = add(30,5)
height = subtract(78,4)
weight = multiply(90,2)
iq = divide(100,2)
print(f"Age: {age}, Height: {height}, Weight: {weight}, IQ: {iq}")
# A puzzle for the extra credit, type it in anyway.
print("Here is a puzzle.")
what = add(age, subtract(add(height,multiply(iq,2)), multiply(weight,divide(iq,2))))
print("That becomes: ", what, "Can you do it by hand?") | [
{
"point_num": 1,
"id": "every_function_has_docstring",
"question": "Does every function in this file have a docstring?",
"answer": false
},
{
"point_num": 2,
"id": "no_function_exceeds_5_params",
"question": "Does every function in this file take 5 or fewer parameters (excluding sel... | 3 | ex21/ex21-3.py | taylorcreekbaum/lpthw |
from django.db import models
class CapitalizeField(models.CharField):
def __init__(self, *args, **kwargs):
super(CapitalizeField, self).__init__(*args, **kwargs)
def pre_save(self, model_instance, add):
value = getattr(model_instance, self.attname, None)
if value:
value = value.capitalize()
setattr(model_instance, self.attname, value)
return value
else:
return super(CapitalizeField, self).pre_save(model_instance, add)
class CustomManager(models.Manager):
"""
Custom manager so as not to return deleted objects
"""
def get_queryset(self):
return super(CustomManager, self).get_queryset().filter(deleted=False)
class AbstractBase(models.Model):
"""
This contains all common object attributes
Every model will inherit this class to avoid repetition
Its abstract hence can't be instatiated
"""
created_at = models.DateTimeField(auto_now_add=True)
updated_at = models.DateTimeField(auto_now=True)
deleted = models.BooleanField(
default=False,
help_text="This is to make sure deletes are not actual deletes"
)
# everything will be used to query deleted objects e.g Model.everything.all()
everything = models.Manager()
objects = CustomManager()
def delete(self, *args, **kwargs):
self.deleted = True
self.save()
class Meta:
ordering = ['-updated_at', '-created_at']
abstract = True
| [
{
"point_num": 1,
"id": "every_function_under_20_lines",
"question": "Is every function in this file shorter than 20 lines?",
"answer": true
},
{
"point_num": 2,
"id": "has_multiple_inheritance",
"question": "Does any class in this file use multiple inheritance?",
"answer": false... | 3 | apps/common/models.py | kwanj-k/ctrim_api |
#!/usr/bin/python
# -*- coding: utf-8 -*-
"""
Copyright (c) Contributors to the Open 3D Engine Project.
For complete copyright and license terms please see the LICENSE at the root of this distribution.
SPDX-License-Identifier: Apache-2.0 OR MIT
"""
import sys
import os
from clr import *
sys.path.append("..")
import testfuncs
def test(thefile, compilerPath, silent):
'''return number of successes'''
symbols, ok = testfuncs.buildAndGetSymbols(thefile, compilerPath, silent)
if ok:
try:
ok = symbols["Symbol '/g_s'"]['kind'] == 'Variable'
ok = ok and symbols["Symbol '/g_s'"]['type']['core']['name'] == '/S'
if not silent:
if not ok:
print (fg.RED+ "ERR: g_s type could not be validated"+ style.RESET_ALL)
else:
print (style.BRIGHT+ "OK! "+ style.RESET_ALL)
except Exception as e:
print (fg.RED+ "Err: Parsed --dumpsym may lack some expected keys"+ style.RESET_ALL, e)
return ok
result = 0 # to define for sub-tests
resultFailed = 0
def doTests(compiler, silent, azdxcpath):
global result
global resultFailed
# Working directory should have been set to this script's directory by the calling parent
# You can get it once doTests() is called, but not during initialization of the module,
# because at that time it will still be set to the working directory of the calling script
workDir = os.getcwd()
if test(os.path.join(workDir, "../Semantic/combined-vardecl-udt.azsl"), compiler, silent): result += 1
else: resultFailed += 1
if __name__ == "__main__":
print ("please call from testapp.py")
| [
{
"point_num": 1,
"id": "every_function_under_20_lines",
"question": "Is every function in this file shorter than 20 lines?",
"answer": true
},
{
"point_num": 2,
"id": "has_nested_function_def",
"question": "Does this file contain any function defined inside another function?",
"... | 3 | tests/Advanced/typeof-vardecl-udt.py | aws-lumberyard-dev/o3de-azslc |
# coding: utf-8
"""Test that tokenizer exceptions and emoticons are handles correctly."""
from __future__ import unicode_literals
import pytest
@pytest.mark.parametrize('text', ["auf'm", "du's", "รผber'm", "wir's"])
def test_de_tokenizer_splits_contractions(de_tokenizer, text):
tokens = de_tokenizer(text)
assert len(tokens) == 2
@pytest.mark.parametrize('text', ["z.B.", "d.h.", "Jan.", "Dez.", "Chr."])
def test_de_tokenizer_handles_abbr(de_tokenizer, text):
tokens = de_tokenizer(text)
assert len(tokens) == 1
def test_de_tokenizer_handles_exc_in_text(de_tokenizer):
text = "Ich bin z.Zt. im Urlaub."
tokens = de_tokenizer(text)
assert len(tokens) == 6
assert tokens[2].text == "z.Zt."
assert tokens[2].lemma_ == "zur Zeit"
@pytest.mark.parametrize('text,norms', [("vor'm", ["vor", "dem"]), ("du's", ["du", "es"])])
def test_de_tokenizer_norm_exceptions(de_tokenizer, text, norms):
tokens = de_tokenizer(text)
assert [token.norm_ for token in tokens] == norms
@pytest.mark.xfail
@pytest.mark.parametrize('text,norm', [("daร", "dass")])
def test_de_lex_attrs_norm_exceptions(de_tokenizer, text, norm):
tokens = de_tokenizer(text)
assert tokens[0].norm_ == norm
| [
{
"point_num": 1,
"id": "all_function_names_snake_case",
"question": "Are all function names in this file written in snake_case?",
"answer": true
},
{
"point_num": 2,
"id": "no_function_exceeds_5_params",
"question": "Does every function in this file take 5 or fewer parameters (exclu... | 3 | spacy/tests/lang/de/test_exceptions.py | cmgreivel/spaCy |
from datetime import timedelta
from epsilon.extime import Time
from nevow.page import renderer
from nevow.loaders import stan
from nevow.tags import div
from nevow.athena import LiveElement
from xmantissa.liveform import TEXT_INPUT, LiveForm, Parameter
class CalendarElement(LiveElement):
docFactory = stan(div[
"It's a calendar!",
div(render="appointments"),
div(render="appointmentForm")])
def __init__(self, calendar):
LiveElement.__init__(self)
self.calendar = calendar
@renderer
def appointments(self, request, tag):
appointments = self.calendar.getAppointments()
for appointment in appointments:
appDiv = div[
"Appointment with ",
appointment.withWhomUsername, "@",
appointment.withWhomDomain, " at ",
appointment.when.asHumanly()]
if appointment.failed is not None:
appDiv[" (Rejected: ", appointment.failed, ")"]
elif appointment.remoteID is None:
appDiv[" (Pending confirmation)"]
tag[appDiv]
return tag
def _requestAppointment(self, whom):
local, domain = whom.split(u"@")
target = self.calendar.calendarIDFor(local, domain)
self.calendar.requestAppointmentWith(target, Time() + timedelta(days=2))
@renderer
def appointmentForm(self, request, tag):
form = LiveForm(
self._requestAppointment,
[Parameter(u"whom", TEXT_INPUT, unicode, u"Whom:",
u"The username of the person with whom "
u"to create an appointment (user@domain).",
None)],
"Request An Appointment")
form.setFragmentParent(self)
return form
| [
{
"point_num": 1,
"id": "all_params_annotated",
"question": "Does every function parameter in this file have a type annotation (excluding self/cls)?",
"answer": false
},
{
"point_num": 2,
"id": "every_function_under_20_lines",
"question": "Is every function in this file shorter than ... | 3 | doc/listings/interstore/webcal.py | jonathanj/mantissa |
# Copyright (c) 2021, VRAI Labs and/or its affiliates. All rights reserved.
#
# This software is licensed under the Apache License, Version 2.0 (the
# "License") as published by the Apache Software Foundation.
#
# You may not use this file except in compliance with the License. You may
# obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from __future__ import annotations
from typing import TYPE_CHECKING, Callable, Union
if TYPE_CHECKING:
from .interfaces import RecipeInterface, APIInterface
class OverrideConfig:
def __init__(self, functions: Union[Callable[[RecipeInterface], RecipeInterface],
None] = None, apis: Union[Callable[[APIInterface], APIInterface], None] = None):
self.functions = functions
self.apis = apis
class JWTConfig:
def __init__(self, override: OverrideConfig, jwt_validity_seconds: int):
self.override = override
self.jwt_validity_seconds = jwt_validity_seconds
def validate_and_normalise_user_input(
jwt_validity_seconds: Union[int, None] = None,
override: Union[OverrideConfig, None] = None):
if override is None:
override = OverrideConfig()
if jwt_validity_seconds is None:
jwt_validity_seconds = 3153600000
return JWTConfig(override, jwt_validity_seconds)
| [
{
"point_num": 1,
"id": "no_function_exceeds_5_params",
"question": "Does every function in this file take 5 or fewer parameters (excluding self/cls)?",
"answer": true
},
{
"point_num": 2,
"id": "all_function_names_snake_case",
"question": "Are all function names in this file written... | 3 | supertokens_python/recipe/jwt/utils.py | girish946/supertokens-python |
# read csv to model
import tensorflow as tf
import numpy as np
import os
def read_csv(batch_size, file_name, record_defaults=1):
fileName_queue=tf.train.string_input_producer(os.path.dirname(__file__)+"/"+file_name)
reader = tf.TextLineReader(skip_header_lines=1)
key, value=reader.read(fileName_queue,name='read_op')
# decode_csv will convert a Tensor from type string (the text line) in
# a tuple of tensor columns with the specified defaults, which also
# sets teh data type for each column
decoded=tf.decode_csv(records=value)
# batch actually reads the file and loads "batch size" rows in a single tensor
return tf.train.shuffle_batch(decoded, batch_size=batch_size, capacity=batch_size* 50, min_after_dequeue=batch_size)
def inputs():
passenger_id, survived, pclass, name, sex, age, sibsp, parch, ticket, fare, cabin, embarked =\
read_csv(100,"./data/train.csv",) | [
{
"point_num": 1,
"id": "no_function_exceeds_5_params",
"question": "Does every function in this file take 5 or fewer parameters (excluding self/cls)?",
"answer": true
},
{
"point_num": 2,
"id": "has_nested_function_def",
"question": "Does this file contain any function defined insid... | 3 | read_csv.py | yangzhou95/learn-tensorflow |
"""Author: Brandon Trabucco, Copyright 2019"""
import tensorflow as tf
from mineral.algorithms.tuners.tuner import Tuner
class EntropyTuner(Tuner):
def __init__(
self,
policy,
**kwargs
):
Tuner.__init__(self, **kwargs)
self.policy = policy
def update_algorithm(
self,
observations,
actions,
rewards,
terminals
):
def loss_function():
policy_actions = self.policy.get_expected_value(
observations[:, :(-1), ...])
policy_entropy = -terminals[:, :(-1)] * self.policy.get_log_probs(
policy_actions,
observations[:, :(-1), ...])
entropy_error = policy_entropy - self.target
entropy_loss = self.tuning_variable * tf.stop_gradient(entropy_error)
self.record(
"entropy_tuning_variable",
self.tuning_variable)
self.record(
"entropy_error_mean",
tf.reduce_mean(entropy_error))
self.record(
"entropy_error_max",
tf.reduce_max(entropy_error))
self.record(
"entropy_error_min",
tf.reduce_min(entropy_error))
self.record(
"entropy",
tf.reduce_mean(policy_entropy))
self.record(
"entropy_loss",
tf.reduce_mean(entropy_loss))
return tf.reduce_mean(entropy_loss)
self.optimizer.minimize(
loss_function, [self.tuning_variable])
| [
{
"point_num": 1,
"id": "all_function_names_snake_case",
"question": "Are all function names in this file written in snake_case?",
"answer": true
},
{
"point_num": 2,
"id": "has_nested_function_def",
"question": "Does this file contain any function defined inside another function?",
... | 3 | mineral/algorithms/tuners/entropy_tuner.py | brandontrabucco/jetpack |
import Anton as aen
import numpy as np
import matplotlib.pyplot as plt
import os
from scipy.stats import linregress
def changeCelsius(path):
files = aen.searchfiles(path, '.npy')
files.sort()
for i in files:
fname,name = os.path.split(i)
if 'Celsius' in name:
nm = name.split('Celsius')
nm = nm[0]+nm[1]
os.rename(os.path.join(fname,name), os.path.join(fname,nm))
return print('geaender')
def change2under(path):
files = aen.searchfiles(path, '.npy')
files.sort()
j = 0
for i in files:
j += 1
fname, name = os.path.split(i)
try:
nm = name.replace('KE60','KE_60')
os.rename(os.path.join(fname,name), os.path.join(fname,nm))
print(nm)
except: pass
return print('%s Files vorhanden'% str(j))
if __name__ == '__main__':
path = r'Z:\2_Projekt__Permeabilitรคtsbeeinflussung\02_Lรถslichkeitsuntersuchungen\HS Microscope\Experiments\Final_results\data'
change2under(path)
| [
{
"point_num": 1,
"id": "has_nested_function_def",
"question": "Does this file contain any function defined inside another function?",
"answer": false
},
{
"point_num": 2,
"id": "every_function_under_20_lines",
"question": "Is every function in this file shorter than 20 lines?",
... | 3 | 2 - data2graph/evaluateData/changeNames.py | Tocha4/HSM-Solubility |
import toga
import time
import toga
from colosseum import CSS
tab_style = CSS(flex=1, padding=20)
def build(app):
font = toga.Font('Helvetica', 40)
return toga.Box(children=[toga.Button('Button')])
def main():
return toga.App('Test Font', 'org.pybee.font', startup=build)
if __name__ == '__main__':
app = main()
app.main_loop()
| [
{
"point_num": 1,
"id": "all_return_types_annotated",
"question": "Does every function in this file have a return type annotation?",
"answer": false
},
{
"point_num": 2,
"id": "every_function_has_docstring",
"question": "Does every function in this file have a docstring?",
"answe... | 3 | font/font/app.py | Ocupe/toga_test_app_collection |
try:
from unittest import mock
except ImportError:
import mock
from graphql_ws.gevent import GeventConnectionContext, GeventSubscriptionServer
class TestConnectionContext:
def test_receive(self):
ws = mock.Mock()
connection_context = GeventConnectionContext(ws=ws)
connection_context.receive()
ws.receive.assert_called()
def test_send(self):
ws = mock.Mock()
ws.closed = False
connection_context = GeventConnectionContext(ws=ws)
connection_context.send("test")
ws.send.assert_called_with("test")
def test_send_closed(self):
ws = mock.Mock()
ws.closed = True
connection_context = GeventConnectionContext(ws=ws)
connection_context.send("test")
assert not ws.send.called
def test_close(self):
ws = mock.Mock()
connection_context = GeventConnectionContext(ws=ws)
connection_context.close(123)
ws.close.assert_called_with(123)
def test_subscription_server_smoke():
GeventSubscriptionServer(schema=None)
| [
{
"point_num": 1,
"id": "more_functions_than_classes",
"question": "Does this file define more functions than classes?",
"answer": true
},
{
"point_num": 2,
"id": "has_multiple_inheritance",
"question": "Does any class in this file use multiple inheritance?",
"answer": false
},... | 3 | tests/test_gevent.py | kahkeng/graphql-ws |
import uuid
from django import template
from django.forms.widgets import Media
from django.utils.safestring import mark_safe
from django.core.urlresolvers import reverse
from settings import PROGRESSBARUPLOAD_INCLUDE_JQUERY
register = template.Library()
@register.simple_tag
def progress_bar():
"""
progress_bar simple tag
return html5 tag to display the progress bar
and url of ajax function needed to get upload progress
in js/progress_bar.js file.
"""
progress_bar_tag = '<progress id="progressBar" ' \
'data-progress_bar_uuid="%s" value="0" max="100" ' \
'hidden></progress><div id="progressText"></div>' % (uuid.uuid4())
upload_progress_url = '<script>upload_progress_url = "%s"</script>' \
% (reverse('upload_progress'))
return mark_safe(progress_bar_tag + upload_progress_url)
@register.simple_tag
def progress_bar_media():
"""
progress_bar_media simple tag
return rendered script tag for javascript used by progress_bar
"""
if PROGRESSBARUPLOAD_INCLUDE_JQUERY:
js = ["http://code.jquery.com/jquery-1.8.3.min.js",]
else:
js = []
js.append("js/progress_bar.js")
m = Media(js=js)
return m.render()
| [
{
"point_num": 1,
"id": "has_nested_function_def",
"question": "Does this file contain any function defined inside another function?",
"answer": false
},
{
"point_num": 2,
"id": "every_function_under_20_lines",
"question": "Is every function in this file shorter than 20 lines?",
... | 3 | afterflight/progressbarupload/templatetags/progress_bar.py | foobarbecue/afterflight |
import torch
from ptstat.core import RandomVariable, _to_v
class Categorical(RandomVariable):
"""
Categorical over 0,...,N-1 with arbitrary probabilities, 1-dimensional rv, long type.
"""
def __init__(self, p=None, p_min=1E-6, size=None, cuda=False):
super(Categorical, self).__init__()
if size:
assert len(size) == 2, str(size)
p = _to_v(1 / size[1], size, cuda)
else:
assert len(p.size()) == 2, str(p.size())
assert torch.min(p.data) >= 0, str(torch.min(p.data))
assert torch.max(torch.abs(torch.sum(p.data, 1) - 1)) <= 1E-5
self._p = torch.clamp(p, p_min)
def _size(self):
return self._p.size()[0], 1 # Type is Long.
def _log_pdf(self, x):
return torch.log(self._p.gather(1, x)).squeeze()
def _sample(self):
return self._p.multinomial(1, True)
def _entropy(self):
return - torch.sum(self._p * torch.log(self._p), 1).squeeze()
| [
{
"point_num": 1,
"id": "more_functions_than_classes",
"question": "Does this file define more functions than classes?",
"answer": true
},
{
"point_num": 2,
"id": "has_multiple_inheritance",
"question": "Does any class in this file use multiple inheritance?",
"answer": false
},... | 3 | ptstat/dist/categorical.py | timmyzhao/ptstat |
import pytest
from django.urls import resolve, reverse
from django_machinelearning.users.models import User
pytestmark = pytest.mark.django_db
def test_detail(user: User):
assert (
reverse("users:detail", kwargs={"username": user.username})
== f"/users/{user.username}/"
)
assert resolve(f"/users/{user.username}/").view_name == "users:detail"
def test_update():
assert reverse("users:update") == "/users/~update/"
assert resolve("/users/~update/").view_name == "users:update"
def test_redirect():
assert reverse("users:redirect") == "/users/~redirect/"
assert resolve("/users/~redirect/").view_name == "users:redirect"
| [
{
"point_num": 1,
"id": "all_function_names_snake_case",
"question": "Are all function names in this file written in snake_case?",
"answer": true
},
{
"point_num": 2,
"id": "every_function_under_20_lines",
"question": "Is every function in this file shorter than 20 lines?",
"answ... | 3 | django_machinelearning/users/tests/test_urls.py | daaawx/django_machinelearning |
import numpy as np
import tensorflow as tf
from tensorflow.python.ops.array_grad import _TileGrad
from tensorflow.python.framework import ops
def shape(x):
if isinstance(x, tf.Tensor):
return x.get_shape().as_list()
return np.shape(x)
@ops.RegisterGradient("TileDense")
def tile_grad_dense(op, grad):
grad = tf.convert_to_tensor(grad)
return _TileGrad(op, grad)
| [
{
"point_num": 1,
"id": "has_nested_function_def",
"question": "Does this file contain any function defined inside another function?",
"answer": false
},
{
"point_num": 2,
"id": "all_params_annotated",
"question": "Does every function parameter in this file have a type annotation (ex... | 3 | src/schnet/nn/utils.py | Yidansong/SchNet |
from django.http.response import HttpResponseRedirect
from .forms import MyForm
from django.shortcuts import get_object_or_404, render
from .models import Flower
# Create your views here.
def index(request):
q = request.GET.get("q" , None)
if q is None or q == '':
flowers = Flower.objects.all()
else:
flowers = Flower.objects.filter(title__icontains=q)
context = {
'flowers':flowers,
}
return render(request, 'myapp/index.html', context)
def detail(request, slug=None):
flower = get_object_or_404(Flower, slug=slug)
context = {
'flower': flower
}
return render(request, 'myapp/detail.html', context)
def tags(request, slug=None):
flowers = Flower.objects.filter(tags__slug=slug)
context = {
'flowers': flowers
}
return render(request, 'myapp/index.html', context)
def create(request):
if request.method == 'POST':
form = MyForm(request.POST)
if form.is_valid():
form.save()
return HttpResponseRedirect('/')
else:
form = MyForm()
context = {
'form': form
}
return render(request, 'myapp/edit.html', context)
def edit(request, pk=None):
flower = get_object_or_404(Flower, pk=pk)
if request.method == "POST":
form = MyForm(request.POST, instance=flower)
if form.is_valid():
form.save()
return HttpResponseRedirect('/')
else:
form = MyForm(instance=flower)
context = {
'form': form
}
return render(request, 'myapp/edit.html', context)
def delete(request, pk=None):
flower = get_object_or_404(Flower, pk=pk)
flower.delete()
return HttpResponseRedirect('/') | [
{
"point_num": 1,
"id": "all_params_annotated",
"question": "Does every function parameter in this file have a type annotation (excluding self/cls)?",
"answer": false
},
{
"point_num": 2,
"id": "all_function_names_snake_case",
"question": "Are all function names in this file written ... | 3 | myapp/views.py | free20064u/flower |
from django.template import Library
from django.template.defaultfilters import floatformat
from django.contrib.humanize.templatetags.humanize import intcomma
from django.utils.encoding import force_unicode
register = Library()
@register.filter(name='addcss')
def addcss(value, arg):
return value.as_widget(attrs={'class': arg})
@register.filter(name='placeholder')
def placeholder(value, arg):
value.field.widget.attrs["placeholder"] = arg
return value
@register.filter(name='textarea')
def textarea(value, arg):
value.field.widget.attrs["rows"] = arg
return value
@register.filter(name='decimal_to_real')
def decimal_to_real(value, precision=2):
value = floatformat(value, precision)
value, decimal = force_unicode(value).split(',')
value = intcomma(value)
value = value.replace(',', '.') + ',' + decimal
return value | [
{
"point_num": 1,
"id": "all_function_names_snake_case",
"question": "Are all function names in this file written in snake_case?",
"answer": true
},
{
"point_num": 2,
"id": "no_function_exceeds_5_params",
"question": "Does every function in this file take 5 or fewer parameters (exclu... | 3 | app/main/templatetags/formtags.py | HenriqueLR/payments |
# coding: utf-8
"""
CONS3RT Web API
A CONS3RT ReSTful API # noqa: E501
The version of the OpenAPI document: 1.0.0
Contact: apiteam@swagger.io
Generated by: https://openapi-generator.tech
"""
from __future__ import absolute_import
import unittest
import openapi_client
from openapi_client.models.full_disk import FullDisk # noqa: E501
from openapi_client.rest import ApiException
class TestFullDisk(unittest.TestCase):
"""FullDisk unit test stubs"""
def setUp(self):
pass
def tearDown(self):
pass
def testFullDisk(self):
"""Test FullDisk"""
# FIXME: construct object with mandatory attributes with example values
# model = openapi_client.models.full_disk.FullDisk() # noqa: E501
pass
if __name__ == '__main__':
unittest.main()
| [
{
"point_num": 1,
"id": "every_function_under_20_lines",
"question": "Is every function in this file shorter than 20 lines?",
"answer": true
},
{
"point_num": 2,
"id": "has_multiple_inheritance",
"question": "Does any class in this file use multiple inheritance?",
"answer": false... | 3 | test/test_full_disk.py | cons3rt/cons3rt-python-sdk |
from anytree import Node
from csv import reader
from pickle import dump
def make_category_tree(path_to_data):
parents = {}
nodes = {}
with open(path_to_data + '/categories.csv', newline='') as csvfile:
rdr = reader(csvfile, delimiter=',')
for row in rdr:
if (row[1] == 'id'):
continue
if (not row[2]):
row[2] = 0
parents[int(row[1])] = int(float(row[2]))
parents = {k: v for k, v in sorted(parents.items(),
key = lambda item: item[1])}
root = Node("root")
nodes[0] = root
for key, value in parents.items():
nodes[key] = Node(key, parent=root)
for key, value in parents.items():
nodes[key].parent = nodes[value]
with open(path_to_data + '/category_tree.obj', 'wb') as category_tree:
dump(root, category_tree)
from utils.io_custom import read_pickle_object
def get_category_tree(path_to_data):
"""
:param path: path to file with binary category tree
:return: root node of type anytree.Node, it contains all the tree data
"""
return read_pickle_object(path_to_data + '/category_tree.obj')
from anytree.search import find
def get_node(id, path_to_data):
    """Return the tree node whose name equals *id* (None when absent)."""
    # NOTE: parameter name `id` shadows the builtin but is kept for callers.
    tree_root = get_category_tree(path_to_data)

    def _matches(node):
        return node.name == id

    return find(tree_root, _matches)
def get_names(path_to_data):
    """Map category id -> display name (column 5 of categories.csv).

    The synthetic key "root" maps to "root" so every tree node has a name.
    """
    names = {"root": "root"}
    with open(path_to_data + '/categories.csv', "r") as f:
        for record in reader(f, delimiter=','):
            if record[1] == "id":  # skip header row
                continue
            names[int(record[1])] = record[5]
    return names
| [
{
"point_num": 1,
"id": "every_function_has_docstring",
"question": "Does every function in this file have a docstring?",
"answer": false
},
{
"point_num": 2,
"id": "all_params_annotated",
"question": "Does every function parameter in this file have a type annotation (excluding self/... | 3 | utils/category_tree.py | comptech-winter-school/online-store-redirects |
#!/usr/bin/python
# coding: utf-8
# -------------------------------------------------------------------
# Encryption365 AutoRenewal Client For ๅฎๅกLinux้ขๆฟ
# -------------------------------------------------------------------
# Copyright (c) 2020-2099 ็ฏๆบไธญ่ฏโข All rights reserved.
# -------------------------------------------------------------------
# Author: JasonLong <jasonlong@qiaokr.com>
# -------------------------------------------------------------------
# -------------------------------------------------------------------
# Encryption365 AutoRenewal Client
# -----------------------------------------------------------------
import AutoRenew
# Ensure psutil is importable: try a plain import first, then fall back to
# installing it via pip (upgrading pip itself if the first install fails).
# NOTE(review): `public` appears to come from the BT-panel runtime — confirm.
try:
    import psutil
except:
    try:
        public.ExecShell("pip install psutil")
        import psutil
    except:
        public.ExecShell("pip install --upgrade pip")
        public.ExecShell("pip install psutil")
        import psutil
# Write this program's PID to the lock file
def write_pid():
    """Record the current process id in the autodellog.pid lock file.

    Bug fix: the original existence check looked at 'autodellogs.pid'
    (extra 's') while both branches wrote 'autodellog.pid', so the check
    could never match and both branches performed the identical write.
    A single unconditional write preserves the intended behavior.
    """
    pid_path = panelPath + '/plugin/encryption365/src/autodellog.pid'
    with open(pid_path, 'w') as f:
        f.write(str(os.getpid()))
# Read this program's PID from the lock file
def read_pid():
    """Return the stored PID string, or '0' when no pid file exists."""
    pid_path = panelPath + '/plugin/encryption365/src/autodellog.pid'
    if not os.path.exists(pid_path):
        return '0'
    with open(pid_path, 'r') as f:
        return f.read()
if __name__ == '__main__':
    # NOTE(review): read_pid() returns a string ('0' when no pid file), never
    # False, so `!= False` is always true; the branch hinges on
    # int(read_pid()) being non-zero. A non-numeric pid file would crash here.
    if read_pid() != False and int(read_pid()):
        pid = int(read_pid())
        if pid in psutil.pids():
            # A previous run is still alive; do not start another.
            print("ๅๅบไปปๅก่ฟๆชๆง่กๅฎๆ, ๆจๅบ: " + str(pid))
        else:
            write_pid()
            print("ๆฐไปปๅกๅผๅงๆง่ก")
            # Delete log entries older than 68 days
            AutoRenew.delete_expired_logs()
    else:
        write_pid()
        print("ๆฐไปปๅกๅผๅงๆง่ก")
        # Delete log entries older than 68 days
        AutoRenew.delete_expired_logs()
{
"point_num": 1,
"id": "every_function_has_docstring",
"question": "Does every function in this file have a docstring?",
"answer": false
},
{
"point_num": 2,
"id": "every_function_under_20_lines",
"question": "Is every function in this file shorter than 20 lines?",
"answer": tru... | 3 | src/AutoDelLogs.py | zhiiker/Encryption365_Baota |
import pytest
from thefuck.rules.git_remote_seturl_add import match, get_new_command
from thefuck.types import Command
@pytest.mark.parametrize('command', [
    Command('git remote set-url origin url', "fatal: No such remote")])
def test_match(command):
    # The rule fires when `git remote set-url` fails with "No such remote".
    assert match(command)
@pytest.mark.parametrize('command', [
    Command('git remote set-url origin url', ""),
    Command('git remote add origin url', ''),
    Command('git remote remove origin', ''),
    Command('git remote prune origin', ''),
    Command('git remote set-branches origin branch', '')])
def test_not_match(command):
    # Successful set-url (empty output) and other `git remote` subcommands
    # must not trigger the rule.
    assert not match(command)
@pytest.mark.parametrize('command, new_command', [
    (Command('git remote set-url origin git@github.com:nvbn/thefuck.git', ''),
     'git remote add origin git@github.com:nvbn/thefuck.git')])
def test_get_new_command(command, new_command):
    # A failed set-url on a missing remote is corrected to `remote add`.
    assert get_new_command(command) == new_command
| [
{
"point_num": 1,
"id": "all_params_annotated",
"question": "Does every function parameter in this file have a type annotation (excluding self/cls)?",
"answer": false
},
{
"point_num": 2,
"id": "all_function_names_snake_case",
"question": "Are all function names in this file written ... | 3 | tests/rules/test_git_remote_seturl_add.py | HiteshMah-Jan/thefuck |
import logging
from datetime import datetime
from dateutil.relativedelta import relativedelta
from .period import Period, Granularity
from .week import Week
class Year(Period):
    """A calendar-year period: [Jan 1 of `year`, Jan 1 of `year`+1)."""
    granularity = Granularity.YEAR

    def __init__(self, year):
        # Half-open range; end_datetime is exclusive.
        self.start_datetime = datetime(year, 1, 1)
        self.end_datetime = self.start_datetime + relativedelta(years=1)

    def __repr__(self):
        return self.start_datetime.strftime("%Y")

    # When getting weeks for a specified year, we expect to get 1-52 (or 53),
    # not the actual weeks for the datespan (default for Period class),
    # as they may start with for example 52 or end with 1.
    def get_weeks(self, exclude_partial=True):
        # NOTE(review): `exclude_partial` is accepted but never used here —
        # confirm whether partial weeks should be filtered as in Period.
        end_date = self.get_end_date()
        year = end_date.year
        last_week = end_date.isocalendar()[1]
        # If the final date falls in ISO week 1 (of the following year), step
        # back one week so last_week is the year's true last ISO week (52/53).
        if last_week == 1:
            end_date = end_date - relativedelta(days=7)
            last_week = end_date.isocalendar()[1]
        weeks = []
        for c in range(0, last_week):
            week = Week(year, c + 1)
            weeks.append(week)
        return weeks
| [
{
"point_num": 1,
"id": "all_function_names_snake_case",
"question": "Are all function names in this file written in snake_case?",
"answer": true
},
{
"point_num": 2,
"id": "every_function_has_docstring",
"question": "Does every function in this file have a docstring?",
"answer":... | 3 | periods/year.py | iloob/python-periods |
from guillotina import app_settings
from guillotina import configure
from guillotina.content import get_cached_factory
from guillotina.interfaces import IConstrainTypes
from guillotina.interfaces import IDatabase
from guillotina.interfaces import IResource
from typing import List
from typing import Optional
from zope.interface import Interface
@configure.adapter(for_=Interface, provides=IConstrainTypes)
class FTIConstrainAllowedTypes:
    """Resolve addable-type constraints from the context's type factory."""

    def __init__(self, context: IResource) -> None:
        self.context = context

    def is_type_allowed(self, type_id: str) -> bool:
        if type_id in app_settings["container_types"]:
            # Containers cannot be added inside containers
            return False
        allowed_types: Optional[List[str]] = self.get_allowed_types()
        if allowed_types is None:
            # Context does not define allowed types
            return self.is_globally_allowed(type_id)
        return type_id in allowed_types

    def get_allowed_types(self) -> Optional[List[str]]:
        # None means "no local restriction" (factory.allowed_types may be
        # None); a context without a type_name allows nothing ([]).
        tn = getattr(self.context, "type_name", None)
        if tn:
            factory = get_cached_factory(tn)
            return factory.allowed_types
        return []

    def is_globally_allowed(self, type_id: str) -> bool:
        factory = get_cached_factory(type_id)
        return factory.globally_addable
@configure.adapter(for_=IDatabase, provides=IConstrainTypes)
class DatabaseAllowedTypes:
    """
    Can only add containers to databases
    """

    def __init__(self, context: IResource) -> None:
        self.context = context

    def is_type_allowed(self, type_id: str) -> bool:
        # Only the configured container types may be created at database level.
        return type_id in app_settings["container_types"]

    def get_allowed_types(self) -> Optional[List[str]]:
        return app_settings["container_types"]
| [
{
"point_num": 1,
"id": "all_function_names_snake_case",
"question": "Are all function names in this file written in snake_case?",
"answer": true
},
{
"point_num": 2,
"id": "no_function_exceeds_5_params",
"question": "Does every function in this file take 5 or fewer parameters (exclu... | 3 | guillotina/constraintypes.py | rboixaderg/guillotina |
# Implements I/O over asynchronous sockets
from time import time
from sys import exc_info
from traceback import format_exception
from asyncore import socket_map
from asyncore import loop
from pysnmp.carrier.base import AbstractTransportDispatcher
from pysnmp.error import PySnmpError
class AsyncoreDispatcher(AbstractTransportDispatcher):
    """Transport dispatcher driving SNMP I/O through the asyncore event loop."""

    def __init__(self):
        self.__sockMap = {}  # use own map for MT safety
        self.timeout = 0.5   # default per-iteration poll timeout (seconds)
        AbstractTransportDispatcher.__init__(self)

    def getSocketMap(self):
        return self.__sockMap

    def setSocketMap(self, sockMap=socket_map):
        self.__sockMap = sockMap

    def registerTransport(self, tDomain, t):
        AbstractTransportDispatcher.registerTransport(self, tDomain, t)
        t.registerSocket(self.__sockMap)

    def unregisterTransport(self, tDomain):
        self.getTransport(tDomain).unregisterSocket(self.__sockMap)
        AbstractTransportDispatcher.unregisterTransport(self, tDomain)

    def transportsAreWorking(self):
        # Truthy (1) while any registered transport still has data to write.
        for transport in self.__sockMap.values():
            if transport.writable():
                return 1
        return 0

    def runDispatcher(self, timeout=0.0):
        """Run the event loop until all jobs and pending writes complete.

        :param timeout: per-iteration poll timeout; 0 uses ``self.timeout``.
        :raises PySnmpError: wrapping any unexpected loop failure.
        """
        while self.jobsArePending() or self.transportsAreWorking():
            try:
                # `timeout or self.timeout` replaces the convoluted
                # `timeout and timeout or self.timeout` idiom (equivalent for
                # numeric timeouts, clearer intent).
                loop(timeout or self.timeout,
                     use_poll=True, map=self.__sockMap, count=1)
            except KeyboardInterrupt:
                raise
            # Narrowed from a bare `except:` so SystemExit and other
            # BaseExceptions propagate instead of being re-labelled.
            except Exception:
                raise PySnmpError('poll error: %s' % ';'.join(format_exception(*exc_info())))
            self.handleTimerTick(time())
| [
{
"point_num": 1,
"id": "every_function_under_20_lines",
"question": "Is every function in this file shorter than 20 lines?",
"answer": true
},
{
"point_num": 2,
"id": "all_params_annotated",
"question": "Does every function parameter in this file have a type annotation (excluding se... | 3 | scalyr_agent/third_party/pysnmp/carrier/asyncore/dispatch.py | code-sauce/scalyr-agent-2 |
from checkov.common.models.enums import CheckResult, CheckCategories
from checkov.terraform.checks.resource.base_resource_check import BaseResourceCheck
class PasswordPolicyExpiration(BaseResourceCheck):
    """Ensure the IAM account password policy expires passwords in 90 days or less."""

    def __init__(self):
        name = "Ensure IAM password policy expires passwords within 90 days or less"
        id = "CKV_AWS_9"
        supported_resources = ['aws_iam_account_password_policy']
        categories = [CheckCategories.IAM]
        super().__init__(name=name, id=id, categories=categories, supported_resources=supported_resources)

    def scan_resource_conf(self, conf):
        """
        validates iam password policy
        https://www.terraform.io/docs/providers/aws/r/iam_account_password_policy.html
        :param conf: aws_iam_account_password_policy configuration
        :return: <CheckResult>
        """
        key = 'max_password_age'
        if key in conf.keys():
            # Bug fix: the original passed when max_password_age was *at
            # least* 90 days, rewarding longer-lived passwords. Compliance
            # requires the maximum age to be 90 days or less.
            if conf[key][0] <= 90:
                return CheckResult.PASSED
        return CheckResult.FAILED
check = PasswordPolicyExpiration()
| [
{
"point_num": 1,
"id": "more_functions_than_classes",
"question": "Does this file define more functions than classes?",
"answer": true
},
{
"point_num": 2,
"id": "every_function_under_20_lines",
"question": "Is every function in this file shorter than 20 lines?",
"answer": true
... | 3 | checkov/terraform/checks/resource/aws/PasswordPolicyExpiration.py | gustavotabares/checkov |
# coding: utf-8
"""
Healthbot APIs
API interface for Healthbot application # noqa: E501
OpenAPI spec version: 3.1.0
Contact: healthbot-feedback@juniper.net
Generated by: https://github.com/swagger-api/swagger-codegen.git
"""
from __future__ import absolute_import
import unittest
import swagger_client
from swagger_client.models.topic_field_capture_schema import TopicFieldCaptureSchema # noqa: E501
from swagger_client.rest import ApiException
class TestTopicFieldCaptureSchema(unittest.TestCase):
    """TopicFieldCaptureSchema unit test stubs (swagger-codegen generated)."""

    def setUp(self):
        # No fixtures needed for these generated stubs.
        pass

    def tearDown(self):
        pass

    def testTopicFieldCaptureSchema(self):
        """Test TopicFieldCaptureSchema"""
        # FIXME: construct object with mandatory attributes with example values
        # model = swagger_client.models.topic_field_capture_schema.TopicFieldCaptureSchema()  # noqa: E501
        pass
if __name__ == '__main__':
unittest.main()
| [
{
"point_num": 1,
"id": "every_function_has_docstring",
"question": "Does every function in this file have a docstring?",
"answer": false
},
{
"point_num": 2,
"id": "all_function_names_snake_case",
"question": "Are all function names in this file written in snake_case?",
"answer"... | 3 | docs/jnpr_healthbot_swagger/test/test_topic_field_capture_schema.py | Juniper/healthbot-py-client |
#!/usr/bin/env python
# coding: utf-8
# In[ ]:
from django.shorcurts import render
from django.http import Http404
from models import contato
def index(request):
    """Render the contact list page."""
    # NOTE(review): the file imports `contato` (lowercase) but uses `Contato`
    # here — as written this raises NameError; confirm the intended import.
    contatos = Contato.objects.all()
    return render(request, 'contatos/index.html', {
        'contatos': contatos
    })
def ver_contato(request, contato_id):
    """Render a single contact, returning 404 when the id does not exist."""
    try:
        contato = Contato.objects.get(id=contato_id)
    except Contato.DoesNotExist:
        # Bug fix: the exception class is `DoesNotExist`, not `DoesNotExists`;
        # the original except clause itself raised AttributeError.
        raise Http404()
    return render(request, 'contato/ver_contato.html', {
        'contato': contato
    })
| [
{
"point_num": 1,
"id": "no_function_exceeds_5_params",
"question": "Does every function in this file take 5 or fewer parameters (excluding self/cls)?",
"answer": true
},
{
"point_num": 2,
"id": "every_function_has_docstring",
"question": "Does every function in this file have a docs... | 3 | Levantando erros 404.py | dimagela29/Python-POO |
import sys
sys.path.append('../G26/Reportes')
from graphviz import Graph
class Grafo():
    """Thin wrapper around graphviz.Graph for rendering report trees."""

    def __init__(self, index):
        self.index = index
        self.dot = Graph()
        # Fixed report style: straight edges, circular nodes, blue edges.
        self.dot.attr(splines='false')
        self.dot.node_attr.update(shape='circle')
        self.dot.edge_attr.update(color='blue4')

    def newnode(self, label):
        """Create a new numbered node and make it the current one."""
        self.index += 1
        self.dot.node(str(self.index), str(label))

    def newchildrenE(self, label):
        """Attach a leaf labeled `label` to the current node."""
        child_id = str(self.index) + '_' + str(label)
        self.dot.node(child_id, str(label))
        self.dot.edge(str(self.index), child_id)

    def newchildrenF(self, father, son):
        """Connect two existing nodes by id."""
        self.dot.edge(str(father), str(son))

    def showtree(self):
        """Render the graph and open it in the default viewer."""
        self.dot.view()
{
"point_num": 1,
"id": "every_function_under_20_lines",
"question": "Is every function in this file shorter than 20 lines?",
"answer": true
},
{
"point_num": 2,
"id": "no_function_exceeds_5_params",
"question": "Does every function in this file take 5 or fewer parameters (excluding ... | 3 | parser/team26/G26/Reportes/graph.py | webdev188/tytus |
from transformers import EvalPrediction
from sklearn.metrics import precision_recall_fscore_support
import numpy as np
def compute_metrics(pred: EvalPrediction):
    """Compute recall at the masked position
    """
    # Positions labeled -100 are padding/unmasked and must be ignored.
    keep = pred.label_ids != -100
    gold = pred.label_ids[keep].flatten()
    guessed = pred.predictions[keep].flatten()
    _, recall, _, _ = precision_recall_fscore_support(
        y_true=gold, y_pred=guessed, average='micro')
    return {'recall': recall}
def self_test():
    """Sanity-check compute_metrics: 4 masked positions, 3 correct -> 0.75."""
    pred = EvalPrediction(
        label_ids=np.array([
            [-100, 1, -100],
            [2, -100, -100],
            [-100, -100, 3],
            [-100, -100, 4]
        ]),
        predictions=np.array([
            [-100, 1, -100],  # 1 true positive
            [2, -100, -100],  # 1 true positive
            [2, 6, 8],        # 1 false positive, irrelevant pos will be ignored
            [1, 7, 4]         # 1 true positive, irrelevant pos will be ignored
        ])
    )
    m = compute_metrics(pred)
    print(f"recall={m['recall']}")
    assert m['recall'] == 0.75
    print("Looks like it is working!")
if __name__ == "__main__":
    # Run the built-in sanity check when executed as a script.
    self_test()
| [
{
"point_num": 1,
"id": "every_function_has_docstring",
"question": "Does every function in this file have a docstring?",
"answer": false
},
{
"point_num": 2,
"id": "has_nested_function_def",
"question": "Does this file contain any function defined inside another function?",
"ans... | 3 | src/lm/metrics.py | source-data/soda-roberta |
import os
import uuid

from aiohttp import web

from app.utility.logger import Logger
class FileSvc:
    """aiohttp handlers for payload downloads and exfil uploads."""

    def __init__(self, payload_dirs, exfil_dir):
        # Directories searched (recursively) for downloadable payloads.
        self.payload_dirs = payload_dirs
        self.log = Logger('file_svc')
        # Base directory under which per-upload subdirectories are created.
        self.exfil_dir = exfil_dir

    async def download(self, request):
        """Serve the file named in the 'file' request header, or 404."""
        name = request.headers.get('file')
        file_path, headers = await self.find_file(name)
        if file_path:
            self.log.debug('downloading %s...' % name)
            return web.FileResponse(path=file_path, headers=headers)
        return web.HTTPNotFound(body='File not found')

    async def find_file(self, name):
        """Walk every payload dir; return (path, headers) or (None, None)."""
        for store in self.payload_dirs:
            for root, dirs, files in os.walk(store):
                if name in files:
                    # Force clients to download rather than render the file.
                    headers = dict([('CONTENT-DISPOSITION', 'attachment; filename="%s"' % name)])
                    return os.path.join(root, name), headers
        return None, None

    async def upload(self, request):
        """Stream multipart file uploads into a fresh exfil subdirectory."""
        try:
            reader = await request.multipart()
            exfil_dir = await self._create_unique_exfil_sub_directory()
            while True:
                field = await reader.next()
                if not field:
                    break
                filename = field.filename
                # Stream each part to disk chunk-by-chunk to bound memory use.
                with open(os.path.join(exfil_dir, filename), 'wb') as f:
                    while True:
                        chunk = await field.read_chunk()
                        if not chunk:
                            break
                        f.write(chunk)
                self.log.debug('Uploaded file %s' % filename)
            return web.Response()
        except Exception as e:
            # NOTE(review): on failure this logs and implicitly returns None,
            # which aiohttp rejects — confirm an error response is intended.
            self.log.debug('Exception uploading file %s' % e)

    """ PRIVATE """

    async def _create_unique_exfil_sub_directory(self):
        # Random UUID keeps concurrent uploads from colliding.
        dir_name = str(uuid.uuid4())
        path = os.path.join(self.exfil_dir, dir_name)
        os.makedirs(path)
        return path
| [
{
"point_num": 1,
"id": "more_functions_than_classes",
"question": "Does this file define more functions than classes?",
"answer": true
},
{
"point_num": 2,
"id": "all_params_annotated",
"question": "Does every function parameter in this file have a type annotation (excluding self/cl... | 3 | app/service/file_svc.py | FumblingBear/caldera |
from dowel import logger
import numpy as np
from garage.sampler.utils import truncate_paths
from tests.fixtures.logger import NullOutput
class TestSampler:
    """Tests for garage.sampler.utils.truncate_paths."""

    def setup_method(self):
        # Silence dowel logging while the test runs.
        logger.add_output(NullOutput())

    def teardown_method(self):
        logger.remove_all()

    def test_truncate_paths(self):
        """Truncating to 130 samples keeps path 0 whole and cuts path 1 to 30."""
        paths = [
            dict(
                observations=np.zeros((100, 1)),
                actions=np.zeros((100, 1)),
                rewards=np.zeros(100),
                env_infos=dict(),
                agent_infos=dict(lala=np.zeros(100)),
            ),
            dict(
                observations=np.zeros((50, 1)),
                actions=np.zeros((50, 1)),
                rewards=np.zeros(50),
                env_infos=dict(),
                agent_infos=dict(lala=np.zeros(50)),
            ),
        ]
        truncated = truncate_paths(paths, 130)
        assert len(truncated) == 2
        assert len(truncated[-1]['observations']) == 30
        assert len(truncated[0]['observations']) == 100
        # make sure not to change the original one
        assert len(paths) == 2
        assert len(paths[-1]['observations']) == 50
| [
{
"point_num": 1,
"id": "every_function_has_docstring",
"question": "Does every function in this file have a docstring?",
"answer": false
},
{
"point_num": 2,
"id": "all_function_names_snake_case",
"question": "Are all function names in this file written in snake_case?",
"answer"... | 3 | tests/garage/sampler/test_sampler.py | st2yang/garage |
"""Add locale to user table
Revision ID: aefa596e7114
Revises: d5715c70e375
Create Date: 2020-10-08 12:06:27.967777
"""
from alembic import op
import sqlalchemy as sa
# revision identifiers, used by Alembic.
revision = "aefa596e7114"
down_revision = "d5715c70e375"
branch_labels = None
depends_on = None
def upgrade():
    """Apply: add the nullable 16-char `locale` column to the user table."""
    # ### commands auto generated by Alembic - please adjust! ###
    op.add_column("user", sa.Column("locale", sa.String(length=16), nullable=True))
    # ### end Alembic commands ###
def downgrade():
    """Revert: drop the `locale` column from the user table."""
    # ### commands auto generated by Alembic - please adjust! ###
    op.drop_column("user", "locale")
    # ### end Alembic commands ###
| [
{
"point_num": 1,
"id": "has_nested_function_def",
"question": "Does this file contain any function defined inside another function?",
"answer": false
},
{
"point_num": 2,
"id": "all_return_types_annotated",
"question": "Does every function in this file have a return type annotation?... | 3 | migrations/versions/aefa596e7114_add_locale_to_user_table.py | mutalisk999/Flog |
from metrics import MetricFunctionNYUv2, print_single_error
from model import SupervisedLossFunction
from torch.utils.data import DataLoader
from torchvision import transforms
from nyuv2 import NYUv2
from tqdm import tqdm
from general import generate_layers, load_checkpoint, tensors_to_device
import torch
from torchvision.models.segmentation.segmentation import fcn_resnet50
num_layers = 3
def runmodel(model, imgs, depths):
    """Run the model on each depth layer and stack outputs on a new last dim."""
    # NOTE(review): assumes generate_layers yields `num_layers` image batches —
    # confirm the shape contract with general.generate_layers.
    layers = generate_layers(imgs, depths, num_layers)
    x = [model(x)['out'] for x in layers]
    return torch.stack(x, dim=-1)
def run_test_nyuv2(model, dataloader, loss_fn, metric_fn):
    """Evaluate `model` over the NYUv2 dataloader, accumulating loss/metrics."""
    loop = tqdm(dataloader, position=0, leave=True)
    for i, tensors in enumerate(loop):
        imgs, seg13, normals, depths = tensors_to_device(tensors, DEVICE)
        with torch.no_grad():  # inference only — no gradients needed
            predictions = runmodel(model, imgs, depths)
        # Loss/metric objects accumulate state internally (read via .show()).
        loss_fn(predictions, (normals, depths))
        metric_fn.evaluate(predictions, (seg13, normals, depths))
    loop.close()
# --- Evaluation script: load checkpoint, run the NYUv2 test split, report. ---
DEVICE = "cuda" if torch.cuda.is_available() else "cpu"
model = fcn_resnet50(pretrained=False, num_classes=14)
model = model.to(DEVICE)
epoch_idx, model = load_checkpoint(model, "fcnmodel.pth", DEVICE)
t = transforms.Compose([transforms.Resize((256, 256)), transforms.ToTensor()])
test_dataset = NYUv2(root="../NYUv2", download=True, rgb_transform=t, seg_transform=t, sn_transform=t, depth_transform=t, train=False)
dataloader = DataLoader(test_dataset, batch_size=2, shuffle=True)
loss_fn = SupervisedLossFunction()
metric_fn = MetricFunctionNYUv2(2)
model.eval()  # disable dropout / batch-norm updates for evaluation
run_test_nyuv2(model, dataloader, loss_fn, metric_fn)
print_single_error(epoch_idx, loss_fn.show(), metric_fn.show())
{
"point_num": 1,
"id": "has_nested_function_def",
"question": "Does this file contain any function defined inside another function?",
"answer": false
},
{
"point_num": 2,
"id": "all_params_annotated",
"question": "Does every function parameter in this file have a type annotation (ex... | 3 | fcntest.py | alexjercan/unsupervised-segmentation |
from django.shortcuts import render, redirect
from django.http import HttpResponse
from django.core.paginator import Paginator
from .models import Subject
import json
def index(request):
    """List all subjects; on POST, filter by the submitted title substring."""
    if request.method == "POST":
        value = request.POST.get("subject_title")
        subject_list = Subject.objects.filter(title__contains=value)
        return render(request, "index.html", {"subjects": subject_list})
    subject_list = Subject.objects.all()
    return render(request, "index.html", {"subjects": subject_list})
def subject_view(request, subject_name):
    """Render the data table for one subject.

    The URL slug is split on '-' and only the first token is used to look
    the subject up by case-insensitive title prefix.
    """
    names = subject_name.split('-')
    title = Subject.objects.get(title__istartswith=names[0])
    # Table rows are pre-generated JSON files keyed by the subject title.
    with open(f"data/{title}.json", "r") as file:
        data = json.load(file)
    return render(request, "table.html", {"subject": title, "data": data})
def autocomplete(request):
    """Return subject titles matching the autocomplete 'term' as a JSON list.

    Non-AJAX requests get the literal body 'fail' (legacy behavior, kept).
    """
    # NOTE: request.is_ajax() is removed in Django 4.0; on newer Django,
    # check the X-Requested-With header instead.
    if request.is_ajax():
        q = request.GET.get('term', '')
        subjects = Subject.objects.filter(title__contains=q)
        # The original built an empty dict per row and immediately replaced it
        # with the title string; a comprehension removes that dead code.
        results = [subject.title for subject in subjects]
        data = json.dumps(results)
    else:
        data = 'fail'
    mimetype = 'application/json'
    return HttpResponse(data, mimetype)
| [
{
"point_num": 1,
"id": "every_function_has_docstring",
"question": "Does every function in this file have a docstring?",
"answer": false
},
{
"point_num": 2,
"id": "all_function_names_snake_case",
"question": "Are all function names in this file written in snake_case?",
"answer"... | 3 | finder/views.py | plaunezkiy/unibase |
from django.db import models
from django.contrib.auth.models import AbstractBaseUser
from django.contrib.auth.models import PermissionsMixin
from django.contrib.auth.models import BaseUserManager
from django.conf import settings
# Create your models here.
class UserProfileManager(BaseUserManager):
    """ Manager for user profiles """

    def create_user(self, email, name, password=None):
        """ Create and save a new user profile; returns the user. """
        if not email:
            raise ValueError('User must have an email address')
        # Lower-cases the domain part so lookups are case-insensitive.
        email = self.normalize_email(email)
        user = self.model(email=email, name=name)
        user.set_password(password)  # hashes rather than storing plaintext
        user.save(using=self._db)
        return user

    def create_superuser(self, email, name, password):
        """ Create and save a new superuser with given details """
        user = self.create_user(email, name, password)
        user.is_superuser = True
        user.is_staff = True
        user.save(using=self._db)
        # Bug fix: Django's manager contract expects the created user to be
        # returned; the original implicitly returned None.
        return user
class UserProfile(AbstractBaseUser, PermissionsMixin):
    """
    Database model for users in the system
    """
    # Email doubles as the login identifier (see USERNAME_FIELD below).
    email = models.EmailField(max_length=255, unique=True)
    name = models.CharField(max_length=255)
    is_active = models.BooleanField(default=True)
    is_staff = models.BooleanField(default=False)

    objects = UserProfileManager()

    USERNAME_FIELD = 'email'
    REQUIRED_FIELDS = ['name', ]

    def get_full_name(self):
        """ Retrieve full name of user """
        return self.name

    def get_short_name(self):
        """ Retrieve short name of user """
        return self.name

    def __str__(self):
        """ Return string representation of our user """
        return self.email
class ProfileFeedItem(models.Model):
    """ Profile status update """
    # Deleting the user cascades to their feed items.
    user_profile = models.ForeignKey(
        settings.AUTH_USER_MODEL, on_delete=models.CASCADE,)
    status_text = models.CharField(max_length=255)
    created_on = models.DateTimeField(auto_now_add=True)

    def __str__(self):
        """ Return the status text as the model's representation. """
        return self.status_text
| [
{
"point_num": 1,
"id": "more_functions_than_classes",
"question": "Does this file define more functions than classes?",
"answer": true
},
{
"point_num": 2,
"id": "all_function_names_snake_case",
"question": "Are all function names in this file written in snake_case?",
"answer": ... | 3 | profiles_api/models.py | Chinmay-395/profiles-rest-api |
from unittest import TestCase
import simplejson as json
class TestBigintAsString(TestCase):
    """Verify bigint_as_string stringifies ints outside the double-exact range.

    Values with |n| >= 2**53 cannot be represented exactly as IEEE-754
    doubles, so simplejson's bigint_as_string=True renders them as strings.
    Uses assertEqual throughout — assertEquals is a deprecated alias that
    was removed in Python 3.12.
    """
    values = [(200, 200),
              ((2 ** 53) - 1, 9007199254740991),
              ((2 ** 53), '9007199254740992'),
              ((2 ** 53) + 1, '9007199254740993'),
              (-100, -100),
              ((-2 ** 53), '-9007199254740992'),
              ((-2 ** 53) - 1, '-9007199254740993'),
              ((-2 ** 53) + 1, -9007199254740991)]

    def test_ints(self):
        for val, expect in self.values:
            self.assertEqual(
                val,
                json.loads(json.dumps(val)))
            self.assertEqual(
                expect,
                json.loads(json.dumps(val, bigint_as_string=True)))

    def test_lists(self):
        for val, expect in self.values:
            val = [val, val]
            expect = [expect, expect]
            self.assertEqual(
                val,
                json.loads(json.dumps(val)))
            self.assertEqual(
                expect,
                json.loads(json.dumps(val, bigint_as_string=True)))

    def test_dicts(self):
        for val, expect in self.values:
            val = {'k': val}
            expect = {'k': expect}
            self.assertEqual(
                val,
                json.loads(json.dumps(val)))
            self.assertEqual(
                expect,
                json.loads(json.dumps(val, bigint_as_string=True)))

    def test_dict_keys(self):
        for val, _ in self.values:
            # JSON object keys are always strings, regardless of bigint mode.
            expect = {str(val): 'value'}
            val = {val: 'value'}
            self.assertEqual(
                expect,
                json.loads(json.dumps(val)))
            self.assertEqual(
                expect,
                json.loads(json.dumps(val, bigint_as_string=True)))
| [
{
"point_num": 1,
"id": "more_functions_than_classes",
"question": "Does this file define more functions than classes?",
"answer": true
},
{
"point_num": 2,
"id": "all_function_names_snake_case",
"question": "Are all function names in this file written in snake_case?",
"answer": ... | 3 | misc/seqan_instrumentation/bin/classes/simplejson/tests/test_bigint_as_string.py | weese/seqan |
from finesm import StateMachine, State
class SimpleStateMachine(StateMachine):
    """Two-state fixture: `waiting` (default) moves to `running` on 'start'."""
    waiting = State(default=True)
    running = State()

    def __init__(self):
        super(SimpleStateMachine, self).__init__()
        # Flags flipped by the exit/enter hooks; counter bumped on update.
        self.foo = False
        self.bar = False
        self.updoot = 0

    @waiting.on_message('start')
    def waiting_start(self):
        self.set_state(self.running)

    @waiting.on_exit
    def waiting_exit(self):
        self.foo = True

    @running.on_enter()
    def running_enter(self):
        self.bar = True

    @running.on_update
    def runnin_update(self):  # NOTE(review): likely meant "running_update"
        self.updoot += 1
def test_state_machine():
    """Exercise the waiting -> running lifecycle of SimpleStateMachine."""
    state_machine = SimpleStateMachine()
    assert state_machine.state == SimpleStateMachine.waiting
    assert state_machine.foo is False
    assert state_machine.bar is False
    # Updates while `waiting` do nothing (no on_update hook there).
    state_machine.update()
    assert state_machine.updoot == 0
    # 'start' triggers waiting.on_exit (foo) and running.on_enter (bar).
    state_machine.send_message('start')
    assert state_machine.foo is True
    assert state_machine.bar is True
    assert state_machine.state == SimpleStateMachine.running
    # Each update in `running` bumps the counter.
    state_machine.update()
    state_machine.update()
    assert state_machine.updoot == 2
| [
{
"point_num": 1,
"id": "more_functions_than_classes",
"question": "Does this file define more functions than classes?",
"answer": true
},
{
"point_num": 2,
"id": "has_multiple_inheritance",
"question": "Does any class in this file use multiple inheritance?",
"answer": false
},... | 3 | tests/test_state_machine.py | wesleyks/fine_sm |
#!/usr/bin/python
# -*- coding: UTF-8 -*-
"""
@author: Alan
@time: 2021/05/18
"""
from concurrent.futures import ThreadPoolExecutor, wait, ALL_COMPLETED
import traceback
class MultiThread(ThreadPoolExecutor):
    """ThreadPoolExecutor that logs exceptions from every submitted task."""

    def __init__(self, max_workers=None, thread_name_prefix=''):
        super().__init__(max_workers, thread_name_prefix)

    def thread_log(self, worker):
        """Done-callback: print a worker's traceback, return its result if any."""
        try:
            result = worker.result()
            return result
        except Exception:
            # Narrowed from a bare `except:` so KeyboardInterrupt/SystemExit
            # are not swallowed; task failures are still only logged here.
            traceback.print_exc()

    def execute(self, fn, *args, **kwargs):
        """Submit *fn* with the exception-logging callback attached."""
        thread = self.submit(fn, *args, **kwargs)
        thread.add_done_callback(self.thread_log)
        return thread

    @staticmethod
    def execute_after_done(fn, workers, *args, **kwargs):
        """Block until *workers* finish (24h cap), then run and return *fn*."""
        wait(workers, timeout=86400, return_when=ALL_COMPLETED)
        return fn(*args, **kwargs)
| [
{
"point_num": 1,
"id": "every_function_has_docstring",
"question": "Does every function in this file have a docstring?",
"answer": false
},
{
"point_num": 2,
"id": "all_function_names_snake_case",
"question": "Are all function names in this file written in snake_case?",
"answer"... | 3 | WebSpider/threads.py | bianQ/similarweb |
from __future__ import print_function
import argparse
import sys
from costar_task_plan.simulation import GetSimulationParser
def GetLaunchOptions():
    '''
    These are the files that actually set up the environment
    '''
    launch_files = ["ur5", "husky", "fetch"]
    return launch_files
def GetExperimentOptions():
    '''
    Each of these needs to be handled separately later on
    '''
    experiments = [
        "magnetic_assembly",
        "stack",
        "tables",
        "navigation",
    ]
    return experiments
def _assemblyCases():
cases = ["double","training","finished_assembly"]
for i in range(1,11):
cases.append("double%d"%i)
for i in range(1,8):
cases.append("training%d"%i)
for i in range(1,3):
cases.append("assembly%d"%i)
return cases
def ParseGazeboArgs():
    """Build the Gazebo CLI argument parser and return parsed args as a dict."""
    parser = GetSimulationParser()
    parser.add_argument('--launch',
                        help="ROS launch file to start Gazebo simulation",
                        default="ur5",
                        choices=GetLaunchOptions())
    parser.add_argument("--experiment",
                        help="Experiment file that configures task",
                        default="magnetic_assembly",
                        choices=GetExperimentOptions())
    parser.add_argument("--case",
                        help="Case for magnetic assembly experiment",
                        default="assembly1",
                        choices=_assemblyCases())
    parser.add_argument("--gzclient",
                        help="Bring up the gazebo client",
                        action="store_true")
    # vars() converts the argparse Namespace into a plain dict for callers.
    return vars(parser.parse_args())
| [
{
"point_num": 1,
"id": "all_function_names_snake_case",
"question": "Are all function names in this file written in snake_case?",
"answer": false
},
{
"point_num": 2,
"id": "every_function_has_docstring",
"question": "Does every function in this file have a docstring?",
"answer"... | 3 | costar_simulation/python/costar_simulation/parse.py | cpaxton/costar_plan |
import unittest
from grains import (
square,
total,
)
# Tests adapted from `problem-specifications//canonical-data.json`
# Tests adapted from `problem-specifications//canonical-data.json`
class GrainsTest(unittest.TestCase):
    """Chessboard-grains exercise: square(n) == 2**(n-1); total() == 2**64 - 1."""

    def test_grains_on_square_1(self):
        self.assertEqual(square(1), 1)

    def test_grains_on_square_2(self):
        self.assertEqual(square(2), 2)

    def test_grains_on_square_3(self):
        self.assertEqual(square(3), 4)

    def test_grains_on_square_4(self):
        self.assertEqual(square(4), 8)

    def test_grains_on_square_16(self):
        self.assertEqual(square(16), 32768)

    def test_grains_on_square_32(self):
        self.assertEqual(square(32), 2147483648)

    def test_grains_on_square_64(self):
        self.assertEqual(square(64), 9223372036854775808)

    def test_square_0_raises_an_exception(self):
        # Squares are 1-based; 0 is out of range.
        with self.assertRaises(ValueError) as err:
            square(0)
        self.assertEqual(type(err.exception), ValueError)
        self.assertEqual(err.exception.args[0], "square must be between 1 and 64")

    def test_negative_square_raises_an_exception(self):
        with self.assertRaises(ValueError) as err:
            square(-1)
        self.assertEqual(type(err.exception), ValueError)
        self.assertEqual(err.exception.args[0], "square must be between 1 and 64")

    def test_square_greater_than_64_raises_an_exception(self):
        # A chessboard has exactly 64 squares.
        with self.assertRaises(ValueError) as err:
            square(65)
        self.assertEqual(type(err.exception), ValueError)
        self.assertEqual(err.exception.args[0], "square must be between 1 and 64")

    def test_returns_the_total_number_of_grains_on_the_board(self):
        self.assertEqual(total(), 18446744073709551615)
| [
{
"point_num": 1,
"id": "more_functions_than_classes",
"question": "Does this file define more functions than classes?",
"answer": true
},
{
"point_num": 2,
"id": "all_function_names_snake_case",
"question": "Are all function names in this file written in snake_case?",
"answer": ... | 3 | exercises/practice/grains/grains_test.py | highb/python |
from threading import Thread, Event
from time import sleep
def func1():
    """Set the shared event after 2s, then clear it again 3s later."""
    sleep(2)  # Initially sleep for 2 secs
    myeventobj.set()  # E2: wakes func2's wait()
    print("func1 sleeping for 3 secs....")
    sleep(3)  # E3
    myeventobj.clear()  # E4: reset the internal flag
def func2():
print("Initially myeventobj is: ", myeventobj.isSet()) # E1
myeventobj.wait()
if myeventobj.isSet(): # E5
print("True when myeventobj.set() is called from func1 .i.e. Internal flag is set")
print("func2 sleeping for 4 secs....")
sleep(4) # E6
if myeventobj.isSet() == False: # E7
print("False when myeventobj.clear() is called from func1.i.e. Internal flag is reset")
myeventobj = Event()
myt1 = Thread(target=func1)
myt2 = Thread(target=func2)
myt1.start()
myt2.start()
myt1.join()
myt2.join()
print("Main Thread Completed")
| [
{
"point_num": 1,
"id": "has_nested_function_def",
"question": "Does this file contain any function defined inside another function?",
"answer": false
},
{
"point_num": 2,
"id": "every_function_has_docstring",
"question": "Does every function in this file have a docstring?",
"ans... | 3 | Chapter 10/Chap10_Example10.38.py | Anancha/Programming-Techniques-using-Python |
from ....constants import BINARY, MULTICLASS, REGRESSION
from autogluon.core import Categorical, Real, Int
def get_default_searchspace(problem_type, num_classes=None):
if problem_type == BINARY:
return get_searchspace_binary().copy()
elif problem_type == MULTICLASS:
return get_searchspace_multiclass(num_classes=num_classes)
elif problem_type == REGRESSION:
return get_searchspace_regression().copy()
else:
return get_searchspace_binary().copy()
def get_searchspace_binary():
spaces = {
# See docs: https://docs.fast.ai/tabular.models.html
'layers': Categorical(None, [200, 100], [200], [500], [1000], [500, 200], [50, 25], [1000, 500], [200, 100, 50], [500, 200, 100], [1000, 500, 200]),
'emb_drop': Real(0.0, 0.5, default=0.1),
'ps': Real(0.0, 0.5, default=0.1),
'bs': Categorical(256, 64, 128, 512, 1024, 2048, 4096),
'lr': Real(5e-5, 1e-1, default=1e-2, log=True),
'epochs': Int(lower=5, upper=30, default=30),
'early.stopping.min_delta': 0.0001,
'early.stopping.patience': 20,
'smoothing': Real(0.0, 0.3, default=0.0, log=True),
}
return spaces
def get_searchspace_multiclass(num_classes):
return get_searchspace_binary()
def get_searchspace_regression():
return get_searchspace_binary()
| [
{
"point_num": 1,
"id": "all_params_annotated",
"question": "Does every function parameter in this file have a type annotation (excluding self/cls)?",
"answer": false
},
{
"point_num": 2,
"id": "all_function_names_snake_case",
"question": "Are all function names in this file written ... | 3 | tabular/src/autogluon/tabular/models/fastainn/hyperparameters/searchspaces.py | RuohanW/autogluon |
from django.utils.functional import wraps
from caseworker.core.constants import Permission
from core.exceptions import PermissionDeniedError
from caseworker.core import helpers
def has_permission(permission: Permission):
"""
Decorator for views that checks that the user has a given permission
"""
def decorator(view_func):
@wraps(view_func)
def _wrapped_view(request, *args, **kwargs):
if helpers.has_permission(request, permission):
return view_func(request, *args, **kwargs)
raise PermissionDeniedError(
f"You don't have the permission '{permission.value}' to view this, "
"check urlpatterns or the function decorator if you want to change "
"this functionality."
)
return _wrapped_view
return decorator
| [
{
"point_num": 1,
"id": "every_function_under_20_lines",
"question": "Is every function in this file shorter than 20 lines?",
"answer": false
},
{
"point_num": 2,
"id": "no_function_exceeds_5_params",
"question": "Does every function in this file take 5 or fewer parameters (excluding... | 3 | caseworker/core/decorators.py | django-doctor/lite-frontend |
import turtle
def draw_square(some_turtle):
for i in range(1,5):
some_turtle.forward(100)
some_turtle.right(90)
def draw_art():
window = turtle.Screen()
window.bgcolor("blue")
img_tt = turtle.Turtle()
img_tt.shape("turtle")
img_tt.color("white")
img_tt.speed(2)
for i in range(1,37):
draw_square(img_tt)
img_tt.right(10)
"""
draw_square(img_tt)
angle = turtle.Turtle()
angle.shape("arrow")
angle.color("black")
angle.circle(100)
window.exitonclick()
"""
draw_art() | [
{
"point_num": 1,
"id": "every_function_has_docstring",
"question": "Does every function in this file have a docstring?",
"answer": false
},
{
"point_num": 2,
"id": "has_nested_function_def",
"question": "Does this file contain any function defined inside another function?",
"ans... | 3 | drawer.py | janakhpon/LearnPyDacity |
""" Exceptions mapped to error codes the JSON-rpc 2 spec.
-32768 to -32000 are reserved for pre-defined errors
code: -32700 message: Parse Error -> invalid json received by the server. Error occurred while parsing the json text
code: -32600 message: Invalid Request -> Json sent is not a valid Request object
code: -32601 message: Method not Found -> The method does not exist or is not available
code: -32602 message: Invalid Params -> Invalid method parameters
code: -32603 message: Internal Error -> Internal JSON-RPC error
code: -32000 -> -32099 message: Server Error -> Reserved for implementation defined server errors
"""
# ----------------- Exceptions thrown by the rpc server
class DeoException(Exception):
""" Defines a BaseException that contains a default message and JSON-rpc 2 error code """
error_code = None
message = ''
def __init__(self, *args, **kwargs):
if not (args or kwargs):
args = (self.message,)
super().__init__(*args, **kwargs)
class ParseError(DeoException):
error_code = -32700
message = 'An error occured while parsing the JSON text.'
class InvalidRequest(DeoException):
error_code = -32600
message = 'Not a valid request object'
class MethodNotFound(DeoException):
error_code = -32601
message = 'The requested method does not exist on this server.'
class InvalidParams(DeoException):
error_code = -32602
message = 'Invalid parameters passed to requested method.'
class InternalError(DeoException):
error_code = -32603
message = 'An internal error occurred.'
# ---- registry exception
class RegistryEntryError(KeyError, DeoException):
""" Raised if the key is not present in the WorfklowRegistry"""
| [
{
"point_num": 1,
"id": "has_multiple_inheritance",
"question": "Does any class in this file use multiple inheritance?",
"answer": true
},
{
"point_num": 2,
"id": "every_class_has_docstring",
"question": "Does every class in this file have a docstring?",
"answer": false
},
{
... | 3 | deo/exceptions.py | bsnacks000/deo |
# Copyright 2010-2019 Amazon.com, Inc. or its affiliates. All Rights Reserved.
#
# This file is licensed under the Apache License, Version 2.0 (the "License").
# You may not use this file except in compliance with the License. A copy of the
# License is located at
#
# http://aws.amazon.com/apache2.0/
#
# This file is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS
# OF ANY KIND, either express or implied. See the License for the specific
# language governing permissions and limitations under the License.
import logging
import redis
from botocore.exceptions import ClientError
def counter(client,key):
"""increment by 1 under specific key
:param client: redis.client
:param key: string
:return: True if it's connected
"""
# Connect to redis
#r = redis.Redis(host=endpoint, port=6379, db=0)
# Set the object
try:
client.incrby(key, 1)
except ClientError as e:
# AllAccessDisabled error == endpoint not found
logging.error(e)
return False
return True
def hcounter(client, key, field):
"""increment by 1 under specific field of the key
:param client: redis.client
:param key: string
:param field: string
:return: True if it's connected
"""
# Connect to redis
#r = redis.Redis(host=endpoint, port=6379, db=0)
# Set the object
try:
client.hincrby(key, field, 1)
except ClientError as e:
# AllAccessDisabled error == endpoint not found
logging.error(e)
return False
return True
def handler(event, context):
endpoint = "test.fifamc.ng.0001.euc1.cache.amazonaws.com"
client = redis.Redis(host=endpoint, port=6379, db=0)
print(counter(client,"LambdaML"))
| [
{
"point_num": 1,
"id": "every_function_has_docstring",
"question": "Does every function in this file have a docstring?",
"answer": false
},
{
"point_num": 2,
"id": "every_function_under_20_lines",
"question": "Is every function in this file shorter than 20 lines?",
"answer": fal... | 3 | archived/elasticache/Redis/counter.py | DS3Lab/LambdaML |
import base64
def makeEncryptString(Crypto, createLogger):
def encryptString(publicKey, string):
logger = createLogger(__name__)
pubKey = Crypto.PublicKey.RSA.importKey(str.encode(publicKey))
encryptedString = pubKey.encrypt(string.encode('utf-8'),5000)
base64StringEncrypted = base64.encodebytes(encryptedString[0]).decode()
# logger.info(f'encryptedString as text with base64 encoding : {base64StringEncrypted}')
return base64StringEncrypted
return encryptString
| [
{
"point_num": 1,
"id": "no_function_exceeds_5_params",
"question": "Does every function in this file take 5 or fewer parameters (excluding self/cls)?",
"answer": true
},
{
"point_num": 2,
"id": "has_nested_function_def",
"question": "Does this file contain any function defined insid... | 3 | api/use_cases/encryption/makeEncryptString.py | potofpie/message-frame |
from Classes_1 import Myclass_1
from Classes_1 import Myclass_2
from Classes_1 import MyClass_3
from Classes_1 import MyNomber
from Classes_1 import MyPercentClass
from Classes_1 import example
from Classes_1 import example_3
from Classes_1 import equations_1
from Classes_1 import equations_2
from Classes_1 import GetSetDemonstration_1
from Classes_1 import GetSetDemonstration_2
def def_for_Classes_1 ():
MyObject_1 = Myclass_1()
MyObject_1.x = 11.5
print(MyObject_1.x)
MyObject_2 = Myclass_2()
MyObject_2.x = 11.5
print(MyObject_2.x)
def printPetNames(owner, **pets):
print(f"Owner Name: {owner}")
for a, b in pets.items():
print(f"{a}: {b}")
printPetNames("Jonathan", dog="Brock", fish=["Larry", "Curly", "Moe"], turtle="Shelldon")
e1 = example(1, 2, 3, 6, 8)
print("Sum of list :", e1.answer)
e2 = example(6)
print("Square of integer :", e2.answer)
e3 = example("Programmers")
print("String :", e3.answer)
abc1 = equations_1(4, 2)
abc2 = equations_1(4, 2, 3)
abc3 = equations_1(1, 2, 3, 4, 5)
print("equation 1 :", abc1.ans)
print("equation 2 :", abc2.ans)
print("equation 3 :", abc3.ans)
# ะัะทะพะฒ ะดะปั ะฒะฐัะธะฐะฝัะฐ ะบะปะฐััะฐ ั ะฝะตัะบะพะปัะบะธะผะธ ะบะพะฝััััะบัะพัะฐะผะธ 3
li = [[4, 2], [4, 2, 3], [1, 2, 3, 4, 5]]
i = 0
while i < 3:
input = li[i]
# no.of.arguments = 2
if len(input) == 2:
p = equations_2.eq1(input)
print("equation 1 :", p.ans)
# no.of.arguments = 3
elif len(input) == 3:
p = equations_2.eq1(input)
print("equation 2 :", p.ans)
# More than three arguments
else:
p = equations_2.eq3(input)
print("equation 3 :", p.ans)
# increment loop
i += 1 | [
{
"point_num": 1,
"id": "every_function_under_20_lines",
"question": "Is every function in this file shorter than 20 lines?",
"answer": false
},
{
"point_num": 2,
"id": "any_function_over_40_lines",
"question": "Is any function in this file longer than 40 lines?",
"answer": true
... | 3 | def_for_Classes_1.py | AlekseyVinokurov/python_lesson_2 |
import websocket
import threading
from time import sleep
def on_message(ws, message):
print(message)
def on_close(ws):
print("closed")
if __name__ == "__main__":
websocket.enableTrace(True)
ws = websocket.WebSocketApp("ws://localhost:9001", on_message = on_message, on_close = on_close)
wst = threading.Thread(target=ws.run_forever)
wst.daemon = True
wst.start()
conn_timeout = 5
while not ws.sock.connected and conn_timeout:
sleep(1)
conn_timeout -= 1
msg_counter = 0
while ws.sock.connected:
#ws.send('Hello world %d'%msg_counter)
sleep(1)
msg_counter += 1 | [
{
"point_num": 1,
"id": "no_function_exceeds_5_params",
"question": "Does every function in this file take 5 or fewer parameters (excluding self/cls)?",
"answer": true
},
{
"point_num": 2,
"id": "every_function_has_docstring",
"question": "Does every function in this file have a docs... | 3 | non_block_client.py | baset-sarker/python-socket-smt |
from sqlalchemy import *
from migrate import *
import migrate.changeset
def upgrade(migrate_engine):
metadata = MetaData()
metadata.bind = migrate_engine
package_relationship_table = Table('package_relationship',
metadata, autoload=True)
package_relationship_revision_table = Table('package_relationship_revision',
metadata, autoload=True)
state_column = Column('state', UnicodeText)
state_column.create(package_relationship_table)
state_column = Column('state', UnicodeText)
state_column.create(package_relationship_revision_table)
# No package relationship objects exist to migrate, so no
# need to populate state column
def downgrade(migrate_engine):
raise NotImplementedError()
| [
{
"point_num": 1,
"id": "has_nested_function_def",
"question": "Does this file contain any function defined inside another function?",
"answer": false
},
{
"point_num": 2,
"id": "every_function_under_20_lines",
"question": "Is every function in this file shorter than 20 lines?",
... | 3 | ckan/migration/versions/019_pkg_relationships_state.py | florianm/ckan |
from task import *
from my_numpy import np, my_list
import copy
# push tests here
def get_standart_der(f1):
def der(u):
der = []
eps = 0.0000001
for i in range(0, len(u)):
u1 = copy.copy(u)
u1[i] = u1[i]+eps
der.append((f1(u1)-f1(u))/eps)
u1[i] = u[i]-eps
return np.array(der)
return der
#test func
# x^2+xy+y^2
def f0(u):
return u[0]*u[0]+u[0]*u[1]+u[1]*u[1]+0.0
def f0der(u):
return np.array([2*u[0]+u[1], 2*u[1]+u[0]])
func0 = Func(f0, f0der)
#0<=x<=1, -1<=y<=0
frames0 = Frames(0, frames_minnimize_function)
frames0.setframes(frames0, np.array([0.0, -1.0]), np.array([1.0,0.0]))
#0x+y<=0 0x-y<=1 x+0y<=1 -x+0y<=0
frames1 = Frames(1, symplex_meyhod_minimize_function)
frames1.setframes(frames1, np.array([[0,1],[0,-1],[1,0],[-1,0]]), np.array([0,1,1,0]))
#job0 = Job(func0, frames0, np.array([1,-1]), alpha_1) #u0=(1,-1)
#job0.check_errors()
#job1 = Job(func0, frames0, np.array([-1,0]), alpha_1) #u0=(-1,0)
#job1.check_errors()
#job2 = Job(func0, frames1, np.array([1, 0]), alpha_1) #u0=(1, 0)
#job2.check_errors()
#job3 = Job(func0, frames0, np.array([0, 0]), alpha_1) #u0=(0, 0)
#job3.check_errors()
def f1(u):
u1 = [(u[i]-i*u[0])*(u[i]-i*u[0])*(u[i]-i*u[0])*(u[i]-i*u[0]) for i in range(len(u))][1:]
return 150*sum(u1) + (u[0]-2)*(u[0]-2)*(u[0]-2)*(u[0]-2)
f1_der = get_standart_der(f1)
print(f1([1,0,0,0,0]))
#ะะฐัะต ะทะฐะดะฐะฝะธะต ะฝะฐ ะฒัะพัะพะน ััะฐะฟ - ะพัะปะฐะดะธัั ะฟัะพะณัะฐะผะผั ะดะปั ัะปะตะดัััะตะณะพ ััะฝะบัะธะพะฝะฐะปะฐ
#J(x) = 150*sum_(i=2)^(6)(x(i)-ix(1))^4+(x(1)-2)^4 ---> min
#g1(x)=sum_(i=1)^(6)(x(i)^2)<=363.
| [
{
"point_num": 1,
"id": "has_nested_function_def",
"question": "Does this file contain any function defined inside another function?",
"answer": true
},
{
"point_num": 2,
"id": "no_function_exceeds_5_params",
"question": "Does every function in this file take 5 or fewer parameters (e... | 3 | tests.py | ariolwork/uni_conditional_gradient |
#!/usr/bin/env python
"""
A very simple progress bar which keep track of the progress as we consume an
iterator.
"""
import os
import signal
import time
from prompt_toolkit import HTML
from prompt_toolkit.key_binding import KeyBindings
from prompt_toolkit.patch_stdout import patch_stdout
from prompt_toolkit.shortcuts import ProgressBar
def main():
bottom_toolbar = HTML(
' <b>[f]</b> Print "f" <b>[q]</b> Abort <b>[x]</b> Send Control-C.'
)
# Create custom key bindings first.
kb = KeyBindings()
cancel = [False]
@kb.add("f")
def _(event):
print("You pressed `f`.")
@kb.add("q")
def _(event):
"Quit by setting cancel flag."
cancel[0] = True
@kb.add("x")
def _(event):
"Quit by sending SIGINT to the main thread."
os.kill(os.getpid(), signal.SIGINT)
# Use `patch_stdout`, to make sure that prints go above the
# application.
with patch_stdout():
with ProgressBar(key_bindings=kb, bottom_toolbar=bottom_toolbar) as pb:
for i in pb(range(800)):
time.sleep(0.01)
if cancel[0]:
break
if __name__ == "__main__":
main()
| [
{
"point_num": 1,
"id": "all_function_names_snake_case",
"question": "Are all function names in this file written in snake_case?",
"answer": true
},
{
"point_num": 2,
"id": "every_function_under_20_lines",
"question": "Is every function in this file shorter than 20 lines?",
"answ... | 3 | examples/progress-bar/custom-key-bindings.py | gousaiyang/python-prompt-toolkit |
from drink_robot.controllers.index import bp as index
from drink_robot.controllers.recipe import bp as recipe
from drink_robot.controllers.bottle import bp as bottle
from drink_robot.controllers.pour import bp as pour
import pigpio
def init_pins(app):
if app.config['DEBUG']:
return
gpio = pigpio.pi()
for pin in app.config['PINS'].values():
gpio.set_mode(pin, pigpio.OUTPUT)
gpio.write(pin, 1)
def init_all_blueprints(app):
app.register_blueprint(index)
app.register_blueprint(recipe)
app.register_blueprint(bottle)
app.register_blueprint(pour) | [
{
"point_num": 1,
"id": "every_function_under_20_lines",
"question": "Is every function in this file shorter than 20 lines?",
"answer": true
},
{
"point_num": 2,
"id": "every_function_has_docstring",
"question": "Does every function in this file have a docstring?",
"answer": fals... | 3 | drink_robot/controllers/__init__.py | cactode/drink_robot |
# Copyright 2019 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
# RUN: (! %p/exported_python_args 2>&1) | FileCheck %s
# pylint: disable=missing-docstring,line-too-long,dangerous-default-value
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import tensorflow.compat.v2 as tf
from tensorflow.compiler.mlir.tensorflow.tests.tf_saved_model import common
class TestModule(tf.Module):
@tf.function(input_signature=[tf.TensorSpec([], tf.float32)])
def some_function(self, x):
return self.callee(x)
# CHECK: While importing SavedModel function 'callee': in input signature:
# CHECK-SAME: Unhandled structured value kind {{.*}} at index path: <value>.1.foo
@tf.function
def callee(self, x, n={'foo': 42}):
return x
if __name__ == '__main__':
common.do_test(TestModule)
| [
{
"point_num": 1,
"id": "all_params_annotated",
"question": "Does every function parameter in this file have a type annotation (excluding self/cls)?",
"answer": false
},
{
"point_num": 2,
"id": "more_functions_than_classes",
"question": "Does this file define more functions than clas... | 3 | tensorflow/compiler/mlir/tensorflow/tests/tf_saved_model/exported_python_args.py | yage99/tensorflow |
# Toolkit used for Classification
# Importing Libraries
from sklearn.linear_model import LogisticRegression
from sklearn.svm import SVC
from sklearn.ensemble import RandomForestClassifier
# Logistic Regression Classification
def logRegress(X_train, y_train, X_test, y_test):
# Fitting Logistic Regression to the Training set
classifier = LogisticRegression(random_state = 0)
classifier.fit(X_train, y_train)
# Predicting the Test set results
y_pred = classifier.predict(X_test)
# Counting accurate responses
cm = numClose(y_test, y_pred)
return [y_pred, cm]
# SVM Classificaiton
def svm(X_train, y_train, X_test, y_test):
# Fitting SVM to the Training set
classifier = SVC(kernel = 'linear', random_state = 0)
classifier.fit(X_train, y_train)
# Predicting the Test set results
y_pred = classifier.predict(X_test)
# Counting accurate responses
cm = numClose(y_test, y_pred)
return [y_pred, cm]
# Random Forest Classification
def rfor(X_train, y_train, X_test, y_test):
# Fitting Random Forest Classification to the Training set
classifier = RandomForestClassifier(n_estimators = 10, criterion = 'entropy', random_state = 0)
classifier.fit(X_train, y_train)
# Predicting the Test set results
y_pred = classifier.predict(X_test)
# Counting accurate responses
cm = numClose(y_test, y_pred)
return [y_pred, cm]
# Helper Method to Calculate num close
def numClose(y_test, y_pred):
result = 0;
for pos in range(0, len(y_test)):
if(y_test[pos] == y_pred[pos]):
# or
#y_test[pos] == y_pred[pos] + 1 or
#y_test[pos] == y_pred[pos] -1):
result += 1
return result | [
{
"point_num": 1,
"id": "all_params_annotated",
"question": "Does every function parameter in this file have a type annotation (excluding self/cls)?",
"answer": false
},
{
"point_num": 2,
"id": "every_function_has_docstring",
"question": "Does every function in this file have a docst... | 3 | src/NCAAClassification.py | parekhmitchell/NCAA-ML |
from __future__ import annotations
import dataclasses
import logging
from typing import Dict, Text, Any
import rasa.shared.utils.io
from rasa.engine.graph import GraphComponent, ExecutionContext
from rasa.engine.storage.resource import Resource
from rasa.engine.storage.storage import ModelStorage
logger = logging.getLogger(__name__)
@dataclasses.dataclass
class RuleOnlyDataProvider(GraphComponent):
"""Provides slots and loops that are only used in rules to other policies.
Policies can use this to exclude features which are only used by rules from the
featurization.
"""
rule_only_data: Dict[Text, Any]
@classmethod
def create(
cls,
config: Dict[Text, Any],
model_storage: ModelStorage,
resource: Resource,
execution_context: ExecutionContext,
) -> RuleOnlyDataProvider:
"""Creates component (see parent class for docstring)."""
rule_only_data = {}
try:
with model_storage.write_to(resource) as directory:
rule_only_data = rasa.shared.utils.io.read_json_file(
directory / "rule_only_data.json"
)
except ValueError:
logger.debug(
"Failed to load rule-only data from a trained 'RulePolicy'. "
"Providing empty rule-only data instead."
)
return cls(rule_only_data)
def provide(self) -> Dict[Text, Any]:
"""Provides data to other graph component."""
return self.rule_only_data
| [
{
"point_num": 1,
"id": "every_function_has_docstring",
"question": "Does every function in this file have a docstring?",
"answer": true
},
{
"point_num": 2,
"id": "has_multiple_inheritance",
"question": "Does any class in this file use multiple inheritance?",
"answer": false
}... | 3 | rasa/graph_components/providers/rule_only_provider.py | praneethgb/rasa |
from pymongo import MongoClient
import json
from newsapi.database import mongo
class UserModel:
def __init__(self, _id, username, password):
self.id = _id
self.username = username
self.password = password
@classmethod
def find_by_username(cls, username):
result = mongo.db.user.find_one({'username': username})
if result:
user = cls(_id=result['_id'], username=result['username'], password=result['password'])
else:
user = None
return user
@classmethod
def find_by_id(cls, _id):
result = mongo.db.user.find_one({'_id': _id})
if result:
user = cls(_id=result['_id'], username=result['username'], password=result['password'])
else:
user = None
return user
| [
{
"point_num": 1,
"id": "every_function_under_20_lines",
"question": "Is every function in this file shorter than 20 lines?",
"answer": true
},
{
"point_num": 2,
"id": "every_function_has_docstring",
"question": "Does every function in this file have a docstring?",
"answer": fals... | 3 | newsapi/user/models.py | rubiagatra/news-api |
from . import compat
have_greenlet = False
if compat.py3k:
try:
import greenlet # noqa F401
except ImportError:
pass
else:
have_greenlet = True
from ._concurrency_py3k import await_only
from ._concurrency_py3k import await_fallback
from ._concurrency_py3k import greenlet_spawn
from ._concurrency_py3k import AsyncAdaptedLock
from ._concurrency_py3k import asyncio # noqa F401
if not have_greenlet:
asyncio = None # noqa F811
def _not_implemented():
if not compat.py3k:
raise ValueError("Cannot use this function in py2.")
else:
raise ValueError(
"the greenlet library is required to use this function."
)
def await_only(thing): # noqa F811
return thing
def await_fallback(thing): # noqa F81
return thing
def greenlet_spawn(fn, *args, **kw): # noqa F81
_not_implemented()
def AsyncAdaptedLock(*args, **kw): # noqa F81
_not_implemented()
| [
{
"point_num": 1,
"id": "all_function_names_snake_case",
"question": "Are all function names in this file written in snake_case?",
"answer": false
},
{
"point_num": 2,
"id": "every_function_has_docstring",
"question": "Does every function in this file have a docstring?",
"answer"... | 3 | lib/sqlalchemy/util/concurrency.py | Dreamsorcerer/sqlalchemy |
from test.utilities.env_vars import set_env_vars
from test.utilities.excel import Excel
def test_simple_script_for_addition(xll_addin_path):
with set_env_vars('basic_functions'):
with Excel() as excel:
excel.register_xll(xll_addin_path)
(
excel.new_workbook()
.range('A1').set(3.0)
.range('A2').set(4.0)
.range('B1').set_formula('=excelbind.execute_python("return arg0 + arg1", A1, A2)')
.calculate()
)
assert excel.range('B1').value == 7.0
print("done testing")
def test_combination_str_n_float(xll_addin_path):
with set_env_vars('basic_functions'):
with Excel() as excel:
excel.register_xll(xll_addin_path)
(
excel.new_workbook()
.range('A1').set("Hello times ")
.range('A2').set(3.0)
.range('B1').set_formula('=excelbind.execute_python("return arg0 + str(arg1)", A1, A2)')
.calculate()
)
assert excel.range('B1').value == 'Hello times 3.0'
print("done testing")
| [
{
"point_num": 1,
"id": "has_nested_function_def",
"question": "Does this file contain any function defined inside another function?",
"answer": false
},
{
"point_num": 2,
"id": "every_function_under_20_lines",
"question": "Is every function in this file shorter than 20 lines?",
... | 3 | test/test_execute_python.py | RuneLjungmann/excelbind |
import numpy as np
import matplotlib.pyplot as plt
def train_test_splitter(X, y, ratio = 0.8, random_seed = 0):
assert(len(X) == len(y)), "The number of points in feature matrix and target vector should be the same."
np.random.seed(random_seed)
n = len(y)
idx = np.arange(n)
np.random.shuffle(idx)
train_idx = idx[:int(n * ratio)]
test_idx = idx[int(n * ratio):]
return X[train_idx,:], X[test_idx,:], y[train_idx], y[test_idx]
def error_rate(y, y_predicted):
assert len(y) == len(y_predicted), "The number of targets and predictions should be the same."
assert len(y) != 0, "The number of targets and predictions should not be zero."
return np.sum(np.array(y) != np.array(y_predicted)) / len(y)
def plot_losses(losses, savefig = False, showfig = False, filename = 'loss.png'):
fig = plt.figure(figsize = (12,8))
plt.plot(np.arange(len(losses)), losses, color = 'r', marker = 'o', label = 'Loss')
plt.legend()
plt.ylabel('Loss')
plt.xlabel('Number of Iterations')
if savefig:
fig.savefig(filename, format = 'png', dpi = 600, bbox_inches = 'tight')
if showfig:
plt.show()
plt.close()
return | [
{
"point_num": 1,
"id": "no_function_exceeds_5_params",
"question": "Does every function in this file take 5 or fewer parameters (excluding self/cls)?",
"answer": true
},
{
"point_num": 2,
"id": "all_function_names_snake_case",
"question": "Are all function names in this file written... | 3 | utils.py | leimao/Logistic_Regression_Python |
from flask import render_template, flash, redirect, url_for, request
from flask.views import MethodView
from app.middleware import auth
from app.models.user import User
from app.validators.register_form import RegisterForm
from app.services import avatar_service
class RegisterController(MethodView):
@auth.optional
def get(self):
"""
Show register form
Returns:
Register template with form
"""
return render_template('auth/register.html', form=RegisterForm())
@auth.optional
def post(self):
"""
Handle the POST request and sign up the user if form validation passes
Returns:
A redirect or a template with the validation errors
"""
form = RegisterForm()
if form.validate_on_submit():
form.validate_username(form.username)
avatar = 'no-image.png'
if 'avatar' in request.files and request.files['avatar']:
avatar = avatar_service.save(form.avatar.data)
User.create(form.username.data, form.password.data, avatar)
flash('Your account has been created. You may now login.', 'info')
return redirect(url_for('login'))
return render_template('auth/register.html', form=form)
| [
{
"point_num": 1,
"id": "more_functions_than_classes",
"question": "Does this file define more functions than classes?",
"answer": true
},
{
"point_num": 2,
"id": "every_function_has_docstring",
"question": "Does every function in this file have a docstring?",
"answer": true
},... | 3 | app/controllers/auth/register.py | TheSynt4x/flask-blog |
#
# Copyright (c) 2019 Wind River Systems, Inc.
#
# SPDX-License-Identifier: Apache-2.0
#
from cgtsclient.common import base
class KubeUpgrade(base.Resource):
def __repr__(self):
return "<kube_upgrade %s>" % self._info
class KubeUpgradeManager(base.Manager):
resource_class = KubeUpgrade
@staticmethod
def _path(uuid=None):
return '/v1/kube_upgrade/%s' % uuid if uuid else '/v1/kube_upgrade'
def list(self):
"""Retrieve the list of kubernetes upgrades known to the system."""
return self._list(self._path(), "kube_upgrades")
def get(self, uuid):
"""Retrieve the details of a given kubernetes upgrade.
:param uuid: uuid of upgrade
"""
try:
return self._list(self._path(uuid))[0]
except IndexError:
return None
def create(self, to_version, force):
"""Create a new kubernetes upgrade.
:param to_version: target kubernetes version
:param force: ignore non management-affecting alarms
"""
new = {}
new['to_version'] = to_version
new['force'] = force
return self._create(self._path(), new)
def delete(self):
"""Delete a kubernetes upgrade."""
return self.api.json_request('DELETE', self._path())
def update(self, patch):
"""Update a kubernetes upgrade."""
return self._update(self._path(), patch)
| [
{
"point_num": 1,
"id": "no_function_exceeds_5_params",
"question": "Does every function in this file take 5 or fewer parameters (excluding self/cls)?",
"answer": true
},
{
"point_num": 2,
"id": "all_params_annotated",
"question": "Does every function parameter in this file have a ty... | 3 | sysinv/cgts-client/cgts-client/cgtsclient/v1/kube_upgrade.py | albailey/config |
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
#
# http://www.apache.org/licenses/LICENSE-2.0
#
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
from aliyunsdkcore.request import RpcRequest
from aliyunsdkdomain.endpoint import endpoint_data
class AcknowledgeTaskResultRequest(RpcRequest):
def __init__(self):
RpcRequest.__init__(self, 'Domain', '2018-01-29', 'AcknowledgeTaskResult')
self.set_method('POST')
if hasattr(self, "endpoint_map"):
setattr(self, "endpoint_map", endpoint_data.getEndpointMap())
if hasattr(self, "endpoint_regional"):
setattr(self, "endpoint_regional", endpoint_data.getEndpointRegional())
def get_TaskDetailNos(self):
return self.get_query_params().get('TaskDetailNo')
def set_TaskDetailNos(self, TaskDetailNos):
for depth1 in range(len(TaskDetailNos)):
if TaskDetailNos[depth1] is not None:
self.add_query_param('TaskDetailNo.' + str(depth1 + 1) , TaskDetailNos[depth1])
def get_UserClientIp(self):
return self.get_query_params().get('UserClientIp')
def set_UserClientIp(self,UserClientIp):
self.add_query_param('UserClientIp',UserClientIp)
def get_Lang(self):
return self.get_query_params().get('Lang')
def set_Lang(self,Lang):
self.add_query_param('Lang',Lang) | [
{
"point_num": 1,
"id": "has_multiple_inheritance",
"question": "Does any class in this file use multiple inheritance?",
"answer": false
},
{
"point_num": 2,
"id": "every_function_under_20_lines",
"question": "Is every function in this file shorter than 20 lines?",
"answer": true... | 3 | aliyun-python-sdk-domain/aliyunsdkdomain/request/v20180129/AcknowledgeTaskResultRequest.py | yndu13/aliyun-openapi-python-sdk |
import pytest
from django.urls import reverse
from coruscant_django.users.models import User
pytestmark = pytest.mark.django_db
class TestUserAdmin:
def test_changelist(self, admin_client):
url = reverse("admin:users_user_changelist")
response = admin_client.get(url)
assert response.status_code == 200
def test_search(self, admin_client):
url = reverse("admin:users_user_changelist")
response = admin_client.get(url, data={"q": "test"})
assert response.status_code == 200
def test_add(self, admin_client):
url = reverse("admin:users_user_add")
response = admin_client.get(url)
assert response.status_code == 200
response = admin_client.post(
url,
data={
"username": "test",
"password1": "My_R@ndom-P@ssw0rd",
"password2": "My_R@ndom-P@ssw0rd",
},
)
assert response.status_code == 302
assert User.objects.filter(username="test").exists()
def test_view_user(self, admin_client):
user = User.objects.get(username="admin")
url = reverse("admin:users_user_change", kwargs={"object_id": user.pk})
response = admin_client.get(url)
assert response.status_code == 200
| [
{
"point_num": 1,
"id": "has_multiple_inheritance",
"question": "Does any class in this file use multiple inheritance?",
"answer": false
},
{
"point_num": 2,
"id": "more_functions_than_classes",
"question": "Does this file define more functions than classes?",
"answer": true
},... | 3 | coruscant_django/users/tests/test_admin.py | Jarbton/coruscant-django |
# the class to build the graph, as already described in main.py
class Graph:
def __init__(self):
self.nodes = set()
self.edges = set()
self.neighboors = {}
self.weight_edges = {}
self.coord_nodes = {}
def add_node(self, node):
self.nodes.add(node)
def add_edge(self, edge):
self.edges.add(edge)
def direct_neighboors(self):
if len(self.neighboors) == 0:
self.neighboors = {node: [] for node in self.nodes}
for item in self.edges:
self.neighboors[item[0]].append(item[1])
def weighted_edges(self, data, type_of_distance):
if len(self.weight_edges) == 0:
self.weight_edges = {edge: {} for edge in self.edges}
for item in data:
self.weight_edges[(item[0], item[1])][type_of_distance] = item[2]
def add_network_distance(self):
for key in self.weight_edges:
self.weight_edges[key]['n'] = 1
def coordinate_nodes(self, data):
if len(self.coord_nodes) == 0:
self.coord_nodes = {node: [] for node in self.nodes}
for item in data:
self.coord_nodes[item[0]].append(item[1])
self.coord_nodes[item[0]].append(item[2])
# implementation from scratch of the queue data structure, using lists
class Queue:
def __init__(self):
self.queue = []
def enqueue(self, x):
self.queue.append(x)
def dequeue(self):
return self.queue.pop(0)
def front(self):
return self.queue[0]
def size(self):
return len(self.queue)
def isEmpty(self):
if self.size() == 0:
return True
else:
return False
| [
{
"point_num": 1,
"id": "more_functions_than_classes",
"question": "Does this file define more functions than classes?",
"answer": true
},
{
"point_num": 2,
"id": "every_function_has_docstring",
"question": "Does every function in this file have a docstring?",
"answer": false
}... | 3 | graph.py | michelemeo/ADM-HW5 |
""" Module containing DocumentPublisher class """
from ..common.common import SectionHandler
# pylint: disable=too-few-public-methods
class DocumentPublisher(SectionHandler):
""" Responsible for converting the DocumentPublisher section:
- /cvrf:cvrfdoc/cvrf:DocumentPublisher
"""
type_category_mapping = {
'Vendor': 'vendor',
'Coordinator': 'coordinator',
'User': 'user',
'Discoverer': 'discoverer',
'Other': 'other',
}
def __init__(self, config):
super().__init__()
self.name = config.get('publisher_name')
self.namespace = config.get('publisher_namespace')
def _process_mandatory_elements(self, root_element):
self.csaf['name'] = self.name
self.csaf['namespace'] = self.namespace
self.csaf['category'] = self.type_category_mapping[root_element.attrib['Type']]
def _process_optional_elements(self, root_element):
# optional values
if hasattr(root_element, 'ContactDetails'):
self.csaf['contact_details'] = root_element.ContactDetails.text
if hasattr(root_element, 'IssuingAuthority'):
self.csaf['issuing_authority'] = root_element.IssuingAuthority.text
| [
{
"point_num": 1,
"id": "every_function_under_20_lines",
"question": "Is every function in this file shorter than 20 lines?",
"answer": true
},
{
"point_num": 2,
"id": "all_params_annotated",
"question": "Does every function parameter in this file have a type annotation (excluding se... | 3 | cvrf2csaf/section_handlers/document_publisher.py | sthagen/csaf-tools-CVRF-CSAF-Converter |
from django.core.management.base import BaseCommand
import random
from faker import Faker
import pandas as pd
from pandas import DataFrame
import time
from BookClub.management.commands.helper import get_top_n_books, get_top_n_users_who_have_rated_xyz_books, get_top_n_books_shifted
from BookClub.models.user import User
class Command(BaseCommand):
"""The database seeder."""
def handle(self, *args, **options):
tic = time.time()
model_instances = self.import_users()
try:
User.objects.bulk_create(model_instances)
except Exception as e:
print(e)
toc = time.time()
total = toc-tic
print('Done in {:.4f} seconds'.format(total))
print(str(len(model_instances)) + " Users created")
def import_users(self):
file_path = ("static/dataset/BX-Users.csv")
data = DataFrame(pd.read_csv(file_path, header=0, encoding= "ISO-8859-1", sep=';'))
isbns = get_top_n_books_shifted(300)
rating_users = get_top_n_users_who_have_rated_xyz_books(1000, isbns)
faker = Faker()
chosen_users = data[data['User-ID'].isin(rating_users)]
chosen_users = chosen_users.to_dict('records')
model_instances = []
i = 0;
for record in chosen_users:
i +=1
Faker.seed(i)
u = User(
pk=i,
username=faker.unique.user_name(),
email=faker.unique.email(),
password='pbkdf2_sha256$260000$qw2y9qdBlYmFUZVdkUqlOO$nuzhHvRnVDDOAo70OL14IEqk+bASVNTLjWS1N+c40VU=',
)
model_instances.append(u)
return model_instances
| [
{
"point_num": 1,
"id": "every_function_under_20_lines",
"question": "Is every function in this file shorter than 20 lines?",
"answer": false
},
{
"point_num": 2,
"id": "has_multiple_inheritance",
"question": "Does any class in this file use multiple inheritance?",
"answer": fals... | 3 | BookClub/management/commands/importuserstargeted.py | amir-rahim/BookClubSocialNetwork |
import sys
from PyQt5.QtWidgets import QDialog, QApplication
from demoRadioButton2 import *
class MyForm(QDialog):
def __init__(self):
super().__init__()
self.ui = Ui_Dialog()
self.ui.setupUi(self)
self.ui.radioButtonMedium.toggled.connect(self.dispSelected)
self.ui.radioButtonLarge.toggled.connect(self.dispSelected)
self.ui.radioButtonXL.toggled.connect(self.dispSelected)
self.ui.radioButtonXXL.toggled.connect(self.dispSelected)
self.ui.radioButtonDebitCard.toggled.connect(self.dispSelected)
self.ui.radioButtonNetBanking.toggled.connect(self.dispSelected)
self.ui.radioButtonCashOnDelivery.toggled.connect(self.dispSelected)
self.show()
def dispSelected(self):
selected1="";
selected2=""
if self.ui.radioButtonMedium.isChecked()==True:
selected1="Medium"
if self.ui.radioButtonLarge.isChecked()==True:
selected1="Large"
if self.ui.radioButtonXL.isChecked()==True:
selected1="Extra Large"
if self.ui.radioButtonXXL.isChecked()==True:
selected1="Extra Extra Large"
if self.ui.radioButtonDebitCard.isChecked()==True:
selected2="Debit/Credit Card"
if self.ui.radioButtonNetBanking.isChecked()==True:
selected2="NetBanking"
if self.ui.radioButtonCashOnDelivery.isChecked()==True:
selected2="Cash On Delivery"
self.ui.labelSelected.setText("Chosen shirt size is "+selected1+" and payment method as " + selected2)
if __name__=="__main__":
app = QApplication(sys.argv)
w = MyForm()
w.show()
sys.exit(app.exec_())
| [
{
"point_num": 1,
"id": "more_functions_than_classes",
"question": "Does this file define more functions than classes?",
"answer": true
},
{
"point_num": 2,
"id": "every_function_under_20_lines",
"question": "Is every function in this file shorter than 20 lines?",
"answer": true
... | 3 | Chapter01/callRadioButton2.pyw | houdinii/Qt5-Python-GUI-Programming-Cookbook |
#!/usr/bin/env python
# -*- coding:utf-8 -*-
import xml.etree.ElementTree as ET
import lxml.html
from lxml import etree
from lxml.html.soupparser import fromstring
import re
_badtags = set(["script", 'style' , 'iframe'])
_have_a_chars = re.compile(r'\w', re.UNICODE)
_splitter_chars = re.compile(r'[\s,]', re.UNICODE)
def mark_tokens_in_etree(html_text) : # etree doc
token_counter = 0 # ััะพ ััะตััะธะบ
#
html = fromstring(html_text)
body = html.find('.//body')
elements = [ i for i in body.xpath('//*') if bool(i.text or i.tail) and i.text!='' and i.text!='\n']
for el in elements:
if el.tag.lower() in _badtags :
continue
if el.get("class") and ( 'isa_control' in el.get("class") ):
continue
# ัะตัั ะตััั ะปะธ ััั ะฑัะบะฒั
if bool(el.text):
text = el.text
text_l = [i for i in re.split(' |\t|\n', text) if i != '']
token_counter = _subdiv_element_text_to_marked_tokens(el, text_l, token_counter)
if bool(el.tail) and bool(el.tail.strip()):
text = el.tail
_span = etree.Element('span')
el.addnext(_span)
_span.text = ''
_span.tail = ''
el.tail = ''
text_l = [i for i in re.split(' |\t|\n', text) if i != '']
token_counter = _subdiv_element_text_to_marked_tokens(_span, text_l, token_counter)
if el.tag=='br' and bool(el.tail) :
el2 = etree.Element("span")
el.addnext(el2)
el=el2
el.text = '' ; el.tail = ''
s = etree.tostring(html, method='html')
return s
def _subdiv_element_text_to_marked_tokens(el, text_l, counter):
i = counter
span = etree.Element('span')
el.insert(0,span)
for t in text_l:
token = etree.Element('span', {"class": "chunk token_{}".format(i), "data-token-id":"{}".format(i), })
token.text = t
token.append(etree.Entity('nbsp'))
span.append( token )
span.append(etree.Entity('shy'))
i += 1
return i
# test ( tdd )
if __name__ == '__main__':
with open('/tmp/builded.html', 'r') as f:
data = f.read()
data_out = mark_tokens_in_etree(data)
with open('/tmp/builded_2.html', 'w') as fw:
fw.write(data_out.encode('utf-8'))
| [
{
"point_num": 1,
"id": "no_function_exceeds_5_params",
"question": "Does every function in this file take 5 or fewer parameters (excluding self/cls)?",
"answer": true
},
{
"point_num": 2,
"id": "all_params_annotated",
"question": "Does every function parameter in this file have a ty... | 3 | docato/docato/Crawlers/LJ/Crawler/crawler_tokenize.py | pavlovma007/docato |
from django.contrib import admin
from django.utils import timezone
from django.utils.translation import ugettext_lazy as _
from . import filters
from .. import models
@admin.register(models.RefreshToken)
class RefreshTokenAdmin(admin.ModelAdmin):
list_display = ['user', 'token', 'created', 'revoked', 'is_expired']
list_filter = (filters.RevokedFilter, filters.ExpiredFilter)
raw_id_fields = ('user',)
search_fields = ('token',)
actions = ('revoke',)
def revoke(self, request, queryset):
queryset.update(revoked=timezone.now())
revoke.short_description = _('Revoke selected %(verbose_name_plural)s')
def is_expired(self, obj):
return obj.is_expired()
is_expired.boolean = True
is_expired.short_description = _('is expired')
| [
{
"point_num": 1,
"id": "all_params_annotated",
"question": "Does every function parameter in this file have a type annotation (excluding self/cls)?",
"answer": false
},
{
"point_num": 2,
"id": "has_nested_function_def",
"question": "Does this file contain any function defined inside... | 3 | ariadne_jwt/refresh_token/admin/__init__.py | abaumg/ariadne-jwt |
from django.views.generic.detail import DetailView
from django.views.generic.edit import CreateView
from django.views.generic.edit import UpdateView
from django.views.generic import ListView
from django.urls import reverse
from myapp.models import Bike
class BikeDetailView(DetailView):
model = Bike
template_name = 'bike_detail.html'
def get_context_data(self, **kwargs):
# Call the base implementation first to get a context
context = super().get_context_data(**kwargs)
# Add in a QuerySet of all the books
context['home_url'] = reverse('home')
return context
class BikeCreateView(CreateView):
model = Bike
template_name = 'bike_create.html'
fields = ['type','price','image']
class BikeUpdateView(UpdateView):
model = Bike
template_name = 'bike_create.html'
fields = ['type','price','image']
class BikeListView(ListView):
model = Bike
paginate_by = 3
#queryset=Bike.objects.filter(type='mountain')
template_name = 'bike_list.html' | [
{
"point_num": 1,
"id": "has_multiple_inheritance",
"question": "Does any class in this file use multiple inheritance?",
"answer": false
},
{
"point_num": 2,
"id": "every_class_has_docstring",
"question": "Does every class in this file have a docstring?",
"answer": false
},
{... | 3 | week07/myproject/myapp/views.py | wasit7/dsi202_2021 |
from highcliff.actions.actions import AIaction
class MonitorTemperature(AIaction):
effects = {"problem_with_temperature": False}
preconditions = {}
def behavior(self):
# decide if medication is needed and update the world accordingly
raise NotImplementedError
def __adjustment_needed(self):
# this should be called by custom behavior if it determines that adjustment is needed
self.effects["problem_with_temperature"] = True
class AuthorizeTemperatureAdjustment(AIaction):
effects = {"temperature_adjustment_authorized": True}
preconditions = {"problem_with_temperature": True}
def behavior(self):
# custom behavior must be specified by anyone implementing an AI action
raise NotImplementedError
def __authorization_failed(self):
# this should be by custom behavior if it fails to confirm that the proper maintenance was given
self.effects["temperature_adjustment_authorized"] = False
self.effects["problem_with_temperature"] = True
class AdjustTemperature(AIaction):
effects = {"problem_with_temperature": False}
preconditions = {"temperature_adjustment_authorized": True}
def behavior(self):
# custom behavior must be specified by anyone implementing an AI action
raise NotImplementedError
def __adjustment_failed(self):
# this should be called by custom behavior if it fails to complete the adjustment
self.effects["problems_with_temperature"] = True
| [
{
"point_num": 1,
"id": "has_multiple_inheritance",
"question": "Does any class in this file use multiple inheritance?",
"answer": false
},
{
"point_num": 2,
"id": "all_function_names_snake_case",
"question": "Are all function names in this file written in snake_case?",
"answer":... | 3 | highcliff/temperature/temperature.py | sermelo/Highcliff-SDK |
import logging
import threading
import time
import random
LOG_FORMAT = '%(asctime)s %(threadName)-17s %(levelname)-8s %(message)s'
logging.basicConfig(level=logging.INFO, format=LOG_FORMAT)
items = []
event = threading.Event()
class Pasien(threading.Thread):
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
def run(self):
while True:
time.sleep(2)
event.wait()
item = items.pop()
logging.info('Pasien notify: {} pasien menuju ruangan bidan {}'\
.format(item, self.name))
class Bidan(threading.Thread):
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
def run(self):
for i in range(5):
time.sleep(2)
item = random.randint(0, 100)
items.append(item)
logging.info('Bidan notify: Bidan mengecek kandungan pasien {}'\
.format(item, self.name))
event.set()
event.clear()
if __name__ == "__main__":
t1 = Bidan()
t2 = Pasien()
t1.start()
t2.start()
t1.join()
t2.join()
| [
{
"point_num": 1,
"id": "has_multiple_inheritance",
"question": "Does any class in this file use multiple inheritance?",
"answer": false
},
{
"point_num": 2,
"id": "more_functions_than_classes",
"question": "Does this file define more functions than classes?",
"answer": true
},... | 3 | Chapter02/1194029 Nur Ikhsani Suwandy Futri 3a D4 TI studi_kasus/Event.py | nurikhsanisf/Python-Pararel_SISTER |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.