source string | points list | n_points int64 | path string | repo string |
|---|---|---|---|---|
from .. import fixtures
from ..assertions import eq_
from ..schema import Column
from ..schema import Table
from ... import Integer
from ... import String
class SimpleUpdateDeleteTest(fixtures.TablesTest):
    """Backend-agnostic tests for plain UPDATE and DELETE statements.

    Uses one two-column table with an integer primary key; the fixture
    rows are re-inserted before every test (run_deletes = "each").
    """

    # Re-run the delete/insert fixture cycle around each test method.
    run_deletes = "each"
    # Marks this suite as requiring a real database backend.
    __backend__ = True

    @classmethod
    def define_tables(cls, metadata):
        """Create the single fixture table used by all tests."""
        Table(
            "plain_pk",
            metadata,
            Column("id", Integer, primary_key=True),
            Column("data", String(50)),
        )

    @classmethod
    def insert_data(cls, connection):
        """Seed three known rows before each test."""
        connection.execute(
            cls.tables.plain_pk.insert(),
            [
                {"id": 1, "data": "d1"},
                {"id": 2, "data": "d2"},
                {"id": 3, "data": "d3"},
            ],
        )

    def test_update(self, connection):
        """UPDATE one row by primary key, then verify full table contents."""
        t = self.tables.plain_pk
        # NOTE(review): passing values as **kwargs to execute() is a legacy
        # calling form — confirm it is still supported by the SQLAlchemy
        # version in use before modernizing.
        r = connection.execute(t.update().where(t.c.id == 2), data="d2_new")
        assert not r.is_insert
        assert not r.returns_rows
        eq_(
            connection.execute(t.select().order_by(t.c.id)).fetchall(),
            [(1, "d1"), (2, "d2_new"), (3, "d3")],
        )

    def test_delete(self, connection):
        """DELETE one row by primary key, then verify the remaining rows."""
        t = self.tables.plain_pk
        r = connection.execute(t.delete().where(t.c.id == 2))
        assert not r.is_insert
        assert not r.returns_rows
        eq_(
            connection.execute(t.select().order_by(t.c.id)).fetchall(),
            [(1, "d1"), (3, "d3")],
        )
__all__ = ("SimpleUpdateDeleteTest",)
| [
{
"point_num": 1,
"id": "has_multiple_inheritance",
"question": "Does any class in this file use multiple inheritance?",
"answer": false
},
{
"point_num": 2,
"id": "all_function_names_snake_case",
"question": "Are all function names in this file written in snake_case?",
"answer":... | 3 | lib/sqlalchemy/testing/suite/test_update_delete.py | Dreamsorcerer/sqlalchemy |
# Copyright (c) 2020-2022 The PyUnity Team
# This file is licensed under the MIT License.
# See https://docs.pyunity.x10.bz/en/latest/license.html
from pyunity import Behaviour, GameObject, SceneManager, Material, RGB, Mesh, Vector3, MeshRenderer, WaitForSeconds
class Switch(Behaviour):
    """Behaviour that switches to scene index 1 three seconds after start."""

    async def Start(self):
        # PyUnity coroutine entry point; the name `Start` is fixed by the engine.
        await WaitForSeconds(3)
        SceneManager.LoadSceneByIndex(1)
def main():
    """Build two scenes, each showing a single cube, and start the first."""
    # Scene creation order matters: Switch later loads scene index 1.
    first_scene = SceneManager.AddScene("Scene")
    second_scene = SceneManager.AddScene("Scene 2")

    # Pull both cameras back so the cubes are in view.
    for scene in (first_scene, second_scene):
        scene.mainCamera.transform.localPosition = Vector3(0, 0, -10)

    # Red cube in the first scene; Switch swaps to the second after 3s.
    red_cube = GameObject("Cube")
    red_renderer = red_cube.AddComponent(MeshRenderer)
    red_renderer.mesh = Mesh.cube(2)
    red_renderer.mat = Material(RGB(255, 0, 0))
    red_cube.AddComponent(Switch)
    first_scene.Add(red_cube)

    # Blue cube in the second scene.
    blue_cube = GameObject("Cube 2")
    blue_renderer = blue_cube.AddComponent(MeshRenderer)
    blue_renderer.mesh = Mesh.cube(2)
    blue_renderer.mat = Material(RGB(0, 0, 255))
    second_scene.Add(blue_cube)

    SceneManager.LoadScene(first_scene)
# Run the example when executed as a script.
if __name__ == "__main__":
    main()
| [
{
"point_num": 1,
"id": "has_nested_function_def",
"question": "Does this file contain any function defined inside another function?",
"answer": false
},
{
"point_num": 2,
"id": "has_multiple_inheritance",
"question": "Does any class in this file use multiple inheritance?",
"answ... | 3 | pyunity/examples/example6/__init__.py | rayzchen/PyUnity |
#!/bin/env dls-python
from sys import version_info
if version_info.major == 2:
    import __builtin__ as builtins  # Allows for Python 2/3 compatibility, 'builtins' is namespace for inbuilt functions
else:
    import builtins
import unittest
from mock import patch, MagicMock

# Patch dls_ade.Server *before* importing dls_list_modules so the
# module-level Server() call inside dls_list_modules binds to this mock.
# The patch is stopped right after the import; the shared mock instance
# lives on in `server_mock` for the test classes below.
p = patch('dls_ade.Server')
server_mock = MagicMock()
m = p.start()
m.return_value = server_mock
from dls_ade import dls_list_modules
p.stop()
class ParserTest(unittest.TestCase):
    """Tests for the dls_list_modules argument parser."""

    def setUp(self):
        self.parser = dls_list_modules.make_parser()

    def test_parser_understands_domain(self):
        # "-i" selects the ioc area; the positional argument is the domain.
        args = self.parser.parse_args("-i TS".split())
        self.assertEqual(args.area, "ioc")
        self.assertEqual(args.domain_name, "TS")
class PrintModuleListTest(unittest.TestCase):
    """Tests for dls_list_modules.get_module_list using the shared mock server."""

    def setUp(self):
        # The module-level server_mock (installed before the import above)
        # is shared across tests, so it must be reset after each one.
        self.server_mock = server_mock

    def tearDown(self):
        self.server_mock.reset_mock()

    def test_server_repo_list_called(self):
        source = "test/source"
        dls_list_modules.get_module_list(source)
        self.server_mock.get_server_repo_list.assert_called_once_with(source)

    def test_given_valid_source_then_list_of_modules(self):
        # The "<source>/" prefix and a trailing ".git" suffix are expected
        # to be stripped from the returned module names.
        self.server_mock.get_server_repo_list.return_value = [
            "test/source/module", "test/source/module2.git"
        ]
        source = "test/source"
        module_list = dls_list_modules.get_module_list(source)
        self.assertIsNotNone(module_list)
        self.assertListEqual(module_list, ['module', 'module2'])
| [
{
"point_num": 1,
"id": "has_multiple_inheritance",
"question": "Does any class in this file use multiple inheritance?",
"answer": false
},
{
"point_num": 2,
"id": "all_function_names_snake_case",
"question": "Are all function names in this file written in snake_case?",
"answer":... | 3 | dls_ade/dls_list_modules_test.py | hir12111/dls_ade |
def V(i, j):
    """Return the Prolog variable name for grid cell (i, j)."""
    return 'V{}_{}'.format(i, j)
def domains(Vs):
    """Constrain every variable name in Vs to the digits 1..9."""
    constraints = []
    for name in Vs:
        constraints.append(name + ' in 1..9')
    return constraints
def all_different(Qs):
    """Render an all_distinct/1 constraint over the given variable names."""
    joined = ', '.join(Qs)
    return 'all_distinct([{}])'.format(joined)
def get_column(j):
    """Variable names of column j, top to bottom."""
    return [V(row, j) for row in range(9)]
def get_raw(i):
    """Variable names of row i ("raw" is this file's spelling of "row")."""
    return [V(i, col) for col in range(9)]
def horizontal():
    """One all_distinct constraint per row of the grid."""
    return [all_different(get_raw(row)) for row in range(9)]
def vertical():
    """One all_distinct constraint per column of the grid."""
    return [all_different(get_column(col)) for col in range(9)]
def block(i):
    """All-distinct constraint for the i-th 3x3 block (row-major order).

    Fixed: the comprehension previously reused `i` as its loop variable,
    shadowing the block-index parameter. The cells produced were the same,
    but the code was misleading; distinct names (`r`, `c`) make the intent
    explicit.
    """
    x = (i // 3) * 3  # top row of the block
    y = (i % 3) * 3   # leftmost column of the block
    return all_different(
        [V(r, c) for r in range(x, x + 3) for c in range(y, y + 3)]
    )
def blocks():
    """All nine 3x3 block constraints."""
    return [block(k) for k in range(9)]
def print_constraints(Cs, indent, d):
    """Write each constraint on its own line, terminated with a comma.

    `position` tracks an approximate output column; once it exceeds `d`,
    an indent-only line is emitted and the counter resets.
    NOTE(review): every constraint already goes through writeln on its own
    line, so the wrapping logic only inserts extra indent lines — confirm
    this matches the intended Prolog layout.
    """
    position = indent
    writeln((indent - 1) * ' ')
    for c in Cs:
        writeln(c + ',')
        position += len(c)
        if position > d:
            position = indent
            writeln((indent - 1) * ' ')
def sudoku(assigments):
    """Emit a complete CLP(FD) Prolog program that solves the given puzzle.

    `assigments` is a list of (row, column, value) triples for the
    pre-filled cells. Output goes through the module-level `writeln`.
    """
    variables = [ V(i,j) for i in range(9) for j in range(9)]
    writeln(':- use_module(library(clpfd)).')
    writeln('solve([' + ', '.join(variables) + ']) :- ')
    # Domain, column, row and 3x3-block constraints, then the given clues.
    cs = domains(variables) + vertical() + horizontal() + blocks()
    for i,j,val in assigments:
        cs.append('{} #= {}'.format(V(i,j), val))
    print_constraints(cs, 4, 70)
    # First-fail labeling, then a directive that prints the solution.
    writeln('labeling([ff], [' + ', '.join(variables) + ']).')
    writeln('')
    writeln(':- solve(X), write(X), nl.')
def writeln(s):
    """Write one line to the module-level `output` file object."""
    output.write(s + '\n')
# Script body: read the puzzle from zad_input.txt (9 lines of 9 characters,
# '.' marking an empty cell) and write the generated Prolog program to
# zad_output.txt via the sudoku() generator above.
txt = open('zad_input.txt').readlines()
output = open('zad_output.txt', 'w')
raw = 0  # current row index ("raw" = row, matching get_raw above)
triples = []
for x in txt:
    x = x.strip()
    if len(x) == 9:
        for i in range(9):
            if x[i] != '.':
                triples.append( (raw,i,int(x[i])) )
        raw += 1
sudoku(triples)
| [
{
"point_num": 1,
"id": "every_function_has_docstring",
"question": "Does every function in this file have a docstring?",
"answer": false
},
{
"point_num": 2,
"id": "every_function_under_20_lines",
"question": "Is every function in this file shorter than 20 lines?",
"answer": tru... | 3 | L3/Z4/main.py | JacekLeja/AI-Projects |
from typing import List
import unittest
from model import tasks
class TestRepository(unittest.TestCase):
    """Tests for tasks.Repository.

    NOTE(review): these tests assume a fresh Repository is pre-seeded with
    two tasks ("task1" with id 1, and id 2), that list() returns only tasks
    not yet done, and that add() assigns the next sequential id (ignoring
    the id passed to Task) — confirm against model.tasks.
    """

    def test_list(self):
        rep = tasks.Repository()
        l = rep.list()
        self.assertEqual(len(l), 2)
        self.assertEqual(l[0].id, 1)
        self.assertEqual(l[0].text, "task1")
        self.assertEqual(l[0].done, False)
        self.assertEqual(l[1].id, 2)
        # Marking a task done (via internal state) hides it from list().
        rep._tasks[0].done = True
        l = rep.list()
        self.assertEqual(len(l), 1)
        self.assertEqual(l[0].id, 2)
        self.assertEqual(l[0].done, False)

    def test_add(self):
        rep = tasks.Repository()
        task = tasks.Task(100, "new task")
        rep.add(task)
        l = rep.list()
        self.assertEqual(len(l), 3)
        # The repository assigns id 3, not the 100 supplied above.
        self.assertEqual(l[2].id, 3)
        self.assertEqual(l[2].text, "new task")
        self.assertEqual(l[2].done, False)

    def test_done(self):
        rep = tasks.Repository()
        rep.done(1)
        l = rep.list()
        self.assertEqual(len(l), 1)
        self.assertEqual(l[0].id, 2)
        self.assertEqual(l[0].done, False)
| [
{
"point_num": 1,
"id": "every_function_has_docstring",
"question": "Does every function in this file have a docstring?",
"answer": false
},
{
"point_num": 2,
"id": "more_functions_than_classes",
"question": "Does this file define more functions than classes?",
"answer": true
}... | 3 | server/tests/test_repository.py | 74th/vscode-book-python |
#
# MIT License
#
# Copyright (c) 2018 WillQ
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
#
import os
from unittest.mock import MagicMock, patch
from monkq.utils.i18n import LazyTranslation
def test_lazytranslation_not_setting() -> None:
    """When no catalog file is found, gettext falls back to NullTranslations."""
    with patch("monkq.utils.i18n.gettext", MagicMock()) as gettext_mock:
        gettext_mock.find.return_value = None
        translator = LazyTranslation()
        translator.setup("CN")
        translator.gettext("hello")
        gettext_mock.NullTranslations().gettext.assert_called()
def test_lazytranslation() -> None:
    """When a catalog file exists, gettext goes through GNUTranslations."""
    with patch("monkq.utils.i18n.gettext", MagicMock()) as gettext_mock:
        # Any path that exists will do; this test file itself is handy.
        gettext_mock.find.return_value = os.path.abspath(__file__)
        translator = LazyTranslation()
        translator.setup("CN")
        translator.gettext("hello")
        gettext_mock.GNUTranslations().gettext.assert_called()
| [
{
"point_num": 1,
"id": "every_function_under_20_lines",
"question": "Is every function in this file shorter than 20 lines?",
"answer": true
},
{
"point_num": 2,
"id": "all_return_types_annotated",
"question": "Does every function in this file have a return type annotation?",
"an... | 3 | tests/util/test_i18n.py | zsluedem/MonkTrader |
from flask import Flask, render_template
app = Flask(__name__)
@app.route('/')
def index_route():
    """Serve the landing page.

    Fixed: the template was referenced as '/index.html'. Jinja template
    names are loader-relative, so the leading slash is dropped for
    consistency with nav_target_route below.
    """
    return render_template('index.html')
@app.route('/<target>')
@app.route('/<target>.html')
def nav_target_route(target):
    # Maps /foo and /foo.html to templates/foo.html; a missing template
    # raises TemplateNotFound (HTTP 500). NOTE(review): `target` comes
    # straight from the URL; Flask's default converter excludes '/',
    # which limits path traversal — confirm exposing every template this
    # way is intended.
    return render_template(target+'.html')
# Run the Flask development server when executed directly.
if __name__ == '__main__':
    app.run()
{
"point_num": 1,
"id": "all_return_types_annotated",
"question": "Does every function in this file have a return type annotation?",
"answer": false
},
{
"point_num": 2,
"id": "has_nested_function_def",
"question": "Does this file contain any function defined inside another function?... | 3 | templates/server.py | CAL-Origami/website |
"""The SRP Energy integration."""
import logging
from srpenergy.client import SrpEnergyClient
from homeassistant.config_entries import ConfigEntry
from homeassistant.const import CONF_ID, CONF_PASSWORD, CONF_USERNAME
from homeassistant.core import HomeAssistant
from homeassistant.exceptions import ConfigEntryNotReady
from .const import SRP_ENERGY_DOMAIN
_LOGGER = logging.getLogger(__name__)
PLATFORMS = ["sensor"]
async def async_setup(hass, config):
    """Old way of setting up the srp_energy component."""
    # Setup is handled via config entries instead; nothing to do here.
    return True
async def async_setup_entry(hass: HomeAssistant, entry: ConfigEntry) -> bool:
    """Set up the SRP Energy component from a config entry.

    Creates an SrpEnergyClient from the entry data, stores it under the
    integration's domain key, and forwards setup to the sensor platform.

    Raises:
        ConfigEntryNotReady: if the client cannot be constructed, so Home
        Assistant retries the setup later.
    """
    # Store an SrpEnergyClient object for your srp_energy to access
    try:
        srp_energy_client = SrpEnergyClient(
            entry.data.get(CONF_ID),
            entry.data.get(CONF_USERNAME),
            entry.data.get(CONF_PASSWORD),
        )
        hass.data[SRP_ENERGY_DOMAIN] = srp_energy_client
    # Broad catch is deliberate: any client failure should defer setup
    # instead of crashing startup. (Was `except (Exception)` — the
    # redundant parentheses are removed.)
    except Exception as ex:
        _LOGGER.error("Unable to connect to Srp Energy: %s", str(ex))
        raise ConfigEntryNotReady from ex
    hass.async_create_task(
        hass.config_entries.async_forward_entry_setup(entry, "sensor")
    )
    return True
async def async_unload_entry(hass: HomeAssistant, config_entry: ConfigEntry) -> bool:
    """Unload a config entry."""
    # unload srp client
    # NOTE(review): the client is cleared before the platform unload
    # completes, and the unload result is ignored — confirm intentional.
    hass.data[SRP_ENERGY_DOMAIN] = None
    # Remove config entry
    await hass.config_entries.async_forward_entry_unload(config_entry, "sensor")
    return True
| [
{
"point_num": 1,
"id": "all_function_names_snake_case",
"question": "Are all function names in this file written in snake_case?",
"answer": true
},
{
"point_num": 2,
"id": "all_params_annotated",
"question": "Does every function parameter in this file have a type annotation (excludi... | 3 | homeassistant/components/srp_energy/__init__.py | tbarbette/core |
from model import common
import torch.nn as nn
import torch.nn.init as init
# Pretrained-weight URLs keyed by "r{n_resblocks}f{n_feats}".
# An empty string means no weights are published for that configuration.
url = {
    'r20f64': ''
}
def make_model(args, parent=False):
    """Factory entry point used by the framework to build a VDSR model."""
    return VDSR(args)
class VDSR(nn.Module):
    """VDSR super-resolution network: a stack of conv(+ReLU) blocks with a
    global residual connection around the whole body."""

    def __init__(self, args, conv=common.default_conv):
        super(VDSR, self).__init__()

        n_resblocks = args.n_resblocks
        n_feats = args.n_feats
        kernel_size = 3
        # Pretrained-weight URL, if one is registered for this configuration.
        url_name = 'r{}f{}'.format(n_resblocks, n_feats)
        if url_name in url:
            self.url = url[url_name]
        else:
            self.url = None
        # Mean-shift layers: subtract the RGB mean on input, add it back on output.
        self.sub_mean = common.MeanShift(args.rgb_range)
        self.add_mean = common.MeanShift(args.rgb_range, sign=1)

        def basic_block(in_channels, out_channels, act):
            # Single conv (+ optional activation), no batch norm.
            return common.BasicBlock(
                conv, in_channels, out_channels, kernel_size,
                bias=True, bn=False, act=act
            )

        # define body module
        m_body = []
        m_body.append(basic_block(args.n_colors, n_feats, nn.ReLU(True)))
        for _ in range(n_resblocks - 2):
            m_body.append(basic_block(n_feats, n_feats, nn.ReLU(True)))
        m_body.append(basic_block(n_feats, args.n_colors, None))

        self.body = nn.Sequential(*m_body)

    def forward(self, x):
        """Predict the residual and add it to the mean-shifted input."""
        x = self.sub_mean(x)
        res = self.body(x)
        res += x
        x = self.add_mean(res)
        return x
# cd ..(src), export PYTHONPATH=`pwd`
# if __name__ == '__main__':
# import torch
# import utility
# from option import args
# torch.manual_seed(args.seed)
# checkpoint = utility.checkpoint(args)
# print(args)
# model = VDSR(args)
# print(model)
| [
{
"point_num": 1,
"id": "more_functions_than_classes",
"question": "Does this file define more functions than classes?",
"answer": true
},
{
"point_num": 2,
"id": "every_function_under_20_lines",
"question": "Is every function in this file shorter than 20 lines?",
"answer": false... | 3 | src/model/vdsr.py | delldu/EDSR |
"""Generates the offset dictionary for the SVO pipeline."""
from time import time as marktime
from typing import List
from itertools import groupby
from collections import defaultdict
def generate_svo_offsets(svo_list: List, time: List, minimum_offsets):
    """Creates offset dictionary and int-to-string lookup for SVO format.

    `svo_list` and `time` are parallel lists (one timestamp per SVO
    occurrence); `minimum_offsets` is the minimum number of occurrences an
    SVO must have to be kept. Returns (offsets, lookup) where `offsets`
    maps integer SVO ids to sorted, de-duplicated timestamps and `lookup`
    maps those ids back to the original SVO terms.
    """
    print("Generating Offsets:")
    start = marktime()
    # Group all timestamps by their SVO term.
    svo_dict = defaultdict(list)
    for i, svo in enumerate(svo_list):
        svo_dict[svo].append(time[i])
    svo_int_dict, lookup = text_to_int(svo_dict)
    # prune SVOs, excluding those with fewer occurrences than specified by minimum_offsets
    offsets = {
        k: v for k, v in svo_int_dict.items() if len(v) >= minimum_offsets
    }
    print("Finished offset generation in {} seconds".format(
        round(marktime() - start)))
    print("Commencing timestamp deduplication...")
    # increment simultaneous occurrences by 1 millisecond to satisfy Kleinberg requirements
    for item in offsets.keys():
        offsets[item].sort()
        offsets[item] = [
            g + i * 0.001
            for k, group in groupby(offsets[item])
            for i, g in enumerate(group)
        ]
    print("finished timestamp deduplication in {} seconds".format(
        round(marktime() - start)))
    print("Finished Generating Offsets. Returning offset dictionary.")
    return offsets, lookup
def text_to_int(svo_dict):
    """Converts SVO terms to integers, and generates a lookup dictionary.

    Each key of `svo_dict` is assigned a sequential integer id in
    insertion order. Returns (svo_int_dict, lookup_dict): id -> value
    list, and id -> original key. Improved: uses enumerate() instead of a
    hand-maintained counter variable.
    """
    svo_int_dict = defaultdict(list)
    lookup_dict = defaultdict(tuple)
    for i, (svo, timestamps) in enumerate(svo_dict.items()):
        svo_int_dict[i] = timestamps
        lookup_dict[i] = svo
    return svo_int_dict, lookup_dict
| [
{
"point_num": 1,
"id": "all_params_annotated",
"question": "Does every function parameter in this file have a type annotation (excluding self/cls)?",
"answer": false
},
{
"point_num": 2,
"id": "every_function_has_docstring",
"question": "Does every function in this file have a docst... | 3 | nate/svonet/svo_offsets.py | UWNETLAB/nelanna |
from typing import Dict, List, Any
from checkov.common.models.enums import CheckCategories, CheckResult
from checkov.terraform.checks.resource.base_resource_value_check import BaseResourceCheck
class S3BucketObjectLock(BaseResourceCheck):
    """CKV_AWS_143: verify that aws_s3_bucket has object lock enabled."""

    def __init__(self) -> None:
        name = "Ensure that S3 bucket has lock configuration enabled by default"
        id = "CKV_AWS_143"
        supported_resources = ["aws_s3_bucket"]
        categories = [CheckCategories.GENERAL_SECURITY]
        super().__init__(name=name, id=id, categories=categories, supported_resources=supported_resources)

    def scan_resource_conf(self, conf: Dict[str, List[Any]]) -> CheckResult:
        # PASSED only when object_lock_enabled is explicitly "Enabled"
        # (plain or single-element-list form, as produced by the HCL
        # parser); FAILED when a lock block exists without it; UNKNOWN when
        # the resource has no object_lock_configuration at all.
        lock_conf = conf.get("object_lock_configuration")
        if lock_conf and lock_conf[0]:
            lock_enabled = lock_conf[0].get("object_lock_enabled")
            if lock_enabled in ["Enabled", ["Enabled"]]:
                return CheckResult.PASSED
            return CheckResult.FAILED
        return CheckResult.UNKNOWN

    def get_evaluated_keys(self) -> List[str]:
        return ["object_lock_configuration/[0]/object_lock_enabled"]
check = S3BucketObjectLock()
| [
{
"point_num": 1,
"id": "every_function_under_20_lines",
"question": "Is every function in this file shorter than 20 lines?",
"answer": true
},
{
"point_num": 2,
"id": "all_function_names_snake_case",
"question": "Are all function names in this file written in snake_case?",
"answ... | 3 | checkov/terraform/checks/resource/aws/S3BucketObjectLock.py | jamesholland-uk/checkov |
# -*- coding:utf-8 -*-
# Copyright (C) 2020. Huawei Technologies Co., Ltd. All rights reserved.
# This program is free software; you can redistribute it and/or modify
# it under the terms of the MIT License.
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# MIT License for more details.
"""This is a SearchAlgorithm for resnet example."""
import random
import logging
from vega.core.common.class_factory import ClassFactory, ClassType
from vega.search_space.search_algs import SearchAlgorithm
@ClassFactory.register(ClassType.SEARCH_ALGORITHM)
class RandomSearch(SearchAlgorithm):
    """Random Search."""

    def __init__(self, search_space):
        """Initialize with the hyper-parameter space taken from config."""
        super(RandomSearch, self).__init__(search_space)
        # Mapping of hyper-parameter name -> list of candidate values.
        self.hyper_parameters = search_space.cfg.get('hyper_parameters') or {}
        self.count = 0

    def search(self):
        """Search a params."""
        self.count += 1
        # Sample one candidate value uniformly for each hyper-parameter.
        params = {}
        for param_key, param_values in self.hyper_parameters.items():
            params[param_key] = random.choice(param_values)
        logging.info("params:%s", params)
        return self.count, params

    def update_params(self, params):
        """Update params into search_space."""
        # Random search keeps no state between trials; nothing to update.
        return None

    @property
    def is_completed(self):
        """Make trail completed."""
        # Stop after three trials.
        return self.count > 2
| [
{
"point_num": 1,
"id": "more_functions_than_classes",
"question": "Does this file define more functions than classes?",
"answer": true
},
{
"point_num": 2,
"id": "all_function_names_snake_case",
"question": "Are all function names in this file written in snake_case?",
"answer": ... | 3 | built-in/TensorFlow/Official/cv/image_classification/ResnetVariant_for_TensorFlow/automl/vega/search_space/fine_grained_space/sample/random_search.py | Huawei-Ascend/modelzoo |
def queens(n):
    """Yield all solutions of the n-queens problem.

    Each solution is a tuple `a` of length n where a[i] is the column of
    the queen in row i. Uses in-place permutation backtracking with two
    boolean arrays marking free up- and down-diagonals.
    """
    a = list(range(n))
    up = [True]*(2*n - 1)    # up[p]: diagonal i+j == p is free
    down = [True]*(2*n - 1)  # down[q]: diagonal i-j+n-1 == q is free

    def sub(i):
        # Extend the partial permutation a[:i] to full solutions.
        if i == n:
            yield tuple(a)
        else:
            for k in range(i, n):
                j = a[k]
                p = i + j
                q = i - j + n - 1
                if up[p] and down[q]:
                    # Place queen (i, j): claim both diagonals, swap into place.
                    up[p] = down[q] = False
                    a[i], a[k] = a[k], a[i]
                    yield from sub(i + 1)
                    # Backtrack: release diagonals and undo the swap.
                    up[p] = down[q] = True
                    a[i], a[k] = a[k], a[i]

    yield from sub(0)
# Count solutions for n=8 — the classic answer is 92.
# Fixed: the original file contained a pasted REPL transcript (a sum()
# whose result was discarded, plus a bare `92` output literal). Keep the
# demonstration, but print the result and only run it as a script.
if __name__ == "__main__":
    print(sum(1 for p in queens(8)))  # expected: 92
| [
{
"point_num": 1,
"id": "has_nested_function_def",
"question": "Does this file contain any function defined inside another function?",
"answer": true
},
{
"point_num": 2,
"id": "every_function_under_20_lines",
"question": "Is every function in this file shorter than 20 lines?",
"... | 3 | lang/Python/n-queens-problem-6.py | ethansaxenian/RosettaDecode |
#!/usr/bin/env python
# encoding: utf-8
# Below is the interface for Iterator, which is already defined for you.
#
# class Iterator:
# def __init__(self, nums):
# """
# Initializes an iterator object to the beginning of a list.
# :type nums: List[int]
# """
#
# def hasNext(self):
# """
# Returns true if the iteration has more elements.
# :rtype: bool
# """
#
# def next(self):
# """
# Returns the next element in the iteration.
# :rtype: int
# """
class PeekingIterator:
    """Wraps an Iterator, adding peek() on top of next()/hasNext().

    A peeked value is cached until the following next() call. hasNext()
    reflects the underlying iterator's state as of construction or the
    last next() call (not an intervening peek).
    """

    def __init__(self, iterator):
        """
        Initialize your data structure here.
        :type iterator: Iterator
        """
        self.iterator = iterator
        self.val_ = None
        self.has_next_ = iterator.hasNext()
        self.has_peak_ = False

    def peek(self):
        """
        Returns the next element in the iteration without advancing the iterator.
        :rtype: int
        """
        if self.has_peak_:
            return self.val_
        # Pull one element ahead and cache it until next() is called.
        self.val_ = self.iterator.next()
        self.has_peak_ = True
        return self.val_

    def next(self):
        """
        :rtype: int
        """
        result = self.peek()
        self.has_peak_ = False
        self.has_next_ = self.iterator.hasNext()
        return result

    def hasNext(self):
        """
        :rtype: bool
        """
        return self.has_next_
# Your PeekingIterator object will be instantiated and called as such:
# iter = PeekingIterator(Iterator(nums))
# while iter.hasNext():
# val = iter.peek() # Get the next element but not advance the iterator.
# iter.next() # Should return the same value as [val].
| [
{
"point_num": 1,
"id": "has_multiple_inheritance",
"question": "Does any class in this file use multiple inheritance?",
"answer": false
},
{
"point_num": 2,
"id": "every_function_has_docstring",
"question": "Does every function in this file have a docstring?",
"answer": true
}... | 3 | Sec24_Design/q0284.py | OctoberChang/LeetCode-Solutions |
import sys
import struct
from math import sqrt
def cross(a, b):
    """Cross product of two 3-vectors, returned as a list."""
    ax, ay, az = a[0], a[1], a[2]
    bx, by, bz = b[0], b[1], b[2]
    return [ay * bz - az * by, az * bx - ax * bz, ax * by - ay * bx]
def dot(a, b):
    """Dot product of two 3-vectors (only the first three components)."""
    return sum(a[k] * b[k] for k in range(3))
def normalized(a):
    """Return a unit-length copy of the 3-vector a."""
    inv_len = 1 / sqrt(dot(a, a))
    return [a[0] * inv_len, a[1] * inv_len, a[2] * inv_len]
def mul(m, a):
    """Multiply a 3x3 matrix (list of row vectors) by a column 3-vector."""
    return [dot(row, a) for row in m]
def opp(a):
    """Component-wise negation of a 3-vector."""
    return [-a[0], -a[1], -a[2]]
def lookFrom(p):
    """Build a 4x4 view matrix for a camera at position p looking toward the origin.

    The camera z axis is p itself (assumes p is unit length — TODO confirm);
    x is perpendicular to world-up (0,0,1) and z; y completes the basis.
    NOTE(review): degenerate when p is parallel to (0,0,1) — cross() yields
    the zero vector and normalized() divides by zero.
    """
    z = p
    x = normalized(cross([0,0,1], z))
    y = normalized(cross(z, x))
    # Translation column: rotated, negated camera position.
    invp = opp(mul([x, y, z], p))
    return [
        [x[0], x[1], x[2], invp[0]],
        [y[0], y[1], y[2], invp[1]],
        [z[0], z[1], z[2], invp[2]],
        [0, 0, 0, 1],
    ]
def write_view_matrix(inputFilename, outputFilepath):
    """Convert a text file of camera positions ("x y z" per line) into a
    binary file of packed 4x4 view matrices (16 float32 values each,
    written column-major). Exits with status 1 on a malformed line."""
    with open(outputFilepath, 'wb') as outFile:
        for i, line in enumerate(open(inputFilename, 'r')):
            coords = [float(x) for x in line.split()]
            if len(coords) != 3:
                print("Unable to parse line: %s " % line)
                exit(1)
            mat = lookFrom(coords)
            print(mat)
            # Transpose row-major mat to column-major order (j outer, i inner).
            column_major_data = tuple(mat[i][j] for j in range(4) for i in range(4))
            outFile.write(struct.pack("f"*16, *column_major_data))
# CLI: python script.py [input.xyz] [output.bin]
if __name__ == "__main__":
    inputFilename = sys.argv[1] if len(sys.argv) > 1 else "octahedron.xyz"
    outputFilepath = sys.argv[2] if len(sys.argv) > 2 else "octahedron_camera.bin"
    write_view_matrix(inputFilename, outputFilepath)
| [
{
"point_num": 1,
"id": "every_function_has_docstring",
"question": "Does every function in this file have a docstring?",
"answer": false
},
{
"point_num": 2,
"id": "all_function_names_snake_case",
"question": "Are all function names in this file written in snake_case?",
"answer"... | 3 | share/scripts/augen_octahedron2camera.py | eliemichel/GrainViewer |
import os
import torch
from weaver.utils.logger import _logger
from weaver.utils.import_tools import import_module
ParticleTransformer = import_module(
os.path.join(os.path.dirname(__file__), 'ParticleTransformer.py'), 'ParT').ParticleTransformer
class ParticleTransformerWrapper(torch.nn.Module):
    """Thin wrapper adapting ParticleTransformer to the weaver model interface."""

    def __init__(self, **kwargs) -> None:
        super().__init__()
        self.mod = ParticleTransformer(**kwargs)

    @torch.jit.ignore
    def no_weight_decay(self):
        # Exclude the learned class token from weight decay.
        return {'mod.cls_token', }

    def forward(self, points, features, lorentz_vectors, mask):
        # `points` is accepted for interface compatibility but not used.
        return self.mod(features, v=lorentz_vectors, mask=mask)
def get_model(data_config, **kwargs):
    """Build the ParticleTransformer model plus its export metadata.

    Returns (model, model_info); model_info carries input/output names,
    shapes and dynamic axes used for ONNX export.
    """
    cfg = dict(
        input_dim=len(data_config.input_dicts['pf_features']),
        num_classes=len(data_config.label_value),
        # network configurations
        pair_input_dim=4,
        embed_dims=[128, 512, 128],
        pair_embed_dims=[64, 64, 64],
        num_heads=8,
        num_layers=8,
        num_cls_layers=2,
        block_params=None,
        cls_block_params={'dropout': 0, 'attn_dropout': 0, 'activation_dropout': 0},
        fc_params=[],
        activation='gelu',
        # misc
        trim=True,
        for_inference=False,
    )
    # Caller-supplied kwargs override the defaults above.
    cfg.update(**kwargs)
    _logger.info('Model config: %s' % str(cfg))

    model = ParticleTransformerWrapper(**cfg)

    model_info = {
        'input_names': list(data_config.input_names),
        'input_shapes': {k: ((1,) + s[1:]) for k, s in data_config.input_shapes.items()},
        'output_names': ['softmax'],
        'dynamic_axes': {**{k: {0: 'N', 2: 'n_' + k.split('_')[0]} for k in data_config.input_names}, **{'softmax': {0: 'N'}}},
    }

    return model, model_info
def get_loss(data_config, **kwargs):
    """Classification loss applied to the model's logit output."""
    return torch.nn.CrossEntropyLoss()
| [
{
"point_num": 1,
"id": "every_function_under_20_lines",
"question": "Is every function in this file shorter than 20 lines?",
"answer": false
},
{
"point_num": 2,
"id": "all_function_names_snake_case",
"question": "Are all function names in this file written in snake_case?",
"ans... | 3 | networks/example_ParticleTransformer.py | jet-universe/particle_transformer |
import pytest
from django.urls import resolve, reverse
from car_rental.users.models import User
pytestmark = pytest.mark.django_db
def test_user_detail(user: User):
    """Detail URL reverses to /api/users/<username>/ and resolves back."""
    assert (
        reverse("api:user-detail", kwargs={"username": user.username})
        == f"/api/users/{user.username}/"
    )
    assert resolve(f"/api/users/{user.username}/").view_name == "api:user-detail"
def test_user_list():
    """List URL reverses to /api/users/ and resolves back."""
    assert reverse("api:user-list") == "/api/users/"
    assert resolve("/api/users/").view_name == "api:user-list"
def test_user_me():
    """Current-user URL reverses to /api/users/me/ and resolves back."""
    assert reverse("api:user-me") == "/api/users/me/"
    assert resolve("/api/users/me/").view_name == "api:user-me"
| [
{
"point_num": 1,
"id": "every_function_has_docstring",
"question": "Does every function in this file have a docstring?",
"answer": false
},
{
"point_num": 2,
"id": "all_function_names_snake_case",
"question": "Are all function names in this file written in snake_case?",
"answer"... | 3 | car_rental/car_rental/users/tests/test_drf_urls.py | TheHaRyPL/Car-rental |
# -*- coding: utf-8 -*-
import cherrypy
__all__ = ['Jinja2Tool']
class Jinja2Tool(cherrypy.Tool):
    """CherryPy tool that renders a handler's dict result with a Jinja2 template."""

    def __init__(self):
        # Hook late into before_finalize (priority 10), after the handler ran.
        cherrypy.Tool.__init__(self, 'before_finalize',
                               self._render,
                               priority=10)

    def _render(self, template=None, debug=False):
        """
        Applied once your page handler has been called. It
        looks up the template from the various template directories
        defined in the Jinja2 plugin then renders it with
        whatever dictionary the page handler returned.
        """
        # Skip error responses. NOTE(review): this assumes response.status
        # is numeric; CherryPy can also hold it as a string — confirm.
        if cherrypy.response.status > 399:
            return

        # retrieve the data returned by the handler
        data = cherrypy.response.body or {}
        template = cherrypy.engine.publish("lookup-template", template).pop()

        if template and isinstance(data, dict):
            cherrypy.response.body = template.render(**data)
{
"point_num": 1,
"id": "every_function_has_docstring",
"question": "Does every function in this file have a docstring?",
"answer": false
},
{
"point_num": 2,
"id": "every_function_under_20_lines",
"question": "Is every function in this file shorter than 20 lines?",
"answer": tru... | 3 | jinja2tool.py | sborgeson/building-data-analysis |
# Add common test utils, which can be used by all test scripts..
from netaddr import IPNetwork
def assertEqual(a, b, error_msg):
    """Fail with `error_msg` unless `a` equals `b`."""
    matches = (a == b)
    assert matches, error_msg
def get_ip_list_from_prefix(prefix):
    """Return all host IPs in the prefix as a list of strings.

    Fixed: under Python 3, map() returns a lazy iterator, which broke
    callers that index the result (e.g. get_min_max_ip_from_prefix).
    Materializing as a list keeps Python 2 and 3 behavior identical.
    """
    return list(map(str, IPNetwork(prefix).iter_hosts()))
def get_min_max_ip_from_prefix(prefix):
    """Return [lowest_host_ip, highest_host_ip] for the prefix.

    Fixed: materialize the (possibly lazy, Python 3 map) host sequence
    before indexing, so this works regardless of what
    get_ip_list_from_prefix returns.
    """
    ip_list = list(get_ip_list_from_prefix(prefix))
    min_ip = ip_list[0]
    max_ip = ip_list[-1]
    return [min_ip, max_ip]
| [
{
"point_num": 1,
"id": "all_params_annotated",
"question": "Does every function parameter in this file have a type annotation (excluding self/cls)?",
"answer": false
},
{
"point_num": 2,
"id": "no_function_exceeds_5_params",
"question": "Does every function in this file take 5 or fe... | 3 | tcutils/test_lib/test_utils.py | lmadhusudhanan/contrail-test |
import pytest
from homework import tasks
from homework.models import AnswerCrossCheck
pytestmark = [pytest.mark.django_db]
def test_crosschecks_are_created(question_dispatcher):
    """Dispatching the question creates the expected cross-check records."""
    question_dispatcher()
    assert AnswerCrossCheck.objects.count() == 2
def test_question_method_does_the_same(question):
    """The model method is equivalent to the dispatcher fixture."""
    question.dispatch_crosscheck(answers_per_user=1)
    assert AnswerCrossCheck.objects.count() == 2
def test_task_does_the_same(question):
    """The celery task is equivalent as well."""
    # NOTE(review): "disptach" mirrors the task's actual (misspelled) name
    # in homework.tasks — renaming must happen there first.
    tasks.disptach_crosscheck.delay(question_id=question.pk, answers_per_user=1)
    assert AnswerCrossCheck.objects.count() == 2
def test_email_is_sent(question_dispatcher, send_mail, mocker, answers):
    """Each author receives a 'new-answers-to-check' email listing answers to review."""
    question_dispatcher()
    assert send_mail.call_count == 2
    send_mail.assert_has_calls([
        mocker.call(
            to=answers[0].author.email,
            template_id='new-answers-to-check',
            disable_antispam=True,
            ctx={
                'answers': [
                    {
                        'url': mocker.ANY,
                        'text': mocker.ANY,
                    },
                ],
            },
        ),
    ])
| [
{
"point_num": 1,
"id": "all_function_names_snake_case",
"question": "Are all function names in this file written in snake_case?",
"answer": true
},
{
"point_num": 2,
"id": "every_function_under_20_lines",
"question": "Is every function in this file shorter than 20 lines?",
"answ... | 3 | src/homework/tests/cross_check/tests_question_crosscheck_dispatcher.py | denkasyanov/education-backend |
#!/usr/bin/env python
# Licensed to Cloudera, Inc. under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. Cloudera, Inc. licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import absolute_import
import errno
import logging
import os.path
from hadoop import conf
from hadoop import confparse
_SSL_SITE_PATH = None # Path to ssl-client.xml
_SSL_SITE_DICT = None # A dictionary of name/value config options
_CNF_TRUSTORE_LOCATION = 'ssl.client.truststore.location'
LOG = logging.getLogger(__name__)
def reset():
    """Forget the cached ssl-client.xml so the next get_conf() re-parses it."""
    global _SSL_SITE_DICT
    _SSL_SITE_DICT = None
def get_conf():
    """Return the parsed ssl-client.xml configuration, parsing it on first use."""
    if _SSL_SITE_DICT is None:
        _parse_ssl_client_site()
    return _SSL_SITE_DICT
def _parse_ssl_client_site():
    """Locate and read ssl-client.xml from the first HDFS cluster that has one.

    Falls back to an empty configuration when no cluster provides the file
    or it cannot be read; aborts (leaving the cache unset) on an
    unexpected read error.
    """
    global _SSL_SITE_DICT
    global _SSL_SITE_PATH

    for indentifier in conf.HDFS_CLUSTERS.get():
        try:
            _SSL_SITE_PATH = os.path.join(conf.HDFS_CLUSTERS[indentifier].HADOOP_CONF_DIR.get(), 'ssl-client.xml')
            # Fixed: use open() in a context manager instead of the
            # Python 2-only file() builtin, which also leaked the handle.
            with open(_SSL_SITE_PATH, 'r') as f:
                data = f.read()
            break
        except KeyError:
            data = ""
        except IOError as err:
            if err.errno != errno.ENOENT:
                LOG.error('Cannot read from "%s": %s' % (_SSL_SITE_PATH, err))
                return
            # Keep going and make an empty ConfParse
            data = ""

    _SSL_SITE_DICT = confparse.ConfParse(data)
def get_trustore_location():
    """Return the configured client truststore path, or None if unset."""
    return get_conf().get(_CNF_TRUSTORE_LOCATION)
| [
{
"point_num": 1,
"id": "every_function_has_docstring",
"question": "Does every function in this file have a docstring?",
"answer": false
},
{
"point_num": 2,
"id": "all_function_names_snake_case",
"question": "Are all function names in this file written in snake_case?",
"answer"... | 3 | desktop/libs/hadoop/src/hadoop/ssl_client_site.py | sandeepreddy3647/hue-1 |
from talon import Module, screen, ui, actions
from talon.engine import engine
import os
import platform
active_platform = platform.platform(terse=True)
mod = Module()
@mod.action_class
class Actions:
    # Talon action methods take no `self`; the decorator registers them.
    def screenshot():
        '''takes a screenshot of the entire screen and saves it to the desktop as screenshot.png'''
        img = screen.capture_rect(screen.main_screen().rect)
        path = os.path.expanduser(os.path.join('~', 'Desktop', 'screenshot.png'))
        img.write_file(path)

    def screenshot_window():
        '''takes a screenshot of the current window and saves it to the desktop as screenshot.png'''
        img = screen.capture_rect(ui.active_window().rect)
        path = os.path.expanduser(os.path.join('~', 'Desktop', 'screenshot.png'))
        img.write_file(path)

    def screenshot_selection():
        '''triggers an OS tool capable of taking a screenshot of a portion of the screen'''
        # Delegates to the OS snipping shortcut; no-op on other platforms.
        if "Windows-10" in active_platform:
            actions.key("super-shift-s")
        elif "Darwin" in active_platform:
            actions.key("ctrl-shift-cmd-4")

    def screenshot_clipboard():
        '''takes a screenshot of the entire screen and saves it to the clipboard'''
        if "Windows-10" in active_platform:
            engine.mimic("press print screen")
        elif "Darwin" in active_platform:
            actions.key("ctrl-shift-cmd-3")

    def screenshot_window_clipboard():
        '''takes a screenshot of the window and saves it to the clipboard'''
        if "Windows-10" in active_platform:
            engine.mimic("press control alt print screen")
        elif "Darwin" in active_platform:
            actions.key("ctrl-shift-cmd-4")
            # macOS: after the selection shortcut, space switches the
            # crosshair to window-capture mode.
            actions.sleep("50ms")
            actions.key("space")
{
"point_num": 1,
"id": "all_function_names_snake_case",
"question": "Are all function names in this file written in snake_case?",
"answer": true
},
{
"point_num": 2,
"id": "has_multiple_inheritance",
"question": "Does any class in this file use multiple inheritance?",
"answer": ... | 3 | code/screenshot.py | frenchy64/knausj_talon |
# -*- coding: utf-8 -*-
#
# Copyright (C) 2017 Edgewall Software
# All rights reserved.
#
# This software is licensed as described in the file COPYING, which
# you should have received as part of this distribution. The terms
# are also available at http://trac.edgewall.com/license.html.
#
# This software consists of voluntary contributions made by many
# individuals. For the exact contribution history, see the revision
# history and logs, available at http://trac.edgewall.org/.
import re
from trac.upgrades import backup_config_file
def do_upgrade(env, version, cursor):
    """Change [notification] ticket_subject_template and [notification]
    batch_subject_template to use syntax compatible with Jinja2.
    """
    config = env.config
    section = 'notification'
    var_pattern = re.compile(r'\$([\w.]+)')

    def rewrite_option(option):
        # Returns True when the option existed and was rewritten.
        value = config.get(section, option)
        if not value or not var_pattern.match(value):
            return False
        replacement = var_pattern.sub(r'${\1}', value)
        env.log.info("Replaced value of [%s] %s: %s -> %s",
                     section, option, value, replacement)
        config.set(section, option, replacement)
        return True

    # Rewrite both options unconditionally, then save once if anything changed.
    changed = [rewrite_option(name)
               for name in ('ticket_subject_template', 'batch_subject_template')]
    if any(changed):
        backup_config_file(env, '.db45.bak')
        config.save()
| [
{
"point_num": 1,
"id": "has_nested_function_def",
"question": "Does this file contain any function defined inside another function?",
"answer": true
},
{
"point_num": 2,
"id": "all_params_annotated",
"question": "Does every function parameter in this file have a type annotation (exc... | 3 | trac/upgrades/db45.py | NetSpida/trac |
"""
Module to create a file with the file sizes
From : http://mayankjohri.wordpress.com/2008/07/02/create-list-of-files-in-a-dir-tree/
"""
import os
import sys
from argparse import ArgumentParser
LIST_FILE = "file_sizes.txt"
def get_filesize(path):
    """Return a sorted list of (file_path, size_in_bytes) pairs for every
    file found under *path* (recursively)."""
    entries = []
    for dirpath, _dirs, filenames in os.walk(path):
        entries.extend(
            (os.path.join(dirpath, name),
             os.path.getsize(os.path.join(dirpath, name)))
            for name in filenames
        )
    return sorted(entries)
def create_filelist(list_filename=LIST_FILE, path="."):
    """Write one "<file_path> <size>" line per file under *path* to *list_filename*.

    Returns None; prints a message and aborts if the output file cannot be
    opened.
    """
    try:
        list_file = open(list_filename, 'w')
    except IOError:
        print("Unable to open file: ", list_filename)
        return None
    # "with" guarantees the handle is closed even if a walk/stat call raises;
    # the original leaked the handle on any exception during writing.
    with list_file:
        for file_path, size in get_filesize(path):
            list_file.write(" ".join((file_path, str(size), "\n")))
def create_arguments(parser):
    """Register the command-line options on *parser*."""
    for flags, description in (
            (("-f", "--list_file"), "file to print the list to"),
            (("-p", "--path"), "path to the files")):
        parser.add_argument(*flags, help=description)
def main():
    """Parse command-line options and write the file-size listing.

    Both --list_file and --path are required; prints usage hints and exits
    early when either is missing.
    """
    parser = ArgumentParser(description='Process arguments')
    create_arguments(parser)
    args = parser.parse_args()
    if not args.list_file or not args.path:
        # Typo fixes: "Eneter" -> "enter", "Exitting" -> "Exiting".
        print("Please enter the list_file and path.\n Exiting...")
        print(args.list_file, args.path)
        return
    list_file = args.list_file
    path = args.path
    print("list_file", list_file, 'path', path)
    create_filelist(list_file, path)


if __name__ == "__main__":
    sys.exit(main())
| [
{
"point_num": 1,
"id": "all_params_annotated",
"question": "Does every function parameter in this file have a type annotation (excluding self/cls)?",
"answer": false
},
{
"point_num": 2,
"id": "all_function_names_snake_case",
"question": "Are all function names in this file written ... | 3 | dist/server/list_filesize.py | teru01/AStream |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
from flask import make_response, render_template
from app import app, ENV
@app.errorhandler(404)
def not_found(error):
    """Render the shared error page with a 404 status."""
    message = "404 Not found"
    body = render_template("error.html", title=ENV["sitename"], error=message)
    return make_response(body, 404)
@app.errorhandler(400)
def bad_request(error):
    """Render the shared error page with a 400 status."""
    message = "400 Bad Request"
    body = render_template("error.html", title=ENV["sitename"], error=message)
    return make_response(body, 400)
@app.errorhandler(500)
def server_error(error):
    """Render the shared error page with a 500 status."""
    message = "500 Internal server error"
    body = render_template("error.html", title=ENV["sitename"], error=message)
    return make_response(body, 500)
| [
{
"point_num": 1,
"id": "every_function_under_20_lines",
"question": "Is every function in this file shorter than 20 lines?",
"answer": true
},
{
"point_num": 2,
"id": "every_function_has_docstring",
"question": "Does every function in this file have a docstring?",
"answer": fals... | 3 | app/errorHandlers.py | L-Nafaryus/elnafo |
import cpboard
import periphery
import pytest
import smbus
import sys
def pytest_addoption(parser):
    """Register the i2cslave command-line options with pytest."""
    group = parser.getgroup('i2cslave')
    group.addoption("--bus", dest='i2cbus', type=int,
                    help='I2C bus number')
    group.addoption("--serial-wait", dest='serial_wait', type=int, default=20,
                    help='Number of milliseconds to wait before checking board output (default: 20ms)')
    group.addoption("--smbus-timeout", dest='smbus_timeout', type=bool, default=True,
                    help='Use SMBUS timeout limit (default: True)')
@pytest.fixture(scope='session')
def board(request):
    """Session-scoped CircuitPython board, opened and REPL-reset once."""
    cp_board = cpboard.CPboard.from_try_all(request.config.option.boarddev)
    cp_board.open()
    cp_board.repl.reset()
    return cp_board
class I2C:
    """Thin context-manager wrapper around a periphery I2C bus device."""

    # Re-exported so callers can build messages as I2C.Message(...).
    Message = periphery.I2C.Message

    def __init__(self, bus):
        self.bus = periphery.I2C('/dev/i2c-%d' % bus)

    def __enter__(self):
        return self

    def __exit__(self, t, value, traceback):
        self.close()

    def close(self):
        """Release the underlying bus device."""
        self.bus.close()

    def transfer(self, address, messages):
        #__tracebackhide__ = True # Hide this from pytest traceback
        self.bus.transfer(address, messages)

    def read(self, address, n):
        """Read *n* bytes from *address* and return them."""
        msgs = [I2C.Message([0] * n, read=True)]
        self.transfer(address, msgs)
        return msgs[0].data

    def write(self, address, data):
        """Write *data* to *address*."""
        self.transfer(address, [I2C.Message(data)])

    def write_read(self, address, data, n):
        """Write *data*, then read *n* bytes back in a single transaction."""
        msgs = [I2C.Message(data), I2C.Message([0] * n, read=True)]
        self.transfer(address, msgs)
        return msgs[1].data
@pytest.fixture
def i2cbus(request):
    """Per-test I2C wrapper bound to the --bus command-line option."""
    return I2C(request.config.option.i2cbus)
| [
{
"point_num": 1,
"id": "has_multiple_inheritance",
"question": "Does any class in this file use multiple inheritance?",
"answer": false
},
{
"point_num": 2,
"id": "every_function_has_docstring",
"question": "Does every function in this file have a docstring?",
"answer": false
... | 3 | tests/i2cslave/conftest.py | notro/cp-smbusslave |
import osgtest.library.core as core
import osgtest.library.files as files
import osgtest.library.mysql as mysql
import osgtest.library.osgunittest as osgunittest
import osgtest.library.service as service
class TestStopSlurm(osgunittest.OSGTestCase):
    """Stop the slurm services started earlier in the suite and restore their
    configuration files. Statement order matters: services must be stopped
    while their configs are still in place."""

    def slurm_reqs(self):
        """Skip the test unless the slurm packages are installed."""
        core.skip_ok_unless_installed(*core.SLURM_PACKAGES)

    def test_01_stop_slurm(self):
        self.slurm_reqs()
        self.skip_ok_unless(core.state['%s.started-service' % core.config['slurm.service-name']], 'did not start slurm')
        service.check_stop(core.config['slurm.service-name']) # service requires config so we stop it first
        service.check_stop(core.config['slurm.ctld-service-name'])
        files.restore(core.config['slurm.config'], 'slurm')
        files.restore(core.config['cgroup.config'], 'slurm')
        files.restore(core.config['cgroup_allowed_devices_file.conf'], 'slurm')

    def test_02_stop_slurmdbd(self):
        self.slurm_reqs()
        core.skip_ok_unless_installed('slurm-slurmdbd')
        self.skip_ok_unless(core.state['slurmdbd.started-service'], 'did not start slurmdbd')
        # service requires config so we stop it first; use stop() since slurmdbd fails to remove pid file
        service.stop('slurmdbd')
        files.restore(core.config['slurmdbd.config'], 'slurm')
        # Drop the accounting database and user created during suite setup.
        mysql.check_execute("drop database %s; " % core.config['slurmdbd.name'] + \
                            "drop user %s;" % core.config['slurmdbd.user'],
                            'drop mysql slurmdb')
| [
{
"point_num": 1,
"id": "has_multiple_inheritance",
"question": "Does any class in this file use multiple inheritance?",
"answer": false
},
{
"point_num": 2,
"id": "more_functions_than_classes",
"question": "Does this file define more functions than classes?",
"answer": true
},... | 3 | osgtest/tests/test_740_slurm.py | djw8605/osg-test |
#!/usr/bin/env python3
# coding: utf-8
import numpy as np
from .ddfa import _parse_param
from .params import u_filter, w_filter, w_exp_filter, std_size, param_mean, param_std
def reconstruct_paf_anchor(param, whitening=True):
    """Recover 2D PAF anchor coordinates from a (possibly whitened) 3DMM
    parameter vector."""
    if whitening:
        param = param * param_std + param_mean
    p, offset, alpha_shp, alpha_exp = _parse_param(param)
    shape = (u_filter + w_filter @ alpha_shp + w_exp_filter @ alpha_exp).reshape(3, -1, order='F')
    anchor = p @ shape + offset
    # Flip the vertical axis into image coordinates.
    anchor[1, :] = std_size + 1 - anchor[1, :]
    return anchor[:2, :]
def gen_offsets(kernel_size):
    """Return a (2, kernel_size**2) integer array of (x, y) offsets covering a
    kernel_size x kernel_size window centred on the origin, in row-major order.
    """
    # np.int was deprecated in NumPy 1.20 and removed in 1.24; the builtin
    # int is the documented replacement.
    offsets = np.zeros((2, kernel_size * kernel_size), dtype=int)
    delta = (kernel_size - 1) // 2
    ind = 0
    for i in range(kernel_size):
        y = i - delta
        for j in range(kernel_size):
            offsets[0, ind] = j - delta
            offsets[1, ind] = y
            ind += 1
    return offsets
def gen_img_paf(img_crop, param, kernel_size=3):
    """Generate PAF image
    img_crop: 120x120
    kernel_size: kernel_size for convolution, should be even number like 3 or 5 or ...
    """
    anchor = reconstruct_paf_anchor(param)
    # np.int was removed in NumPy 1.24; the builtin int is the replacement.
    anchor = np.round(anchor).astype(int)
    delta = (kernel_size - 1) // 2
    # Clamp anchors so every offset window stays inside the crop.
    anchor[anchor < delta] = delta
    anchor[anchor >= std_size - delta - 1] = std_size - delta - 1
    img_paf = np.zeros((64 * kernel_size, 64 * kernel_size, 3), dtype=np.uint8)
    offsets = gen_offsets(kernel_size)
    for i in range(kernel_size * kernel_size):
        ox, oy = offsets[:, i]
        index0 = anchor[0] + ox
        index1 = anchor[1] + oy
        # Interleave each shifted 64x64 sample into the output grid.
        p = img_crop[index1, index0].reshape(64, 64, 3).transpose(1, 0, 2)
        img_paf[oy + delta::kernel_size, ox + delta::kernel_size] = p
    return img_paf
def main():
    """Placeholder CLI entry point; this module is meant to be imported."""
    pass


if __name__ == '__main__':
    main()
| [
{
"point_num": 1,
"id": "every_function_under_20_lines",
"question": "Is every function in this file shorter than 20 lines?",
"answer": false
},
{
"point_num": 2,
"id": "all_params_annotated",
"question": "Does every function parameter in this file have a type annotation (excluding s... | 3 | utils/paf.py | lexical-kenobi/Face-Vision-3D_Pose |
import unittest
import tkinter
from test.support import requires, swap_attr
from tkinter.test.support import AbstractDefaultRootTest
from tkinter.simpledialog import Dialog, askinteger
requires('gui')
class DefaultRootTest(AbstractDefaultRootTest, unittest.TestCase):
    """Exercise askinteger's interaction with the implicit default root."""

    def test_askinteger(self):
        @staticmethod
        def mock_wait_window(w):
            # Record whether the dialog's parent was mapped, then destroy the
            # dialog immediately instead of blocking in wait_window().
            nonlocal ismapped
            ismapped = w.master.winfo_ismapped()
            w.destroy()
        with swap_attr(Dialog, 'wait_window', mock_wait_window):
            # No explicit root yet: the dialog's parent is not mapped.
            ismapped = None
            askinteger("Go To Line", "Line number")
            self.assertEqual(ismapped, False)

            # With a live default root the parent is mapped.
            root = tkinter.Tk()
            ismapped = None
            askinteger("Go To Line", "Line number")
            self.assertEqual(ismapped, True)
            root.destroy()

            # After NoDefaultRoot() askinteger must refuse to run.
            tkinter.NoDefaultRoot()
            self.assertRaises(RuntimeError, askinteger, "Go To Line", "Line number")


if __name__ == "__main__":
    unittest.main()
| [
{
"point_num": 1,
"id": "has_multiple_inheritance",
"question": "Does any class in this file use multiple inheritance?",
"answer": true
},
{
"point_num": 2,
"id": "more_functions_than_classes",
"question": "Does this file define more functions than classes?",
"answer": true
},
... | 3 | resources/WPy32/python-3.10.2/Lib/tkinter/test/test_tkinter/test_simpledialog.py | eladkarako/yt-dlp_kit |
import torch
from torch import nn
# LapSRN, 拉普拉斯金字塔结构, 2017
class LapSRN(nn.Module):
    """LapSRN (2017): Laplacian-pyramid super-resolution network with
    weight-shared per-level branches."""

    def __init__(self, fea_chan=64, scale=2, conv_num=3):
        super(LapSRN, self).__init__()
        # One pyramid level per factor-of-2 in the requested scale.
        self.level_num = int(scale / 2)
        self.share_ski_upsample = nn.ConvTranspose2d(1, 1, 4, stride=scale, padding=1)
        self.input_conv = nn.Conv2d(1, fea_chan, 3, padding=1)
        layers = []
        for _ in range(conv_num):
            layers += [nn.Conv2d(fea_chan, fea_chan, 3, padding=1),
                       nn.LeakyReLU(0.2, True)]
        self.share_embedding = nn.Sequential(*layers)
        self.share_fea_upsample = nn.ConvTranspose2d(fea_chan, fea_chan, 4, stride=scale, padding=1)
        self.share_output_conv = nn.Conv2d(fea_chan, 1, 3, padding=1)

    def forward(self, img):
        features = self.input_conv(img)
        for _ in range(self.level_num):
            # Skip branch: upsample the current image directly.
            skip = self.share_ski_upsample(img)
            # Feature branch: embed, upsample, project back to one channel.
            upsampled = self.share_fea_upsample(self.share_embedding(features))
            features = upsampled
            img = self.share_output_conv(upsampled) + skip
        return img
class L1_Charbonnier_loss(torch.nn.Module):
    """L1 Charbonnier loss: mean of sqrt((X - Y)^2 + eps)."""

    def __init__(self):
        super(L1_Charbonnier_loss, self).__init__()
        # Small constant keeps the gradient finite at zero error.
        self.eps = 1e-6

    def forward(self, X, Y):
        """Return the mean Charbonnier penalty between X and Y."""
        residual = X - Y
        return torch.mean(torch.sqrt(residual * residual + self.eps))
| [
{
"point_num": 1,
"id": "has_multiple_inheritance",
"question": "Does any class in this file use multiple inheritance?",
"answer": false
},
{
"point_num": 2,
"id": "all_params_annotated",
"question": "Does every function parameter in this file have a type annotation (excluding self/c... | 3 | Networks/LapSRN/LapSRN.py | ZFhuang/DiveIntoDLSketches |
class Backend:
    """Base class for messenger backends; subclasses override the perform_*
    hooks, which raise NotImplementedError by default."""

    async def on_start(self, app):
        """Lifecycle hook invoked once when the application starts."""
        pass

    async def on_shutdown(self, app):
        """Lifecycle hook invoked once when the application stops."""
        pass

    def prepare_context(self, ctx):
        """Hook for enriching the per-update context; no-op by default."""
        pass

    async def perform_updates_request(self, submit_update):
        raise NotImplementedError

    async def perform_send(self, target_id, message, attachments, kwargs):
        raise NotImplementedError

    async def perform_api_call(self, method, kwargs):
        raise NotImplementedError

    @classmethod
    def get_identity(cls):
        """Return this backend's identity: its lower-cased class name."""
        return cls.__name__.lower()
| [
{
"point_num": 1,
"id": "no_function_exceeds_5_params",
"question": "Does every function in this file take 5 or fewer parameters (excluding self/cls)?",
"answer": true
},
{
"point_num": 2,
"id": "all_function_names_snake_case",
"question": "Are all function names in this file written... | 3 | kutana/backend.py | sakost/kutana |
import functools
from typing import Any, Callable
# Module-level registry of the last result produced per class name.
opts = {}


class Registry(object):
    """Decorator that records each wrapped call's result in ``opts``, keyed
    by the class name of the call's first positional argument.
    """

    def __init__(self, key: str):
        # NOTE(review): self.key is stored but never read here; presumably
        # consumed by external code.
        self.key = key

    def __call__(self, func: Callable[[], Any]) -> Callable[[], Any]:
        @functools.wraps(func)  # preserve the wrapped function's metadata
        def wrapper(*args, **kwargs) -> Any:
            key = args[0].__class__.__name__
            result = func(*args, **kwargs)
            opts[key] = result
            return result
        return wrapper
| [
{
"point_num": 1,
"id": "more_functions_than_classes",
"question": "Does this file define more functions than classes?",
"answer": true
},
{
"point_num": 2,
"id": "has_multiple_inheritance",
"question": "Does any class in this file use multiple inheritance?",
"answer": false
},... | 3 | lm/utils/registry.py | akashmehra/nlp |
from . import FixtureTest
class AirportIataCodes(FixtureTest):
    """Check that aerodrome POIs carry their IATA codes."""

    def test_sfo(self):
        # San Francisco International
        self.load_fixtures(['https://www.openstreetmap.org/way/23718192'])
        expected = {'kind': 'aerodrome', 'iata': 'SFO'}
        self.assert_has_feature(13, 1311, 3170, 'pois', expected)

    def test_oak(self):
        # Oakland airport
        self.load_fixtures(['https://www.openstreetmap.org/way/54363486'])
        expected = {'kind': 'aerodrome', 'iata': 'OAK'}
        self.assert_has_feature(13, 1314, 3167, 'pois', expected)
| [
{
"point_num": 1,
"id": "more_functions_than_classes",
"question": "Does this file define more functions than classes?",
"answer": true
},
{
"point_num": 2,
"id": "any_function_over_40_lines",
"question": "Is any function in this file longer than 40 lines?",
"answer": false
},
... | 3 | integration-test/398-airport-iata-codes.py | nextzen/vector-datasource |
"""Tests for module plot for visualization """
# Author: Remi Flamary <remi.flamary@unice.fr>
#
# License: MIT License
import numpy as np
import matplotlib
matplotlib.use('Agg')
def test_plot1D_mat():
    """Smoke-test ot.plot.plot1D_mat on two 1D Gaussians."""
    import ot
    n_bins = 100  # nb bins
    bin_pos = np.arange(n_bins, dtype=np.float64)
    # Gaussian distributions (m = mean, s = std)
    a = ot.datasets.get_1D_gauss(n_bins, m=20, s=5)
    b = ot.datasets.get_1D_gauss(n_bins, m=60, s=10)
    # Loss matrix, normalised to [0, 1].
    M = ot.dist(bin_pos.reshape((n_bins, 1)), bin_pos.reshape((n_bins, 1)))
    M /= M.max()
    ot.plot.plot1D_mat(a, b, M, 'Cost matrix M')
def test_plot2D_samples_mat():
    """Smoke-test ot.plot.plot2D_samples_mat on two 2D Gaussian samples."""
    import ot
    n_bins = 50  # nb samples
    mu_s, cov_s = np.array([0, 0]), np.array([[1, 0], [0, 1]])
    mu_t, cov_t = np.array([4, 4]), np.array([[1, -.8], [-.8, 1]])
    xs = ot.datasets.get_2D_samples_gauss(n_bins, mu_s, cov_s)
    xt = ot.datasets.get_2D_samples_gauss(n_bins, mu_t, cov_t)
    # Sparse random "transport plan": keep roughly 1% of the links.
    G = 1.0 * (np.random.rand(n_bins, n_bins) < 0.01)
    ot.plot.plot2D_samples_mat(xs, xt, G, thr=1e-5)
| [
{
"point_num": 1,
"id": "every_function_under_20_lines",
"question": "Is every function in this file shorter than 20 lines?",
"answer": true
},
{
"point_num": 2,
"id": "has_nested_function_def",
"question": "Does this file contain any function defined inside another function?",
"... | 3 | test/test_plot.py | dohmatob/POT |
import typing
import strawberry
def test_fetch_entities():
    """Federation round-trip: _entities must resolve a Product representation."""
    # Product must live at module scope so the schema's type registry and
    # resolve_reference can see it; cleaned up with "del Product" below.
    global Product

    @strawberry.federation.type(keys=["upc"])
    class Product:
        upc: str

        @classmethod
        def resolve_reference(cls, upc):
            # Invoked by the federation layer with the entity's key fields.
            return Product(upc)

    @strawberry.federation.type(extend=True)
    class Query:
        @strawberry.field
        def top_products(self, first: int) -> typing.List[Product]:
            return []

    schema = strawberry.federation.Schema(query=Query)
    query = """
        query ($representations: [_Any!]!) {
            _entities(representations: $representations) {
                ... on Product {
                    upc
                }
            }
        }
    """
    result = schema.execute_sync(
        query,
        variable_values={
            "representations": [{"__typename": "Product", "upc": "B00005N5PF"}]
        },
    )
    assert not result.errors
    assert result.data == {"_entities": [{"upc": "B00005N5PF"}]}
    # Remove the module-global so no state leaks into other tests.
    del Product
| [
{
"point_num": 1,
"id": "has_nested_function_def",
"question": "Does this file contain any function defined inside another function?",
"answer": true
},
{
"point_num": 2,
"id": "every_function_under_20_lines",
"question": "Is every function in this file shorter than 20 lines?",
"... | 3 | tests/federation/test_entities.py | patrick91/strawberry |
from sklearn.svm import SVC
class Estimator:
    """Minimal fit/predict facade over a probability-enabled SVC."""

    @staticmethod
    def fit(train_x, train_y):
        """Train an SVC (with probability estimates) on the given data."""
        model = SVC(probability=True)
        return model.fit(train_x, train_y)

    @staticmethod
    def predict(trained, test_x):
        """Return class predictions from a previously fitted model."""
        return trained.predict(test_x)
{
"point_num": 1,
"id": "all_params_annotated",
"question": "Does every function parameter in this file have a type annotation (excluding self/cls)?",
"answer": false
},
{
"point_num": 2,
"id": "more_functions_than_classes",
"question": "Does this file define more functions than clas... | 3 | utils/trainer.py | Vitalii36/full-cycle_ML_Solution |
from perfuserBench import microbench
def test_params_product():
    """dict_product over two 3-value axes yields all 9 combinations."""
    params = {"a": [1, 2, 3], "b": [4, 5, 6]}
    combos = list(microbench.dict_product(params))
    assert len(combos) == 9
def test_params_product_empty():
    """dict_product of an empty dict yields exactly one (empty) combination."""
    combos = list(microbench.dict_product({}))
    assert len(combos) == 1
{
"point_num": 1,
"id": "every_function_under_20_lines",
"question": "Is every function in this file shorter than 20 lines?",
"answer": true
},
{
"point_num": 2,
"id": "every_function_has_docstring",
"question": "Does every function in this file have a docstring?",
"answer": fals... | 3 | nosetests/test_params.py | giraldeau/perfuser-bench |
import logging
from openstack_internal.nova.hypervisor_details import OSHypervisor
from topology.link import Link
from topology.node import Node
LOG = logging.getLogger(__name__)
class Server(Node):
    """Topology node backed by an OpenStack hypervisor's available resources."""

    def __init__(self, int_id: int, hypervisor: OSHypervisor):
        super().__init__(int_id=int_id, _id=hypervisor.get_id(), name=hypervisor.get_name(), is_switch=False)
        # NOTE(review): debug print left in; consider LOG.debug instead.
        print(f"Server Name: {self.name}")
        # Capacity snapshot taken from the hypervisor at construction time.
        self.cpu = hypervisor.get_available_vcpus()
        self.hdd = hypervisor.get_available_disk_gb()
        self.ram = hypervisor.get_available_ram_mb()
        # Single in/out link each. (Annotation fixed: the original
        # "Link or None" expression just evaluated to Link.)
        self.in_links: "Link | None" = None
        self.out_links: "Link | None" = None

    def add_in_link(self, link: Link):
        """Attach the (single) incoming link."""
        self.in_links = link

    def add_out_link(self, link: Link):
        """Attach the (single) outgoing link."""
        self.out_links = link
| [
{
"point_num": 1,
"id": "has_multiple_inheritance",
"question": "Does any class in this file use multiple inheritance?",
"answer": false
},
{
"point_num": 2,
"id": "no_function_exceeds_5_params",
"question": "Does every function in this file take 5 or fewer parameters (excluding self... | 3 | app/topology/server.py | kukkalli/orchestrator |
#!/usr/bin/env python3
# Copyright (c) 2018 The Bitcoin Core developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
"""Test the blocksdir option.
"""
import os
import shutil
from test_framework.test_framework import bitcoinRTestFramework, initialize_datadir
class BlocksdirTest(bitcoinRTestFramework):
    """Functional test for the -blocksdir startup option."""

    def set_test_params(self):
        self.setup_clean_chain = True
        self.num_nodes = 1

    def skip_test_if_missing_module(self):
        self.skip_if_no_wallet()

    def run_test(self):
        self.stop_node(0)
        shutil.rmtree(self.nodes[0].datadir)
        initialize_datadir(self.options.tmpdir, 0)
        # Typo fix: the log messages previously said "exiting" for "existing".
        self.log.info("Starting with non-existing blocksdir ...")
        blocksdir_path = os.path.join(self.options.tmpdir, 'blocksdir')
        self.nodes[0].assert_start_raises_init_error(["-blocksdir=" + blocksdir_path], 'Error: Specified blocks directory "{}" does not exist.'.format(blocksdir_path))
        os.mkdir(blocksdir_path)
        self.log.info("Starting with existing blocksdir ...")
        self.start_node(0, ["-blocksdir=" + blocksdir_path])
        self.log.info("mining blocks..")
        self.nodes[0].generate(10)
        # Block files must land in the custom blocksdir; the index stays in datadir.
        assert os.path.isfile(os.path.join(blocksdir_path, "regtest", "blocks", "blk00000.dat"))
        assert os.path.isdir(os.path.join(self.nodes[0].datadir, "regtest", "blocks", "index"))


if __name__ == '__main__':
    BlocksdirTest().main()
| [
{
"point_num": 1,
"id": "has_nested_function_def",
"question": "Does this file contain any function defined inside another function?",
"answer": false
},
{
"point_num": 2,
"id": "every_function_under_20_lines",
"question": "Is every function in this file shorter than 20 lines?",
... | 3 | test/functional/feature_blocksdir.py | fancywarlock/bitcoinr |
import pytest
from core.models import UL_ORG_ADMIN
from sushi.models import CounterReportType, SushiCredentials
from organizations.tests.conftest import organizations # noqa
from publications.tests.conftest import platforms # noqa
from logs.tests.conftest import report_type_nd # noqa
@pytest.fixture()
def counter_report_type_named(report_type_nd):
    """Factory fixture: create a CounterReportType (plus its backing report
    type) from a name stem and COUNTER version."""
    def fn(name, version=5):
        rt = report_type_nd(0, short_name=name + 'rt')
        return CounterReportType.objects.create(
            code=name, counter_version=version, name=name + ' title', report_type=rt
        )
    yield fn
@pytest.fixture()
def counter_report_type(report_type_nd):
    """A ready-made COUNTER 5 'TR' (Title report) type."""
    report_type = report_type_nd(0)
    yield CounterReportType.objects.create(
        code='TR', counter_version=5, name='Title report', report_type=report_type
    )
@pytest.fixture()
def counter_report_type_wrap_report_type(report_type_nd):
    """Factory fixture: wrap an existing report_type in a CounterReportType."""
    def fun(report_type, code='TR', counter_version=5, name='Title report'):
        return CounterReportType.objects.create(
            code=code, counter_version=counter_version, name=name, report_type=report_type
        )
    return fun
@pytest.fixture()
def credentials(organizations, platforms):
    """SushiCredentials for the first organization/platform pair, locked at
    organization-admin level."""
    credentials = SushiCredentials.objects.create(
        organization=organizations[0],
        platform=platforms[0],
        counter_version=5,
        lock_level=UL_ORG_ADMIN,
        url='http://a.b.c/',
    )
    yield credentials
| [
{
"point_num": 1,
"id": "all_params_annotated",
"question": "Does every function parameter in this file have a type annotation (excluding self/cls)?",
"answer": false
},
{
"point_num": 2,
"id": "all_function_names_snake_case",
"question": "Are all function names in this file written ... | 3 | apps/sushi/tests/conftest.py | techlib/czechelib-stats |
# -*- test-case-name: cowrie.test.utils -*-
# Copyright (c) 2017 Michel Oosterhof <michel@oosterhof.net>
# See the COPYRIGHT file for more information
from os import environ
from twisted.logger import textFileLogObserver
from twisted.python import logfile
from cowrie.core.config import CowrieConfig
class CowrieDailyLogFile(logfile.DailyLogFile):
    """
    Overload original Twisted with improved date formatting
    """

    def suffix(self, tupledate):
        """
        Return the suffix given a (year, month, day) tuple or unixtime
        """
        try:
            return "{:02d}-{:02d}-{:02d}".format(*tupledate[:3])
        except Exception:
            # Fall back to treating the argument as a float unixtime.
            return "_".join(map(str, self.toDate(tupledate)))
def logger():
    """Build a Twisted log observer writing daily-rotated files into the
    configured honeypot log_path."""
    log_dir = CowrieConfig().get("honeypot", "log_path", fallback="log")
    daily_log = CowrieDailyLogFile("cowrie.log", log_dir)
    # use Z for UTC (Zulu) time, it's shorter.
    if environ.get("TZ") == "UTC":
        time_format = "%Y-%m-%dT%H:%M:%S.%fZ"
    else:
        time_format = "%Y-%m-%dT%H:%M:%S.%f%z"
    return textFileLogObserver(daily_log, timeFormat=time_format)
| [
{
"point_num": 1,
"id": "has_multiple_inheritance",
"question": "Does any class in this file use multiple inheritance?",
"answer": false
},
{
"point_num": 2,
"id": "every_function_under_20_lines",
"question": "Is every function in this file shorter than 20 lines?",
"answer": true... | 3 | src/cowrie/python/logfile.py | GreyNoise-Intelligence/cowrie |
"""QuizSubmissionFiles API Tests for Version 1.0.
This is a testing template for the generated QuizSubmissionFilesAPI Class.
"""
import unittest
import requests
import secrets
from py3canvas.apis.quiz_submission_files import QuizSubmissionFilesAPI
class TestQuizSubmissionFilesAPI(unittest.TestCase):
    """Tests for the QuizSubmissionFilesAPI."""

    def setUp(self):
        # NOTE: "secrets" is the project-local credentials module here, not
        # the stdlib secrets module.
        self.client = QuizSubmissionFilesAPI(
            secrets.instance_address, secrets.access_token
        )

    def test_upload_file(self):
        """Integration test for the QuizSubmissionFilesAPI.upload_file method."""
        # This method utilises the POST request method and will make changes to the Canvas instance. This needs consideration.
        pass
| [
{
"point_num": 1,
"id": "every_function_has_docstring",
"question": "Does every function in this file have a docstring?",
"answer": false
},
{
"point_num": 2,
"id": "more_functions_than_classes",
"question": "Does this file define more functions than classes?",
"answer": true
}... | 3 | py3canvas/tests/quiz_submission_files.py | tylerclair/py3canvas |
import subprocess
import humanize
import random
from datetime import datetime
from discord.ext import commands
class Others(commands.Cog, name='Others'):
    """
    Others module
    Other random commands that don't fit neatly into one of the other modules
    """

    def __init__(self, bot):
        # Keep a handle on the bot for the uptime/start-time query below.
        self.bot = bot

    @commands.command()
    async def cookies(self, ctx):
        """
        Info on Girl Scout cookies!
        """
        # Random tagline keeps repeated replies varied.
        choices = [
            'You know you want some cookies!',
            'COOOKIIEEES OM NOM NOM NOM',
            'Get some!']
        await ctx.reply(content=f'{random.choice(choices)}\n'
                        'https://digitalcookie.girlscouts.org/scout/julia229799')

    @commands.command()
    async def uptime(self, ctx):
        """
        Reports uptime and some statistics about the bot's runtime.
        """
        uptime = datetime.now() - self.bot.start_time
        uptime_str = humanize.precisedelta(uptime, minimum_unit='seconds')
        # Ask git for the current commit at call time.
        result = subprocess.run(['git','log','--format="%C(auto) %h %s"','-1'],
                                capture_output=True)
        git_str = result.stdout.decode('ascii')
        response = f'TLMBot has been up for {uptime_str}. It is currently running from commit {git_str}'
        await ctx.reply(content=response)
| [
{
"point_num": 1,
"id": "all_params_annotated",
"question": "Does every function parameter in this file have a type annotation (excluding self/cls)?",
"answer": false
},
{
"point_num": 2,
"id": "every_function_under_20_lines",
"question": "Is every function in this file shorter than ... | 3 | bot/others.py | timwoj/tlmbot |
#!/usr/bin/env python3
# coding: utf-8
r"""
范围类型。
::
+-> Container: obj.__contains__(self, item) # item in obj
|
+-> Sized: obj.__len__(self) # len(obj)
|
+-> Iterable: obj.__iter__(self) # iter(obj)
|
+-> Collection
|
| +-> Iterable: obj.__iter__(self) # iter(obj)
| |
+-> Reversible: obj.__reversed__(self) # reversed(obj)
|
+-> Sequence: obj.__getitem__(self, index) # obj[index]
| obj.count(self, value)
| obj.index(self, value, start=0, stop=None)
|
range
Notes
-----
- `范围类型 <https://docs.python.org/zh-cn/3/library/stdtypes.html#ranges>`_
"""
__version__ = '2020.09.27'
__since__ = '2020.09.24'
__author__ = 'zhengrr'
__license__ = 'UNLICENSE'
from typing import Sequence
def test_issubclass() -> None:
    """range is registered as an abstract Sequence."""
    assert issubclass(range, Sequence)
def test_literal() -> None:
    """Both constructor forms produce range instances."""
    for value in (range(1337), range(0, 1337, 1)):
        assert isinstance(value, range)
| [
{
"point_num": 1,
"id": "all_return_types_annotated",
"question": "Does every function in this file have a return type annotation?",
"answer": true
},
{
"point_num": 2,
"id": "every_function_has_docstring",
"question": "Does every function in this file have a docstring?",
"answer... | 3 | rrpython/tests/types/test_range.py | afoolsbag/rrPython |
"""Set of functions to smooth model handling for templates that create
Python code
"""
import yacg.model.model as model
import yacg.util.stringUtils as stringUtils
def getDefaultPythonValue(propertyObj):
    """Return the Python literal for a property's default value.

    Arrays always default to an empty list; scalar properties without a
    declared default return None.
    """
    if propertyObj.isArray:
        return '[]'
    if propertyObj.default is None:
        return None
    return getPythonValueForType(propertyObj.type, propertyObj.default)
def getPythonValueForType(type, value):
    """Render *value* as Python source text appropriate for *type*."""
    if value is None or value == 'None':
        return 'None'
    if isinstance(type, model.BooleanType):
        return 'True' if value == 'true' else 'False'
    if isinstance(type, model.EnumType):
        return '{}.{}'.format(type.name, stringUtils.toUpperCaseName(value))
    if isinstance(type, model.StringType):
        return '{}'.format(str(value))
    return str(value)
def getExtendsType(type, modelTypes, baseModelDomain):
    """Resolve the (possibly package-qualified) name of *type*'s parent type."""
    parent = type.extendsType
    return getTypeWithPackage(parent, modelTypes, baseModelDomain)
def getTypeWithPackage(type, modelTypes, baseModelDomain):
    """Return the bare name when *type* appears in *modelTypes* with the same
    source; otherwise fall back to a domain-qualified name."""
    for candidate in modelTypes:
        if candidate.name != type.name:
            continue
        # Names match; a differing source file means a different type.
        if hasattr(type, 'source') and hasattr(candidate, 'source') \
                and type.source != candidate.source:
            continue
        return type.name
    return getTypeWithPackageEnforced(type, baseModelDomain)
def getTypeWithPackageEnforced(type, baseModelDomain):
    """Qualify *type* with its domain when it lives outside *baseModelDomain*."""
    if baseModelDomain is None:
        return type.name
    if type.domain is not None and type.domain != baseModelDomain:
        return '{}.{}'.format(type.domain, type.name)
    return type.name
| [
{
"point_num": 1,
"id": "all_function_names_snake_case",
"question": "Are all function names in this file written in snake_case?",
"answer": false
},
{
"point_num": 2,
"id": "all_params_annotated",
"question": "Does every function parameter in this file have a type annotation (exclud... | 3 | yacg/generators/helper/pythonFuncs.py | kylape/yacg |
import pytest
from pock.behaviour import Behaviour, ValueResult
from pock.mock import Mock
from pock.verification import VerificationBuilder, VerificationError
@pytest.fixture
def mock():
    """A fresh Mock instance per test."""
    return Mock()
@pytest.fixture
def verification_builder(mock):
    """VerificationBuilder whose predicate passes once at least one matching
    invocation has been recorded."""
    return VerificationBuilder(mock, lambda result: len(result) > 0, 'error')
def test_verification_builder_raises_verification_error_when_not_called(verification_builder):
    """ :type verification_builder: VerificationBuilder"""
    # No invocation was recorded on the mock, so verification must fail.
    with pytest.raises(VerificationError):
        verification_builder.something(1)
def test_accessing_a_property_returns_a_list_of_invocations(verification_builder, mock):
    """
    :type verification_builder: VerificationBuilder
    :type mock: Mock
    """
    # Stub the property so that accessing it is recorded as an invocation.
    mock._add_property_behaviour(Behaviour('property', result=ValueResult(None)))
    mock.property
    assert verification_builder.property.results == ['property']
| [
{
"point_num": 1,
"id": "all_params_annotated",
"question": "Does every function parameter in this file have a type annotation (excluding self/cls)?",
"answer": false
},
{
"point_num": 2,
"id": "all_function_names_snake_case",
"question": "Are all function names in this file written ... | 3 | tests/integration/verification_integration_test.py | atbentley/pock |
import sys
from time import time

import numpy as np
import pandas as pd
from gensim.models.doc2vec import Doc2Vec
from progressbar import progressbar

import sanalytics.algorithms.utils as sau
import sanalytics.estimators.pu_estimators as pu
import sanalytics.evaluation.evaluation_metric as see
## Read threshold
# CLI argument format: "<similarity threshold>|<d2v model name>"
arg = sys.argv[1].split("|")
t = float(arg[0])
name = arg[1]
## Import Data
X_test = pd.read_parquet("datasets/rq3_data/sec1.0_test.parquet")
## Import D2V
# Doc2Vec model saved under the given variation name.
d2v = Doc2Vec.load("datasets/rq3_d2v/{}.model".format(name))
def pos_set(str):  # note: parameter shadows the builtin `str`; name kept for compatibility
    """Return True when a doc2vec document tag belongs to the positive set.

    Tags containing '|' are never positive; tags containing 'sse', 'set1'
    or 'set2' are positive; everything else is negative.  The original
    fell through and returned None for unmatched tags — an explicit False
    keeps all truthiness-based callers working.
    """
    if "|" in str:
        return False
    return "sse" in str or "set1" in str or "set2" in str
## Predict functions
def predict(post, thresh, d2v):
    """Return 1 if any of the post's 1000 nearest doc2vec neighbours is a
    positive-set document with similarity above *thresh*, else 0."""
    # Infer a vector for the concatenated title/question/answers text.
    vec = d2v.infer_vector("{} {} {}".format(post.title, post.question, post.answers).split())
    sims = d2v.docvecs.most_similar([vec], topn=1000)
    # min(..., 1) clamps the positive-neighbour count to a binary label.
    return min(len([i for i in sims if pos_set(i[0]) and i[1] > thresh]), 1)
## Columns
c_90 = ["variation", "classifier", "test_set", "recall", "prec_lower", "prec_opt", "f1_lower", "f1_opt", "f_measure", "eval_time"]
## Test set
results_90 = []
# time the full prediction pass over the test set
start_pred = time()
X_test["preds"] = [predict(i, t, d2v) for i in progressbar(X_test.itertuples())]
end_pred = time()
# compare predictions on the security-labelled subset against all predictions
results = see.evaluate_metrics(X_test[X_test.label=="security"].preds, X_test.preds)
results_90.append(["sec1.0", "d2v_baseline_{}_{}".format(t, name), "test"] + list(results) + [end_pred - start_pred])
df_90 = pd.DataFrame(results_90, columns=c_90)
# one CSV per (threshold, model) variation
df_90.to_csv("analysis/test_models/d2v_baseline/preds/d2v_baseline_{}_{}_90.csv".format(t, name), index=False)
| [
{
"point_num": 1,
"id": "has_nested_function_def",
"question": "Does this file contain any function defined inside another function?",
"answer": false
},
{
"point_num": 2,
"id": "every_function_under_20_lines",
"question": "Is every function in this file shorter than 20 lines?",
... | 3 | Code/analysis/psf_baseline/predict.py | lhmtriet/PUMiner_MSR |
# Counting Sundays
# Problem 19
# You are given the following information, but you may prefer to do some research for yourself.
#
# 1 Jan 1900 was a Monday.
# Thirty days has September,
# April, June and November.
# All the rest have thirty-one,
# Saving February alone,
# Which has twenty-eight, rain or shine.
# And on leap years, twenty-nine.
# A leap year occurs on any year evenly divisible by 4, but not on a century unless it is divisible by 400.
# How many Sundays fell on the first of the month during the twentieth century (1 Jan 1901 to 31 Dec 2000)?
#
# Classic calendar problem. Simply a matter of implementation.
# Days per month in a non-leap year (February adjusted separately).
MONTH_DICTIONARY = {1: 31, 2: 28, 3: 31, 4: 30, 5: 31, 6: 30, 7: 31, 8: 31, 9: 30, 10: 31, 11: 30, 12: 31}
MONTH_COUNT = 12
# Day counting starts at 1 Jan 1900, which was a Monday (WEEKDAY_START = 1).
YEAR_INIT = 1900
YEAR_START = 1901
YEAR_END = 2000
WEEKDAY_START = 1
def is_leap_year(year_num):
    """Return True for Gregorian leap years, False otherwise.

    A year is a leap year when divisible by 4, except century years,
    which must also be divisible by 400.  The original returned an
    implicit None for non-leap years; an explicit bool is equivalent
    for all truthiness-based callers.
    """
    return year_num % 4 == 0 and (year_num % 100 != 0 or year_num % 400 == 0)
def get_month_duration(month, year_num):
    """Return the number of days in *month* (1-12) of *year_num*."""
    days = MONTH_DICTIONARY.get(month)
    # February gains one day in leap years; every other month is fixed.
    if month == 2 and is_leap_year(year_num):
        return days + 1
    return days
def run():
    """Count first-of-month Sundays from Jan 1901 to Dec 2000 and print the total."""
    week_counter = WEEKDAY_START
    year = YEAR_INIT
    sundays = 0
    while year < YEAR_END + 1:
        for month in range(1, MONTH_COUNT + 1):
            # Advance to the first day of the next month; since day 1 was a
            # Monday, a day number divisible by 7 falls on a Sunday.
            week_counter = week_counter + get_month_duration(month, year)
            if week_counter % 7 == 0:
                sundays += 1
        # Don't count year 1900
        if year == 1900:
            sundays = 0
        year += 1
    print("The total number of Sundays from Jan {0} to Dec {1} is {2}".format(YEAR_START, YEAR_END, sundays))
    return 0
# Sample Output:
# The total number of Sundays from Jan 1901 to Dec 2000 is 171
#
# Total running time for Problem19.py is 0.0007747151312018198 seconds
| [
{
"point_num": 1,
"id": "all_params_annotated",
"question": "Does every function parameter in this file have a type annotation (excluding self/cls)?",
"answer": false
},
{
"point_num": 2,
"id": "every_function_under_20_lines",
"question": "Is every function in this file shorter than ... | 3 | src/scripts/Problem19.py | YangLuGitHub/Euler |
"""
Some useful utility functions.
"""
import contextlib
import importlib
import sys
from typing import List, Optional, Tuple
@contextlib.contextmanager
def saved_sys_path():
"""
Contextmanager that will restore the value
of :data:`sys.path` when leaving the ``with``
block.
"""
orig_path = list(sys.path)
try:
yield
finally:
sys.path[:] = orig_path
importlib.invalidate_caches()
def split_package(name: str) -> Tuple[Optional[str], str]:
"""
Return (package, name) given a fully qualified module name
package is ``None`` for toplevel modules
"""
if not isinstance(name, str):
raise TypeError(f"Expected 'str', got instance of {type(name)!r}")
if not name:
raise ValueError(f"Invalid module name {name!r}")
name_abs = name.lstrip(".")
dots = len(name) - len(name_abs)
if not name_abs or ".." in name_abs:
raise ValueError(f"Invalid module name {name!r}")
package, _, name = name_abs.rpartition(".")
if dots:
package = ("." * dots) + package
return (package if package != "" else None), name
class FakePackage:
"""
Instances of these can be used to represent a fake
package in :data:`sys.modules`.
Used as a workaround to fetch information about modules
in packages when the package itself cannot be imported
for some reason (for example due to having a SyntaxError
in the module ``__init__.py`` file).
"""
def __init__(self, path: List[str]):
"""
Create a new instance.
Args:
path: The search path for sub modules
"""
self.__path__ = path
| [
{
"point_num": 1,
"id": "all_function_names_snake_case",
"question": "Are all function names in this file written in snake_case?",
"answer": true
},
{
"point_num": 2,
"id": "more_functions_than_classes",
"question": "Does this file define more functions than classes?",
"answer": ... | 3 | modulegraph2/_utilities.py | xoviat/modulegraph2 |
#!/usr/bin/env python3
import torch
import torch.cuda.profiler as profiler
from apex import pyprof
class Foo(torch.jit.ScriptModule):
def __init__(self, size):
super(Foo, self).__init__()
self.n = torch.nn.Parameter(torch.ones(size))
self.m = torch.nn.Parameter(torch.ones(size))
@torch.jit.script_method
def forward(self, input):
return self.n*input + self.m
#Initialize pyprof after the JIT step
pyprof.nvtx.init()
#Hook up the forward function to pyprof
pyprof.nvtx.wrap(Foo, 'forward')
foo = Foo(4)
foo.cuda()
x = torch.ones(4).cuda()
with torch.autograd.profiler.emit_nvtx():
profiler.start()
z = foo(x)
profiler.stop()
print(z)
| [
{
"point_num": 1,
"id": "has_multiple_inheritance",
"question": "Does any class in this file use multiple inheritance?",
"answer": false
},
{
"point_num": 2,
"id": "every_function_has_docstring",
"question": "Does every function in this file have a docstring?",
"answer": false
... | 3 | apex/pyprof/examples/jit/jit_script_method.py | oyj0594/apex |
#!usr/bin/env python
# -*- coding: utf-8 -*-
DEFAULT = 'default_settings.json'
import os
import json
class Settings(dict):
    """Dict subclass exposing configuration keys as attributes.

    Values are loaded from a JSON config file; keys missing from the user
    config are filled in from ``default_settings.json`` shipped next to
    this module.  Missing keys read via attribute access resolve to None.
    """

    def __init__(self, config_path=None):
        super(Settings, self).__init__()
        try:
            with open(config_path, 'r') as f:
                params = json.load(f)
        except Exception:  # was a bare except; narrowed to Exception
            m = f'Could not read config file: {config_path}'
            raise ValueError(m)
        # add default values for missing settings:
        with open(os.sep.join((os.path.dirname(__file__),
                               'default_settings.json')), 'r') as f:
            defaults = json.load(f)
        for k in defaults:
            if k not in params:
                # BUG FIX: the original assigned to the undefined name
                # `settings`, raising NameError whenever a default applied.
                params[k] = defaults[k]
        for k, v in params.items():
            self[k] = v
        # store the config path too:
        self.config_path = config_path

    def __getattr__(self, attr):
        # Missing keys resolve to None instead of raising AttributeError.
        return self.get(attr)

    def __setattr__(self, key, value):
        self.__setitem__(key, value)

    def __setitem__(self, key, value):
        super(Settings, self).__setitem__(key, value)
        # mirror every item into __dict__ so attribute access stays in sync
        self.__dict__.update({key: value})

    def __delattr__(self, item):
        self.__delitem__(item)

    def __delitem__(self, key):
        super(Settings, self).__delitem__(key)
        del self.__dict__[key]

    def __repr__(self):
        return 'Settings: ' + json.dumps(self, indent=4)

    def dump(self, path):
        """Write the current settings to *path* as pretty-printed JSON."""
        with open(path, 'w') as f:
            json.dump(self, f, indent=4)

    @classmethod
    def load(cls, path):
        """Alternate constructor: build Settings from a config file path."""
        # BUG FIX: the original declared this classmethod with `self` and
        # hard-coded the class name; use `cls` per convention.
        return cls(path)
| [
{
"point_num": 1,
"id": "all_params_annotated",
"question": "Does every function parameter in this file have a type annotation (excluding self/cls)?",
"answer": false
},
{
"point_num": 2,
"id": "every_function_under_20_lines",
"question": "Is every function in this file shorter than ... | 3 | pandora/parameters.py | mikekestemont/openbox |
# -*- coding: utf-8 -*-
__author__ = 'ffuentes'
import argparse
import os
import sys
import csv
import logging
from apps.noclook.models import ServiceType, ServiceClass
from django.core.management.base import BaseCommand, CommandError
logger = logging.getLogger('noclook_service_types_import')
def insert_service_type(name, service_class):
    """Get-or-create a ServiceType *name* belonging to ServiceClass *service_class*."""
    try:
        klass, _ = ServiceClass.objects.get_or_create(name=service_class)
        ServiceType.objects.get_or_create(name=name, service_class=klass)
    except Exception:
        # best-effort import: log the failing row and continue
        logger.warning('Bad things happened importing {} - {}'.format(
            name, service_class))
class Command(BaseCommand):
    """Management command: import service types from a CSV of (name, class) rows."""
    help = 'Import service types'

    def add_arguments(self, parser):
        """Register the --csv_file path and --no_header flag."""
        parser.add_argument('--csv_file', help='The csv file to import',
                            type=str)
        parser.add_argument('--no_header', action='store_true',
                            default=False, help='CSV file has no header')

    def handle(self, *args, **options):
        """Read the CSV and get-or-create one ServiceType per row."""
        with open(options['csv_file'], 'r') as csv_file:
            rows = csv.reader(csv_file)
            #skip header
            if not options['no_header']:
                next(rows, None)
            for name, service_class in rows:
                insert_service_type(name, service_class)
| [
{
"point_num": 1,
"id": "all_params_annotated",
"question": "Does every function parameter in this file have a type annotation (excluding self/cls)?",
"answer": false
},
{
"point_num": 2,
"id": "has_multiple_inheritance",
"question": "Does any class in this file use multiple inherita... | 3 | src/niweb/apps/noclook/management/commands/import_service_types.py | SUNET/ni |
from pydub import AudioSegment
from pydub.playback import play
import os
import utils
class audiofile:
    """Thin wrapper around a pydub AudioSegment loaded from a file path."""

    def __init__(self, file):
        """ Init audio stream """
        self.file = file

    def _segment(self):
        """Load the file into an AudioSegment, inferring the format from the
        extension (everything after the last '.').

        Extracted helper: play() and length() previously duplicated this logic.
        """
        fileformat = self.file.rsplit(".", 1)[1]
        return AudioSegment.from_file(self.file, format=fileformat)

    def play(self):
        """ Play entire file """
        utils.displayInfoMessage('Playing Audio')
        play(self._segment())
        # clear the status/error messages once playback finishes
        utils.displayInfoMessage('')
        utils.displayErrorMessage('')

    def length(self):
        """Return the duration of the audio file in seconds."""
        return self._segment().duration_seconds
{
"point_num": 1,
"id": "all_function_names_snake_case",
"question": "Are all function names in this file written in snake_case?",
"answer": true
},
{
"point_num": 2,
"id": "has_nested_function_def",
"question": "Does this file contain any function defined inside another function?",
... | 3 | AudioFile.py | CoryXie/SpeechShadowing |
import torch.nn as nn
from models.network import swish, CondResBlock
class MNISTModel(nn.Module):
    """Energy-based model for MNIST: maps a (28x28 image, label latent) pair
    to a scalar energy.  When ``args.cond`` is false the label pathway is
    disabled and the provided latent is ignored.
    """

    def __init__(self, args):
        super(MNISTModel, self).__init__()
        self.act = swish
        self.args = args
        # set once (the original assigned filter_dim twice)
        self.filter_dim = args.filter_dim
        self.init_main_model()
        self.init_label_map()
        self.cond = args.cond
        self.sigmoid = args.sigmoid

    def init_main_model(self):
        """Build the convolutional trunk that produces the energy score."""
        args = self.args
        filter_dim = self.filter_dim
        im_size = 28
        self.conv1 = nn.Conv2d(1, filter_dim, kernel_size=3, stride=1, padding=1)
        self.res1 = CondResBlock(args, filters=filter_dim, latent_dim=1, im_size=im_size)
        self.res2 = CondResBlock(args, filters=2*filter_dim, latent_dim=1, im_size=im_size)
        self.res3 = CondResBlock(args, filters=4*filter_dim, latent_dim=1, im_size=im_size)
        self.energy_map = nn.Linear(filter_dim*8, 1)

    def init_label_map(self):
        """Build the 2-layer MLP that embeds a 10-dim label vector."""
        # (removed an unused `args = self.args` local from the original)
        self.map_fc1 = nn.Linear(10, 256)
        self.map_fc2 = nn.Linear(256, 256)

    def main_model(self, x, latent):
        """Compute energies for flattened images *x* conditioned on *latent*."""
        x = x.view(-1, 1, 28, 28)
        x = self.act(self.conv1(x))
        x = self.res1(x, latent)
        x = self.res2(x, latent)
        x = self.res3(x, latent)
        x = self.act(x)
        # global average pool over the two spatial dimensions
        x = x.mean(dim=2).mean(dim=2)
        energy = self.energy_map(x)
        return energy

    def label_map(self, latent):
        """Embed a 10-dim label vector into a 256-dim latent."""
        x = self.act(self.map_fc1(latent))
        x = self.map_fc2(x)
        return x

    def forward(self, x, latent):
        # (removed an unused `args = self.args` local from the original)
        x = x.view(x.size(0), -1)
        if self.cond:
            latent = self.label_map(latent)
        else:
            # unconditional mode: drop the label latent entirely
            latent = None
        energy = self.main_model(x, latent)
        return energy
{
"point_num": 1,
"id": "all_params_annotated",
"question": "Does every function parameter in this file have a type annotation (excluding self/cls)?",
"answer": false
},
{
"point_num": 2,
"id": "more_functions_than_classes",
"question": "Does this file define more functions than clas... | 3 | models/mnist_model.py | karimul/ebm-sampling |
import time
from multiprocessing import Process
from proxypool.api import app
from proxypool.getter import Getter
from proxypool.tester import Tester
from proxypool.db import RedisClient
from proxypool.setting import *
class Scheduler():
    """Coordinates the proxy pool: tester, getter and API server, each run
    in its own child process when enabled in settings."""

    def schedule_tester(self, cycle=TESTER_CYCLE):
        """
        Re-test the pooled proxies every *cycle* seconds.
        """
        tester = Tester()
        while True:
            print('测试器开始运行')
            tester.run()
            time.sleep(cycle)

    def schedule_getter(self, cycle=GETTER_CYCLE):
        """
        Fetch new proxies every *cycle* seconds.
        """
        getter = Getter()
        while True:
            print('开始抓取代理')
            getter.run()
            time.sleep(cycle)

    def schedule_api(self):
        """
        Start the HTTP API server (blocking call).
        """
        app.run(API_HOST, API_PORT)

    def run(self):
        """Spawn one process per component enabled in the settings flags."""
        print('代理池开始运行')
        if TESTER_ENABLED:
            tester_process = Process(target=self.schedule_tester)
            tester_process.start()
        if GETTER_ENABLED:
            getter_process = Process(target=self.schedule_getter)
            getter_process.start()
        if API_ENABLED:
            print('API开启')
            api_process = Process(target=self.schedule_api)
            api_process.start()
| [
{
"point_num": 1,
"id": "every_function_under_20_lines",
"question": "Is every function in this file shorter than 20 lines?",
"answer": true
},
{
"point_num": 2,
"id": "more_functions_than_classes",
"question": "Does this file define more functions than classes?",
"answer": true
... | 3 | proxypool/scheduler.py | pthchen/proxypool |
import pygame
class ControlManager(object):
    """Abstract input-control interface; subclasses must override every method."""

    @classmethod
    def up(cls):
        raise NotImplementedError('Error: Abstract class')

    @classmethod
    def down(cls):
        raise NotImplementedError('Error: Abstract class')

    @classmethod
    def left(cls):
        raise NotImplementedError('Error: Abstract class')

    @classmethod
    def right(cls):
        raise NotImplementedError('Error: Abstract class')

    @classmethod
    def angle(cls, pos):
        # pos: target position used to derive an aiming angle in subclasses
        raise NotImplementedError('Error: Abstract class')

    @classmethod
    def prim_button(cls):
        raise NotImplementedError('Error: Abstract class')

    @classmethod
    def sec_button(cls):
        raise NotImplementedError('Error: Abstract class')

    @classmethod
    def select_button(cls):
        raise NotImplementedError('Error: Abstract class')
| [
{
"point_num": 1,
"id": "more_functions_than_classes",
"question": "Does this file define more functions than classes?",
"answer": true
},
{
"point_num": 2,
"id": "every_function_has_docstring",
"question": "Does every function in this file have a docstring?",
"answer": false
}... | 3 | src/ControlManager.py | NEKERAFA/Soul-Tower |
# Copyright (C) 2018 Kevin Ross
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
from lib.cuckoo.common.abstracts import Signature
class ChecksDebugger(Signature):
    """Flags processes that call user-mode debugger-detection APIs."""
    name = "checks_debugger"
    description = "Checks if process is being debugged by a debugger"
    severity = 1
    categories = ["anti-debug"]
    authors = ["Kevin Ross"]
    minimum = "2.0"
    # only these API calls are delivered to on_call
    filter_apinames = [
        "CheckRemoteDebuggerPresent",
        "IsDebuggerPresent",
    ]

    def on_call(self, call, process):
        # every matching API call is recorded as a mark
        self.mark_call()

    def on_complete(self):
        # the signature fires when at least one call was marked
        return self.has_marks()
class ChecksKernelDebugger(Signature):
    """Flags processes that query for an attached kernel debugger."""
    name = "checks_kernel_debugger"
    description = "Checks if process is being debugged by a kernel debugger"
    severity = 2
    categories = ["anti-debug"]
    authors = ["Kevin Ross"]
    minimum = "2.0"
    # only this API call is delivered to on_call
    filter_apinames = [
        "SystemKernelDebuggerInformation",
    ]

    def on_call(self, call, process):
        # every matching API call is recorded as a mark
        self.mark_call()

    def on_complete(self):
        # the signature fires when at least one call was marked
        return self.has_marks()
| [
{
"point_num": 1,
"id": "has_multiple_inheritance",
"question": "Does any class in this file use multiple inheritance?",
"answer": false
},
{
"point_num": 2,
"id": "more_functions_than_classes",
"question": "Does this file define more functions than classes?",
"answer": true
},... | 3 | modules/signatures/windows/antidbg_debuggercheck.py | Yuanmessi/Bold-Falcon |
import os
import unittest
from stex_client.private import Private
class PrivateTestCase(unittest.TestCase):
    """Live integration tests for the STEX private API client.

    Requires a valid OAuth access token in the ENV_TOKEN environment variable.
    """

    def setUp(self):
        # server-to-server client options; the token comes from the environment
        self.option = {
            'tokenObject': {
                'access_token': os.environ.get('ENV_TOKEN'),
            },
            'accessTokenUrl': 'https://api3.stex.com/oauth/token',
            'scope': 'profile trade withdrawal reports push settings',
            's2s': True
        }

    def test_profile_info(self):
        """profile_info() should return a success payload."""
        res = Private(self.option).profile_info()
        self.assertTrue(res['success'])

    def test_profile_wallets(self):
        """profile_wallets() should return a success payload."""
        res = Private(self.option).profile_wallets()
        self.assertTrue(res['success'])
if __name__ == '__main__':
unittest.main()
| [
{
"point_num": 1,
"id": "more_functions_than_classes",
"question": "Does this file define more functions than classes?",
"answer": true
},
{
"point_num": 2,
"id": "has_nested_function_def",
"question": "Does this file contain any function defined inside another function?",
"answe... | 3 | test/private.py | StocksDevelopment/python_client |
import unittest
import unittest.mock
from programy.clients.render.passthrough import PassThroughRenderer
class MockConsoleBotClient(object):
    """Test double that captures the response handed to process_response."""

    def __init__(self):
        self._response = None

    def process_response(self, client_context, response):
        # record the rendered response so the test can assert on it
        self._response = response
class PassThroughRendererTests(unittest.TestCase):
    """Tests that PassThroughRenderer forwards text unchanged to the client."""

    def test_text_only(self):
        """Plain text should reach the client exactly as rendered."""
        mock_console = MockConsoleBotClient()
        renderer = PassThroughRenderer(mock_console)
        self.assertIsNotNone(renderer)
        renderer.render("testuser", "Hello world")
        self.assertEqual(mock_console._response, "Hello world")
| [
{
"point_num": 1,
"id": "has_multiple_inheritance",
"question": "Does any class in this file use multiple inheritance?",
"answer": false
},
{
"point_num": 2,
"id": "every_class_has_docstring",
"question": "Does every class in this file have a docstring?",
"answer": false
},
{... | 3 | test/programytest/clients/render/test_passthrough.py | motazsaad/fit-bot-fb-clt |
"""
Functions needed for dealing with age stratification
"""
def add_zero_to_age_breakpoints(breakpoints):
    """
    prepend zero to the age breakpoints if absent, for age stratification
    :param breakpoints: list
        integers for the age breakpoints requested
    :return: list
        age breakpoints with the zero value included
    """
    if 0 in breakpoints:
        return breakpoints
    return [0] + breakpoints
def split_age_parameter(age_breakpoints, parameter):
    """
    creates a dictionary to request splitting of a parameter according to age breakpoints, but using values of 1 for
    each age stratum
    allows that later parameters that might be age-specific can be modified for some age strata
    :param age_breakpoints: list
        list of the age breakpoints to be requested, with breakpoints as string
    :param parameter: str
        name of parameter that will need to be split
    :return: dict
        dictionary with age groups as string as keys and ones for all the values
    """
    groups = age_breakpoints if "0" in age_breakpoints else ["0"] + age_breakpoints
    return {parameter: {str(group): 1.0 for group in groups}}
| [
{
"point_num": 1,
"id": "every_function_under_20_lines",
"question": "Is every function in this file shorter than 20 lines?",
"answer": true
},
{
"point_num": 2,
"id": "all_params_annotated",
"question": "Does every function parameter in this file have a type annotation (excluding se... | 3 | summer/model/utils/age_stratification.py | malanchak/AuTuMN |
import numpy as np
import tensorflow as tf
from model import Model
from common.shared_functions import glorot_variance, make_tf_variable, make_tf_bias
class HighwayLayer(Model):
    """Highway combination of two components: output = g*c1 + (1-g)*c2,
    where the gate g is a sigmoid projection of the second component's codes.
    """
    # NOTE(review): class-level mutable dict — the embedding cache is shared
    # by every HighwayLayer instance; presumably only one exists per graph.
    # TODO confirm before instantiating more than once.
    vertex_embedding_function = {'train': None, 'test': None}

    def __init__(self, shape, next_component=None, next_component_2=None):
        self.next_component = next_component
        self.next_component_2 = next_component_2
        self.shape = shape

    def compute_vertex_embeddings(self, mode='train'):
        """Lazily compute and cache the gated mix of both components' codes."""
        if self.vertex_embedding_function[mode] is None:
            code_1 = self.next_component.get_all_codes(mode=mode)[0]
            code_2 = self.next_component_2.get_all_codes(mode=mode)[0]
            gates = self.get_gates(mode=mode)
            self.vertex_embedding_function[mode] = gates * code_1 + (1-gates) * code_2
        return self.vertex_embedding_function[mode]

    def local_initialize_train(self):
        """Create gate parameters: Glorot-scaled weights and a bias initialised to 1."""
        variance = glorot_variance(self.shape)
        self.W = make_tf_variable(0, variance, self.shape)
        self.b = make_tf_bias(self.shape[1], init=1)

    def local_get_weights(self):
        return [self.W, self.b]

    def get_gates(self, mode='train'):
        """Sigmoid gate computed from the second component's codes."""
        code = self.next_component_2.get_all_codes(mode=mode)[0]
        hidden = tf.matmul(code, self.W) + self.b
        return tf.nn.sigmoid(hidden)

    def get_all_codes(self, mode='train'):
        collected_messages = self.compute_vertex_embeddings(mode=mode)
        return collected_messages, None, collected_messages

    def get_all_subject_codes(self, mode='train'):
        return self.compute_vertex_embeddings(mode=mode)

    def get_all_object_codes(self, mode='train'):
        return self.compute_vertex_embeddings(mode=mode)
| [
{
"point_num": 1,
"id": "all_params_annotated",
"question": "Does every function parameter in this file have a type annotation (excluding self/cls)?",
"answer": false
},
{
"point_num": 2,
"id": "every_function_under_20_lines",
"question": "Is every function in this file shorter than ... | 3 | code/extras/highway_layer.py | vamships/RelationPrediction |
from StringIO import StringIO
from django import http
from django.core.serializers.json import Serializer
from django.db.models.query import QuerySet
from django.views.generic import TemplateView
class TemplateContextView(TemplateView):
    """ Allow define context in as_view method.
    """
    # NOTE(review): class-level default; shadowed by the instance attribute
    # set in __init__, so it only matters when __init__ is bypassed.
    context = dict()

    def __init__(self, context=None, **kwargs):
        # per-instance preset context, defaulting to an empty dict
        self.context = context or dict()
        super(TemplateContextView, self).__init__(**kwargs)

    def get(self, request, *args, **kwargs):
        # merge the view-computed context over the preset one, then render
        self.context.update(self.get_context_data(**kwargs))
        return self.render_to_response(self.context)
class AbstractResponseMixin(object):
    """ Abstract class for data serialize.
    """
    # default content type; subclasses override (e.g. application/json)
    mimetype = "application/text"

    @staticmethod
    def render_template(context):
        "String representation of given context."
        return str(context)

    def render_to_response(self, context):
        "Return HttpResponse with the rendered context and this mimetype."
        return http.HttpResponse(
            self.render_template(context),
            content_type=self.mimetype)
class JSONResponseMixin(AbstractResponseMixin):
    """ Serialize queryset or any objects context in JSON.
    """
    mimetype = "application/json"

    def render_template(self, context):
        """Serialize *context* to JSON: querysets go through the normal
        serializer path, everything else drives the encoder internals directly."""
        encoder = Serializer()
        if isinstance(context, QuerySet):
            return encoder.serialize(context, ensure_ascii=False)
        else:
            # non-queryset objects: set up the serializer state by hand
            encoder.objects = context
            encoder.options = dict()
            encoder.stream = StringIO()
            encoder.end_serialization()
            return encoder.getvalue()
class JSONView(JSONResponseMixin, TemplateView):
    """ Render view context in JSON.

    Combines TemplateView's context handling with JSON serialization.
    """
    pass
| [
{
"point_num": 1,
"id": "has_multiple_inheritance",
"question": "Does any class in this file use multiple inheritance?",
"answer": true
},
{
"point_num": 2,
"id": "no_function_exceeds_5_params",
"question": "Does every function in this file take 5 or fewer parameters (excluding self/... | 3 | starter/templates/python-django/main/utils/views.py | klen/starter-python-django |
import gpipsfs
def test_coron():
    """H-band coronagraph mode: total PSF throughput must stay below 5e-4."""
    gpi = gpipsfs.GPI()
    gpi.obsmode='H_coron'
    psf = gpi.calc_psf(monochromatic=1.6e-6)
    assert psf[0].data.sum() < 5e-4
def test_direct():
    """H-band direct mode: nearly all flux (>0.99) must be transmitted."""
    gpi = gpipsfs.GPI()
    gpi.obsmode='H_direct'
    psf = gpi.calc_psf(monochromatic=1.6e-6)
    assert psf[0].data.sum() > 0.99
def test_unblocked():
    """H-band unblocked mode: throughput must fall in the 0.35-0.40 band."""
    gpi = gpipsfs.GPI()
    gpi.obsmode='H_unblocked'
    psf = gpi.calc_psf(monochromatic=1.6e-6)
    assert psf[0].data.sum() > 0.35
    assert psf[0].data.sum() < 0.40
def test_obsmode():
    """Setting obsmode must configure apodizer, occulter, lyot mask and filter together."""
    def check_modes(gpi, apod, occ, lyot, filt):
        # helper: assert all four optical elements match the expected settings
        assert gpi.apodizer == apod, 'Got unexpected apodizer value. Was {}, expected {}'.format(gpi.apodizer, apod)
        assert gpi.occulter == occ, 'Got unexpected occulter value. Was {}, expected {}'.format(gpi.occulter, occ)
        assert gpi.lyotmask == lyot, 'Got unexpected lyotmask value. Was {}, expected {}'.format(gpi.lyotmask, lyot)
        assert gpi.filter == filt, 'Got unexpected filter value. Was {}, expected {}'.format(gpi.filter, filt)
    gpi = gpipsfs.GPI()
    gpi.obsmode='H_direct'
    check_modes(gpi, 'CLEAR','SCIENCE','Open', 'H')
    gpi.obsmode='H_coron'
    check_modes(gpi, 'H','H','080m12_04', 'H')
    gpi.obsmode='K1_unblocked'
    check_modes(gpi, 'K1','SCIENCE','080m12_06_03', 'K1')
    gpi.obsmode='NRM_J'
    check_modes(gpi, 'NRM','SCIENCE','Open', 'J')
| [
{
"point_num": 1,
"id": "no_function_exceeds_5_params",
"question": "Does every function in this file take 5 or fewer parameters (excluding self/cls)?",
"answer": true
},
{
"point_num": 2,
"id": "all_params_annotated",
"question": "Does every function parameter in this file have a ty... | 3 | gpipsfs/tests/test_main.py | brynickson/gpipsfs |
"""
Segment_tree creates a segment tree with a given array and function,
allowing queries to be done later in log(N) time
function takes 2 values and returns a same type value
"""
class SegmentTree:
    """Segment tree over *arr* for range queries with an associative function.

    Build is O(n); each ``query(L, R)`` (inclusive, 0-indexed) runs in
    O(log n).  ``function`` takes two values and returns one of the same type.
    """

    def __init__(self, arr, function):
        self.segment = [0 for x in range(3 * len(arr) + 3)]
        self.arr = arr
        self.fn = function
        # BUG FIX: the original called self.maketree (undefined attribute),
        # raising AttributeError on every construction.
        self.make_tree(0, 0, len(arr) - 1)

    def make_tree(self, i, l, r):
        """Recursively fill segment[i] as fn over the array range [l, r]."""
        if l == r:
            self.segment[i] = self.arr[l]
        elif l < r:
            self.make_tree(2 * i + 1, l, int((l + r) / 2))
            self.make_tree(2 * i + 2, int((l + r) / 2) + 1, r)
            self.segment[i] = self.fn(self.segment[2 * i + 1], self.segment[2 * i + 2])

    def __query(self, i, L, R, l, r):
        """Return fn over arr[l..r] using node i covering [L, R]; None when disjoint."""
        if l > R or r < L or L > R or l > r:
            return None
        if L >= l and R <= r:
            return self.segment[i]
        val1 = self.__query(2 * i + 1, L, int((L + R) / 2), l, r)
        val2 = self.__query(2 * i + 2, int((L + R + 2) / 2), R, l, r)
        # (leftover debug print removed; it polluted stdout on partial overlaps)
        if val1 is not None:
            if val2 is not None:
                return self.fn(val1, val2)
            return val1
        return val2

    def query(self, L, R):
        """Return fn applied over arr[L..R], inclusive and 0-indexed."""
        return self.__query(0, 0, len(self.arr) - 1, L, R)
"""
Example -
mytree = SegmentTree([2,4,5,3,4],max)
mytree.query(2,4)
mytree.query(0,3) ...
mytree = SegmentTree([4,5,2,3,4,43,3],sum)
mytree.query(1,8)
...
"""
| [
{
"point_num": 1,
"id": "every_function_under_20_lines",
"question": "Is every function in this file shorter than 20 lines?",
"answer": true
},
{
"point_num": 2,
"id": "all_params_annotated",
"question": "Does every function parameter in this file have a type annotation (excluding se... | 3 | WEEKS/CD_Sata-Structures/_MISC/algorithms/tree/segment_tree/segment_tree.py | webdevhub42/Lambda |
from fastapi import Depends, Body, APIRouter
from sqlalchemy.orm import Session
from database import get_db
from models.forex_model import Forex2_m5,Forex2_m30,Forex2_m240
from datetime import datetime as dt
from crud.forex_crud import get_last_time, add_forex
from schemas import forex_schema as schema
import pdb
# When modularizing, create an APIRouter instance — conventionally named `router`.
router = APIRouter(
responses={404: {"forex": "Not found"}},
)
def session_clear(exception):
    """Roll back and close the DB session after a failed insert.

    NOTE(review): this operates on the imported SQLAlchemy ``Session``
    *class*, not a session instance — ``Session.is_active`` is truthy as a
    class attribute and ``Session.rollback()`` needs an instance.  Looks
    like it was meant to receive the ``db`` session; verify against the
    caller in ``gettick``.
    """
    if exception and Session.is_active:
        Session.rollback()
    else:
        pass
    Session.close()
@router.get("/getlasttime/")
async def gettime(db: Session = Depends(get_db),):
    """Return the latest stored candle time for each timeframe table."""
    return {
        "m5": get_last_time(db=db,model=Forex2_m5),
        "m30": get_last_time(db=db,model=Forex2_m30),
        "m240": get_last_time(db=db,model=Forex2_m240)
    }
@router.post("/gettick/")
async def gettick(
    db: Session = Depends(get_db),
    body=Body(...)):
    """Parse one CSV candle line from the request body and store it in the
    timeframe table selected by its period string."""
    # payload format: "time,peristr,open,high,low,close,volume"
    # NOTE(review): `time`, `open` and `close` shadow builtins/imports here.
    time, peristr, open, high, low, close, volume = body["content"].split(",")
    obj =schema.Forex(
        id = dt.strptime(time, "%Y.%m.%d %H:%M"),
        open = open,
        high= high,
        low= low,
        close= close,
        volume= volume
    )
    # map the period string to its model (m5/m30/m240 — presumably minutes)
    if peristr == "forex_f1":
        repo = Forex2_m5
    elif peristr == "forex_f2":
        repo = Forex2_m30
    elif peristr == "forex_f3":
        repo = Forex2_m240
    else:
        return {"error": "invalid peristr"}
    try:
        r = add_forex(
            db=db,
            schema = obj,
            model = repo,
            commit=True,
        )
    except Exception as e:
        # on failure, roll back/close the session and report a generic error
        session_clear(e)
        return {"error": "invalid data"}
    return {"msg": "data posting comleted" }
| [
{
"point_num": 1,
"id": "all_return_types_annotated",
"question": "Does every function in this file have a return type annotation?",
"answer": false
},
{
"point_num": 2,
"id": "all_params_annotated",
"question": "Does every function parameter in this file have a type annotation (excl... | 3 | app/routers/forex_router.py | chanmoto/eurusd_pix2pix |
from serpapi.serp_api_client import *
from serpapi.serp_api_client_exception import SerpApiClientException
class YoutubeSearch(SerpApiClient):
    """YoutubeSearch enables to search YouTube and parse the result.
    ```python
    from serpapi import YoutubeSearch
    query = YoutubeSearch({"search_query": "chair"})
    data = query.get_dict()
    ```
    doc: https://serpapi.com/youtube-search-api
    """

    def __init__(self, params_dict):
        super(YoutubeSearch, self).__init__(params_dict, YOUTUBE_ENGINE)

    def get_location(self, q, limit = 5):
        """Not supported for YouTube; always raises SerpApiClientException."""
        raise SerpApiClientException("location is not supported by youtube search engine")
| [
{
"point_num": 1,
"id": "all_params_annotated",
"question": "Does every function parameter in this file have a type annotation (excluding self/cls)?",
"answer": false
},
{
"point_num": 2,
"id": "every_function_has_docstring",
"question": "Does every function in this file have a docst... | 3 | serpapi/youtube_search.py | serpapi/google_search_results_python |
# Copyright 2018 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Helper for finding credentials.
When running this tool, there are two possible scenarios:
1. The tool is running as part of CI to publish the docs. In that case,
credentials should be at a well-known location or use ADC.
2. The tool is running locally as part of development. In that case,
the credentials should be passed into the command-line or via ADC.
(2) takes precedence. Command-line params also override local files.
"""
import os
from typing import List, Optional
from google.oauth2 import service_account
import google.auth
_WELL_KNOWN_LOCATIONS: List[str] = []
if "KOKORO_KEYSTORE_DIR" in os.environ:
_WELL_KNOWN_LOCATIONS.append(
os.path.join(
os.environ["KOKORO_KEYSTORE_DIR"], "73713_docuploader_service_account"
)
)
def find_path():
    """Return the first existing well-known credentials file, or "" if none."""
    return next(
        (location for location in _WELL_KNOWN_LOCATIONS if os.path.exists(location)),
        "",
    )
def find(credentials_file: Optional[str] = ""):
    """Return (credentials, project_id), preferring an explicit service-account
    file, then the well-known Kokoro location, then Application Default
    Credentials."""
    if not credentials_file:
        credentials_file = find_path()
    if credentials_file != "":
        credentials = service_account.Credentials.from_service_account_file(
            credentials_file
        )
        return credentials, credentials.project_id
    else:
        # fall back to ADC (environment, gcloud config, metadata server)
        return google.auth.default()
| [
{
"point_num": 1,
"id": "every_function_has_docstring",
"question": "Does every function in this file have a docstring?",
"answer": false
},
{
"point_num": 2,
"id": "has_nested_function_def",
"question": "Does this file contain any function defined inside another function?",
"ans... | 3 | docuploader/credentials.py | renovate-bot/docuploader |
import collections
from typing import Iterator
import itertools
from stream_lib.stream_api import Stream, T
class ItertoolsStream(Stream[T]):
    """Stream implementation delegating to builtin iterators and itertools."""

    @staticmethod
    def stream(*iterables: Iterator[T]):
        """Wrap a single iterable directly; zip several together (None-padded)."""
        if len(iterables) == 1:
            return ItertoolsStream(*iterables)
        else:
            return ItertoolsStream(itertools.zip_longest(*iterables))

    def __init__(self, delegate: Iterator[T]):
        # BUG FIX: Python 3.10 removed the deprecated collections.Iterable /
        # collections.Iterator aliases; the ABCs live in collections.abc.
        # Imported locally since the module only imports `collections`.
        import collections.abc
        assert isinstance(delegate, collections.abc.Iterable)
        if not isinstance(delegate, collections.abc.Iterator):
            delegate = iter(delegate)
        self._delegate = delegate

    def __iter__(self):
        self._delegate = iter(self._delegate)
        return self._delegate

    def __next__(self):
        return next(self._delegate)

    def map(self, func):
        """Lazily apply func to every element.

        _stream is presumably provided by the Stream base class — confirm.
        """
        return self._stream(map(func, self))

    def flatmap(self, func):
        """Map then flatten one level."""
        return self.map(func).flatten()

    def flatten(self):
        """Concatenate the stream's nested iterables."""
        return self._stream(itertools.chain.from_iterable(self))

    def filter(self, predicate):
        """Keep only elements satisfying predicate."""
        return self._stream(filter(predicate, self))

    def slice(self, start, stop, step=1):
        """Lazy slice [start:stop:step] of the stream."""
        return self._stream(itertools.islice(self, start, stop, step))

    def limit(self, size):
        """Take at most the first size elements."""
        return self.slice(0, size)
| [
{
"point_num": 1,
"id": "all_function_names_snake_case",
"question": "Are all function names in this file written in snake_case?",
"answer": true
},
{
"point_num": 2,
"id": "every_function_under_20_lines",
"question": "Is every function in this file shorter than 20 lines?",
"answ... | 3 | stream-lib/stream_lib/itertools_stream.py | flegac/deep-experiments |
# Copyright 2013-2020 Lawrence Livermore National Security, LLC and other
# Spack Project Developers. See the top-level COPYRIGHT file for details.
#
# SPDX-License-Identifier: (Apache-2.0 OR MIT)
from spack import *
class Stc(AutotoolsPackage):
    """STC: The Swift-Turbine Compiler"""
    homepage = 'http://swift-lang.org/Swift-T'
    url = 'http://swift-lang.github.io/swift-t-downloads/spack/stc-0.8.3.tar.gz'
    git = "https://github.com/swift-lang/swift-t.git"
    version('master', branch='master')
    version('0.8.3', sha256='d61ca80137a955b12e84e41cb8a78ce1a58289241a2665076f12f835cf68d798')
    version('0.8.2', sha256='13f0f03fdfcca3e63d2d58d7e7dbdddc113d5b9826c9357ab0713b63e8e42c5e')
    depends_on('java', type=('build', 'run'))
    depends_on('ant', type='build')
    depends_on('turbine', type=('build', 'run'))
    # The master branch must build against turbine's master branch.
    depends_on('turbine@master', type=('build', 'run'), when='@master')
    depends_on('zsh', type=('build', 'run'))
    depends_on('autoconf', type='build')
    depends_on('automake', type='build')
    depends_on('libtool', type='build')
    @property
    def configure_directory(self):
        # The git checkout is the whole swift-t monorepo, so STC's autotools
        # tree lives under stc/code; release tarballs are already rooted there.
        if self.version == Version('master'):
            return 'stc/code'
        else:
            return '.'
    def configure_args(self):
        # Point STC's configure at the Turbine installation it depends on.
        args = ['--with-turbine=' + self.spec['turbine'].prefix]
        return args
| [
{
"point_num": 1,
"id": "every_function_under_20_lines",
"question": "Is every function in this file shorter than 20 lines?",
"answer": true
},
{
"point_num": 2,
"id": "has_multiple_inheritance",
"question": "Does any class in this file use multiple inheritance?",
"answer": false... | 3 | var/spack/repos/builtin/packages/stc/package.py | xiki-tempula/spack |
from torch import nn, Tensor
from typing import Union
from torch.nn import CrossEntropyLoss
class LabelSmoothCrossEntropy(nn.Module):
    """Cross-entropy with uniform label smoothing.

    The target class receives weight ``1 - smoothing``; the remaining
    ``smoothing`` mass is spread uniformly over all classes (implemented as
    the mean negative log-probability).
    """

    def __init__(self, smoothing=0.1):
        super().__init__()
        assert smoothing < 1.0
        self.smoothing = smoothing
        self.confidence = 1. - smoothing
        self.log_softmax = nn.LogSoftmax(dim=-1)

    def forward(self, pred: Tensor, target: Tensor) -> Tensor:
        log_probs = self.log_softmax(pred)
        # Negative log-likelihood of the true class, one value per sample.
        true_class_nll = -log_probs.gather(dim=-1, index=target.unsqueeze(1)).squeeze(1)
        # Uniform component: mean NLL across all classes.
        uniform_nll = -log_probs.mean(dim=-1)
        per_sample = self.confidence * true_class_nll + self.smoothing * uniform_nll
        return per_sample.mean()
class DistillationLoss(nn.Module):
    """Knowledge-distillation loss (Hinton et al.).

    Distilling the Knowledge in a Neural Network
    https://arxiv.org/pdf/1503.02531.pdf
    """

    def __init__(self, alpha: float = 0.95, temp: Union[float, int] = 6) -> None:
        super().__init__()
        self.alpha = alpha
        self.temp = temp
        self.kd_loss = nn.KLDivLoss()
        self.entropy_loss = nn.CrossEntropyLoss()
        self.log_softmax = nn.LogSoftmax(dim=1)
        self.softmax = nn.Softmax(dim=1)

    def forward(self, pred_student: Tensor, pred_teacher: Tensor, target: Tensor) -> Tensor:
        # Soft-target term: KL divergence between temperature-scaled
        # distributions, rescaled by alpha * T^2 as in the paper.
        t = self.temp
        soft_term = self.kd_loss(
            self.log_softmax(pred_student / t),
            self.softmax(pred_teacher / t),
        ) * (self.alpha * t * t)
        # Hard-target term: plain cross-entropy against the true labels.
        hard_term = self.entropy_loss(pred_student, target) * (1. - self.alpha)
        return soft_term + hard_term
| [
{
"point_num": 1,
"id": "every_class_has_docstring",
"question": "Does every class in this file have a docstring?",
"answer": false
},
{
"point_num": 2,
"id": "all_function_names_snake_case",
"question": "Are all function names in this file written in snake_case?",
"answer": true... | 3 | utils/losses.py | jimilee/image-classification |
# FIXME: fix all "happy paths coding" issues
import liblo
from threading import Thread
class Mext(object):
    """Minimal OSC client for a monome grid discovered through serialosc."""

    # Address of the grid; filled in asynchronously by on_serialosc_device.
    device = None

    def __init__(self, device_port=5000):
        # BUG FIX: on_grid_key read self.grid_key_callback, but the attribute
        # only existed after set_grid_key_callback() was called — the first
        # key press before that raised AttributeError. Initialise it here.
        self.grid_key_callback = None
        self.device_receiver = liblo.ServerThread(device_port)
        self.device_receiver.add_method("/monome/grid/key", "iii", self.on_grid_key)
        self.device_receiver.add_method(
            "/serialosc/device", "ssi", self.on_serialosc_device
        )
        self.device_receiver.start()
        # Ask serialosc (port 12002) to report devices back to our port.
        liblo.send(liblo.Address(12002), "/serialosc/list", "127.0.0.1", device_port)

    def set_grid_key_callback(self, fn):
        """Register fn(x, y, edge) to be called on every key event."""
        self.grid_key_callback = fn

    def set_led_level(self, x, y, value):
        # Sent on a worker thread so callers never block on OSC I/O.
        # NOTE(review): self.device is None until serialosc answers — a send
        # before discovery will fail inside the thread; confirm acceptable.
        Thread(
            target=(
                lambda: liblo.send(
                    self.device, "/monome/grid/led/level/set", x, y, value
                )
            )
        ).start()

    def set_led_map(self, offset_x, offset_y, values):
        """Send a whole 8x8 LED level map starting at (offset_x, offset_y)."""
        Thread(
            target=(
                lambda: liblo.send(
                    self.device,
                    "/monome/grid/led/level/map",
                    offset_x,
                    offset_y,
                    *values
                )
            )
        ).start()

    def on_grid_key(self, path, args):
        # args is (x, y, edge) with edge 1=press, 0=release per the "iii" typespec.
        x, y, edge = args
        if self.grid_key_callback:
            self.grid_key_callback(x, y, edge)

    def on_serialosc_device(self, path, args):
        # serialosc reports (name, sysId, port); we only need the port.
        _, sysId, port = args
        self.device = liblo.Address(port)
| [
{
"point_num": 1,
"id": "no_function_exceeds_5_params",
"question": "Does every function in this file take 5 or fewer parameters (excluding self/cls)?",
"answer": true
},
{
"point_num": 2,
"id": "all_function_names_snake_case",
"question": "Are all function names in this file written... | 3 | ui/mext.py | szymonkaliski/nott |
import json
import numpy as np
import gensim
class EmbeddingModel(object):
    """In-memory word-embedding lookup.

    Loads a dense embedding matrix (.npy) and a JSON token list, mapping each
    token to its matrix row. Supports ``word in model`` and ``model[word]``.
    """

    def __init__(
            self, vocab_file, embedding_file, normalize_embeddings=True):
        with open(embedding_file, 'rb') as f:
            self.embedding_mat = np.load(f)
        if normalize_embeddings:
            # L2-normalise rows so dot products become cosine similarities.
            self.embedding_mat = self.embedding_mat / np.linalg.norm(
                self.embedding_mat, axis=1, keepdims=True)
        with open(vocab_file, 'r') as f:
            tks = json.load(f)
        # token -> row index, in file order.
        self.vocab = dict(zip(tks, range(len(tks))))

    def __contains__(self, word):
        return word in self.vocab

    def __getitem__(self, word):
        if word in self.vocab:
            index = self.vocab[word]
            return self.embedding_mat[index]
        # BUG FIX: bare `raise KeyError` gave no hint which word was missing;
        # include the offending word in the exception.
        raise KeyError(word)
| [
{
"point_num": 1,
"id": "all_function_names_snake_case",
"question": "Are all function names in this file written in snake_case?",
"answer": true
},
{
"point_num": 2,
"id": "more_functions_than_classes",
"question": "Does this file define more functions than classes?",
"answer": ... | 3 | nsm/word_embeddings.py | MartinoMensio/neural-symbolic-machines |
"""
@author: Anuj Kumar
@email: cdac.anuj@gmail.com
@date: 16-Apr-18
"""
import logging
from utility.services import Services
logging.basicConfig(format='%(asctime)s - %(levelname)s: %(message)s', datefmt='%m/%d/%Y %I:%M:%S %p',
level=logging.INFO)
from selenium.webdriver.support.select import Select
from time import sleep
class DropdownPage:
    """Page object for the "Dropdown List" demo page."""

    def __init__(self, driver):
        self.driver = driver
        self.services = Services(self.driver)
        self.header = "Dropdown List"
        self.xpath_heading = "//h3"
        self.xpath_dropdown = "//select[@id='dropdown']"

    def verify_dropdown_page(self):
        """Assert that the page heading matches the expected header text."""
        logging.info("## Verifying Dropdown page ##")
        self.services.wait_for_element(self.xpath_heading)
        heading = self.services.get_text_by_xpath(self.xpath_heading)
        logging.info("# Actual heading on Dropdown page: %s" % heading)
        assert heading == self.header, "Actual header (%s), should be same as expected header (%s)." % (
            heading, self.header)

    def select_dropdown_option(self, opt):
        """Select *opt* by visible text and confirm the selection stuck."""
        dropdown = Select(self.driver.find_element_by_xpath(self.xpath_dropdown))
        dropdown.select_by_visible_text(opt)
        sleep(1)
        chosen = dropdown.first_selected_option.text
        assert chosen == opt, "Selected value {0}, should be same as expected {1}".format(chosen, opt)
| [
{
"point_num": 1,
"id": "all_return_types_annotated",
"question": "Does every function in this file have a return type annotation?",
"answer": false
},
{
"point_num": 2,
"id": "has_multiple_inheritance",
"question": "Does any class in this file use multiple inheritance?",
"answer... | 3 | attic/2019/contributions-2019/open/mudaliar-yptu/PWAF/pages/dropdown_page.py | Agriad/devops-course |
# Copyright (c) 2021 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import print_function
import unittest
import paddle.fluid as fluid
from test_parallel_dygraph_dataparallel import TestMultipleGpus
class TestDygraphShardingStage2(TestMultipleGpus):
    """Launch the dygraph sharding stage-2 training scripts on two GPUs."""
    # check sharding logic as well as the accuracy with single mode
    def test_dygraph_sharding_optimizer_stage2(self):
        # Runs the stage-2 sharding script on 2 GPUs via the inherited helper.
        self.run_mnist_2gpu('dygraph_sharding_stage2.py')
    def test_dygraph_sharding_optimizer_stage2_offload(self):
        # Same scenario with offload enabled (per the script name) — confirm.
        self.run_mnist_2gpu('dygraph_sharding_stage2_offload.py')
if __name__ == "__main__":
    # Allow running this test module directly.
    unittest.main()
| [
{
"point_num": 1,
"id": "has_nested_function_def",
"question": "Does this file contain any function defined inside another function?",
"answer": false
},
{
"point_num": 2,
"id": "every_function_has_docstring",
"question": "Does every function in this file have a docstring?",
"ans... | 3 | python/paddle/fluid/tests/unittests/test_dygraph_sharding_stage2.py | zmxdream/Paddle |
import integ_test_base
import requests
class TestDeployModelSSLOnAuthOff(integ_test_base.IntegTestBase):
    """Integration test: TLS enabled, auth disabled — endpoints must answer."""

    def _get_transfer_protocol(self) -> str:
        return "https"

    def _get_certificate_file_name(self) -> str:
        return "./tests/integration/resources/2019_04_24_to_3018_08_25.crt"

    def _get_key_file_name(self) -> str:
        return "./tests/integration/resources/2019_04_24_to_3018_08_25.key"

    def test_deploy_ssl_on_auth_off(self):
        """Deploy the sample models and check each endpoint responds over TLS."""
        self.deploy_models(self._get_username(), self._get_password())
        client = requests.Session()
        # Self-signed certificate: skip CA verification and mute the warning.
        client.verify = False
        requests.packages.urllib3.disable_warnings()
        base = f"{self._get_transfer_protocol()}://localhost:9004/endpoints"
        for model in ["PCA", "Sentiment%20Analysis", "ttest", "anova"]:
            response = client.get(url=f"{base}/{model}")
            self.assertEqual(200, response.status_code)
| [
{
"point_num": 1,
"id": "every_function_has_docstring",
"question": "Does every function in this file have a docstring?",
"answer": false
},
{
"point_num": 2,
"id": "has_multiple_inheritance",
"question": "Does any class in this file use multiple inheritance?",
"answer": false
... | 3 | tests/integration/test_deploy_model_ssl_on_auth_off.py | phantomcosmonaut/TabPy |
class Solution:
    r"""
    Function annotations demo: annotations live in ``__annotations__``.

    >>> def add(x: int, y: int) -> int:
    ...     return x + y
    >>> add.__annotations__
    {'x': <class 'int'>, 'y': <class 'int'>, 'return': <class 'int'>}
    """
    # BUG FIX: the doctest example body read `return a + b`, referencing names
    # that do not exist; corrected to the declared parameters x and y.

    def __init__(self):
        # Placeholder skeleton — nothing to initialise yet.
        pass

    def solve(self):
        """Placeholder solver; intentionally returns None."""
        pass
if __name__ == '__main__':
    # Run the doctests embedded in this module's docstrings.
    import doctest
    doctest.testmod()
{
"point_num": 1,
"id": "has_multiple_inheritance",
"question": "Does any class in this file use multiple inheritance?",
"answer": false
},
{
"point_num": 2,
"id": "has_nested_function_def",
"question": "Does this file contain any function defined inside another function?",
"answ... | 3 | 7/7_3.py | kopsh/python_cookbook |
""" Formula for building parallel """
from pakit import Archive, Recipe
class Parallel(Recipe):
    """Pakit recipe for GNU parallel, a shell job parallelizer."""

    def __init__(self):
        super(Parallel, self).__init__()
        self.homepage = 'http://www.gnu.org/software/parallel'
        # Stable and unstable both track the same snapshot archive.
        archive = Archive('https://ftp.gnu.org/gnu/parallel/'
                          'parallel-20181022.tar.bz2',
                          hash='2e84dee3556cbb8f6a3794f5b21549faffb132'
                          'db3fc68e2e95922963adcbdbec')
        self.repos = {'unstable': archive, 'stable': archive}

    def build(self):
        # Standard autotools flow; {prefix} is expanded by Recipe.cmd.
        self.cmd('./configure --prefix={prefix}')
        self.cmd('make install')

    def verify(self):
        first_line = self.cmd('parallel --version').output()[0]
        assert first_line.find('GNU parallel') != -1
| [
{
"point_num": 1,
"id": "every_function_has_docstring",
"question": "Does every function in this file have a docstring?",
"answer": false
},
{
"point_num": 2,
"id": "all_function_names_snake_case",
"question": "Are all function names in this file written in snake_case?",
"answer"... | 3 | parallel.py | pakit/recipes |
import json
from pyquery import PyQuery
from scylla.database import ProxyIP
from .base_provider import BaseProvider
class ProxyScraperProvider(BaseProvider):
    """Provider reading the proxy-scraper project's JSON proxy dump."""

    def urls(self) -> [str]:
        return ['https://raw.githubusercontent.com/sunny9577/proxy-scraper/master/proxies.json']

    def parse(self, document: PyQuery) -> [ProxyIP]:
        """Extract the US proxies from the JSON payload in *document*."""
        ip_list: [ProxyIP] = []
        text = document.html()
        # BUG FIX: json.load() expects a file object; document.html() returns
        # a string, so json.loads() is required here.
        json_object = json.loads(text)
        # .get() also avoids a KeyError when 'usproxy' is absent entirely.
        if not json_object or not isinstance(json_object.get('usproxy'), list):
            return ip_list
        for ip_port in json_object['usproxy']:
            p = ProxyIP(ip=ip_port['ip'], port=ip_port['port'])
            ip_list.append(p)
        return ip_list

    @staticmethod
    def should_render_js() -> bool:
        # Plain JSON endpoint; no JavaScript rendering needed.
        return False
| [
{
"point_num": 1,
"id": "all_return_types_annotated",
"question": "Does every function in this file have a return type annotation?",
"answer": true
},
{
"point_num": 2,
"id": "all_function_names_snake_case",
"question": "Are all function names in this file written in snake_case?",
... | 3 | scylla/providers/proxy_scraper_provider.py | cities/scylla |
from django.contrib.auth.models import User, Group
from .models import Typhoon, Point, GraphPoint
from rest_framework import serializers
class TyphoonListSerializer(serializers.HyperlinkedModelSerializer):
    """Summary serializer for typhoon lists; instances are looked up by year."""
    class Meta:
        model = Typhoon
        fields = ('num', 'name', 'englishname', 'startat', 'endat', 'year')
        lookup_field = 'year'
class TyphoonListViewsetSerializer(serializers.HyperlinkedModelSerializer):
    """Viewset variant of TyphoonListSerializer.

    NOTE(review): byte-for-byte identical to TyphoonListSerializer — consider
    consolidating (kept separate here to preserve the public API).
    """
    class Meta:
        model = Typhoon
        fields = ('num', 'name', 'englishname', 'startat', 'endat', 'year')
        lookup_field = 'year'
class PointListSerializer(serializers.ModelSerializer):
    """Track-point serializer; name/ename/isdelete come from the parent typhoon."""
    # Computed fields, resolved by the get_<name> methods below.
    name = serializers.SerializerMethodField()
    ename = serializers.SerializerMethodField()
    isdelete = serializers.SerializerMethodField()
    class Meta:
        model = Point
        fields = ('name', 'ename', 'typhoonnumber', 'happenedat', 'typhoontime', 'latitude', 'longitude',
                  'intensity', 'windspeed', 'airpressure', 'ordinarywindspeed', "is_change", 'isdelete')
        lookup_field = 'typhoonnumber'
    def get_name(self, obj):
        # Display name from the related Typhoon row.
        return obj.typhoonnumber.name
    def get_ename(self, obj):
        # English name from the related Typhoon row.
        return obj.typhoonnumber.englishname
    def get_isdelete(self, obj):
        # NOTE: the model field is spelled `is_delate` (sic) — do not "fix" here.
        return obj.typhoonnumber.is_delate
class TyphoonGraphDetailSerializer(serializers.ModelSerializer):
    """Graph-point serializer; mirrors PointListSerializer's computed fields."""
    # Computed fields, resolved by the get_<name> methods below.
    name = serializers.SerializerMethodField()
    ename = serializers.SerializerMethodField()
    isdelete = serializers.SerializerMethodField()
    class Meta:
        model = GraphPoint
        fields = ('name', 'ename', 'typhoonnumber', 'happenedat',
                  'intensity', 'isdelete', "is_change")
        lookup_field = 'typhoonnumber'
    def get_name(self, obj):
        # Display name from the related Typhoon row.
        return obj.typhoonnumber.name
    def get_ename(self, obj):
        # English name from the related Typhoon row.
        return obj.typhoonnumber.englishname
    def get_isdelete(self, obj):
        # NOTE: the model field is spelled `is_delate` (sic) — do not "fix" here.
        return obj.typhoonnumber.is_delate
| [
{
"point_num": 1,
"id": "every_function_has_docstring",
"question": "Does every function in this file have a docstring?",
"answer": false
},
{
"point_num": 2,
"id": "has_multiple_inheritance",
"question": "Does any class in this file use multiple inheritance?",
"answer": false
... | 3 | TyphoonApi/typhoon/serializers.py | ZhangDubhe/Tropical-Cyclone-Information-System |
from reliapy.distributions.continuous import _Continuous
from scipy.stats import exponnorm as prob
class ExponNorm(_Continuous):
    """
    Exponentially modified Normal distribution (wraps scipy.stats.exponnorm).
    """
    def __init__(self, K=None, loc=None, scale=None, random_state=None):
        """
        **Input:**
        * **K** (`float`)
            Shape parameter.
        * **loc** (`float`)
            Location parameter.
        * **scale** (`float`)
            Scale parameter.
        * **random_state** (`int` or `None`)
            Seed forwarded to scipy's sampler in `rvs`.
        """
        self.K = K
        self.loc = loc
        self.scale = scale
        # (mean, variance) of the parametrised distribution.
        self.stats = prob.stats(K=self.K, loc=self.loc, scale=self.scale, moments='mv')
        self.random_state = random_state
        super().__init__()
    def pdf(self, X=None):
        """
        PDF.
        **Input:**
        * **X** (`float`)
            Argument.
        **Output**
        PDF of X.
        """
        return prob.pdf(X, K=self.K, loc=self.loc, scale=self.scale)
    def cdf(self, X=None):
        """
        CDF.
        **Input:**
        * **X** (`float`)
            Argument.
        **Output**
        CDF of X.
        """
        return prob.cdf(X, K=self.K, loc=self.loc, scale=self.scale)
    def icdf(self, y=None):
        """
        Inverse CDF (percent-point function).
        **Input:**
        * **y** (`float`)
            Probability in [0, 1].
        **Output**
        Inverse CDF evaluated at y.
        """
        return prob.ppf(y, K=self.K, loc=self.loc, scale=self.scale)
    def moment(self, n=1):
        """
        Get the non-central moments of order n.
        **Input:**
        * **n** (`int`)
            Order of the moment.
        **Output**
        non central moment.
        """
        return prob.moment(n, K=self.K, loc=self.loc, scale=self.scale)
    def rvs(self, n_sim=1):
        """
        Get `n_sim` random samples.
        **Input:**
        * **n_sim** (`int`)
            Number of random samples.
        **Output**
        Samples.
        """
        return prob.rvs(K=self.K, loc=self.loc, scale=self.scale, size=n_sim, random_state=self.random_state)
| [
{
"point_num": 1,
"id": "all_params_annotated",
"question": "Does every function parameter in this file have a type annotation (excluding self/cls)?",
"answer": false
},
{
"point_num": 2,
"id": "every_function_has_docstring",
"question": "Does every function in this file have a docst... | 3 | src/reliapy/distributions/continuous/_exponnorm.py | reliapy/reliapy |
from .KdfParams import KdfParams
from .CipherParams import CipherParams
class CryptoStruct:
    """Keystore "crypto" section: cipher and KDF parameters plus the MAC.

    Appears to mirror a keystore JSON layout (cf. CipherParams/KdfParams);
    `from_dict` rebuilds an instance from such a mapping.
    """
    def __init__(
        self,
        cipher: int,
        ciphertext: str,
        cipherparams: CipherParams,
        kdf: str,
        kdfparams: KdfParams,
        mac: str,
    ):
        self._cipher = cipher
        self._ciphertext = ciphertext
        self._cipherparams = cipherparams
        self._kdf = kdf
        self._kdfparams = kdfparams
        self._mac = mac
    @classmethod
    def from_dict(cls, crypto):
        """Build an instance from a mapping without calling __init__.

        Keys that match property names (e.g. "cipherparams") go through the
        property setters below, so nested dicts are coerced to their wrapper
        types; any other keys become plain attributes.
        """
        new_crypto = cls.__new__(cls)
        for key in crypto:
            setattr(new_crypto, key, crypto[key])
        return new_crypto
    # Plain pass-through accessors for the scalar fields.
    @property
    def cipher(self):
        return self._cipher
    @cipher.setter
    def cipher(self, cipher):
        self._cipher = cipher
    @property
    def ciphertext(self):
        return self._ciphertext
    @ciphertext.setter
    def ciphertext(self, ciphertext):
        self._ciphertext = ciphertext
    @property
    def cipherparams(self):
        return self._cipherparams
    @cipherparams.setter
    def cipherparams(self, cipherparams):
        # Accept either a ready CipherParams or its dict form from JSON.
        if isinstance(cipherparams, dict):
            self._cipherparams = CipherParams.from_dict(cipherparams)
        else:
            self._cipherparams = cipherparams
    @property
    def kdf(self):
        return self._kdf
    @kdf.setter
    def kdf(self, kdf):
        self._kdf = kdf
    @property
    def kdfparams(self):
        return self._kdfparams
    @kdfparams.setter
    def kdfparams(self, kdfparams):
        # Accept either a ready KdfParams or its dict form from JSON.
        if isinstance(kdfparams, dict):
            self._kdfparams = KdfParams.from_dict(kdfparams)
        else:
            self._kdfparams = kdfparams
    @property
    def mac(self):
        return self._mac
    @mac.setter
    def mac(self, mac):
        self._mac = mac
{
"point_num": 1,
"id": "all_function_names_snake_case",
"question": "Are all function names in this file written in snake_case?",
"answer": true
},
{
"point_num": 2,
"id": "all_params_annotated",
"question": "Does every function parameter in this file have a type annotation (excludi... | 3 | xchainpy/xchainpy_crypto/xchainpy_crypto/models/CryptoStruct.py | tirinox/xchainpy-lib |
from app01.models import Comment
# 方案二
"""
思想:根据根评论递归查找它下面所有子评论
把它放到根评论的空间中
"""
def find_root_sub_comment(root_comment, sub_comment_list):
    """Depth-first collect every descendant of *root_comment* into *sub_comment_list*.

    Children are discovered through the reverse FK ``comment_set``; the
    accumulator list is mutated in place and nothing is returned.
    (Comments translated from Chinese.)
    """
    for sub_comment in root_comment.comment_set.all():
        # Direct child of the current comment.
        sub_comment_list.append(sub_comment)
        # Recurse so grandchildren (and deeper) land in the same flat list.
        find_root_sub_comment(sub_comment, sub_comment_list)
def sub_comment_list(nid):
    """Return the root comments of article *nid*, newest first, each carrying a
    flat ``sub_comment`` list with all of its descendants.
    """
    comment_query = Comment.objects.filter(article_id=nid).order_by('-create_time')  # newest first
    root_comments = []
    for comment in comment_query:
        # A comment with no parent is a root comment.
        if not comment.parent_comment:
            # Renamed from `list`, which shadowed the builtin; also dropped a
            # pointless trailing `continue` and dead commented-out code.
            descendants = []
            find_root_sub_comment(comment, descendants)
            comment.sub_comment = descendants
            root_comments.append(comment)
    return root_comments
{
"point_num": 1,
"id": "every_function_has_docstring",
"question": "Does every function in this file have a docstring?",
"answer": false
},
{
"point_num": 2,
"id": "no_function_exceeds_5_params",
"question": "Does every function in this file take 5 or fewer parameters (excluding sel... | 3 | app01/utils/sub_comment.py | Stylllzy/Blog-1.0 |
# coding: utf-8
import os
import pandas as pd
import numpy as np
import czsc
cur_path = os.path.split(os.path.realpath(__file__))[0]
file_kline = os.path.join(cur_path, "data/000001.SH_D.csv")
# Load the daily K-line fixture for index 000001.SH and parse its timestamps.
kline = pd.read_csv(file_kline, encoding="utf-8")
kline.loc[:, "dt"] = pd.to_datetime(kline.dt)
bars = kline.to_dict("records")
# Close prices as float64 — shared input for every indicator test below.
close = np.array([x['close'] for x in bars], dtype=np.double)
def test_sma():
    """SMA(5) keeps the input length and matches known reference values."""
    ma5 = czsc.SMA(close, 5)
    assert len(ma5) == len(close)
    for offset, expected in ((-1, 3362.53), (-2, 3410.62)):
        assert round(ma5[offset], 2) == expected
def test_macd():
    """MACD outputs keep the input length and match reference values."""
    diff, dea, macd = czsc.MACD(close)
    assert len(diff) == len(dea) == len(macd) == len(close)
    expectations = (
        (macd, -1, 13.35),
        (macd, -5, 88.0),
        (diff, -1, 117.3),
        (diff, -5, 127.51),
        (dea, -1, 110.62),
        (dea, -5, 83.51),
    )
    for series, offset, expected in expectations:
        assert round(series[offset], 2) == expected
def test_jdk():
    """KDJ end values must match the reference calculation."""
    highs = np.array([x['high'] for x in bars], dtype=np.double)
    lows = np.array([x['low'] for x in bars], dtype=np.double)
    k, d, j = czsc.KDJ(close, highs, lows)
    for series, expected in ((k, 59.94), (d, 80.47), (j, 18.87)):
        assert round(series[-1], 2) == expected
| [
{
"point_num": 1,
"id": "every_function_has_docstring",
"question": "Does every function in this file have a docstring?",
"answer": false
},
{
"point_num": 2,
"id": "all_function_names_snake_case",
"question": "Are all function names in this file written in snake_case?",
"answer"... | 3 | test/test_ta.py | mylovage/czsc |
from django import forms
from django.contrib import admin
from django.contrib.contenttypes import generic
from bible import Verse, RangeError # python-bible
from models import Scripture
class ScriptureForm(forms.ModelForm):
    """ModelForm validating start/end verse strings against the chosen Bible
    version via python-bible's ``Verse`` parser."""

    class Meta:
        model = Scripture

    def _validate_verse(self, field, version):
        """Parse cleaned_data[field] in *version*; on failure record the error
        against *field* and drop the value (mirrors Django form conventions)."""
        try:
            # Constructed purely for its validation side effect.
            Verse(self.cleaned_data[field] + ' ' + version)
        # NOTE(review): Exception already subsumes RangeError; the tuple is
        # kept for documentation value / behaviour parity.
        except (RangeError, Exception) as err:
            self._errors[field] = self.error_class([err.__str__()])
            del self.cleaned_data[field]

    def clean(self):
        # Translation string of the selected version (e.g. a version code);
        # looked up unconditionally, matching the original behaviour.
        version = self.cleaned_data['version'].model_class().translation
        if 'start_verse' in self.cleaned_data:
            self._validate_verse('start_verse', version)
        if 'end_verse' in self.cleaned_data and self.cleaned_data['end_verse']:
            self._validate_verse('end_verse', version)
        return super(ScriptureForm, self).clean()
class ScriptureInline(generic.GenericTabularInline):
    "Import and use wherever you wish for inline scripture adding/editing."
    model = Scripture
    form = ScriptureForm
    fields = ('start_verse', 'end_verse', 'version')
    # Show one empty extra row by default.
    extra = 1
class ScriptureAdmin(admin.ModelAdmin):
    """Standalone admin for Scripture rows, reusing the validating form."""
    list_display = ('start_verse', 'end_verse', 'version', 'start_book', 'end_book')
    list_filter = ('version', 'start_book_id')
    fields = ('start_verse', 'end_verse', 'version')
    form = ScriptureForm
#admin.site.register(Scripture, ScriptureAdmin) | [
{
"point_num": 1,
"id": "more_functions_than_classes",
"question": "Does this file define more functions than classes?",
"answer": false
},
{
"point_num": 2,
"id": "has_multiple_inheritance",
"question": "Does any class in this file use multiple inheritance?",
"answer": false
}... | 3 | bibletext/admin.py | richardbolt/django-bibletext |
"""
Allow null group.creator
Revision ID: 21b1ce37e327
Revises: 9389d52b037d
Create Date: 2017-04-13 16:49:16.218511
"""
from alembic import op
revision = "21b1ce37e327"
down_revision = "9389d52b037d"
def upgrade():
    # Make group.creator_id optional (allow NULL creators).
    op.alter_column("group", "creator_id", nullable=True)
def downgrade():
    # Revert: require a creator again. Fails if NULL creators exist.
    op.alter_column("group", "creator_id", nullable=False)
| [
{
"point_num": 1,
"id": "every_function_has_docstring",
"question": "Does every function in this file have a docstring?",
"answer": false
},
{
"point_num": 2,
"id": "all_return_types_annotated",
"question": "Does every function in this file have a return type annotation?",
"answe... | 3 | h/migrations/versions/21b1ce37e327_allow_null_group_creator.py | tgiardina/rpp-h |
# coding=UTF-8
import h5py
import numpy as np
import json
from DataClass.BassClass.ReaderBase import *
'''
MDFReader.py: Access measurement data from a MDF file.
'''
class ComplexEnconding(json.JSONEncoder):
    """JSON encoder serialising complex numbers as "<real>+<imag>i".

    NOTE(review): for any other unserialisable object this still falls off
    the end and returns None (encoded as null) instead of raising TypeError
    via super().default(o) — confirm whether that is intended.
    """
    def default(self, o):
        if isinstance(o, complex):
            # BUG FIX: the imaginary part was formatted with o.real, so 1+2j
            # serialised as "1.0+1.0i". Use o.imag.
            return "{real}+{image}i".format(real=o.real, image=o.imag)
class MDFReaderClass(ReaderBaseClass):
    """Reader for MDF measurement data stored in HDF5 files.

    Opens a system-matrix file and a measurement file, then publishes their
    datasets through the ReaderBaseClass message dictionary.
    """
    def __init__(self, SMNameFile, MeasNameFile):
        # SMNameFile: system-matrix HDF5 path; MeasNameFile: measurement HDF5 path.
        super().__init__()
        self.__SMNameFile = SMNameFile
        self.__MeasNameFile = MeasNameFile
        self._init_FileHandle()
        self._init_Message()
    # Get the file handle of the HDF5.
    def _init_FileHandle(self):
        # NOTE(review): both handles stay open for the object's lifetime and
        # are never closed explicitly — confirm this is intended.
        self.__SMF = h5py.File(self.__SMNameFile, 'r')
        self.__MeasF = h5py.File(self.__MeasNameFile, 'r')
        return True
    def __get_SMData(self):
        # System-matrix dataset (key constant from ReaderBase star import);
        # squeeze() drops singleton axes.
        S = self.__SMF[SYSMTRMEASDATA]
        return S[:, :, :, :].squeeze()
    def __get_MeasData(self):
        # Raw measurement signal, kept 4-D.
        S = self.__MeasF[MEASSIGNALDATA]
        return S[:, :, :, :]
    def __get_BackGround(self):
        # Boolean flags marking background frames.
        S = self.__SMF[ISBACKGROUNDFRAME]
        return S[:].view(bool)
    def __get_SamPointNum(self):
        # Number of sampling points as a plain Python int.
        S = self.__SMF[NUMSAMPLINGPOINTS]
        return int(np.array(S, dtype=np.int32))
    def __get_CaliSize(self):
        # Calibration grid size array.
        S = self.__SMF[CALIBRATIONSIZE]
        return S[:]
    # Initialize the Message.
    def _init_Message(self):
        # Publish every dataset under the ReaderBase message keys.
        self._set_MessageValue(MEASUREMENT, AUXSIGNAL, self.__get_SMData())
        self._set_MessageValue(MEASUREMENT, MEASIGNAL, self.__get_MeasData())
        self._set_MessageValue(MEASUREMENT, TYPE, SYSTEMMATRIX)
        self._set_MessageValue(MEASUREMENT, BGFLAG, self.__get_BackGround())
        self._set_MessageValue(SAMPLE, SAMNUMBER, self.__get_SamPointNum())
        self._set_MessageValue(MEASUREMENT, MEANUMBER, self.__get_CaliSize())
        return True
{
"point_num": 1,
"id": "all_function_names_snake_case",
"question": "Are all function names in this file written in snake_case?",
"answer": false
},
{
"point_num": 2,
"id": "every_class_has_docstring",
"question": "Does every class in this file have a docstring?",
"answer": fals... | 3 | src/DataClass/Reader/MDFReader.py | XiaoYaoNet/MPIRF |
# pylint:disable=unused-variable
# pylint:disable=unused-argument
# pylint:disable=redefined-outer-name
import os
from copy import deepcopy
from typing import Dict
import aiohttp
import pytest
import tenacity
from minio import Minio
from servicelib.minio_utils import MinioRetryPolicyUponInitialization
from yarl import URL
from .helpers.utils_docker import get_service_published_port
@pytest.fixture(scope="module")
def storage_endpoint(docker_stack: Dict, testing_environ_vars: Dict) -> URL:
    # Resolve the host port the swarm published for the storage service.
    prefix = testing_environ_vars["SWARM_STACK_NAME"]
    assert f"{prefix}_storage" in docker_stack["services"]
    default_port = testing_environ_vars["STORAGE_ENDPOINT"].split(":")[1]
    endpoint = f"127.0.0.1:{get_service_published_port('storage', default_port)}"
    # nodeports takes its configuration from env variables
    old_environ = deepcopy(os.environ)
    os.environ["STORAGE_ENDPOINT"] = endpoint
    yield URL(f"http://{endpoint}")
    # restore environ
    # NOTE(review): rebinding os.environ replaces the mapping object rather
    # than restoring keys in place; code holding a reference to the old
    # os.environ (or relying on putenv syncing) may misbehave — confirm.
    os.environ = old_environ
@pytest.fixture(scope="function")
async def storage_service(
    minio_service: Minio, storage_endpoint: URL, docker_stack: Dict
) -> URL:
    # Hand out the endpoint only once the storage service answers /v0/.
    await wait_till_storage_responsive(storage_endpoint)
    yield storage_endpoint
# HELPERS --
# TODO: this can be used by ANY of the simcore services!
@tenacity.retry(**MinioRetryPolicyUponInitialization().kwargs)
async def wait_till_storage_responsive(storage_endpoint: URL):
    # Poll GET /v0/ until it returns 200 with a non-null "data" payload;
    # tenacity retries failed attempts per the Minio initialization policy.
    async with aiohttp.ClientSession() as session:
        async with session.get(storage_endpoint.with_path("/v0/")) as resp:
            assert resp.status == 200
            data = await resp.json()
            assert "data" in data
            assert data["data"] is not None
| [
{
"point_num": 1,
"id": "every_function_has_docstring",
"question": "Does every function in this file have a docstring?",
"answer": false
},
{
"point_num": 2,
"id": "every_function_under_20_lines",
"question": "Is every function in this file shorter than 20 lines?",
"answer": tru... | 3 | packages/pytest-simcore/src/pytest_simcore/simcore_storage_service.py | colinRawlings/osparc-simcore |
import re
import yaml
def replace_e_float(d):
    """Recursively convert scientific-notation strings (e.g. "1e-5") in dict
    *d* to floats, in place.

    Apparently works around YAML parsing dot-less exponent literals as
    strings. Nested dicts are visited; lists are deliberately not (matching
    the original behaviour).
    """
    pattern = re.compile(r"^-?\d+(\.\d+)?e-?\d+$")
    for key, val in d.items():
        if isinstance(val, dict):  # isinstance instead of type()==dict: also covers subclasses
            replace_e_float(val)
        elif isinstance(val, str) and pattern.match(val):
            d[key] = float(val)
def load_cfg(name, prefix="."):
    """Load ``<prefix>/<name>.yaml``, repair string-encoded floats, return it."""
    with open(f"{prefix}/{name}.yaml") as handle:
        config = yaml.load(handle, Loader=yaml.SafeLoader)
    replace_e_float(config)
    return config
| [
{
"point_num": 1,
"id": "no_function_exceeds_5_params",
"question": "Does every function in this file take 5 or fewer parameters (excluding self/cls)?",
"answer": true
},
{
"point_num": 2,
"id": "all_params_annotated",
"question": "Does every function parameter in this file have a ty... | 3 | pol/common/load_cfg.py | neevparikh/lwm |
# --------------------------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# --------------------------------------------------------------------------------------------
from knack import CLI
import tempfile
from azure.cli.core import AzCommandsLoader
from azure.cli.core.cloud import get_active_cloud
from azure.cli.core.commands import AzCliCommand
from azure.cli.core._config import ENV_VAR_PREFIX
# Isolated config dir + env-var prefix so tests never touch the real CLI config.
MOCK_CLI_CONFIG_DIR = tempfile.mkdtemp()
MOCK_CLI_ENV_VAR_PREFIX = "MOCK_" + ENV_VAR_PREFIX
class MockClient:
    """Do-nothing client stub for tests that only need a client-shaped object."""
    def __init__(self):
        pass
    def get(self):
        # Intentionally a no-op.
        pass
class MockCLI(CLI):
    """knack CLI wired to a temp config dir so tests don't touch user config."""
    def __init__(self):
        super(MockCLI, self).__init__(
            cli_name="mock_cli",
            config_dir=MOCK_CLI_CONFIG_DIR,
            config_env_var_prefix=MOCK_CLI_ENV_VAR_PREFIX,
        )
        # Resolve the active cloud from the isolated configuration.
        self.cloud = get_active_cloud(self)
class MockCmd:
    """Test stand-in for a `cmd` object, wrapping a real AzCliCommand."""

    def __init__(self, cli_ctx):
        self.cli_ctx = cli_ctx
        self.cmd = AzCliCommand(AzCommandsLoader(cli_ctx), "mock-cmd", None)

    def supported_api_version(
        self,
        resource_type=None,
        min_api=None,
        max_api=None,
        operation_group=None,
        parameter_name=None,
    ):
        """Forward the API-version check to the wrapped AzCliCommand."""
        return self.cmd.supported_api_version(
            resource_type=resource_type, min_api=min_api, max_api=max_api,
            operation_group=operation_group, parameter_name=parameter_name)

    def get_models(self, *attr_args, **kwargs):
        """Forward model lookup to the wrapped AzCliCommand."""
        return self.cmd.get_models(*attr_args, **kwargs)
| [
{
"point_num": 1,
"id": "all_params_annotated",
"question": "Does every function parameter in this file have a type annotation (excluding self/cls)?",
"answer": false
},
{
"point_num": 2,
"id": "every_class_has_docstring",
"question": "Does every class in this file have a docstring?"... | 3 | src/azure-cli/azure/cli/command_modules/acs/tests/latest/mocks.py | staer/azure-cli |
import operator
from aiogram_dialog import ChatEvent, DialogManager
from aiogram_dialog.widgets.kbd import Select
from aiogram_dialog.widgets.text import Format
# let's assume this is our window data getter
async def get_data(**kwargs):
    """Window data getter: a fixed fruit catalogue plus its size."""
    names = ("Apple", "Pear", "Orange", "Banana")
    fruits = [(name, str(idx)) for idx, name in enumerate(names, start=1)]
    return {
        "fruits": fruits,
        "count": len(fruits),
    }
async def on_fruit_selected(c: ChatEvent, item_id: str, select: Select, manager: DialogManager):
    # Click handler for the Select widget below: just logs the chosen item's id.
    print("Fruit selected: ", item_id)
# Keyboard widget: one button per fruit, rendered from the window data above.
fruits_kbd = Select(
    Format("{item[0]} ({pos}/{data[count]})"),  # E.g `✓ Apple (1/4)`
    id="s_fruits",
    item_id_getter=operator.itemgetter(1),  # each item is a tuple with id on a first position
    items="fruits",  # we will use items from window data at a key `fruits`
    on_click=on_fruit_selected,  # called with the selected item's id
)
| [
{
"point_num": 1,
"id": "every_function_under_20_lines",
"question": "Is every function in this file shorter than 20 lines?",
"answer": true
},
{
"point_num": 2,
"id": "every_function_has_docstring",
"question": "Does every function in this file have a docstring?",
"answer": fals... | 3 | docs/examples/widgets/select.py | prostmich/aiogram_dialog |
# -*- coding: utf-8 -*-
from qcloudsdkcore.request import Request
class QueryBmTaskResultRequest(Request):
    """Request object for the bmvpc `QueryBmTaskResult` API action."""
    def __init__(self):
        # Args: module, CLI version, action name, API endpoint host.
        super(QueryBmTaskResultRequest, self).__init__(
            'bmvpc', 'qcloudcliV1', 'QueryBmTaskResult', 'bmvpc.api.qcloud.com')
    def get_taskId(self):
        # Returns None when the parameter has not been set yet.
        return self.get_params().get('taskId')
    def set_taskId(self, taskId):
        self.add_param('taskId', taskId)
| [
{
"point_num": 1,
"id": "all_return_types_annotated",
"question": "Does every function in this file have a return type annotation?",
"answer": false
},
{
"point_num": 2,
"id": "all_function_names_snake_case",
"question": "Are all function names in this file written in snake_case?",
... | 3 | qcloudsdkbmvpc/QueryBmTaskResultRequest.py | f3n9/qcloudcli |
#!/usr/bin/env python3
# PYTHON_ARGCOMPLETE_OK
from keyword2cmdline import command, command_config
# Lookup table: (source text, locale) -> translation; misses fall back to the source text.
TRANSLATIONS = {
    ("Hello world", "hi.IN"): "नमस्ते दुनिया"}
@command_config
def exclamation(number=2,
                sign="!",
                use=True):
    """Build the trailing punctuation: `sign` repeated `number` times, or '' when disabled."""
    if not use:
        return ""
    return sign * number
@command
def main(text="Hello world",
         language='en.US',
         exclamation=exclamation):
    """Print `text` translated for `language` (when known), followed by the exclamation."""
    translated = TRANSLATIONS.get((text, language), text)
    print(translated + exclamation())


if __name__ == '__main__':
    main()
| [
{
"point_num": 1,
"id": "no_function_exceeds_5_params",
"question": "Does every function in this file take 5 or fewer parameters (excluding self/cls)?",
"answer": true
},
{
"point_num": 2,
"id": "all_params_annotated",
"question": "Does every function parameter in this file have a ty... | 3 | examples/hello_world_recursive.py | wecacuee/keyword2cmdline |
#######################################################################
import leiap
import pandas as pd
#######################################################################
def test_clean_datetimes_return_df():
    """clean_datetimes must return a pandas DataFrame."""
    pts = leiap.get_points_simple()
    df = leiap.clean_datetimes(pts)
    assert isinstance(df, pd.DataFrame)
def test_clean_datetimes_length():
    """Row length should be greater than 0 (fixture data must not be empty)"""
    pts = leiap.get_points_simple()
    df = leiap.clean_datetimes(pts)
    assert df.shape[0] > 0
def test_clean_datetimes_columns():
    """Must have 'dt_adj' column"""
    # 'dt_adj' is presumably the adjusted-datetime column -- confirm against leiap docs.
    pts = leiap.get_points_simple()
    cols = leiap.clean_datetimes(pts).columns.tolist()
    assert 'dt_adj' in cols
def test_clean_datetimes_dt_col_type():
    # TODO: stub -- should assert the dtype of the 'dt_adj' column (e.g. datetime64).
    pass
#######################################################################
| [
{
"point_num": 1,
"id": "has_nested_function_def",
"question": "Does this file contain any function defined inside another function?",
"answer": false
},
{
"point_num": 2,
"id": "every_function_under_20_lines",
"question": "Is every function in this file shorter than 20 lines?",
... | 3 | tests/test_time.py | deppen8/leiap |
#!/usr/bin/env python
# Copyright 2016 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""
Prints "1" if Chrome targets should be built with hermetic Xcode.
Prints "2" if Chrome targets should be built with hermetic Xcode, but the OS
version does not meet the minimum requirements of the hermetic version of Xcode.
Otherwise prints "0".
Usage:
python should_use_hermetic_xcode.py <target_os>
"""
import os
import sys
# Directory containing this script, with symlinks resolved.
_THIS_DIR_PATH = os.path.abspath(os.path.dirname(os.path.realpath(__file__)))
_BUILD_PATH = os.path.join(_THIS_DIR_PATH, os.pardir)
# Make the parent build/ directory importable so mac_toolchain can be found.
sys.path.insert(0, _BUILD_PATH)
import mac_toolchain
def _IsCorpMachine():
    # Corp machines are identified by the presence of this support directory.
    return os.path.isdir('/Library/GoogleCorpSupport/')
def main():
    """Return "1" (use hermetic Xcode), "2" (requested but OS too old), or "0"."""
    target_os = sys.argv[1]
    # Corp Macs opt in automatically; everyone else needs FORCE_MAC_TOOLCHAIN set.
    allow_corp = target_os == 'mac' and _IsCorpMachine()
    wants_hermetic = bool(os.environ.get('FORCE_MAC_TOOLCHAIN')) or allow_corp
    if not wants_hermetic:
        return "0"
    if not mac_toolchain.PlatformMeetsHermeticXcodeRequirements(target_os):
        return "2"
    return "1"


if __name__ == '__main__':
    print(main())
    sys.exit(0)
| [
{
"point_num": 1,
"id": "has_nested_function_def",
"question": "Does this file contain any function defined inside another function?",
"answer": false
},
{
"point_num": 2,
"id": "every_function_under_20_lines",
"question": "Is every function in this file shorter than 20 lines?",
... | 3 | build/mac/should_use_hermetic_xcode.py | o-lim/generate-ninja |
class MLConfig:
    """Write-once settings holder: attribute names must be UPPERCASE and cannot be rebound."""

    class ConstError(PermissionError):
        """Raised on any attempt to rebind an existing constant."""

    class ConstCaseError(ConstError):
        """Raised when a constant's name is not fully uppercase."""

    def __setattr__(self, name, value):
        # Reject rebinding first, then enforce the naming convention.
        if name in self.__dict__:
            raise self.ConstError(f"can't change const {name}")
        if not name.isupper():
            raise self.ConstCaseError(f'const name "{name}" is not all uppercase')
        self.__dict__[name] = value
# Global, write-once configuration instance.
mlConfig = MLConfig()
# SITL Type PX4 or Ardupilot
mlConfig.MODE = 'Ardupilot'
# Output Debug information
mlConfig.DEBUG = False
# LSTM Input Length
mlConfig.INPUT_LEN = 3
# state + sensor
mlConfig.CONTEXT_LEN = 12
# size of input (context plus 20 additional values -- TODO confirm what the 20 covers)
mlConfig.DATA_LEN = mlConfig.CONTEXT_LEN + 20
# size of output
mlConfig.OUTPUT_DATA_LEN = 6
# Value retrans  (fixed: stray '| [' garbage after True was a syntax error)
mlConfig.RETRANS = True
{
"point_num": 1,
"id": "more_functions_than_classes",
"question": "Does this file define more functions than classes?",
"answer": false
},
{
"point_num": 2,
"id": "every_class_has_docstring",
"question": "Does every class in this file have a docstring?",
"answer": false
},
{... | 3 | ModelFit/config.py | xidian-uav/uavga |
"""This module provides AdaHandler, an implementation of ETLHandler for ADA."""
import logging
import requests
from fractalis.data.etlhandler import ETLHandler
logger = logging.getLogger(__name__)
class AdaHandler(ETLHandler):
    """This ETLHandler provides integration with ADA.

    'Ada provides key infrastructure for secured integration, visualization,
    and analysis of anonymized clinical and experimental data stored in CSV
    and tranSMART format, or provided by RedCAP and Synapse apps.'
    Project URL: https://git-r3lab.uni.lu/peter.banda/ncer-pd
    """
    _handler = 'ada'

    @staticmethod
    def make_label(descriptor: dict) -> str:
        """Build a human-readable label: '<dictionary label> (<data set>)'."""
        return '{} ({})'.format(descriptor['dictionary']['label'],
                                descriptor['data_set'])

    def _get_token_for_credentials(self, server: str, auth: dict) -> str:
        """Log in to an ADA server and return its session token.

        :param server: Base URL of the ADA instance.
        :param auth: Must contain non-empty 'user' and 'passwd' fields.
        :return: Value of the PLAY2AUTH_SESS_ID session cookie.
        :raises ValueError: For bad credentials, a failed login, or an
            unexpected login response.
        """
        try:
            user = auth['user']
            passwd = auth['passwd']
            if len(user) == 0 or len(passwd) == 0:
                raise KeyError
        except KeyError as e:
            logger.exception(e)
            # Chain the cause explicitly so tracebacks show the trigger.
            raise ValueError("The authentication object must contain the "
                             "non-empty fields 'user' and 'passwd'.") from e
        r = requests.post(url='{}/login'.format(server),
                          headers={'Accept': 'application/json'},
                          data={'id': user, 'password': passwd},
                          timeout=10)
        if r.status_code != 200:
            error = "Could not authenticate. " \
                    "Reason: [{}]: {}".format(r.status_code, r.text)
            logger.error(error)
            raise ValueError(error)
        # The session id comes back as a cookie, not in the response body.
        # Guarded lookups: the original raised KeyError/IndexError when the
        # header or the PLAY2AUTH_SESS_ID entry was missing.
        cookie = r.headers.get('Set-Cookie', '')
        matches = [s for s in cookie.split(';')
                   if s.startswith('PLAY2AUTH_SESS_ID')]
        if not matches:
            error = "Login response contained no PLAY2AUTH_SESS_ID cookie."
            logger.error(error)
            raise ValueError(error)
        token = '='.join(matches[0].split('=')[1:])  # remove PLAY2AUTH_SESS_ID=
        return token
| [
{
"point_num": 1,
"id": "all_return_types_annotated",
"question": "Does every function in this file have a return type annotation?",
"answer": true
},
{
"point_num": 2,
"id": "every_function_has_docstring",
"question": "Does every function in this file have a docstring?",
"answer... | 3 | fractalis/data/etls/ada/handler_ada.py | LCSB-BioCore/Fractalis |
import json
import logging
import storage
from defs import DeviceRequest
from device.models import Device, DeviceAddress
from hint.models import HintAuthentication
from hint.procedures.request_library import create_device
LOGGER = logging.getLogger(__name__)
"""
This module specifies the handling of device messages.
"""
def incoming_message(device: Device, request_type: int, data: bytes):
    """
    A device has sent a message to HUME.

    :param device: sender Device
    :param request_type: type of received message
    :param data: message data
    """
    # Lazy %-style args: the string is only formatted if the level is enabled.
    LOGGER.info("got new message from device %s", device.uuid)
    # Device responded to a capability request
    if request_type == DeviceRequest.CAPABILITY:
        LOGGER.info("message was a capability response")
        capability_response(device, data)
def capability_response(device, data):
    """
    Called when a device responds to a capability request. Registers the
    device with HINT and, on success, re-keys the local storage entries
    from the address-resolved record to the device's real UUID.

    :param device: Device callee
    :param data: capability data (JSON-encoded bytes)
    :return:
    """
    # TODO: Store the gotten capabilities in HUME as well, HUME needs to
    #  know some things for validation, but add what's needed WHEN it's
    #  needed.
    capabilities = json.loads(data)
    hint_auth = storage.get(HintAuthentication, None)
    if create_device(capabilities, hint_auth.session_id, hint_auth.csrf_token):
        LOGGER.info("device created in HINT successfully")
        # Update the device entry, set correct uuid
        storage.delete(device)  # Clear old address-resolved entry from local
        new_device = Device(uuid=capabilities["uuid"],
                            address=device.address,
                            name=device.name,
                            attached=True)
        storage.save(new_device)
        # Update device address entry to enable bi-directional lookups.
        device_address = storage.get(DeviceAddress, device.address)
        device_address.uuid = capabilities["uuid"]
        storage.save(device_address)
    # NOTE(review): a failed create_device is silently ignored (no log, no retry)
    # -- confirm that is intended.
| [
{
"point_num": 1,
"id": "all_params_annotated",
"question": "Does every function parameter in this file have a type annotation (excluding self/cls)?",
"answer": false
},
{
"point_num": 2,
"id": "every_function_under_20_lines",
"question": "Is every function in this file shorter than ... | 3 | hume/device/request_handler.py | megacorpincorporated/hume |
###############################################################################
# Copyright (c) 2017-2020 Koren Lev (Cisco Systems), #
# Yaron Yogev (Cisco Systems), Ilia Abashin (Cisco Systems) and others #
# #
# All rights reserved. This program and the accompanying materials #
# are made available under the terms of the Apache License, Version 2.0 #
# which accompanies this distribution, and is available at #
# http://www.apache.org/licenses/LICENSE-2.0 #
###############################################################################
from monitoring.setup.monitoring_simple_object import MonitoringSimpleObject
class MonitoringVnic(MonitoringSimpleObject):
    """Monitoring setup generator for vNIC objects."""
    def __init__(self, env):
        super().__init__(env)
    # add monitoring setup for remote host
    def create_setup(self, o):
        # Register a 'vnic' monitor for `o`; vnictype carries the object's vnic_type.
        self.setup('vnic', o, values={'vnictype': o['vnic_type']})
| [
{
"point_num": 1,
"id": "has_multiple_inheritance",
"question": "Does any class in this file use multiple inheritance?",
"answer": false
},
{
"point_num": 2,
"id": "every_function_has_docstring",
"question": "Does every function in this file have a docstring?",
"answer": false
... | 3 | monitoring/setup/monitoring_vnic.py | korenlev/calipso-cvim |
import pickle
import numpy as np
# Load the trained model and feature scaler once at import time.
# `with` ensures the pickle files are closed -- the original
# `pickle.load(open(...))` leaked both file handles.
# NOTE: pickle.load is only acceptable because these are trusted local artifacts;
# never unpickle untrusted data.
with open('./xgboost.pkl', 'rb') as _model_file:
    xgboost = pickle.load(_model_file)
with open('./scaler.pkl', 'rb') as _scaler_file:
    scaler = pickle.load(_scaler_file)
def transform_input(input):
    # Scale one raw feature vector; the scaler expects a 2-D array, hence [input].
    return scaler.transform([input])
def make_hard_prediction(input):
    # Class-label prediction for a single sample.
    return xgboost.predict(transform_input(input))
def make_soft_prediction(input):
    # Probability at column index 1 (the positive class in sklearn's convention)
    # for a single sample.
    return xgboost.predict_proba(transform_input(input))[0,1]
| [
{
"point_num": 1,
"id": "all_params_annotated",
"question": "Does every function parameter in this file have a type annotation (excluding self/cls)?",
"answer": false
},
{
"point_num": 2,
"id": "has_nested_function_def",
"question": "Does this file contain any function defined inside... | 3 | api.py | BabakShah/DS-SafetyPrediction |
from django.db import models
# Create your models here.
from django.contrib.auth.models import AbstractUser
class User(AbstractUser):
    """Custom user model extending Django's AbstractUser with a mobile number."""
    # Additional field: unique, nullable 11-digit mobile phone number.
    mobile = models.CharField(
        unique=True,
        verbose_name='手机号',
        null=True,
        max_length=11
    )
    class Meta:
        db_table = 'tb_users'  # explicit MySQL table name this model maps to
        verbose_name = '手机号'
        verbose_name_plural = '手机号'
    def __str__(self):
        # Display users by username (fixed: stray '| [' garbage after the
        # return value was a syntax error).
        return self.username
{
"point_num": 1,
"id": "every_class_has_docstring",
"question": "Does every class in this file have a docstring?",
"answer": false
},
{
"point_num": 2,
"id": "has_multiple_inheritance",
"question": "Does any class in this file use multiple inheritance?",
"answer": false
},
{... | 3 | meiduo_mall/meiduo_mall/apps/users/models.py | lyq919233278/meiduo_mall |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.