source string | points list | n_points int64 | path string | repo string |
|---|---|---|---|---|
from flask_wtf import Form
from wtforms import StringField, BooleanField, TextAreaField
from wtforms.validators import DataRequired, Length
from app.models import User
class LoginForm(Form):
openid = StringField('openid', validators=[DataRequired()])
remember_me = BooleanField('remember_me', default=False)
class EditForm(Form):
nickname = StringField('nickname', validators=[DataRequired()])
about_me = TextAreaField('about_me', validators=[Length(min=0, max=140)])
def __init__(self, original_nickname, *args, **kwargs):
Form.__init__(self, *args, **kwargs)
self.original_nickname = original_nickname
def validate(self):
if not Form.validate(self):
return False
if self.nickname.data == self.original_nickname:
return True
user = User.query.filter_by(nickname=self.nickname.data).first()
if user is not None:
self.nickname.errors.append('This nickname is already in use. Please choose another one.')
return False
return True
class PostForm(Form):
post = StringField('post', validators=[DataRequired()])
class SearchForm(Form):
search = StringField('search', validators=[DataRequired()])
| [
{
"point_num": 1,
"id": "every_class_has_docstring",
"question": "Does every class in this file have a docstring?",
"answer": false
},
{
"point_num": 2,
"id": "has_multiple_inheritance",
"question": "Does any class in this file use multiple inheritance?",
"answer": false
},
{... | 3 | app/forms.py | Stephanie-Spears/Microblog-Flask |
# coding: utf-8
"""
Kubernetes
No description provided (generated by Openapi Generator https://github.com/openapitools/openapi-generator) # noqa: E501
OpenAPI spec version: v1.13.5
Generated by: https://openapi-generator.tech
"""
from __future__ import absolute_import
import unittest
import kubernetes_asyncio.client
from kubernetes_asyncio.client.models.v2beta2_horizontal_pod_autoscaler_list import V2beta2HorizontalPodAutoscalerList # noqa: E501
from kubernetes_asyncio.client.rest import ApiException
class TestV2beta2HorizontalPodAutoscalerList(unittest.TestCase):
"""V2beta2HorizontalPodAutoscalerList unit test stubs"""
def setUp(self):
pass
def tearDown(self):
pass
def testV2beta2HorizontalPodAutoscalerList(self):
"""Test V2beta2HorizontalPodAutoscalerList"""
# FIXME: construct object with mandatory attributes with example values
# model = kubernetes_asyncio.client.models.v2beta2_horizontal_pod_autoscaler_list.V2beta2HorizontalPodAutoscalerList() # noqa: E501
pass
if __name__ == '__main__':
unittest.main()
| [
{
"point_num": 1,
"id": "more_functions_than_classes",
"question": "Does this file define more functions than classes?",
"answer": true
},
{
"point_num": 2,
"id": "all_function_names_snake_case",
"question": "Are all function names in this file written in snake_case?",
"answer": ... | 3 | kubernetes_asyncio/test/test_v2beta2_horizontal_pod_autoscaler_list.py | PidgeyBE/kubernetes_asyncio |
import tensorflow as tf
import cv2 as cv
import numpy as np
from PIL import Image
from core import utils
classesPath = "../../data/coco.names"
modelPath = "../../checkpoint/yolov3_cpu_nms.pb"
IMAGE_H, IMAGE_W = 416, 416
classes = utils.read_coco_names(classesPath)
num_classes = len(classes)
input_tensor, output_tensors = utils.read_pb_return_tensors(tf.get_default_graph(), modelPath,
["Placeholder:0", "concat_9:0", "mul_6:0"])
class YoloV3Net:
def __init__(self):
self.sess = tf.Session()
def run(self, img):
# Processing frame
img_resized = self._preprocessFrame(img)
boxes, scores = self.sess.run(output_tensors, feed_dict={input_tensor: np.expand_dims(img_resized, axis=0)})
boxes, scores, labels = utils.cpu_nms(boxes, scores, num_classes, score_thresh=0.4, iou_thresh=0.5)
# Keeping only box labelled "person"
if boxes is not None:
boxes = self._getOnlyDetectedPeople(boxes, labels)
return boxes
def __del__(self):
self.sess.close()
@staticmethod
def _getOnlyDetectedPeople(boxes, labels):
pBoxes = []
for i in np.arange(len(boxes)):
if labels[i] == 0:
pBoxes.append(boxes[i])
return pBoxes
@staticmethod
def _preprocessFrame(frame):
frameRGB = cv.cvtColor(frame, cv.COLOR_BGR2RGB)
image = Image.fromarray(frameRGB)
img_resized = np.array(image.resize(size=(IMAGE_H, IMAGE_W)), dtype=np.float32)
return img_resized / 255.
| [
{
"point_num": 1,
"id": "all_function_names_snake_case",
"question": "Are all function names in this file written in snake_case?",
"answer": false
},
{
"point_num": 2,
"id": "every_function_under_20_lines",
"question": "Is every function in this file shorter than 20 lines?",
"ans... | 3 | detection/yolov3/yolov3.py | benoitLemoine/stage2A |
#! /usr/bin/env python
# -*- coding: utf-8 -*-
"""
Copyright (C) 2017 IBM Corporation
Licensed under the Apache License, Version 2.0 (the “License”);
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an “AS IS” BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
Contributors:
* Rafael Sene <rpsene@br.ibm.com>
* Daniel Kreling <dbkreling@br.ibm.com>
* Roberto Oliveira <rdutra@br.ibm.com>
"""
import unittest
import csv
from cpi import core
class CoreTests(unittest.TestCase):
""" Class to run tests from core """
def test_execute(self):
self.assertEqual(0, core.execute("cd"))
self.assertNotEqual(0, core.execute("foo_bar"))
def test_execute_stdout(self):
status, output = core.execute_stdout("cd")
self.assertEqual(0, status)
self.assertEqual("", output)
status, output = core.execute_stdout("ls /bla/foo")
self.assertNotEqual(0, status)
self.assertIn(b"No such file or directory", output)
def test_cmdexist(self):
assert core.cmdexists("cd") is True
assert core.cmdexists("foo_bar") is False
def test_get_processor(self):
self.assertEqual("POWER8", core.get_processor())
def test_supported_processor(self):
assert core.supported_processor("POWER7") is False
assert core.supported_processor("POWER8") is True
def test_percentage(self):
self.assertEqual("100.00", core.percentage(10, 20))
self.assertEqual("-50.00", core.percentage(20, 10))
self.assertEqual("0.00", core.percentage(10, 10))
if __name__ == '__main__':
unittest.main()
| [
{
"point_num": 1,
"id": "has_multiple_inheritance",
"question": "Does any class in this file use multiple inheritance?",
"answer": false
},
{
"point_num": 2,
"id": "more_functions_than_classes",
"question": "Does this file define more functions than classes?",
"answer": true
},... | 3 | tests/core_test.py | ToThAc/cpi-breakdown |
from concurrent import futures
import time
import grpc
import app.helloworld_pb2 as helloworld_pb2
import app.helloworld_pb2_grpc as helloworld_pb2_grpc
_ONE_DAY_IN_SECONDS = 60 * 60 * 24
class Greeter(helloworld_pb2_grpc.GreeterServicer):
def Greet(self, request, context):
print('Saying `hello` to %s' % request.name)
return helloworld_pb2.GreetResponse(message='Hello, {}!'.format(request.name))
def serve():
server = grpc.server(futures.ThreadPoolExecutor(max_workers=10))
helloworld_pb2_grpc.add_GreeterServicer_to_server(Greeter(), server)
server.add_insecure_port('[::]:50051')
server.start()
try:
while True:
time.sleep(_ONE_DAY_IN_SECONDS)
except KeyboardInterrupt:
server.stop(0)
if __name__ == '__main__':
serve()
| [
{
"point_num": 1,
"id": "every_function_under_20_lines",
"question": "Is every function in this file shorter than 20 lines?",
"answer": true
},
{
"point_num": 2,
"id": "has_nested_function_def",
"question": "Does this file contain any function defined inside another function?",
"... | 3 | app/server.py | betandr/grpcdemo |
import pytest
from thefuck.rules.git_rebase_no_changes import match, get_new_command
from thefuck.types import Command
@pytest.fixture
def output():
return '''Applying: Test commit
No changes - did you forget to use 'git add'?
If there is nothing left to stage, chances are that something else
already introduced the same changes; you might want to skip this patch.
When you have resolved this problem, run "git rebase --continue".
If you prefer to skip this patch, run "git rebase --skip" instead.
To check out the original branch and stop rebasing, run "git rebase --abort".
'''
def test_match(output):
assert match(Command('git rebase --continue', output))
assert not match(Command('git rebase --continue', ''))
assert not match(Command('git rebase --skip', ''))
def test_get_new_command(output):
assert (get_new_command(Command('git rebase --continue', output)) ==
'git rebase --skip')
| [
{
"point_num": 1,
"id": "all_function_names_snake_case",
"question": "Are all function names in this file written in snake_case?",
"answer": true
},
{
"point_num": 2,
"id": "every_function_has_docstring",
"question": "Does every function in this file have a docstring?",
"answer":... | 3 | tests/rules/test_git_rebase_no_changes.py | HiteshMah-Jan/thefuck |
from uuid import uuid4
class SpanContext:
def __init__(self, trace_id: str = None, span_id: str = None):
self.__trace_id = trace_id if trace_id else uuid4().hex
self.__span_id = span_id if span_id else uuid4().hex[16:]
@property
def trace_id(self) -> str:
return self.__trace_id
@property
def span_id(self) -> str:
return self.__span_id
| [
{
"point_num": 1,
"id": "all_function_names_snake_case",
"question": "Are all function names in this file written in snake_case?",
"answer": true
},
{
"point_num": 2,
"id": "more_functions_than_classes",
"question": "Does this file define more functions than classes?",
"answer": ... | 3 | trace/spancontext.py | iredelmeier/doughknots |
from typing import Generic, TypeVar, List, Optional
T = TypeVar('T')
class Stack(Generic[T]):
def __init__(self):
self.items: List[T] = []
def empty(self) -> bool:
return len(self.items) == 0
def push(self, item: T):
self.items.append(item)
def pop(self) -> T:
return self.items.pop()
def peek(self, default: Optional[T] = None) -> Optional[T]:
if len(self.items) > 0:
return self.items[-1]
return default
| [
{
"point_num": 1,
"id": "all_function_names_snake_case",
"question": "Are all function names in this file written in snake_case?",
"answer": true
},
{
"point_num": 2,
"id": "every_function_under_20_lines",
"question": "Is every function in this file shorter than 20 lines?",
"answ... | 3 | stx/utils/stack.py | bakasoft/stx |
"""
The SerialDevice class:
a device that communicates through the serial port.
"""
from device import Device
import serial
import ctypes
from serial.tools import list_ports
__all__ = ['SerialDevice']
class SerialDevice(Device):
'''
A device that communicates through the serial port.
'''
def __init__(self, name = None):
Device.__init__(self)
# Open the serial port
self.port = serial.Serial()
if name is None: # looks for USB serial device
for port in list_ports.grep('USB Serial Port'):
name,_,_ = port
break # just the first one
self.port.port = name
def __del__(self):
self.port.close()
del self.port
def CRC_16(self, butter, length):
# Calculate CRC-16 checksum based on the data sent
#
crc_polynom = 0x1021
crc = 0
n = 0
lll = length
while (lll > 0):
crc = crc ^ butter[n] << 8
for _ in range(8):
if (crc & 0x8000):
crc = crc << 1 ^ crc_polynom
else:
crc = crc << 1
lll -= 1
n += 1
crc_high = ctypes.c_ubyte(crc >> 8)
crc_low = ctypes.c_ubyte(crc)
return (crc_high.value, crc_low.value)
if __name__ == '__main__':
for port in list_ports.comports():
print(port)
| [
{
"point_num": 1,
"id": "every_function_has_docstring",
"question": "Does every function in this file have a docstring?",
"answer": false
},
{
"point_num": 2,
"id": "all_params_annotated",
"question": "Does every function parameter in this file have a type annotation (excluding self/... | 3 | OPTIMAQS/controller/controller/serialdevice.py | jeremyforest/whole_optic_gui |
from airypi.remote_obj import send_to_device
def multi():
send_to_device({'type': 'transaction',
'func': 'begin'})
def execute():
send_to_device({'type': 'transaction',
'func': 'execute'})
def sleep(duration):
send_to_device({'type': 'transaction',
'func': 'sleep',
'args': {'duration': duration}}) | [
{
"point_num": 1,
"id": "every_function_has_docstring",
"question": "Does every function in this file have a docstring?",
"answer": false
},
{
"point_num": 2,
"id": "has_nested_function_def",
"question": "Does this file contain any function defined inside another function?",
"ans... | 3 | airypi/action_queue.py | airypi/airypi |
# Copyright (C) 2013-2020 Free Software Foundation, Inc.
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
# This test case is to test the speed of GDB when it is analyzing the
# function prologue.
from perftest import perftest
class SkipPrologue(perftest.TestCaseWithBasicMeasurements):
def __init__(self, count):
super(SkipPrologue, self).__init__("skip-prologue")
self.count = count
def _test(self):
for _ in range(1, self.count):
# Insert breakpoints on function f1 and f2.
bp1 = gdb.Breakpoint("f1")
bp2 = gdb.Breakpoint("f2")
# Remove them.
bp1.delete()
bp2.delete()
def warm_up(self):
self._test()
def execute_test(self):
for i in range(1, 4):
gdb.execute("set code-cache off")
gdb.execute("set code-cache on")
self.measure.measure(self._test, i)
| [
{
"point_num": 1,
"id": "all_function_names_snake_case",
"question": "Are all function names in this file written in snake_case?",
"answer": true
},
{
"point_num": 2,
"id": "every_function_under_20_lines",
"question": "Is every function in this file shorter than 20 lines?",
"answ... | 3 | contrib/gnu/gdb/dist/gdb/testsuite/gdb.perf/skip-prologue.py | TheSledgeHammer/2.11BSD |
# coding=utf-8
"""
The Landinge Page actions API endpoint
Documentation: https://mailchimp.com/developer/reference/landing-pages/
"""
from __future__ import unicode_literals
from mailchimp3.baseapi import BaseApi
class LandingPageAction(BaseApi):
"""
Manage your Landing Pages, including publishing and unpublishing.
"""
def __init__(self, *args, **kwargs):
"""
Initialize the endpoint
"""
super(LandingPageAction, self).__init__(*args, **kwargs)
self.endpoint = 'landing-pages'
self.page_id = None
def publish(self, page_id):
"""
Publish a landing page that is in draft, unpublished, or has been
previously published and edited.
:param page_id: The unique id for the page.
:type page_id: :py:class:`str`
"""
self.page_id = page_id
return self._mc_client._post(url=self._build_path(page_id, 'actions/publish'))
def unpublish(self, page_id):
"""
Unpublish a landing page that is in draft or has been published.
:param page_id: The unique id for the page.
:type page_id: :py:class:`str`
"""
self.page_id = page_id
return self._mc_client._post(url=self._build_path(page_id, 'actions/unpublish'))
| [
{
"point_num": 1,
"id": "more_functions_than_classes",
"question": "Does this file define more functions than classes?",
"answer": true
},
{
"point_num": 2,
"id": "all_function_names_snake_case",
"question": "Are all function names in this file written in snake_case?",
"answer": ... | 3 | mailchimp3/entities/landingpageaction.py | michaelwalkerfl/python-mailchimp |
from lemur.plugins.bases import DestinationPlugin
class TestDestinationPlugin(DestinationPlugin):
title = 'Test'
slug = 'test-destination'
description = 'Enables testing'
author = 'Kevin Glisson'
author_url = 'https://github.com/netflix/lemur.git'
def __init__(self, *args, **kwargs):
super(TestDestinationPlugin, self).__init__(*args, **kwargs)
def upload(self, name, body, private_key, cert_chain, options, **kwargs):
return
| [
{
"point_num": 1,
"id": "all_params_annotated",
"question": "Does every function parameter in this file have a type annotation (excluding self/cls)?",
"answer": false
},
{
"point_num": 2,
"id": "more_functions_than_classes",
"question": "Does this file define more functions than clas... | 3 | lemur/tests/plugins/destination_plugin.py | x-lhan/lemur |
import threading
# Thread running server processing loop
class ServerThread(threading.Thread):
"""
A helper class to run server in a thread.
The following snippet runs the server for 4 seconds and quit::
server = SimpleServer()
server_thread = ServerThread(server)
server_thread.start()
time.sleep(4)
server_thread.stop()
"""
def __init__(self, server):
"""
:param server: :class:`pcaspy.SimpleServer` object
"""
super(ServerThread, self).__init__()
self.server = server
self.running = True
def run(self):
"""
Start the server processing
"""
while self.running:
self.server.process(0.1)
def stop(self):
"""
Stop the server processing
"""
self.running = False
| [
{
"point_num": 1,
"id": "more_functions_than_classes",
"question": "Does this file define more functions than classes?",
"answer": true
},
{
"point_num": 2,
"id": "every_function_under_20_lines",
"question": "Is every function in this file shorter than 20 lines?",
"answer": true
... | 3 | pcaspy/tools.py | dchabot/python-pcaspy |
import pymongo
from bson import ObjectId
from src.services import config
collection = config.db.incomes
def search_by_user_email(user_email, itype):
return collection.find({"user_email": user_email, "itype": itype})
def sum_amounts_by_user(user_email, itype):
pipeline = [{"$match": {"user_email": user_email, "itype": itype}}, {"$group": {"_id": "null", "total": {"$sum": "$amount"}}}]
return collection.aggregate(pipeline)
def save(income):
collection.insert_one(income.__dict__)
def save_all(incomes):
collection.insert_many(incomes)
def update(income_id, income):
collection.find_one_and_update(
{"_id": ObjectId(income_id)},
{"$set": income.__dict__},
upsert=True)
def delete(income_id):
collection.delete_one({"_id": ObjectId(income_id)})
def filter(user_email, category, date, account, itype):
pipeline = [{
"$match": {
"user_email": user_email,
"category": category,
"date": date,
"account": account,
"itype": itype
}},
{"$sort": {"date": pymongo.DESCENDING}}
]
return collection.aggregate(pipeline)
| [
{
"point_num": 1,
"id": "every_function_has_docstring",
"question": "Does every function in this file have a docstring?",
"answer": false
},
{
"point_num": 2,
"id": "all_function_names_snake_case",
"question": "Are all function names in this file written in snake_case?",
"answer"... | 3 | src/services/incomesService.py | TTIP-UNQ-Team6/gastapp_back |
import os
import codecs
import re
from setuptools import setup
def read(*parts):
return codecs.open(os.path.join(os.path.dirname(__file__), *parts)).read()
def find_version(*file_paths):
version_file = read(*file_paths)
version_match = re.search(r"^__version__ = ['\"]([^'\"]*)['\"]",
version_file, re.M)
if version_match:
return version_match.group(1)
raise RuntimeError("Unable to find version string.")
setup(
name='django-sorter',
version=find_version("sorter", "__init__.py"),
description='A helper app for sorting objects in Django templates.',
long_description=read('README.rst'),
author='Jannis Leidel',
author_email='jannis@leidel.info',
license='BSD',
url='https://django-sorter.readthedocs.io/',
packages=['sorter', 'sorter.templatetags'],
package_data={
'sorter': [
'templates/sorter/*.html',
'locale/*/*/*',
],
},
classifiers=[
"Development Status :: 4 - Beta",
'Environment :: Web Environment',
'Framework :: Django',
'Framework :: Django :: 1.9',
'Framework :: Django :: 1.10',
'Framework :: Django :: 1.11',
'Intended Audience :: Developers',
'License :: OSI Approved :: BSD License',
'Operating System :: OS Independent',
'Programming Language :: Python',
'Programming Language :: Python :: 2.5',
'Programming Language :: Python :: 2.6',
'Programming Language :: Python :: 2.7',
'Topic :: Utilities',
],
install_requires=[
'django-appconf >= 0.4',
'django-ttag >= 2.3',
'URLObject >= 2.0.1',
],
)
| [
{
"point_num": 1,
"id": "every_function_under_20_lines",
"question": "Is every function in this file shorter than 20 lines?",
"answer": true
},
{
"point_num": 2,
"id": "every_function_has_docstring",
"question": "Does every function in this file have a docstring?",
"answer": fals... | 3 | setup.py | jazzband/django-sorter |
import pandas
import os
path, _ = os.path.split(__file__)
kidera_factors = pandas.read_csv(os.path.join(path, 'kidera.csv'),
header=None,
index_col=0)
symbol_lookup = { 'ALA': 'A', 'ARG': 'R',
'ASN': 'N', 'ASP': 'D',
'CYS': 'C', 'GLN': 'Q',
'GLU': 'E', 'GLY': 'G',
'HIS': 'H', 'ILE': 'I',
'LEU': 'L', 'LYS': 'K',
'MET': 'M', 'PHE': 'F',
'PRO': 'P', 'SER': 'S',
'THR': 'T', 'TRP': 'W',
'TYR': 'Y', 'VAL': 'V' }
kidera_factors.index = kidera_factors.index \
.map(lambda x: symbol_lookup[x])
def score_positions(sequence):
return kidera_factors.loc[list(sequence)]
def score_sequence(sequence):
return kidera_factors.loc[list(sequence)].sum() / len(sequence)
| [
{
"point_num": 1,
"id": "all_return_types_annotated",
"question": "Does every function in this file have a return type annotation?",
"answer": false
},
{
"point_num": 2,
"id": "has_nested_function_def",
"question": "Does this file contain any function defined inside another function?... | 3 | Kidera/kidera.py | IdoSpringer/TCR-PEP-Classification |
from invoke import task
@task
def precheck(ctx):
ctx.run("black .")
ctx.run("pre-commit run -a")
ctx.run("interrogate -c pyproject.toml", pty=True)
@task
def clean(ctx):
ctx.run("python setup.py clean")
ctx.run("rm -rf netcfgbu.egg-info")
ctx.run("rm -rf .pytest_cache .pytest_tmpdir .coverage")
ctx.run("rm -rf htmlcov")
| [
{
"point_num": 1,
"id": "every_function_has_docstring",
"question": "Does every function in this file have a docstring?",
"answer": false
},
{
"point_num": 2,
"id": "every_function_under_20_lines",
"question": "Is every function in this file shorter than 20 lines?",
"answer": tru... | 3 | tasks.py | vivekvashist/demo-beginner-concurrency |
# Copyright (c) 2019 NVIDIA Corporation
import torch
import torch.nn as nn
from nemo.backends.pytorch.nm import LossNM
from nemo.core.neural_types import (NeuralType, AxisType, BatchTag, TimeTag,
ChannelTag)
class CTCLossNM(LossNM):
"""
Neural Module wrapper for pytorch's ctcloss
Args:
num_classes (int): Number of characters in ASR model's vocab/labels.
This count should not include the CTC blank symbol.
"""
@staticmethod
def create_ports():
input_ports = {
"log_probs": NeuralType({1: AxisType(TimeTag),
0: AxisType(BatchTag),
2: AxisType(ChannelTag)}),
"targets": NeuralType({0: AxisType(BatchTag),
1: AxisType(TimeTag)}),
"input_length": NeuralType({0: AxisType(BatchTag)}),
"target_length": NeuralType({0: AxisType(BatchTag)})
}
output_ports = {"loss": NeuralType(None)}
return input_ports, output_ports
def __init__(self, *, num_classes, **kwargs):
LossNM.__init__(self, **kwargs)
# self._blank = self.local_parameters.get('blank', 0)
self._blank = num_classes
self._criterion = nn.CTCLoss(blank=self._blank,
reduction='none')
def _loss(self, log_probs, targets, input_length, target_length):
input_length = input_length.long()
target_length = target_length.long()
targets = targets.long()
loss = self._criterion(log_probs.transpose(1, 0), targets,
input_length,
target_length)
# note that this is different from reduction = 'mean'
# because we are not dividing by target lengths
loss = torch.mean(loss)
return loss
def _loss_function(self, **kwargs):
return self._loss(*(kwargs.values()))
| [
{
"point_num": 1,
"id": "more_functions_than_classes",
"question": "Does this file define more functions than classes?",
"answer": true
},
{
"point_num": 2,
"id": "every_function_under_20_lines",
"question": "Is every function in this file shorter than 20 lines?",
"answer": true
... | 3 | collections/nemo_asr/nemo_asr/losses.py | Giuseppe5/NeMo |
import sys
sys.path.append('../python-mbus')
import pytest
from mbus import MBus
@pytest.fixture
def mbus_tcp():
return MBus.MBus(host="127.0.0.1")
def test_connect(mbus_tcp):
mbus_tcp.connect()
| [
{
"point_num": 1,
"id": "has_nested_function_def",
"question": "Does this file contain any function defined inside another function?",
"answer": false
},
{
"point_num": 2,
"id": "all_return_types_annotated",
"question": "Does every function in this file have a return type annotation?... | 3 | tests/test_MBus_connect.py | droid4control/python-mbus |
class Solution:
# my solution
def prefixesDivBy5(self, A: List[int]) -> List[bool]:
res = []
tmp = 0
for n in A:
tmp = tmp * 2 + n
res.append(tmp % 5 == 0)
return res
# faster solution
# https://leetcode.com/problems/binary-prefix-divisible-by-5/discuss/265509/Python-Calculate-Prefix-Mod
def prefixesDivBy5(self, A: List[int]) -> List[bool]:
B = A[:]
for i in range(1, len(B)):
B[i] += B[i-1] * 2 % 5
return [b % 5 == 0 for b in B] | [
{
"point_num": 1,
"id": "all_return_types_annotated",
"question": "Does every function in this file have a return type annotation?",
"answer": true
},
{
"point_num": 2,
"id": "every_function_under_20_lines",
"question": "Is every function in this file shorter than 20 lines?",
"an... | 3 | LeetCode/easy - Array/1018. Binary Prefix Divisible By 5/solution.py | vincent507cpu/Comprehensive-Algorithm-Solution |
from django.http import HttpResponse
from django.utils.encoding import force_str
from form_designer.contrib.exporters import FormLogExporterBase
try:
import xlwt
except ImportError: # pragma: no cover
XLWT_INSTALLED = False
else: # pragma: no cover
XLWT_INSTALLED = True
class XlsExporter(FormLogExporterBase):
@staticmethod
def export_format():
return 'XLS'
@staticmethod
def is_enabled():
return XLWT_INSTALLED
def init_writer(self):
self.wb = xlwt.Workbook()
self.ws = self.wb.add_sheet(force_str(self.model._meta.verbose_name_plural))
self.rownum = 0
def init_response(self):
self.response = HttpResponse(content_type='application/ms-excel')
self.response['Content-Disposition'] = 'attachment; filename=%s.xls' % (
self.model._meta.verbose_name_plural
)
def writerow(self, row):
for i, f in enumerate(row):
self.ws.write(self.rownum, i, force_str(f))
self.rownum += 1
def close(self):
self.wb.save(self.response)
def export(self, request, queryset=None):
return super().export(request, queryset)
| [
{
"point_num": 1,
"id": "all_function_names_snake_case",
"question": "Are all function names in this file written in snake_case?",
"answer": true
},
{
"point_num": 2,
"id": "has_nested_function_def",
"question": "Does this file contain any function defined inside another function?",
... | 3 | form_designer/contrib/exporters/xls_exporter.py | kcsry/django-form-designer |
# ============================================================================
# FILE: matcher_full_fuzzy.py
# AUTHOR: Shougo Matsushita <Shougo.Matsu at gmail.com>
# License: MIT license
# ============================================================================
import re
from deoplete.base.filter import Base
from deoplete.util import fuzzy_escape, Nvim, UserContext, Candidates
class Filter(Base):
def __init__(self, vim: Nvim) -> None:
super().__init__(vim)
self.name = 'matcher_full_fuzzy'
self.description = 'full fuzzy matcher'
def filter(self, context: UserContext) -> Candidates:
complete_str = context['complete_str']
if context['ignorecase']:
complete_str = complete_str.lower()
p = re.compile(fuzzy_escape(complete_str, context['camelcase']))
if context['ignorecase']:
return [x for x in context['candidates']
if p.search(x['word'].lower())]
else:
return [x for x in context['candidates']
if p.search(x['word'])]
| [
{
"point_num": 1,
"id": "more_functions_than_classes",
"question": "Does this file define more functions than classes?",
"answer": true
},
{
"point_num": 2,
"id": "all_return_types_annotated",
"question": "Does every function in this file have a return type annotation?",
"answer"... | 3 | rplugin/python3/deoplete/filter/matcher_full_fuzzy.py | kazufusa/deoplete.nvim |
import string
import random
from toontown.toonbase import TTLocalizer
from otp.otpbase import OTPLocalizer
from otp.chat import ChatGarbler
class ToonChatGarbler(ChatGarbler.ChatGarbler):
animalSounds = {'dog': TTLocalizer.ChatGarblerDog,
'cat': TTLocalizer.ChatGarblerCat,
'mouse': TTLocalizer.ChatGarblerMouse,
'horse': TTLocalizer.ChatGarblerHorse,
'rabbit': TTLocalizer.ChatGarblerRabbit,
'duck': TTLocalizer.ChatGarblerDuck,
'monkey': TTLocalizer.ChatGarblerMonkey,
'bear': TTLocalizer.ChatGarblerBear,
'pig': TTLocalizer.ChatGarblerPig,
'deer': TTLocalizer.ChatGarblerDeer,
'default': OTPLocalizer.ChatGarblerDefault}
def garble(self, toon, message):
newMessage = ''
animalType = toon.getStyle().getType()
if animalType in ToonChatGarbler.animalSounds:
wordlist = ToonChatGarbler.animalSounds[animalType]
else:
wordlist = ToonChatGarbler.animalSounds['default']
numWords = random.randint(1, 7)
for i in xrange(1, numWords + 1):
wordIndex = random.randint(0, len(wordlist) - 1)
newMessage = newMessage + wordlist[wordIndex]
if i < numWords:
newMessage = newMessage + ' '
return newMessage
def garbleSingle(self, toon, message):
newMessage = ''
animalType = toon.getStyle().getType()
if animalType in ToonChatGarbler.animalSounds:
wordlist = ToonChatGarbler.animalSounds[animalType]
else:
wordlist = ToonChatGarbler.animalSounds['default']
numWords = 1
for i in xrange(1, numWords + 1):
wordIndex = random.randint(0, len(wordlist) - 1)
newMessage = newMessage + wordlist[wordIndex]
if i < numWords:
newMessage = newMessage + ' '
return newMessage
| [
{
"point_num": 1,
"id": "every_function_under_20_lines",
"question": "Is every function in this file shorter than 20 lines?",
"answer": true
},
{
"point_num": 2,
"id": "has_multiple_inheritance",
"question": "Does any class in this file use multiple inheritance?",
"answer": false... | 3 | toontown/chat/ToonChatGarbler.py | philicheese2003/ToontownProjectAltisServer |
"""String utilities.
"""
# Author Info
__author__ = 'Vishwajeet Ghatage'
__date__ = '17/07/21'
__email__ = 'cloudmail.vishwajeet@gmail.com'
# Library Imports
import random
import string
from datetime import datetime
# Own Imports
from src import settings
def verification_code() -> str:
"""Generate verification code.
Returns:
---------
Alphanumeric code of length 'src.settings.VERIFICATION_CODE_LENGTH'
"""
data = string.ascii_uppercase + string.digits
code = ''.join([random.choice(data) for _ in range(settings.VERIFICATION_CODE_LENGTH)])
return code
def file_name(name: str, key: str) -> str:
"""Generate file name.
Generate a random file name.
Arguments:
---------
name: File name.
key: Key to add for randomness.
Returns:
---------
File name based on current UTC timestamp and specified key.
"""
return datetime.utcnow().strftime(f'%Y%m%d%H%M%S{key}') + '.' + name.split('.')[1]
| [
{
"point_num": 1,
"id": "every_function_under_20_lines",
"question": "Is every function in this file shorter than 20 lines?",
"answer": true
},
{
"point_num": 2,
"id": "all_return_types_annotated",
"question": "Does every function in this file have a return type annotation?",
"an... | 3 | src/utils/string_utils.py | codespacedot/CodeSpaceAPI |
import numpy as np
import haiku as hk
import jax
import jax.numpy as jnp
class Actor(hk.Module):
def __init__(self,action_size,node=256,hidden_n=2):
super(Actor, self).__init__()
self.action_size = action_size
self.node = node
self.hidden_n = hidden_n
self.layer = hk.Linear
def __call__(self,feature: jnp.ndarray) -> jnp.ndarray:
action = hk.Sequential(
[
self.layer(self.node) if i%2 == 0 else jax.nn.relu for i in range(2*self.hidden_n)
] +
[
self.layer(self.action_size[0]),
jax.nn.tanh
]
)(feature)
return action
class Critic(hk.Module):
def __init__(self,node=256,hidden_n=2):
super(Critic, self).__init__()
self.node = node
self.hidden_n = hidden_n
self.layer = hk.Linear
def __call__(self,feature: jnp.ndarray,actions: jnp.ndarray) -> jnp.ndarray:
concat = jnp.concatenate([feature,actions],axis=1)
q_net = hk.Sequential(
[
self.layer(self.node) if i%2 == 0 else jax.nn.relu for i in range(2*self.hidden_n)
] +
[
self.layer(1)
]
)(concat)
return q_net | [
{
"point_num": 1,
"id": "more_functions_than_classes",
"question": "Does this file define more functions than classes?",
"answer": true
},
{
"point_num": 2,
"id": "every_function_under_20_lines",
"question": "Is every function in this file shorter than 20 lines?",
"answer": true
... | 3 | haiku_baselines/TD3/network.py | TinkTheBoush/haiku-baseline |
# pass test
import numpy as np
def prepare_input(input_size):
return [np.random.rand(input_size), np.random.rand(input_size)]
def test_function(input_data):
return np.convolve(input_data[0], input_data[1])
| [
{
"point_num": 1,
"id": "all_return_types_annotated",
"question": "Does every function in this file have a return type annotation?",
"answer": false
},
{
"point_num": 2,
"id": "has_nested_function_def",
"question": "Does this file contain any function defined inside another function?... | 3 | tests/python/tests/conv/test.py | Ashymad/praca.inz |
from typing import Optional, List
import logging
import torch
logger = logging.getLogger(__name__)
class Batch:
def __init__(
self,
x: torch.Tensor,
y: torch.Tensor,
x_len: Optional[torch.Tensor] = None,
y_len: Optional[torch.Tensor] = None,
paths: Optional[List[str]] = None
):
self.x = x
self.y = y
self.x_len = x_len
self.y_len = y_len
self.paths: List[str] = paths
def cuda(self, non_blocking=False):
self.x = self.x.to(device='cuda', non_blocking=non_blocking)
self.y = self.y.to(device='cuda', non_blocking=non_blocking)
self.x_len = self.x_len.to(device='cuda', non_blocking=non_blocking)
self.y_len = self.y_len.to(device='cuda', non_blocking=non_blocking)
return self
def cpu(self):
self.x = self.x.cpu()
self.y = self.y.cpu()
self.x_len = self.x_len.cpu()
self.y_len = self.y_len.cpu()
return self
@property
def size(self) -> int:
return self.x.shape[0]
def __repr__(self):
lines = []
for attr, value in self.__dict__.items():
if value is not None:
lines.append(f"Attr: {attr}:")
if isinstance(value, torch.Tensor):
lines.append("Shape: {}".format(value.shape))
lines.append(repr(value))
lines.append("\n")
return "\n".join(lines)
| [
{
"point_num": 1,
"id": "more_functions_than_classes",
"question": "Does this file define more functions than classes?",
"answer": true
},
{
"point_num": 2,
"id": "all_function_names_snake_case",
"question": "Are all function names in this file written in snake_case?",
"answer": ... | 3 | src/data/batch.py | marka17/digit-recognition |
# Licensed to the StackStorm, Inc ('StackStorm') under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import absolute_import
import uuid
from st2common import log as logging
from st2common.runners.base import get_metadata as get_runner_metadata
from winrm_runner.winrm_base import WinRmBaseRunner
__all__ = [
'WinRmPsCommandRunner',
'get_runner',
'get_metadata'
]
LOG = logging.getLogger(__name__)
RUNNER_COMMAND = 'cmd'
class WinRmPsCommandRunner(WinRmBaseRunner):
def run(self, action_parameters):
powershell_command = self.runner_parameters[RUNNER_COMMAND]
# execute
return self.run_ps(powershell_command)
def get_runner():
return WinRmPsCommandRunner(str(uuid.uuid4()))
def get_metadata():
metadata = get_runner_metadata('winrm_runner')
metadata = [runner for runner in metadata if
runner['runner_module'] == __name__.split('.')[-1]][0]
return metadata
| [
{
"point_num": 1,
"id": "more_functions_than_classes",
"question": "Does this file define more functions than classes?",
"answer": true
},
{
"point_num": 2,
"id": "has_multiple_inheritance",
"question": "Does any class in this file use multiple inheritance?",
"answer": false
},... | 3 | contrib/runners/winrm_runner/winrm_runner/winrm_ps_command_runner.py | nickbaum/st2 |
import ciso8601
import dateutil.parser
from cartographer.field_types import SchemaAttribute
from cartographer.utils.datetime import as_utc, make_naive
class DateAttribute(SchemaAttribute):
@classmethod
def format_value_for_json(cls, value):
return as_utc(value).isoformat()
def from_json(self, serialized_value):
if self.is_nullable and serialized_value is None:
return None
try:
# ciso8601 is significantly faster than dateutil.parser for parsing iso8601 strings, so we try it first
parsed_value = ciso8601.parse_datetime(serialized_value)
assert parsed_value is not None # Caveat: asserts won't run if python is run with -O.
except Exception as e:
parsed_value = dateutil.parser.parse(serialized_value)
return make_naive(parsed_value)
| [
{
"point_num": 1,
"id": "every_function_has_docstring",
"question": "Does every function in this file have a docstring?",
"answer": false
},
{
"point_num": 2,
"id": "every_function_under_20_lines",
"question": "Is every function in this file shorter than 20 lines?",
"answer": tru... | 3 | cartographer/field_types/date_attribute.py | Patreon/cartographer |
# -*- coding: utf-8 -*-
"""Public forms."""
from flask_wtf import Form
from wtforms import PasswordField, StringField
from wtforms.validators import DataRequired
from startapp.user.models import User
class LoginForm(Form):
"""Login form."""
username = StringField('Username', validators=[DataRequired()])
password = PasswordField('Password', validators=[DataRequired()])
def __init__(self, *args, **kwargs):
"""Create instance."""
super(LoginForm, self).__init__(*args, **kwargs)
self.user = None
def validate(self):
"""Validate the form."""
initial_validation = super(LoginForm, self).validate()
if not initial_validation:
return False
self.user = User.query.filter_by(username=self.username.data).first()
if not self.user:
self.username.errors.append('Unknown username')
return False
if not self.user.check_password(self.password.data):
self.password.errors.append('Invalid password')
return False
if not self.user.active:
self.username.errors.append('User not activated')
return False
return True
| [
{
"point_num": 1,
"id": "has_multiple_inheritance",
"question": "Does any class in this file use multiple inheritance?",
"answer": false
},
{
"point_num": 2,
"id": "every_function_has_docstring",
"question": "Does every function in this file have a docstring?",
"answer": true
}... | 3 | startapp/public/forms.py | samsonpaul/startapp |
from dagster import check
from .system import SystemStepExecutionContext
class StepExecutionContext(object):
__slots__ = ['_system_step_execution_context', '_legacy_context']
def __init__(self, system_step_execution_context):
self._system_step_execution_context = check.inst_param(
system_step_execution_context,
'system_step_execution_context',
SystemStepExecutionContext,
)
@property
def file_manager(self):
return self._system_step_execution_context.file_manager
@property
def resources(self):
return self._system_step_execution_context.resources
@property
def run_id(self):
return self._system_step_execution_context.run_id
@property
def environment_dict(self):
return self._system_step_execution_context.environment_dict
@property
def pipeline_def(self):
return self._system_step_execution_context.pipeline_def
@property
def mode_def(self):
return self._system_step_execution_context.mode_def
@property
def log(self):
return self._system_step_execution_context.log
@property
def solid_handle(self):
return self._system_step_execution_context.solid_handle
@property
def solid(self):
return self._system_step_execution_context.pipeline_def.get_solid(self.solid_handle)
@property
def solid_def(self):
return self._system_step_execution_context.pipeline_def.get_solid(
self.solid_handle
).definition
def has_tag(self, key):
return self._system_step_execution_context.has_tag(key)
def get_tag(self, key):
return self._system_step_execution_context.get_tag(key)
def get_system_context(self):
'''
This allows advanced users (e.g. framework authors) to punch through
to the underlying system context.
'''
return self._system_step_execution_context
| [
{
"point_num": 1,
"id": "has_multiple_inheritance",
"question": "Does any class in this file use multiple inheritance?",
"answer": false
},
{
"point_num": 2,
"id": "all_function_names_snake_case",
"question": "Are all function names in this file written in snake_case?",
"answer":... | 3 | python_modules/dagster/dagster/core/execution/context/step.py | jake-billings/dagster |
"""The tests for day17."""
from days import day17
from ddt import ddt, data, unpack
import unittest
import helpers
@ddt
class MyTestCase(unittest.TestCase): # noqa D101
@data(
[[
'x=495, y=2..7',
'y=7, x=495..501',
'x=501, y=3..7',
'x=498, y=2..4',
'x=506, y=1..2',
'x=498, y=10..13',
'x=504, y=10..13',
'y=13, x=498..504'], '57'])
@unpack
def test_example_a(self, test_input, expected): # noqa D102
result = day17.part_a(test_input)
self.assertEqual(result, expected)
def test_answer_part_a(self): # noqa D102
result = day17.part_a(helpers.get_file_contents('day17.txt'))
self.assertEqual(result, '38021')
@data(
[[
'x=495, y=2..7',
'y=7, x=495..501',
'x=501, y=3..7',
'x=498, y=2..4',
'x=506, y=1..2',
'x=498, y=10..13',
'x=504, y=10..13',
'y=13, x=498..504'], '29'])
@unpack
def test_example_b(self, test_input, expected): # noqa D102
result = day17.part_b(test_input)
self.assertEqual(result, expected)
def test_answer_part_b(self): # noqa D102
result = day17.part_b(helpers.get_file_contents('day17.txt'))
self.assertEqual(result, '32069')
| [
{
"point_num": 1,
"id": "every_function_has_docstring",
"question": "Does every function in this file have a docstring?",
"answer": false
},
{
"point_num": 2,
"id": "more_functions_than_classes",
"question": "Does this file define more functions than classes?",
"answer": true
}... | 3 | test/test_day17.py | frangiz/AdventOfCode2018 |
# encoding: utf-8
from .item import Item
from .mix import ConfigurationMixIn, DeletionMixIn, DescriptionMixIn
class Views(Item):
'''
classdocs
'''
def __init__(self, owner):
'''
Constructor
'''
self.owner = owner
super().__init__(owner.jenkins, owner.url)
def get(self, name):
for item in self.api_json(tree='views[name,url]')['views']:
if name == item['name']:
return self._new_instance_by_item(__name__, item)
return None
def create(self, name, xml):
self.handle_req('POST', 'createView', params={'name': name},
headers=self.headers, data=xml)
def __iter__(self):
for item in self.api_json(tree='views[name,url]')['views']:
yield self._new_instance_by_item(__name__, item)
class View(Item, ConfigurationMixIn, DescriptionMixIn, DeletionMixIn):
def get_job(self, name):
for item in self.api_json(tree='jobs[name,url]')['jobs']:
if name == item['name']:
return self._new_instance_by_item('api4jenkins.job', item)
return None
def __iter__(self):
for item in self.api_json(tree='jobs[name,url]')['jobs']:
yield self._new_instance_by_item('api4jenkins.job', item)
def include(self, name):
self.handle_req('POST', 'addJobToView', params={'name': name})
def exclude(self, name):
self.handle_req('POST', 'removeJobFromView', params={'name': name})
class AllView(View):
def __init__(self, jenkins, url):
super().__init__(jenkins, url + 'view/all/')
class MyView(View):
pass
class ListView(View):
pass
| [
{
"point_num": 1,
"id": "all_params_annotated",
"question": "Does every function parameter in this file have a type annotation (excluding self/cls)?",
"answer": false
},
{
"point_num": 2,
"id": "all_function_names_snake_case",
"question": "Are all function names in this file written ... | 3 | api4jenkins/view.py | pquentin/api4jenkins |
from sys import argv, stdin
def cut(input_file, *args):
options = process_options(*args)
delimiter = d_option(options["-d"])
lines = input_file.readlines()
columns = [item.split(delimiter) for item in lines]
scope = f_option(options["-f"], len(columns[0]))
out_scope = []
for x in scope:
out_scope.append([column[x] for column in columns])
pr = []
for line in range(len(out_scope[0])):
for rec in out_scope:
pr.append(rec[line].strip())
print(delimiter.join(pr), end='')
pr.clear()
print()
def process_options(options):
out_opt = dict()
last_key = ""
for option in options:
if option.startswith('-'):
out_opt[option] = ""
last_key = option
else:
out_opt[last_key] = option
return out_opt
def f_option(params: str, file_size: int):
if not params:
return None
inp = params.split('-') if '-' in params else params
if '-' not in params and ',' not in params:
return int(params)
elif params.startswith('-'):
return [x for x in range(0, int(inp[1]))]
elif params.endswith('-'):
return [x - 1 for x in range(int(inp[0]), file_size + 1)]
elif ',' in params:
return [int(x) for x in params.split(',')]
else:
return [x - 1 for x in range(int(inp[0]), int(inp[1]) + 1)]
def d_option(params):
return params if params else ' '
cut(stdin, argv[1:]) | [
{
"point_num": 1,
"id": "every_function_has_docstring",
"question": "Does every function in this file have a docstring?",
"answer": false
},
{
"point_num": 2,
"id": "no_function_exceeds_5_params",
"question": "Does every function in this file take 5 or fewer parameters (excluding sel... | 3 | SandBox/Practicals_05_Cut.py | MichalKyjovsky/NPRG065_Programing_in_Python |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import unittest
# Local script to test.
from fibonacci import fib
class FibonacciTest(unittest.TestCase):
def test(self):
self.assertEqual(0, fib(0))
self.assertEqual(1, fib(1))
self.assertEqual(1, fib(2))
self.assertEqual(2, fib(3))
self.assertEqual(3, fib(4))
self.assertEqual(5, fib(5))
self.assertEqual(8, fib(6))
self.assertEqual(13, fib(7))
self.assertEqual(21, fib(8))
self.assertEqual(34, fib(9))
self.assertEqual(55, fib(10))
self.assertEqual(89, fib(11))
self.assertEqual(144, fib(12))
def test_invalid_argument(self):
try:
fib(1.0)
self.fail("Did not raise AssertionError")
except AssertionError:
pass
try:
fib("1000")
except AssertionError:
pass
else:
self.fail("Did not raise AssertionError")
if __name__ == '__main__':
unittest.main()
# vim: set et ts=4 sw=4 cindent fileencoding=utf-8 :
| [
{
"point_num": 1,
"id": "every_function_under_20_lines",
"question": "Is every function in this file shorter than 20 lines?",
"answer": true
},
{
"point_num": 2,
"id": "has_multiple_inheritance",
"question": "Does any class in this file use multiple inheritance?",
"answer": false... | 3 | src/fibonacci_test2.py | skitazaki/python-school-ja |
import quart
from views import city_api
from views import home
from config import settings
import services.weather_service
import services.sun_service
import services.location_service
app = quart.Quart(__name__)
is_debug = True
app.register_blueprint(home.blueprint)
app.register_blueprint(city_api.blueprint)
def configure_app():
mode = 'dev' if is_debug else 'prod'
data = settings.load(mode)
services.weather_service.global_init(data.get('weather_key'))
services.sun_service.use_cached_data = data.get('use_cached_data')
services.location_service.use_cached_data = data.get('use_cached_data')
print("Using cached data? {}".format(data.get('use_cached_data')))
def run_web_app():
app.run(debug=is_debug, port=5001)
configure_app()
if __name__ == '__main__':
run_web_app()
| [
{
"point_num": 1,
"id": "every_function_under_20_lines",
"question": "Is every function in this file shorter than 20 lines?",
"answer": true
},
{
"point_num": 2,
"id": "every_function_has_docstring",
"question": "Does every function in this file have a docstring?",
"answer": fals... | 3 | src/10-async-web/acityscape_api/app.py | NissesSenap/async-techniques-python-course |
__author__ = 'patras'
from domain_searchAndRescue import *
from timer import DURATION
from state import state
def GetCostOfMove(r, l1, l2, dist):
return dist
DURATION.TIME = {
'giveSupportToPerson': 15,
'clearLocation': 5,
'inspectPerson': 20,
'moveEuclidean': GetCostOfMove,
'moveCurved': GetCostOfMove,
'moveManhattan': GetCostOfMove,
'fly': 15,
'inspectLocation': 5,
'transfer': 2,
'replenishSupplies': 4,
'captureImage': 2,
'changeAltitude': 3,
'deadEnd': 1,
}
DURATION.COUNTER = {
'giveSupportToPerson': 15,
'clearLocation': 5,
'inspectPerson': 20,
'moveEuclidean': GetCostOfMove,
'moveCurved': GetCostOfMove,
'moveManhattan': GetCostOfMove,
'fly': 15,
'inspectLocation': 5,
'transfer': 2,
'replenishSupplies': 4,
'captureImage': 2,
'changeAltitude': 3,
'deadEnd': 1,
}
rv.WHEELEDROBOTS = ['w1', 'w2']
rv.DRONES = ['a1']
rv.OBSTACLES = { (24, 21)}
def ResetState():
state.loc = {'w1': (24,19), 'w2': (23,29), 'p1': (12,21), 'a1': (24,10)}
state.hasMedicine = {'a1': 0, 'w1': 5, 'w2': 0}
state.robotType = {'w1': 'wheeled', 'a1': 'uav', 'w2': 'wheeled'}
state.status = {'w1': 'free', 'w2': 'free', 'a1': UNK, 'p1': UNK, (12,21): UNK}
state.altitude = {'a1': 'high'}
state.currentImage = {'a1': None}
state.realStatus = {'w1': 'OK', 'p1': 'OK', 'w2': 'OK', 'a1': OK, (12, 21): 'hasDebri'}
state.realPerson = {(12,21): 'p1'}
state.newRobot = {1: None}
state.weather = {(12,21): "rainy"}
tasks = {
6: [['survey', 'a1', (12,21)]]
}
eventsEnv = {
} | [
{
"point_num": 1,
"id": "every_function_under_20_lines",
"question": "Is every function in this file shorter than 20 lines?",
"answer": true
},
{
"point_num": 2,
"id": "all_params_annotated",
"question": "Does every function parameter in this file have a type annotation (excluding se... | 3 | problems/SR/auto/problem35_SR.py | sunandita/ICAPS_Summer_School_RAE_2020 |
from typing import Iterable, Optional
from app.integrations.mailchimp import exceptions
from app.integrations.mailchimp.http import MailchimpHTTP
from app.integrations.mailchimp.member import MailchimpMember
from users.models import User
class AppMailchimp:
def __init__(self):
self.http = MailchimpHTTP()
def subscribe_django_user(self, list_id: str, user: User, tags: Optional[Iterable] = None):
member = MailchimpMember.from_django_user(user)
self.mass_subscribe(
list_id=list_id,
members=[member],
)
if tags is not None:
self.set_tags(
list_id=list_id,
member=member,
tags=tags,
)
def mass_subscribe(self, list_id: str, members: Iterable[MailchimpMember]):
member_list = list()
for member in members:
member_list.append({
**member.to_mailchimp(),
'status': 'subscribed',
})
response = self.http.post(
url=f'lists/{list_id}',
payload={
'members': member_list,
'update_existing': True,
},
)
if len(response['errors']):
raise exceptions.MailchimpSubscriptionFailed(', '.join([f'{err["email_address"]}: {err["error"]} ({err["error_code"]})' for err in response['errors']]))
def set_tags(self, list_id: str, member: MailchimpMember, tags: Iterable[str]):
self.http.post(
url=f'/lists/{list_id}/members/{member.subscriber_hash}/tags',
payload={
'tags': [{'name': tag, 'status': 'active'} for tag in tags],
},
expected_status_code=204,
)
__all__ = [
AppMailchimp,
]
| [
{
"point_num": 1,
"id": "all_params_annotated",
"question": "Does every function parameter in this file have a type annotation (excluding self/cls)?",
"answer": true
},
{
"point_num": 2,
"id": "has_multiple_inheritance",
"question": "Does any class in this file use multiple inheritan... | 3 | src/app/integrations/mailchimp/client.py | tlgtaa/education-backend |
# swift_build_support/products/indexstoredb.py -------------------*- python -*-
#
# This source file is part of the Swift.org open source project
#
# Copyright (c) 2014 - 2017 Apple Inc. and the Swift project authors
# Licensed under Apache License v2.0 with Runtime Library Exception
#
# See https://swift.org/LICENSE.txt for license information
# See https://swift.org/CONTRIBUTORS.txt for the list of Swift project authors
#
# ----------------------------------------------------------------------------
import os
import platform
from . import product
from .. import shell
from .. import targets
class IndexStoreDB(product.Product):
@classmethod
def product_source_name(cls):
return "indexstore-db"
@classmethod
def is_build_script_impl_product(cls):
return False
def build(self, host_target):
run_build_script_helper('build', host_target, self, self.args)
def test(self, host_target):
if self.args.test and self.args.test_indexstoredb:
run_build_script_helper('test', host_target, self, self.args)
def run_build_script_helper(action, host_target, product, args):
script_path = os.path.join(
product.source_dir, 'Utilities', 'build-script-helper.py')
toolchain_path = args.install_destdir
if platform.system() == 'Darwin':
# The prefix is an absolute path, so concatenate without os.path.
toolchain_path += \
targets.darwin_toolchain_prefix(args.install_prefix)
configuration = 'debug' if args.build_variant == 'Debug' else 'release'
helper_cmd = [
script_path,
action,
'--verbose',
'--package-path', product.source_dir,
'--build-path', product.build_dir,
'--configuration', configuration,
'--toolchain', toolchain_path,
'--ninja-bin', product.toolchain.ninja,
]
shell.call(helper_cmd)
| [
{
"point_num": 1,
"id": "more_functions_than_classes",
"question": "Does this file define more functions than classes?",
"answer": true
},
{
"point_num": 2,
"id": "no_function_exceeds_5_params",
"question": "Does every function in this file take 5 or fewer parameters (excluding self/... | 3 | utils/swift_build_support/swift_build_support/products/indexstoredb.py | orakaro/swift |
import numpy as np
from unittest import TestCase
from datumaro.components.project import Dataset
from datumaro.components.extractor import DatasetItem
from datumaro.plugins.image_dir import ImageDirConverter
from datumaro.util.test_utils import TestDir, test_save_and_load
class ImageDirFormatTest(TestCase):
def test_can_load(self):
dataset = Dataset.from_iterable([
DatasetItem(id=1, image=np.ones((10, 6, 3))),
DatasetItem(id=2, image=np.ones((5, 4, 3))),
])
with TestDir() as test_dir:
test_save_and_load(self, dataset, ImageDirConverter.convert,
test_dir, importer='image_dir')
def test_relative_paths(self):
dataset = Dataset.from_iterable([
DatasetItem(id='1', image=np.ones((4, 2, 3))),
DatasetItem(id='subdir1/1', image=np.ones((2, 6, 3))),
DatasetItem(id='subdir2/1', image=np.ones((5, 4, 3))),
])
with TestDir() as test_dir:
test_save_and_load(self, dataset, ImageDirConverter.convert,
test_dir, importer='image_dir') | [
{
"point_num": 1,
"id": "every_function_under_20_lines",
"question": "Is every function in this file shorter than 20 lines?",
"answer": true
},
{
"point_num": 2,
"id": "more_functions_than_classes",
"question": "Does this file define more functions than classes?",
"answer": true
... | 3 | tests/test_image_dir_format.py | detecttechnologies/datumaro |
from unittest.mock import patch
import pytest
from model.agents.student.activities import IdleActivity, StudySessionActivity
__author__ = 'e.kolpakov'
class TestIdleActivity:
@pytest.mark.parametrize("length", [10, 15, 20, 3, 7, 11])
def test_activate_sends(self, student, env, length):
activity = IdleActivity(student, length, env)
# noinspection PyUnresolvedReferences
with patch.object(env, 'timeout') as patched_timeout:
env.process(activity.run(length))
env.run()
patched_timeout.assert_called_once_with(length)
class TestStudySessionActivity:
@pytest.fixture
def activity(self, student):
return StudySessionActivity(student) | [
{
"point_num": 1,
"id": "every_function_has_docstring",
"question": "Does every function in this file have a docstring?",
"answer": false
},
{
"point_num": 2,
"id": "every_function_under_20_lines",
"question": "Is every function in this file shorter than 20 lines?",
"answer": tru... | 3 | tests/agents/student/test_activities.py | e-kolpakov/study-model |
from circuit_ring_buffer import RingBuffer
'''
- 2021/12/20 ver.1.00
- Author : emguse
- License: MIT License
'''
class MovingAverage():
def __init__(self, length:int, zero_fill=True) -> None:
self.length = abs(length)
if self.length == 0:
self.length = 1
self.rb = RingBuffer(self.length)
if zero_fill == True:
for i in range(self.length):
self.rb.append(0)
def simple_moving_average(self, new_value) -> float:
self.rb.append(new_value)
sma = sum(self.rb.ring) / self.length
return sma
def main():
ma = MovingAverage(5)
l = [1,2,3,4,5,6,7,8,9,10]
for i in l:
print(ma.simple_moving_average(i))
if __name__ == "__main__":
main() | [
{
"point_num": 1,
"id": "has_multiple_inheritance",
"question": "Does any class in this file use multiple inheritance?",
"answer": false
},
{
"point_num": 2,
"id": "all_function_names_snake_case",
"question": "Are all function names in this file written in snake_case?",
"answer":... | 3 | circuit_move_ave.py | emguse/Dec-2021 |
from d2dstore.manager import BaseManager
class StoreManager(BaseManager):
"""
Custom manager store model
"""
def __init__(self, *args, **kwargs):
super(StoreManager, self).__init__(*args, **kwargs)
def get_query_set(self):
query_set = super().get_queryset()
return query_set
| [
{
"point_num": 1,
"id": "has_nested_function_def",
"question": "Does this file contain any function defined inside another function?",
"answer": false
},
{
"point_num": 2,
"id": "has_multiple_inheritance",
"question": "Does any class in this file use multiple inheritance?",
"answ... | 3 | apps/stores/manager.py | AJAkimana/py-store |
import unittest
from jackedCodeTimerPY import JackedTiming, _Record, JackedTimingError
class TestCodeTimer(unittest.TestCase):
def test__Record(self):
record = _Record()
record.start()
self.assertTrue(record.started)
record.stop()
self.assertFalse(record.started)
self.assertTrue(len(record.times) == 1)
self.assertTrue(record.times[0] > 0)
record.start()
with self.assertRaises(JackedTimingError):
record.start()
record.stop()
self.assertTrue(len(record.times) == 2)
self.assertTrue(record.times[1] > 0)
with self.assertRaises(JackedTimingError):
record.stop()
def test_JackedTiming(self):
timers = JackedTiming()
timers.start('first')
timers.start(_Record)
with self.assertRaises(JackedTimingError):
timers.start('first')
timers.stop('first')
with self.assertRaises(JackedTimingError):
timers.stop('first')
with self.assertRaises(JackedTimingError):
timers.report()
timers.stop(_Record)
timers.report()
timers.report(('first', 'second'))
if __name__ == '__main__': unittest.main()
| [
{
"point_num": 1,
"id": "more_functions_than_classes",
"question": "Does this file define more functions than classes?",
"answer": true
},
{
"point_num": 2,
"id": "has_multiple_inheritance",
"question": "Does any class in this file use multiple inheritance?",
"answer": false
},... | 3 | tests.py | brianpallangyo/jackedCodeTimerPY |
import re
import jieba
import jieba.posseg as pseg
def split2sens(text):
pstop = re.compile(rf'[。!??!…]”*')
sens = []
stoplist = pstop.findall(text)
senlist = []
for sen in pstop.split(text):
if len(sen) == 0:
continue
senlist.append(sen)
for i, sen in enumerate(senlist):
try:
sen = sen + stoplist[i]
sens.append(sen)
except IndexError:
continue
return sens
def cut2words(text):
return jieba.lcut(text)
def cut2wpos(text, pos=None):
data = []
for w,p in pseg.cut(text):
if p == pos:
continue
data.append((w,p))
return data
| [
{
"point_num": 1,
"id": "all_params_annotated",
"question": "Does every function parameter in this file have a type annotation (excluding self/cls)?",
"answer": false
},
{
"point_num": 2,
"id": "no_function_exceeds_5_params",
"question": "Does every function in this file take 5 or fe... | 3 | nbt/splittext/splittext.py | fcoolish/All4NLP |
from re import X
from flask import Flask,render_template,url_for,request
from tensorflow.keras.preprocessing.text import Tokenizer
from tensorflow.keras.preprocessing.sequence import pad_sequences
from tensorflow.keras import models
import numpy as np
import pickle
french_tokenizer = pickle.load(open('french_tokenizer.pickle', 'rb'))
english_tokenizer = pickle.load(open('english_tokenizer.pickle', 'rb'))
model = models.load_model("translator_model.h5")
y_id_to_word = {value: key for key, value in french_tokenizer.word_index.items()}
y_id_to_word[0] = '<PAD>'
#y_id_to_word
app = Flask(__name__)
@app.route('/')
def hello_World():
return "Hello Soumya"
@app.route('/translator', methods = ['GET', 'POST'])
def eng_to_french():
message = request.args.get("message")
sentence = [english_tokenizer.word_index[word] for word in message.split()]
#sentence
sentence = pad_sequences([sentence], maxlen=15, padding='post')
sentences = np.array([sentence[0]])
predictions = model.predict(sentences, len(sentences))
x = ' '.join([y_id_to_word[np.argmax(x)] for x in predictions[0]])
if '<PAD>' in x:
x=x.replace('<PAD>','')
print(x)
return x
if __name__ == '__main__':
app.run(debug=True)
| [
{
"point_num": 1,
"id": "every_function_under_20_lines",
"question": "Is every function in this file shorter than 20 lines?",
"answer": true
},
{
"point_num": 2,
"id": "has_nested_function_def",
"question": "Does this file contain any function defined inside another function?",
"... | 3 | Model prediction/app.py | choudhury722k/English-to-French-translator |
############################################################################################
# Title: IoT JumpWay Helpers
# Description: Helper functions for IoT JumpWay programs.
# Last Modified: 2018-06-09
############################################################################################
import os, json, cv2
from datetime import datetime
class Helpers():
def __init__(self):
pass
def loadConfigs(self):
with open("required/confs.json") as configs:
_configs = json.loads(configs.read())
return _configs
def saveImage(self,networkPath,frame):
timeDirectory = networkPath + "data/captures/"+datetime.now().strftime('%Y-%m-%d')+'/'+datetime.now().strftime('%H')
if not os.path.exists(timeDirectory):
os.makedirs(timeDirectory)
currentImage=timeDirectory+'/'+datetime.now().strftime('%M-%S')+'.jpg'
print(currentImage)
print("")
cv2.imwrite(currentImage, frame)
return currentImage | [
{
"point_num": 1,
"id": "every_function_has_docstring",
"question": "Does every function in this file have a docstring?",
"answer": false
},
{
"point_num": 2,
"id": "more_functions_than_classes",
"question": "Does this file define more functions than classes?",
"answer": true
}... | 3 | Caffe/CaffeNet/components/Helpers.py | BreastCancerAI/IDC-Classifier |
#
# Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one
# or more contributor license agreements. Licensed under the Elastic License 2.0;
# you may not use this file except in compliance with the Elastic License 2.0.
#
import multiprocessing
from multiprocessing.queues import Queue
BATCH_SIZE = 100
class ConnectorQueue(Queue):
"""Class to support additional queue operations specific to the connector"""
def __init__(self, logger):
ctx = multiprocessing.get_context()
self.logger = logger
super(ConnectorQueue, self).__init__(ctx=ctx)
def end_signal(self):
"""Send an terminate signal to indicate the queue can be closed"""
signal_close = {"type": "signal_close"}
self.put(signal_close)
def put_checkpoint(self, key, checkpoint_time, indexing_type):
"""Put the checkpoint object in the queue which will be used by the consumer to update the checkpoint file
:param key: The key of the checkpoint dictionary
:param checkpoint_time: The end time that will be stored in the checkpoint as {'key': 'checkpoint_time'}
:param indexing_type: The type of the indexing i.e. Full or Incremental
"""
checkpoint = {
"type": "checkpoint",
"data": (key, checkpoint_time, indexing_type),
}
self.put(checkpoint)
| [
{
"point_num": 1,
"id": "every_function_under_20_lines",
"question": "Is every function in this file shorter than 20 lines?",
"answer": true
},
{
"point_num": 2,
"id": "has_multiple_inheritance",
"question": "Does any class in this file use multiple inheritance?",
"answer": false... | 3 | ees_sharepoint/connector_queue.py | elastic/enterprise-search-sharepoint-server-2016-connector |
from __future__ import division, generators, print_function
import torch
import torch.nn as nn
import macarico
import macarico.util as util
from macarico.util import Var, Varng
class BOWActor(macarico.Actor):
def __init__(self, attention, n_actions, act_history_length=1, obs_history_length=0):
self.att_dim = sum((att.dim for att in attention))
super().__init__(n_actions,
self.att_dim +
act_history_length * n_actions + \
obs_history_length * self.att_dim,
attention)
self.act_history_length = act_history_length
self.obs_history_length = obs_history_length
self._reset()
def _forward(self, state, x):
feats = x[:]
if self.act_history_length > 0:
f = util.zeros(self, 1, self.act_history_length * self.n_actions)
for i in range(min(self.act_history_length, len(state._trajectory))):
a = state._trajectory[-i]
f[0, i * self.n_actions + a] = 1
feats.append(Varng(f))
if self.obs_history_length > 0:
for i in range(self.obs_history_length):
feats.append(Varng(self.obs_history[(self.obs_history_pos+i) % self.obs_history_length]))
# update history
self.obs_history[self.obs_history_pos] = torch.cat(x, dim=1).data
self.obs_history_pos = (self.obs_history_pos + 1) % self.obs_history_length
return torch.cat(feats, dim=1)
def _reset(self):
self.obs_history = []
for _ in range(self.obs_history_length):
self.obs_history.append(util.zeros(self, 1, self.att_dim))
self.obs_history_pos = 0
| [
{
"point_num": 1,
"id": "no_function_exceeds_5_params",
"question": "Does every function in this file take 5 or fewer parameters (excluding self/cls)?",
"answer": true
},
{
"point_num": 2,
"id": "every_function_under_20_lines",
"question": "Is every function in this file shorter than... | 3 | macarico/actors/bow.py | bgalbraith/macarico |
# coding: utf-8
"""
spoonacular API
The spoonacular Nutrition, Recipe, and Food API allows you to access over 380,000 recipes, thousands of ingredients, 800,000 food products, and 100,000 menu items. Our food ontology and semantic recipe search engine makes it possible to search for recipes using natural language queries, such as \"gluten free brownies without sugar\" or \"low fat vegan cupcakes.\" You can automatically calculate the nutritional information for any recipe, analyze recipe costs, visualize ingredient lists, find recipes for what's in your fridge, find recipes based on special diets, nutritional requirements, or favorite ingredients, classify recipes into types and cuisines, convert ingredient amounts, or even compute an entire meal plan. With our powerful API, you can create many kinds of food and especially nutrition apps. Special diets/dietary requirements currently available include: vegan, vegetarian, pescetarian, gluten free, grain free, dairy free, high protein, whole 30, low sodium, low carb, Paleo, ketogenic, FODMAP, and Primal. # noqa: E501
The version of the OpenAPI document: 1.0
Contact: mail@spoonacular.com
Generated by: https://openapi-generator.tech
"""
from __future__ import absolute_import
import unittest
import spoonacular
from spoonacular.com.spoonacular.client.model.inline_response20053_results import InlineResponse20053Results # noqa: E501
from spoonacular.rest import ApiException
class TestInlineResponse20053Results(unittest.TestCase):
"""InlineResponse20053Results unit test stubs"""
def setUp(self):
pass
def tearDown(self):
pass
def testInlineResponse20053Results(self):
"""Test InlineResponse20053Results"""
# FIXME: construct object with mandatory attributes with example values
# model = spoonacular.models.inline_response20053_results.InlineResponse20053Results() # noqa: E501
pass
if __name__ == '__main__':
unittest.main()
| [
{
"point_num": 1,
"id": "every_function_under_20_lines",
"question": "Is every function in this file shorter than 20 lines?",
"answer": true
},
{
"point_num": 2,
"id": "has_multiple_inheritance",
"question": "Does any class in this file use multiple inheritance?",
"answer": false... | 3 | test/test_inline_response20053_results.py | Lowe-Man/spoonacular-python-api |
import komand
from .schema import DeleteTagInput, DeleteTagOutput, Input, Output, Component
from ...util import project
class DeleteTag(komand.Action):
def __init__(self):
super(self.__class__, self).__init__(
name='delete_tag',
description=Component.DESCRIPTION,
input=DeleteTagInput(),
output=DeleteTagOutput())
def run(self, params={}):
project.Project(self.connection.config, params.get(Input.PROJECT_NAME)).get_tag(params.get(Input.ID)).delete()
return {}
| [
{
"point_num": 1,
"id": "has_multiple_inheritance",
"question": "Does any class in this file use multiple inheritance?",
"answer": false
},
{
"point_num": 2,
"id": "more_functions_than_classes",
"question": "Does this file define more functions than classes?",
"answer": true
},... | 3 | viper/icon_viper/actions/delete_tag/action.py | killstrelok/insightconnect-plugins |
def sum(a, b):
"""Returns sum
Arguments:
a {int} -- Input 1
b {int} -- Input 2
Returns:
sum -- Sum
"""
return a + b
def difference(a, b):
"""Returns difference
Arguments:
a {int} -- Input 1
b {int} -- Input 2
Returns:
diff -- Difference
"""
return a - b
def product(a, b):
"""Returns product
Arguments:
a {int} -- Input 1
b {int} -- Input 2
Returns:
product -- Product
"""
return a * b
def quotient(a, b):
"""Returns quotient
Arguments:
a {float} -- Input 1
b {float} -- Input 2
Returns:
quotient -- Quotient
"""
if (b == 0):
return 0
return a / b
def modulus(a, b):
"""Returns modulus
Arguments:
a {int} -- Input 1
b {int} -- Input 2
Returns:
mod -- Modulus
"""
if (b == 0):
return 0
return a % b
def getInputs():
"""Accepts user inputs and returns them
Returns:
tuple -- set of inputs
"""
return int(input("Enter X: ")), int(input("Enter Y: "))
if __name__ == "__main__":
x, y = getInputs()
for i in range(5):
if (i == 0):
print(f"\nSum: {sum(x, y)}")
elif (i == 1):
print(f"Difference: {difference(x, y)}")
elif (i == 2):
print(f"Product: {product(x, y)}")
elif (i == 3):
print(f"Quotient: {quotient(float(x), float(y))}")
elif (i == 4):
print(f"Modulus: {modulus(x, y)}")
print("\nExiting...\n")
| [
{
"point_num": 1,
"id": "all_function_names_snake_case",
"question": "Are all function names in this file written in snake_case?",
"answer": false
},
{
"point_num": 2,
"id": "every_function_has_docstring",
"question": "Does every function in this file have a docstring?",
"answer"... | 3 | YearI/SemesterI/C++/Other/26-07-2019/numbers/main.py | sudiptog81/ducscode |
from netapp.netapp_object import NetAppObject
class DefaultGetIterKeyTd(NetAppObject):
"""
Key typedef for table ntdtest_multiple_with_default
"""
_key_2 = None
@property
def key_2(self):
"""
Field sfield3
"""
return self._key_2
@key_2.setter
def key_2(self, val):
if val != None:
self.validate('key_2', val)
self._key_2 = val
_key_1 = None
@property
def key_1(self):
"""
Field sfield2
"""
return self._key_1
@key_1.setter
def key_1(self, val):
if val != None:
self.validate('key_1', val)
self._key_1 = val
_key_0 = None
@property
def key_0(self):
"""
Field sfield1
"""
return self._key_0
@key_0.setter
def key_0(self, val):
if val != None:
self.validate('key_0', val)
self._key_0 = val
@staticmethod
def get_api_name():
return "default-get-iter-key-td"
@staticmethod
def get_desired_attrs():
return [
'key-2',
'key-1',
'key-0',
]
def describe_properties(self):
return {
'key_2': { 'class': basestring, 'is_list': False, 'required': 'optional' },
'key_1': { 'class': basestring, 'is_list': False, 'required': 'optional' },
'key_0': { 'class': basestring, 'is_list': False, 'required': 'optional' },
}
| [
{
"point_num": 1,
"id": "all_function_names_snake_case",
"question": "Are all function names in this file written in snake_case?",
"answer": true
},
{
"point_num": 2,
"id": "more_functions_than_classes",
"question": "Does this file define more functions than classes?",
"answer": ... | 3 | generated-libraries/python/netapp/ntdtest/default_get_iter_key_td.py | radekg/netapp-ontap-lib-get |
#!/usr/bin/env python3
#!/usr/bin/python3
def reciprocal(n):
return 1.0 / n
def main():
v = 1
t = 1
l = t
for n in range(100):
r = reciprocal(t)
t = v + r
if l == t:
break
print(("t = %s" % str(t)))
l = t
main()
| [
{
"point_num": 1,
"id": "every_function_under_20_lines",
"question": "Is every function in this file shorter than 20 lines?",
"answer": true
},
{
"point_num": 2,
"id": "every_function_has_docstring",
"question": "Does every function in this file have a docstring?",
"answer": fals... | 3 | python3/continued/continued1.py | jtraver/dev |
#
# VAZ Projects
#
#
# Author: Marcelo Tellier Sartori Vaz <marcelotsvaz@gmail.com>
from functools import partial
import re
from django.template import loader
def linkAttributes( self, tokens, index, options, env ):
'''
Add target and rel attributes to links.
'''
tokens[index].attrSet( 'rel', 'noopener' )
return self.renderToken( tokens, index, options, env )
def imageGalleryPlugin( md, markdownImages ):
'''
Plugin for rendering image galleries using Django UserImage.
Syntax: #[cssClass1 cssClass2](identifier1, identifier2, identifier3)
'''
md.block.ruler.before(
'paragraph',
'imageGallery',
partial( imageGallery, markdownImages = markdownImages ),
{ 'alt': [ 'paragraph', 'reference', 'blockquote', 'list' ] }
)
def imageGallery( state, startLine, endLine, silent, markdownImages ):
'''
Rule for image gallery.
'''
lineContent = state.getLines( startLine, startLine + 1, 0, False ).strip()
# Only run the regex if the first two characters match.
if not lineContent.startswith( '#[' ):
return False
match = re.match( r'^#\[(.*)\]\((.*)\)$', lineContent )
if not match:
return False
cssClasses = match[1]
identifiers = match[2]
if not silent:
state.line = startLine + 1
if identifiers.strip() == '*':
images = markdownImages
else:
identifiers = { identifier.strip() for identifier in identifiers.split( ',' ) }
images = [ image for image in markdownImages if image.identifier in identifiers ]
renderedTemplate = loader.render_to_string(
'commonApp/image_gallery.html',
{ 'images': images, 'cssClasses': cssClasses },
)
token = state.push( 'html_block', '', 0 )
token.content = renderedTemplate
token.map = [startLine, state.line]
return True | [
{
"point_num": 1,
"id": "all_params_annotated",
"question": "Does every function parameter in this file have a type annotation (excluding self/cls)?",
"answer": false
},
{
"point_num": 2,
"id": "any_function_over_40_lines",
"question": "Is any function in this file longer than 40 lin... | 3 | application/commonApp/markdown_it_extensions.py | Marcelotsvaz/vaz-projects |
import os
from conda_build import api
def test_output_with_noarch_says_noarch(testing_metadata):
testing_metadata.meta['build']['noarch'] = 'python'
output = api.get_output_file_path(testing_metadata)
assert os.path.sep + "noarch" + os.path.sep in output[0]
def test_output_with_noarch_python_says_noarch(testing_metadata):
testing_metadata.meta['build']['noarch_python'] = True
output = api.get_output_file_path(testing_metadata)
assert os.path.sep + "noarch" + os.path.sep in output[0]
| [
{
"point_num": 1,
"id": "every_function_has_docstring",
"question": "Does every function in this file have a docstring?",
"answer": false
},
{
"point_num": 2,
"id": "all_return_types_annotated",
"question": "Does every function in this file have a return type annotation?",
"answe... | 3 | tests/test_render.py | Bezier89/conda-build |
import gzip, zlib, base64
try:
from cStringIO import StringIO
except ImportError:
from StringIO import StringIO
__copyright__ = """\
(c). Copyright 2008-2020, Vyper Logix Corp., All Rights Reserved.
Published under Creative Commons License
(http://creativecommons.org/licenses/by-nc/3.0/)
restricted to non-commercial educational use only.,
http://www.VyperLogix.com for details
THE AUTHOR VYPER LOGIX CORP DISCLAIMS ALL WARRANTIES WITH REGARD TO
THIS SOFTWARE, INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY AND
FITNESS, IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY SPECIAL,
INDIRECT OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING
FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT,
NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION
WITH THE USE OR PERFORMANCE OF THIS SOFTWARE !
USE AT YOUR OWN RISK.
"""
def decompress_zlib(s):
return zlib.decompress(base64.decodestring(s), 15)
def zlib_compress(s):
return base64.encodestring(zlib.compress(s, 9))
| [
{
"point_num": 1,
"id": "all_return_types_annotated",
"question": "Does every function in this file have a return type annotation?",
"answer": false
},
{
"point_num": 2,
"id": "has_nested_function_def",
"question": "Does this file contain any function defined inside another function?... | 3 | vyperlogix/zlib/zlibCompressor.py | raychorn/chrome_gui |
import re
from dataclasses import dataclass
import parse
INTEGER_REGEX = r'-?[0-9]+(,[0-9]{3})*'
NUMBER_REGEX = rf'({INTEGER_REGEX}(\.[0-9]+)?|infinity|-infinity)'
@parse.with_pattern(NUMBER_REGEX)
def parse_number(text: str) -> float:
number_text = re.compile(NUMBER_REGEX).search(text)
return float(number_text[0].replace(',', ''))
RANGE_DELIMITER = '-'
RANGE_REGEX = rf'({NUMBER_REGEX})({RANGE_DELIMITER}({NUMBER_REGEX}))?'
@dataclass
class NumberRange:
high: float
low: float
@parse.with_pattern(RANGE_REGEX)
def parse_number_range(text: str) -> NumberRange:
range_text = re.compile(RANGE_REGEX).search(text)[0]
low_text = re.compile(NUMBER_REGEX).search(range_text)[0]
high_text = range_text.replace(f'{low_text}-', '')
return NumberRange(
high=parse_number(text=high_text),
low=parse_number(text=low_text)
)
| [
{
"point_num": 1,
"id": "more_functions_than_classes",
"question": "Does this file define more functions than classes?",
"answer": true
},
{
"point_num": 2,
"id": "every_function_under_20_lines",
"question": "Is every function in this file shorter than 20 lines?",
"answer": true
... | 3 | src/harness/cu_pass/dpa_calculator/helpers/parsers.py | NSF-Swift/Spectrum-Access-System |
# -*- coding: utf-8 -*- #
# Copyright 2017 Google LLC. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""The CreateSignaturePayload command for Binary Authorization signatures."""
from __future__ import absolute_import
from __future__ import division
from __future__ import unicode_literals
from googlecloudsdk.calliope import base
from googlecloudsdk.command_lib.container.binauthz import flags as binauthz_flags
from googlecloudsdk.command_lib.container.binauthz import util as binauthz_command_util
class CreateSignaturePayload(base.Command):
r"""Create a JSON container image signature object.
Given a container image URL specified by the manifest digest, this command
will produce a JSON object whose signature is expected by Cloud Binary
Authorization.
## EXAMPLES
To output serialized JSON to sign, run:
$ {command} \
--artifact-url="gcr.io/example-project/example-image@sha256:abcd"
"""
@classmethod
def Args(cls, parser):
binauthz_flags.AddArtifactUrlFlag(parser)
parser.display_info.AddFormat('object')
def Run(self, args):
# Dumping a bytestring to the object formatter doesn't output the string in
# PY3 but rather the repr of the object. Decoding it to a unicode string
# achieves the desired result.
payload_bytes = binauthz_command_util.MakeSignaturePayload(
args.artifact_url)
return payload_bytes.decode('utf-8')
| [
{
"point_num": 1,
"id": "more_functions_than_classes",
"question": "Does this file define more functions than classes?",
"answer": true
},
{
"point_num": 2,
"id": "has_multiple_inheritance",
"question": "Does any class in this file use multiple inheritance?",
"answer": false
},... | 3 | lib/surface/container/binauthz/create_signature_payload.py | google-cloud-sdk-unofficial/google-cloud-sdk |
# Django Imports
from django import forms
from django.utils.html import format_html
from django.utils.safestring import mark_safe
# Deprecated in Django 1.11
# forms.widgets.RadioChoiceInput
# https://docs.djangoproject.com/en/2.0/releases/1.11/#changes-due-to-the-introduction-of-template-based-widget-rendering
#class StarRatingRadioChoiceInput(forms.widgets.RadioChoiceInput):
class StarRatingRadioChoiceInput(object):
def render(self, name=None, value=None, attrs=None, choices=()):
if self.id_for_label:
label_for = format_html(' for="{}"', self.id_for_label)
else:
label_for = ''
attrs = dict(self.attrs, **attrs) if attrs else self.attrs
# TODO: some kind of double encoding is happening, somehow
#result = format_html(
# '<label{}></label>{}', label_for, self.tag(attrs)
#)
result = mark_safe('<label%s></label>%s' % (label_for, self.tag(attrs),))
return result
# Deprecated
#class StarRatingRadioChoiceFieldRenderer(forms.widgets.RadioFieldRenderer):
class StarRatingRadioChoiceFieldRenderer(object):
choice_input_class = StarRatingRadioChoiceInput
outer_html = '<span{id_attr} class="star-rating">{content}</span>'
inner_html = '{choice_value}'
class StarRatingRadioSelect(forms.RadioSelect):
renderer = StarRatingRadioChoiceFieldRenderer
def __init__(self, *args, **kwargs):
#super(StarRatingRadioSelect, self).__init__(choices=self.get_choices(min_value, max_value), *args, **kwargs)
super(StarRatingRadioSelect, self).__init__(*args, **kwargs)
def get_choices(self, min_value, max_value):
choices = [('', '',),]
for rating in range(min_value, max_value + 1):
choices.append((rating, rating,))
return choices
| [
{
"point_num": 1,
"id": "every_class_has_docstring",
"question": "Does every class in this file have a docstring?",
"answer": false
},
{
"point_num": 2,
"id": "more_functions_than_classes",
"question": "Does this file define more functions than classes?",
"answer": false
},
{... | 3 | forms/widgets.py | goztrk/django-htk |
import torch
import numpy as np
def map_per_batch(fun, values, batch_indices):
result = []
for start, stop, value_slice in sliced_per_batch(values, batch_indices):
result.append(fun(start, stop, value_slice))
return torch.cat(result)
def sliced_per_batch(values, batch_indices):
slices = torch.where(batch_indices[:-1] - batch_indices[1:] != 0)[0] + 1
slices = slices.tolist()
slices = zip([0] + slices, slices + [batch_indices.shape[0]])
for start, stop in slices:
yield start, stop, values[start:stop]
def sliced_per_batch_np(values, batch_indices):
slices = np.where(batch_indices[:-1] - batch_indices[1:] != 0)[0] + 1
slices = slices.tolist()
slices = zip([0] + slices, slices + [batch_indices.shape[0]])
for start, stop in slices:
yield start, stop, values[start:stop]
| [
{
"point_num": 1,
"id": "every_function_under_20_lines",
"question": "Is every function in this file shorter than 20 lines?",
"answer": true
},
{
"point_num": 2,
"id": "all_params_annotated",
"question": "Does every function parameter in this file have a type annotation (excluding se... | 3 | leanai/core/indexed_tensor_helpers.py | penguinmenac3/leanai |
# Copyright (C) 2021 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
#
# This work is made available under the Nvidia Source Code License-NC.
# To view a copy of this license, check out LICENSE.md
import torch.nn as nn
class FeatureMatchingLoss(nn.Module):
r"""Compute feature matching loss"""
def __init__(self, criterion='l1'):
super(FeatureMatchingLoss, self).__init__()
if criterion == 'l1':
self.criterion = nn.L1Loss()
elif criterion == 'l2' or criterion == 'mse':
self.criterion = nn.MSELoss()
else:
raise ValueError('Criterion %s is not recognized' % criterion)
def forward(self, fake_features, real_features):
r"""Return the target vector for the binary cross entropy loss
computation.
Args:
fake_features (list of lists): Discriminator features of fake images.
real_features (list of lists): Discriminator features of real images.
Returns:
(tensor): Loss value.
"""
num_d = len(fake_features)
dis_weight = 1.0 / num_d
loss = fake_features[0][0].new_tensor(0)
for i in range(num_d):
for j in range(len(fake_features[i])):
tmp_loss = self.criterion(fake_features[i][j],
real_features[i][j].detach())
loss += dis_weight * tmp_loss
return loss
| [
{
"point_num": 1,
"id": "every_function_under_20_lines",
"question": "Is every function in this file shorter than 20 lines?",
"answer": false
},
{
"point_num": 2,
"id": "has_multiple_inheritance",
"question": "Does any class in this file use multiple inheritance?",
"answer": fals... | 3 | imaginaire/losses/feature_matching.py | hw07216/imaginaire |
from freezegun import freeze_time
from salesforce_timecard.core import TimecardEntry
import pytest
import json
@freeze_time("2020-9-18")
@pytest.mark.vcr()
@pytest.mark.block_network
def test_list_timecard():
te = TimecardEntry("tests/fixtures/cfg_user_password.json")
rs = te.list_timecard(False, "2020-09-14", "2020-09-20")
print(rs[0])
assert rs[0]["pse__Project_Name__c"] == "PX2143 - Company something"
@freeze_time("2020-9-18")
@pytest.mark.vcr()
@pytest.mark.block_network
def test_add_timecard():
te = TimecardEntry("tests/fixtures/cfg_user_password.json")
rs = te.add_time_entry("a6Z0J000000PhLaUAK", "Monday", 8, "Test Notes")
assert rs["success"] == True | [
{
"point_num": 1,
"id": "has_nested_function_def",
"question": "Does this file contain any function defined inside another function?",
"answer": false
},
{
"point_num": 2,
"id": "every_function_under_20_lines",
"question": "Is every function in this file shorter than 20 lines?",
... | 3 | tests/test_timeentry.py | giuliocalzolari/pse_timecard |
#!/usr/bin/env python3
import serial
import re
from flask import Flask
app = Flask(__name__)
@app.route('/')
def return_heartbeat():
try:
data = readHeartbeat()
except Exception as err:
app.logger.error("Error: %s",err)
data = {'heartbeat': 'UNK', 'color': '#007fbf'}
return '''
<html>
<head>
<meta http-equiv="refresh" content="5">
<title>hearbeat</title>
</head>
<body>
<div style="background-color: transparent; font-size: 5em; text-align: center; font-family: Play; color: ''' + data['color'] + ''';"><link href="https://fonts.googleapis.com/css?family=Play" rel="stylesheet">''' + data['heartbeat'] + ''' </div>
</body>
</html>'''
def readHeartbeat():
ser = serial.Serial('/dev/ttyACM0', 115200, timeout=1)
ser.flush()
while True:
if ser.in_waiting > 0:
line = ser.readline().strip().decode('ascii')
chunks = line.split(',')
vals = chunks[0]
if vals and vals[0].isdigit():
val = int(vals)
if val > 40 and val < 150:
hb = str(val)
if val < 75:
ret = { 'heartbeat': hb, 'color': '#3df400'}
elif val < 85:
ret = { 'heartbeat': hb, 'color': '#e9ff00'}
elif val < 95:
ret = { 'heartbeat': hb, 'color': '#ff9400'}
elif val < 110:
ret = { 'heartbeat': hb, 'color': '#ff0000'}
else:
ret = { 'heartbeat': hb, 'color': '#ae0709'}
return ret
if __name__ == '__main__':
app.run(host='0.0.0.0', port=8080)
| [
{
"point_num": 1,
"id": "has_nested_function_def",
"question": "Does this file contain any function defined inside another function?",
"answer": false
},
{
"point_num": 2,
"id": "every_function_under_20_lines",
"question": "Is every function in this file shorter than 20 lines?",
... | 3 | webserver.py | mikewenk/heartbeat |
# Global Imports
import json
from collections import defaultdict
# Metaparser
from genie.metaparser import MetaParser
# =============================================
# Collection for '/mgmt/tm/cm/device-group' resources
# =============================================
class CmDevicegroupSchema(MetaParser):
schema = {}
class CmDevicegroup(CmDevicegroupSchema):
""" To F5 resource for /mgmt/tm/cm/device-group
"""
cli_command = "/mgmt/tm/cm/device-group"
def rest(self):
response = self.device.get(self.cli_command)
response_json = response.json()
if not response_json:
return {}
return response_json
| [
{
"point_num": 1,
"id": "more_functions_than_classes",
"question": "Does this file define more functions than classes?",
"answer": false
},
{
"point_num": 2,
"id": "every_class_has_docstring",
"question": "Does every class in this file have a docstring?",
"answer": false
},
{... | 3 | src/genie/libs/parser/bigip/get_cm_device_group.py | balmasea/genieparser |
# Copyright (c) 2017-2020 Wenyi Tang.
# Author: Wenyi Tang
# Email: wenyitang@outlook.com
# Update: 2020 - 2 - 7
from importlib import import_module
from ..Backend import BACKEND
__all__ = [
'get_model',
'list_supported_models'
]
def get_model(name: str):
name = name.lower()
try:
if BACKEND == 'pytorch':
return import_module('.Models', 'VSR.Backend.Torch').get_model(name)
elif BACKEND == 'tensorflow':
return import_module('.Models', 'VSR.Backend.TF').get_model(name)
elif BACKEND == 'tensorflow2':
pass
except (KeyError, ImportError):
raise ImportError(f"Using {BACKEND}, can't find model {name}.")
def list_supported_models():
if BACKEND == 'pytorch':
return import_module('.Models', 'VSR.Backend.Torch').list_supported_models()
elif BACKEND == 'tensorflow':
return import_module('.Models', 'VSR.Backend.TF').list_supported_models()
elif BACKEND == 'tensorflow2':
pass
| [
{
"point_num": 1,
"id": "every_function_under_20_lines",
"question": "Is every function in this file shorter than 20 lines?",
"answer": true
},
{
"point_num": 2,
"id": "every_function_has_docstring",
"question": "Does every function in this file have a docstring?",
"answer": fals... | 3 | VSR/Model/__init__.py | soufiomario/VideoSuperResolution |
from . import nodes
from ..fields import MongoengineConnectionField
def test_article_field_args():
field = MongoengineConnectionField(nodes.ArticleNode)
field_args = {"id", "headline", "pub_date"}
assert set(field.field_args.keys()) == field_args
reference_args = {"editor", "reporter"}
assert set(field.reference_args.keys()) == reference_args
default_args = {"after", "last", "first", "before"}
args = field_args | reference_args | default_args
assert set(field.args) == args
def test_reporter_field_args():
field = MongoengineConnectionField(nodes.ReporterNode)
field_args = {"id", "first_name", "last_name", "email", "awards"}
assert set(field.field_args.keys()) == field_args
def test_editor_field_args():
field = MongoengineConnectionField(nodes.EditorNode)
field_args = {"id", "first_name", "last_name", "metadata", "seq"}
assert set(field.field_args.keys()) == field_args
def test_field_args_with_property():
field = MongoengineConnectionField(nodes.PublisherNode)
field_args = ["id", "name"]
assert set(field.field_args.keys()) == set(field_args)
def test_field_args_with_unconverted_field():
field = MongoengineConnectionField(nodes.PublisherNode)
field_args = ["id", "name"]
assert set(field.field_args.keys()) == set(field_args)
def test_default_resolver_with_colliding_objects_field():
field = MongoengineConnectionField(nodes.ErroneousModelNode)
connection = field.default_resolver(None, {})
assert 0 == len(connection.iterable)
def test_default_resolver_connection_array_length(fixtures):
field = MongoengineConnectionField(nodes.ArticleNode)
connection = field.default_resolver(None, {}, **{"first": 1})
assert hasattr(connection, "array_length")
assert connection.array_length == 3
| [
{
"point_num": 1,
"id": "every_function_under_20_lines",
"question": "Is every function in this file shorter than 20 lines?",
"answer": true
},
{
"point_num": 2,
"id": "all_function_names_snake_case",
"question": "Are all function names in this file written in snake_case?",
"answ... | 3 | graphene_mongo/tests/test_fields.py | pfrantz/graphene-mongo |
"""Solid definitions for the simple_pyspark example."""
import dagster_pyspark
from pyspark.sql import DataFrame, Window
from pyspark.sql import functions as f
from dagster import make_python_type_usable_as_dagster_type, solid
# Make pyspark.sql.DataFrame map to dagster_pyspark.DataFrame
make_python_type_usable_as_dagster_type(
python_type=DataFrame, dagster_type=dagster_pyspark.DataFrame
)
@solid(required_resource_keys={"pyspark_step_launcher", "pyspark"})
def make_weather_samples(context, file_path: str) -> DataFrame:
"""Loads the weather data from a CSV"""
return (
context.resources.pyspark.spark_session.read.format("csv")
.options(header="true")
.load(file_path)
)
@solid(required_resource_keys={"pyspark_step_launcher"})
def make_daily_temperature_highs(_, weather_samples: DataFrame) -> DataFrame:
"""Computes the temperature high for each day"""
valid_date = f.to_date(weather_samples["valid"]).alias("valid_date")
return weather_samples.groupBy(valid_date).agg(f.max("tmpf").alias("max_tmpf"))
@solid(required_resource_keys={"pyspark_step_launcher"})
def make_daily_temperature_high_diffs(_, daily_temperature_highs: DataFrame) -> DataFrame:
"""Computes the difference between each day's high and the previous day's high"""
window = Window.orderBy("valid_date")
return daily_temperature_highs.select(
"valid_date",
(
daily_temperature_highs["max_tmpf"]
- f.lag(daily_temperature_highs["max_tmpf"]).over(window)
).alias("day_high_diff"),
)
| [
{
"point_num": 1,
"id": "no_function_exceeds_5_params",
"question": "Does every function in this file take 5 or fewer parameters (excluding self/cls)?",
"answer": true
},
{
"point_num": 2,
"id": "all_function_names_snake_case",
"question": "Are all function names in this file written... | 3 | examples/legacy_examples/dagster_examples/simple_pyspark/solids.py | bitdotioinc/dagster |
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import apache_beam as beam
from log_elements import LogElements
class WordsAlphabet:
def __init__(self, alphabet, fruit, country):
self.alphabet = alphabet
self.fruit = fruit
self.country = country
def __str__(self):
return "WordsAlphabet(alphabet:'%s', fruit='%s', country='%s')" % (self.alphabet, self.fruit, self.country)
def apply_transforms(fruits, countries):
def map_to_alphabet_kv(word):
return (word[0], word)
def cogbk_result_to_wordsalphabet(cgbk_result):
(alphabet, words) = cgbk_result
return WordsAlphabet(alphabet, words['fruits'][0], words['countries'][0])
fruits_kv = (fruits | 'Fruit to KV' >> beam.Map(map_to_alphabet_kv))
countries_kv = (countries | 'Country to KV' >> beam.Map(map_to_alphabet_kv))
return ({'fruits': fruits_kv, 'countries': countries_kv}
| beam.CoGroupByKey()
| beam.Map(cogbk_result_to_wordsalphabet))
p = beam.Pipeline()
fruits = p | 'Fruits' >> beam.Create(['apple', 'banana', 'cherry'])
countries = p | 'Countries' >> beam.Create(['australia', 'brazil', 'canada'])
(apply_transforms(fruits, countries)
| LogElements())
p.run()
| [
{
"point_num": 1,
"id": "more_functions_than_classes",
"question": "Does this file define more functions than classes?",
"answer": true
},
{
"point_num": 2,
"id": "no_function_exceeds_5_params",
"question": "Does every function in this file take 5 or fewer parameters (excluding self/... | 3 | learning/katas/python/Core Transforms/CoGroupByKey/CoGroupByKey/task.py | charithe/beam |
# -*- coding: utf-8 -*-
"""
(SHORT NAME EXPLANATION)
>>>DOCTEST COMMANDS
(THE TEST ANSWER)
@author: Yi Zhang. Created on Mon Jul 10 20:12:27 2017
Department of Aerodynamics
Faculty of Aerospace Engineering
TU Delft
#SUMMARY----------------
#INPUTS-----------------
#ESSENTIAL:
#OPTIONAL:
#OUTPUTS----------------
#EXAMPLES---------------
#NOTES------------------
"""
# -*- coding: utf-8 -*-
"""
(SHORT NAME EXPLANATION)
>>>DOCTEST COMMANDS
(THE TEST ANSWER)
@author: Yi Zhang (张仪). Created on Thu Jul 6 16:00:33 2017
Department of Aerodynamics
Faculty of Aerospace Engineering
TU Delft
#SUMMARY----------------
#INPUTS-----------------
#ESSENTIAL:
#OPTIONAL:
#OUTPUTS----------------
#EXAMPLES---------------
#NOTES------------------
"""
from function_space import FunctionSpace
import numpy as np
from mesh import CrazyMesh
from forms import Form
from hodge import hodge
from coboundaries import d
from assemble import assemble
from _assembling import assemble_, integral1d_
import matplotlib.pyplot as plt
from quadrature import extended_gauss_quad
from scipy.integrate import quad
from sympy import Matrix
import scipy.io
from scipy import sparse
import scipy as sp
from inner_product import inner
# %% exact solution define
# u^{(1)} = { u, v }^T
def u(x,y):
return +np.cos(np.pi*x) * np.sin(np.pi*y)
def v(x,y):
return -np.sin(np.pi*x) * np.cos(np.pi*y)
def r_u(x,y):
return -2* np.pi**2 * np.cos(np.pi*x) * np.sin(np.pi*y)
def r_v(x,y):
return 2* np.pi**2 * np.sin(np.pi*x) * np.cos(np.pi*y)
# %% define the mesh
mesh = CrazyMesh( 2, (2, 2), ((-1, 1), (-1, 1)), 0.05 )
func_space_gauss1 = FunctionSpace(mesh, '1-gauss', (5, 5), is_inner=False)
func_space_lobatto1 = FunctionSpace(mesh, '1-lobatto', (5, 5), is_inner=False)
form_1_gauss = Form(func_space_gauss1)
form_1_lobatto = Form(func_space_lobatto1)
M = inner(form_1_lobatto.basis,form_1_gauss.basis)
| [
{
"point_num": 1,
"id": "all_params_annotated",
"question": "Does every function parameter in this file have a type annotation (excluding self/cls)?",
"answer": false
},
{
"point_num": 2,
"id": "no_function_exceeds_5_params",
"question": "Does every function in this file take 5 or fe... | 3 | src/tests/Yi/tests/inner_product_between_lobatto_and_gauss.py | Idate96/Mimetic-Fem |
# Copyright 2011 OpenStack Foundation
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from sqlalchemy.schema import (Column, MetaData, Table)
from umbrella.db.sqlalchemy.migrate_repo.schema import (
Boolean, DateTime, Integer, String, Text, create_tables, drop_tables) # noqa
def define_mem_table(meta):
mem = Table('mem',
meta,
Column('id', Integer(), primary_key=True, autoincrement=True,
nullable=False),
Column('instance_uuid', String(40), nullable=False,
index=True),
Column('tenant_id', String(40), nullable=False, index=True),
Column('mem_used', Integer()),
Column('created_at', DateTime(), nullable=False),
Column('updated_at', DateTime()),
mysql_engine='InnoDB',
extend_existing=True)
return mem
def upgrade(migrate_engine):
meta = MetaData()
meta.bind = migrate_engine
tables = [define_mem_table(meta)]
create_tables(tables)
def downgrade(migrate_engine):
meta = MetaData()
meta.bind = migrate_engine
tables = [define_mem_table(meta)]
drop_tables(tables)
| [
{
"point_num": 1,
"id": "no_function_exceeds_5_params",
"question": "Does every function in this file take 5 or fewer parameters (excluding self/cls)?",
"answer": true
},
{
"point_num": 2,
"id": "all_params_annotated",
"question": "Does every function parameter in this file have a ty... | 3 | umbrella/db/sqlalchemy/migrate_repo/versions/003_add_mem_table.py | xww/umbrella |
# coding: utf-8
"""
katib
swagger description for katib # noqa: E501
OpenAPI spec version: v0.1
Generated by: https://github.com/swagger-api/swagger-codegen.git
"""
from __future__ import absolute_import
import unittest
import katib
from katib.models.v1alpha3_trial_assignment import V1alpha3TrialAssignment # noqa: E501
from katib.rest import ApiException
class TestV1alpha3TrialAssignment(unittest.TestCase):
"""V1alpha3TrialAssignment unit test stubs"""
def setUp(self):
pass
def tearDown(self):
pass
def testV1alpha3TrialAssignment(self):
"""Test V1alpha3TrialAssignment"""
# FIXME: construct object with mandatory attributes with example values
# model = katib.models.v1alpha3_trial_assignment.V1alpha3TrialAssignment() # noqa: E501
pass
if __name__ == '__main__':
unittest.main()
| [
{
"point_num": 1,
"id": "all_function_names_snake_case",
"question": "Are all function names in this file written in snake_case?",
"answer": false
},
{
"point_num": 2,
"id": "every_function_under_20_lines",
"question": "Is every function in this file shorter than 20 lines?",
"ans... | 3 | sdk/python/test/test_v1alpha3_trial_assignment.py | PoornimaDevii/katib |
from typing import Union
from mpfmc.core.config_collection import ConfigCollection
class SlideCollection(ConfigCollection):
config_section = 'slides'
collection = 'slides'
class_label = 'SlideConfig'
def process_config(self, config: Union[dict, list]) -> dict:
# config is localized to an single slide name entry
if isinstance(config, list):
new_dict = dict()
new_dict['widgets'] = config
config = new_dict
elif isinstance(config, dict):
if 'widgets' not in config:
new_dict = dict()
new_dict['widgets'] = [config]
config = new_dict
elif not isinstance(config['widgets'], list):
if config['widgets']:
config['widgets'] = [config['widgets']]
else:
config['widgets'] = []
for i, widget in enumerate(config['widgets']):
# since dict is mutable it updates in place
config['widgets'][i] = self.mc.widgets.process_widget(widget)
config = self.mc.config_validator.validate_config('slides', config)
config = self.mc.transition_manager.validate_transitions(config)
return config
def validate_config(self, config):
# since dict is mutable it updates in place
self.mc.widgets.validate_config(config['widgets'])
CollectionCls = SlideCollection
| [
{
"point_num": 1,
"id": "has_multiple_inheritance",
"question": "Does any class in this file use multiple inheritance?",
"answer": false
},
{
"point_num": 2,
"id": "every_function_has_docstring",
"question": "Does every function in this file have a docstring?",
"answer": false
... | 3 | mpfmc/config_collections/slide.py | atummons/mpf-mc |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
from __future__ import absolute_import, unicode_literals
import os
import sys
# codecov.io project token
import pypandoc
codecov_token = '' or os.environ.get('FORGIVE_DB_CODECOV_TOKEN')
base_dir = os.path.dirname(os.path.abspath(__file__))
sub_commands = {}
def run(*commands):
os.system('cd {} && {}'.format(base_dir, ' && '.join(commands)))
def cmd(name):
def decorator(func):
sub_commands[name] = func
def wrapper(*args, **kwargs):
return func(*args, **kwargs)
return wrapper
return decorator
def usage():
print('Usage: {} <sub_command> <args...>'.format(sys.argv[0]))
print('Sub command: [{}]'.format(', '.join(sub_commands)))
exit(1)
@cmd('test')
def test():
run('pytest --cov=./', 'codecov --token={}'.format(codecov_token))
@cmd('release')
def release(*setup_commands):
markdown_file = os.path.join(base_dir, 'README.md')
rst_file = os.path.join(base_dir, 'README.rst')
rst_content = pypandoc.convert(markdown_file, 'rst')
with open(rst_file, 'wb') as f:
f.write(rst_content.encode('utf-8'))
run('python setup.py {}'.format(' '.join(setup_commands)))
os.unlink(rst_file)
if __name__ == '__main__':
if len(sys.argv) < 2:
usage()
sub_command = sys.argv[1]
if sub_command not in sub_commands:
usage()
func = sub_commands[sub_command]
func(*sys.argv[2:])
| [
{
"point_num": 1,
"id": "every_function_has_docstring",
"question": "Does every function in this file have a docstring?",
"answer": false
},
{
"point_num": 2,
"id": "every_function_under_20_lines",
"question": "Is every function in this file shorter than 20 lines?",
"answer": tru... | 3 | release.py | hui-z/ForgiveDB |
# -*- coding: utf-8 -*-
import os
import sys
import random
sourceDir = '/data/deresute-face'
trFile = 'train.txt'
teFile = 'test.txt'
mapFile = 'config/classes.py'
if len(sys.argv) != 3:
print ("usage %s trainNum testNum" % (sys.argv[0]))
exit()
datanum = int(sys.argv[1])
testnum = int(sys.argv[2])
def listClass(dir):
ret = []
for file in os.listdir(dir):
if(file == "." or file == ".."):
# -1 は 全部 0 のラベルにしたかった。
# if(file == "." or file == ".." or file == "-1"):
continue;
ret.append(file)
return ret
def find(dir, dirs):
ret = dirs
for file in os.listdir(dir):
realfile = os.path.join("%s","%s")%(dir,file)
if (os.path.isdir(realfile)):
ret = find(realfile, ret)
else:
ret.append(realfile)
return ret
def ref(dict, key, default):
try:
return dict[key]
except:
return default
def addDict(dict, key):
try:
dict[key] += 1
except:
dict[key] = 1
dirs = listClass(sourceDir)
def getId(className):
return dirs.index(className)
images = find(sourceDir, [])
random.shuffle(images);
fp = open(mapFile, "w")
fp.write("classList = {}\n")
i = 0
for className in dirs:
fp.write("classList[%d] = \"%s\"\n"% (i, className))
i += 1
fp.close()
teFp = open(teFile, "w")
trFp = open(trFile, "w")
limits = {};
limits2 = {};
for image in images:
className = os.path.basename(os.path.dirname(image))
isTest = False
if ref(limits2, className, 0) >= testnum:
continue
elif ref(limits, className, 0) >= datanum:
addDict(limits2, className)
isTest = True
else:
addDict(limits, className)
# if className == "-1":
# continue
id = getId(className);
if isTest:
teFp.write("%s,%d\n" % (image, id));
else:
trFp.write("%s,%d\n" % (image, id));
trFp.close()
teFp.close()
| [
{
"point_num": 1,
"id": "no_function_exceeds_5_params",
"question": "Does every function in this file take 5 or fewer parameters (excluding self/cls)?",
"answer": true
},
{
"point_num": 2,
"id": "all_function_names_snake_case",
"question": "Are all function names in this file written... | 3 | make-test.py | todorokit/tensorflow_cnn_image_sample |
import os
import yaml
import tensorflow as tf
from NMTmodel.NMT.dataset import data_util
cur_dir = os.path.dirname(os.path.abspath(__file__))
par_dir = os.path.dirname(cur_dir)
class DatasetTest(tf.test.TestCase):
def setUp(self):
self.config_file = os.path.join(par_dir, "config.yml")
def test_dataset(self):
with tf.gfile.GFile(self.config_file, "rb") as f:
params = yaml.load(stream=f.read(), Loader=yaml.FullLoader)
data_util.get_dataset(params, mode=tf.estimator.ModeKeys.PREDICT)
if __name__ == '__main__':
tf.test.main()
| [
{
"point_num": 1,
"id": "more_functions_than_classes",
"question": "Does this file define more functions than classes?",
"answer": true
},
{
"point_num": 2,
"id": "every_function_has_docstring",
"question": "Does every function in this file have a docstring?",
"answer": false
}... | 3 | tests/dataset_test.py | MISStingting/NMTmodel |
"""friends.py: Implementation of class AbstractTwitterFriendCommand
and its subclasses.
"""
from . import AbstractTwitterCommand
from ..parsers import (
parser_user_single,
parser_count_users,
parser_count_users_many,
parser_cursor,
parser_skip_status,
parser_include_user_entities)
# GET friends/ids
# GET friends/list
FRIENDS_IDS = ('friends/ids', 'fri')
FRIENDS_LIST = ('friends/list', 'frl')
# pylint: disable=abstract-method
class AbstractTwitterFriendCommand(AbstractTwitterCommand):
"""n/a"""
pass
class CommandIds(AbstractTwitterFriendCommand):
"""Print user IDs for every user the specified user is
following.
"""
def create_parser(self, subparsers):
parser = subparsers.add_parser(
FRIENDS_IDS[0],
aliases=FRIENDS_IDS[1:],
parents=[parser_user_single(),
parser_count_users_many(), # 20, 5000
parser_cursor()],
help=self.__doc__)
return parser
def __call__(self):
"""Request GET friends/ids for Twitter."""
self.list_ids(self.twhandler.friends.ids)
class CommandList(AbstractTwitterFriendCommand):
"""List all of the users the specified user is following."""
def create_parser(self, subparsers):
parser = subparsers.add_parser(
FRIENDS_LIST[0],
aliases=FRIENDS_LIST[1:],
parents=[parser_user_single(),
parser_count_users(), # 20, 200
parser_cursor(),
parser_skip_status(),
parser_include_user_entities()],
help=self.__doc__)
return parser
def __call__(self):
"""Request GET friends/list for Twitter."""
self._list_common(self.twhandler.friends.list)
def make_commands(manager):
"""Prototype"""
# pylint: disable=no-member
return (cmd_t(manager) for cmd_t in
AbstractTwitterFriendCommand.__subclasses__())
| [
{
"point_num": 1,
"id": "every_class_has_docstring",
"question": "Does every class in this file have a docstring?",
"answer": true
},
{
"point_num": 2,
"id": "all_params_annotated",
"question": "Does every function parameter in this file have a type annotation (excluding self/cls)?",... | 3 | twmods/commands/friends.py | showa-yojyo/bin |
from enum import Enum
from unittest import TestCase
from marshy import dump, load, get_default_context
from marshy.errors import MarshallError
from marshy.factory.enum_marshaller_factory import EnumMarshallerFactory
class VehicleTypes(Enum):
CAR = 'car'
TRUCK = 'truck'
BIKE = 'bike'
class TestMarshallEnum(TestCase):
def test_marshall(self):
dumped = dump(VehicleTypes.CAR)
assert VehicleTypes.CAR.value == dumped
loaded = load(VehicleTypes, dumped)
assert VehicleTypes.CAR == loaded
def test_unknown_value_not_permitted(self):
with self.assertRaises(MarshallError):
load(VehicleTypes, 'spaceship')
def test_unknown_value_permitted(self):
# Allow unknown values to be placed in the enum
marshaller = EnumMarshallerFactory(allow_unknown=True).create(get_default_context(), VehicleTypes)
loaded = marshaller.load('spaceship')
assert loaded.value == 'spaceship'
assert loaded.__class__ == VehicleTypes
dumped = marshaller.dump(loaded)
assert dumped == 'spaceship'
| [
{
"point_num": 1,
"id": "more_functions_than_classes",
"question": "Does this file define more functions than classes?",
"answer": true
},
{
"point_num": 2,
"id": "has_multiple_inheritance",
"question": "Does any class in this file use multiple inheritance?",
"answer": false
},... | 3 | test/test_marshall_enum.py | tofarr/marshy |
from django.test import TestCase
from lxml import html
import pytest
from projects.tests.factories import ProjectFactory, NominationFactory, ClaimFactory
@pytest.mark.django_db
class TestNominationDetailView:
def test_loads(self, client):
response = client.get(NominationFactory().get_absolute_url())
assert response.status_code == 200
def test_all_claims_listed(self, client):
nomination = NominationFactory()
for _ in range(5):
nomination.claims.add(ClaimFactory())
response = client.get(nomination.get_absolute_url())
tree = html.fromstring(response.content)
# claim_urls = {c.get_absolute_url() for c in nomination.claims.all()}
for claim in nomination.claims.all():
url = claim.get_absolute_url()
assert len(tree.xpath(f'//a[@href="{url}"]')) == 1
@pytest.mark.django_db
def test_nomination_create_view_get(client):
project = ProjectFactory(nomination_policy='Public')
response = client.get(project.get_add_nomination_url())
assert response.status_code == 200
# def test_anonymous_cannot_nominate_to_restricted_project(self):
# pass
# def test_user_creates_project(self):
# """...
# Should autmatically set: User"""
# pass
| [
{
"point_num": 1,
"id": "all_function_names_snake_case",
"question": "Are all function names in this file written in snake_case?",
"answer": true
},
{
"point_num": 2,
"id": "every_function_has_docstring",
"question": "Does every function in this file have a docstring?",
"answer":... | 3 | projects/tests/tests_nomination_views.py | CobwebOrg/cobweb-django |
class Person:
name='zhangsan'
age=20
p = Person()
print(p) # <__main__.Person object at 0x10073e668>
print('⭐️ ' * 20)
class Stu:
name='zhangsan'
age=20
def __str__(self):
return "name: %s; age: %d"%(self.name, self.age)
s = Stu()
print(s) # name: zhangsan; age: 20 | [
{
"point_num": 1,
"id": "every_class_has_docstring",
"question": "Does every class in this file have a docstring?",
"answer": false
},
{
"point_num": 2,
"id": "more_functions_than_classes",
"question": "Does this file define more functions than classes?",
"answer": false
},
{... | 3 | Basic-Python/code/test_magic/3.py | johnnynode/AI-LEARNING-MATERIAL |
from nitorch.core.py import make_list
from nitorch.tools.cli import commands
from .main import crop
from .parser import parse, help, ParseError
import sys
def cli(args=None):
f"""Command-line interface for `nicrop`
{help}
"""
# Exceptions are dealt with here
try:
_cli(args)
except ParseError as e:
print(help)
print(f'[ERROR] {str(e)}', file=sys.stderr)
except Exception as e:
print(f'[ERROR] {str(e)}', file=sys.stderr)
def _cli(args):
"""Command-line interface for `nicrop` without exception handling"""
args = args or sys.argv[1:]
options = parse(args)
if not options:
return
options.output = make_list(options.output, len(options.files))
options.transform = make_list(options.transform, len(options.files))
for fname, ofname, tfname in zip(options.files, options.output, options.transform):
crop(fname, size=options.size, center=options.center,
space=(options.size_space, options.center_space), like=options.like,
output=ofname, transform=tfname)
commands['crop'] = cli
| [
{
"point_num": 1,
"id": "every_function_has_docstring",
"question": "Does every function in this file have a docstring?",
"answer": false
},
{
"point_num": 2,
"id": "every_function_under_20_lines",
"question": "Is every function in this file shorter than 20 lines?",
"answer": tru... | 3 | nitorch/tools/misc/crop/cli.py | liamchalcroft/nitorch |
from mpi4py import MPI
import matplotlib.pyplot as plt
import numpy as np
import time
def sim_rand_walks_parallel(n_runs):
# Get rank of process and overall size of communicator:
comm = MPI.COMM_WORLD
rank = comm.Get_rank()
size = comm.Get_size()
# Start time:
t0 = time.time()
# Evenly distribute number of simulation runs across processes
N = int(n_runs/size)
# Simulate N random walks and specify as a NumPy Array
r_walks = []
for i in range(N):
steps = np.random.normal(loc=0, scale=1, size=100)
steps[0] = 0
r_walks.append(100 + np.cumsum(steps))
r_walks_array = np.array(r_walks)
# Gather all simulation arrays to buffer of expected size/dtype on rank 0
r_walks_all = None
if rank == 0:
r_walks_all = np.empty([N*size, 100], dtype='float')
comm.Gather(sendbuf = r_walks_array, recvbuf = r_walks_all, root=0)
# Print/plot simulation results on rank 0
if rank == 0:
# Calculate time elapsed after computing mean and std
average_finish = np.mean(r_walks_all[:,-1])
std_finish = np.std(r_walks_all[:,-1])
time_elapsed = time.time() - t0
# Print time elapsed + simulation results
print("Simulated %d Random Walks in: %f seconds on %d MPI processes"
% (n_runs, time_elapsed, size))
print("Average final position: %f, Standard Deviation: %f"
% (average_finish, std_finish))
# Plot Simulations and save to file
plt.plot(r_walks_all.transpose())
plt.savefig("r_walk_nprocs%d_nruns%d.png" % (size, n_runs))
return
def main():
sim_rand_walks_parallel(n_runs = 10000)
if __name__ == '__main__':
main()
| [
{
"point_num": 1,
"id": "every_function_has_docstring",
"question": "Does every function in this file have a docstring?",
"answer": false
},
{
"point_num": 2,
"id": "every_function_under_20_lines",
"question": "Is every function in this file shorter than 20 lines?",
"answer": fal... | 3 | Labs/Lab 1 Midway RCC and mpi4py/mpi_rand_walk.py | cindychu/LargeScaleComputing_S20 |
import os
import click
from dotenv import load_dotenv
from app import create_app
dotenv_path = os.path.join(os.path.dirname(__file__), '.env')
if os.path.exists(dotenv_path):
load_dotenv(dotenv_path)
app = create_app(os.getenv('FLASK_CONFIG') or 'default')
@app.shell_context_processor
def make_shell_context():
return dict()
@app.cli.command()
@click.argument('test_names', nargs=-1)
def test(test_names):
"""Run the unit tests."""
import unittest
if test_names:
tests = unittest.TestLoader().loadTestsFromNames(test_names)
else:
tests = unittest.TestLoader().discover('tests')
unittest.TextTestRunner(verbosity=2).run(tests)
| [
{
"point_num": 1,
"id": "every_function_under_20_lines",
"question": "Is every function in this file shorter than 20 lines?",
"answer": true
},
{
"point_num": 2,
"id": "every_function_has_docstring",
"question": "Does every function in this file have a docstring?",
"answer": fals... | 3 | mock_server.py | Lameaux/mock_server |
import time
import pcap
import struct
import threading
import subprocess as sp
import Queue
from scapy.all import *
# def parse(tup):
# time, data = tup
# class BtPacket(object):
# @classmethod
# def is_valid(cls, tup):
# return len(tup[1]) == 15+8
# def __init__(tup):
# t, data = tup
# rssi = data[-1]
# if rssi not in {0, 4}:
# self.rssi = rssi-0x100 if rssi & 0x80 else rssi
# else:
# self.rssi = None
# self.time = t
# if len(data) == 18+8:
# rawaddr = data[8+4:8+4+6]
# else:
class Sniffer(threading.Thread):
def __init__(self, queue, *args, **kwargs):
threading.Thread.__init__(self, *args, **kwargs)
self.__queue = queue
def run(self):
def record(packet, ignore = set()):
self.__queue.put(("Bluetooth", packet.src, time.time()))
sniff(prn=record)
| [
{
"point_num": 1,
"id": "all_params_annotated",
"question": "Does every function parameter in this file have a type annotation (excluding self/cls)?",
"answer": false
},
{
"point_num": 2,
"id": "all_function_names_snake_case",
"question": "Are all function names in this file written ... | 3 | btsniffer.py | zdavidli/HomeHoneypot |
import numpy as np
from sklearn.datasets import load_boston
from sklearn.model_selection import train_test_split
dataset = load_boston()
X = dataset.data
y = dataset.target
mean = X.mean(axis=0)
std = X.std(axis=0)
X = (X-mean)/std
# print(X)
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.2)
n_train = X_train.shape[0]
n_features = X_train.shape[1]
# 权重初始化
w = np.random.rand(n_features)
b = 1.1
lr = 0.001
epoches = 3000
def model(x):
y_hat = w.dot(x)+b
return y_hat
def loss_funtion(X, y):
total_loss = 0
n_samples = len(X)
for i in range(n_samples):
xi = X[i]
yi = y[i]
yi_hat = model(xi)
total_loss += abs(yi_hat-yi)**2
avg_loss = (1/n_samples)*total_loss
return avg_loss
reg = 0.5
for epoch in range(epoches):
sum_w = 0.0
sum_b = 0.0
for i in range(n_train):
xi = X_train[i]
yi = y_train[i]
yi_hat = model(xi)
sum_w += (yi_hat-yi)*xi
sum_b += (yi_hat - yi)
grad_w = (2/n_train)*sum_w+(2.0*reg*w)
grad_b = (2/n_train)*sum_b # 偏置项不做正则化处理
w = w-lr*grad_w
b = b-lr*grad_b
train_loss = loss_funtion(X_train, y_train)
test_loss = loss_funtion(X_test, y_test)
print(train_loss)
print(test_loss)
| [
{
"point_num": 1,
"id": "every_function_has_docstring",
"question": "Does every function in this file have a docstring?",
"answer": false
},
{
"point_num": 2,
"id": "no_function_exceeds_5_params",
"question": "Does every function in this file take 5 or fewer parameters (excluding sel... | 3 | 1_boston.py | ZXTFINAL/deeplearning |
# -*- coding: utf-8 -*-
"""
Example controller for SSE (server-side events) with gevent.
Builds on the simple SSE controller.
"""
import sys
import time
import gevent.queue
from tg import expose, request, response
from tg import url
from tg.decorators import with_trailing_slash
from eventstream import EventstreamController
class GeventEventstreamController(EventstreamController):
# set containing a gevent queue for each of the clients (browsers) listening for events
client_queues = set()
@expose()
@with_trailing_slash
def index(self):
"""whenever a new client opens this page, sends an event to all listening clients"""
# put a gevent event in each client's queue
for q in GeventEventstreamController.client_queues:
q.put("visit received from %(REMOTE_ADDR)s with user agent %(HTTP_USER_AGENT)s" % request.environ)
# return the page for listening
return self.load_js(url('visitstream'))
@expose()
def visitstream(self):
"""sends a SSE whenever somebody visits index"""
# set charset appropriately
response.headers['Content-type'] = 'text/event-stream'
# disable charset (see EventstreamController)
response.charset = ""
# create a new queue for this new listening client
q = gevent.queue.Queue()
GeventEventstreamController.client_queues.add(q)
def stream():
while True:
yield "data: %s %s\n\n" % (q.get(), time.time())
return stream()
| [
{
"point_num": 1,
"id": "every_function_under_20_lines",
"question": "Is every function in this file shorter than 20 lines?",
"answer": true
},
{
"point_num": 2,
"id": "has_nested_function_def",
"question": "Does this file contain any function defined inside another function?",
"... | 3 | eventstreamexamples/controllers/geventeventstream.py | nh2/eventstreamexamples |
from typing import Any
from district42 import GenericSchema
from district42.types import Schema
from ._abstract_formatter import AbstractFormatter
from ._formatter import Formatter
from ._validation_result import ValidationResult
from ._validator import Validator
from ._version import version
__version__ = version
__all__ = ("validate", "validate_or_fail", "eq", "Validator", "ValidationResult",
"ValidationException", "Formatter", "AbstractFormatter",)
_validator = Validator()
_formatter = Formatter()
def validate(schema: GenericSchema, value: Any, **kwargs: Any) -> ValidationResult:
return schema.__accept__(_validator, value=value, **kwargs)
class ValidationException(Exception):
pass
def validate_or_fail(schema: GenericSchema, value: Any, **kwargs: Any) -> bool:
result = validate(schema, value, **kwargs)
errors = [e.format(_formatter) for e in result.get_errors()]
if len(errors) == 0:
return True
message = "\n - " + "\n - ".join(errors)
raise ValidationException(message)
def eq(schema: GenericSchema, value: Any) -> bool:
if isinstance(value, Schema):
return isinstance(value, schema.__class__) and (schema.props == value.props)
return not validate(schema, value=value).has_errors()
Schema.__override__(Schema.__eq__.__name__, eq)
| [
{
"point_num": 1,
"id": "more_functions_than_classes",
"question": "Does this file define more functions than classes?",
"answer": true
},
{
"point_num": 2,
"id": "has_multiple_inheritance",
"question": "Does any class in this file use multiple inheritance?",
"answer": false
},... | 3 | valera/__init__.py | nikitanovosibirsk/valera |
import os
import shutil
import tempfile
from unittest import TestCase
from unittest.mock import call, patch, MagicMock
from piccolo.apps.migrations.commands.new import (
_create_new_migration,
BaseMigrationManager,
new,
)
from piccolo.conf.apps import AppConfig
from piccolo.utils.sync import run_sync
from tests.base import postgres_only
from tests.example_app.tables import Manager
class TestNewMigrationCommand(TestCase):
def test_create_new_migration(self):
"""
Create a manual migration (i.e. non-auto).
"""
migration_folder = os.path.join(
tempfile.gettempdir(), "piccolo_migrations"
)
if os.path.exists(migration_folder):
shutil.rmtree(migration_folder)
os.mkdir(migration_folder)
app_config = AppConfig(
app_name="music",
migrations_folder_path=migration_folder,
table_classes=[Manager],
)
run_sync(_create_new_migration(app_config, auto=False))
migration_modules = BaseMigrationManager().get_migration_modules(
migration_folder
)
self.assertTrue(len(migration_modules.keys()) == 1)
@postgres_only
@patch("piccolo.apps.migrations.commands.new.print")
def test_new_command(self, print_: MagicMock):
"""
Call the command, when no migration changes are needed.
"""
with self.assertRaises(SystemExit) as manager:
run_sync(new(app_name="example_app", auto=True))
self.assertEqual(manager.exception.code, 0)
self.assertTrue(
print_.mock_calls[-1] == call("No changes detected - exiting.")
)
| [
{
"point_num": 1,
"id": "more_functions_than_classes",
"question": "Does this file define more functions than classes?",
"answer": true
},
{
"point_num": 2,
"id": "every_function_has_docstring",
"question": "Does every function in this file have a docstring?",
"answer": true
},... | 3 | tests/apps/migrations/commands/test_new.py | aminalaee/piccolo |
import pytest
from mau.parsers.base_parser import ParseError
from mau.parsers.arguments_parser import ArgumentsParser
from tests.helpers import init_parser_factory
init_parser = init_parser_factory(ArgumentsParser)
def test_named_argument():
p = init_parser("argument1=value1")
p.parse()
assert p.args == []
assert p.kwargs == {"argument1": "value1"}
def test_multiple_named_arguments():
p = init_parser("argument1=value1,argument2=value2")
p.parse()
assert p.args == []
assert p.kwargs == {"argument1": "value1", "argument2": "value2"}
def test_multiple_named_arguments_with_spaces():
p = init_parser("argument1=value1, argument2=value2")
p.parse()
assert p.args == []
assert p.kwargs == {"argument1": "value1", "argument2": "value2"}
def test_multiple_unnamed_arguments():
p = init_parser("value1,value2")
p.parse()
assert p.args == ["value1", "value2"]
assert p.kwargs == {}
def test_quotes_with_named_arguments():
p = init_parser('argument="value1,value2"')
p.parse()
assert p.args == []
assert p.kwargs == {"argument": "value1,value2"}
def test_quotes_with_unnamed_arguments():
p = init_parser('"value1,value2"')
p.parse()
assert p.args == ["value1,value2"]
assert p.kwargs == {}
def test_escaped_quotes():
p = init_parser(r'"value \"with quotes\""')
p.parse()
assert p.args == ['value "with quotes"']
assert p.kwargs == {}
def test_multiple_unnamed_and_named_arguments():
p = init_parser("value1,argument2=value2")
p.parse()
assert p.args == ["value1"]
assert p.kwargs == {"argument2": "value2"}
def test_multiple_named_arguments_before_unnamed_ones():
p = init_parser("argument1=value1,value2")
with pytest.raises(ParseError):
p.parse()
def test_raw_argument():
p = init_parser("value0, argument1=value1", raw=True)
p.parse()
assert p.args == ["value0, argument1=value1"]
assert p.kwargs == {}
| [
{
"point_num": 1,
"id": "every_function_has_docstring",
"question": "Does every function in this file have a docstring?",
"answer": false
},
{
"point_num": 2,
"id": "has_nested_function_def",
"question": "Does this file contain any function defined inside another function?",
"ans... | 3 | tests/parsers/test_arguments_parser.py | xrmx/mau |
"""
Given a binary tree, return the bottom-up level order traversal of its nodes' values. (ie, from left to right, level by
level from leaf to root).
For example:
Given binary tree {3,9,20,#,#,15,7},
3
/ \
9 20
/ \
15 7
return its bottom-up level order traversal as:
[
[15,7],
[9,20],
[3]
]
"""
__author__ = 'Danyang'
# Definition for a binary tree node
class TreeNode:
def __init__(self, x):
self.val = x
self.left = None
self.right = None
class Solution:
def levelOrderBottom(self, root):
"""
bfs
:param root: TreeNode
:return: Integers
"""
if not root:
return []
result = []
next_level = [root]
while next_level:
current_level = next_level
result.insert(0, map(lambda x: x.val, current_level)) # current level, only difference with Binary Tree Level Order Traversal I
next_level = []
for element in current_level:
if element.left:
next_level.append(element.left)
if element.right:
next_level.append(element.right)
return result
if __name__=="__main__":
Solution().levelOrderBottom(TreeNode(1))
| [
{
"point_num": 1,
"id": "every_class_has_docstring",
"question": "Does every class in this file have a docstring?",
"answer": false
},
{
"point_num": 2,
"id": "has_multiple_inheritance",
"question": "Does any class in this file use multiple inheritance?",
"answer": false
},
{... | 3 | 107 Binary Tree Level Order Traversal II.py | scorpionpd/LeetCode-all |
from datetime import datetime
from pydantic.main import BaseModel
from factory import db
from utils.models import OrmBase
from typing import List
class Post(db.Model):
__tablename__ = "post"
id = db.Column(db.Integer, primary_key=True)
text = db.Column(db.UnicodeText)
created = db.Column(db.DateTime, default=datetime.utcnow)
author_id = db.Column(db.Integer, db.ForeignKey("user.id"))
def __repr__(self) -> str:
return f"<Post {self.id}>"
class PostCreate(BaseModel):
text: str
class PostResponse(OrmBase):
text: str
created: datetime
author_id: int
class PostResponseList(BaseModel):
page: int
pages: int
total: int
posts: List[PostResponse]
| [
{
"point_num": 1,
"id": "more_functions_than_classes",
"question": "Does this file define more functions than classes?",
"answer": false
},
{
"point_num": 2,
"id": "has_multiple_inheritance",
"question": "Does any class in this file use multiple inheritance?",
"answer": false
}... | 3 | API_course/models/post.py | ThiNepo/neps-guide-flask-1 |
"""List options for creating Placement Groups"""
# :license: MIT, see LICENSE for more details.
import click
from SoftLayer.CLI import environment
from SoftLayer.CLI import formatting
from SoftLayer.managers.vs_placement import PlacementManager as PlacementManager
@click.command()
@environment.pass_env
def cli(env):
"""List options for creating a placement group."""
manager = PlacementManager(env.client)
routers = manager.get_routers()
env.fout(get_router_table(routers))
rules = manager.get_all_rules()
env.fout(get_rule_table(rules))
def get_router_table(routers):
"""Formats output from _get_routers and returns a table. """
table = formatting.Table(['Datacenter', 'Hostname', 'Backend Router Id'], "Available Routers")
for router in routers:
datacenter = router['topLevelLocation']['longName']
table.add_row([datacenter, router['hostname'], router['id']])
return table
def get_rule_table(rules):
"""Formats output from get_all_rules and returns a table. """
table = formatting.Table(['Id', 'KeyName'], "Rules")
for rule in rules:
table.add_row([rule['id'], rule['keyName']])
return table
| [
{
"point_num": 1,
"id": "all_function_names_snake_case",
"question": "Are all function names in this file written in snake_case?",
"answer": true
},
{
"point_num": 2,
"id": "no_function_exceeds_5_params",
"question": "Does every function in this file take 5 or fewer parameters (exclu... | 3 | SoftLayer/CLI/virt/placementgroup/create_options.py | dvzrv/softlayer-python |
"Profile the performance of downloading fresh chain from peers"
from skepticoin.networking.threading import NetworkingThread
from skepticoin.scripts.utils import (
configure_logging_from_args,
DefaultArgumentParser,
check_chain_dir,
read_chain_from_disk,
)
import cProfile
from datetime import datetime
from time import time
def test_main():
parser = DefaultArgumentParser()
args = parser.parse_args(args=[])
configure_logging_from_args(args)
# Initially were using an empty chain for this test.
# And then we found that the performance of newer blocks is different!
# coinstate = CoinState.zero()
check_chain_dir()
coinstate = read_chain_from_disk()
# we need to run in the current thread to profile it
nt = NetworkingThread(coinstate, port=None)
def runner():
started = datetime.now()
nt.local_peer.running = True
print("start height = %d" % nt.local_peer.chain_manager.coinstate.head().height)
while (datetime.now() - started).seconds <= 100:
current_time = int(time())
nt.local_peer.step_managers(current_time)
nt.local_peer.handle_selector_events()
print("final height = %d" % nt.local_peer.chain_manager.coinstate.head().height)
with cProfile.Profile() as pr:
pr.runcall(runner)
pr.print_stats()
| [
{
"point_num": 1,
"id": "every_function_has_docstring",
"question": "Does every function in this file have a docstring?",
"answer": false
},
{
"point_num": 2,
"id": "every_function_under_20_lines",
"question": "Is every function in this file shorter than 20 lines?",
"answer": fal... | 3 | performance/profile_fresh_chain.py | kryptocurrency/skepticoin |
"""
Given an array of integers nums sorted in ascending order, find the starting and ending position of a given target value.
Your algorithm's runtime complexity must be in the order of O(log n).
If the target is not found in the array, return [-1, -1].
Solution:
- It's a typical binary search with a slight variation
- Ensure that all the indices are right.
"""
class Solution(object):
def find_first_occurence(self, nums, target, left, right):
if left > right: return -1
mid = (left+right)//2
if mid == 0 and nums[mid]==target: return 0
if nums[mid]==target and nums[mid-1]<target: return mid
if target > nums[mid]:
return self.find_first_occurence(nums, target, mid+1, right)
else:
return self.find_first_occurence(nums, target, left, mid-1)
def find_last_occurence(self, nums, target, left, right):
if left > right: return -1
mid = (left+right)//2
if mid == len(nums)-1 and nums[mid]==target:
return len(nums)-1
if nums[mid]==target and nums[mid+1]>target:
return mid
if target >= nums[mid]:
return self.find_last_occurence(nums, target, mid+1, right)
else:
return self.find_last_occurence(nums, target, left, mid-1)
def searchRange(self, nums, target):
if not nums: return [-1, -1]
first = self.find_first_occurence(nums, target, 0, len(nums)-1)
if first == -1: return [-1, -1]
last = self.find_last_occurence(nums, target, first, len(nums)-1)
return [first, last]
| [
{
"point_num": 1,
"id": "no_function_exceeds_5_params",
"question": "Does every function in this file take 5 or fewer parameters (excluding self/cls)?",
"answer": true
},
{
"point_num": 2,
"id": "all_function_names_snake_case",
"question": "Are all function names in this file written... | 3 | Python/Binary_Search/med_first_last_sorted.py | animeshramesh/interview-prep |
# -*- coding: UTF-8 -*-
'''
Created on 2020-03-08
@author: daizhaolin
'''
from .config import Config
from .helper import cached_property
from .logging import create_logger
class ScriptEngine(object):
def __init__(self):
self.name = __name__
self.config = Config({
'DEBUG': False
})
self.extensions = dict()
self.controller_queue = list()
@property
def debug(self):
return self.config['DEBUG']
@cached_property
def logger(self):
return create_logger(self)
def register_controller(self, controller):
self.controller_queue.append(controller)
def run(self):
for controller in self.controller_queue:
controller(self)
| [
{
"point_num": 1,
"id": "every_function_under_20_lines",
"question": "Is every function in this file shorter than 20 lines?",
"answer": true
},
{
"point_num": 2,
"id": "has_nested_function_def",
"question": "Does this file contain any function defined inside another function?",
"... | 3 | ScriptEngine/app.py | daizhaolin/scriptengine |
from listener import Listener
class Kudos:
def __init__(self, client):
self.client = client
Listener.register(self.on_message, "on_message")
pass
def on_message(self, ctx):
pass
| [
{
"point_num": 1,
"id": "every_function_has_docstring",
"question": "Does every function in this file have a docstring?",
"answer": false
},
{
"point_num": 2,
"id": "more_functions_than_classes",
"question": "Does this file define more functions than classes?",
"answer": true
}... | 3 | src/kudos.py | tinlun/Helpdesk-slackbot |
def calc_fuel(mass, recurse=True):
n = mass/3-2
if n <= 0:
return 0
elif recurse:
return n + calc_fuel(n)
else:
return n
def solve(recurse=True):
total = 0
with open('input.txt') as f:
for li in f:
total += calc_fuel(int(li), recurse)
print(total)
if __name__ == '__main__':
solve(recurse=False)
solve(recurse=True)
| [
{
"point_num": 1,
"id": "every_function_has_docstring",
"question": "Does every function in this file have a docstring?",
"answer": false
},
{
"point_num": 2,
"id": "every_function_under_20_lines",
"question": "Is every function in this file shorter than 20 lines?",
"answer": tru... | 3 | 01/01.py | stevenpclark/aoc2019 |
"""Generate emojione data."""
import os
import json
current_dir = os.path.dirname(os.path.abspath(__file__))
LICENSE = """
MIT license.
Copyright (c) http://www.emojione.com
"""
def get_unicode_alt(value):
"""Get alternate Unicode form or return the original."""
return value['unicode_alt']
def parse(repo, tag):
"""Save test files."""
# Load emoji database
with open(os.path.join(current_dir, 'tags', repo, repo, 'emoji.json'), 'r') as f:
emojis = json.loads(f.read())
emoji_db = {}
shortnames = set()
aliases = {}
for v in emojis.values():
shortnames.add(v['shortname'])
emoji_db[v['shortname']] = {
'name': v['name'],
'unicode': v['unicode'],
'category': v['category']
}
alt = get_unicode_alt(v)
if alt:
emoji_db[v['shortname']]['unicode_alt'] = alt
for alias in v['aliases']:
aliases[alias] = v['shortname']
# Save test files
for test in ('png', 'png sprite', 'svg', 'svg sprite', 'entities', 'long title', 'no title'):
with open('../tests/extensions/emoji/emoji1 (%s).txt' % test, 'w') as f:
f.write('# Emojis\n')
count = 0
for emoji in sorted(shortnames):
f.write(''.join('%s %s<br>\n' % (emoji[1:-1], emoji)))
count += 1
if test != 'png' and count == 10:
break
# Write out essential info
with open('../pymdownx/emoji1_db.py', 'w') as f:
# Dump emoji db to file and strip out PY2 unicode specifiers
f.write('"""Emojione autogen.\n\nGenerated from emojione source. Do not edit by hand.\n%s"""\n' % LICENSE)
f.write('version = "%s"\n' % tag)
f.write('name = "emojione"\n')
f.write('emoji = %s\n' % json.dumps(emoji_db, sort_keys=True, indent=4, separators=(',', ': ')))
f.write('aliases = %s\n' % json.dumps(aliases, sort_keys=True, indent=4, separators=(',', ': ')))
| [
{
"point_num": 1,
"id": "every_function_has_docstring",
"question": "Does every function in this file have a docstring?",
"answer": true
},
{
"point_num": 2,
"id": "no_function_exceeds_5_params",
"question": "Does every function in this file take 5 or fewer parameters (excluding self... | 3 | tools/gen_emoji1.py | rkeulemans/pymdown-extensions |
import sys
import time
from ctypes import windll, wintypes, create_unicode_buffer, byref
def file1(n):
for i in range(n):
try:
with open(r'U:\bin\RunRoot\Debug64\dbxsdk\dbxsdkrereg.wixout:aaaa','r') as f:
a = f.read()
pass
except:pass
CreateFileW = windll.kernel32.CreateFileW
CloseHandle = windll.kernel32.CloseHandle
def file3(n):
for i in range(n):
a = CreateFileW(
r'U:\bin\RunRoot\Debug64\dbxsdk\dbxsdkrereg.wixout:aaaa',
1, # FILE_APPEND_DATA | SYNCHRONIZE
1, # FILE_SHARE_READ | FILE_SHARE_WRITE
None,
3, # OPEN_ALWAYS
128, # FILE_ATTRIBUTE_NORMAL
None)
def main():
start = time.time()
file3(15000)
end = time.time()
print(end - start)
if __name__ == '__main__':
sys.exit(main())
| [
{
"point_num": 1,
"id": "all_function_names_snake_case",
"question": "Are all function names in this file written in snake_case?",
"answer": true
},
{
"point_num": 2,
"id": "every_function_has_docstring",
"question": "Does every function in this file have a docstring?",
"answer":... | 3 | clcache/testfile.py | univert/aclcache |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.