source string | points list | n_points int64 | path string | repo string |
|---|---|---|---|---|
#@+leo-ver=5-thin
#@+node:tbrown.20171029210211.1: * @file ../plugins/editpane/clicky_splitter.py
#@@language python
"""
clicky_splitter.py - a QSplitter which allows flipping / rotating of
content by clicking on the splitter handle
Terry Brown, TerryNBrown@gmail.com, Sun Oct 29 21:02:25 2017
"""
from leo.core.leoQt import QtCore, QtWidgets
class ClickySplitterHandle(QtWidgets.QSplitterHandle):
    """Splitter handle that tells its parent splitter when it is clicked."""

    def mouseReleaseEvent(self, event):
        """Treat a non-left-button release as a click on the handle.

        Left-button releases are ignored because they usually end a
        pane-resize drag rather than a deliberate click.

        Args:
            event (QMouseEvent): mouse event
        """
        if event.button() != QtCore.Qt.LeftButton:
            self.splitter().flip_spin()
class ClickySplitter(QtWidgets.QSplitter):
    """Splitter that alternately rotates and flips when its handle is clicked."""

    def __init__(self, *args, **kwargs):
        """Start in the 'spin' state: the first handle click rotates."""
        super().__init__(*args, **kwargs)
        self._click_state = 'spin'

    def createHandle(self):
        """Return our click-aware handle instead of the default one."""
        return ClickySplitterHandle(self.orientation(), self)

    def flip_spin(self):
        """Alternate between swapping the two panes and rotating orientation."""
        if self._click_state == 'flip':
            # Move the second widget in front of the first, i.e. swap panes.
            self.insertWidget(0, self.widget(1))
            self._click_state = 'spin'
        else:
            new_orientation = (
                QtCore.Qt.Horizontal
                if self.orientation() == QtCore.Qt.Vertical
                else QtCore.Qt.Vertical
            )
            self.setOrientation(new_orientation)
            self._click_state = 'flip'
#@-leo
| [
{
"point_num": 1,
"id": "every_class_has_docstring",
"question": "Does every class in this file have a docstring?",
"answer": true
},
{
"point_num": 2,
"id": "every_function_has_docstring",
"question": "Does every function in this file have a docstring?",
"answer": true
},
{
... | 3 | leo/plugins/editpane/clicky_splitter.py | leonidborisenko/leo-editor |
"""Main library."""
from typing import Optional
# Import module
import jpype
# Enable Java imports
import jpype.imports
# Pull in types
from jpype.types import *
import importlib
class JavaLib:
    """Robot Framework dynamic-API bridge to a Java keyword library via JPype.

    Note: the docstring was previously placed after ROBOT_LIBRARY_SCOPE,
    where it was a bare string statement, not the class docstring.
    """

    ROBOT_LIBRARY_SCOPE = "GLOBAL"

    def __init__(
            self,
            library: str,
            classpath: Optional[str] = None):
        """Start the JVM if necessary and instantiate the Java library.

        :param library: importable name of the Java library class
        :param classpath: optional ':'-separated classpath for the JVM
        """
        if jpype.isJVMStarted():
            print("JVM running")
        else:
            # Guard: classpath may be None; previously this crashed on .split().
            jpype.startJVM(classpath=classpath.split(":") if classpath else None)
        JavaLibrary = importlib.import_module(library)
        self.javaLibrary = JavaLibrary()

    def get_keyword_names(self):
        """Return the Java library's keyword names as Python strings."""
        # AnnotationLibrary returns Java's ArrayList of Java Strings; convert.
        return [str(keyword) for keyword in self.javaLibrary.getKeywordNames()]

    def run_keyword(self, keyword: str, args, kwargs):
        """Run *keyword* with positional *args* and named *kwargs* on the Java side."""
        import java
        return self.javaLibrary.runKeyword(
            JString(keyword), java.util.ArrayList(args), java.util.HashMap(kwargs))

    def get_keyword_documentation(self, keyword: str):
        """Return the keyword's documentation, or '' when it cannot be fetched."""
        try:
            # AnnotationLibrary returns java.lang.String
            return str(self.javaLibrary.getKeywordDocumentation(keyword))
        except Exception:  # narrowed from a bare except that hid real errors
            return ""
| [
{
"point_num": 1,
"id": "has_multiple_inheritance",
"question": "Does any class in this file use multiple inheritance?",
"answer": false
},
{
"point_num": 2,
"id": "more_functions_than_classes",
"question": "Does this file define more functions than classes?",
"answer": true
},... | 3 | jpype/JavaLib.py | Hi-Fi/robotframework-after-jython |
from Frame.baseFunctions import *
from Frame.gui.Gui import Gui
class Button(Gui):
    """Clickable GUI element that invokes a callback when pressed."""

    def __init__(self, function, functionArgs, *args, **kwargs):
        """Create the button.

        :param function: callable invoked when the button is clicked
        :param functionArgs: argument(s) for the callback; a tuple is
            unpacked, any other value is passed as one argument
        """
        super().__init__(*args, **kwargs)
        output("Button: Creating " + self.text + " button...", "debug")
        self.wasPressed = False
        self.function = function
        self.functionArgs = functionArgs
        self.oncePressMouse = True

    def update(self):
        """Detect a fresh press edge and run the callback or the click animation."""
        super().update()
        output("Button: Creating a click animastion if pressed...", "complete")
        # Only trigger on the press edge: pressed now but not last frame.
        if self.pressed and not self.wasPressed and self.rightCoords:
            if self.noAnimations:
                # isinstance instead of type(...) == tuple (handles subclasses).
                if isinstance(self.functionArgs, tuple):
                    self.function(*self.functionArgs)
                else:
                    self.function(self.functionArgs)
            else:
                self.window.guiHandler.createClickAnim(self)
        self.wasPressed = self.pressed

    def render(self):
        """Draw the button and, if present, the in-flight click animation."""
        super().render()
        # `is not None` instead of `!= None` (identity test for None).
        if self.window.guiHandler.clickAnim is not None:
            self.window.guiHandler.clickAnim.render()
| [
{
"point_num": 1,
"id": "every_function_has_docstring",
"question": "Does every function in this file have a docstring?",
"answer": false
},
{
"point_num": 2,
"id": "every_function_under_20_lines",
"question": "Is every function in this file shorter than 20 lines?",
"answer": tru... | 3 | Frame/gui/Button.py | PyRectangle/GreyRectangle |
# Auto-generated IronPython/.NET stub: bodies are placeholders for type checkers.
class DataGridViewCellMouseEventArgs(MouseEventArgs):
    """
    Provides data for mouse events raised by a System.Windows.Forms.DataGridView whenever the mouse is moved within a System.Windows.Forms.DataGridViewCell.
    DataGridViewCellMouseEventArgs(columnIndex: int,rowIndex: int,localX: int,localY: int,e: MouseEventArgs)
    """
    def Instance(self):
        """ This function has been arbitrarily put into the stubs"""
        return DataGridViewCellMouseEventArgs()
    @staticmethod
    def __new__(self,columnIndex,rowIndex,localX,localY,e):
        """ __new__(cls: type,columnIndex: int,rowIndex: int,localX: int,localY: int,e: MouseEventArgs) """
        # NOTE(review): the first parameter of __new__ is conventionally 'cls';
        # left unchanged because this file is generated tooling output.
        pass
    ColumnIndex=property(lambda self: object(),lambda self,v: None,lambda self: None)
    """Gets the zero-based column index of the cell.
    Get: ColumnIndex(self: DataGridViewCellMouseEventArgs) -> int
    """
    RowIndex=property(lambda self: object(),lambda self,v: None,lambda self: None)
    """Gets the zero-based row index of the cell.
    Get: RowIndex(self: DataGridViewCellMouseEventArgs) -> int
    """
| [
{
"point_num": 1,
"id": "every_function_has_docstring",
"question": "Does every function in this file have a docstring?",
"answer": true
},
{
"point_num": 2,
"id": "no_function_exceeds_5_params",
"question": "Does every function in this file take 5 or fewer parameters (excluding self... | 3 | release/stubs.min/System/Windows/Forms/__init___parts/DataGridViewCellMouseEventArgs.py | tranconbv/ironpython-stubs |
# Copyright (C) 2018-2022 Intel Corporation
# SPDX-License-Identifier: Apache-2.0
import numpy as np
from openvino.tools.mo.front.common.partial_infer.utils import int64_array
from openvino.tools.mo.graph.graph import Graph
from openvino.tools.mo.ops.op import Op
class Reverse(Op):
    """Model Optimizer 'Reverse' operation: flip a tensor along one axis.

    Port 0 carries the data tensor; port 1 optionally carries a
    single-element axis value. One output of the same shape as the input.
    """
    op = 'Reverse'
    def __init__(self, graph: Graph, attrs: dict):
        mandatory_props = {
            'type': None,
            'axis': None,
            'op': self.op,
            'in_ports_count': 2,
            'out_ports_count': 1,
            'infer': self.infer,
        }
        super().__init__(graph, mandatory_props, attrs)
    @staticmethod
    def infer(node):
        """Infer output shape/value; folds the axis input into the 'axis' attr."""
        input_shape = node.in_port(0).data.get_shape()
        input_value = node.in_port(0).data.get_value()
        assert input_shape is not None
        if not node.has_valid('axis'):
            # Axis not stored as an attribute yet: read it from the scalar
            # second input, then drop that input edge.
            assert 1 in node.in_nodes()
            assert node.in_node(1).has_valid('value')
            assert node.in_node(1).value.size == 1
            node['axis'] = node.in_node(1).value.item()
            node.in_port(1).disconnect()
        assert node.has_valid('axis')
        assert len(node.out_nodes()) == 1
        if input_value is not None:
            # Constant folding: the value is known, so flip it directly.
            node.out_port(0).data.set_value(np.flip(input_value, node.axis))
        else:
            node.out_port(0).data.set_shape(input_shape)
| [
{
"point_num": 1,
"id": "no_function_exceeds_5_params",
"question": "Does every function in this file take 5 or fewer parameters (excluding self/cls)?",
"answer": true
},
{
"point_num": 2,
"id": "more_functions_than_classes",
"question": "Does this file define more functions than cla... | 3 | tools/mo/openvino/tools/mo/ops/Reverse.py | ieliz/openvino |
import webbrowser
from googletrans import Translator
class Network:
    """Static helpers for opening web searches and translating text."""
    # NOTE(review): the keyword arguments below are interpolated into URLs
    # without urllib.parse.quote_plus; spaces/special characters may produce
    # malformed URLs -- consider encoding them.
    @staticmethod
    def Download(download_keyword):
        """Open the Steam store search page for *download_keyword*."""
        webbrowser.open(f"https://store.steampowered.com/search/?term={download_keyword}")
    @staticmethod
    def Translate(word, when_lang):
        """Translate *word* into language code *when_lang* via googletrans."""
        translator = Translator()
        return translator.translate(word, dest=when_lang).text
    @staticmethod
    def youtubeSearch(search_keyword):
        """Open YouTube search results for *search_keyword* in the browser."""
        webbrowser.open(f"https://www.youtube.com/results?search_query={search_keyword}")
    @staticmethod
    def netSearch(search_keyword):
        """Open a Google search for *search_keyword* in the browser."""
        webbrowser.open(f"https://www.google.com/search?q={search_keyword}")
| [
{
"point_num": 1,
"id": "all_function_names_snake_case",
"question": "Are all function names in this file written in snake_case?",
"answer": false
},
{
"point_num": 2,
"id": "all_params_annotated",
"question": "Does every function parameter in this file have a type annotation (exclud... | 3 | Network/Network.py | ibrahimAlbyrk/PythonProjects |
import predictive_punter
import pytest
@pytest.fixture(scope='module')
def seed_command(database_uri):
    """Run the seed command once per test module against the test database."""
    predictive_punter.SeedCommand.main(['-d', database_uri, '2016-2-1', '2016-2-2'])
def test_samples(database, seed_command):
    """The seed command should populate the database with the expected number of samples"""
    assert database['samples'].count() == database['runners'].count({'is_scratched': False})
def test_values(database, seed_command):
    """The seed command should set normalized query data values for all samples"""
    for sample in database['samples'].find():
        assert sample['normalized_query_data'] is not None
| [
{
"point_num": 1,
"id": "has_nested_function_def",
"question": "Does this file contain any function defined inside another function?",
"answer": false
},
{
"point_num": 2,
"id": "all_params_annotated",
"question": "Does every function parameter in this file have a type annotation (ex... | 3 | tests/seed_test.py | justjasongreen/predictive_punter |
#h. Send data to Flask template (Jinja2)
from flask import Flask, render_template, request
app = Flask(__name__)
@app.route('/')
def student():
    """Serve the form page."""
    return render_template('student.html')
@app.route('/result', methods=['POST', 'GET'])
def result():
    """Render the submitted form data on the result page (POST only).

    NOTE(review): a GET request falls through and returns None, which Flask
    turns into a 500 error -- confirm whether GET should redirect instead.
    """
    if request.method == 'POST':
        # Renamed local: it previously shadowed this view function's own name.
        form_data = request.form
        return render_template("result.html", result=form_data)
if __name__ == '__main__':
app.run(debug=True)
| [
{
"point_num": 1,
"id": "every_function_has_docstring",
"question": "Does every function in this file have a docstring?",
"answer": false
},
{
"point_num": 2,
"id": "all_return_types_annotated",
"question": "Does every function in this file have a return type annotation?",
"answe... | 3 | item_h/server.py | augustocarrlos10/Flask_TecWeb |
from core.advbase import *
from module.bleed import Bleed
from slot.a import *
def module():
    """Entry point used by the simulator to obtain this adventurer's class."""
    return Botan
class Botan(Adv):
    """Simulator configuration for the adventurer Botan."""
    a3 = [('prep',1.00), ('scharge_all', 0.05)]
    conf = {}
    conf['slots.a'] = RR() + United_by_One_Vision()
    # Action control list: skill priority evaluated each simulation step.
    conf['acl'] = """
`dragon.act('c3 s end')
`s3, not self.s3_buff and prep
`s4
`s2
`s1, cancel
"""
    coab = ['Blade','Wand','Dagger']
    share = ['Ranzal']
    def d_coabs(self):
        """Swap the Dagger co-ability for Bow when the sim applies afflictions."""
        if self.sim_afflict:
            self.coab = ['Blade','Wand','Bow']
    def init(self):
        """Pick team-wide vs self-only buffs based on the sim condition."""
        self.buff_class = Teambuff if self.condition('buff all team') else Selfbuff
    def prerun(self):
        """Reset the shared bleed tracker before the run starts."""
        self.bleed = Bleed("g_bleed",0).reset()
    def s1_proc(self, e):
        """Skill 1: apply a bleed with modifier 1.46."""
        Bleed(e.name, 1.46).on()
    def s2_proc(self, e):
        """Skill 2: +10% crit chance buff for 15 seconds."""
        self.buff_class(e.name,0.1,15,'crit','chance').on()
if __name__ == '__main__':
    # Explicit import: `sys` previously relied on leaking from a star import.
    import sys
    from core.simulate import test_with_argv
    test_with_argv(None, *sys.argv)
{
"point_num": 1,
"id": "has_multiple_inheritance",
"question": "Does any class in this file use multiple inheritance?",
"answer": false
},
{
"point_num": 2,
"id": "has_nested_function_def",
"question": "Does this file contain any function defined inside another function?",
"answ... | 3 | adv/botan.py | slushiedee/dl |
#!/usr/bin/env python3
# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved.
from .video_record import VideoRecord
from datetime import timedelta
import time
def timestamp_to_sec(timestamp):
    """Convert an 'HH:MM:SS.ff' timestamp string to seconds as a float.

    The part after the final '.' is divided by 100, i.e. it is treated as
    centiseconds (two digits), matching the annotation format.
    """
    parsed = time.strptime(timestamp, '%H:%M:%S.%f')
    whole_seconds = timedelta(hours=parsed.tm_hour,
                              minutes=parsed.tm_min,
                              seconds=parsed.tm_sec).total_seconds()
    fraction = float(timestamp.split('.')[-1]) / 100
    return float(whole_seconds) + fraction
class EpicKitchensVideoRecord(VideoRecord):
    """One EPIC-Kitchens annotation row exposed through the VideoRecord API."""
    def __init__(self, tup):
        # tup[0]: record id used as the index; tup[1]: the annotation row
        # (a mapping with the fields read by the properties below).
        self._index = str(tup[0])
        self._series = tup[1]
    @property
    def participant(self):
        """Participant id of the recording."""
        return self._series['participant_id']
    @property
    def untrimmed_video_name(self):
        """Id of the full source video this segment belongs to."""
        return self._series['video_id']
    @property
    def start_frame(self):
        """First frame of the segment (timestamp converted via fps)."""
        return int(round(timestamp_to_sec(self._series['start_timestamp']) * self.fps))
    @property
    def end_frame(self):
        """Last frame of the segment (timestamp converted via fps)."""
        return int(round(timestamp_to_sec(self._series['stop_timestamp']) * self.fps))
    @property
    def fps(self):
        # NOTE(review): a 3-character video-number suffix selects 50 fps,
        # otherwise 60 fps -- presumably distinguishing dataset versions;
        # confirm against the dataset documentation.
        is_100 = len(self.untrimmed_video_name.split('_')[1]) == 3
        return 50 if is_100 else 60
    @property
    def num_frames(self):
        """Segment length in frames."""
        return self.end_frame - self.start_frame
    @property
    def label(self):
        """Verb/noun class labels; -1 when the row carries no class field."""
        return {'verb': self._series['verb_class'] if 'verb_class' in self._series else -1,
                'noun': self._series['noun_class'] if 'noun_class' in self._series else -1}
    @property
    def metadata(self):
        """Auxiliary info: the narration id of this record."""
        return {'narration_id': self._index}
{
"point_num": 1,
"id": "has_multiple_inheritance",
"question": "Does any class in this file use multiple inheritance?",
"answer": false
},
{
"point_num": 2,
"id": "has_nested_function_def",
"question": "Does this file contain any function defined inside another function?",
"answ... | 3 | slowfast/datasets/epickitchens_record.py | dylan-campbell/Motionformer |
__author__ = 'patras'
from domain_searchAndRescue import *
from timer import DURATION
from state import state
def GetCostOfMove(r, l1, l2, dist):
    """Cost of moving robot *r* from *l1* to *l2*: simply the distance.

    The robot and location arguments are accepted for interface
    compatibility with the duration tables but are not used.
    """
    return dist
DURATION.TIME = {
'giveSupportToPerson': 15,
'clearLocation': 5,
'inspectPerson': 20,
'moveEuclidean': GetCostOfMove,
'moveCurved': GetCostOfMove,
'moveManhattan': GetCostOfMove,
'fly': 15,
'inspectLocation': 5,
'transfer': 2,
'replenishSupplies': 4,
'captureImage': 2,
'changeAltitude': 3,
'deadEnd': 1,
}
DURATION.COUNTER = {
'giveSupportToPerson': 15,
'clearLocation': 5,
'inspectPerson': 20,
'moveEuclidean': GetCostOfMove,
'moveCurved': GetCostOfMove,
'moveManhattan': GetCostOfMove,
'fly': 15,
'inspectLocation': 5,
'transfer': 2,
'replenishSupplies': 4,
'captureImage': 2,
'changeAltitude': 3,
'deadEnd': 1,
}
rv.WHEELEDROBOTS = ['w1', 'w2']
rv.DRONES = ['a1']
rv.OBSTACLES = { (24, 21)}
def ResetState():
    """Reset the shared simulator state to this problem's initial configuration."""
    # Starting coordinates for the wheeled robots, the person, and the drone.
    state.loc = {'w1': (24,19), 'w2': (23,29), 'p1': (12,21), 'a1': (24,10)}
    state.hasMedicine = {'a1': 0, 'w1': 0, 'w2': 0}
    state.robotType = {'w1': 'wheeled', 'a1': 'uav', 'w2': 'wheeled'}
    # UNK/OK are provided by the domain's star import.
    state.status = {'w1': 'free', 'w2': 'free', 'a1': UNK, 'p1': UNK, (12,21): UNK}
    state.altitude = {'a1': 'high'}
    state.currentImage = {'a1': None}
    state.realStatus = {'w1': 'OK', 'p1': 'OK', 'w2': 'OK', 'a1': OK, (12, 21): 'hasDebri'}
    state.realPerson = {(12,21): 'p1'}
    state.newRobot = {1: None}
    state.weather = {(12,21): "rainy"}
tasks = {
2: [['survey', 'a1', (12,21)]]
}
eventsEnv = {
} | [
{
"point_num": 1,
"id": "every_function_under_20_lines",
"question": "Is every function in this file shorter than 20 lines?",
"answer": true
},
{
"point_num": 2,
"id": "has_nested_function_def",
"question": "Does this file contain any function defined inside another function?",
"... | 3 | problems/SR/auto/problem33_SR.py | sunandita/ICAPS_Summer_School_RAE_2020 |
# coding=utf-8
# Copyright 2022 The Google Research Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""VisionDataset that allows user to get example indices."""
import torchvision
class VisionDatasetWithIndices(torchvision.datasets.vision.VisionDataset):
    """VisionDataset wrapper whose items also carry their example index.

    Wraps an existing dataset so each item is a triple
    (data, targets, indices) instead of (data, targets). The index lets
    callers track per-example behavior, for instance to find training
    examples that are learned faster than others.
    """

    def __init__(self, dataset):
        super().__init__(None)
        self.dataset = dataset

    def __getitem__(self, index):
        sample, target = self.dataset[index]
        return sample, target, index

    def __len__(self):
        return len(self.dataset)
| [
{
"point_num": 1,
"id": "has_multiple_inheritance",
"question": "Does any class in this file use multiple inheritance?",
"answer": false
},
{
"point_num": 2,
"id": "all_function_names_snake_case",
"question": "Are all function names in this file written in snake_case?",
"answer":... | 3 | coherent_gradients/weak_and_strong/datasets/dataset_with_indices.py | pedersor/google-research |
"""
The Ramer-Douglas-Peucker algorithm roughly ported from the pseudo-code provided
by http://en.wikipedia.org/wiki/Ramer-Douglas-Peucker_algorithm
"""
from math import sqrt
def distance(a, b):
    """Return the Euclidean distance between 2-D points *a* and *b*."""
    dx = a[0] - b[0]
    dy = a[1] - b[1]
    return sqrt(dx ** 2 + dy ** 2)
def point_line_distance(point, start, end):
    """Perpendicular distance from *point* to the line through *start*/*end*.

    Degenerate case: when start == end the segment is a point, so fall
    back to the plain point-to-point distance.
    """
    if start == end:
        return distance(point, start)
    # |cross product| / |segment length| gives the perpendicular distance.
    numerator = abs(
        (end[0] - start[0]) * (start[1] - point[1])
        - (start[0] - point[0]) * (end[1] - start[1])
    )
    denominator = sqrt(
        (end[0] - start[0]) ** 2 + (end[1] - start[1]) ** 2
    )
    return numerator / denominator
def rdp(points, epsilon):
    """
    Reduces a series of points to a simplified version that loses detail, but
    maintains the general shape of the series.
    """
    # Find the interior point farthest from the chord joining the endpoints.
    farthest_index = 0
    farthest_dist = 0.0
    for i in range(1, len(points) - 1):
        d = point_line_distance(points[i], points[0], points[-1])
        if d > farthest_dist:
            farthest_index = i
            farthest_dist = d
    if farthest_dist < epsilon:
        # Everything is close enough to the chord: keep only the endpoints.
        return [points[0], points[-1]]
    # Otherwise split at the farthest point and simplify both halves,
    # dropping the duplicated split point from the left half.
    left = rdp(points[:farthest_index + 1], epsilon)
    right = rdp(points[farthest_index:], epsilon)
    return left[:-1] + right
{
"point_num": 1,
"id": "all_function_names_snake_case",
"question": "Are all function names in this file written in snake_case?",
"answer": true
},
{
"point_num": 2,
"id": "no_function_exceeds_5_params",
"question": "Does every function in this file take 5 or fewer parameters (exclu... | 3 | src/api/util/RDP.py | hostbaby/RedisLive |
import numpy as np
import cv2
def load_batch(ls,od,data_path,batch_size,which_batch):
    """Load one batch of 4-channel (red/green/blue/yellow) images.

    :param ls: list of image-id stems
    :param od: index order (permutation) into *ls*
    :param data_path: directory prefix holding the per-channel PNG files
    :param batch_size: number of images per batch
    :param which_batch: zero-based batch number
    :return: numpy array of the stacked batch images
    """
    image_list=[]
    for i in range(which_batch*batch_size,(which_batch+1)*batch_size):
        image=[]
        # Each sample is four grayscale PNGs, one per stain channel.
        image.append(cv2.imread(data_path+ls[od[i]]+'_red.png',0))
        image.append(cv2.imread(data_path+ls[od[i]]+'_green.png',0))
        image.append(cv2.imread(data_path+ls[od[i]]+'_blue.png',0))
        image.append(cv2.imread(data_path+ls[od[i]]+'_yellow.png',0))
        # Transpose so the channel axis moves to the end.
        image=np.asarray(image).T
        image_list.append(image)
    image_list=np.asarray(image_list)
    return image_list
def normalize(image_list):
    """Linearly scale array values into [-1, 1].

    The midpoint of the data range maps to 0 and the extremes to -1/+1.
    A constant-valued input divides by zero (no guard is added, so callers
    see NumPy's usual warning/NaN result).

    :param image_list: numpy array of pixel values
    :return: normalized array of the same shape
    """
    # Vectorized .max()/.min() instead of the Python-level
    # max(arr.flatten()) / min(arr.flatten()), which looped per element.
    ma = image_list.max()
    mi = image_list.min()
    mean = float((ma + mi) / 2.0)
    output = (image_list - mean) / (ma - mean)
    return output
| [
{
"point_num": 1,
"id": "all_params_annotated",
"question": "Does every function parameter in this file have a type annotation (excluding self/cls)?",
"answer": false
},
{
"point_num": 2,
"id": "every_function_has_docstring",
"question": "Does every function in this file have a docst... | 3 | code/resnet_approach/prepare.py | liuchangtai1996/MLIP |
# Harmonize functionality between the different OS implementations
import errno
def err_bind_inuse(err):
    """True when *err* is the 'address already in use' errno."""
    return err == errno.EADDRINUSE
def err_conn_refused(errex):
    """True when the exception *errex* indicates a refused connection."""
    if errex.errno in (errno.ECONNREFUSED, errno.EPIPE):
        return True
    # Windows: 10057 == WSAENOTCONN
    return getattr(errex, 'winerror', None) == 10057
def err_send_inprogress(err):
    """True when a send is still in progress / would block."""
    return err in (errno.EINPROGRESS, errno.EAGAIN)
def err_send_connrefused(errex):
    """Same classification as err_conn_refused, applied to send failures."""
    return err_conn_refused(errex)
def err_recv_retry(err):
    """True when a recv should simply be retried (EAGAIN)."""
    return err == errno.EAGAIN
def err_recv_connreset(errex):
    """True when the exception *errex* means the peer reset the connection."""
    if errex.errno in (errno.ECONNRESET, errno.EPIPE):
        return True
    # Windows: 10053 == WSAECONNABORTED
    return getattr(errex, 'winerror', None) == 10053
def err_send_connreset(errex):
    """Same classification as err_recv_connreset, applied to send failures."""
    return err_recv_connreset(errex)
def err_select_retry(err):
    """True when a failed select() should just be retried."""
    return err in (errno.EINVAL, errno.EINTR)
def err_bad_fileno(err):
    """True when *err* is EBADF (invalid/closed file descriptor)."""
    return err == errno.EBADF
def err_too_many_open_sockets(errex):
    """True when the process hit its open-file-descriptor limit (EMFILE)."""
    return errex.errno == errno.EMFILE
try:
    # Access these to see if the exist
    errno.WSAEINVAL  # type: ignore
    errno.WSAEWOULDBLOCK  # type: ignore
    # They exist, so use them
    def err_inprogress(err):
        """True when a connect is in progress (Windows variant)."""
        return err in [errno.EINPROGRESS,
                       errno.WSAEINVAL,
                       errno.WSAEWOULDBLOCK]
    def err_recv_inprogress(err):
        """True when a recv would block (Windows variant)."""
        return err in [errno.EAGAIN, errno.EWOULDBLOCK,
                       errno.WSAEWOULDBLOCK]
except Exception:
    # The above constants don't exist; use Linux standards
    def err_inprogress(err):
        """True when a connect is in progress (POSIX variant)."""
        return err == errno.EINPROGRESS
    def err_recv_inprogress(err):
        """True when a recv would block (POSIX variant)."""
        return err in [errno.EAGAIN, errno.EWOULDBLOCK]
| [
{
"point_num": 1,
"id": "every_function_has_docstring",
"question": "Does every function in this file have a docstring?",
"answer": false
},
{
"point_num": 2,
"id": "no_function_exceeds_5_params",
"question": "Does every function in this file take 5 or fewer parameters (excluding sel... | 3 | thespian/system/transport/errmgmt.py | aytchell/Thespian |
import aiohttp
from flask import Flask
from aio_executor import run_with_asyncio
app = Flask(__name__)
async def get_random_quote():
    """Fetch one random quote from the quotable.io API as 'content (author)'."""
    async with aiohttp.ClientSession() as session:
        async with session.get('https://api.quotable.io/random') as response:
            quote = await response.json()
            return f'{quote["content"]} ({quote["author"]})'
@app.route('/')
@run_with_asyncio
async def index():
    """Serve a random quote; the decorator bridges Flask to asyncio."""
    return await get_random_quote()
if __name__ == '__main__':
app.run()
| [
{
"point_num": 1,
"id": "every_function_under_20_lines",
"question": "Is every function in this file shorter than 20 lines?",
"answer": true
},
{
"point_num": 2,
"id": "has_nested_function_def",
"question": "Does this file contain any function defined inside another function?",
"... | 3 | examples/quotes.py | miguelgrinberg/aio-executor |
# encoding=utf-8
"""
@desc: 将自然语言转为SPARQL查询语句
"""
from kgqa.KB_query import question_drug_template
from kgqa.KB_query import word_tagging
class Question2Sparql:
    """Convert a natural-language question into a SPARQL query string."""
    def __init__(self, dict_paths):
        # Tagger loads the external user dictionaries (disease/drug/symptom names).
        self.tw = word_tagging.Tagger(dict_paths)
        self.rules = question_drug_template.rules
    def get_sparql(self, question):
        """
        Semantic parsing: find the matching question template and return
        the corresponding SPARQL query (or None when nothing matches).
        :param question: natural-language question text
        :return: SPARQL query string or None
        """
        word_objects = self.tw.get_word_objects(question)
        queries_dict = dict()
        for rule in self.rules:
            #print(rule)
            # word_objects is a list of objects each holding a word and its POS tag.
            query, num = rule.apply(word_objects)
            if query is not None:
                queries_dict[num] = query
        if len(queries_dict) == 0:
            return None
        elif len(queries_dict) == 1:
            return list(queries_dict.values())[0]
        else:
            # TODO multiple templates matched; intended to return the query that
            # matches the most keywords.  NOTE(review): sorting by value and
            # taking the first element may not achieve that -- verify.
            sorted_dict = sorted(queries_dict.items(), key=lambda item: item[1])
            return sorted_dict[0][1]
if __name__ == '__main__':
q2s = Question2Sparql(['./external_dict/jibing_pos_name.txt', './external_dict/drug_pos_name.txt','./external_dict/symptom_pos.txt'])
#question = '喉插管损伤有什么症状?'
#question = '马来酸罗格列酮片的批准文号是什么?'
#question = '怎么预防不完全性肠梗阻?'
question = '我出现喉痒咳嗽,应该得了什么病?'
my_query = q2s.get_sparql(question.encode('utf-8'))
print(my_query)
| [
{
"point_num": 1,
"id": "has_nested_function_def",
"question": "Does this file contain any function defined inside another function?",
"answer": false
},
{
"point_num": 2,
"id": "has_multiple_inheritance",
"question": "Does any class in this file use multiple inheritance?",
"answ... | 3 | kgqa/KB_query/question2sparql.py | couldandblow/Intelligent-QA-in-medicine |
import seaborn as sns
import matplotlib.pyplot as plt
import numpy as np
import pylab
import csv
import random
import sys
import os
def get_data(path):
    """Read a training-result CSV and return its columns as parallel lists.

    The file must have integer columns: epoch, num_reach_target,
    num_hit_obs, num_out_of_range, num_moving.

    :param path: path to the CSV file
    :return: five lists (epoch, num_reach_target, num_hit_obs,
             num_out_of_range, num_moving)
    """
    epoch = []
    num_reach_target = []
    num_hit_obs = []
    num_out_of_range = []
    num_moving = []
    # Open read-only ('r' instead of 'r+'): this function never writes,
    # and 'r+' fails on files without write permission.
    with open(path, "r", encoding="utf-8") as csvfile:
        reader = csv.DictReader(csvfile)
        for row in reader:
            epoch.append(int(row['epoch']))
            num_reach_target.append(int(row['num_reach_target']))
            num_hit_obs.append(int(row['num_hit_obs']))
            num_out_of_range.append(int(row['num_out_of_range']))
            num_moving.append(int(row['num_moving']))
    return epoch, num_reach_target, num_hit_obs, num_out_of_range, num_moving
def draw(epoch, num_reach_target, num_hit_obs, num_out_of_range, num_moving, color_list, pic_name):
    """Plot the training curves and save the figure to *pic_name*.

    :param epoch: x-axis values (episode numbers)
    :param color_list: palette of at least 8 colors indexed below
    :param pic_name: output image path
    """
    plt.plot(epoch, num_reach_target, color=color_list[7])
    plt.plot(epoch, num_hit_obs, color=color_list[4])
    # Out-of-range curve intentionally disabled:
    # plt.plot(epoch, num_out_of_range, color=color_list[0])
    plt.plot(epoch, num_moving, color=color_list[3])
    plt.legend(['num_reach_target', 'num_hit_obs', 'num_moving'], loc='lower right', fontsize=10)  # 图例 -> legend
    plt.ylabel('number', fontsize=10)
    plt.xlabel('episodes', fontsize=10)
    plt.title('result', fontsize=10)
    plt.savefig(pic_name)
if __name__ == "__main__":
path = '../output/result.csv'
pic_name = '../output/result.png'
color_list = sns.hls_palette(8, l=.3, s=.8)
epoch, num_reach_target, num_hit_obs, num_out_of_range, num_moving = get_data(path)
draw(epoch, num_reach_target, num_hit_obs, num_out_of_range, num_moving, color_list, pic_name)
| [
{
"point_num": 1,
"id": "all_params_annotated",
"question": "Does every function parameter in this file have a type annotation (excluding self/cls)?",
"answer": false
},
{
"point_num": 2,
"id": "no_function_exceeds_5_params",
"question": "Does every function in this file take 5 or fe... | 3 | vis/draw_result.py | ChangQingAAS/MFDDPG_Path_Planning |
from synapse.syncmd import exec_cmd
from synapse.synapse_exceptions import ResourceException
from synapse.logger import logger
log = logger('yum-pkg')
def install(name):
    """Install package *name* via yum; raise ResourceException on failure."""
    ret = exec_cmd("/usr/bin/yum -q -y install {0}".format(name))
    if ret['returncode'] != 0:
        raise ResourceException(ret['stderr'])
def get_installed_packages():
    """Return all installed rpm package strings (one per `rpm -qa` line)."""
    ret = exec_cmd("/bin/rpm -qa")
    return ret['stdout'].split('\n')
def remove(name):
    """Remove package *name* via yum; raise ResourceException on failure."""
    ret = exec_cmd("/usr/bin/yum -q -y remove {0}".format(name))
    if ret['returncode'] != 0:
        raise ResourceException(ret['stderr'])
def update(name):
    """Update package *name*; raise ResourceException on failure or if absent."""
    # We need to check first if the package is installed. yum update of a
    # non-existing package has a returncode of 0. We need to raise an exception
    # if the package is not installed !
    inst = is_installed(name)
    ret = exec_cmd("/usr/bin/yum -q -y update {0}".format(name))
    if ret['returncode'] != 0 or not inst:
        raise ResourceException(ret['stderr'])
def is_installed(name):
    """Return True when the rpm database lists package *name*."""
    ret = exec_cmd("/bin/rpm -q {0}".format(name))
    return ret['returncode'] == 0
| [
{
"point_num": 1,
"id": "any_function_over_40_lines",
"question": "Is any function in this file longer than 40 lines?",
"answer": false
},
{
"point_num": 2,
"id": "all_function_names_snake_case",
"question": "Are all function names in this file written in snake_case?",
"answer": ... | 3 | synapse/resources/packages-plugin/yum-pkg.py | mrmuxl/synapse-agent |
import wx
from .misc.helpers import deg_to_rad, rad_to_deg
from .misc.vector import Vector
# Size of the turtle canvas. We assume no user will have a screen
# so big that the canvas will be bigger than this.
BITMAP_SIZE = Vector((2000, 1200))
# Center of the canvas.
origin = BITMAP_SIZE / 2.0
def to_my_angle(angle):
    """
    Transform between the reference frame that we prefer
    and the reference frame that wxPython prefers
    """
    # Negate (flip rotation direction), convert to degrees, offset half a turn.
    return rad_to_deg(-angle) - 180
def from_my_angle(angle):
    """
    Transform between the reference frame that we prefer
    and the reference frame that wxPython prefers
    """
    # Inverse of to_my_angle: back to radians with the same flip and offset.
    return deg_to_rad(-angle + 180)
def from_my_pos(pos):
    """
    Transform between the reference frame that we prefer
    and the reference frame that wxPython prefers
    """
    # Point reflection about the canvas center; this map is its own inverse,
    # which is why from_my_pos and to_my_pos share the same formula.
    return -pos + origin
def to_my_pos(pos):
    """
    Transform between the reference frame that we prefer
    and the reference frame that wxPython prefers
    """
    return -pos + origin
class Turtle:
    """
    A Turtle object defines a turtle by its attributes, such as
    position, orientation, color, etc. See source of __init__ for
    a complete list.
    """
    def __init__(self):
        """Create a turtle with the default pose and pen settings."""
        self.pos = Vector((0, 0))   # current position
        self.orientation = 180      # heading in degrees
        self.color = "red"          # pen color name
        self.width = 3              # pen width in pixels
        self.visible = True         # whether the turtle is drawn
        self.pen_down = True        # whether moving leaves a trail
        # the `clear` attribute is only made True momentarily when
        # the `clear()` function is called by the user to clear the screen.
        self.clear = False
        self.SPEED = 400.0  # Pixels per second
        self.ANGULAR_SPEED = 360.0  # Degrees per second
    def give_pen(self):
        """
        Gives a wxPython pen that corresponds to the color, width,
        and pen_downity of the Turtle instance.
        """
        return wx.Pen(self.color,
                      self.width,
                      wx.SOLID if self.pen_down else wx.TRANSPARENT)
| [
{
"point_num": 1,
"id": "no_function_exceeds_5_params",
"question": "Does every function in this file take 5 or fewer parameters (excluding self/cls)?",
"answer": true
},
{
"point_num": 2,
"id": "every_function_has_docstring",
"question": "Does every function in this file have a docs... | 3 | pythonturtle/my_turtle.py | Cleverect/PythonTurtle |
from minidoc import minidoc
from minidoc import tst
import argparse
from efdir import fs
parser = argparse.ArgumentParser()
parser.add_argument('-tst','--test_file', default="code.tst.py",help=".tst.py file name")
parser.add_argument('-codec','--codec', default="utf-8",help=".tst.py file codec")
parser.add_argument('-still','--still_frames', default="True",help="generate screen shot")
parser.add_argument('-rows','--rownums', default="30",help="screen height")
parser.add_argument('-dst','--dst_dir', default="./images",help="destination svg dir")
parser.add_argument('-title','--title', default="Usage",help="parent title")
parser.add_argument('-tbot','--title_bot', default="=",help="parent title bottom char")
parser.add_argument('-ebot','--entry_bot', default="-",help="entry title bottom char")
def boolize(s):
    """Parse a command-line string flag into a bool.

    Only the case-insensitive string "true" maps to True; every other
    value (including "false") maps to False.  The original if/elif/else
    had two branches returning False; collapsed to one expression.
    """
    return s.lower() == "true"
args = parser.parse_args()
still_frames = boolize(args.still_frames)
def main():
    """Generate SVG frames from the .tst.py file and write the .rst document."""
    kl,vl = tst.tst2kvlist(fn=args.test_file,codec=args.codec)
    minidoc.creat_svgs(kl,vl,still_frames=still_frames,rownums=int(args.rownums),dst_dir=args.dst_dir)
    rst_str = tst.creat_rst(kl,vl,title=args.title,title_bot=args.title_bot,entry_bot=args.entry_bot)
    fs.wfile(args.title+".rst",rst_str,codec=args.codec)
| [
{
"point_num": 1,
"id": "has_nested_function_def",
"question": "Does this file contain any function defined inside another function?",
"answer": false
},
{
"point_num": 2,
"id": "every_function_has_docstring",
"question": "Does every function in this file have a docstring?",
"ans... | 3 | minidoc/bin.py | ihgazni2/minidoc |
import pandas as pd
from tidyframe import nvl
def test_nvl_series():
    """Smoke test: nvl on a Series containing None and NaN."""
    # float('nan') instead of pd.np.NaN: the pd.np alias was removed in pandas 1.0.
    test_list = [0, 1, None, float('nan')]
    test_series = pd.Series(test_list)
    nvl(test_series, 10)
def test_nvl_list():
    """Smoke test: nvl on a plain list containing None and NaN."""
    # float('nan') instead of pd.np.NaN: the pd.np alias was removed in pandas 1.0.
    test_list = [0, 1, None, float('nan')]
    nvl(test_list, 10)
def test_nvl_int():
    """Smoke test: nvl with None and an int default."""
    nvl(None, 10)
def test_nvl_str():
    """Smoke test: nvl with None and a string default."""
    nvl(None, 'abc')
def test_nvl_int_v2():
    """Smoke test: nvl with a non-null first argument."""
    nvl(1, 10)
| [
{
"point_num": 1,
"id": "every_function_has_docstring",
"question": "Does every function in this file have a docstring?",
"answer": false
},
{
"point_num": 2,
"id": "all_function_names_snake_case",
"question": "Are all function names in this file written in snake_case?",
"answer"... | 3 | tests/test_nvl.py | Jhengsh/tidyframe |
from tkinter import *
root = Tk()
root.title("Listbox Demo")
def on_add():
    """Append the entry field's text as a new listbox row."""
    text = txtContent.get()
    listbox.insert(END, text)
def on_del():
    """Delete the selected rows; iterate in reverse so indices stay valid."""
    selections = listbox.curselection()
    for i in reversed(selections):
        listbox.delete(i)
def on_show():
    """Sum price*quantity over every listbox row and show it in the entry."""
    # items = [ l.split() for l in listbox.get(0, END) ]
    items = []
    for line in listbox.get(0, END):
        items.append(line.split())
    price = 0
    # Each row is whitespace-separated: "name price quantity".
    for name, p, n in items:
        price += int(p) * int(n)
    txtContent.delete(0, END)
    txtContent.insert(0, "Price = " + str(price))
btnAdd = Button(root, text = 'Add', command = on_add)
txtContent = Entry(root, width = 20)
btnDel = Button(root, text = 'Del', command = on_del)
btnShow = Button(root, text = 'Show', command = on_show)
listbox = Listbox(root, selectmode = EXTENDED)
listbox.grid(columnspan = 4)
listbox['width'] = 40
grids = [
(btnAdd, 0, 0),
(txtContent, 0, 1),
(btnDel, 0, 2),
(btnShow, 0, 3),
(listbox, 1, 0)
]
for obj, r, c in grids:
obj.grid(row = r, column = c)
for i in range(1, 10):
listbox.insert(END, (chr(65 + i - 1) * 3) + " " + str(int(80 * i ** 0.5)) + " " + str(i))
root.mainloop()
| [
{
"point_num": 1,
"id": "every_function_under_20_lines",
"question": "Is every function in this file shorter than 20 lines?",
"answer": true
},
{
"point_num": 2,
"id": "every_function_has_docstring",
"question": "Does every function in this file have a docstring?",
"answer": fals... | 3 | src/menu.py | Inndy/tkinter_samples |
from spInDP.Behavior import Behavior
import time
from spInDP.AnimationController import AnimationController
class PushBehavior(Behavior):
    """Spider behavior: joystick-driven push animation."""
    frameNr = 1                 # current animation frame counter
    lastZ = 0
    animationController = None
    remoteContext = None
    def __init__(self, spider):
        """Initialize and move the legs into a safe starting pose."""
        print("Initializing push (spider gap) behavior.")
        super(PushBehavior, self).__init__(spider)
        self.safeTransition()
    def update(self):
        """Advance one push frame when the joystick is deflected past 0.4."""
        jMagnitude = self.spider.remoteController.context.jMagnitude
        angleModifier = 1
        if(jMagnitude > 0.4):
            # Animation speed scales with how far the stick is pushed.
            speedModifier = jMagnitude * 2
            # NOTE(review): both branches of this if are identical; jX was
            # probably meant to select a direction -- confirm intended behavior.
            if self.spider.remoteController.context.jX > 0:
                time.sleep(self.spider.animationController.push(frameNr=self.frameNr, speedMod=speedModifier))
            else:
                time.sleep(self.spider.animationController.push(frameNr=self.frameNr, speedMod=speedModifier))
            self.frameNr += 1
        return
    def safeTransition(self):
        """Move the hip servos to fixed safe angles before animating."""
        for x in [2,14,17]:
            self.spider.servoController.move(servo=x,angle=-10, speed=100)
        for x in [5,8,11]:
            self.spider.servoController.move(servo=x,angle=10, speed=100)
{
"point_num": 1,
"id": "every_function_under_20_lines",
"question": "Is every function in this file shorter than 20 lines?",
"answer": true
},
{
"point_num": 2,
"id": "all_function_names_snake_case",
"question": "Are all function names in this file written in snake_case?",
"answ... | 3 | spInDP/PushBehavior.py | henkmollema/spInDP |
import jinja2
class SPMObject(object):
    """ Abstract Base Class for all SPM objects.
    Even though SPM objects are not Spire tasks (as some of them will modify
    in-place their file_dep, which is not compatible with doit's task
    semantics), they nonetheless include task-related properties: file_dep
    and targets. Subclasses will have to override the _get_file_dep and
    _get_targets functions to return the correct values.
    """
    def __init__(self, name):
        self.name = name
        # Per-instance template environment; the id() helper is exposed to
        # templates for building matlabbatch index expressions.
        self.environment = jinja2.Environment()
        self.environment.globals.update(id=__class__._get_id)
    def get_script(self, index):
        """Render this object's `template` attribute with *index* and every
        instance attribute available as a template variable."""
        template = self.environment.from_string(self.template)
        return template.render(index=index, **vars(self))
    @property
    def file_dep(self):
        # Task-style input files; subclasses override _get_file_dep.
        return self._get_file_dep()
    @property
    def targets(self):
        # Task-style output files; subclasses override _get_targets.
        return self._get_targets()
    @staticmethod
    def _get_id(index, name):
        """Build an SPM batch path, e.g. 'matlabbatch{3}.<name>'."""
        return "matlabbatch{"+str(index)+"}."+name
    def _get_file_dep(self):
        return []
    def _get_targets(self):
        return []
    def __getstate__(self):
        # Drop the jinja2 Environment before pickling -- presumably because
        # it is not picklable (confirm); it is rebuilt in __setstate__.
        state = self.__dict__.copy()
        del state["environment"]
        return state
    def __setstate__(self, state):
        self.__dict__.update(state)
        self.environment = jinja2.Environment()
        self.environment.globals.update(id=__class__._get_id)
| [
{
"point_num": 1,
"id": "every_function_has_docstring",
"question": "Does every function in this file have a docstring?",
"answer": false
},
{
"point_num": 2,
"id": "has_nested_function_def",
"question": "Does this file contain any function defined inside another function?",
"ans... | 3 | spire/spm/spm_object.py | lamyj/spire |
def cast_kwargs(kwargs):
    """Return a copy of *kwargs* with every string value cast to its natural
    Python type (bool/None/int/float), recursing into nested dicts."""
    result = kwargs.copy()
    for key, value in result.items():
        if isinstance(value, dict):
            result[key] = cast_kwargs(value)
        else:
            result[key] = cast_string(value)
    return result
def cast_string(s):
    """Cast *s* to bool, None, int or float when it looks like one;
    otherwise return it unchanged."""
    # Literal keywords first, so "True"/"False"/"None" are not treated
    # as plain strings.
    if s == "True":
        return True
    if s == "False":
        return False
    if s == "None":
        return None
    # Numeric casts: int is tried before float so "3" stays an int.
    if is_int(s):
        return int(s)
    if is_float(s):
        return float(s)
    return s
def is_int(s):
    """Return True when int(s) succeeds (numeric strings, numbers)."""
    try:
        int(s)
    except (ValueError, TypeError):
        return False
    return True
def is_float(s):
    """Return True when float(s) succeeds (numeric strings, numbers)."""
    try:
        float(s)
    except (ValueError, TypeError):
        return False
    return True
| [
{
"point_num": 1,
"id": "all_function_names_snake_case",
"question": "Are all function names in this file written in snake_case?",
"answer": true
},
{
"point_num": 2,
"id": "no_function_exceeds_5_params",
"question": "Does every function in this file take 5 or fewer parameters (exclu... | 3 | python-lib/gluonts_forecasts/custom_models/utils.py | RedaAffane/dss-plugin-timeseries-forecast |
from tests import splice_tests
def splice_1():
    '''String-slicing exercise: for each question below, replace the "None"
    placeholder with a slice/concatenation of hello_string or name_string.
    Returns the ten answers as a tuple, checked by main().'''
    hello_string = "Hello World!"
    name_string = "My name is Fernando."
    # q_1: splice the "Hello" from hello_string
    q_1 = "None"
    # q_2: splice the "World" from hello_string
    q_2 = "None"
    # q_3: splice the "!" from hello_string
    q_3 = "None"
    # q_4: splice the "name" from name_string
    q_4 = "None"
    # q_5: splice the "Fernando." from name_string
    q_5 = "None"
    # q_6: splice the "My name is " from name_string
    q_6 = "None"
    # q_7: splice the "." from name_string
    q_7 = "None"
    # q_8: splice the "My name is " from name_string and add your name to the end
    q_8 = "None"
    # q_9: splice the "World" from hello_string and concatenate it with " is " from name_string. Then, add "beautiful!" at the end.
    q_9 = "None"
    # q_10: splice the "Hello " from hello_string and concatenate it with "Fernando" from name_string. Then, concatenate it with the "!" from hello_string
    q_10 = "None"
    return q_1, q_2, q_3, q_4, q_5, q_6, q_7, q_8, q_9, q_10
def main():
    """Run the exercise and print ok/FAIL for each question.

    Question 8 is free-form (the student appends their own name), so it is
    checked by prefix and length only; every other question is compared
    against the expected answer in splice_tests. The three duplicated
    loops of the original are merged into one.
    """
    results = splice_1()
    for i, result in enumerate(results):
        if i == 7:
            # q_8 ends with the student's own name: prefix/length check only.
            ok = len(result) > 12 and result[:11] == "My name is "
        else:
            ok = result == splice_tests[i]
        if ok:
            print("Question " + str(i + 1) + ".........ok")
        else:
            print("Question " + str(i + 1) + ".........FAIL: " + result)
| [
{
"point_num": 1,
"id": "every_function_under_20_lines",
"question": "Is every function in this file shorter than 20 lines?",
"answer": false
},
{
"point_num": 2,
"id": "has_nested_function_def",
"question": "Does this file contain any function defined inside another function?",
... | 3 | lesson-2/string_splice.py | laurendayoun/intro-to-python |
###################################
# CS B551 Fall 2018, Assignment #3
#
# Scoring code by D. Crandall
#
# PLEASE DON'T MODIFY THIS FILE.
# Edit pos_solver.py instead!
#
class Score:
    """Accumulates word- and sentence-level tagging accuracy per algorithm."""
    def __init__(self):
        # algo name -> count of correctly tagged words / fully-correct sentences
        self.word_scorecard = {}
        self.sentence_scorecard = {}
        # totals over everything scored so far
        self.word_count = 0
        self.sentence_count = 0
    def score(self, algo_outputs, gt):
        """Score one sentence.

        algo_outputs maps algorithm name -> predicted label list;
        gt is the ground-truth label list of the same length.
        """
        self.word_count += len(gt)
        self.sentence_count += 1
        for algo,labels in algo_outputs.items():
            correct = 0
            for j in range(0, len(gt)):
                correct += 1 if gt[j] == labels[j] else 0
            self.word_scorecard[algo] = self.word_scorecard.get(algo, 0) + correct
            # (correct == len(gt)) is a bool, added as 0/1.
            self.sentence_scorecard[algo] = self.sentence_scorecard.get(algo, 0) + (correct == len(gt))
    def print_scores(self):
        """Print cumulative word/sentence accuracy percentages per algorithm."""
        print("\n==> So far scored %d sentences with %d words." % (self.sentence_count, self.word_count))
        print("    Words correct:   Sentences correct: ")
        for i in sorted(self.word_scorecard):
            print("%18s: %7.2f%% %7.2f%%" % (i, self.word_scorecard[i]*100 / float(self.word_count), self.sentence_scorecard[i]*100 / float(self.sentence_count)))
    @staticmethod
    def print_helper(description, list, sentence):
        """Print *list* aligned column-wise under *sentence* (min width 4).

        NOTE(review): parameter `list` shadows the builtin.
        """
        print (("%40s" % description) + " " + " ".join([(("%-" + str(max(4,len(sentence[i]))) + "s") % list[i]) for i in range(0,len(list)) ] ) )
    @staticmethod
    def print_results(sentence, outputs, posteriors, models):
        """Print each algorithm's tag sequence with its per-model posteriors."""
        Score.print_helper(" ".join([("%7s" % model) for model in models]), sentence, sentence)
        for algo in sorted(outputs.keys()):
            Score.print_helper(algo + " "+" ".join(["%7.2f" % posteriors[algo][model] for model in models]), outputs[algo], sentence)
| [
{
"point_num": 1,
"id": "every_function_under_20_lines",
"question": "Is every function in this file shorter than 20 lines?",
"answer": true
},
{
"point_num": 2,
"id": "has_nested_function_def",
"question": "Does this file contain any function defined inside another function?",
"... | 3 | pos_scorer.py | animeshsagar/Part-of-Speech-Tagging |
import math
def pra(n):
    """Return True when n is prime.

    Bug fix: the original trial division ran over range(2, int(sqrt(n))),
    which excludes the square root itself, so perfect squares such as
    4, 9 and 25 were wrongly reported as prime. The bound must be
    inclusive: range(2, int(sqrt(n)) + 1). A guard for n < 2 is also
    added (0, 1 and negatives are not prime; negatives previously made
    math.sqrt raise).
    """
    if n < 2:
        return False
    for i in range(2, (int)(math.sqrt(n)) + 1):
        if (n % i == 0):
            return False
    return True
def izpisi():
    """Print every prime in [2, 200)."""
    for n in range(2, 200):
        if pra(n):
            print(n)
| [
{
"point_num": 1,
"id": "has_nested_function_def",
"question": "Does this file contain any function defined inside another function?",
"answer": false
},
{
"point_num": 2,
"id": "every_function_has_docstring",
"question": "Does every function in this file have a docstring?",
"ans... | 3 | test.py | BlackPhoenixSlo/vislice |
from application import db
from flask import Blueprint
from flask_login import current_user
from flask import current_app as app
from flask import request, jsonify, make_response
from flask_jwt_extended import jwt_required
import logging
# Blueprint Configuration
friends_bp = Blueprint('friends_bp', __name__)
@friends_bp.route('/api/friends', methods=['GET'])
@jwt_required
def getAllFriends():
    """Return the authenticated user's friends list (response is mocked).

    NOTE(review): `get_jwt_identity` and `User` are used but never imported
    in this module, so this handler raises NameError when invoked --
    confirm the intended imports (flask_jwt_extended.get_jwt_identity and
    the User model).
    NOTE(review): the `else` branch is unreachable -- Flask only dispatches
    the methods listed in the route decorator to this view.
    """
    response = {}
    if request.method == 'GET':
        username = get_jwt_identity()
        user = User.query.filter(User.username == username).first()
        # TODO
        response["items"] = "Mocked GET /api/friends response"
        return make_response(jsonify(response), 200)
    else:
        response["message"] = "Method not allowed"
        return make_response(jsonify(response), 405)
# TODO: change id to query parameter
@friends_bp.route('/api/friend/id', methods=['DELETE'])
@jwt_required
def removeFriend():
    """Remove a friend for the authenticated user (response is mocked).

    NOTE(review): `get_jwt_identity` and `User` are used but never imported
    in this module, so this handler raises NameError when invoked.
    NOTE(review): the `else` branch is unreachable -- Flask only dispatches
    the methods listed in the route decorator to this view.
    """
    response = {
        "message": "",
    }
    if request.method == 'DELETE':
        username = get_jwt_identity()
        user = User.query.filter(User.username == username).first()
        # TODO
        response["message"] = "Mocked DELETE /api/friend/id response"
        return make_response(jsonify(response), 200)
    else:
        response["message"] = "Method not allowed"
        return make_response(jsonify(response), 405)
{
"point_num": 1,
"id": "every_function_has_docstring",
"question": "Does every function in this file have a docstring?",
"answer": false
},
{
"point_num": 2,
"id": "every_function_under_20_lines",
"question": "Is every function in this file shorter than 20 lines?",
"answer": tru... | 3 | application/friends/routes.py | Sebex0/friendship-diary-backend-python |
""" director subsystem's configuration
- config-file schema
- settings
"""
from typing import Dict
import trafaret as T
from aiohttp import ClientSession, web
from yarl import URL
from servicelib.application_keys import APP_CLIENT_SESSION_KEY, APP_CONFIG_KEY
APP_DIRECTOR_API_KEY = __name__ + ".director_api"
CONFIG_SECTION_NAME = "director"
# Trafaret schema for the "director" section of the service configuration.
schema = T.Dict(
    {
        T.Key("enabled", default=True, optional=True): T.Bool(),
        T.Key("host", default="director",): T.String(),
        T.Key("port", default=8001): T.ToInt(),
        T.Key("version", default="v0"): T.Regexp(
            regexp=r"^v\d+"
        ),  # storage API version basepath
    }
)
def build_api_url(config: Dict) -> URL:
    """Compose the director base URL (http://<host>:<port>/<version>)."""
    return URL.build(
        scheme="http", host=config["host"], port=config["port"]
    ).with_path(config["version"])
def get_config(app: web.Application) -> Dict:
    """Return the "director" section of the application's config."""
    return app[APP_CONFIG_KEY][CONFIG_SECTION_NAME]
def get_client_session(app: web.Application) -> ClientSession:
    """Return the application-wide aiohttp client session."""
    return app[APP_CLIENT_SESSION_KEY]
| [
{
"point_num": 1,
"id": "no_function_exceeds_5_params",
"question": "Does every function in this file take 5 or fewer parameters (excluding self/cls)?",
"answer": true
},
{
"point_num": 2,
"id": "all_function_names_snake_case",
"question": "Are all function names in this file written... | 3 | services/web/server/src/simcore_service_webserver/director/config.py | KZzizzle/osparc-simcore |
from dataclasses import asdict, dataclass, field
from datetime import date, datetime
from typing import List, TypeVar, Union
from uuid import UUID
import ujson
T = TypeVar("T")
@dataclass
class MetaState:
    """Per-instance serialization state: field names excluded from __json__."""
    exclude: List[str] = field(default_factory=list)
class BaseModel:
    """Mixin for dataclass models adding a ujson-based __json__ serializer."""
    # Populated in __post_init__; annotation only, so it is not a dataclass
    # field and asdict() does not include it.
    __state__: MetaState
    def __post_init__(self) -> None:
        """Attach fresh serialization state after dataclass __init__ runs."""
        self.__state__ = MetaState()
    def __json__(self) -> str:
        """Serialize the dataclass fields to JSON, skipping any field named
        in __state__.exclude and coercing non-JSON-native values."""
        return ujson.dumps(
            {
                k: self._clean(v)
                for k, v in asdict(self).items()
                if k not in self.__state__.exclude
            }
        )
    @staticmethod
    def _clean(value: T) -> Union[str, T]:
        """Coerce values ujson cannot encode natively (dates, datetimes,
        UUIDs) to strings; everything else passes through unchanged."""
        if isinstance(value, (date, datetime)):
            return value.isoformat()
        elif isinstance(value, UUID):
            return str(value)
        return value
| [
{
"point_num": 1,
"id": "every_function_under_20_lines",
"question": "Is every function in this file shorter than 20 lines?",
"answer": true
},
{
"point_num": 2,
"id": "more_functions_than_classes",
"question": "Does this file define more functions than classes?",
"answer": true
... | 3 | Chapter10/graphql/application/world/common/base_model.py | maggias/Python-Web-Development-with-Sanic |
from functools import wraps
def singleton(cls):
    """Class decorator: all calls return one shared instance of *cls*.

    The first call's arguments win; later arguments are ignored.
    """
    instances = {}
    @wraps(cls)
    def get_instance(*args, **kwargs):
        # Lazily construct on first use, then always hand back the same object.
        if cls not in instances:
            instances[cls] = cls(*args, **kwargs)
        return instances[cls]
    return get_instance
| [
{
"point_num": 1,
"id": "every_function_has_docstring",
"question": "Does every function in this file have a docstring?",
"answer": false
},
{
"point_num": 2,
"id": "every_function_under_20_lines",
"question": "Is every function in this file shorter than 20 lines?",
"answer": tru... | 3 | cuclasses/SingletonDecoration.py | HHHHhgqcdxhg/cuclasses |
import torch
import torch.nn as nn
import torch.nn.functional as F
class ActorNetwork(nn.Module):
    """Policy network: two hidden ReLU layers, log-probabilities out.

    Input is assumed to be a 2-D (batch, input_size) tensor -- TODO confirm.
    """

    def __init__(self, input_size, hidden_size, action_size):
        super(ActorNetwork, self).__init__()
        self.fc1 = nn.Linear(input_size, hidden_size)
        self.fc2 = nn.Linear(hidden_size, hidden_size)
        self.fc3 = nn.Linear(hidden_size, action_size)

    def forward(self, x):
        """Return per-action log-probabilities for each batch row."""
        out = F.relu(self.fc1(x))
        out = F.relu(self.fc2(out))
        # Fix: log_softmax without `dim` is deprecated and warns on every
        # forward pass; for 2-D input the implicit choice was dim=1, which
        # is made explicit here (same values, no warning).
        out = F.log_softmax(self.fc3(out), dim=1)
        return out
class ValueNetwork(nn.Module):
    """Value network: two hidden ReLU layers, linear output head."""

    def __init__(self, input_size, hidden_size, output_size):
        super(ValueNetwork, self).__init__()
        # Layer creation order is preserved so seeded init matches.
        self.fc1 = nn.Linear(input_size, hidden_size)
        self.fc2 = nn.Linear(hidden_size, hidden_size)
        self.fc3 = nn.Linear(hidden_size, output_size)

    def forward(self, x):
        """Apply fc1 -> relu -> fc2 -> relu -> fc3."""
        hidden = F.relu(self.fc1(x))
        hidden = F.relu(self.fc2(hidden))
        return self.fc3(hidden)
{
"point_num": 1,
"id": "all_function_names_snake_case",
"question": "Are all function names in this file written in snake_case?",
"answer": true
},
{
"point_num": 2,
"id": "more_functions_than_classes",
"question": "Does this file define more functions than classes?",
"answer": ... | 3 | models.py | IandRover/meta-gradient_RL |
import logging
import requests
import moment
import utils
import time
import json
class weather(utils.utils):
    """Consumes weather-request messages and pushes the daily forecast."""
    def message_callback(self,ch, method, properties, body):
        """RabbitMQ consume callback: fetch the forecast, then ack."""
        logging.info('messgae received weather')
        time.sleep(1)
        self.__get_weather(body)
        ch.basic_ack(delivery_tag=method.delivery_tag) # ack only after the work has completed
        logging.info('weather messgae is ack on :'+str(moment.now()))
    def __get_weather(self,body):
        """Query the weather API and send the forecast as a notification.

        On HTTP-level success (payload status == 200) the first forecast
        entry is formatted into params.content; otherwise an error marker
        is sent instead.
        """
        params = self.get_params(body)
        req = requests.get(params.request_url)
        data = json.loads(req.text)
        if(data['status'] == 200):
            wea = data['data']['forecast'][0]
            params.content='天气:'+wea['type']+' '+wea['high']+','+wea['low']+' '+wea['notice']
        else:
            params.content='错误'
        self.notification(params)
        self.notification_to_zhouyu(params)
    def notification_to_zhouyu(self,params):
        """Forward the notification to the push.devzhou endpoint via GET."""
        url='http://push.devzhou.t.cn/Push/'+params.title+'/'+params.content+'?url='+params.red_url
        requests.get(url)
        logging.info('messgae send successfully')
| [
{
"point_num": 1,
"id": "no_function_exceeds_5_params",
"question": "Does every function in this file take 5 or fewer parameters (excluding self/cls)?",
"answer": true
},
{
"point_num": 2,
"id": "every_function_under_20_lines",
"question": "Is every function in this file shorter than... | 3 | Reptile/weather.py | hqs666666/python |
from dateutil import parser
import preprocessor as p
def timestamp_to_date(timestamp):
    """
    Convert a twitter timestamp to a datetime object
    :param timestamp: a string represent the timestamp
    :return: a datetime object
    """
    return parser.parse(timestamp)
def day_diff(timestamp1, timestamp2):
    """
    Number of days between 2 timestamps (first minus second)
    :param timestamp1: first timestamp
    :param timestamp2: second timestamp
    :return: An integer indicating number of days between 2 timestamps
    """
    delta = timestamp_to_date(timestamp1) - timestamp_to_date(timestamp2)
    return delta.days
def read_brown_cluster_file(brown_cluster_text_file):
    """
    Read a brown cluster text file and save it into a dict.

    Each line is "<cluster-bitstring>\\t<token>"; cluster bitstrings are
    mapped to dense integer ids in order of first appearance.

    :param brown_cluster_text_file: open file-like object with the clusters
    :return: dict mapping token -> integer cluster id
    """
    brown_cluster_dict = dict()
    cluster_id_dict = dict()
    cluster_count = 0
    for line in brown_cluster_text_file.read().splitlines():
        arr = line.split('\t')
        cluster_str = arr[0]
        token = arr[1]
        # Bug fix: dict.has_key() was removed in Python 3 and raised
        # AttributeError here; the `in` operator is the correct test.
        if cluster_str not in cluster_id_dict:
            cluster_id_dict[cluster_str] = cluster_count
            cluster_count += 1
        brown_cluster_dict[token] = cluster_id_dict[cluster_str]
    return brown_cluster_dict
def preprocess_tweet(tweet):
    """
    Clean the tweet before feeding to other functions
    :param tweet: a raw tweet
    :return: tweet with URL, MENTIONS, EMOJI, HASHTAGS removed
    """
    cleaned_tweet = tweet.lower() # lowercase the tweet
    p.set_options(p.OPT.URL, p.OPT.EMOJI, p.OPT.MENTION, p.OPT.HASHTAG) # set options for the preprocessor
    # NOTE(review): .encode() yields bytes in Python 3, so p.clean()
    # receives bytes rather than str here -- confirm tweet-preprocessor
    # accepts bytes, otherwise decode back to str after the ascii strip.
    cleaned_tweet = p.clean(cleaned_tweet.encode("ascii", "ignore"))
    return cleaned_tweet;
| [
{
"point_num": 1,
"id": "has_nested_function_def",
"question": "Does this file contain any function defined inside another function?",
"answer": false
},
{
"point_num": 2,
"id": "every_function_under_20_lines",
"question": "Is every function in this file shorter than 20 lines?",
... | 3 | src/features/utils.py | iamhuy/rumour-veracity-verification |
import unittest
from translator import english_to_french,french_to_english
class Testfr2eng(unittest.TestCase):
    """French -> English translation tests."""
    def test1(self):
        """'Bonjour' must translate to 'Hello' and to nothing else."""
        self.assertEqual(french_to_english('Bonjour'), 'Hello')
        self.assertNotEqual(french_to_english('Bonjour'), 'potato')
class Testeng2fr(unittest.TestCase):
    """English -> French translation tests."""
    def test1(self):
        """'Hello' must translate to 'Bonjour' and to nothing else."""
        self.assertEqual(english_to_french('Hello'), 'Bonjour')
        self.assertNotEqual(english_to_french('Hello'), 'bonbon')
unittest.main() | [
{
"point_num": 1,
"id": "more_functions_than_classes",
"question": "Does this file define more functions than classes?",
"answer": false
},
{
"point_num": 2,
"id": "every_function_has_docstring",
"question": "Does every function in this file have a docstring?",
"answer": false
... | 3 | final_project/machinetranslation/tests.py | Stephanie041996/xzceb-flask_eng_fr |
class Solution:
    """Search a matrix whose rows and columns are sorted ascending."""

    def searchMatrix(self, matrix, target) -> bool:
        """
        Staircase search from the top-right corner: each comparison
        discards one full row or one full column, so the scan is
        O(rows + cols).
        :type matrix: List[List[int]]
        :type target: int
        :rtype: bool
        """
        if not matrix:
            return False
        r, c = 0, len(matrix[0]) - 1
        while r < len(matrix) and c >= 0:
            cell = matrix[r][c]
            if cell == target:
                return True
            if cell > target:
                c -= 1       # everything below in this column is larger
            else:
                r += 1       # everything left in this row is smaller
        return False

    def searchMatrix2(self, matrix, target) -> bool:
        """Brute-force variant: scan each row for the target."""
        for row in matrix:
            if target in row:
                return True
        return False
{
"point_num": 1,
"id": "more_functions_than_classes",
"question": "Does this file define more functions than classes?",
"answer": true
},
{
"point_num": 2,
"id": "every_function_under_20_lines",
"question": "Is every function in this file shorter than 20 lines?",
"answer": false... | 3 | leetcode/[cutz][2darray.py | cutz-j/AlgorithmStudy |
import re
from math import log10
class Vulcanizer():
    """Reduces a differential-expression dataframe to the two columns a
    volcano plot needs: log-fold-change and -log10(p-value)."""
    def __init__(self, log_fold_re_list, p_value_re_list):
        # Ordered regex candidates used to locate each column by name.
        self.log_fold_re_list = log_fold_re_list
        self.p_value_re_list = p_value_re_list
    def vulcanize(self, dataframe):
        """
        Given a dataframe,
        identify the columns for log-fold-change and p-value,
        remove all other columns,
        and replace p-value with its negative log.

        NOTE: mutates the caller's dataframe by adding the -log10 column.
        """
        log_fold_col = self._pick_log_fold(dataframe)
        p_value_col = self._pick_p_value(dataframe)
        log_p_value_col = '-log10({})'.format(p_value_col)
        dataframe[log_p_value_col] =\
            dataframe[p_value_col].apply(_neg_log)
        two_columns = dataframe[[log_fold_col, log_p_value_col]]
        return two_columns.dropna(axis='rows', how='all')
        # Plotly fails to show anything if rows with missing data are present,
        # (I think.)
    def _pick_log_fold(self, dataframe):
        """Name of the log-fold-change column."""
        return _pick_col(self.log_fold_re_list, dataframe)
    def _pick_p_value(self, dataframe):
        """Name of the p-value column."""
        return _pick_col(self.p_value_re_list, dataframe)
def _pick_col(name_re_list, df):
for name_re in name_re_list:
match_cols = [
col for col in df.columns
if re.search(name_re, col, flags=re.IGNORECASE)
]
if len(match_cols) == 1:
break
else:
raise Exception('expected one match for {} in {}'.format(
name_re_list, df.columns.tolist()))
return match_cols[0]
def _neg_log(x):
try:
return -log10(x)
except ValueError:
return None
| [
{
"point_num": 1,
"id": "all_params_annotated",
"question": "Does every function parameter in this file have a type annotation (excluding self/cls)?",
"answer": false
},
{
"point_num": 2,
"id": "all_function_names_snake_case",
"question": "Are all function names in this file written ... | 3 | context/app/utils/vulcanize.py | mccalluc/heatmap-scatter-dash |
"""
This module contains shared fixtures.
"""
import json
import pytest
import selenium.webdriver
@pytest.fixture(scope='session')
def config():
    """Load and validate config.json once per test session.

    Bug fix: the scope was declared as a fixture *parameter*
    (`def config(scope='session')`), which pytest interprets as a request
    for a fixture named "scope", not as a session-scoped fixture; the
    scope argument belongs on the decorator.
    """
    # Read JSON file
    with open('config.json') as config_file:
        config = json.load(config_file)
    # Assert values are acceptable
    assert config['browser'] in ['Firefox', 'Chrome', 'Headless Chrome']
    assert isinstance(config['implicit_wait'], int)
    assert config['implicit_wait'] > 0
    # Return config
    return config
@pytest.fixture
def browser(config):
    """Yield a WebDriver built from the config; quit it during teardown."""
    name = config['browser']
    if name == 'Firefox':
        driver = selenium.webdriver.Firefox()
    elif name == 'Chrome':
        driver = selenium.webdriver.Chrome()
    elif name == 'Headless Chrome':
        opts = selenium.webdriver.ChromeOptions()
        opts.add_argument('headless')
        driver = selenium.webdriver.Chrome(options=opts)
    else:
        raise Exception(f'Browser "{config["browser"]}" is not supported')
    # Implicitly wait for elements to appear, per the configured timeout.
    driver.implicitly_wait(config['implicit_wait'])
    # Hand the driver to the test, then clean up after it returns.
    yield driver
    driver.quit()
| [
{
"point_num": 1,
"id": "has_nested_function_def",
"question": "Does this file contain any function defined inside another function?",
"answer": false
},
{
"point_num": 2,
"id": "every_function_under_20_lines",
"question": "Is every function in this file shorter than 20 lines?",
... | 3 | tests/conftest.py | jaswanth-amili/RailYatri-Automation |
import hashlib
from corehq.apps.integration.models import (
DialerSettings,
GaenOtpServerSettings,
HmacCalloutSettings,
)
def domain_uses_dialer(domain):
    """Return True when the domain has an enabled DialerSettings row."""
    try:
        return DialerSettings.objects.get(domain=domain).is_enabled
    except DialerSettings.DoesNotExist:
        return False
def get_hmac_callout_settings(domain):
    """Return the domain's HmacCalloutSettings when enabled, else None."""
    try:
        settings = HmacCalloutSettings.objects.get(domain=domain)
    except HmacCalloutSettings.DoesNotExist:
        return None
    return settings if settings.is_enabled else None
def get_gaen_otp_server_settings(domain):
    """Return the domain's GaenOtpServerSettings when enabled, else None."""
    try:
        settings = GaenOtpServerSettings.objects.get(domain=domain)
    except GaenOtpServerSettings.DoesNotExist:
        return None
    return settings if settings.is_enabled else None
def get_dialer_settings(domain):
    """Return the domain's DialerSettings row.

    Unlike the sibling getters, this propagates DialerSettings.DoesNotExist
    when the row is missing.
    """
    return DialerSettings.objects.get(domain=domain)
def integration_contexts(domain):
    """Build the template context describing which integrations (dialer,
    GAEN OTP, HMAC callout) are enabled for *domain*."""
    context = {'dialer_enabled': domain_uses_dialer(domain)}
    if get_gaen_otp_server_settings(domain):
        context['gaen_otp_enabled'] = True
    hmac_settings = get_hmac_callout_settings(domain)
    if hmac_settings:
        # The raw secret never reaches the template -- only its hash.
        context['hmac_root_url'] = hmac_settings.destination_url
        context['hmac_api_key'] = hmac_settings.api_key
        context['hmac_hashed_secret'] = hash_secret(hmac_settings.api_secret)
    return context
def hash_secret(secret):
    """Return the SHA-512 hex digest of *secret* (a str)."""
    digest = hashlib.sha512(secret.encode())
    return digest.hexdigest()
| [
{
"point_num": 1,
"id": "all_function_names_snake_case",
"question": "Are all function names in this file written in snake_case?",
"answer": true
},
{
"point_num": 2,
"id": "every_function_under_20_lines",
"question": "Is every function in this file shorter than 20 lines?",
"answ... | 3 | corehq/apps/integration/util.py | dimagilg/commcare-hq |
from joerd.mkdir_p import mkdir_p
from joerd.plugin import plugin
from os import link
from contextlib2 import contextmanager
import os.path
class CacheStore(object):
    """
    Every tile that gets generated requires ETOPO1. And most require a GMTED
    tile, which are pretty large and cover much of the world. Rather than
    re-download them every time (ETOPO1 alone is 446MB), we cache ETOPO1 and
    GMTED.
    This is a bit of a hack, and would be better replaced by a generic
    fixed-size LRU/LFU cache. Even better if the cache could be shared
    between multiple Joerd processes on the same host.
    """
    def __init__(self, cfg):
        """Build the wrapped store via the plugin registry and remember the
        local cache directory."""
        store_type = cfg['store']['type']
        create_fn = plugin('store', store_type, 'create')
        self.store = create_fn(cfg['store'])
        self.cache_dir = cfg['cache_dir']
    def upload_all(self, d):
        """Delegate a bulk upload of directory *d* to the wrapped store."""
        self.store.upload_all(d)
    @contextmanager
    def upload_dir(self):
        """Yield a temporary directory; upload its contents on clean exit.

        NOTE(review): `tmpdir` is not imported in this module's visible
        imports -- presumably provided elsewhere in the package; confirm,
        otherwise this raises NameError.
        """
        with tmpdir() as t:
            yield t
            self.upload_all(t)
    def exists(self, filename):
        """Delegate an existence check to the wrapped store."""
        return self.store.exists(filename)
    def get(self, source, dest):
        """Fetch *source* into *dest*, caching ETOPO1/GMTED files locally."""
        if 'ETOPO1' in source or 'gmted' in source:
            cache_path = os.path.join(self.cache_dir, source)
            if not os.path.exists(cache_path):
                mkdir_p(os.path.dirname(cache_path))
                self.store.get(source, cache_path)
            # hard link to dest. this makes it non-portable, but means that
            # we don't have to worry about whether GDAL supports symbolic
            # links, and we don't have to worry about deleting files, as they
            # are reference counted by the OS.
            link(cache_path, dest)
        else:
            self.store.get(source, dest)
def create(cfg):
    """Plugin entry point: build a CacheStore from the store config dict."""
    return CacheStore(cfg)
| [
{
"point_num": 1,
"id": "more_functions_than_classes",
"question": "Does this file define more functions than classes?",
"answer": true
},
{
"point_num": 2,
"id": "all_function_names_snake_case",
"question": "Are all function names in this file written in snake_case?",
"answer": ... | 3 | joerd/store/cache.py | Hivemapper/HM-colony-joerd |
"""
Module: 'utimeq' on esp8266 v1.11
"""
# MCU: (sysname='esp8266', nodename='esp8266', release='2.2.0-dev(9422289)', version='v1.11-8-g48dcbbe60 on 2019-05-29', machine='ESP module with ESP8266')
# Stubber: 1.2.0
# NOTE(review): auto-generated firmware stub (micropy-stubber). Bodies and
# signatures are placeholders only -- the real MicroPython utimeq methods
# take arguments; do not hand-edit beyond regeneration.
class utimeq:
    ''
    def peektime():
        pass
    def pop():
        pass
    def push():
        pass
| [
{
"point_num": 1,
"id": "every_function_has_docstring",
"question": "Does every function in this file have a docstring?",
"answer": false
},
{
"point_num": 2,
"id": "more_functions_than_classes",
"question": "Does this file define more functions than classes?",
"answer": true
}... | 3 | packages/micropython-official/v1.11/esp8266/stubs/utimeq.py | TheVinhLuong102/micropy-stubs |
from typing import Any
from checkov.common.models.enums import CheckCategories
from checkov.cloudformation.checks.resource.base_resource_value_check import BaseResourceValueCheck
from checkov.common.models.consts import ANY_VALUE
class SQSQueueEncryption(BaseResourceValueCheck):
    """Checkov check CKV_AWS_27: SQS queues must set a KmsMasterKeyId."""

    def __init__(self) -> None:
        """Register the check's metadata with the base class."""
        super().__init__(
            name="Ensure all data stored in the SQS queue is encrypted",
            id="CKV_AWS_27",
            categories=[CheckCategories.ENCRYPTION],
            supported_resources=['AWS::SQS::Queue'],
        )

    def get_inspected_key(self) -> str:
        """CloudFormation property path that must be present."""
        return 'Properties/KmsMasterKeyId'

    def get_expected_value(self) -> Any:
        """Any non-empty value (a KMS key reference) satisfies the check."""
        return ANY_VALUE


check = SQSQueueEncryption()
| [
{
"point_num": 1,
"id": "every_function_has_docstring",
"question": "Does every function in this file have a docstring?",
"answer": false
},
{
"point_num": 2,
"id": "every_function_under_20_lines",
"question": "Is every function in this file shorter than 20 lines?",
"answer": tru... | 3 | checkov/cloudformation/checks/resource/aws/SQSQueueEncryption.py | pmalkki/checkov |
# -*- test-case-name: axiom.test.test_upgrading -*-
from axiom.item import Item
from axiom.attributes import text, integer, reference, inmemory
from axiom.upgrade import registerUpgrader
class ActivateHelper:
    """Mixin counting how many times axiom calls activate() on an item."""
    activated = 0
    def activate(self):
        self.activated += 1
class Adventurer(ActivateHelper, Item):
    """Version 2 of the test player item (v1 had a `sword` reference)."""
    typeName = 'test_app_player'
    schemaVersion = 2
    name = text()
    activated = inmemory()
class Sword(ActivateHelper, Item):
    """Version 2 of the test sword item: ownership now lives on the sword
    (`owner`), and damagePerHit replaces v1's hurtfulness."""
    typeName = 'test_app_sword'
    schemaVersion = 2
    name = text()
    damagePerHit = integer()
    owner = reference()
    activated = inmemory()
def upgradePlayerAndSword(oldplayer):
    """Upgrade a v1 player and its referenced sword to v2 together.

    The v2 sword's damagePerHit is derived from v1's hurtfulness (doubled),
    and the ownership reference is inverted: v1 stored player.sword, v2
    stores sword.owner. Returns (newplayer, newsword).
    """
    newplayer = oldplayer.upgradeVersion('test_app_player', 1, 2)
    newplayer.name = oldplayer.name
    oldsword = oldplayer.sword
    newsword = oldsword.upgradeVersion('test_app_sword', 1, 2)
    newsword.name = oldsword.name
    newsword.damagePerHit = oldsword.hurtfulness * 2
    newsword.owner = newplayer
    return newplayer, newsword
def player1to2(oldplayer):
    """Upgrader for test_app_player 1->2; the sword is upgraded alongside
    as a side effect."""
    newplayer, _newsword = upgradePlayerAndSword(oldplayer)
    return newplayer
def sword1to2(oldsword):
    """Upgrader for test_app_sword 1->2.

    Looks up the v1 player owning this sword (via the old schema's `sword`
    attribute) and upgrades the pair together, returning the new sword.
    """
    oldPlayerType = oldsword.store.getOldVersionOf('test_app_player', 1)
    oldplayer = list(oldsword.store.query(oldPlayerType,
                                          oldPlayerType.sword == oldsword))[0]
    newplayer, newsword = upgradePlayerAndSword(oldplayer)
    return newsword
# Register both upgraders with axiom so v1 items are migrated on load.
registerUpgrader(sword1to2, 'test_app_sword', 1, 2)
registerUpgrader(player1to2, 'test_app_player', 1, 2)
| [
{
"point_num": 1,
"id": "all_params_annotated",
"question": "Does every function parameter in this file have a type annotation (excluding self/cls)?",
"answer": false
},
{
"point_num": 2,
"id": "more_functions_than_classes",
"question": "Does this file define more functions than clas... | 3 | axiom/test/newapp.py | hawkowl/axiom |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# Python version: 3.6
import torch
from torch import nn
import torch.nn.functional as F
class MLP(nn.Module):
    """One-hidden-layer perceptron that flattens its input first."""

    def __init__(self, dim_in, dim_hidden, dim_out):
        super(MLP, self).__init__()
        # Layer creation order is preserved so seeded init matches.
        self.layer_input = nn.Linear(dim_in, dim_hidden)
        self.relu = nn.ReLU()
        self.dropout = nn.Dropout()
        self.layer_hidden = nn.Linear(dim_hidden, dim_out)

    def forward(self, x):
        """Flatten (N, C, H, W) input to (N, C*H*W), then input -> dropout
        -> relu -> hidden."""
        flat = x.view(-1, x.shape[1] * x.shape[-2] * x.shape[-1])
        out = self.layer_input(flat)
        out = self.dropout(out)
        out = self.relu(out)
        return self.layer_hidden(out)
class CNNMnist(nn.Module):
    """Small LeNet-style CNN for MNIST-shaped (num_channels x 28 x 28) input."""

    def __init__(self, args):
        super(CNNMnist, self).__init__()
        # Layer creation order is preserved so seeded init matches.
        self.conv1 = nn.Conv2d(args.num_channels, 10, kernel_size=5)
        self.conv2 = nn.Conv2d(10, 20, kernel_size=5)
        self.conv2_drop = nn.Dropout2d()
        self.fc1 = nn.Linear(320, 50)
        self.fc2 = nn.Linear(50, args.num_classes)

    def forward(self, x):
        """Two conv+pool stages, flatten, then two fully connected layers."""
        out = F.relu(F.max_pool2d(self.conv1(x), 2))
        out = F.relu(F.max_pool2d(self.conv2_drop(self.conv2(out)), 2))
        out = out.view(-1, out.shape[1] * out.shape[2] * out.shape[3])
        out = F.relu(self.fc1(out))
        out = F.dropout(out, training=self.training)
        return self.fc2(out)
class CNNCifar(nn.Module):
    """Small LeNet-style CNN for CIFAR-shaped (3 x 32 x 32) input."""

    def __init__(self, args):
        super(CNNCifar, self).__init__()
        # Layer creation order is preserved so seeded init matches.
        self.conv1 = nn.Conv2d(3, 6, 5)
        self.pool = nn.MaxPool2d(2, 2)
        self.conv2 = nn.Conv2d(6, 16, 5)
        self.fc1 = nn.Linear(16 * 5 * 5, 120)
        self.fc2 = nn.Linear(120, 84)
        self.fc3 = nn.Linear(84, args.num_classes)

    def forward(self, x):
        """conv-pool twice, flatten, then three fully connected layers."""
        out = self.pool(F.relu(self.conv1(x)))
        out = self.pool(F.relu(self.conv2(out)))
        out = out.view(-1, 16 * 5 * 5)
        out = F.relu(self.fc1(out))
        out = F.relu(self.fc2(out))
        return self.fc3(out)
| [
{
"point_num": 1,
"id": "every_function_has_docstring",
"question": "Does every function in this file have a docstring?",
"answer": false
},
{
"point_num": 2,
"id": "no_function_exceeds_5_params",
"question": "Does every function in this file take 5 or fewer parameters (excluding sel... | 3 | models/Nets.py | yss-al/dssgd |
######################################################################
#
# File: test/unit/test_base.py
#
# Copyright 2019 Backblaze Inc. All Rights Reserved.
#
# License https://www.backblaze.com/using_b2_code.html
#
######################################################################
from contextlib import contextmanager
import re
import unittest
class TestBase(unittest.TestCase):
    """Base test case providing context-manager versions of assertRaises
    with exact-message checking and assertRaisesRegexp with regex matching."""

    @contextmanager
    def assertRaises(self, exc, msg=None):
        """Fail unless the body raises *exc*; when *msg* is given, the
        exception's message must equal it exactly."""
        raised = False
        try:
            yield
        except exc as e:
            raised = True
            if msg is not None and msg != str(e):
                assert False, "expected message '%s', but got '%s'" % (msg, str(e))
        if not raised:
            assert False, 'should have thrown %s' % (exc,)

    @contextmanager
    def assertRaisesRegexp(self, expected_exception, expected_regexp):
        """Fail unless the body raises *expected_exception* with a message
        matching *expected_regexp*."""
        raised = False
        try:
            yield
        except expected_exception as e:
            raised = True
            if not re.search(expected_regexp, str(e)):
                assert False, "expected message '%s', but got '%s'" % (expected_regexp, str(e))
        if not raised:
            assert False, 'should have thrown %s' % (expected_exception,)
| [
{
"point_num": 1,
"id": "more_functions_than_classes",
"question": "Does this file define more functions than classes?",
"answer": true
},
{
"point_num": 2,
"id": "has_nested_function_def",
"question": "Does this file contain any function defined inside another function?",
"answe... | 3 | test/unit/test_base.py | ehossack/b2-sdk-python |
# -*- coding: utf-8 -*-
"""
获取YCY图片
"""
import json
import os
import requests
from settings import PROJECT_PATH
class YCYImage(object):
    """Scrapes Yang Chaoyue images from duitang.com and saves them locally."""
    def __init__(self):
        # Browser-like User-Agent so the API endpoint serves the request.
        self.headers = {
            "User-Agent": "Mozilla/5.0 (Windows NT 6.1) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/72.0.3626.121 Safari/537.36",
            # "Content-Type": "application/x-www-form-urlencoded",
        }
    def get_img(self):
        """Fetch image metadata from the search API, 24 items per page.

        (The original docstring said 100 pages; the loop covers offsets
        0..216, i.e. 10 pages.)
        """
        url = "https://www.duitang.com/napi/blog/list/by_search/"
        result = []
        for page in range(0, 240, 24):
            data = {
                'kw': '杨超越',
                'type': 'feed',
                'include_fields': 'top_comments,is_root,source_link,item,buyable,root_id,status,like_count,like_id,sender,album,reply_count,favorite_blog_id',
                '_type': '',
                'start': str(page),
            }
            # NOTE(review): verify=False disables TLS certificate checking.
            r = requests.get(url, headers=self.headers, params=data, verify=False)
            d = json.loads(r.text)
            if d.get('data').get('object_list'):
                d = d['data']['object_list']
                result.extend(d)
        return result
    def download_img_and_save(self, result):
        """Download each image and save it as <project>/pics/ycy_<index>.jpg."""
        if not result:
            return
        for index, d in enumerate(result):
            r = requests.get(url=d['photo']['path'])
            file_name = os.path.join(PROJECT_PATH, "pics", "ycy_{}.jpg".format(index))
            with open(file_name, 'wb') as f:
                f.write(r.content)
    def run(self):
        """Fetch the metadata, then download all images."""
        result = self.get_img()
        self.download_img_and_save(result)
if __name__ == '__main__':
ycy = YCYImage()
ycy.run()
| [
{
"point_num": 1,
"id": "more_functions_than_classes",
"question": "Does this file define more functions than classes?",
"answer": true
},
{
"point_num": 2,
"id": "every_function_has_docstring",
"question": "Does every function in this file have a docstring?",
"answer": false
}... | 3 | utils/imgs_getter.py | albertschr/wechat_robot_supported_blockchain |
# -*- encoding: utf-8 -*-
#
# Copyright 2014 OpenStack Foundation
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import logging
from oslo.config import cfg
from ceilometer import service
from ceilometer import storage
# Register the [database] time_to_live option before expirer() reads it.
cfg.CONF.import_opt('time_to_live', 'ceilometer.storage',
                    group='database')

# Module-level logger shared by the CLI entry points below.
LOG = logging.getLogger(__name__)
def dbsync():
    """Upgrade the metering, alarm and event storage backends to the latest schema."""
    service.prepare_service()
    # Same three upgrades as before, in the same order.
    for purpose in ('metering', 'alarm', 'event'):
        storage.get_connection_from_config(cfg.CONF, purpose).upgrade()
def expirer():
    """Purge expired metering samples when a TTL is configured, else do nothing."""
    service.prepare_service()
    ttl = cfg.CONF.database.time_to_live
    if ttl <= 0:
        # NOTE(review): `_` is assumed to be the gettext hook installed as a
        # builtin during service startup — confirm.
        LOG.info(_("Nothing to clean, database time to live is disabled"))
        return
    LOG.debug(_("Clearing expired metering data"))
    conn = storage.get_connection_from_config(cfg.CONF)
    conn.clear_expired_metering_data(ttl)
| [
{
"point_num": 1,
"id": "has_nested_function_def",
"question": "Does this file contain any function defined inside another function?",
"answer": false
},
{
"point_num": 2,
"id": "every_function_has_docstring",
"question": "Does every function in this file have a docstring?",
"ans... | 3 | ceilometer/cmd/storage.py | vmturbo/ceilometer |
from ert_gui.tools.manage_cases.case_init_configuration import CaseInitializationConfigurationPanel
from ert_gui.tools import Tool
from ert_gui.widgets import util
from ert_gui.widgets.closable_dialog import ClosableDialog
class ManageCasesTool(Tool):
    """Toolbar tool that opens the case-management dialog."""

    def __init__(self):
        icon = util.resourceIcon("ide/database_gear")
        super(ManageCasesTool, self).__init__("Manage Cases", "tools/manage_cases", icon)

    def trigger(self):
        """Show the case-initialization panel in a modal, closable dialog."""
        panel = CaseInitializationConfigurationPanel()
        ClosableDialog("Manage Cases", panel, self.parent()).exec_()
| [
{
"point_num": 1,
"id": "every_function_has_docstring",
"question": "Does every function in this file have a docstring?",
"answer": false
},
{
"point_num": 2,
"id": "every_function_under_20_lines",
"question": "Is every function in this file shorter than 20 lines?",
"answer": tru... | 3 | data/test/python/f718df30f82beb6e6e21b89a28a1753ce7989932manage_cases_tool.py | harshp8l/deep-learning-lang-detection |
from django.contrib.auth.mixins import LoginRequiredMixin
from django.urls import reverse
from django.views.generic import DetailView, ListView, RedirectView, UpdateView
from tiveU.articles.models import Article
from .models import User
class UserDetailView(LoginRequiredMixin, DetailView):
    """Profile page for a single user (login required)."""

    model = User
    # These next two lines tell the view to index lookups by username
    slug_field = 'username'
    slug_url_kwarg = 'username'

    def get_context_data(self, **kwargs):
        # NOTE(review): this exposes *all* articles, not only the viewed
        # user's — confirm that is intended.
        context = super(UserDetailView, self).get_context_data(**kwargs)
        context['articles'] = Article.objects.all()
        return context


class UserRedirectView(LoginRequiredMixin, RedirectView):
    """Redirect the logged-in user to their own detail page."""

    permanent = False  # temporary (302) redirect

    def get_redirect_url(self):
        return reverse('users:detail',
                       kwargs={'username': self.request.user.username})


class UserUpdateView(LoginRequiredMixin, UpdateView):
    """Let the logged-in user edit their own profile fields."""

    fields = ['name', 'email', 'picture', 'job_title', 'bio', 'phone', 'gender']
    model = User

    # send the user back to their own page after a successful update
    def get_success_url(self):
        return reverse('users:detail',
                       kwargs={'username': self.request.user.username})

    def get_object(self):
        # Only get the User record for the user making the request
        return User.objects.get(username=self.request.user.username)


class UserListView(LoginRequiredMixin, ListView):
    """List all users (login required)."""

    model = User
    # These next two lines tell the view to index lookups by username
    slug_field = 'username'
    slug_url_kwarg = 'username'
| [
{
"point_num": 1,
"id": "every_function_has_docstring",
"question": "Does every function in this file have a docstring?",
"answer": false
},
{
"point_num": 2,
"id": "more_functions_than_classes",
"question": "Does this file define more functions than classes?",
"answer": false
... | 3 | tiveU/users/views.py | rds0751/newtiveu |
# Copyright (C) 2016 Google Inc.
# Licensed under http://www.apache.org/licenses/LICENSE-2.0 <see LICENSE file>
"""add slug to task group task
Revision ID: 3605dca868e4
Revises: 1431e7094e26
Create Date: 2015-07-14 22:06:05.063945
"""
from alembic import op
import sqlalchemy as sa
# revision identifiers, used by Alembic.
revision = "3605dca868e4"       # this migration
down_revision = "1431e7094e26"  # parent revision

# Shared constants so upgrade() and downgrade() stay in sync.
_table_name = "task_group_tasks"
_column_name = "slug"
_slug_prefix = "TASK-"
_constraint_name = "unique_{}".format(_column_name)


def upgrade():
    """ Add and fill a unique slug column """
    # 1) Add the column as NULLable so existing rows don't violate it.
    op.add_column(
        _table_name,
        sa.Column(_column_name, sa.String(length=250), nullable=True)
    )
    # 2) Backfill every existing row with a generated slug, e.g. "TASK-42".
    op.execute("UPDATE {table_name} SET slug = CONCAT('{prefix}', id)".format(
        table_name=_table_name,
        prefix=_slug_prefix
    ))
    # 3) Tighten to NOT NULL now that every row has a value.
    op.alter_column(
        _table_name,
        _column_name,
        existing_type=sa.String(length=250),
        nullable=False
    )
    # 4) Enforce uniqueness across task group tasks.
    op.create_unique_constraint(_constraint_name, _table_name, [_column_name])


def downgrade():
    """ Remove slug column from task group tasks """
    op.drop_constraint(_constraint_name, _table_name, type_="unique")
    op.drop_column(_table_name, _column_name)
| [
{
"point_num": 1,
"id": "every_function_under_20_lines",
"question": "Is every function in this file shorter than 20 lines?",
"answer": false
},
{
"point_num": 2,
"id": "every_function_has_docstring",
"question": "Does every function in this file have a docstring?",
"answer": tru... | 3 | src/ggrc_workflows/migrations/versions/20150714220605_3605dca868e4_add_slug_to_task_group_task.py | sbilly/ggrc-core |
#!/usr/bin/env python3
import wpilib
import ctre
class MyRobot(wpilib.IterativeRobot):
    """
    This is a short sample program demonstrating how to use the basic throttle
    mode of the TalonSRX
    """

    def robotInit(self):
        # Runs once at robot power-up.
        self.motor = ctre.WPI_TalonSRX(1)  # Initialize the TalonSRX on device 1.

    def disabledPeriodic(self):
        # Ensure the motor is disabled when the robot is disabled.
        self.motor.disable()

    def teleopPeriodic(self):
        # Set the motor's output to half power.
        # This takes a number from -1 (100% speed in reverse) to +1 (100%
        # speed going forward)
        self.motor.set(0.5)


if __name__ == "__main__":
    wpilib.run(MyRobot)
| [
{
"point_num": 1,
"id": "every_function_under_20_lines",
"question": "Is every function in this file shorter than 20 lines?",
"answer": true
},
{
"point_num": 2,
"id": "more_functions_than_classes",
"question": "Does this file define more functions than classes?",
"answer": true
... | 3 | examples/basic/robot.py | benjiboy50fonz/robotpy-ctre-draft |
"""
Rhino Python Script Tutorial
Advanced
Exercise 00
Analytical surfaces - the roof of the British Museum.
Bibliography:
Williams, Christopher JK. "The analytic and numerical definition of the geometry of the British Museum Great Court Roof." (2001): 434-440.
"""
import rhinoscriptsyntax as rs
import math
nx = 30
ny = 30
ax = 80
ay = 80
a = 22
b = 36
c = 50
d = 50
def z2(x, y):
    """Analytic height of the Great Court roof surface at plan coordinates (x, y)."""
    # Distance from the plan origin; the surface is defined in terms of it.
    r = (x ** 2 + y ** 2) ** 0.5
    return (r / a - 1) * (1 - x / b) * (1 + x / b) * (1 - y / c) * (1 + y / d)
def Main():
    """Sample the roof surface on an nx-by-ny grid and add one Rhino point per sample."""
    for col in range(nx):
        for row in range(ny):
            px = (col / (nx - 1) - 0.5) * ax
            py = (row / (ny - 1) - 0.5) * ay
            # The factor 5 exaggerates the height for visualisation.
            rs.AddPoint(px, py, 5 * z2(px, py))


Main()
{
"point_num": 1,
"id": "every_function_under_20_lines",
"question": "Is every function in this file shorter than 20 lines?",
"answer": true
},
{
"point_num": 2,
"id": "has_nested_function_def",
"question": "Does this file contain any function defined inside another function?",
"... | 3 | 2 Scriptissimo/00/main.py | peterszerzo/rhino-pythonscript-tutorials |
import redis
import frappe
import logging
from zlib import compress
from six import string_types
from pygelf import GelfUdpHandler
def publish_redis_event(queue, compressed):
    """Push an encoded event onto the Redis list for *queue*; no-op when unconfigured."""
    client = get_redis_client()
    if client:
        client.lpush(f'events:queue:{queue}', compressed)
def publish_gelf_event(queue, compressed):
    """Emit an encoded event through the GELF logger for *queue*; no-op when unconfigured."""
    writer = get_gelf_writer(queue)
    if writer:
        writer.debug(compressed)
# Dispatch table: configured event_style -> publisher implementation.
style_map = {
    'redis': publish_redis_event,
    'gelf': publish_gelf_event,
}
def publish_event(event, payload='', doctype='', queue='default', buffer=False):
    """Serialize an event envelope and hand it to the configured publisher.

    Falls back to the GELF publisher for unknown styles. *buffer* is accepted
    for interface compatibility but currently unused.
    """
    event_style = frappe.local.conf.event_style
    envelope = frappe.as_json({
        'event': event,
        'payload': payload if isinstance(payload, string_types) else frappe.as_json(payload),
        'doctype': doctype,
        'queue': queue,
    })
    # Compression is currently disabled; the raw JSON is forwarded as-is.
    # compressed = compress(envelope.encode())
    compressed = envelope
    publisher = style_map.get(event_style, publish_gelf_event)
    publisher(queue, compressed)
# Cache of StrictRedis clients keyed by writer URL.
redis_client_map = {}


def get_redis_client():
    """Return a cached StrictRedis client for the configured writer URL.

    Returns None (after logging a warning) when no writer is configured —
    previously the code fell through and crashed calling from_url on a
    falsy URL.
    """
    redis_event_writer = frappe.local.conf.redis_event_writer
    if not redis_event_writer:
        frappe.logger().warn('Redis event fired but writer not configured')
        return None  # fix: missing early return
    try:
        return redis_client_map[redis_event_writer]
    except KeyError:
        client = redis.StrictRedis.from_url(redis_event_writer)
        redis_client_map[redis_event_writer] = client  # fix: cache was never populated
        return client
# Cache of configured GELF loggers, keyed by queue name.
GELF_WRITERS = {}


def get_gelf_writer(queue):
    """Return a cached GELF UDP logger for *queue*, creating it on first use.

    Returns None (after a warning) when gelf_event_writer is not configured.
    """
    try:
        return GELF_WRITERS[queue]
    except KeyError:
        pass
    logger = logging.getLogger(f'events:queue:{queue}')
    gelf_config = frappe.local.conf.gelf_event_writer
    if not gelf_config:
        frappe.logger().warn('Gelf event queue is not configured, while events are being fired')
        return
    handler = GelfUdpHandler(
        host=gelf_config.get('host', '127.0.0.1'),
        port=gelf_config.get('port', 32001),
    )
    logger.addHandler(handler)
    logger.setLevel(logging.DEBUG)
    logger.propagate = True
    GELF_WRITERS[queue] = logger
    return logger
| [
{
"point_num": 1,
"id": "every_function_under_20_lines",
"question": "Is every function in this file shorter than 20 lines?",
"answer": false
},
{
"point_num": 2,
"id": "no_function_exceeds_5_params",
"question": "Does every function in this file take 5 or fewer parameters (excluding... | 3 | latte/utils/background/event.py | sunnyakaxd/latte |
import os.path
from PIL import Image
from data.base_dataset import BaseDataset, get_transform
from data.image_folder import make_dataset
class SingleDataset(BaseDataset):
    """Dataset over a single folder of images (unpaired: domain A only)."""

    def initialize(self, opt):
        """Collect and sort all image paths under opt.dataroot and build the transform."""
        self.opt = opt
        self.root = opt.dataroot
        self.dir_A = os.path.join(opt.dataroot)
        self.A_paths = make_dataset(self.dir_A)
        self.A_paths = sorted(self.A_paths)
        self.transform = get_transform(opt)

    def __getitem__(self, index):
        """Return the transformed RGB image and the path it came from."""
        A_path = self.A_paths[index]
        A_img = Image.open(A_path).convert('RGB')
        A_img = self.transform(A_img)
        return {'A': A_img, 'A_paths': A_path}

    def __len__(self):
        # One sample per discovered image file.
        return len(self.A_paths)

    def name(self):
        # Human-readable identifier used by the framework's logging.
        return 'SingleImageDataset'
| [
{
"point_num": 1,
"id": "has_nested_function_def",
"question": "Does this file contain any function defined inside another function?",
"answer": false
},
{
"point_num": 2,
"id": "all_function_names_snake_case",
"question": "Are all function names in this file written in snake_case?",... | 3 | data/single_dataset.py | troyliu0105/DeblurGAN |
#!/usr/bin/python
# -*- coding: utf-8 -*-
"""
ZetCode PyQt4 tutorial
This program creates a skeleton of
a classic GUI application with a menubar,
toolbar, statusbar and a central widget.
author: Jan Bodnar
website: zetcode.com
last edited: September 2011
"""
import sys
from PyQt4 import QtGui
class Example(QtGui.QMainWindow):
    """Skeleton main window with a menubar, toolbar, statusbar and text editor."""

    def __init__(self):
        super(Example, self).__init__()
        self.initUI()

    def initUI(self):
        """Build the widget hierarchy; creation order matters for Qt parenting."""
        # Central editable text area.
        textEdit = QtGui.QTextEdit()
        self.setCentralWidget(textEdit)

        # One Exit action shared by the menu and the toolbar.
        exitAction = QtGui.QAction(QtGui.QIcon('004-python.png'), 'Exit', self)
        exitAction.setShortcut('Ctrl+Q')
        exitAction.setStatusTip('Exit application')
        exitAction.triggered.connect(self.close)

        # Create the status bar so setStatusTip messages have a target.
        self.statusBar()

        menubar = self.menuBar()
        fileMenu = menubar.addMenu('&File')
        fileMenu.addAction(exitAction)

        toolbar = self.addToolBar('Exit')
        toolbar.addAction(exitAction)

        self.setGeometry(300, 300, 350, 250)
        self.setWindowTitle('Main window')
        self.show()


def main():
    """Create the application, show the window, and run the Qt event loop."""
    app = QtGui.QApplication(sys.argv)
    # Keep a reference so the window is not garbage collected.
    ex = Example()
    sys.exit(app.exec_())


if __name__ == '__main__':
    main()
{
"point_num": 1,
"id": "more_functions_than_classes",
"question": "Does this file define more functions than classes?",
"answer": true
},
{
"point_num": 2,
"id": "all_function_names_snake_case",
"question": "Are all function names in this file written in snake_case?",
"answer": ... | 3 | app10.py | pacpac1992/pyqt4_examples |
import pytest
from riotwatcher import ValWatcher
@pytest.mark.val
@pytest.mark.usefixtures("reset_globals")
class TestValWatcher:
    """Constructor-contract tests for ValWatcher's api_key argument."""

    def test_require_api_key(self):
        # A None key must be rejected up front.
        with pytest.raises(ValueError):
            ValWatcher(None)

    def test_allows_positional_api_key(self):
        # Construction with a positional key must not raise.
        ValWatcher("RGAPI-this-is-a-fake")

    def test_allows_keyword_api_key(self):
        # Construction with a keyword key must not raise.
        ValWatcher(api_key="RGAPI-this-is-a-fake")
| [
{
"point_num": 1,
"id": "has_multiple_inheritance",
"question": "Does any class in this file use multiple inheritance?",
"answer": false
},
{
"point_num": 2,
"id": "all_function_names_snake_case",
"question": "Are all function names in this file written in snake_case?",
"answer":... | 3 | tests/test_ValWatcher.py | TheBoringBakery/Riot-Watcher |
"""Algorithm to invert a binary tree"""
import queue
import unittest
from collections import deque
class Node(object):
    """A binary-tree node: a payload plus optional left/right child links."""

    def __init__(self, value):
        """Create a leaf node holding *value*; children start empty."""
        self.value = value
        self.left = None   # left child (Node) or None
        self.right = None  # right child (Node) or None
def invert_binary_tree_recursive(tree):
    """Recursive implementation of the binary tree inversion algorithm.

    Mirrors the tree in place and returns its root (None for an empty tree).
    """
    if tree is None:
        return None
    # Swap first, then mirror each (already swapped) subtree.
    tree.left, tree.right = tree.right, tree.left
    invert_binary_tree_recursive(tree.left)
    invert_binary_tree_recursive(tree.right)
    return tree
def invert_binary_tree_iterative(tree):
    """Iterative implementation of the binary tree inversion algorithm.

    Mirrors the tree in place with a breadth-first traversal and returns the
    root (None for an empty tree).
    """
    if not tree:
        return None
    # collections.deque gives O(1) FIFO ops; queue.Queue added needless
    # thread-synchronization overhead for this single-threaded traversal.
    pending = deque([tree])
    while pending:
        node = pending.popleft()
        if node is None:
            continue
        # invert the left and right sub trees
        node.left, node.right = node.right, node.left
        # add nodes to be processed
        pending.append(node.left)
        pending.append(node.right)
    return tree


if __name__ == "__main__":
    unittest.main()
| [
{
"point_num": 1,
"id": "more_functions_than_classes",
"question": "Does this file define more functions than classes?",
"answer": true
},
{
"point_num": 2,
"id": "every_function_under_20_lines",
"question": "Is every function in this file shorter than 20 lines?",
"answer": false... | 3 | algorithms/invert_binary_tree.py | avg-2049-joe/py-algo-ds |
#!/usr/bin/env python3
"""
Main module for the deployable project.
"""
# Bootstrap to be able to perform absolute imports as standalone code
if __name__ == "__main__":
from absolute_import import absolute_import
absolute_import(file=__file__, name=__name__, path=__path__)
# Normal imports
from argparse import ArgumentParser, RawDescriptionHelpFormatter
from deployable.defaults.args import description, epilog
from typing import Any, Tuple
def get_args() -> Tuple[Any]:
    """
    Retrieves arguments from command line.
    """
    # Create parser and groups
    # NOTE(review): the parser is built but nothing is parsed or returned yet,
    # so callers currently get None despite the Tuple[Any] annotation — this
    # looks like scaffolding awaiting completion.
    parser = ArgumentParser(description=description, epilog=epilog, formatter_class=RawDescriptionHelpFormatter)


def main() -> None:
    """
    Entrypoint.
    """
    # NOTE(review): intentionally empty for now; argument handling above is
    # not wired in yet.


# Call main method
if __name__ == "__main__":
    main()
| [
{
"point_num": 1,
"id": "every_function_has_docstring",
"question": "Does every function in this file have a docstring?",
"answer": true
},
{
"point_num": 2,
"id": "every_function_under_20_lines",
"question": "Is every function in this file shorter than 20 lines?",
"answer": true... | 3 | src/deployable/__main__.py | cpuabuse/py-deployment-automation |
#standard
import unittest
import math
# from collections import OrderedDict
from random import uniform
#external
import pandas as pd
from scipy.spatial import KDTree
def Find_RSE_range(df, RSEs, minrange):
    """Map each RSE name to the IDs of vehicles within *minrange* of its location.

    RSEs with no vehicle in range map to an empty list.
    """
    positions = df[['vehicle_ID', 'location_x', 'location_y']]
    # KD-tree over vehicle coordinates for fast radius queries.
    tree = KDTree(positions[['location_x', 'location_y']].values)
    hits_per_rse = tree.query_ball_point(list(RSEs.RSEListLocations.values()), r=minrange)
    in_range = {}
    for idx, name in enumerate(RSEs.RSEListLocations.keys()):
        hits = hits_per_rse[idx]
        if len(hits) > 0:
            in_range[name] = positions.iloc[hits]['vehicle_ID'].tolist()
        else:
            in_range[name] = []
    return in_range
class BufferContentCheck(unittest.TestCase):
    """Smoke-test scaffold for Find_RSE_range (body currently commented out)."""

    def setUp(self):
        # No fixtures needed yet.
        pass

    def test_whole(self):
        """End-to-end RSE/vehicle matching over random positions (disabled)."""
        minrange = 4.00
        num_vehicles = 10000
        num_RSE = 30
        # Vehicles_loc = {x:(uniform(0, 200), uniform(0, 200)) for x in range(num_vehicles)}
        # df = pd.DataFrame({
        #     'Vid' : ['V' + str(x) for x in Vehicles_loc.keys()],
        #     'x' : [Vehicles_loc[x][0] for x in Vehicles_loc],
        #     'y' : [Vehicles_loc[x][1] for x in Vehicles_loc],
        # })
        # df = df.set_index(['Vid'], drop=False)
        # RSEs = OrderedDict({'RSE' + str(x):(uniform(0, 200), uniform(0, 200)) for x in range(num_RSE)})
        # rse_info = Find_RSE_range(df, RSEs, minrange)


if __name__ == '__main__':
    unittest.main()
| [
{
"point_num": 1,
"id": "every_function_has_docstring",
"question": "Does every function in this file have a docstring?",
"answer": false
},
{
"point_num": 2,
"id": "no_function_exceeds_5_params",
"question": "Does every function in this file take 5 or fewer parameters (excluding sel... | 3 | old_versions/TCA_2_2/TCA_V_2_2_1/code/TCASpacePartitioning.py | OSADP/TCA |
from flask import redirect, render_template, url_for
from flask_login import login_required
from app import db
from app.models import User
from . import user
@user.route("/")
def index():
users = User.query.all() # Select * from users;
return render_template("users.html", users=users)
@user.route("/user/<int:id>")
@login_required
def unique(id):
user = User.query.get(id)
return render_template("user.html", user=user)
@user.route("/user/delete/<int:id>")
def delete(id):
user = User.query.filter_by(id=id).first()
db.session.delete(user)
db.session.commit()
return redirect(url_for(".index"))
| [
{
"point_num": 1,
"id": "every_function_has_docstring",
"question": "Does every function in this file have a docstring?",
"answer": false
},
{
"point_num": 2,
"id": "all_function_names_snake_case",
"question": "Are all function names in this file written in snake_case?",
"answer"... | 3 | app/user/views.py | EmersonsfDev/Flask_login |
# coding: utf-8
"""
Kubernetes
No description provided (generated by Swagger Codegen https://github.com/swagger-api/swagger-codegen)
OpenAPI spec version: v1.11.3
Generated by: https://github.com/swagger-api/swagger-codegen.git
"""
from __future__ import absolute_import
import os
import sys
import unittest
import kubernetes.client
from kubernetes.client.rest import ApiException
from kubernetes.client.models.v1_resource_quota_spec import V1ResourceQuotaSpec
class TestV1ResourceQuotaSpec(unittest.TestCase):
    """ V1ResourceQuotaSpec unit test stubs """

    def setUp(self):
        # No fixtures required for these generated stubs.
        pass

    def tearDown(self):
        # Nothing to clean up.
        pass

    def testV1ResourceQuotaSpec(self):
        """
        Test V1ResourceQuotaSpec
        """
        # FIXME: construct object with mandatory attributes with example values
        #model = kubernetes.client.models.v1_resource_quota_spec.V1ResourceQuotaSpec()
        pass


if __name__ == '__main__':
    unittest.main()
| [
{
"point_num": 1,
"id": "every_function_under_20_lines",
"question": "Is every function in this file shorter than 20 lines?",
"answer": true
},
{
"point_num": 2,
"id": "more_functions_than_classes",
"question": "Does this file define more functions than classes?",
"answer": true
... | 3 | kubernetes/test/test_v1_resource_quota_spec.py | woqer/python |
""" App database access information """
class sa_db_access:
    """Accessors for the SmartAlpha database connection settings.

    The placeholder values ("__...__") are substituted at deploy time.
    """

    def username(self):
        """ Get database username """
        return "__db_user_name__"

    def password(self):
        """ Get database password """
        return "__mysql_user_password__"

    def db_name(self):
        """ Get database name """
        return "smartalpha"

    def db_server(self):
        """ Get database server address """
        return "localhost"
| [
{
"point_num": 1,
"id": "more_functions_than_classes",
"question": "Does this file define more functions than classes?",
"answer": true
},
{
"point_num": 2,
"id": "all_function_names_snake_case",
"question": "Are all function names in this file written in snake_case?",
"answer": ... | 3 | sa_db.py | project-k-0-1/project-k |
import theano
import theano.tensor as T
import numpy as np
from utility.utility import *
def activation_init(prefix, params, layer_setting):
    """Activation layers have no trainable parameters; return *params* unchanged."""
    return params
def activation_calc(prefix, params, layer_setting, state_below, mask_below=None):
    """Apply the configured activation function to *state_below*.

    3-D inputs (presumably time x batch x feature — confirm) are flattened to
    2-D, passed through the activation, then reshaped back. If *mask_below*
    is given it is forwarded to the activation as a second argument.
    """
    # NOTE: eval on a config string — assumes trusted layer settings.
    act_func = eval(layer_setting['activation'])
    was_3d = False
    shp = state_below.shape
    if state_below.ndim == 3:
        was_3d = True
        shp0 = shp[0]
        shp1 = shp[1]
        shp2 = shp[2]
        state_below = T.reshape(state_below, [shp0 * shp1, shp2], ndim=2)
    # fix: identity check instead of '== None' (PEP 8 E711); equality against
    # None is fragile for tensor-like operands.
    if mask_below is None:
        result = act_func(state_below)
    else:
        result = act_func(state_below, mask_below)
    if was_3d:
        result = T.reshape(result, [shp0, shp1, shp2], ndim=3)
    return result
| [
{
"point_num": 1,
"id": "no_function_exceeds_5_params",
"question": "Does every function in this file take 5 or fewer parameters (excluding self/cls)?",
"answer": true
},
{
"point_num": 2,
"id": "every_function_has_docstring",
"question": "Does every function in this file have a docs... | 3 | Layers/activation_layer.py | KaiQiangSong/Structure-Infused-Copy-Mechanism |
# AUTO-GENERATED by tools/checkspecs.py - DO NOT EDIT
from ....testing import assert_equal
from ..epi import ApplyTOPUP
def test_ApplyTOPUP_inputs():
    """Check the declared input-trait metadata of fsl.ApplyTOPUP (auto-generated)."""
    # Expected metadata per input trait; compared field-by-field below.
    input_map = dict(args=dict(argstr='%s',
    ),
    datatype=dict(argstr='-d=%s',
    ),
    encoding_file=dict(argstr='--datain=%s',
    mandatory=True,
    ),
    environ=dict(nohash=True,
    usedefault=True,
    ),
    ignore_exception=dict(nohash=True,
    usedefault=True,
    ),
    in_files=dict(argstr='--imain=%s',
    mandatory=True,
    sep=',',
    ),
    in_index=dict(argstr='--inindex=%s',
    sep=',',
    ),
    in_topup_fieldcoef=dict(argstr='--topup=%s',
    copyfile=False,
    requires=['in_topup_movpar'],
    ),
    in_topup_movpar=dict(copyfile=False,
    requires=['in_topup_fieldcoef'],
    ),
    interp=dict(argstr='--interp=%s',
    ),
    method=dict(argstr='--method=%s',
    ),
    out_corrected=dict(argstr='--out=%s',
    name_source=['in_files'],
    name_template='%s_corrected',
    ),
    output_type=dict(),
    terminal_output=dict(nohash=True,
    ),
    )
    inputs = ApplyTOPUP.input_spec()
    # One yielded assertion per (trait, metadata key) pair (nose-style generator test).
    for key, metadata in list(input_map.items()):
        for metakey, value in list(metadata.items()):
            yield assert_equal, getattr(inputs.traits()[key], metakey), value


def test_ApplyTOPUP_outputs():
    """Check the declared output-trait metadata of fsl.ApplyTOPUP (auto-generated)."""
    output_map = dict(out_corrected=dict(),
    )
    outputs = ApplyTOPUP.output_spec()
    for key, metadata in list(output_map.items()):
        for metakey, value in list(metadata.items()):
            yield assert_equal, getattr(outputs.traits()[key], metakey), value
| [
{
"point_num": 1,
"id": "any_function_over_40_lines",
"question": "Is any function in this file longer than 40 lines?",
"answer": true
},
{
"point_num": 2,
"id": "every_function_under_20_lines",
"question": "Is every function in this file shorter than 20 lines?",
"answer": false
... | 3 | nipype/interfaces/fsl/tests/test_auto_ApplyTOPUP.py | effigies/nipype |
# Copyright (c) 2020 Pavel Vavruska
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
from space.vectors import Vec2d
class Projectile(object):
    """A moving point: a position advanced each tick by a fixed direction vector."""

    def __init__(self, position_vec2d, direction_vec2d):
        self.position_vec2d = position_vec2d    # current location (Vec2d)
        self.direction_vec2d = direction_vec2d  # per-step displacement (Vec2d)

    def move(self):
        """Advance the position in place by one step of the direction vector."""
        self.position_vec2d.add_vec2d(self.direction_vec2d)
{
"point_num": 1,
"id": "more_functions_than_classes",
"question": "Does this file define more functions than classes?",
"answer": true
},
{
"point_num": 2,
"id": "every_function_has_docstring",
"question": "Does every function in this file have a docstring?",
"answer": false
}... | 3 | projectile.py | PavelVavruska/binary-space-partitioning |
import aioredis
from sanic import Sanic
class RedisWorker:
    """Async wrapper around an aioredis connection for sessions and SMS codes."""

    def __init__(self):
        self.__host = None  # redis URL, taken from app config in init()
        self.__pool = None  # aioredis connection, created in init()

    async def init(self, app: Sanic):
        """Connect to the Redis host configured on the Sanic app."""
        self.__host = app.config.REDIS_HOST
        self.__pool = await aioredis.create_redis(self.__host)

    async def check_session(self, token):
        """Refresh the session TTL to 300s; truthy iff the token still exists."""
        return await self.__pool.expire(token, 300)

    async def set_conf_msg(self, phone, msg):
        """Store a confirmation code for *phone*, valid for 60 seconds."""
        await self.__pool.set(phone, msg)
        await self.__pool.expire(phone, 60)

    async def get_conf_msg(self, phone, msg):
        """Validate *msg* against the stored code; consume the code on success."""
        # fix: was missing `await` — a coroutine object was compared to msg,
        # so this always returned False and the code was never consumed.
        real_code = await self.__pool.get(phone)
        if real_code == msg:
            # NOTE(review): aioredis returns bytes by default; if msg is str
            # this comparison may need an encoding setting — confirm.
            await self.__pool.delete(phone)  # fix: delete was never awaited
            return True
        else:
            return False

    async def get_user(self, token):
        """Return the user id stored for a session token (or None)."""
        return await self.__pool.get(token)

    async def create_session(self, user_id, token):
        """Return a session token for *user_id*, reusing an existing one if present."""
        cur_token = await self.__pool.get(user_id)
        if not cur_token:
            self.__pool.set(token, user_id)
            await self.__pool.expire(token, 300)
        else:
            token = cur_token
        return token

    async def close(self):
        """Close the underlying connection and wait for it to shut down."""
        self.__pool.close()
        await self.__pool.wait_closed()


redis = RedisWorker()
| [
{
"point_num": 1,
"id": "has_multiple_inheritance",
"question": "Does any class in this file use multiple inheritance?",
"answer": false
},
{
"point_num": 2,
"id": "more_functions_than_classes",
"question": "Does this file define more functions than classes?",
"answer": true
},... | 3 | service_api/domain/redis.py | 123456789-dnipro/hackaton |
import ila
from gb_arch import GBArch
from gb_nxt_wri import WRI
from gb_nxt_wr0 import WRU0
from gb_nxt_wr0b import WRU0b
from gb_nxt_wr1 import WRU1
from gb_rdi import defNext as rdDefNext
def defUSts (gb):
    """Declare the micro-architectural state registers on the abstraction."""
    m = gb.abst
    # Each register gets a *_nxt alias initialised to itself (identity next-state).
    gb.pre_pix = m.reg ('pre_pix', gb.DATA_SIZE)
    gb.pre_pix_nxt = gb.pre_pix
    gb.st_ready = m.reg ('st_ready', 1)
    gb.st_ready_nxt = gb.st_ready
    gb.proc_in = m.reg ('proc_in', gb.slice_size * gb.stencil_size)
    gb.proc_in_nxt = gb.proc_in


# Define next state function for each instruction/child-instruction
def defNext (gb):
    """Build the next-state expressions for the write instructions."""
    WRI (gb)
    WRU0 (gb)
    WRU1 (gb)


# Connect next state function to the abstraction
def setNext (gb):
    """Register the accumulated *_nxt expressions with the ILA model."""
    gb.setNext ()
    m = gb.abst
    m.set_next ('proc_in', gb.proc_in_nxt)
    m.set_next ('pre_pix', gb.pre_pix_nxt)
    m.set_next ('st_ready', gb.st_ready_nxt)


if __name__ == '__main__':
    # Build the full abstraction (write + read paths) and dump it as Verilog.
    gb = GBArch ()
    defUSts (gb)
    defNext (gb)
    rdDefNext (gb)
    setNext (gb)
    verilogFile = 'gb_verilog_all.v'
    gb.exportVerilog (verilogFile)
| [
{
"point_num": 1,
"id": "no_function_exceeds_5_params",
"question": "Does every function in this file take 5 or fewer parameters (excluding self/cls)?",
"answer": true
},
{
"point_num": 2,
"id": "all_function_names_snake_case",
"question": "Are all function names in this file written... | 3 | examples/GB-Halide/ila_spec_a/all.py | pramodsu/ILA-Tools |
import numpy as np
import hypothesis
import strax.testutils
import straxen
def channel_split_naive(r, channel_ranges):
    """Slower but simpler implementation of straxen.split_channel_ranges.

    Returns one record subset per inclusive (left, right) channel range.
    """
    results = []
    for left, right in channel_ranges:
        # np.isin supersedes np.in1d, which is deprecated (removed in NumPy 2.x).
        mask = np.isin(r['channel'], np.arange(left, right + 1))
        results.append(r[mask])
    return results
@hypothesis.settings(deadline=None)
@hypothesis.given(strax.testutils.several_fake_records)
def test_channel_split(records):
    """Property test: split_channel_ranges matches the naive reference on random records."""
    # Ranges cover channels 0..999 in four inclusive bands.
    channel_range = np.asarray([[0, 0], [1, 2], [3, 3], [4, 999]])
    result = list(straxen.split_channel_ranges(records, channel_range))
    result_2 = channel_split_naive(records, channel_range)
    assert len(result) == len(result_2)
    for i, _ in enumerate(result):
        # Same channel sets and identical record contents, band by band.
        np.testing.assert_array_equal(
            np.unique(result[i]['channel']),
            np.unique(result_2[i]['channel']))
        np.testing.assert_array_equal(result[i], result_2[i])
| [
{
"point_num": 1,
"id": "every_function_under_20_lines",
"question": "Is every function in this file shorter than 20 lines?",
"answer": true
},
{
"point_num": 2,
"id": "all_params_annotated",
"question": "Does every function parameter in this file have a type annotation (excluding se... | 3 | tests/test_channel_split.py | zhut19/straxen |
#Contains all the functions required for assessing flood risk
#Exercise 2B - assessing flood risk by level:
def stations_level_over_threshold(stations, tol):
    """Return (name, relative water level) tuples for stations above *tol*.

    Sorted by relative level, highest first. Stations whose relative level is
    None or cannot be compared are skipped, as before — but explicitly, instead
    of via a dead `type(...) == None` branch and TypeError-driven control flow.
    """
    stations_over_threshold = []
    for station in stations:
        try:
            level = station.relative_water_level()
        except Exception:
            # Station data too broken to evaluate — skip it (original behaviour).
            continue
        if level is not None and level > tol:
            stations_over_threshold.append((station.name, level))
    # Sort by the level (second tuple element), descending.
    stations_over_threshold.sort(key=lambda pair: pair[1], reverse=True)
    return stations_over_threshold
#Exercise 2C - identifying the most at-risk stations:
def stations_highest_rel_level(stations, N):
    """Return a list of the N stations at which the water level (relative to
    the typical water level) is highest, in descending order.

    (The summary above was previously a stray module-level string literal;
    it is now the function's docstring.)
    """
    # Keep only stations that can report a relative level.
    rated = [s for s in stations if s.relative_water_level() is not None]
    rated.sort(key=lambda s: s.relative_water_level(), reverse=True)
    return rated[:N]
| [
{
"point_num": 1,
"id": "no_function_exceeds_5_params",
"question": "Does every function in this file take 5 or fewer parameters (excluding self/cls)?",
"answer": true
},
{
"point_num": 2,
"id": "all_params_annotated",
"question": "Does every function parameter in this file have a ty... | 3 | floodsystem/flood.py | HSCam/Flood-Warning-Sytem |
import os
import time
import traceback
# import functools
def getobj(s):
    """Read and return the entire UTF-8 text content of the file at path *s*."""
    # fix: use a context manager so the file handle is closed (it was leaked).
    with open(s, "r", encoding='utf-8') as f:
        return f.read()
def getobjs(s):
    """Recursively collect the paths of all .py files under directory *s*."""
    found = []
    for entry in os.listdir(s):
        full = os.path.join(s, entry)
        if os.path.isdir(full):
            found += getobjs(full)
        elif os.path.isfile(full) and os.path.splitext(entry)[1] == '.py':
            found.append(full)
    return found
class gameplay(object):
    """Interactive, script-driven game loop.

    All game state and behaviour live in ``self.var``, a namespace dict that
    loaded scripts read and mutate via ``exec``. Scenario and common scripts
    are plain .py files executed into that namespace at construction time.
    """

    def __init__(self, scenario="__general", _basedir=None):
        print("A new game object is constructed.")
        if _basedir is None:
            # Default to the project root (parent of this file's directory).
            _basedir = os.path.dirname(os.path.dirname(os.path.realpath(__file__)))
        self.__basedir = _basedir
        # Seed the script namespace with hooks the scripts may call back into.
        self.var = {
            "load_script": self.load_script,
            "load_scripts": self.load_scripts,
            "running": True
            # "output":self.output
        }
        self.load_scripts("__commons")
        self.load_script(os.path.join("scenarios", scenario + '.py'))
        self.paused = True  # start paused, waiting for operator input
        self.speed = 0      # higher speed shortens the inter-round delay

    def end(self):
        """Hook invoked when the game loop terminates."""
        print("A game has ended.")

    def run(self):
        """Main loop: play rounds until a script clears var['running'].

        While paused, operator commands are read from stdin and executed
        against this object (e.g. ``self.continue_game()``).
        """
        print("A game has started.")
        try:
            # definition
            # execution
            while self.var['running']:
                self.var['play_round']()  # must be defined by a loaded script
                self.pause_game()
                while self.paused:
                    # self.output()
                    ope = input()
                    # print("Game object got operation:" + ope)
                    exec(ope)  # NOTE(review): executes raw operator input — trusted console only
                # speed n -> delay of 2 * 0.5**n seconds between rounds
                time.sleep(2 * (0.5 ** self.speed))
        except:
            print("!!!!! --- 游戏体抛出异常 --- !!!!!")
            traceback.print_exc()
            self.end()

    def output(self):
        """Dump the whole script namespace (debug aid)."""
        print(self.var)

    def load_script(self, scriptpath):
        """Execute one script file (path relative to the base dir) into self.var."""
        exec(getobj(os.path.join(self.__basedir, scriptpath)), self.var, self.var)

    def load_scripts(self, scriptdir):
        """Execute every .py under *scriptdir* (recursively), in sorted path order."""
        objs = getobjs(os.path.join(self.__basedir, scriptdir))
        objs.sort()
        for i in objs:
            exec(getobj(i), self.var, self.var)

    def pause_game(self):
        # Entered after each round; run() then polls stdin until resumed.
        self.paused = True

    def continue_game(self):
        # Resume the round loop (typically typed by the operator while paused).
        self.paused = False

    def set_speed(self, speed):
        # Controls the delay between rounds in run().
        self.speed = speed
| [
{
"point_num": 1,
"id": "all_function_names_snake_case",
"question": "Are all function names in this file written in snake_case?",
"answer": true
},
{
"point_num": 2,
"id": "no_function_exceeds_5_params",
"question": "Does every function in this file take 5 or fewer parameters (exclu... | 3 | oiasg_base/lib/game.py | will7101/OIASG |
from progress_passthrough.iterator_wrappers import (
IteratorWrapper,
AttachedLengthIterator,
CallbackIterator,
wrap_source,
wrap_source_len,
)
from unittest.mock import Mock
def test_iterator_wrapper_on_iterable():
    """Wrapping an iterable keeps both its length and its contents."""
    wrapped = IteratorWrapper(range(10))
    assert len(wrapped) == 10
    assert list(wrapped) == list(range(10))
def test_iterator_wrapper_on_iterator():
    """Wrapping a bare iterator still yields all of its items."""
    wrapped = IteratorWrapper(iter(range(10)))
    assert list(wrapped) == list(range(10))
def test_attached_length_iterator():
    """The attached length is reported as-is, even when it is wrong."""
    evens = (x for x in range(10) if x % 2 == 0)
    wrapped = AttachedLengthIterator(evens, 3)  # deliberately wrong length
    assert len(wrapped) == 3
    assert list(wrapped) == [0, 2, 4, 6, 8]
def test_callback_iterator_on_iterable():
    """Callbacks fire once per item pulled from a wrapped iterable."""
    callback = Mock()
    wrapped = CallbackIterator(range(10))
    wrapped.callbacks.append(callback)
    filtered = (value for value in wrapped if value % 2 == 0)
    assert len(wrapped) == 10
    assert list(filtered) == [0, 2, 4, 6, 8]
    assert callback.call_count == 10
def test_callback_iterator_on_iterator():
    """Callbacks also fire once per item for a bare generator source."""
    callback = Mock()
    wrapped = CallbackIterator(x for x in range(10))
    wrapped.callbacks.append(callback)
    filtered = (value for value in wrapped if value % 2 == 0)
    assert list(filtered) == [0, 2, 4, 6, 8]
    assert callback.call_count == 10
# test convenience functions
def test_wrap_source():
    """wrap_source exposes the source wrapper and preserves iteration."""
    callback = Mock()
    wrapper = wrap_source(range(10))
    wrapper.source.callbacks.append(callback)
    filtered = wrapper(value for value in wrapper if value % 2 == 0)
    assert len(wrapper.source) == 10
    assert list(filtered) == [0, 2, 4, 6, 8]
    assert callback.call_count == 10
def test_wrap_source_len():
    """wrap_source_len attaches an explicit length to a bare generator."""
    callback = Mock()
    wrapper = wrap_source_len((value for value in range(10)), 10)
    wrapper.source.callbacks.append(callback)
    filtered = wrapper(value for value in wrapper if value % 2 == 0)
    assert len(wrapper.source) == 10
    assert list(filtered) == [0, 2, 4, 6, 8]
    assert callback.call_count == 10
| [
{
"point_num": 1,
"id": "every_function_has_docstring",
"question": "Does every function in this file have a docstring?",
"answer": false
},
{
"point_num": 2,
"id": "all_function_names_snake_case",
"question": "Are all function names in this file written in snake_case?",
"answer"... | 3 | tests/test_iterator_wrappers.py | smheidrich/progress-passthrough |
"""Warnings raised by the library.
"""
class ProntoWarning(Warning):
    """The class for all warnings raised by `pronto`.

    Filtering on this class captures every warning the library emits.
    """
    pass
class NotImplementedWarning(ProntoWarning, NotImplementedError):
    """Some part of the code is yet to be implemented.

    Doubles as NotImplementedError so it can be raised or warned about.
    """
    pass
class UnstableWarning(ProntoWarning):
    """The behaviour of the executed code might change in the future.
    """
    pass
class SyntaxWarning(ProntoWarning, SyntaxError):
    """The parsed document contains incomplete or unsound constructs.

    Note: this deliberately shadows the builtin SyntaxWarning name
    within pronto's namespace.
    """
    def __init__(self, *args, **kwargs):
        # Both bases are initialised explicitly (rather than via super())
        # so each side of the multiple inheritance sees the arguments.
        ProntoWarning.__init__(self, *args, **kwargs)
        SyntaxError.__init__(self, *args, **kwargs)
| [
{
"point_num": 1,
"id": "every_class_has_docstring",
"question": "Does every class in this file have a docstring?",
"answer": true
},
{
"point_num": 2,
"id": "has_multiple_inheritance",
"question": "Does any class in this file use multiple inheritance?",
"answer": true
},
{
... | 3 | pronto/utils/warnings.py | jiggylepcha/pronto |
# coding: utf-8
"""
Isilon SDK
Isilon SDK - Language bindings for the OneFS API # noqa: E501
OpenAPI spec version: 10
Contact: sdk@isilon.com
Generated by: https://github.com/swagger-api/swagger-codegen.git
"""
from __future__ import absolute_import
import unittest
import isi_sdk_9_0_0
from isi_sdk_9_0_0.models.result_directories_extended import ResultDirectoriesExtended # noqa: E501
from isi_sdk_9_0_0.rest import ApiException
class TestResultDirectoriesExtended(unittest.TestCase):
    """ResultDirectoriesExtended unit test stubs"""
    def setUp(self):
        # No fixtures needed for this generated stub.
        pass
    def tearDown(self):
        # Nothing to clean up.
        pass
    def testResultDirectoriesExtended(self):
        """Test ResultDirectoriesExtended"""
        # FIXME: construct object with mandatory attributes with example values
        # model = isi_sdk_9_0_0.models.result_directories_extended.ResultDirectoriesExtended()  # noqa: E501
        pass
if __name__ == '__main__':
    # Allow running this test module directly.
    unittest.main()
| [
{
"point_num": 1,
"id": "every_function_under_20_lines",
"question": "Is every function in this file shorter than 20 lines?",
"answer": true
},
{
"point_num": 2,
"id": "every_function_has_docstring",
"question": "Does every function in this file have a docstring?",
"answer": fals... | 3 | isi_sdk_9_0_0/test/test_result_directories_extended.py | mohitjain97/isilon_sdk_python |
import os
import traceback
from uuid import uuid4
import cv2
from cvlog import log
from cvlog.config import Config
from cvtest.csv_reporter import CsvReporter
class Reporter:
    """Singleton that writes a CSV test report plus images for failures."""
    def __new__(cls):
        # Classic singleton: reuse the single instance across call sites.
        if not hasattr(cls, 'instance') or not cls.instance:
            cls.instance = super().__new__(cls)
            # Name-mangled to _Reporter__initialised — the same attribute
            # that __init__ checks below.
            cls.instance.__initialised = False
        return cls.instance
    def __init__(self):
        # __init__ runs on every Reporter() call; configure only once.
        if not self.__initialised:
            self.__initialised = True
            report_path = Config().log_path() + "/report"
            self.image_path = report_path + '/images/'
            self.reporter = CsvReporter(report_path + "/report.csv")
    def result(self, input_image, key_pressed, output_img):
        """Record PASS when the reviewer pressed 'y'; otherwise FAIL with
        a saved copy of the output image as the message."""
        message = ""
        if (key_pressed == ord("y")):
            result = "Pass"
        else:
            result = "Fail"
            message = self.__save_image(output_img)
        self.reporter.log_report([input_image, result.upper(), message])
    def __save_image(self, img):
        """Write img to a uniquely-named PNG and return its path."""
        if not os.path.exists(self.image_path):
            os.makedirs(self.image_path)
        output_path = self.image_path + str(uuid4()) + '.png'
        cv2.imwrite(output_path, img)
        return output_path
    def error(self, input_image, ex):
        """Record an ERROR row with a compact stack trace."""
        self.reporter.log_report([input_image, "ERROR", self.__stack_trace(ex)])
    def __stack_trace(self, ex):
        """Summarise up to 10 frames (skipping the outermost) as file:line;"""
        stacks = traceback.extract_tb(ex.__traceback__)[1:]
        stack_trace = ""
        for x in stacks[:10]:
            stack_trace += x.filename + ":" + str(x.lineno) + ";"
        return stack_trace
def report(input_image_path, processing_method):
    """Run processing_method over each image path, show the result, and
    record a pass/fail/error row for every image."""
    for path in input_image_path:
        try:
            result_img = processing_method(path)
            pressed = log.show_image(path, result_img)
        except Exception as err:
            Reporter().error(path, err)
        else:
            Reporter().result(path, pressed, result_img)
| [
{
"point_num": 1,
"id": "all_params_annotated",
"question": "Does every function parameter in this file have a type annotation (excluding self/cls)?",
"answer": false
},
{
"point_num": 2,
"id": "more_functions_than_classes",
"question": "Does this file define more functions than clas... | 3 | cvtest/reporter.py | AndreyChertckov/opencv-log |
import numpy as np
from irf.ensemble import wrf
from sklearn.base import BaseEstimator
class IRFClassifier(BaseEstimator):
    """scikit-learn style wrapper around irf's weighted random forest."""
    def __init__(self):
        self.model = wrf()
        # Expose the underlying model's prediction methods directly.
        self.predict = self.model.predict
        self.predict_proba = self.model.predict_proba
    def fit(self, X, y, lambda_reg=0.1, sample_weight=None):
        '''fit a linear model with integer coefficient and L1 regularization

        NOTE(review): lambda_reg and sample_weight are accepted but never
        forwarded to wrf.fit — confirm whether that is intended.

        Params
        ------
        _sample_weight: np.ndarray (n,)
            weight for each individual sample
        '''
        # Accept pandas inputs by unwrapping to the underlying ndarray.
        if 'pandas' in str(type(X)):
            X = X.values
        if 'pandas' in str(type(y)):
            y = y.values
        assert type(X) == np.ndarray, 'inputs should be ndarrays'
        assert type(y) == np.ndarray, 'inputs should be ndarrays'
        self.model.fit(X, y, keep_record=False)
| [
{
"point_num": 1,
"id": "every_function_under_20_lines",
"question": "Is every function in this file shorter than 20 lines?",
"answer": true
},
{
"point_num": 2,
"id": "no_function_exceeds_5_params",
"question": "Does every function in this file take 5 or fewer parameters (excluding ... | 3 | imodels/tree/iterative_random_forest/iterative_random_forest.py | csinva/interpretability-workshop |
from fasture import specs
import time
def some_func(one, two):
    """This takes time to compute

    Sleeps 25 seconds to simulate an expensive computation, then
    returns the sum of the two arguments.
    """
    time.sleep(25)
    return one + two
@specs.fasture_job_impl
def fasture_job(json_payload: dict) -> dict:
    """Show this as summary in openAPI
    and the rest as long description
    """
    # some validation
    # Keyword-expand the payload into some_func; its keys must match
    # some_func's parameter names (one, two).
    res = some_func(**json_payload)
    return res
| [
{
"point_num": 1,
"id": "no_function_exceeds_5_params",
"question": "Does every function in this file take 5 or fewer parameters (excluding self/cls)?",
"answer": true
},
{
"point_num": 2,
"id": "all_params_annotated",
"question": "Does every function parameter in this file have a ty... | 3 | plugin_example/plugin.py | nlaurance/fasture |
class Solution:
    """LeetCode 33: search a target in a rotated sorted array in O(log n)."""
    def search(self, nums, target: int) -> int:
        """Return the index of target in the rotated array, or -1."""
        def find_rotate_index(start, end):
            # Index of the smallest element (the rotation pivot).
            if nums[start] < nums[end]:
                return 0
            while start <= end:
                mid = (start + end) // 2
                # The pivot is exactly where the sorted order breaks.
                if nums[mid] > nums[mid + 1]:
                    return mid + 1
                else:
                    if nums[mid] < nums[start]:
                        end = mid - 1
                    else:
                        start = mid + 1
            return 0
        def binary_search(start, end):
            # Standard binary search within [start, end]; -1 when absent.
            while start <= end:
                mid = int((start + end) / 2)
                if nums[mid] == target:
                    return mid
                elif nums[mid] < target:
                    start = mid + 1
                else:
                    end = mid - 1
            return -1
        n = len(nums)
        if n == 0:
            return -1
        if n == 1:
            return 0 if nums[0] == target else -1
        rotate_index = find_rotate_index(0, n - 1)
        # print(rotate_index)
        if nums[rotate_index] == target:
            return rotate_index
        # Search each sorted half independently.
        left = binary_search(0, rotate_index - 1)
        right = binary_search(rotate_index, n - 1)
        # print(left, right)
        if left != -1:
            return left
        elif right != -1:
            return right
        else:
            return -1
# Ad-hoc manual check: expect index 0 for target 1 in [1, 3].
slu = Solution()
print(slu.search([1,3], 1))
| [
{
"point_num": 1,
"id": "has_nested_function_def",
"question": "Does this file contain any function defined inside another function?",
"answer": true
},
{
"point_num": 2,
"id": "all_function_names_snake_case",
"question": "Are all function names in this file written in snake_case?",
... | 3 | leetcode/python/medium/p033_search.py | kefirzhang/algorithms |
import pandas as pd
from pandas import DataFrame
from ..transformers import Transformer
from ..internal import ColumnResolver, listify
class TransformerExecutor:
    """Applies configured Transformer pipelines to dataframe columns."""

    def validate(self, df, columnDefinitions):
        """Assert every configured transformer is a Transformer instance."""
        for definition in columnDefinitions:
            for candidate in listify(definition['transformers']):
                assert(isinstance(candidate, Transformer))

    def transform(self, df, transformations):
        """Run each transformation and append `<col>_transformed` columns
        to a copy of *df*."""
        # Accept either a single definition or a list of them.
        transformations = listify(transformations)
        self.validate(df, transformations)
        resolver = ColumnResolver()
        # Every transformed column is collected here, then all of them are
        # appended to the original dataframe in a single concat.
        transformed = DataFrame()
        for spec in transformations:
            # Columns may be Columns.All, one name, or a list of names.
            for name in resolver.resolve(spec['columns'], df):
                transformed[f"{name}_transformed"] = self.__apply_pipeline(
                    df[name],
                    listify(spec['transformers'])
                )
        return pd.concat([df, transformed], axis=1)

    def __apply_pipeline(self, column, transformers):
        """Chain each transformer over the column, in order."""
        for transformer in transformers:
            column = transformer.transform(column)
        return column
| [
{
"point_num": 1,
"id": "all_params_annotated",
"question": "Does every function parameter in this file have a type annotation (excluding self/cls)?",
"answer": false
},
{
"point_num": 2,
"id": "every_function_under_20_lines",
"question": "Is every function in this file shorter than ... | 3 | src/pandashape/internal/TransformerExecutor.py | jammerware/pandashape |
from django.db import models
from django.contrib.auth.models import User
from django.urls import reverse
from gallery.models import Photo
class Post(models.Model):
    """A blog post written by a user, optionally linked to gallery photos."""
    title = models.CharField(max_length=100, null=False, blank=False)
    author = models.ForeignKey(User, on_delete=models.CASCADE)
    body = models.TextField(null=False, blank=False)
    created_at = models.DateTimeField(auto_now_add=True)
    updated_at = models.DateTimeField(auto_now=True)
    # auto_now_add fields are non-editable by default; re-enable editing
    # so the creation date can be adjusted in forms/admin.
    created_at.editable = True
    photos = models.ManyToManyField(Photo, related_name='post')

    def __str__(self):
        """Use the title as the human-readable representation."""
        return self.title

    def get_absolute_url(self):
        """Canonical URL for this post (Django's conventional spelling)."""
        return reverse('post-detail', kwargs={'pk': self.pk})

    # Backward-compatible alias: the method was originally published with
    # this misspelling, so any existing callers keep working.
    get_absoulte_url = get_absolute_url
| [
{
"point_num": 1,
"id": "has_multiple_inheritance",
"question": "Does any class in this file use multiple inheritance?",
"answer": false
},
{
"point_num": 2,
"id": "every_function_has_docstring",
"question": "Does every function in this file have a docstring?",
"answer": false
... | 3 | blog/models.py | PsychedelicMonkey/Portfolio-v2 |
import json
def flatten(data):
    """Convert one {"text", "labels"} record into a flat list of
    (token, BIO-tag) pairs; tokens outside every label get the "O" tag."""
    tagged = []
    end = 0
    text = data["text"]
    for label in sorted(data["labels"]):
        start, stop, tag = label[0], label[1], label[2]
        # Everything between the previous label and this one is untagged.
        tagged.extend((tok, "O") for tok in text[end:start].strip().split())
        span = text[start:stop]
        # First token of the span gets B-<tag>, the rest get I-<tag>.
        tagged.extend(
            (tok, ("B-" if i == 0 else "I-") + tag)
            for i, tok in enumerate(span.split(" "))
        )
        end = stop
    # Trailing text after the last label is untagged too.
    tagged.extend((tok, "O") for tok in text[end:].strip().split())
    return tagged


def flatten_all(datas):
    """Flatten every record in *datas*."""
    return [flatten(record) for record in datas]
| [
{
"point_num": 1,
"id": "has_nested_function_def",
"question": "Does this file contain any function defined inside another function?",
"answer": false
},
{
"point_num": 2,
"id": "every_function_under_20_lines",
"question": "Is every function in this file shorter than 20 lines?",
... | 3 | jsonl_to_conll/convert.py | joeyism/jsonl-to-conll |
import fileinput
# Read all input (stdin or files named on the command line).
f = fileinput.input()
# First line: number of test cases; a blank line follows it.
cases = int(next(f))
next(f)
def solve(n, jobs):
    """Print the fine-minimising job order (1-based): ascending
    time/fine ratio, ties broken by the original job number."""
    order = sorted(
        range(1, n + 1),
        key=lambda job: (jobs[job - 1][0] / jobs[job - 1][1], job),
    )
    print(*order)


def solve_case(f):
    """Read one test case (job count, then time/fine pairs) and solve it."""
    n = int(next(f))
    jobs = [tuple(map(int, next(f).split())) for _ in range(n)]
    solve(n, jobs)
# Cases are separated by a blank line in both the input and the output.
for i in range(cases):
    if i > 0:
        print()
    next(f)  # consume the blank separator line before each case
    solve_case(f)
| [
{
"point_num": 1,
"id": "all_params_annotated",
"question": "Does every function parameter in this file have a type annotation (excluding self/cls)?",
"answer": false
},
{
"point_num": 2,
"id": "no_function_exceeds_5_params",
"question": "Does every function in this file take 5 or fe... | 3 | python/UVA/10026_shoemaker.py | gineer01/programming-challenges |
#!/usr/bin/env python
# -*- encoding: utf-8 -*-
#
# Copyright 2014 Telefónica Investigación y Desarrollo, S.A.U
#
# This file is part of FI-WARE project.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
#
# You may obtain a copy of the License at:
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
#
# See the License for the specific language governing permissions and
# limitations under the License.
#
# For those usages not covered by the Apache version 2.0 License please
# contact with opensource@tid.es
#
__author__ = 'gjp'
from django.test import TestCase
class MyTests(TestCase):
    """Smoke tests for the /info and /fail endpoints."""
    def test_views_fail_without_server_info(self):
        """ This test should return a 500 error if there is no information about server created in the
        data base
        """
        response = self.client.get("/info")
        self.assertEqual(response.status_code, 500)
    def test_views_fail(self):
        """POSTing an empty body to /fail must be rejected with 400."""
        response = self.client.post("/fail", data={})
        self.assertEqual(response.status_code, 400)
| [
{
"point_num": 1,
"id": "more_functions_than_classes",
"question": "Does this file define more functions than classes?",
"answer": true
},
{
"point_num": 2,
"id": "every_function_has_docstring",
"question": "Does every function in this file have a docstring?",
"answer": false
}... | 3 | fiware_cloto/cloto/tests/test_views.py | telefonicaid/fiware-cloto |
import pytest
from azureml_appinsights_logger.console_logger import ConsoleLogger
from azureml_appinsights_logger.logger_interface import Severity
@pytest.fixture
def mock_run(mocker):
    """A mock AzureML run; get_run_id_and_set_context is patched so the
    logger reports the fixed run id 'MYRUN'."""
    mocker.patch.object(ConsoleLogger,
                        'get_run_id_and_set_context',
                        return_value="MYRUN")
    return mocker.MagicMock()
def test_log_metric_calls_log(mocker, mock_run, capsys):
    """log_metric should print the run id plus every metric field."""
    logger = ConsoleLogger(mock_run)
    logger.log_metric("FOO", "BAZ", "BAR", False)
    printed = capsys.readouterr().out
    for fragment in ("MYRUN", "FOO", "BAR", "BAZ"):
        assert fragment in printed
def test_logs_nothing_when_severity_lower(mocker, mock_run, capsys):
    """Messages below the configured level must produce no output."""
    logger = ConsoleLogger(mock_run)
    logger.level = Severity.WARNING
    logger.custom_dimensions = 'BAR'
    logger.log("FOO", Severity.INFO)
    assert capsys.readouterr().out == ""
def test_logs_when_severity_higher(mocker, mock_run, capsys):
    """Messages at or above the configured level are printed."""
    logger = ConsoleLogger(mock_run)
    logger.level = Severity.WARNING
    logger.log("FOO", Severity.WARNING)
    assert "FOO" in capsys.readouterr().out
def test_logs_with_default_severity(mocker, mock_run, capsys):
    """Calling log() without a severity uses the (patched) default.

    The patch rewrites ConsoleLogger.log.__defaults__ so the default
    severity becomes WARNING for this test only; the spy then verifies
    log() was indeed called with no explicit severity.
    """
    # arrange:
    mocker.patch.object(ConsoleLogger.log, "__defaults__", (Severity.WARNING,))
    logger = ConsoleLogger(mock_run)
    logger.level = Severity.INFO
    spy = mocker.spy(logger, 'log')
    # act:
    logger.log("FOO")
    # assert:
    spy.assert_called_once_with("FOO")
    captured = capsys.readouterr()
    assert "FOO" in captured.out
| [
{
"point_num": 1,
"id": "every_function_has_docstring",
"question": "Does every function in this file have a docstring?",
"answer": false
},
{
"point_num": 2,
"id": "all_function_names_snake_case",
"question": "Are all function names in this file written in snake_case?",
"answer"... | 3 | common/azureml_appinsights_logger/tests/test_console_logger.py | h2floh/MLOpsManufacturing-1 |
def extract_history(input):
    """extract the past metrics, tagging them with prev=True"""
    history = input['prev']
    # Mark the dict so rolling_score can tell past metrics from updates.
    history['prev'] = True
    return input['_id'], history
def extract_update(input):
    """extract the update data, from which we compute new metrics"""
    payload = input['update']
    # Carry the document id along with the update values.
    payload['_id'] = input['_id']
    return payload
def rolling_score(d1, d2, alpha=0.5):
    """computes rolling scores, decaying the past by alpha.
    the past metrics are identified by the `prev` key.
    any keys present in the update dict that are not in the past
    dict are carried over.

    Fix: the original did `del prev['prev']`, mutating the caller's
    dict (and raising KeyError when neither dict had the flag); the
    flag is now simply skipped without touching the inputs.
    """
    # figure out which dict is the previous metrics
    if 'prev' in d1 and d1['prev']:
        prev, update = d1, d2
    else:
        prev, update = d2, d1
    new = {}
    for k, v in prev.items():
        if k == 'prev':
            continue  # bookkeeping flag, not a metric
        if k in update:
            # exponential moving average: move v a fraction alpha
            # of the way toward the updated value
            new[k] = v + (alpha * (update[k] - v))
        else:
            new[k] = v
    # carry over update-only keys untouched
    for k in set(update.keys()) - set(new.keys()):
        new[k] = update[k]
    return new
| [
{
"point_num": 1,
"id": "every_function_under_20_lines",
"question": "Is every function in this file shorter than 20 lines?",
"answer": false
},
{
"point_num": 2,
"id": "every_function_has_docstring",
"question": "Does every function in this file have a docstring?",
"answer": tru... | 3 | coral/metrics/rolling.py | coralproject/atoll |
import pytest
from .solution import solve
# Example 10x10 octopus grid from the day 11 puzzle statement.
INPUT = """5483143223
2745854711
5264556173
6141336146
6357385478
4167524645
2176841721
6882881134
4846848554
5283751526
"""
def test_solve():
    # 1656 is the puzzle's published flash total for the example grid.
    assert solve(INPUT) == 1656
def test_solve2():
    # With a huge step budget, solve presumably stops at the first
    # all-flash step (195 for the example) — confirm against solution.py.
    assert solve(INPUT, 999999999999) == 195
| [
{
"point_num": 1,
"id": "all_return_types_annotated",
"question": "Does every function in this file have a return type annotation?",
"answer": false
},
{
"point_num": 2,
"id": "has_nested_function_def",
"question": "Does this file contain any function defined inside another function?... | 3 | 2021/day_11/test_solution.py | krother/advent_of_code |
class Fragment:
    """Minimal fragment base: subclasses override populate() and name()."""

    def __init__(self):
        """The base class holds no state."""
        pass

    def populate(self, data):
        """Base implementation ignores *data* and produces nothing."""
        return None

    def name(self):
        """Default identifier for an anonymous fragment."""
        return "noname"
{
"point_num": 1,
"id": "has_multiple_inheritance",
"question": "Does any class in this file use multiple inheritance?",
"answer": false
},
{
"point_num": 2,
"id": "more_functions_than_classes",
"question": "Does this file define more functions than classes?",
"answer": true
},... | 3 | src/template/fragment.py | filipfur/smoothsource |
from djangular import middleware
from django.test import SimpleTestCase
from django.http import HttpRequest, HttpResponse
class AngularJsonVulnerabilityMiddlewareTest(SimpleTestCase):
    """The middleware must prefix valid JSON responses and nothing else."""

    def _filtered(self, content_type, content, status=200):
        """Build a response and run it through a fresh middleware instance."""
        response = HttpResponse(content_type=content_type, content=content,
                                status=status)
        mware = middleware.AngularJsonVulnerabilityMiddleware()
        mware.process_response(HttpRequest(), response)
        return mware, response

    def test_that_middleware_does_nothing_to_html_requests(self):
        _, response = self._filtered('text/html', '<html></html>')
        self.assertEqual(response.content, '<html></html>')

    def test_that_middleware_does_nothing_to_js_requests(self):
        _, response = self._filtered('text/javascript', 'var blah = [];')
        self.assertEqual(response.content, 'var blah = [];')

    def test_that_middleware_does_nothing_to_invalid_json_requests(self):
        _, response = self._filtered('application/json', '[1, 2, 3]', status=400)
        self.assertEqual(response.content, '[1, 2, 3]')

    def test_that_middleware_adds_prefix_to_valid_json_requests(self):
        mware, response = self._filtered('application/json', '[1, 2, 3]')
        self.assertEqual(response.content, mware.CONTENT_PREFIX + '[1, 2, 3]')
| [
{
"point_num": 1,
"id": "every_function_under_20_lines",
"question": "Is every function in this file shorter than 20 lines?",
"answer": true
},
{
"point_num": 2,
"id": "every_function_has_docstring",
"question": "Does every function in this file have a docstring?",
"answer": fals... | 3 | djangular/tests/test_middleware.py | mohamedmehdigara/djangular |
import datetime
from django.db import models
from django.utils import timezone
# Create your models here.
class Question(models.Model):
    """A poll question with its publication date."""
    question_text = models.CharField(max_length=200)
    pub_date = models.DateTimeField('date published')

    def __str__(self):
        return self.question_text

    def was_published_recently(self):
        """True when pub_date falls within the last 24 hours (not future)."""
        now = timezone.now()
        return now - datetime.timedelta(days=1) <= self.pub_date <= now

    # Admin list-display metadata for was_published_recently.
    # (A commented-out duplicate of the method was removed.)
    was_published_recently.admin_order_field = 'pub_date'
    was_published_recently.boolean = True
    was_published_recently.short_description = 'Published recently?'
class Choice(models.Model):
    """One selectable answer for a Question, with its vote tally."""
    question = models.ForeignKey(Question, on_delete=models.CASCADE)
    choice_text = models.CharField(max_length=200)
    votes = models.IntegerField(default=0)
    def __str__(self):
        # Human-readable representation used in the admin and shell.
        return self.choice_text
| [
{
"point_num": 1,
"id": "more_functions_than_classes",
"question": "Does this file define more functions than classes?",
"answer": true
},
{
"point_num": 2,
"id": "every_class_has_docstring",
"question": "Does every class in this file have a docstring?",
"answer": false
},
{
... | 3 | polls/models.py | ibowditch/polls |
def strip(value):
    """Trim surrounding whitespace; pass falsy values through unchanged."""
    return value.strip() if value else value
def logical_xor(a, b):
    """Exclusive-or of the truthiness of *a* and *b*."""
    return bool(a) != bool(b)
| [
{
"point_num": 1,
"id": "no_function_exceeds_5_params",
"question": "Does every function in this file take 5 or fewer parameters (excluding self/cls)?",
"answer": true
},
{
"point_num": 2,
"id": "all_params_annotated",
"question": "Does every function parameter in this file have a ty... | 3 | neurovault/utils.py | aphroditepv/NeuroVault |
# © 2019 Nokia
# Licensed under the BSD 3 Clause license
# SPDX-License-Identifier: BSD-3-Clause
from radish_ext.radish.template_with_nested_data_replacer import TemplateForNestedDict
from radish_ext.sdk.l import Logging
class TestDataBase(object):
    """Holds shared test data and substitutes it into templates."""
    def __init__(self, cfg) -> None:
        super().__init__()
        # 'cfg' exposes configuration to templates; 'generate' collects
        # values produced during the test run.
        self.data = {'cfg': cfg,
                     'generate': {}
                     }
        self.log = Logging.get_object_logger(self)
    def replace_test_data(self, template, **kwargs):
        """Substitute self.data (plus any extra kwargs) into *template*;
        unknown placeholders are left untouched (safe substitution)."""
        data = TemplateForNestedDict(template).safe_substitute(**self.data, **kwargs)
        return data
{
"point_num": 1,
"id": "more_functions_than_classes",
"question": "Does this file define more functions than classes?",
"answer": true
},
{
"point_num": 2,
"id": "every_function_under_20_lines",
"question": "Is every function in this file shorter than 20 lines?",
"answer": true
... | 3 | pkg_radish_ext/radish_ext/radish/base_test_data.py | bbielicki/radish-bdd-extensions |
import struct
import unittest
from typing import List
import pyparcel
# Signed 32-bit test values: INT32_MIN/MAX plus assorted magnitudes,
# symmetric around zero.
DATA: List[int] = [
    -1 << 31,
    -1000,
    -57,
    -26,
    -20,
    -5,
    -2,
    -1,
    0,
    1,
    2,
    5,
    20,
    57,
    1000,
    (1 << 31) - 1,
]
class MyTestCase(unittest.TestCase):
    """pyparcel must agree with struct's native 32-bit int packing."""

    def test_pack(self):
        for value in DATA:
            self.assertEqual(pyparcel.pack(value), struct.pack("i", value))

    def test_pack_unpack(self):
        for value in DATA:
            self.assertEqual(value, pyparcel.unpack(pyparcel.pack(value), int()))
if __name__ == "__main__":
    # Allow running this test module directly.
    unittest.main()
| [
{
"point_num": 1,
"id": "has_nested_function_def",
"question": "Does this file contain any function defined inside another function?",
"answer": false
},
{
"point_num": 2,
"id": "has_multiple_inheritance",
"question": "Does any class in this file use multiple inheritance?",
"answ... | 3 | tests/test_int.py | stephend017/pyparcel |
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import sqlalchemy
from keystone.common.sql import migration_helpers
def list_constraints(migrate_engine):
    """Describe the assignment.role_id -> role.id foreign key to drop."""
    meta = sqlalchemy.MetaData()
    meta.bind = migrate_engine
    assignment = sqlalchemy.Table('assignment', meta, autoload=True)
    role = sqlalchemy.Table('role', meta, autoload=True)
    return [{
        'table': assignment,
        'fk_column': 'role_id',
        'ref_column': role.c.id,
    }]


def upgrade(migrate_engine):
    """Drop the assignment->role foreign key constraint."""
    # SQLite does not support constraints, and querying the constraints
    # raises an exception
    if migrate_engine.name == 'sqlite':
        return
    migration_helpers.remove_constraints(list_constraints(migrate_engine))
| [
{
"point_num": 1,
"id": "every_function_under_20_lines",
"question": "Is every function in this file shorter than 20 lines?",
"answer": true
},
{
"point_num": 2,
"id": "has_nested_function_def",
"question": "Does this file contain any function defined inside another function?",
"... | 3 | keystone/common/sql/migrate_repo/versions/062_drop_assignment_role_fk.py | yanheven/keystone |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# See the full API here:
# https://poloniex.com/support/api/
__author__ = 'maxim'
from six import string_types
import pandas as pd
from util import *
# Candle periods (in seconds) accepted by the Poloniex chart endpoint.
AVAILABLE_PERIODS = [300, 900, 1800, 7200, 14400, 86400]
def get_chart_data(pair, start_time, end_time, period):
    """Fetch OHLC candlestick data for *pair* between the given unix
    timestamps at the given period, as a pandas DataFrame.

    Fix: the query string had a mojibake separator ("¤cyPair") — the
    Poloniex API expects the "currencyPair" parameter.
    """
    url = 'https://poloniex.com/public?command=returnChartData&currencyPair=%s&start=%d&end=%d&period=%d' % \
        (pair, start_time, end_time, period_to_seconds(period))
    info('Fetching %s: %s' % (pair, url))
    df = pd.read_json(url, convert_dates=False)
    info('Fetched %s (%s)' % (pair, period_to_human(period)))
    return df
def get_24h_volume():
    """Fetch the rolling 24-hour volume for every market as a DataFrame."""
    url = 'https://poloniex.com/public?command=return24hVolume'
    info('Fetching %s' % url)
    return pd.read_json(url)
def period_to_human(period):
    """Map a period in seconds to its short human label ('5m', 'day', ...).

    Strings are assumed to already be labels and pass through; unknown
    numbers come back as their decimal string.
    """
    if isinstance(period, string_types):
        return period
    names = {300: '5m', 900: '15m', 1800: '30m',
             7200: '2h', 14400: '4h', 86400: 'day'}
    return names.get(period, str(period))
def period_to_seconds(period):
    """Map a human period label ('5m', 'day', ...) to seconds.

    Ints pass through unchanged; unrecognised strings are parsed with int().
    """
    if isinstance(period, int):
        return period
    seconds = {'5m': 300, '15m': 900, '30m': 1800,
               '2h': 7200, '4h': 14400, 'day': 86400}
    try:
        return seconds[period]
    except KeyError:
        return int(period)
| [
{
"point_num": 1,
"id": "all_function_names_snake_case",
"question": "Are all function names in this file written in snake_case?",
"answer": true
},
{
"point_num": 2,
"id": "every_function_under_20_lines",
"question": "Is every function in this file shorter than 20 lines?",
"answ... | 3 | poloniex/api.py | maxim5/time_series_machine_learning |
import unittest
import tfexpt
import expt
from tensorlog import matrixdb
from tensorlog import program
from tensorlog import dataset
class TestNative(unittest.TestCase):
    """End-to-end accuracy/timing check of the native Tensorlog grid task."""
    def setUp(self):
        # Grid size, max proof depth, training epochs.
        (self.n,self.maxD,self.epochs) = (16,8,20)
        (self.factFile,trainFile,testFile) = expt.genInputs(self.n)
        # (self.factFile,self.trainFile,self.testFile) = ('inputs/g16.cfacts','inputs/g16-train.exam','inputs/g16-test.exam')
        self.db = matrixdb.MatrixDB.loadFile(self.factFile)
        self.prog = program.Program.loadRules("grid.ppr",self.db)
        self.trainData = dataset.Dataset.loadExamples(self.prog.db,trainFile)
        self.testData = dataset.Dataset.loadExamples(self.prog.db,testFile)
    def testIt(self):
        """Accuracy must reach 0.85 and each timed query must stay under 50 ms."""
        acc,loss = expt.accExpt(self.prog,self.trainData,self.testData,self.n,self.maxD,self.epochs)
        print('acc',acc)
        self.assertTrue(acc >= 0.85)
        times = expt.timingExpt(self.prog)
        for t in times:
            print('time',t)
            self.assertTrue(t < 0.05)
class TestAccTF(unittest.TestCase):
    """Same grid task, but trained through the TensorFlow cross-compiler."""
    def setUp(self):
        # Grid size, max proof depth, training epochs.
        (self.n,self.maxD,self.epochs) = (16,8,20)
        (self.factFile,self.trainFile,self.testFile) = expt.genInputs(self.n)
        (self.tlog,self.trainData,self.testData) = tfexpt.setup_tlog(self.maxD,self.factFile,self.trainFile,self.testFile)
    def testIt(self):
        """Accuracy after training must reach 0.85."""
        acc = tfexpt.trainAndTest(self.tlog,self.trainData,self.testData,self.epochs)
        print('acc',acc)
        self.assertTrue(acc >= 0.85)
if __name__ == "__main__":
    # Allow running this test module directly.
    unittest.main()
| [
{
"point_num": 1,
"id": "every_function_has_docstring",
"question": "Does every function in this file have a docstring?",
"answer": false
},
{
"point_num": 2,
"id": "every_class_has_docstring",
"question": "Does every class in this file have a docstring?",
"answer": false
},
... | 3 | datasets/grid/testexpt.py | saraswat/TensorLog |
# Copyright 2013-2020 Lawrence Livermore National Security, LLC and other
# Spack Project Developers. See the top-level COPYRIGHT file for details.
#
# SPDX-License-Identifier: (Apache-2.0 OR MIT)
import platform
import pytest
import spack.main
import spack.binary_distribution
# Command-line entry point under test.
buildcache = spack.main.SpackCommand('buildcache')
@pytest.fixture()
def mock_get_specs(database, monkeypatch):
    """Make binary_distribution.get_specs return the local DB's specs."""
    specs = database.query_local()
    monkeypatch.setattr(
        spack.binary_distribution, 'get_specs', lambda x: specs
    )
@pytest.mark.skipif(
    platform.system().lower() != 'linux',
    reason='implementation for MacOS still missing'
)
@pytest.mark.db
def test_buildcache_preview_just_runs(database):
    """Smoke test: `spack buildcache preview mpileaks` runs without raising."""
    buildcache('preview', 'mpileaks')
@pytest.mark.skipif(
    platform.system().lower() != 'linux',
    reason='implementation for MacOS still missing'
)
@pytest.mark.db
@pytest.mark.regression('13757')
def test_buildcache_list_duplicates(mock_get_specs, capsys):
    """Each matching spec appears exactly once in `buildcache list` output.

    Regression test for #13757: with get_specs mocked to the local DB,
    the three mpileaks installs must each be listed a single time.
    """
    with capsys.disabled():
        listing = buildcache('list', 'mpileaks', '@2.3')
    assert listing.count('mpileaks') == 3
| [
{
"point_num": 1,
"id": "every_function_has_docstring",
"question": "Does every function in this file have a docstring?",
"answer": false
},
{
"point_num": 2,
"id": "all_params_annotated",
"question": "Does every function parameter in this file have a type annotation (excluding self/... | 3 | lib/spack/spack/test/cmd/buildcache.py | ttroy50/spack |
# -*- coding: utf-8 -*-
from SMLite import SMLite
from ItemStruct._SMLite_ConfigState import _SMLite_ConfigState
class SMLiteBuilder(object):
    """Builder that collects state configurations and produces an SMLite
    state machine.  Once Build() is called the builder is sealed and no
    further configuration is accepted."""

    def __init__(self):
        """Start with no configured states and an unsealed builder."""
        self.__states = {}
        self.__builded = False

    def Configure(self, state):
        """Register *state* and return its _SMLite_ConfigState for chaining.

        Raises Exception when the builder has already been built, or when
        the state was configured before (duplicates are rejected).
        """
        if self.__builded:
            raise Exception("shouldn't configure builder after builded.")
        if state in self.__states:
            raise Exception("state is already exists.")
        config = _SMLite_ConfigState(state)
        self.__states[state] = config
        return config

    def Build(self, init_state):
        """Seal the builder and return an SMLite machine at *init_state*."""
        self.__builded = True
        return SMLite(init_state, self.__states)
# When run directly, show the SMLite class documentation.
if __name__ == '__main__':
    print (SMLite.__doc__)
| [
{
"point_num": 1,
"id": "has_multiple_inheritance",
"question": "Does any class in this file use multiple inheritance?",
"answer": false
},
{
"point_num": 2,
"id": "all_function_names_snake_case",
"question": "Are all function names in this file written in snake_case?",
"answer":... | 3 | src_python/PySMLite/SMLiteBuilder.py | fawdlstty/smlite |
# -*- coding: utf-8 -*-
"""
sphinx.writers.websupport
~~~~~~~~~~~~~~~~~~~~~~~~~
sphinx.websupport writer that adds comment-related annotations.
:copyright: Copyright 2007-2011 by the Sphinx team, see AUTHORS.
:license: BSD, see LICENSE for details.
"""
from sphinx.writers.html import HTMLTranslator
from sphinx.util.websupport import is_commentable
class WebSupportTranslator(HTMLTranslator):
    """HTML translator that tags commentable nodes for sphinx.websupport.

    Each commentable node is registered with the builder's storage backend,
    and its HTML id/class attributes are rewritten so the client-side
    comment UI can locate it.
    """

    def __init__(self, builder, *args, **kwargs):
        HTMLTranslator.__init__(self, builder, *args, **kwargs)
        # CSS class marking elements that accept comments.
        self.comment_class = 'sphinx-has-comment'

    def dispatch_visit(self, node):
        """Annotate commentable nodes, then do the normal HTML dispatch."""
        if is_commentable(node):
            self.handle_visit_commentable(node)
        HTMLTranslator.dispatch_visit(self, node)

    def handle_visit_commentable(self, node):
        """Store *node* and rewrite its HTML ids for the comment system."""
        self.add_db_node(node)
        # The comment id goes in the HTML id attribute.  If the node already
        # has an id (for indexing), emit an empty span with that id directly
        # before the node's HTML so existing anchors keep working.
        existing_ids = node.attributes['ids']
        if existing_ids:
            self.body.append('<span id="%s"></span>' % existing_ids[0])
        node.attributes['ids'] = ['s%s' % node.uid]
        node.attributes['classes'].append(self.comment_class)

    def add_db_node(self, node):
        """Add the node to websupport storage unless it is already there."""
        storage = self.builder.storage
        if not storage.has_node(node.uid):
            storage.add_node(id=node.uid,
                             document=self.builder.cur_docname,
                             source=node.rawsource or node.astext())
| [
{
"point_num": 1,
"id": "more_functions_than_classes",
"question": "Does this file define more functions than classes?",
"answer": true
},
{
"point_num": 2,
"id": "has_nested_function_def",
"question": "Does this file contain any function defined inside another function?",
"answe... | 3 | docs/target/sphinx/sphinx/writers/websupport.py | SergeyParamoshkin/sqoop2 |
# Copyright 2019 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Test for Utility functions."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from six.moves import range
import tensorflow.compat.v1 as tf
from object_detection.tpu_exporters import utils
class UtilsTest(tf.test.TestCase):
    """Tests for the bfloat16 -> float32 conversion helpers in utils."""

    def testBfloat16ToFloat32(self):
        """A bfloat16 tensor is converted to float32."""
        source = tf.random.uniform([2, 3], dtype=tf.bfloat16)
        converted = utils.bfloat16_to_float32(source)
        self.assertEqual(converted.dtype, tf.float32)

    def testOtherDtypesNotConverted(self):
        """Non-bfloat16 tensors keep their original dtype."""
        source = tf.ones([2, 3], dtype=tf.int32)
        converted = utils.bfloat16_to_float32(source)
        self.assertEqual(converted.dtype, tf.int32)

    def testBfloat16ToFloat32Nested(self):
        """Conversion recurses through dicts and lists, leaving ints alone."""
        nested = {
            'key1': tf.random.uniform([2, 3], dtype=tf.bfloat16),
            'key2': [tf.random.uniform([1, 2], dtype=tf.bfloat16)
                     for _ in range(3)],
            'key3': tf.ones([2, 3], dtype=tf.int32),
        }
        nested = utils.bfloat16_to_float32_nested(nested)
        self.assertEqual(nested['key1'].dtype, tf.float32)
        for tensor in nested['key2']:
            self.assertEqual(tensor.dtype, tf.float32)
        self.assertEqual(nested['key3'].dtype, tf.int32)
# Run the test suite with the TensorFlow test runner when executed directly.
if __name__ == '__main__':
    tf.test.main()
| [
{
"point_num": 1,
"id": "every_function_under_20_lines",
"question": "Is every function in this file shorter than 20 lines?",
"answer": true
},
{
"point_num": 2,
"id": "has_multiple_inheritance",
"question": "Does any class in this file use multiple inheritance?",
"answer": false... | 3 | research/object_detection/tpu_exporters/utils_test.py | duncanriach-nvidia/tensorflow-models |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.