content stringlengths 1 1.04M | input_ids listlengths 1 774k | ratio_char_token float64 0.38 22.9 | token_count int64 1 774k |
|---|---|---|---|
#Isaiah Lawlor - Lab Two
# Strings
# assigned outputs to 'speak' when input is inserted by user
name = "Isaiah Lawlor"
# Integers
# This is a numeric value that is assigned as the particular output for a command inserted by the user
age = 21
# Floats
# another numeric value that has is represented with a decimal place.
weight = 168.7
# Boolean
# This is a built in command that can inform the user of the truth or falsity of a line of code.
# Addition
print(55+22)
#Subtraction
print(76-24)
#Division
print(99/11)
#Multiplication
print(19*14)
#Modulo
print(142%19)
#Equation Demonstration
print(84/12*8+1)
#Change Excercise
number_into_string = str(37)
print(number_into_string)
string_into_number = int(37)
print(string_into_number)
string_into_float = float(37)
print(string_into_float)
number_into_boolean = bool(37)
print(number_into_boolean)
#List Excercise
New_England = ["Vermont","New Hampshire","Maine","Massachusetts","Rhode Island","Connecticut"]
| [
2,
39443,
9520,
3854,
4685,
532,
3498,
4930,
198,
2,
4285,
654,
198,
2,
8686,
23862,
284,
705,
47350,
6,
618,
5128,
318,
18846,
416,
2836,
198,
3672,
796,
366,
39443,
9520,
3854,
4685,
1,
198,
198,
2,
15995,
364,
198,
2,
770,
318,... | 2.996923 | 325 |
import os
import sys
import json
import pathlib
import argparse
import getpass
from services import registry
config_template = {
"DAEMON_LISTENING_PORT": None,
"ETHEREUM_JSON_RPC_ENDPOINT": "https://kovan.infura.io/",
"AGENT_CONTRACT_ADDRESS": None,
"PASSTHROUGH_ENDPOINT": None,
"PASSTHROUGH_ENABLED": True,
"BLOCKCHAIN_ENABLED": True,
"LOG_LEVEL": 10,
"PRIVATE_KEY": None,
}
agent_contracts = {
"kovan": {
"face_detect_server": "0x4cBe33Aa28eBBbFAa7d98Fa1c65af2FEf6885EF2",
"face_landmarks_server": "0x88DeC961e30F973b6DeDbae35754a3c557380BEE",
"face_alignment_server": "0xCB58410EE3B8E99ABd9774aB98951680E637b5F3",
"face_recognition_server": "0x8f3c5F4B522803DA8B07a257b6a558f61100452C",
}
}
if __name__ == "__main__":
main() | [
11748,
28686,
198,
11748,
25064,
198,
198,
11748,
33918,
198,
11748,
3108,
8019,
198,
11748,
1822,
29572,
198,
11748,
651,
6603,
628,
198,
6738,
2594,
1330,
20478,
198,
198,
11250,
62,
28243,
796,
1391,
198,
220,
220,
220,
366,
5631,
36... | 2.012407 | 403 |
from flask import Blueprint
from flask_login import login_required
from flask_restplus import Api
resources_api = Blueprint('api.resources', __name__)
@resources_api.before_request # login nécessaire pour tout le blueprint
@login_required
api = Api(resources_api)
from . import routes
| [
6738,
42903,
1330,
39932,
198,
6738,
42903,
62,
38235,
1330,
17594,
62,
35827,
198,
6738,
42903,
62,
2118,
9541,
1330,
5949,
72,
198,
198,
37540,
62,
15042,
796,
39932,
10786,
15042,
13,
37540,
3256,
11593,
3672,
834,
8,
198,
198,
31,
... | 3.406977 | 86 |
import logging
import typing
import torch
import torch.nn as nn
import torch.nn.functional as F
from .modeling_utils import ProteinConfig
from .modeling_utils import ProteinModel
from .modeling_utils import ValuePredictionHead
from .modeling_utils import SequenceClassificationHead
from .modeling_utils import SequenceToSequenceClassificationHead
from .modeling_utils import PairwiseContactPredictionHead
from ..registry import registry
logger = logging.getLogger(__name__)
@registry.register_task_model('fluorescence', 'onehot')
@registry.register_task_model('stability', 'onehot')
@registry.register_task_model('melting_point_regression', 'onehot')
@registry.register_task_model('remote_homology', 'onehot')
@registry.register_task_model('melting_point_classification', 'onehot')
@registry.register_task_model('secondary_structure', 'onehot')
@registry.register_task_model('contact_prediction', 'onehot')
| [
11748,
18931,
198,
11748,
19720,
198,
11748,
28034,
198,
11748,
28034,
13,
20471,
355,
299,
77,
198,
11748,
28034,
13,
20471,
13,
45124,
355,
376,
198,
198,
6738,
764,
4666,
10809,
62,
26791,
1330,
31702,
16934,
198,
6738,
764,
4666,
10... | 3.407407 | 270 |
from .Item import *
for id in range(0, 16):
handler.register(Concret)
handler.register(ConcretPowder)
| [
6738,
764,
7449,
1330,
1635,
198,
198,
1640,
4686,
287,
2837,
7,
15,
11,
1467,
2599,
628,
220,
220,
220,
21360,
13,
30238,
7,
3103,
66,
1186,
8,
628,
220,
220,
220,
21360,
13,
30238,
7,
3103,
66,
1186,
47,
34656,
8,
198
] | 2.627907 | 43 |
# lextab.py. This file automatically created by PLY (version 3.11). Don't edit!
_tabversion = '3.10'
_lextokens = set(('ABBREV_AXIS_AT', 'ABBREV_PATH_SEP', 'ABBREV_STEP_PARENT', 'ABBREV_STEP_SELF', 'AND_OP', 'AXISNAME', 'AXIS_SEP', 'CLOSE_BRACKET', 'CLOSE_PAREN', 'COLON', 'COMMA', 'DIV_OP', 'DOLLAR', 'EQUAL_OP', 'FLOAT', 'FUNCNAME', 'INTEGER', 'INTERSECT_OP', 'LITERAL', 'MINUS_OP', 'MOD_OP', 'MULT_OP', 'NCNAME', 'NODETYPE', 'OPEN_BRACKET', 'OPEN_PAREN', 'OR_OP', 'PATH_SEP', 'PIPELINE_OP', 'PLUS_OP', 'REL_OP', 'STAR_OP', 'UNION_OP'))
_lexreflags = 32
_lexliterals = ''
_lexstateinfo = {'INITIAL': 'inclusive'}
_lexstatere = {'INITIAL': [('(?P<t_LITERAL>"[^"]*"|\'[^\']*\')|(?P<t_FLOAT>\\d+\\.\\d*|\\.\\d+)|(?P<t_INTEGER>\\d+)|(?P<t_NCNAME>(([A-Z]|_|[a-z]|\\xc0-\\xd6]|[\\xd8-\\xf6]|[\\xf8-\\u02ff]|[\\u0370-\\u037d]|[\\u037f-\\u1fff]|[\\u200c-\\u200d]|[\\u2070-\\u218f]|[\\u2c00-\\u2fef]|[\\u3001-\\uD7FF]|[\\uF900-\\uFDCF]|[\\uFDF0-\\uFFFD]|[\\U00010000-\\U000EFFFF]))(([A-Z]|_|[a-z]|\\xc0-\\xd6]|[\\xd8-\\xf6]|[\\xf8-\\u02ff]|[\\u0370-\\u037d]|[\\u037f-\\u1fff]|[\\u200c-\\u200d]|[\\u2070-\\u218f]|[\\u2c00-\\u2fef]|[\\u3001-\\uD7FF]|[\\uF900-\\uFDCF]|[\\uFDF0-\\uFFFD]|[\\U00010000-\\U000EFFFF])|[-.0-9\\xb7\\u0300-\\u036f\\u203f-\\u2040])*)|(?P<t_REL_OP>[<>]=?)|(?P<t_ABBREV_STEP_PARENT>\\.\\.)|(?P<t_EQUAL_OP>!?=)|(?P<t_ABBREV_PATH_SEP>//)|(?P<t_ABBREV_STEP_SELF>\\.)|(?P<t_AXIS_SEP>::)|(?P<t_CLOSE_BRACKET>\\])|(?P<t_CLOSE_PAREN>\\))|(?P<t_DOLLAR>\\$)|(?P<t_OPEN_BRACKET>\\[)|(?P<t_OPEN_PAREN>\\()|(?P<t_PIPELINE_OP>::)|(?P<t_PLUS_OP>\\+)|(?P<t_STAR_OP>\\*)|(?P<t_UNION_OP>\\|)|(?P<t_ABBREV_AXIS_AT>@)|(?P<t_COLON>:)|(?P<t_COMMA>,)|(?P<t_MINUS_OP>-)|(?P<t_PATH_SEP>/)', [None, ('t_LITERAL', 'LITERAL'), ('t_FLOAT', 'FLOAT'), ('t_INTEGER', 'INTEGER'), (None, 'NCNAME'), None, None, None, None, (None, 'REL_OP'), (None, 'ABBREV_STEP_PARENT'), (None, 'EQUAL_OP'), (None, 'ABBREV_PATH_SEP'), (None, 'ABBREV_STEP_SELF'), (None, 'AXIS_SEP'), (None, 'CLOSE_BRACKET'), (None, 'CLOSE_PAREN'), (None, 'DOLLAR'), (None, 'OPEN_BRACKET'), (None, 'OPEN_PAREN'), (None, 'PIPELINE_OP'), (None, 'PLUS_OP'), (None, 'STAR_OP'), (None, 'UNION_OP'), (None, 'ABBREV_AXIS_AT'), (None, 'COLON'), (None, 'COMMA'), (None, 'MINUS_OP'), (None, 'PATH_SEP')])]}
_lexstateignore = {'INITIAL': ' \t\r\n'}
_lexstateerrorf = {'INITIAL': 't_error'}
_lexstateeoff = {}
| [
2,
443,
742,
397,
13,
9078,
13,
770,
2393,
6338,
2727,
416,
9297,
56,
357,
9641,
513,
13,
1157,
737,
2094,
470,
4370,
0,
198,
62,
8658,
9641,
220,
220,
796,
705,
18,
13,
940,
6,
198,
62,
293,
742,
482,
641,
220,
220,
220,
796,... | 1.780635 | 1,322 |
import logging
#from ..shared_code import database
import azure.functions as func
import csv
import os
| [
11748,
18931,
201,
198,
2,
6738,
11485,
28710,
62,
8189,
1330,
6831,
201,
198,
11748,
35560,
495,
13,
12543,
2733,
355,
25439,
201,
198,
11748,
269,
21370,
201,
198,
11748,
28686,
201,
198,
201,
198,
201,
198,
201,
198,
201,
198
] | 2.829268 | 41 |
# color_setup.py Customise for your hardware config
# Released under the MIT License (MIT). See LICENSE.
# Copyright (c) 2020 Peter Hinch
# As written, supports:
# Adafruit 1.5" 128*128 OLED display: https://www.adafruit.com/product/1431
# Adafruit 1.27" 128*96 display https://www.adafruit.com/product/1673
# Edit the driver import for other displays.
# Demo of initialisation procedure designed to minimise risk of memory fail
# when instantiating the frame buffer. The aim is to do this as early as
# possible before importing other modules.
# WIRING (Adafruit pin nos and names).
# Pyb SSD
# 3v3 Vin (10)
# Gnd Gnd (11)
# Y1 DC (3 DC)
# Y2 CS (5 OC OLEDCS)
# Y3 Rst (4 R RESET)
# Y6 CLK (2 CL SCK)
# Y8 DATA (1 SI MOSI)
import machine
import gc
# *** Choose your color display driver here ***
# Driver supporting non-STM platforms
# from drivers.ssd1351.ssd1351_generic import SSD1351 as SSD
# STM specific driver
from drivers.ssd1351.ssd1351 import SSD1351 as SSD
height = 96 # 1.27 inch 96*128 (rows*cols) display
# height = 128 # 1.5 inch 128*128 display
pdc = machine.Pin('Y1', machine.Pin.OUT_PP, value=0)
pcs = machine.Pin('Y2', machine.Pin.OUT_PP, value=1)
prst = machine.Pin('Y3', machine.Pin.OUT_PP, value=1)
spi = machine.SPI(2)
gc.collect() # Precaution before instantiating framebuf
ssd = SSD(spi, pcs, pdc, prst, height) # Create a display instance
| [
2,
3124,
62,
40406,
13,
9078,
8562,
786,
329,
534,
6890,
4566,
198,
198,
2,
28728,
739,
262,
17168,
13789,
357,
36393,
737,
4091,
38559,
24290,
13,
198,
2,
15069,
357,
66,
8,
12131,
5613,
367,
8589,
198,
198,
2,
1081,
3194,
11,
69... | 2.7833 | 503 |
'''
Created on Aug 5, 2020
@author: Jeff
'''
import PyQt5
from PyQt5 import QtCore, QtGui, QtWidgets
import datetime
import pyperclip
#Fixes scaling issues on high res monitors
if hasattr(QtCore.Qt, 'AA_EnableHighDpiScaling'):
PyQt5.QtWidgets.QApplication.setAttribute(QtCore.Qt.AA_EnableHighDpiScaling, True)
if hasattr(QtCore.Qt, 'AA_UseHighDpiPixmaps'):
PyQt5.QtWidgets.QApplication.setAttribute(QtCore.Qt.AA_UseHighDpiPixmaps, True)
class SongDownloadView(object):
'''
classdocs
'''
| [
7061,
6,
198,
41972,
319,
2447,
642,
11,
12131,
198,
198,
31,
9800,
25,
5502,
198,
7061,
6,
198,
11748,
9485,
48,
83,
20,
198,
6738,
9485,
48,
83,
20,
1330,
33734,
14055,
11,
33734,
8205,
72,
11,
33734,
54,
312,
11407,
198,
11748,... | 2.306667 | 225 |
from captcha.models import CaptchaStore
from captcha.helpers import captcha_image_url
from django.http import JsonResponse
from django.shortcuts import redirect
from django.utils import timezone
from django.views.generic import TemplateView, View
from .forms import CommentForm
| [
6738,
48972,
13,
27530,
1330,
6790,
11693,
22658,
198,
6738,
48972,
13,
16794,
364,
1330,
48972,
62,
9060,
62,
6371,
198,
6738,
42625,
14208,
13,
4023,
1330,
449,
1559,
31077,
198,
6738,
42625,
14208,
13,
19509,
23779,
1330,
18941,
198,
... | 3.957746 | 71 |
from .rest import RestClient
class Organizations(object):
"""Auth0 organizations endpoints
Args:
domain (str): Your Auth0 domain, e.g: 'username.auth0.com'
token (str): Management API v2 Token
telemetry (bool, optional): Enable or disable Telemetry
(defaults to True)
timeout (float or tuple, optional): Change the requests
connect and read timeout. Pass a tuple to specify
both values separately or a float to set both to it.
(defaults to 5.0 for both)
"""
# Organizations
def all_organizations(self, page=None, per_page=None):
"""Retrieves a list of all the organizations.
Args:
page (int): The result's page number (zero based). When not set,
the default value is up to the server.
per_page (int, optional): The amount of entries per page. When not set,
the default value is up to the server.
See: https://auth0.com/docs/api/management/v2#!/Organizations/get_organizations
"""
params = {}
params['page'] = page
params['per_page'] = per_page
return self.client.get(self._url(), params=params)
def get_organization_by_name(self, name=None):
"""Retrieves an organization given its name.
Args:
name (str): The name of the organization to retrieve.
See: https://auth0.com/docs/api/management/v2#!/Organizations/get_name_by_name
"""
params = {}
return self.client.get(self._url('name', name), params=params)
def get_organization(self, id):
"""Retrieves an organization by its ID.
Args:
id (str): Id of organization to retrieve.
See: https://auth0.com/docs/api/management/v2#!/Organizations/get_organizations_by_id
"""
params = {}
return self.client.get(self._url(id), params=params)
def create_organization(self, body):
"""Create a new organization.
Args:
body (dict): Attributes for the new organization.
See: https://auth0.com/docs/api/management/v2#!/Organizations/post_organizations
"""
return self.client.post(self._url(), data=body)
def update_organization(self, id, body):
"""Modifies an organization.
Args:
id (str): the ID of the organization.
body (dict): Attributes to modify.
See: https://auth0.com/docs/api/management/v2#!/Organizations/patch_organizations_by_id
"""
return self.client.patch(self._url(id), data=body)
def delete_organization(self, id):
"""Deletes an organization and all its related assets.
Args:
id (str): Id of organization to delete.
See: https://auth0.com/docs/api/management/v2#!/Organizations/delete_organizations_by_id
"""
return self.client.delete(self._url(id))
# Organization Connections
def all_organization_connections(self, id, page=None, per_page=None):
"""Retrieves a list of all the organization connections.
Args:
id (str): the ID of the organization.
page (int): The result's page number (zero based). When not set,
the default value is up to the server.
per_page (int, optional): The amount of entries per page. When not set,
the default value is up to the server.
See: https://auth0.com/docs/api/management/v2#!/Organizations/get_enabled_connections
"""
params = {}
params['page'] = page
params['per_page'] = per_page
return self.client.get(self._url(id, 'enabled_connections'), params=params)
def get_organization_connection(self, id, connection_id):
"""Retrieves an organization connection by its ID.
Args:
id (str): the ID of the organization.
connection_id (str): the ID of the connection.
See: https://auth0.com/docs/api/management/v2#!/Organizations/get_enabled_connections_by_connectionId
"""
params = {}
return self.client.get(self._url(id, 'enabled_connections', connection_id), params=params)
def create_organization_connection(self, id, body):
"""Adds a connection to an organization.
Args:
id (str): the ID of the organization.
body (dict): Attributes for the connection to add.
See: https://auth0.com/docs/api/management/v2#!/Organizations/post_enabled_connections
"""
return self.client.post(self._url(id, 'enabled_connections'), data=body)
def update_organization_connection(self, id, connection_id, body):
"""Modifies an organization.
Args:
id (str): the ID of the organization.
connection_id (str): the ID of the connection to update.
body (dict): Attributes to modify.
See: https://auth0.com/docs/api/management/v2#!/Organizations/patch_enabled_connections_by_connectionId
"""
return self.client.patch(self._url(id, 'enabled_connections', connection_id), data=body)
def delete_organization_connection(self, id, connection_id):
"""Deletes a connection from the given organization.
Args:
id (str): Id of organization.
connection_id (str): the ID of the connection to delete.
See: https://auth0.com/docs/api/management/v2#!/Organizations/delete_enabled_connections_by_connectionId
"""
return self.client.delete(self._url(id, 'enabled_connections', connection_id))
# Organization Members
def all_organization_members(self, id, page=None, per_page=None):
"""Retrieves a list of all the organization members.
Args:
id (str): the ID of the organization.
page (int): The result's page number (zero based). When not set,
the default value is up to the server.
per_page (int, optional): The amount of entries per page. When not set,
the default value is up to the server.
See: https://auth0.com/docs/api/management/v2#!/Organizations/get_members
"""
params = {}
params['page'] = page
params['per_page'] = per_page
return self.client.get(self._url(id, 'members'), params=params)
def create_organization_members(self, id, body):
"""Adds members to an organization.
Args:
id (str): the ID of the organization.
body (dict): Attributes from the members to add.
See: https://auth0.com/docs/api/management/v2#!/Organizations/post_members
"""
return self.client.post(self._url(id, 'members'), data=body)
def delete_organization_members(self, id, body):
"""Deletes members from the given organization.
Args:
id (str): Id of organization.
body (dict): Attributes from the members to delete
See: https://auth0.com/docs/api/management/v2#!/Organizations/delete_members
"""
return self.client.delete(self._url(id, 'members'), data=body)
# Organization Member Roles
def all_organization_member_roles(self, id, user_id, page=None, per_page=None):
"""Retrieves a list of all the roles from the given organization member.
Args:
id (str): the ID of the organization.
user_id (str): the ID of the user member of the organization.
page (int): The result's page number (zero based). When not set,
the default value is up to the server.
per_page (int, optional): The amount of entries per page. When not set,
the default value is up to the server.
See: https://auth0.com/docs/api/management/v2#!/Organizations/get_organization_member_roles
"""
params = {}
params['page'] = page
params['per_page'] = per_page
return self.client.get(self._url(id, 'members', user_id, 'roles'), params=params)
def create_organization_member_roles(self, id, user_id, body):
"""Adds roles to a member of an organization.
Args:
id (str): the ID of the organization.
user_id (str): the ID of the user member of the organization.
body (dict): Attributes from the members to add.
See: https://auth0.com/docs/api/management/v2#!/Organizations/post_organization_member_roles
"""
return self.client.post(self._url(id, 'members', user_id, 'roles'), data=body)
def delete_organization_member_roles(self, id, user_id, body):
"""Deletes roles from a member of an organization.
Args:
id (str): Id of organization.
user_id (str): the ID of the user member of the organization.
body (dict): Attributes from the members to delete
See: https://auth0.com/docs/api/management/v2#!/Organizations/delete_organization_member_roles
"""
return self.client.delete(self._url(id, 'members', user_id, 'roles'), data=body)
# Organization Invitations
def all_organization_invitations(self, id, page=None, per_page=None):
"""Retrieves a list of all the organization invitations.
Args:
id (str): the ID of the organization.
page (int): The result's page number (zero based). When not set,
the default value is up to the server.
per_page (int, optional): The amount of entries per page. When not set,
the default value is up to the server.
See: https://auth0.com/docs/api/management/v2#!/Organizations/get_invitations
"""
params = {}
params['page'] = page
params['per_page'] = per_page
return self.client.get(self._url(id, 'invitations'), params=params)
def get_organization_invitation(self, id, invitaton_id):
"""Retrieves an organization invitation by its ID.
Args:
id (str): the ID of the organization.
invitaton_id (str): the ID of the invitation.
See: https://auth0.com/docs/api/management/v2#!/Organizations/get_invitations_by_invitation_id
"""
params = {}
return self.client.get(self._url(id, 'invitations', invitaton_id), params=params)
def create_organization_invitation(self, id, body):
"""Create an invitation to an organization.
Args:
id (str): the ID of the organization.
body (dict): Attributes for the invitation to create.
See: https://auth0.com/docs/api/management/v2#!/Organizations/post_invitations
"""
return self.client.post(self._url(id, 'invitations'), data=body)
def delete_organization_invitation(self, id, invitation_id):
"""Deletes an invitation from the given organization.
Args:
id (str): Id of organization.
invitation_id (str): the ID of the invitation to delete.
See: https://auth0.com/docs/api/management/v2#!/Organizations/delete_invitations_by_invitation_id
"""
return self.client.delete(self._url(id, 'invitations', invitation_id))
| [
6738,
764,
2118,
1330,
8324,
11792,
628,
198,
4871,
41846,
7,
15252,
2599,
198,
220,
220,
220,
37227,
30515,
15,
5745,
886,
13033,
628,
220,
220,
220,
943,
14542,
25,
198,
220,
220,
220,
220,
220,
220,
220,
7386,
357,
2536,
2599,
34... | 2.506702 | 4,476 |
#!/usr/bin/env python
# Copyright (c) 2018 NVIDIA Corporation. All rights reserved.
# This work is licensed under a Creative Commons Attribution-NonCommercial-ShareAlike 4.0 International License.
# https://creativecommons.org/licenses/by-nc-sa/4.0/legalcode
"""
This file starts a ROS node to run DOPE,
listening to an image topic and publishing poses.
"""
from __future__ import print_function
import cv2
import message_filters
import numpy as np
import resource_retriever
import rospy
import tf.transformations
from PIL import Image
from PIL import ImageDraw
from cv_bridge import CvBridge
from geometry_msgs.msg import PoseStamped
from sensor_msgs.msg import PointCloud2, CameraInfo, Image as ImageSensor_msg
from std_msgs.msg import String
import os
import json
from multiprocessing import Pool
import pcl
from plyfile import PlyData, PlyElement
from utils import *
import argparse
import pprint
import yaml
# def image_info_cloud_callback(image_msg, camera_info, cloud_in):
# print("Received image_info_cloud_callback in sync message")
#
# def image_cloud_callback(image_msg, cloud_in):
# print("Received image_cloud_callback in sync message")
if __name__ == "__main__":
main()
| [
2,
48443,
14629,
14,
8800,
14,
24330,
21015,
198,
198,
2,
15069,
357,
66,
8,
2864,
15127,
10501,
13,
1439,
2489,
10395,
13,
198,
2,
770,
670,
318,
11971,
739,
257,
17404,
13815,
45336,
12,
15419,
48401,
12,
11649,
32,
2339,
604,
13,... | 3.296196 | 368 |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# Copyright 2002-2009 Zuza Software Foundation
#
# This file is part of the Translate Toolkit.
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, see <http://www.gnu.org/licenses/>.
"""class that handles all header functions for a header in a po file"""
from translate.misc import dictutils
from translate import __version__
import re
import time
author_re = re.compile(r".*<\S+@\S+>.*\d{4,4}")
def parseheaderstring(input):
"""Parses an input string with the definition of a PO header and returns
the interpreted values as a dictionary."""
headervalues = dictutils.ordereddict()
for line in input.split("\n"):
if not line or ":" not in line:
continue
key, value = line.split(":", 1)
#We don't want unicode keys
key = str(key.strip())
headervalues[key] = value.strip()
return headervalues
def tzstring():
"""Returns the timezone as a string in the format [+-]0000, eg +0200.
@rtype: str"""
if time.daylight:
tzoffset = time.altzone
else:
tzoffset = time.timezone
hours, minutes = time.gmtime(abs(tzoffset))[3:5]
if tzoffset > 0:
hours *= -1
tz = str("%+d" % hours).zfill(3) + str(minutes).zfill(2)
return tz
def update(existing, add=False, **kwargs):
"""Update an existing header dictionary with the values in kwargs, adding new values
only if add is true.
@return: Updated dictionary of header entries
@rtype: dict
"""
headerargs = dictutils.ordereddict()
fixedargs = dictutils.cidict()
for key, value in kwargs.items():
key = key.replace("_", "-")
if key.islower():
key = key.title()
fixedargs[key] = value
removed = []
for key in poheader.header_order:
if existing.has_key(key):
if key in fixedargs:
headerargs[key] = fixedargs.pop(key)
else:
headerargs[key] = existing[key]
removed.append(key)
elif add and fixedargs.has_key(key):
headerargs[key] = fixedargs.pop(key)
for key, value in existing.iteritems():
if not key in removed:
headerargs[key] = value
if add:
for key in fixedargs:
headerargs[key] = fixedargs[key]
return headerargs
class poheader(object):
"""This class implements functionality for manipulation of po file headers.
This class is a mix-in class and useless on its own. It must be used from all
classes which represent a po file"""
x_generator = "Translate Toolkit %s" % __version__.sver
header_order = [
"Project-Id-Version",
"Report-Msgid-Bugs-To",
"POT-Creation-Date",
"PO-Revision-Date",
"Last-Translator",
"Language-Team",
"Language",
"MIME-Version",
"Content-Type",
"Content-Transfer-Encoding",
"Plural-Forms",
"X-Generator",
]
def init_headers(self, charset='utf-8', encoding='8bit', **kwargs):
"""sets default values for po headers"""
#FIXME: we need to allow at least setting target language, pluralforms and generator
headerdict = self.makeheaderdict(charset=charset, encoding=encoding, **kwargs)
self.updateheader(add=True, **headerdict)
return self.header()
def makeheaderdict(self,
charset="CHARSET",
encoding="ENCODING",
project_id_version=None,
pot_creation_date=None,
po_revision_date=None,
last_translator=None,
language_team=None,
mime_version=None,
plural_forms=None,
report_msgid_bugs_to=None,
**kwargs):
"""Create a header dictionary with useful defaults.
pot_creation_date can be None (current date) or a value (datetime or string)
po_revision_date can be None (form), False (=pot_creation_date), True (=now),
or a value (datetime or string)
@return: Dictionary with the header items
@rtype: dict
"""
if project_id_version is None:
project_id_version = "PACKAGE VERSION"
if pot_creation_date is None or pot_creation_date == True:
pot_creation_date = time.strftime("%Y-%m-%d %H:%M") + tzstring()
if isinstance(pot_creation_date, time.struct_time):
pot_creation_date = time.strftime("%Y-%m-%d %H:%M", pot_creation_date) + tzstring()
if po_revision_date is None:
po_revision_date = "YEAR-MO-DA HO:MI+ZONE"
elif po_revision_date == False:
po_revision_date = pot_creation_date
elif po_revision_date == True:
po_revision_date = time.strftime("%Y-%m-%d %H:%M") + tzstring()
if isinstance(po_revision_date, time.struct_time):
po_revision_date = time.strftime("%Y-%m-%d %H:%M", po_revision_date) + tzstring()
if last_translator is None:
last_translator = "FULL NAME <EMAIL@ADDRESS>"
if language_team is None:
language_team = "LANGUAGE <LL@li.org>"
if mime_version is None:
mime_version = "1.0"
if report_msgid_bugs_to is None:
report_msgid_bugs_to = ""
defaultargs = dictutils.ordereddict()
defaultargs["Project-Id-Version"] = project_id_version
defaultargs["Report-Msgid-Bugs-To"] = report_msgid_bugs_to
defaultargs["POT-Creation-Date"] = pot_creation_date
defaultargs["PO-Revision-Date"] = po_revision_date
defaultargs["Last-Translator"] = last_translator
defaultargs["Language-Team"] = language_team
defaultargs["MIME-Version"] = mime_version
defaultargs["Content-Type"] = "text/plain; charset=%s" % charset
defaultargs["Content-Transfer-Encoding"] = encoding
if plural_forms:
defaultargs["Plural-Forms"] = plural_forms
defaultargs["X-Generator"] = self.x_generator
return update(defaultargs, add=True, **kwargs)
def header(self):
"""Returns the header element, or None. Only the first element is allowed
to be a header. Note that this could still return an empty header element,
if present."""
if len(self.units) == 0:
return None
candidate = self.units[0]
if candidate.isheader():
return candidate
else:
return None
def parseheader(self):
"""Parses the PO header and returns the interpreted values as a
dictionary."""
header = self.header()
if not header:
return {}
return parseheaderstring(header.target)
def updateheader(self, add=False, **kwargs):
"""Updates the fields in the PO style header.
This will create a header if add == True."""
header = self.header()
if not header:
if add:
header = self.makeheader(**kwargs)
# we should be using .addunit() or some equivalent in case the
# unit needs to refer back to the store, etc. This might be
# subtly broken for POXLIFF, since we don't dupliate the code
# from lisa::addunit().
header._store = self
self.units.insert(0, header)
else:
headeritems = update(self.parseheader(), add, **kwargs)
keys = headeritems.keys()
if not "Content-Type" in keys or "charset=CHARSET" in headeritems["Content-Type"]:
headeritems["Content-Type"] = "text/plain; charset=UTF-8"
if not "Content-Transfer-Encoding" in keys or "ENCODING" in headeritems["Content-Transfer-Encoding"]:
headeritems["Content-Transfer-Encoding"] = "8bit"
headerString = ""
for key, value in headeritems.items():
if value is not None:
headerString += "%s: %s\n" % (key, value)
header.target = headerString
header.markfuzzy(False) # TODO: check why we do this?
return header
def getheaderplural(self):
"""Returns the nplural and plural values from the header."""
header = self.parseheader()
pluralformvalue = header.get('Plural-Forms', None)
if pluralformvalue is None:
return None, None
nplural = re.findall("nplurals=(.+?);", pluralformvalue)
plural = re.findall("plural=(.+?);?$", pluralformvalue)
if not nplural or nplural[0] == "INTEGER":
nplural = None
else:
nplural = nplural[0]
if not plural or plural[0] == "EXPRESSION":
plural = None
else:
plural = plural[0]
return nplural, plural
def updateheaderplural(self, nplurals, plural):
"""Update the Plural-Form PO header."""
if isinstance(nplurals, basestring):
nplurals = int(nplurals)
self.updateheader(add=True, Plural_Forms = "nplurals=%d; plural=%s;" % (nplurals, plural) )
def gettargetlanguage(self):
"""Return the target language if specified in the header.
Some attempt at understanding Poedit's custom headers is done."""
header = self.parseheader()
if 'X-Poedit-Language' in header:
from translate.lang import poedit
language = header.get('X-Poedit-Language')
country = header.get('X-Poedit-Country')
return poedit.isocode(language, country)
return header.get('Language')
def settargetlanguage(self, lang):
"""Set the target language in the header.
This removes any custom Poedit headers if they exist.
@param lang: the new target language code
@type lang: str
"""
if isinstance(lang, basestring) and len(lang) > 1:
self.updateheader(add=True, Language=lang, X_Poedit_Language=None, X_Poedit_Country=None)
def mergeheaders(self, otherstore):
"""Merges another header with this header.
This header is assumed to be the template.
@type otherstore: L{base.TranslationStore}
"""
newvalues = otherstore.parseheader()
retain = {
"Project_Id_Version": newvalues['Project-Id-Version'],
"PO_Revision_Date" : newvalues['PO-Revision-Date'],
"Last_Translator" : newvalues['Last-Translator'],
"Language_Team" : newvalues['Language-Team'],
}
# not necessarily there:
plurals = newvalues.get('Plural-Forms', None)
if plurals:
retain['Plural-Forms'] = plurals
self.updateheader(**retain)
def updatecontributor(self, name, email=None):
"""Add contribution comments if necessary."""
header = self.header()
if not header:
return
prelines = []
contriblines = []
postlines = []
contribexists = False
incontrib = False
outcontrib = False
for line in header.getnotes("translator").split('\n'):
line = line.strip()
if line == u"FIRST AUTHOR <EMAIL@ADDRESS>, YEAR.":
incontrib = True
continue
if author_re.match(line):
incontrib = True
contriblines.append(line)
continue
if line == "" and incontrib:
incontrib = False
outcontrib = True
if incontrib:
contriblines.append(line)
elif not outcontrib:
prelines.append(line)
else:
postlines.append(line)
year = time.strftime("%Y")
contribexists = False
for i in range(len(contriblines)):
line = contriblines[i]
if name in line and (email is None or email in line):
contribexists = True
if year in line:
break
else:
#The contributor is there, but not for this year
if line[-1] == '.':
line = line[:-1]
contriblines[i] = "%s, %s." % (line, year)
if not contribexists:
# Add a new contributor
if email:
contriblines.append("%s <%s>, %s." % (name, email, year))
else:
contriblines.append("%s, %s." % (name, year))
header.removenotes()
header.addnote("\n".join(prelines))
header.addnote("\n".join(contriblines))
header.addnote("\n".join(postlines))
def makeheader(self, **kwargs):
    """Build and return the header unit for this store.

    See .makeheaderdict() for the accepted keyword parameters.
    """
    unit = self.UnitClass(encoding=self._encoding)
    unit.markfuzzy()
    unit.source = ""
    fields = self.makeheaderdict(**kwargs)
    # Render each header field as "Key: value\n", preserving field order.
    unit.target = "".join("%s: %s\n" % item for item in fields.items())
    return unit
| [
2,
48443,
14629,
14,
8800,
14,
24330,
21015,
198,
2,
532,
9,
12,
19617,
25,
3384,
69,
12,
23,
532,
9,
12,
198,
2,
198,
2,
15069,
6244,
12,
10531,
1168,
84,
4496,
10442,
5693,
198,
2,
198,
2,
770,
2393,
318,
636,
286,
262,
3602... | 2.250328 | 6,104 |
#!/usr/bin/env python
#
# spyne - Copyright (C) Spyne contributors.
#
# This library is free software; you can redistribute it and/or
# modify it under the terms of the GNU Lesser General Public
# License as published by the Free Software Foundation; either
# version 2.1 of the License, or (at your option) any later version.
#
# This library is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this library; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301
#
# The MIT License
#
# Copyright (c) Val Neekman @ Neekware Inc. http://neekware.com
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.
#
from unittest import TestCase
from spyne.util.address import set_address_parser_settings
set_address_parser_settings(trusted_proxies=['177.139.233.100'])
from spyne.util.address import address_parser
class IPv4TestCase(TestCase):
    """IP address Test"""
    # NOTE(review): placeholder suite — no test methods are visible in this
    # chunk; presumably defined elsewhere or pending implementation.
class IPv4TrustedProxiesTestCase(TestCase):
    """Trusted Proxies - IP address Test"""
    # NOTE(review): placeholder suite — no test methods are visible in this
    # chunk; presumably defined elsewhere or pending implementation.
class IPv6TestCase(TestCase):
    """IP address Test"""
    # NOTE(review): placeholder suite — no test methods are visible in this
    # chunk; presumably defined elsewhere or pending implementation.
class IPv6TrustedProxiesTestCase(TestCase):
    """Trusted Proxies - IP address Test"""
    # NOTE(review): placeholder suite — no test methods are visible in this
    # chunk; presumably defined elsewhere or pending implementation.
| [
2,
48443,
14629,
14,
8800,
14,
24330,
21015,
198,
2,
198,
2,
13997,
710,
532,
15069,
357,
34,
8,
23688,
710,
20420,
13,
198,
2,
198,
2,
770,
5888,
318,
1479,
3788,
26,
345,
460,
17678,
4163,
340,
290,
14,
273,
198,
2,
13096,
340... | 3.572474 | 683 |
import os
import json
# Scaling factors the program iterates over.
# NOTE(review): `main` is not defined in this chunk — it must be provided
# elsewhere in the file for the entry-point guard below to work.
size_factors = [3, 5, 7, 9, 15, 18, 21, 24]

if __name__ == "__main__":
    main()
| [
11748,
28686,
198,
11748,
33918,
198,
198,
7857,
62,
22584,
669,
796,
685,
18,
11,
642,
11,
767,
11,
860,
11,
1315,
11,
1248,
11,
2310,
11,
1987,
60,
628,
628,
628,
198,
361,
11593,
3672,
834,
6624,
366,
834,
12417,
834,
1298,
198... | 2.22 | 50 |
# NOTE(review): `lxd` is created elsewhere — attribute semantics cannot be
# confirmed from this chunk. Presumably: disable the timing-adjustment tool
# and clear any previously applied offset.
lxd.adjust_timing_tool = False
lxd.offset = 0
75,
24954,
13,
23032,
62,
16514,
278,
62,
25981,
796,
10352,
198,
75,
24954,
13,
28968,
796,
657
] | 2.5 | 18 |
# 14. Read a batch of N numbers and determine the average of the multiples
# of 7 it contains, and how many are palindromic numbers (a number that
# equals its reverse).
# Example: if N = 8 [49, 2, 7, 34, 55, 105, 121, 53]
#   -> Average of the multiples of 7: 53.67
#   -> Palindrome count: 4
numeros = [49, 2, 7, 34, 55, 105, 121, 53]
# NOTE(review): `mostrar` ("display") is defined elsewhere in the file.
mostrar(numeros)
| [
2,
1478,
13,
1004,
263,
555,
1256,
68,
390,
399,
299,
21356,
647,
418,
331,
5004,
1288,
1552,
276,
952,
390,
285,
21356,
2528,
24705,
418,
390,
767,
8358,
542,
268,
1030,
11,
220,
201,
198,
2,
18912,
6557,
429,
418,
3367,
1451,
29... | 2.144654 | 159 |
#!/bin/python3
"""Render groups of Markdown files to HTML, driven by GitHub-Actions inputs."""

print("- Markdown-HTML -")

import markdown, natsort, os, json, pathlib

# Action configuration, all taken from the workflow environment.
REPO_PATH = pathlib.Path(os.environ['GITHUB_WORKSPACE'])
INPUT_LIST = json.loads(os.environ['INPUT_INPUT_FILES'])
OUTPUT_LIST = json.loads(os.environ['INPUT_OUTPUT_FILES'])
EXCLUDE_DUPLICATES : bool = json.loads(os.environ['INPUT_EXCLUDE_DUPLICATES'])
BUILTIN_STYLESHEET : str = os.environ['INPUT_BUILTIN_STYLESHEET']
EXTENSIONS : list = json.loads(os.environ['INPUT_EXTENSIONS'])
EXTENSION_CONFIGS : dict = json.loads(os.environ['INPUT_EXTENSION_CONFIGS'])

md = markdown.Markdown(extensions=EXTENSIONS, extension_configs=EXTENSION_CONFIGS, output_format="html5")

# Validate the action inputs before doing any work.
if not isinstance(INPUT_LIST, list) or not all([isinstance(sublist, list) for sublist in INPUT_LIST]):
    raise ValueError("input_files must be a JSON list of lists")
if not isinstance(OUTPUT_LIST, list):
    raise ValueError("output_files must be a JSON list")
if len(OUTPUT_LIST) != len(INPUT_LIST):
    raise ValueError(f"input_files (length: {len(INPUT_LIST)}) must be the same length as output_files (length: {len(OUTPUT_LIST)})")

# Load the optional built-in stylesheet once; it is appended to every page.
if BUILTIN_STYLESHEET != "":
    # Fix: read as UTF-8 explicitly so the action does not depend on the
    # runner's locale encoding.
    with open(REPO_PATH.joinpath(BUILTIN_STYLESHEET), 'r', encoding='utf-8') as stylesheet_file:
        style = "<style>\n" + stylesheet_file.read() + "</style>\n"
else:
    style = ""

# Each input sublist of globs is concatenated and rendered to one output file.
for input_sublist, output_path_str in zip(INPUT_LIST, OUTPUT_LIST):
    md.reset()  # clear per-document converter state (footnotes, TOC, ...)
    md_str = ""
    input_path_included = set()
    for input_path_glob_str in input_sublist:
        # natsorted gives a human-friendly ordering (file2 before file10).
        input_path_list = natsort.natsorted([str(p) for p in REPO_PATH.glob(input_path_glob_str)])
        print("input_path_list", input_path_list)
        for input_path_str in input_path_list:
            print("input_path_str", input_path_str)
            if not EXCLUDE_DUPLICATES or input_path_str not in input_path_included:
                input_path_included.add(input_path_str)
                with open(input_path_str, 'r', encoding='utf-8') as input_file:
                    md_str += input_file.read() + "\n"
    print("data", md_str)
    print("Generating", output_path_str)
    output_path = REPO_PATH.joinpath(output_path_str)
    # Fix: make sure the destination directory exists before writing, so an
    # output path in a not-yet-created folder does not fail the action.
    output_path.parent.mkdir(parents=True, exist_ok=True)
    html = "<!DOCTYPE html>\n" + md.convert(md_str) + "\n" + style
    with open(output_path, 'w', encoding='utf-8') as output_file:
        output_file.write(html)

print("Markdown-HTML complete")
| [
2,
48443,
8800,
14,
29412,
18,
198,
198,
4798,
7203,
12,
2940,
2902,
12,
28656,
532,
4943,
198,
198,
11748,
1317,
2902,
11,
299,
1381,
419,
11,
28686,
11,
33918,
11,
3108,
8019,
198,
198,
2200,
16402,
62,
34219,
796,
3108,
8019,
13,... | 2.335729 | 974 |
"""Model for the list of robots."""
import logging
from typing import Tuple
from selenium.common.exceptions import TimeoutException
from selenium.webdriver.chrome.webdriver import WebDriver
from selenium.webdriver.common.by import By
from selenium.webdriver.remote.webelement import WebElement
from selenium.webdriver.support.wait import WebDriverWait
from selenium.webdriver.support import expected_conditions as EC
from src.driver.highlight import highlight
logger = logging.getLogger(__name__)
def get_robot_toggle_selector(name: str) -> Tuple[str, str]:
    """Get the locator Tuple for a robot's toggle by name of the robot."""
    xpath = f"//a[contains(@href,{name})]//button"
    return (By.XPATH, xpath)
def get_robot_pipette_link_selector(name: str) -> Tuple[str, str]:
    """Get the locator Tuple for a robot's pipette link by name of the robot."""
    xpath = f'//ol//a[contains(@href,"#/robots/opentrons-{name}/instruments")]'
    return (By.XPATH, xpath)
def get_robot_modules_link_selector(name: str) -> Tuple[str, str]:
    """Get the locator Tuple for a robot's modules link by name of the robot."""
    xpath = f'//ol//a[contains(@href,"#/robots/opentrons-{name}/modules")]'
    return (By.XPATH, xpath)
class RobotsList:
    """All elements and actions for the Robots List."""

    # Name of the virtual development robot.
    DEV = "dev"

    # Static locator tuples: (selenium By strategy, selector string).
    spinner: Tuple[str, str] = (By.CSS_SELECTOR, "svg[class*=spin]")
    header: Tuple[str, str] = (By.XPATH, '//h2[text()="Robots"]')
    refresh_list: Tuple[str, str] = (By.XPATH, '//button[text()="refresh list"]')
    no_robots_found: Tuple[str, str] = (By.XPATH, '//h3[text()="No robots found!"]')
    try_again_button: Tuple[str, str] = (By.XPATH, '//button[text()="try again"]')

    def __init__(self, driver: WebDriver) -> None:
        """Initialize with driver."""
        self.driver: WebDriver = driver

    def is_robot_toggle_active(self, name: str) -> bool:
        """Is a toggle for a robot 'on' using the name of the robot."""
        # The toggle's CSS class contains "_on_" when the robot is connected.
        return bool(
            self.get_robot_toggle(name).get_attribute("class").find("_on_") != -1
        )

    @highlight
    def get_robot_toggle(self, name: str) -> WebElement:
        """Retrieve the Webelement toggle buttone for a robot by name."""
        toggle_locator: Tuple[str, str] = get_robot_toggle_selector(name)
        # Allow up to 5 seconds for the toggle to become clickable.
        toggle: WebElement = WebDriverWait(self.driver, 5).until(
            EC.element_to_be_clickable(toggle_locator)
        )
        return toggle

    @highlight
    def get_robot_pipettes_link(self, name: str) -> WebElement:
        """Retrieve the pipettes link for a robot by name."""
        link_locator: Tuple[str, str] = get_robot_pipette_link_selector(name)
        link: WebElement = WebDriverWait(self.driver, 2).until(
            EC.element_to_be_clickable(link_locator)
        )
        return link

    @highlight
    def get_robot_modules_link(self, name: str) -> WebElement:
        """Retrieve the modules link for a robot by name."""
        link_locator: Tuple[str, str] = get_robot_modules_link_selector(name)
        link: WebElement = WebDriverWait(self.driver, 2).until(
            EC.element_to_be_clickable(link_locator)
        )
        return link

    def wait_for_spinner_invisible(self) -> None:
        """Wait for spinner to become invisible. This should take 30 seconds."""
        WebDriverWait(self.driver, 31).until(
            EC.invisibility_of_element_located(RobotsList.spinner)
        )

    def wait_for_spinner_visible(self) -> None:
        """Wait for spinner to become visible. This should take ~1 seconds."""
        WebDriverWait(self.driver, 2).until(
            EC.visibility_of_element_located(RobotsList.spinner)
        )

    @highlight
    def get_no_robots_found(self) -> WebElement:
        """Find with no waiting the h3 No robots found!"""
        return self.driver.find_element(*RobotsList.no_robots_found)

    @highlight
    def get_try_again_button(self) -> WebElement:
        """Find with no waiting the TRY AGAIN button."""
        return self.driver.find_element(*RobotsList.try_again_button)

    def get_robot_count(self) -> int:
        """Get the number of robot links."""
        try:
            # wait 6 seconds to see if any robot links become visible
            WebDriverWait(self.driver, 6).until(
                EC.presence_of_element_located(
                    (By.CSS_SELECTOR, "a[class*=robot_link]")
                )
            )
        except TimeoutException:
            # No links appeared within the timeout: treat as zero robots.
            return 0
        return len(self.driver.find_elements(By.CSS_SELECTOR, "a[class*=robot_link]"))
| [
37811,
17633,
329,
262,
1351,
286,
14193,
526,
15931,
198,
11748,
18931,
198,
6738,
19720,
1330,
309,
29291,
198,
6738,
384,
11925,
1505,
13,
11321,
13,
1069,
11755,
1330,
3862,
448,
16922,
198,
6738,
384,
11925,
1505,
13,
12384,
26230,
... | 2.396434 | 1,907 |
from codecs import open
from os import path
from setuptools import setup, find_packages
# adding README
# Read the long description from README.md (UTF-8) next to this file.
here = path.abspath(path.dirname(__file__))
with open(path.join(here, 'README.md'), encoding='utf-8') as f:
    long_description = f.read()

# Package metadata for PyPI distribution.
setup(
    name='neuralkernel',
    version='0.0.8',
    description='neural networks as a general-purpose computational framework',
    long_description=long_description,
    long_description_content_type='text/markdown',
    author='Noah Stebbins',
    author_email='nstebbins1@gmail.com',
    license='MIT',
    classifiers=[
        'Development Status :: 3 - Alpha',
        'Programming Language :: Python :: 3.5',
        'Programming Language :: Python :: 3.6',
        'Programming Language :: Python :: 3.7',
    ],
    packages=find_packages(exclude=['docs', 'tests']),
    install_requires=['numpy', 'matplotlib', 'seaborn'],
    tests_require=['pytest']
)
| [
6738,
40481,
82,
1330,
1280,
198,
6738,
28686,
1330,
3108,
198,
198,
6738,
900,
37623,
10141,
1330,
9058,
11,
1064,
62,
43789,
198,
198,
2,
4375,
20832,
11682,
198,
1456,
796,
3108,
13,
397,
2777,
776,
7,
6978,
13,
15908,
3672,
7,
8... | 2.702065 | 339 |
from django.shortcuts import render, get_object_or_404, redirect
from django.http import HttpResponse
from django.forms import modelform_factory
from home.models import Todo
# Create your views here.
| [
6738,
42625,
14208,
13,
19509,
23779,
1330,
8543,
11,
651,
62,
15252,
62,
273,
62,
26429,
11,
18941,
198,
6738,
42625,
14208,
13,
4023,
1330,
367,
29281,
31077,
198,
6738,
42625,
14208,
13,
23914,
1330,
2746,
687,
62,
69,
9548,
198,
1... | 3.474576 | 59 |
"""Convert .ab1 trace files to FASTA, run Tandem Repeat Finder, and
collect the HTML results."""
import os
from Bio import SeqIO
import shutil
import subprocess
import webbrowser

# Converts files from abi to fasta
homeDIR = os.getcwd()
counter = 0
destinationpath = os.path.join(homeDIR, "FASTA")

for filename in os.listdir(homeDIR):
    if filename.endswith(".ab1"):
        # Fix: each trace gets its own FASTA file named after the input;
        # previously every conversion wrote the same hard-coded output file,
        # so only the last trace survived.
        fasta_name = os.path.splitext(filename)[0] + ".fasta"
        SeqIO.convert(filename, "abi", fasta_name, "fasta")
        counter += 1
print(f"####### {counter} files were converted #########")

# Moves fasta files to created FASTA folder
os.makedirs(destinationpath, exist_ok=True)
# Fix: re-list the directory here — the original iterated a listing taken
# before the conversion step, so freshly created .fasta files were skipped.
for file in os.listdir(homeDIR):
    if file.endswith('.fasta'):
        shutil.move(os.path.join(homeDIR, file), os.path.join(destinationpath, file))
print("###### FASTA files moved to FASTA folder #######")

# Combines single fasta files in to a single combined fasta file. This is if
# the files have been grouped to your liking else use the sort_cons.py file
DIR = destinationpath
if not os.path.exists('combined_fasta_file.fasta'):
    # Fix: context managers guarantee the handles are closed even on error.
    with open('combined_fasta_file.fasta', 'w') as cff:
        for file in os.listdir(DIR):
            with open(os.path.join(DIR, file)) as sff:
                for line in sff:
                    cff.write(line)
print("######## FASTA files combined into a single file #######")

# Runs tandem repeat finder program with set parameters
fileDIR = os.path.join(homeDIR, 'trf409.dos64.exe')
subprocess.call([fileDIR, "combined_fasta_file.fasta", "2", "7", "7", "80", "10", "50", "2000"])

# Opens summary html with default browser
# Fix: re-list again — the .summary.html files were created by TRF just above,
# so the pre-run listing could never contain them.
for file in os.listdir(homeDIR):
    if file.endswith('.summary.html'):
        filepath = os.path.realpath(file)
        webbrowser.open('file://' + filepath)

# Moving html files to web_results folder
resultsdestinationpath = os.path.join(homeDIR, "web_results")
os.makedirs(resultsdestinationpath, exist_ok=True)
for file in os.listdir(homeDIR):
    if file.endswith('.html'):
        shutil.move(os.path.join(homeDIR, file), os.path.join(resultsdestinationpath, file))
print("###### Results files moved to FASTA folder #######\n")

# Deleting redundant files (keep the combined FASTA, which the closing
# message tells the user to use).
for file in os.listdir(homeDIR):
    if (file.endswith('.html') or file.endswith('.fasta')) and file != 'combined_fasta_file.fasta':
        os.remove(os.path.join(homeDIR, file))
shutil.rmtree(os.path.join(homeDIR, "FASTA"))
print("####### Deleting redundant files ########\n")

print ('''
This program can only be run once in the present folder.\n
To run it again, the FASTA and web_results folder, and the
combined_fasta_file.fasta file must be deleted.\n
######################################################\n
The combined_fasta_file.fasta file contains all your sequences in fasta format
and tandem repeat finder results can be found in the web_results folder.
Use the first file (the summary file) in the web_results folder to navigate thorough your results.
''')
| [
11748,
28686,
198,
6738,
16024,
1330,
1001,
80,
9399,
198,
11748,
4423,
346,
198,
11748,
850,
14681,
198,
11748,
3992,
40259,
628,
198,
2,
1482,
24040,
3696,
422,
450,
72,
284,
3049,
64,
198,
11195,
34720,
796,
28686,
13,
1136,
66,
16... | 2.690102 | 1,081 |
# Copyright 2018 Pants project contributors (see CONTRIBUTORS.md).
# Licensed under the Apache License, Version 2.0 (see LICENSE).
from textwrap import dedent
from pants.testutil.pants_run_integration_test import PantsRunIntegrationTest
| [
2,
15069,
2864,
41689,
1628,
20420,
357,
3826,
27342,
9865,
3843,
20673,
13,
9132,
737,
198,
2,
49962,
739,
262,
24843,
13789,
11,
10628,
362,
13,
15,
357,
3826,
38559,
24290,
737,
198,
198,
6738,
2420,
37150,
1330,
4648,
298,
198,
19... | 3.809524 | 63 |
import os
import pytest
from treepath import path, get, TraversingError, get_match
from treepath.path.exceptions.path_syntax_error import PathSyntaxError
| [
11748,
28686,
198,
198,
11748,
12972,
9288,
198,
198,
6738,
2054,
538,
776,
1330,
3108,
11,
651,
11,
4759,
690,
278,
12331,
11,
651,
62,
15699,
198,
6738,
2054,
538,
776,
13,
6978,
13,
1069,
11755,
13,
6978,
62,
1837,
41641,
62,
182... | 3.115385 | 52 |
# Read start hour/minute and end hour/minute from one input line and print
# how long the game lasted; identical start and end means a full 24 hours.
start_h, start_m, end_h, end_m = map(int, input().split())

if (start_h, start_m) == (end_h, end_m):
    print("O JOGO DUROU 24 HORA(S) E 0 MINUTO(S)")
else:
    # Wrap minute and hour differences into [0, 60) and [0, 24).
    minutes = (end_m - start_m) % 60
    hours = (end_h - start_h) % 24
    if end_m < start_m:
        # Borrow one hour when the minutes wrapped around.
        hours = (hours - 1) % 24
    print(f"O JOGO DUROU {hours} HORA(S) E {minutes} MINUTO(S)")
| [
301,
11,
5796,
11,
316,
11,
368,
28,
8899,
7,
600,
11,
15414,
22446,
35312,
28955,
198,
17034,
28,
316,
12,
301,
198,
361,
374,
83,
27,
15,
25,
17034,
28,
1731,
33747,
316,
12,
301,
8,
198,
26224,
28,
368,
12,
5796,
198,
361,
... | 1.694268 | 157 |
import os
from app import create_app
from ext import db
from flask_script import Manager
from flask_migrate import Migrate, MigrateCommand
import app.model
# Build the Flask app from the FLASK_CONFIG environment variable,
# falling back to the 'default' configuration.
app = create_app(os.getenv('FLASK_CONFIG') or 'default')
# Wire Flask-Migrate to the app and its SQLAlchemy database handle.
migrate = Migrate(app, db)
manager = Manager(app)
# Expose migration commands as `python manage.py db <command>`.
manager.add_command('db', MigrateCommand)

if __name__ == '__main__':
    manager.run()
| [
11748,
28686,
198,
198,
6738,
598,
1330,
2251,
62,
1324,
198,
6738,
1070,
1330,
20613,
198,
6738,
42903,
62,
12048,
1330,
9142,
198,
6738,
42903,
62,
76,
42175,
1330,
337,
42175,
11,
337,
42175,
21575,
198,
198,
11748,
598,
13,
19849,
... | 3.008475 | 118 |
#!/usr/bin/env python
"""Longest Common Subsequence demo (classic dynamic programming)."""


def lcs(x, y, m, n):
    """Return the length of the longest common subsequence of x[:m] and y[:n].

    Fix: the original script called `lcs` without ever defining it, which
    raised NameError on the first print. This is the standard O(m*n)
    bottom-up DP implementation.
    """
    table = [[0] * (n + 1) for _ in range(m + 1)]
    for i in range(1, m + 1):
        for j in range(1, n + 1):
            if x[i - 1] == y[j - 1]:
                table[i][j] = table[i - 1][j - 1] + 1
            else:
                table[i][j] = max(table[i - 1][j], table[i][j - 1])
    return table[m][n]


# longest common subsequence
X = "AGGTAB"
Y = "GXTXAYB"
print ("Length of Longest Common Subsequence is ", lcs(X , Y, len(X), len(Y)))
# NOTE(review): the original file repeated the block above twice more with
# copy-pasted comments mislabelling it as "least common subsequence" and
# "longest increasing subsequence"; both duplicates called the same `lcs`
# with the same inputs and have been removed.
| [
2,
48443,
14629,
14,
8800,
14,
24330,
21015,
198,
198,
2,
14069,
2219,
6399,
594,
628,
198,
55,
796,
366,
4760,
38,
5603,
33,
1,
198,
56,
796,
366,
38,
25010,
55,
4792,
33,
1,
198,
4798,
5855,
24539,
286,
5882,
395,
8070,
3834,
... | 2.54386 | 171 |
# NOTE(review): `one_or_many` is not defined in this chunk — these are
# demonstration calls exercising it with one, several, and zero arguments.
one_or_many(1)
one_or_many('1', 1, 2, 'abc')
one_or_many()
| [
201,
198,
201,
198,
505,
62,
273,
62,
21834,
7,
16,
8,
201,
198,
505,
62,
273,
62,
21834,
10786,
16,
3256,
352,
11,
362,
11,
705,
39305,
11537,
201,
198,
505,
62,
273,
62,
21834,
3419,
201,
198
] | 1.692308 | 39 |
# Copyright 2020 The Private Cardinality Estimation Framework Authors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from absl import logging
import copy
import functools
import numpy as np
from wfa_cardinality_estimation_evaluation_framework.estimators.base import SketchBase
from wfa_cardinality_estimation_evaluation_framework.estimators.base import EstimatorBase
from wfa_cardinality_estimation_evaluation_framework.estimators.exact_set import ExactMultiSet
ONE_PLUS = '1+'
class ExactSetOperator(object):
  """Set operations for ExactSet.

  The methods below all accept an ExactMultiSet object and returning an
  ExactMultiSet object.
  """
  # TODO(uakyol) : Make this child of SketchOperator class.

  @classmethod
  def _combine(cls, this, that, key_op):
    """Apply a binary set operation to the id-key sets of two sketches.

    Factors out the deep-copy / key-set boilerplate shared by union,
    intersection and difference.

    Args:
      this: left-hand ExactMultiSet-like sketch (must not be None).
      that: right-hand ExactMultiSet-like sketch (must not be None).
      key_op: binary set operation (e.g. set.union) applied to the key sets.

    Returns:
      A deep copy of `this` whose ids are the result of `key_op`.
    """
    result = copy.deepcopy(this)
    this_keys = set(this.ids().keys())
    that_keys = set(that.ids().keys())
    result._ids = {x: 1 for x in key_op(this_keys, that_keys)}
    return result

  @classmethod
  def union(cls, this, that):
    """Union operation for ExactSet. A None operand acts as the identity."""
    if this is None:
      return copy.deepcopy(that)
    if that is None:
      return copy.deepcopy(this)
    return cls._combine(this, that, set.union)

  @classmethod
  def intersection(cls, this, that):
    """Intersection operation for ExactSet. A None operand yields None."""
    if this is None or that is None:
      return None
    return cls._combine(this, that, set.intersection)

  @classmethod
  def difference(cls, this, that):
    """Difference operation for ExactSet (this - that)."""
    if this is None:
      return None
    if that is None:
      return copy.deepcopy(this)
    return cls._combine(this, that, set.difference)
class StratifiedSketch(SketchBase):
  """A frequency sketch that contains cardinality sketches per frequency bucket."""

  # Fix: removed a stray @classmethod decorator that the original placed on
  # __init__. With it, attribute lookup bound the CLASS as the first argument
  # during instantiation, so every keyword construction (as done by
  # init_from_exact_multi_set) raised a TypeError.
  def __init__(self,
               max_freq,
               cardinality_sketch_factory,
               random_seed,
               noiser_class=None,
               epsilon=0,
               epsilon_split=0.5,
               underlying_set=None,
               union=ExactSetOperator.union):
    """Construct a Stratified sketch.

    Args:
      max_freq: the maximum targeting frequency level. For example, if it is set
        to 3, then the sketches will include frequency=1, 2, 3+ (frequency >=
        3).
      cardinality_sketch_factory: A cardinality sketch factory.
      random_seed: This arg exists in order to conform to
        simulator.EstimatorConfig.sketch_factory.
      noiser_class : A noiser class that is a subclass of base.EstimateNoiserBase.
      epsilon : Total privacy budget to spend for noising this sketch.
      epsilon_split : Ratio of privacy budget to spend to noise 1+ sketch. When
        epsilon_split=0 the 1+ sketch is created from the underlying exact set
        directly. epsilon_split should be smaller than 1.
      underlying_set : ExactMultiSet object that holds the frequency for each
        item for this Stratified Sketch.
      union : Function to be used to calculate the 1+ sketch as the union of the
        others.
    """
    SketchBase.__init__(self)
    # A dictionary that contains multiple sketches, which include:
    # (1) sketches with frequency equal to k, where k < max_freq;
    # (2) a sketch with frequency greater than or equal to max_freq;
    assert (epsilon_split >= 0 and
            epsilon_split < 1), ('epsilon split is not between 0 and 1')
    self.sketches = {}
    self.seed = random_seed
    self.max_freq = max_freq
    self.cardinality_sketch_factory = cardinality_sketch_factory
    self.underlying_set = underlying_set if underlying_set is not None else ExactMultiSet(
    )
    self.epsilon_split = epsilon_split
    self.epsilon = epsilon
    self.union = union
    self.one_plus_noiser = None
    self.rest_noiser = None
    if noiser_class is not None:
      if epsilon_split != 0:
        # Split the budget: epsilon_split for the 1+ sketch, the rest for
        # the per-frequency sketches.
        self.one_plus_noiser = noiser_class(epsilon=epsilon * epsilon_split)
        self.rest_noiser = noiser_class(epsilon=epsilon * (1 - epsilon_split))
      else:
        self.one_plus_noiser = noiser_class(epsilon=epsilon)
        self.rest_noiser = noiser_class(epsilon=epsilon)

  def create_one_plus_sketch(self):
    """Create the 1+ sketch for this stratified sketch.

    We support creation of 1+ sketch for 2 scenarios :
    1) 1+ sketch is created from the underlying exact set directly. Here we
       noise 1+ sketch with epsilon = (self.epsilon * self.epsilon_split).
    2) 1+ sketch is created from the union of all other frequencies. Here
       we noise 1+ sketch with epsilon = self.epsilon
    These two scenarios are controlled with the epsilon_split parameter. If
    epsilon_split = 0, then do scenario 1 otherwise do scenario 2.
    """
    if ONE_PLUS in self.sketches:
      # Already built; building twice would double-noise.
      return
    assert (self.epsilon_split >= 0 and self.epsilon_split < 1), (
        'epsilon split is not between 0 and 1 for ONE_PLUS sketch creation')
    # NOTE(review): create_one_plus_with_merge / create_one_plus_from_underlying
    # are not visible in this chunk — confirm they are defined elsewhere in
    # this class.
    if (self.epsilon_split == 0):
      self.sketches[ONE_PLUS] = self.create_one_plus_with_merge()
    else:
      self.sketches[ONE_PLUS] = self.create_one_plus_from_underlying()

  @classmethod
  def init_from_exact_multi_set(cls,
                                max_freq,
                                exact_multi_set,
                                cardinality_sketch_factory,
                                random_seed,
                                noiser_class=None,
                                epsilon=0,
                                epsilon_split=0.5,
                                union=ExactSetOperator.union):
    """Initialize a Stratified sketch from one ExactMultiSet.

    Args:
      max_freq: the maximum targeting frequency level.
      exact_multi_set: ExactMultiSet object to use for initialization.
      cardinality_sketch_factory: factory for the per-frequency cardinality
        sketches this stratified sketch will hold.
      random_seed: seed forwarded to the StratifiedSketch constructor.
      noiser_class, epsilon, epsilon_split, union: see __init__.

    Returns:
      A StratifiedSketch with its per-frequency sketches created.
    """
    assert (cardinality_sketch_factory is
            not None), ('cardinality_sketch is None')
    stratified_sketch = cls(
        max_freq=max_freq,
        underlying_set=exact_multi_set,
        cardinality_sketch_factory=cardinality_sketch_factory,
        random_seed=random_seed,
        noiser_class=noiser_class,
        epsilon=epsilon,
        epsilon_split=epsilon_split,
        union=union)
    # NOTE(review): create_sketches is not visible in this chunk — confirm it
    # is defined elsewhere in this class.
    stratified_sketch.create_sketches()
    return stratified_sketch

  @classmethod
  def init_from_set_generator(cls,
                              max_freq,
                              set_generator,
                              cardinality_sketch_factory,
                              random_seed,
                              noiser_class=None,
                              epsilon=0,
                              epsilon_split=0.5,
                              union=ExactSetOperator.union):
    """Initialize a Stratified sketch from a Set Generator.

    Args:
      max_freq: the maximum targeting frequency level.
      set_generator: SetGenerator object to draw ids from for initialization.
      cardinality_sketch_factory: factory for the per-frequency cardinality
        sketches this stratified sketch will hold.
      random_seed: seed forwarded to the StratifiedSketch constructor.
      noiser_class, epsilon, epsilon_split, union: see __init__.

    Returns:
      A StratifiedSketch built from all ids produced by the generator.
    """
    assert (cardinality_sketch_factory is
            not None), ('cardinality_sketch is None')
    # Accumulate every generated set into one exact multiset, so repeated
    # ids get their true frequency.
    exact_multi_set = ExactMultiSet()
    for generated_set in set_generator:
      exact_multi_set.add_ids(generated_set)
    return cls.init_from_exact_multi_set(
        max_freq,
        exact_multi_set,
        cardinality_sketch_factory,
        random_seed,
        noiser_class=noiser_class,
        epsilon=epsilon,
        epsilon_split=epsilon_split,
        union=union)

  def assert_compatible(self, other):
    """"Check if the two StratifiedSketch are comparable.

    Args:
      other: the other StratifiedSketch for comparison.

    Raises:
      AssertionError: if the other sketches are not StratifiedSketch, or if
      their random_seed are different, or if the frequency targets are
      different.
    """
    assert isinstance(other,
                      StratifiedSketch), ('other is not a StratifiedSketch.')
    assert self.seed == other.seed, ('The random seeds are not the same: '
                                     f'{self.seed} != {other.seed}')
    assert self.max_freq == other.max_freq, (
        'The frequency targets are different: '
        f'{self.max_freq} != {other.max_freq}')
    assert isinstance(self.cardinality_sketch_factory,
                      type(other.cardinality_sketch_factory))
    # Only compare the contained sketch types when both sides are populated.
    if (self.sketches != {} and other.sketches != {}):
      assert isinstance(
          list(self.sketches.values())[0],
          type(list(other.sketches.values())[0]))
class PairwiseEstimator(EstimatorBase):
  """Merge and estimate two StratifiedSketch."""

  def __init__(self,
               sketch_operator,
               cardinality_estimator,
               denoiser_class=None):
    """Create an estimator for two Stratified sketches.

    Args:
      sketch_operator: an object that have union, intersection, and difference
        methods for two sketches.
      cardinality_estimator: a cardinality estimator for estimating the
        cardinality of a sketch.
      denoiser_class: optional class used by denoise_sketch to remove noise
        from a sketch's buckets before merging; None disables denoising.
    """
    self.cardinality_estimator = cardinality_estimator
    self.sketch_union = sketch_operator.union
    self.sketch_difference = sketch_operator.difference
    self.sketch_intersection = sketch_operator.intersection
    self.denoiser_class = denoiser_class

  def denoise_sketch(self, stratified_sketch):
    """Denoise a StratifiedSketch.

    Args:
      stratified_sketch: a StratifiedSketch.

    Returns:
      A denoised StratifiedSketch.
    """
    denoised_stratified_sketch = copy.deepcopy(stratified_sketch)
    if self.denoiser_class is None:
      # Denoising disabled: return the (deep-copied) input unchanged.
      return denoised_stratified_sketch
    epsilon = stratified_sketch.epsilon
    epsilon_split = stratified_sketch.epsilon_split
    max_key = str(stratified_sketch.max_freq) + '+'
    # Mirror the budget split used when the sketch was noised (see
    # StratifiedSketch.__init__): epsilon_split == 0 means the 1+ sketch
    # consumed the whole budget.
    one_plus_epsilon = epsilon if epsilon_split == 0 else epsilon * epsilon_split
    rest_epsilon = stratified_sketch.epsilon * (1 -
                                                stratified_sketch.epsilon_split)
    one_plus_denoiser = self.denoiser_class(epsilon=one_plus_epsilon)
    rest_denoiser = self.denoiser_class(epsilon=rest_epsilon)
    for freq in range(1, denoised_stratified_sketch.max_freq):
      denoised_stratified_sketch.sketches[freq] = rest_denoiser(
          stratified_sketch.sketches[freq])
    denoised_stratified_sketch.sketches[max_key] = rest_denoiser(
        stratified_sketch.sketches[max_key])
    denoised_stratified_sketch.sketches[ONE_PLUS] = one_plus_denoiser(
        stratified_sketch.sketches[ONE_PLUS])
    return denoised_stratified_sketch

  def merge_sketches(self, this, that):
    """Merge two StratifiedSketch.

    Given 2 sketches A and B:
    Merged(k) = (A(k) & B(0)) U (A(k-1) & B(1)) ... U (A(0) & B(k))
    where
    A(k) & B(0) = A(k) - (A(k) & B(1+))
    B(k) & A(0) = B(k) - (B(k) & A(1+))

    Args:
      this: one of the two StratifiedSketch to be merged.
      that: the other StratifiedSketch to be merged.

    Returns:
      A merged StratifiedSketch from the input.
    """
    this.assert_compatible(that)
    this_one_plus = this.sketches[ONE_PLUS]
    that_one_plus = that.sketches[ONE_PLUS]
    max_freq = this.max_freq
    max_key = str(max_freq) + '+'
    merged_sketch = copy.deepcopy(this)
    # Build each exact-frequency bucket k of the merge by summing the ways
    # frequency k can arise across the two sketches (i in A, k-i in B).
    for k in range(1, max_freq):
      # Calculate A(k) & B(0) = A(k) - (A(k) & B(1+))
      merged = self.sketch_difference(
          this.sketches[k],
          self.sketch_intersection(this.sketches[k], that_one_plus))
      # Calculate A(0) & B(k) = B(k) - (B(k) & A(1+))
      merged = self.sketch_union(
          merged,
          self.sketch_difference(
              that.sketches[k],
              self.sketch_intersection(this_one_plus, that.sketches[k])))
      # Calculate A(i) & B(k-i)
      for i in range(1, k):
        merged = self.sketch_union(
            merged,
            self.sketch_intersection(this.sketches[i], that.sketches[(k - i)]))
      merged_sketch.sketches[k] = merged

    # Calculate Merged(max_freq)
    # Anything at max_freq in A stays at max_freq; combinations that reach
    # max_freq are accumulated while `rest` tracks B's not-yet-used mass.
    merged = this.sketches[max_key]
    rest = that_one_plus
    for k in range(1, max_freq):
      merged = self.sketch_union(
          merged, self.sketch_intersection(this.sketches[max_freq - k], rest))
      rest = self.sketch_difference(rest, that.sketches[k])
    merged = self.sketch_union(
        merged,
        self.sketch_difference(
            that.sketches[max_key],
            self.sketch_intersection(that.sketches[max_key], this_one_plus)))
    merged_sketch.sketches[max_key] = merged

    # Calculate Merged(1+)
    merged_one_plus = None
    for k in range(1, max_freq):
      merged_one_plus = self.sketch_union(merged_one_plus,
                                          merged_sketch.sketches[k])
    merged_one_plus = self.sketch_union(merged_one_plus,
                                        merged_sketch.sketches[max_key])
    merged_sketch.sketches[ONE_PLUS] = merged_one_plus

    return merged_sketch

  def estimate_cardinality(self, stratified_sketch):
    """Estimate the cardinality of a StratifiedSketch.

    Args:
      stratified_sketch: a StratifiedSketch object.

    Returns:
      A dictionary: the key is the frequency and the value is the corresponding
      cardinality.
    """
    # We estimate a histogram for each frequency bucket. Since an estimator
    # returns a histogram, we assert that for each bucket it has a length of 1.
    # This has to be the case because the input to the underlying estimators
    # are cardinality sketches and should not have any repeated ids, thus, no
    # bucket other than frequency = 1.
    # We then put them into a list and take the cumulative of it to match the
    # api output.
    result = []
    for freq in range(1, stratified_sketch.max_freq):
      freq_count_histogram = self.cardinality_estimator(
          [stratified_sketch.sketches[freq]])
      assert (len(freq_count_histogram) == 1), (
          'cardinality sketch has more than 1 freq bucket.')
      result.append(freq_count_histogram[0])
    max_key = str(stratified_sketch.max_freq) + '+'
    max_freq_count_histogram = self.cardinality_estimator(
        [stratified_sketch.sketches[max_key]])
    assert (len(max_freq_count_histogram) == 1), (
        'cardinality sketch has more than 1 freq bucket for max_freq.')
    result.append(max_freq_count_histogram[0])
    # Reverse, cumulative-sum, reverse back: entry i becomes the i+ reach.
    result = list(np.cumsum(list(reversed(result))))
    result = list(reversed(result))
    return result
class SequentialEstimator(EstimatorBase):
"""Sequential frequency estimator."""
| [
2,
15069,
12131,
383,
15348,
25564,
414,
10062,
18991,
25161,
46665,
198,
2,
198,
2,
49962,
739,
262,
24843,
13789,
11,
10628,
362,
13,
15,
357,
1169,
366,
34156,
15341,
198,
2,
345,
743,
407,
779,
428,
2393,
2845,
287,
11846,
351,
... | 2.308343 | 6,616 |
from __future__ import annotations
import typing
from functools import partial
import cryptography.exceptions as bkx
from cryptography.hazmat.primitives import serialization as serial
from cryptography.hazmat.primitives.asymmetric import rsa, utils
from cryptography.hazmat.primitives.serialization import (
Encoding,
PrivateFormat,
PublicFormat,
)
from ... import base, exc
from ..asymmetric import OAEP, PSS
from . import Hash
from .asymmetric import get_padding_func
def generate(bits: int, e: int = 65537) -> RSAPrivateKey:
    """Create a new RSA private key.

    Args:
        bits: Key modulus size in bits; values above 1024 are recommended.
        e: Public exponent (default 65537).

    Returns:
        RSAPrivateKey: The freshly generated RSA private key.
    """
    private_key = RSAPrivateKey(bits, e)
    return private_key
def load_public_key(data: bytes) -> RSAPublicKey:
    """Deserialize an RSA public key.

    Args:
        data: The serialized public key as a bytes-like object.

    Returns:
        RSAPublicKey: The deserialized RSA public key.
    """
    public_key = RSAPublicKey.load(data)
    return public_key
def load_private_key(
    data: bytes,
    passphrase: typing.Optional[bytes] = None,
) -> RSAPrivateKey:
    """Deserialize an RSA private key.

    Args:
        data: The serialized private key as a bytes-like object.
        passphrase: The passphrase the key was encrypted with, or
            ``None`` if the key is unencrypted.

    Returns:
        RSAPrivateKey: The deserialized RSA private key.
    """
    private_key = RSAPrivateKey.load(data, passphrase)
    return private_key
| [
6738,
11593,
37443,
834,
1330,
37647,
198,
198,
11748,
19720,
198,
6738,
1257,
310,
10141,
1330,
13027,
198,
198,
11748,
45898,
13,
1069,
11755,
355,
275,
74,
87,
198,
6738,
45898,
13,
71,
1031,
6759,
13,
19795,
20288,
1330,
11389,
1634... | 2.805921 | 608 |
# Drive the GoPiGo robot through its interactive menu until the user quits.
from gopigo import *
import time

# Piggy presumably comes in via the star import above -- verify against gopigo.
p = Piggy()

try:
    menu()
except(KeyboardInterrupt, SystemExit):
    # NOTE(review): this re-import looks redundant -- the star import above
    # already bound stop(); confirm before removing.
    from gopigo import *
    # Halt the motors on Ctrl-C / exit so the robot does not keep driving.
    stop()
6738,
308,
404,
14031,
1330,
1635,
198,
11748,
640,
628,
198,
79,
796,
23097,
1360,
3419,
628,
198,
198,
28311,
25,
198,
220,
220,
220,
6859,
3419,
198,
16341,
7,
9218,
3526,
9492,
3622,
11,
4482,
30337,
2599,
198,
220,
220,
220,
42... | 2.592593 | 54 |
import sys

# Re-case each word read from the file named on the command line.
# Every input line is "<word> <pattern>" where pattern is a string of
# '0'/'1' flags: '0' lowercases the matching character, anything else
# uppercases it.  Characters beyond the pattern length are left as-is.
with open(sys.argv[1], 'r') as lines:
    for line in lines:
        line = line.replace('\n', '').replace('\r', '')
        if len(line) > 0:
            word, pattern = line.split(' ')
            # Build the result in one pass instead of repeated string
            # slicing, which was quadratic in the word length.
            word = ''.join(
                ch.lower() if flag == '0' else ch.upper()
                for ch, flag in zip(word, pattern)
            ) + word[len(pattern):]
            print(word)
| [
11748,
25064,
201,
198,
6615,
796,
1280,
7,
17597,
13,
853,
85,
58,
16,
4357,
705,
81,
11537,
201,
198,
1640,
1627,
287,
3951,
25,
201,
198,
220,
220,
220,
1627,
796,
1627,
13,
33491,
10786,
59,
77,
3256,
10148,
737,
33491,
10786,
... | 2.068421 | 190 |
#!/usr/bin/env python
# Copyright (c) Twisted Matrix Laboratories.
# Transformed into a cheesy ngram-server by JRN
# See LICENSE for details.
#
# Copyright (c) [2014-], Yandex, LLC
# Author: jorono@yandex-team.ru (Josef Robert Novak)
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted #provided that the following conditions
# are met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of #conditions and the following
# disclaimer in the documentation and/or other materials provided
# with the distribution.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
# FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
# COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT,
# INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
# (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
# HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT,
# STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED
# OF THE POSSIBILITY OF SUCH DAMAGE.
#
# \file
# A simplistic example of an ngram-server for the rnnlm bindings.
# This is based on the twisted-python echo-server example.
# It fields requests in the form of a string, and returns a JSON
# object consisting of the sentence-prob given the RNNLM, as well
# as the probabilities of the individual tokens. There are also some
# stubs for KenLM, but we don't use these currently for the G2P.
from twisted.internet.protocol import Protocol, Factory
from twisted.internet import reactor
import struct, re, json, sys, os
sys.path.append (os.getcwd())
import kenlm, rnnlm
from phonetisaurus import Phonetisaurus
# For some reason this has no effect in OSX+Python2.7
# Patch the json module's float formatter so every float serialized by
# json.dumps is rendered with exactly 4 decimal places.
from json import encoder
encoder.FLOAT_REPR = lambda o: format(o, '.4f')
### Protocol Implementation
def FormatG2PResult(responses, m):
    """
    Build a JSON-serializable list of G2P hypotheses.

    Each entry carries the path score, the space-joined pronunciation,
    and the reconstructed joint grapheme}phoneme token sequence.
    """
    results = []
    for response in responses:
        # Rebuild the original joint-token sequence, skipping pairs where
        # both labels are epsilon (id 0).
        label_pairs = zip(response.ILabels, response.OLabels)
        joint_toks = ["{0}}}{1}".format(m.FindIsym(g), mapsym(m.FindOsym(p)))
                      for g, p in label_pairs
                      if not (g == 0 and p == 0)]
        results.append({
            'score': response.PathWeight,
            'pron': " ".join(m.FindOsym(p) for p in response.Uniques),
            'joint': " ".join(joint_toks),
        })
    return results
# This is just about the simplest possible ngram server
# NOTE: this file is Python 2 (print statement, dict.iteritems below).
if __name__ == '__main__':
    import sys, argparse

    example = "USAGE: {0} --g2p test.g2p.fst --arpa test.arpa.bin "\
        "--rnnlm test.rnnlm".format (sys.argv[0])
    parser = argparse.ArgumentParser (description = example)
    # Each of these model 'types' should ultimately permit a list
    parser.add_argument ("--g2p", "-g", help="PhonetisaurusG2P model.")
    parser.add_argument ("--arpa", "-a", help="ARPA model in KenLM binary.")
    parser.add_argument ("--rnnlm", "-r", help="RnnLM to use.")
    parser.add_argument ("--prnnlm", "-pr", help="Phoneme RnnLM to use.")
    parser.add_argument ("--port", "-p", help="Port to run the server on",
                         type=int, default=8000)
    parser.add_argument ("--verbose", "-v", help="Verbose mode", default=False,
                         action="store_true")
    args = parser.parse_args ()

    # Echo the parsed arguments in verbose mode.
    if args.verbose :
        for k,v in args.__dict__.iteritems () :
            print k, "=", v

    # Instantiate only the models the user actually requested.
    models = {}
    if args.g2p :
        models['g2p'] = Phonetisaurus (args.g2p)
    if args.arpa :
        models['arpa'] = kenlm.LanguageModel (args.arpa)
    if args.rnnlm :
        models['rnnlm'] = rnnlm.RnnLMPy (args.rnnlm)
    if args.prnnlm :
        models['prnnlm'] = rnnlm.RnnLMPy (args.prnnlm)

    # NOTE(review): main() is not defined in this chunk -- presumably
    # defined elsewhere in the file; verify.
    main (models)
| [
2,
48443,
14629,
14,
8800,
14,
24330,
21015,
198,
2,
15069,
357,
66,
8,
40006,
24936,
46779,
13,
198,
2,
3602,
12214,
656,
257,
45002,
299,
4546,
12,
15388,
416,
32598,
45,
198,
2,
4091,
38559,
24290,
329,
3307,
13,
198,
2,
198,
2... | 2.586306 | 1,738 |
from math import trunc

# Read a real number from the user and report only its integer portion.
numero = float(input('Digite um valor: '))
parte_inteira = trunc(numero)
print(f'A porção inteira de {numero} é {parte_inteira}')
| [
6738,
10688,
1330,
40122,
198,
198,
77,
796,
12178,
7,
15414,
10786,
19511,
578,
23781,
1188,
273,
25,
705,
4008,
198,
4798,
7,
69,
6,
32,
16964,
16175,
28749,
493,
68,
8704,
390,
1391,
77,
92,
38251,
1391,
2213,
19524,
7,
77,
38165... | 2.422222 | 45 |
# Module author / maintainer contact.
__author__ = 'Vijay_Thomas@intuit.com'
| [
834,
9800,
834,
796,
705,
53,
2926,
323,
62,
22405,
31,
600,
5013,
13,
785,
6,
628
] | 2.352941 | 17 |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import json
from alipay.aop.api.constant.ParamConstants import *
from alipay.aop.api.domain.VoucherTimeRule import VoucherTimeRule
from alipay.aop.api.domain.VoucherSingleItemInfo import VoucherSingleItemInfo
from alipay.aop.api.domain.PromoInfo import PromoInfo
from alipay.aop.api.domain.VoucherTimeRule import VoucherTimeRule
| [
2,
48443,
14629,
14,
8800,
14,
24330,
21015,
198,
2,
532,
9,
12,
19617,
25,
3384,
69,
12,
23,
532,
9,
12,
198,
11748,
33918,
198,
198,
6738,
435,
541,
323,
13,
64,
404,
13,
15042,
13,
9979,
415,
13,
22973,
34184,
1187,
1330,
163... | 2.687943 | 141 |
# Shared pyglet bootstrap: window, keyboard state, draw batches, and
# global flags used by the rest of the game's modules.
from pyglet.window import key
import pyglet
import directories
import assets

# Window
caption = "Colors"
#icon = pyglet.image.load("Assets/mario.png")
wWidth, wHeight = 1280, 720
window = pyglet.window.Window(wWidth,wHeight)
window.set_caption(caption)
window.set_icon(assets.icon)

# Keyboard
# KeyStateHandler tracks which keys are currently held down.
keys = key.KeyStateHandler()
window.push_handlers(keys)

# Batches
background = pyglet.graphics.Batch()
# Where player resides & things player collides with
ground = pyglet.graphics.Batch()
foreground = pyglet.graphics.Batch()

# Global Variables
# state: current game-state id -- the meaning of each value lives in the
# modules that read it; verify there.
state = 1
debug = True

# Fullscreen
# Debounce bookkeeping for the fullscreen toggle key (presumably "f").
timeSinceLastF = 1
# NOTE(review): name typo ("Eligiblity") kept as-is -- other modules may
# reference it by this spelling.
fKeyEligiblity = True
| [
6738,
12972,
70,
1616,
13,
17497,
1330,
1994,
201,
198,
201,
198,
11748,
12972,
70,
1616,
201,
198,
201,
198,
11748,
29196,
201,
198,
11748,
6798,
201,
198,
201,
198,
2,
26580,
201,
198,
6888,
1159,
796,
366,
5216,
669,
1,
201,
198,... | 2.588235 | 255 |
from mkt.webapps.serializers import SimpleAppSerializer, SimpleESAppSerializer
class FeedFireplaceESAppSerializer(BaseFireplaceAppSerializer,
                                   SimpleESAppSerializer):
    """
    Elasticsearch-backed app serializer for Fireplace Feed pages (mostly
    detail pages).

    Combines the Fireplace-specific behavior of BaseFireplaceAppSerializer
    with the ES-document sourcing of SimpleESAppSerializer. Needs
    collection groups.
    """
| [
6738,
285,
21841,
13,
12384,
18211,
13,
46911,
11341,
1330,
17427,
4677,
32634,
7509,
11,
17427,
1546,
4677,
32634,
7509,
628,
628,
198,
198,
4871,
18272,
13543,
5372,
1546,
4677,
32634,
7509,
7,
14881,
13543,
5372,
4677,
32634,
7509,
11,... | 2.706897 | 116 |
#!/usr/bin/env python3
import sys
class GFF3_line:
    """A class to represent GFF3 lines and allow modification of the
    values of its fields.

    Attributes:
    ------------
    seqid, source, type, start, end,
    score, strand, phase, attributes_str
                        strings holding the nine tab-separated GFF3 fields
    attributes          dict of the key-value pairs in the 9th field
    attributes_order    list of attribute keys in their original order

    Methods:
    ------------
    str() / repr()      Outputs the GFF3 line
    refreshAttrStr()    Must be called after modifying attributes or
                        attributes_order so attributes_str is rebuilt
    """
    def __init__(self, line):
        """Parse one tab-delimited GFF3 line.

        The attributes are kept in a dictionary and the keys are ordered
        in a list to preserve the order of attributes upon getting the
        string representation of the line from the GFF3_line object.
        """
        (self.seqid,
        self.source,
        self.type,
        self.start,
        self.end,
        self.score,
        self.strand,
        self.phase,
        self.attributes_str) = line.strip().split('\t')
        # preserve attribute order as a list of keys (attributes_order)
        attributes_list = self.attributes_str.split(';')
        # maxsplit=1 keeps values intact when they themselves contain '='
        self.attributes_order = [attr.split('=', 1)[0] for attr in
                                                        attributes_list]
        # store attribute keys and their values in a dictionary
        self.attributes = {attr.split('=', 1)[0]: attr.split('=', 1)[1]
                                            for attr in attributes_list}
        # rename the name attribute key to Name so it conforms to the
        # GFF3 specification, where Name is a reserved attribute key
        if 'name' in self.attributes:
            self.attributes['Name'] = self.attributes.pop('name')
            self.attributes_order[self.attributes_order.index('name')] = 'Name'

    def __repr__(self):
        """Output for overloaded functions str() and repr()"""
        return '\t'.join([str(self.seqid),
                          str(self.source),
                          str(self.type),
                          str(self.start),
                          str(self.end),
                          str(self.score),
                          str(self.strand),
                          str(self.phase),
                          str(self.attributes_str)])

    def refreshAttrStr(self):
        """If the attributes dictionary or attributes_order has been
        altered this should be called to update attributes_str.
        """
        self.attributes_str = ';'.join(['='.join(
            [attr, self.attributes[attr]]) for attr in self.attributes_order])
def mergeCoords(A, B):
    """Merge two coordinate tuples A and B, where A must start at or
    before B's start.

    Disjoint (or merely touching) intervals are returned unchanged with
    flag 0; a partial overlap yields the merged span twice with flag 1;
    B contained in A returns A twice with flag 2.

    let A = (a1, a2), B = (b1, b2) | a1<=b1, a1<=a2, b1<=b2
    case 1: a2<=b1 ---> output A and B
    case 2: b1<a2 && b2>a2 ---> output (a1, b2)
    case 3: b2<=a2 ---> output A
    """
    assert min(A) <= min(B), ("tuples given to mergeCoords in wrong order: "
                              "A={0}, B={1}").format(A, B)

    b_start, a_end, b_end = min(B), max(A), max(B)
    if b_start >= a_end:
        # case 1: disjoint -- keep both intervals as given
        return ((A, B), 0)
    if b_end > a_end:
        # case 2: partial overlap -- collapse into one spanning interval
        merged = (min(A), b_end)
        return ((merged, merged), 1)
    if b_end <= a_end:
        # case 3: B contained in A
        return ((A, A), 2)
    raise Exception(("Unexpected result from mergeCoords(A,B) using "
                     " A={0}, B={1}").format(A, B))
# print help information if asked for
if '-h' in sys.argv:
    # NOTE(review): this resolves to the *builtin* interactive help() unless
    # a custom help() is defined earlier in the file -- verify.
    help()

# for each line in the input gff store as a GFF3_line object in a
# dictionary organized by scaffold. then merge features on each
# scaffold and output
gffFeats = {}
for line in sys.stdin:
    # skip comment lines
    if not line.startswith('#'):
        lineObj = GFF3_line(line)
        scaf = lineObj.seqid
        if scaf in gffFeats:
            gffFeats[scaf].append(lineObj)
        else:
            gffFeats[scaf] = [lineObj]
# for each scaffold sort features by start coordinate then merge
# overlapping features
for scaf in gffFeats:
    gffFeats[scaf] = sorted(gffFeats[scaf], key=lambda x:int(x.start))
    newScafFeats = []
    # currentFeat accumulates a growing merged interval until a disjoint
    # feature is seen, at which point it is flushed to newScafFeats.
    currentFeat = gffFeats[scaf][0]
    i=0
    while i < len(gffFeats[scaf]) - 1:
        nextFeat = gffFeats[scaf][i + 1]
        mergeResult = mergeCoords((int(currentFeat.start), int(currentFeat.end)),
                                  (int(nextFeat.start), int(nextFeat.end)))
        # feats do not overlap
        if mergeResult[1] == 0:
            currentFeat.start, currentFeat.end = mergeResult[0][0]
            # merged features lose their score (and attributes unless asked)
            currentFeat.score = '.'
            if '-keepAttr' not in sys.argv:
                currentFeat.attributes_str = '.'
            newScafFeats.append(currentFeat)
            currentFeat = nextFeat
            # NOTE(review): i is not advanced in this branch; the next pass
            # merges currentFeat with itself (flag 2) before moving on.
            # Appears harmless for start < end features but wasteful -- confirm.
        # feats overlap. continue iterations and check for overlap with
        # subsequent feature
        else:
            currentFeat.start, currentFeat.end = mergeResult[0][0]
            i += 1
    # finish processing last feature
    currentFeat.score = '.'
    if '-keepAttr' not in sys.argv:
        currentFeat.attributes_str = '.'
    newScafFeats.append(currentFeat)
    # replace existing
    gffFeats[scaf] = newScafFeats
# output new gff lines in order of scaffold and start coordinate
for scaf in sorted(gffFeats.keys()):
    for line in gffFeats[scaf]:
        print(str(line))
| [
2,
48443,
14629,
14,
8800,
14,
24330,
21015,
18,
198,
198,
11748,
25064,
628,
198,
198,
4871,
402,
5777,
18,
62,
1370,
25,
198,
220,
220,
220,
37227,
32,
1398,
284,
1128,
42503,
402,
5777,
18,
3951,
290,
1249,
17613,
286,
262,
198,
... | 2.131376 | 2,725 |
from __future__ import print_function
from problog.logic import Var, Term, Constant
from itertools import product
from logging import getLogger
from problog.util import Timer
import logging
from getSQLQuery import getSQLQuery
# from sympy.parsing.sympy_parser import parse_expr
# from sympy import lambdify, symbols
from numpy import errstate
import time
import psycopg2
from math import exp
from eval import getLogList, evaluate_expression
# from numericSS import numericSS # Numeric Safe Sample
from copy import copy
| [
6738,
11593,
37443,
834,
1330,
3601,
62,
8818,
198,
6738,
1861,
6404,
13,
6404,
291,
1330,
12372,
11,
35118,
11,
20217,
198,
6738,
340,
861,
10141,
1330,
1720,
198,
6738,
18931,
1330,
651,
11187,
1362,
198,
6738,
1861,
6404,
13,
22602,
... | 3.583893 | 149 |
import os
# checks if the scanned IP address equals the target
| [
11748,
28686,
628,
198,
2,
8794,
611,
262,
28660,
6101,
2209,
21767,
262,
2496,
198
] | 4.333333 | 15 |
import json
import os
if __name__ == '__main__':
    # NOTE(review): readJson(), readDirList() and writeResult() are not
    # defined in this chunk; presumably defined earlier in the file -- verify.
    obj = readJson()
    if obj is None:
        print('not obj data')
    else:
        result = readDirList()
        # NOTE(review): `result` is never passed to writeResult() -- it may
        # be communicated through module-level state; confirm.
        writeResult()
| [
11748,
33918,
198,
11748,
28686,
628,
628,
628,
198,
361,
11593,
3672,
834,
6624,
705,
834,
12417,
834,
10354,
198,
220,
220,
220,
26181,
796,
1100,
41,
1559,
3419,
198,
220,
220,
220,
611,
26181,
318,
6045,
25,
198,
220,
220,
220,
... | 2.25 | 84 |
import torch
from torch_geometric.data import Data
import logging
import numpy as np
from abc import ABC, abstractmethod
import time
from time import time as now
from funlib.segment.arrays import replace_values
from gnn_agglomeration import utils
# Module-level logger, capped at INFO so routine progress is visible
# without per-item debug noise.
logger = logging.getLogger(__name__)
logger.setLevel(logging.INFO)
| [
11748,
28034,
198,
6738,
28034,
62,
469,
16996,
13,
7890,
1330,
6060,
198,
11748,
18931,
198,
11748,
299,
32152,
355,
45941,
198,
6738,
450,
66,
1330,
9738,
11,
12531,
24396,
198,
11748,
640,
198,
6738,
640,
1330,
640,
355,
783,
198,
... | 3.37234 | 94 |
from pyconjp_domains.talks import Category
# Raw category payload fixture: each entry groups selectable items under a
# category title, mirroring the upstream API response shape.
categories_raw_data = [
    {
        "id": 30061,
        "title": "Track",
        "items": [
            {"id": 80001, "name": "Track1"},
            {"id": 80002, "name": "Track2"},
            {"id": 80003, "name": "Track3"},
            {"id": 80004, "name": "Track4"},
        ],
    },
    {
        "id": 30062,
        "title": "Level",
        "items": [
            {"id": 80011, "name": "Level1"},
            {"id": 80012, "name": "Level2"},
            {"id": 80013, "name": "Level3"},
        ],
    },
    {
        "id": 30063,
        "title": "Language",
        "items": [
            {"id": 80021, "name": "Language1"},
            {"id": 80022, "name": "Language2"},
        ],
    },
    {
        "id": 30064,
        "title": "発表資料の言語 / Language of presentation material",
        "items": [
            {"id": 80031, "name": "Slide Language1"},
            {"id": 80032, "name": "Slide Language2"},
        ],
    },
]

# Expected mapping from item id to the owning category's title.
item_id_to_category_title = {
    80001: "Track",
    80002: "Track",
    80003: "Track",
    80004: "Track",
    80011: "Level",
    80012: "Level",
    80013: "Level",
    80021: "Language",
    80022: "Language",
    80031: "発表資料の言語 / Language of presentation material",
    80032: "発表資料の言語 / Language of presentation material",
}

# Expected mapping from item id to the item's display name.
item_id_to_name = {
    80001: "Track1",
    80002: "Track2",
    80003: "Track3",
    80004: "Track4",
    80011: "Level1",
    80012: "Level2",
    80013: "Level3",
    80021: "Language1",
    80022: "Language2",
    80031: "Slide Language1",
    80032: "Slide Language2",
}

# Parameter tuples: (selected item ids, flag). The flag's meaning is not
# visible in this chunk -- presumably it drives the "All" level in the
# expected Category below; verify against Category's factory.
create_parameters = (
    ([], False),
    ([80004, 80013, 80021, 80032], False),
    ([80013, 80022], False),
    ([80022, 80032], True),
)

# Category objects expected for each entry of create_parameters, in order.
create_expecteds = (
    Category(None, None, None, None),
    Category("Track4", "Level3", "Language1", "Slide Language2"),
    Category(None, "Level3", "Language2", None),
    Category(None, "All", "Language2", "Slide Language2"),
)
| [
6738,
12972,
1102,
34523,
62,
3438,
1299,
13,
83,
23833,
1330,
21743,
198,
198,
66,
26129,
62,
1831,
62,
7890,
796,
685,
198,
220,
220,
220,
1391,
198,
220,
220,
220,
220,
220,
220,
220,
366,
312,
1298,
5867,
5333,
11,
198,
220,
2... | 1.955577 | 1,013 |
def masters():
    """Gets tachyon masters

    Queries the salt mine for every host carrying the
    ``roles:tachyon.master`` role.

    :return: result of the ``search.mine_by_host`` salt helper for
        ``roles:tachyon.master`` (the matching hosts)
    """
    return __salt__['search.mine_by_host']('roles:tachyon.master')
def is_primary_master():
    """Checks whether current host is the first one created as tachyon master

    :return: result of the ``search.is_primary_host`` salt helper --
        truthy when this host is the primary ``roles:tachyon.master`` host
    """
    return __salt__['search.is_primary_host']('roles:tachyon.master')
| [
198,
4299,
18159,
33529,
198,
220,
220,
220,
37227,
38,
1039,
256,
620,
19181,
18159,
628,
220,
220,
220,
1058,
7783,
25,
198,
220,
220,
220,
37227,
198,
220,
220,
220,
1441,
11593,
82,
2501,
834,
17816,
12947,
13,
3810,
62,
1525,
6... | 2.64 | 125 |
import time
from datetime import datetime, date, timedelta
from config import get_env
| [
11748,
640,
198,
6738,
4818,
8079,
1330,
4818,
8079,
11,
3128,
11,
28805,
12514,
198,
6738,
4566,
1330,
651,
62,
24330,
628
] | 3.954545 | 22 |
# Author: Travis Oliphant
# 1999 -- 2002
import warnings
import sigtools
from scipy import linalg
from scipy.fftpack import fft, ifft, ifftshift, fft2, ifft2, fftn, \
ifftn, fftfreq
from numpy import polyadd, polymul, polydiv, polysub, roots, \
poly, polyval, polyder, cast, asarray, isscalar, atleast_1d, \
ones, real, real_if_close, zeros, array, arange, where, rank, \
newaxis, product, ravel, sum, r_, iscomplexobj, take, \
argsort, allclose, expand_dims, unique, prod, sort, reshape, \
transpose, dot, any, mean, flipud, ndarray
import numpy as np
from scipy.misc import factorial
from windows import get_window
# Integer codes for the string `mode` / `boundary` arguments, as consumed
# by the sigtools C routines (see sigtools._convolve2d below).
_modedict = {'valid':0, 'same':1, 'full':2}

_boundarydict = {'fill':0, 'pad':0, 'wrap':2, 'circular':2, 'symm':1,
                 'symmetric':1, 'reflect':4}
def correlate(in1, in2, mode='full'):
    """
    Cross-correlate two N-dimensional arrays.

    Cross-correlate in1 and in2 with the output size determined by the mode
    argument.

    Parameters
    ----------
    in1: array
        first input.
    in2: array
        second input. Should have the same number of dimensions as in1.
    mode: str {'valid', 'same', 'full'}
        a string indicating the size of the output:
            - 'valid': the output consists only of those elements that do not
              rely on the zero-padding.
            - 'same': the output is the same size as the largest input centered
              with respect to the 'full' output.
            - 'full': the output is the full discrete linear cross-correlation
              of the inputs. (Default)

    Returns
    -------
    out: array
        an N-dimensional array containing a subset of the discrete linear
        cross-correlation of in1 with in2.

    Raises
    ------
    ValueError
        If mode is unrecognized, or if mode is 'valid' and in2 is larger
        than in1 in some dimension.

    Notes
    -----
    The correlation z of two arrays x and y of rank d is defined as

      z[...,k,...] = sum[..., i_l, ...]
                         x[..., i_l,...] * conj(y[..., i_l + k,...])

    """
    val = _valfrommode(mode)

    if mode == 'valid':
        ps = [i - j + 1 for i, j in zip(in1.shape, in2.shape)]
        out = np.empty(ps, in1.dtype)
        for i in range(len(ps)):
            if ps[i] <= 0:
                raise ValueError("Dimension of x(%d) < y(%d) " \
                                 "not compatible with valid mode" % \
                                 (in1.shape[i], in2.shape[i]))

        z = sigtools._correlateND(in1, in2, out, val)
    else:
        ps = [i + j - 1 for i, j in zip(in1.shape, in2.shape)]
        # zero-pad in1 so the 'same'/'full' windows never run off the edge
        in1zpadded = np.zeros(ps, in1.dtype)
        # tuple of slices (a list of slices is deprecated numpy indexing)
        sc = tuple(slice(0, i) for i in in1.shape)
        in1zpadded[sc] = in1.copy()

        if mode == 'full':
            out = np.empty(ps, in1.dtype)
            z = sigtools._correlateND(in1zpadded, in2, out, val)
        elif mode == 'same':
            out = np.empty(in1.shape, in1.dtype)
            z = sigtools._correlateND(in1zpadded, in2, out, val)
        else:
            # fixed typo in the error message (was "Uknown mode")
            raise ValueError("Unknown mode %s" % mode)

    return z
def fftconvolve(in1, in2, mode="full"):
    """Convolve two N-dimensional arrays using FFT. See convolve.

    Parameters
    ----------
    in1, in2 : ndarray
        Input arrays; in2 should have the same number of dimensions as in1.
    mode : str {'valid', 'same', 'full'}, optional
        A string indicating the size of the output (see `convolve`).

    Returns
    -------
    out : ndarray
        The discrete linear convolution of in1 with in2.
    """
    s1 = np.asarray(in1.shape)
    s2 = np.asarray(in2.shape)
    # np.complexfloating is the dtype-safe spelling; the old `np.complex`
    # alias was removed from numpy.
    complex_result = (np.issubdtype(in1.dtype, np.complexfloating) or
                      np.issubdtype(in2.dtype, np.complexfloating))
    size = s1 + s2 - 1

    # Always use 2**n-sized FFT. FFT shapes must be ints: the float array
    # produced by np.ceil is rejected by modern numpy/scipy.
    fsize = [int(2 ** np.ceil(np.log2(d))) for d in size]
    IN1 = fftn(in1, fsize)
    IN1 *= fftn(in2, fsize)
    fslice = tuple([slice(0, int(sz)) for sz in size])
    ret = ifftn(IN1)[fslice].copy()
    del IN1
    if not complex_result:
        # real inputs: drop the negligible imaginary FFT round-off
        ret = ret.real
    if mode == "full":
        return ret
    elif mode == "same":
        if np.prod(s1) > np.prod(s2):
            osize = s1
        else:
            osize = s2
        return _centered(ret, osize)
    elif mode == "valid":
        return _centered(ret, abs(s2 - s1) + 1)
def convolve(in1, in2, mode='full'):
    """
    Convolve two N-dimensional arrays.

    Convolve in1 and in2 with output size determined by mode.

    Parameters
    ----------
    in1: array
        first input.
    in2: array
        second input. Should have the same number of dimensions as in1.
    mode: str {'valid', 'same', 'full'}
        a string indicating the size of the output:

        ``valid`` : the output consists only of those elements that do not
           rely on the zero-padding.

        ``same`` : the output is the same size as the largest input centered
           with respect to the 'full' output.

        ``full`` : the output is the full discrete linear cross-correlation
           of the inputs. (Default)

    Returns
    -------
    out: array
        an N-dimensional array containing a subset of the discrete linear
        cross-correlation of in1 with in2.

    """
    volume = asarray(in1)
    kernel = asarray(in2)

    # np.ndim replaces the `rank` helper, which was removed from numpy.
    if np.ndim(volume) == np.ndim(kernel) == 0:
        # both scalars: convolution degenerates to a product
        return volume * kernel
    elif not volume.ndim == kernel.ndim:
        raise ValueError("in1 and in2 should have the same rank")

    # reverse the kernel along every axis (convolution == correlation
    # with a flipped kernel); use a tuple -- list indexing is deprecated
    slice_obj = tuple([slice(None, None, -1)] * len(kernel.shape))

    if mode == 'valid':
        for d1, d2 in zip(volume.shape, kernel.shape):
            if not d1 >= d2:
                raise ValueError(
                    "in1 should have at least as many items as in2 in " \
                    "every dimension for valid mode.")
    if np.iscomplexobj(kernel):
        return correlate(volume, kernel[slice_obj].conj(), mode)
    else:
        return correlate(volume, kernel[slice_obj], mode)
def order_filter(a, domain, rank):
    """
    Perform an order filter on an N-dimensional array.

    Perform an order filter on the array in. The domain argument acts as a
    mask centered over each pixel. The non-zero elements of domain are
    used to select elements surrounding each input pixel which are placed
    in a list. The list is sorted, and the output for that pixel is the
    element corresponding to rank in the sorted list.

    Parameters
    ----------
    a : ndarray
        The N-dimensional input array.
    domain : array_like
        A mask array with the same number of dimensions as `in`.
        Each dimension should have an odd number of elements.
    rank : int
        A non-negative integer which selects the element from the
        sorted list (0 corresponds to the smallest element, 1 is the
        next smallest element, etc.).

    Returns
    -------
    out : ndarray
        The results of the order filter in an array with the same
        shape as `in`.

    Examples
    --------
    >>> import scipy.signal
    >>> x = np.arange(25).reshape(5, 5)
    >>> domain = np.identity(3)
    >>> x
    array([[ 0,  1,  2,  3,  4],
           [ 5,  6,  7,  8,  9],
           [10, 11, 12, 13, 14],
           [15, 16, 17, 18, 19],
           [20, 21, 22, 23, 24]])
    >>> sp.signal.order_filter(x, domain, 0)
    array([[  0.,   0.,   0.,   0.,   0.],
           [  0.,   0.,   1.,   2.,   0.],
           [  0.,   5.,   6.,   7.,   0.],
           [  0.,  10.,  11.,  12.,   0.],
           [  0.,   0.,   0.,   0.,   0.]])
    >>> sp.signal.order_filter(x, domain, 2)
    array([[  6.,   7.,   8.,   9.,   4.],
           [ 11.,  12.,  13.,  14.,   9.],
           [ 16.,  17.,  18.,  19.,  14.],
           [ 21.,  22.,  23.,  24.,  19.],
           [ 20.,  21.,  22.,  23.,  24.]])

    """
    domain = asarray(domain)
    size = domain.shape
    # every window dimension must be odd so the window has a center pixel
    for k in range(len(size)):
        if (size[k] % 2) != 1:
            raise ValueError("Each dimension of domain argument "
                             " should have an odd number of elements.")
    # the actual N-D rank filtering is done in C by sigtools
    return sigtools._order_filterND(a, domain, rank)
def medfilt(volume, kernel_size=None):
    """
    Perform a median filter on an N-dimensional array.

    Apply a median filter to the input array using a local window-size
    given by kernel_size.

    Parameters
    ----------
    volume : array_like
        An N-dimensional input array.
    kernel_size : array_like, optional
        A scalar or an N-length list giving the size of the median filter
        window in each dimension.  Elements of `kernel_size` should be odd.
        If `kernel_size` is a scalar, then this scalar is used as the size in
        each dimension. Default size is 3 for each dimension.

    Returns
    -------
    out : ndarray
        An array the same size as input containing the median filtered
        result.

    """
    volume = atleast_1d(volume)
    # normalize kernel_size into an N-length array (default 3 per axis)
    if kernel_size is None:
        kernel_size = [3] * len(volume.shape)
    kernel_size = asarray(kernel_size)
    if len(kernel_size.shape) == 0:
        kernel_size = [kernel_size.item()] * len(volume.shape)
        kernel_size = asarray(kernel_size)

    # odd sizes guarantee the window has a center element
    for k in range(len(volume.shape)):
        if (kernel_size[k] % 2) != 1:
            raise ValueError("Each element of kernel_size should be odd.")

    domain = ones(kernel_size)

    # the median is the middle rank of the full (all-ones) window
    numels = product(kernel_size,axis=0)
    order = int(numels/2)
    return sigtools._order_filterND(volume,domain,order)
def wiener(im, mysize=None, noise=None):
    """
    Perform a Wiener filter on an N-dimensional array.

    Apply a Wiener filter to the N-dimensional array `im`.

    Parameters
    ----------
    im : ndarray
        An N-dimensional array.
    mysize : int or arraylike, optional
        A scalar or an N-length list giving the size of the Wiener filter
        window in each dimension.  Elements of mysize should be odd.
        If mysize is a scalar, then this scalar is used as the size
        in each dimension.
    noise : float, optional
        The noise-power to use. If None, then noise is estimated as the
        average of the local variance of the input.

    Returns
    -------
    out : ndarray
        Wiener filtered result with the same shape as `im`.

    """
    im = asarray(im)
    if mysize is None:
        mysize = [3] * len(im.shape)
    mysize = asarray(mysize)

    # Estimate the local mean
    lMean = correlate(im, ones(mysize), 'same') / np.prod(mysize)

    # Estimate the local variance
    lVar = correlate(im ** 2, ones(mysize), 'same') / np.prod(mysize) - lMean ** 2

    # Estimate the noise power if needed.
    # (identity check: `noise == None` was un-idiomatic and can misbehave
    # with array-valued operands)
    if noise is None:
        noise = mean(ravel(lVar), axis=0)

    res = (im - lMean)
    res *= (1 - noise / lVar)
    res += lMean
    # where the local variance is below the noise floor, fall back to the
    # local mean
    out = where(lVar < noise, lMean, res)

    return out
def convolve2d(in1, in2, mode='full', boundary='fill', fillvalue=0):
    """Convolve two 2-dimensional arrays.

    Convolve `in1` and `in2` with output size determined by mode and boundary
    conditions determined by `boundary` and `fillvalue`.

    Parameters
    ----------
    in1, in2 : ndarray
        Two-dimensional input arrays to be convolved.
    mode: str, optional
        A string indicating the size of the output:

        ``valid`` : the output consists only of those elements that do not
           rely on the zero-padding.

        ``same`` : the output is the same size as the largest input centered
           with respect to the 'full' output.

        ``full`` : the output is the full discrete linear cross-correlation
           of the inputs. (Default)

    boundary : str, optional
        A flag indicating how to handle boundaries:

          - 'fill' : pad input arrays with fillvalue. (default)
          - 'wrap' : circular boundary conditions.
          - 'symm' : symmetrical boundary conditions.

    fillvalue : scalar, optional
        Value to fill pad input arrays with. Default is 0.

    Returns
    -------
    out : ndarray
        A 2-dimensional array containing a subset of the discrete linear
        convolution of `in1` with `in2`.

    """
    # 'valid' requires in1 to be at least as large as in2 in every dimension
    if mode == 'valid':
        for d1, d2 in zip(np.shape(in1), np.shape(in2)):
            if not d1 >= d2:
                raise ValueError(
                    "in1 should have at least as many items as in2 in " \
                    "every dimension for valid mode.")
    # translate string flags to the integer codes the C routine expects
    val = _valfrommode(mode)
    bval = _bvalfromboundary(boundary)
    # flip flag 1 => convolution (kernel reversed) rather than correlation
    return sigtools._convolve2d(in1,in2,1,val,bval,fillvalue)
def correlate2d(in1, in2, mode='full', boundary='fill', fillvalue=0):
    """Cross-correlate two 2-dimensional arrays.

    Cross correlate in1 and in2 with output size determined by mode and
    boundary conditions determined by `boundary` and `fillvalue`.

    Parameters
    ----------
    in1, in2 : ndarray
        Two-dimensional input arrays to be convolved.
    mode: str, optional
        A string indicating the size of the output:

        ``valid`` : the output consists only of those elements that do not
           rely on the zero-padding.

        ``same`` : the output is the same size as the largest input centered
           with respect to the 'full' output.

        ``full`` : the output is the full discrete linear cross-correlation
           of the inputs. (Default)

    boundary : str, optional
        A flag indicating how to handle boundaries:

          - 'fill' : pad input arrays with fillvalue. (default)
          - 'wrap' : circular boundary conditions.
          - 'symm' : symmetrical boundary conditions.

    fillvalue : scalar, optional
        Value to fill pad input arrays with. Default is 0.

    Returns
    -------
    out : ndarray
        A 2-dimensional array containing a subset of the discrete linear
        cross-correlation of `in1` with `in2`.

    """
    # translate string flags to the integer codes the C routine expects
    val = _valfrommode(mode)
    bval = _bvalfromboundary(boundary)
    # flip flag 0 => correlation (kernel not reversed), unlike convolve2d
    return sigtools._convolve2d(in1, in2, 0,val,bval,fillvalue)
def medfilt2d(input, kernel_size=3):
    """
    Median filter a 2-dimensional array.

    Apply a median filter to the input array using a local window-size
    given by `kernel_size` (must be odd).

    Parameters
    ----------
    input : array_like
        A 2-dimensional input array.
    kernel_size : array_like, optional
        A scalar or a list of length 2, giving the size of the
        median filter window in each dimension.  Elements of
        `kernel_size` should be odd.  If `kernel_size` is a scalar,
        then this scalar is used as the size in each dimension.
        Default is a kernel of size (3, 3).

    Returns
    -------
    out : ndarray
        An array the same size as input containing the median filtered
        result.

    """
    image = asarray(input)
    # normalize kernel_size into a 2-element array (same scheme as medfilt)
    if kernel_size is None:
        kernel_size = [3] * 2
    kernel_size = asarray(kernel_size)
    if len(kernel_size.shape) == 0:
        kernel_size = [kernel_size.item()] * 2
        kernel_size = asarray(kernel_size)

    # odd sizes guarantee the window has a center element
    for size in kernel_size:
        if (size % 2) != 1:
            raise ValueError("Each element of kernel_size should be odd.")

    # dedicated 2-D C routine (faster than the generic order filter path)
    return sigtools._medfilt2d(image, kernel_size)
def lfilter(b, a, x, axis=-1, zi=None):
    """
    Filter data along one-dimension with an IIR or FIR filter.

    Filter a data sequence, x, using a digital filter.  This works for many
    fundamental data types (including Object type).  The filter is a direct
    form II transposed implementation of the standard difference equation
    (see Notes).

    Parameters
    ----------
    b : array_like
        The numerator coefficient vector in a 1-D sequence.
    a : array_like
        The denominator coefficient vector in a 1-D sequence.  If a[0]
        is not 1, then both a and b are normalized by a[0].
    x : array_like
        An N-dimensional input array.
    axis : int
        The axis of the input data array along which to apply the
        linear filter. The filter is applied to each subarray along
        this axis (*Default* = -1)
    zi : array_like (optional)
        Initial conditions for the filter delays.  It is a vector
        (or array of vectors for an N-dimensional input) of length
        max(len(a),len(b))-1.  If zi=None or is not given then initial
        rest is assumed.  SEE signal.lfiltic for more information.

    Returns
    -------
    y : array
        The output of the digital filter.
    zf : array (optional)
        If zi is None, this is not returned, otherwise, zf holds the
        final filter delay values.

    Notes
    -----
    The filter function is implemented as a direct II transposed structure.
    This means that the filter implements

    ::

       a[0]*y[n] = b[0]*x[n] + b[1]*x[n-1] + ... + b[nb]*x[n-nb]
                               - a[1]*y[n-1] - ... - a[na]*y[n-na]

    using the following difference equations::

         y[m] = b[0]*x[m] + z[0,m-1]
         z[0,m] = b[1]*x[m] + z[1,m-1] - a[1]*y[m]
         ...
         z[n-3,m] = b[n-2]*x[m] + z[n-2,m-1] - a[n-2]*y[m]
         z[n-2,m] = b[n-1]*x[m] - a[n-1]*y[m]

    where m is the output sample number and n=max(len(a),len(b)) is the
    model order.

    The rational transfer function describing this filter in the
    z-transform domain is::

                             -1               -nb
                 b[0] + b[1]z  + ... + b[nb] z
         Y(z) = ---------------------------------- X(z)
                             -1               -na
                 a[0] + a[1]z  + ... + a[na] z

    """
    # a scalar denominator must still be passed to sigtools as a sequence
    if isscalar(a):
        a = [a]
    # zi is only forwarded when given: passing it changes the C routine's
    # return to the (y, zf) pair
    if zi is None:
        return sigtools._linear_filter(b, a, x, axis)
    else:
        return sigtools._linear_filter(b, a, x, axis, zi)
def lfiltic(b,a,y,x=None):
    """
    Construct initial conditions for `lfilter`.

    Given a linear filter ``(b, a)`` together with the most recent
    outputs ``y = {y[-1], y[-2], ..., y[-N]}`` and inputs
    ``x = {x[-1], x[-2], ..., x[-M]}`` (where ``M = len(b) - 1`` and
    ``N = len(a) - 1``), return the state vector
    ``zi = {z_0[-1], z_1[-1], ..., z_K-1[-1]}`` with ``K = max(M, N)``
    that makes `lfilter` continue the sequence seamlessly.

    Missing history is treated as zero: when `x` is omitted its initial
    conditions are assumed zero, and vectors shorter than required are
    zero-padded on the right.

    Returns
    -------
    zi : ndarray
        Initial state vector for `lfilter`.
    """
    n_a = np.size(a) - 1
    n_b = np.size(b) - 1
    y = asarray(y)
    zi = zeros(max(n_a, n_b), y.dtype.char)

    # Input history, zero-padded on the right to length n_b.
    if x is None:
        x = zeros(n_b, y.dtype.char)
    else:
        x = asarray(x)
        deficit = n_b - np.size(x)
        if deficit > 0:
            x = r_[x, zeros(deficit)]

    # Output history, zero-padded on the right to length n_a.
    deficit = n_a - np.size(y)
    if deficit > 0:
        y = r_[y, zeros(deficit)]

    # Direct-form II transposed state: feed-forward contribution from x...
    for m in range(n_b):
        zi[m] = sum(b[m+1:] * x[:n_b-m], axis=0)
    # ...minus the feedback contribution from y.
    for m in range(n_a):
        zi[m] -= sum(a[m+1:] * y[:n_a-m], axis=0)
    return zi
def deconvolve(signal, divisor):
    """
    Deconvolve `divisor` out of `signal`.

    Performs polynomial-style long division of the two sequences, so that
    ``signal = convolve(divisor, quot, mode='full') + rem``.

    Returns
    -------
    quot : ndarray or list
        The quotient; an empty list when the divisor is longer than the
        signal.
    rem : ndarray
        The remainder.
    """
    num = atleast_1d(signal)
    den = atleast_1d(divisor)
    if len(den) > len(num):
        # Divisor longer than the signal: nothing divides out.
        return [], num
    # Filtering a unit impulse with numerator `num` and denominator `den`
    # carries out the long division; the impulse length fixes the quotient
    # length at len(num) - len(den) + 1.
    impulse = ones(len(num) - len(den) + 1, float)
    impulse[1:] = 0
    quot = lfilter(num, den, impulse)
    rem = num - convolve(den, quot, mode='full')
    return quot, rem
def hilbert(x, N=None, axis=-1):
    """
    Compute the analytic signal.

    The analytic signal ``x_a(t)`` of ``x(t)`` is::

        x_a = F^{-1}(F(x) 2U) = x + i y

    where ``F`` is the Fourier transform, ``U`` the unit step function,
    and ``y`` the Hilbert transform of ``x``. [1]_

    Parameters
    ----------
    x : array_like
        Signal data.  If complex, the imaginary part is discarded with a
        warning.
    N : int, optional
        Number of Fourier components.  Default: ``x.shape[axis]``
    axis : int, optional
        Axis along which to do the transformation.  Default: -1.

    Returns
    -------
    xa : ndarray
        Analytic signal of `x`, of each 1-D array along `axis`.

    References
    ----------
    .. [1] Wikipedia, "Analytic signal".
           http://en.wikipedia.org/wiki/Analytic_signal
    """
    x = asarray(x)
    if N is None:
        N = x.shape[axis]
    if N <= 0:
        raise ValueError("N must be positive.")
    if iscomplexobj(x):
        print("Warning: imaginary part of x ignored.")
        x = real(x)
    Xf = fft(x, N, axis=axis)
    # Spectral mask implementing "2U": DC (and Nyquist, for even N) kept
    # once, positive frequencies doubled, negative frequencies zeroed.
    # Integer division (//) keeps the indices ints on both Python 2 and 3.
    h = zeros(N)
    if N % 2 == 0:
        h[0] = h[N // 2] = 1
        h[1:N // 2] = 2
    else:
        h[0] = 1
        h[1:(N + 1) // 2] = 2
    if len(x.shape) > 1:
        # Reshape h so it broadcasts along `axis` of an N-D input.
        # (Index with a tuple: list-based fancy indexing is deprecated.)
        ind = [newaxis] * x.ndim
        ind[axis] = slice(None)
        h = h[tuple(ind)]
    x = ifft(Xf * h, axis=axis)
    return x
def hilbert2(x, N=None):
    """
    Compute the '2-D' analytic signal of `x` along axes (0, 1).

    Parameters
    ----------
    x : array_like
        2-D signal data.  If complex, the imaginary part is discarded
        with a warning.
    N : int or tuple of two ints, optional
        Number of Fourier components per axis.  A scalar applies to both
        axes.  Default is ``x.shape``.

    Returns
    -------
    xa : ndarray
        Analytic signal of `x` taken along axes (0,1).

    References
    ----------
    .. [1] Wikipedia, "Analytic signal",
           http://en.wikipedia.org/wiki/Analytic_signal
    """
    x = asarray(x)
    if N is None:
        N = x.shape
    elif isscalar(N):
        # A single size applies to both axes.  (The original code called
        # len(N) first, which raised TypeError for scalar N before this
        # branch could ever run.)
        if N <= 0:
            raise ValueError("N must be positive.")
        N = (N, N)
    if iscomplexobj(x):
        print("Warning: imaginary part of x ignored.")
        x = real(x)
    Xf = fft2(x, N, axes=(0, 1))
    h1 = zeros(N[0], 'd')
    h2 = zeros(N[1], 'd')
    # Fill the per-axis "2U" masks in place: DC (and Nyquist for even
    # sizes) kept once, positive frequencies doubled, negative zeroed.
    # In-place mutation replaces the original eval/exec rebinding hack.
    for h, N1 in zip((h1, h2), N):
        if N1 % 2 == 0:
            h[0] = h[N1 // 2] = 1
            h[1:N1 // 2] = 2
        else:
            h[0] = 1
            h[1:(N1 + 1) // 2] = 2
    h = h1[:, newaxis] * h2[newaxis, :]
    # Append trailing unit axes so h broadcasts over any extra dimensions.
    k = len(x.shape)
    while k > 2:
        h = h[:, newaxis]
        k -= 1
    x = ifft2(Xf * h, axes=(0, 1))
    return x
def cmplx_sort(p):
    """Sort roots by ascending magnitude (complex) or value (real).

    Returns
    -------
    p_sorted : ndarray
        The roots in sorted order.
    indx : ndarray
        The permutation that sorts the input.
    """
    p = asarray(p)
    keys = abs(p) if iscomplexobj(p) else p
    order = argsort(keys)
    return take(p, order, 0), order
def unique_roots(p, tol=1e-3, rtype='min'):
    """
    Determine unique roots and their multiplicities from a list of roots.

    Parameters
    ----------
    p : array_like
        The list of roots.
    tol : float, optional
        The tolerance for two roots to be considered equal. Default is 1e-3.
    rtype : {'max', 'min', 'avg'}, optional
        How to determine the returned root if multiple roots are within
        `tol` of each other.
          - 'max' (or 'maximum'): pick the maximum of those roots.
          - 'min' (or 'minimum'): pick the minimum of those roots.
          - 'avg' (or 'mean'): take the average of those roots.

    Returns
    -------
    pout : ndarray
        The list of unique roots, sorted from low to high.
    mult : ndarray
        The multiplicity of each root.

    Raises
    ------
    ValueError
        If `rtype` is not one of the recognized values.

    Notes
    -----
    This utility function is not specific to roots but can be used for any
    sequence of values for which uniqueness and multiplicity has to be
    determined. For a more general routine, see `numpy.unique`.

    Examples
    --------
    >>> vals = [0, 1.3, 1.31, 2.8, 1.25, 2.2, 10.3]
    >>> uniq, mult = sp.signal.unique_roots(vals, tol=2e-2, rtype='avg')

    Check which roots have multiplicity larger than 1:

    >>> uniq[mult > 1]
    array([ 1.305])
    """
    # np.max/np.min (not the binary ufuncs np.maximum/np.minimum, which
    # raise TypeError when handed a single list) reduce the merged group.
    if rtype in ['max', 'maximum']:
        comproot = np.max
    elif rtype in ['min', 'minimum']:
        comproot = np.min
    elif rtype in ['avg', 'mean']:
        comproot = np.mean
    else:
        # Previously an unknown rtype left `comproot` undefined, which
        # surfaced later as a confusing NameError.
        raise ValueError("`rtype` must be one of "
                         "{'max', 'maximum', 'min', 'minimum', 'avg', 'mean'}")
    p = asarray(p) * 1.0
    tol = abs(tol)
    # Sort by magnitude (complex) or value (real) so equal roots are
    # adjacent; the permutation itself is not needed afterwards.
    if iscomplexobj(p):
        p = take(p, argsort(abs(p)), 0)
    else:
        p = take(p, argsort(p), 0)
    pout = []
    mult = []
    indx = -1
    curp = p[0] + 5 * tol   # sentinel: guaranteed farther than tol from p[0]
    sameroots = []
    for tr in p:
        if abs(tr - curp) < tol:
            # Within tolerance of the current group: merge into it.
            sameroots.append(tr)
            curp = comproot(sameroots)
            pout[indx] = curp
            mult[indx] += 1
        else:
            # Start a new group with this root.
            pout.append(tr)
            curp = tr
            sameroots = [tr]
            indx += 1
            mult.append(1)
    return array(pout), array(mult)
def invres(r, p, k, tol=1e-3, rtype='avg'):
    """Compute b(s) and a(s) from partial fraction expansion: r,p,k

    If ``M = len(b)`` and ``N = len(a)``::

                b(s)     b[0] x**(M-1) + b[1] x**(M-2) + ... + b[M-1]
        H(s) = ------ = ----------------------------------------------
                a(s)     a[0] x**(N-1) + a[1] x**(N-2) + ... + a[N-1]

                 r[0]       r[1]             r[-1]
             = -------- + -------- + ... + --------- + k(s)
               (s-p[0])   (s-p[1])         (s-p[-1])

    If there are any repeated roots (closer than `tol`), then the partial
    fraction expansion has terms like::

          r[i]        r[i+1]              r[i+n-1]
        -------- + ----------- + ... + -----------
        (s-p[i])   (s-p[i])**2         (s-p[i])**n

    Parameters
    ----------
    r : array_like
        Residues.
    p : array_like
        Poles.
    k : array_like
        Coefficients of the direct polynomial term.
    tol : float, optional
        Tolerance below which two poles are merged (see `unique_roots`).
    rtype : {'max', 'min', 'avg'}, optional
        How merged poles are combined (see `unique_roots`).

    See Also
    --------
    residue, poly, polyval, unique_roots
    """
    # Save the direct term: the name `k` is reused as a loop index below.
    extra = k
    # Sort residues together with their poles so repeated poles are adjacent.
    p, indx = cmplx_sort(p)
    r = take(r,indx,0)
    pout, mult = unique_roots(p,tol=tol,rtype=rtype)
    # Rebuild the full pole list with each unique pole repeated `mult` times.
    p = []
    for k in range(len(pout)):
        p.extend([pout[k]]*mult[k])
    # Denominator polynomial from the poles.
    a = atleast_1d(poly(p))
    # Start the numerator from the direct term k(s)*a(s); [0] if none.
    if len(extra) > 0:
        b = polymul(extra,a)
    else:
        b = [0]
    indx = 0
    for k in range(len(pout)):
        # Polynomial of all poles except pout[k].
        temp = []
        for l in range(len(pout)):
            if l != k:
                temp.extend([pout[l]]*mult[l])
        for m in range(mult[k]):
            t2 = temp[:]
            # For the order-(m+1) term, include pout[k] itself mult[k]-m-1
            # more times, matching the r[i]/(s-p[i])**(m+1) contribution.
            t2.extend([pout[k]]*(mult[k]-m-1))
            b = polyadd(b,r[indx]*poly(t2))
            indx += 1
    b = real_if_close(b)
    # Strip (numerically) zero leading coefficients.
    while allclose(b[0], 0, rtol=1e-14) and (b.shape[-1] > 1):
        b = b[1:]
    return b, a
def residue(b, a, tol=1e-3, rtype='avg'):
    """
    Compute partial-fraction expansion of b(s) / a(s).

    If ``M = len(b)`` and ``N = len(a)``, then the partial-fraction
    expansion H(s) is defined as::

              b(s)     b[0] s**(M-1) + b[1] s**(M-2) + ... + b[M-1]
      H(s) = ------ = ----------------------------------------------
              a(s)     a[0] s**(N-1) + a[1] s**(N-2) + ... + a[N-1]

               r[0]       r[1]             r[-1]
           = -------- + -------- + ... + --------- + k(s)
             (s-p[0])   (s-p[1])         (s-p[-1])

    If there are any repeated roots (closer together than `tol`), then H(s)
    has terms like::

            r[i]        r[i+1]              r[i+n-1]
          -------- + ----------- + ... + -----------
          (s-p[i])   (s-p[i])**2         (s-p[i])**n

    Parameters
    ----------
    b : array_like
        Numerator polynomial coefficients.
    a : array_like
        Denominator polynomial coefficients.
    tol : float, optional
        Tolerance below which two poles are merged (see `unique_roots`).
    rtype : {'max', 'min', 'avg'}, optional
        How merged poles are combined (see `unique_roots`).

    Returns
    -------
    r : ndarray
        Residues.
    p : ndarray
        Poles.
    k : ndarray
        Coefficients of the direct polynomial term.

    See Also
    --------
    invres, numpy.poly, unique_roots
    """
    b,a = map(asarray,(b,a))
    # Leading denominator coefficient; residues are rescaled by it at the end.
    rscale = a[0]
    # Split off the direct polynomial term k(s); b becomes the remainder.
    k,b = polydiv(b,a)
    p = roots(a)
    # Residue array, same dtype/shape as the pole array.
    r = p*0.0
    pout, mult = unique_roots(p,tol=tol,rtype=rtype)
    # Full pole list with each unique pole repeated `mult` times.
    p = []
    for n in range(len(pout)):
        p.extend([pout[n]]*mult[n])
    p = asarray(p)
    # Compute the residue from the general formula
    indx = 0
    for n in range(len(pout)):
        bn = b.copy()
        # Denominator with the pole at pout[n] completely removed.
        pn = []
        for l in range(len(pout)):
            if l != n:
                pn.extend([pout[l]]*mult[l])
        an = atleast_1d(poly(pn))
        # bn(s) / an(s) is (s-po[n])**Nn * b(s) / a(s) where Nn is
        # multiplicity of pole at po[n]
        sig = mult[n]
        for m in range(sig,0,-1):
            if sig > m:
                # compute next derivative of bn(s) / an(s) via the quotient
                # rule; only the numerator is kept, an is squared alongside.
                term1 = polymul(polyder(bn,1),an)
                term2 = polymul(bn,polyder(an,1))
                bn = polysub(term1,term2)
                an = polymul(an,an)
            # Order-m residue: derivative value at the pole over (sig-m)!.
            r[indx+m-1] = polyval(bn,pout[n]) / polyval(an,pout[n]) \
                          / factorial(sig-m)
        indx += sig
    return r/rscale, p, k
def residuez(b, a, tol=1e-3, rtype='avg'):
    """Compute partial-fraction expansion of b(z) / a(z).

    If ``M = len(b)`` and ``N = len(a)``::

            b(z)     b[0] + b[1] z**(-1) + ... + b[M-1] z**(-M+1)
    H(z) = ------ = ----------------------------------------------
            a(z)     a[0] + a[1] z**(-1) + ... + a[N-1] z**(-N+1)

                 r[0]                   r[-1]
         = --------------- + ... + ---------------- + k[0] + k[1]z**(-1) ...
           (1-p[0]z**(-1))         (1-p[-1]z**(-1))

    If there are any repeated roots (closer than `tol`), then the partial
    fraction expansion has terms like::

             r[i]              r[i+1]                    r[i+n-1]
        -------------- + ------------------ + ... + ------------------
        (1-p[i]z**(-1))  (1-p[i]z**(-1))**2         (1-p[i]z**(-1))**n

    See also
    --------
    invresz, poly, polyval, unique_roots
    """
    b,a = map(asarray,(b,a))
    # Leading denominator coefficient; residues are rescaled by it at the end.
    gain = a[0]
    # Work on reversed coefficient order so polydiv operates on polynomials
    # in z**(-1).
    brev, arev = b[::-1],a[::-1]
    krev,brev = polydiv(brev,arev)
    # NOTE(review): comparing an ndarray against [] relies on old NumPy
    # returning a plain False for size-mismatched comparisons; modern
    # NumPy warns/errors here -- confirm against the NumPy version in use.
    if krev == []:
        k = []
    else:
        k = krev[::-1]
    b = brev[::-1]
    p = roots(a)
    # Residue array, same dtype/shape as the pole array.
    r = p*0.0
    pout, mult = unique_roots(p,tol=tol,rtype=rtype)
    # Full pole list with each unique pole repeated `mult` times.
    p = []
    for n in range(len(pout)):
        p.extend([pout[n]]*mult[n])
    p = asarray(p)
    # Compute the residue from the general formula (for discrete-time)
    # the polynomial is in z**(-1) and the multiplication is by terms
    # like this (1-p[i] z**(-1))**mult[i]. After differentiation,
    # we must divide by (-p[i])**(m-k) as well as (m-k)!
    indx = 0
    for n in range(len(pout)):
        bn = brev.copy()
        # Denominator (in z**(-1)) with the pole at pout[n] removed.
        pn = []
        for l in range(len(pout)):
            if l != n:
                pn.extend([pout[l]]*mult[l])
        an = atleast_1d(poly(pn))[::-1]
        # bn(z) / an(z) is (1-po[n] z**(-1))**Nn * b(z) / a(z) where Nn is
        # multiplicity of pole at po[n] and b(z) and a(z) are polynomials.
        sig = mult[n]
        for m in range(sig,0,-1):
            if sig > m:
                # compute next derivative of bn(s) / an(s) via the quotient
                # rule; only the numerator is kept, an is squared alongside.
                term1 = polymul(polyder(bn,1),an)
                term2 = polymul(bn,polyder(an,1))
                bn = polysub(term1,term2)
                an = polymul(an,an)
            # Evaluate at z = 1/p[n] and apply the discrete-time correction
            # factors (sig-m)! and (-p[n])**(sig-m).
            r[indx+m-1] = polyval(bn,1.0/pout[n]) / polyval(an,1.0/pout[n]) \
                          / factorial(sig-m) / (-pout[n])**(sig-m)
        indx += sig
    return r/gain, p, k
def invresz(r, p, k, tol=1e-3, rtype='avg'):
    """Compute b(z) and a(z) from partial fraction expansion: r,p,k

    If ``M = len(b)`` and ``N = len(a)``::

            b(z)     b[0] + b[1] z**(-1) + ... + b[M-1] z**(-M+1)
    H(z) = ------ = ----------------------------------------------
            a(z)     a[0] + a[1] z**(-1) + ... + a[N-1] z**(-N+1)

                 r[0]                   r[-1]
         = --------------- + ... + ---------------- + k[0] + k[1]z**(-1) ...
           (1-p[0]z**(-1))         (1-p[-1]z**(-1))

    If there are any repeated roots (closer than `tol`), then the partial
    fraction expansion has terms like::

             r[i]              r[i+1]                    r[i+n-1]
        -------------- + ------------------ + ... + ------------------
        (1-p[i]z**(-1))  (1-p[i]z**(-1))**2         (1-p[i]z**(-1))**n

    See also
    --------
    residuez, poly, polyval, unique_roots
    """
    # Save the direct term: the name `k` is reused as a loop index below.
    extra = asarray(k)
    # Sort residues together with their poles so repeated poles are adjacent.
    p, indx = cmplx_sort(p)
    r = take(r,indx,0)
    pout, mult = unique_roots(p,tol=tol,rtype=rtype)
    # Rebuild the full pole list with each unique pole repeated `mult` times.
    p = []
    for k in range(len(pout)):
        p.extend([pout[k]]*mult[k])
    # Denominator polynomial from the poles.
    a = atleast_1d(poly(p))
    # Start the numerator from the direct term; [0] if there is none.
    if len(extra) > 0:
        b = polymul(extra,a)
    else:
        b = [0]
    indx = 0
    # Accumulate in reversed (z**(-1)) coefficient order.
    brev = asarray(b)[::-1]
    for k in range(len(pout)):
        temp = []
        # Construct polynomial which does not include any of this root
        for l in range(len(pout)):
            if l != k:
                temp.extend([pout[l]]*mult[l])
        for m in range(mult[k]):
            t2 = temp[:]
            # For the order-(m+1) term, include pout[k] itself mult[k]-m-1
            # more times, matching 1/(1-p[i]z**(-1))**(m+1).
            t2.extend([pout[k]]*(mult[k]-m-1))
            brev = polyadd(brev,(r[indx]*poly(t2))[::-1])
            indx += 1
    b = real_if_close(brev[::-1])
    return b, a
def resample(x, num, t=None, axis=0, window=None):
    """
    Resample `x` to `num` samples using Fourier method along the given axis.

    The resampled signal starts at the same value as `x` but is sampled
    with a spacing of ``len(x) / num * (spacing of x)``.  Because a
    Fourier method is used, the signal is assumed to be periodic.

    Parameters
    ----------
    x : array_like
        The data to be resampled.
    num : int
        The number of samples in the resampled signal.
    t : array_like, optional
        If `t` is given, it is assumed to be the sample positions
        associated with the signal data in `x`.
    axis : int, optional
        The axis of `x` that is resampled.  Default is 0.
    window : array_like, callable, string, float, or tuple, optional
        Specifies the window applied to the signal in the Fourier
        domain.  See below for details.

    Returns
    -------
    resampled_x or (resampled_x, resampled_t)
        Either the resampled array, or, if `t` was given, a tuple
        containing the resampled array and the corresponding resampled
        positions.

    Notes
    -----
    The argument `window` controls a Fourier-domain window that tapers
    the Fourier spectrum before zero-padding to alleviate ringing in
    the resampled values for sampled signals you didn't intend to be
    interpreted as band-limited.

    If `window` is a function, then it is called with a vector of inputs
    indicating the frequency bins (i.e. fftfreq(x.shape[axis]) ).

    If `window` is an array of the same length as `x.shape[axis]` it is
    assumed to be the window to be applied directly in the Fourier
    domain (with dc and low-frequency first).

    For any other type of `window`, the function `scipy.signal.get_window`
    is called to generate the window.

    The first sample of the returned vector is the same as the first
    sample of the input vector.  The spacing between samples is changed
    from dx to ``dx * len(x) / num``.

    If `t` is not None, then it represents the old sample positions,
    and the new sample positions will be returned as well as the new
    samples.
    """
    x = asarray(x)
    X = fft(x,axis=axis)
    Nx = x.shape[axis]
    if window is not None:
        if callable(window):
            # Window computed from the FFT bin frequencies.
            W = window(fftfreq(Nx))
        elif isinstance(window, ndarray) and window.shape == (Nx,):
            # Precomputed window, already in DC-first FFT order.
            W = window
        else:
            # Anything else is a get_window() specifier; shift it so DC
            # comes first, matching the layout of the spectrum.
            W = ifftshift(get_window(window,Nx))
        # Reshape W so it broadcasts along `axis` only.
        # NOTE(review): `ones(...)` yields a float array used as a shape;
        # this relies on old NumPy accepting non-integer shape arrays.
        newshape = ones(len(x.shape))
        newshape[axis] = len(W)
        W.shape = newshape
        X = X*W
    sl = [slice(None)]*len(x.shape)
    newshape = list(x.shape)
    newshape[axis] = num
    # Number of spectrum samples actually copied over.
    N = int(np.minimum(num,Nx))
    Y = zeros(newshape,'D')
    # Copy the positive- then negative-frequency halves of the spectrum;
    # the rest stays zero (upsampling) or is discarded (downsampling).
    # NOTE(review): the '/' in the slice bounds below is Python 2 integer
    # division; under Python 3 these bounds become floats -- use // there.
    sl[axis] = slice(0,(N+1)/2)
    Y[sl] = X[sl]
    sl[axis] = slice(-(N-1)/2,None)
    Y[sl] = X[sl]
    # Inverse transform, rescaled so amplitudes are preserved.
    y = ifft(Y,axis=axis)*(float(num)/float(Nx))
    if x.dtype.char not in ['F','D']:
        # Real-valued input: drop the (numerically tiny) imaginary part.
        y = y.real
    if t is None:
        return y
    else:
        # New sample positions with spacing scaled by Nx/num.
        new_t = arange(0,num)*(t[1]-t[0])* Nx / float(num) + t[0]
        return y, new_t
def detrend(data, axis=-1, type='linear', bp=0):
    """
    Remove linear trend along axis from data.

    Parameters
    ----------
    data : array_like
        The input data.
    axis : int, optional
        The axis along which to detrend the data. By default this is the
        last axis (-1).
    type : {'linear', 'constant'}, optional
        The type of detrending. If ``type == 'linear'`` (default),
        the result of a linear least-squares fit to `data` is subtracted
        from `data`.
        If ``type == 'constant'``, only the mean of `data` is subtracted.
    bp : array_like of ints, optional
        A sequence of break points. If given, an individual linear fit is
        performed for each part of `data` between two break points.
        Break points are specified as indices into `data`.

    Returns
    -------
    ret : ndarray
        The detrended input data.

    Examples
    --------
    >>> randgen = np.random.RandomState(9)
    >>> npoints = 1e3
    >>> noise = randgen.randn(npoints)
    >>> x = 3 + 2*np.linspace(0, 1, npoints) + noise
    >>> (sp.signal.detrend(x) - noise).max() < 0.01
    True
    """
    if type not in ['linear','l','constant','c']:
        raise ValueError("Trend type must be 'linear' or 'constant'.")
    data = asarray(data)
    dtype = data.dtype.char
    # Work in floating point even when the input is integer-typed.
    if dtype not in 'dfDF':
        dtype = 'd'
    if type in ['constant','c']:
        # Constant detrend: subtract the mean along `axis`.
        ret = data - expand_dims(mean(data,axis),axis)
        return ret
    else:
        dshape = data.shape
        N = dshape[axis]
        # Breakpoints, deduplicated and always including both axis ends.
        bp = sort(unique(r_[0,bp,N]))
        if any(bp > N):
            raise ValueError("Breakpoints must be less than length "
                             "of data along given axis.")
        Nreg = len(bp) - 1
        # Restructure data so that axis is along first dimension and
        # all other dimensions are collapsed into second dimension
        rnk = len(dshape)
        if axis < 0: axis = axis + rnk
        newdims = r_[axis,0:axis,axis+1:rnk]
        # NOTE(review): `prod(...)/N` relies on Python 2 integer division;
        # under Python 3 this produces a float dimension -- use // there.
        newdata = reshape(transpose(data, tuple(newdims)),
                          (N, prod(dshape, axis=0)/N))
        newdata = newdata.copy() # make sure we have a copy
        if newdata.dtype.char not in 'dfDF':
            newdata = newdata.astype(dtype)
        # Find leastsq fit and remove it for each piece
        for m in range(Nreg):
            Npts = bp[m+1] - bp[m]
            # Design matrix for the segment: [ramp, constant] columns.
            A = ones((Npts,2),dtype)
            A[:,0] = cast[dtype](arange(1,Npts+1)*1.0/Npts)
            sl = slice(bp[m],bp[m+1])
            coef,resids,rank,s = linalg.lstsq(A,newdata[sl])
            newdata[sl] = newdata[sl] - dot(A,coef)
        # Put data back in original shape.
        tdshape = take(dshape,newdims,0)
        ret = reshape(newdata,tuple(tdshape))
        # NOTE(review): assumes Python 2, where range() returns a list that
        # supports concatenation; use list(range(...)) on Python 3.
        vals = range(1,rnk)
        olddims = vals[:axis] + [0] + vals[axis:]
        ret = transpose(ret,tuple(olddims))
        return ret
from scipy.signal.filter_design import cheby1
from scipy.signal.fir_filter_design import firwin
def decimate(x, q, n=None, ftype='iir', axis=-1):
    """Downsample the signal x by an integer factor q, using an order n filter.

    By default an order 8 Chebyshev type I filter is used or a 30 point FIR
    filter with hamming window if ftype is 'fir'.

    Parameters
    ----------
    x : N-d array
        the signal to be downsampled
    q : int
        the downsampling factor
    n : int or None
        the order of the filter (1 less than the length for 'fir')
    ftype : {'iir' or 'fir'}
        the type of the lowpass filter
    axis : int
        the axis along which to decimate

    Returns
    -------
    y : N-d array
        the down-sampled signal

    See also
    --------
    resample
    """
    if not isinstance(q, int):
        raise TypeError("q must be an integer")
    if n is None:
        # Default orders: a short FIR, or a low-order IIR.
        n = 30 if ftype == 'fir' else 8
    if ftype == 'fir':
        # Anti-aliasing lowpass with cutoff at the new Nyquist rate 1/q.
        b = firwin(n+1, 1./q, window='hamming')
        a = 1.
    else:
        b, a = cheby1(n, 0.05, 0.8/q)
    y = lfilter(b, a, x, axis=axis)
    # Keep every q-th sample along `axis` and all samples elsewhere.
    # (The previous [None]*ndim inserted new axes on indexing instead of
    # selecting full axes, corrupting the output shape.)
    sl = [slice(None)] * y.ndim
    sl[axis] = slice(None, None, q)
    return y[tuple(sl)]
| [
2,
6434,
25,
19804,
6544,
13323,
415,
198,
2,
7358,
1377,
6244,
198,
198,
11748,
14601,
198,
198,
11748,
43237,
31391,
198,
6738,
629,
541,
88,
1330,
300,
1292,
70,
198,
6738,
629,
541,
88,
13,
487,
83,
8002,
1330,
277,
701,
11,
6... | 2.148289 | 18,147 |
from flask_restx import Api
from flask import Blueprint
from .main.controller.user_controller import api as user_ns
from .main.controller.auth_controller import api as auth_ns
from .main.controller.blog_controller import api as blog_ns
from .main.controller.comments_controller import api as comments_ns
from .main.controller.product_controller import api as products_ns
from .main.controller.mpesa_payment_controller import api as mpesa_ns
# Root API blueprint: every REST resource below is mounted on it, and the
# application factory registers this blueprint on the Flask app.
blueprint = Blueprint('api', __name__)

# Flask-RESTX Api object: ties Swagger metadata and the resource
# namespaces together on the blueprint.
api = Api(blueprint,
          title='FLASK RESTX API BOILER-PLATE WITH JWT',
          version='1.0',
          description='a boilerplate for flask restx web service'
          )

# Mount each resource namespace; an explicit `path` overrides the
# namespace's own default URL prefix.
api.add_namespace(user_ns, path='/user')
api.add_namespace(auth_ns)  # mounted at the namespace's default path
api.add_namespace(blog_ns, path='/blogs')
api.add_namespace(comments_ns, path='/comments')
api.add_namespace(products_ns, path='/products')
api.add_namespace(mpesa_ns, path='/mpesa')
| [
6738,
42903,
62,
2118,
87,
1330,
5949,
72,
198,
6738,
42903,
1330,
39932,
198,
198,
6738,
764,
12417,
13,
36500,
13,
7220,
62,
36500,
1330,
40391,
355,
2836,
62,
5907,
198,
6738,
764,
12417,
13,
36500,
13,
18439,
62,
36500,
1330,
4039... | 2.904762 | 315 |
# Expected parser output: VPN 1002 advertises three /24 prefixes.
expected_output = {
    'vpn': {
        '1002': {
            'prefix': [
                '10.111.2.0/24',
                '10.154.2.0/24',
                '10.51.2.0/24',
            ],
        },
    },
}
40319,
62,
22915,
34758,
6,
85,
21999,
10354,
1391,
6,
3064,
17,
10354,
1391,
6,
40290,
10354,
37250,
940,
13,
16243,
13,
17,
13,
15,
14,
1731,
3256,
198,
220,
220,
220,
220,
220,
220,
220,
220,
220,
220,
220,
220,
220,
220,
220,
... | 1.422018 | 109 |
#HUGHXIE
#a2q1.py
# Number-guessing game: the player guesses a random number in [1, 100];
# the program reports too high / too low until the guess is correct.
# Any non-numeric guess (or one above 100) ends the game immediately.
import random

# Secret number between 1 and 100 (inclusive).
number = random.randint(1, 100)

# Initial guess.  (The debug print that revealed the secret number has
# been removed -- it defeated the game.)
guess = input("Enter guess: ")
while True:
    # A valid guess is all digits and at most 100.
    if guess.isdigit() and int(guess) < 101:
        if int(guess) > number:
            guess = input("Guess is too high! Guess again: ")
        elif int(guess) < number:
            guess = input("Guess is too low! Guess again: ")
        else:
            print("You guessed the number! It was " + str(number))
            break
    else:
        # Invalid input ends the game.
        print("Invalid input.")
        break
| [
2,
39,
44004,
55,
10008,
198,
2,
64,
17,
80,
16,
13,
9078,
198,
11748,
4738,
198,
2,
1136,
4738,
1271,
1022,
352,
290,
1802,
198,
17618,
796,
357,
25120,
13,
25192,
600,
7,
16,
11,
3064,
4008,
198,
2,
51,
6465,
198,
4798,
7,
2... | 2.261168 | 291 |
# -*- coding: utf-8 -*-
#
# Author: jimin.huang
#
# Created Time: 2015年02月07日 星期六 17时01分29秒
#
from scrapy import signals
from scrapy.mail import MailSender
from scrapy.exceptions import NotConfigured
| [
2,
532,
9,
12,
19617,
25,
3384,
69,
12,
23,
532,
9,
12,
198,
2,
220,
198,
2,
6434,
25,
474,
320,
259,
13,
13415,
648,
198,
2,
220,
198,
2,
15622,
3862,
25,
1853,
33176,
112,
2999,
17312,
230,
2998,
33768,
98,
10545,
246,
253,
... | 2.228261 | 92 |
"""
This module contains general purpose functions that are used throughout PBjam.
"""
from . import PACKAGEDIR
import os
import numpy as np
def get_priorpath():
    """Return the default path to the prior data file shipped with the package.

    Returns
    -------
    prior_file : str
        Path to ``data/prior_data.csv`` inside the package directory.
    """
    return os.path.join(PACKAGEDIR, 'data', 'prior_data.csv')
def get_percentiles(X, sigma = 2, **kwargs):
    """Percentiles of a distribution at the median and +/- sigma levels.

    Computes the percentiles of `X` corresponding to the median (50th)
    together with the 1-sigma, 2-sigma, ... bounds up to the requested
    `sigma` (e.g. 68% and 95% are the 1- and 2-sigma intervals).

    Parameters
    ----------
    X : numpy.array()
        Array to find percentiles of.
    sigma : int, optional
        Highest sigma level to include. Default is 2.
    kwargs : dict
        Extra arguments passed through to `numpy.percentile`.

    Returns
    -------
    percentiles : numpy.array()
        Array of 2*sigma + 1 percentile values of X, low to high.
    """
    # Cumulative probabilities enclosed within 1..6 sigma of a normal
    # distribution, converted to one-sided percent offsets from the median.
    levels = np.array([0.682689492137,
                       0.954499736104,
                       0.997300203937,
                       0.999936657516,
                       0.999999426697,
                       0.999999998027]) * 100 / 2
    # Mirror around the median: [-6s, ..., -1s, 0, +1s, ..., +6s] + 50.
    levels = np.append(0, levels)
    levels = np.append(-levels[::-1][:-1], levels)
    levels += 50
    # The median sits at index 6; keep `sigma` entries on either side.
    return np.percentile(X, levels[6 - sigma: 6 + sigma + 1], **kwargs)
def to_log10(x, xerr):
    """Transform a value and its uncertainty to log10 scale (approximate).

    Parameters
    ----------
    x : float
        Value to transform to logscale.
    xerr : float
        Uncertainty on `x`.

    Returns
    -------
    logval : list
        ``[log10(x), propagated error]`` when ``xerr > 0``; otherwise the
        input pair is returned unchanged.
    """
    if not xerr > 0:
        # Non-positive (or non-finite) uncertainty: pass through untouched.
        return [x, xerr]
    # First-order error propagation: d(log10 x) = dx / (x ln 10).
    ln10 = np.log(10.0)
    return [np.log10(x), xerr / (x * ln10)]
def normal(x, mu, sigma):
    """Logarithm of an (unnormalized!) normal distribution at `x`.

    Parameters
    ----------
    x : float
        Point at which to evaluate.
    mu : float
        Distribution mean.
    sigma : float
        Distribution standard deviation.

    Returns
    -------
    y : float
        ``-0.5 * ((x - mu) / sigma)**2``, or 0.0 when `sigma` is negative.
    """
    # Negative sigma is treated as "no constraint" and contributes nothing.
    if sigma < 0:
        return 0.0
    z = (x - mu) / sigma
    return -0.5 * z ** 2
37811,
198,
198,
1212,
8265,
4909,
2276,
4007,
5499,
326,
389,
973,
3690,
30524,
39159,
13,
198,
198,
37811,
198,
198,
6738,
764,
1330,
47035,
4760,
1961,
4663,
198,
11748,
28686,
198,
11748,
299,
32152,
355,
45941,
198,
198,
4299,
651,... | 2.275862 | 1,073 |
from __future__ import unicode_literals
import frappe
from frappe.model.utils.rename_field import rename_field
from frappe.modules import scrub, get_doctype_module
# Each entry pairs an old fieldname with its replacement: [old, new].
# The patch renames every "test_*" field to the "lab_test_*" form.
lab_test_name = ["test_name", "lab_test_name"]
lab_test_code = ["test_code", "lab_test_code"]
lab_test_comment = ["test_comment", "lab_test_comment"]
lab_test_created = ["test_created", "lab_test_created"]
lab_test_template = ["test_template", "lab_test_template"]
lab_test_rate = ["test_rate", "lab_test_rate"]
lab_test_description = ["test_description", "lab_test_description"]
lab_test_group = ["test_group", "lab_test_group"]
lab_test_template_type = ["test_template_type", "lab_test_template_type"]
lab_test_uom = ["test_uom", "lab_test_uom"]
lab_test_normal_range = ["test_normal_range", "lab_test_normal_range"]
lab_test_event = ["test_event", "lab_test_event"]
lab_test_particulars = ["test_particulars", "lab_test_particulars"]

# Maps each DocType name to the list of [old, new] field renames it needs.
field_rename_map = {
    "Lab Test Template": [lab_test_name, lab_test_code, lab_test_rate, lab_test_description,
        lab_test_group, lab_test_template_type, lab_test_uom, lab_test_normal_range],
    "Normal Test Items": [lab_test_name, lab_test_comment, lab_test_uom, lab_test_event],
    "Lab Test": [lab_test_name, lab_test_comment, lab_test_group],
    "Lab Prescription": [lab_test_name, lab_test_code, lab_test_comment, lab_test_created],
    "Lab Test Groups": [lab_test_template, lab_test_rate, lab_test_description],
    "Lab Test UOM": [lab_test_uom],
    "Normal Test Template": [lab_test_uom, lab_test_event],
    "Special Test Items": [lab_test_particulars]
}
| [
6738,
11593,
37443,
834,
1330,
28000,
1098,
62,
17201,
874,
198,
11748,
5306,
27768,
198,
6738,
5306,
27768,
13,
19849,
13,
26791,
13,
918,
480,
62,
3245,
1330,
36265,
62,
3245,
198,
6738,
5306,
27768,
13,
18170,
1330,
27268,
11,
651,
... | 2.759786 | 562 |
import FWCore.ParameterSet.Config as cms
# EDFilter selecting LHE Drell-Yan events by decay lepton ID.
# leptonID 11 is the PDG code of the electron -- presumably this keeps
# Z/gamma* -> e+ e- events; confirm against the LHEDYdecayFilter plugin.
lheZdecayFilter = cms.EDFilter("LHEDYdecayFilter",
    src = cms.InputTag("source"),       # LHE event source collection
    leptonID =cms.int32(11),
    verbose = cms.untracked.bool(True)  # untracked: not part of provenance
)
| [
11748,
48849,
14055,
13,
36301,
7248,
13,
16934,
355,
269,
907,
198,
198,
75,
258,
57,
12501,
323,
22417,
796,
269,
907,
13,
1961,
22417,
7203,
43,
39,
1961,
56,
12501,
323,
22417,
1600,
198,
220,
220,
220,
12351,
796,
269,
907,
13,... | 1.763514 | 148 |
# pyisotopomer: Nitrous oxide isotopocule data corrections in Python
# Copyright (C) 2021 Colette L Kelly et al. (MIT License)
from .pyisotopomer import Scrambling
from .pyisotopomer import Isotopomers
| [
2,
12972,
271,
313,
404,
12057,
25,
24282,
7596,
30788,
31624,
404,
420,
2261,
1366,
26251,
287,
11361,
198,
2,
15069,
357,
34,
8,
33448,
220,
1766,
21348,
406,
9077,
2123,
435,
13,
220,
357,
36393,
13789,
8,
198,
198,
6738,
764,
90... | 3.153846 | 65 |
import unittest
from os import path
import os
from iridauploader.tests_integration import tests_integration
import iridauploader.config as config
import iridauploader.api as api
import iridauploader.model as model
import iridauploader.progress as progress
from iridauploader.core.cli_entry import upload_run_single_entry, batch_upload_single_entry
# Directory containing this module; fall back to "." when dirname is empty
# (i.e. the module was loaded from the current working directory).
path_to_module = path.dirname(__file__) or '.'

# Test-data directories that may be left with status/log files; each is
# cleaned up after every test run.
CLEANUP_DIRECTORY_LIST = [
    path.join(path_to_module, name)
    for name in (
        "fake_dir_data",
        "fake_miniseq_data",
        "fake_nextseq_data",
        "fake_ngs_data",
        "fake_ngs_data_force",
        "fake_ngs_data_nonexistent_project",
        "fake_ngs_data_parse_fail",
        "fake_ngs_data_no_completed_file",
    )
] + [
    path.join(path_to_module, "fake_batch_data", run)
    for run in ("run_1", "run_2", "run_3")
]
class TestEndToEnd(unittest.TestCase):
"""
This class is for End to End tests
It uses the single entry point for the CLI to make sure all parts of the program are working together correctly
"""
def tearDown(self):
"""
Return the sample config file to blank after tests
Deletes status file from data directories if they exist
:return:
"""
self.write_to_config_file("", "", "", "", "", "")
for directory_path in CLEANUP_DIRECTORY_LIST:
status_file_path = path.join(directory_path, 'irida_uploader_status.info')
if path.exists(status_file_path):
os.remove(status_file_path)
log_file_path = path.join(directory_path, 'irida-uploader.log')
if path.exists(log_file_path):
os.remove(log_file_path)
@staticmethod
def write_to_config_file(client_id, client_secret, username, password, base_url, parser):
"""
Write to out sample configuration file so that the IRIDA instance will be accessed
:param client_id:
:param client_secret:
:param username:
:param password:
:param base_url:
:param parser:
:return:
"""
config.set_config_options(client_id=client_id,
client_secret=client_secret,
username=username,
password=password,
base_url=base_url,
parser=parser)
config.write_config_options_to_file()
def test_valid_miseq_upload(self):
"""
Test a valid miseq directory for upload from end to end
:return:
"""
# Set our sample config file to use miseq parser and the correct irida credentials
self.write_to_config_file(
client_id=tests_integration.client_id,
client_secret=tests_integration.client_secret,
username=tests_integration.username,
password=tests_integration.password,
base_url=tests_integration.base_url,
parser="miseq"
)
# instance an api
test_api = api.ApiCalls(
client_id=tests_integration.client_id,
client_secret=tests_integration.client_secret,
base_url=tests_integration.base_url,
username=tests_integration.username,
password=tests_integration.password
)
# Create a test project, the uploader does not make new projects on its own
# so one must exist to upload samples into
# This may not be the project that the files get uploaded to,
# but one will be made in the case this is the only test being run
project_name = "test_project"
project_description = "test_project_description"
project = model.Project(name=project_name, description=project_description)
test_api.send_project(project)
# We always upload to project "1" so that tests will be consistent no matter how many / which tests are run
project_id = "1"
# Do the upload
upload_result = upload_run_single_entry(path.join(path_to_module, "fake_ngs_data"))
# Make sure the upload was a success
self.assertEqual(upload_result.exit_code, 0)
# Verify the files were uploaded
sample_list = test_api.get_samples(project_id)
sample_1_found = False
sample_2_found = False
sample_3_found = False
for sample in sample_list:
if sample.sample_name in ["01-1111", "02-2222", "03-3333"]:
if sample.sample_name == "01-1111":
sample_1_found = True
sequence_files = test_api.get_sequence_files(project_id, sample.sample_name)
self.assertEqual(len(sequence_files), 2)
res_sequence_file_names = [
sequence_files[0]['fileName'],
sequence_files[1]['fileName']
]
expected_sequence_file_names = [
'01-1111_S1_L001_R1_001.fastq.gz',
'01-1111_S1_L001_R2_001.fastq.gz'
]
self.assertEqual(res_sequence_file_names.sort(), expected_sequence_file_names.sort())
elif sample.sample_name == "02-2222":
sample_2_found = True
sequence_files = test_api.get_sequence_files(project_id, sample.sample_name)
self.assertEqual(len(sequence_files), 2)
res_sequence_file_names = [
sequence_files[0]['fileName'],
sequence_files[1]['fileName']
]
expected_sequence_file_names = [
'02-2222_S1_L001_R1_001.fastq.gz',
'02-2222_S1_L001_R2_001.fastq.gz'
]
self.assertEqual(res_sequence_file_names.sort(), expected_sequence_file_names.sort())
elif sample.sample_name == "03-3333":
sample_3_found = True
sequence_files = test_api.get_sequence_files(project_id, sample.sample_name)
self.assertEqual(len(sequence_files), 2)
res_sequence_file_names = [
sequence_files[0]['fileName'],
sequence_files[1]['fileName']
]
expected_sequence_file_names = [
'03-3333_S1_L001_R1_001.fastq.gz',
'03-3333_S1_L001_R2_001.fastq.gz'
]
self.assertEqual(res_sequence_file_names.sort(), expected_sequence_file_names.sort())
self.assertEqual(sample_1_found, True)
self.assertEqual(sample_2_found, True)
self.assertEqual(sample_3_found, True)
def test_batch_miseq_upload(self):
"""
Test a valid miseq directory for upload from end to end
We have 3 run directories
run_1 has batch01-1111
run_2 is invalid
run_3 has batch03-3333
we expect to see batch01-1111 and batch03-3333 uploaded
:return:
"""
# Set our sample config file to use miseq parser and the correct irida credentials
self.write_to_config_file(
client_id=tests_integration.client_id,
client_secret=tests_integration.client_secret,
username=tests_integration.username,
password=tests_integration.password,
base_url=tests_integration.base_url,
parser="miseq"
)
# instance an api
test_api = api.ApiCalls(
client_id=tests_integration.client_id,
client_secret=tests_integration.client_secret,
base_url=tests_integration.base_url,
username=tests_integration.username,
password=tests_integration.password
)
# Create a test project, the uploader does not make new projects on its own
# so one must exist to upload samples into
# This may not be the project that the files get uploaded to,
# but one will be made in the case this is the only test being run
project_name = "test_batch_project"
project_description = "test_batch_project_description"
project = model.Project(name=project_name, description=project_description)
test_api.send_project(project)
# We always upload to project "1" so that tests will be consistent no matter how many / which tests are run
project_id = "1"
# Do the upload
upload_result = batch_upload_single_entry(path.join(path_to_module, "fake_batch_data"))
# Make sure the upload was a success
self.assertEqual(upload_result.exit_code, 0)
# Verify the files were uploaded
sample_list = test_api.get_samples(project_id)
sample_1_found = False
sample_2_not_found = True
sample_3_found = False
for sample in sample_list:
if sample.sample_name in ["batch01-1111", "batch02-2222", "batch03-3333"]:
if sample.sample_name == "batch01-1111":
sample_1_found = True
sequence_files = test_api.get_sequence_files(project_id, sample.sample_name)
self.assertEqual(len(sequence_files), 2)
res_sequence_file_names = [
sequence_files[0]['fileName'],
sequence_files[1]['fileName']
]
expected_sequence_file_names = [
'batch01-1111_S1_L001_R1_001.fastq.gz',
'batch01-1111_S1_L001_R2_001.fastq.gz'
]
self.assertEqual(res_sequence_file_names.sort(), expected_sequence_file_names.sort())
elif sample.sample_name == "batch02-2222":
# this one should not be found
sample_2_not_found = False
elif sample.sample_name == "batch03-3333":
sample_3_found = True
sequence_files = test_api.get_sequence_files(project_id, sample.sample_name)
self.assertEqual(len(sequence_files), 2)
res_sequence_file_names = [
sequence_files[0]['fileName'],
sequence_files[1]['fileName']
]
expected_sequence_file_names = [
'batch03-3333_S1_L001_R1_001.fastq.gz',
'batch03-3333_S1_L001_R2_001.fastq.gz'
]
self.assertEqual(res_sequence_file_names.sort(), expected_sequence_file_names.sort())
self.assertEqual(sample_1_found, True)
self.assertEqual(sample_2_not_found, True)
self.assertEqual(sample_3_found, True)
def test_valid_directory_upload(self):
"""
Test a valid directory for upload end to end
:return:
"""
# Set our sample config file to use miseq parser and the correct irida credentials
self.write_to_config_file(
client_id=tests_integration.client_id,
client_secret=tests_integration.client_secret,
username=tests_integration.username,
password=tests_integration.password,
base_url=tests_integration.base_url,
parser="directory"
)
# instance an api
test_api = api.ApiCalls(
client_id=tests_integration.client_id,
client_secret=tests_integration.client_secret,
base_url=tests_integration.base_url,
username=tests_integration.username,
password=tests_integration.password
)
# Create a test project, the uploader does not make new projects on its own
# so one must exist to upload samples into
# This may not be the project that the files get uploaded to,
# but one will be made in the case this is the only test being run
project_name = "test_project_2"
project_description = "test_project_description_2"
project = model.Project(name=project_name, description=project_description)
test_api.send_project(project)
# We always upload to project "1" so that tests will be consistent no matter how many / which tests are run
project_id = "1"
# Do the upload
upload_result = upload_run_single_entry(path.join(path_to_module, "fake_dir_data"))
# Make sure the upload was a success
self.assertEqual(upload_result.exit_code, 0)
# Verify the files were uploaded
sample_list = test_api.get_samples(project_id)
sample_1_found = False
sample_2_found = False
sample_3_found = False
for sample in sample_list:
if sample.sample_name in ["my-sample-1", "my-sample-2", "my-sample-3"]:
if sample.sample_name == "my-sample-1":
sample_1_found = True
sequence_files = test_api.get_sequence_files(project_id, sample.sample_name)
self.assertEqual(len(sequence_files), 2)
self.assertEqual(sequence_files[0]['fileName'], 'file_1.fastq.gz')
self.assertEqual(sequence_files[1]['fileName'], 'file_2.fastq.gz')
elif sample.sample_name == "my-sample-2":
sample_2_found = True
sequence_files = test_api.get_sequence_files(project_id, sample.sample_name)
self.assertEqual(len(sequence_files), 2)
self.assertEqual(sequence_files[0]['fileName'], 'samp_F.fastq.gz')
self.assertEqual(sequence_files[1]['fileName'], 'samp_R.fastq.gz')
elif sample.sample_name == "my-sample-3":
sample_3_found = True
sequence_files = test_api.get_sequence_files(project_id, sample.sample_name)
self.assertEqual(len(sequence_files), 2)
self.assertEqual(sequence_files[0]['fileName'], 'germ_f.fastq.gz')
self.assertEqual(sequence_files[1]['fileName'], 'germ_r.fastq.gz')
self.assertEqual(sample_1_found, True)
self.assertEqual(sample_2_found, True)
self.assertEqual(sample_3_found, True)
def test_valid_miniseq_upload(self):
"""
Test a valid directory for upload end to end
:return:
"""
# Set our sample config file to use miseq parser and the correct irida credentials
self.write_to_config_file(
client_id=tests_integration.client_id,
client_secret=tests_integration.client_secret,
username=tests_integration.username,
password=tests_integration.password,
base_url=tests_integration.base_url,
parser="miniseq"
)
# instance an api
test_api = api.ApiCalls(
client_id=tests_integration.client_id,
client_secret=tests_integration.client_secret,
base_url=tests_integration.base_url,
username=tests_integration.username,
password=tests_integration.password
)
# Create a test project, the uploader does not make new projects on its own
# so one must exist to upload samples into
# This may not be the project that the files get uploaded to,
# but one will be made in the case this is the only test being run
project_name = "test_project_4"
project_description = "test_project_description_4"
project = model.Project(name=project_name, description=project_description)
test_api.send_project(project)
# We always upload to project "1" so that tests will be consistent no matter how many / which tests are run
project_id = "1"
# Do the upload
upload_result = upload_run_single_entry(path.join(path_to_module, "fake_miniseq_data"))
# Make sure the upload was a success
self.assertEqual(upload_result.exit_code, 0)
# Verify the files were uploaded
sample_list = test_api.get_samples(project_id)
sample_1_found = False
sample_2_found = False
sample_3_found = False
for sample in sample_list:
if sample.sample_name in ["01-1111m", "02-2222m", "03-3333m"]:
if sample.sample_name == "01-1111m":
sample_1_found = True
sequence_files = test_api.get_sequence_files(project_id, sample.sample_name)
self.assertEqual(len(sequence_files), 2)
res_sequence_file_names = [
sequence_files[0]['fileName'],
sequence_files[1]['fileName']
]
expected_sequence_file_names = [
'01-1111m_S1_L001_R1_001.fastq.gz',
'01-1111m_S1_L001_R2_001.fastq.gz'
]
self.assertEqual(res_sequence_file_names.sort(), expected_sequence_file_names.sort())
elif sample.sample_name == "02-2222m":
sample_2_found = True
sequence_files = test_api.get_sequence_files(project_id, sample.sample_name)
self.assertEqual(len(sequence_files), 2)
res_sequence_file_names = [
sequence_files[0]['fileName'],
sequence_files[1]['fileName']
]
expected_sequence_file_names = [
'02-2222m_S1_L001_R1_001.fastq.gz',
'02-2222m_S1_L001_R2_001.fastq.gz'
]
self.assertEqual(res_sequence_file_names.sort(), expected_sequence_file_names.sort())
elif sample.sample_name == "03-3333m":
sample_3_found = True
sequence_files = test_api.get_sequence_files(project_id, sample.sample_name)
self.assertEqual(len(sequence_files), 2)
res_sequence_file_names = [
sequence_files[0]['fileName'],
sequence_files[1]['fileName']
]
expected_sequence_file_names = [
'03-3333m_S1_L001_R1_001.fastq.gz',
'03-3333m_S1_L001_R2_001.fastq.gz'
]
self.assertEqual(res_sequence_file_names.sort(), expected_sequence_file_names.sort())
self.assertEqual(sample_1_found, True)
self.assertEqual(sample_2_found, True)
self.assertEqual(sample_3_found, True)
def test_valid_nextseq_upload(self):
"""
Test a valid nextseq directory for upload from end to end
:return:
"""
# Set our sample config file to use miseq parser and the correct irida credentials
self.write_to_config_file(
client_id=tests_integration.client_id,
client_secret=tests_integration.client_secret,
username=tests_integration.username,
password=tests_integration.password,
base_url=tests_integration.base_url,
parser="nextseq"
)
# instance an api
test_api = api.ApiCalls(
client_id=tests_integration.client_id,
client_secret=tests_integration.client_secret,
base_url=tests_integration.base_url,
username=tests_integration.username,
password=tests_integration.password
)
# Create a test project, the uploader does not make new projects on its own
# so one must exist to upload samples into
# This may not be the project that the files get uploaded to,
# but one will be made in the case this is the only test being run
project_name = "test_project_nextseq"
project_description = "test_project_description_nextseq"
project = model.Project(name=project_name, description=project_description)
test_api.send_project(project)
# We always upload to project "1" so that tests will be consistent no matter how many / which tests are run
project_id_1 = "1"
# we are uploading 2 projects, so create another one
project_name_2 = "test_project_nextseq_2"
project_description_2 = "test_project_description_nextseq_2"
project_2 = model.Project(name=project_name_2, description=project_description_2)
test_api.send_project(project_2)
project_id_2 = "2"
# Do the upload
upload_result = upload_run_single_entry(path.join(path_to_module, "fake_nextseq_data"))
# Make sure the upload was a success
self.assertEqual(upload_result.exit_code, 0)
# Verify the files were uploaded
sample_list_1 = test_api.get_samples(project_id_1)
sample_list_2 = test_api.get_samples(project_id_2)
sample_1_found = False
sample_2_found = False
for sample in sample_list_1:
if sample.sample_name == "SA20121712":
sample_1_found = True
sequence_files = test_api.get_sequence_files(project_id_1, sample.sample_name)
self.assertEqual(len(sequence_files), 2)
res_sequence_file_names = [
sequence_files[0]['fileName'],
sequence_files[1]['fileName']
]
expected_sequence_file_names = [
'SA20121712_S2_R1_001.fastq.qz',
'SA20121712_S2_R2_001.fastq.qz'
]
self.assertEqual(res_sequence_file_names.sort(), expected_sequence_file_names.sort())
for sample in sample_list_2:
if sample.sample_name == "SA20121716":
sample_2_found = True
sequence_files = test_api.get_sequence_files(project_id_2, sample.sample_name)
self.assertEqual(len(sequence_files), 2)
res_sequence_file_names = [
sequence_files[0]['fileName'],
sequence_files[1]['fileName']
]
expected_sequence_file_names = [
'SA20121716_S1_R1_001.fastq.qz',
'SA20121716_S1_R2_001.fastq.qz'
]
self.assertEqual(res_sequence_file_names.sort(), expected_sequence_file_names.sort())
self.assertEqual(sample_1_found, True)
self.assertEqual(sample_2_found, True)
def test_upload_to_nonexistent_project(self):
"""
Everything is correct except the sample sheet file specifies an invalid project
Samples should not be uploaded
:return:
"""
# try to upload to a non existent project
# Set our sample config file to use miseq parser and the correct irida credentials
self.write_to_config_file(
client_id=tests_integration.client_id,
client_secret=tests_integration.client_secret,
username=tests_integration.username,
password=tests_integration.password,
base_url=tests_integration.base_url,
parser="miseq"
)
# instance an api
test_api = api.ApiCalls(
client_id=tests_integration.client_id,
client_secret=tests_integration.client_secret,
base_url=tests_integration.base_url,
username=tests_integration.username,
password=tests_integration.password
)
# Create a test project, the uploader does not make new projects on its own
# so one must exist to upload samples into
# This may not be the project that the files get uploaded to,
# but one will be made in the case this is the only test being run
project_name = "test_project"
project_description = "test_project_description"
project = model.Project(name=project_name, description=project_description)
test_api.send_project(project)
# Do the upload
upload_result = upload_run_single_entry(path.join(path_to_module, "fake_ngs_data_nonexistent_project"))
# Make sure the upload was a failure
self.assertEqual(upload_result.exit_code, 1)
# Verify that the project does not exist
project_id = "1000"
with self.assertRaises(api.exceptions.IridaKeyError):
test_api.get_samples(project_id)
def test_upload_parse_fail(self):
"""
Given an invalid sample sheet, make sure that the upload does not happen
:return:
"""
# try to upload to a non existent project
# Set our sample config file to use miseq parser and the correct irida credentials
self.write_to_config_file(
client_id=tests_integration.client_id,
client_secret=tests_integration.client_secret,
username=tests_integration.username,
password=tests_integration.password,
base_url=tests_integration.base_url,
parser="miseq"
)
# instance an api
test_api = api.ApiCalls(
client_id=tests_integration.client_id,
client_secret=tests_integration.client_secret,
base_url=tests_integration.base_url,
username=tests_integration.username,
password=tests_integration.password
)
# Create a test project, the uploader does not make new projects on its own
# so one must exist to upload samples into
# This may not be the project that the files get uploaded to,
# but one will be made in the case this is the only test being run
project_name = "test_project"
project_description = "test_project_description"
project = model.Project(name=project_name, description=project_description)
test_api.send_project(project)
# Do the upload
upload_result = upload_run_single_entry(path.join(path_to_module, "fake_ngs_data_parse_fail"))
# Make sure the upload was a failure
self.assertEqual(upload_result.exit_code, 1)
def test_valid_miseq_with_status_file_force(self):
"""
Test a valid miseq directory for upload from end to end
We create a status file that indicates the files have already been uploaded,
and then use the force option to upload anyways
:return:
"""
# Set our sample config file to use miseq parser and the correct irida credentials
self.write_to_config_file(
client_id=tests_integration.client_id,
client_secret=tests_integration.client_secret,
username=tests_integration.username,
password=tests_integration.password,
base_url=tests_integration.base_url,
parser="miseq"
)
# instance an api
test_api = api.ApiCalls(
client_id=tests_integration.client_id,
client_secret=tests_integration.client_secret,
base_url=tests_integration.base_url,
username=tests_integration.username,
password=tests_integration.password
)
# Create a test project, the uploader does not make new projects on its own
# so one must exist to upload samples into
# This may not be the project that the files get uploaded to,
# but one will be made in the case this is the only test being run
project_name = "test_project_3"
project_description = "test_project_description"
project = model.Project(name=project_name, description=project_description)
test_api.send_project(project)
# We always upload to project "1" so that tests will be consistent no matter how many / which tests are run
project_id = "1"
# Write a status file to the upload directory that we can force past
directory_status = model.DirectoryStatus(directory=path.join(path_to_module, "fake_ngs_data_force"))
directory_status.status = model.DirectoryStatus.COMPLETE
progress.write_directory_status(directory_status)
# Do the upload, with force option
upload_result = upload_run_single_entry(path.join(path_to_module, "fake_ngs_data_force"), True)
# Make sure the upload was a success
self.assertEqual(upload_result.exit_code, 0)
# Verify the files were uploaded
sample_list = test_api.get_samples(project_id)
sample_1_found = False
sample_2_found = False
sample_3_found = False
for sample in sample_list:
if sample.sample_name in ["01-1111", "02-2222", "03-3333"]:
if sample.sample_name == "01-1111":
sample_1_found = True
sequence_files = test_api.get_sequence_files(project_id, sample.sample_name)
self.assertEqual(len(sequence_files), 2)
res_sequence_file_names = [
sequence_files[0]['fileName'],
sequence_files[1]['fileName']
]
expected_sequence_file_names = [
'01-1111_S1_L001_R1_001.fastq.gz',
'01-1111_S1_L001_R2_001.fastq.gz'
]
self.assertEqual(res_sequence_file_names.sort(), expected_sequence_file_names.sort())
elif sample.sample_name == "02-2222":
sample_2_found = True
sequence_files = test_api.get_sequence_files(project_id, sample.sample_name)
self.assertEqual(len(sequence_files), 2)
res_sequence_file_names = [
sequence_files[0]['fileName'],
sequence_files[1]['fileName']
]
expected_sequence_file_names = [
'02-2222_S1_L001_R1_001.fastq.gz',
'02-2222_S1_L001_R2_001.fastq.gz'
]
self.assertEqual(res_sequence_file_names.sort(), expected_sequence_file_names.sort())
elif sample.sample_name == "03-3333":
sample_3_found = True
sequence_files = test_api.get_sequence_files(project_id, sample.sample_name)
self.assertEqual(len(sequence_files), 2)
res_sequence_file_names = [
sequence_files[0]['fileName'],
sequence_files[1]['fileName']
]
expected_sequence_file_names = [
'03-3333_S1_L001_R1_001.fastq.gz',
'03-3333_S1_L001_R2_001.fastq.gz'
]
self.assertEqual(res_sequence_file_names.sort(), expected_sequence_file_names.sort())
self.assertEqual(sample_1_found, True)
self.assertEqual(sample_2_found, True)
self.assertEqual(sample_3_found, True)
def test_valid_miseq_with_status_file_already_uploaded(self):
"""
Test a valid miseq directory for upload from end to end
We create a status file that indicates the files have already been uploaded,
Then make sure it does not upload
:return:
"""
# Set our sample config file to use miseq parser and the correct irida credentials
self.write_to_config_file(
client_id=tests_integration.client_id,
client_secret=tests_integration.client_secret,
username=tests_integration.username,
password=tests_integration.password,
base_url=tests_integration.base_url,
parser="miseq"
)
# Write a status file to the upload directory
directory_status = model.DirectoryStatus(directory=path.join(path_to_module, "fake_ngs_data"))
directory_status.status = model.DirectoryStatus.COMPLETE
progress.write_directory_status(directory_status)
# Do the upload, without force option
upload_result = upload_run_single_entry(path.join(path_to_module, "fake_ngs_data"), False)
# Make sure the upload was a failure
self.assertEqual(upload_result.exit_code, 1)
def test_invalid_miseq_no_completed_file(self):
"""
Test a valid miseq directory for upload from end to end
We create a status file that indicates the files have already been uploaded,
Then make sure it does not upload
:return:
"""
# Set our sample config file to use miseq parser and the correct irida credentials
self.write_to_config_file(
client_id=tests_integration.client_id,
client_secret=tests_integration.client_secret,
username=tests_integration.username,
password=tests_integration.password,
base_url=tests_integration.base_url,
parser="miseq"
)
# Do the upload, without force option
upload_result = upload_run_single_entry(path.join(path_to_module, "fake_ngs_data_no_completed_file"), False)
# Make sure the upload was a failure
self.assertEqual(upload_result.exit_code, 1)
| [
11748,
555,
715,
395,
198,
6738,
28686,
1330,
3108,
198,
11748,
28686,
198,
198,
6738,
4173,
312,
559,
7304,
263,
13,
41989,
62,
18908,
1358,
1330,
5254,
62,
18908,
1358,
198,
198,
11748,
4173,
312,
559,
7304,
263,
13,
11250,
355,
456... | 2.119478 | 15,869 |
from __future__ import absolute_import
import mock
from sentry.interfaces.geo import Geo
from sentry.testutils import TestCase
| [
6738,
11593,
37443,
834,
1330,
4112,
62,
11748,
198,
198,
11748,
15290,
198,
198,
6738,
1908,
563,
13,
3849,
32186,
13,
469,
78,
1330,
32960,
198,
6738,
1908,
563,
13,
9288,
26791,
1330,
6208,
20448,
628
] | 3.611111 | 36 |
import threading
import time
"""
Associo Threads a funções
Nomeio Threads
Pego o nome de threads nomeadas
"""
# my modification: thread id's
if __name__ == "__main__":
t1 = threading.Thread(name='first_function', target=first_function)
t2 = threading.Thread(name='second_function', target=second_function)
t3 = threading.Thread(name='third_function', target=third_function)
t1.start()
t2.start()
t3.start()
# join([timeout]) Wait until the thread terminates
# https://stackoverflow.com/questions/15085348/what-is-the-use-of-join-in-python-threading
t1.join()
print(t1.is_alive())
print('t1 joined')# my modifications -> this line is executed when t1 terminates
# print('press to continue and execute t2: ', input()) # but in fact, the t2 had already been started and proplably had already been executed. The same happens with the other threads
t2.join()
print(t2.is_alive())
print('t2 joined')# my modifications -> this line is executed when t2
# print('press to continue: ', input())
# print('press to continue and execute t3: ', input())
t3.join()
print(t3.is_alive())
print('t3 joined')# my modifications -> this line is executed when t2
# print('press to finish the main thread: ', input())
"""
output:
thread id: 140204642060032 first_function is Starting
thread id: 140204633667328 second_function is Starting
thread id: 140204625274624 third_function is Starting
thread id: 140204642060032 first_function is Exiting
thread id: 140204625274624 third_function is Exiting
thread id: 140204633667328 second_function is Exiting
"""
| [
11748,
4704,
278,
201,
198,
11748,
640,
201,
198,
201,
198,
37811,
201,
198,
8021,
420,
952,
14122,
82,
257,
1257,
16175,
127,
113,
274,
201,
198,
45,
462,
952,
14122,
82,
201,
198,
47,
1533,
78,
267,
299,
462,
390,
14390,
299,
46... | 2.692429 | 634 |
# -*- coding: utf-8 -*-
"""
Created on Sun Aug 8 21:02:21 2021
@author: Jerry
"""
import matplotlib.pyplot as plt
#
| [
2,
532,
9,
12,
19617,
25,
3384,
69,
12,
23,
532,
9,
12,
198,
37811,
198,
41972,
319,
3825,
2447,
220,
807,
2310,
25,
2999,
25,
2481,
33448,
198,
198,
31,
9800,
25,
13075,
198,
37811,
198,
11748,
2603,
29487,
8019,
13,
9078,
29487,... | 2.333333 | 51 |
import numpy as np
import matplotlib.pyplot as plt
data = [2, 1, 3, 0.13, 1, 20]
| [
11748,
299,
32152,
355,
45941,
198,
11748,
2603,
29487,
8019,
13,
9078,
29487,
355,
458,
83,
628,
198,
198,
7890,
796,
685,
17,
11,
352,
11,
513,
11,
657,
13,
1485,
11,
352,
11,
1160,
60,
628
] | 2.297297 | 37 |
from .... pyaz_utils import _call_az
def download(destination, node_id, pool_id, account_endpoint=None, account_key=None, account_name=None):
'''
Required Parameters:
- destination -- The path to the destination file or directory.
- node_id -- The ID of the Compute Node for which you want to get the Remote Desktop Protocol file.
- pool_id -- The ID of the Pool that contains the Compute Node.
Optional Parameters:
- account_endpoint -- Batch service endpoint. Alternatively, set by environment variable: AZURE_BATCH_ENDPOINT
- account_key -- Batch account key. Alternatively, set by environment variable: AZURE_BATCH_ACCESS_KEY
- account_name -- Batch account name. Alternatively, set by environment variable: AZURE_BATCH_ACCOUNT
'''
return _call_az("az batch node remote-desktop download", locals())
| [
6738,
19424,
12972,
1031,
62,
26791,
1330,
4808,
13345,
62,
1031,
198,
198,
4299,
4321,
7,
16520,
1883,
11,
10139,
62,
312,
11,
5933,
62,
312,
11,
1848,
62,
437,
4122,
28,
14202,
11,
1848,
62,
2539,
28,
14202,
11,
1848,
62,
3672,
... | 3.356863 | 255 |
################################################################################
#
# Package : AlphaPy
# Module : features
# Created : July 11, 2013
#
# Copyright 2020 ScottFree Analytics LLC
# Mark Conway & Robert D. Scott II
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
################################################################################
#
# Imports
#
from alphapy.globals import BSEP, LOFF, NULLTEXT
from alphapy.globals import PSEP, SSEP, USEP
from alphapy.globals import Encoders
from alphapy.globals import ModelType
from alphapy.globals import Scalers
from alphapy.variables import Variable
from alphapy.variables import vparse
import category_encoders as ce
from importlib import import_module
import itertools
import logging
import math
import numpy as np
import os
import pandas as pd
import re
from scipy import sparse
import scipy.stats as sps
from sklearn.cluster import MiniBatchKMeans
from sklearn.decomposition import PCA
from sklearn.feature_extraction.text import TfidfVectorizer
from sklearn.feature_selection import chi2
from sklearn.feature_selection import f_classif
from sklearn.feature_selection import f_regression
from sklearn.feature_selection import SelectFdr
from sklearn.feature_selection import SelectFpr
from sklearn.feature_selection import SelectFwe
from sklearn.feature_selection import SelectKBest
from sklearn.feature_selection import SelectPercentile
from sklearn.feature_selection import VarianceThreshold
from sklearn.impute import SimpleImputer
from sklearn.manifold import Isomap
from sklearn.manifold import TSNE
from sklearn.preprocessing import MinMaxScaler
from sklearn.preprocessing import PolynomialFeatures
from sklearn.preprocessing import StandardScaler
import sys
#
# Initialize logger
#
logger = logging.getLogger(__name__)
#
# Define feature scoring functions
#
feature_scorers = {'f_classif' : f_classif,
'chi2' : chi2,
'f_regression' : f_regression,
'SelectKBest' : SelectKBest,
'SelectFpr' : SelectFpr,
'SelectFdr' : SelectFdr,
'SelectFwe' : SelectFwe}
#
# Define Encoder map
#
encoder_map = {Encoders.backdiff : ce.BackwardDifferenceEncoder,
Encoders.basen : ce.BaseNEncoder,
Encoders.binary : ce.BinaryEncoder,
Encoders.catboost : ce.CatBoostEncoder,
Encoders.hashing : ce.HashingEncoder,
Encoders.helmert : ce.HelmertEncoder,
Encoders.jstein : ce.JamesSteinEncoder,
Encoders.leaveone : ce.LeaveOneOutEncoder,
Encoders.mestimate : ce.MEstimateEncoder,
Encoders.onehot : ce.OneHotEncoder,
Encoders.ordinal : ce.OrdinalEncoder,
Encoders.polynomial : ce.PolynomialEncoder,
Encoders.sum : ce.SumEncoder,
Encoders.target : ce.TargetEncoder,
Encoders.woe : ce.WOEEncoder}
#
# Function apply_transform
#
def apply_transform(fname, df, fparams):
r"""Apply a transform function to a column of the dataframe.
Parameters
----------
fname : str
Name of the column to be treated in the dataframe ``df``.
df : pandas.DataFrame
Dataframe containing the column ``fname``.
fparams : list
The module, function, and parameter list of the transform
function
Returns
-------
new_features : pandas.DataFrame
The set of features after applying a transform function.
"""
# Extract the transform parameter list
module = fparams[0]
func_name = fparams[1]
plist = fparams[2:]
# Append to system path
sys.path.append(os.getcwd())
# Import the external transform function
ext_module = import_module(module)
func = getattr(ext_module, func_name)
# Prepend the parameter list with the data frame and feature name
plist.insert(0, fname)
plist.insert(0, df)
# Apply the transform
logger.info("Applying function %s from module %s to feature %s",
func_name, module, fname)
return func(*plist)
#
# Function apply_transforms
#
def apply_transforms(model, X):
r"""Apply special functions to the original features.
Parameters
----------
model : alphapy.Model
Model specifications indicating any transforms.
X : pandas.DataFrame
Combined train and test data, or just prediction data.
Returns
-------
all_features : pandas.DataFrame
All features, including transforms.
Raises
------
IndexError
The number of transform rows must match the number of
rows in ``X``.
"""
# Extract model parameters
transforms = model.specs['transforms']
# Log input parameters
logger.info("Original Features : %s", X.columns)
logger.info("Feature Count : %d", X.shape[1])
# Iterate through columns, dispatching and transforming each feature.
logger.info("Applying transforms")
all_features = X
if transforms:
for fname in transforms:
# find feature series
fcols = []
for col in X.columns:
if col.split(LOFF)[0] == fname:
fcols.append(col)
# get lag values
lag_values = []
for item in fcols:
_, _, _, lag = vparse(item)
lag_values.append(lag)
# apply transform to the most recent value
if lag_values:
f_latest = fcols[lag_values.index(min(lag_values))]
features = apply_transform(f_latest, X, transforms[fname])
if features is not None:
if features.shape[0] == X.shape[0]:
all_features = pd.concat([all_features, features], axis=1)
else:
raise IndexError("The number of transform rows [%d] must match X [%d]" %
(features.shape[0], X.shape[0]))
else:
logger.info("Could not apply transform for feature %s", fname)
else:
logger.info("Feature %s is missing for transform", fname)
else:
logger.info("No transforms Specified")
logger.info("New Feature Count : %d", all_features.shape[1])
# Return all transformed training and test features
return all_features
#
# Function impute_values
#
def impute_values(feature, dt, sentinel):
r"""Impute values for a given data type. The *median* strategy
is applied for floating point values, and the *most frequent*
strategy is applied for integer or Boolean values.
Parameters
----------
feature : pandas.Series or numpy.array
The feature for imputation.
dt : str
The values ``'float64'``, ``'int64'``, or ``'bool'``.
sentinel : float
The number to be imputed for NaN values.
Returns
-------
imputed : numpy.array
The feature after imputation.
Raises
------
TypeError
Data type ``dt`` is invalid for imputation.
References
----------
You can find more information on feature imputation here [IMP]_.
.. [IMP] http://scikit-learn.org/stable/modules/preprocessing.html#imputation
"""
try:
# for pandas series
feature = feature.values.reshape(-1, 1)
except:
# for numpy array
feature = feature.reshape(-1, 1)
if dt == 'float64':
logger.info(" Imputation for Data Type %s: Median Strategy" % dt)
# replace infinity with imputed value
feature[np.isinf(feature)] = np.nan
imp = SimpleImputer(missing_values=np.nan, strategy='median')
elif dt == 'int64':
logger.info(" Imputation for Data Type %s: Most Frequent Strategy" % dt)
imp = SimpleImputer(missing_values=np.nan, strategy='most_frequent')
elif dt != 'bool':
logger.info(" Imputation for Data Type %s: Fill Strategy with %d" % (dt, sentinel))
imp = SimpleImputer(missing_values=np.nan, strategy='constant', fill_value=sentinel)
else:
logger.info(" No Imputation for Data Type %s" % dt)
imp = None
imputed = imp.fit_transform(feature) if imp else feature
return imputed
#
# Function get_numerical_features
#
def get_numerical_features(fnum, fname, df, nvalues, dt,
                           sentinel, logt, plevel):
    r"""Transform numerical features with imputation and possibly
    log-transformation.

    Parameters
    ----------
    fnum : int
        Feature number, strictly for logging purposes
    fname : str
        Name of the numerical column in the dataframe ``df``.
    df : pandas.DataFrame
        Dataframe containing the column ``fname``.
    nvalues : int
        The number of unique values.
    dt : str
        The values ``'float64'``, ``'int64'``, or ``'bool'``.
    sentinel : float
        The number to be imputed for NaN values.
    logt : bool
        If ``True``, then log-transform numerical values.
    plevel : float
        The p-value threshold to test if a feature is normally distributed.

    Returns
    -------
    new_values : numpy array
        The set of imputed and transformed features.
    new_fnames : list
        The new feature name(s) for the numerical variable.

    """
    feature = df[fname]
    if len(feature) == nvalues:
        logger.info("Feature %d: %s is a numerical feature of type %s with maximum number of values %d",
                    fnum, fname, dt, nvalues)
    else:
        logger.info("Feature %d: %s is a numerical feature of type %s with %d unique values",
                    fnum, fname, dt, nvalues)
    # imputer for float, integer, or boolean data types
    new_values = impute_values(feature, dt, sentinel)
    # log-transform any values that do not fit a normal distribution
    new_fname = fname
    if logt and np.all(new_values > 0):
        _, pvalue = sps.normaltest(new_values)
        if pvalue <= plevel:
            logger.info("Feature %d: %s is not normally distributed [p-value: %f]",
                        fnum, fname, pvalue)
            new_values = np.log(new_values)
            # BUG FIX: the 'log' suffix must be attached in the same branch
            # where np.log is actually applied; previously the rename sat in
            # an else-branch, tagging exactly the features that were NOT
            # transformed.
            new_fname = USEP.join([new_fname, 'log'])
    return new_values, [new_fname]
#
# Function get_polynomials
#
def get_polynomials(features, poly_degree):
    r"""Generate interaction terms that are products of distinct features.

    Parameters
    ----------
    features : pandas.DataFrame
        Dataframe containing the features for generating interactions.
    poly_degree : int
        The degree of the polynomial features.

    Returns
    -------
    poly_features : numpy array
        The interaction features only.
    poly_fnames : list
        List of polynomial feature names.

    References
    ----------
    You can find more information on polynomial interactions here [POLY]_.

    .. [POLY] http://scikit-learn.org/stable/modules/generated/sklearn.preprocessing.PolynomialFeatures.html

    """
    # interaction_only=True emits products of distinct features (no powers);
    # include_bias=False drops the constant column.
    transformer = PolynomialFeatures(degree=poly_degree,
                                     interaction_only=True,
                                     include_bias=False)
    poly_features = transformer.fit_transform(features)
    poly_fnames = transformer.get_feature_names()
    return poly_features, poly_fnames
#
# Function get_text_features
#
def get_text_features(fnum, fname, df, nvalues, vectorize, ngrams_max):
    r"""Transform text features with count vectorization and TF-IDF,
    or alternatively factorization.

    Parameters
    ----------
    fnum : int
        Feature number, strictly for logging purposes
    fname : str
        Name of the text column in the dataframe ``df``.
    df : pandas.DataFrame
        Dataframe containing the column ``fname``.
    nvalues : int
        The number of unique values.
    vectorize : bool
        If ``True``, then attempt count vectorization.
    ngrams_max : int
        The maximum number of n-grams for count vectorization.

    Returns
    -------
    new_features : numpy array
        The vectorized or factorized text features.
    new_fnames : list
        The new feature name(s) for the numerical variable.

    References
    ----------
    To use count vectorization and TF-IDF, you can find more
    information here [TFE]_.

    """

    def factorize_feature(f):
        # Fallback encoding: map each distinct text value to an integer code.
        codes, _ = pd.factorize(f)
        return codes, [USEP.join([fname, 'factor'])]

    feature = df[fname]
    min_length = int(feature.astype(str).str.len().min())
    max_length = int(feature.astype(str).str.len().max())
    if len(feature) == nvalues:
        logger.info("Feature %d: %s is a text feature [%d:%d] with maximum number of values %d",
                    fnum, fname, min_length, max_length, nvalues)
    else:
        logger.info("Feature %d: %s is a text feature [%d:%d] with %d unique values",
                    fnum, fname, min_length, max_length, nvalues)
    # need a null text placeholder for vectorization
    # NOTE: fillna(inplace=True) mutates the caller's dataframe column.
    feature.fillna(value=NULLTEXT, inplace=True)
    # vectorization creates many columns, otherwise just factorize
    if vectorize:
        logger.info("Feature %d: %s => Attempting Vectorization", fnum, fname)
        vectorizer = TfidfVectorizer(ngram_range=[1, ngrams_max])
        try:
            new_features = vectorizer.fit_transform(feature)
            new_fnames = vectorizer.get_feature_names()
            logger.info("Feature %d: %s => Vectorization Succeeded", fnum, fname)
        except Exception:
            # BUG FIX: catch Exception rather than a bare except so that
            # KeyboardInterrupt/SystemExit are not swallowed by the fallback.
            logger.info("Feature %d: %s => Vectorization Failed", fnum, fname)
            new_features, new_fnames = factorize_feature(feature)
    else:
        logger.info("Feature %d: %s => Factorization", fnum, fname)
        new_features, new_fnames = factorize_feature(feature)
    return new_features, new_fnames
#
# Function float_factor
#
def float_factor(x, rounding):
    r"""Convert a floating point number to a factor.

    Parameters
    ----------
    x : float
        The value to convert to a factor.
    rounding : int
        The number of places to round.

    Returns
    -------
    ffactor : int
        The resulting factor.

    """
    # Render the value with a fixed number of decimal places, then keep only
    # the digit characters (sign and decimal point are discarded).
    rounded = '{0:.{1}f}'.format(x, rounding)
    digits = ''.join(ch for ch in rounded if ch.isdigit())
    return int(digits) if digits else 0
#
# Function create_crosstabs
#
def create_crosstabs(model):
    r"""Create cross-tabulations for categorical variables.

    Parameters
    ----------
    model : alphapy.Model
        The model object containing the data.

    Returns
    -------
    model : alphapy.Model
        The model object with the updated feature map.

    """
    logger.info("Creating Cross-Tabulations")
    # Extract model data and parameters.
    X = model.X_train
    y = model.y_train
    factors = model.specs['factors']
    # Build a row-normalized crosstab for every factor column present in X.
    crosstabs = {}
    for col in X:
        if col in factors:
            logger.info("Creating crosstabs for feature %s", col)
            table = pd.crosstab(X[col], y)
            crosstabs[col] = table.apply(lambda row: row / row.sum(), axis=1)
    # Save crosstabs to the feature map
    model.feature_map['crosstabs'] = crosstabs
    return model
#
# Function get_factors
#
def get_factors(model, X_train, X_test, y_train, fnum, fname,
                nvalues, dtype, encoder, rounding, sentinel):
    r"""Convert the original feature to a factor.

    Parameters
    ----------
    model : alphapy.Model
        Model object with the feature specifications.
    X_train : pandas.DataFrame
        Training dataframe containing the column ``fname``.
    X_test : pandas.DataFrame
        Testing dataframe containing the column ``fname``.
    y_train : pandas.Series
        Training series for target variable.
    fnum : int
        Feature number, strictly for logging purposes
    fname : str
        Name of the text column in the dataframe ``df``.
    nvalues : int
        The number of unique values.
    dtype : str
        The values ``'float64'``, ``'int64'``, or ``'bool'``.
    encoder : alphapy.features.Encoders
        Type of encoder to apply.
    rounding : int
        Number of places to round.
    sentinel : float
        The number to be imputed for NaN values.

    Returns
    -------
    all_features : numpy array
        The features that have been transformed to factors.
    all_fnames : list
        The feature names for the encodings.

    """
    logger.info("Feature %d: %s is a factor of type %s with %d unique values",
                fnum, fname, dtype, nvalues)
    logger.info("Encoding: %s", encoder)
    # get feature
    feature_train = X_train[fname]
    feature_test = X_test[fname]
    # convert float to factor
    if dtype == 'float64':
        logger.info("Rounding: %d", rounding)
        feature_train = feature_train.apply(float_factor, args=[rounding])
        feature_test = feature_test.apply(float_factor, args=[rounding])
    # create data frames for the feature
    df_train = pd.DataFrame(feature_train)
    df_test = pd.DataFrame(feature_test)
    # encoders
    enc = None
    try:
        enc = encoder_map[encoder](cols=[fname])
    except KeyError:
        # BUG FIX: only a missing map entry means an unknown encoder; the
        # previous bare except also masked unrelated errors raised by the
        # encoder constructor itself.
        raise ValueError("Unknown Encoder %s" % encoder)
    # Transform the train and test features.
    if enc is not None:
        # fit training features
        logger.info("Fitting training features for %s", fname)
        ftrain = enc.fit_transform(df_train, y_train)
        # fit testing features
        logger.info("Transforming testing features for %s", fname)
        ftest = enc.transform(df_test)
        # get feature names
        all_fnames = enc.get_feature_names()
        # concatenate all generated features
        all_features = np.row_stack((ftrain, ftest))
    else:
        all_features = None
        all_fnames = None
        # lazy %-style logger arguments instead of eager interpolation
        logger.info("Encoding for feature %s failed", fname)
    return all_features, all_fnames
#
# Function create_numpy_features
#
def create_numpy_features(base_features, sentinel):
    r"""Calculate the sum, mean, standard deviation, and variance
    of each row.

    Parameters
    ----------
    base_features : numpy array
        The feature dataframe.
    sentinel : float
        The number to be imputed for NaN values.

    Returns
    -------
    np_features : numpy array
        The calculated NumPy features.
    np_fnames : list
        The NumPy feature names.

    """
    logger.info("Creating NumPy Features")
    # Calculate the total, mean, standard deviation, and variance.
    # Insertion order of this dict fixes the column order of np_features.
    np_funcs = {'sum': np.sum,
                'mean': np.mean,
                'std': np.std,
                'var': np.var}
    features = []
    for np_fname, np_func in np_funcs.items():
        logger.info("NumPy Feature: %s", np_fname)
        feature = np_func(base_features, axis=1)
        feature = impute_values(feature, 'float64', sentinel)
        features.append(feature)
    # Stack and scale the new features.
    np_features = np.column_stack(features)
    np_features = StandardScaler().fit_transform(np_features)
    # Return new NumPy features
    logger.info("NumPy Feature Count : %d", np_features.shape[1])
    # BUG FIX: return a real list of names rather than a dict_keys view so
    # the result is indexable and serializable like the other fname lists.
    return np_features, list(np_funcs)
#
# Function create_scipy_features
#
def create_scipy_features(base_features, sentinel):
    r"""Calculate the skew, kurtosis, and other statistical features
    for each row.

    Parameters
    ----------
    base_features : numpy array
        The feature dataframe.
    sentinel : float
        The number to be imputed for NaN values.

    Returns
    -------
    sp_features : numpy array
        The calculated SciPy features.
    sp_fnames : list
        The SciPy feature names.

    """
    logger.info("Creating SciPy Features")
    # Generate scipy features
    logger.info("SciPy Feature: geometric mean")
    row_gmean = sps.gmean(base_features, axis=1)
    logger.info("SciPy Feature: kurtosis")
    row_kurtosis = sps.kurtosis(base_features, axis=1)
    logger.info("SciPy Feature: kurtosis test")
    row_ktest, pvalue = sps.kurtosistest(base_features, axis=1)
    logger.info("SciPy Feature: normal test")
    row_normal, pvalue = sps.normaltest(base_features, axis=1)
    logger.info("SciPy Feature: skew")
    row_skew = sps.skew(base_features, axis=1)
    logger.info("SciPy Feature: skew test")
    row_stest, pvalue = sps.skewtest(base_features, axis=1)
    logger.info("SciPy Feature: variation")
    row_var = sps.variation(base_features, axis=1)
    logger.info("SciPy Feature: signal-to-noise ratio")
    # BUG FIX: scipy.stats.signaltonoise was deprecated in SciPy 0.15 and
    # removed in SciPy 1.0, so compute the same quantity directly:
    # mean / std (ddof=0), with 0 wherever the standard deviation is 0.
    row_mean = np.mean(base_features, axis=1)
    row_std = np.std(base_features, axis=1)
    row_stn = np.where(row_std == 0, 0, row_mean / row_std)
    logger.info("SciPy Feature: standard error of mean")
    row_sem = sps.sem(base_features, axis=1)
    sp_features = np.column_stack((row_gmean, row_kurtosis, row_ktest,
                                   row_normal, row_skew, row_stest,
                                   row_var, row_stn, row_sem))
    sp_features = impute_values(sp_features, 'float64', sentinel)
    sp_features = StandardScaler().fit_transform(sp_features)
    # Return new SciPy features
    logger.info("SciPy Feature Count : %d", sp_features.shape[1])
    sp_fnames = ['sp_geometric_mean',
                 'sp_kurtosis',
                 'sp_kurtosis_test',
                 'sp_normal_test',
                 'sp_skew',
                 'sp_skew_test',
                 'sp_variation',
                 'sp_signal_to_noise',
                 'sp_standard_error_of_mean']
    return sp_features, sp_fnames
#
# Function create_clusters
#
def create_clusters(features, model):
    r"""Cluster the given features.

    Parameters
    ----------
    features : numpy array
        The features to cluster.
    model : alphapy.Model
        The model object with the clustering parameters.

    Returns
    -------
    cfeatures : numpy array
        The calculated clusters.
    cnames : list
        The cluster feature names.

    References
    ----------
    You can find more information on clustering here [CLUS]_.

    .. [CLUS] http://scikit-learn.org/stable/modules/clustering.html

    """
    logger.info("Creating Clustering Features")
    # Extract model parameters
    cluster_inc = model.specs['cluster_inc']
    cluster_max = model.specs['cluster_max']
    cluster_min = model.specs['cluster_min']
    seed = model.specs['seed']
    # Log model parameters
    logger.info("Cluster Minimum : %d", cluster_min)
    logger.info("Cluster Maximum : %d", cluster_max)
    logger.info("Cluster Increment : %d", cluster_inc)
    # Fit one k-means model per cluster count and append its labels
    # as a new feature column.
    cfeatures = np.empty((features.shape[0], 0))
    cnames = []
    for n_clusters in range(cluster_min, cluster_max + 1, cluster_inc):
        logger.info("k = %d", n_clusters)
        clusterer = MiniBatchKMeans(n_clusters=n_clusters, random_state=seed)
        clusterer.fit(features)
        labels = clusterer.predict(features).reshape(-1, 1)
        cfeatures = np.column_stack((cfeatures, labels))
        cnames.append(USEP.join(['cluster', str(n_clusters)]))
    # Return new clustering features
    logger.info("Clustering Feature Count : %d", cfeatures.shape[1])
    return cfeatures, cnames
#
# Function create_pca_features
#
def create_pca_features(features, model):
    r"""Apply Principal Component Analysis (PCA) to the features.

    Parameters
    ----------
    features : numpy array
        The input features.
    model : alphapy.Model
        The model object with the PCA parameters.

    Returns
    -------
    pfeatures : numpy array
        The PCA features.
    pnames : list
        The PCA feature names.

    References
    ----------
    You can find more information on Principal Component Analysis here [PCA]_.

    .. [PCA] http://scikit-learn.org/stable/modules/decomposition.html#pca

    """
    logger.info("Creating PCA Features")
    # Extract model parameters
    pca_inc = model.specs['pca_inc']
    pca_max = model.specs['pca_max']
    pca_min = model.specs['pca_min']
    pca_whiten = model.specs['pca_whiten']
    # Log model parameters
    logger.info("PCA Minimum : %d", pca_min)
    logger.info("PCA Maximum : %d", pca_max)
    logger.info("PCA Increment : %d", pca_inc)
    logger.info("PCA Whitening : %r", pca_whiten)
    # Fit one PCA per component count and append its projection columns.
    pfeatures = np.empty((features.shape[0], 0))
    pnames = []
    for n_comp in range(pca_min, pca_max + 1, pca_inc):
        logger.info("n_components = %d", n_comp)
        projection = PCA(n_components=n_comp, whiten=pca_whiten).fit_transform(features)
        pfeatures = np.column_stack((pfeatures, projection))
        pnames.append(USEP.join(['pca', str(n_comp)]))
    # Return new clustering features
    logger.info("PCA Feature Count : %d", pfeatures.shape[1])
    return pfeatures, pnames
#
# Function create_isomap_features
#
def create_isomap_features(features, model):
    r"""Create Isomap features.

    Parameters
    ----------
    features : numpy array
        The input features.
    model : alphapy.Model
        The model object with the Isomap parameters.

    Returns
    -------
    ifeatures : numpy array
        The Isomap features.
    inames : list
        The Isomap feature names.

    Notes
    -----
    Isomaps are very memory-intensive. Your process will be killed
    if you run out of memory.

    References
    ----------
    You can find more information on Principal Component Analysis here [ISO]_.

    .. [ISO] http://scikit-learn.org/stable/modules/manifold.html#isomap

    """
    logger.info("Creating Isomap Features")
    # Extract model parameters
    iso_components = model.specs['iso_components']
    iso_neighbors = model.specs['iso_neighbors']
    n_jobs = model.specs['n_jobs']
    # Log model parameters
    logger.info("Isomap Components : %d", iso_components)
    logger.info("Isomap Neighbors : %d", iso_neighbors)
    # Generate Isomap features; a dedicated estimator name avoids
    # shadowing the model argument.
    embedder = Isomap(n_neighbors=iso_neighbors, n_components=iso_components,
                      n_jobs=n_jobs)
    ifeatures = embedder.fit_transform(features)
    inames = [USEP.join(['isomap', str(c + 1)]) for c in range(iso_components)]
    # Return new Isomap features
    logger.info("Isomap Feature Count : %d", ifeatures.shape[1])
    return ifeatures, inames
#
# Function create_tsne_features
#
def create_tsne_features(features, model):
    r"""Create t-SNE features.

    Parameters
    ----------
    features : numpy array
        The input features.
    model : alphapy.Model
        The model object with the t-SNE parameters.

    Returns
    -------
    tfeatures : numpy array
        The t-SNE features.
    tnames : list
        The t-SNE feature names.

    References
    ----------
    You can find more information on the t-SNE technique here [TSNE]_.

    .. [TSNE] http://scikit-learn.org/stable/modules/manifold.html#t-distributed-stochastic-neighbor-embedding-t-sne

    """
    logger.info("Creating T-SNE Features")
    # Extract model parameters
    seed = model.specs['seed']
    tsne_components = model.specs['tsne_components']
    tsne_learn_rate = model.specs['tsne_learn_rate']
    tsne_perplexity = model.specs['tsne_perplexity']
    # Log model parameters
    logger.info("T-SNE Components : %d", tsne_components)
    logger.info("T-SNE Learning Rate : %d", tsne_learn_rate)
    logger.info("T-SNE Perplexity : %d", tsne_perplexity)
    # Embed the features; a dedicated estimator name avoids shadowing
    # the model argument.
    embedder = TSNE(n_components=tsne_components, perplexity=tsne_perplexity,
                    learning_rate=tsne_learn_rate, random_state=seed)
    tfeatures = embedder.fit_transform(features)
    tnames = [USEP.join(['tsne', str(c + 1)]) for c in range(tsne_components)]
    # Return new T-SNE features
    logger.info("T-SNE Feature Count : %d", tfeatures.shape[1])
    return tfeatures, tnames
#
# Function create_features
#
def create_features(model, X, X_train, X_test, y_train):
    r"""Create features for the train and test set.

    Dispatches every column of ``X`` to factor, numerical, or text
    processing, optionally scales the resulting base features, and then
    appends the optional NumPy, SciPy, clustering, PCA, Isomap, and t-SNE
    feature groups, keeping ``model.feature_names`` in sync throughout.

    Parameters
    ----------
    model : alphapy.Model
        Model object with the feature specifications.
    X : pandas.DataFrame
        Combined train and test data.
    X_train : pandas.DataFrame
        Training data.
    X_test : pandas.DataFrame
        Testing data.
    y_train : pandas.DataFrame
        Target variable for training data.

    Returns
    -------
    all_features : numpy array
        The new features.

    Raises
    ------
    TypeError
        Unrecognized data type.

    """
    # Extract model parameters
    clustering = model.specs['clustering']
    counts_flag = model.specs['counts']
    encoder = model.specs['encoder']
    factors = model.specs['factors']
    isomap = model.specs['isomap']
    logtransform = model.specs['logtransform']
    ngrams_max = model.specs['ngrams_max']
    numpy_flag = model.specs['numpy']
    pca = model.specs['pca']
    pvalue_level = model.specs['pvalue_level']
    rounding = model.specs['rounding']
    scaling = model.specs['scaler_option']
    scaler = model.specs['scaler_type']
    scipy_flag = model.specs['scipy']
    sentinel = model.specs['sentinel']
    tsne = model.specs['tsne']
    vectorize = model.specs['vectorize']
    # Log input parameters
    logger.info("Original Features : %s", X.columns)
    logger.info("Feature Count : %d", X.shape[1])
    # Count zero and NaN values
    if counts_flag:
        logger.info("Creating Count Features")
        logger.info("NA Counts")
        # NOTE(review): X.count(axis=1) counts the *non*-NA cells per row;
        # the column name 'nan_count' suggests X.isnull().sum(axis=1) may
        # have been intended -- confirm before changing.
        X['nan_count'] = X.count(axis=1)
        logger.info("Number Counts")
        # count_i records how many cells in each row equal the digit i
        for i in range(10):
            fc = USEP.join(['count', str(i)])
            X[fc] = (X == i).astype(int).sum(axis=1)
        logger.info("New Feature Count : %d", X.shape[1])
    # Iterate through columns, dispatching and transforming each feature.
    logger.info("Creating Base Features")
    # dummy first column; removed below after the real features are stacked
    all_features = np.zeros((X.shape[0], 1))
    model.feature_names = []
    for i, fname in enumerate(X):
        fnum = i + 1
        dtype = X[fname].dtypes
        nunique = len(X[fname].unique())
        # standard processing of numerical, categorical, and text features
        if factors and fname in factors:
            features, fnames = get_factors(model, X_train, X_test, y_train, fnum, fname,
                                           nunique, dtype, encoder, rounding, sentinel)
        elif dtype == 'float64' or dtype == 'int64' or dtype == 'bool':
            features, fnames = get_numerical_features(fnum, fname, X, nunique, dtype,
                                                      sentinel, logtransform, pvalue_level)
        elif dtype == 'object':
            features, fnames = get_text_features(fnum, fname, X, nunique, vectorize, ngrams_max)
        else:
            raise TypeError("Base Feature Error with unrecognized type %s" % dtype)
        if features.shape[0] == all_features.shape[0]:
            # add features
            all_features = np.column_stack((all_features, features))
            # add feature names
            model.feature_names.extend(fnames)
        else:
            # skip features whose row count does not match the frame
            logger.info("Feature %s has the wrong number of rows: %d",
                        fname, features.shape[0])
    # drop the dummy first column
    all_features = np.delete(all_features, 0, axis=1)
    logger.info("New Feature Count : %d", all_features.shape[1])
    # Call standard scaler for all features
    if scaling:
        logger.info("Scaling Base Features")
        if scaler == Scalers.standard:
            all_features = StandardScaler().fit_transform(all_features)
        elif scaler == Scalers.minmax:
            all_features = MinMaxScaler().fit_transform(all_features)
        else:
            logger.info("Unrecognized scaler: %s", scaler)
    else:
        logger.info("Skipping Scaling")
    # Perform dimensionality reduction only on base feature set
    base_features = all_features
    # Calculate the total, mean, standard deviation, and variance
    if numpy_flag:
        np_features, fnames = create_numpy_features(base_features, sentinel)
        all_features = np.column_stack((all_features, np_features))
        model.feature_names.extend(fnames)
        logger.info("New Feature Count : %d", all_features.shape[1])
    # Generate scipy features
    if scipy_flag:
        sp_features, fnames = create_scipy_features(base_features, sentinel)
        all_features = np.column_stack((all_features, sp_features))
        model.feature_names.extend(fnames)
        logger.info("New Feature Count : %d", all_features.shape[1])
    # Create clustering features
    if clustering:
        cfeatures, fnames = create_clusters(base_features, model)
        all_features = np.column_stack((all_features, cfeatures))
        model.feature_names.extend(fnames)
        logger.info("New Feature Count : %d", all_features.shape[1])
    # Create PCA features
    if pca:
        pfeatures, fnames = create_pca_features(base_features, model)
        all_features = np.column_stack((all_features, pfeatures))
        model.feature_names.extend(fnames)
        logger.info("New Feature Count : %d", all_features.shape[1])
    # Create Isomap features
    if isomap:
        ifeatures, fnames = create_isomap_features(base_features, model)
        all_features = np.column_stack((all_features, ifeatures))
        model.feature_names.extend(fnames)
        logger.info("New Feature Count : %d", all_features.shape[1])
    # Create T-SNE features
    if tsne:
        tfeatures, fnames = create_tsne_features(base_features, model)
        all_features = np.column_stack((all_features, tfeatures))
        model.feature_names.extend(fnames)
        logger.info("New Feature Count : %d", all_features.shape[1])
    # Return all transformed training and test features
    assert all_features.shape[1] == len(model.feature_names), "Mismatched Features and Names"
    return all_features
#
# Function select_features
#
def select_features(model):
    r"""Select features with univariate selection.

    Parameters
    ----------
    model : alphapy.Model
        Model object with the feature selection specifications.

    Returns
    -------
    model : alphapy.Model
        Model object with the revised number of features.

    References
    ----------
    You can find more information on univariate feature selection here [UNI]_.

    .. [UNI] http://scikit-learn.org/stable/modules/feature_selection.html#univariate-feature-selection

    """
    logger.info("Feature Selection")
    # Extract model data.
    X_train = model.X_train
    y_train = model.y_train
    # Extract model parameters.
    fs_percentage = model.specs['fs_percentage']
    fs_score_func = model.specs['fs_score_func']
    # Select top features based on percentile.
    fs = SelectPercentile(score_func=fs_score_func,
                          percentile=fs_percentage)
    # Perform feature selection and get the support mask
    fsfit = fs.fit(X_train, y_train)
    support = fsfit.get_support()
    # Record the support vector so later runs can reuse the same mask
    logger.info("Saving Univariate Support")
    model.feature_map['uni_support'] = support
    # Apply the support mask to both the training and testing features
    X_train_new = model.X_train[:, support]
    X_test_new = model.X_test[:, support]
    # Count the number of new features.
    logger.info("Old Feature Count : %d", X_train.shape[1])
    logger.info("New Feature Count : %d", X_train_new.shape[1])
    # Store the reduced features in the model.
    model.X_train = X_train_new
    model.X_test = X_test_new
    # Mask the feature names and test that feature and name lengths are equal
    model.feature_names = list(itertools.compress(model.feature_names, support))
    assert X_train_new.shape[1] == len(model.feature_names), "Mismatched Features and Names"
    # Return the modified model
    return model
#
# Function save_features
#
def save_features(model, X_train, X_test, y_train=None, y_test=None):
    r"""Save new features to the model.

    Parameters
    ----------
    model : alphapy.Model
        Model object with train and test data.
    X_train : numpy array
        Training features.
    X_test : numpy array
        Testing features.
    y_train : numpy array
        Training labels.
    y_test : numpy array
        Testing labels.

    Returns
    -------
    model : alphapy.Model
        Model object with new train and test data.

    """
    logger.info("Saving New Features in Model")
    # Features are always replaced; labels only when provided.
    model.X_train, model.X_test = X_train, X_test
    for label_attr, label_value in (('y_train', y_train), ('y_test', y_test)):
        if label_value is not None:
            setattr(model, label_attr, label_value)
    return model
#
# Function create_interactions
#
def create_interactions(model, X):
    r"""Create feature interactions based on the model specifications.

    Parameters
    ----------
    model : alphapy.Model
        Model object with train and test data.
    X : numpy array
        Feature Matrix.

    Returns
    -------
    all_features : numpy array
        The new interaction features.

    Raises
    ------
    TypeError
        Unknown model type when creating interactions.

    """
    logger.info("Creating Interactions")
    # Extract model parameters
    interactions = model.specs['interactions']
    isample_pct = model.specs['isample_pct']
    model_type = model.specs['model_type']
    poly_degree = model.specs['poly_degree']
    predict_mode = model.specs['predict_mode']
    # Extract model data
    X_train = model.X_train
    y_train = model.y_train
    # Log parameters
    logger.info("Initial Feature Count : %d", X.shape[1])
    # Initialize all features
    all_features = X
    # Get polynomial features
    if interactions:
        if not predict_mode:
            # training mode: pick the top-percentile features that will
            # participate in the interactions and persist the mask so
            # prediction runs reproduce the same columns
            logger.info("Generating Polynomial Features")
            logger.info("Interaction Percentage : %d", isample_pct)
            logger.info("Polynomial Degree : %d", poly_degree)
            if model_type == ModelType.regression:
                selector = SelectPercentile(f_regression, percentile=isample_pct)
            elif model_type == ModelType.classification:
                selector = SelectPercentile(f_classif, percentile=isample_pct)
            else:
                raise TypeError("Unknown model type when creating interactions")
            selector.fit(X_train, y_train)
            support = selector.get_support()
            model.feature_map['poly_support'] = support
        else:
            # predict mode: reuse the mask saved during training
            support = model.feature_map['poly_support']
        pfeatures, pnames = get_polynomials(X[:, support], poly_degree)
        model.feature_names.extend(pnames)
        logger.info("Polynomial Feature Count : %d", pfeatures.shape[1])
        # scale the interaction terms before appending them
        pfeatures = StandardScaler().fit_transform(pfeatures)
        all_features = np.hstack((all_features, pfeatures))
        logger.info("New Total Feature Count : %d", all_features.shape[1])
    else:
        logger.info("Skipping Interactions")
    # Return all features
    assert all_features.shape[1] == len(model.feature_names), "Mismatched Features and Names"
    return all_features
#
# Function drop_features
#
def drop_features(X, drop):
    r"""Drop any specified features.

    Parameters
    ----------
    X : pandas.DataFrame
        The dataframe containing the features.
    drop : list
        The list of features to remove from ``X``.

    Returns
    -------
    X : pandas.DataFrame
        The dataframe without the dropped features.

    """
    drop_cols = []
    if drop:
        for d in drop:
            for col in X.columns:
                # derived columns share the base name before the LOFF
                # separator, so match on the base name to drop them all
                if col.split(LOFF)[0] == d:
                    drop_cols.append(col)
        logger.info("Dropping Features: %s", drop_cols)
        logger.info("Original Feature Count : %d", X.shape[1])
        # errors='ignore' keeps the drop idempotent if a column is missing
        X.drop(drop_cols, axis=1, inplace=True, errors='ignore')
        logger.info("Reduced Feature Count : %d", X.shape[1])
    return X
#
# Function remove_lv_features
#
def remove_lv_features(model, X):
    r"""Remove low-variance features.

    Parameters
    ----------
    model : alphapy.Model
        Model specifications for removing features.
    X : numpy array
        The feature matrix.

    Returns
    -------
    X_reduced : numpy array
        The reduced feature matrix.

    References
    ----------
    You can find more information on low-variance feature selection here [LV]_.

    .. [LV] http://scikit-learn.org/stable/modules/feature_selection.html#variance-threshold

    """
    logger.info("Removing Low-Variance Features")
    # Extract model parameters
    lv_remove = model.specs['lv_remove']
    lv_threshold = model.specs['lv_threshold']
    predict_mode = model.specs['predict_mode']
    if not lv_remove:
        X_reduced = X
        logger.info("Skipping Low-Variance Features")
    else:
        logger.info("Low-Variance Threshold : %.2f", lv_threshold)
        logger.info("Original Feature Count : %d", X.shape[1])
        # Training mode fits a new selector and persists its mask;
        # predict mode reuses the persisted mask.
        if predict_mode:
            support = model.feature_map['lv_support']
        else:
            selector = VarianceThreshold(threshold=lv_threshold)
            selector.fit(X)
            support = selector.get_support()
            model.feature_map['lv_support'] = support
        X_reduced = X[:, support]
        model.feature_names = list(itertools.compress(model.feature_names, support))
        logger.info("Reduced Feature Count : %d", X_reduced.shape[1])
    assert X_reduced.shape[1] == len(model.feature_names), "Mismatched Features and Names"
    return X_reduced
| [
29113,
29113,
14468,
198,
2,
198,
2,
15717,
220,
220,
1058,
12995,
20519,
198,
2,
19937,
220,
220,
220,
1058,
3033,
198,
2,
15622,
220,
220,
1058,
2901,
1367,
11,
2211,
198,
2,
198,
2,
15069,
12131,
4746,
11146,
30437,
11419,
198,
2... | 2.494762 | 16,895 |
import string
import textwrap
import time
import pytest
from seaworthy.containers.nginx import NginxContainer
from seaworthy.pytest import dockertest
from seaworthy.utils import output_lines
@pytest.fixture(scope='module')
@dockertest()
| [
11748,
4731,
198,
11748,
2420,
37150,
198,
11748,
640,
198,
198,
11748,
12972,
9288,
198,
198,
6738,
32085,
18906,
13,
3642,
50221,
13,
782,
28413,
1330,
399,
42822,
29869,
198,
6738,
32085,
18906,
13,
9078,
9288,
1330,
23423,
861,
395,
... | 3.375 | 72 |
# This file is part of PSL-Python.
# Copyright (c) 2021, Eijiro Shibusawa <phd_kimberlite@yahoo.co.jp>
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# 1. Redistributions of source code must retain the above copyright notice, this
# list of conditions and the following disclaimer.
# 2. Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
# ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR
# ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
# (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
# ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
# SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
import numpy as np
import cv2
| [
2,
770,
2393,
318,
636,
286,
6599,
43,
12,
37906,
13,
198,
2,
15069,
357,
66,
8,
33448,
11,
412,
2926,
7058,
911,
26333,
6909,
1279,
746,
67,
62,
74,
320,
527,
36890,
31,
40774,
13,
1073,
13,
34523,
29,
198,
2,
1439,
2489,
10395... | 3.383529 | 425 |
import datetime
def get_mins_between_now_and_event(event):
    """Return the number of whole minutes from now until the event starts.

    Args:
        event (dict): a single event from the event list returned from
            google_calendar's GoogleCalendar class method get_events

    Returns:
        int: minutes between the current time and the start time of the event
    """
    return convert_timedelta_to_mins(get_timedelta_between_now_and_event(event))
def get_timedelta_between_now_and_event(event):
    """Returns the time difference between the current time and the start time of the event

    Arguments:
        event {dict} -- an single event from the event list returned from google_calendar's GoogleCalendar class method get_events

    Returns:
        datetime.timedelta -- the time difference between the event's start time and the current time
    """
    start_string = event['start']['dateTime']
    # 'Z' (UTC) suffixes are not understood by fromisoformat before 3.11
    start_string = start_string.replace('Z', '+00:00')
    # BUG FIX: the previous manual split only stripped '+HH:MM' offsets and
    # crashed on '-HH:MM' ones; fromisoformat parses any ISO-8601 offset.
    # The offset is then dropped to keep the original naive-local semantics.
    # NOTE(review): comparing the event's wall-clock time with the local
    # machine clock assumes both are in the same time zone -- confirm.
    start_datetime = datetime.datetime.fromisoformat(start_string).replace(tzinfo=None)
    datetime_now = datetime.datetime.now()
    return start_datetime - datetime_now
def convert_timedelta_to_mins(timedelta):
"""Calculates and returns the minuetes from the inputted timedelta
Args:
timedelta (datetime.timedelta): the timedelta to calculate the minuetes from
Returns:
int: the minuetes calculated from the inputted timedelta
"""
return int(round(timedelta.total_seconds() / 60))
if __name__ == "__main__":
mock_event = {'created': '2020-07-20T13:56:32.000Z',
'end': {'dateTime': '2020-07-20T22:56:31+08:00', 'timeZone': 'Asia/Singapore'},
'reminders': {'overrides': [{'method': 'popup', 'minutes': 60}],
'useDefault': False},
'sequence': 0,
'start': {'dateTime': '2020-07-20T22:56:31+08:00',
'timeZone': 'Asia/Singapore'},
'status': 'confirmed',
'summary': 'test event',
'updated': '2020-07-20T13:56:32.984Z'}
print(get_mins_between_now_and_event(mock_event))
print(get_timedelta_between_now_and_event(mock_event))
timedelta = datetime.timedelta(seconds = 60)
print(convert_timedelta_to_mins(timedelta))
pass | [
11748,
4818,
8079,
198,
198,
4299,
651,
62,
42951,
62,
23395,
62,
2197,
62,
392,
62,
15596,
7,
15596,
2599,
198,
220,
220,
220,
37227,
35561,
262,
2431,
1022,
262,
1459,
640,
290,
262,
923,
640,
286,
262,
5128,
1513,
1785,
628,
220,... | 2.614019 | 1,070 |
from jesse.helpers import get_candle_source, slice_candles, np_shift
import numpy as np
from numba import njit
import talib
from typing import Union
from jesse.helpers import get_config
from collections import namedtuple
"""
https://www.tradingview.com/script/SFGEoDmG-Low-Pass-Channel-DW/
"""
LowPass = namedtuple('LowPass',['hband', 'lband', 'filt'])
@njit
@njit
| [
6738,
474,
35270,
13,
16794,
364,
1330,
651,
62,
46188,
293,
62,
10459,
11,
16416,
62,
46188,
829,
11,
45941,
62,
30846,
198,
11748,
299,
32152,
355,
45941,
198,
6738,
997,
7012,
1330,
299,
45051,
198,
11748,
3305,
571,
220,
198,
6738... | 2.652778 | 144 |
import pytz
import binascii
import os
import logging
import tzlocal
from datetime import datetime, timedelta
from collections import namedtuple
from securitybot.sql import SQLEngine
from scribe_logger.logger import ScribeLogHandler
__author__ = 'Alex Bertsch'
__email__ = 'abertsch@dropbox.com'
# http://stackoverflow.com/questions/36932/how-can-i-represent-an-enum-in-python
OPENING_HOUR = 10
CLOSING_HOUR = 18
LOCAL_TZ = tzlocal.get_localzone()
def during_business_hours(time):
'''
Checks if a given time is within business hours. Currently is true
from 10:00 to 17:59. Also checks to make sure that the day is a weekday.
Args:
time (Datetime): A datetime object to check.
'''
if time.tzinfo is not None:
here = time.astimezone(LOCAL_TZ)
else:
here = time.replace(tzinfo=pytz.utc).astimezone(LOCAL_TZ)
return (OPENING_HOUR <= here.hour < CLOSING_HOUR and
1 <= time.isoweekday() <= 5)
def get_expiration_time(start, time):
'''
Gets an expiration time for an alert.
Works by adding on a certain time and wrapping around after business hours
so that alerts that are started near the end of the day don't expire.
Args:
start (Datetime): A datetime object indicating when an alert was started.
time (Timedelta): A timedelta representing the amount of time the alert
should live for.
Returns:
Datetime: The expiry time for an alert.
'''
if start.tzinfo is None:
start = start.replace(tzinfo=pytz.utc)
end = start + time
if not during_business_hours(end):
logging.debug('Not during business hours.')
end_of_day = datetime(year=start.year,
month=start.month,
day=start.day,
hour=CLOSING_HOUR,
tzinfo=LOCAL_TZ)
delta = end - end_of_day
next_day = end_of_day + timedelta(hours=(OPENING_HOUR - CLOSING_HOUR) % 24)
# This may land on a weekend, so march to the next weekday
while not during_business_hours(next_day):
next_day += timedelta(days=1)
end = next_day + delta
return end
def create_new_alert(title, ldap, description, reason, url=None, key=None, escalation_list=None):
# type: (str, str, str, str, str, str) -> None
'''
Creates a new alert in the SQL DB with an optionally random hash.
'''
# Generate random key if none provided
if key is None:
key = binascii.hexlify(os.urandom(32))
if url is None:
# currently url field cannot be NULL
url = ''
# Insert that into the database as a new alert
SQLEngine.execute('''
INSERT INTO alerts (hash, ldap, title, description, reason, url, event_time)
VALUES (%s, %s, %s, %s, %s, %s, NOW())
''', (key, ldap, title, description, reason, url))
SQLEngine.execute('''
INSERT INTO user_responses (hash, ldap, comment, performed, authenticated, updated_at)
VALUES (%s, ldap, '', false, false, NOW())
''', (key,))
if escalation_list is not None and isinstance(escalation_list, list):
for escalation in escalation_list:
SQLEngine.execute('INSERT INTO escalation (hash, ldap, delay_in_sec, escalated_at) VALUES (%s, %s, %s, NULL)',
(key, escalation.ldap, escalation.delay_in_sec))
SQLEngine.execute('INSERT INTO alert_status (hash, status) VALUES (%s, 0)', (key,))
logging.info("Created new alert: {}".format({
'title': title,
'ldap': ldap,
'description': description,
'reason': reason,
'url': url,
'escalation': escalation_list
}))
| [
11748,
12972,
22877,
198,
11748,
9874,
292,
979,
72,
198,
11748,
28686,
198,
11748,
18931,
198,
11748,
256,
89,
12001,
198,
6738,
4818,
8079,
1330,
4818,
8079,
11,
28805,
12514,
198,
6738,
17268,
1330,
3706,
83,
29291,
198,
198,
6738,
2... | 2.400643 | 1,555 |
#!/usr/bin/python
# Copyright 2010 Google Inc.
# Licensed under the Apache License, Version 2.0
# http://www.apache.org/licenses/LICENSE-2.0
# Google's Python Class
# http://code.google.com/edu/languages/google-python-class/
import sys
import re
from operator import itemgetter
"""Baby Names exercise
Define the extract_names() function below and change main()
to call it.
For writing regex, it's nice to include a copy of the target
text for inspiration.
Here's what the html looks like in the baby.html files:
...
<h3 align="center">Popularity in 1990</h3>
....
<tr align="right"><td>1</td><td>Michael</td><td>Jessica</td>
<tr align="right"><td>2</td><td>Christopher</td><td>Ashley</td>
<tr align="right"><td>3</td><td>Matthew</td><td>Brittany</td>
...
Suggested milestones for incremental development:
-Extract the year and print it
-Extract the names and rank numbers and just print them
-Get the names data into a dict and print it
-Build the [year, 'name rank', ... ] list and print it
-Fix main() to use the extract_names list
"""
def extract_names(filename):
"""
Given a file name for baby.html, returns a list starting with the year string
followed by the name-rank strings in alphabetical order.
['2006', 'Aaliyah 91', Aaron 57', 'Abagail 895', ' ...]
"""
# +++your code here+++
f = open(filename, 'rU')
file_text = f.read()
f.close()
t_year = re.findall(r'Popularity in (\d+)', file_text)
# t = re.findall(r'Popularity in (\d+).<td>(\d+)</td><td>(\w+)</td><td>(\w+)', f.read(), re.DOTALL)
t_names = re.findall(r'<td>(\d+)</td><td>(\w+)</td><td>(\w+)', file_text)
l_d = get_names(t_names)
result = []
result.extend(t_year)
for name in l_d:
result.append(name['name'] + ' ' + name['rank'])
return '\n'.join(result)
if __name__ == '__main__':
main()
| [
2,
48443,
14629,
14,
8800,
14,
29412,
198,
2,
15069,
3050,
3012,
3457,
13,
198,
2,
49962,
739,
262,
24843,
13789,
11,
10628,
362,
13,
15,
198,
2,
2638,
1378,
2503,
13,
43073,
13,
2398,
14,
677,
4541,
14,
43,
2149,
24290,
12,
17,
... | 2.727941 | 680 |
# -*- coding: iso-8859-1 -*-
"""
MoinMoin - (re)building of Xapian indices
@copyright: 2007 MoinMoin:KarolNowak
@license: GNU GPL, see COPYING for details.
"""
import MoinMoin.events as ev
def handle_renamed(event):
"""Updates Xapian index when a page changes its name"""
request = event.request
if request.cfg.xapian_search:
index = _get_index(request)
if index and index.exists():
index.update_item(event.old_page.page_name, now=False)
index.update_item(event.page.page_name)
def handle_copied(event):
"""Updates Xapian index when a page is copied"""
request = event.request
if request.cfg.xapian_search:
index = _get_index(request)
if index and index.exists():
index.update_item(event.page.page_name)
def handle_changed(event):
"""Updates Xapian index when a page is changed"""
request = event.request
if request.cfg.xapian_search:
index = _get_index(request)
if index and index.exists():
index.update_item(event.page.page_name)
def handle_deleted(event):
"""Updates Xapian index when a page is deleted"""
event = ev.PageChangedEvent(event.request, event.page, event.comment)
handle_changed(event)
def handle_attachment_change(event):
"""Updates Xapian index when attachment is added or removed"""
request = event.request
if request.cfg.xapian_search:
index = _get_index(request)
if index and index.exists():
index.update_item(event.pagename, event.filename)
| [
2,
532,
9,
12,
19617,
25,
47279,
12,
3459,
3270,
12,
16,
532,
9,
12,
198,
37811,
198,
220,
220,
220,
4270,
259,
16632,
259,
532,
357,
260,
8,
16894,
286,
1395,
499,
666,
36525,
628,
220,
220,
220,
2488,
22163,
4766,
25,
4343,
42... | 2.537721 | 623 |
import main
medumList = [
[
"000007910007004600508903400920305000801720000400001250600000000000100008219030000",
"000003060080050700015080040800300009349006000206090370000400506700008002090000430",
"308062015400000060060070008802400107000008300000053280000500670200000000010020540",
"000410378000700029304000500053000006208096005000007000049000050500600904000040603",
"000105984803006007090028010005600800000012000240000000001060300730080009902400000",
"900070030057830900000500008500008070000063500000400009032057600100200007040090012",
"000008700006002500452000086970800000000100400060000005003096078000081043190040050",
"900000530000000600610094000000051000029730085061080300000000700832005090740309000",
"910680000407000065080200701200001070000000008000045020000009580090060037760000902",
"085000000030000160000060000560400000300000680090083200800340000006871345403000890",
"100000040006700000284006073070000000549010087000250060010300002063029010000560000",
"687010000910000060000680090009002080100008730038764200040000000706300051001000000",
"000000026760000150900000300107002400300745081000000035001050600000084000540013070",
"006000000200507300003600084040752030807000540000009000962000005100024063070000020",
"230600100000010000000057004870260415004000700000003920050940831000130600010000000",
"100098473000002508500000901300000692020049000001000000903027000600000004004600019",
"680000340050000090000420008079000080030000200062598000091206400045800000700009050",
"200410007080700002000900050007200609010006500000000080005602490006800025700540060",
"030008160005102009000704000000020013002905687007003000026870040050009000400006000",
"609043001000020340070098200060310000001000003040700005500000002007200508910800700",
"400913060900080042000000197000400680000002030704500000001005020340006050200000403",
"687010000910000060000680090009002080100008730038764200040000000706300051001000000",
"080020000000000687070603250600100009040598160090004030960070000400000801800300000",
"100382490300000010280000000000700960063200100000050300072004000830000000509160820",
"000500000950000080038007250103000009009704162046219000017000300000050720000000090",
"107032006306700000080190050030250040009000000600904001060400085000000300415070000",
"000000008000050901600007005030060090500304100064029003070090004009081760250003000",
"300020000700000003800400060087042100000068270250007800160000980000085000030210600",
"000079000000008210900162034003706800710005906500800000270910500000000000836000000",
"000004903004019600109086204010940020030602090060070000650001070090000008000000002",
"308015000409700005561008000904000530050001049000000800030007006700390200000080700",
"000900003004810020020007108000000974000003000006291000700086000000324786008000052",
"000000830000028005204070006908004501060000409000690008672001080030009050500007000",
"960401300070900560000700020406100000008060000000048106500607900000000207820000603",
"007003000040005087560420900700508000000130000009700030020070305850000706003006090",
"910006207087000100204009000009508012060400000030000070000307000020000430005090781",
"029000000000600090000740102907000003030807010050093027206430801080000300040900000",
"031000060609007001807003400750090020218040030000800000000400003070560000002000145",
"100708900600000000008601074010300096000000200824000750002890001380520000000063000",
"000803200503000000002094000000700400926450008307002000009100032280009040700200100",
"031000060609007001807003400750090020218040030000800000000400003070560000002000145",
"008704003107300200302000607000000500700005160080031070006008005400050000850940000",
"054030007607100432003200000000000005008640000100980370305708090006000500000060700",
"130685000000000002060019038001000040050403000300800006427560900005002080080007000",
"000080700832050000705390010013500820200030049980000000070000000409060185001000000",
"400050802000600000003400196000076080009000000007005603000514000074809060100300029",
"600071000000309270300256900008030700901064000070098010010600004000910000000020003",
"096000008000300040000240000001000089000803700007590000002451000105006900370980160",
"000003060080050700015080040800300009349006000206090370000400506700008002090000430",
"300001700070000345000003090002000470000300102760109030000050013910000000683900004",
"640030007501070900000000010004908060080003020000400000400157030208300040750000096",
"600984000001200400509000087000300690000420810900061700050000000004100500300045008",
"000013068009600002000054007100040006000026050052000070200070000095160030407000210",
"500009100000062508010008764065013020240007010000000000070000056058700300000040800",
"000007203080004090427100000801000000300006000050901300704050010500010007100049580",
"040027000800640000070030908410900000003005002008000016084000000096000700150490820",
"002530049500820006030000000007090000096000400004700061000060000928370005063080090",
"008090050490107023050200009000008500100060402000010007531602040040000000007000630",
"100000040006700000284006073070000000549010087000250060010300002063029010000560000",
"080070102004000090209000064000900027040607910007050080020000000001706300000890206",
"000035002010708500250000031370260005009501063001070000040810000096000000800004000",
"342100007000007900007060050080012400734000100000670530000000800906000005001009720",
"870035902040000010000901007000000500010060000003024079000009650000610040760300091",
"000800040800340020400006108680000904700500001009700000008000009042005086090003450",
"010540008007830940080600100002107000700006002106200000300415000029000500060000003",
"400008002060020805000060030396000020007002040204000570072084000030000009509610000",
"103500027020100600007040000740605100610030098000219000005906000000001000000073006",
"130685000000000002060019038001000040050403000300800006427560900005002080080007000",
"031070000640000000027005003304009501005000420060504008090750030008600740000000090",
"080296000430580006000003801004000000000300102010805470000010054000400080200058900",
"030400060000910000205080097097032005503004000100598730060300000004071000000000400",
"685079002210508060704002000000305800850020000003090006008000420000607000100000003",
"003906701160500000050100090040301006200009030000007005087000004002000360506000809",
"040027000800640000070030908410900000003005002008000016084000000096000700150490820",
"040080006007400080008500003000000600706000402400063917070310005200000000035070029",
"010800009984000600000600030830000100000200096090750280400180000370902000001000920",
"120600409000004102006010500608100000050342000402008000807000305304000026000400000",
"000008100000060008600032079062800000400050000810340006080027003000000600100680254",
"000002530610000020000300649000007000045000018096820003300045060400670000001090004",
"046802100008040000073006084600000010810000003000560000409050000000984670200010009",
"070030200005002900400900000004205090010390706200000005192700030047500100000103000",
"008704003107300200302000607000000500700005160080031070006008005400050000850940000",
"038010704060004590070080002000906000726801300000400050250000000007000005900068400",
"013000820050001000000070500000057096040300008000280703029063000308000000061920004",
"000204000800000000090000830030600000940830050000009610209400308580003420073900001",
"000008700006002500452000086970800000000100400060000005003096078000081043190040050",
"080070002000060950009538600560000007300080005100204000850006030000000080403801020",
"040001000500040013106807000061053000000200069308090070200000700703900451000004000",
"009400008001063420470020500694000000000390040050000100000070000728009013906000200",
"070302008201050003800070000300200080006000300450803009607009015040000000098520000",
"010072004096030257008009003000410000000700029700020500960000000000300740007084100",
"602001400185300076000000000000002040070098010000000050006400190234000800019083020",
"300020000700000003800400060087042100000068270250007800160000980000085000030210600",
"000090501000000020830020000004016750300075018050000090410000902703000100020650004",
"000000000680905400000003071470000806102008000000064102706340010305800000090200700",
"000760000004500102005102079008400003002010080190003004400000801006000000050309026",
"005300020962000045407006000580027900000083000024005600000000030040598060000000470",
"029000000000600090000740102907000003030807010050093027206430801080000300040900000",
"600000240002680095000254863090005001080020430043000000005000000000162900000030600",
"910000000680005340000000100070002085530004210002598030305070006000340800000000400",
],
[
"342687915197254683568913427926345871851726349473891256685479132734162598219538764",
"427913865683254791915687243871345629349726158256891374132479586764538912598162437",
"378962415429185763561374928832496157745218396196753284984531672257649831613827549",
"962415378185763429374928561753284196218396745496157832649831257531672984827549613",
"627135984813946257594728613175694832369812745248357196451269378736581429982473561",
"984672135257831946613549728561928473429763581378415269832157694196284357745396812",
"319658724786432519452917386974865231835129467261374895543296178627581943198743652",
"984627531257813649613594827378451962429736185561982374196248753832175496745369218",
"915687243427913865683254791256891374349726158871345629132479586598162437764538912",
"685132479734598162219764538568427913342915687197683254851349726926871345473256891",
"157832946396745128284196573672984351549613287831257469415378692763429815928561734",
"687915342913427568254683197479132685162598734538764219345871926726349851891256473",
"415378926763429158928561347157832469396745281284196735831257694672984513549613872",
"496238157218547396753691284649752831827316549531489672962873415185924763374165928",
"238694157547812396691357284873269415924581763165473928752946831489135672316728549",
"162598473479132568538764921345871692726349185891256347913427856687915234254683719",
"687915342254683197913427568479132685538764219162598734891256473345871926726349851",
"269415837581763942473928156357284619812396574694157283135672498946831725728549361",
"734598162685132479219764538568427913342915687197683254926871345851349726473256891",
"629543871158627349374198256865319427791452683243786915586974132437261598912835764",
"427913568915687342683254197132479685598162734764538219871345926349726851256891473",
"687915342913427568254683197479132685162598734538764219345871926726349851891256473",
"586427913324915687179683254658132479743598162291764538962871345437256891815349726",
"157382496396475218284916753415738962763249185928651374672894531831527649549163827",
"472568913951342687638197254123685479589734162746219538817926345394851726265473891",
"157832496396745218284196753831257649549613827672984531763429185928561374415378962",
"915432678427658931683917245132865497598374126764129583871296354349581762256743819",
"345926718726851493891473562687342159913568274254197836162734985479685321538219647",
"321479685647538219985162734493726851718345926562891473274913568159687342836254197",
"586724913324519687179386254815943726437652891962178345658231479291467538743895162",
"378415962429763185561928374984672531257831649613549827832157496745396218196284753",
"187962543934815627625437198213658974859743261476291835742586319591324786368179452",
"157946832396128745284573196928734561763815429415692378672351984831469257549287613",
"962451378374982561185736429496175832218369745753248196531627984649813257827594613",
"197683254342915687568427913734598162685132479219764538926871345851349726473256891",
"913856247687234195254719863479568312162473958538921674891347526726185439345692781",
"129358764374612598865749132917524683432867915658193427296435871581276349743981256",
"531984762649257381827613459753196824218745936496832517185429673374561298962378145",
"145738962673249185298651374517382496936475218824916753762894531381527649459163827",
"694813257513627984872594613158736429926451378347982561469175832281369745735248196",
"531984762649257381827613459753196824218745936496832517185429673374561298962378145",
"568724913197386254342519687219467538734895162685231479926178345473652891851943726",
"254836917687159432913274658479321865538647129162985374345718296726493581891562743",
"132685479598734162764219538871926345256473891349851726427568913915342687683197254",
"196284753832157496745396218613549827257831649984672531378415962429763185561928374",
"496751832218693745753482196531276984649138257827945613962514378374829561185367429",
"692871435185349276347256981568132749921764358473598612719683524234915867856427193",
"496175238218369547753248691531627489649813752827594316962451873185736924374982165",
"427913865683254791915687243871345629349726158256891374132479586764538912598162437",
"349581726871296345256743891132865479598374162764129538427658913915432687683917254",
"649831257531672984827549613374928561185763429962415378496157832218396745753284196",
"672984351831257469549613287415378692763429815928561734157832946284196573396745128",
"724913568519687342386254197178345926943726851652891473231479685895162734467538219",
"586479132437162598912538764865913427243687915791254683374891256158726349629345871",
"915867243683524791427193865871435629349276158256981374764358912598612437132749586",
"549827631831649275672531948415962387763185492928374516284753169396218754157496823",
"672531849549827136831649572157496328396218457284753961415962783928374615763185294",
"218396754496157823753284169374928516185763492962415387531672948649831275827549631",
"157832946396745128284196573672984351549613287831257469415378692763429815928561734",
"685479132734162598219538764568913427342687915197254683926345871851726349473891256",
"984135672613728549257946831378269415429581763561473928745812396196357284832694157",
"342195687568247913197863254685312479734958162219674538473526891926781345851439726",
"871435962349276815256981437427193586915867324683524179132749658598612743764358291",
"926871543851349627473256198685132974734598261219764835568427319342915786197683452",
"613549728257831946984672135832157694745396812196284357378415269429763581561928473",
"415378692763429815928561734396745128157832946284196573672984351831257469549613287",
"193568427524197683867342915749685132612734598358219764435926871276851349981473256",
"132685479598734162764219538871926345256473891349851726427568913915342687683197254",
"531276984649138257827945613374829561185367429962514378496751832218693745753482196",
"781296345439581726526743891674129538958374162312865479863917254195432687247658913",
"931427568678915342245683197497132685583764219126598734762349851354871926819256473",
"685479132219538764734162598926345871851726349473891256568913427342687915197254683",
"823946751169573482754128693948351276275469138631287945387692514492815367516734829",
"549827631831649275672531948415962387763185492928374516284753169396218754157496823",
"543781296627439581198526743319247658786195432452863917974312865261958374835674129",
"613827549984531672257649831832496157745218396196753284429185763378962415561374928",
"123685479589734162746219538638197254951342687472568913817926345394851726265473891",
"743598162291764538658132479962871345437256891815349726586427913324915687179683254",
"984762531613459827257381649832517496745936218196824753378145962429673185561298374",
"946832157128745396573196284692378415815429763734561928469257831351984672287613549",
"971638254685472913423951687734265891518394726269817345192746538347589162856123479",
"568724913197386254342519687219467538734895162685231479926178345473652891851943726",
"538219764162734598479685132345926871726851349891473256254197683687342915913568427",
"613549827257831649984672531832157496745396218196284753429763185378415962561928374",
"357284196812396745694157832135672984946831257728549613269415378581763429473928561",
"319658724786432519452917386974865231835129467261374895543296178627581943198743652",
"685479312734162958219538674568913247342687195197254863851726439926345781473891526",
"849531627572649813136827594961753248457218369328496175294185736783962451615374982",
"269415378581763429473928561694157832812396745357284196135672984728549613946831257",
"974312658261958743835674291319247586786195324452863179627439815543781962198526437",
"315672984496831257278549613629415378851763429743928561964157832182396745537284196",
"692871435185349276347256981568132749473598612921764358856427193234915867719683524",
"345926718726851493891473562687342159913568274254197836162734985479685321538219647",
"672894531549163827831527649284916753396475218157382496415738962763249185928651374",
"913427685687915423254683971479132856162598347538764192726349518345871269891256734",
"219764538734598162685132479568427913342915687197683254473256891926871345851349726",
"815349726962871345437256891586427913179683254324915687291764538743598162658132479",
"129358764374612598865749132917524683432867915658193427296435871581276349743981256",
"658913247432687195917254863296345781581726439743891526865479312374162958129538674",
"913427568687915342254683197479132685538764219162598734345871926726349851891256473",
]
]
for index, medium in enumerate(medumList[0], start=0):
result,duration = main.sudokuLine(medium)
print("Medium test " + str(index + 1) + " " + ("PASS" if result == medumList[1][index] else "FAIL") + " :: " + str(duration) + "ms")
# print(medium)
# print(result)
| [
11748,
1388,
198,
198,
1150,
388,
8053,
796,
685,
198,
197,
58,
198,
197,
197,
1,
20483,
3720,
12825,
9879,
3510,
405,
33042,
3829,
2682,
28694,
1238,
22515,
830,
23,
29326,
2167,
22914,
2388,
1065,
35638,
8269,
18005,
2388,
6469,
1129,... | 2.392138 | 7,403 |
# Generated by Django 2.1.7 on 2019-04-16 12:39
from django.db import migrations
| [
2,
2980,
515,
416,
37770,
362,
13,
16,
13,
22,
319,
13130,
12,
3023,
12,
1433,
1105,
25,
2670,
198,
198,
6738,
42625,
14208,
13,
9945,
1330,
15720,
602,
628
] | 2.766667 | 30 |
# -*- coding: utf-8 -*-
from common.mymako import render_mako_context, render_json
from blueking.component.shortcuts import get_client_by_request
from utils import get_job_instance_id, get_job_log_content
from models import Operations, ResourceData, CeleryTask
from datetime import datetime
| [
2,
532,
9,
12,
19617,
25,
3384,
69,
12,
23,
532,
9,
12,
198,
198,
6738,
2219,
13,
1820,
76,
25496,
1330,
8543,
62,
76,
25496,
62,
22866,
11,
8543,
62,
17752,
198,
6738,
4171,
3364,
13,
42895,
13,
19509,
23779,
1330,
651,
62,
163... | 3.210526 | 95 |
from .interfaces import api as interfaces_api
from flask import Blueprint
from flask_restplus import Api
api_blueprint = Blueprint('interfaces service', __name__)
api = Api(
api_blueprint,
title=f'Dependencies Service Public API',
version='1.0',
description='Service for managing interface dependencies between services',
)
api.add_namespace(interfaces_api, path='/v1')
| [
6738,
764,
3849,
32186,
1330,
40391,
355,
20314,
62,
15042,
198,
6738,
42903,
1330,
39932,
198,
6738,
42903,
62,
2118,
9541,
1330,
5949,
72,
198,
198,
15042,
62,
17585,
4798,
796,
39932,
10786,
3849,
32186,
2139,
3256,
11593,
3672,
834,
... | 3.268908 | 119 |
from Crypto.Cipher import AES
from Crypto.Hash import SHA256
password = ("anything")
hash_obj = SHA256.new(password.encode('utf-8'))
hkey = hash_obj.digest()
msg = "Hello stackoverflow!"
cipher_text = encrypt(msg)
print(cipher_text)
plaintext = decrypt(cipher_text)
print(plaintext)
| [
198,
6738,
36579,
13,
34,
10803,
1330,
34329,
220,
220,
220,
220,
198,
6738,
36579,
13,
26257,
1330,
25630,
11645,
198,
198,
28712,
796,
5855,
49459,
4943,
220,
220,
220,
220,
198,
17831,
62,
26801,
796,
25630,
11645,
13,
3605,
7,
287... | 2.646018 | 113 |
import heapq
from collections import Counter
"""
Success
Details
Runtime: 28 ms, faster than 92.45% of Python3 online submissions for Reorganize String.
Memory Usage: 14.2 MB, less than 84.12% of Python3 online submissions for Reorganize String.
Next challenges:
Rearrange String k Distance Apart
Show off your acceptance:
Time Submitted
Status
Runtime
Memory
Language
07/11/2021 21:06 Accepted 28 ms 14.2 MB python3
"""
| [
11748,
24575,
80,
198,
6738,
17268,
1330,
15034,
628,
198,
198,
37811,
198,
33244,
198,
24259,
220,
198,
41006,
25,
2579,
13845,
11,
5443,
621,
10190,
13,
2231,
4,
286,
11361,
18,
2691,
22129,
329,
797,
9971,
1096,
10903,
13,
198,
308... | 3.455285 | 123 |
import aiohttp, asyncio, logging, os, io
import hangups
import plugins
logger = logging.getLogger(__name__)
@asyncio.coroutine
def _handle_forwarding(bot, event, command):
"""Handle message forwarding"""
# Test if message forwarding is enabled
if not bot.get_config_suboption(event.conv_id, 'forwarding_enabled'):
return
forward_to_list = bot.get_config_suboption(event.conv_id, 'forward_to')
if forward_to_list:
logger.debug("{}".format(forward_to_list))
for _conv_id in forward_to_list:
html_identity = "<b><a href='https://plus.google.com/u/0/{}/about'>{}</a></b><b>:</b> ".format(event.user_id.chat_id, event.user.full_name)
html_message = event.text
if not event.conv_event.attachments:
yield from bot.coro_send_message( _conv_id,
html_identity + html_message )
for link in event.conv_event.attachments:
filename = os.path.basename(link)
r = yield from aiohttp.request('get', link)
raw = yield from r.read()
image_data = io.BytesIO(raw)
image_id = None
try:
image_id = yield from bot._client.upload_image(image_data, filename=filename)
if not html_message:
html_message = "(sent an image)"
yield from bot.coro_send_message( _conv_id,
html_identity + html_message,
image_id=image_id )
except AttributeError:
yield from bot.coro_send_message( _conv_id,
html_identity + html_message + " " + link )
| [
11748,
257,
952,
4023,
11,
30351,
952,
11,
18931,
11,
28686,
11,
33245,
198,
198,
11748,
289,
2303,
862,
198,
198,
11748,
20652,
628,
198,
6404,
1362,
796,
18931,
13,
1136,
11187,
1362,
7,
834,
3672,
834,
8,
628,
198,
198,
31,
292,
... | 1.91219 | 968 |
from pyrediseasyio.io.io_group import IOGroup
from pyrediseasyio.html.html_io_group import HMTLIOGroup
from pyrediseasyio.html.html_io import HTMLIO
from pyrediseasyio.io.single_io import SingleIO
from pyrediseasyio.io.boolean_io import BooleanIO
from pyrediseasyio.io.string_io import StringIO
from pyrediseasyio.io.float_io import FloatIO
from pyrediseasyio.io.integer_io import IntIO
from pyrediseasyio.io.trigger_io import TriggerIO
| [
6738,
12972,
445,
786,
4107,
952,
13,
952,
13,
952,
62,
8094,
1330,
314,
7730,
3233,
198,
6738,
12972,
445,
786,
4107,
952,
13,
6494,
13,
6494,
62,
952,
62,
8094,
1330,
367,
13752,
31271,
7730,
3233,
198,
6738,
12972,
445,
786,
4107... | 2.89404 | 151 |
#!/usr/bin/env python3
#
# Copyright (c) 2015, The Linux Foundation.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# 1. Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
#
# 2. Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
#
# 3. Neither the name of the copyright holder nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
import unittest
import mock
import email
import re
import pwcli
# Canned patch attributes used to build fake patchwork Patch objects in tests.
# NOTE(review): the keys presumably mirror a Patchwork REST API patch payload
# (id, msgid, state, submitter, delegate, mbox, ...) — confirm against pwcli.
FAKE_ATTRIBUTES = {
    'id': '11',
    'web_url': 'http://www.example.com/',
    'msgid': '12345678',
    'date': '2020-04-23T15:06:27',
    'name': 'nnnn',
    'commit_ref': '12345678',
    'state' : 'ssss',
    'submitter': {'name': 'Ed Example',
                  'email': 'ed@example.com'},
    'delegate': {'username': 'dddd'},
    'mbox': 'http://www.example.com',
    'pull_url': None,
    }
# Minimal patch email ([1/7] of a series) in mbox form, used as raw test input.
# The commit-log body text itself is a placeholder and irrelevant to the tests.
TEST_MBOX = 'Content-Type: text/plain; charset="utf-8"\nMIME-Version: 1.0\nContent-Transfer-Encoding: 7bit\nSubject: [1/7] foo\nFrom: Dino Dinosaurus <dino@example.com>\nX-Patchwork-Id: 12345\nMessage-Id: <11111@example.com>\nTo: list@example.com\nDate: Thu, 10 Feb 2011 15:23:31 +0300\n\nFoo commit log. Ignore this text\n\nSigned-off-by: Dino Dinosaurus <dino@example.com>\n\n---\nFIXME: add the patch here\n'
if __name__ == '__main__':
    unittest.main()
| [
2,
48443,
14629,
14,
8800,
14,
24330,
21015,
18,
198,
2,
198,
2,
15069,
357,
66,
8,
1853,
11,
383,
7020,
5693,
13,
198,
2,
1439,
2489,
10395,
13,
198,
2,
198,
2,
2297,
396,
3890,
290,
779,
287,
2723,
290,
13934,
5107,
11,
351,
... | 2.918224 | 856 |
import matplotlib
matplotlib.use('TkAgg')
import matplotlib.pyplot as plt
import matplotlib.image as mpimage
import numpy as np
import collections
import plotting as plot
# Define a class to receive the characteristics of each line detection
| [
11748,
2603,
29487,
8019,
198,
6759,
29487,
8019,
13,
1904,
10786,
51,
74,
46384,
11537,
198,
11748,
2603,
29487,
8019,
13,
9078,
29487,
355,
458,
83,
198,
11748,
2603,
29487,
8019,
13,
9060,
355,
29034,
9060,
198,
198,
11748,
299,
3215... | 3.275 | 80 |
import pytest
import pandas as pd
from s64da_benchmark_toolkit import streams
@pytest.fixture
@pytest.fixture
| [
198,
11748,
12972,
9288,
198,
11748,
19798,
292,
355,
279,
67,
198,
198,
6738,
264,
2414,
6814,
62,
26968,
4102,
62,
25981,
15813,
1330,
15190,
628,
198,
31,
9078,
9288,
13,
69,
9602,
628,
198,
31,
9078,
9288,
13,
69,
9602,
628,
628... | 2.702128 | 47 |
import torch
import torch.nn as nn
import torch.nn.functional as F
import onmt
from onmt.models.transformer_layers import MultiHeadAttention, Linear
from onmt.modules.relative_attention import RelPartialLearnableMultiHeadAttn
from onmt.modules.optimized.relative_self_attention import RelativeSelfMultiheadAttn
from onmt.modules.pre_post_processing import PrePostProcessing
from onmt.utils import flip
from onmt.modules.bottle import Bottle
from onmt.modules.linear import XavierLinear as Linear
from onmt.modules.linear import XavierLinear
from onmt.modules.linear import group_linear, FeedForwardSwish, FeedForward
from onmt.modules.attention import MultiHeadAttention
from onmt.modules.dropout import VariationalDropout
from onmt.modules.relative_attention import RelPartialLearnableMultiHeadAttn
from onmt.modules.optimized.encdec_attention import EncdecMultiheadAttn
from onmt.modules.optimized.feed_forward import PositionWiseFeedForward
from onmt.modules.multilingual_factorized.linear import MFWPositionWiseFeedForward
from onmt.modules.multilingual_factorized.encdec_attention import MFWEncdecMultiheadAttn
from onmt.modules.multilingual_factorized.relative_attention import MFWRelativeSelfMultiheadAttn
from onmt.modules.multilingual_partitioned.linear import MPPositionWiseFeedForward
from onmt.modules.multilingual_partitioned.encdec_attention import MPEncdecMultiheadAttn
from onmt.modules.multilingual_partitioned.relative_attention import MPRelativeSelfMultiheadAttn
from onmt.modules.convolution import ConformerConvBlock
| [
11748,
28034,
198,
11748,
28034,
13,
20471,
355,
299,
77,
198,
11748,
28034,
13,
20471,
13,
45124,
355,
376,
198,
11748,
319,
16762,
198,
198,
6738,
319,
16762,
13,
27530,
13,
7645,
16354,
62,
75,
6962,
1330,
15237,
13847,
8086,
1463,
... | 3.533181 | 437 |
import pytest
from voucher_selection.server.config import (
DBConfig,
ServerConfig,
create_db_config,
create_server_config,
)
| [
11748,
12972,
9288,
198,
198,
6738,
40621,
62,
49283,
13,
15388,
13,
11250,
1330,
357,
198,
220,
220,
220,
360,
2749,
261,
5647,
11,
198,
220,
220,
220,
9652,
16934,
11,
198,
220,
220,
220,
2251,
62,
9945,
62,
11250,
11,
198,
220,
... | 2.642857 | 56 |
# Text-adventure exercise (LPTHW-style): input() prompts drive if/elif
# branches; every path just prints an outcome.
print("""You enter a dark room with two doors.
Do you go through door #1 or door #2?""")
door = input("> ")
if door == "1":
    print("There's a giant bear here eating a cheese cake.")
    print("What do you do?")
    print("1. Take the cake.")
    print("2. Scream at the bear.")
    bear = input("> ")
    if bear == "1":
        print("The bear eats your face off. Good job!")
    elif bear == "2":
        print("The bear eats your legs off. Good job!")
    else:
        print(f"Well, doing {bear} is probably better.")
        print("Bear runs away.")
# NOTE(review): the Cthulhu scene below runs unconditionally — there is no
# `elif door == "2"` branch in this variant; verify the flattening is intended.
print("You stare into the endless abyss at Cthulhu's retina.")
print("1. Blueberries.")
print("2. Yellow jacket clothespins.")
print("3. Understanding revolvers yelling melodies.")
insanity = input("> ")
if insanity == "1" or insanity == "2":
    print("Your body survives powered by a mind of jello.")
    print("Good job!")
else:
    print("The insanity rots your eyes into a pool of muck.")
    print("Good job!")
# Unconditional ending after either insanity outcome.
print("You stumble around and fall on a knife and die. Good")
| [
4798,
7203,
15931,
1639,
3802,
257,
3223,
2119,
351,
734,
8215,
13,
220,
198,
5211,
345,
467,
832,
3420,
1303,
16,
393,
3420,
1303,
17,
1701,
1,
4943,
220,
198,
220,
198,
9424,
796,
5128,
7,
5320,
366,
8,
220,
198,
220,
198,
361,
... | 2.646778 | 419 |
import queue, os, logging
import threading
from copy import copy
import time
from dquant.constants import Constants
from dquant.markets._okex_future_ws import OkexFutureWs
import asyncio
# Return the last buy/sell/delete record: getorder does not report orders that
# were already filled or deleted, while executed orders are kept in getHist
# (only the 10 most recent are retained).
if __name__ == "__main__":
    amount = 1
    os.environ[Constants.DQUANT_ENV] = "dev"
    # NOTE(review): OkexTickers is not imported in this fragment — presumably
    # defined elsewhere in this module; confirm before running standalone.
    okex = OkexTickers('btc_usd_this_week')
    okex.start()
    time.sleep(5)
    # Cancel every order that has not been filled yet.
    okex.q.put({'type': 'Delete all active orders'})
    time.sleep(5)
    # Quote both sides: one order at the best bid, one at the best ask.
    book = okex.okex.getDepth()
    ticker_bp = book['bids'][0]['price']
    ticker_sp = book['asks'][0]['price']
    okex.q.put({'type': 'New order', 'price': ticker_bp, 'amount': amount, 'side': 'long'})
    m = okex.q_output.get()
    logging.info('New order: Long %s @%s, lastPosition: %s' % (amount, ticker_bp, okex.lastPosition['long']))
    okex.lastPosition['long'] = m['amount']
    okex.q.put({'type': 'New order', 'price': ticker_sp, 'amount': amount, 'side': 'short'})
    # BUG FIX: the short-order acknowledgement was read but discarded and the
    # stale long-order message `m` was reused below; capture it instead.
    m = okex.q_output.get()
    logging.info('New order: Short %s @%s, lastPosition: %s' % (amount, ticker_sp, okex.lastPosition['short']))
    okex.lastPosition['short'] = m['amount']
    # Install a fresh event loop and run the module's async entry point.
    policy = asyncio.get_event_loop_policy()
    policy.set_event_loop(policy.new_event_loop())
    loop = asyncio.get_event_loop()
    asyncio.set_event_loop(loop)
    loop.run_until_complete(main())
| [
11748,
16834,
11,
28686,
11,
18931,
198,
11748,
4704,
278,
198,
6738,
4866,
1330,
4866,
198,
198,
11748,
640,
198,
198,
6738,
288,
40972,
13,
9979,
1187,
1330,
4757,
1187,
198,
6738,
288,
40972,
13,
34162,
13557,
2088,
87,
62,
37443,
... | 1.988701 | 708 |
#!/usr/bin/env python3
import numpy as np
from narratex.base import load_pickle, save_json
from narratex.clustering import get_group2name_by_freq
from narratex.visualization import make_dendrogram_dict
if __name__ == '__main__':
    import argparse

    # Command line: three positional paths, parsed and forwarded to main().
    cli = argparse.ArgumentParser()
    for positional in ('group2event', 'pairwise_weights', 'outfile'):
        cli.add_argument(positional)
    main(cli.parse_args())
| [
2,
48443,
14629,
14,
8800,
14,
24330,
21015,
18,
198,
198,
11748,
299,
32152,
355,
45941,
198,
198,
6738,
6664,
378,
87,
13,
8692,
1330,
3440,
62,
27729,
293,
11,
3613,
62,
17752,
198,
6738,
6664,
378,
87,
13,
565,
436,
1586,
1330,
... | 2.765432 | 162 |
import math
from .searcher import Searcher
from pychemia import pcm_log
| [
11748,
10688,
198,
198,
6738,
764,
325,
283,
2044,
1330,
42016,
2044,
198,
6738,
12972,
15245,
544,
1330,
279,
11215,
62,
6404,
628
] | 3.217391 | 23 |
# -*- coding: utf-8 -*-
from copy import deepcopy
| [
2,
532,
9,
12,
19617,
25,
3384,
69,
12,
23,
532,
9,
12,
198,
198,
6738,
4866,
1330,
2769,
30073,
628
] | 2.47619 | 21 |
import argparse
import copy
from os import path
from pathlib import Path
import torch
# NOTE(review): this makes every newly created tensor a CUDA tensor at import
# time, so the script requires a CUDA-capable GPU and fails on CPU-only hosts.
torch.set_default_tensor_type('torch.cuda.FloatTensor')
from torchvision.utils import save_image
from mpl_toolkits.mplot3d import Axes3D # noqa: F401 unused import
# import ssl # enable if downloading models gives CERTIFICATE_VERIFY_FAILED error
# ssl._create_default_https_context = ssl._create_unverified_context
import os
import sys
# Make the bundled GAN_stability/graf packages importable without installing them.
sys.path.append('submodules') # needed to make imports work in GAN_stability
from graf.gan_training import Evaluator as Evaluator
from graf.config import get_data, build_models, update_config
from graf.utils import count_trainable_parameters, to_phi, to_theta, polar_to_cartesian, look_at
from submodules.GAN_stability.gan_training.checkpoints import CheckpointIO
from submodules.GAN_stability.gan_training.distributions import get_ydist, get_zdist
from submodules.GAN_stability.gan_training.config import (
load_config,
)
from torchvision.transforms import *
import numpy as np
from PIL import Image
import math
if __name__ == '__main__':
    # Script entry point; main() is expected to be defined earlier in this file.
    main()
| [
11748,
1822,
29572,
198,
11748,
4866,
198,
6738,
28686,
1330,
3108,
198,
6738,
3108,
8019,
1330,
10644,
198,
198,
11748,
28034,
198,
198,
13165,
354,
13,
2617,
62,
12286,
62,
83,
22854,
62,
4906,
10786,
13165,
354,
13,
66,
15339,
13,
... | 3.046703 | 364 |
from django.conf import settings
from django.db import models
from django.utils.text import slugify
from django.contrib.auth import get_user_model
# Workflow states for a training/audit item.
STATUS_PENDING = "pending"
STATUS_PROGRESS = "progress"
STATUS_COMPLETED = "completed"
STATUSES = [STATUS_PENDING, STATUS_PROGRESS, STATUS_COMPLETED]

THEMES = ["Validation méthode", "Conditionnement", "Gestion des risques", "Mise sur le marché", "Validation procédé", "BPF", "Assurance qualité"]
# BUG FIX: zip() returns a one-shot iterator in Python 3 — the first consumer
# (form rendering, migration, validation) exhausts it and every later read sees
# an empty choices sequence. Materialize to a list of (value, label) tuples.
THEME_CHOICES = [(theme, theme) for theme in THEMES]

AUDIENCES = ["restricted", "all"]
AUDIENCE_CHOICES = [(audience, audience) for audience in AUDIENCES]
| [
6738,
42625,
14208,
13,
10414,
1330,
6460,
198,
6738,
42625,
14208,
13,
9945,
1330,
4981,
198,
6738,
42625,
14208,
13,
26791,
13,
5239,
1330,
31065,
1958,
198,
6738,
42625,
14208,
13,
3642,
822,
13,
18439,
1330,
651,
62,
7220,
62,
19849... | 2.725962 | 208 |
from main import app
from unittest import TestCase
from flask import json
from nose.tools import ok_, eq_
import unittest
# if __name__ == '__main__':
# nose.run()
| [
6738,
1388,
1330,
598,
198,
6738,
555,
715,
395,
1330,
6208,
20448,
198,
6738,
42903,
1330,
33918,
198,
6738,
9686,
13,
31391,
1330,
12876,
62,
11,
37430,
62,
198,
11748,
555,
715,
395,
628,
628,
220,
220,
220,
1303,
611,
11593,
3672,... | 2.870968 | 62 |
import os
# Runtime configuration resolved from the environment with development defaults.
_ENV = os.environ

# Generic application parameter (placeholder default).
APPLICATION_PARAMETER = _ENV.get("APPLICATION_PARAMETER", "my_parameter")

# PostgreSQL connection settings.
POSTGRES_HOST = _ENV.get("POSTGRES_HOST", "localhost")
POSTGRES_USER = _ENV.get("POSTGRES_USER", "postgres")
POSTGRES_PASSWORD = _ENV.get("POSTGRES_PASSWORD", "admin")
# NOTE(review): the default "postgrs_name" looks like a typo for
# "postgres_name" — left unchanged since it is a runtime default.
POSTGRES_DB = _ENV.get("POSTGRES_DB", "postgrs_name")
11748,
28686,
198,
198,
2969,
31484,
6234,
62,
27082,
2390,
2767,
1137,
796,
28686,
13,
268,
2268,
13,
1136,
10786,
2969,
31484,
6234,
62,
27082,
2390,
2767,
1137,
3256,
705,
1820,
62,
17143,
2357,
11537,
198,
198,
32782,
10761,
1546,
6... | 2.42446 | 139 |
import csv
import json
import urllib
from bs4 import BeautifulSoup
from datetime import datetime, timedelta
from decimal import Decimal
from xml.etree import ElementTree
from lxml import etree
import requests
from django.utils import timezone, dateparse
from driver.models import GPSDevice, GPSDeviceLog, TracknovateGPSDevice, TracknovateGPSDeviceLog, WaytrackerGPSDevice, \
WaytrackerGPSDeviceLog
from owner.models import Vehicle
from owner.vehicle_util import compare_format
from team.helper.helper import to_float
def get_tracknovate_update():
    """
    Poll the tracknovate live-tracking site and mirror the result into the
    local TracknovateGPSDevice/TracknovateGPSDeviceLog tables.

    login url:
    POST http://tracknovate.com/login_tracknovate.php
    Form Data:
    username:Trilokji
    password:123456
    submit:Login
    update url:
    GET http://tracknovate.com/livetracking/test_map.php?q=all

    NOTE(review): credentials are hard-coded here; consider moving them to
    settings/environment.
    """
    # Authenticate first; the live-tracking endpoint requires a session cookie.
    session = requests.session()
    login_data = {
        'username': 'Trilokji',
        'password': '123456',
        'submit': 'Login'
    }
    login_response = session.post('http://tracknovate.com/login_tracknovate.php', login_data)
    if login_response.status_code != 200:
        raise AssertionError('Could not login to tracknovate')
    response = session.get('http://tracknovate.com/livetracking/test_map.php?q=all')
    # get_marker_data (defined elsewhere) presumably parses the page into dicts
    # with keys vehicle_id/phone_number/sim_number/... — confirm its contract.
    data = get_marker_data(response.content)
    # Known vehicle_id -> primary key, used to decide update vs create below.
    existing_vehicle_ids = dict(TracknovateGPSDevice.objects.values_list('vehicle_id', 'id'))
    to_create = []
    for row in data:
        vehicle_id = row['vehicle_id']
        vehicle_type, vehicle_status, driver_name, driver_number = get_vehicle_details(vehicle_id)
        if vehicle_id in existing_vehicle_ids:
            # Known device: refresh its latest position and metadata in place.
            TracknovateGPSDevice.objects.filter(id=existing_vehicle_ids[vehicle_id]).update(
                phone=row['phone_number'], sim_number=row['sim_number'], current_vstatus=row['vstatus'],
                current_duration=row['duration'], vehicle_type=vehicle_type, vehicle_status=vehicle_status,
                driver_name=driver_name, driver_number=driver_number, latitude=row['latitude'],
                longitude=row['longitude'], location_time=row['datetime']
            )
        else:
            # New device: queue it for a single bulk insert after the loop.
            to_create.append(
                TracknovateGPSDevice(
                    phone=row['phone_number'], sim_number=row['sim_number'], vehicle_id=vehicle_id,
                    vehicle_number=vehicle_id, current_vstatus=row['vstatus'], current_duration=row['duration'],
                    vehicle_type=vehicle_type, vehicle_status=vehicle_status,
                    driver_name=driver_name, driver_number=driver_number, latitude=row['latitude'],
                    longitude=row['longitude'], location_time=row['datetime']
                )
            )
    TracknovateGPSDevice.objects.bulk_create(to_create)
    # Re-read ids (bulk_create may not populate pks on all DB backends) and
    # append one immutable log row per polled position.
    vehicle_ids = [r['vehicle_id'] for r in data]
    vmap = {x.vehicle_id: x.id for x in TracknovateGPSDevice.objects.filter(vehicle_id__in=vehicle_ids)}
    TracknovateGPSDeviceLog.objects.bulk_create([
        TracknovateGPSDeviceLog(
            datetime=row['datetime'],
            vehicle_id=row['vehicle_id'],
            latitude=row['latitude'], longitude=row['longitude'],
            speed=row['speed'],
            engine_on=row['engine_on'],
            device_id=vmap[row['vehicle_id']],
            vehicle_number=row['vehicle_id'],
        ) for row in data
    ])
"""
pull additional data for tracknovate vehicles from our database
start regular updates for both
streamlined interface for web for outward payment entry and
geo-fencing alerts
"""
| [
11748,
269,
21370,
198,
11748,
33918,
198,
11748,
2956,
297,
571,
198,
6738,
275,
82,
19,
1330,
23762,
50,
10486,
198,
6738,
4818,
8079,
1330,
4818,
8079,
11,
28805,
12514,
198,
6738,
32465,
1330,
4280,
4402,
198,
6738,
35555,
13,
316,
... | 2.416552 | 1,450 |
from django.db import models
class Theme(models.Model):
    """
    Topic of interest.

    Themes form a tree through ``parent_theme``; ``level`` presumably stores
    the depth in that tree (confirm with callers).
    """
    name = models.CharField(max_length=150, blank=False)
    comments = models.TextField(blank=True, default="")
    # BUG FIX: blank=True only relaxes form validation; without null=True the
    # database column stays NOT NULL, so a root theme (no parent) could never
    # be saved. null=True makes the self-reference genuinely optional.
    parent_theme = models.ForeignKey(
        'self',
        on_delete=models.CASCADE,
        related_name="children_themes",
        blank=True,
        null=True
    )
    level = models.PositiveIntegerField(blank=False)
    # Bookkeeping timestamps, maintained automatically.
    added_date = models.DateTimeField(auto_now_add=True, blank=True)
    updated_date = models.DateTimeField(auto_now=True)
class Function(models.Model):
    """
    Job / professional function a person can hold (see Person.functions).
    """
    # Human-readable label, required.
    name = models.CharField(max_length=150, blank=False)
    # Optional free-text notes.
    comments = models.TextField(blank=True, default="")
    # Bookkeeping timestamps, maintained automatically.
    added_date = models.DateTimeField(auto_now_add=True, blank=True)
    updated_date = models.DateTimeField(auto_now=True)
class Person(models.Model):
    """
    Actor in the bibliographic research.
    """
    # Only the last name is mandatory.
    firstname = models.CharField(max_length=150, default="", blank=True)
    lastname = models.CharField(max_length=150, blank=False)
    comments = models.TextField(blank=True, default="")
    # A person may hold several functions and follow several themes.
    functions = models.ManyToManyField(Function)
    themes = models.ManyToManyField(Theme)
    # Bookkeeping timestamps, maintained automatically.
    added_date = models.DateTimeField(auto_now_add=True, blank=True)
    updated_date = models.DateTimeField(auto_now=True)
class TypeRessource(models.Model):
    """
    Type/category of a resource (see Ressource.type_ressource).
    """
    # Human-readable label, required.
    name = models.CharField(max_length=150, blank=False)
    # Optional free-text notes.
    comments = models.TextField(blank=True, default="")
    # Bookkeeping timestamps, maintained automatically.
    added_date = models.DateTimeField(auto_now_add=True, blank=True)
    updated_date = models.DateTimeField(auto_now=True)
class Ressource(models.Model):
    """
    Ressource: a reference item (with optional URL) tied to one or more themes.
    """
    name = models.CharField(max_length=150, blank=False)
    link_url = models.CharField(max_length=300, blank=True)
    comments = models.TextField(blank=True, default="")
    themes = models.ManyToManyField(Theme)
    # BUG FIX / consistency: on_delete is mandatory on ForeignKey since
    # Django 2.0 and was already supplied on Theme.parent_theme; without it
    # this model fails to load on modern Django. CASCADE mirrors Theme.
    type_ressource = models.ForeignKey(TypeRessource, on_delete=models.CASCADE)
    # Bookkeeping timestamps, maintained automatically.
    added_date = models.DateTimeField(auto_now_add=True, blank=True)
    updated_date = models.DateTimeField(auto_now=True)
| [
6738,
42625,
14208,
13,
9945,
1330,
4981,
628,
198,
4871,
26729,
7,
27530,
13,
17633,
2599,
198,
220,
220,
220,
37227,
198,
220,
220,
220,
220,
220,
220,
220,
47373,
286,
1393,
198,
220,
220,
220,
37227,
628,
220,
220,
220,
1438,
79... | 2.675879 | 796 |
# -*- coding: utf-8 -*-
import sys
from colorama import init, Fore, Style
import random, string
# NOTE(review): Python 2 only — print statements, raw_input and xrange; the
# AES class instantiated below is defined elsewhere in this file.
if __name__ == '__main__':
	try:
		init(autoreset=True)
		text = raw_input(Fore.YELLOW+'[*] Set the text you want to encrypt: '+ Style.RESET_ALL)
		# sec_key = 'asdhjuiktu76tgb4'#16
		# bit = 128
		# sec_key = 'asdhjuiktu76tgb49olxb4Ec'#24
		# bit = 192
		# sec_key = 'asdhjuiktu76tgb49olxb4Ecsgbqus70'#32
		# bit = 256
		# Give the user three attempts to enter a valid integer key size.
		for i in range(3):
			try:
				bit = int(raw_input(Fore.YELLOW+'[*] Select the bit for the encryption ("128"/"192"/"256"): '+ Style.RESET_ALL))
				bit = int(bit)
				break
			except ValueError:
				print Fore.RED + '[-] bit must be an integer'
				if i == 2:
					sys.exit()
				else:
					print Fore.YELLOW+'[*] Please try again...\n'
		sec_key = raw_input(Fore.YELLOW+'[*] Set the secret key for encryption: '+ Style.RESET_ALL)
		# 'random' generates a key whose length matches the chosen key size
		# (16/24/32 characters for 128/192/256-bit).
		if sec_key == 'random':
			characters = '~!@#$%^&*()<>?,./-=_+'
			if bit == 128:
				length = 16
			elif bit == 192:
				length = 24
			else:
				length = 32
			sec_key = "".join([random.choice(string.ascii_letters+string.digits+characters) for i in xrange(length)])
		print ''
		aes = AES()
		encrypt = aes.Encrypt(text, sec_key, bit)
		print '\n\n'+Fore.GREEN+'Cipher Text: '+ Style.RESET_ALL + encrypt
		print '\n\n\n'
		print Fore.YELLOW+'[*] Decrypting '+ Fore.WHITE+ encrypt+ Fore.YELLOW+' using as a secret key: '+Fore.WHITE+sec_key
		raw_input()
		# encrypt = raw_input(Fore.YELLOW+'[*] Set the cipher text: '+ Style.RESET_ALL)
		# sec_key = raw_input(Fore.YELLOW+'[*] Set the secret key for decryption: '+ Style.RESET_ALL)
		decrypt = aes.Decrypt(encrypt, sec_key, bit)
		print '\n\n'+ Fore.GREEN +'Clear Text: '+ Style.RESET_ALL +decrypt
		raw_input()
	except Exception as e:
		print Fore.RED + '[-]'+str(e)
	except KeyboardInterrupt:
		sys.exit()
2,
532,
9,
12,
19617,
25,
3384,
69,
12,
23,
532,
9,
12,
201,
198,
11748,
25064,
201,
198,
6738,
3124,
1689,
1330,
2315,
11,
4558,
11,
17738,
201,
198,
11748,
4738,
11,
4731,
201,
198,
201,
198,
201,
198,
201,
198,
201,
198,
197,... | 2.103982 | 904 |
#!/usr/bin/env python
"""
rtfobj.py
rtfobj is a Python module to extract embedded objects from RTF files, such as
OLE ojects. It can be used as a Python library or a command-line tool.
Usage: rtfobj.py <file.rtf>
rtfobj project website: http://www.decalage.info/python/rtfobj
rtfobj is part of the python-oletools package:
http://www.decalage.info/python/oletools
"""
#=== LICENSE =================================================================
# rtfobj is copyright (c) 2012-2016, Philippe Lagadec (http://www.decalage.info)
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without modification,
# are permitted provided that the following conditions are met:
#
# * Redistributions of source code must retain the above copyright notice, this
# list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
# ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
# FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
# OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#------------------------------------------------------------------------------
# CHANGELOG:
# 2012-11-09 v0.01 PL: - first version
# 2013-04-02 v0.02 PL: - fixed bug in main
# 2015-12-09 v0.03 PL: - configurable logging, CLI options
# - extract OLE 1.0 objects
# - extract files from OLE Package objects
# 2016-04-01 v0.04 PL: - fixed logging output to use stdout instead of stderr
# 2016-04-07 v0.45 PL: - improved parsing to handle some malware tricks
# 2016-05-06 v0.47 TJ: - added option -d to set the output directory
# (contribution by Thomas Jarosch)
# TJ: - sanitize filenames to avoid special characters
# 2016-05-29 PL: - improved parsing, fixed issue #42
__version__ = '0.47'
#------------------------------------------------------------------------------
# TODO:
# - improve regex pattern for better performance?
# - allow semicolon within hex, as found in this sample:
# http://contagiodump.blogspot.nl/2011/10/sep-28-cve-2010-3333-manuscript-with.html
#=== IMPORTS =================================================================
import re, os, sys, string, binascii, logging, optparse
from thirdparty.xglob import xglob
from oleobj import OleObject, OleNativeStream
import oleobj
# === LOGGING =================================================================
class NullHandler(logging.Handler):
    """
    Log Handler without output, to avoid printing messages if logging is not
    configured by the main application.
    Python 2.7 has logging.NullHandler, but this is necessary for 2.6:
    see https://docs.python.org/2.6/library/logging.html#configuring-logging-for-a-library
    """

    def emit(self, record):
        # BUG FIX: intentionally discard the record. Without this override the
        # base logging.Handler.emit raises NotImplementedError as soon as the
        # first log record is handled, defeating the purpose of a null handler.
        pass
def get_logger(name, level=logging.CRITICAL+1):
    """
    Fetch (or lazily create) the logger *name* and force its level.

    Reusing an already-registered logger avoids attaching a second handler,
    which would duplicate every message. A brand-new logger gets a single
    NullHandler so this library stays silent unless the host application
    configures logging itself. The default level (CRITICAL+1) disables all
    output.
    """
    already_registered = name in logging.Logger.manager.loggerDict
    logger = logging.getLogger(name)
    if not already_registered:
        # Fresh logger: attach the do-nothing handler exactly once, leaving
        # the application free to set up its own logging later.
        logger.addHandler(NullHandler())
    # Always (re)apply the requested level, even on a reused logger.
    logger.setLevel(level)
    return logger
# a global logger object used for debugging:
log = get_logger('rtfobj')
#=== CONSTANTS=================================================================
# REGEX pattern to extract embedded OLE objects in hexadecimal format:
# alphanum digit: [0-9A-Fa-f]
HEX_DIGIT = r'[0-9A-Fa-f]'
# hex char = two alphanum digits: [0-9A-Fa-f]{2}
# HEX_CHAR = r'[0-9A-Fa-f]{2}'
# in fact MS Word allows whitespaces in between the hex digits!
# HEX_CHAR = r'[0-9A-Fa-f]\s*[0-9A-Fa-f]'
# Even worse, MS Word also allows ANY RTF-style tag {*} in between!!
# AND the tags can be nested...
SINGLE_RTF_TAG = r'[{][^{}]*[}]'
# Nested tags, two levels (because Python's re does not support nested matching):
NESTED_RTF_TAG = r'[{](?:[^{}]|'+SINGLE_RTF_TAG+r')*[}]'
# ignored whitespaces and tags within a hex block:
IGNORED = r'(?:\s|'+NESTED_RTF_TAG+r')*'
#IGNORED = r'\s*'
# HEX_CHAR = HEX_DIGIT + IGNORED + HEX_DIGIT
# several hex chars, at least 4: (?:[0-9A-Fa-f]{2}){4,}
# + word boundaries
# HEX_CHARS_4orMORE = r'\b(?:' + HEX_CHAR + r'){4,}\b'
# at least 1 hex char:
# HEX_CHARS_1orMORE = r'(?:' + HEX_CHAR + r')+'
# at least 1 hex char, followed by whitespace or CR/LF:
# HEX_CHARS_1orMORE_WHITESPACES = r'(?:' + HEX_CHAR + r')+\s+'
# + word boundaries around hex block
# HEX_CHARS_1orMORE_WHITESPACES = r'\b(?:' + HEX_CHAR + r')+\b\s*'
# at least one block of hex and whitespace chars, followed by closing curly bracket:
# HEX_BLOCK_CURLY_BRACKET = r'(?:' + HEX_CHARS_1orMORE_WHITESPACES + r')+\}'
# PATTERN = r'(?:' + HEX_CHARS_1orMORE_WHITESPACES + r')*' + HEX_CHARS_1orMORE
#TODO PATTERN = r'\b(?:' + HEX_CHAR + IGNORED + r'){4,}\b'
# PATTERN = r'\b(?:' + HEX_CHAR + IGNORED + r'){4,}' #+ HEX_CHAR + r'\b'
# Final pattern: at least 8 hex digits (7 + 1), whitespace/tags allowed between.
PATTERN = r'\b(?:' + HEX_DIGIT + IGNORED + r'){7,}' + HEX_DIGIT + r'\b'
# at least 4 hex chars, followed by whitespace or CR/LF: (?:[0-9A-Fa-f]{2}){4,}\s*
# PATTERN = r'(?:(?:[0-9A-Fa-f]{2})+\s*)*(?:[0-9A-Fa-f]{2}){4,}'
# improved pattern, allowing semicolons within hex:
#PATTERN = r'(?:(?:[0-9A-Fa-f]{2})+\s*)*(?:[0-9A-Fa-f]{2}){4,}'
# a dummy translation table for str.translate, which does not change anythying:
# NOTE(review): two-argument string.maketrans/str.translate is Python 2 only;
# under Python 3 this line raises (string.maketrans was removed).
TRANSTABLE_NOCHANGE = string.maketrans('', '')
# Precompiled patterns used by the extraction functions below.
re_hexblock = re.compile(PATTERN)
re_embedded_tags = re.compile(IGNORED)
re_decimal = re.compile(r'\d+')
re_delimiter = re.compile(r'[ \t\r\n\f\v]')
DELIMITER = r'[ \t\r\n\f\v]'
DELIMITERS_ZeroOrMore = r'[ \t\r\n\f\v]*'
BACKSLASH_BIN = r'\\bin'
# According to my tests, Word accepts up to 250 digits (leading zeroes)
DECIMAL_GROUP = r'(\d{1,250})'
re_delims_bin_decimal = re.compile(DELIMITERS_ZeroOrMore + BACKSLASH_BIN
                                   + DECIMAL_GROUP + DELIMITER)
re_delim_hexblock = re.compile(DELIMITER + PATTERN)
#=== FUNCTIONS ===============================================================
def rtf_iter_objects_old (filename, min_size=32):
    """
    Open a RTF file, extract each embedded object encoded in hexadecimal of
    size > min_size, yield the index of the object in the RTF file and its data
    in binary format.
    This is an iterator.
    """
    # NOTE(review): the file handle is never closed explicitly; fine for a
    # short-lived CLI, but a `with` block would be cleaner.
    data = open(filename, 'rb').read()
    for m in re.finditer(PATTERN, data):
        found = m.group(0)
        # length of the raw match in the RTF source (before cleanup),
        # reported back to the caller alongside the decoded data
        orig_len = len(found)
        # remove all whitespace and line feeds:
        #NOTE: with Python 2.6+, we could use None instead of TRANSTABLE_NOCHANGE
        # (two-argument str.translate with a delete set is Python 2 only)
        found = found.translate(TRANSTABLE_NOCHANGE, ' \t\r\n\f\v}')
        found = binascii.unhexlify(found)
        #print repr(found)
        if len(found)>min_size:
            yield m.start(), orig_len, found
# TODO: backward-compatible API?
def rtf_iter_objects (data, min_size=32):
    """
    Extract each embedded object encoded in hexadecimal from RTF *data*
    (the full content of an RTF file as a byte string), handling the
    obfuscation trick of interleaved ``\\bin`` binary runs.

    Yields tuples ``(start, length, objdata)``: the index of the object in
    the RTF data, the length of RTF source it spans, and the decoded bytes.
    Objects of decoded size <= min_size are skipped.
    This is an iterator.
    """
    # Search 1st occurence of a hex block:
    match = re_hexblock.search(data)
    if match is None:
        log.debug('No hex block found.')
        # no hex block found
        return
    while match is not None:
        found = match.group(0)
        # start index
        start = match.start()
        # current position
        current = match.end()
        log.debug('Found hex block starting at %08X, end %08X, size=%d' % (start, current, len(found)))
        if len(found) < min_size:
            log.debug('Too small - size<%d, ignored.' % min_size)
            match = re_hexblock.search(data, pos=current)
            continue
        #log.debug('Match: %s' % found)
        # remove all whitespace and line feeds:
        #NOTE: with Python 2.6+, we could use None instead of TRANSTABLE_NOCHANGE
        found = found.translate(TRANSTABLE_NOCHANGE, ' \t\r\n\f\v')
        # Also remove embedded RTF tags:
        found = re_embedded_tags.sub('', found)
        # object data extracted from the RTF file
        # MS Word accepts an extra hex digit, so we need to trim it if present:
        if len(found) & 1:
            log.debug('Odd length, trimmed last byte.')
            found = found[:-1]
        #log.debug('Cleaned match: %s' % found)
        objdata = binascii.unhexlify(found)
        # Detect the "\bin" control word, which is sometimes used for obfuscation:
        bin_match = re_delims_bin_decimal.match(data, pos=current)
        while bin_match is not None:
            log.debug('Found \\bin block starting at %08X : %r'
                      % (bin_match.start(), bin_match.group(0)))
            # extract the decimal integer following '\bin'
            bin_len = int(bin_match.group(1))
            log.debug('\\bin block length = %d' % bin_len)
            if current+bin_len > len(data):
                log.error('\\bin block length is larger than the remaining data')
                # move the current index, ignore the \bin block
                current += len(bin_match.group(0))
                break
            # read that number of raw bytes and append them verbatim:
            objdata += data[current:current+bin_len]
            current += len(bin_match.group(0)) + bin_len
            log.debug('Current position = %08X' % current)
            # a \bin run may be followed directly by another hex block:
            match = re_delim_hexblock.match(data, pos=current)
            if match is not None:
                log.debug('Found next hex block starting at %08X, end %08X'
                          % (match.start(), match.end()))
                found = match.group(0)
                log.debug('Match: %s' % found)
                # remove all whitespace and line feeds:
                found = found.translate(TRANSTABLE_NOCHANGE, ' \t\r\n\f\v')
                # BUG FIX: the arguments were swapped (sub(found, '')), which
                # substituted into the empty string and discarded this hex
                # block's data entirely; re.sub takes (repl, string).
                found = re_embedded_tags.sub('', found)
                objdata += binascii.unhexlify(found)
                current = match.end()
            bin_match = re_delims_bin_decimal.match(data, pos=current)
        # print repr(found)
        if len(objdata)>min_size:
            yield start, current-start, objdata
        # Search next occurence of a hex block:
        match = re_hexblock.search(data, pos=current)
def sanitize_filename(filename, replacement='_', max_length=200):
    """Compute a safe basename for *filename*.

    Keeps only word characters, dots, dashes and spaces (everything else is
    replaced by *replacement*), collapses '..' sequences and runs of spaces,
    and truncates the result to *max_length* characters. Always returns a
    non-empty basename ('NONAME' as a last resort), so the result can be used
    directly as an output file name.
    """
    basepath = os.path.basename(filename).strip()
    sane_fname = re.sub(r'[^\w\.\- ]', replacement, basepath)
    # collapse '..' runs so the name can never look like a path traversal
    while ".." in sane_fname:
        sane_fname = sane_fname.replace('..', '.')
    # collapse runs of spaces to a single space
    sane_fname = re.sub(r' {2,}', ' ', sane_fname)
    # BUG FIX: the emptiness test used len(filename) instead of the computed
    # sane_fname, so inputs like '   ' or '/' yielded an empty file name.
    if not sane_fname:
        sane_fname = 'NONAME'
    # limit filename length
    if max_length:
        sane_fname = sane_fname[:max_length]
    return sane_fname
#=== MAIN =================================================================
if __name__ == '__main__':
    # NOTE(review): this entry point is Python 2 only (bare `print __doc__`
    # statement below); process_file is defined elsewhere in this module.
    # print banner with version
    print ('rtfobj %s - http://decalage.info/python/oletools' % __version__)
    print ('THIS IS WORK IN PROGRESS - Check updates regularly!')
    print ('Please report any issue at https://github.com/decalage2/oletools/issues')
    print ('')
    DEFAULT_LOG_LEVEL = "warning" # Default log level
    # Map of --loglevel option values to logging module constants.
    LOG_LEVELS = {'debug': logging.DEBUG,
        'info': logging.INFO,
        'warning': logging.WARNING,
        'error': logging.ERROR,
        'critical': logging.CRITICAL
        }
    usage = 'usage: %prog [options] <filename> [filename2 ...]'
    parser = optparse.OptionParser(usage=usage)
    # parser.add_option('-o', '--outfile', dest='outfile',
    # help='output file')
    # parser.add_option('-c', '--csv', dest='csv',
    # help='export results to a CSV file')
    parser.add_option("-r", action="store_true", dest="recursive",
        help='find files recursively in subdirectories.')
    parser.add_option("-d", type="str", dest="output_dir",
        help='use specified directory to output files.', default=None)
    parser.add_option("-z", "--zip", dest='zip_password', type='str', default=None,
        help='if the file is a zip archive, open first file from it, using the provided password (requires Python 2.6+)')
    parser.add_option("-f", "--zipfname", dest='zip_fname', type='str', default='*',
        help='if the file is a zip archive, file(s) to be opened within the zip. Wildcards * and ? are supported. (default:*)')
    parser.add_option('-l', '--loglevel', dest="loglevel", action="store", default=DEFAULT_LOG_LEVEL,
                        help="logging level debug/info/warning/error/critical (default=%default)")
    (options, args) = parser.parse_args()
    # Print help if no arguments are passed
    if len(args) == 0:
        print __doc__
        parser.print_help()
        sys.exit()
    # Setup logging to the console:
    # here we use stdout instead of stderr by default, so that the output
    # can be redirected properly.
    logging.basicConfig(level=LOG_LEVELS[options.loglevel], stream=sys.stdout,
                        format='%(levelname)-8s %(message)s')
    # enable logging in the modules:
    log.setLevel(logging.NOTSET)
    oleobj.log.setLevel(logging.NOTSET)
    # Iterate input files, optionally recursing and/or reading inside zips.
    for container, filename, data in xglob.iter_files(args, recursive=options.recursive,
        zip_password=options.zip_password, zip_fname=options.zip_fname):
        # ignore directory names stored in zip files:
        if container and filename.endswith('/'):
            continue
        process_file(container, filename, data, options.output_dir)
| [
2,
48443,
14629,
14,
8800,
14,
24330,
21015,
198,
37811,
198,
17034,
69,
26801,
13,
9078,
198,
198,
17034,
69,
26801,
318,
257,
11361,
8265,
284,
7925,
14553,
5563,
422,
371,
10234,
3696,
11,
884,
355,
198,
46,
2538,
267,
752,
82,
1... | 2.532448 | 6,102 |
#!/usr/bin/env python3
# Walkthrough example for the HX711 load-cell amplifier driver.
# Demonstrates: channel/gain selection, reading raw and offset-corrected
# data, taring, calibration against a known weight, fast single reads,
# debug mode, and the pluggable data filter. Requires a Raspberry Pi with
# the HX711 wired to BCM pins 24 (DOUT) and 23 (PD_SCK).
import RPi.GPIO as GPIO  # import GPIO
from hx711 import HX711  # import the class HX711
from hx711 import outliers_filter

try:
    GPIO.setmode(GPIO.BCM)  # set GPIO pin mode to BCM numbering
    # Create an object hx which represents your real hx711 chip
    # Required input parameters are only 'dout_pin' and 'pd_sck_pin'
    # If you do not pass any argument 'gain_channel_A' then the default value is 128
    # If you do not pass any argument 'set_channel' then the default value is 'A'
    # you can set a gain for channel A even though you want to currently select channel B
    hx = HX711(
        dout_pin=24, pd_sck_pin=23, gain_channel_A=128, select_channel='B')

    err = hx.reset()  # Before we start, reset the hx711 ( not necessary)
    if err:  # you can check if the reset was successful
        print('not ready')
    else:
        print('Ready to use')

    hx.set_gain_A(
        gain=64)  # You can change the gain for channel A at any time.
    hx.select_channel(
        channel='A')  # Select desired channel. Either 'A' or 'B' at any time.

    # Read data several, or only one, time and return mean value
    # argument "readings" is not required default value is 30
    data = hx.get_raw_data_mean(readings=30)
    if data:  # always check if you get correct value or only False
        print('Raw data:', data)
    else:
        print('invalid data')

    # measure tare and save the value as offset for current channel
    # and gain selected. That means channel A and gain 64
    result = hx.zero(readings=30)

    # Read data several, or only one, time and return mean value.
    # It subtracts offset value for particular channel from the mean value.
    # This value is still just a number from HX711 without any conversion
    # to units such as grams or kg.
    data = hx.get_data_mean(readings=30)
    if data:  # always check if you get correct value or only False
        # now the value is close to 0
        print('Data subtracted by offset but still not converted to any unit:',
              data)
    else:
        print('invalid data')

    # In order to calculate the conversion ratio to some units, in my case I want grams,
    # you must have known weight.
    input('Put known weight on the scale and then press Enter')
    data = hx.get_data_mean(readings=30)
    if data:
        print('Mean value from HX711 subtracted by offset:', data)
        known_weight_grams = input(
            'Write how many grams it was and press Enter: ')
        try:
            value = float(known_weight_grams)
            print(value, 'grams')
        except ValueError:
            print('Expected integer or float and I have got:',
                  known_weight_grams)
            # BUG FIX: the original fell through here with 'value' unbound,
            # so 'ratio = data / value' below died with a confusing
            # NameError. Re-raise instead: the failure is explicit and the
            # 'finally' clause still performs GPIO cleanup.
            raise

        # set scale ratio for particular channel and gain which is
        # used to calculate the conversion to units. Required argument is only
        # scale ratio. Without arguments 'channel' and 'gain_A' it sets
        # the ratio for current channel and gain.
        ratio = data / value  # calculate the ratio for channel A and gain 64
        hx.set_scale_ratio(ratio)  # set ratio for current channel
        print('Ratio is set.')
    else:
        raise ValueError('Cannot calculate mean value. Try debug mode.')

    # Read data several, or only one, time and return mean value
    # subtracted by offset and converted by scale ratio to
    # desired units. In my case in grams.
    print('Current weight on the scale in grams is: ')
    print(hx.get_weight_mean(30), 'g')

    # if you need the data fast without doing average or filtering them.
    # do some kind of loop and pass argument 'readings=1'. Default 'readings' is 30
    # be aware that HX711 sometimes return invalid or wrong data.
    # you can probably see it now
    print('Now I will print data quickly, but sometimes wrong.')
    input(
        'That is why I recommend always passing argument readings=20 or higher value'
    )
    for i in range(40):
        # the value will vary because it is only one immediate reading.
        # the default speed for hx711 is 10 samples per second
        print(hx.get_weight_mean(readings=1), 'g')

    # if you are not sure which gain is currently set on channel A you can call
    print('Current gain on channel A:', hx.get_current_gain_A())
    # to get currently selected channel
    print('Current channel is:', hx.get_current_channel())
    # to get current offset for a specific channel
    offset = hx.get_current_offset(channel='A', gain_A=128)
    print('Current offset for channel A and gain 128:', offset)
    # if no arguments passed then it return offset for the currently selected channel and gain
    offset = hx.get_current_offset()
    print('Current offset for channel A and the current gain (64):', offset)
    # for channel B
    offset = hx.get_current_offset(channel='B')
    print('Current offset for channel B:', offset)
    # to get current scale ratio
    current_ratio = hx.get_current_scale_ratio()
    print('Current scale ratio is set to:', current_ratio)

    # set offset manually for specific channel and gain. If you want to
    # set offset for channel B then argument 'gain_A' is not required
    # if no arguments 'channel' and 'gain_A' provided. The offset is
    # set for the current channel and gain. Such as:
    # hx.set_offset(offset=15000)

    input(
        'Now I will show you how it looks if you turn on debug mode. Press ENTER'
    )
    # turns on debug mode. It prints many things so you can find problem
    hx.set_debug_mode(flag=True)
    print(hx.get_raw_data_mean(
        4))  # now you can see many intermediate steps and values
    hx.set_debug_mode(False)

    #hx.power_down()  # turns off the hx711. Low power consumption
    #hx.power_up()  # turns on the hx711.
    #hx.reset()  # resets the hx711 and get it ready for
    # reading of the currently selected channel

    for i in range(2):
        # without argument 'readings' default is 30
        print('-> Weight channel A gain 64:', hx.get_weight_mean(20), 'g')
        print('-> Raw data channel A gain 64:', hx.get_raw_data_mean(20))
        print('--------------------------------------------')
        hx.set_gain_A(128)
        # without argument 'readings' default is 30
        print('-> Weight channel A gain 128:', hx.get_weight_mean(20), ' g')
        print('-> Raw data channel A gain 128:', hx.get_raw_data_mean(20))
        print('--------------------------------------------')
        hx.select_channel('B')
        print('Channel B selected')
        # without argument default is 1
        print('-> Weight channel B gain 32:', hx.get_weight_mean(20), 'g')
        print('-> Raw data channel B gain 32:', hx.get_raw_data_mean(20))

    # you can also get the last raw data read for each channel and gain without reading it again
    # without an argument it return raw data for currently set channel and gain, so channel B
    last_value = hx.get_last_raw_data()
    print('It remembers last raw data for channel B:', last_value)
    last_value = hx.get_last_raw_data(channel='A', gain_A=64)
    print('It remembers last raw data for channel A gain 64:', last_value)
    last_value = hx.get_last_raw_data(channel='A', gain_A=128)
    print('It remembers last raw data for channel A gain 128:', last_value)

    # To get the current data filter that is set
    current_filter = hx.get_data_filter()
    # To set a new data filter
    hx.set_data_filter(outliers_filter)
    # By default it is outliers_filter.
    # If you want to create your own filter, the requirement is simple.
    # It has to take a single argument that is a list of int and return list of int

    print('\nThat is all. Cleaning up.')
except (KeyboardInterrupt, SystemExit):
    print('Bye :)')
finally:
    GPIO.cleanup()
| [
2,
48443,
14629,
14,
8800,
14,
24330,
21015,
18,
198,
11748,
25812,
72,
13,
16960,
9399,
355,
50143,
220,
1303,
1330,
50143,
198,
6738,
289,
87,
22,
1157,
1330,
367,
55,
22,
1157,
220,
1303,
1330,
262,
1398,
367,
55,
22,
1157,
198,
... | 2.785992 | 2,827 |
"""
Created on 24.09.2009
@author: alen
"""
from django import template
register = template.Library()
@register.inclusion_tag('socialregistration/openid_form.html')
| [
37811,
198,
41972,
319,
1987,
13,
2931,
13,
10531,
198,
198,
31,
9800,
25,
435,
268,
198,
37811,
198,
6738,
42625,
14208,
1330,
11055,
198,
198,
30238,
796,
11055,
13,
23377,
3419,
198,
198,
31,
30238,
13,
259,
4717,
62,
12985,
10786,... | 3.111111 | 54 |
# -*- coding: utf-8 -*-
# Generated by Django 1.11.11 on 2018-11-19 13:11
from __future__ import unicode_literals
from django.db import migrations, models
| [
2,
532,
9,
12,
19617,
25,
3384,
69,
12,
23,
532,
9,
12,
198,
2,
2980,
515,
416,
37770,
352,
13,
1157,
13,
1157,
319,
2864,
12,
1157,
12,
1129,
1511,
25,
1157,
198,
6738,
11593,
37443,
834,
1330,
28000,
1098,
62,
17201,
874,
198,... | 2.754386 | 57 |
from django.urls import path
from home import views as home_view
from . import views
urlpatterns = [
    # Site landing page.
    path('', home_view.index, name='index'),
    # BUG FIX: the original registered all four patterns on the empty
    # route '', so only the first one ('index') could ever match and the
    # remaining three were unreachable. Give each section its own prefix;
    # the route *names* are unchanged so existing reverse()/{% url %}
    # lookups keep working.
    # NOTE(review): all three sections currently point at the same
    # views.index — presumably placeholders; confirm against the views
    # module.
    path('programming/', views.index, name='Programming'),
    path('music/', views.index, name='Music'),
    path('design/', views.index, name='Design'),
]
| [
6738,
42625,
14208,
13,
6371,
82,
1330,
3108,
198,
198,
6738,
1363,
1330,
5009,
355,
1363,
62,
1177,
198,
6738,
764,
1330,
5009,
198,
198,
6371,
33279,
82,
796,
685,
198,
220,
220,
220,
3108,
10786,
3256,
1363,
62,
1177,
13,
9630,
1... | 2.865979 | 97 |
# Authors: Raghav RV <rvraghav93@gmail.com>
# License: BSD 3 clause
import pickle
from sklearn.utils.deprecation import _is_deprecated
from sklearn.utils.deprecation import deprecated
from sklearn.utils._testing import assert_warns_message
@deprecated('qwerty')
@deprecated()
| [
2,
46665,
25,
371,
10471,
615,
31367,
1279,
81,
37020,
10471,
615,
6052,
31,
14816,
13,
785,
29,
201,
198,
2,
13789,
25,
347,
10305,
513,
13444,
201,
198,
201,
198,
201,
198,
11748,
2298,
293,
201,
198,
201,
198,
6738,
1341,
35720,
... | 2.540984 | 122 |