hexsha stringlengths 40 40 | size int64 4 1.02M | ext stringclasses 8
values | lang stringclasses 1
value | max_stars_repo_path stringlengths 4 209 | max_stars_repo_name stringlengths 5 121 | max_stars_repo_head_hexsha stringlengths 40 40 | max_stars_repo_licenses listlengths 1 10 | max_stars_count int64 1 191k ⌀ | max_stars_repo_stars_event_min_datetime stringlengths 24 24 ⌀ | max_stars_repo_stars_event_max_datetime stringlengths 24 24 ⌀ | max_issues_repo_path stringlengths 4 209 | max_issues_repo_name stringlengths 5 121 | max_issues_repo_head_hexsha stringlengths 40 40 | max_issues_repo_licenses listlengths 1 10 | max_issues_count int64 1 67k ⌀ | max_issues_repo_issues_event_min_datetime stringlengths 24 24 ⌀ | max_issues_repo_issues_event_max_datetime stringlengths 24 24 ⌀ | max_forks_repo_path stringlengths 4 209 | max_forks_repo_name stringlengths 5 121 | max_forks_repo_head_hexsha stringlengths 40 40 | max_forks_repo_licenses listlengths 1 10 | max_forks_count int64 1 105k ⌀ | max_forks_repo_forks_event_min_datetime stringlengths 24 24 ⌀ | max_forks_repo_forks_event_max_datetime stringlengths 24 24 ⌀ | content stringlengths 4 1.02M | avg_line_length float64 1.07 66.1k | max_line_length int64 4 266k | alphanum_fraction float64 0.01 1 |
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
917dd4fbe5fe3f01d17527951428baefbf9dad35 | 414 | py | Python | 2016-12-03/manager.py | awesome-archive/mp | e904c7c858fe5df5859f6282e5d4906a96abd6e5 | [
"Apache-2.0"
] | 90 | 2016-11-29T07:21:58.000Z | 2022-03-29T06:41:01.000Z | 2016-12-03/manager.py | awesome-archive/mp | e904c7c858fe5df5859f6282e5d4906a96abd6e5 | [
"Apache-2.0"
] | 1 | 2020-01-19T10:03:12.000Z | 2020-01-19T10:03:12.000Z | 2016-12-03/manager.py | awesome-archive/mp | e904c7c858fe5df5859f6282e5d4906a96abd6e5 | [
"Apache-2.0"
] | 34 | 2016-12-07T09:26:34.000Z | 2020-09-14T01:57:11.000Z | from multiprocessing import Manager, Process
def modify(ns, lproxy, dproxy):
    """Mutate all three shared proxies from a child process.

    Squares the namespace attribute ``a``, appends 'b' and 'c' to the
    list proxy, and zeroes out the dict proxy's 'b' entry.
    """
    ns.a = ns.a ** 2
    lproxy.extend(['b', 'c'])
    dproxy['b'] = 0
manager = Manager()
ns = manager.Namespace()
ns.a = 1
lproxy = manager.list()
lproxy.append('a')
dproxy = manager.dict()
dproxy['b'] = 2
p = Process(target=modify, args=(ns, lproxy, dproxy))
p.start()
print 'PID:', p.pid
p.join()
print ns.a
print lproxy
print dproxy
| 16.56 | 53 | 0.652174 |
f7b79ddef6d013b580ada4bc829633a496a93dd1 | 4,199 | py | Python | stanCode_Projects/break_out_game/extension.py | calvin0123/sc-projects | 88ac98e3543a1399387c2033f36dc5c6b86c488c | [
"MIT"
] | null | null | null | stanCode_Projects/break_out_game/extension.py | calvin0123/sc-projects | 88ac98e3543a1399387c2033f36dc5c6b86c488c | [
"MIT"
] | null | null | null | stanCode_Projects/break_out_game/extension.py | calvin0123/sc-projects | 88ac98e3543a1399387c2033f36dc5c6b86c488c | [
"MIT"
] | 1 | 2021-12-04T22:50:23.000Z | 2021-12-04T22:50:23.000Z | """
stanCode Breakout Project
Adapted from Eric Roberts's Breakout by
Sonja Johnson-Yu, Kylie Jue, Nick Bowman,
and Jerry Liao
-----------------------------------------------
File : extension.py
Name : Calvin Chen
This file implements an extended version of the breakout game.
Known issue: once the score climbs past roughly 100, the ball
starts to move noticeably slower.
"""
from campy.gui.events.timer import pause
from breakoutgraphics import BreakoutGraphics
from campy.graphics.gobjects import GLabel, GRect, GOval
# Constant
FRAME_RATE = 1000 / 120  # pause between animation frames, in ms (120 fps).
NUM_LIVES = 3  # player's starting number of lives.

# Global Variables
lives = NUM_LIVES  # lives remaining in the current game
score = 0  # each removed brick adds 10 points (see main loop)
label_s = GLabel('Score: ' + str(score))  # on-screen score readout
label_l = GLabel('Lives: ' + str(lives))  # on-screen lives readout
def main():
    """Run the extended breakout game loop.

    Animates the ball and the falling bonus, handles paddle/brick
    collisions, tracks score and lives via on-screen labels, and ends the
    game on a win (all bricks cleared) or a loss (no lives left).
    """
    # NOTE(review): indentation was reconstructed from a whitespace-mangled
    # source; in particular the lives == 0 block is assumed to be nested
    # under graphics.die() -- verify against the original file.
    global score, lives
    graphics = BreakoutGraphics(title='extension_breakout')
    count_brick = 0  # bricks removed so far
    label_s.font = '-20'
    label_l.font = '-20'
    graphics.window.add(label_s, 0, label_s.height)
    graphics.window.add(label_l, 0, label_l.height + label_s.height)
    # graphics.extension_brick()
    bonus_count = 0  # paddle hits counted since the bonus paddle appeared
    while True:
        pause(FRAME_RATE)
        dx = graphics.get_dx()
        dy = graphics.get_dy()
        graphics.ball.move(dx, dy)
        # Create bonus's velocity and needed information / Bonus times.
        bonus_dx = graphics.get_b_dx()
        bonus_dy = graphics.get_b_dy()
        # bonus_count = 0
        graphics.bonus_1.move(bonus_dx, bonus_dy)
        bonus_obj = graphics.check_get_bonus()
        # Catching the falling bonus with the paddle starts bonus time:
        # the regular paddle is augmented with the wider new_paddle.
        if bonus_obj == graphics.paddle:
            graphics.window.remove(graphics.bonus_1)
            graphics.window.add(graphics.new_paddle)
        # Regular situation and bonus situation when ball hits paddle;
        # the dy >= 0 check prevents the ball sticking to the paddle.
        obj = graphics.check_for_collisions()
        if obj == graphics.paddle and dy >= 0:
            bonus_count += 1
            graphics.set_dy(-dy)
        if obj == graphics.new_paddle:
            graphics.set_dy(-dy)
            bonus_count += 1
        # Bonus time ends after more than 4 paddle hits.
        if bonus_count > 4:
            graphics.window.remove(graphics.new_paddle)
            bonus_count = 0
        # Brick hit: bounce, remove the brick, possibly spawn a bonus,
        # then add 10 points to the score.
        if obj is not graphics.paddle and obj is not None and obj is not label_l and obj is not label_s\
                and obj is not graphics.bonus_1 and obj is not graphics.new_paddle:
            graphics.set_dy(-dy)
            graphics.window.remove(obj)
            # This condition controls which bonus will be processed.
            if graphics.create_bonus() == 1:
                graphics.window.add(graphics.bonus_1, x=obj.x, y=obj.y)
            count_brick += 1
            score += 10
            label_s.text = 'Score: ' + str(score)
        # If the ball touches the window boundary, the ball will bounce back.
        if graphics.ball.x <= 0 or graphics.ball.x + graphics.ball.width >= graphics.window.width:
            graphics.set_dx(-dx)
        if graphics.ball.y <= 0:
            graphics.set_dy(-dy)
        # Ball fell below the paddle: lose a life and reset, or end the game.
        if graphics.die():
            graphics.window.remove(graphics.ball)
            lives -= 1
            label_l.text = 'Lives: ' + str(lives)
            # No lives left: show game-over label and stop the loop.
            if lives == 0:
                game_over = GLabel('Game Over! QAQ')
                game_over.font = '-40'
                graphics.window.add(game_over, graphics.window.width/6, graphics.window.height/2)
                break
            graphics.start_again()
        # All bricks removed: the player wins and the game ends.
        if count_brick == graphics.brick_needed_remove():
            graphics.window.remove(graphics.ball)
            break
# Run the game only when this file is executed as a script.
if __name__ == '__main__':
    main()
| 38.87963 | 111 | 0.601095 |
4639948051d4738ccf2e827aa99e80dd623c6e77 | 3,494 | py | Python | CorrectingImageOrientation/utilities/nn/cnn/minivggnet.py | gopalchettri/DeepLearningPractitioners | 475e764471e1a4280ae94fea5d4a56f7a7c40b29 | [
"Apache-2.0"
] | null | null | null | CorrectingImageOrientation/utilities/nn/cnn/minivggnet.py | gopalchettri/DeepLearningPractitioners | 475e764471e1a4280ae94fea5d4a56f7a7c40b29 | [
"Apache-2.0"
] | null | null | null | CorrectingImageOrientation/utilities/nn/cnn/minivggnet.py | gopalchettri/DeepLearningPractitioners | 475e764471e1a4280ae94fea5d4a56f7a7c40b29 | [
"Apache-2.0"
] | null | null | null | # importing required libraries
from tensorflow.keras.models import Sequential
from tensorflow.keras.layers import BatchNormalization
from tensorflow.keras.layers import Conv2D
from tensorflow.keras.layers import MaxPool2D
from tensorflow.keras.layers import Activation
from tensorflow.keras.layers import Flatten
from tensorflow.keras.layers import Dropout
from tensorflow.keras.layers import Dense
from tensorflow.keras import backend as K
from tensorflow.python.keras.engine import input_spec
from tensorflow.python.keras.layers.pooling import MaxPooling1D, MaxPooling2D
from tensorflow.python.ops.gen_math_ops import mod
class MiniVGGNet:
    """Static factory for a small VGG-style CNN: two (CONV => RELU => BN) x 2
    => POOL blocks followed by one fully-connected layer and a softmax
    classifier.
    """

    @staticmethod
    def gpu_grow_memory():
        """Configure TensorFlow to allocate GPU memory on demand instead of
        reserving it all up front; warns when no GPU is visible.
        """
        import tensorflow as tf
        import warnings
        from tensorflow.compat.v1.keras.backend import set_session
        config = tf.compat.v1.ConfigProto()
        config.gpu_options.allow_growth = True  # dynamically grow the memory used on the GPU
        config.log_device_placement = True  # to log device placement (on which device the operation ran)
        sess = tf.compat.v1.Session(config=config)
        set_session(sess)
        physical_devices = tf.config.experimental.list_physical_devices('GPU')
        # NOTE: assert is stripped under `python -O`; kept as-is for
        # behavioral compatibility with the original implementation.
        assert len(physical_devices) > 0, "Not enough GPU hardware devices available"
        tf.config.experimental.set_memory_growth(physical_devices[0], True)
        if not tf.test.gpu_device_name():
            warnings.warn('No GPU found')
        else:
            print('Default GPU device: {}'.format(tf.test.gpu_device_name()))

    @staticmethod
    def build(width, height, depth, classes):
        """Assemble and return the (uncompiled) MiniVGGNet Keras model.

        :param width: input image width in pixels
        :param height: input image height in pixels
        :param depth: number of input channels
        :param classes: number of output classes for the softmax layer
        :return: a keras ``Sequential`` model
        """
        # Enable on-demand GPU memory growth before building the graph.
        MiniVGGNet.gpu_grow_memory()

        # Initialize the model, input shape and the channel dimension
        # (channels-last by default; BN axis -1 normalizes over channels).
        model = Sequential()
        input_shape = (height, width, depth)
        channel_dim = -1

        # BUG FIX: Keras returns 'channels_first' / 'channels_last'; the
        # original compared against 'channel_first', so this branch never ran.
        if K.image_data_format() == 'channels_first':
            input_shape = (depth, height, width)
            channel_dim = 1

        # First CONV => RELU => CONV => RELU => POOL layer set
        model.add(Conv2D(32, (3, 3), padding="same", input_shape=input_shape))
        model.add(Activation("relu"))
        model.add(BatchNormalization(axis=channel_dim))
        model.add(Conv2D(32, (3, 3), padding="same"))
        model.add(Activation("relu"))
        model.add(BatchNormalization(axis=channel_dim))
        model.add(MaxPooling2D(pool_size=(2, 2), strides=(2, 2)))
        model.add(Dropout(0.25))

        # Second CONV => RELU => CONV => RELU => POOL layer set
        model.add(Conv2D(64, (3, 3), padding='same'))
        model.add(Activation('relu'))
        model.add(BatchNormalization(axis=channel_dim))
        model.add(Conv2D(64, (3, 3), padding='same'))
        model.add(Activation('relu'))
        model.add(BatchNormalization(axis=channel_dim))
        model.add(MaxPooling2D(pool_size=(2, 2), strides=(2, 2)))
        model.add(Dropout(0.25))

        # First (and only) set of FC => RELU layers
        model.add(Flatten())
        model.add(Dense(512))
        model.add(Activation('relu'))
        model.add(BatchNormalization())
        model.add(Dropout(0.5))

        # Softmax classifier
        model.add(Dense(classes))
        model.add(Activation('softmax'))

        # Return the constructed network architecture
        return model
a592a7e232617a9cb7891f617f1c18ea018e5e11 | 1,439 | py | Python | vespene/config/workers.py | Conan-Kudo/vespene | 9e9977523f45586e1326ccd77d8cc0cb10591a07 | [
"Apache-2.0"
] | 680 | 2018-10-29T12:12:10.000Z | 2019-04-27T09:52:58.000Z | vespene/config/workers.py | Conan-Kudo/vespene | 9e9977523f45586e1326ccd77d8cc0cb10591a07 | [
"Apache-2.0"
] | 110 | 2018-10-29T12:33:34.000Z | 2019-02-14T02:31:43.000Z | vespene/config/workers.py | Conan-Kudo/vespene | 9e9977523f45586e1326ccd77d8cc0cb10591a07 | [
"Apache-2.0"
] | 92 | 2018-10-29T12:21:12.000Z | 2019-06-08T11:08:08.000Z | # Copyright 2018, Michael DeHaan LLC
# License: Apache License Version 2.0
# ---------------------------------------------------------------------------
# workers.py - configuration related to worker setup. This file *CAN* be
# different per worker.
# ---------------------------------------------------------------------------
# Root directory under which per-build working directories are created.
BUILD_ROOT = "/tmp/vespene/buildroot/"

# ---------------------------------------------------------------------------
# all of these settings deal with serving up the buildroot.

# to disable file serving through Django you can set this to False
FILESERVING_ENABLED = True
FILESERVING_PORT = 8000

# leave this blank and the system will try to figure this out
# the setup scripts will usually set this to `hostname` though if
# unset the registration code will run `hostname`
FILESERVING_HOSTNAME = ""
FILESERVING_URL="/srv"

# if you disable fileserving but are using triggers to copy build roots
# to some other location (perhaps NFS served up by a web server or an FTP
# server) you can set FILESERVING_ENABLED to False and the following pattern will
# be used instead to generate web links in the main GUI. If this pattern
# is set the links to the built-in fileserver will NOT be rendered, but this will
# not turn on the fileserver. To do that, set FILESERVING_ENABLED to False also
# BUILDROOT_WEB_LINK = "http://build-fileserver.example.com/builds/{{ build.id }}"
BUILDROOT_WEB_LINK = ""
| 41.114286 | 86 | 0.642113 |
36782faa6c709f4b7e84876792ee200d25e69bc2 | 320 | py | Python | flickrsmartsync_oauth/__main__.py | inspector2211/flickrsmartsync_oauth | bf5e3ccb4089c491512e458d48cad851df4ccd55 | [
"MIT"
] | 4 | 2017-07-11T20:26:15.000Z | 2018-02-26T05:59:03.000Z | flickrsmartsync_oauth/__main__.py | inspector2211/flickrsmartsync_oauth | bf5e3ccb4089c491512e458d48cad851df4ccd55 | [
"MIT"
] | 2 | 2017-07-20T13:21:02.000Z | 2017-11-27T05:35:42.000Z | flickrsmartsync_oauth/__main__.py | inspector2211/flickrsmartsync_oauth | bf5e3ccb4089c491512e458d48cad851df4ccd55 | [
"MIT"
] | 4 | 2017-09-01T20:45:39.000Z | 2018-08-26T14:15:01.000Z | import os
import sys
if __name__ == "__main__":  # if running from source
    # Make the package importable when this file is executed directly from
    # a source checkout (without installation).
    module_dir = os.path.dirname(__file__)
    parent_dir = os.path.dirname(module_dir)
    sys.path.insert(0, parent_dir)  # insert parent directory at start of module search path

# NOTE(review): indentation was reconstructed; the import and the main()
# call are assumed to be at module level (standard for a package
# __main__.py run via `python -m`) -- confirm against the original file.
import flickrsmartsync_oauth

flickrsmartsync_oauth.main()
| 32 | 91 | 0.75 |
9c4b7085db299f3c21d470ecb81df57f6a54f2d6 | 11,142 | py | Python | api/base/exceptions.py | mattclark/osf.io | 7a362ceb6af3393d3d0423aafef336ee13277303 | [
"Apache-2.0"
] | null | null | null | api/base/exceptions.py | mattclark/osf.io | 7a362ceb6af3393d3d0423aafef336ee13277303 | [
"Apache-2.0"
] | null | null | null | api/base/exceptions.py | mattclark/osf.io | 7a362ceb6af3393d3d0423aafef336ee13277303 | [
"Apache-2.0"
] | null | null | null | import httplib as http
from django.utils.translation import ugettext_lazy as _
from rest_framework import status
from rest_framework.exceptions import APIException, AuthenticationFailed
def get_resource_object_member(error_key, context):
    """Return the JSON API resource-object member ('relationships' or
    'attributes') that the errored serializer field belongs under.
    """
    from api.base.serializers import RelationshipField
    declared_fields = context['view'].serializer_class._declared_fields
    field = declared_fields.get(error_key, None)
    # If the field cannot be found (read/write operations may use different
    # serializers, or the field is added in __init__), assume the error was
    # in 'attributes' by default.
    if not field:
        return 'attributes'
    if isinstance(field, RelationshipField):
        return 'relationships'
    return 'attributes'
def dict_error_formatting(errors, context, index=None):
    """
    Formats all dictionary error messages for both single and bulk requests.

    :param dict errors: mapping of field name (or JSON API error member) to
        an error message or list of error messages
    :param context: DRF view context; used to decide whether a field error
        points at 'attributes' or 'relationships'
    :param index: position of the offending resource in a bulk request, or
        None for a single-resource request
    :return: list of JSON API error objects
    """
    formatted_error_list = []

    # Error objects may have the following members. Title and id removed to
    # avoid clash with "title" and "id" field errors.
    top_level_error_keys = ['links', 'status', 'code', 'detail', 'source', 'meta']

    # Resource objects must contain at least 'id' and 'type'
    resource_object_identifiers = ['type', 'id']

    # Bulk requests prefix each error pointer with the resource's index.
    if index is None:
        index = ''
    else:
        index = str(index) + '/'

    for error_key, error_description in errors.items():
        if isinstance(error_description, basestring):
            error_description = [error_description]

        if error_key in top_level_error_keys:
            formatted_error_list.extend({error_key: description} for description in error_description)
        elif error_key in resource_object_identifiers:
            formatted_error_list.extend([{'source': {'pointer': '/data/{}'.format(index) + error_key}, 'detail': reason} for reason in error_description])
        elif error_key == 'non_field_errors':
            # BUG FIX: this previously used a dict comprehension
            # ({'detail': d for d in ...}) wrapped in a list, which collapsed
            # all messages into a single dict keeping only the last one.
            formatted_error_list.extend([{'detail': description} for description in error_description])
        else:
            formatted_error_list.extend([{'source': {'pointer': '/data/{}{}/'.format(index, get_resource_object_member(error_key, context)) + error_key}, 'detail': reason} for reason in error_description])

    return formatted_error_list
def json_api_exception_handler(exc, context):
    """
    Custom exception handler that returns errors object as an array
    """

    # We're deliberately not stripping html from exception detail.
    # This creates potential vulnerabilities to script injection attacks
    # when returning raw user input into error messages.
    #
    # Fortunately, Django's templating language strips markup by default,
    # but if our frontend changes we may lose that protection.
    # TODO: write tests to ensure our html frontend strips html

    # Import inside method to avoid errors when the OSF is loaded without Django
    from rest_framework.views import exception_handler
    response = exception_handler(exc, context)

    errors = []

    if response:
        message = response.data

        # Signal the client, via response header, that a one-time password
        # is required to authenticate.
        if isinstance(exc, TwoFactorRequiredError):
            response['X-OSF-OTP'] = 'required; app'

        if isinstance(exc, JSONAPIException):
            # The exception already carries JSON API source/meta members.
            errors.extend([{'source': exc.source or {}, 'detail': exc.detail, 'meta': exc.meta or {}}])
        elif isinstance(message, dict):
            errors.extend(dict_error_formatting(message, context, index=None))
        else:
            if isinstance(message, basestring):
                message = [message]
            for index, error in enumerate(message):
                if isinstance(error, dict):
                    # Bulk request: index identifies the offending resource.
                    errors.extend(dict_error_formatting(error, context, index=index))
                else:
                    errors.append({'detail': error})

        response.data = {'errors': errors}

    return response
def format_validation_error(e):
    """Flatten a Django ValidationError's ``message_dict`` into a list of
    human-readable messages, one per field (using each field's first error).
    """
    return [
        'There was an issue with the {} field. {}'.format(field, messages[0])
        for field, messages in e.message_dict.items()
    ]
class EndpointNotImplementedError(APIException):
    # 501: the route exists but its handler has not been written yet.
    status_code = status.HTTP_501_NOT_IMPLEMENTED
    default_detail = _('This endpoint is not yet implemented.')


class ServiceUnavailableError(APIException):
    # 503: transient outage; clients may retry later.
    status_code = status.HTTP_503_SERVICE_UNAVAILABLE
    default_detail = _('Service is unavailable at this time.')
class JSONAPIException(APIException):
    """Inherits from the base DRF API exception and adds extra metadata to support JSONAPI error objects

    :param str detail: a human-readable explanation specific to this occurrence of the problem
    :param dict source: A dictionary containing references to the source of the error.
        See http://jsonapi.org/format/#error-objects.
        Example: ``source={'pointer': '/data/attributes/title'}``
    :param dict meta: A meta object containing non-standard meta info about the error.
    """
    status_code = status.HTTP_400_BAD_REQUEST

    def __init__(self, detail=None, source=None, meta=None):
        super(JSONAPIException, self).__init__(detail=detail)
        # Consumed by json_api_exception_handler when serializing the error.
        self.source = source
        self.meta = meta
# Custom Exceptions the Django Rest Framework does not support
class Gone(JSONAPIException):
    # 410: the resource existed but has been deleted or retracted.
    status_code = status.HTTP_410_GONE
    default_detail = ('The requested resource is no longer available.')


def UserGone(user):
    """Build a Gone error for a departed user account, exposing the former
    profile details (names and profile image) in the error's meta object.
    """
    return Gone(
        detail='The requested user is no longer available.',
        meta={
            'full_name': user.fullname, 'family_name': user.family_name, 'given_name': user.given_name,
            'middle_names': user.middle_names, 'profile_image': user.profile_image_url(),
        },
    )


class Conflict(JSONAPIException):
    # 409: the request conflicts with server state (e.g. type/id mismatch).
    status_code = status.HTTP_409_CONFLICT
    default_detail = ('Resource identifier does not match server endpoint.')
class JSONAPIParameterException(JSONAPIException):
    """JSON API error whose source is a query-string parameter."""
    def __init__(self, detail=None, parameter=None):
        source = {
            'parameter': parameter,
        }
        super(JSONAPIParameterException, self).__init__(detail=detail, source=source)


class JSONAPIAttributeException(JSONAPIException):
    """JSON API error whose source pointer is a resource-object attribute."""
    def __init__(self, detail=None, attribute=None):
        source = {
            'pointer': '/data/attributes/{}'.format(attribute),
        }
        super(JSONAPIAttributeException, self).__init__(detail=detail, source=source)
class InvalidQueryStringError(JSONAPIParameterException):
    """Raised when client passes an invalid value to a query string parameter."""
    default_detail = 'Query string contains an invalid value.'
    status_code = http.BAD_REQUEST


class InvalidFilterOperator(JSONAPIParameterException):
    """Raised when client passes an invalid operator to a query param filter."""
    status_code = http.BAD_REQUEST

    def __init__(self, detail=None, value=None, valid_operators=('eq', 'lt', 'lte', 'gt', 'gte', 'contains', 'icontains')):
        # Build a default message listing the accepted operators only when
        # the caller supplied the offending value but no custom detail.
        if value and not detail:
            valid_operators = ', '.join(valid_operators)
            detail = "Value '{0}' is not a supported filter operator; use one of {1}.".format(
                value,
                valid_operators,
            )
        super(InvalidFilterOperator, self).__init__(detail=detail, parameter='filter')


class InvalidFilterValue(JSONAPIParameterException):
    """Raised when client passes an invalid value to a query param filter."""
    status_code = http.BAD_REQUEST

    def __init__(self, detail=None, value=None, field_type=None):
        if not detail:
            detail = "Value '{0}' is not valid".format(value)
            # Mention the field's type in the message when it is known.
            if field_type:
                detail += ' for a filter on type {0}'.format(
                    field_type,
                )
            detail += '.'
        super(InvalidFilterValue, self).__init__(detail=detail, parameter='filter')


class InvalidFilterError(JSONAPIParameterException):
    """Raised when client passes a malformed filter in the query string."""
    default_detail = _('Query string contains a malformed filter.')
    status_code = http.BAD_REQUEST

    def __init__(self, detail=None):
        super(InvalidFilterError, self).__init__(detail=detail, parameter='filter')


class InvalidFilterComparisonType(JSONAPIParameterException):
    """Raised when client tries to filter on a field that is not a date or number type"""
    default_detail = _('Comparison operators are only supported for dates and numbers.')
    status_code = http.BAD_REQUEST


class InvalidFilterMatchType(JSONAPIParameterException):
    """Raised when client tries to do a match filter on a field that is not a string or a list"""
    default_detail = _('Match operators are only supported for strings and lists.')
    status_code = http.BAD_REQUEST


class InvalidFilterFieldError(JSONAPIParameterException):
    """Raised when client tries to filter on a field that is not supported"""
    default_detail = _('Query contained one or more filters for invalid fields.')
    status_code = http.BAD_REQUEST

    def __init__(self, detail=None, parameter=None, value=None):
        if value and not detail:
            detail = "Value '{}' is not a filterable field.".format(value)
        super(InvalidFilterFieldError, self).__init__(detail=detail, parameter=parameter)
class UnconfirmedAccountError(APIException):
    # Account exists but the confirmation email has not been acted on.
    status_code = 400
    default_detail = _('Please confirm your account before using the API.')


class UnclaimedAccountError(APIException):
    # Account was created as an unregistered contributor and never claimed.
    status_code = 400
    default_detail = _('Please claim your account before using the API.')


class DeactivatedAccountError(APIException):
    status_code = 400
    default_detail = _('Making API requests with credentials associated with a deactivated account is not allowed.')


class MergedAccountError(APIException):
    status_code = 400
    default_detail = _('Making API requests with credentials associated with a merged account is not allowed.')


class InvalidAccountError(APIException):
    status_code = 400
    default_detail = _('Making API requests with credentials associated with an invalid account is not allowed.')


class TwoFactorRequiredError(AuthenticationFailed):
    # json_api_exception_handler adds the 'X-OSF-OTP: required; app' response
    # header when this exception reaches it.
    default_detail = _('Must specify two-factor authentication OTP code.')
    pass


class InvalidModelValueError(JSONAPIException):
    status_code = 400
    default_detail = _('Invalid value in POST/PUT/PATCH request.')


class TargetNotSupportedError(Exception):
    """Raised if a TargetField is used for a resource that isn't supported."""
    pass


class RelationshipPostMakesNoChanges(Exception):
    """Raised when a post is on a relationship that already exists, so view can return a 204"""
    pass


class NonDescendantNodeError(APIException):
    """Raised when a client attempts to associate a non-descendant node with a view only link"""
    status_code = 400
    default_detail = _('The node {0} cannot be affiliated with this View Only Link because the node you\'re trying to affiliate is not descended from the node that the View Only Link is attached to.')

    def __init__(self, node_id, detail=None):
        # Interpolate the offending node id into the default message.
        if not detail:
            detail = self.default_detail.format(node_id)
        super(NonDescendantNodeError, self).__init__(detail=detail)
| 39.232394 | 205 | 0.705529 |
56e7913a82135cd4924211448e72394c413b0ecc | 16,279 | py | Python | src/rna/steps/coverage.py | nellore/rail | 8827a7a0b1195c04cddf6d7a23ec7885619716ca | [
"MIT"
] | 78 | 2015-04-13T19:50:16.000Z | 2022-02-02T10:39:42.000Z | src/rna/steps/coverage.py | nellore/rail | 8827a7a0b1195c04cddf6d7a23ec7885619716ca | [
"MIT"
] | 37 | 2015-04-30T16:31:27.000Z | 2020-11-30T18:58:55.000Z | src/rna/steps/coverage.py | nellore/rail | 8827a7a0b1195c04cddf6d7a23ec7885619716ca | [
"MIT"
] | 17 | 2015-04-12T00:02:57.000Z | 2021-12-01T05:16:15.000Z | #!/usr/bin/env python
"""
Rail-RNA-coverage
Follows Rail-RNA-coverage_pre
Reduce step in MapReduce pipelines that outputs normalization factors for
sample coverages, both for all primary alignments and for "uniquely mapping"
reads. Each normalization factor is computed from the histogram of
base coverage (horizontal axis: number of exonic chunks covering given base;
vertical axis: number of bases covered) as the (k*100)-th coverage
percentile, where k is input by the user via the command-line parameter
--percentile. bigwig files encoding coverage per sample are also written to a
specified destination, local or remote. Rail-RNA-coverage_post merely collects
the normalization factors and writes them to a file.
Input (read from stdin)
----------------------------
Tab-delimited input tuple columns:
1. Sample index OR mean[.RNAME] OR median[.RNAME]
2. Number string representing reference name (RNAME in SAM format; see
BowtieIndexReference class in bowtie_index for conversion information)
3. Position
4. Coverage counting all primary alignments (that is, the number of called ECs
in the sample overlapping the position) (mean or median of sample coverages
normalized by --library-size if field 1 specifies)
5. Coverage counting only "uniquely mapping" reads; here, unique mappings are
defined according to the criteria implied in --tie-margin (mean or median
of sample coverages normalized by --library-size if field 1 specifies)
Input is partitioned first by sample index, then sorted by fields 2-3.
Hadoop output (written to stdout)
----------------------------
Tab-delimited output tuple columns (only 1 per sample):
1. '4' to denote the output is a normalization factor
2. Sample index
3. '\x1c'
4. '\x1c'
5. '\x1c'
6. Normalization factor for coverage vector counting all primary alignments
7. Normalization factor for coverage vector counting only "uniquely mapping"
reads
Other output (written to directory specified by command-line parameter --out)
----------------------------
Two bigWig files per sample: one encodes coverage of genome by exonic parts of
primary alignments, and the other encodes coverage of genome by uniquely
mapping reads
"""
import os
import sys
import site
import argparse
import subprocess
import threading
if '--test' in sys.argv:
    # This pipeline step ships no unit tests; exit successfully so the
    # harness treats `--test` as a no-op.
    print("No unit tests")
    #unittest.main(argv=[sys.argv[0]])
    sys.exit(0)

# Directory three levels up from this file (the source root).
base_path = os.path.abspath(
    os.path.dirname(os.path.dirname(os.path.dirname(
        os.path.realpath(__file__)))
    )
)

# Make rna/utils and the source root importable.
utils_path = os.path.join(base_path, 'rna', 'utils')
site.addsitedir(utils_path)
site.addsitedir(base_path)
import manifest
import bowtie
import bowtie_index
import filemover
import itertools
from collections import defaultdict
from dooplicity.tools import xstream, register_cleanup, make_temp_dir
from dooplicity.counters import Counter
from dooplicity.ansibles import Url
import tempdel
from re import search
# Print file's docstring if -h is invoked
parser = argparse.ArgumentParser(description=__doc__,
            formatter_class=argparse.RawDescriptionHelpFormatter)
parser.add_argument(
    '--percentile', metavar='FRACTION', type=float, required=False,
    default=0.75,
    help='For a given sample, the per-position percentile to extract as the '
         'normalization factor')
parser.add_argument(
    '--out', metavar='URL', type=str, required=False, default='.',
    help='URL to which bigwig coverage output should be written. '
         'DEFAULT IS CURRENT WORKING DIRECTORY, NOT STDOUT')
parser.add_argument('--manifest', type=str, required=False,
        default='manifest',
        help='Path to manifest file')
parser.add_argument(
    '--bigwig-exe', type=str, required=False, default='bedGraphToBigWig',
    help='Location of the Kent Tools bedGraphToBigWig executable')
parser.add_argument('--bigwig-basename', type=str, required=False, default='',
    help='The basename (excluding path) of all bigwig output. Basename is'
         'followed by ".[sample label].bw"; if basename is an empty string, '
         'a sample\'s bigwig filename is simply [sample label].bw')
parser.add_argument(
    '--keep-alive', action='store_const', const=True, default=False,
    help='Prints reporter:status:alive messages to stderr to keep EMR '
         'task alive')
parser.add_argument(
    '--verbose', action='store_const', const=True, default=False,
    help='Print out extra debugging statements'
)

counter = Counter('coverage')
# Flush per-task counters even if the step exits early.
register_cleanup(counter.flush)

# Let helper modules contribute their own command-line options
# before parsing.
filemover.add_args(parser)
bowtie.add_args(parser)
tempdel.add_args(parser)
args = parser.parse_args()
# Start keep_alive thread immediately
if args.keep_alive:
    from dooplicity.tools import KeepAlive
    keep_alive_thread = KeepAlive(sys.stderr)
    keep_alive_thread.start()

if args.keep_alive:
    # NOTE(review): this wrapper is only defined when --keep-alive is set;
    # presumably later code polls is_alive() while bedGraphToBigWig runs so
    # the keep-alive heartbeat keeps flowing -- confirm downstream usage.
    class BedTobigwigThread(threading.Thread):
        """ Wrapper class for bedtobigwig that permits polling for completion.
        """
        def __init__(self, command_list):
            super(BedTobigwigThread, self).__init__()
            self.command_list = command_list
            # Holds bedGraphToBigWig's exit status after run() finishes;
            # None until then (despite the name, this is a return code,
            # not a Popen object).
            self.bedtobigwig_process = None

        def run(self):
            # Blocks until the subprocess exits; stdout/stderr are routed to
            # this task's stderr so Hadoop captures them.
            self.bedtobigwig_process = subprocess.Popen(self.command_list,
                                                        stdout=sys.stderr,
                                                        stderr=sys.stderr).wait()
def percentile(histogram, percentile=0.75):
    """ Given histogram, computes desired percentile.

        histogram: a dictionary whose keys are integers
            and whose values are frequencies.
        percentile: a fraction on [0, 1] specifying that the
            (percentile*100)-th percentile should be returned.

        Return value: Integer key closest to desired percentile.
    """
    total = sum(histogram.values())
    # Walk coverage values from highest to lowest until more than the top
    # (1 - percentile) fraction of bases has been accumulated.
    threshold = (1.0 - percentile) * total
    accumulated = 0
    for coverage in sorted(histogram, reverse=True):
        accumulated += histogram[coverage]
        assert accumulated <= total
        if accumulated > threshold:
            return coverage
    raise RuntimeError('Percentile computation should have terminated '
                       'mid-loop.')
import time
start_time = time.time()
# Scratch directory for intermediate bed files; removed by the cleanup
# handler registered just below.
temp_dir_path = make_temp_dir(tempdel.silentexpandvars(args.scratch))
# Clean up after script
register_cleanup(tempdel.remove_temporary_directories, [temp_dir_path])
# Two bedGraphs per sample: coverage by all primary alignments, and
# coverage by uniquely mapping reads only.
bed_filename = os.path.join(temp_dir_path, 'temp.bed')
unique_bed_filename = os.path.join(temp_dir_path, 'temp.unique.bed')
if args.verbose:
    print >>sys.stderr, 'Writing to temporary beds %s and %s.' % (
            bed_filename,
            unique_bed_filename
        )
output_filename, output_url = None, None
'''Make RNAME lengths available from reference FASTA so SAM header can be
formed; reference_index.rname_lengths[RNAME] is the length of RNAME.'''
reference_index = bowtie_index.BowtieIndexReference(
                        os.path.expandvars(args.bowtie_idx)
                    )
# For mapping sample indices back to original sample labels
manifest_object = manifest.LabelsAndIndices(
                        os.path.expandvars(args.manifest)
                    )
# Create file with chromosome sizes for bedTobigwig
sizes_filename = os.path.join(temp_dir_path, 'chrom.sizes')
if args.verbose:
    print >>sys.stderr, 'Sizes file: %s .' % sizes_filename
with open(sizes_filename, 'w') as sizes_stream:
    for rname in reference_index.rname_lengths:
        print >>sizes_stream, '%s %d' % (rname,
            reference_index.rname_lengths[rname])
input_line_count, output_line_count = 0, 0
output_url = Url(args.out)
if output_url.is_local:
    # Set up destination directory
    # NOTE(review): this bare except silently swallows *any* makedirs
    # failure (permissions, bad path), not just "directory exists";
    # errors would then surface later on the first write attempt.
    try: os.makedirs(output_url.to_url())
    except: pass
mover = filemover.FileMover(args=args)
# UCSC track header template written at the top of each bedGraph.
track_line = ('track type=bedGraph name="{name}" '
              'description="{description}" visibility=full '
              'color=227,29,118 altColor=0,179,220 priority=400')
for (sample_index,), xpartition in xstream(sys.stdin, 1):
counter.add('partitions')
real_sample = True
try:
sample_label = manifest_object.index_to_label[sample_index]
except KeyError:
# It's a nonref track, a mean, or a median
real_sample = False
if search('\.[ATCGN]', sample_index):
try:
sample_label = (
manifest_object.index_to_label[sample_index[:-2]]
+ sample_index[-2:]
)
except KeyError:
raise RuntimeError('Sample label index "%s" was not recorded.'
% sample_index)
elif 'mean' in sample_index or 'median' in sample_index:
sample_label = sample_index
else:
raise RuntimeError('Sample label index "%s" was not recorded.'
% sample_index)
'''Dictionary for which each key is a coverage (i.e., number of ECs
covering a given base). Its corresponding value is the number of bases with
that coverage.'''
coverage_histogram, unique_coverage_histogram = (
defaultdict(int),
defaultdict(int)
)
with open(bed_filename, 'w') as bed_stream, \
open(unique_bed_filename, 'w') as unique_bed_stream:
print >>bed_stream, track_line.format(name=sample_label,
description=(
'base coverage by '
'primary alignments'
))
print >>unique_bed_stream, track_line.format(name=sample_label,
description=(
'base coverage by '
'uniquely mapping '
'reads')
)
for rname, coverages in itertools.groupby(xpartition,
key=lambda val: val[0]):
try:
rname = reference_index.l_string_to_rname[rname]
except KeyError:
raise RuntimeError(
'RNAME number string "%s" not in Bowtie index.'
% rname
)
(last_pos, last_coverage,
last_unique_pos, last_unique_coverage) = 0, 0, 0, 0
for _, pos, coverage, unique_coverage in coverages:
# BED is zero-indexed, while input is 1-indexed
pos, coverage, unique_coverage = (
int(pos) - 1, float(coverage), float(unique_coverage)
)
input_line_count += 1
if coverage != last_coverage:
counter.add('bed_lines')
print >>bed_stream, '%s\t%d\t%d\t%08f' % (rname,
last_pos, pos, last_coverage)
if last_coverage != 0:
# Only care about nonzero-coverage regions
coverage_histogram[last_coverage] += pos - last_pos
last_pos, last_coverage = pos, coverage
if unique_coverage != last_unique_coverage:
counter.add('unique_bed_lines')
print >>unique_bed_stream, '%s\t%d\t%d\t%08f' % (rname,
last_unique_pos, pos, last_unique_coverage)
if last_unique_coverage != 0:
# Only care about nonzero-coverage regions
unique_coverage_histogram[last_unique_coverage] \
+= pos - last_unique_pos
last_unique_pos, last_unique_coverage = (
pos,
unique_coverage
)
if last_pos != reference_index.rname_lengths[rname]:
# Print coverage up to end of strand
counter.add('bed_lines')
print >>bed_stream, '%s\t%d\t%d\t%08f' % (
rname,
last_pos,
reference_index.rname_lengths[rname],
coverage
)
if last_unique_pos != reference_index.rname_lengths[rname]:
# Print unique coverage up to end of strand
counter.add('unique_bed_lines')
print >>unique_bed_stream, '%s\t%d\t%d\t%08f' % (
rname,
last_unique_pos,
reference_index.rname_lengths[rname],
unique_coverage
)
'''Output normalization factors iff working with real sample'''
if real_sample:
auc = sum(coverage_value * coverage_histogram[coverage_value]
for coverage_value in coverage_histogram)
unique_auc = sum(coverage_value
* unique_coverage_histogram[coverage_value]
for coverage_value in unique_coverage_histogram)
print '4\t%s\t\x1c\t\x1c\t\x1c\t%d\t%d\t%d\t%d' % (sample_index,
percentile(coverage_histogram,
args.percentile),
percentile(
unique_coverage_histogram,
args.percentile),
auc,
unique_auc
)
output_line_count += 1
# Write bigwigs
assert os.path.exists(sizes_filename)
bigwig_filenames = [((args.bigwig_basename + '.')
if args.bigwig_basename != '' else '')
+ sample_label]*2
bigwig_filenames[0] += '.bw'
bigwig_filenames[1] += '.unique.bw'
if output_url.is_local:
# Write directly to local destination
bigwig_file_paths = [os.path.join(args.out, bigwig_filename)
for bigwig_filename in bigwig_filenames]
else:
# Write to temporary directory, and later upload to URL
bigwig_file_paths = [os.path.join(temp_dir_path, bigwig_filename)
for bigwig_filename in bigwig_filenames]
bigwig_commands = [[args.bigwig_exe, bed_filename, sizes_filename,
bigwig_file_paths[0]],
[args.bigwig_exe, unique_bed_filename, sizes_filename,
bigwig_file_paths[1]]]
for i, bigwig_command in enumerate(bigwig_commands):
if args.verbose:
print >>sys.stderr, 'Writing bigwig with command %s .' \
% ' '.join(bigwig_command)
counter.add('call_bedgraphtobigwig')
bedtobigwig_process = subprocess.Popen(
bigwig_command,
stderr=sys.stderr,
stdout=sys.stderr,
bufsize=-1
)
bedtobigwig_process.wait()
if bedtobigwig_process.returncode:
raise RuntimeError('bedgraphtobigwig process failed w/ '
'exitlevel %d.'
% bedtobigwig_process.returncode)
if args.verbose:
print >>sys.stderr, ('bedTobigwig command %s succeeded .'
% ' '.join(bigwig_command))
if not output_url.is_local:
# bigwig must be uploaded to URL and deleted
counter.add('files_moved')
mover.put(bigwig_file_paths[i],
output_url.plus(bigwig_filenames[i]))
os.remove(bigwig_file_paths[i])
print >>sys.stderr, 'DONE with coverage.py; in/out=%d/%d; time=%0.3f s' \
% (input_line_count, output_line_count,
time.time() - start_time)
| 44.6 | 79 | 0.589287 |
6143f4240406325e29bfd0cc43d0ecf55edd6f5e | 16,349 | py | Python | spyder/plugins/editor/panels/scrollflag.py | keepiru/spyder | b9d4bf1719fcd0d3c209163e82a67abad8129ffc | [
"MIT"
] | null | null | null | spyder/plugins/editor/panels/scrollflag.py | keepiru/spyder | b9d4bf1719fcd0d3c209163e82a67abad8129ffc | [
"MIT"
] | null | null | null | spyder/plugins/editor/panels/scrollflag.py | keepiru/spyder | b9d4bf1719fcd0d3c209163e82a67abad8129ffc | [
"MIT"
] | null | null | null | # -*- coding: utf-8 -*-
#
# Copyright © Spyder Project Contributors
# Licensed under the terms of the MIT License
# (see spyder/__init__.py for details)
"""
This module contains the Scroll Flag panel
"""
# Standard library imports
from __future__ import division
from math import ceil
# Third party imports
from qtpy.QtCore import QSize, Qt, QTimer
from qtpy.QtGui import QPainter, QColor, QCursor
from qtpy.QtWidgets import (QStyle, QStyleOptionSlider, QApplication)
# Local imports
from spyder.api.panel import Panel
from spyder.plugins.completion.languageserver import DiagnosticSeverity
REFRESH_RATE = 1000
class ScrollFlagArea(Panel):
"""Source code editor's scroll flag area"""
WIDTH = 12
FLAGS_DX = 4
FLAGS_DY = 2
    def __init__(self, editor):
        """Set up the flag area panel attached to *editor*."""
        Panel.__init__(self, editor)
        self.setAttribute(Qt.WA_OpaquePaintEvent)
        self.scrollable = True
        self.setMouseTracking(True)
        # Define some attributes to be used for unit testing.
        self._unit_testing = False
        self._range_indicator_is_visible = False
        self._alt_key_is_down = False
        # Define permanent Qt colors that are needed for painting the flags
        # and the slider range.
        self._facecolors = {
            'warning': QColor(editor.warning_color),
            'error': QColor(editor.error_color),
            'todo': QColor(editor.todo_color),
            'breakpoint': QColor(editor.breakpoint_color),
            'occurrence': QColor(editor.occurrence_color),
            'found_results': QColor(editor.found_results_color)
        }
        # Flag outlines are slightly darker shades of the face colors.
        self._edgecolors = {key: color.darker(120) for
                            key, color in self._facecolors.items()}
        self._slider_range_color = QColor(Qt.gray)
        self._slider_range_color.setAlphaF(.85)
        self._slider_range_brush = QColor(Qt.gray)
        self._slider_range_brush.setAlphaF(.5)
        # Keep the panel in sync with editor events (focus, keys, alt-mouse,
        # flag changes, theme changes).
        editor.sig_focus_changed.connect(self.update)
        editor.sig_key_pressed.connect(self.keyPressEvent)
        editor.sig_key_released.connect(self.keyReleaseEvent)
        editor.sig_alt_left_mouse_pressed.connect(self.mousePressEvent)
        editor.sig_alt_mouse_moved.connect(self.mouseMoveEvent)
        editor.sig_leave_out.connect(self.update)
        editor.sig_flags_changed.connect(self.delayed_update_flags)
        editor.sig_theme_colors_changed.connect(self.update_flag_colors)
        # Single-shot timer used by delayed_update_flags to rate-limit full
        # flag rebuilds to at most one per REFRESH_RATE milliseconds.
        self._update_list_timer = QTimer(self)
        self._update_list_timer.setSingleShot(True)
        self._update_list_timer.timeout.connect(self.update_flags)
        # Caches of (block, userData) pairs rebuilt by update_flags and
        # consumed by paintEvent.
        self._todo_list = []
        self._code_analysis_list = []
        self._breakpoint_list = []
    @property
    def slider(self):
        """This property holds whether the vertical scrollbar is visible."""
        # Painting behavior depends on this: flags are scaled to the
        # scrollbar groove only when the scrollbar is actually shown.
        return self.editor.verticalScrollBar().isVisible()
def sizeHint(self):
"""Override Qt method"""
return QSize(self.WIDTH, 0)
def update_flag_colors(self, color_dict):
"""
Update the permanent Qt colors that are used for painting the flags
and the slider range with the new colors defined in the given dict.
"""
for name, color in color_dict.items():
self._facecolors[name] = QColor(color)
self._edgecolors[name] = self._facecolors[name].darker(120)
    def delayed_update_flags(self):
        """
        Schedule a rate-limited rebuild of the flags lists.

        This function is called every time a flag is changed.
        There is no need of updating the flags thousands of time by second,
        as it is quite resources-heavy. This limits the rebuilds to at most
        one every REFRESH_RATE milliseconds via a single-shot timer.
        """
        if self._update_list_timer.isActive():
            # A rebuild is already pending; it will pick up this change too.
            return
        self._update_list_timer.start(REFRESH_RATE)
    def update_flags(self):
        """
        Update flags list.
        This parses the entire file, which can take a lot of time for
        large files. Save all the flags in lists for painting during
        paint events.
        """
        # Rebuild the three caches from scratch; paintEvent reads them.
        self._todo_list = []
        self._code_analysis_list = []
        self._breakpoint_list = []
        editor = self.editor
        # Walk every text block (line) in the document, front to back.
        block = editor.document().firstBlock()
        while block.isValid():
            # Parse all lines in the file looking for something to flag.
            data = block.userData()
            if data:
                if data.code_analysis:
                    self._code_analysis_list.append((block, data))
                if data.todo:
                    self._todo_list.append((block, data))
                if data.breakpoint:
                    self._breakpoint_list.append((block, data))
            block = block.next()
        # Request a repaint now that the caches are fresh.
        self.update()
def paintEvent(self, event):
"""
Override Qt method.
Painting the scroll flag area
There is two cases:
- The scroll bar is moving, in which case paint all flags.
- The scroll bar is not moving, only paint flags corresponding
to visible lines.
"""
# The area in which the slider handle of the scrollbar may move.
groove_rect = self.get_scrollbar_groove_rect()
# The scrollbar's scale factor ratio between pixel span height and
# value span height
scale_factor = groove_rect.height() / self.get_scrollbar_value_height()
# The vertical offset of the scroll flag area relative to the
# top of the text editor.
offset = groove_rect.y()
# Note that we calculate the pixel metrics required to draw the flags
# here instead of using the convenience methods of the ScrollFlagArea
# for performance reason.
rect_x = ceil(self.FLAGS_DX / 2)
rect_w = self.WIDTH - self.FLAGS_DX
rect_h = self.FLAGS_DY
# Fill the whole painting area
painter = QPainter(self)
painter.fillRect(event.rect(), self.editor.sideareas_color)
editor = self.editor
# Check if the slider is visible
paint_local = not bool(self.slider)
# Define compute_flag_ypos to position the flags:
if not paint_local:
# Paint flags for the entire document
last_line = editor.document().lastBlock().firstLineNumber()
# The 0.5 offset is used to align the flags with the center of
# their corresponding text edit block before scaling.
first_y_pos = self.value_to_position(
0.5, scale_factor, offset) - self.FLAGS_DY / 2
last_y_pos = self.value_to_position(
last_line + 0.5, scale_factor, offset) - self.FLAGS_DY / 2
def compute_flag_ypos(block):
line_number = block.firstLineNumber()
frac = line_number / last_line
pos = first_y_pos + frac * (last_y_pos - first_y_pos)
return ceil(pos)
else:
# Only paint flags for visible lines
visible_lines = [val[1] for val in editor.visible_blocks]
if not visible_lines:
# Nothing to do
return
min_line = min(visible_lines)
max_line = max(visible_lines)
def compute_flag_ypos(block):
# When the vertical scrollbar is not visible, the flags are
# vertically aligned with the center of their corresponding
# text block with no scaling.
top = editor.blockBoundingGeometry(block).translated(
editor.contentOffset()).top()
bottom = top + editor.blockBoundingRect(block).height()
middle = (top + bottom)/2
return ceil(middle-self.FLAGS_DY/2)
# Paint all the code analysis flags
for block, data in self._code_analysis_list:
if paint_local and not (
min_line <= block.blockNumber() + 1 <= max_line):
# No need to paint flags outside of the window
continue
# Paint the warnings
for source, code, severity, message in data.code_analysis:
error = severity == DiagnosticSeverity.ERROR
if error:
painter.setBrush(self._facecolors['error'])
painter.setPen(self._edgecolors['error'])
break
else:
painter.setBrush(self._facecolors['warning'])
painter.setPen(self._edgecolors['warning'])
rect_y = compute_flag_ypos(block)
painter.drawRect(rect_x, rect_y, rect_w, rect_h)
# Paint all the todo flags
for block, data in self._todo_list:
if paint_local and not (
min_line <= block.blockNumber() + 1 <= max_line):
continue
# Paint the todos
rect_y = compute_flag_ypos(block)
painter.setBrush(self._facecolors['todo'])
painter.setPen(self._edgecolors['todo'])
painter.drawRect(rect_x, rect_y, rect_w, rect_h)
# Paint all the breakpoints flags
for block, data in self._breakpoint_list:
if paint_local and not (
min_line <= block.blockNumber() + 1 <= max_line):
continue
# Paint the breakpoints
rect_y = compute_flag_ypos(block)
painter.setBrush(self._facecolors['breakpoint'])
painter.setPen(self._edgecolors['breakpoint'])
painter.drawRect(rect_x, rect_y, rect_w, rect_h)
# Paint the occurrences of selected word flags
if editor.occurrences:
painter.setBrush(self._facecolors['occurrence'])
painter.setPen(self._edgecolors['occurrence'])
for line_number in editor.occurrences:
if paint_local and not (
min_line <= line_number + 1 <= max_line):
continue
block = editor.document().findBlockByNumber(line_number)
rect_y = compute_flag_ypos(block)
painter.drawRect(rect_x, rect_y, rect_w, rect_h)
# Paint the found results flags
if editor.found_results:
painter.setBrush(self._facecolors['found_results'])
painter.setPen(self._edgecolors['found_results'])
for line_number in editor.found_results:
if paint_local and not (
min_line <= line_number + 1 <= max_line):
continue
block = editor.document().findBlockByNumber(line_number)
rect_y = compute_flag_ypos(block)
painter.drawRect(rect_x, rect_y, rect_w, rect_h)
# Paint the slider range
if not self._unit_testing:
alt = QApplication.queryKeyboardModifiers() & Qt.AltModifier
else:
alt = self._alt_key_is_down
if self.slider:
cursor_pos = self.mapFromGlobal(QCursor().pos())
is_over_self = self.rect().contains(cursor_pos)
is_over_editor = editor.rect().contains(
editor.mapFromGlobal(QCursor().pos()))
# We use QRect.contains instead of QWidget.underMouse method to
# determined if the cursor is over the editor or the flag scrollbar
# because the later gives a wrong result when a mouse button
# is pressed.
if is_over_self or (alt and is_over_editor):
painter.setPen(self._slider_range_color)
painter.setBrush(self._slider_range_brush)
x, y, width, height = self.make_slider_range(
cursor_pos, scale_factor, offset, groove_rect)
painter.drawRect(x, y, width, height)
self._range_indicator_is_visible = True
else:
self._range_indicator_is_visible = False
def enterEvent(self, event):
"""Override Qt method"""
self.update()
def leaveEvent(self, event):
"""Override Qt method"""
self.update()
def mouseMoveEvent(self, event):
"""Override Qt method"""
self.update()
    def mousePressEvent(self, event):
        """Override Qt method"""
        # Left-clicking the flag area jumps the editor there, but only when
        # the vertical scrollbar (and thus scrolling) is available.
        if self.slider and event.button() == Qt.LeftButton:
            vsb = self.editor.verticalScrollBar()
            value = self.position_to_value(event.pos().y())
            # Subtract half a page so the clicked position ends up centered
            # in the viewport rather than at its top.
            vsb.setValue(value-vsb.pageStep()/2)
def keyReleaseEvent(self, event):
"""Override Qt method."""
if event.key() == Qt.Key_Alt:
self._alt_key_is_down = False
self.update()
def keyPressEvent(self, event):
"""Override Qt method"""
if event.key() == Qt.Key_Alt:
self._alt_key_is_down = True
self.update()
def get_vertical_offset(self):
"""
Return the vertical offset of the scroll flag area relative to the
top of the text editor.
"""
groove_rect = self.get_scrollbar_groove_rect()
return groove_rect.y()
def get_slider_min_height(self):
"""
Return the minimum height of the slider range based on that set for
the scroll bar's slider.
"""
return QApplication.instance().style().pixelMetric(
QStyle.PM_ScrollBarSliderMin)
def get_scrollbar_groove_rect(self):
"""Return the area in which the slider handle may move."""
vsb = self.editor.verticalScrollBar()
style = QApplication.instance().style()
opt = QStyleOptionSlider()
vsb.initStyleOption(opt)
# Get the area in which the slider handle may move.
groove_rect = style.subControlRect(
QStyle.CC_ScrollBar, opt, QStyle.SC_ScrollBarGroove, self)
return groove_rect
def get_scrollbar_position_height(self):
"""Return the pixel span height of the scrollbar area in which
the slider handle may move"""
groove_rect = self.get_scrollbar_groove_rect()
return float(groove_rect.height())
def get_scrollbar_value_height(self):
"""Return the value span height of the scrollbar"""
vsb = self.editor.verticalScrollBar()
return vsb.maximum() - vsb.minimum() + vsb.pageStep()
def get_scale_factor(self):
"""Return scrollbar's scale factor:
ratio between pixel span height and value span height"""
return (self.get_scrollbar_position_height() /
self.get_scrollbar_value_height())
def value_to_position(self, y, scale_factor, offset):
"""Convert value to position in pixels"""
vsb = self.editor.verticalScrollBar()
return (y - vsb.minimum()) * scale_factor + offset
def position_to_value(self, y):
"""Convert position in pixels to value"""
vsb = self.editor.verticalScrollBar()
offset = self.get_vertical_offset()
return vsb.minimum() + max([0, (y - offset) / self.get_scale_factor()])
    def make_slider_range(self, cursor_pos, scale_factor, offset, groove_rect):
        """
        Return the slider x and y positions and the slider width and height.
        """
        # The slider range indicator position follows the mouse vertical
        # position while its height corresponds to the part of the file that
        # is currently visible on screen.
        vsb = self.editor.verticalScrollBar()
        slider_height = self.value_to_position(
            vsb.pageStep(), scale_factor, offset) - offset
        # Never let the indicator shrink below the scrollbar's own minimum
        # slider size, so it stays visible for very long documents.
        slider_height = max(slider_height, self.get_slider_min_height())
        # Calculate the minimum and maximum y-value to constrain the slider
        # range indicator position to the height span of the scrollbar area
        # where the slider may move.
        min_ypos = offset
        max_ypos = groove_rect.height() + offset - slider_height
        # Determine the bounded y-position of the slider rect, centered on
        # the cursor when possible.
        slider_y = max(min_ypos, min(max_ypos,
                                     ceil(cursor_pos.y()-slider_height/2)))
        return 1, slider_y, self.WIDTH - 2, slider_height
def wheelEvent(self, event):
"""Override Qt method"""
self.editor.wheelEvent(event)
def set_enabled(self, state):
"""Toggle scroll flag area visibility"""
self.enabled = state
self.setVisible(state)
| 38.92619 | 79 | 0.616735 |
1537ab9c6a637e93db96a660f893de4dd2dd8ec1 | 152 | py | Python | hackerrank/itertools/product.py | alex-d-bondarev/learn-python | b119cb1e09a57e93abc73383c014cc8ceba18acf | [
"MIT"
] | null | null | null | hackerrank/itertools/product.py | alex-d-bondarev/learn-python | b119cb1e09a57e93abc73383c014cc8ceba18acf | [
"MIT"
] | null | null | null | hackerrank/itertools/product.py | alex-d-bondarev/learn-python | b119cb1e09a57e93abc73383c014cc8ceba18acf | [
"MIT"
] | null | null | null | from itertools import product
if __name__ == '__main__':
    # Read two lines of space-separated integers from stdin and print the
    # Cartesian product of the two sequences as space-separated tuples,
    # e.g. inputs "1 2" and "3 4" yield "(1, 3) (1, 4) (2, 3) (2, 4)".
    A = map(int, input().split())
    B = map(int, input().split())
print(*product(A, B)) | 21.714286 | 33 | 0.605263 |
2381257cc21168f167f669c9af4f2c3997dd554a | 2,793 | py | Python | asphalt/wamp/component.py | Asphalt-framework/asphalt-wamp | 2eb3f36cf537af8caedac080b34e661b24445923 | [
"Apache-2.0"
] | 10 | 2015-11-27T17:48:23.000Z | 2017-10-30T04:28:36.000Z | asphalt/wamp/component.py | Asphalt-framework/asphalt-wamp | 2eb3f36cf537af8caedac080b34e661b24445923 | [
"Apache-2.0"
] | null | null | null | asphalt/wamp/component.py | Asphalt-framework/asphalt-wamp | 2eb3f36cf537af8caedac080b34e661b24445923 | [
"Apache-2.0"
] | 2 | 2017-01-31T07:39:43.000Z | 2017-03-02T05:56:04.000Z | import logging
from typing import Any, Dict, Tuple, Optional, List # noqa: F401
import txaio
from asphalt.core import Component, Context, merge_config, context_teardown
from async_generator import yield_
from typeguard import check_argument_types
from asphalt.exceptions import ExtrasProvider
from asphalt.wamp.client import WAMPClient
from asphalt.wamp.extras_providers import WAMPExtrasProvider
__all__ = ('WAMPComponent',)
logger = logging.getLogger(__name__)
class WAMPComponent(Component):
"""Creates one or more :class:`~asphalt.wamp.client.WAMPClient` resources."""
def __init__(self, clients: Dict[str, Dict[str, Any]] = None, **default_client_args) -> None:
"""
If the ``clients`` argument is omitted or empty, a default client with the context
attribute ``wamp`` will be created.
If ``clients`` is defined, any keyword arguments to the component become default options
for the clients.
If you wish to change the context attribute of a WAMP client, use the ``context_attr``
argument.
:param clients: a dictionary of resource name ⭢ :class:`.WAMPClient` constructor arguments
:param default_client_args: :class:`.WAMPClient` base options for all clients or arguments
for the default client if ``clients`` is not specified
"""
assert check_argument_types()
if not clients:
default_client_args.setdefault('context_attr', 'wamp')
clients = {'default': default_client_args}
self.clients = [] # type: List[Tuple[str, Optional[str], WAMPClient]]
for resource_name, config in clients.items():
config = merge_config(default_client_args, config)
context_attr = config.pop('context_attr', resource_name)
client = WAMPClient(**config)
self.clients.append((resource_name, context_attr, client))
@context_teardown
async def start(self, ctx: Context):
# Autobahn uses txaio to bridge the API gap between asyncio and Twisted so we need to set
# it up for asyncio here
txaio.use_asyncio()
txaio.config.loop = ctx.loop
ctx.add_resource(WAMPExtrasProvider(), 'wamp', types=[ExtrasProvider])
for resource_name, context_attr, client in self.clients:
await client.start(ctx)
ctx.add_resource(client, resource_name, context_attr)
logger.info('Configured WAMP client (%s / ctx.%s; host=%s; port=%d; realm=%s)',
resource_name, context_attr, client.host, client.port, client.realm)
await yield_()
for resource_name, context_attr, client in self.clients:
await client.stop()
logger.info('Shut down WAMP client (%s)', resource_name)
| 40.478261 | 98 | 0.67884 |
492d661208b3c4ea5ecda9849f0d2d518b6fd8ef | 571 | py | Python | exercises/fundamentals/test/test_mammal.py | bmazey/summer2020 | 0e943c356677f1d0ec55da5fe4b30a54b37507cd | [
"MIT"
] | null | null | null | exercises/fundamentals/test/test_mammal.py | bmazey/summer2020 | 0e943c356677f1d0ec55da5fe4b30a54b37507cd | [
"MIT"
] | null | null | null | exercises/fundamentals/test/test_mammal.py | bmazey/summer2020 | 0e943c356677f1d0ec55da5fe4b30a54b37507cd | [
"MIT"
] | null | null | null | from exercises.fundamentals.src.zoo.mammal import Mammal
def test_is_warm_blooded():
    """Each mammal reports itself as warm-blooded, prefixed by its name."""
    for species in ('microbat', 'mandrill', 'culpeo'):
        animal = Mammal(species, True, True)
        assert animal.is_warm_blooded() == species + ' is warm-blooded!'
def test_have_sweat_glands():
    """A mammal reports that it has sweat glands, prefixed by its name."""
    animal = Mammal('dolphin', True, True)
    assert animal.have_sweat_glands() == 'dolphin has sweat glands!'
| 33.588235 | 69 | 0.714536 |
7c45e16ad8a65f0c6e76e5c6e67411842f48b6a8 | 218 | py | Python | __init__.py | jackievilladsen/dynspec | 87101b188d7891644d848e781bca00f044fe3f0b | [
"MIT"
] | 2 | 2019-05-01T00:34:28.000Z | 2021-02-10T09:18:10.000Z | __init__.py | jackievilladsen/dynspec | 87101b188d7891644d848e781bca00f044fe3f0b | [
"MIT"
] | null | null | null | __init__.py | jackievilladsen/dynspec | 87101b188d7891644d848e781bca00f044fe3f0b | [
"MIT"
] | null | null | null | __all__ = ['tbavg','extract_dynspec','__plot__','custom_colormap']
from numpy import load
def load_dict(savefile):
    """Load a dictionary that was saved with ``numpy.save``.

    ``numpy.save`` wraps a dict in a 0-d object array; since NumPy
    1.16.3, ``numpy.load`` refuses to unpickle object arrays unless
    ``allow_pickle=True`` is passed, so the old bare ``load(savefile)``
    raises ValueError on modern NumPy. ``.item()`` unwraps the 0-d
    array back into the stored dict, equivalent to the previous
    ``reshape(1)[0]`` trick.

    savefile: path to a ``.npy`` file created by ``numpy.save(path, d)``
    Return value: the dictionary stored in the file.
    """
    return load(savefile, allow_pickle=True).item()
| 27.25 | 66 | 0.738532 |
d627f8b67d4a63be93d08b15e84ab0ace4d23b3c | 1,940 | py | Python | data/p4VQE/R4/benchmark/startCirq793.py | UCLA-SEAL/QDiff | d968cbc47fe926b7f88b4adf10490f1edd6f8819 | [
"BSD-3-Clause"
] | null | null | null | data/p4VQE/R4/benchmark/startCirq793.py | UCLA-SEAL/QDiff | d968cbc47fe926b7f88b4adf10490f1edd6f8819 | [
"BSD-3-Clause"
] | null | null | null | data/p4VQE/R4/benchmark/startCirq793.py | UCLA-SEAL/QDiff | d968cbc47fe926b7f88b4adf10490f1edd6f8819 | [
"BSD-3-Clause"
] | null | null | null | #!/usr/bin/env python
# -*- coding: utf-8 -*-
# @Time : 5/15/20 4:49 PM
# @File : grover.py
# qubit number=4
# total number=14
import cirq
import cirq.google as cg
from typing import Optional
import sys
from math import log2
import numpy as np
#thatsNoCode
from cirq.contrib.svg import SVGCircuit
# Symbols for the rotation angles in the QAOA circuit.
def make_circuit(n: int, input_qubit):
    """Assemble the fixed benchmark circuit on the given qubits.

    *n* is accepted for interface uniformity with sibling benchmarks but
    is not used below. NOTE(review): several gates appear in adjacent
    self-canceling pairs (SWAP/SWAP on qubits 1,0; Y/Y on qubit 3; X/X
    on qubit 1 -- identity up to global phase), which is consistent with
    an auto-generated differential-testing benchmark. A measurement of
    all qubits keyed 'result' is appended at the end.
    """
    c = cirq.Circuit()  # circuit begin
    c.append(cirq.H.on(input_qubit[1])) # number=2
    c.append(cirq.H.on(input_qubit[2])) # number=3
    c.append(cirq.Y.on(input_qubit[3])) # number=5
    c.append(cirq.SWAP.on(input_qubit[1],input_qubit[0])) # number=6
    c.append(cirq.SWAP.on(input_qubit[1],input_qubit[0])) # number=7
    c.append(cirq.Y.on(input_qubit[3])) # number=8
    c.append(cirq.Y.on(input_qubit[3])) # number=9
    c.append(cirq.SWAP.on(input_qubit[1],input_qubit[0])) # number=10
    c.append(cirq.SWAP.on(input_qubit[1],input_qubit[0])) # number=11
    c.append(cirq.X.on(input_qubit[1])) # number=12
    c.append(cirq.X.on(input_qubit[1])) # number=13
    # circuit end
    c.append(cirq.measure(*input_qubit, key='result'))
    return c
def bitstring(bits):
    """Render an iterable of bit-like values as a string of '0'/'1' digits."""
    digits = [str(int(bit)) for bit in bits]
    return ''.join(digits)
if __name__ == '__main__':
    # Build the 4-qubit benchmark circuit on a column of grid qubits.
    qubit_count = 4
    input_qubits = [cirq.GridQubit(i, 0) for i in range(qubit_count)]
    circuit = make_circuit(qubit_count,input_qubits)
    # Compile to the Sycamore gate set using the sqrt-iswap optimizer.
    circuit = cg.optimized_for_sycamore(circuit, optimizer_type='sqrt_iswap')
    circuit_sample_count =2000
    # Simulate and histogram the measurement outcomes by bitstring.
    simulator = cirq.Simulator()
    result = simulator.run(circuit, repetitions=circuit_sample_count)
    frequencies = result.histogram(key='result', fold_func=bitstring)
    # Dump frequencies, circuit length and the circuit itself to a CSV-named
    # text file (path is relative to the current working directory).
    writefile = open("../data/startCirq793.csv","w+")
    print(format(frequencies),file=writefile)
    print("results end", file=writefile)
    print(circuit.__len__(), file=writefile)
    print(circuit,file=writefile)
writefile.close() | 29.393939 | 77 | 0.692784 |
3cab08629b30111114e01484ab49b594bbdb9dd0 | 3,948 | py | Python | apt_repoman/connection.py | memory/repoman | 4c5cdfba85afcab5a1219fa5629abc457de27ed5 | [
"Apache-2.0"
] | 1 | 2017-07-01T21:46:40.000Z | 2017-07-01T21:46:40.000Z | apt_repoman/connection.py | memory/repoman | 4c5cdfba85afcab5a1219fa5629abc457de27ed5 | [
"Apache-2.0"
] | null | null | null | apt_repoman/connection.py | memory/repoman | 4c5cdfba85afcab5a1219fa5629abc457de27ed5 | [
"Apache-2.0"
] | 6 | 2017-07-13T21:41:14.000Z | 2020-08-07T19:40:25.000Z |
# stdlib imports
import logging
import time
# pypi imports
from boto3 import Session
LOG = logging.getLogger(__name__)
class Connection(object):
def __init__(self, role_arn='', profile_name='', region=None):
self._log = LOG or logging.getLogger(__name__)
self.role_arn = role_arn
self.profile_name = profile_name
self.region = region
self._s3 = None
self._sdb = None
self._sts = None
self._iam = None
self._sns = None
self._session = None
self._caller_id = None
@property
def session(self):
'''Set our object's self._session attribute to a boto3
session object. If profile_name is set, use it to pull a
specific credentials profile from ~/.aws/credentials,
otherwise use the default credentials path.
If role_arn is set, use the first session object to
assume the role, and then overwrite self._session with
a new session object created using the role credentials.'''
if self._session is None:
self._session = self.get_session()
return self._session
@property
def s3(self):
if self._s3 is None:
self._s3 = self.get_resource('s3')
return self._s3
@property
def sdb(self):
if self._sdb is None:
self._sdb = self.get_client('sdb')
return self._sdb
@property
def sts(self):
if self._sts is None:
self._sts = self.get_client('sts')
return self._sts
@property
def iam(self):
if self._iam is None:
self._iam = self.get_client('iam')
return self._iam
@property
def sns(self):
if self._sns is None:
self._sns = self.get_client('sns')
return self._sns
@property
def caller_id(self):
if self._caller_id is None:
self._caller_id = self.sts.get_caller_identity()['Arn']
return self._caller_id
def get_session(self):
if self.profile_name:
self._log.info(
'using AWS credential profile %s', self.profile_name)
try:
kwargs = {'profile_name': self.profile_name}
if self.region:
kwargs['region_name'] = self.region
session = Session(**kwargs)
except Exception as ex:
self._log.fatal(
'Could not connect to AWS using profile %s: %s',
self.profile_name, ex)
raise
else:
self._log.debug(
'getting an AWS session with the default provider')
kwargs = {}
if self.region:
kwargs['region_name'] = self.region
session = Session(**kwargs)
if self.role_arn:
self._log.info(
'attempting to assume STS self.role %s', self.role_arn)
try:
self.role_creds = session.client('sts').assume_role(
RoleArn=self.role_arn,
RoleSessionName='repoman-%s' % time.time(),
DurationSeconds=3600)['Credentials']
except Exception as ex:
self._log.fatal(
'Could not assume self.role %s: %s',
self.role_arn, ex)
raise
kwargs = {
'aws_access_key_id': self.role_creds['AccessKeyId'],
'aws_secret_access_key': self.role_creds['SecretAccessKey'],
'aws_session_token': self.role_creds['SessionToken']}
if self.region:
kwargs['region_name'] = self.region
session = Session(**kwargs)
return session
def get_client(self, service_name):
return self.session.client(service_name)
def get_resource(self, service_name):
    """Return a resource for ``service_name`` from the cached :attr:`session`."""
    return self.session.resource(service_name)
| 31.584 | 76 | 0.563323 |
502d57dff9f935a1fde08efc0fe73238061be9b9 | 5,046 | py | Python | src/tests/ftest/pool/multiple_creates_test.py | vatelzh/daos | 3aca9ae033946ca24179ba0a180c0b8422cd2738 | [
"Apache-2.0"
] | 1 | 2019-11-28T07:26:38.000Z | 2019-11-28T07:26:38.000Z | src/tests/ftest/pool/multiple_creates_test.py | vatelzh/daos | 3aca9ae033946ca24179ba0a180c0b8422cd2738 | [
"Apache-2.0"
] | 52 | 2019-12-04T05:47:10.000Z | 2020-06-09T03:26:12.000Z | src/tests/ftest/pool/multiple_creates_test.py | vatelzh/daos | 3aca9ae033946ca24179ba0a180c0b8422cd2738 | [
"Apache-2.0"
] | 8 | 2019-12-04T08:26:00.000Z | 2020-06-09T07:40:11.000Z | #!/usr/bin/python
'''
(C) Copyright 2018-2019 Intel Corporation.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
GOVERNMENT LICENSE RIGHTS-OPEN SOURCE SOFTWARE
The Government's rights to use, modify, reproduce, release, perform, display,
or disclose this software are subject to the terms of the Apache License as
provided in Contract No. B609815.
Any reproduction of computer software, computer software documentation, or
portions thereof marked with this legend must also reproduce the markings.
'''
from __future__ import print_function
from apricot import TestWithServers
from command_utils import CommandFailure
import check_for_pool
class MultipleCreatesTest(TestWithServers):
    """
    Tests DAOS pool creation, calling it repeatedly one after another

    :avocado: recursive
    """

    def create_pool(self):
        """Create a pool, failing the test on a command error.

        Returns:
            The newly created (unconnected) pool object.
        """
        try:
            # Returning from inside the try avoids the original's NameError
            # risk: ``pool`` was referenced after the except path ran.
            return self.get_pool(connect=False)
        except CommandFailure as error:
            # The original message contained a stray line continuation that
            # injected raw indentation whitespace before the final period.
            self.fail(
                "Expecting to pass but test has failed with error "
                "'{}'.\n".format(error))

    def verify_pool(self, host, uuid):
        """Verify the pool exists on the given host.

        Args:
            host (str): Server host name
            uuid (str): Pool UUID to verify
        """
        if check_for_pool.check_for_pool(host, uuid.lower()):
            self.fail("Pool {0} not found on host {1}.".format(uuid, host))

    def _create_and_verify_pools(self, count):
        """Create ``count`` pools and verify each one on every server host.

        Shared implementation for test_create_two/test_create_three, which
        previously duplicated this loop verbatim.

        Args:
            count (int): number of pools to create.
        """
        self.pool = [self.create_pool() for _ in range(count)]
        for pool in self.pool:
            print("uuid is {0}\n".format(pool.uuid))
        for host in self.hostlist_servers:
            for pool in self.pool:
                self.verify_pool(host, pool.uuid)

    def test_create_one(self):
        """
        Test issuing a single pool create commands at once.

        :avocado: tags=all,pool,smoke,pr,small,createone
        """
        self.pool = self.create_pool()
        print("uuid is {0}\n".format(self.pool.uuid))
        # Single-pool variant historically only checks the first server host.
        host = self.hostlist_servers[0]
        self.verify_pool(host, self.pool.uuid)

    def test_create_two(self):
        """
        Test issuing multiple pool create commands at once.

        :avocado: tags=all,pool,smoke,pr,small,createtwo
        """
        self._create_and_verify_pools(2)

    def test_create_three(self):
        """
        Test issuing multiple pool create commands at once.

        :avocado: tags=all,pool,pr,small,createthree
        """
        self._create_and_verify_pools(3)

    # test_create_five was removed (it was already commented out): it shelled
    # out via orterun and test environments do not always have enough memory
    # to run it.  See VCS history for the original implementation.
| 32.980392 | 79 | 0.601467 |
539929170c60458d45328b54d37d8713afe38ab8 | 13,888 | py | Python | value_iteration/cartpole.py | milutter/value_iteration | c2e68af2d8dbc37c4f0d396e9c620c06c3dd0cf6 | [
"MIT"
] | 2 | 2021-11-10T12:25:28.000Z | 2021-11-25T08:17:48.000Z | value_iteration/cartpole.py | milutter/value_iteration | c2e68af2d8dbc37c4f0d396e9c620c06c3dd0cf6 | [
"MIT"
] | null | null | null | value_iteration/cartpole.py | milutter/value_iteration | c2e68af2d8dbc37c4f0d396e9c620c06c3dd0cf6 | [
"MIT"
] | 1 | 2021-11-10T12:25:16.000Z | 2021-11-10T12:25:16.000Z | import numpy as np
import torch
from value_iteration.pendulum import BaseSystem
from value_iteration.cost_functions import ArcTangent, SineQuadraticCost, BarrierCost
# Evaluated once at import time; default for the ``cuda=`` constructor kwargs.
CUDA_AVAILABLE = torch.cuda.is_available()
class Cartpole(BaseSystem):
    """Cart-pole dynamics model for value iteration.

    State layout is ``(x, theta, x_dot, theta_dot)`` (see :attr:`labels`),
    with ``theta = 0`` meaning the pendulum points upward.  The model is
    control-affine: ``x_dot = a(x) + B(x) u`` (see :meth:`dyn`).

    NOTE(review): indentation was lost in this dump; nesting below is the
    straightforward reconstruction — confirm against upstream.
    """

    name = "Cartpole"
    labels = ('x', 'theta', 'x_dot', 'theta_dot')

    def __init__(self, cuda=CUDA_AVAILABLE, **kwargs):
        """Set up dimensions, limits, dynamics parameters and linearization.

        :param cuda: move model tensors to GPU (defaults to availability).
        :param kwargs: optional ``T`` (horizon, s) and ``dt`` (step, s).
        """
        super(Cartpole, self).__init__()

        # Define Duration:
        self.T = kwargs.get("T", 7.5)
        self.dt = kwargs.get("dt", 1./500.)

        # Define the System:
        self.n_state = 4
        self.n_dof = 2
        self.n_act = 1
        self.n_parameter = 5

        # Continuous Joints:
        # Right now only one continuous joint is supported
        self.wrap, self.wrap_i = True, 1

        # State Constraints:
        # theta = 0, means the pendulum is pointing upward
        self.x_target = torch.tensor([0.0, 0.0, 0.0, 0.0])
        self.x_start = torch.tensor([0.0, np.pi, 0.0, 0.0])
        self.x_start_var = torch.tensor([1.e-3, 5.e-2, 1.e-6, 1.e-6])
        self.x_lim = torch.tensor([0.5, np.pi, 5.0, 20.0])
        self.x_penalty = torch.tensor([0.4, 1.1 * np.pi, 1.1 * 5.0, 1.1 * 20.0])
        self.x_init = torch.tensor([0.15, np.pi, 0.01, 0.01])
        self.u_lim = torch.tensor([20., ])

        # Define dynamics:
        self.g = 9.81       # Gravitational acceleration [m/s^2]
        mc = 0.57           # Mass of the cart [kg]
        mp = 0.127          # Mass of the pole [kg]
        pl = 0.3365 / 2.    # Half of the pole length [m]
        Beq = 0.1           # Equivalent Viscous damping Coefficient 5.4
        Bp = 1.e-3          # Viscous coefficient at the pole 0.0024

        # Dynamics parameter (shape (1, n_parameter, 1)); min/max bound the
        # admissible disturbed parameters used in ``dyn(..., dtheta=...)``.
        self.theta = torch.tensor([mc, mp, pl, Beq, Bp]).view(1, self.n_parameter, 1)
        self.theta_min = 0.5 * torch.tensor([mc, mp, pl, Beq, Bp]).view(1, self.n_parameter, 1)
        self.theta_max = 1.5 * torch.tensor([mc, mp, pl, Beq, Bp]).view(1, self.n_parameter, 1)

        # Compute Linearized System about the upright target state:
        out = self.dyn(self.x_target, gradient=True)
        self.A = out[2].view(1, self.n_state, self.n_state).transpose(dim0=1, dim1=2).numpy()
        self.B = out[1].view(1, self.n_state, self.n_act).numpy()

        # Test Dynamics:
        self.check_dynamics()

        self.device = None
        # Explicit Cartpole.cuda/cpu so subclass overrides are not invoked
        # before their members exist.
        Cartpole.cuda(self) if cuda else Cartpole.cpu(self)

    def dyn(self, x, dtheta=None, gradient=False):
        """Evaluate the continuous-time dynamics at state(s) ``x``.

        :param x: states, numpy array or tensor, reshaped to (n, n_state, 1).
        :param dtheta: optional per-sample parameter disturbance, clipped to
            ``[theta_min, theta_max]``.
        :param gradient: additionally return analytic ``da/dx`` and ``dB/dx``.
        :return: ``(a, B)`` with shapes (n, n_state, 1) and
            (n, n_state, n_act), plus ``(dadx, dBdx)`` when ``gradient``;
            numpy arrays if ``x`` was numpy.
        """
        cat = torch.cat

        is_numpy = True if isinstance(x, np.ndarray) else False
        x = torch.from_numpy(x) if isinstance(x, np.ndarray) else x
        x = x.view(-1, self.n_state, 1)
        n_samples = x.shape[0]

        # Split state into positions / velocities and scalar components.
        q, q_dot = x[:, :self.n_dof], x[:, self.n_dof:]
        xc, th = x[:, 0].view(-1, 1, 1), x[:, 1].view(-1, 1, 1)
        x_dot, th_dot = x[:, 2].view(-1, 1, 1), x[:, 3].view(-1, 1, 1)
        sin_th, cos_th = torch.sin(th), torch.cos(th)
        ones_1, zeros_1, zeros_n_dof = torch.ones_like(th), torch.zeros_like(th), torch.zeros((n_samples, 2, 1)).to(x.device)

        # Update the dynamics parameters with disturbance:
        if dtheta is not None:
            dtheta = torch.from_numpy(dtheta).float() if isinstance(dtheta, np.ndarray) else dtheta
            dtheta = dtheta.view(n_samples, self.n_parameter, 1)
            theta = self.theta + dtheta
            theta = torch.min(torch.max(theta, self.theta_min), self.theta_max)
        else:
            theta = self.theta

        # Define mass matrix M = [[a, b], [b, c]]
        H_00 = (theta[:, 1:2] + theta[:, 0:1]) * ones_1
        H_01 = theta[:, 1:2] * theta[:, 2:3] * cos_th
        H_11 = theta[:, 1:2] * theta[:, 2:3] ** 2 * ones_1
        # H = cat([cat([H_00, H_01], dim=2), cat([H_01, H_11], dim=2)], dim=1)
        # Closed-form 2x2 inverse of the (symmetric) mass matrix.
        invH = cat([cat([H_11, -H_01], dim=2), cat([-H_01, H_00], dim=2)], dim=1) / (H_00 * H_11 - H_01 * H_01)

        # Calculate vector n = C(q, qd) + g(q):
        n = cat([-theta[:, 1:2] * theta[:, 2:3] * sin_th * th_dot**2,
                 -theta[:, 1:2] * theta[:, 2:3] * self.g * sin_th], dim=1)

        # Viscous friction forces on cart and pole.
        f = cat([-theta[:, 3:4] * x_dot, -theta[:, 4:5] * th_dot], dim=1)

        # Construct Dynamics: drift a and input matrix B (force acts on cart).
        a = cat([q_dot, torch.matmul(invH, f - n)], dim=1)
        B = cat([torch.zeros((n_samples, self.n_dof, 1)).to(x.device), invH[:, :, :1]], dim=1)

        assert a.shape == (n_samples, self.n_state, 1)
        assert B.shape == (n_samples, self.n_state, self.n_act)
        out = (a, B)

        if gradient:
            zeros_nxn = torch.zeros((n_samples, self.n_dof, self.n_dof)).to(x.device)
            ones_nxn = torch.ones((n_samples, self.n_dof, self.n_dof)).to(x.device)

            # d(inv H)/dq via  d(inv H) = -inv(H) dH inv(H).
            dH_00_dq = zeros_n_dof.view(n_samples, self.n_dof, 1, 1)
            dH_01_dq = cat([zeros_1.view((n_samples, 1, 1, 1)), (-theta[:, 1] * theta[:, 2] * sin_th).view((-1, 1, 1, 1))], dim=1)
            dH_11_dq = zeros_n_dof.view(n_samples, self.n_dof, 1, 1)
            dHdq = cat([cat([dH_00_dq, dH_01_dq], dim=3), cat([dH_01_dq, dH_11_dq], dim=3)], dim=2)
            dinvH_dq = -torch.matmul(invH.view(-1, 1, self.n_dof, self.n_dof), torch.matmul(dHdq, invH.view(-1, 1, self.n_dof, self.n_dof)))

            # Partials of n and f w.r.t. q and q_dot.
            dn_dx = zeros_n_dof.view(n_samples, 2, 1)
            dn_dth = cat([-theta[:, 1] * theta[:, 2] * cos_th * th_dot ** 2, -theta[:, 1] * theta[:, 2] * self.g * cos_th], dim=1)
            dn_dxd = zeros_n_dof
            dn_dthd = cat([-2. * theta[:, 1] * theta[:, 2] * sin_th * th_dot, zeros_1], dim=1)
            dn_dq = cat([dn_dx, dn_dth], dim=2)
            dn_dqd = cat([dn_dxd, dn_dthd], dim=2)
            df_dqd = cat([cat([-theta[:, 3] * ones_1, zeros_1], dim=1), cat([zeros_1, -theta[:, 4] * ones_1], dim=1)], dim=2)

            # Construct da/dx:
            A_00 = zeros_nxn
            A_01 = torch.eye(self.n_dof).view(1, self.n_dof, self.n_dof).to(x.device) * ones_nxn
            A_10 = torch.matmul(dinvH_dq, (f - n).view(-1, 1, self.n_dof, 1)).squeeze(-1).transpose(dim0=1, dim1=2) - torch.matmul(invH, dn_dq)
            A_11 = torch.matmul(invH, df_dqd - dn_dqd)
            dadx = cat([cat([A_00, A_01], dim=2), cat([A_10, A_11], dim=2)], dim=1).transpose(dim0=1, dim1=2)

            dBdx = cat([cat([zeros_nxn.view(n_samples, self.n_dof, self.n_dof, 1), dinvH_dq[:, :, :, :self.n_act]], dim=2),
                        torch.zeros(n_samples, self.n_dof, self.n_state, 1).to(x.device)], dim=1)

            assert dadx.shape == (n_samples, self.n_state, self.n_state,)
            assert dBdx.shape == (n_samples, self.n_state, self.n_state, self.n_act)
            out = (a, B, dadx, dBdx)

        if is_numpy:
            out = [array.numpy() for array in out]
        return out

    def grad_dyn_theta(self, x):
        """Gradients of the dynamics w.r.t. the 5 model parameters.

        Parameters are ordered ``[mc, mp, pl, Beq, Bp]`` (see ``__init__``).

        :param x: states, numpy array or tensor, reshaped to (n, n_state, 1).
        :return: ``(dadp, dBdp)`` with shapes (n, n_parameter, n_state) and
            (n, n_parameter, n_state, n_act); numpy if ``x`` was numpy.
        """
        cat = torch.cat

        is_numpy = True if isinstance(x, np.ndarray) else False
        x = torch.from_numpy(x) if isinstance(x, np.ndarray) else x
        x = x.view(-1, self.n_state, 1)
        n_samples = x.shape[0]

        xc, th = x[:, 0].view(-1, 1, 1), x[:, 1].view(-1, 1, 1)
        x_dot, th_dot = x[:, 2].view(-1, 1, 1), x[:, 3].view(-1, 1, 1)
        sin_th, cos_th = torch.sin(th), torch.cos(th)
        ones_1, zeros_1, zeros_n_dof = torch.ones_like(th), torch.zeros_like(th), torch.zeros((n_samples, 2, 1)).to(x.device)

        # Define mass matrix M = [[a, b], [b, c]]
        H_00 = (self.theta[:, 1] + self.theta[:, 0]) * ones_1
        H_01 = self.theta[:, 1] * self.theta[:, 2] * cos_th
        H_11 = self.theta[:, 1] * self.theta[:, 2] ** 2 * ones_1
        # H = cat([cat([H_00, H_01], dim=2), cat([H_01, H_11], dim=2)], dim=1)
        invH = cat([cat([H_11, -H_01], dim=2), cat([-H_01, H_00], dim=2)], dim=1) / (H_00 * H_11 - H_01 * H_01)

        # Calculate vector n = C(q, qd) + g(q):
        n = cat([-self.theta[:, 1] * self.theta[:, 2] * sin_th * th_dot**2,
                 -self.theta[:, 1] * self.theta[:, 2] * self.g * sin_th], dim=1).view(-1, 1, self.n_dof, 1)
        f = cat([-self.theta[:, 3] * x_dot, -self.theta[:, 4] * th_dot], dim=1).view(-1, 1, self.n_dof, 1)

        # Per-parameter partials of H, n and f (only nonzero entries filled).
        dHdp = torch.zeros(n_samples, self.n_parameter, self.n_dof, self.n_dof).to(x.device)
        dndp = torch.zeros(n_samples, self.n_parameter, self.n_dof, 1).to(x.device)
        dfdp = torch.zeros(n_samples, self.n_parameter, self.n_dof, 1).to(x.device)

        # dM/dm_c
        dHdp[:, 0, 0:1, 0:1] = ones_1

        # dM/dm_p
        dHdp[:, 1, 0:1, 0:1] = ones_1
        dHdp[:, 1, 0:1, 1:2] = self.theta[:, 2] * cos_th
        dHdp[:, 1, 1:2, 0:1] = self.theta[:, 2] * cos_th
        dHdp[:, 1, 1:2, 1:2] = self.theta[:, 2]**2

        # dM/dl_p
        dHdp[:, 2, 0:1, 0:1] = zeros_1
        dHdp[:, 2, 0:1, 1:2] = self.theta[:, 1] * cos_th
        dHdp[:, 2, 1:2, 0:1] = self.theta[:, 1] * cos_th
        dHdp[:, 2, 1:2, 1:2] = self.theta[:, 1] * self.theta[:, 2] * 2

        # dn/dm_p
        dndp[:, 1, 0:1] = -self.theta[:, 2] * sin_th * th_dot**2
        dndp[:, 1, 1:2] = -self.theta[:, 2] * self.g * sin_th

        # dn/dl_p
        dndp[:, 2, 0:1] = -self.theta[:, 1] * sin_th * th_dot**2
        dndp[:, 2, 1:2] = -self.theta[:, 1] * self.g * sin_th

        # df/dB_c
        dfdp[:, 3, 0:1] = -x_dot
        dfdp[:, 4, 1:2] = -th_dot

        # d(inv H)/dp = -inv(H) dH/dp inv(H)
        invH_4d = invH.view(-1, 1, self.n_dof, self.n_dof)
        dinvHdp = -torch.matmul(invH_4d, torch.matmul(dHdp, invH_4d))

        # Only the acceleration rows depend on the parameters.
        dadp = torch.zeros(n_samples, self.n_parameter, self.n_state).to(x.device)
        dadp[:, :, self.n_dof:, ] = (torch.matmul(dinvHdp, f - n) + torch.matmul(invH_4d, dfdp - dndp)).view(-1, self.n_parameter, self.n_dof)

        dBdp = torch.zeros(n_samples, self.n_parameter, self.n_state, self.n_act).to(x.device)
        dBdp[:, :, self.n_dof:, ] = dinvHdp[:, :, :, :self.n_act]

        out = (dadp, dBdp)
        if is_numpy:
            out = [array.cpu().detach().numpy() for array in out]
        return out

    def cuda(self, device=None):
        """Move limit/parameter tensors to the GPU; returns ``self``."""
        self.u_lim = self.u_lim.cuda(device=device)
        self.theta_min = self.theta_min.cuda(device=device)
        self.theta = self.theta.cuda(device=device)
        self.theta_max = self.theta_max.cuda(device=device)
        self.device = self.theta.device
        return self

    def cpu(self):
        """Move limit/parameter tensors to the CPU; returns ``self``."""
        self.u_lim = self.u_lim.cpu()
        self.theta_min = self.theta_min.cpu()
        self.theta = self.theta.cpu()
        self.theta_max = self.theta_max.cpu()
        self.device = self.theta.device
        return self
class CartpoleLogCos(Cartpole):
    """Cartpole dynamics paired with a sine-quadratic state cost, a barrier
    on the state penalty box and an arctangent action cost."""

    name = "Cartpole_LogCosCost"

    def __init__(self, Q, R, cuda=False, **kwargs):
        """Create the dynamics and reward function.

        :param Q: positive state-cost diagonal, length ``n_state``.
        :param R: positive action-cost diagonal, length ``n_act``.
        """
        # Create the dynamics:
        super(CartpoleLogCos, self).__init__(cuda=cuda, **kwargs)
        # Overrides the base class action limit of 20 N.
        self.u_lim = torch.tensor([12., ])

        # Create the Reward Function:
        assert Q.size == self.n_state and np.all(Q > 0.0)
        self.Q = np.diag(Q).reshape((self.n_state, self.n_state))

        assert R.size == self.n_act and np.all(R > 0.0)
        self.R = np.diag(R).reshape((self.n_act, self.n_act))

        # Angle dimension (index 1) is treated as periodic via the sine cost.
        self._q = SineQuadraticCost(self.Q, np.array([0.0, 1.0, 0.0, 0.0]), cuda=cuda)
        self.q = BarrierCost(self._q, self.x_penalty, cuda)

        # Determine beta s.t. the curvature at u = 0 is identical to 2R
        beta = 4. * self.u_lim[0] ** 2 / np.pi * self.R
        self.r = ArcTangent(alpha=self.u_lim.numpy()[0], beta=beta.numpy()[0, 0])

    def rwd(self, x, u):
        """Stage reward/cost: state cost plus action cost."""
        return self.q(x) + self.r(u)

    def cuda(self, device=None):
        """Move dynamics tensors and the state cost to the GPU."""
        super(CartpoleLogCos, self).cuda(device=device)
        self.q.cuda(device=device)
        return self

    def cpu(self):
        """Move dynamics tensors and the state cost to the CPU."""
        super(CartpoleLogCos, self).cpu()
        self.q.cpu()
        return self
if __name__ == "__main__":
    # Self-test: verify the analytic dynamics gradients against autodiff.
    from deep_differential_network.utils import jacobian

    # GPU vs. CPU:
    cuda = True

    # Seed the test:
    seed = 42
    torch.manual_seed(seed)
    torch.cuda.manual_seed_all(seed)

    # Create system:
    # NOTE(review): ``sys`` shadows the stdlib module name; harmless here
    # because the module is never imported in this file.
    sys = Cartpole()
    n_samples = 10000
    x_lim = torch.from_numpy(sys.x_lim).float() if isinstance(sys.x_lim, np.ndarray) else sys.x_lim
    x_test = torch.distributions.uniform.Uniform(-x_lim, x_lim).sample((n_samples,))
    # x_test = torch.tensor([np.pi / 2., 0.5]).view(1, sys.n_state, 1)
    dtheta = torch.zeros(1, sys.n_parameter, 1)

    if cuda:
        sys, x_test, dtheta = sys.cuda(), x_test.cuda(), dtheta.cuda()

    ###################################################################################################################
    # Test dynamics gradient w.r.t. state:
    dadx_shape = (n_samples, sys.n_state, sys.n_state)
    dBdx_shape = (n_samples, sys.n_state, sys.n_state, sys.n_act)

    a, B, dadx, dBdx = sys.dyn(x_test, gradient=True)
    # Autodiff jacobians computed one sample at a time, then stacked.
    dadx_auto = torch.cat([jacobian(lambda x: sys.dyn(x)[0], x_test[i:i+1]) for i in range(n_samples)], dim=0)
    dBdx_auto = torch.cat([jacobian(lambda x: sys.dyn(x)[1], x_test[i:i+1]) for i in range(n_samples)], dim=0)

    # Mean absolute error per sample between analytic and autodiff gradients.
    err_a = (dadx_auto.view(dadx_shape) - dadx).abs().sum() / n_samples
    err_B = (dBdx_auto.view(dBdx_shape) - dBdx).abs().sum() / n_samples
    assert err_a <= 1.e-5 and err_B <= 1.e-6

    ###################################################################################################################
    # Test dynamics gradient w.r.t. model parameter:
    dadp_shape = (n_samples, sys.n_parameter, sys.n_state)
    dBdp_shape = (n_samples, sys.n_parameter, sys.n_state, sys.n_act)

    dadp, dBdp = sys.grad_dyn_theta(x_test)
    dadp_auto = torch.cat([jacobian(lambda x: sys.dyn(x_test[i], dtheta=x)[0], dtheta) for i in range(n_samples)], dim=0)
    dBdp_auto = torch.cat([jacobian(lambda x: sys.dyn(x_test[i], dtheta=x)[1], dtheta) for i in range(n_samples)], dim=0)

    err_a = (dadp_auto.view(dadp_shape) - dadp).abs().sum() / n_samples
    err_B = (dBdp_auto.view(dBdp_shape) - dBdp).abs().sum() / n_samples
assert err_a <= 2.e-4 and err_B <= 2.e-4 | 42.212766 | 143 | 0.558324 |
f236ef22492d0dc82e980fa6efe1d1f858814254 | 1,572 | py | Python | blender/blender_world_to_camera_coordinates_with_depth.py | quickgrid/code-lab | 634f97af389df7e1d120c4e0fcb5072a8e8f8d99 | [
"MIT"
] | null | null | null | blender/blender_world_to_camera_coordinates_with_depth.py | quickgrid/code-lab | 634f97af389df7e1d120c4e0fcb5072a8e8f8d99 | [
"MIT"
] | null | null | null | blender/blender_world_to_camera_coordinates_with_depth.py | quickgrid/code-lab | 634f97af389df7e1d120c4e0fcb5072a8e8f8d99 | [
"MIT"
] | null | null | null | """Get world coordinates to image coordinates with depth.
Warning: The image coordinates are flipped, so they or the image must be flipped horizontally to match.
References:
- https://blender.stackexchange.com/questions/882/how-to-find-image-coordinates-of-the-rendered-vertex
"""
import bpy
from bpy_extras.object_utils import world_to_camera_view
scene = bpy.context.scene

# needed to rescale the normalized camera-view coordinates to pixels
render = scene.render
res_x = render.resolution_x
res_y = render.resolution_y

obj = bpy.data.objects['Cube']
cam = bpy.data.objects['Camera']

# BUG FIX: the original built ``verts`` as a generator, which the print loop
# below exhausted — the subsequent list comprehension then produced an EMPTY
# coords_2d, so the first "x,y" section printed nothing.  Materialize the
# vertex coordinates once and reuse them throughout.
verts = [vert.co for vert in obj.data.vertices]
for coord in verts:
    print(coord)

# Project each world-space vertex into normalized camera view space:
# x, y in [0, 1] across the render frame, z = distance to the camera lens.
coords_2d = [world_to_camera_view(scene, cam, coord) for coord in verts]
print(coords_2d)

# 2d data printout:
rnd = lambda i: round(i)

print('x,y')
for x, y, distance_to_lens in coords_2d:
    print("{},{}".format(rnd(res_x * x), rnd(res_y * y)))

# find min/max distance between the camera eye and the coordinates
# (reuses coords_2d instead of recomputing it as the original did).
rnd3 = lambda i: round(i, 3)
limit_finder = lambda f: f(coords_2d, key=lambda i: i[2])[2]
limits = limit_finder(min), limit_finder(max)
limits = [rnd3(d) for d in limits]
print('min, max\n{},{}'.format(*limits))

# x, y, d=distance_to_lens
print('x,y,d')
for x, y, d in coords_2d:
    print("{},{},{}".format(rnd(res_x * x), rnd(res_y * y), rnd3(d)))
1fb036d01ead758e157ef52afb9904ccb1eba510 | 3,857 | py | Python | HandTrackingModule.py | dreamboysatvik/AIR-MOUSE | 4ccc8ced5b035f307e0b437b7166e619a1b5ae5d | [
"Apache-2.0"
] | null | null | null | HandTrackingModule.py | dreamboysatvik/AIR-MOUSE | 4ccc8ced5b035f307e0b437b7166e619a1b5ae5d | [
"Apache-2.0"
] | null | null | null | HandTrackingModule.py | dreamboysatvik/AIR-MOUSE | 4ccc8ced5b035f307e0b437b7166e619a1b5ae5d | [
"Apache-2.0"
] | null | null | null | # -*- coding: utf-8 -*-
"""
Created on Thu Jun 17 17:51:02 2021
@author: hp
"""
import cv2
import mediapipe as mp
import time
import math
import numpy as np
class handDetector():
    """Thin wrapper around MediaPipe Hands: landmark detection, drawing and
    simple geometry helpers (finger state, landmark distance).

    NOTE(review): indentation was lost in this dump; nesting below is the
    straightforward reconstruction — confirm against upstream.
    """

    def __init__(self, mode=False, maxHands=2, detectionCon=0.5, trackCon=0.5):
        """Configure the MediaPipe Hands solution.

        :param mode: static-image mode flag passed to MediaPipe.
        :param maxHands: maximum number of hands to track.
        :param detectionCon: minimum detection confidence.
        :param trackCon: minimum tracking confidence.
        """
        self.mode = mode
        self.maxHands = maxHands
        self.detectionCon = detectionCon
        self.trackCon = trackCon

        # NOTE(review): positional arguments — newer mediapipe releases insert
        # ``model_complexity`` as the 3rd parameter of Hands(); confirm the
        # pinned mediapipe version or switch to keyword arguments.
        self.mpHands = mp.solutions.hands
        self.hands = self.mpHands.Hands(self.mode, self.maxHands,
                                        self.detectionCon, self.trackCon)
        self.mpDraw = mp.solutions.drawing_utils
        # Landmark ids of the five fingertips (thumb..pinky).
        self.tipIds = [4, 8, 12, 16, 20]

    def findHands(self, img, draw=True):
        """Run hand detection on a BGR frame; optionally draw landmarks.

        Stores the MediaPipe results on ``self.results`` for later calls.
        Returns the (possibly annotated) image.
        """
        imgRGB = cv2.cvtColor(img, cv2.COLOR_BGR2RGB)
        self.results = self.hands.process(imgRGB)
        # print(results.multi_hand_landmarks)

        if self.results.multi_hand_landmarks:
            for handLms in self.results.multi_hand_landmarks:
                if draw:
                    self.mpDraw.draw_landmarks(img, handLms,
                                               self.mpHands.HAND_CONNECTIONS)
        return img

    def findPosition(self, img, handNo=0, draw=True):
        """Extract pixel landmark positions for hand ``handNo``.

        Must be called after :meth:`findHands` (uses ``self.results``).

        :return: ``(lmList, bbox)`` where lmList is ``[[id, cx, cy], ...]``
            and bbox is ``(xmin, ymin, xmax, ymax)`` — an empty list when no
            hand was detected.
        """
        xList = []
        yList = []
        bbox = []
        self.lmList = []
        if self.results.multi_hand_landmarks:
            myHand = self.results.multi_hand_landmarks[handNo]
            for id, lm in enumerate(myHand.landmark):
                # print(id, lm)
                h, w, c = img.shape
                # Landmarks are normalized [0, 1]; convert to pixels.
                cx, cy = int(lm.x * w), int(lm.y * h)
                xList.append(cx)
                yList.append(cy)
                # print(id, cx, cy)
                self.lmList.append([id, cx, cy])
                if draw:
                    cv2.circle(img, (cx, cy), 5, (255, 0, 255), cv2.FILLED)

            xmin, xmax = min(xList), max(xList)
            ymin, ymax = min(yList), max(yList)
            bbox = xmin, ymin, xmax, ymax

            if draw:
                cv2.rectangle(img, (xmin - 20, ymin - 20), (xmax + 20, ymax + 20),
                              (0, 255, 0), 2)

        return self.lmList, bbox

    def fingersUp(self):
        """Return a list of 5 flags (thumb..pinky), 1 if the finger is up.

        Requires a prior successful :meth:`findPosition`.  The thumb test
        compares x-coordinates, so it presumably assumes one hand
        orientation/side — verify for left vs. right hands.
        """
        fingers = []

        # Thumb
        if self.lmList[self.tipIds[0]][1] > self.lmList[self.tipIds[0] - 1][1]:
            fingers.append(1)
        else:
            fingers.append(0)

        # Fingers: tip above (smaller y than) the pip joint means "up".
        for id in range(1, 5):
            if self.lmList[self.tipIds[id]][2] < self.lmList[self.tipIds[id] - 2][2]:
                fingers.append(1)
            else:
                fingers.append(0)

        # totalFingers = fingers.count(1)
        return fingers

    def findDistance(self, p1, p2, img, draw=True, r=15, t=3):
        """Euclidean pixel distance between landmarks ``p1`` and ``p2``.

        :return: ``(length, img, [x1, y1, x2, y2, cx, cy])`` where (cx, cy)
            is the midpoint.
        """
        x1, y1 = self.lmList[p1][1:]
        x2, y2 = self.lmList[p2][1:]
        cx, cy = (x1 + x2) // 2, (y1 + y2) // 2

        if draw:
            cv2.line(img, (x1, y1), (x2, y2), (255, 0, 255), t)
            cv2.circle(img, (x1, y1), r, (255, 0, 255), cv2.FILLED)
            cv2.circle(img, (x2, y2), r, (255, 0, 255), cv2.FILLED)
            cv2.circle(img, (cx, cy), r, (0, 0, 255), cv2.FILLED)

        length = math.hypot(x2 - x1, y2 - y1)
        return length, img, [x1, y1, x2, y2, cx, cy]
def main():
    """Webcam demo loop: detect a hand, draw landmarks and show the FPS."""
    pTime = 0
    cTime = 0
    cap = cv2.VideoCapture(0)
    detector = handDetector()
    while True:
        success, img = cap.read()
        img = detector.findHands(img)
        lmList, bbox = detector.findPosition(img)
        if len(lmList) != 0:
            # NOTE(review): indentation was lost in this dump; the FPS block
            # is assumed to run only when a hand is detected — confirm
            # against the upstream file.
            cTime = time.time()
            fps = 1 / (cTime - pTime)
            pTime = cTime
            cv2.putText(img, str(int(fps)), (10, 70), cv2.FONT_HERSHEY_PLAIN, 3,
                        (255, 0, 255), 3)
        cv2.imshow("Image", img)
        cv2.waitKey(1)


# BUG FIX: the original guard read ``"__main_"`` (missing underscore), so
# main() was never executed when running this file as a script.
if __name__ == "__main__":
    main()
| 31.357724 | 86 | 0.504278 |
b006c94ca8f0a16e4632d878a1b45d168a2550cb | 371 | py | Python | config.py | daixiangzi/Grad_Cam-pytorch-resnet50 | c9e9c3b2ddcd520d4893645fab36fdb236f88865 | [
"Apache-2.0"
] | 19 | 2020-03-05T12:01:01.000Z | 2021-11-25T07:18:06.000Z | config.py | daixiangzi/Grad_Cam-pytorch-resnet50 | c9e9c3b2ddcd520d4893645fab36fdb236f88865 | [
"Apache-2.0"
] | null | null | null | config.py | daixiangzi/Grad_Cam-pytorch-resnet50 | c9e9c3b2ddcd520d4893645fab36fdb236f88865 | [
"Apache-2.0"
] | 5 | 2020-10-03T07:43:28.000Z | 2022-02-01T12:30:08.000Z | class Config(object):
# Dataset file lists and output location.
train_data = '/home/daixiangzi/dataset/dog_cat/shell/train.txt'
test_data = '/home/daixiangzi/dataset/dog_cat/shell/val.txt'
save_dir = '/home/project/save_model/'

# Hardware / data loading.
gpu_id = '0'
workers = 4
seed = 666

# Batching and schedule.
train_batch = 64
test_batch = 16
epochs = 100
fre_print = 2  # print frequency (iterations)

# Model / optimizer hyper-parameters.
num_class = 2
weight_decay = 1e-4
lr = 0.003
optim = "Adam"  # SGD, Adam
gamma = 0.1
43474dba7921279e9942b5fc76db4f2a859329cc | 3,044 | py | Python | commander/thirdparty/covertutils/datamanipulation/compressor.py | how2how/ToyHome | 4457b1d28e21ed6fd4ab980a0f7fed345c570ae3 | [
"Apache-2.0"
] | 1 | 2020-07-26T01:08:30.000Z | 2020-07-26T01:08:30.000Z | commander/thirdparty/covertutils/datamanipulation/compressor.py | how2how/ToyHome | 4457b1d28e21ed6fd4ab980a0f7fed345c570ae3 | [
"Apache-2.0"
] | null | null | null | commander/thirdparty/covertutils/datamanipulation/compressor.py | how2how/ToyHome | 4457b1d28e21ed6fd4ab980a0f7fed345c570ae3 | [
"Apache-2.0"
] | null | null | null | import bz2
import zlib
class Compressor:
    """
    The Compressor class initializes the **bz2** and **zlib** compression
    routines, plus an identity fallback for incompressible data.

    It detects the used compression on a **trial and error** base,
    eliminating the need of flag bytes containing such information.
    """

    def __init__(self):
        # Order matters for decompress(): the identity fallback must come
        # last because it accepts any input.
        self.comps = [bz2.compress, zlib.compress, self.__dummy_func]
        self.decomps = [bz2.decompress, zlib.decompress, self.__dummy_func]

    def __dummy_func(self, data):
        # Identity "compression": keeps the raw data as a candidate so that
        # short/incompressible inputs are never inflated.
        return data

    def compress(self, message):
        """
        Run every available compression algorithm on *message* and return
        the smallest output (which may be the raw input itself).

        :param str message: The data to be compressed in raw bytes.
        :rtype: str
        :return: Data compressed by the most space-efficient algorithm.
        """
        # min(key=len) replaces the original sort; ties resolve to the
        # earlier algorithm in self.comps, exactly like the stable sort did.
        candidates = [comp(message) for comp in self.comps]
        return min(candidates, key=len)

    def decompress(self, zipped):
        """
        Detect the used compression algorithm by trial and error and return
        the decompressed data.  Relies on each decompressor raising on
        foreign input; the identity fallback returns the data unchanged.

        :param str zipped: The (possibly) compressed data in raw bytes.
        :rtype: str
        :return: The decompressed data.
        """
        for decomp in self.decomps:
            try:
                return decomp(zipped)
            except Exception:
                # Narrowed from the original bare ``except:`` — a bare
                # except also swallows KeyboardInterrupt/SystemExit.
                continue
        # Unreachable in practice (the identity fallback never raises), but
        # kept as a safe default.
        return zipped
if __name__ == '__main__' :
    # Minimal CLI: compress or decompress a message (argument or stdin) with
    # optional base64/hex transport encodings on input and output.
    import argparse, sys, base64, binascii

    compressor = Compressor()

    parser = argparse.ArgumentParser()
    parser.add_argument("message", help = "The message to be compressed [use '-' for stdin]", type = str, default = '-' )
    parser.add_argument('--input-type', '-i', help = 'Specify the form of the input', choices = ['b64', 'hex', 'plain'], default = 'plain')
    parser.add_argument('--output-type', '-o', help = 'Specify the form of the ouput', choices = ['b64', 'hex', 'plain'], default = 'plain')
    parser.add_argument('--decompress', '-d', help = 'Add if the message is in compressed form', action = 'store_true', default = False, )
    parser.add_argument('-v', help = 'Display compression stats', action = 'store_true', default = False, )
    args = parser.parse_args()

    if args.message == '-' :
        args.message = sys.stdin.read()

    # Decode the transport encoding of the input, if any.
    if args.input_type == 'hex' :
        # NOTE(review): ``str()`` around bytes yields "b'...'" on Python 3 —
        # this looks written for Python 2; confirm before relying on -i hex.
        message = str(binascii.unhexlify(args.message))
    elif args.input_type == 'b64' :
        message = base64.b64decode(args.message)
    else :
        message = args.message

    # Select the operation (compress by default).
    func = compressor.compress
    if args.decompress :
        func = compressor.decompress
    raw_res = func(message)

    # Apply the transport encoding of the output, if any.
    res = raw_res
    if args.output_type == 'hex' :
        res = binascii.hexlify(raw_res)
    if args.output_type == 'b64' :
        res = base64.b64encode(raw_res)
    print (res)

    if args.v :
        # Output-to-input size ratio, in percent.
        print( "Ratio %d %% " % ( len(raw_res) / float(len(message)) * 100 ) )
| 29.269231 | 193 | 0.703351 |
397c43649e0e4d6d680ed78b0257c3e4f48eeb76 | 570 | py | Python | apps/misc_tests/resources/lib/builder/none.py | PremierLangage/premierlangage | 7134a2aadffee2bf264abee6c4b23ea33f1b390b | [
"CECILL-B"
] | 8 | 2019-01-30T13:51:59.000Z | 2022-01-08T03:26:53.000Z | apps/misc_tests/resources/lib/builder/none.py | PremierLangage/premierlangage | 7134a2aadffee2bf264abee6c4b23ea33f1b390b | [
"CECILL-B"
] | 286 | 2019-01-18T21:35:51.000Z | 2022-03-24T18:53:59.000Z | home/lib/builder/none.py | PremierLangage/premierlangage | 7134a2aadffee2bf264abee6c4b23ea33f1b390b | [
"CECILL-B"
] | 4 | 2019-02-11T13:38:30.000Z | 2021-03-02T20:59:00.000Z | #!/usr/bin/env python3
# coding: utf-8
import sys, json, jsonpickle
if __name__ == "__main__":
    # No-op builder: parrots the input JSON to the output path unchanged.
    if len(sys.argv) < 3:
        print("Sandbox did not call builder properly:\n"
              "Usage: python3 builder.py [input_json] [output_json]",
              file=sys.stderr)
        sys.exit(1)

    input_json, output_json = sys.argv[1], sys.argv[2]

    with open(input_json, "r") as source:
        payload = json.load(source)
    with open(output_json, "w+") as target:
        target.write(jsonpickle.encode(payload, unpicklable=False))
    sys.exit(0)
| 24.782609 | 71 | 0.578947 |
4941a81f08ca460855badcf767a89d0b7a6d0636 | 1,500 | py | Python | eden/integration/lib/util.py | jmswen/eden | 5e0b051703fa946cc77fc43004435ae6b20599a1 | [
"BSD-3-Clause"
] | null | null | null | eden/integration/lib/util.py | jmswen/eden | 5e0b051703fa946cc77fc43004435ae6b20599a1 | [
"BSD-3-Clause"
] | null | null | null | eden/integration/lib/util.py | jmswen/eden | 5e0b051703fa946cc77fc43004435ae6b20599a1 | [
"BSD-3-Clause"
] | null | null | null | #!/usr/bin/env python3
#
# Copyright (c) 2016-present, Facebook, Inc.
# All rights reserved.
#
# This source code is licensed under the BSD-style license found in the
# LICENSE file in the root directory of this source tree. An additional grant
# of patent rights can be found in the PATENTS file in the same directory.
import os
from typing import Callable, List, Optional
def gen_tree(
    path: str,
    fanouts: List[int],
    leaf_function: Callable[[str], None],
    internal_function: Optional[Callable[[str], None]] = None,
) -> None:
    """Recursively build a large branching directory tree.

    ``path`` is the leading path prefix put before all directory names.

    ``fanouts`` gives the fan-out per level: one layer of directories is
    created for each element.  For example ``[3, 4]`` creates 3
    subdirectories in the top-level directory and 4 inside each of those.

    ``leaf_function`` is called on every leaf directory and
    ``internal_function`` (when given) on every internal directory.
    """
    fanout, remaining = fanouts[0], fanouts[1:]
    for index in range(1, fanout + 1):
        subdir = os.path.join(path, "dir{:02}".format(index))
        if not remaining:
            leaf_function(subdir)
            continue
        if internal_function is not None:
            internal_function(subdir)
        gen_tree(subdir, remaining, leaf_function, internal_function)
| 34.090909 | 77 | 0.69 |
1980e6d3014a413ffd6662caabfc00c90a42dc1e | 50,208 | py | Python | lib/python/pyflyby/_parse.py | rahasurana/pyflyby | 72fdd902972d61c8a283fee6072bd75695787f12 | [
"BSD-3-Clause"
] | 1 | 2019-09-06T07:10:08.000Z | 2019-09-06T07:10:08.000Z | lib/python/pyflyby/_parse.py | rahasurana/pyflyby | 72fdd902972d61c8a283fee6072bd75695787f12 | [
"BSD-3-Clause"
] | 1 | 2019-08-20T15:35:27.000Z | 2019-08-20T15:49:19.000Z | lib/python/pyflyby/_parse.py | rahasurana/pyflyby | 72fdd902972d61c8a283fee6072bd75695787f12 | [
"BSD-3-Clause"
] | 1 | 2021-04-17T15:31:30.000Z | 2021-04-17T15:31:30.000Z | # pyflyby/_parse.py.
# Copyright (C) 2011, 2012, 2013, 2014, 2015, 2018 Karl Chen.
# License: MIT http://opensource.org/licenses/MIT
from __future__ import (absolute_import, division, print_function,
with_statement)
import ast
from collections import namedtuple
from functools import total_ordering
from itertools import groupby
import re
import sys
from textwrap import dedent
import types
import six
from six import PY2, PY3, text_type as unicode
from six.moves import range
from pyflyby._file import FilePos, FileText, Filename
from pyflyby._flags import CompilerFlags
from pyflyby._log import logger
from pyflyby._util import cached_attribute, cmp
if PY3:
from ast import Bytes
else:
Bytes = ast.Str
def _is_comment_or_blank(line):
"""
Returns whether a line of python code contains only a comment is blank.
>>> _is_comment_or_blank("foo\\n")
False
>>> _is_comment_or_blank(" # blah\\n")
True
"""
return re.sub("#.*", "", line).rstrip() == ""
def _ast_str_literal_value(node):
    """Return the literal value of a str/bytes node, or of an ``Expr``
    statement wrapping one; ``None`` for anything else."""
    candidate = node.value if isinstance(node, ast.Expr) else node
    if isinstance(candidate, (ast.Str, Bytes)):
        return candidate.s
    return None
def _flatten_ast_nodes(arg):
if arg is None:
pass
elif isinstance(arg, ast.AST):
yield arg
elif isinstance(arg, (tuple, list, types.GeneratorType)):
for x in arg:
for y in _flatten_ast_nodes(x):
yield y
else:
raise TypeError(
"_flatten_ast_nodes: unexpected %s" % (type(arg).__name__,))
def _iter_child_nodes_in_order(node):
    """
    Yield all direct child nodes of ``node`` -- every field that is a node,
    and every item of fields that are lists of nodes -- in source order.

    ``ast.iter_child_nodes`` does the same thing but not in source order,
    e.g. for ``Dict`` s it yields all key nodes before all value nodes.
    """
    children_in_order = _iter_child_nodes_in_order_internal_1(node)
    return _flatten_ast_nodes(children_in_order)
def _iter_child_nodes_in_order_internal_1(node):
    """
    Yield the children of ``node`` in source order, possibly nested inside
    lists/tuples/generators; `_iter_child_nodes_in_order` flattens the result.

    Only handles the node types for which the default ``ast.iter_child_nodes``
    field order differs from source order; all other node types fall through
    to the default behavior.
    """
    if not isinstance(node, ast.AST):
        raise TypeError
    if isinstance(node, ast.Dict):
        assert node._fields == ("keys", "values")
        # Interleave each key with its value so they come out in source order.
        yield list(zip(node.keys, node.values))
    elif isinstance(node, ast.FunctionDef):
        if six.PY2:
            assert node._fields == ('name', 'args', 'body', 'decorator_list')
        else:
            assert node._fields == ('name', 'args', 'body', 'decorator_list', 'returns')
        # Decorators appear above the "def" line, so they come first.
        yield node.decorator_list, node.args, node.body
        # node.name is a string, not an AST node
    elif isinstance(node, ast.arguments):
        if six.PY2:
            assert node._fields == ('args', 'vararg', 'kwarg', 'defaults')
        else:
            assert node._fields == ('args', 'vararg', 'kwonlyargs', 'kw_defaults', 'kwarg', 'defaults')
        # Args without defaults come first in the source; then pair each
        # remaining arg with its default value.
        defaults = node.defaults or ()
        num_no_default = len(node.args)-len(defaults)
        yield node.args[:num_no_default]
        yield list(zip(node.args[num_no_default:], defaults))
        # node.varags and node.kwarg are strings, not AST nodes.
    elif isinstance(node, ast.IfExp):
        assert node._fields == ('test', 'body', 'orelse')
        # Ternary source order is "BODY if TEST else ORELSE".
        yield node.body, node.test, node.orelse
    elif isinstance(node, ast.ClassDef):
        if six.PY2:
            assert node._fields == ('name', 'bases', 'body', 'decorator_list')
        else:
            assert node._fields == ('name', 'bases', 'keywords', 'body', 'decorator_list')
        # Decorators appear above the "class" line, so they come first.
        yield node.decorator_list, node.bases, node.body
        # node.name is a string, not an AST node
    else:
        # Default behavior.
        yield ast.iter_child_nodes(node)
def _walk_ast_nodes_in_order(node):
    """
    Recursively yield ``node`` and all of its descendants, in the same order
    in which they appear in the source.

    Like ``ast.walk``, except (1) depth-first instead of breadth-first, and
    (2) children are visited in source order via `_iter_child_nodes_in_order`.
    """
    stack = [node]
    while stack:
        current = stack.pop()
        yield current
        # Push children in reverse so the leftmost child is popped first.
        stack.extend(reversed(list(_iter_child_nodes_in_order(current))))
def _flags_to_try(source, flags, auto_flags, mode):
    """
    Yield the ``CompilerFlags`` candidates to use for parsing ``source``.

    If ``auto_flags`` is disabled (or we are on Python 3, where
    print_function is always on), only ``flags`` itself is yielded.
    Otherwise, also consider toggling print_function when the source
    mentions ``print``.
    """
    flags = CompilerFlags(flags)
    if not auto_flags or PY3:
        yield flags
        return
    mentions_print = bool(re.search(r"\bprint\b", source))
    if mode == "eval":
        # In eval mode a print statement can't parse anyway; just pick the
        # single most plausible flag set.
        if mentions_print:
            flags = flags | CompilerFlags("print_function")
        yield flags
        return
    yield flags
    if mentions_print:
        yield flags ^ CompilerFlags("print_function")
def _parse_ast_nodes(text, flags, auto_flags, mode):
    """
    Parse a block of lines into an AST.

    Also annotate ``input_flags``, ``source_flags``, and ``flags`` on the
    resulting ast node.

    :type text:
      ``FileText``
    :type flags:
      ``CompilerFlags``
    :type auto_flags:
      ``bool``
    :param auto_flags:
      Whether to guess different flags if ``text`` can't be parsed with
      ``flags``.
    :param mode:
      Compilation mode: "exec", "single", or "eval".
    :rtype:
      ``ast.Module``
    :raise SyntaxError:
      ``text`` could not be parsed with any of the candidate flag sets.
    """
    text = FileText(text)
    flags = CompilerFlags(flags)
    filename = str(text.filename) if text.filename else "<unknown>"
    source = text.joined
    source = dedent(source)
    if PY2 and isinstance(source, unicode):
        # Python 2's compile() chokes on unicode with an encoding cookie;
        # feed it UTF-8 bytes instead.
        source = source.encode('utf-8')
    if not source.endswith("\n"):
        # Ensure that the last line ends with a newline (``ast`` barfs
        # otherwise).
        source += "\n"
    # Try each candidate flag set in turn; remember the last SyntaxError so
    # we can re-raise it if nothing parses.
    exp = None
    for flags in _flags_to_try(source, flags, auto_flags, mode):
        cflags = ast.PyCF_ONLY_AST | int(flags)
        try:
            result = compile(
                source, filename, mode, flags=cflags, dont_inherit=1)
        except SyntaxError as e:
            exp = e
            pass
        else:
            # Attach flags to the result.
            result.input_flags = flags
            result.source_flags = CompilerFlags.from_ast(result)
            result.flags = result.input_flags | result.source_flags
            result.text = text
            return result
    raise exp # SyntaxError
def _test_parse_string_literal(text, flags):
    r"""
    Attempt to parse ``text``.  If it parses cleanly to a single string
    literal, return its value.  Otherwise return ``None``.

    >>> _test_parse_string_literal(r'"foo\n" r"\nbar"', 0)
    'foo\n\\nbar'
    """
    try:
        module_node = _parse_ast_nodes(text, flags, False, "eval")
    except SyntaxError:
        return None
    body = module_node.body
    if isinstance(body, (ast.Str, Bytes)):
        return body.s
    return None
# Location of an AST node within its parent: satisfies
# ``parent.<field>[<index>] is node`` (``index`` is None for non-list fields).
AstNodeContext = namedtuple("AstNodeContext", "parent field index")
def _annotate_ast_nodes(ast_node):
    """
    Annotate, in place, the AST returned by `_parse_ast_nodes`:

      - ``startpos`` (and ``endpos`` for multiline strings)
      - [disabled for now: context as `AstNodeContext` ]

    :type ast_node:
      ``ast.AST``
    :return:
      ``None``
    """
    text = ast_node.text
    _annotate_ast_startpos(ast_node, None, text.startpos, text,
                           ast_node.flags)
    # Not used for now:
    #   ast_node.context = AstNodeContext(None, None, None)
    #   _annotate_ast_context(ast_node)
def _annotate_ast_startpos(ast_node, parent_ast_node, minpos, text, flags):
    """
    Annotate ``ast_node``.  Set ``ast_node.startpos`` to the starting position
    of the node within ``text``.

    For "typical" nodes, i.e. those other than multiline strings, this is
    simply FilePos(ast_node.lineno, ast_node.col_offset+1), but taking
    ``text.startpos`` into account.

    For multiline string nodes, this function works by trying to parse all
    possible subranges of lines until finding the range that is syntactically
    valid and matches ``value``.  The candidate range is
    text[min_start_lineno:lineno+text.startpos.lineno+1].

    This function is unfortunately necessary because of a flaw in the output
    produced by the Python built-in parser.  For some crazy reason, the
    ``ast_node.lineno`` attribute represents something different for multiline
    string literals versus all other statements.  For multiline string literal
    nodes and statements that are just a string expression (or more generally,
    nodes where the first descendant leaf node is a multiline string literal),
    the compiler attaches the ending line number as the value of the ``lineno``
    attribute.  For all other than AST nodes, the compiler attaches the
    starting line number as the value of the ``lineno`` attribute.  This means
    e.g. the statement "'''foo\nbar'''" has a lineno value of 2, but the
    statement "x='''foo\nbar'''" has a lineno value of 1.

    :type ast_node:
      ``ast.AST``
    :type minpos:
      `FilePos`
    :param minpos:
      Earliest position to check, in the number space of ``text``.
    :type text:
      `FileText`
    :param text:
      Source text that was used to parse the AST, whose ``startpos`` should be
      used in interpreting ``ast_node.lineno`` (which always starts at 1 for
      the subset that was parsed).
    :type flags:
      ``CompilerFlags``
    :param flags:
      Compiler flags to use when re-compiling code.
    :return:
      ``True`` if this node is a multiline string literal or the first child is
      such a node (recursively); ``False`` otherwise.
    :raise ValueError:
      Could not find the starting line number.
    """
    # First, traverse child nodes.  If the first child node (recursively) is a
    # multiline string, then we need to transfer its information to this node.
    # Walk all nodes/fields of the AST.  We implement this as a custom
    # depth-first search instead of using ast.walk() or ast.NodeVisitor
    # so that we can easily keep track of the preceding node's lineno.
    child_minpos = minpos
    is_first_child = True
    leftstr_node = None
    for child_node in _iter_child_nodes_in_order(ast_node):
        leftstr = _annotate_ast_startpos(child_node, ast_node,
                                         child_minpos, text, flags)
        if is_first_child and leftstr:
            leftstr_node = child_node
        if hasattr(child_node, 'lineno'):
            # Sanity check: children must be annotated in nondecreasing
            # position order, else _iter_child_nodes_in_order is broken.
            if child_node.startpos < child_minpos:
                raise AssertionError(
                    "Got out-of-order AST node(s):\n"
                    " parent minpos=%s\n" % minpos +
                    " node: %s\n" % ast.dump(ast_node) +
                    " fields: %s\n" % (" ".join(ast_node._fields)) +
                    " children:\n" +
                    ''.join(
                        " %s %9s: %s\n" % (
                            ("==>" if cn is child_node else " "),
                            getattr(cn, 'startpos', ""),
                            ast.dump(cn))
                        for cn in _iter_child_nodes_in_order(ast_node)) +
                    "\n"
                    "This indicates a bug in pyflyby._\n"
                    "\n"
                    "pyflyby developer: Check if there's a bug or missing ast node handler in "
                    "pyflyby._parse._iter_child_nodes_in_order() - "
                    "probably the handler for ast.%s." % type(ast_node).__name__)
            child_minpos = child_node.startpos
        is_first_child = False
    # If the node has no lineno at all, then skip it.  This should only happen
    # for nodes we don't care about, e.g. ``ast.Module`` or ``ast.alias``.
    if not hasattr(ast_node, 'lineno'):
        return False
    # If col_offset is set then the lineno should be correct also.
    if ast_node.col_offset >= 0:
        # Not a multiline string literal.  (I.e., it could be a non-string or
        # a single-line string.)
        # Easy.
        delta = (ast_node.lineno-1, ast_node.col_offset)
        startpos = text.startpos + delta
        # Special case for 'with' statements.  Consider the code:
        #    with X: pass
        #    ^0   ^5
        # In python2.6, col_offset is 0.
        # In python2.7, col_offset is 5.
        # This is because python2.7 allows for multiple clauses:
        #    with X, Y: pass
        # Since 'Y's col_offset isn't the beginning of the line, the authors
        # of Python presumably changed 'X's col_offset to also not be the
        # beginning of the line.  If they had made the With ast node support
        # multiple clauses, they wouldn't have needed to do that, but then
        # that would introduce an API change in the AST.  So it's
        # understandable that they did that.
        # Since we use startpos for breaking lines, we need to set startpos to
        # the beginning of the line.
        # In Python 3, the col_offset for the with is 0 again.
        if (isinstance(ast_node, ast.With) and
            not isinstance(parent_ast_node, ast.With) and
            sys.version_info[:2] == (2,7)):
            assert ast_node.col_offset >= 5
            if startpos.lineno == text.startpos.lineno:
                linestart = text.startpos.colno
            else:
                linestart = 1
            # Scan backward from the clause to find the 'with' keyword, and
            # move startpos to it.
            line = text[(startpos.lineno,linestart):startpos]
            m = re.search(r"\bwith\s+$", str(line))
            assert m
            lk = len(m.group()) # length of 'with ' including spaces
            startpos = FilePos(startpos.lineno, startpos.colno - lk)
            assert str(text[startpos:(startpos+(0,4))]) == "with"
        ast_node.startpos = startpos
        return False
    assert ast_node.col_offset == -1
    if leftstr_node:
        # This is an ast node where the leftmost deepest leaf is a
        # multiline string.  The bug that multiline strings have broken
        # lineno/col_offset infects ancestors up the tree.
        #
        # If the leftmost leaf is a multi-line string, then ``lineno``
        # contains the ending line number, and col_offset is -1:
        #   >>> ast.parse("""'''foo\nbar'''+blah""").body[0].lineno
        #   2
        # But if the leftmost leaf is not a multi-line string, then
        # ``lineno`` contains the starting line number:
        #   >>> ast.parse("""'''foobar'''+blah""").body[0].lineno
        #   1
        #   >>> ast.parse("""blah+'''foo\nbar'''+blah""").body[0].lineno
        #   1
        #
        # To fix that, we copy start_lineno and start_colno from the Str
        # node once we've corrected the values.
        assert not isinstance(ast_node, (ast.Str, Bytes))
        assert leftstr_node.lineno == ast_node.lineno
        assert leftstr_node.col_offset == -1
        ast_node.startpos = leftstr_node.startpos
        return True
    # It should now be the case that we are looking at a multi-line string
    # literal.
    if not isinstance(ast_node, (ast.Str, Bytes)):
        raise ValueError(
            "got a non-string col_offset=-1: %s" % (ast.dump(ast_node)))
    # The ``lineno`` attribute gives the ending line number of the multiline
    # string ... unless it's multiple multiline strings that are concatenated
    # by adjacency, in which case it's merely the end of the first one of
    # them.  At least we know that the start lineno is definitely not later
    # than the ``lineno`` attribute.
    first_end_lineno = text.startpos.lineno + ast_node.lineno - 1
    # Compute possible start positions.
    # The starting line number of this string could be anywhere between the
    # end of the previous expression and ``first_end_lineno``.
    startpos_candidates = []
    assert minpos.lineno <= first_end_lineno
    for start_lineno in range(minpos.lineno, first_end_lineno + 1):
        start_line = text[start_lineno]
        start_line_colno = (text.startpos.colno
                            if start_lineno==text.startpos.lineno else 1)
        # Any quote char (with optional string prefix) on the line is a
        # candidate starting position; record (quotechar, position).
        startpos_candidates.extend([
            (_m.group()[-1], FilePos(start_lineno, _m.start()+start_line_colno))
            for _m in re.finditer("[bBrRuU]*[\"\']", start_line)])
    target_str = ast_node.s
    # Loop over possible end_linenos.  The first one we've identified is the
    # by far most likely one, but in theory it could be anywhere later in the
    # file.  This could be because of a dastardly concatenated string like
    # this:
    #     """              # L1
    #     two              # L2
    #     """ """          # L3
    #     four             # L4
    #     five             # L5
    #     six              # L6
    #     """              # L7
    # There are two substrings on L1:L3 and L3:L7.  The parser gives us a
    # single concatenated string, but sets lineno to 3 instead of 7.  We don't
    # have much to go on to figure out that the real end_lineno is 7.  If we
    # don't find the string ending on L3, then search forward looking for the
    # real end of the string.  Yuck!
    for end_lineno in range(first_end_lineno, text.endpos.lineno+1):
        # Compute possible end positions.  We're given the line we're ending
        # on, but not the column position.  Note that the ending line could
        # contain more than just the string we're looking for -- including
        # possibly other strings or comments.
        end_line = text[end_lineno]
        end_line_startcol = (
            text.startpos.colno if end_lineno==text.startpos.lineno else 1)
        endpos_candidates = [
            (_m.group(), FilePos(end_lineno,_m.start()+end_line_startcol+1))
            for _m in re.finditer("[\"\']", end_line)]
        if not endpos_candidates:
            # We found no endpos_candidates.  This should not happen for
            # first_end_lineno because there should be _some_ string that ends
            # there.
            if end_lineno == first_end_lineno:
                raise AssertionError(
                    "No quote char found on line with supposed string")
            continue
        # Filter and sort the possible startpos candidates given this endpos
        # candidate.  It's possible for the starting quotechar and ending
        # quotechar to be different in case of adjacent string concatenation,
        # e.g.  "foo"'''bar'''.  That said, it's an unlikely case, so
        # deprioritize checking them.
        likely_candidates = []
        unlikely_candidates = []
        for end_quotechar, endpos in reversed(endpos_candidates):
            for start_quotechar, startpos in startpos_candidates:
                if not startpos < endpos:
                    continue
                if start_quotechar == end_quotechar:
                    candidate_list = likely_candidates
                else:
                    candidate_list = unlikely_candidates
                candidate_list.append((startpos,endpos))
        # Loop over sorted candidates.
        matched_prefix = set()
        for (startpos, endpos) in likely_candidates + unlikely_candidates:
            # Try to parse the given range and see if it matches the target
            # string literal.
            subtext = text[startpos:endpos]
            candidate_str = _test_parse_string_literal(subtext, flags)
            if candidate_str is None:
                continue
            elif target_str == candidate_str:
                # Success!
                ast_node.startpos = startpos
                ast_node.endpos   = endpos
                # This node is a multiline string; and, it's a leaf, so by
                # definition it is the leftmost node.
                return True # all done
            elif target_str.startswith(candidate_str):
                matched_prefix.add(startpos)
        # We didn't find a string given the current end_lineno candidate.
        # Only continue checking the startpos candidates that so far produced
        # prefixes of the string we're looking for.
        if not matched_prefix:
            break
        startpos_candidates = [
            (sq, sp)
            for (sq, sp) in startpos_candidates
            if sp in matched_prefix
        ]
    raise ValueError(
        "Couldn't find exact position of %s"
        % (ast.dump(ast_node)))
def _annotate_ast_context(ast_node):
    """
    Recursively annotate ``context`` on ast nodes, setting ``context`` to
    a `AstNodeContext` named tuple with values ``(parent, field, index)``.
    Each ast_node satisfies ``parent.<field>[<index>] is ast_node``.

    For non-list fields, the index part is ``None``.
    """
    for field_name, field_value in ast.iter_fields(ast_node):
        if isinstance(field_value, ast.AST):
            # Scalar node field.
            field_value.context = AstNodeContext(ast_node, field_name, None)
            _annotate_ast_context(field_value)
        elif isinstance(field_value, list):
            # List-of-nodes field; record each element's index.
            for index, element in enumerate(field_value):
                if isinstance(element, ast.AST):
                    element.context = AstNodeContext(ast_node, field_name, index)
                    _annotate_ast_context(element)
def _split_code_lines(ast_nodes, text):
    """
    Split the given ``ast_nodes`` and corresponding ``text`` by code/noncode
    statement.

    Yield tuples of (nodes, subtext).  ``nodes`` is a list of ``ast.AST``
    nodes, length 0 or 1; ``subtext`` is a `FileText` sliced from ``text``:
    ``([node], FileText(...))`` for code lines and ``([], FileText(...))``
    for non-code lines (comments and blanks).

    :type ast_nodes:
      sequence of ``ast.AST`` nodes
    :type text:
      `FileText`
    """
    if not ast_nodes:
        yield ([], text)
        return
    assert text.startpos <= ast_nodes[0].startpos
    assert ast_nodes[-1].startpos < text.endpos
    if text.startpos != ast_nodes[0].startpos:
        # Starting noncode lines.
        yield ([], text[text.startpos:ast_nodes[0].startpos])
    # Sentinel marking "position after the last node" so the loop below can
    # always look at a next-node startpos.
    end_sentinel = _DummyAst_Node()
    end_sentinel.startpos = text.endpos
    for node, next_node in zip(ast_nodes, ast_nodes[1:] + [end_sentinel]):
        startpos = node.startpos
        next_startpos = next_node.startpos
        assert startpos < next_startpos
        # We have the start position of this node.  Figure out the end
        # position, excluding noncode lines (standalone comments and blank
        # lines).
        if hasattr(node, 'endpos'):
            # We have an endpos for the node because this was a multi-line
            # string.  Start with the node endpos.
            endpos = node.endpos
            assert startpos < endpos <= next_startpos
            # enpos points to the character *after* the ending quote, so we
            # know that this is never at the beginning of the line.
            assert endpos.colno != 1
            # Advance past whitespace an inline comment, if any.  Do NOT
            # advance past other code that could be on the same line, nor past
            # blank lines and comments on subsequent lines.
            line = text[endpos : min(text.endpos, FilePos(endpos.lineno+1,1))]
            if _is_comment_or_blank(line):
                endpos = FilePos(endpos.lineno+1, 1)
        else:
            endpos = next_startpos
            assert endpos <= text.endpos
            # We don't have an endpos yet; what we do have is the next node's
            # startpos (or the position at the end of the text).  Start there
            # and work backward.
            if endpos.colno != 1:
                if endpos == text.endpos:
                    # There could be a comment on the last line and no
                    # trailing newline.
                    # TODO: do this in a more principled way.
                    if _is_comment_or_blank(text[endpos.lineno]):
                        assert startpos.lineno < endpos.lineno
                        if not text[endpos.lineno-1].endswith("\\"):
                            endpos = FilePos(endpos.lineno,1)
                else:
                    # We're not at end of file, yet the next node starts in
                    # the middle of the line.  This should only happen with if
                    # we're not looking at a comment.  [The first character in
                    # the line could still be "#" if we're inside a multiline
                    # string that's the last child of the parent node.
                    # Therefore we don't assert 'not
                    # _is_comment_or_blank(...)'.]
                    pass
            if endpos.colno == 1:
                # Back up over trailing comment/blank lines, but never past a
                # line that is continued with a backslash from real code.
                while (endpos.lineno-1 > startpos.lineno and
                       _is_comment_or_blank(text[endpos.lineno-1]) and
                       (not text[endpos.lineno-2].endswith("\\") or
                        _is_comment_or_blank(text[endpos.lineno-2]))):
                    endpos = FilePos(endpos.lineno-1, 1)
        assert startpos < endpos <= next_startpos
        yield ([node], text[startpos:endpos])
        if endpos != next_startpos:
            # Noncode lines between this node and the next one.
            yield ([], text[endpos:next_startpos])
def _ast_node_is_in_docstring_position(ast_node):
    """
    Given a ``Str`` AST node, return whether its position within the AST makes
    it eligible as a docstring.

    The main way a ``Str`` can be a docstring is if it is a standalone string
    at the beginning of a ``Module``, ``FunctionDef``, or ``ClassDef``.

    We also support variable docstrings per Epydoc:
      - If a variable assignment statement is immediately followed by a bare
        string literal, then that assignment is treated as a docstring for
        that variable.

    :type ast_node:
      ``ast.Str``
    :param ast_node:
      AST node that has been annotated by ``_annotate_ast_nodes``.
    :rtype:
      ``bool``
    :return:
      Whether this string ast node is in docstring position.
    """
    if not isinstance(ast_node, (ast.Str, Bytes)):
        raise TypeError
    # The string must be the sole value of an expression statement...
    expr_node = ast_node.context.parent
    if not isinstance(expr_node, ast.Expr):
        return False
    assert ast_node.context.field == 'value'
    assert ast_node.context.index is None
    # ... which must live in the body of a module, function, or class.
    expr_ctx = expr_node.context
    if expr_ctx.field != 'body':
        return False
    parent = expr_ctx.parent
    if not isinstance(parent, (ast.FunctionDef, ast.ClassDef, ast.Module)):
        return False
    # First statement in the body => classic docstring.
    if expr_ctx.index == 0:
        return True
    # Immediately after an assignment => Epydoc-style variable docstring.
    return isinstance(parent.body[expr_ctx.index-1], ast.Assign)
def infer_compile_mode(arg):
    """
    Infer the mode needed to compile ``arg``.

    :type arg:
      ``ast.AST``
    :rtype:
      ``str``
    :raise TypeError:
      ``arg`` is not a Module, Expression, or Interactive node.
    """
    if isinstance(arg, ast.Module):
        return "exec"
    if isinstance(arg, ast.Expression):
        return "eval"
    if isinstance(arg, ast.Interactive):
        return "single"
    raise TypeError(
        "Expected Module/Expression/Interactive ast node; got %s"
        % (type(arg).__name__))
class _DummyAst_Node(object):
    # Minimal stand-in for an AST node; used by _split_code_lines() as an
    # end-of-text sentinel (only ever carries a ``startpos`` attribute,
    # assigned by the caller).
    pass
@total_ordering
class PythonStatement(object):
    r"""
    Representation of a top-level Python statement or consecutive
    comments/blank lines.

      >>> PythonStatement('print("x",\n file=None)\n', flags=0x10000)
      PythonStatement('print("x",\n file=None)\n', flags=0x10000)

    Implemented as a wrapper around a `PythonBlock` containing at most one
    top-level AST node.
    """

    def __new__(cls, arg, filename=None, startpos=None, flags=None):
        """
        Construct a `PythonStatement` from ``arg``: an existing statement
        (returned as-is when no overrides are given), a `PythonBlock`,
        `FileText`, or source string containing exactly one statement.

        :raise ValueError:
          The code contains more or fewer than exactly one statement.
        """
        if isinstance(arg, cls):
            if filename is startpos is flags is None:
                return arg
            arg = arg.block
            # Fall through
        if isinstance(arg, (PythonBlock, FileText, str, six.text_type)):
            block = PythonBlock(arg, filename=filename,
                                startpos=startpos, flags=flags)
            statements = block.statements
            if len(statements) != 1:
                raise ValueError(
                    "Code contains %d statements instead of exactly 1: %r"
                    % (len(statements), block))
            statement, = statements
            assert isinstance(statement, cls)
            return statement
        raise TypeError("PythonStatement: unexpected %s" % (type(arg).__name__,))

    @classmethod
    def _construct_from_block(cls, block):
        # Only to be used by PythonBlock.  Bypasses __new__'s parsing to wrap
        # an already-split single-statement block.
        assert isinstance(block, PythonBlock)
        self = object.__new__(cls)
        self.block = block
        return self

    @property
    def text(self):
        """
        :rtype:
          `FileText`
        """
        return self.block.text

    @property
    def filename(self):
        """
        :rtype:
          `Filename`
        """
        return self.text.filename

    @property
    def startpos(self):
        """
        :rtype:
          `FilePos`
        """
        return self.text.startpos

    @property
    def flags(self):
        """
        :rtype:
          `CompilerFlags`
        """
        return self.block.flags

    @property
    def ast_node(self):
        """
        A single AST node representing this statement, or ``None`` if this
        object only represents comments/blanks.

        :rtype:
          ``ast.AST`` or ``NoneType``
        """
        ast_nodes = self.block.ast_node.body
        if len(ast_nodes) == 0:
            return None
        if len(ast_nodes) == 1:
            return ast_nodes[0]
        raise AssertionError("More than one AST node in block")

    @property
    def is_comment_or_blank(self):
        # True when this statement holds no AST node (only comments/blanks).
        return self.ast_node is None

    @property
    def is_comment_or_blank_or_string_literal(self):
        return (self.is_comment_or_blank
                or _ast_str_literal_value(self.ast_node) is not None)

    @property
    def is_import(self):
        return isinstance(self.ast_node, (ast.Import, ast.ImportFrom))

    @property
    def is_single_assign(self):
        # True for a plain assignment with exactly one target (``x = ...``).
        n = self.ast_node
        return isinstance(n, ast.Assign) and len(n.targets) == 1

    def get_assignment_literal_value(self):
        """
        If the statement is an assignment, return the name and literal value.

          >>> PythonStatement('foo = {1: {2: 3}}').get_assignment_literal_value()
          ('foo', {1: {2: 3}})

        :return:
          (target, literal_value)
        :raise ValueError:
          The statement is not a single-target assignment.
        """
        if not self.is_single_assign:
            raise ValueError(
                "Statement is not an assignment to a single name: %s" % self)
        n = self.ast_node
        target_name = n.targets[0].id
        literal_value = ast.literal_eval(n.value)
        return (target_name, literal_value)

    def __repr__(self):
        # Reuse the block's repr, rewriting the class name.
        r = repr(self.block)
        assert r.startswith("PythonBlock(")
        r = "PythonStatement(" + r[12:]
        return r

    def __eq__(self, other):
        if self is other:
            return True
        if not isinstance(other, PythonStatement):
            return NotImplemented
        return self.block == other.block

    def __ne__(self, other):
        return not (self == other)

    # The rest are defined by total_ordering
    def __lt__(self, other):
        if not isinstance(other, PythonStatement):
            return NotImplemented
        return self.block < other.block

    def __cmp__(self, other):
        # Python 2 only; ignored on Python 3.
        if self is other:
            return 0
        if not isinstance(other, PythonStatement):
            return NotImplemented
        return cmp(self.block, other.block)

    def __hash__(self):
        return hash(self.block)
@total_ordering
class PythonBlock(object):
r"""
Representation of a sequence of consecutive top-level
`PythonStatement` (s).
>>> source_code = '# 1\nprint(2)\n# 3\n# 4\nprint(5)\nx=[6,\n 7]\n# 8\n'
>>> codeblock = PythonBlock(source_code)
>>> for stmt in PythonBlock(codeblock).statements:
... print(stmt)
PythonStatement('# 1\n')
PythonStatement('print(2)\n', startpos=(2,1))
PythonStatement('# 3\n# 4\n', startpos=(3,1))
PythonStatement('print(5)\n', startpos=(5,1))
PythonStatement('x=[6,\n 7]\n', startpos=(6,1))
PythonStatement('# 8\n', startpos=(8,1))
A ``PythonBlock`` has a ``flags`` attribute that gives the compiler_flags
associated with the __future__ features using which the code should be
parsed.
"""
    def __new__(cls, arg, filename=None, startpos=None, flags=None,
                auto_flags=None):
        """
        Construct a `PythonBlock` from ``arg``: a `PythonStatement`, another
        `PythonBlock` (returned as-is when no overrides are given), a
        `FileText`, `Filename`, or source string.
        """
        if isinstance(arg, PythonStatement):
            arg = arg.block
            # Fall through
        if isinstance(arg, cls):
            if filename is startpos is flags is None:
                return arg
            # Merge the caller's flags with the existing block's flags.
            flags = CompilerFlags(flags, arg.flags)
            arg = arg.text
            # Fall through
        if isinstance(arg, (FileText, Filename, str, six.text_type)):
            return cls.from_text(
                arg, filename=filename, startpos=startpos,
                flags=flags, auto_flags=auto_flags)
        raise TypeError("%s: unexpected %s"
                        % (cls.__name__, type(arg).__name__,))

    @classmethod
    def from_filename(cls, filename):
        """
        Construct a `PythonBlock` from the contents of ``filename``.

        :type filename:
          `Filename` or convertible
        :rtype:
          `PythonBlock`
        """
        return cls.from_text(Filename(filename))

    @classmethod
    def from_text(cls, text, filename=None, startpos=None, flags=None,
                  auto_flags=False):
        """
        :type text:
          `FileText` or convertible
        :type filename:
          ``Filename``
        :param filename:
          Filename, if not already given by ``text``.
        :type startpos:
          ``FilePos``
        :param startpos:
          Starting position, if not already given by ``text``.
        :type flags:
          ``CompilerFlags``
        :param flags:
          Input compiler flags.
        :param auto_flags:
          Whether to try other flags if ``flags`` fails.
        :rtype:
          `PythonBlock`
        """
        text = FileText(text, filename=filename, startpos=startpos)
        self = object.__new__(cls)
        self.text = text
        # Parsing is deferred: these are consumed lazily by
        # _ast_node_or_parse_exception.
        self._input_flags = CompilerFlags(flags)
        self._auto_flags = auto_flags
        return self
    @classmethod
    def __construct_from_annotated_ast(cls, annotated_ast_nodes, text, flags):
        # Constructor for internal use by _split_by_statement() or
        # concatenate().  Pre-populates the attributes that are normally
        # computed lazily (ast_node, annotated_ast_node, flags), since the
        # nodes have already been parsed and annotated.
        # NOTE(review): ast.Module() is called with a single argument here;
        # Python 3.8+ added a ``type_ignores`` field -- confirm compatibility
        # on newer Pythons.
        ast_node = ast.Module(annotated_ast_nodes)
        ast_node.text = text
        ast_node.flags = flags
        if not hasattr(ast_node, "source_flags"):
            ast_node.source_flags = CompilerFlags.from_ast(annotated_ast_nodes)
        self = object.__new__(cls)
        self._ast_node_or_parse_exception = ast_node
        self.ast_node = ast_node
        self.annotated_ast_node = ast_node
        self.text = text
        self.flags = self._input_flags = flags
        self._auto_flags = False
        return self

    @classmethod
    def concatenate(cls, blocks, assume_contiguous=False):
        """
        Concatenate a bunch of blocks into one block.

        :type blocks:
          sequence of `PythonBlock` s and/or `PythonStatement` s
        :param assume_contiguous:
          Whether to assume, without checking, that the input blocks were
          originally all contiguous.  This must be set to True to indicate the
          caller understands the assumption; False is not implemented.
        :rtype:
          `PythonBlock`
        """
        if not assume_contiguous:
            raise NotImplementedError
        blocks = [PythonBlock(b) for b in blocks]
        if len(blocks) == 1:
            return blocks[0]
        assert blocks
        text = FileText.concatenate([b.text for b in blocks])
        # The contiguous assumption is important here because ``ast_node``
        # contains line information that would otherwise be wrong.
        ast_nodes = [n for b in blocks for n in b.annotated_ast_node.body]
        # NOTE(review): flags are taken from the first block only --
        # presumably all blocks share compatible flags; verify at call sites.
        flags = blocks[0].flags
        return cls.__construct_from_annotated_ast(ast_nodes, text, flags)

    @property
    def filename(self):
        """The `Filename` of the underlying text, or ``None``."""
        return self.text.filename

    @property
    def startpos(self):
        """Starting `FilePos` of the underlying text."""
        return self.text.startpos

    @property
    def endpos(self):
        """Ending `FilePos` of the underlying text."""
        return self.text.endpos
    @cached_attribute
    def _ast_node_or_parse_exception(self):
        """
        Attempt to parse this block of code into an abstract syntax tree.
        Cached (including exception case).

        :return:
          Either ast_node or exception.
        """
        # This attribute may also be set by __construct_from_annotated_ast(),
        # in which case this code does not run.
        try:
            return _parse_ast_nodes(
                self.text, self._input_flags, self._auto_flags, "exec")
        except Exception as e:
            # Add the filename to the exception message to be nicer.
            if self.text.filename:
                try:
                    e = type(e)("While parsing %s: %s" % (self.text.filename, e))
                except TypeError:
                    # Exception takes more than one argument
                    pass
            # Cache the exception to avoid re-attempting while debugging.
            return e

    @cached_attribute
    def parsable(self):
        """
        Whether the contents of this ``PythonBlock`` are parsable as Python
        code, using the given flags.

        :rtype:
          ``bool``
        """
        # An AST instance means parsing succeeded; anything else is the
        # cached exception.
        return isinstance(self._ast_node_or_parse_exception, ast.AST)

    @cached_attribute
    def parsable_as_expression(self):
        """
        Whether the contents of this ``PythonBlock`` are parsable as a single
        Python expression, using the given flags.

        :rtype:
          ``bool``
        """
        # Check ``parsable`` first so that accessing expression_ast_node
        # cannot raise.
        return self.parsable and self.expression_ast_node is not None
    @cached_attribute
    def ast_node(self):
        """
        Parse this block of code into an abstract syntax tree.

        The returned object type is the kind of AST as returned by the
        ``compile`` built-in (rather than as returned by the older, deprecated
        ``compiler`` module).  The code is parsed using mode="exec".

        The result is a ``ast.Module`` node, even if this block represents only
        a subset of the entire file.

        :rtype:
          ``ast.Module``
        :raise Exception:
          Re-raises the cached parse failure, if any.
        """
        r = self._ast_node_or_parse_exception
        if isinstance(r, ast.AST):
            return r
        else:
            raise r

    @cached_attribute
    def annotated_ast_node(self):
        """
        Return ``self.ast_node``, annotated in place with positions.

        All nodes are annotated with ``startpos``.
        All top-level nodes are annotated with ``endpos``.

        :rtype:
          ``ast.Module``
        """
        result = self.ast_node
        _annotate_ast_nodes(result)
        return result

    @cached_attribute
    def expression_ast_node(self):
        """
        Return an ``ast.Expression`` if ``self.ast_node`` can be converted into
        one.  I.e., return parse(self.text, mode="eval"), if possible.

        Otherwise, return ``None``.

        :rtype:
          ``ast.Expression``
        """
        # Convertible iff the module body is a single expression statement.
        node = self.ast_node
        if len(node.body) == 1 and isinstance(node.body[0], ast.Expr):
            return ast.Expression(node.body[0].value)
        else:
            return None
def parse(self, mode=None):
"""
Parse the source text into an AST.
:param mode:
Compilation mode: "exec", "single", or "eval". "exec", "single",
and "eval" work as the built-in ``compile`` function do. If ``None``,
then default to "eval" if the input is a string with a single
expression, else "exec".
:rtype:
``ast.AST``
"""
if mode == "exec":
return self.ast_node
elif mode == "eval":
if self.expression_ast_node:
return self.expression_ast_node
else:
raise SyntaxError
elif mode == None:
if self.expression_ast_node:
return self.expression_ast_node
else:
return self.ast_node
elif mode == "exec":
raise NotImplementedError
else:
raise ValueError("parse(): invalid mode=%r" % (mode,))
    def compile(self, mode=None):
        """
        Parse into AST and compile AST into code.

        :param mode:
          Compilation mode, as for `parse`; ``None`` picks "eval" for a
          single expression, else "exec".
        :rtype:
          ``CodeType``
        """
        ast_node = self.parse(mode=mode)
        # Re-infer the mode from the parsed node, since mode=None may have
        # resolved to either an Expression or a Module.
        mode = infer_compile_mode(ast_node)
        filename = str(self.filename or "<unknown>")
        return compile(ast_node, filename, mode)
@cached_attribute
def statements(self):
    r"""
    Partition of this ``PythonBlock`` into individual ``PythonStatement`` s.
    Each one contains at most 1 top-level ast node.  A ``PythonStatement``
    can contain no ast node to represent comments.

      >>> code = "# multiline\n# comment\n'''multiline\nstring'''\nblah\n"
      >>> print(PythonBlock(code).statements) # doctest:+NORMALIZE_WHITESPACE
      (PythonStatement('# multiline\n# comment\n'),
       PythonStatement("'''multiline\nstring'''\n", startpos=(3,1)),
       PythonStatement('blah\n', startpos=(5,1)))

    :rtype:
      ``tuple`` of `PythonStatement` s
    """
    # Split the annotated AST's top-level nodes (plus comment-only regions)
    # into (nodes, subtext) chunks.
    node = self.annotated_ast_node
    nodes_subtexts = list(_split_code_lines(node.body, self.text))
    if nodes_subtexts == [(self.ast_node.body, self.text)]:
        # This block is either all comments/blanks or a single statement
        # with no surrounding whitespace/comment lines.  Return self.
        return (PythonStatement._construct_from_block(self),)
    cls = type(self)
    # Build one sub-block per chunk, propagating this block's compiler flags.
    statement_blocks = [
        cls.__construct_from_annotated_ast(subnodes, subtext, self.flags)
        for subnodes, subtext in nodes_subtexts]
    # Convert to statements.
    statements = []
    for b in statement_blocks:
        statement = PythonStatement._construct_from_block(b)
        statements.append(statement)
        # Optimization: set the new sub-block's ``statements`` attribute
        # since we already know it contains exactly one statement, itself.
        assert 'statements' not in b.__dict__
        b.statements = (statement,)
    return tuple(statements)
@cached_attribute
def source_flags(self):
    """
    If the AST contains __future__ imports, then the compiler_flags
    associated with them.  Otherwise, 0.

    The difference between ``source_flags`` and ``flags`` is that ``flags``
    may be set by the caller (e.g. based on an earlier __future__ import)
    and include automatically guessed flags, whereas ``source_flags`` is
    only nonzero if this code itself contains __future__ imports.

    :rtype:
      `CompilerFlags`
    """
    # The attribute is presumably attached when the AST is built -- see
    # ``ast_node``; this property only exposes it.
    return self.ast_node.source_flags
@cached_attribute
def flags(self):
    """
    The compiler flags for this code block, including both the input flags
    (possibly automatically guessed), and the flags from "__future__"
    imports in the source code text.

    :rtype:
      `CompilerFlags`
    """
    # Like ``source_flags``, this is read off the parsed AST node, which
    # presumably carries the merged flags -- confirm in ``ast_node``.
    return self.ast_node.flags
def groupby(self, predicate):
    """
    Partition this block of code into smaller blocks of code which
    consecutively have the same ``predicate``.

    :param predicate:
      Function that takes a `PythonStatement` and returns a value.
    :return:
      Generator that yields (group, `PythonBlock` s).
    """
    concatenate = type(self).concatenate
    for key, group in groupby(self.statements, predicate):
        yield key, concatenate([stmt.block for stmt in group],
                               assume_contiguous=True)
def string_literals(self):
    r"""
    Yield all string literals anywhere in this block.

    The string literals have ``startpos`` attributes attached.

      >>> block = PythonBlock("'a' + ('b' + \n'c')")
      >>> [(f.s, f.startpos) for f in block.string_literals()]
      [('a', FilePos(1,1)), ('b', FilePos(1,8)), ('c', FilePos(2,1))]

    :return:
      Iterable of ``ast.Str`` or ``ast.Bytes`` nodes
    """
    for node in _walk_ast_nodes_in_order(self.annotated_ast_node):
        if not isinstance(node, (ast.Str, Bytes)):
            continue
        assert hasattr(node, 'startpos')
        yield node
def _get_docstring_nodes(self):
    """
    Yield docstring AST nodes.

    We consider the following to be docstrings::

      - First literal string of function definitions, class definitions,
        and modules (the python standard)
      - Literal strings after assignments, per Epydoc

    :rtype:
      Generator of ``ast.Str`` nodes
    """
    # This is similar to ``ast.get_docstring``, but:
    #   - This function is recursive
    #   - This function yields the node object, rather than the string
    #   - This function yields multiple docstrings (even per ast node)
    #   - This function doesn't raise TypeError on other AST types
    #   - This function doesn't cleandoc
    docstring_containers = (ast.FunctionDef, ast.ClassDef, ast.Module)
    for node in _walk_ast_nodes_in_order(self.annotated_ast_node):
        if not isinstance(node, docstring_containers):
            continue
        if not node.body:
            continue
        # If the first body item is a literal string, then yield the node.
        if (isinstance(node.body[0], ast.Expr) and
                isinstance(node.body[0].value, ast.Str)):
            yield node.body[0].value
        # BUGFIX: iterate from 0 (previously ``range(1, len(node.body)-1)``)
        # so that an assignment which is the *first* statement, followed by
        # a literal string, is also treated as an Epydoc-style docstring.
        # The first-pair case cannot double-yield with the check above,
        # because n1 must be an Assign, not a string Expr.
        for i in range(len(node.body) - 1):
            # If a body item is an assignment and the next one is a
            # literal string, then yield the node for the literal string.
            n1, n2 = node.body[i], node.body[i + 1]
            if (isinstance(n1, ast.Assign) and
                    isinstance(n2, ast.Expr) and
                    isinstance(n2.value, ast.Str)):
                yield n2.value
def get_doctests(self):
    r"""
    Return doctests in this code.

      >>> PythonBlock("x\n'''\n >>> foo(bar\n ... + baz)\n'''\n").get_doctests()
      [PythonBlock('foo(bar\n + baz)\n', startpos=(3,2))]

    :rtype:
      ``list`` of `PythonStatement` s
    """
    # Local import: doctest is only needed here.
    import doctest
    parser = doctest.DocTestParser()
    doctest_blocks = []
    filename = self.filename
    flags = self.flags
    for ast_node in self._get_docstring_nodes():
        try:
            examples = parser.get_examples(ast_node.s)
        except Exception:
            # Best-effort: an unparseable docstring is logged and skipped,
            # not raised.
            blob = ast_node.s
            if len(blob) > 60:
                blob = blob[:60] + '...'
            # TODO: let caller decide how to handle
            logger.warning("Can't parse docstring; ignoring: %r", blob)
            continue
        for example in examples:
            # Translate the example's docstring-relative position into a
            # file position.
            lineno = ast_node.startpos.lineno + example.lineno
            colno = ast_node.startpos.colno + example.indent # dubious
            text = FileText(example.source, filename=filename,
                            startpos=(lineno,colno))
            try:
                block = PythonBlock(text, flags=flags)
                block.ast_node # make sure we can parse
            except Exception:
                # Same best-effort policy for unparseable doctest source.
                blob = text.joined
                if len(blob) > 60:
                    blob = blob[:60] + '...'
                logger.warning("Can't parse doctest; ignoring: %r", blob)
                continue
            doctest_blocks.append(block)
    return doctest_blocks
def __repr__(self):
    """
    Return a constructor-style representation of this block.  Optional
    arguments (filename, startpos, flags) appear only when they differ
    from their defaults.
    """
    parts = ["%s(%r" % (type(self).__name__, self.text.joined)]
    if self.filename:
        parts.append(", filename=%r" % (str(self.filename),))
    if self.startpos != FilePos():
        parts.append(", startpos=%s" % (self.startpos,))
    if self.flags != self.source_flags:
        parts.append(", flags=%s" % (self.flags,))
    parts.append(")")
    return "".join(parts)
def __str__(self):
    # Stringifying a block yields its underlying source text.
    return str(self.text)
def __text__(self):
    # Presumably the hook used by the FileText conversion protocol --
    # confirm against callers of ``__text__``.
    return self.text
def __eq__(self, other):
    """
    Blocks are equal when both their source text and compiler flags match.
    """
    if other is self:
        return True
    if not isinstance(other, PythonBlock):
        return NotImplemented
    return (self.text, self.flags) == (other.text, other.flags)
def __ne__(self, other):
    # Delegate to ``__eq__``; the ``==`` operator resolves NotImplemented
    # via the reflected operation, so foreign types compare unequal safely.
    return not (self == other)
# The rest are defined by total_ordering
def __lt__(self, other):
    """
    Order blocks lexicographically by (text, flags); total_ordering
    derives the remaining comparisons.
    """
    if isinstance(other, PythonBlock):
        return (self.text, self.flags) < (other.text, other.flags)
    return NotImplemented
def __cmp__(self, other):
    # Python 2 legacy: ``__cmp__`` and the ``cmp`` builtin do not exist on
    # Python 3, where this method is never invoked (the rich comparisons
    # above are used instead), so the ``cmp`` reference is harmless there.
    if self is other:
        return 0
    if not isinstance(other, PythonBlock):
        return NotImplemented
    return cmp(self.text, other.text) or cmp(self.flags, other.flags)
def __hash__(self):
    """
    Hash by ``(text, flags)``, consistent with ``__eq__``.

    The result is memoized on the instance.  The previous implementation
    rebound ``self.__hash__`` to a lambda, but on Python 3 ``hash(obj)``
    looks up ``__hash__`` on the *type*, not the instance, so that trick
    never took effect for the ``hash()`` builtin.  Caching in a plain
    attribute works for every call path.
    """
    try:
        return self._cached_hash
    except AttributeError:
        h = hash((self.text, self.flags))
        self._cached_hash = h
        return h
| 37.357143 | 103 | 0.594606 |
778c7c08982527db71d71a1375bdbb14ecd9ab77 | 1,700 | py | Python | openpecha/core/layer.py | ta4tsering/openpecha-toolkit | ff24b4813fb8146a4327e746e4024890b6807bea | [
"Apache-2.0"
] | 1 | 2020-01-28T05:10:14.000Z | 2020-01-28T05:10:14.000Z | openpecha/core/layer.py | ta4tsering/openpecha-toolkit | ff24b4813fb8146a4327e746e4024890b6807bea | [
"Apache-2.0"
] | 38 | 2019-11-12T10:49:25.000Z | 2021-04-07T12:10:24.000Z | openpecha/core/layer.py | ta4tsering/openpecha-toolkit | ff24b4813fb8146a4327e746e4024890b6807bea | [
"Apache-2.0"
] | 6 | 2019-11-14T12:30:35.000Z | 2020-05-12T01:50:13.000Z | from datetime import datetime
from enum import Enum
from typing import Dict, Optional
from pydantic import BaseModel, validator
from .ids import get_pecha_id, get_uuid
class LayerEnum(Enum):
    """Known annotation layer types; each value is the layer's serialized name."""

    index = "index"

    # Title / structure layers
    book_title = "BookTitle"
    sub_title = "SubTitle"
    book_number = "BookNumber"
    poti_title = "PotiTitle"
    author = "Author"
    chapter = "Chapter"

    # Topic layers
    topic = "Text"
    sub_topic = "SubText"

    pagination = "Pagination"
    citation = "Citation"
    correction = "Correction"
    error_candidate = "ErrorCandidate"
    peydurma = "Peydurma"
    sabche = "Sabche"
    tsawa = "Tsawa"
    yigchung = "Yigchung"
    archaic = "Archaic"
    durchen = "Durchen"
    footnote = "Footnote"
    segment = "Segment"
class InitialCreationEnum(Enum):
    """How a pecha was initially created (OCR, ebook import, or manual input)."""

    ocr = "ocr"
    ebook = "ebook"
    input = "input"
class Layer(BaseModel):
    """A single annotation layer (pydantic v1 model)."""

    # Defaults to None; replaced with a fresh UUID by the validator below.
    id: str = None
    annotation_type: LayerEnum
    # Zero-padded integer string, e.g. "00001".
    revision: str = "00001"
    # NOTE(review): mutable default ({}) -- presumably safe because pydantic
    # copies field defaults per instance; confirm for the pinned version.
    annotations: Dict = {}

    @validator("id", pre=True, always=True)
    def set_id(cls, v):
        # Generate a UUID when no id was supplied.
        return v or get_uuid()

    @validator("revision")
    def revision_must_int_parsible(cls, v):
        # Revisions must be digit-only so bump_revision can int() them.
        assert v.isdigit(), "must integer parsible like `00002`"
        return v

    def bump_revision(self):
        # Increment the revision, keeping the 5-digit zero padding.
        self.revision = f"{int(self.revision)+1:05}"

    def reset(self):
        # Drop all annotations and restart the revision counter.
        self.revision = "00001"
        self.annotations = {}
class PechaMetaData(BaseModel):
    """Top-level metadata for a pecha (pydantic v1 model)."""

    # Defaults to None; replaced with a fresh pecha id by the validator below.
    id: str = None
    initial_creation_type: InitialCreationEnum
    source_metadata: Optional[Dict] = {}
    # NOTE(review): timestamps default to None -- presumably set by callers;
    # not populated anywhere in this module.
    created_at: datetime = None
    last_modified_at: datetime = None

    @validator("id", pre=True, always=True)
    def set_id(cls, v):
        # Generate a new pecha id when none was supplied.
        return v or get_pecha_id()
| 22.368421 | 64 | 0.645294 |
7100f1b6122d8c4203f3bedaddbd90e4ce2a966d | 6,081 | py | Python | tests/resource/generics/test_git.py | NickHugi/PyKotor | cab1089f8a8a135861bef45340203718d39f5e1f | [
"MIT"
] | 1 | 2022-02-21T15:17:28.000Z | 2022-02-21T15:17:28.000Z | tests/resource/generics/test_git.py | NickHugi/PyKotor | cab1089f8a8a135861bef45340203718d39f5e1f | [
"MIT"
] | 1 | 2022-03-12T16:06:23.000Z | 2022-03-12T16:06:23.000Z | tests/resource/generics/test_git.py | NickHugi/PyKotor | cab1089f8a8a135861bef45340203718d39f5e1f | [
"MIT"
] | null | null | null | from unittest import TestCase
from pykotor.common.language import LocalizedString
from pykotor.common.misc import EquipmentSlot, Game, Color
from pykotor.resource.formats.gff import load_gff
from pykotor.resource.generics.git import construct_git, dismantle_git
TEST_FILE = "../../files/test.git"  # GFF fixture; path is relative to the test working directory
class TestGIT(TestCase):
    """Round-trip tests for GIT (area instance) GFF construction."""

    def test_io(self):
        # Construct from the fixture, validate, then dismantle/re-construct
        # and validate again to prove the round trip is lossless for the
        # fields checked below.
        gff = load_gff(TEST_FILE)
        git = construct_git(gff)
        self.validate_io(git)

        gff = dismantle_git(git)
        git = construct_git(gff)
        self.validate_io(git)

    def validate_io(self, git):
        # --- area-wide audio/music properties ---
        self.assertEqual(127, git.ambient_volume)
        self.assertEqual(17, git.ambient_sound_id)
        self.assertEqual(1, git.env_audio)
        self.assertEqual(41, git.music_battle_id)
        self.assertEqual(15, git.music_standard_id)
        self.assertEqual(20000, git.music_delay)

        # --- cameras ---
        self.assertEqual(1, git.cameras[0].camera_id)
        self.assertEqual(55, git.cameras[0].fov)
        self.assertEqual(3.0, git.cameras[0].height)
        self.assertEqual(0.0, git.cameras[0].mic_range)
        self.assertAlmostEqual(69.699, git.cameras[0].pitch, 2)
        self.assertAlmostEqual(0.971, git.cameras[0].orientation.x, 2)
        self.assertAlmostEqual(0.000, git.cameras[0].orientation.y, 2)
        self.assertAlmostEqual(0.000, git.cameras[0].orientation.z, 2)
        self.assertAlmostEqual(0.235, git.cameras[0].orientation.w, 2)
        self.assertAlmostEqual(-57.167, git.cameras[0].position.x, 2)
        self.assertAlmostEqual(-28.255, git.cameras[0].position.y, 2)
        self.assertAlmostEqual(0.000, git.cameras[0].position.z, 2)

        # --- creatures ---
        self.assertEqual("c_ithorian001", git.creatures[0].resref)
        self.assertAlmostEqual(-41.238, git.creatures[0].position.x, 2)
        self.assertAlmostEqual(-53.214, git.creatures[0].position.y, 2)
        self.assertAlmostEqual(0.000, git.creatures[0].position.z, 2)
        self.assertAlmostEqual(146.249, git.creatures[0].bearing, 2)

        # --- doors (including module transition fields) ---
        self.assertAlmostEqual(1.0, git.doors[0].bearing, 2)
        self.assertAlmostEqual(-43.763, git.doors[0].position.x, 2)
        self.assertAlmostEqual(-20.143, git.doors[0].position.y, 2)
        self.assertAlmostEqual(1.000, git.doors[0].position.z, 2)
        self.assertEqual("linkedto", git.doors[0].linked_to)
        self.assertEqual(1, git.doors[0].linked_to_flags.value)
        self.assertEqual("resref", git.doors[0].linked_to_module)
        self.assertEqual("Ithorian", git.doors[0].tag)
        self.assertEqual("sw_door_taris007", git.doors[0].resref)
        self.assertEqual(13, git.doors[0].transition_destination.stringref)
        self.assertEqual(Color.from_bgr_integer(10197915), git.doors[0].tweak_color)

        # --- encounters (geometry and spawn points) ---
        self.assertEqual("mercenariesentry", git.encounters[0].resref)
        self.assertAlmostEqual(-41.319, git.encounters[0].position.x, 2)
        self.assertAlmostEqual(-19.222, git.encounters[0].position.y, 2)
        self.assertAlmostEqual(1.000, git.encounters[0].position.z, 2)
        self.assertAlmostEqual(-5.890, git.encounters[0].geometry[0].x, 2)
        self.assertAlmostEqual(3.072, git.encounters[0].geometry[0].y, 2)
        self.assertAlmostEqual(0.025, git.encounters[0].geometry[0].z, 2)
        self.assertAlmostEqual(-48.936, git.encounters[0].spawn_points[0].position.x, 2)
        self.assertAlmostEqual(-29.831, git.encounters[0].spawn_points[0].position.y, 2)
        self.assertAlmostEqual(1.000, git.encounters[0].spawn_points[0].position.z, 2)
        self.assertAlmostEqual(0.196, git.encounters[0].spawn_points[0].orientation, 2)

        # --- placeables ---
        self.assertEqual("k_trans_abort", git.placeables[0].resref)
        self.assertAlmostEqual(1.0, git.placeables[0].bearing, 2)
        self.assertAlmostEqual(-33.268, git.placeables[0].position.x, 2)
        self.assertAlmostEqual(-15.299, git.placeables[0].position.y, 2)
        self.assertAlmostEqual(9.536, git.placeables[0].position.z, 2)
        self.assertEqual(Color.from_bgr_integer(10197915), git.placeables[0].tweak_color)

        # --- sounds ---
        self.assertEqual("computerpanne001", git.sounds[0].resref)
        self.assertAlmostEqual(-78.538, git.sounds[0].position.x, 2)
        self.assertAlmostEqual(13.498, git.sounds[0].position.y, 2)
        self.assertAlmostEqual(2.000, git.sounds[0].position.z, 2)

        # --- stores ---
        self.assertEqual("m_chano", git.stores[0].resref)
        self.assertAlmostEqual(106.230, git.stores[0].position.x, 2)
        self.assertAlmostEqual(-16.590, git.stores[0].position.y, 2)
        self.assertAlmostEqual(0.063, git.stores[0].position.z, 2)
        self.assertAlmostEqual(90.000, git.stores[0].bearing, 2)

        # --- triggers (including module transition fields) ---
        self.assertEqual("newgeneric001", git.triggers[0].resref)
        self.assertAlmostEqual(-29.903, git.triggers[0].position.x, 2)
        self.assertAlmostEqual(-11.463, git.triggers[0].position.y, 2)
        self.assertAlmostEqual(-2.384, git.triggers[0].position.z, 2)
        self.assertEqual("from_204TEL", git.triggers[0].linked_to)
        self.assertEqual(2, git.triggers[0].linked_to_flags.value)
        self.assertEqual("203tel", git.triggers[0].linked_to_module)
        self.assertEqual("to_203TEL", git.triggers[0].tag)
        self.assertEqual(104245, git.triggers[0].transition_destination.stringref)
        self.assertAlmostEqual(-7.433, git.triggers[0].geometry[0].x, 2)
        self.assertAlmostEqual(1.283, git.triggers[0].geometry[0].y, 2)
        self.assertAlmostEqual(0.025, git.triggers[0].geometry[0].z, 2)

        # --- waypoints (including map note) ---
        self.assertEqual("wp_transabort", git.waypoints[0].resref)
        self.assertEqual("wp_transabort", git.waypoints[0].tag)
        self.assertEqual(135283, git.waypoints[0].name.stringref)
        self.assertTrue(git.waypoints[0].map_note_enabled)
        self.assertEqual(123, git.waypoints[0].map_note.stringref)
        self.assertAlmostEqual(-33.620, git.waypoints[0].position.x, 2)
        self.assertAlmostEqual(-16.065, git.waypoints[0].position.y, 2)
        self.assertAlmostEqual(1.0, git.waypoints[0].position.z, 2)
        self.assertAlmostEqual(90.000, git.waypoints[0].bearing, 2)
| 54.294643 | 89 | 0.691169 |
cd29365ca09399b754039b92cea3ec91c3ecd346 | 148 | py | Python | icevision/models/mmdet/models/faster_rcnn/backbones/__init__.py | matt-deboer/icevision | bf886e558cc0c5ba5c84559514b8330c64d72f8d | [
"Apache-2.0"
] | null | null | null | icevision/models/mmdet/models/faster_rcnn/backbones/__init__.py | matt-deboer/icevision | bf886e558cc0c5ba5c84559514b8330c64d72f8d | [
"Apache-2.0"
] | null | null | null | icevision/models/mmdet/models/faster_rcnn/backbones/__init__.py | matt-deboer/icevision | bf886e558cc0c5ba5c84559514b8330c64d72f8d | [
"Apache-2.0"
] | null | null | null | from icevision.models.mmdet.models.faster_rcnn.backbones.resnet_fpn import *
from icevision.models.mmdet.models.faster_rcnn.backbones.swin import *
| 49.333333 | 76 | 0.851351 |
5f1e62cc47587406b4741d3d7cd5a2aad0b6e1ab | 9,546 | py | Python | src/neuro_comma/dataset.py | marlon-br/neuro-comma | 575883efa30387dae3366034147e915ec2b19866 | [
"MIT"
] | 5 | 2021-09-09T11:30:34.000Z | 2021-12-13T19:37:06.000Z | src/neuro_comma/dataset.py | marlon-br/neuro-comma | 575883efa30387dae3366034147e915ec2b19866 | [
"MIT"
] | null | null | null | src/neuro_comma/dataset.py | marlon-br/neuro-comma | 575883efa30387dae3366034147e915ec2b19866 | [
"MIT"
] | null | null | null | from typing import Dict, List, Optional, Tuple, Union
from typing_extensions import TypedDict
import numpy as np
import torch
from torch.tensor import Tensor
from tqdm import tqdm
from transformers import PreTrainedTokenizer
from neuro_comma.augmentation import AUGMENTATIONS
from neuro_comma.pretrained import TOKEN_IDX
class BaseDataset(torch.utils.data.Dataset):
    """Dataset that reads tab-separated token/punctuation files and packs
    them into fixed-length subword-id sequences (START_SEQ ... END_SEQ plus
    PAD), presumably for BERT-like models -- see pretrained.TOKEN_IDX."""

    def __init__(self,
                 files: Union[str, List[str]],
                 tokenizer: PreTrainedTokenizer,
                 targets: Dict[str, int],
                 sequence_len: int,
                 token_style: str,
                 *args,
                 **kwargs) -> None:
        # Keep configuration for _parse_data / parse_tokens.
        self.tokenizer = tokenizer
        self.targets = targets
        self.seq_len = sequence_len
        self.token_style = token_style

        # Accept a single path or a list of paths; batches from all files
        # are concatenated into one flat list.
        if isinstance(files, list):
            self.data = []
            for file in files:
                self.data += self._parse_data(file, *args, **kwargs)
        else:
            self.data = self._parse_data(files, *args, **kwargs)

    def _parse_data(self, file_path: str, *args, **kwargs) -> List[List[List[int]]]:
        """Parse file to train data

        Args:
            file_path (`str`): text file path that contains tokens and punctuations separated by tab in lines

        Returns:
            list[Batch]: each having sequence_len  punctuation_mask is used to ignore special indices like padding and intermediate sub-word token during evaluation
        """
        with open(file_path, 'r', encoding='utf-8') as file:
            x, y = [], []
            for i, line in enumerate(file):
                if (line.strip()):
                    line = line.strip()
                    # Each non-empty line is "<token>\t<target>"; anything
                    # that does not split into exactly two fields is skipped.
                    token = line.rsplit('\t', 1)
                    if len(token) == 2:
                        x.append(token[0])
                        # KeyError here means an unknown target label.
                        target = self.targets[token[1]]
                        y.append(target)
                    else:
                        continue
            data = self.parse_tokens(x, self.tokenizer, self.seq_len, self.token_style, y, *args, **kwargs)
        return data

    @classmethod
    def parse_tokens(cls,
                     tokens: Union[List[str], Tuple[str]],
                     tokenizer: PreTrainedTokenizer,
                     seq_len: int,
                     token_style: str,
                     targets: Optional[List[int]] = None,
                     *args,
                     **kwargs) -> List[List[List[int]]]:
        """
        Convert tokenized data for model prediction

        Args:
            tokens (`Union[list[str], tuple[str]]`): splited tokens
            tokenizer (`PreTrainedTokenizer`): tokenizer which split tokens to subtokens
            seq_len (`int`): sequence length
            token_style (`str`): token_style from pretrained.TOKEN_IDX

        Returns:
            (`list[BatchWithoutTarget]`): list of bathces

        ```txt
        tokens    : [token  token  ##token  PAD ]
        x         : [321    1233   23121    101 ]
        y         : [tar    0      tar      0   ]
        y_mask    : [1      0      1        0   ]
        attn_mask : [1      1      1        0   ]
        ```
        """
        data_items = []
        # loop until end of the entire text
        idx = 0

        # Optional tqdm progress bar, enabled via parse_tokens(..., debug=True).
        debug = kwargs.get('debug')
        if debug:
            pbar = tqdm(total=len(tokens))

        while idx < len(tokens):
            # Every sequence starts with the special start-of-sequence token;
            # w_id tracks which original word each subtoken came from (-1 for
            # special tokens).
            x = [TOKEN_IDX[token_style]['START_SEQ']]
            w_id = [-1] # word indexes
            y = [0]
            y_mask = [1] if targets else [0]

            # loop until we have required sequence length
            # -1 because we will have a special end of sequence token at the end
            while len(x) < seq_len - 1 and idx < len(tokens):
                word_pieces = tokenizer.tokenize(tokens[idx])
                # if taking these tokens exceeds sequence length we finish
                # current sequence with padding
                # then start next sequence from this token
                if len(word_pieces) + len(x) >= seq_len:
                    break

                # All subword pieces except the last carry a zero target and
                # a zero y_mask (ignored during evaluation).
                for i in range(len(word_pieces) - 1):
                    x.append(tokenizer.convert_tokens_to_ids(word_pieces[i]))
                    w_id.append(idx)
                    y.append(0)
                    y_mask.append(0)

                # The last piece (or UNK if the tokenizer produced nothing)
                # carries the word's target.
                if len(word_pieces) > 0:
                    x.append(tokenizer.convert_tokens_to_ids(word_pieces[-1]))
                else:
                    x.append(TOKEN_IDX[token_style]['UNK'])

                w_id.append(idx)

                if targets:
                    y.append(targets[idx])
                else:
                    y.append(0)

                y_mask.append(1)

                idx += 1
                if debug:
                    pbar.update(1)

            x.append(TOKEN_IDX[token_style]['END_SEQ'])
            w_id.append(-1)
            y.append(0)

            if targets:
                y_mask.append(1)
            else:
                y_mask.append(0)

            # Fill with pad tokens
            if len(x) < seq_len:
                x = x + [TOKEN_IDX[token_style]['PAD'] for _ in range(seq_len - len(x))]
                w_id = w_id + [-100 for _ in range(seq_len - len(w_id))]
                y = y + [0 for _ in range(seq_len - len(y))]
                y_mask = y_mask + [0 for _ in range(seq_len - len(y_mask))]

            # Attention mask: 1 for real tokens, 0 for padding.
            attn_mask = [1 if token != TOKEN_IDX[token_style]['PAD'] else 0 for token in x]
            data_items.append([x, w_id, attn_mask, y, y_mask])

        if debug:
            pbar.close()

        return data_items

    def __len__(self) -> int:
        # Number of fixed-length sequences produced from the input file(s).
        return len(self.data)

    def __getitem__(self, index: int) -> Tuple[Tensor, Tensor, Tensor, Tensor]:
        # Each data row is [x, w_id, attn_mask, y, y_mask]; w_id (index 1)
        # is not part of the returned sample.
        x = self.data[index][0]
        attn_mask = self.data[index][2]
        y = self.data[index][3]
        y_mask = self.data[index][4]

        x = torch.tensor(x) # type: ignore
        attn_mask = torch.tensor(attn_mask) # type: ignore
        y = torch.tensor(y) # type: ignore
        y_mask = torch.tensor(y_mask) # type: ignore

        return x, y, attn_mask, y_mask # type: ignore
class RepunctDataset(BaseDataset):
    """Punctuation-restoration dataset with optional token-level augmentation
    applied at sampling time during training."""

    def __init__(self,
                 files: Union[str, List[str]],
                 tokenizer: PreTrainedTokenizer,
                 targets: Dict[str, int],
                 sequence_len: int,
                 token_style: str,
                 is_train=False,
                 augment_rate=0.,
                 augment_type='substitute',
                 *args,
                 **kwargs) -> None:
        """Preprocess data for restore punctuation

        Args:
            files (`Union[str, list[str]]`): single file or list of text files containing tokens and punctuations separated by tab in lines
            tokenizer (`PreTrainedTokenizer`): tokenizer that will be used to further tokenize word for BERT like models
            targets (`dict[str, int]`): dict with targets
            sequence_len (`int`): length of each sequence
            token_style (`str`): For getting index of special tokens in pretrained.TOKEN_IDX
            is_train (`bool, optional`): if false do not apply augmentation. Defaults to False.
            augment_rate (`float, optional`): percent of data which should be augmented. Defaults to 0.0.
            augment_type (`str, optional`): augmentation type. Defaults to 'substitute'.
        """
        super().__init__(files, tokenizer, targets, sequence_len, token_style, *args, **kwargs)
        self.is_train = is_train
        self.augment_type = augment_type
        self.augment_rate = augment_rate

    def _augment(self, x, y, y_mask):
        """Apply AUGMENTATIONS[self.augment_type] to each position with
        probability ``self.augment_rate`` and return the augmented
        ``(x, y, attn_mask, y_mask)``, re-trimmed/padded to ``self.seq_len``."""
        x_aug = []
        y_aug = []
        y_mask_aug = []
        for i in range(len(x)):
            r = np.random.rand()
            if r < self.augment_rate:
                # The augmentation mutates the *_aug lists in place and may
                # insert, delete, or substitute at position i.
                AUGMENTATIONS[self.augment_type](x, y, y_mask, x_aug, y_aug, y_mask_aug, i, self.token_style)
            else:
                x_aug.append(x[i])
                y_aug.append(y[i])
                y_mask_aug.append(y_mask[i])

        if len(x_aug) > self.seq_len:
            # len increased due to insert
            x_aug = x_aug[:self.seq_len]
            y_aug = y_aug[:self.seq_len]
            y_mask_aug = y_mask_aug[:self.seq_len]
        elif len(x_aug) < self.seq_len:
            # len decreased due to delete
            x_aug = x_aug + [TOKEN_IDX[self.token_style]['PAD'] for _ in range(self.seq_len - len(x_aug))]
            y_aug = y_aug + [0 for _ in range(self.seq_len - len(y_aug))]
            y_mask_aug = y_mask_aug + [0 for _ in range(self.seq_len - len(y_mask_aug))]

        # BUGFIX: build the attention mask from the *augmented* sequence.
        # The previous code iterated over ``x`` (pre-augmentation), so PAD
        # positions shifted by insert/delete augmentation were masked wrong.
        attn_mask = [1 if token != TOKEN_IDX[self.token_style]['PAD'] else 0 for token in x_aug]
        return x_aug, y_aug, attn_mask, y_mask_aug

    def __getitem__(self, index: int) -> Tuple[Tensor, Tensor, Tensor, Tensor]:
        # Same layout as BaseDataset rows: [x, w_id, attn_mask, y, y_mask].
        x = self.data[index][0]
        attn_mask = self.data[index][2]
        y = self.data[index][3]
        y_mask = self.data[index][4]

        # Augmentation is sampled fresh on every access, training only.
        if self.is_train and self.augment_rate > 0:
            x, y, attn_mask, y_mask = self._augment(x, y, y_mask)

        x = torch.tensor(x)  # type: ignore
        attn_mask = torch.tensor(attn_mask)  # type: ignore
        y = torch.tensor(y)  # type: ignore
        y_mask = torch.tensor(y_mask)  # type: ignore

        return x, y, attn_mask, y_mask  # type: ignore
| 38.184 | 163 | 0.528913 |
f23dbfa14bda6ff2db7419059e94da495c662ee5 | 1,893 | py | Python | tests/builtins/test_enumerate.py | jacebrowning/voc | 7bc84e8a870674d300ad5083748cf6b826e7fb68 | [
"BSD-3-Clause"
] | 850 | 2015-08-17T16:45:22.000Z | 2019-03-24T07:50:15.000Z | tests/builtins/test_enumerate.py | jacebrowning/voc | 7bc84e8a870674d300ad5083748cf6b826e7fb68 | [
"BSD-3-Clause"
] | 506 | 2015-09-26T18:20:00.000Z | 2019-03-19T18:16:18.000Z | tests/builtins/test_enumerate.py | jacebrowning/voc | 7bc84e8a870674d300ad5083748cf6b826e7fb68 | [
"BSD-3-Clause"
] | 670 | 2015-09-12T21:57:44.000Z | 2019-03-19T13:15:33.000Z | from .. utils import TranspileTestCase, BuiltinFunctionTestCase
class EnumerateTests(TranspileTestCase):
def test_enumerate(self):
self.assertCodeExecution("""
lst=['a','b','c','d','e']
print(list(enumerate(lst)))
lst=['a','b','c','d','e']
print(list(enumerate(lst,start=-40)))
lst=['a','b','c','d','e']
print(list(enumerate(lst,start=46)))
lst=[('a',4),'b','c',10,'e']
print(list(enumerate(lst)))
print(list(enumerate([])))
print(list(enumerate([], start=10)))
""")
def test_enumerate_invalid_start_args(self):
self.assertCodeExecution("""
try:
print(list(enumerate(['a','b','c'], start=None)))
except TypeError as err:
print(err)
try:
print(list(enumerate(['a','b','c'], start=1.5)))
except TypeError as err:
print(err)
try:
print(list(enumerate(['a','b','c'], start="start_string")))
except TypeError as err:
print(err)
""")
def test_enumerate_invalid_iterable(self):
self.assertCodeExecution("""
try:
num=10
print(list(enumerate(num, start=10)))
except TypeError as err:
print(err)
try:
print(list(enumerate()))
except TypeError as err:
print(err)
""")
class BuiltinEnumerateFunctionTests(BuiltinFunctionTestCase, TranspileTestCase):
functions = ["enumerate"]
not_implemented = [
'test_bytearray',
'test_bytes',
'test_dict',
'test_frozenset',
'test_list',
'test_range',
'test_set',
'test_str',
'test_tuple',
]
| 30.047619 | 80 | 0.493925 |
694e6e8f59d1dd9bf02240fa7ff9fe3f833b6274 | 2,177 | py | Python | examples/advanced/exogenous/run.py | suddhu/pddlstream | 0a027154a24ce06a1ca074f6f2053fde7fb0a190 | [
"MIT"
] | 1 | 2019-02-08T17:06:41.000Z | 2019-02-08T17:06:41.000Z | examples/advanced/exogenous/run.py | suddhu/pddlstream | 0a027154a24ce06a1ca074f6f2053fde7fb0a190 | [
"MIT"
] | 1 | 2020-03-21T16:53:53.000Z | 2020-03-21T16:53:53.000Z | examples/advanced/exogenous/run.py | yijiangh/pddlstream | d0eb256e88b8b5174fbd136a82867fd9e9cebc67 | [
"MIT"
] | 1 | 2018-12-14T09:12:03.000Z | 2018-12-14T09:12:03.000Z | #!/usr/bin/env python
from __future__ import print_function
import cProfile
import pstats
import numpy as np
from pddlstream.algorithms.focused import solve_focused
from pddlstream.algorithms.incremental import solve_incremental
from pddlstream.language.generator import from_fn
from pddlstream.utils import print_solution, read, get_file_path
def pddlstream_from_belief():
domain_pddl = read(get_file_path(__file__, 'domain.pddl'))
constant_map = {}
stream_pddl = read(get_file_path(__file__, 'stream.pddl'))
stream_map = {
'inv-kin': from_fn(lambda p: (p + np.array([0, 1]),)),
'motion': from_fn(lambda q1, q2: ([t*(q2-q1) + q1 for t in [0, 1]],)), # linspace, arange
}
# Options
# - observation produces one of several poses
# - always at the pose, observation just makes it observable
# - object has a unobserved fluent
block = 'block1'
pose = None # Unknown
#pose = np.array([1, 0])
conf = np.array([0, 1])
booth = np.array([0, 2])
init = [
('Initial',), # Forces move first
('Conf', conf),
('Conf', booth),
('Booth', booth),
('AtConf', conf),
('HandEmpty',),
('Block', block),
('Pose', pose),
('AtPose', block, pose),
#('Observable', pose),
#('Latent', pose),
]
goal = ('Holding', block)
return domain_pddl, constant_map, stream_pddl, stream_map, init, goal
##################################################
def main(focused=True):
# TODO: maybe load problems as a domain explicitly
pddlstream_problem = pddlstream_from_belief()
_, _, _, _, init, goal = pddlstream_problem
print(sorted(init, key=lambda f: f[0]))
print(goal)
pr = cProfile.Profile()
pr.enable()
if focused:
solution = solve_focused(pddlstream_problem, unit_costs=False)
else:
#solution = solve_exhaustive(pddlstream_problem, unit_costs=False)
solution = solve_incremental(pddlstream_problem, unit_costs=False)
print_solution(solution)
pr.disable()
pstats.Stats(pr).sort_stats('tottime').print_stats(10)
if __name__ == '__main__':
main()
| 28.644737 | 97 | 0.629766 |
9d7ab812e943f6ad8e465c956a36af1cf93a2eb9 | 15,721 | py | Python | afqinsight/tests/test_datasets.py | richford/AFQ-Insight | 3bf80e389f423361b7e942815bd06c77f3ef4785 | [
"BSD-3-Clause"
] | null | null | null | afqinsight/tests/test_datasets.py | richford/AFQ-Insight | 3bf80e389f423361b7e942815bd06c77f3ef4785 | [
"BSD-3-Clause"
] | 66 | 2019-05-03T22:10:11.000Z | 2022-02-20T18:33:38.000Z | afqinsight/tests/test_datasets.py | richford/AFQ-Insight | 3bf80e389f423361b7e942815bd06c77f3ef4785 | [
"BSD-3-Clause"
] | 8 | 2018-05-17T04:16:30.000Z | 2022-02-07T18:00:57.000Z | import numpy as np
import os.path as op
import pandas as pd
import pytest
import tempfile
import torch
import afqinsight as afqi
from afqinsight.datasets import (
bundles2channels,
load_afq_data,
download_sarica,
download_weston_havens,
AFQDataset,
standardize_subject_id,
)
data_path = op.join(afqi.__path__[0], "data")  # package-bundled data directory
test_data_path = op.join(data_path, "test_data")  # fixtures used by these tests
def test_bundles2channels():
    """bundles2channels reshapes (subjects, bundles*nodes) into channel form."""
    flat = np.random.rand(50, 4000)

    stacked = bundles2channels(flat, n_nodes=100, n_channels=40, channels_last=True)
    assert stacked.shape == (50, 100, 40)
    assert np.allclose(flat[:, :100], stacked[:, :, 0])

    stacked = bundles2channels(flat, n_nodes=100, n_channels=40, channels_last=False)
    assert stacked.shape == (50, 40, 100)
    assert np.allclose(flat[:, :100], stacked[:, 0, :])

    # Incompatible factorization (1000 * 7 != 4000) must be rejected.
    with pytest.raises(ValueError):
        bundles2channels(flat, n_nodes=1000, n_channels=7)
def test_standardize_subject_id():
    """IDs gain a "sub-" prefix when missing and are otherwise unchanged."""
    for raw, expected in [("sub-01", "sub-01"), ("01", "sub-01")]:
        assert standardize_subject_id(raw) == expected
def test_afqdataset_label_encode():
    # Three subjects; subject "3" has no "site" value, so its label-encoded
    # target is NaN and should be removed by drop_target_na().
    sub_dicts = [
        {"subject_id": "1", "age": 0, "site": "A"},
        {"subject_id": "2", "age": 1, "site": "B"},
        {"subject_id": "3", "age": 2},
    ]
    # Subject IDs in the nodes file mix "sub-" prefixed and bare forms on
    # purpose -- they must still be matched to the subjects above.
    node_dicts = [
        {"subjectID": "sub-1", "tractID": "A", "nodeID": 0, "fa": 0.1},
        {"subjectID": "sub-1", "tractID": "A", "nodeID": 1, "fa": 0.2},
        {"subjectID": "sub-1", "tractID": "B", "nodeID": 0, "fa": 0.3},
        {"subjectID": "sub-1", "tractID": "B", "nodeID": 1, "fa": 0.3},
        {"subjectID": "sub-2", "tractID": "A", "nodeID": 0, "fa": 0.4},
        {"subjectID": "sub-2", "tractID": "A", "nodeID": 1, "fa": 0.5},
        {"subjectID": "sub-2", "tractID": "B", "nodeID": 0, "fa": 0.6},
        {"subjectID": "sub-2", "tractID": "B", "nodeID": 1, "fa": 0.6},
        {"subjectID": "3", "tractID": "A", "nodeID": 0, "fa": 0.7},
        {"subjectID": "3", "tractID": "A", "nodeID": 1, "fa": 0.8},
        {"subjectID": "3", "tractID": "B", "nodeID": 0, "fa": 0.9},
        {"subjectID": "3", "tractID": "B", "nodeID": 1, "fa": 0.9},
    ]
    subs = pd.DataFrame(sub_dicts)
    nodes = pd.DataFrame(node_dicts)

    with tempfile.TemporaryDirectory() as temp_dir:
        subs.to_csv(op.join(temp_dir, "subjects.csv"), index=False)
        nodes.to_csv(op.join(temp_dir, "nodes.csv"), index=False)

        # Single label-encoded target column: y is 1-D; dropping the NaN
        # subject shrinks it from 3 to 2.
        tmp_dataset = afqi.AFQDataset(
            fn_nodes=op.join(temp_dir, "nodes.csv"),
            fn_subjects=op.join(temp_dir, "subjects.csv"),
            target_cols=["site"],
            dwi_metrics=["fa"],
            index_col="subject_id",
            label_encode_cols=["site"],
        )
        assert tmp_dataset.y.shape == (3,)
        tmp_dataset.drop_target_na()
        assert tmp_dataset.y.shape == (2,)

        # Two target columns (numeric + label-encoded): y is 2-D; dropping
        # the row with any NaN target removes one subject.
        tmp_dataset = afqi.AFQDataset(
            fn_nodes=op.join(temp_dir, "nodes.csv"),
            fn_subjects=op.join(temp_dir, "subjects.csv"),
            target_cols=["age", "site"],
            dwi_metrics=["fa"],
            index_col="subject_id",
            label_encode_cols=["site"],
        )
        assert tmp_dataset.y.shape == (3, 2)
        tmp_dataset.drop_target_na()
        assert tmp_dataset.y.shape == (2, 2)
def test_afqdataset_sub_prefix():
    # Subjects file uses bare IDs; the nodes file mixes "sub-" prefixed and
    # bare IDs.  All three subjects must still be matched and reported with
    # the standardized "sub-" prefix.
    sub_dicts = [
        {"subject_id": "1", "age": 0},
        {"subject_id": "2", "age": 1},
        {"subject_id": "3", "age": 2},
    ]
    node_dicts = [
        {"subjectID": "sub-1", "tractID": "A", "nodeID": 0, "fa": 0.1},
        {"subjectID": "sub-1", "tractID": "A", "nodeID": 1, "fa": 0.2},
        {"subjectID": "sub-1", "tractID": "B", "nodeID": 0, "fa": 0.3},
        {"subjectID": "sub-1", "tractID": "B", "nodeID": 1, "fa": 0.3},
        {"subjectID": "sub-2", "tractID": "A", "nodeID": 0, "fa": 0.4},
        {"subjectID": "sub-2", "tractID": "A", "nodeID": 1, "fa": 0.5},
        {"subjectID": "sub-2", "tractID": "B", "nodeID": 0, "fa": 0.6},
        {"subjectID": "sub-2", "tractID": "B", "nodeID": 1, "fa": 0.6},
        {"subjectID": "3", "tractID": "A", "nodeID": 0, "fa": 0.7},
        {"subjectID": "3", "tractID": "A", "nodeID": 1, "fa": 0.8},
        {"subjectID": "3", "tractID": "B", "nodeID": 0, "fa": 0.9},
        {"subjectID": "3", "tractID": "B", "nodeID": 1, "fa": 0.9},
    ]
    subs = pd.DataFrame(sub_dicts)
    nodes = pd.DataFrame(node_dicts)

    with tempfile.TemporaryDirectory() as temp_dir:
        subs.to_csv(op.join(temp_dir, "subjects.csv"), index=False)
        nodes.to_csv(op.join(temp_dir, "nodes.csv"), index=False)

        tmp_dataset = afqi.AFQDataset(
            fn_nodes=op.join(temp_dir, "nodes.csv"),
            fn_subjects=op.join(temp_dir, "subjects.csv"),
            target_cols=["age"],
            dwi_metrics=["fa"],
            index_col="subject_id",
        )
        # All subjects standardized to "sub-N"; X is (3 subjects, 2 tracts
        # x 2 nodes); no target should be NaN after matching.
        assert set(tmp_dataset.subjects) == set([f"sub-{i}" for i in range(1, 4)])
        assert tmp_dataset.X.shape == (3, 4)
        assert tmp_dataset.y.shape == (3,)
        assert np.isnan(tmp_dataset.y).sum() == 0
@pytest.mark.parametrize("target_cols", [["class"], ["age", "class"]])
def test_AFQDataset(target_cols):
sarica_dir = download_sarica()
afq_data = AFQDataset(
fn_nodes=op.join(sarica_dir, "nodes.csv"),
fn_subjects=op.join(sarica_dir, "subjects.csv"),
dwi_metrics=["md", "fa"],
target_cols=target_cols,
label_encode_cols=["class"],
)
y_shape = (48, 2) if len(target_cols) == 2 else (48,)
assert afq_data.X.shape == (48, 4000) # nosec
assert afq_data.y.shape == y_shape # nosec
assert len(afq_data.groups) == 40 # nosec
assert len(afq_data.feature_names) == 4000 # nosec
assert len(afq_data.group_names) == 40 # nosec
assert len(afq_data.subjects) == 48 # nosec
assert afq_data.bundle_means().shape == (48, 40) # nosec
# Test pytorch dataset method
pt_dataset = afq_data.as_torch_dataset()
assert len(pt_dataset) == 48
assert pt_dataset.X.shape == (48, 40, 100) # nosec
assert pt_dataset.y.shape == y_shape # nosec
assert np.allclose(
pt_dataset[0][0][0], afq_data.X[0, :100], equal_nan=True
) # nosec
pt_dataset = afq_data.as_torch_dataset(channels_last=True)
assert len(pt_dataset) == 48
assert pt_dataset.X.shape == (48, 100, 40) # nosec
assert pt_dataset.y.shape == y_shape # nosec
assert np.allclose(
pt_dataset[0][0][:, 0], afq_data.X[0, :100], equal_nan=True
) # nosec
pt_dataset = afq_data.as_torch_dataset(bundles_as_channels=False)
assert len(pt_dataset) == 48
assert pt_dataset.X.shape == (48, 4000) # nosec
assert pt_dataset.y.shape == y_shape # nosec
assert np.allclose(pt_dataset[0][0], afq_data.X[0], equal_nan=True) # nosec
# Test tensorflow dataset method
tf_dataset = list(afq_data.as_tensorflow_dataset().as_numpy_iterator())
assert len(tf_dataset) == 48
assert np.allclose(
tf_dataset[0][0][:, 0], afq_data.X[0, :100], equal_nan=True
) # nosec
tf_dataset = list(
afq_data.as_tensorflow_dataset(channels_last=False).as_numpy_iterator()
)
assert len(tf_dataset) == 48
assert np.allclose(
tf_dataset[0][0][0], afq_data.X[0, :100], equal_nan=True
) # nosec
tf_dataset = list(
afq_data.as_tensorflow_dataset(bundles_as_channels=False).as_numpy_iterator()
)
assert len(tf_dataset) == 48
assert np.allclose(tf_dataset[0][0], afq_data.X[0], equal_nan=True) # nosec
# Test the drop_target_na method
if len(target_cols) == 2:
afq_data.y[0, 0] = np.nan
y_shape = (47, 2)
else:
afq_data.y[0] = np.nan
y_shape = (47,)
afq_data.drop_target_na()
assert afq_data.X.shape == (47, 4000) # nosec
assert afq_data.y.shape == y_shape # nosec
assert len(afq_data.subjects) == 47 # nosec
# Do it all again for an unsupervised dataset
afq_data = AFQDataset(
fn_nodes=op.join(sarica_dir, "nodes.csv"),
fn_subjects=op.join(sarica_dir, "subjects.csv"),
dwi_metrics=["md", "fa"],
unsupervised=True,
)
assert afq_data.X.shape == (48, 4000) # nosec
assert afq_data.y is None # nosec
assert len(afq_data.groups) == 40 # nosec
assert len(afq_data.feature_names) == 4000 # nosec
assert len(afq_data.group_names) == 40 # nosec
assert len(afq_data.subjects) == 48 # nosec
pt_dataset = afq_data.as_torch_dataset()
assert len(pt_dataset) == 48
assert pt_dataset.X.shape == (48, 40, 100) # nosec
assert torch.all(torch.eq(pt_dataset.y, torch.tensor([]))) # nosec
assert np.allclose(pt_dataset[0][0], afq_data.X[0, :100], equal_nan=True) # nosec
tf_dataset = list(afq_data.as_tensorflow_dataset().as_numpy_iterator())
assert len(tf_dataset) == 48
assert np.allclose(
tf_dataset[0][:, 0], afq_data.X[0, :100], equal_nan=True
) # nosec
# Test the drop_target_na method does nothing in the unsupervised case
afq_data.drop_target_na()
assert afq_data.X.shape == (48, 4000) # nosec
assert afq_data.y is None # nosec
assert len(afq_data.subjects) == 48 # nosec
@pytest.mark.parametrize("dwi_metrics", [["md", "fa"], None])
@pytest.mark.parametrize("enforce_sub_prefix", [True, False])
def test_fetch(dwi_metrics, enforce_sub_prefix):
sarica_dir = download_sarica()
with pytest.raises(ValueError):
load_afq_data(
fn_nodes=op.join(sarica_dir, "nodes.csv"),
fn_subjects=op.join(sarica_dir, "subjects.csv"),
dwi_metrics=dwi_metrics,
target_cols=["class"],
label_encode_cols=["class"],
concat_subject_session=True,
)
X, y, groups, feature_names, group_names, subjects, _, _ = load_afq_data(
fn_nodes=op.join(sarica_dir, "nodes.csv"),
fn_subjects=op.join(sarica_dir, "subjects.csv"),
dwi_metrics=dwi_metrics,
target_cols=["class"],
label_encode_cols=["class"],
enforce_sub_prefix=enforce_sub_prefix,
)
n_features = 16000 if dwi_metrics is None else 4000
n_groups = 160 if dwi_metrics is None else 40
assert X.shape == (48, n_features) # nosec
assert y.shape == (48,) # nosec
assert len(groups) == n_groups # nosec
assert len(feature_names) == n_features # nosec
assert len(group_names) == n_groups # nosec
assert len(subjects) == 48 # nosec
assert op.isfile(
op.join(afqi.datasets._DATA_DIR, "sarica_data", "nodes.csv")
) # nosec
assert op.isfile(
op.join(afqi.datasets._DATA_DIR, "sarica_data", "subjects.csv")
) # nosec
wh_dir = download_weston_havens()
X, y, groups, feature_names, group_names, subjects, _, _ = load_afq_data(
fn_nodes=op.join(wh_dir, "nodes.csv"),
fn_subjects=op.join(wh_dir, "subjects.csv"),
dwi_metrics=dwi_metrics,
target_cols=["Age"],
)
n_features = 10000 if dwi_metrics is None else 4000
n_groups = 100 if dwi_metrics is None else 40
assert X.shape == (77, n_features) # nosec
assert y.shape == (77,) # nosec
assert len(groups) == n_groups # nosec
assert len(feature_names) == n_features # nosec
assert len(group_names) == n_groups # nosec
assert len(subjects) == 77 # nosec
assert op.isfile(
op.join(afqi.datasets._DATA_DIR, "weston_havens_data", "nodes.csv")
) # nosec
assert op.isfile(
op.join(afqi.datasets._DATA_DIR, "weston_havens_data", "subjects.csv")
) # nosec
with tempfile.TemporaryDirectory() as td:
_ = download_sarica(data_home=td)
_ = download_weston_havens(data_home=td)
assert op.isfile(op.join(td, "sarica_data", "nodes.csv")) # nosec
assert op.isfile(op.join(td, "sarica_data", "subjects.csv")) # nosec
assert op.isfile(op.join(td, "weston_havens_data", "nodes.csv")) # nosec
assert op.isfile(op.join(td, "weston_havens_data", "subjects.csv")) # nosec
def test_load_afq_data_smoke():
    """Smoke test: load_afq_data returns its 8-field result in every mode."""
    output = load_afq_data(
        fn_nodes=op.join(test_data_path, "nodes.csv"),
        fn_subjects=op.join(test_data_path, "subjects.csv"),
        target_cols=["test_class"],
        label_encode_cols=["test_class"],
    )
    assert len(output) == 8  # nosec

    # Unsupervised mode: targets and classes are suppressed.
    output = load_afq_data(
        fn_nodes=op.join(test_data_path, "nodes.csv"),
        fn_subjects=op.join(test_data_path, "subjects.csv"),
        target_cols=["test_class"],
        label_encode_cols=["test_class"],
        unsupervised=True,
    )
    assert len(output) == 8  # nosec
    assert output.y is None  # nosec
    assert output.classes is None  # nosec

    # NOTE(review): this third call repeats the second verbatim; possibly a
    # different variant was intended upstream -- confirm. Kept as-is to
    # preserve behavior.
    output = load_afq_data(
        fn_nodes=op.join(test_data_path, "nodes.csv"),
        fn_subjects=op.join(test_data_path, "subjects.csv"),
        target_cols=["test_class"],
        label_encode_cols=["test_class"],
        unsupervised=True,
    )
    assert len(output) == 8  # nosec
    assert output.y is None  # nosec
    assert output.classes is None  # nosec
@pytest.mark.parametrize("dwi_metrics", [["volume", "md"], None])
def test_load_afq_data(dwi_metrics):
(X, y, groups, feature_names, group_names, subjects, _, classes) = load_afq_data(
fn_nodes=op.join(test_data_path, "nodes.csv"),
fn_subjects=op.join(test_data_path, "subjects.csv"),
dwi_metrics=dwi_metrics,
target_cols=["test_class"],
label_encode_cols=["test_class"],
return_bundle_means=False,
enforce_sub_prefix=False,
)
nodes = pd.read_csv(op.join(test_data_path, "nodes.csv"))
X_ref = np.load(op.join(test_data_path, "test_transform_x.npy"))
y_ref = np.load(op.join(test_data_path, "test_data_y.npy"))
groups_ref = np.load(op.join(test_data_path, "test_transform_groups.npy"))
cols_ref = [
tuple(item)
for item in np.load(op.join(test_data_path, "test_transform_cols.npy"))
]
assert np.allclose(X, X_ref, equal_nan=True) # nosec
assert np.allclose(y, y_ref) # nosec
assert np.allclose(groups, groups_ref) # nosec
assert feature_names == cols_ref # nosec
assert group_names == [tup[0:2] for tup in cols_ref if tup[2] == 0] # nosec
assert set(subjects) == set(nodes.subjectID.unique()) # nosec
assert all(classes["test_class"] == np.array(["c0", "c1"])) # nosec
(X, y, groups, feature_names, group_names, subjects, _, classes) = load_afq_data(
fn_nodes=op.join(test_data_path, "nodes.csv"),
fn_subjects=op.join(test_data_path, "subjects.csv"),
dwi_metrics=dwi_metrics,
target_cols=["test_class"],
label_encode_cols=["test_class"],
return_bundle_means=True,
enforce_sub_prefix=False,
)
means_ref = (
nodes.groupby(["subjectID", "tractID"])
.agg("mean")
.drop("nodeID", axis="columns")
.unstack("tractID")
)
assert np.allclose(X, means_ref.to_numpy(), equal_nan=True) # nosec
assert group_names == means_ref.columns.to_list() # nosec
assert feature_names == means_ref.columns.to_list() # nosec
assert set(subjects) == set(nodes.subjectID.unique()) # nosec
with pytest.raises(ValueError):
load_afq_data(
fn_nodes=op.join(test_data_path, "nodes.csv"),
fn_subjects=op.join(test_data_path, "subjects.csv"),
target_cols=["test_class"],
label_encode_cols=["test_class", "error"],
)
with pytest.raises(ValueError) as ee:
load_afq_data(
fn_nodes=op.join(test_data_path, "nodes.csv"),
fn_subjects=op.join(test_data_path, "subjects.csv"),
)
assert "please set `unsupervised=True`" in str(ee.value) # nosec
| 37.430952 | 86 | 0.614974 |
b4a213980b8bfc4388a321adc4e7fe68e8cfe038 | 24,847 | py | Python | pgmpy/estimators/ConstraintBasedEstimator.py | Lostefra/pgmpy | d39a0c86005f86a7543d93ae7703f22f026bf3e6 | [
"MIT"
] | 1 | 2022-01-18T08:37:04.000Z | 2022-01-18T08:37:04.000Z | pgmpy/estimators/ConstraintBasedEstimator.py | Lostefra/pgmpy | d39a0c86005f86a7543d93ae7703f22f026bf3e6 | [
"MIT"
] | null | null | null | pgmpy/estimators/ConstraintBasedEstimator.py | Lostefra/pgmpy | d39a0c86005f86a7543d93ae7703f22f026bf3e6 | [
"MIT"
] | null | null | null | #!/usr/bin/env python
from warnings import warn
from itertools import combinations
from pgmpy.base import UndirectedGraph
from pgmpy.base import DAG
from pgmpy.estimators import StructureEstimator
from pgmpy.independencies import Independencies, IndependenceAssertion
class ConstraintBasedEstimator(StructureEstimator):
    """Constraint-based (PC) structure estimation of DAGs from data.

    (Conditional) dependencies in the data set are identified with
    chi-square independence tests; the PC algorithm then estimates a DAG
    pattern satisfying them, which can be completed to a faithful DAG.

    References
    ----------
    [1] Koller & Friedman, Probabilistic Graphical Models - Principles and
        Techniques, 2009, Section 18.2
    [2] Neapolitan, Learning Bayesian Networks, Section 10.1.2 (PC
        algorithm, page 550)
    """

    def __init__(self, data, max_ci_vars=None, **kwargs):
        """Initialize the estimator.

        Parameters
        ----------
        data: pandas.DataFrame
            Data frame with one column per variable. Missing values should
            be encoded as ``numpy.NaN``.
        max_ci_vars: int (default: ``len(nodes) - 1``)
            Maximum number of conditioning variables used in independence
            tests.
        state_names: dict (optional)
            Discrete states of each variable; inferred from the data when
            unspecified.
        complete_samples_only: bool (optional, default ``True``)
            How rows containing ``numpy.NaN`` are treated; see
            ``StructureEstimator.state_count``.
        """
        # None is resolved to len(nodes) - 1 inside build_skeleton.
        self.max_ci_vars = max_ci_vars
        super(ConstraintBasedEstimator, self).__init__(data, **kwargs)
def estimate(self, significance_level=0.01):
"""
Estimates a DAG for the data set, using the PC constraint-based
structure learning algorithm. Independencies are identified from the
data set using a chi-squared statistic with the acceptance threshold of
`significance_level`. PC identifies a partially directed acyclic graph (PDAG), given
that the tested independencies admit a faithful Bayesian network representation.
This method returns a DAG that is a completion of this PDAG.
Parameters
----------
significance_level: float, default: 0.01
The significance level to use for conditional independence tests in the data set.
`significance_level` is the desired Type 1 error probability of
falsely rejecting the null hypothesis that variables are independent,
given that they are. The lower `significance_level`, the less likely
we are to accept dependencies, resulting in a sparser graph.
Returns
-------
model: DAG()-instance
An estimate for the DAG for the data set (not yet parametrized).
References
----------
Neapolitan, Learning Bayesian Networks, Section 10.1.2, Algorithm 10.2 (page 550)
http://www.cs.technion.ac.il/~dang/books/Learning%20Bayesian%20Networks(Neapolitan,%20Richard).pdf
Examples
--------
>>> import pandas as pd
>>> import numpy as np
>>> from pgmpy.estimators import ConstraintBasedEstimator
>>> data = pd.DataFrame(np.random.randint(0, 5, size=(2500, 3)), columns=list('XYZ'))
>>> data['sum'] = data.sum(axis=1)
>>> print(data)
X Y Z sum
0 3 0 1 4
1 1 4 3 8
2 0 0 3 3
3 0 2 3 5
4 2 1 1 4
... .. .. .. ...
2495 2 3 0 5
2496 1 1 2 4
2497 0 4 2 6
2498 0 0 0 0
2499 2 4 0 6
[2500 rows x 4 columns]
>>> c = ConstraintBasedEstimator(data)
>>> model = c.estimate()
>>> print(model.edges())
[('Z', 'sum'), ('X', 'sum'), ('Y', 'sum')]
"""
skel, separating_sets = self.estimate_skeleton(
significance_level, self.max_ci_vars
)
pdag = self.skeleton_to_pdag(skel, separating_sets)
model = self.pdag_to_dag(pdag)
return model
def estimate_skeleton(self, significance_level=0.01, max_ci_vars=None):
"""Estimates a graph skeleton (UndirectedGraph) for the data set.
Uses the build_skeleton method (PC algorithm); independencies are
determined using a chisquare statistic with the acceptance threshold
of `significance_level`. Returns a tuple `(skeleton, separating_sets)`.
Parameters
----------
significance_level: float, default: 0.01
The significance level to use for conditional independence tests in the data set.
`significance_level` is the desired Type 1 error probability of
falsely rejecting the null hypothesis that variables are independent,
given that they are. The lower `significance_level`, the less likely
we are to accept dependencies, resulting in a sparser graph.
Returns
-------
skeleton: UndirectedGraph
An estimate for the undirected graph skeleton of the BN underlying the data.
separating_sets: dict
A dict containing for each pair of not directly connected nodes a
separating set of variables that makes then conditionally independent.
(needed for edge orientation procedures)
References
----------
.. [1] Neapolitan, Learning Bayesian Networks, Section 10.1.2, Algorithm 10.2 (page 550)
http://www.cs.technion.ac.il/~dang/books/Learning%20Bayesian%20Networks(Neapolitan,%20Richard).pdf
.. [2] Chi-square test https://en.wikipedia.org/wiki/Pearson%27s_chi-squared_test#Test_of_independence
Examples
--------
>>> import pandas as pd
>>> import numpy as np
>>> from pgmpy.estimators import ConstraintBasedEstimator
>>> data = pd.DataFrame(np.random.randint(0, 2, size=(5000, 5)), columns=list('ABCDE'))
>>> data['F'] = data['A'] + data['B'] + data ['C']
>>> est = ConstraintBasedEstimator(data)
>>> skel, sep_sets = est.estimate_skeleton()
>>> skel.edges()
[('A', 'F'), ('B', 'F'), ('C', 'F')]
>>> # all independencies are unconditional:
>>> sep_sets
{('D', 'A'): (), ('C', 'A'): (), ('C', 'E'): (), ('E', 'F'): (), ('B', 'D'): (),
('B', 'E'): (), ('D', 'F'): (), ('D', 'E'): (), ('A', 'E'): (), ('B', 'A'): (),
('B', 'C'): (), ('C', 'D'): ()}
>>> data = pd.DataFrame(np.random.randint(0, 2, size=(5000, 3)), columns=list('XYZ'))
>>> data['X'] += data['Z']
>>> data['Y'] += data['Z']
>>> est = ConstraintBasedEstimator(data)
>>> skel, sep_sets = est.estimate_skeleton()
>>> skel.edges()
[('X', 'Z'), ('Y', 'Z')]
>>> # X, Y dependent, but conditionally independent given Z:
>>> sep_sets
{('X', 'Y'): ('Z',)}
"""
nodes = self.state_names.keys()
def is_independent(X, Y, Zs):
"""Returns result of hypothesis test for the null hypothesis that
X _|_ Y | Zs, using a chi2 statistic and threshold `significance_level`.
"""
return self.test_conditional_independence(X, Y, Zs, method="chi_square")
return self.build_skeleton(nodes, is_independent, max_ci_vars)
@staticmethod
def estimate_from_independencies(nodes, independencies):
"""Estimates a DAG from an Independencies()-object or a
decision function for conditional independencies. This requires that
the set of independencies admits a faithful representation (e.g. is a
set of d-separation for some BN or is closed under the semi-graphoid
axioms). See `build_skeleton`, `skeleton_to_pdag`, `pdag_to_dag` for
details.
Parameters
----------
nodes: list, array-like
A list of node/variable names of the network skeleton.
independencies: Independencies-instance or function.
The source of independency information from which to build the skeleton.
The provided Independencies should admit a faithful representation.
Can either be provided as an Independencies()-instance or by passing a
function `f(X, Y, Zs)` that returns `True` when X _|_ Y | Zs,
otherwise `False`. (X, Y being individual nodes and Zs a list of nodes).
Returns
-------
model: DAG instance
Examples
--------
>>> from pgmpy.estimators import ConstraintBasedEstimator
>>> from pgmpy.models import DAG
>>> from pgmpy.independencies import Independencies
>>> ind = Independencies(['B', 'C'], ['A', ['B', 'C'], 'D'])
>>> ind = ind.closure()
>>> skel = ConstraintBasedEstimator.estimate_from_independencies("ABCD", ind)
>>> print(skel.edges())
[('B', 'D'), ('A', 'D'), ('C', 'D')]
>>> model = DAG([('A', 'C'), ('B', 'C'), ('B', 'D'), ('C', 'E')])
>>> skel = ConstraintBasedEstimator.estimate_from_independencies(model.nodes(), model.get_independencies())
>>> print(skel.edges())
[('B', 'C'), ('A', 'C'), ('C', 'E'), ('D', 'B')]
>>> # note that ('D', 'B') is flipped compared to the original network;
>>> # Both networks belong to the same PDAG/are I-equivalent
"""
skel, separating_sets = ConstraintBasedEstimator.build_skeleton(
nodes, independencies
)
pdag = ConstraintBasedEstimator.skeleton_to_pdag(skel, separating_sets)
dag = ConstraintBasedEstimator.pdag_to_dag(pdag)
return dag
@staticmethod
def pdag_to_dag(pdag):
"""Completes a PDAG to a DAG, without adding v-structures, if such a
completion exists. If no faithful extension is possible, some fully
oriented DAG that corresponds to the PDAG is returned and a warning is
generated. This is a static method.
Parameters
----------
pdag: DAG
A directed acyclic graph pattern, consisting in (acyclic) directed edges
as well as "undirected" edges, represented as both-way edges between
nodes.
Returns
-------
dag: DAG
A faithful orientation of pdag, if one exists. Otherwise any
fully orientated DAG/BayesianModel with the structure of pdag.
References
----------
[1] Chickering, Learning Equivalence Classes of Bayesian-Network Structures,
2002; See page 454 (last paragraph) for the algorithm pdag_to_dag
http://www.jmlr.org/papers/volume2/chickering02a/chickering02a.pdf
[2] Dor & Tarsi, A simple algorithm to construct a consistent extension
of a partially oriented graph, 1992,
http://ftp.cs.ucla.edu/pub/stat_ser/r185-dor-tarsi.pdf
Examples
--------
>>> import pandas as pd
>>> import numpy as np
>>> from pgmpy.base import DAG
>>> from pgmpy.estimators import ConstraintBasedEstimator
>>> data = pd.DataFrame(np.random.randint(0, 4, size=(5000, 3)), columns=list('ABD'))
>>> data['C'] = data['A'] - data['B']
>>> data['D'] += data['A']
>>> c = ConstraintBasedEstimator(data)
>>> pdag = c.skeleton_to_pdag(*c.estimate_skeleton())
>>> pdag.edges()
[('B', 'C'), ('D', 'A'), ('A', 'D'), ('A', 'C')]
>>> c.pdag_to_dag(pdag).edges()
[('B', 'C'), ('A', 'D'), ('A', 'C')]
>>> # pdag_to_dag is static:
... pdag1 = DAG([('A', 'B'), ('C', 'B'), ('C', 'D'), ('D', 'C'), ('D', 'A'), ('A', 'D')])
>>> ConstraintBasedEstimator.pdag_to_dag(pdag1).edges()
[('D', 'C'), ('C', 'B'), ('A', 'B'), ('A', 'D')]
>>> # example of a pdag with no faithful extension:
... pdag2 = DAG([('A', 'B'), ('A', 'C'), ('B', 'C'), ('C', 'B')])
>>> ConstraintBasedEstimator.pdag_to_dag(pdag2).edges()
UserWarning: PDAG has no faithful extension (= no oriented DAG with the same v-structures as PDAG).
Remaining undirected PDAG edges oriented arbitrarily.
[('B', 'C'), ('A', 'B'), ('A', 'C')]
"""
pdag = pdag.copy()
dag = DAG()
dag.add_nodes_from(pdag.nodes())
# add already directed edges of pdag to dag
for X, Y in pdag.edges():
if not pdag.has_edge(Y, X):
dag.add_edge(X, Y)
while pdag.number_of_nodes() > 0:
# find node with (1) no directed outgoing edges and
# (2) the set of undirected neighbors is either empty or
# undirected neighbors + parents of X are a clique
found = False
for X in pdag.nodes():
directed_outgoing_edges = set(pdag.successors(X)) - set(
pdag.predecessors(X)
)
undirected_neighbors = set(pdag.successors(X)) & set(
pdag.predecessors(X)
)
neighbors_are_clique = all(
(
pdag.has_edge(Y, Z)
for Z in pdag.predecessors(X)
for Y in undirected_neighbors
if not Y == Z
)
)
if not directed_outgoing_edges and (
not undirected_neighbors or neighbors_are_clique
):
found = True
# add all edges of X as outgoing edges to dag
for Y in pdag.predecessors(X):
dag.add_edge(Y, X)
pdag.remove_node(X)
break
if not found:
warn(
"PDAG has no faithful extension (= no oriented DAG with the "
+ "same v-structures as PDAG). Remaining undirected PDAG edges "
+ "oriented arbitrarily."
)
for X, Y in pdag.edges():
if not dag.has_edge(Y, X):
try:
dag.add_edge(X, Y)
except ValueError:
pass
break
return dag
@staticmethod
def model_to_pdag(model):
"""Construct the DAG pattern (representing the I-equivalence class) for
a given DAG. This is the "inverse" to pdag_to_dag.
"""
if not isinstance(model, DAG):
raise TypeError(f"model: Expected DAG instance, got type {type(model)}")
skel, separating_sets = ConstraintBasedEstimator.build_skeleton(
model.nodes(), model.get_independencies()
)
pdag = ConstraintBasedEstimator.skeleton_to_pdag(skel, separating_sets)
return pdag
@staticmethod
def skeleton_to_pdag(skel, separating_sets):
"""Orients the edges of a graph skeleton based on information from
`separating_sets` to form a DAG pattern (DAG).
Parameters
----------
skel: UndirectedGraph
An undirected graph skeleton as e.g. produced by the
estimate_skeleton method.
separating_sets: dict
A dict containing for each pair of not directly connected nodes a
separating set ("witnessing set") of variables that makes then
conditionally independent. (needed for edge orientation)
Returns
-------
pdag: DAG
An estimate for the DAG pattern of the BN underlying the data. The
graph might contain some nodes with both-way edges (X->Y and Y->X).
Any completion by (removing one of the both-way edges for each such
pair) results in a I-equivalent Bayesian network DAG.
References
----------
Neapolitan, Learning Bayesian Networks, Section 10.1.2, Algorithm 10.2 (page 550)
http://www.cs.technion.ac.il/~dang/books/Learning%20Bayesian%20Networks(Neapolitan,%20Richard).pdf
Examples
--------
>>> import pandas as pd
>>> import numpy as np
>>> from pgmpy.estimators import ConstraintBasedEstimator
>>> data = pd.DataFrame(np.random.randint(0, 4, size=(5000, 3)), columns=list('ABD'))
>>> data['C'] = data['A'] - data['B']
>>> data['D'] += data['A']
>>> c = ConstraintBasedEstimator(data)
>>> pdag = c.skeleton_to_pdag(*c.estimate_skeleton())
>>> pdag.edges() # edges: A->C, B->C, A--D (not directed)
[('B', 'C'), ('A', 'C'), ('A', 'D'), ('D', 'A')]
"""
pdag = skel.to_directed()
node_pairs = combinations(pdag.nodes(), 2)
# 1) for each X-Z-Y, if Z not in the separating set of X,Y, then orient edges as X->Z<-Y
# (Algorithm 3.4 in Koller & Friedman PGM, page 86)
for X, Y in node_pairs:
if not skel.has_edge(X, Y):
for Z in set(skel.neighbors(X)) & set(skel.neighbors(Y)):
if Z not in separating_sets[frozenset((X, Y))]:
pdag.remove_edges_from([(Z, X), (Z, Y)])
progress = True
while progress: # as long as edges can be oriented (removed)
num_edges = pdag.number_of_edges()
# 2) for each X->Z-Y, orient edges to Z->Y
for X, Y in node_pairs:
for Z in (set(pdag.successors(X)) - set(pdag.predecessors(X))) & (
set(pdag.successors(Y)) & set(pdag.predecessors(Y))
):
pdag.remove(Y, Z)
# 3) for each X-Y with a directed path from X to Y, orient edges to X->Y
for X, Y in node_pairs:
for path in nx.all_simple_paths(pdag, X, Y):
is_directed = True
for src, dst in path:
if pdag.has_edge(dst, src):
is_directed = False
if is_directed:
pdag.remove(Y, X)
break
# 4) for each X-Z-Y with X->W, Y->W, and Z-W, orient edges to Z->W
for X, Y in node_pairs:
for Z in (
set(pdag.successors(X))
& set(pdag.predecessors(X))
& set(pdag.successors(Y))
& set(pdag.predecessors(Y))
):
for W in (
(set(pdag.successors(X)) - set(pdag.predecessors(X)))
& (set(pdag.successors(Y)) - set(pdag.predecessors(Y)))
& (set(pdag.successors(Z)) & set(pdag.predecessors(Z)))
):
pdag.remove(W, Z)
progress = num_edges > pdag.number_of_edges()
return pdag
@staticmethod
def build_skeleton(nodes, independencies, max_ci_vars=None):
"""Estimates a graph skeleton (UndirectedGraph) from a set of independencies
using (the first part of) the PC algorithm. The independencies can either be
provided as an instance of the `Independencies`-class or by passing a
decision function that decides any conditional independency assertion.
Returns a tuple `(skeleton, separating_sets)`.
If an Independencies-instance is passed, the contained IndependenceAssertions
have to admit a faithful BN representation. This is the case if
they are obtained as a set of d-separations of some Bayesian network or
if the independence assertions are closed under the semi-graphoid axioms.
Otherwise the procedure may fail to identify the correct structure.
Parameters
----------
nodes: list, array-like
A list of node/variable names of the network skeleton.
independencies: Independencies-instance or function.
The source of independency information from which to build the skeleton.
The provided Independencies should admit a faithful representation.
Can either be provided as an Independencies()-instance or by passing a
function `f(X, Y, Zs)` that returns `True` when X _|_ Y | Zs,
otherwise `False`. (X, Y being individual nodes and Zs a list of nodes).
max_ci_vars: int (default: `len(nodes) - 1`)
The maximum number of variables to use as conditional variables in
independence tests.
Returns
-------
skeleton: UndirectedGraph
An estimate for the undirected graph skeleton of the BN underlying the data.
separating_sets: dict
A dict containing for each pair of not directly connected nodes a
separating set ("witnessing set") of variables that makes then
conditionally independent. (needed for edge orientation procedures)
References
----------
[1] Neapolitan, Learning Bayesian Networks, Section 10.1.2, Algorithm 10.2 (page 550)
http://www.cs.technion.ac.il/~dang/books/Learning%20Bayesian%20Networks(Neapolitan,%20Richard).pdf
[2] Koller & Friedman, Probabilistic Graphical Models - Principles and Techniques, 2009
Section 3.4.2.1 (page 85), Algorithm 3.3
Examples
--------
>>> from pgmpy.estimators import ConstraintBasedEstimator
>>> from pgmpy.models import DAG
>>> from pgmpy.independencies import Independencies
>>> # build skeleton from list of independencies:
... ind = Independencies(['B', 'C'], ['A', ['B', 'C'], 'D'])
>>> # we need to compute closure, otherwise this set of independencies doesn't
... # admit a faithful representation:
... ind = ind.closure()
>>> skel, sep_sets = ConstraintBasedEstimator.build_skeleton("ABCD", ind)
>>> print(skel.edges())
[('A', 'D'), ('B', 'D'), ('C', 'D')]
>>> # build skeleton from d-seperations of DAG:
... model = DAG([('A', 'C'), ('B', 'C'), ('B', 'D'), ('C', 'E')])
>>> skel, sep_sets = ConstraintBasedEstimator.build_skeleton(model.nodes(), model.get_independencies())
>>> print(skel.edges())
[('A', 'C'), ('B', 'C'), ('B', 'D'), ('C', 'E')]
"""
nodes = list(nodes)
if max_ci_vars is None:
max_ci_vars = len(nodes) - 1
if isinstance(independencies, Independencies):
def is_independent(X, Y, Zs):
return IndependenceAssertion(X, Y, Zs) in independencies
elif callable(independencies):
is_independent = independencies
else:
raise ValueError(
"'independencies' must be either Independencies-instance "
+ "or a ternary function that decides independencies."
)
graph = UndirectedGraph(combinations(nodes, 2))
lim_neighbors = 0
separating_sets = dict()
while not all(
[len(list(graph.neighbors(node))) < lim_neighbors for node in nodes]
) and (lim_neighbors <= max_ci_vars):
for node in nodes:
for neighbor in list(graph.neighbors(node)):
# search if there is a set of neighbors (of size lim_neighbors)
# that makes X and Y independent:
for separating_set in combinations(
set(graph.neighbors(node)) - set([neighbor]), lim_neighbors
):
if is_independent(node, neighbor, separating_set):
separating_sets[
frozenset((node, neighbor))
] = separating_set
graph.remove_edge(node, neighbor)
break
lim_neighbors += 1
return graph, separating_sets
| 43.212174 | 115 | 0.568439 |
efff3cf42cac9a3ada269941b6c067bb73a60c70 | 4,055 | py | Python | lib/python2.7/site-packages/networkx/readwrite/tests/test_sparse6.py | nishaero/wifi-userseg-ryu | 1132f2c813b79eff755bdd1a9e73e7ad3980af7c | [
"Apache-2.0"
] | 15 | 2018-04-26T08:17:18.000Z | 2021-03-05T08:44:13.000Z | lib/python2.7/site-packages/networkx/readwrite/tests/test_sparse6.py | nishaero/wifi-userseg-ryu | 1132f2c813b79eff755bdd1a9e73e7ad3980af7c | [
"Apache-2.0"
] | null | null | null | lib/python2.7/site-packages/networkx/readwrite/tests/test_sparse6.py | nishaero/wifi-userseg-ryu | 1132f2c813b79eff755bdd1a9e73e7ad3980af7c | [
"Apache-2.0"
] | 6 | 2018-04-12T15:49:27.000Z | 2022-01-27T12:34:50.000Z | #!/usr/bin/env python
try:
from StringIO import StringIO
except ImportError:
from io import StringIO
from nose.tools import *
import networkx as nx
import networkx.readwrite.sparse6 as sg6
import os,tempfile
class TestSparseGraph6(object):
    """Tests for sparse6 parsing, generation, and file round-trips."""

    def test_parse_sparse6(self):
        # 18-node graph with a hand-checked edge list.
        data = """:Q___eDcdFcDeFcE`GaJ`IaHbKNbLM"""
        G = nx.parse_sparse6(data)
        assert_equal(sorted(G.nodes()), list(range(18)))
        assert_equal(
            sorted(G.edges()),
            [(0, 1), (0, 2), (0, 3), (1, 12), (1, 14), (2, 13),
             (2, 15), (3, 16), (3, 17), (4, 7), (4, 9), (4, 11),
             (5, 6), (5, 8), (5, 9), (6, 10), (6, 11), (7, 8),
             (7, 10), (8, 12), (9, 15), (10, 14), (11, 13),
             (12, 16), (13, 17), (14, 17), (15, 16)],
        )

    def test_parse_multigraph_graph(self):
        # BUGFIX: the original used assert_true(type(G), nx.Graph), which
        # always passes -- the second argument is only the failure message.
        # Assert the exact return type instead.
        G = nx.parse_sparse6(':An')
        assert_equal(type(G), nx.Graph)
        M = nx.parse_sparse6(':Ab')
        assert_equal(type(M), nx.MultiGraph)

    def test_read_sparse6(self):
        # Reading from a file handle must match parsing the string directly.
        data = """:Q___eDcdFcDeFcE`GaJ`IaHbKNbLM"""
        G = nx.parse_sparse6(data)
        Gin = nx.read_sparse6(StringIO(data))
        assert_equal(sorted(G.nodes()), sorted(Gin.nodes()))
        assert_equal(sorted(G.edges()), sorted(Gin.edges()))

    def test_read_many_graph6(self):
        # A multi-line file yields a list of graphs.
        data = (':Q___eDcdFcDeFcE`GaJ`IaHbKNbLM\n'
                ':Q___dCfDEdcEgcbEGbFIaJ`JaHN`IM')
        glist = nx.read_sparse6(StringIO(data))
        assert_equal(len(glist), 2)
        for G in glist:
            assert_equal(sorted(G.nodes()), list(range(18)))

    def test_generate_sparse6(self):
        # Expected encodings checked against the sage encoder.
        assert_equal(nx.generate_sparse6(nx.empty_graph(0)), '>>sparse6<<:?')
        assert_equal(nx.generate_sparse6(nx.empty_graph(1)), '>>sparse6<<:@')
        assert_equal(nx.generate_sparse6(nx.empty_graph(5)), '>>sparse6<<:D')
        # Multi-byte node-count encodings.
        assert_equal(nx.generate_sparse6(nx.empty_graph(68)),
                     '>>sparse6<<:~?@C')
        assert_equal(nx.generate_sparse6(nx.empty_graph(258049)),
                     '>>sparse6<<:~~???~?@')
        K4 = nx.complete_graph(4)
        assert_equal(nx.generate_sparse6(K4, header=True), '>>sparse6<<:CcKI')
        assert_equal(nx.generate_sparse6(K4, header=False), ':CcKI')
        # Bit-padding cases for path graphs of increasing size.
        for n, expected in [(4, ':Cdv'), (5, ':DaYn'), (6, ':EaYnN'),
                            (7, ':FaYnL'), (8, ':GaYnLz')]:
            assert_equal(nx.generate_sparse6(nx.path_graph(n), header=False),
                         expected)

    def test_write_sparse6(self):
        fh = StringIO()
        nx.write_sparse6(nx.complete_bipartite_graph(6, 9), fh)
        fh.seek(0)
        # Expected string compared with sage.
        assert_equal(fh.read(),
                     '>>sparse6<<:Nk?G`cJ?G`cJ?G`cJ?G`'
                     'cJ?G`cJ?G`cJ?G`cJ?G`cJ?G`cJ\n')

    def test_generate_and_parse_sparse6(self):
        # Round-trip random graphs of assorted sizes through the encoder.
        for n in list(range(13)) + [31, 47, 62, 63, 64, 72]:
            m = min(2 * n, n * n // 2)
            g = nx.random_graphs.gnm_random_graph(n, m, seed=n)
            g2 = nx.parse_sparse6(nx.generate_sparse6(g, header=False))
            assert_equal(g2.order(), g.order())
            assert_equal(sorted(g2.edges()), sorted(g.edges()))

    @raises(nx.NetworkXError)
    def directed_error(self):
        # Directed graphs cannot be written in sparse6 format.
        nx.generate_sparse6(nx.DiGraph())
| 39.368932 | 77 | 0.545993 |
16d26c77ca8ec6eab37d0c88914cde725bd28845 | 7,457 | py | Python | tests/python/relay/test_depthwise_conv2d_nhwc_texture.py | shengxinhu/tvm | 06c443e9959452c6da3a911fe0c11e08c5554477 | [
"Zlib",
"Unlicense",
"Apache-2.0",
"BSD-2-Clause",
"MIT",
"ECL-2.0",
"BSD-3-Clause"
] | 4,640 | 2017-08-17T19:22:15.000Z | 2019-11-04T15:29:46.000Z | tests/python/relay/test_depthwise_conv2d_nhwc_texture.py | shengxinhu/tvm | 06c443e9959452c6da3a911fe0c11e08c5554477 | [
"Zlib",
"Unlicense",
"Apache-2.0",
"BSD-2-Clause",
"MIT",
"ECL-2.0",
"BSD-3-Clause"
] | 2,863 | 2017-08-17T19:55:50.000Z | 2019-11-04T17:18:41.000Z | tests/python/relay/test_depthwise_conv2d_nhwc_texture.py | shengxinhu/tvm | 06c443e9959452c6da3a911fe0c11e08c5554477 | [
"Zlib",
"Unlicense",
"Apache-2.0",
"BSD-2-Clause",
"MIT",
"ECL-2.0",
"BSD-3-Clause"
] | 1,352 | 2017-08-17T19:30:38.000Z | 2019-11-04T16:09:29.000Z | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
import os
import tvm
import numpy as np
from tvm import relay
from tvm.relay import testing
from utils.adreno_utils import gpu_preprocess, build_run_compare
@tvm.testing.requires_opencl
def test_depthwise_conv2d_deeplabv3_1_129_129_144x3_3_144_1():
    """Depthwise 3x3 conv with a deeplabv3 NHWC shape on the Adreno OpenCL target."""
    target = "opencl --device=adreno"
    dtype = "float16"
    input_shape = (1, 129, 129, 144)
    filter_shape = (3, 3, 144, 1)  # HWOI: one filter per input channel
    kernel_size = (filter_shape[0], filter_shape[1])
    bias_shape = (filter_shape[2],)
    A = relay.var("data", shape=input_shape, dtype=dtype)
    B = relay.var("weight", shape=filter_shape, dtype=dtype)
    bias = relay.var("bias", shape=bias_shape, dtype=dtype)
    # groups == channels makes this a depthwise convolution.
    conv = relay.nn.conv2d(
        A,
        B,
        data_layout="NHWC",
        kernel_layout="HWOI",
        out_dtype=dtype,
        groups=filter_shape[2],
        channels=filter_shape[2],
        kernel_size=kernel_size,
    )
    D = relay.op.add(conv, bias)
    D = relay.op.nn.relu(D)
    mod = relay.Function([A, B, bias], D)
    # NOTE(review): `mod` is immediately reassigned below, so the
    # bias-add + relu graph built above is never used — only the bare
    # convolution is compiled and compared.  Confirm which variant was
    # intended before cleaning this up.
    mod = relay.Function([A, B, bias], conv)
    np.random.seed(1)
    # Xavier() fills the numpy buffers in place, keyed by parameter name.
    initializer = relay.testing.init.Xavier()
    filter_data = np.zeros(filter_shape).astype(dtype)
    bias_data = np.zeros(bias_shape).astype(dtype)
    initializer("weight", filter_data)
    initializer("bias", bias_data)
    params1 = {
        "weight": tvm.nd.array(filter_data),
        "bias": tvm.nd.array(bias_data),
    }
    # Compiles for the target and compares against a reference run.
    build_run_compare(mod, params1, {"data": input_shape}, dtype, target)
@tvm.testing.requires_opencl
def test_depthwise_conv2d_deeplabv3_4_35_35_576x3_3_576_1():
    """Batched (N=4) depthwise 3x3 conv, deeplabv3 shape, Adreno OpenCL target."""
    target = "opencl --device=adreno"
    dtype = "float16"
    input_shape = (4, 35, 35, 576)
    filter_shape = (3, 3, 576, 1)  # HWOI: one filter per input channel
    kernel_size = (filter_shape[0], filter_shape[1])
    bias_shape = (filter_shape[2],)
    A = relay.var("data", shape=input_shape, dtype=dtype)
    B = relay.var("weight", shape=filter_shape, dtype=dtype)
    bias = relay.var("bias", shape=bias_shape, dtype=dtype)
    # groups == channels makes this a depthwise convolution.
    conv = relay.nn.conv2d(
        A,
        B,
        data_layout="NHWC",
        kernel_layout="HWOI",
        out_dtype=dtype,
        groups=filter_shape[2],
        channels=filter_shape[2],
        kernel_size=kernel_size,
    )
    D = relay.op.add(conv, bias)
    D = relay.op.nn.relu(D)
    mod = relay.Function([A, B, bias], D)
    # NOTE(review): `mod` is immediately reassigned below, so the
    # bias-add + relu graph built above is never used — only the bare
    # convolution is compiled and compared.  Confirm intent.
    mod = relay.Function([A, B, bias], conv)
    np.random.seed(1)
    # Xavier() fills the numpy buffers in place, keyed by parameter name.
    initializer = relay.testing.init.Xavier()
    filter_data = np.zeros(filter_shape).astype(dtype)
    bias_data = np.zeros(bias_shape).astype(dtype)
    initializer("weight", filter_data)
    initializer("bias", bias_data)
    params1 = {
        "weight": tvm.nd.array(filter_data),
        "bias": tvm.nd.array(bias_data),
    }
    build_run_compare(mod, params1, {"data": input_shape}, dtype, target)
@tvm.testing.requires_opencl
def test_depthwise_conv2d_deeplabv3_1_129_129_144x3_3_144_1_with_padding():
    """Strided, padded depthwise 3x3 conv + bias + relu on the Adreno OpenCL target."""
    target = "opencl --device=adreno"
    dtype = "float16"
    input_shape = (1, 129, 129, 144)
    filter_shape = (3, 3, 144, 1)  # HWOI layout
    kernel_size = (filter_shape[0], filter_shape[1])
    bias_shape = (filter_shape[2],)
    data_var = relay.var("data", shape=input_shape, dtype=dtype)
    kernel_var = relay.var("weight", shape=filter_shape, dtype=dtype)
    bias_var = relay.var("bias", shape=bias_shape, dtype=dtype)
    # groups == channels -> depthwise convolution.
    depthwise = relay.nn.conv2d(
        data_var,
        kernel_var,
        data_layout="NHWC",
        kernel_layout="HWOI",
        padding=[3, 3, 3, 3],
        strides=[2, 2],
        out_dtype=dtype,
        groups=filter_shape[2],
        channels=filter_shape[2],
        kernel_size=kernel_size,
    )
    biased = relay.op.add(depthwise, bias_var)
    activated = relay.op.nn.relu(biased)
    func = relay.Function([data_var, kernel_var, bias_var], activated)
    # Xavier-initialize the parameter buffers in place (seeded for determinism).
    np.random.seed(1)
    xavier = relay.testing.init.Xavier()
    kernel_np = np.zeros(filter_shape).astype(dtype)
    bias_np = np.zeros(bias_shape).astype(dtype)
    xavier("weight", kernel_np)
    xavier("bias", bias_np)
    model_params = {
        "weight": tvm.nd.array(kernel_np),
        "bias": tvm.nd.array(bias_np),
    }
    # Compile for the target and compare against a reference execution.
    build_run_compare(func, model_params, {"data": input_shape}, dtype, target)
@tvm.testing.requires_opencl
def test_depthwise_conv2d_1_513_513_7x3_3_7_1():
    """Depthwise 3x3 conv with 7 channels + bias + relu on the Adreno OpenCL target."""
    target = "opencl --device=adreno"
    dtype = "float16"
    input_shape = (1, 513, 513, 7)
    filter_shape = (3, 3, 7, 1)  # HWOI layout
    bias_shape = (filter_shape[2],)
    kernel_size = (filter_shape[0], filter_shape[1])
    data_var = relay.var("data", shape=input_shape, dtype=dtype)
    kernel_var = relay.var("weight", shape=filter_shape, dtype=dtype)
    bias_var = relay.var("bias", shape=bias_shape, dtype=dtype)
    # groups == channels -> depthwise convolution.
    depthwise = relay.nn.conv2d(
        data_var,
        kernel_var,
        data_layout="NHWC",
        kernel_layout="HWOI",
        out_dtype=dtype,
        channels=filter_shape[2],
        groups=filter_shape[2],
        kernel_size=kernel_size,
    )
    biased = relay.op.add(depthwise, bias_var)
    activated = relay.op.nn.relu(biased)
    func = relay.Function([data_var, kernel_var, bias_var], activated)
    # Xavier-initialize the parameter buffers in place (seeded for determinism).
    np.random.seed(1)
    xavier = relay.testing.init.Xavier()
    kernel_np = np.ones(filter_shape).astype(dtype)
    bias_np = np.ones(bias_shape).astype(dtype)
    xavier("weight", kernel_np)
    xavier("bias", bias_np)
    model_params = {
        "weight": tvm.nd.array(kernel_np),
        "bias": tvm.nd.array(bias_np),
    }
    build_run_compare(func, model_params, {"data": input_shape}, dtype, target)
@tvm.testing.requires_opencl
def test_depthwise_conv2d_1_513_513_3x3_3_3_1():
    """Depthwise 3x3 conv with 3 channels + bias + relu on the Adreno OpenCL target."""
    target = "opencl --device=adreno"
    dtype = "float16"
    input_shape = (1, 513, 513, 3)
    filter_shape = (3, 3, 3, 1)  # HWOI layout
    bias_shape = (filter_shape[2],)
    kernel_size = (filter_shape[0], filter_shape[1])
    data_var = relay.var("data", shape=input_shape, dtype=dtype)
    kernel_var = relay.var("weight", shape=filter_shape, dtype=dtype)
    bias_var = relay.var("bias", shape=bias_shape, dtype=dtype)
    # groups == channels -> depthwise convolution.
    depthwise = relay.nn.conv2d(
        data_var,
        kernel_var,
        data_layout="NHWC",
        kernel_layout="HWOI",
        out_dtype=dtype,
        channels=filter_shape[2],
        groups=filter_shape[2],
        kernel_size=kernel_size,
    )
    biased = relay.op.add(depthwise, bias_var)
    activated = relay.op.nn.relu(biased)
    func = relay.Function([data_var, kernel_var, bias_var], activated)
    # Xavier-initialize the parameter buffers in place (seeded for determinism).
    np.random.seed(1)
    xavier = relay.testing.init.Xavier()
    kernel_np = np.ones(filter_shape).astype(dtype)
    bias_np = np.ones(bias_shape).astype(dtype)
    xavier("weight", kernel_np)
    xavier("bias", bias_np)
    model_params = {
        "weight": tvm.nd.array(kernel_np),
        "bias": tvm.nd.array(bias_np),
    }
    build_run_compare(func, model_params, {"data": input_shape}, dtype, target)
| 31.867521 | 75 | 0.653748 |
835ebd2e8fb20370d26bab61eabf079b536011aa | 451 | py | Python | tests/codec/der/__main__.py | pysnmp/pyasn1 | d52a38c74df682d0b82d771028c145e8aba23ff4 | [
"BSD-2-Clause"
] | null | null | null | tests/codec/der/__main__.py | pysnmp/pyasn1 | d52a38c74df682d0b82d771028c145e8aba23ff4 | [
"BSD-2-Clause"
] | 3 | 2021-11-16T20:28:20.000Z | 2021-11-24T20:27:24.000Z | tests/codec/der/__main__.py | pysnmp/pyasn1 | d52a38c74df682d0b82d771028c145e8aba23ff4 | [
"BSD-2-Clause"
] | null | null | null | #
# This file is part of pyasn1 software.
#
# Copyright (c) 2005-2019, Ilya Etingof <etingof@gmail.com>
# License: http://snmplabs.com/pyasn1/license.html
#
# Prefer the unittest2 backport when available (legacy Python support);
# fall back to the stdlib unittest otherwise.
try:
    import unittest2 as unittest
except ImportError:
    import unittest
# Aggregate the DER encoder/decoder suites by dotted name so they can be
# run together from this module.
suite = unittest.TestLoader().loadTestsFromNames(
    ["tests.codec.der.test_encoder.suite", "tests.codec.der.test_decoder.suite"]
)
if __name__ == "__main__":
    unittest.TextTestRunner(verbosity=2).run(suite)
| 22.55 | 80 | 0.733925 |
18cac0129bff79996f6442ff16408c3b7d473796 | 10,801 | py | Python | bin/gene_test.py | fuxialexander/marvel | 7fed7515ebe0a3fde9e842c0ee81a39f4413b457 | [
"MIT"
] | 8 | 2020-02-20T15:28:43.000Z | 2021-06-09T04:08:21.000Z | bin/gene_test.py | fuxialexander/marvel | 7fed7515ebe0a3fde9e842c0ee81a39f4413b457 | [
"MIT"
] | 3 | 2020-12-26T09:07:55.000Z | 2021-09-08T17:32:40.000Z | bin/gene_test.py | fuxialexander/marvel | 7fed7515ebe0a3fde9e842c0ee81a39f4413b457 | [
"MIT"
] | null | null | null | #!/usr/bin/env python
import argparse
import os
import sys
from glob import glob
from os.path import basename
import numpy as np
import pandas as pd
import rpy2
import rpy2.robjects as ro
import rpy2.robjects.numpy2ri as n2r
from rpy2.rinterface import RRuntimeWarning
from scipy.sparse import csr_matrix, load_npz, save_npz, vstack
from scipy.stats import mode
from sklearn.linear_model import LogisticRegression
from tqdm import tqdm
# Enable automatic numpy <-> R conversion for all rpy2 calls below.
n2r.activate()
r = ro.r
# Load the R 'glmpath' package: provides the L1-regularized GLM path fit
# used by test() and perm_test().
r.library('glmpath')
def get_pe(pe_pair_file):
    """Parse a tab-separated promoter/enhancer/distance pair file.

    Each line holds: promoter id, enhancer id, integer distance.

    Returns:
        (ep_dis, ep_pair) where ep_dis maps the concatenated
        promoter+enhancer ids to the distance, and ep_pair maps each
        promoter id to the list of its enhancer ids in file order.
    """
    ep_dis = {}
    ep_pair = {}
    with open(pe_pair_file, 'r') as handle:
        for line in handle:
            promoter, enhancer, distance = line.strip("\n").split("\t")
            ep_dis[promoter + enhancer] = int(distance)
            ep_pair.setdefault(promoter, []).append(enhancer)
    return ep_dis, ep_pair
def get_pg(pg_pair_file):
    """Parse a tab-separated promoter-location/gene pair file.

    Each line holds: promoter location, gene id.

    Returns:
        (pg_pair, genes) where pg_pair maps each gene id to the list of
        its promoter locations in file order, and genes is the sorted
        numpy array of unique gene ids.
    """
    pg_pair = {}
    gene_ids = []
    with open(pg_pair_file, 'r') as handle:
        for line in handle:
            location, gene_id = line.strip('\n').split('\t')
            gene_ids.append(gene_id)
            pg_pair.setdefault(gene_id, []).append(location)
    return pg_pair, np.unique(np.array(gene_ids))
def get_weight(distance, breaks, weight):
    """Look up the distance-bin weight for an enhancer-promoter distance.

    Fix: removed three leftover debug ``print()`` calls that spammed
    stdout on every enhancer lookup.

    Args:
        distance: signed distance; only its magnitude is used.
        breaks: pandas Series of upper break points for the distance bins
            (read via ``.values``).
        weight: sequence of weights parallel to ``breaks``.

    Returns:
        The weight of the smallest break strictly larger than
        ``abs(distance)``, or 0 when the distance exceeds every break.
    """
    diff = breaks.values - abs(distance)
    if (diff > 0).sum() > 0:
        # Index (within the positive-margin subset) of the tightest break.
        return weight[np.where(diff > 0)[0][diff[diff > 0].argmin()]]
    return 0
def get_distance(p, e, ep_dis):
    """Return the recorded distance between promoter ``p`` and enhancer ``e``.

    Falls back to just under 1 Mb (999999) when the pair was never
    recorded, placing it in the farthest distance bucket.
    """
    return ep_dis.get(p + e, 1000000 - 1)
def get_gene(pg_pair, glist, ep_pair, sample_count, motif_count):
    """Build one (sample_count x motif_count) profile matrix per gene.

    For each gene, averages the distance-weighted motif profiles of its
    enhancers and the weight[0]-scaled profiles of its promoters.  Genes
    whose promoters have no paired enhancers are skipped entirely.

    NOTE(review): this function reads the module-level globals
    `promoter_results`, `enhancer_results`, `enhancer_profiles`,
    `promoter_profiles`, `ep_dis`, `distance_breaks` and
    `distance_weight`, which are only defined in the script body further
    down — it therefore only works after that setup has run.  The main
    loop below re-implements this same computation inline, so this
    function appears to be dead code; confirm before removing.
    """
    xs = []
    for i, g in enumerate(glist):
        x = np.zeros((sample_count, motif_count))
        promoters = np.array(pg_pair[g])
        enhancers = [
            np.array(ep_pair[p]) for p in promoters if (p in ep_pair)
        ]
        if len(enhancers) > 0:
            enhancers = np.unique(np.concatenate(enhancers))
        else:
            # No enhancer pairs for any promoter of this gene: skip it
            # (note: nothing is appended, so output order follows only
            # the genes that survive this filter).
            continue
        # Row indices of this gene's promoters in the promoter results.
        pids = np.array([
            np.where(promoter_results['regions'] == promoter)[0][0]
            for promoter in promoters
        ])
        for enhancer in enhancers:
            eid = np.where(enhancer_results['regions'] == enhancer)[0][0]
            # Distance is measured from the first promoter only.
            weight = get_weight(
                get_distance(promoters[0], enhancer, ep_dis), distance_breaks,
                distance_weight)
            # Profiles are stacked per-region in blocks of sample_count rows.
            x += weight * enhancer_profiles[eid * sample_count:
                                            (eid + 1) * sample_count, :].todense()
        # Promoters always get the first (closest-bin) weight.
        x += distance_weight[0] * sum([
            promoter_profiles[pid * sample_count:
                              (pid + 1) * sample_count, :].todense() for pid in pids
        ])
        # Average over all contributing regions.
        x = x / float(len(promoters) + len(enhancers))
        xs.append(x)
    return xs
def normalize(x):
    """Column-standardize a 2-D matrix.

    Subtracts each column's mean; columns with nonzero spread are also
    divided by their standard deviation.  Constant columns are left
    centered at zero (no division), avoiding divide-by-zero.
    """
    centered = np.array(x - x.mean(0))
    stds = np.array(centered.std(0))
    nonconstant = stds != 0
    centered[:, nonconstant] = centered[:, nonconstant] / stds[nonconstant]
    return centered
def perm_test(x, y, covariates):
    """Permutation null distribution of the gene-level association statistic.

    Repeats args.size_multiplier times: permute the labels, select motifs
    with R glmpath (L1 path), then compare a logistic model on the
    selected motifs + covariates against a covariates-only null via a
    likelihood-ratio-style statistic.

    Returns:
        (stats_j, sels_j): per-permutation statistics (-1 marks a failed
        or degenerate fit) and the number of selected motif columns.
    """
    x = normalize(x)
    if x.std() == 0:
        # Constant profile matrix: nothing to test for any permutation.
        return np.repeat(-1, args.size_multiplier), np.repeat(0, args.size_multiplier)
    stats_j = np.zeros(args.size_multiplier)
    sels_j = np.zeros(args.size_multiplier)
    # C=1e42 effectively disables the L2 penalty (plain logistic fits).
    logistic_alt = LogisticRegression(C=1e42, penalty='l2', solver='liblinear')
    logistic_null = LogisticRegression(
        C=1e42, penalty='l2', solver='liblinear')
    covariates = normalize(covariates)
    for i in range(args.size_multiplier):
        y_perm = np.random.permutation(y)
        nullmodel = logistic_null.fit(covariates, y_perm)
        Lnullmodel = np.log(nullmodel.predict_proba(covariates))
        try:
            # L1 regularization path fit in R; keep at most 10 variables.
            reg = r['glmpath'](
                x, y_perm, **{'min.lambda': 1e-2, 'max.steps': 10, 'max.vars': 10})
            # Coefficients at the last step of the path (drop intercept).
            coefs_ = np.asanyarray(reg.rx2('b.predictor'))
            coefs = coefs_[-1][1:]
            sel = np.where(coefs != 0)[0]
            if sel.shape[0] == 0:
                # Path selected nothing: record a failed statistic.
                stat = -1
                nsel = 0
            else:
                xalt = np.concatenate((x[:, sel], covariates),
                                      axis=1).astype(float)
                altmodel = logistic_alt.fit(xalt, y_perm)
                _Laltmodel = altmodel.predict_proba(xalt)
                if (_Laltmodel <= 0).any():
                    # log() would produce -inf/NaN; treat as failed fit.
                    stat = -1
                    nsel = len(sel)
                else:
                    Laltmodel = np.log(_Laltmodel)
                    _y = np.vstack([1 - y_perm, y_perm])
                    # 2 * (alt log-likelihood - null log-likelihood).
                    stat = 2 * (np.matmul(_y, Laltmodel - Lnullmodel).trace())
                    nsel = len(sel)
            if np.isnan(stat):
                stat = -1
                nsel = len(sel)
        except rpy2.rinterface_lib.embedded.RRuntimeError:
            # glmpath failed in R; record a failed permutation.
            stat = -1
            nsel = 0
        stats_j[i] = stat
        sels_j[i] = nsel
    return stats_j, sels_j
def test(x, y, covariates):
    """Gene-level association test on the observed (unpermuted) labels.

    Selects motif columns with R glmpath, then compares a logistic model
    on selected motifs + covariates against a covariates-only null.

    Returns:
        (stat, sel, coefs): the likelihood-ratio-style statistic (-1 on
        failure/degeneracy), the selected column indices (or None), and
        their glmpath coefficients (or None).
    """
    x = normalize(x)
    if x.std() == 0:
        # Constant profile matrix: nothing to test.
        return -1, None, None
    covariates = normalize(covariates)
    # C=1e42 effectively disables the L2 penalty (plain logistic fits).
    logistic_alt = LogisticRegression(C=1e42, penalty='l2', solver='liblinear')
    logistic_null = LogisticRegression(
        C=1e42, penalty='l2', solver='liblinear')
    nullmodel = logistic_null.fit(covariates, y)
    Lnullmodel = nullmodel.predict_log_proba(covariates)
    try:
        # L1 regularization path fit in R; keep at most 10 variables.
        reg = r['glmpath'](
            x, y, **{'min.lambda': 1e-2, 'max.steps': 10, 'max.vars': 10})
        # Coefficients at the last step of the path (drop intercept).
        coefs_ = np.asanyarray(reg.rx2('b.predictor'))
        coefs = coefs_[-1][1:]
        sel = np.where(coefs != 0)[0]
        if sel.shape[0] == 0:
            # not selected
            sel = None
            coefs = None
            stat = -1
        else:
            coefs = coefs[sel]
            xalt = np.concatenate(
                (x[:, sel], covariates), axis=1).astype(float)
            altmodel = logistic_alt.fit(xalt, y)
            _Laltmodel = altmodel.predict_proba(xalt)
            if (_Laltmodel <= 0).any():
                # log() would produce -inf/NaN; treat as failed fit.
                stat = -1
            else:
                Laltmodel = np.log(_Laltmodel)
                _y = np.vstack([1 - y, y])
                # 2 * (alt log-likelihood - null log-likelihood).
                stat = 2 * (np.matmul(_y, Laltmodel - Lnullmodel).trace())
            if np.isnan(stat):
                stat = -1
    except rpy2.rinterface_lib.embedded.RRuntimeError:
        # glmpath failed in R; report no selection.
        sel = None
        coefs = None
        stat = -1
    return stat, sel, coefs
# argument
# ---- Command-line arguments -------------------------------------------------
parser = argparse.ArgumentParser()
parser.add_argument(
    '-w', '--weights', type=str, default='enhancer_promoter_distance_weight.csv',
    help='CSV file that specifying enhancer-promoter distance weights. (default: enhancer_promoter_distance_weight.csv)')
parser.add_argument(
    '-r', '--results', type=str, default=None, required=True,
    help='The results folder of region-based test. (default: None)')
parser.add_argument(
    '-e', '--promoter-enhancer-pair', type=str, default='promoter_enhancer_pair.txt',
    help='File specifying promoter enhancer relationship (default: Promoter_enhancer_pair.txt)')
parser.add_argument(
    '-g', '--promoter-gene-pair', type=str, default='promoter_gene_pair.txt',
    help='File specifying promoter gene relationship (default: promoter_gene_pair.txt)')
parser.add_argument(
    '-m', '--motifs', type=str, default=None, required=True,
    help='Path to motif PWMs (default: None)')
parser.add_argument(
    '-p', '--phenocov', type=str, default='pheno_covar.txt',
    help='Phenotype and covariate file (default: pheno_covar.txt)')
parser.add_argument('-n', '--size-multiplier', action='store', type=int, help='Specify the number of permutation per region (default: 10)', default=10)
parser.add_argument('-o', "--output-file", default="gene",
                    type=str, help="output directory")
args = parser.parse_args()
# ---- Load region-level results and profiles from the region-based test -----
enhancer_results = np.load(args.results + '/enhancer_results.collected.npz', allow_pickle=True)
promoter_results = np.load(args.results + '/promoter_results.collected.npz', allow_pickle=True)
enhancer_profiles = load_npz(args.results + '/enhancer_profiles.collected.npz')
promoter_profiles = load_npz(args.results + '/promoter_profiles.collected.npz')
# Promoter-enhancer distances/pairs and promoter-gene mapping.
ep_dis, ep_pair = get_pe(args.promoter_enhancer_pair)
pg_pair, glist = get_pg(args.promoter_gene_pair)
# Distance-bin break points and their weights (parallel columns).
distance_breaks = pd.read_csv(args.weights)['breaks']
distance_weight = pd.read_csv(args.weights)['weights']
# Phenotype + covariates, one row per sample, sorted to match profile order.
phenocov = pd.read_csv(args.phenocov, sep="\t")
phenocov = phenocov.sort_values("samples")
sample_count = phenocov['samples'].shape[0]
motif_count = enhancer_profiles.shape[1]
# NOTE(review): matrix_names is never used below — confirm before removing.
matrix_names = np.array([basename(x)[0:-20]
                         for x in glob(args.motifs+'/*.pwm')])
y = phenocov.pheno.values
# Covariates are assumed to start at the 4th column — TODO confirm layout.
covariates = phenocov.iloc[:, 3:].values
profiles = []
regions = []
stats = []
sels = []
coefs = []
perm_regions = []
perm_stats = []
perm_sels = []
# ---- Per-gene loop: build weighted profile, run real and permutation tests -
for g in tqdm(glist):
    regions.append(g)
    perm_regions.append(g)
    x = np.zeros((sample_count, motif_count))
    promoters = np.array(pg_pair[g])
    enhancers = [
        np.array(ep_pair[p]) for p in promoters if p in ep_pair
    ]
    if len(enhancers) > 0:
        enhancers = np.unique(np.concatenate(enhancers))
    else:
        # Gene has no paired enhancers; its region was appended above but
        # no stat/profile is recorded, so output arrays can be shorter
        # than `regions` — NOTE(review): confirm this asymmetry is intended.
        continue
    # NOTE(review): leftover debug print — consider removing.
    print(promoter_results['regions'], promoters)
    # Row indices of this gene's promoters in the promoter results.
    pids = np.array([
        np.where(promoter_results['regions'] == promoter)[0][0]
        for promoter in promoters
    ])
    for enhancer in enhancers:
        eid = np.where(enhancer_results['regions'] == enhancer)[0][0]
        # Distance is measured from the first promoter only.
        weight = get_weight(
            get_distance(promoters[0], enhancer, ep_dis), distance_breaks,
            distance_weight)
        # Profiles are stacked per-region in blocks of sample_count rows.
        x += weight * enhancer_profiles[eid * sample_count:
                                        (eid + 1) * sample_count, :].todense()
    # Promoters always get the first (closest-bin) weight.
    x += distance_weight[0] * sum([
        promoter_profiles[pid * sample_count:
                          (pid + 1) * sample_count, :].todense() for pid in pids
    ])
    # Average over all contributing regions and store sparsely.
    x = x / float(len(promoters) + len(enhancers))
    x = csr_matrix(x)
    profiles.append(x)
    # Real-label association test and permutation null for this gene.
    stat, sel, coef = test(x, y, covariates)
    stats.append(stat)
    sels.append(sel)
    coefs.append(coef)
    perm_stat, perm_sel = perm_test(x, y, covariates)
    perm_stats.append(perm_stat)
    perm_sels.append(perm_sel)
# ---- Persist results --------------------------------------------------------
stats = np.array(stats)
sels = np.array(sels)
coefs = np.array(coefs)
perm_stats = np.array(perm_stats)
perm_sels = np.array(perm_sels)
np.savez(file=args.output_file+"_real_results.npz",
         stats=stats, sels=sels, coefs=coefs, regions=regions)
np.savez(file=args.output_file+"_perm_results.npz",
         stats=perm_stats, sels=perm_sels, regions=perm_regions)
save_npz(args.output_file+"_profiles.npz", vstack(profiles))
| 34.18038 | 152 | 0.594297 |
22d67fd9f9e1124e8b02a094c7a3e906845eb9b0 | 1,466 | py | Python | infoblox_netmri/api/remote/models/auth_server_grid_remote.py | infobloxopen/infoblox_netmri | aa1c744df7e439dbe163bb9edd165e4e85a9771b | [
"Apache-2.0"
] | 12 | 2016-02-19T12:37:54.000Z | 2022-03-04T20:11:08.000Z | infoblox_netmri/api/remote/models/auth_server_grid_remote.py | azinfoblox/infoblox-netmri | 02372c5231e2677ab6299cb659a73c9a41b4b0f4 | [
"Apache-2.0"
] | 18 | 2015-11-12T18:37:00.000Z | 2021-05-19T07:59:55.000Z | infoblox_netmri/api/remote/models/auth_server_grid_remote.py | azinfoblox/infoblox-netmri | 02372c5231e2677ab6299cb659a73c9a41b4b0f4 | [
"Apache-2.0"
] | 18 | 2016-01-07T12:04:34.000Z | 2022-03-31T11:05:41.000Z | from ..remote import RemoteModel
class AuthServerGridRemote(RemoteModel):
    """Remote model describing an authentication-server grid entry.

    Purely declarative: the ``properties`` tuple below lists the field
    names the RemoteModel base exposes for this record.  All attribute
    types are strings as received from the API.

    Fields:
        id, priority, auth_server, auth_port, auth_encryption,
        auth_cert, certificate, ocsp_certs, interface, status,
        auth_shared_secret, enabled_ind, auth_version,
        source_interface_id.
    """

    # Field names surfaced by the RemoteModel machinery for this record.
    properties = ("id",
                  "priority",
                  "auth_server",
                  "auth_port",
                  "auth_encryption",
                  "auth_cert",
                  "certificate",
                  "ocsp_certs",
                  "interface",
                  "status",
                  "auth_shared_secret",
                  "enabled_ind",
                  "auth_version",
                  "source_interface_id",
                  )
1bf4a9044125d7f7fc06023311141c170fcf9cf4 | 4,497 | py | Python | src/models/mnist_module.py | KLOSYX/lightning-hydra-template | f522f50fa2e38c476e27cacbd5f656dc45870948 | [
"Unlicense",
"MIT"
] | 996 | 2021-04-05T08:50:09.000Z | 2022-03-31T20:22:11.000Z | src/models/mnist_module.py | KLOSYX/lightning-hydra-template | f522f50fa2e38c476e27cacbd5f656dc45870948 | [
"Unlicense",
"MIT"
] | 114 | 2021-04-04T00:22:27.000Z | 2022-03-27T15:57:27.000Z | src/models/mnist_module.py | KLOSYX/lightning-hydra-template | f522f50fa2e38c476e27cacbd5f656dc45870948 | [
"Unlicense",
"MIT"
] | 182 | 2021-04-05T01:50:57.000Z | 2022-03-31T09:45:44.000Z | from typing import Any, List
import torch
from pytorch_lightning import LightningModule
from torchmetrics import MaxMetric
from torchmetrics.classification.accuracy import Accuracy
from src.models.components.simple_dense_net import SimpleDenseNet
class MNISTLitModule(LightningModule):
    """
    Example of LightningModule for MNIST classification.

    A LightningModule organizes your PyTorch code into 5 sections:
        - Computations (init).
        - Train loop (training_step)
        - Validation loop (validation_step)
        - Test loop (test_step)
        - Optimizers (configure_optimizers)

    Read the docs:
        https://pytorch-lightning.readthedocs.io/en/latest/common/lightning_module.html
    """

    def __init__(
        self,
        input_size: int = 784,
        lin1_size: int = 256,
        lin2_size: int = 256,
        lin3_size: int = 256,
        output_size: int = 10,
        lr: float = 0.001,
        weight_decay: float = 0.0005,
    ):
        super().__init__()

        # this line allows to access init params with 'self.hparams' attribute
        # it also ensures init params will be stored in ckpt
        self.save_hyperparameters(logger=False)

        # The dense network receives all hyperparameters (sizes etc.).
        self.model = SimpleDenseNet(hparams=self.hparams)

        # loss function
        self.criterion = torch.nn.CrossEntropyLoss()

        # use separate metric instance for train, val and test step
        # to ensure a proper reduction over the epoch
        self.train_acc = Accuracy()
        self.val_acc = Accuracy()
        self.test_acc = Accuracy()

        # for logging best so far validation accuracy
        self.val_acc_best = MaxMetric()

    def forward(self, x: torch.Tensor):
        """Run the underlying dense network; returns class logits."""
        return self.model(x)

    def step(self, batch: Any):
        """Shared train/val/test computation: loss + argmax predictions."""
        x, y = batch
        logits = self.forward(x)
        loss = self.criterion(logits, y)
        preds = torch.argmax(logits, dim=1)
        return loss, preds, y

    def training_step(self, batch: Any, batch_idx: int):
        loss, preds, targets = self.step(batch)

        # log train metrics
        acc = self.train_acc(preds, targets)
        self.log("train/loss", loss, on_step=False, on_epoch=True, prog_bar=False)
        self.log("train/acc", acc, on_step=False, on_epoch=True, prog_bar=True)

        # we can return here dict with any tensors
        # and then read it in some callback or in `training_epoch_end()`` below
        # remember to always return loss from `training_step()` or else backpropagation will fail!
        return {"loss": loss, "preds": preds, "targets": targets}

    def training_epoch_end(self, outputs: List[Any]):
        # `outputs` is a list of dicts returned from `training_step()`
        pass

    def validation_step(self, batch: Any, batch_idx: int):
        loss, preds, targets = self.step(batch)

        # log val metrics
        acc = self.val_acc(preds, targets)
        self.log("val/loss", loss, on_step=False, on_epoch=True, prog_bar=False)
        self.log("val/acc", acc, on_step=False, on_epoch=True, prog_bar=True)

        return {"loss": loss, "preds": preds, "targets": targets}

    def validation_epoch_end(self, outputs: List[Any]):
        # Track the best validation accuracy seen across epochs.
        acc = self.val_acc.compute()  # get val accuracy from current epoch
        self.val_acc_best.update(acc)
        self.log("val/acc_best", self.val_acc_best.compute(), on_epoch=True, prog_bar=True)

    def test_step(self, batch: Any, batch_idx: int):
        loss, preds, targets = self.step(batch)

        # log test metrics
        acc = self.test_acc(preds, targets)
        self.log("test/loss", loss, on_step=False, on_epoch=True)
        self.log("test/acc", acc, on_step=False, on_epoch=True)

        return {"loss": loss, "preds": preds, "targets": targets}

    def test_epoch_end(self, outputs: List[Any]):
        pass

    def on_epoch_end(self):
        # reset metrics at the end of every epoch
        self.train_acc.reset()
        self.test_acc.reset()
        self.val_acc.reset()

    def configure_optimizers(self):
        """Choose what optimizers and learning-rate schedulers to use in your optimization.
        Normally you'd need one. But in the case of GANs or similar you might have multiple.

        See examples here:
            https://pytorch-lightning.readthedocs.io/en/latest/common/lightning_module.html#configure-optimizers
        """
        return torch.optim.Adam(
            params=self.parameters(), lr=self.hparams.lr, weight_decay=self.hparams.weight_decay
        )
6517072c7e216e694b7d851dffb245ed67414bb3 | 26,374 | py | Python | trax/layers/combinators_test.py | dedsec-9/trax | c394f9df7ee9dfe918cd67f4af2217d361f0f733 | [
"Apache-2.0"
] | 7,220 | 2019-10-07T23:46:53.000Z | 2022-03-31T16:28:05.000Z | trax/layers/combinators_test.py | dedsec-9/trax | c394f9df7ee9dfe918cd67f4af2217d361f0f733 | [
"Apache-2.0"
] | 301 | 2019-10-08T06:42:04.000Z | 2022-03-12T07:03:46.000Z | trax/layers/combinators_test.py | dedsec-9/trax | c394f9df7ee9dfe918cd67f4af2217d361f0f733 | [
"Apache-2.0"
] | 783 | 2019-10-08T06:36:36.000Z | 2022-03-25T02:00:29.000Z | # coding=utf-8
# Copyright 2021 The Trax Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# Lint as: python3
"""Tests for combinator layers."""
from absl.testing import absltest
from absl.testing import parameterized
import numpy as np
from trax import fastmath
from trax import shapes
import trax.layers as tl
def DivideBy(val):  # pylint: disable=invalid-name
  """Returns a division layer (n_in == 1, n_out == 1) dividing inputs by `val`."""
  def _divide(x):
    return x / val
  return tl.Fn('DivideBy', _divide)
def ReturnConst(val):  # pylint: disable=invalid-name
  """Returns a constant layer (n_in == 0, n_out == 1) that emits `val`."""
  def _const():
    return val
  return tl.Fn('ReturnConst', _const)
def SmallerThan(val):  # pylint: disable=invalid-name
  """Returns a layer computing elementwise `input < val`."""
  def _is_smaller(x):
    return x < val
  return tl.Fn('SmallerThan', _is_smaller)
# TODO(jonni): Consider a more generic home for this utiliity function.
def as_list(outputs):
  """Converts layer outputs to nested plain-Python lists for equality tests.

  Args:
    outputs: A tensor, or an arbitrarily nested tuple/list of tensors, as
        produced by a layer's forward application.  Tensors are NumPy
        ndarray-like, which makes direct equality testing awkward (it
        yields elementwise results needing `all`/`any`).

  Returns:
    The same nesting rendered as Python lists, with every tensor replaced
    by its `.tolist()` value, so results compare directly via
    `assertEqual`.
  """
  if not isinstance(outputs, (list, tuple)):
    return outputs.tolist()
  return [as_list(item) for item in outputs]
class SerialTest(absltest.TestCase):
def test_none_is_no_op(self):
layer = tl.Serial(None)
xs = [np.array([1, 2, 3, 4]),
np.array([10, 20, 30])]
ys = layer(xs)
self.assertEqual(as_list(ys), [[1, 2, 3, 4],
[10, 20, 30]])
def test_empty_list_is_no_op(self):
layer = tl.Serial([])
xs = [np.array([1, 2, 3, 4]),
np.array([10, 20, 30])]
ys = layer(xs)
self.assertEqual(as_list(ys), [[1, 2, 3, 4],
[10, 20, 30]])
def test_one_in_one_out(self):
layer = tl.Serial(DivideBy(3))
x = np.array([3, 6, 9, 12])
y = layer(x)
self.assertEqual(as_list(y), [1, 2, 3, 4])
def test_zero_in_one_out(self):
layer = tl.Serial(ReturnConst(np.array([3, 4, 5, 6])))
y = layer(())
self.assertEqual(as_list(y), [3, 4, 5, 6])
def test_one_in_two_out(self):
layer = tl.Serial(DivideBy(3),
ReturnConst(np.array([3, 4, 5, 6])))
x = np.array([3, 6, 9, 12])
y = layer(x)
self.assertEqual(as_list(y), [[3, 4, 5, 6],
[1, 2, 3, 4]])
def test_const_div(self):
layer = tl.Serial(ReturnConst(np.array([3, 6, 9, 12])),
DivideBy(3))
y = layer(())
self.assertEqual(as_list(y), [1, 2, 3, 4])
def test_div_div(self):
layer = tl.Serial(DivideBy(2.0), DivideBy(5.0))
x = np.array([10, 20, 30])
y = layer(x)
self.assertEqual(as_list(y), [1, 2, 3])
def test_dup_dup(self):
layer = tl.Serial(tl.Dup(), tl.Dup())
x = np.array([1, 2, 3])
ys = layer(x)
self.assertEqual(as_list(ys), [[1, 2, 3],
[1, 2, 3],
[1, 2, 3]])
def test_default_name(self):
layer = tl.Serial(tl.Dup(), tl.Dup())
self.assertIn('Serial', str(layer))
def test_custom_name(self):
layer = tl.Serial(tl.Dup(), tl.Dup(), name='Branch')
self.assertIn('Branch', str(layer))
def test_weights(self):
model = tl.Serial(tl.Dense(4), tl.Dense(5), tl.Dense(7))
self.assertIsInstance(model.weights, tuple)
self.assertLen(model.weights, 3)
def test_flat_weights_and_state(self):
model = tl.Serial(tl.Dup(), tl.Dense(5), tl.Serial(tl.Dense(7), tl.Dup()))
sample_input_signature = shapes.signature(np.zeros((2, 3)))
model.init(sample_input_signature)
flat_weights, flat_state = tl.flatten_weights_and_state(
model.weights, model.state)
# Model has 2 pairs of trainable weights: (w, b) for the 2 dense layers.
# So after making them flat, there are 4 trainable weights.
self.assertLen(flat_weights, 4)
self.assertEmpty(flat_state)
model2 = tl.Serial(tl.Dense(5), tl.Dup(), tl.Dense(7))
sig = model2.weights_and_state_signature(sample_input_signature)
weights2, state2 = tl.unflatten_weights_and_state(
flat_weights, flat_state, sig)
model2.weights = weights2
model2.state = state2
self.assertLen(model2.weights, 3)
self.assertEqual(model.weights[1], model2.weights[0])
self.assertEqual(model.weights[2][0], model2.weights[2])
def test_flat_weights_and_state_shared(self):
shared = tl.Dense(5)
model = tl.Serial(tl.Dense(5), shared, tl.Serial(shared, tl.Dup()))
sample_input_signature = shapes.signature(np.zeros((2, 3)))
model.init(sample_input_signature)
flat_weights, flat_state = tl.flatten_weights_and_state(
model.weights, model.state)
# Model has 2 pairs of trainable weights: (w, b) for the 2 dense layers.
# So after making them flat, there are 4 trainable weights.
self.assertLen(flat_weights, 4)
self.assertEmpty(flat_state)
model2 = tl.Serial(tl.Dense(5), tl.Dup(), tl.Dense(5))
sig = model2.weights_and_state_signature(sample_input_signature)
weights2, state2 = tl.unflatten_weights_and_state(
flat_weights, flat_state, sig)
model2.weights = weights2
model2.state = state2
self.assertLen(model2.weights, 3)
self.assertEqual(model.weights[0], model2.weights[0])
self.assertEqual(model.weights[1], model2.weights[2])
def test_assign_sublayer_weights(self):
layer = tl.Dense(5, use_bias=False)
model = tl.Serial(tl.Serial(layer, tl.Dense(6)), tl.Dense(7))
sample_input = np.array([1, 2, 3, 4, 5])
weights, _ = model.init(shapes.signature(sample_input))
new_layer_weights = np.random.uniform(weights[0][0].shape)
layer.weights = new_layer_weights
self.assertIs(model.weights[0][0], new_layer_weights)
def test_shared_weights(self):
layer = tl.Dense(5)
model = tl.Serial(layer, layer)
sample_input = np.array([1, 2, 3, 4, 5])
weights, _ = model.init(shapes.signature(sample_input))
self.assertIs(weights[1], tl.GET_WEIGHTS_FROM_CACHE)
def test_shared_weights_nested(self):
layer = tl.Dense(5)
model = tl.Serial(layer, tl.Serial(layer))
sample_input = np.array([1, 2, 3, 4, 5])
weights, _ = model.init(shapes.signature(sample_input))
self.assertIs(weights[1][0], tl.GET_WEIGHTS_FROM_CACHE)
def test_shared_weights_double_nested(self):
layer = tl.Dense(5)
model = tl.Serial(tl.Serial(layer), tl.Serial(layer))
sample_input = np.array([1, 2, 3, 4, 5])
weights, _ = model.init(shapes.signature(sample_input))
self.assertIs(weights[1][0], tl.GET_WEIGHTS_FROM_CACHE)
def test_shared_weights_for_shared_serial(self):
layer = tl.Serial(tl.Dense(5), tl.Dense(5))
model = tl.Serial(layer, layer)
sample_input = np.array([1, 2, 3, 4, 5])
# Init gives weights reflecting weight sharing.
weights, _ = model.init(shapes.signature(sample_input))
self.assertIsNot(weights[0], tl.GET_WEIGHTS_FROM_CACHE)
self.assertIs(weights[1], tl.GET_WEIGHTS_FROM_CACHE)
# Forward pass runs successfully.
y = model(sample_input)
self.assertEqual(y.shape, (5,))
def test_state(self):
model = tl.Serial(tl.Dense(4), tl.Dense(5), tl.Dense(7))
self.assertIsInstance(model.state, tuple)
self.assertLen(model.state, 3)
def test_set_rng_recurse_two_levels(self):
dense_00 = tl.Dense(2)
dense_01 = tl.Dense(2)
dense_10 = tl.Dense(2)
dense_11 = tl.Dense(2)
layer = tl.Serial(
tl.Serial(dense_00, dense_01),
tl.Serial(dense_10, dense_11),
)
input_signature = shapes.ShapeDtype((1, 2))
_, _ = layer.init(input_signature)
weights = layer.weights
dense_00_w, dense_00_b = weights[0][0]
dense_01_w, dense_01_b = weights[0][1]
dense_10_w, dense_10_b = weights[1][0]
dense_11_w, dense_11_b = weights[1][1]
# Setting rng's recursively during init should yield differing weights.
self.assertFalse(np.array_equal(dense_00_w, dense_01_w))
self.assertFalse(np.array_equal(dense_00_b, dense_01_b))
self.assertFalse(np.array_equal(dense_10_w, dense_11_w))
self.assertFalse(np.array_equal(dense_10_b, dense_11_b))
class ParallelTest(absltest.TestCase):
  """Tests for tl.Parallel, which applies sublayers side by side."""

  def test_dup_dup(self):
    layer = tl.Parallel(tl.Dup(), tl.Dup())
    inputs = [np.array([1, 2, 3]), np.array([10, 20])]
    outputs = layer(inputs)
    # Each Dup doubles its own input stream.
    self.assertEqual(
        as_list(outputs),
        [[1, 2, 3], [1, 2, 3], [10, 20], [10, 20]])

  def test_div_div(self):
    layer = tl.Parallel(DivideBy(0.5), DivideBy(3.0))
    inputs = [np.array([1, 2, 3]), np.array([30, 60])]
    outputs = layer(inputs)
    self.assertEqual(as_list(outputs), [[2, 4, 6], [10, 20]])

  def test_two_no_ops(self):
    # Both [] and None act as pass-through sublayers.
    layer = tl.Parallel([], None)
    inputs = [np.array([1, 2, 3]), np.array([10, 20])]
    outputs = layer(inputs)
    self.assertEqual(as_list(outputs), [[1, 2, 3], [10, 20]])

  def test_default_name(self):
    self.assertIn('Parallel', str(tl.Parallel(tl.Dup(), tl.Dup())))

  def test_custom_name(self):
    layer = tl.Parallel(tl.Dup(), tl.Dup(), name='DupDup')
    self.assertIn('DupDup', str(layer))

  def test_weights(self):
    model = tl.Parallel(tl.Dense(3), tl.Dense(5))
    self.assertIsInstance(model.weights, tuple)
    self.assertLen(model.weights, 2)

  def test_shared_weights(self):
    dense = tl.Dense(5)
    model = tl.Parallel(dense, dense)
    inputs = (np.array([1, 2, 3, 4, 5]), np.array([1, 2, 3, 4, 5]))
    weights, _ = model.init(shapes.signature(inputs))
    self.assertIs(weights[1], tl.GET_WEIGHTS_FROM_CACHE)

  def test_shared_weights_nested(self):
    dense = tl.Dense(5)
    model = tl.Parallel([dense, tl.Dense(2)], [dense, tl.Dense(2)])
    inputs = (np.array([1, 2, 3, 4, 5]), np.array([1, 2, 3, 4, 5]))
    weights, _ = model.init(shapes.signature(inputs))
    self.assertIs(weights[1][0], tl.GET_WEIGHTS_FROM_CACHE)

  def test_shared_weights_for_shared_parallel(self):
    block = tl.Parallel(tl.Dense(5), tl.Dense(7))
    model = tl.Parallel(block, block)
    inputs = [
        np.array([1, 2, 3]),
        np.array([10, 20, 30]),
        np.array([100, 200, 300]),
        np.array([1000, 2000, 3000]),
    ]
    # Init gives weights reflecting the sharing of `block`.
    weights, _ = model.init(shapes.signature(inputs))
    self.assertIsNot(weights[0], tl.GET_WEIGHTS_FROM_CACHE)
    self.assertIs(weights[1], tl.GET_WEIGHTS_FROM_CACHE)
    # Forward pass still runs and yields the expected output shapes.
    y0, y1, y2, y3 = model(inputs)
    self.assertEqual(y0.shape, (5,))
    self.assertEqual(y1.shape, (7,))
    self.assertEqual(y2.shape, (5,))
    self.assertEqual(y3.shape, (7,))

  def test_state(self):
    model = tl.Parallel(tl.Dense(3), tl.Dense(5))
    self.assertIsInstance(model.state, tuple)
    self.assertLen(model.state, 2)
class ConcatenateTest(absltest.TestCase):
  """Tests for tl.Concatenate over various axes and item counts."""

  def test_n_in_n_out(self):
    layer = tl.Concatenate()
    # By default two inputs are merged into one output.
    self.assertEqual(layer.n_in, 2)
    self.assertEqual(layer.n_out, 1)

  def test_with_defaults(self):
    layer = tl.Concatenate()  # Default n_items=2, axis=-1
    inputs = [np.array([[1, 2, 3], [4, 5, 6]]),
              np.array([[10, 20, 30], [40, 50, 60]])]
    outputs = layer(inputs)
    self.assertEqual(as_list(outputs),
                     [[1, 2, 3, 10, 20, 30], [4, 5, 6, 40, 50, 60]])

  def test_axis_0(self):
    layer = tl.Concatenate(axis=0)
    inputs = [np.array([[1, 2, 3], [4, 5, 6]]),
              np.array([[10, 20, 30], [40, 50, 60]])]
    output = layer(inputs)
    # Rows of the second input are stacked below those of the first.
    self.assertEqual(as_list(output),
                     [[1, 2, 3], [4, 5, 6], [10, 20, 30], [40, 50, 60]])

  def test_axis_1(self):
    layer = tl.Concatenate(axis=1)
    inputs = [np.array([[1, 2, 3], [4, 5, 6]]),
              np.array([[10, 20, 30], [40, 50, 60]])]
    output = layer(inputs)
    self.assertEqual(as_list(output),
                     [[1, 2, 3, 10, 20, 30], [4, 5, 6, 40, 50, 60]])

  def test_n_items_is_not_default(self):
    layer = tl.Concatenate(n_items=3)
    inputs = [np.array([[1, 2, 3], [4, 5, 6]]),
              np.array([[10, 20, 30], [40, 50, 60]]),
              np.array([[100, 200, 300], [400, 500, 600]])]
    output = layer(inputs)
    self.assertEqual(output.shape, (2, 9))
    self.assertEqual(as_list(output),
                     [[1, 2, 3, 10, 20, 30, 100, 200, 300],
                      [4, 5, 6, 40, 50, 60, 400, 500, 600]])

  def test_repr(self):
    # The repr encodes the axis (when given) and the number of inputs.
    self.assertEqual(repr(tl.Concatenate()), 'Concatenate_in2')
    self.assertEqual(repr(tl.Concatenate(axis=0)), 'Concatenate_axis0_in2')
    self.assertEqual(repr(tl.Concatenate(axis=1)), 'Concatenate_axis1_in2')
    self.assertEqual(repr(tl.Concatenate(n_items=3)), 'Concatenate_in3')
class BranchTest(absltest.TestCase):
  """Tests for tl.Branch, which fans one input set out to several paths."""

  def test_noop_dup(self):
    layer = tl.Branch([], tl.Dup())
    outputs = layer(np.array([1, 2, 3]))
    # The no-op copy plus Dup's two copies give three outputs.
    self.assertEqual(as_list(outputs), [[1, 2, 3], [1, 2, 3], [1, 2, 3]])

  def test_add_div(self):
    layer = tl.Branch(tl.Add(), DivideBy(0.5))
    inputs = [np.array([1, 2, 3]), np.array([10, 20, 30])]
    outputs = layer(inputs)
    self.assertEqual(as_list(outputs), [[11, 22, 33], [2, 4, 6]])

  def test_one_sublayer(self):
    layer = tl.Branch(DivideBy(0.5))
    outputs = layer(np.array([1, 2, 3]))
    self.assertEqual(as_list(outputs), [2, 4, 6])

  def test_default_name(self):
    self.assertIn('Branch', str(tl.Branch(tl.Add(), DivideBy(0.5))))

  def test_printing_sublayers(self):
    layer = tl.Branch(tl.Add(), tl.Add())
    expected_result = 'Branch_in2_out2[\n Add_in2\n Add_in2\n]'
    self.assertEqual(expected_result, str(layer))
class SelectTest(absltest.TestCase):
  """Tests for tl.Select, which routes inputs by index."""

  def test_computes_n_in(self):
    # n_in defaults to one more than the largest requested index.
    for indices, expected_n_in in (([0, 0], 1), ([1, 0], 2), ([2], 3)):
      self.assertEqual(tl.Select(indices).n_in, expected_n_in)

  def test_given_n_in(self):
    # An explicit n_in overrides the inferred value.
    for n in (2, 3):
      self.assertEqual(tl.Select([0], n_in=n).n_in, n)

  def test_first_of_3(self):
    layer = tl.Select([0], n_in=3)
    inputs = [np.array([1, 2, 3]), np.array([10, 20]), np.array([100])]
    self.assertEqual(as_list(layer(inputs)), [1, 2, 3])

  def test_second_of_3(self):
    layer = tl.Select([1], n_in=3)
    inputs = [np.array([1, 2, 3]), np.array([10, 20]), np.array([100])]
    self.assertEqual(as_list(layer(inputs)), [10, 20])
class DropTest(absltest.TestCase):
  """Tests for tl.Drop, which consumes its input and outputs nothing."""

  def test_drop(self):
    dropped = tl.Drop()(np.array([1, 2, 3]))
    self.assertEqual(as_list(dropped), [])
class SwapTest(absltest.TestCase):
  """Tests for tl.Swap, which exchanges its two inputs."""

  def test_swap(self):
    first, second = np.array([1, 2, 3]), np.array([10, 20, 30])
    outputs = tl.Swap()([first, second])
    self.assertEqual(as_list(outputs), [[10, 20, 30], [1, 2, 3]])
class ChunkTest(absltest.TestCase):
  """Tests for tl.Chunk, which applies a layer to batch chunks."""

  def test_chunk(self):
    layer = tl.Dense(4)
    x = np.array([[1, 2, 3], [4, 5, 6]])
    layer.init(x)
    expected = layer(x)
    chunked = tl.Chunk(layer, 1)(x)
    # Chunked application matches plain application up to float error.
    self.assertLess(np.sum((expected - chunked)**2), 1e-5)

  def test_chunk_uneven_numbers(self):
    layer = tl.Dense(4)
    x = np.array([[1, 2, 3], [4, 5, 6]])
    layer.init(x)
    expected = layer(x)
    # A chunk size larger than the batch passes the input through whole.
    chunked = tl.Chunk(layer, 3)(x)
    self.assertLess(np.sum((expected - chunked)**2), 1e-5)
    # With pass_unchunkable disabled, an indivisible batch must raise.
    strict_chunk = tl.Chunk(layer, 3, pass_unchunkable=False)
    self.assertRaises(tl.LayerError, lambda: strict_chunk(x))
class SerialWithSideOutputsTest(absltest.TestCase):
  """Tests for tl.SerialWithSideOutputs."""

  def test_serial_with_side_outputs_div_div(self):
    def make_block():
      return tl.Parallel(DivideBy(2.0), DivideBy(5.0))
    layer = tl.SerialWithSideOutputs([make_block(), make_block()])
    inputs = (np.array([1, 2, 3]),
              np.array([10, 20, 30, 40, 50]),
              np.array([100, 200]))
    outputs = layer(inputs)
    # Shapes of all three streams are preserved through the stack.
    self.assertEqual([y.shape for y in outputs], [(3,), (5,), (2,)])
BACKENDS = [fastmath.Backend.JAX]


@parameterized.named_parameters(('_' + b.value, b) for b in BACKENDS)
class ScanTest(parameterized.TestCase):
  """Tests for tl.Scan under each available backend."""

  def _AddWithCarry(self):  # pylint: disable=invalid-name
    del self

    def f(x, carry):
      total = x + carry
      return total, total  # output and carry are the same
    return tl.Fn('AddWithCarry', f, n_out=2)

  def test_default_axis(self, backend):
    with fastmath.use_backend(backend):
      layer = tl.Scan(self._AddWithCarry())
      inputs = [
          np.array([[0, 1, 2, 3],
                    [0, 10, 20, 30],
                    [0, 100, 200, 300]]),
          np.array([9000, 8000, 7000, 6000])
      ]
      outputs = layer(inputs)
      # Scanning over axis 0 accumulates down each column.
      self.assertEqual(as_list(outputs),
                       [[[9000, 8001, 7002, 6003],
                         [9000, 8011, 7022, 6033],
                         [9000, 8111, 7222, 6333]],
                        [9000, 8111, 7222, 6333]])

  def test_axis_1(self, backend):
    with fastmath.use_backend(backend):
      layer = tl.Scan(self._AddWithCarry(), axis=1)
      inputs = [
          np.array([[0, 1, 2, 3],
                    [0, 10, 20, 30],
                    [0, 100, 200, 300]]),
          np.array([9000, 8000, 7000])
      ]
      outputs = layer(inputs)
      # Scanning over axis 1 accumulates along each row.
      self.assertEqual(as_list(outputs),
                       [[[9000, 9001, 9003, 9006],
                         [8000, 8010, 8030, 8060],
                         [7000, 7100, 7300, 7600]],
                        [9006, 8060, 7600]])

  def test_predict(self, backend):
    with fastmath.use_backend(backend):
      layer = tl.Scan(self._AddWithCarry(), axis=1, mode='predict')
      outputs = layer([np.array([[0, 1, 2]]), np.array([90])])
      self.assertEqual(as_list(outputs), [[[90, 91, 93]], [93]])
      # In predict mode the carry persists across successive calls.
      outputs = layer([np.array([[3, 4]]), np.array([90])])
      self.assertEqual(as_list(outputs), [[[96, 100]], [100]])

  def test_multi_input(self, backend):
    def _MultiInputFn():  # pylint: disable=invalid-name
      def f(a, b, carry):
        return a + b, b, carry + 1
      return tl.Fn('MultiInputFn', f, n_out=2)

    with fastmath.use_backend(backend):
      layer = tl.Scan(_MultiInputFn(), axis=1)
      inputs = [
          np.array([[0, 1, 2],
                    [0, 10, 20]]),
          np.array([[4, 5, 6],
                    [40, 50, 60]]),
          np.array([9000, 8000])
      ]
      outputs = layer(inputs)
      self.assertEqual(as_list(outputs),
                       [[[4, 6, 8],
                         [40, 60, 80]],
                        [[4, 5, 6],
                         [40, 50, 60]],
                        [9003, 8003]])

  def test_no_carry(self, backend):
    def _AddOne():  # pylint: disable=invalid-name
      return tl.Fn('AddOne', lambda x: x + 1)

    with fastmath.use_backend(backend):
      layer = tl.Scan(_AddOne(), n_carry=0)
      output = layer(np.array([[1, 3, 7],
                               [10, 30, 70]]))
      self.assertEqual(as_list(output), [[2, 4, 8],
                                         [11, 31, 71]])
class CondTest(absltest.TestCase):
  """Tests for tl.Cond, which routes to one of two branches at run time."""

  def test_basic_true(self):
    layer = tl.Cond(ReturnConst(True), ReturnConst([2]), ReturnConst([5]))
    layer.init(())
    # True condition selects the first branch.
    self.assertEqual(as_list(layer(tuple())), 2)

  def test_basic_false(self):
    layer = tl.Cond(ReturnConst(False), ReturnConst([2]), ReturnConst([5]))
    layer.init(())
    # False condition selects the second branch.
    self.assertEqual(as_list(layer(tuple())), 5)

  def test_complex_blocks(self):
    layer = tl.Cond(ReturnConst(True), DivideBy(2.), DivideBy(4.))
    inputs = [np.arange(5).astype(np.float32)]
    layer.init(shapes.signature(inputs))
    self.assertEqual(as_list(layer(inputs)), [0., 0.5, 1.0, 1.5, 2.0])

  def test_condition_func_true(self):
    layer = tl.Cond(SmallerThan(3.0), DivideBy(2.), DivideBy(4.))
    inputs = (np.array(2.), np.array([4., 12.]))
    layer.init(shapes.signature(inputs))
    # 2 < 3, so the true branch (divide by 2) applies.
    self.assertEqual(as_list(layer(inputs)), [2., 6.])

  def test_condition_func_false(self):
    layer = tl.Cond(SmallerThan(3.0), DivideBy(2.), DivideBy(4.))
    inputs = (np.array(4.), np.array([4., 12.]))
    layer.init(shapes.signature(inputs))
    # 4 >= 3, so the false branch (divide by 4) applies.
    self.assertEqual(as_list(layer(inputs)), [1., 3.])

  def test_condition_func_default_false(self):
    # Without an explicit false branch, input passes through unchanged.
    layer = tl.Cond(SmallerThan(3.0), DivideBy(2.))
    inputs = (np.array(4.), np.array([4., 12.]))
    layer.init(shapes.signature(inputs))
    self.assertEqual(as_list(layer(inputs)), [4., 12.])

  def test_exception_n_out(self):
    # Branches with differing n_out are rejected at construction time.
    cond = SmallerThan(3.0)
    self.assertRaises(
        ValueError, lambda: tl.Cond(cond, DivideBy(2.), tl.Dup()))

  def test_exception_n_in(self):
    # Branches with differing n_in are rejected at construction time.
    cond = SmallerThan(3.0)
    self.assertRaises(
        ValueError, lambda: tl.Cond(cond, ReturnConst(2.), DivideBy(2.)))

  def test_exception_run1(self):
    # With zero-input branches, Cond expects exactly one input overall.
    cond = SmallerThan(3.0)
    true = ReturnConst(2.)
    false = ReturnConst(5.)

    def init_and_run(layer, xs):
      layer.init(shapes.signature(xs))
      layer(xs)

    # One input works.
    init_and_run(tl.Cond(cond, true, false), np.array(4.))
    # Zero or two inputs must fail.
    for xs in ((), (np.array(4.), np.array([4., 12.]))):
      layer = tl.Cond(cond, true, false)
      # pylint: disable=cell-var-from-loop
      self.assertRaises(Exception, lambda: init_and_run(layer, xs))

  def test_exception_run2(self):
    # With one-input branches, Cond expects exactly two inputs overall.
    cond = SmallerThan(3.0)
    true = DivideBy(2.)
    false = DivideBy(5.)

    def init_and_run(layer, xs):
      layer.init(shapes.signature(xs))
      layer(xs)

    # Two inputs work.
    layer = tl.Cond(cond, true, false)
    init_and_run(layer, (np.array(4.), np.array([4., 12.])))
    # Zero or one input must fail.
    for xs in ((), (np.array(4.))):
      # pylint: disable=cell-var-from-loop
      self.assertRaises(Exception, lambda: init_and_run(layer, xs))

  def test_weights_and_state(self):
    cond = SmallerThan(3.0)
    true = tl.Dense(5)
    false = tl.Dense(5)
    different = tl.Dense(5)
    layer = tl.Cond(cond, true, false)
    inputs = (np.array(2.), np.array([0., 1., 2.]))
    layer.init(shapes.signature(inputs))
    # Composite weights mirror the three sublayers.
    self.assertEqual(as_list(layer.weights),
                     as_list((cond.weights, true.weights, false.weights)))
    self.assertNotEqual(as_list(true.weights), as_list(false.weights))
    self.assertNotEqual(as_list(true.weights), as_list(different.weights))
    # Assigning a sublayer's weights shows through the composite ...
    false.weights = true.weights
    self.assertEqual(as_list(layer.weights),
                     as_list((cond.weights, true.weights, true.weights)))
    # ... and assigning composite weights reaches the sublayers.
    layer.weights = (cond.weights, true.weights, different.weights)
    self.assertEqual(as_list(layer.weights),
                     as_list((cond.weights, true.weights, different.weights)))
    # State mirrors the same structure.
    self.assertEqual(as_list(layer.state),
                     as_list((cond.state, true.state, false.state)))
    # Simple setter assignments must work with Cond.init_weights_and_state;
    # all states are empty here, so only the mechanics are exercised.
    false.state = true.state
    layer.state = (cond.state, true.state, different.state)
class BatchLeadingAxesTest(absltest.TestCase):
  """Tests for tl.BatchLeadingAxes, which folds leading axes into a batch."""

  def _Id3Dim(self):  # pylint: disable=invalid-name
    del self

    def f(x):
      # The wrapped layer must only ever see 3-dimensional inputs.
      assert len(x.shape) == 3
      return x
    return tl.Fn('Id3Dim', f, n_out=1)

  def test_2axes(self):
    layer = tl.BatchLeadingAxes(self._Id3Dim(), n_last_axes_to_keep=2)
    for shape in ((3, 4, 5), (2, 3, 4, 5), (1, 2, 3, 4, 5)):
      outputs = layer(np.zeros(shape))
      self.assertEqual(outputs.shape, shape)
class BidirectionalTest(absltest.TestCase):
  """Tests for tl.Bidirectional wrapping a recurrent layer."""

  def test_dimensionality(self):
    inputs = np.ones((2, 3, 8))
    layer = tl.Bidirectional(tl.GRU(n_units=8))
    layer.init(shapes.signature(inputs))
    outputs = layer(inputs)
    # Forward and backward passes are concatenated on the feature axis.
    self.assertEqual(outputs.shape, (2, 3, 8 + 8))
# Run the full test suite when invoked as a script.
if __name__ == '__main__':
  absltest.main()
| 32.803483 | 78 | 0.588458 |
d9dbc7a201378c5dcda059b7e60214ae5053e324 | 2,388 | py | Python | userbot/plugins/gmute.py | UNIQUEXD/UNIQUEBOT | 09c22f417b994d0774aea46ee1246fbfb4369394 | [
"MIT"
] | 1 | 2020-05-30T22:19:12.000Z | 2020-05-30T22:19:12.000Z | userbot/plugins/gmute.py | UNIQUEXD/UniqueBot | 09c22f417b994d0774aea46ee1246fbfb4369394 | [
"MIT"
] | null | null | null | userbot/plugins/gmute.py | UNIQUEXD/UniqueBot | 09c22f417b994d0774aea46ee1246fbfb4369394 | [
"MIT"
] | null | null | null | '''
ThankYou @pureindialover
'''
from userbot.plugins.sql_helper.mute_sql import is_muted, mute, unmute
import asyncio
from userbot.utils import admin_cmd
#@command(outgoing=True, pattern=r"^.gmute ?(\d+)?")
@borg.on(admin_cmd(pattern=r"gmute ?(\d+)?"))
async def startgmute(event):
    """Globally mute a user.

    Target resolution order: explicit user id in the command, then the
    replied-to message's sender, then (in a private chat) the chat itself.
    Forwarded messages are ignored.
    """
    private = False
    if event.fwd_from:
        return
    elif event.is_private:
        await event.edit("Unexpected issues or ugly errors may occur!")
        await asyncio.sleep(3)
        private = True
    reply = await event.get_reply_message()
    if event.pattern_match.group(1) is not None:
        userid = event.pattern_match.group(1)
    elif reply is not None:
        userid = reply.sender_id
    elif private is True:
        userid = event.chat_id
    else:
        return await event.edit("Please reply to a user or add their into the command to gmute them.")
    # (Removed unused locals: `chat_id` and an `event.get_chat()` call that
    # triggered a needless network round-trip.)
    if is_muted(userid, "gmute"):
        return await event.edit("This user is already gmuted")
    try:
        mute(userid, "gmute")
    except Exception as e:
        await event.edit("Error occured!\nError is " + str(e))
    else:
        await event.edit("Successfully gmuted that person")
#@command(outgoing=True, pattern=r"^.ungmute ?(\d+)?")
@borg.on(admin_cmd(pattern=r"ungmute ?(\d+)?"))
async def endgmute(event):
    """Lift a global mute.

    Target resolution order mirrors ``startgmute``: explicit user id, then
    the replied-to message's sender, then (in a private chat) the chat.
    """
    private = False
    if event.fwd_from:
        return
    elif event.is_private:
        await event.edit("Unexpected issues or ugly errors may occur!")
        await asyncio.sleep(3)
        private = True
    reply = await event.get_reply_message()
    if event.pattern_match.group(1) is not None:
        userid = event.pattern_match.group(1)
    elif reply is not None:
        userid = reply.sender_id
    elif private is True:
        userid = event.chat_id
    else:
        return await event.edit("Please reply to a user or add their into the command to ungmute them.")
    # (Removed unused local `chat_id`.)
    if not is_muted(userid, "gmute"):
        return await event.edit("This user is not gmuted")
    try:
        unmute(userid, "gmute")
    except Exception as e:
        await event.edit("Error occured!\nError is " + str(e))
    else:
        await event.edit("Successfully ungmuted that person")
@command(incoming=True)
async def watcher(event):
    # Delete every incoming message whose sender is globally muted.
    # NOTE(review): `command` is not imported in this file; presumably the
    # userbot plugin loader injects it into the namespace (like `borg`) —
    # confirm before refactoring.
    if is_muted(event.sender_id, "gmute"):
        await event.delete()
76d52723499d65593345e9c4673ebb8b8a18ffbe | 5,741 | py | Python | evaluation/IJB/mapping_results-full-rotonly.py | dwhite54/insightface | ea172e4921c3960c0684404afff6d0d862447eae | [
"MIT"
] | null | null | null | evaluation/IJB/mapping_results-full-rotonly.py | dwhite54/insightface | ea172e4921c3960c0684404afff6d0d862447eae | [
"MIT"
] | null | null | null | evaluation/IJB/mapping_results-full-rotonly.py | dwhite54/insightface | ea172e4921c3960c0684404afff6d0d862447eae | [
"MIT"
] | null | null | null | #!/usr/bin/env python
# coding: utf-8
# In[1]:
import numpy as np
import matplotlib.pyplot as plt
import traceback
# In[2]:
import IJB_evals as IJB
# In[3]:
from IPython.display import display
# In[4]:
class Args:
    """Namespace object mimicking the IJB_evals command-line options."""

    def __init__(self, subset='IJBC', is_bunch=False, restore_embs_left=None, restore_embs_right=None, fit_mapping=False, fit_flips=False, decay_coef=0.0, pre_template_map=False, is_rotation_map=True, save_result="IJB_result/{model_name}_{subset}.npz"):
        # Options exposed as constructor arguments.
        self.__dict__.update(
            subset=subset,
            is_bunch=is_bunch,
            restore_embs_left=restore_embs_left,
            restore_embs_right=restore_embs_right,
            fit_mapping=fit_mapping,
            fit_flips=fit_flips,
            decay_coef=decay_coef,
            pre_template_map=pre_template_map,
            is_rotation_map=is_rotation_map,
            save_result=save_result,
        )
        # Fixed defaults not exposed as constructor arguments.
        self.__dict__.update(
            save_embeddings=False,
            model_file=None,
            data_path='./',
            batch_size=64,
            save_label=False,
            force_reload=False,
            is_one_2_N=False,
            plot_only=None,
        )

    def __str__(self):
        return '{}: {}'.format(self.__class__, self.__dict__)
# In[5]:
# Accumulates one result DataFrame per (left, right) embedding pair,
# keyed by the result file stem.
dataframes = {}
# Mapping-fit hyperparameters shared by every run in the loop below.
fit_flips = False
decay_coef = 0.0
pre_template_map= True
is_rotation_map = True
# In[6]:
# One entry per pre-computed embedding file:
# (embeddings path, training dataset, backbone architecture, loss head).
embs_list = [('/s/red/b/nobackup/data/portable/tbiom/models/insightface/evaluation/IJB/IJB_result/MS1MV2-ResNet100-Arcface_IJBC.npz', 'MS1MV2', 'ResNet100', 'ArcFace'),
             ('/s/red/b/nobackup/data/portable/tbiom/models/insightface/evaluation/IJB/IJB_result/VGG2-ResNet50-Arcface_IJBC.npz', 'VGGFace2', 'ResNet50', 'ArcFace'),
             ('/s/red/b/nobackup/data/portable/tbiom/models/insightface/evaluation/IJB/IJB_result/glint360k_r100FC_0.1_IJBC.npz', 'Glint360k', 'ResNet100', 'PartialFC_r0.1'),
             ('/s/red/b/nobackup/data/portable/tbiom/models/insightface/evaluation/IJB/IJB_result/glint360k_r100FC_1.0_IJBC.npz', 'Glint360k', 'ResNet100', 'PartialFC_r1.0'),
             ('/s/red/b/nobackup/data/portable/tbiom/models/arcface-tf2/ijbc_embs_arc_res50.npy', 'MS1M', 'ResNet50', 'ArcFace'),
             ('/s/red/b/nobackup/data/portable/tbiom/models/arcface-tf2/ijbc_embs_arc_mbv2.npy', 'MS1M', 'MobileNetV2', 'ArcFace'),
             ('/s/red/b/nobackup/data/portable/tbiom/models/facenet/vggface2_ir2_ijbc_embs.npy', 'VGGFace2', 'InceptionResNetV1', 'CenterLoss'),
             ('/s/red/b/nobackup/data/portable/tbiom/models/facenet/casia_ir2_ijbc_embs.npy', 'CASIA-WebFace', 'InceptionResNetV1', 'CenterLoss'),
             ('/s/red/b/nobackup/data/portable/tbiom/models/Probabilistic-Face-Embeddings/ijbc_embs_pfe_sphere64_msarcface_am.npy', 'MS1M', '64-CNN', 'SphereFace+PFE'),
             ('/s/red/b/nobackup/data/portable/tbiom/models/Probabilistic-Face-Embeddings/ijbc_embs_pfe_sphere64_casia_am.npy', 'CASIA-WebFace', '64-CNN', 'SphereFace+PFE')]
# # cache maps
# In[7]:
import map_tools
# In[8]:
import importlib
# Reload local modules so notebook re-runs pick up source edits.
importlib.reload(IJB)
importlib.reload(map_tools)
# ```
#
# for left_embs_fn, left_dataset, left_architecture, left_head in embs_list:
# for right_embs_fn, right_dataset, right_architecture, right_head in embs_list:
# if left_embs_fn == right_embs_fn:
# continue
#
# print(left_embs_fn, 'to', right_embs_fn)
#
# #for pre_template_map in pre_template_map:
# save_result_name = 'rot-maps/{}_TO_{}.npy'.format(left_embs_fn.split('/')[-1].split('.')[0], right_embs_fn.split('/')[-1].split('.')[0])
# if '.npz' in left_embs_fn:
# left_embs = np.load(left_embs_fn)['embs']
# else:
# left_embs = np.load(left_embs_fn)
# if '.npz' in right_embs_fn:
# right_embs = np.load(right_embs_fn)['embs']
# else:
# right_embs = np.load(right_embs_fn)
# M = map_tools.fit_rot_map(left_embs, right_embs, 11856, LR=100.0, LOG_INTERVAL=10, EPOCHS=100)
# np.save(save_result_name, M)
# ```
# # eval maps
# In[9]:
# Evaluate a rotation-only mapping for every ordered pair of distinct
# embedding models; each successful run contributes one labelled DataFrame.
for left_embs, left_dataset, left_architecture, left_head in embs_list:
    for right_embs, right_dataset, right_architecture, right_head in embs_list:
        if left_embs == right_embs:
            continue
        try:
            #for pre_template_map in pre_template_map:
            # Result stem, e.g. "<leftfile>_TO_<rightfile>_rotonly".
            save_result_name = '{}_TO_{}_rotonly'.format(left_embs.split('/')[-1].split('.')[0], right_embs.split('/')[-1].split('.')[0])
            save_result = '../../../../results/{}.npz'.format(save_result_name)
            args = Args(subset='IJBC',
                        is_bunch=False,
                        restore_embs_left=left_embs,
                        restore_embs_right=right_embs,
                        fit_mapping=True,
                        fit_flips=fit_flips,
                        decay_coef=decay_coef,
                        pre_template_map=pre_template_map,
                        is_rotation_map=is_rotation_map,
                        save_result=save_result)
            df, fig = IJB.main(args)
            # Tag the result rows with both models' provenance.
            df['L_DATASET'] = left_dataset
            df['L_ARCH'] = left_architecture
            df['L_HEAD'] = left_head
            df['R_DATASET'] = right_dataset
            df['R_ARCH'] = right_architecture
            df['R_HEAD'] = right_head
            display(df)
            dataframes[save_result_name] = df
            print('saving to', args.save_result + '.csv')
            df.to_csv(args.save_result + '.csv')
        except Exception:
            # Keep iterating over the remaining pairs; just log the failure.
            traceback.print_exc()
# In[ ]:
import pandas as pd
# In[ ]:
# Concatenate every per-mapping result table into a single frame.
# (The identity comprehension `[df for df in dataframes.values()]` was
# redundant; pd.concat accepts the values view directly.)
superdf = pd.concat(dataframes.values())
# In[ ]:
superdf
# In[ ]:
# Persist the combined results for downstream analysis.
superdf.to_csv('../../../../results/ALL_aggregated_results_rotonly_v1.csv')
# In[ ]:
| 30.375661 | 253 | 0.642048 |
a704cb06a87b2833d5e4c868963c619f6dd2ec63 | 7,940 | py | Python | script.module.exodus/lib/resources/lib/sources/de/seriesever.py | TheWardoctor/wardoctors-repo | 893f646d9e27251ffc00ca5f918e4eb859a5c8f0 | [
"Apache-2.0"
] | 1 | 2019-03-05T09:38:10.000Z | 2019-03-05T09:38:10.000Z | script.module.exodus/lib/resources/lib/sources/de/seriesever.py | TheWardoctor/wardoctors-repo | 893f646d9e27251ffc00ca5f918e4eb859a5c8f0 | [
"Apache-2.0"
] | null | null | null | script.module.exodus/lib/resources/lib/sources/de/seriesever.py | TheWardoctor/wardoctors-repo | 893f646d9e27251ffc00ca5f918e4eb859a5c8f0 | [
"Apache-2.0"
] | 1 | 2021-11-05T20:48:09.000Z | 2021-11-05T20:48:09.000Z | # -*- coding: utf-8 -*-
"""
Exodus Add-on
This program is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program. If not, see <http://www.gnu.org/licenses/>.
"""
import base64
import json
import re
import urllib
import urlparse
from resources.lib.modules import cleantitle
from resources.lib.modules import client
from resources.lib.modules import control
from resources.lib.modules import directstream
from resources.lib.modules import source_utils
from resources.lib.modules import dom_parser
class source:
def __init__(self):
self.priority = 1
self.language = ['de']
self.domains = ['seriesever.net']
self.base_link = 'http://seriesever.net'
self.search_link = 'service/search?q=%s'
self.part_link = 'service/get_video_part'
self.login_link = 'service/login'
self.user = control.setting('seriesever.user')
self.password = control.setting('seriesever.pass')
def tvshow(self, imdb, tvdb, tvshowtitle, localtvshowtitle, aliases, year):
try:
url = self.__search([localtvshowtitle] + source_utils.aliases_to_array(aliases), year)
if not url and tvshowtitle != localtvshowtitle: url = self.__search([tvshowtitle] + source_utils.aliases_to_array(aliases), year)
return url
except:
return
def episode(self, url, imdb, tvdb, title, premiered, season, episode):
try:
if not url:
return
s = 'staffel-%s-episode-%s' % (season, episode)
s = '(?<=<a class=\"episode-name\" href=\")(.*?)(?='+s+')(.*?)(?=\")'
url = '/serien' + re.sub('\.\w+$', '', url)
url = urlparse.urljoin(self.base_link, url)
r = client.request(url, mobile=True)
p = dom_parser.parse_dom(r, 'div', attrs={'id': 'seasonss'})
url = re.search(s, p[0][1]).group()
return url
except:
return
def sources(self, url, hostDict, hostprDict):
sources = []
try:
if url == None:
return sources
url = urlparse.urljoin(self.base_link, url)
cookie = self.__get_premium_cookie()
r = client.request(url, mobile=True, cookie=cookie)
query = urlparse.urljoin(self.base_link, self.part_link)
id = re.compile('var\s*video_id\s*=\s*"(\d+)"').findall(r)[0]
p = dom_parser.parse_dom(r, 'a', attrs={'class': 'changePart', 'data-part': re.compile('\d+p')}, req='data-part')
for i in p:
i = i.attrs['data-part']
p = urllib.urlencode({'video_id': id, 'part_name': i, 'page': '0'})
p = client.request(query, cookie=cookie, mobile=True, XHR=True, post=p, referer=url)
p = json.loads(p)
p = p.get('part_count', 0)
for part_count in range(0, p):
try:
r = urllib.urlencode({'video_id': id, 'part_name': i, 'page': part_count})
r = client.request(query, cookie=cookie, mobile=True, XHR=True, post=r, referer=url)
r = json.loads(r)
r = r.get('part', {})
s = r.get('source', '')
url = r.get('code', '')
if s == 'url' and 'http' not in url:
url = self.__decode_hash(url)
elif s == 'other':
url = dom_parser.parse_dom(url, 'iframe', req='src')
if len(url) < 1: continue
url = url[0].attrs['src']
if '/old/seframer.php' in url: url = self.__get_old_url(url)
if 'keepup' in url:
print url
# needs to be fixed (keepup.gq)
elif self.domains[0] in url:
url = re.search('(?<=id=).*$', url).group()
url = 'https://drive.google.com/file/d/' + url
valid, host = source_utils.is_host_valid(url, hostDict)
if not valid: continue
if i in ['720p', 'HD']: quali = 'HD'
elif i in ['1080p', '1440p']: quali = i
elif i in ['2160p']: quali = '4K'
else: quali = 'SD'
urls, host, direct = source_utils.check_directstreams(url, host, quali)
for i in urls: sources.append({'source': host, 'quality': i['quality'], 'language': 'de', 'url': i['url'], 'direct': direct, 'debridonly': False})
except:
pass
return sources
except:
return sources
def resolve(self, url):
if url.startswith('/'): url = 'http:%s' % url
return url
def __search(self, titles, year):
try:
query = self.search_link % urllib.quote_plus(cleantitle.query(titles[0]))
query = urlparse.urljoin(self.base_link, query)
t = [cleantitle.get(i) for i in set(titles) if i]
y = ['%s' % str(year), '%s' % str(int(year) + 1), '%s' % str(int(year) - 1), '0']
r = client.request(query, XHR=True)
if r and r.startswith('{'): r = '[%s]' % r
r = json.loads(r)
r = [(i['url'], i['name']) for i in r if 'name' in i and 'url' in i]
r = [(i[0], i[1], re.findall('(.+?) \(*(\d{4})?\)*$', i[1])) for i in r]
r = [(i[0], i[2][0][0] if len(i[2]) > 0 else i[1], i[2][0][1] if len(i[2]) > 0 else '0') for i in r]
r = sorted(r, key=lambda i: int(i[2]), reverse=True) # with year > no year
r = [i[0] for i in r if cleantitle.get(i[1]) in t and i[2] in y][0]
url = source_utils.strip_domain(r)
url = url.replace('serien/', '')
return url
except:
return
def __decode_hash(self, hash):
hash = hash.replace("!BeF", "R")
hash = hash.replace("@jkp", "Ax")
hash += '=' * (-len(hash) % 4)
try: return base64.b64decode(hash)
except: return
def __get_old_url(self, url):
try:
r = client.request(url, mobile=True)
url = re.findall('url="(.*?)"', r)
if len(url) == 0:
url = dom_parser.parse_dom(r, 'iframe', req='src')[0].attrs['src']
if "play/se.php" in url:
r = client.request(url, mobile=True)
return self.__decode_hash(re.findall('link:"(.*?)"', r)[0])
else:
return url[0]
except:
return
def __get_premium_cookie(self):
try:
if (self.user == '' or self.password == ''): raise Exception()
login = urlparse.urljoin(self.base_link, self.login_link)
post = urllib.urlencode({'username': self.user, 'password': self.password})
cookie = client.request(login, mobile=True, post=post, XHR=True, output='cookie')
r = client.request(urlparse.urljoin(self.base_link, 'api'), mobile=True, cookie=cookie)
return cookie if r == '1' else ''
except:
return ''
| 38.173077 | 170 | 0.516499 |
fdc4c9f016a6fafaa9a7869f9a51a3c6a4e05d5b | 968 | py | Python | core/src/zeit/wochenmarkt/browser/ingredients.py | rickdg/vivi | 16134ac954bf8425646d4ad47bdd1f372e089355 | [
"BSD-3-Clause"
] | 5 | 2019-05-16T09:51:29.000Z | 2021-05-31T09:30:03.000Z | core/src/zeit/wochenmarkt/browser/ingredients.py | rickdg/vivi | 16134ac954bf8425646d4ad47bdd1f372e089355 | [
"BSD-3-Clause"
] | 107 | 2019-05-24T12:19:02.000Z | 2022-03-23T15:05:56.000Z | core/src/zeit/wochenmarkt/browser/ingredients.py | rickdg/vivi | 16134ac954bf8425646d4ad47bdd1f372e089355 | [
"BSD-3-Clause"
] | 3 | 2020-08-14T11:01:17.000Z | 2022-01-08T17:32:19.000Z | from zeit.wochenmarkt.sources import ingredientsSource
import grokcore.component as grok
import logging
import zeit.cms.browser.interfaces
import zeit.cms.browser.view
import zeit.wochenmarkt.interfaces
import zope.component
import zope.component.hooks
log = logging.getLogger(__name__)
class IngredientsSearch(zeit.cms.browser.view.JSON):
    """JSON view returning ingredient autocomplete entries."""

    def json(self):
        term = self.request.form.get('term')
        hits = ingredientsSource.factory.search(term) if term else []
        # jQuery-UI autocomplete expects label/value pairs.
        return [{'label': hit.name, 'value': hit.code} for hit in hits]
@grok.adapter(
    zeit.wochenmarkt.interfaces.IIngredientsSource,
    zeit.cms.browser.interfaces.ICMSLayer)
@grok.implementer(zeit.cms.browser.interfaces.ISourceQueryURL)
def IngredientsSearchURL(context, request):
    """Return the autocomplete endpoint URL for the ingredients source."""
    site_url = zope.traversing.browser.absoluteURL(
        zope.component.hooks.getSite(), request)
    return '{}/@@ingredients_find'.format(site_url)
| 27.657143 | 62 | 0.713843 |
902b186bd4fc5781a22f31c73b53398174c86841 | 1,263 | py | Python | tests/test_cli_v1.py | NovaDev94/pip-compile-multi | 5ec43dd5c54bde2ac7d1f4770f7e27a97802a7c1 | [
"MIT"
] | null | null | null | tests/test_cli_v1.py | NovaDev94/pip-compile-multi | 5ec43dd5c54bde2ac7d1f4770f7e27a97802a7c1 | [
"MIT"
] | null | null | null | tests/test_cli_v1.py | NovaDev94/pip-compile-multi | 5ec43dd5c54bde2ac7d1f4770f7e27a97802a7c1 | [
"MIT"
] | null | null | null | """End to end tests for CLI v2"""
import sys
from click.testing import CliRunner
import pytest
from pipcompilemulti.cli_v1 import cli
from pipcompilemulti.options import OPTIONS
from .utils import temp_dir
@pytest.fixture(autouse=True)
def requirements_dir():
    """Point pip-compile-multi at a fresh temporary directory for each test."""
    with temp_dir() as requirements_path:
        OPTIONS['base_dir'] = requirements_path
        yield
@pytest.mark.parametrize('command', ['--no-upgrade', '--upgrade',
                                     '--upgrade-package=pip-tools'])
def test_v1_command_exits_with_zero(command):
    """Run pip-compile-multi on itself through the v1 CLI."""
    local = 'local' if sys.version_info[0] >= 3 else 'local27'
    runner = CliRunner()
    # First pass: compile only the environment named by `local`.
    parameters = ['--only-name', local, command]
    result = runner.invoke(cli, parameters)
    # Second pass: same invocation with hash-generation options prepended.
    parameters = ['--generate-hashes', local,
                  '--in-ext', 'txt',
                  '--out-ext', 'hash'] + parameters
    result = runner.invoke(cli, parameters)
    assert result.exit_code == 0
def test_v1_verify_exits_with_zero():
    """`pip-compile-multi verify` succeeds on this project's own lockfiles."""
    result = CliRunner().invoke(cli, ['verify'])
    assert result.exit_code == 0
| 28.704545 | 68 | 0.627078 |
2ddf7737c3560fadbc544f9a488716622f8551b0 | 7,093 | py | Python | pipeline/mk_all_level1_fsf_bbr.py | lbconner/openfMRI | 265d8ef013dad14fd1741d5817d00f9411d85103 | [
"BSD-2-Clause"
] | 33 | 2015-02-17T17:21:43.000Z | 2021-08-23T08:27:10.000Z | pipeline/mk_all_level1_fsf_bbr.py | lbconner/openfMRI | 265d8ef013dad14fd1741d5817d00f9411d85103 | [
"BSD-2-Clause"
] | 13 | 2015-01-14T15:17:09.000Z | 2017-07-10T02:17:06.000Z | pipeline/mk_all_level1_fsf_bbr.py | lbconner/openfMRI | 265d8ef013dad14fd1741d5817d00f9411d85103 | [
"BSD-2-Clause"
] | 24 | 2015-01-27T10:02:47.000Z | 2021-03-19T20:05:35.000Z | #!/usr/bin/env python
""" mk_all_level1_fsf.py - make fsf files for all subjects
USAGE: python mk_all_level1_fsf_bbr.py <name of dataset> <modelnum> <basedir - default is staged> <nonlinear - default=1> <smoothing - default=0> <tasknum - default to all>
"""
## Copyright 2011, Russell Poldrack. All rights reserved.
## Redistribution and use in source and binary forms, with or without modification, are
## permitted provided that the following conditions are met:
## 1. Redistributions of source code must retain the above copyright notice, this list of
## conditions and the following disclaimer.
## 2. Redistributions in binary form must reproduce the above copyright notice, this list
## of conditions and the following disclaimer in the documentation and/or other materials
## provided with the distribution.
## THIS SOFTWARE IS PROVIDED BY RUSSELL POLDRACK ``AS IS'' AND ANY EXPRESS OR IMPLIED
## WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND
## FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL RUSSELL POLDRACK OR
## CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
## CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
## SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON
## ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
## NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF
## ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
import os
import glob
from mk_level1_fsf_bbr import *
from mk_level1_fsf import *
import launch_qsub
import argparse
import sys
def usage():
    """Write the module's usage docstring to stdout and exit with status 2."""
    sys.stdout.write(__doc__)
    sys.exit(2)
def parse_command_line():
    """Define the command-line interface and parse sys.argv.

    Returns an argparse.Namespace; --taskid is the only required option.
    """
    parser = argparse.ArgumentParser(description='setup_subject')

    # Dataset and anatomy selection.
    parser.add_argument('--taskid', dest='taskid', required=True,
                        help='Task ID')
    parser.add_argument('--parenv', dest='parenv', default='2way',
                        help='Parallel environment')
    parser.add_argument('--anatimg', dest='anatimg', default='',
                        help='Specified anatomy image')
    parser.add_argument('--tasknum', dest='tasknum', type=int,
                        help='Task number')
    parser.add_argument('--basedir', dest='basedir', default=os.getcwd(),
                        help='Base directory (above taskid directory)')
    # Preprocessing / model options.
    parser.add_argument('--smoothing', dest='smoothing', type=int, default=0,
                        help='Smoothing (mm FWHM)')
    parser.add_argument('--noconfound', dest='confound', action='store_false',
                        default=True, help='Omit motion/confound modeling')
    parser.add_argument('--use_inplane', dest='use_inplane', type=int,
                        default=0, help='Use inplane image')
    parser.add_argument('--nonlinear', dest='nonlinear', action='store_true',
                        default=False, help='Use nonlinear regristration')
    parser.add_argument('--nobbr', dest='nobbr', action='store_true',
                        default=False, help='Use standard reg instead of BBR')
    parser.add_argument('--nohpf', dest='hpf', action='store_false',
                        default=True, help='Turn off high pass filtering')
    parser.add_argument('--nowhiten', dest='whiten', action='store_false',
                        default=True, help='Turn off prewhitening')
    # Execution control.
    parser.add_argument('--test', dest='test', action='store_true',
                        default=False, help='Test mode (do not run job)')
    parser.add_argument('--nolaunch', dest='launch', action='store_false',
                        default=True, help='Do not launch job')
    parser.add_argument('--modelnum', dest='modelnum', type=int, default=1,
                        help='Model number')
    parser.add_argument('--ncores', dest='ncores', type=int, default=0,
                        help='number of cores (ncores * way = 12)')
    return parser.parse_args()
def main():
    """Generate level-1 FEAT .fsf files for every preprocessed bold run found
    under the dataset directory, write a shell script of `feat` commands, and
    (optionally) submit that script through qsub."""
    args=parse_command_line()
    print args
    smoothing=args.smoothing
    use_inplane=args.use_inplane
    basedir=os.path.abspath(args.basedir)
    nonlinear=args.nonlinear
    modelnum=args.modelnum
    if not args.confound:
        print 'omitting confound modeling'
    dataset=args.taskid
    # In test mode nothing is written or launched.
    if not args.test:
        outfile=open('mk_all_level1_%s.sh'%dataset,'w')
    # Restrict the glob to a single task when --tasknum was given.
    tasknum_spec='task*'
    if not args.tasknum==None:
        tasknum_spec='task%03d*'%args.tasknum
    dsdir=os.path.join(basedir,dataset)
    bolddirs=glob.glob(os.path.join(dsdir,'sub*/BOLD/%s'%tasknum_spec))
    print bolddirs
    for root in bolddirs:
        # Only process runs that have a motion-corrected, brain-extracted bold image.
        for m in glob.glob(os.path.join(root,'bold_mcf_brain.nii.gz')):
            # Parse subject/task/run identifiers out of the absolute path.
            # NOTE(review): the fixed indices assume a layout like
            # /<...>/<dataset>/subNNN/BOLD/taskNNN_runNNN - confirm on new hosts.
            f_split=root.split('/')
            scankey='/'+'/'.join(f_split[1:7])+'/scan_key.txt'
            taskid=f_split[6]
            subnum=int(f_split[7].lstrip('sub'))
            taskinfo=f_split[9].split('_')
            tasknum=int(taskinfo[0].lstrip('task'))
            runnum=int(taskinfo[1].lstrip('run'))
            #tr=float(load_scankey(scankey)['TR'])
            # check for inplane
            inplane='/'+'/'.join(f_split[1:8])+'/anatomy/inplane001_brain.nii.gz'
            if not args.test:
                # Generate the .fsf with either standard (--nobbr) or BBR registration.
                if args.nobbr:
                    fname=mk_level1_fsf(taskid,subnum,tasknum,runnum,smoothing,use_inplane,basedir,nonlinear,modelnum)
                else:
                    fname=mk_level1_fsf_bbr(taskid,subnum,tasknum,runnum,smoothing,use_inplane,basedir,nonlinear,modelnum,args.anatimg,args.confound,args.hpf,args.whiten)
                outfile.write('feat %s\n'%fname)
    if not args.test:
        outfile.close()
    if not args.test:
        print 'now launching all feats:'
        print "find %s/sub*/model/*.fsf |sed 's/^/feat /' > run_all_feats.sh; sh run_all_feats.sh"%args.taskid
        # Size the qsub request from the number of generated feat commands.
        f=open('mk_all_level1_%s.sh'%dataset)
        l=f.readlines()
        f.close()
        njobs=len(l)
        if args.parenv=='':
            args.parenv='6way'
        way=float(args.parenv.replace('way',''))
        # Default core count assumes 12 cores per node unless overridden.
        if args.ncores==0:
            ncores=(njobs/way)*12.0
        else:
            ncores=args.ncores
        if args.launch:
            launch_qsub.launch_qsub(script_name='mk_all_level1_%s.sh'%dataset,runtime='04:00:00',jobname='%sl1'%dataset,email=False,parenv=args.parenv,ncores=ncores)
if __name__ == '__main__':
main()
| 41.238372 | 172 | 0.661638 |
7afa7264569cb15afeaba3dccca69bb9a360ca51 | 10,230 | py | Python | modin/core/io/text/excel_dispatcher.py | Rubtsowa/modin | 6550939753c76e896ef2bfd65bb9468d6ad161d7 | [
"ECL-2.0",
"Apache-2.0"
] | null | null | null | modin/core/io/text/excel_dispatcher.py | Rubtsowa/modin | 6550939753c76e896ef2bfd65bb9468d6ad161d7 | [
"ECL-2.0",
"Apache-2.0"
] | null | null | null | modin/core/io/text/excel_dispatcher.py | Rubtsowa/modin | 6550939753c76e896ef2bfd65bb9468d6ad161d7 | [
"ECL-2.0",
"Apache-2.0"
] | null | null | null | # Licensed to Modin Development Team under one or more contributor license agreements.
# See the NOTICE file distributed with this work for additional information regarding
# copyright ownership. The Modin Development Team licenses this file to you under the
# Apache License, Version 2.0 (the "License"); you may not use this file except in
# compliance with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software distributed under
# the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF
# ANY KIND, either express or implied. See the License for the specific language
# governing permissions and limitations under the License.
"""Module houses `ExcelDispatcher` class, that is used for reading excel files."""
import pandas
import re
import sys
import warnings
from modin.core.io.text.text_file_dispatcher import TextFileDispatcher
from modin.config import NPartitions
EXCEL_READ_BLOCK_SIZE = 4096
class ExcelDispatcher(TextFileDispatcher):
    """Class handles utils for reading excel files."""

    @classmethod
    def _read(cls, io, **kwargs):
        """
        Read data from `io` according to the passed `read_excel` `kwargs` parameters.

        Parameters
        ----------
        io : str, bytes, ExcelFile, xlrd.Book, path object, or file-like object
            `io` parameter of `read_excel` function.
        **kwargs : dict
            Parameters of `read_excel` function.

        Returns
        -------
        new_query_compiler : BaseQueryCompiler
            Query compiler with imported data for further processing.
        """
        # Fall back to the serial (single worker) pandas reader for
        # configurations the parallel path does not support.
        if (
            kwargs.get("engine", None) is not None
            and kwargs.get("engine") != "openpyxl"
        ):
            warnings.warn(
                "Modin only implements parallel `read_excel` with `openpyxl` engine, "
                'please specify `engine=None` or `engine="openpyxl"` to '
                "use Modin's parallel implementation."
            )
            return cls.single_worker_read(io, **kwargs)
        if sys.version_info < (3, 7):
            warnings.warn("Python 3.7 or higher required for parallel `read_excel`.")
            return cls.single_worker_read(io, **kwargs)

        from zipfile import ZipFile
        from openpyxl.worksheet.worksheet import Worksheet
        from openpyxl.worksheet._reader import WorksheetReader
        from openpyxl.reader.excel import ExcelReader
        from modin.core.storage_formats.pandas.parsers import PandasExcelParser

        sheet_name = kwargs.get("sheet_name", 0)
        if sheet_name is None or isinstance(sheet_name, list):
            warnings.warn(
                "`read_excel` functionality is only implemented for a single sheet at a "
                "time. Multiple sheet reading coming soon!"
            )
            return cls.single_worker_read(io, **kwargs)

        warnings.warn(
            "Parallel `read_excel` is a new feature! Please email "
            "bug_reports@modin.org if you run into any problems."
        )

        # Load workbook-level metadata (manifest, shared strings) once; the
        # per-row cell data is parsed later, chunk by chunk, by the workers.
        # NOTE: ExcelReader() in read-only mode does not close file handle by itself
        # work around that by passing file object if we received some path
        io_file = open(io, "rb") if isinstance(io, str) else io
        try:
            ex = ExcelReader(io_file, read_only=True)
            ex.read()
            wb = ex.wb

            # Get shared strings
            ex.read_manifest()
            ex.read_strings()
            ws = Worksheet(wb)
        finally:
            if isinstance(io, str):
                # close only if it were us who opened the object
                io_file.close()

        pandas_kw = dict(kwargs)  # preserve original kwargs
        with ZipFile(io) as z:
            from io import BytesIO

            # Convert index to sheet name in file
            if isinstance(sheet_name, int):
                sheet_name = "sheet{}".format(sheet_name + 1)
            else:
                sheet_name = "sheet{}".format(wb.sheetnames.index(sheet_name) + 1)
            # Sheet entries in the archive vary in capitalization between
            # producers; probe the common variants before giving up.
            if any(sheet_name.lower() in name for name in z.namelist()):
                sheet_name = sheet_name.lower()
            elif any(sheet_name.title() in name for name in z.namelist()):
                sheet_name = sheet_name.title()
            else:
                raise ValueError("Sheet {} not found".format(sheet_name.lower()))
            # Pass this value to the workers
            kwargs["sheet_name"] = sheet_name

            # Materialize the sheet XML in memory so we can seek freely.
            f = z.open("xl/worksheets/{}.xml".format(sheet_name))
            f = BytesIO(f.read())
            total_bytes = cls.file_size(f)

            # Read some bytes from the sheet so we can extract the XML header and first
            # line. We need to make sure we get the first line of the data as well
            # because that is where the column names are. The header information will
            # be extracted and sent to all of the nodes.
            sheet_block = f.read(EXCEL_READ_BLOCK_SIZE)
            end_of_row_tag = b"</row>"
            while end_of_row_tag not in sheet_block:
                sheet_block += f.read(EXCEL_READ_BLOCK_SIZE)
            idx_of_header_end = sheet_block.index(end_of_row_tag) + len(end_of_row_tag)
            sheet_header = sheet_block[:idx_of_header_end]
            # Reset the file pointer to begin at the end of the header information.
            f.seek(idx_of_header_end)
            kwargs["_header"] = sheet_header
            footer = b"</sheetData></worksheet>"
            # Use openpyxml to parse the data
            reader = WorksheetReader(
                ws, BytesIO(sheet_header + footer), ex.shared_strings, False
            )
            # Attach cells to the worksheet
            reader.bind_cells()
            data = PandasExcelParser.get_sheet_data(
                ws, kwargs.get("convert_float", True)
            )
            # Extract column names from parsed data.
            column_names = pandas.Index(data[0])
            index_col = kwargs.get("index_col", None)
            # Remove column names that are specified as `index_col`
            if index_col is not None:
                column_names = column_names.drop(column_names[index_col])

            if not all(column_names):
                # some column names are empty, use pandas reader to take the names from it
                pandas_kw["nrows"] = 1
                df = pandas.read_excel(io, **pandas_kw)
                column_names = df.columns

            # Compute partition metadata upfront so it is uniform for all partitions
            chunk_size = max(1, (total_bytes - f.tell()) // NPartitions.get())
            column_widths, num_splits = cls._define_metadata(
                pandas.DataFrame(columns=column_names), column_names
            )
            kwargs["fname"] = io
            # Skiprows will be used to inform a partition how many rows come before it.
            kwargs["skiprows"] = 0
            row_count = 0
            data_ids = []
            index_ids = []
            dtypes_ids = []

            kwargs["num_splits"] = num_splits

            # Walk the sheet XML, dispatching one byte range per partition.
            # Each range is extended/truncated so it ends exactly on a </row>
            # boundary before being handed to a worker.
            while f.tell() < total_bytes:
                args = kwargs
                args["skiprows"] = row_count + args["skiprows"]
                args["start"] = f.tell()
                chunk = f.read(chunk_size)
                # This edge case can happen when we have reached the end of the data
                # but not the end of the file.
                if b"<row" not in chunk:
                    break
                row_close_tag = b"</row>"
                row_count = re.subn(row_close_tag, b"", chunk)[1]

                # Make sure we are reading at least one row.
                while row_count == 0:
                    chunk += f.read(chunk_size)
                    row_count += re.subn(row_close_tag, b"", chunk)[1]

                last_index = chunk.rindex(row_close_tag)
                f.seek(-(len(chunk) - last_index) + len(row_close_tag), 1)
                args["end"] = f.tell()

                # If there is no data, exit before triggering computation.
                if b"</row>" not in chunk and b"</sheetData>" in chunk:
                    break
                remote_results_list = cls.deploy(cls.parse, num_splits + 2, args)
                data_ids.append(remote_results_list[:-2])
                index_ids.append(remote_results_list[-2])
                dtypes_ids.append(remote_results_list[-1])

                # The end of the spreadsheet
                if b"</sheetData>" in chunk:
                    break

        # Compute the index based on a sum of the lengths of each partition (by default)
        # or based on the column(s) that were requested.
        if index_col is None:
            row_lengths = cls.materialize(index_ids)
            new_index = pandas.RangeIndex(sum(row_lengths))
        else:
            index_objs = cls.materialize(index_ids)
            row_lengths = [len(o) for o in index_objs]
            new_index = index_objs[0].append(index_objs[1:])

        # Compute dtypes by getting collecting and combining all of the partitions. The
        # reported dtypes from differing rows can be different based on the inference in
        # the limited data seen by each worker. We use pandas to compute the exact dtype
        # over the whole column for each column. The index is set below.
        dtypes = cls.get_dtypes(dtypes_ids)
        data_ids = cls.build_partition(data_ids, row_lengths, column_widths)
        # Set the index for the dtypes to the column names
        if isinstance(dtypes, pandas.Series):
            dtypes.index = column_names
        else:
            dtypes = pandas.Series(dtypes, index=column_names)
        new_frame = cls.frame_cls(
            data_ids,
            new_index,
            column_names,
            row_lengths,
            column_widths,
            dtypes=dtypes,
        )
        new_query_compiler = cls.query_compiler_cls(new_frame)
        if index_col is None:
            new_query_compiler._modin_frame.synchronize_labels(axis=0)
        return new_query_compiler
| 43.164557 | 90 | 0.598631 |
e14dd916a75b19338befb59cd8e20500ac2eef91 | 591 | py | Python | test/code/d1/t0.py | BBN-Q/pyqgl2 | 7acc8b244ee7799c21df03ecff8325e15cdb94d3 | [
"Apache-2.0"
] | 10 | 2017-09-30T14:31:42.000Z | 2021-12-12T07:52:05.000Z | test/code/d1/t0.py | BBN-Q/pyqgl2 | 7acc8b244ee7799c21df03ecff8325e15cdb94d3 | [
"Apache-2.0"
] | 56 | 2017-04-11T15:47:15.000Z | 2019-10-11T15:50:35.000Z | test/code/d1/t0.py | BBN-Q/pyqgl2 | 7acc8b244ee7799c21df03ecff8325e15cdb94d3 | [
"Apache-2.0"
] | 3 | 2019-06-20T07:08:39.000Z | 2020-10-24T19:29:05.000Z |
from qgl2.qgl2 import concur, seq
from qgl2.qgl2 import qgl2decl, qgl2main
from qgl2.qgl2 import classical, pulse, qreg
from qgl2.qgl1 import QubitFactory
# Note that for this next import to work you must run from the directory containing this file
import t1
from t2 import second_level as fred
@qgl2main
def main():
    """Three-qubit demo: run the t1 sequence on every pair of qubits,
    with all three pairs inside a single concurrent block."""
    a = QubitFactory("1")
    b = QubitFactory("2")
    c = QubitFactory("3")
    # Every pair shares a qubit with the other two pairs, so the three
    # sequences cannot be split into independent seq blocks.
    with concur:
        t1.t1(a, b)
        t1.t1(b, c)
        t1.t1(c, a)
# After exapansion, the with concur becomes 2 with seq blocks using overlapping qbits
# and that gives an error
| 22.730769 | 93 | 0.700508 |
39a2d24d03adbca68622b766cf30797dc980282b | 2,359 | py | Python | scripts/markovian_features.py | aametwally/MC_MicroSimilarities | b625fcbe7eb1fcd8f04fedec1a111b4d3a1bde3a | [
"MIT"
] | 2 | 2019-02-22T03:08:56.000Z | 2019-11-17T02:30:58.000Z | scripts/markovian_features.py | aametwally/MC_MicroSimilarities | b625fcbe7eb1fcd8f04fedec1a111b4d3a1bde3a | [
"MIT"
] | 7 | 2019-02-24T13:13:35.000Z | 2019-02-26T10:10:58.000Z | scripts/markovian_features.py | aametwally/MC_MicroSimilarities | b625fcbe7eb1fcd8f04fedec1a111b4d3a1bde3a | [
"MIT"
] | null | null | null | import numpy as np
import os
from sklearn.decomposition import PCA
# Machine-specific data directory - update when running on another host.
path = '/media/asem/store/experimental/build-markovian_features-Desktop_Qt_5_9_1_GCC_64bit-Release/src/app/data'

# Consensus (training) profile files, sorted for deterministic ordering.
consensus_gprofile_files = sorted([ f for f in os.listdir(path) \
                   if f.startswith('consensus_gprofile') ])
consensus_profile_files = sorted([ f for f in os.listdir(path) \
                   if f.startswith('consensus_profile') ])

# Test files split into gapped-profile and plain-profile subsets.
test_files = [ f for f in os.listdir(path) if f.startswith('test')]
test_gprofile_files = sorted( list( filter( lambda s : 'gprofile' in s, test_files )))
test_profile_files = sorted(list( filter( lambda s : '_profile' in s, test_files )))

# Helpers: load a file as float32, flatten to a row vector, and pull the
# numeric label out of a filename like 'test_profile_<label>.ext'.
f2array = lambda f: np.loadtxt( path + '/' + f, dtype = np.float32 )
array2vec = lambda a : a.reshape(1,-1)
f2label = lambda f: int(f.split('.')[0].split('_')[-1])

# Lazy pipelines; actual disk loading happens when these maps are consumed.
consensus_profiles_gen = map( array2vec , map( f2array , consensus_profile_files ))
consensus_gprofiles_gen = map( array2vec, map( f2array , consensus_gprofile_files ))
test_profiles_gen = map( f2array , test_profile_files )
test_gprofiles_gen = map( f2array , test_gprofile_files )
test_labels = np.fromiter( map( f2label , test_profile_files ) , dtype = np.int32 ).reshape(1,-1)
def canonical_features_array( features ):
    """Stack 1-D feature arrays into a single 2-D array, one row per feature.

    Args:
        features: iterable of numpy arrays; each is flattened to one row.

    Returns:
        A 2-D numpy array of shape (n_features, feature_length), or None when
        the iterable is empty (preserving the original contract).
    """
    rows = [f.reshape(1, -1) for f in features]
    if not rows:
        return None
    # A single vstack over the collected rows avoids the O(n^2) copying the
    # previous one-row-at-a-time stacking incurred.
    return np.vstack(rows)
# Expensive Operations: materialize the lazy feature pipelines into arrays.
consensus_profiles = canonical_features_array( consensus_profiles_gen )
consensus_gprofiles = canonical_features_array( consensus_gprofiles_gen )
test_profiles = canonical_features_array( test_profiles_gen )
test_gprofiles = canonical_features_array( test_gprofiles_gen )

# Fit PCA retaining 95% of variance on each consensus feature set.
pca_p = PCA( n_components = 0.95 )
pca_p.fit( consensus_profiles )
ev_p = pca_p.explained_variance_ratio_.cumsum()

pca_gp = PCA( n_components = 0.95 )
pca_gp.fit( consensus_gprofiles )
ev_gp = pca_gp.explained_variance_ratio_.cumsum()

# Project train and test data into the reduced spaces.
train_p_reduced = pca_p.transform( consensus_profiles )
train_gp_reduced = pca_gp.transform( consensus_gprofiles )
# BUG FIX: numpy has no `np.range`; the original call raised AttributeError.
# Use np.arange with an explicit dtype keyword (arange's third positional
# argument is the step, not the dtype). Labels are 1..n_train inclusive.
train_labels = np.arange(1, train_p_reduced.shape[0] + 1, dtype=np.int32)
test_p_reduced = pca_p.transform( test_profiles )
test_gp_reduced = pca_gp.transform( test_gprofiles )
7a860ea6c16fd38dc5bb11486c237de11f0cc287 | 2,002 | py | Python | tests/metrics/test_metric_lightning.py | daeyun/pytorch-lightning | 96785b99df878ebc45bc455eb7902a6dd4661139 | [
"Apache-2.0"
] | 1 | 2020-10-26T09:02:08.000Z | 2020-10-26T09:02:08.000Z | tests/metrics/test_metric_lightning.py | vivektalwar13071999/pytorch-lightning | 7c4f80a1afe3d7b0f1e9ee834aacaf8439195cdf | [
"Apache-2.0"
] | null | null | null | tests/metrics/test_metric_lightning.py | vivektalwar13071999/pytorch-lightning | 7c4f80a1afe3d7b0f1e9ee834aacaf8439195cdf | [
"Apache-2.0"
] | null | null | null | import torch
from pytorch_lightning import Trainer
from pytorch_lightning.metrics import Metric
from tests.base.boring_model import BoringModel
class SumMetric(Metric):
    """Metric that accumulates the running sum of everything passed to update()."""

    def __init__(self):
        super().__init__()
        # Scalar accumulator; reduced with a sum across processes.
        self.add_state("x", torch.tensor(0.0), dist_reduce_fx="sum")

    def update(self, x):
        """Add `x` to the running total."""
        self.x += x

    def compute(self):
        """Return the accumulated total."""
        return self.x
def test_metric_lightning(tmpdir):
    """Metric updated during training agrees with a plain Python running sum."""

    class TestModel(BoringModel):
        def __init__(self):
            super().__init__()
            self.metric = SumMetric()
            self.sum = 0.0

        def training_step(self, batch, batch_idx):
            batch_total = batch.sum()
            self.metric(batch_total)
            self.sum += batch_total
            return self.step(batch)

        def training_epoch_end(self, outs):
            # The metric and the manual accumulator must match every epoch.
            assert torch.allclose(self.sum, self.metric.compute())
            self.sum = 0.0

    model = TestModel()
    model.val_dataloader = None
    trainer = Trainer(
        default_root_dir=tmpdir,
        limit_train_batches=2,
        limit_val_batches=2,
        max_epochs=2,
        log_every_n_steps=1,
        weights_summary=None,
    )
    trainer.fit(model)
def test_metric_lightning_log(tmpdir):
    """A metric logged with self.log ends up in trainer.logged_metrics."""

    class TestModel(BoringModel):
        def __init__(self):
            super().__init__()
            self.metric = SumMetric()
            self.sum = 0.0

        def training_step(self, batch, batch_idx):
            batch_total = batch.sum()
            self.metric(batch_total)
            self.sum += batch_total
            self.log("sum", self.metric, on_epoch=True, on_step=False)
            return self.step(batch)

    model = TestModel()
    model.val_dataloader = None
    trainer = Trainer(
        default_root_dir=tmpdir,
        limit_train_batches=2,
        limit_val_batches=2,
        max_epochs=1,
        log_every_n_steps=1,
        weights_summary=None,
    )
    trainer.fit(model)

    logged = trainer.logged_metrics
    assert torch.allclose(torch.tensor(logged["sum"]), model.sum)
| 24.716049 | 70 | 0.597902 |
e7c7bc7d85d7fcffb9d9c24d82c0a819ccb22889 | 310 | py | Python | polyaxon/action_manager/utils/discord.py | elyase/polyaxon | 1c19f059a010a6889e2b7ea340715b2bcfa382a0 | [
"MIT"
] | null | null | null | polyaxon/action_manager/utils/discord.py | elyase/polyaxon | 1c19f059a010a6889e2b7ea340715b2bcfa382a0 | [
"MIT"
] | null | null | null | polyaxon/action_manager/utils/discord.py | elyase/polyaxon | 1c19f059a010a6889e2b7ea340715b2bcfa382a0 | [
"MIT"
] | null | null | null | from event_manager.event_context import get_event_context, get_readable_event
def serialize_event_to_context(event):
    """Build a Discord webhook payload describing the given event."""
    event_context = get_event_context(event)
    # No custom avatar is configured; Discord falls back to the webhook default.
    return {
        'content': get_readable_event(event_context),
        'avatar_url': ''
    }
| 22.142857 | 77 | 0.722581 |
d145a9dde4efed62c6a214c96208b0213d9d9a32 | 1,923 | py | Python | jacket/objects/compute/hv_spec.py | bopopescu/jacket | d7ad3147fcb43131098c2a5210847634ff5fb325 | [
"Apache-2.0"
] | null | null | null | jacket/objects/compute/hv_spec.py | bopopescu/jacket | d7ad3147fcb43131098c2a5210847634ff5fb325 | [
"Apache-2.0"
] | null | null | null | jacket/objects/compute/hv_spec.py | bopopescu/jacket | d7ad3147fcb43131098c2a5210847634ff5fb325 | [
"Apache-2.0"
] | 2 | 2016-08-10T02:21:49.000Z | 2020-07-24T01:57:21.000Z | # Copyright (c) 2014 Hewlett-Packard Development Company, L.P.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from oslo_utils import versionutils
from jacket.compute.cloud import hv_type
from jacket.objects.compute import base
from jacket.objects.compute import fields
@base.NovaObjectRegistry.register
class HVSpec(base.NovaObject):
    """Supported-instance triple: (architecture, hypervisor type, VM mode)."""

    # Version 1.0: Initial version
    # Version 1.1: Added 'vz' hypervisor
    # Version 1.2: Added 'lxd' hypervisor
    VERSION = '1.2'

    fields = {
        'arch': fields.ArchitectureField(),
        'hv_type': fields.HVTypeField(),
        'vm_mode': fields.VMModeField(),
    }

    # NOTE(pmurray): for backward compatibility, the supported instance
    # data is stored in the database as a list.
    @classmethod
    def from_list(cls, data):
        """Build an HVSpec from the legacy [arch, hv_type, vm_mode] list form."""
        return cls(arch=data[0], hv_type=data[1], vm_mode=data[2])

    def to_list(self):
        """Serialize back to the legacy [arch, hv_type, vm_mode] list form."""
        return [self.arch, self.hv_type, self.vm_mode]

    def obj_make_compatible(self, primitive, target_version):
        """Downgrade `primitive` for consumers older than 1.1, which knew the
        Virtuozzo hypervisor only under its 'parallels' name."""
        super(HVSpec, self).obj_make_compatible(primitive, target_version)
        version_tuple = versionutils.convert_version_to_tuple(target_version)
        if (version_tuple < (1, 1) and
                primitive.get('hv_type') == hv_type.VIRTUOZZO):
            primitive['hv_type'] = hv_type.PARALLELS
| 36.283019 | 78 | 0.688508 |
a7c582460d994c8fde66c9baaa205d1d9661ef79 | 44,464 | py | Python | src/python/datastore/data_types.py | mi-ac/clusterfuzz | 0b5c023eca9e3aac41faba17da8f341c0ca2ddc7 | [
"Apache-2.0"
] | 1 | 2020-12-23T02:49:09.000Z | 2020-12-23T02:49:09.000Z | src/python/datastore/data_types.py | mi-ac/clusterfuzz | 0b5c023eca9e3aac41faba17da8f341c0ca2ddc7 | [
"Apache-2.0"
] | 2 | 2021-09-28T05:36:03.000Z | 2021-12-13T20:48:34.000Z | src/python/datastore/data_types.py | mi-ac/clusterfuzz | 0b5c023eca9e3aac41faba17da8f341c0ca2ddc7 | [
"Apache-2.0"
] | 1 | 2020-12-21T16:24:36.000Z | 2020-12-21T16:24:36.000Z | # Copyright 2019 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Classes for objects stored in the datastore."""
import re
import six
from google.cloud import ndb
from base import json_utils
from base import utils
from datastore import search_tokenizer
from metrics import logs
from system import environment
# Prefix used when a large testcase is stored in the blobstore.
BLOBSTORE_STACK_PREFIX = 'BLOB_KEY='
# List of builtin fuzzers.
BUILTIN_FUZZERS = ['afl', 'libFuzzer']
# Time to look back to find a corpus backup that is marked public.
CORPUS_BACKUP_PUBLIC_LOOKBACK_DAYS = 30
# Marker to indicate end of crash stacktrace. Anything after that is excluded
# from being stored as part of crash stacktrace (e.g. merge content, etc).
CRASH_STACKTRACE_END_MARKER = 'CRASH OUTPUT ENDS HERE'
# Skips using crash state similarity for these types.
CRASH_TYPES_WITH_UNIQUE_STATE = [
'Missing-library',
'Out-of-memory',
'Overwrites-const-input',
'Timeout',
# V8 correctness failures use metadata from the fuzz test cases as crash
# state. This is not suitable for using levenshtein distance for
# similarity.
'V8 correctness failure',
]
# Maximum size allowed for an appengine entity type.
# Explicily kept slightly lower than 1 MB.
ENTITY_SIZE_LIMIT = 900000
# Minimum number of unreproducible crashes to see before filing it.
FILE_UNREPRODUCIBLE_TESTCASE_MIN_CRASH_THRESHOLD = 100
# Heartbeat wait interval.
HEARTBEAT_WAIT_INTERVAL = 10 * 60
# FIXME: Move this to configuration.
# List of internal sandboxed data types. This gives a warning on testcase
# uploads on unsandboxed job types.
INTERNAL_SANDBOXED_JOB_TYPES = [
'linux_asan_chrome_media', 'linux_asan_chrome_mp',
'linux_asan_chrome_v8_arm', 'mac_asan_chrome', 'windows_asan_chrome'
]
# Time to wait after a report is marked fixed and before filing another similar
# one (hours).
MIN_ELAPSED_TIME_SINCE_FIXED = 2 * 24
# Time to wait for grouping task to finish, before filing the report (hours).
MIN_ELAPSED_TIME_SINCE_REPORT = 3
# Valid name check for fuzzer, job, etc.
NAME_CHECK_REGEX = re.compile(r'^[a-zA-Z0-9_-]+$')
# Regex to match special chars in project name.
SPECIAL_CHARS_REGEX = re.compile('[^a-zA-Z0-9_-]')
# List of supported platforms.
PLATFORMS = [
'LINUX',
'ANDROID',
'CHROMEOS',
'MAC',
'WINDOWS',
'FUCHSIA',
'ANDROID_KERNEL',
'ANDROID_AUTO',
]
# Maximum size allowed for an appengine pubsub request.
# Explicily kept slightly lower than 1 MB.
PUBSUB_REQUEST_LIMIT = 900000
# We store at most 3 stacktraces per Testcase entity (original, second, latest).
STACKTRACE_LENGTH_LIMIT = ENTITY_SIZE_LIMIT // 3
# Maximum size allowed for testcase comments.
# 1MiB (maximum Datastore entity size) - ENTITY_SIZE_LIMIT (our limited entity
# size with breathing room), divided by 2 to leave room for other things in the
# entity. This is around 74KB.
TESTCASE_COMMENTS_LENGTH_LIMIT = (1024 * 1024 - ENTITY_SIZE_LIMIT) // 2
# Maximum number of testcase entities to query in one batch.
TESTCASE_ENTITY_QUERY_LIMIT = 256
# Deadlines for testcase filing, closures and deletions (in days).
DUPLICATE_TESTCASE_NO_BUG_DEADLINE = 3
CLOSE_TESTCASE_WITH_CLOSED_BUG_DEADLINE = 14
FILE_CONSISTENT_UNREPRODUCIBLE_TESTCASE_DEADLINE = 14
NOTIFY_CLOSED_BUG_WITH_OPEN_TESTCASE_DEADLINE = 7
UNREPRODUCIBLE_TESTCASE_NO_BUG_DEADLINE = 7
UNREPRODUCIBLE_TESTCASE_WITH_BUG_DEADLINE = 14
# Chromium specific issue state tracking labels.
CHROMIUM_ISSUE_RELEASEBLOCK_BETA_LABEL = 'ReleaseBlock-Beta'
# TODO(ochang): Find some way to remove these.
CHROMIUM_ISSUE_PREDATOR_AUTO_CC_LABEL = 'Test-Predator-Auto-CC'
CHROMIUM_ISSUE_PREDATOR_AUTO_COMPONENTS_LABEL = 'Test-Predator-Auto-Components'
CHROMIUM_ISSUE_PREDATOR_AUTO_OWNER_LABEL = 'Test-Predator-Auto-Owner'
CHROMIUM_ISSUE_PREDATOR_WRONG_COMPONENTS_LABEL = (
'Test-Predator-Wrong-Components')
CHROMIUM_ISSUE_PREDATOR_WRONG_CL_LABEL = 'Test-Predator-Wrong-CLs'
MISSING_VALUE_STRING = '---'
COVERAGE_INFORMATION_DATE_FORMAT = '%Y%m%d'
def clone_entity(e, **extra_args):
  """Clones a DataStore entity and returns the clone."""
  ent_class = e.__class__
  # Copy every non-computed property value off the source entity; computed
  # properties are derived and cannot be assigned on construction.
  # pylint: disable=protected-access
  props = {
      prop._code_name: prop.__get__(e, ent_class)
      for prop in six.itervalues(ent_class._properties)
      if not isinstance(prop, ndb.ComputedProperty)
  }
  # Explicit overrides win over values copied from the source.
  props.update(extra_args)
  return ent_class(**props)
class SecuritySeverity(object):
  """Enum for Security Severity."""
  CRITICAL = 0
  HIGH = 1
  MEDIUM = 2
  LOW = 3
  MISSING = 4

  @classmethod
  def is_valid(cls, security_severity):
    """Return bool on whether a severity is valid."""
    # MISSING is a placeholder, not an assignable severity.
    valid_severities = (cls.CRITICAL, cls.HIGH, cls.MEDIUM, cls.LOW)
    return security_severity in valid_severities

  @classmethod
  def list(cls):
    """Return the list of severities for a dropdown menu."""
    entries = [
        {'value': cls.CRITICAL, 'name': 'Critical'},
        # High is pre-selected in the UI dropdown.
        {'value': cls.HIGH, 'name': 'High', 'default': True},
        {'value': cls.MEDIUM, 'name': 'Medium'},
        {'value': cls.LOW, 'name': 'Low'},
        {'value': cls.MISSING, 'name': 'Missing'},
    ]
    return entries
# Impact values for security issues.
class SecurityImpact(object):
  """Enum for the release channel impacted by a security bug."""
  STABLE = 0
  BETA = 1
  HEAD = 2
  NONE = 3
  MISSING = 4
# Archive state enums.
class ArchiveStatus(object):
  """Bit flags describing which of a testcase's files are archived."""
  NONE = 0
  FUZZED = 1
  MINIMIZED = 2
  # Both the fuzzed and the minimized testcase are archived.
  ALL = FUZZED | MINIMIZED
# ExternalUserPermission Auto-CC type.
class AutoCCType(object):
  """Enum for the Auto-CC behavior of an ExternalUserPermission."""
  # Don't Auto-CC user.
  NONE = 0
  # Auto-CC user for all issues.
  ALL = 1
  # Auto-CC only for security issues.
  SECURITY = 2
# Type of permission. Used by ExternalUserPermission.
class PermissionEntityKind(object):
  """Enum for the kind of entity a permission applies to."""
  FUZZER = 0
  JOB = 1
  UPLOADER = 2
# Task state string mappings.
class TaskState(object):
  """Status strings used by TaskStatus entities."""
  STARTED = 'started'
  WIP = 'in-progress'
  FINISHED = 'finished'
  ERROR = 'errored out'
  NA = ''
# Build state.
class BuildState(object):
  """Enum for the verification state of a build."""
  UNMARKED = 0
  GOOD = 1
  BAD = 2
class TestcaseVariantStatus(object):
  """Enum for the reproduction status of a testcase variant."""
  PENDING = 0
  REPRODUCIBLE = 1
  FLAKY = 2
  UNREPRODUCIBLE = 3
class Model(ndb.Model):
  """Cache-less NDB model.

  Base class for all models in this module. Disables NDB's in-context cache
  and memcache so that reads always hit Datastore directly.
  """
  _use_cache = False
  _use_memcache = False
class Blacklist(Model):
  """Represents global blacklist to track entries for suppressions files."""
  # Function name to suppress.
  function_name = ndb.StringProperty()
  # Tool name (e.g. the sanitizer this suppression applies to).
  tool_name = ndb.StringProperty()
  # Testcase ID.
  testcase_id = ndb.IntegerProperty()
class Fuzzer(Model):
  """Represents a fuzzer."""
  # Regex that fuzzer names must match.
  VALID_NAME_REGEX = NAME_CHECK_REGEX
  # Last update time.
  timestamp = ndb.DateTimeProperty()
  # Fuzzer Name.
  name = ndb.StringProperty()
  # The name of the archive that the user uploaded.
  filename = ndb.StringProperty()
  # Blobstore key for this fuzzer.
  blobstore_key = ndb.StringProperty()
  # String representation of the file size.
  file_size = ndb.StringProperty()
  # Fuzzer's main executable path, relative to root.
  executable_path = ndb.StringProperty()
  # Revision number of the fuzzer.
  revision = ndb.IntegerProperty()
  # Fuzzer's source (for accountability).
  source = ndb.StringProperty()
  # Testcase timeout.
  timeout = ndb.IntegerProperty()
  # Supported platforms.
  supported_platforms = ndb.StringProperty()
  # Custom script that should be used to launch chrome for this fuzzer.
  launcher_script = ndb.StringProperty()
  # Result from the last fuzzer run showing the number of testcases generated.
  result = ndb.StringProperty()
  # Last result update timestamp.
  result_timestamp = ndb.DateTimeProperty()
  # Console output from last fuzzer run.
  console_output = ndb.TextProperty()
  # Return code from last fuzzer run.
  return_code = ndb.IntegerProperty()
  # Blobstore key for the sample testcase generated by the fuzzer.
  sample_testcase = ndb.StringProperty()
  # Job types for this fuzzer.
  jobs = ndb.StringProperty(repeated=True)
  # Is the fuzzer coming from an external contributor? Useful for adding
  # reward flags.
  external_contribution = ndb.BooleanProperty(default=False)
  # Max testcases to generate for this fuzzer.
  max_testcases = ndb.IntegerProperty()
  # Does it run un-trusted content? Examples include running live sites.
  untrusted_content = ndb.BooleanProperty(default=False)
  # Data bundle name.
  data_bundle_name = ndb.StringProperty(default='')
  # Additional environment variables that need to be set for this fuzzer.
  additional_environment_string = ndb.TextProperty()
  # Column specification for stats.
  stats_columns = ndb.TextProperty()
  # Helpful descriptions for the stats_columns. In a yaml format.
  stats_column_descriptions = ndb.TextProperty(indexed=False)
  # Whether this is a builtin fuzzer.
  builtin = ndb.BooleanProperty(indexed=False, default=False)
  # Whether this is a differential fuzzer.
  differential = ndb.BooleanProperty(default=False)
  # If this flag is set, fuzzer generates the testcase in the larger directory
  # on disk |FUZZ_INPUTS_DISK|, rather than smaller tmpfs one (FUZZ_INPUTS).
  has_large_testcases = ndb.BooleanProperty(default=False)
class BuildCrashStatsJobHistory(Model):
  """Represents the record of build_crash_stats run."""
  # End time in hours from epoch, inclusively.
  end_time_in_hours = ndb.IntegerProperty()
class Testcase(Model):
  """Represents a single testcase."""
  # Crash on an invalid read/write.
  crash_type = ndb.StringProperty()
  # Crashing address.
  crash_address = ndb.TextProperty()
  # First x stack frames.
  crash_state = ndb.StringProperty()
  # Complete stacktrace.
  crash_stacktrace = ndb.TextProperty(indexed=False)
  # Last tested crash stacktrace using the latest revision.
  last_tested_crash_stacktrace = ndb.TextProperty(indexed=False)
  # Blobstore keys for various things like original testcase, minimized
  # testcase, etc.
  fuzzed_keys = ndb.TextProperty()
  minimized_keys = ndb.TextProperty()
  minidump_keys = ndb.TextProperty()
  # Tracking issue tracker bug. One bug number per line (future extension).
  bug_information = ndb.StringProperty()
  # Regression range.
  regression = ndb.StringProperty(default='')
  # Revisions where this issue has been fixed.
  fixed = ndb.StringProperty(default='')
  # Is it a security bug?
  security_flag = ndb.BooleanProperty(default=False)
  # Security severity of the bug.
  security_severity = ndb.IntegerProperty(indexed=False)
  # Did the bug only reproduce once?
  one_time_crasher_flag = ndb.BooleanProperty(default=False)
  # Any additional comments.
  comments = ndb.TextProperty(default='', indexed=False)
  # Revision that we discovered the crash in.
  crash_revision = ndb.IntegerProperty()
  # The file on the bot that generated the testcase.
  absolute_path = ndb.TextProperty()
  # Minimized argument list.
  minimized_arguments = ndb.TextProperty(default='', indexed=False)
  # Window argument (usually width, height, top, left, etc).
  window_argument = ndb.TextProperty(default='', indexed=False)
  # Type of job associated with this testcase.
  job_type = ndb.StringProperty()
  # Original job queue used for tasks created for this testcase.
  queue = ndb.TextProperty()
  # State representing whether the fuzzed or minimized testcases are archived.
  # Uses the ArchiveStatus bit flags defined above.
  archive_state = ndb.IntegerProperty(default=0, indexed=False)
  # File name of the original uploaded archive.
  archive_filename = ndb.TextProperty()
  # Is this a binary file?
  binary_flag = ndb.BooleanProperty(default=False, indexed=False)
  # Timestamp.
  timestamp = ndb.DateTimeProperty()
  # Does the testcase crash stack vary b/w crashes?
  flaky_stack = ndb.BooleanProperty(default=False, indexed=False)
  # Do we need to test this testcase using an HTTP/HTTPS server?
  http_flag = ndb.BooleanProperty(default=False, indexed=False)
  # Name of the fuzzer used to generate this testcase.
  fuzzer_name = ndb.StringProperty()
  # Status of this testcase (pending, processed, unreproducible, etc).
  status = ndb.StringProperty(default='Processed')
  # Id of the testcase that this is marked as a duplicate of.
  duplicate_of = ndb.IntegerProperty(indexed=False)
  # Flag indicating whether or not the testcase has been symbolized.
  symbolized = ndb.BooleanProperty(default=False, indexed=False)
  # Id for this testcase's associated group.
  group_id = ndb.IntegerProperty(default=0)
  # Tracking issue tracker bug for this testcase group.
  group_bug_information = ndb.IntegerProperty(default=0)
  # Fake user interaction sequences like key clicks, mouse movements, etc.
  gestures = ndb.TextProperty(repeated=True)
  # ASAN redzone size in bytes.
  redzone = ndb.IntegerProperty(default=128, indexed=False)
  # Flag indicating if UBSan detection should be disabled. This is needed for
  # cases when ASan and UBSan are bundled in the same build configuration
  # and we need to disable UBSan in some runs to find the potentially more
  # interesting ASan bugs.
  disable_ubsan = ndb.BooleanProperty(default=False)
  # Whether testcase is open.
  open = ndb.BooleanProperty(default=True)
  # Adjusts timeout based on multiplier value.
  timeout_multiplier = ndb.FloatProperty(default=1.0, indexed=False)
  # Additional metadata stored as a JSON object. This should be used for
  # properties that are not commonly accessed and do not need to be indexed.
  # Accessed through get_metadata/set_metadata/delete_metadata below.
  additional_metadata = ndb.TextProperty(indexed=False)
  # Boolean attribute indicating if cleanup triage needs to be done.
  triaged = ndb.BooleanProperty(default=False)
  # Project name associated with this test case.
  project_name = ndb.StringProperty()
  # keywords is used for searching.
  keywords = ndb.StringProperty(repeated=True)
  # Whether testcase has a bug (either bug_information or
  # group_bug_information).
  has_bug_flag = ndb.BooleanProperty()
  # Indices for bug_information and group_bug_information.
  bug_indices = ndb.StringProperty(repeated=True)
  # Overridden fuzzer name because actual fuzzer name can be different in many
  # scenarios (libfuzzer, afl, etc).
  overridden_fuzzer_name = ndb.StringProperty()
  # Platform (e.g. windows, linux, android).
  platform = ndb.StringProperty()
  # Platform id (e.g. windows, linux, android:hammerhead:l).
  # For Android, includes device type and underlying OS version.
  platform_id = ndb.StringProperty()
  # Impact indices for searching.
  impact_indices = ndb.StringProperty(repeated=True)
  # Whether or not a testcase is a duplicate of other testcase.
  is_a_duplicate_flag = ndb.BooleanProperty()
  # Whether or not a testcase is the leader of its group.
  # If the testcase is not in a group, it's the leader of a group of 1.
  # The default is false because we prefer not to show crashes until we are
  # sure. And group_task will correctly set the value within 30 minutes.
  is_leader = ndb.BooleanProperty(default=False)
  # Fuzzer name indices
  fuzzer_name_indices = ndb.StringProperty(repeated=True)
  # The impacted version indices (including both beta and stable).
  impact_version_indices = ndb.StringProperty(repeated=True)
  # The impacted stable version.
  impact_stable_version = ndb.StringProperty()
  # The impacted stable version indices.
  impact_stable_version_indices = ndb.StringProperty(repeated=True)
  # The impacted stable version is merely probable (not definite). Because
  # for a non-asan build, we don't have a stable/beta build. Therefore, we
  # make an intelligent guess on the version.
  impact_stable_version_likely = ndb.BooleanProperty()
  # The impacted beta version.
  impact_beta_version = ndb.StringProperty()
  # The impacted beta version indices.
  impact_beta_version_indices = ndb.StringProperty(repeated=True)
  # The impacted beta version is merely probable (not definite). See the
  # comment on impact_stable_version_likely.
  impact_beta_version_likely = ndb.BooleanProperty()
  # Whether or not impact task has been run on this testcase.
  is_impact_set_flag = ndb.BooleanProperty()
  # Uploader email address.
  uploader_email = ndb.StringProperty()
  def has_blame(self):
    """Return whether this testcase's project supports blame (chromium only)."""
    return self.project_name == 'chromium'
  def has_impacts(self):
    """Return whether impact information applies (reproducible chromium)."""
    return self.project_name == 'chromium' and not self.one_time_crasher_flag
  def impacts_production(self):
    """Return whether the crash impacts a stable or beta (production) version."""
    return bool(self.impact_stable_version) or bool(self.impact_beta_version)
  def is_status_unreproducible(self):
    """Return whether the status marks this testcase as unreproducible."""
    return self.status and self.status.startswith('Unreproducible')
  def is_crash(self):
    """Return whether a crash state has been recorded."""
    return bool(self.crash_state)
  def populate_indices(self):
    """Populate keywords for fast test case list searching."""
    self.keywords = list(
        search_tokenizer.tokenize(self.crash_state)
        | search_tokenizer.tokenize(self.crash_type)
        | search_tokenizer.tokenize(self.fuzzer_name)
        | search_tokenizer.tokenize(self.overridden_fuzzer_name)
        | search_tokenizer.tokenize(self.job_type)
        | search_tokenizer.tokenize(self.platform_id))
    self.bug_indices = search_tokenizer.tokenize_bug_information(self)
    self.has_bug_flag = bool(self.bug_indices)
    self.is_a_duplicate_flag = bool(self.duplicate_of)
    # De-duplicate the two fuzzer names and drop empty values.
    fuzzer_name_indices = list(
        set([self.fuzzer_name, self.overridden_fuzzer_name]))
    self.fuzzer_name_indices = [f for f in fuzzer_name_indices if f]
    # If the impact task hasn't been run (aka is_impact_set_flag=False) OR
    # if impact isn't applicable (aka has_impacts() is False), we wipe all
    # the impact fields' indices.
    if self.has_impacts() and self.is_impact_set_flag:
      self.impact_stable_version_indices = (
          search_tokenizer.tokenize_impact_version(self.impact_stable_version))
      self.impact_beta_version_indices = (
          search_tokenizer.tokenize_impact_version(self.impact_beta_version))
      self.impact_version_indices = list(
          set(self.impact_stable_version_indices +
              self.impact_beta_version_indices))
      if self.impact_beta_version:
        self.impact_version_indices.append('beta')
      if self.impact_stable_version:
        self.impact_version_indices.append('stable')
      if not self.impacts_production():
        self.impact_version_indices.append('head')
    else:
      self.impact_version_indices = []
      self.impact_stable_version_indices = []
      self.impact_beta_version_indices = []
  def _pre_put_hook(self):
    """Pre-put hook. Refreshes search indices before every write."""
    self.populate_indices()
  def _post_put_hook(self, _):
    """Post-put hook. Logs the update for auditability."""
    if not self.key:
      # Failed put. An exception will be thrown automatically afterwards.
      return
    logs.log('Updated testcase %d (bug %s).' % (self.key.id(),
                                                self.bug_information or '-'))
  def set_impacts_as_na(self):
    """Reset all impact fields to the not-applicable state."""
    self.impact_stable_version = self.impact_beta_version = None
    self.impact_stable_version_likely = self.impact_beta_version_likely = False
    self.is_impact_set_flag = False
  def _ensure_metadata_is_cached(self):
    """Ensure that the metadata for this has been cached."""
    # metadata_cache is a plain attribute holding the parsed JSON dict; it is
    # populated lazily on first metadata access.
    if hasattr(self, 'metadata_cache'):
      return
    try:
      cache = json_utils.loads(self.additional_metadata)
    except (TypeError, ValueError):
      # additional_metadata is None or not valid JSON; start empty.
      cache = {}
    setattr(self, 'metadata_cache', cache)
  def get_metadata(self, key=None, default=None):
    """Get metadata for a test case. Slow on first access."""
    self._ensure_metadata_is_cached()
    # If no key is specified, return all metadata.
    if not key:
      return self.metadata_cache
    try:
      return self.metadata_cache[key]
    except KeyError:
      return default
  def set_metadata(self, key, value, update_testcase=True):
    """Set metadata for a test case."""
    self._ensure_metadata_is_cached()
    self.metadata_cache[key] = value
    self.additional_metadata = json_utils.dumps(self.metadata_cache)
    if update_testcase:
      self.put()
  def delete_metadata(self, key, update_testcase=True):
    """Remove metadata key for a test case."""
    self._ensure_metadata_is_cached()
    # Make sure that the key exists in cache. If not, no work to do here.
    if key not in self.metadata_cache:
      return
    del self.metadata_cache[key]
    self.additional_metadata = json_utils.dumps(self.metadata_cache)
    if update_testcase:
      self.put()
  def actual_fuzzer_name(self):
    """Actual fuzzer name, uses one from overridden attribute if available."""
    return self.overridden_fuzzer_name or self.fuzzer_name
  def get_fuzz_target(self):
    """Get the associated FuzzTarget entity for this test case."""
    name = self.actual_fuzzer_name()
    if not name:
      return None
    target = ndb.Key(FuzzTarget, name).get()
    # NOTE(review): if the FuzzTarget entity is missing, |target| is None and
    # the attribute assignment below would raise — presumably callers only hit
    # this path when the entity exists; confirm.
    if environment.get_value('ORIGINAL_JOB_NAME'):
      # Overridden engine (e.g. for minimization).
      target.engine = environment.get_engine_for_job()
    return target
class TestcaseGroup(Model):
  """Group for a set of testcases."""
  # No properties; presumably the entity key id serves as the group id
  # referenced by Testcase.group_id — confirm against callers.
class DataBundle(Model):
  """Represents a data bundle associated with a fuzzer."""
  # Regex that bundle names must match.
  VALID_NAME_REGEX = NAME_CHECK_REGEX
  # The data bundle's name (important for identifying shared bundles).
  name = ndb.StringProperty()
  # Name of cloud storage bucket on GCS.
  bucket_name = ndb.StringProperty()
  # Data bundle's source (for accountability).
  # TODO(ochang): Remove.
  source = ndb.StringProperty()
  # If data bundle can be unpacked locally or needs nfs.
  is_local = ndb.BooleanProperty(default=True)
  # Creation timestamp.
  timestamp = ndb.DateTimeProperty()
  # Whether or not bundle should be synced to worker instead.
  # Fuzzer scripts are usually run on trusted hosts, so data bundles are synced
  # there. In libFuzzer's case, we want the bundle to be on the same machine as
  # where the libFuzzer binary will run (untrusted).
  sync_to_worker = ndb.BooleanProperty(default=False)
class Config(Model):
  """Configuration (singleton-style global settings for the deployment)."""
  # Hash of the previously applied configuration, used for change detection.
  previous_hash = ndb.StringProperty(default='')
  # Project's url.
  url = ndb.StringProperty(default='')
  # Issue tracker client authentication parameters.
  client_credentials = ndb.TextProperty(default='')
  # Jira url and credentials.
  jira_url = ndb.StringProperty(default='')
  jira_credentials = ndb.TextProperty(default='')
  # Build apiary authentication parameters.
  build_apiary_service_account_private_key = ndb.TextProperty(default='')
  # Google test account for login, gms testing, etc.
  test_account_email = ndb.StringProperty(default='')
  test_account_password = ndb.StringProperty(default='')
  # Privileged users.
  privileged_users = ndb.TextProperty(default='')
  # Blacklisted users.
  blacklisted_users = ndb.TextProperty(default='')
  # Admin contact string.
  contact_string = ndb.StringProperty(default='')
  # Component to repository mappings.
  component_repository_mappings = ndb.TextProperty(default='')
  # URL for help page for reproducing issues.
  reproduction_help_url = ndb.StringProperty(default='')
  # Documentation url.
  documentation_url = ndb.StringProperty(default='')
  # Bug report url.
  bug_report_url = ndb.StringProperty(default='')
  # Platforms that coverage is supported for.
  platform_group_mappings = ndb.TextProperty(default='')
  # More relaxed restrictions: allow CC'ed users and reporters of issues to view
  # testcase details.
  relax_testcase_restrictions = ndb.BooleanProperty(default=False)
  # More relaxed restrictions: allow domain users to access both security and
  # functional bugs.
  relax_security_bug_restrictions = ndb.BooleanProperty(default=False)
  # Coverage reports bucket.
  coverage_reports_bucket = ndb.StringProperty(default='')
  # For GitHub API.
  github_credentials = ndb.StringProperty(default='')
  # OAuth2 client id for the reproduce tool.
  reproduce_tool_client_id = ndb.StringProperty(default='')
  # OAuth2 client secret for the reproduce tool.
  reproduce_tool_client_secret = ndb.StringProperty(default='')
  # Pub/Sub topics for the Predator service.
  predator_crash_topic = ndb.StringProperty(default='')
  predator_result_topic = ndb.StringProperty(default='')
  # Wifi connection information.
  wifi_ssid = ndb.StringProperty(default='')
  wifi_password = ndb.StringProperty(default='')
  # SendGrid config.
  sendgrid_api_key = ndb.StringProperty(default='')
  sendgrid_sender = ndb.StringProperty(default='')
class TestcaseUploadMetadata(Model):
  """Metadata associated with a user uploaded test case."""
  # Timestamp.
  timestamp = ndb.DateTimeProperty()
  # Testcase filename.
  filename = ndb.StringProperty()
  # Current status of the testcase.
  status = ndb.StringProperty()
  # Uploader email address.
  uploader_email = ndb.StringProperty()
  # Name of the bot that ran analyze on this testcase.
  bot_name = ndb.StringProperty()
  # Id of the associated testcase.
  testcase_id = ndb.IntegerProperty()
  # Id of the testcase that this is marked as a duplicate of.
  duplicate_of = ndb.IntegerProperty()
  # Blobstore key for the testcase associated with this object.
  blobstore_key = ndb.StringProperty()
  # Testcase timeout.
  timeout = ndb.IntegerProperty()
  # Is this a single testcase bundled in an archive?
  bundled = ndb.BooleanProperty()
  # Path to the file in the archive.
  path_in_archive = ndb.TextProperty()
  # Original blobstore key for this object (used for archives).
  original_blobstore_key = ndb.StringProperty()
  # Security flag.
  security_flag = ndb.BooleanProperty(default=False)
  # Number of retries for this testcase.
  retries = ndb.IntegerProperty()
  # Flag to indicate whether bug title should be updated or not.
  bug_summary_update_flag = ndb.BooleanProperty()
  # Flag to indicate if we are running in quiet mode (e.g. bug updates).
  quiet_flag = ndb.BooleanProperty()
  # Additional testcase metadata dict stored as a string.
  additional_metadata_string = ndb.TextProperty(indexed=False)
class JobTemplate(Model):
  """Named template of environment variables shared between jobs.

  Referenced by name from Job.templates and merged in Job.get_environment.
  """
  # Job template name.
  name = ndb.StringProperty()
  # Environment string.
  environment_string = ndb.TextProperty()
class Job(Model):
  """Definition of a job type used by the bots."""
  # Regex that job names must match.
  VALID_NAME_REGEX = NAME_CHECK_REGEX
  # Job type name.
  name = ndb.StringProperty()
  # Job environment string.
  environment_string = ndb.TextProperty()
  # The platform that this job can run on.
  platform = ndb.StringProperty()
  # Blobstore key of the custom binary for this job.
  custom_binary_key = ndb.StringProperty()
  # Filename for the custom binary.
  custom_binary_filename = ndb.StringProperty()
  # Revision of the custom binary.
  custom_binary_revision = ndb.IntegerProperty()
  # Description of the job.
  description = ndb.TextProperty()
  # Template to use, if any.
  templates = ndb.StringProperty(repeated=True)
  # Project name.
  project = ndb.StringProperty()
  # Keywords is used for searching.
  keywords = ndb.StringProperty(repeated=True)

  def get_environment(self):
    """Get the environment as a dict for this job, including any environment
    variables in its template."""
    if not self.templates:
      return environment.parse_environment_definition(self.environment_string)

    job_environment = {}
    for template_name in self.templates:
      template = JobTemplate.query(JobTemplate.name == template_name).get()
      if not template:
        # Unknown template names are silently skipped.
        continue

      template_environment = environment.parse_environment_definition(
          template.environment_string)
      job_environment.update(template_environment)

    # The job's own environment string takes precedence over its templates.
    environment_overrides = environment.parse_environment_definition(
        self.environment_string)
    job_environment.update(environment_overrides)
    return job_environment

  def get_environment_string(self):
    """Get the environment string for this job, including any environment
    variables in its template. Avoid using this if possible."""
    job_environment = self.get_environment()
    # Build with a single join rather than repeated `+=`, which is quadratic
    # in the number of environment variables.
    return ''.join('%s = %s\n' % (key, value)
                   for key, value in six.iteritems(job_environment))

  def populate_indices(self):
    """Populate keywords for fast job searching."""
    self.keywords = list(
        search_tokenizer.tokenize(self.name)
        | search_tokenizer.tokenize(self.project))

  def _pre_put_hook(self):
    """Pre-put hook. Derives the project name and refreshes search indices."""
    self.project = self.get_environment().get('PROJECT_NAME',
                                              utils.default_project_name())
    self.populate_indices()
class CSRFToken(Model):
  """Token used to prevent CSRF attacks."""
  # Value of this token.
  value = ndb.StringProperty()
  # Expiration time for this token.
  expiration_time = ndb.DateTimeProperty()
  # User (email) that this token is associated with.
  user_email = ndb.StringProperty()
class Heartbeat(Model):
  """Bot health metadata."""
  # Name of the bot.
  bot_name = ndb.StringProperty()
  # Time of the last heartbeat.
  last_beat_time = ndb.DateTimeProperty()
  # Task payload containing information on current task execution.
  task_payload = ndb.StringProperty()
  # Expected end time for task.
  task_end_time = ndb.DateTimeProperty()
  # Source version (for accountability).
  source_version = ndb.StringProperty()
  # Platform id (esp important for Android platform for OS version).
  platform_id = ndb.StringProperty()
  # Keywords is used for searching.
  keywords = ndb.StringProperty(repeated=True)
  def populate_indices(self):
    """Populate keywords for fast bot searching."""
    self.keywords = list(
        search_tokenizer.tokenize(self.bot_name)
        | search_tokenizer.tokenize(self.task_payload))
  def _pre_put_hook(self):
    """Pre-put hook. Refreshes search indices before every write."""
    self.populate_indices()
class Notification(Model):
  """Tracks whether or not an email has been sent to a user for a test case."""
  # Testcase id associated with this notification.
  testcase_id = ndb.IntegerProperty()
  # User (email) that this notification was sent to.
  user_email = ndb.StringProperty()
class BundledArchiveMetadata(Model):
  """Metadata needed for multiple test cases uploaded in an archive."""
  # Blobstore key of the archive.
  blobstore_key = ndb.StringProperty()
  # Timeout in seconds for each testcase in the bundle.
  timeout = ndb.IntegerProperty()
  # Job queue for the analyze tasks created for this bundle.
  job_queue = ndb.StringProperty()
  # Job type that should be used for all testcases in this bundle.
  job_type = ndb.StringProperty()
  # Flag indicating whether or not these testcases need http.
  http_flag = ndb.BooleanProperty()
  # File name of the uploaded archive.
  archive_filename = ndb.StringProperty()
  # Email address of the uploader of the archive.
  uploader_email = ndb.StringProperty()
  # Fake user interaction sequences like key clicks, mouse movements, etc.
  gestures = ndb.StringProperty(repeated=True)
  # Optional. Revision that we discovered the crash in.
  crash_revision = ndb.IntegerProperty()
  # Optional. Additional arguments.
  additional_arguments = ndb.StringProperty()
  # Optional. Bug information.
  bug_information = ndb.StringProperty()
  # Optional. Platform id, e.g. android:shamu.
  platform_id = ndb.StringProperty()
  # Optional. App launch command. e.g. shell am start ...
  app_launch_command = ndb.StringProperty()
  # Fuzzer name.
  fuzzer_name = ndb.StringProperty()
  # Overridden fuzzer name because actual fuzzer name can be different in many
  # scenarios (libfuzzer, afl, etc).
  overridden_fuzzer_name = ndb.StringProperty()
  # Binary name for fuzz target (only applicable to libFuzzer, AFL).
  fuzzer_binary_name = ndb.StringProperty()
class TaskStatus(Model):
  """Information about task status."""
  # Bot name.
  bot_name = ndb.StringProperty()
  # Status. One of the TaskState strings defined above.
  status = ndb.StringProperty()
  # Time of creation or last update time.
  time = ndb.DateTimeProperty()
class BuildMetadata(Model):
  """Metadata associated with a particular archived build."""
  # Job type that this build belongs to.
  job_type = ndb.StringProperty()
  # Revision of the build.
  revision = ndb.IntegerProperty()
  # Good build or bad build.
  bad_build = ndb.BooleanProperty(default=False)
  # Stdout and stderr.
  console_output = ndb.TextProperty()
  # Bot name.
  bot_name = ndb.StringProperty()
  # Symbol data.
  symbols = ndb.StringProperty()
  # Creation timestamp.
  timestamp = ndb.DateTimeProperty()
class ReportMetadata(Model):
  """Metadata associated with a crash report."""
  # Job type from testcase.
  job_type = ndb.StringProperty()
  # Revision of build from report.
  crash_revision = ndb.IntegerProperty(default=-1)
  # Has this report been successfully uploaded?
  is_uploaded = ndb.BooleanProperty(default=False)
  # Product.
  product = ndb.StringProperty(default='')
  # Version.
  version = ndb.TextProperty(default='')
  # Key to minidump previously written to blobstore.
  minidump_key = ndb.TextProperty(default='')
  # Processed crash bytes. BlobProperty values are bytes, so the default must
  # be b'' rather than '' (a str default fails BlobProperty validation on
  # Python 3; on Python 2 the two are identical).
  serialized_crash_stack_frames = ndb.BlobProperty(default=b'', indexed=False)
  # Id of the associated testcase.
  testcase_id = ndb.StringProperty(default='')
  # Id of the associated bot.
  bot_id = ndb.TextProperty(default='')
  # Optional upload params, stored as a JSON object.
  optional_params = ndb.TextProperty(indexed=False)
  # Report id from crash/.
  crash_report_id = ndb.StringProperty()
class Lock(Model):
  """Lock entity."""
  # Expiration time for the lock.
  expiration_time = ndb.DateTimeProperty()
  # The bot name denoting the holder of the lock.
  holder = ndb.StringProperty()
class FuzzTarget(Model):
  """Fuzz target. Keyed by its fully qualified name (see _pre_put_hook)."""
  # The engine this target is a child of.
  engine = ndb.StringProperty()
  # Project name.
  project = ndb.StringProperty()
  # Binary name.
  binary = ndb.StringProperty()
  def _pre_put_hook(self):
    """Pre-put hook. Forces the key to the fully qualified target name."""
    self.key = ndb.Key(FuzzTarget, self.fully_qualified_name())
  def fully_qualified_name(self):
    """Get the fully qualified name for this fuzz target."""
    return fuzz_target_fully_qualified_name(self.engine, self.project,
                                            self.binary)
  def project_qualified_name(self):
    """Get the name qualified by project."""
    return fuzz_target_project_qualified_name(self.project, self.binary)
def fuzz_target_fully_qualified_name(engine, project, binary):
  """Get a fuzz target's fully qualified name (engine-prefixed)."""
  project_qualified = fuzz_target_project_qualified_name(project, binary)
  return '%s_%s' % (engine, project_qualified)
def normalized_name(name):
  """Return normalized name with special chars like slash, colon, etc normalized
  to hyphen(-). This is important as otherwise these chars break local and cloud
  storage paths."""
  hyphenated = SPECIAL_CHARS_REGEX.sub('-', name)
  # Drop any leading/trailing hyphens introduced by the substitution.
  return hyphenated.strip('-')
def fuzz_target_project_qualified_name(project, binary):
  """Get a fuzz target's project qualified name."""
  normalized_binary = normalized_name(binary)
  # No prefix when there is no project, or when it's the default project.
  if not project or project == utils.default_project_name():
    return normalized_binary

  prefix = normalized_name(project) + '_'
  if normalized_binary.startswith(prefix):
    # Already qualified; don't double-prefix.
    return normalized_binary

  return prefix + normalized_binary
class FuzzTargetsCount(Model):
  """Fuzz targets count for every job. Key IDs are the job name."""
  # Number of fuzz targets for the job.
  count = ndb.IntegerProperty(indexed=False)
class FuzzTargetJob(Model):
  """Mapping between fuzz target and jobs with additional metadata for
  selection. Keyed by '<fuzz_target_name>/<job>' (see _pre_put_hook)."""
  # Fully qualified fuzz target name.
  fuzz_target_name = ndb.StringProperty()
  # Job this target ran as.
  job = ndb.StringProperty()
  # Engine this ran as.
  engine = ndb.StringProperty()
  # Relative frequency with which to select this fuzzer.
  weight = ndb.FloatProperty(default=1.0)
  # Approximate last time this target was run.
  last_run = ndb.DateTimeProperty()
  def _pre_put_hook(self):
    """Pre-put hook. Forces the key to the target/job composite name."""
    self.key = ndb.Key(FuzzTargetJob,
                       fuzz_target_job_key(self.fuzz_target_name, self.job))
class FuzzStrategyProbability(Model):
  """Mapping between fuzz strategies and probabilities with which they
  should be selected."""
  # Strategy name.
  strategy_name = ndb.StringProperty()
  # Selection probability (expected range [0, 1] — confirm with writers).
  probability = ndb.FloatProperty()
  # Engine this strategy applies to.
  engine = ndb.StringProperty()
def fuzz_target_job_key(fuzz_target_name, job):
  """Return the key for FuzzTargetJob."""
  return '%s/%s' % (fuzz_target_name, job)
class ExternalUserPermission(Model):
  """Permissions for external users."""
  # Email user is authenticated as.
  email = ndb.StringProperty()
  # Type of |entity_name|. Can be one of the values of PermissionEntityKind.
  entity_kind = ndb.IntegerProperty()
  # Name of the entity that user is allowed to view.
  entity_name = ndb.StringProperty()
  # Whether or not |entity_name| is a prefix.
  is_prefix = ndb.BooleanProperty(default=False)
  # Auto CC type. One of the values of AutoCCType.
  auto_cc = ndb.IntegerProperty()
class FiledBug(Model):
  """Metadata information for issues that were filed automatically."""
  # Timestamp when the issue was filed.
  timestamp = ndb.DateTimeProperty()
  # ID of the test case that is associated with the filed issue.
  testcase_id = ndb.IntegerProperty()
  # Tracking issue tracker bug for this testcase.
  bug_information = ndb.IntegerProperty(default=0)
  # Group ID associated with this issue.
  group_id = ndb.IntegerProperty()
  # Crash type for easy reference.
  crash_type = ndb.StringProperty()
  # Crash state for easy reference.
  crash_state = ndb.StringProperty()
  # Is it a security bug?
  security_flag = ndb.BooleanProperty()
  # Platform id.
  platform_id = ndb.StringProperty()
class CoverageInformation(Model):
  """Coverage info. Keyed by '<fuzzer>-<YYYYMMDD>' (see _pre_put_hook)."""
  # Date of the coverage report (defaults to creation date).
  date = ndb.DateProperty(auto_now_add=True)
  # Project qualified fuzzer name.
  fuzzer = ndb.StringProperty()
  # Function coverage information.
  functions_covered = ndb.IntegerProperty()
  functions_total = ndb.IntegerProperty()
  # Edge coverage information.
  edges_covered = ndb.IntegerProperty()
  edges_total = ndb.IntegerProperty()
  # Corpus size information.
  corpus_size_units = ndb.IntegerProperty()
  corpus_size_bytes = ndb.IntegerProperty()
  corpus_location = ndb.StringProperty()
  # Corpus backup information.
  corpus_backup_location = ndb.StringProperty()
  # Quarantine size information.
  quarantine_size_units = ndb.IntegerProperty()
  quarantine_size_bytes = ndb.IntegerProperty()
  quarantine_location = ndb.StringProperty()
  # Link to the HTML report.
  html_report_url = ndb.StringProperty()
  def _pre_put_hook(self):
    """Pre-put hook. Forces the key to the fuzzer/date composite name."""
    self.key = ndb.Key(CoverageInformation,
                       coverage_information_key(self.fuzzer, self.date))
class CorpusTag(Model):
  """Corpus Tags for sharing corpora between fuzz targets."""
  # The tag name.
  tag = ndb.StringProperty()
  # Fully qualified name of the fuzz target the tag applies to.
  fully_qualified_fuzz_target_name = ndb.StringProperty()
def coverage_information_date_to_string(date):
  """Returns string representation of the date in a format used for coverage."""
  # Equivalent to date.strftime(...); format() delegates to date.__format__.
  return format(date, COVERAGE_INFORMATION_DATE_FORMAT)
def coverage_information_key(project_qualified_fuzzer_name, date):
  """Constructs an ndb key for CoverageInformation entity."""
  return '%s-%s' % (project_qualified_fuzzer_name,
                    coverage_information_date_to_string(date))
class Trial(Model):
  """Trials for specific binaries.

  A trial appends |app_args| to invocations of |app_name| with the given
  selection |probability|.
  """
  # App name that this trial is applied to. E.g. "d8" or "chrome".
  app_name = ndb.StringProperty()
  # Chance to select this set of arguments. Zero to one.
  probability = ndb.FloatProperty()
  # Additional arguments to apply if selected.
  app_args = ndb.TextProperty()
# TODO(ochang): Make this generic.
class OssFuzzProject(Model):
  """Represents a project that has been set up for OSS-Fuzz."""
  # Name of the project.
  name = ndb.StringProperty()
  # Whether or not the project should run on high end hosts.
  high_end = ndb.BooleanProperty(default=False)
  # Weight for CPU distribution. This is set by admins.
  cpu_weight = ndb.FloatProperty(default=1.0)
  # The disk size to use (overrides the default).
  disk_size_gb = ndb.IntegerProperty()
  # Service account for this project.
  service_account = ndb.StringProperty()
  # CCs for the project (one entry per address; repeated property).
  ccs = ndb.StringProperty(repeated=True)
class OssFuzzProjectInfo(Model):
  """Set up information for a project (cpu allocation, instance groups, service
  accounts)."""

  class ClusterInfo(Model):
    """Cpu allocation information for a project in a zone."""
    # The cluster for the CPU allocation.
    cluster = ndb.StringProperty()
    # The number of allocated CPUs in this cluster.
    cpu_count = ndb.IntegerProperty(default=0)
    # The GCE zone for this cluster.
    gce_zone = ndb.StringProperty()

  # Name of the project.
  name = ndb.StringProperty()
  # Information about CPUs in each cluster.
  clusters = ndb.StructuredProperty(ClusterInfo, repeated=True)

  def get_cluster_info(self, name):
    """Return the ClusterInfo whose |cluster| equals |name|, or None."""
    return next((info for info in self.clusters if info.cluster == name), None)
class HostWorkerAssignment(Model):
  """Host worker assignment information.

  Maps one worker slot (|host_name|, |instance_num|) to a worker instance
  and the project it serves.
  """
  # The host instance name.
  host_name = ndb.StringProperty()
  # The instance number (0 to WORKERS_PER_HOST - 1).
  instance_num = ndb.IntegerProperty()
  # The worker instance name.
  worker_name = ndb.StringProperty()
  # The project name.
  project_name = ndb.StringProperty()
class WorkerTlsCert(Model):
  """TLS certs for untrusted workers."""
  # The name of the project.
  project_name = ndb.StringProperty()
  # The contents of the TLS cert (raw bytes).
  cert_contents = ndb.BlobProperty()
  # The contents of the private key (raw bytes; sensitive material).
  key_contents = ndb.BlobProperty()
class FuzzerJob(Model):
  """Mapping between a fuzzer and job with additional metadata for selection."""
  fuzzer = ndb.StringProperty()
  job = ndb.StringProperty()
  platform = ndb.StringProperty()
  # Base selection weight (defaults to 1.0).
  weight = ndb.FloatProperty(default=1.0)
  # Multiplier applied on top of |weight| (defaults to 1.0).
  multiplier = ndb.FloatProperty(default=1.0)

  @property
  def actual_weight(self):
    """Get the actual weight for this job (base weight times multiplier)."""
    return self.weight * self.multiplier
class FuzzerJobs(Model):
  """(Batched) mappings between a fuzzer and jobs with additional metadata for
  selection."""
  # Platform these batched mappings apply to.
  platform = ndb.StringProperty()
  # Embedded FuzzerJob entries stored locally inside this entity.
  fuzzer_jobs = ndb.LocalStructuredProperty(FuzzerJob, repeated=True)
class OssFuzzBuildFailure(Model):
  """Represents build failure."""
  # Project name.
  project_name = ndb.StringProperty()
  # The monorail issue ID for the failure.
  issue_id = ndb.StringProperty()
  # The last timestamp of the build.
  last_checked_timestamp = ndb.DateTimeProperty()
  # Number of consecutive failures.
  consecutive_failures = ndb.IntegerProperty(default=0)
  # Build type (fuzzing, coverage, etc).
  build_type = ndb.StringProperty()
class Admin(Model):
  """Records an admin user."""
  # Email address of the admin user.
  email = ndb.StringProperty()
class TestcaseVariant(Model):
  """Represent a testcase variant on another job (another platform / sanitizer
  / config), recording whether the original crash reproduces there."""
  # Testcase ID of the testcase for which the variant is being evaluated.
  testcase_id = ndb.IntegerProperty()
  # Status of the testcase variant (pending, reproducible, unreproducible, etc).
  status = ndb.IntegerProperty(default=0)
  # Job type for the testcase variant.
  job_type = ndb.StringProperty()
  # Revision that the testcase variant was tried against.
  revision = ndb.IntegerProperty()
  # Crash type.
  crash_type = ndb.StringProperty()
  # Crash state.
  crash_state = ndb.StringProperty()
  # Bool to indicate if it is a security bug?
  security_flag = ndb.BooleanProperty()
  # Bool to indicate if crash is similar to original testcase.
  is_similar = ndb.BooleanProperty()
  # Similar testcase reproducer key (optional). This is set in case we notice a
  # similar crash on another platform.
  reproducer_key = ndb.StringProperty()
  # Platform (e.g. windows, linux, android).
  platform = ndb.StringProperty()
| 30.002699 | 80 | 0.732098 |
c09b615b2bfbef9eb35612bde05cd989042d2b88 | 6,616 | py | Python | tensorflow_riemopt/optimizers/constrained_rmsprop_test.py | vishalbelsare/tensorflow-riemopt | 4814b0c4a8bc74bf98f34ba99440f24ea21ab93b | [
"MIT"
] | 33 | 2021-07-12T14:03:55.000Z | 2022-03-30T11:44:11.000Z | tensorflow_riemopt/optimizers/constrained_rmsprop_test.py | vishalbelsare/tensorflow-riemopt | 4814b0c4a8bc74bf98f34ba99440f24ea21ab93b | [
"MIT"
] | 2 | 2021-08-02T18:39:06.000Z | 2022-02-14T21:01:22.000Z | tensorflow_riemopt/optimizers/constrained_rmsprop_test.py | vishalbelsare/tensorflow-riemopt | 4814b0c4a8bc74bf98f34ba99440f24ea21ab93b | [
"MIT"
] | 5 | 2020-11-06T05:22:07.000Z | 2021-06-15T03:21:58.000Z | """Tests for RMSprop."""
from absl.testing import parameterized
import numpy as np
from tensorflow.python.eager import context
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import ops
from tensorflow.python.keras import combinations
from tensorflow.python.keras.optimizer_v2 import rmsprop
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.ops import variables
from tensorflow.python.platform import test
from tensorflow_riemopt.optimizers.constrained_rmsprop import (
ConstrainedRMSprop,
)
class ConstrainedRMSpropTest(test.TestCase, parameterized.TestCase):
    """Checks ConstrainedRMSprop against Keras RMSprop as a reference.

    Both optimizers are applied to identically-initialized variables with
    identical gradients; the updated variables are then compared.
    """

    def testSparse(self):
        """Sparse (IndexedSlices) gradients, graph mode only."""
        for dtype in [dtypes.half, dtypes.float32, dtypes.float64]:
            for centered in [False, True]:
                with ops.Graph().as_default(), self.cached_session(
                    use_gpu=True
                ):
                    var0_np = np.array(
                        [1.0, 1.0, 2.0], dtype=dtype.as_numpy_dtype
                    )
                    grads0_np = np.array(
                        [0.1, 0.0, 0.1], dtype=dtype.as_numpy_dtype
                    )
                    var1_np = np.array(
                        [3.0, 3.0, 4.0], dtype=dtype.as_numpy_dtype
                    )
                    grads1_np = np.array(
                        [0.01, 0.0, 0.01], dtype=dtype.as_numpy_dtype
                    )
                    var0 = variables.Variable(var0_np)
                    var1 = variables.Variable(var1_np)
                    # Reference copies updated by the stock RMSprop optimizer.
                    var0_ref = variables.Variable(var0_np)
                    var1_ref = variables.Variable(var1_np)
                    # Only indices 0 and 2 receive gradient updates.
                    grads0_np_indices = np.array([0, 2], dtype=np.int32)
                    grads0 = ops.IndexedSlices(
                        constant_op.constant(grads0_np[grads0_np_indices]),
                        constant_op.constant(grads0_np_indices),
                        constant_op.constant([3]),
                    )
                    grads1_np_indices = np.array([0, 2], dtype=np.int32)
                    grads1 = ops.IndexedSlices(
                        constant_op.constant(grads1_np[grads1_np_indices]),
                        constant_op.constant(grads1_np_indices),
                        constant_op.constant([3]),
                    )
                    opt = ConstrainedRMSprop(centered=centered)
                    update = opt.apply_gradients(
                        zip([grads0, grads1], [var0, var1])
                    )
                    opt_ref = rmsprop.RMSprop(centered=centered)
                    update_ref = opt_ref.apply_gradients(
                        zip([grads0, grads1], [var0_ref, var1_ref])
                    )
                    self.evaluate(variables.global_variables_initializer())
                    # Run 3 steps
                    for t in range(3):
                        update.run()
                        update_ref.run()
                    # Validate updated params
                    self.assertAllCloseAccordingToType(
                        self.evaluate(var0_ref), self.evaluate(var0)
                    )
                    self.assertAllCloseAccordingToType(
                        self.evaluate(var1_ref), self.evaluate(var1)
                    )

    @combinations.generate(combinations.combine(mode=["graph", "eager"]))
    def testBasic(self):
        """Dense gradients, both graph and eager execution modes."""
        for i, dtype in enumerate(
            [dtypes.half, dtypes.float32, dtypes.float64]
        ):
            for centered in [False, True]:
                with self.cached_session(use_gpu=True):
                    var0_np = np.array([1.0, 2.0], dtype=dtype.as_numpy_dtype)
                    grads0_np = np.array([0.1, 0.1], dtype=dtype.as_numpy_dtype)
                    var1_np = np.array([3.0, 4.0], dtype=dtype.as_numpy_dtype)
                    grads1_np = np.array(
                        [0.01, 0.01], dtype=dtype.as_numpy_dtype
                    )
                    var0 = variables.Variable(var0_np, name="var0_%d" % i)
                    var1 = variables.Variable(var1_np, name="var1_%d" % i)
                    var0_ref = variables.Variable(
                        var0_np, name="var0_ref_%d" % i
                    )
                    var1_ref = variables.Variable(
                        var1_np, name="var1_ref_%d" % i
                    )
                    grads0 = constant_op.constant(grads0_np)
                    grads1 = constant_op.constant(grads1_np)
                    learning_rate = 0.001
                    opt = ConstrainedRMSprop(
                        learning_rate=learning_rate,
                        centered=centered,
                    )
                    opt_ref = rmsprop.RMSprop(
                        learning_rate=learning_rate,
                        centered=centered,
                    )
                    # In graph mode the update ops are built once and then
                    # evaluated per step; in eager mode apply_gradients is
                    # called directly inside the step loop below.
                    if not context.executing_eagerly():
                        update = opt.apply_gradients(
                            zip([grads0, grads1], [var0, var1])
                        )
                        update_ref = opt_ref.apply_gradients(
                            zip([grads0, grads1], [var0_ref, var1_ref])
                        )
                    self.evaluate(variables.global_variables_initializer())
                    # Run 3 steps
                    for t in range(3):
                        if not context.executing_eagerly():
                            self.evaluate(update)
                            self.evaluate(update_ref)
                        else:
                            opt.apply_gradients(
                                zip([grads0, grads1], [var0, var1])
                            )
                            opt_ref.apply_gradients(
                                zip([grads0, grads1], [var0_ref, var1_ref])
                            )
                    # Validate updated params (with tolerance, unlike the
                    # sparse test above).
                    self.assertAllCloseAccordingToType(
                        self.evaluate(var0_ref),
                        self.evaluate(var0),
                        rtol=1e-4,
                        atol=1e-4,
                    )
                    self.assertAllCloseAccordingToType(
                        self.evaluate(var1_ref),
                        self.evaluate(var1),
                        rtol=1e-4,
                        atol=1e-4,
                    )
# Run the TensorFlow test runner when executed as a script.
if __name__ == "__main__":
    test.main()
| 42.683871 | 80 | 0.472642 |
8b70008666db6981df40778f6eec201d3c3264c1 | 161 | py | Python | ex049.py | paulo-caixeta/Exercicios_Curso_Python | 3b77925499c174ea9ff81dec65d6319125219b9a | [
"MIT"
] | null | null | null | ex049.py | paulo-caixeta/Exercicios_Curso_Python | 3b77925499c174ea9ff81dec65d6319125219b9a | [
"MIT"
] | null | null | null | ex049.py | paulo-caixeta/Exercicios_Curso_Python | 3b77925499c174ea9ff81dec65d6319125219b9a | [
"MIT"
] | null | null | null | # Exercício de tabuada utilizando comando for.
# Read an integer from the user and print its times table from 1 to 10.
n = int(input('Digite um número inteiro: '))
for multiplier in range(1, 11):
    print(f'{n} x {multiplier} = {n * multiplier}')
| 23 | 46 | 0.608696 |
709c325e018a28697033561dbad454d0ba98319d | 169 | py | Python | mysite/urls.py | krystacassidy/djangoblog | 37dec5360c27b2622212ced1abdda1dfedf480cb | [
"MIT"
] | 8 | 2017-01-16T16:36:21.000Z | 2018-05-08T05:55:42.000Z | mysite/urls.py | krystacassidy/djangoblog | 37dec5360c27b2622212ced1abdda1dfedf480cb | [
"MIT"
] | null | null | null | mysite/urls.py | krystacassidy/djangoblog | 37dec5360c27b2622212ced1abdda1dfedf480cb | [
"MIT"
] | 1 | 2018-02-18T17:30:17.000Z | 2018-02-18T17:30:17.000Z | from django.conf.urls import include, url
from django.contrib import admin
# URL routing table for the site.
urlpatterns = [
    # Django admin interface.
    url(r'^admin/', admin.site.urls),
    # Everything else is delegated to the blog app's urlconf.
    url(r'^', include('blog.urls')),
]
| 21.125 | 41 | 0.674556 |
01bc8f34b45812f6d6b1c4937bcbc749a4464dbe | 109 | py | Python | data_cache/__init__.py | statnett/data_cache | cd76456906f7bad409eb5e581ae7f81637527fab | [
"MIT"
] | 23 | 2020-05-19T08:01:24.000Z | 2022-02-17T21:40:58.000Z | data_cache/__init__.py | statnett/data_cache | cd76456906f7bad409eb5e581ae7f81637527fab | [
"MIT"
] | 3 | 2020-05-19T19:46:06.000Z | 2022-03-16T07:09:00.000Z | data_cache/__init__.py | statnett/data_cache | cd76456906f7bad409eb5e581ae7f81637527fab | [
"MIT"
] | 3 | 2021-07-28T18:31:14.000Z | 2021-09-09T14:56:40.000Z | from data_cache.cache_tools import numpy_cache, pandas_cache, read_metadata # noqa: F401
VERSION = "0.1.6"
| 27.25 | 89 | 0.779817 |
956d25c67580f673c99078eb82cbf5fabf7c3f44 | 3,555 | py | Python | software/client/motion_handler.py | seanwatson/ece4416-project | ce048d84d2bc054fdb24848a32bb0219c4708fdc | [
"MIT"
] | 1 | 2020-07-26T12:55:27.000Z | 2020-07-26T12:55:27.000Z | software/client/motion_handler.py | seanwatson/ece4416-project | ce048d84d2bc054fdb24848a32bb0219c4708fdc | [
"MIT"
] | null | null | null | software/client/motion_handler.py | seanwatson/ece4416-project | ce048d84d2bc054fdb24848a32bb0219c4708fdc | [
"MIT"
] | null | null | null | """
Handles incoming motion commands and translates them to actions.
Copyright (c) 2013 Sean Watson
Licensed under the MIT license
"""
import threading
import logging
import time
class MotionHandler(threading.Thread):
    """Translates motion commands to actions.

    When a new motion command is received over the wireless
    connection it is consumed by the MotionHandler. The
    MotionHandler then scans through the motions it has been
    set up to detect trying to match the command to the motions.
    If a match is found and that motion is completed the corresponding
    action is executed.

    Attributes:
        motion_queue: A Queue for incoming motion commands.
        motions: The motions that should be detected
        kill: A boolean flag for stopping the thread
        timeout: The amount of time in seconds a composite motion is
            allowed to take
    """

    def __init__(self, motion_queue):
        """Initializes a new MotionHandler.

        Args:
            motion_queue: A queue where the incoming commands will be placed
        """
        threading.Thread.__init__(self)
        self.motion_queue = motion_queue
        self.motions = []
        self.kill = False
        self.timeout = 1
        logging.debug("Created MotionHandler %s . Reader %s",
                      self, self.motion_queue)

    def run(self):
        """The main thread process.

        Waits for commands to be received, processes them and calls the
        appropriate actions when necessary.
        """
        logging.debug("Starting MotionHandler thread")
        start = time.time()
        while not self.kill:
            # NOTE(review): stop() enqueues 0 purely to wake this blocking
            # get; a genuine motion code of 0 would be indistinguishable
            # from that sentinel -- confirm real codes are non-zero.
            code = int(self.motion_queue.get(True)) # Blocking get call
            logging.debug("Got code: %s", code)
            # Reset all motions if stationary for too long
            if(time.time() - start > self.timeout):
                for mot in self.motions:
                    mot.position = 0
            # Check the code against the handled motions
            for mot in self.motions:
                # Check if it matches the next code in the motion
                if code == mot.motions[mot.position].code:
                    # If the motion is done, perform the movement
                    if mot.position == len(mot.motions) - 1:
                        mot.position = 0
                        mot.move()
                        logging.debug("Motion %s fully matched", mot)
                    # Otherwise start looking for the next motion
                    else:
                        mot.position += 1
                        logging.debug("Motion %s partially matched", mot)
                # Reset the position for a wrong motion
                else:
                    mot.position = 0
            self.motion_queue.task_done()
            # Timestamp of the last processed command, used by the
            # idle-timeout reset above.
            start = time.time()
        logging.info("MotionHandler thread stopped")

    def stop(self):
        """Stops the thread."""
        self.kill = True
        self.motion_queue.put(0) # Needed to get out of blocking call
        logging.debug("Stopping MotionHandler")

    def add_motion(self, mot):
        """Adds a motion to detect.

        Once added the handler will try to detect this motion.
        Duplicate motions are allowed, but it will cause the action
        to get executed multiple times for each detection.

        Args:
            The new motion to be detected.
        """
        self.motions.append(mot)
        logging.debug("Added Motion %s to MotionHandler %s",
                      mot, self)
a9b0748be3810ec13d7ca08368d63f1cd8719654 | 873 | py | Python | example.py | hagenr/gpio-events | 9a53fb952499fc65b4b2f070fa392dc2f7da8223 | [
"MIT"
] | null | null | null | example.py | hagenr/gpio-events | 9a53fb952499fc65b4b2f070fa392dc2f7da8223 | [
"MIT"
] | null | null | null | example.py | hagenr/gpio-events | 9a53fb952499fc65b4b2f070fa392dc2f7da8223 | [
"MIT"
] | null | null | null | #
# Example Code GPIO Events
# Hagen Richter <hagen.richter@informatik.uni-hamburg.de>
# <https://github.com/hagenr/gpio-events>
#
import rotary_encoder_events
import button_events
import time
CW_PIN = 35
ACW_PIN = 36
SW_PIN = 37
def cb_up(CallingPin):
    # Callback wired as the button's "up" handler (see Button setup below):
    # re-reads the pin level and logs it with the button's press counter.
    btn_new = btn.gpio.input(CallingPin)
    print 'UP Pin: %d val: %d, count: %d' % (CallingPin, btn_new, btn.counter)
    return
def cb_down(CallingPin):
    # Callback wired as the button's "down" handler (see Button setup below):
    # re-reads the pin level and logs it with the button's press counter.
    btn_new = btn.gpio.input(CallingPin)
    print 'DN Pin: %d val: %d, count: %d' % (CallingPin, btn_new, btn.counter)
    return
# Wire up the push button (with the callbacks above) and the rotary encoder.
btn = button_events.Button(SW_PIN, callback_down=cb_down, callback_up=cb_up)
encoder = rotary_encoder_events.RotaryEncoder(CW_PIN, ACW_PIN)
# Poll the encoder every 10 ms and accumulate rotation into a running count.
start = 0
while True:
    delta = encoder.get_delta()
    if delta!=0:
        start=start + delta
        print ("rotate %d, count = %d" % (delta, start))
    time.sleep(0.01)
| 22.973684 | 78 | 0.68614 |
b85562359f3115a56d86e0bb483c61919b926576 | 10,637 | py | Python | scripts/xiangshan.py | shinezyy/XiangShan | ffba40a0863135d3185f5b4fb947625c4e13a8e2 | [
"MulanPSL-1.0"
] | 1 | 2021-08-01T14:26:52.000Z | 2021-08-01T14:26:52.000Z | scripts/xiangshan.py | yutiansky/XiangShan | 29fcb828156fbf1789fae87055e1ab757d718d9d | [
"MulanPSL-1.0"
] | null | null | null | scripts/xiangshan.py | yutiansky/XiangShan | 29fcb828156fbf1789fae87055e1ab757d718d9d | [
"MulanPSL-1.0"
] | null | null | null | #***************************************************************************************
# Copyright (c) 2020-2021 Institute of Computing Technology, Chinese Academy of Sciences
# Copyright (c) 2020-2021 Peng Cheng Laboratory
#
# XiangShan is licensed under Mulan PSL v2.
# You can use this software according to the terms and conditions of the Mulan PSL v2.
# You may obtain a copy of Mulan PSL v2 at:
# http://license.coscl.org.cn/MulanPSL2
#
# THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND,
# EITHER EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT,
# MERCHANTABILITY OR FIT FOR A PARTICULAR PURPOSE.
#
# See the Mulan PSL v2 for more details.
#***************************************************************************************
# Simple version of xiangshan python wrapper
import os
import argparse
import sys
import subprocess
class XSArgs(object):
    """Holds all configuration derived from CLI args, env vars and defaults.

    Path resolution precedence (see __extract_path): explicit CLI argument,
    then environment variable, then the repository-relative default below.
    """
    script_path = os.path.realpath(__file__)
    # default path to the repositories
    noop_home = os.path.join(os.path.dirname(script_path), "..")
    nemu_home = os.path.join(noop_home, "../NEMU")
    am_home = os.path.join(noop_home, "../nexus-am")
    dramsim3_home = os.path.join(noop_home, "../DRAMsim3")
    rvtest_home = os.path.join(noop_home, "../riscv-tests")

    def __init__(self, args):
        # all path environment variables that should be set
        all_path = [
            # (python argument, environment variable, default, target function)
            (None, "NOOP_HOME", self.noop_home, self.set_noop_home),
            (args.nemu, "NEMU_HOME", self.nemu_home, self.set_nemu_home),
            (args.am, "AM_HOME", self.am_home, self.set_am_home),
            (args.dramsim3, "DRAMSIM3_HOME", self.dramsim3_home, self.set_dramsim3_home),
            (args.rvtest, "RVTEST_HOME", self.rvtest_home, self.set_rvtest_home),
        ]
        for (arg_in, env, default, set_func) in all_path:
            set_func(self.__extract_path(arg_in, env, default))
        # Chisel arguments
        self.disable_log = args.disable_log
        self.dual_core = args.dual_core
        # Makefile arguments (value 1 when enabled, None when unset so the
        # getters below can filter them out)
        self.threads = args.threads
        self.with_dramsim3 = 1 if args.with_dramsim3 else None
        self.trace = 1 if args.trace else None
        self.config = args.config
        # emu arguments
        self.max_instr = args.max_instr
        self.numa = args.numa

    def get_env_variables(self):
        """Return the repo-path environment variables for subprocess calls."""
        all_env = {
            "NOOP_HOME"    : self.noop_home,
            "NEMU_HOME"    : self.nemu_home,
            "AM_HOME"      : self.am_home,
            "DRAMSIM3_HOME": self.dramsim3_home
        }
        return all_env

    def get_chisel_args(self, prefix=None):
        """Yield enabled Chisel flags, optionally prefixed (e.g. "--")."""
        chisel_args = [
            (self.disable_log, "disable-log"),
            (self.dual_core, "dual-core")
        ]
        args = map(lambda x: x[1], filter(lambda arg: arg[0], chisel_args))
        if prefix is not None:
            args = map(lambda x: prefix + x, args)
        return args

    def get_makefile_args(self):
        """Yield (value, NAME) pairs for Makefile variables that are set."""
        makefile_args = [
            (self.threads,       "EMU_THREADS"),
            (self.with_dramsim3, "WITH_DRAMSIM3"),
            (self.trace,         "EMU_TRACE"),
            (self.config,        "CONFIG")
        ]
        args = filter(lambda arg: arg[0] is not None, makefile_args)
        return args

    def get_emu_args(self):
        """Yield (value, name) pairs for emu command-line options that are set."""
        emu_args = [
            (self.max_instr, "max-instr")
        ]
        args = filter(lambda arg: arg[0] is not None, emu_args)
        return args

    def show(self):
        """Print the effective configuration (env, Chisel, Makefile, emu)."""
        print("Extra environment variables:")
        env = self.get_env_variables()
        for env_name in env:
            print(f"{env_name}: {env[env_name]}")
        print()
        print("Chisel arguments:")
        print(" ".join(self.get_chisel_args()))
        print()
        print("Makefile arguments:")
        for val, name in self.get_makefile_args():
            print(f"{name}={val}")
        print()
        print("emu arguments:")
        for val, name in self.get_emu_args():
            print(f"--{name} {val}")
        print()

    def __extract_path(self, path, env=None, default=None):
        # Resolve a path: explicit argument wins, then the env var, then the
        # default. NOTE(review): if all three are None, os.path.realpath(None)
        # raises TypeError -- confirm every call site supplies a default.
        if path is None and env is not None:
            path = os.getenv(env)
        if path is None and default is not None:
            path = default
        path = os.path.realpath(path)
        return path

    def set_noop_home(self, path):
        self.noop_home = path

    def set_nemu_home(self, path):
        self.nemu_home = path

    def set_am_home(self, path):
        self.am_home = path

    def set_dramsim3_home(self, path):
        self.dramsim3_home = path

    def set_rvtest_home(self, path):
        self.rvtest_home = path
# XiangShan environment
class XiangShan(object):
    """Drives verilog generation, emu builds and emu runs via make/shell."""

    def __init__(self, args):
        self.args = XSArgs(args)

    def show(self):
        """Print the effective configuration."""
        self.args.show()

    def generate_verilog(self):
        """Run `make verilog` with the configured arguments; return exit code."""
        print("Generating XiangShan verilog with the following configurations:")
        self.show()
        sim_args = " ".join(self.args.get_chisel_args(prefix="--"))
        make_args = " ".join(map(lambda arg: f"{arg[1]}={arg[0]}", self.args.get_makefile_args()))
        return_code = self.__exec_cmd(f'make -C $NOOP_HOME verilog SIM_ARGS="{sim_args}" {make_args}')
        return return_code

    def build_emu(self):
        """Run `make emu` with the configured arguments; return exit code."""
        print("Building XiangShan emu with the following configurations:")
        self.show()
        sim_args = " ".join(self.args.get_chisel_args(prefix="--"))
        make_args = " ".join(map(lambda arg: f"{arg[1]}={arg[0]}", self.args.get_makefile_args()))
        return_code = self.__exec_cmd(f'make -C $NOOP_HOME emu -j200 SIM_ARGS="{sim_args}" {make_args}')
        return return_code

    def run_emu(self, workload):
        """Run the built emu binary on |workload|; return its exit code."""
        print("Running XiangShan emu with the following configurations:")
        self.show()
        emu_args = " ".join(map(lambda arg: f"--{arg[1]} {arg[0]}", self.args.get_emu_args()))
        print("workload:", workload)
        # Optionally pin memory and CPUs via numactl when --numa is given.
        numa_args = f"numactl -m 1 -C 64-{64+self.args.threads-1}" if self.args.numa else ""
        return_code = self.__exec_cmd(f'{numa_args} $NOOP_HOME/build/emu -i {workload} {emu_args}')
        return return_code

    def run(self, args):
        """Dispatch to CI mode or the requested generate/build/run actions.

        Stops at the first action returning a non-zero code and returns it;
        returns 0 when all requested actions succeed.
        """
        if args.ci is not None:
            return self.run_ci(args.ci)
        actions = [
            (args.generate, lambda _ : self.generate_verilog()),
            (args.build, lambda _ : self.build_emu()),
            (args.workload, lambda args: self.run_emu(args.workload))
        ]
        valid_actions = map(lambda act: act[1], filter(lambda act: act[0], actions))
        for i, action in enumerate(valid_actions):
            print(f"Action {i}:")
            ret = action(args)
            if ret:
                return ret
        return 0

    def __exec_cmd(self, cmd):
        # Run |cmd| through the shell with the repo-path env vars added.
        # NOTE(review): shell=True with interpolated paths is prone to shell
        # injection if any path/workload string is untrusted -- acceptable for
        # a developer tool, but worth confirming.
        env = dict(os.environ)
        env.update(self.args.get_env_variables())
        print("subprocess call cmd:", cmd)
        return_code = subprocess.call(cmd, shell=True, env=env)
        return return_code

    def __get_ci_cputest(self, name=None):
        """Yield paths of all *.bin cputest images under nexus-am."""
        base_dir = os.path.join(self.args.am_home, "tests/cputest/build")
        cputest = os.listdir(base_dir)
        cputest = filter(lambda x: x.endswith(".bin"), cputest)
        cputest = map(lambda x: os.path.join(base_dir, x), cputest)
        return cputest

    def __get_ci_rvtest(self, name=None):
        """Yield paths of the supported riscv-tests *.bin images."""
        base_dir = os.path.join(self.args.rvtest_home, "isa/build")
        riscv_tests = os.listdir(base_dir)
        riscv_tests = filter(lambda x: x.endswith(".bin"), riscv_tests)
        # Only these ISA extension suites are run in CI.
        all_rv_tests = ["rv64ui", "rv64um", "rv64ua", "rv64uf", "rv64ud"]
        riscv_tests = filter(lambda x: x[:6] in all_rv_tests, riscv_tests)
        riscv_tests = map(lambda x: os.path.join(base_dir, x), riscv_tests)
        return riscv_tests

    def __am_apps_path(self, bench):
        """Return the single-image path for an AM app benchmark."""
        filename = f"{bench}-riscv64-noop.bin"
        return [os.path.join(self.args.am_home, "apps", bench, "build", filename)]

    def __get_ci_workloads(self, name):
        """Return the path of a named SPEC/linux checkpoint workload."""
        workloads = {
            "linux-hello": "bbl.bin",
            "povray": "_3400001000_.gz",
            "mcf": "_2550001000_.gz",
            "xalancbmk": "_6600001000_.gz",
            "gcc": "_1250001000_.gz",
            "namd": "_4850001000_.gz",
            "milc": "_4150001000_.gz",
            "lbm": "_7550001000_.gz",
            "gromacs": "_3150001000_.gz"
        }
        return [os.path.join("/home/ci-runner/xsenv/workloads", name, workloads[name])]

    def run_ci(self, test):
        """Run every workload of CI suite |test|; stop at first failure."""
        all_tests = {
            "cputest": self.__get_ci_cputest,
            "riscv-tests": self.__get_ci_rvtest,
            "microbench": self.__am_apps_path,
            "coremark": self.__am_apps_path
        }
        # Unknown suite names fall back to the named-workload lookup.
        for target in all_tests.get(test, self.__get_ci_workloads)(test):
            print(target)
            ret = self.run_emu(target)
            if ret:
                return ret
        return 0
# Command-line entry point: parse options, run requested actions, and exit
# with the resulting status code.
if __name__ == "__main__":
    parser = argparse.ArgumentParser(description='Python wrapper for XiangShan')
    parser.add_argument('workload', nargs='?', type=str, default="",
                        help='input workload file in binary format')
    # actions
    parser.add_argument('--build', action='store_true', help='build XS emu')
    parser.add_argument('--generate', action='store_true', help='generate XS verilog')
    parser.add_argument('--ci', nargs='?', type=str, const="", help='run CI tests')
    # environment variables
    parser.add_argument('--nemu', nargs='?', type=str, help='path to nemu')
    parser.add_argument('--am', nargs='?', type=str, help='path to nexus-am')
    parser.add_argument('--dramsim3', nargs='?', type=str, help='path to dramsim3')
    parser.add_argument('--rvtest', nargs='?', type=str, help='path to riscv-tests')
    # chisel arguments
    parser.add_argument('--disable-log', action='store_true', help='disable log')
    parser.add_argument('--dual-core', action='store_true', help='dual core')
    # makefile arguments
    parser.add_argument('--with-dramsim3', action='store_true', help='enable dramsim3')
    parser.add_argument('--threads', nargs='?', type=int, help='number of emu threads')
    parser.add_argument('--trace', action='store_true', help='enable waveform')
    parser.add_argument('--config', nargs='?', type=str, help='config')
    # emu arguments
    parser.add_argument('--numa', action='store_true', help='use numactl')
    parser.add_argument('--max-instr', nargs='?', type=int, help='max instr')

    args = parser.parse_args()

    xs = XiangShan(args)
    ret = xs.run(args)
    sys.exit(ret)
| 39.396296 | 104 | 0.605152 |
7896dc44e1bdc5f6d4c29d94b8482a5a34658df8 | 7,896 | py | Python | ica/ica/ica_gpu.py | MRN-Code/coinstac_gica | 46fd9274b80bae9a5731bd77c5a3bc2d7002bb4c | [
"MIT"
] | 1 | 2019-03-01T05:30:06.000Z | 2019-03-01T05:30:06.000Z | ica/ica/ica_gpu.py | MRN-Code/coinstac_gica | 46fd9274b80bae9a5731bd77c5a3bc2d7002bb4c | [
"MIT"
] | null | null | null | ica/ica/ica_gpu.py | MRN-Code/coinstac_gica | 46fd9274b80bae9a5731bd77c5a3bc2d7002bb4c | [
"MIT"
] | null | null | null | '''
Independent Component Analysis (ICA):
This script computes ICA using the INFOMAX criteria.
The preprocessing steps include demeaning and whitening.
'''
import numpy as np
from numpy import dot
from numpy.linalg import matrix_rank, inv
from numpy.random import permutation
from scipy.linalg import eigh
# Theano Imports
import theano.tensor as T
import theano
from theano import shared
# Global constants
EPS = 1e-18
MAX_W = 1e8
ANNEAL = 0.9
MAX_STEP = 500
MIN_LRATE = 1e-6
W_STOP = 1e-6
class ica_gpu(object):
    """
    Infomax ICA for one data modality.

    Data is demeaned and PCA-whitened, then unmixed with the Infomax rule
    (per the module docstring); the heavy updates run as compiled Theano
    functions, with weights/bias kept in Theano shared variables.
    """

    def __init__(self, n_comp=10, verbose=False):
        # Theano initialization: build and compile the symbolic update graph
        # once; w_up_fun and cov_fun are reused for every batch.
        self.T_weights = shared(np.eye(n_comp, dtype=np.float32))
        self.T_bias = shared(np.ones((n_comp, 1), dtype=np.float32))
        T_p_x_white = T.fmatrix()
        T_lrate = T.fscalar()
        T_block = T.fscalar()
        # Unmixed signals for the current block, with bias broadcast over columns.
        T_unmixed = T.dot(self.T_weights, T_p_x_white) + T.addbroadcast(self.T_bias, 1)
        T_logit = 1 - 2 / (1 + T.exp(-T_unmixed))
        # Infomax weight update: W += lrate * (block*I + logit·unmixed^T) · W.
        T_out = self.T_weights + T_lrate * \
            T.dot(T_block * T.identity_like(self.T_weights) + T.dot(T_logit, T.transpose(T_unmixed)), self.T_weights)
        T_bias_out = self.T_bias + T_lrate * T.reshape(T_logit.sum(axis=1), (-1, 1))
        # Diagnostics returned per call: max weight (blow-up check) and NaN flag.
        T_max_w = T.max(self.T_weights)
        T_isnan = T.any(T.isnan(self.T_weights))
        self.w_up_fun = theano.function([T_p_x_white, T_lrate, T_block],
                                        [T_max_w, T_isnan],
                                        updates=[(self.T_weights, T_out),
                                                 (self.T_bias, T_bias_out)],
                                        allow_input_downcast=True)
        # Covariance of a matrix against itself, normalized by T_block.
        T_matrix = T.fmatrix()
        T_cov = T.dot(T_matrix, T.transpose(T_matrix))/T_block
        self.cov_fun = theano.function([T_matrix, T_block], T_cov, allow_input_downcast=True)
        self.loading = None
        self.sources = None
        self.weights = None
        self.n_comp = n_comp
        self.verbose = verbose

    def __pca_whiten(self, x2d):
        """PCA whitening of the data.

        *Input
        x2d : 2d data matrix of observations by variables
        *Output
        x_white : whitened data (x_white = np.dot(white, x2d_demean))
        white   : whitening matrix
        dewhite : dewhitening matrix (x2d_demean = np.dot(dewhite, x_white))
        """
        NSUB, NVOX = x2d.shape
        x2d_demean = x2d - x2d.mean(axis=1).reshape((-1, 1))
        # cov = dot(x2d_demean, x2d_demean.T) / (NVOX - 1), computed on device.
        cov = self.cov_fun(x2d_demean, NVOX-1)
        # Keep only the n_comp largest eigenpairs.
        w, v = eigh(cov, eigvals=(NSUB-self.n_comp, NSUB-1))
        D = np.diag(1./(np.sqrt(w)))
        white = dot(D, v.T)
        D = np.diag(np.sqrt(w))
        dewhite = dot(v, D)
        x_white = dot(white, x2d_demean)
        return (x_white, white, dewhite)

    def __w_update(self, x_white, lrate1):
        """One Infomax pass over the whitened data in shuffled blocks.

        *Input
        x_white: whitened data
        lrate1 : current learning rate
        *Output
        (lrate1, error): possibly annealed learning rate, and error=1 if the
        weights blew up and were reset (caller restarts), (0, 1) on fatal
        conditions (rank-deficient data or learning rate underflow).
        """
        error = 0
        NVOX = x_white.shape[1]
        NCOMP = x_white.shape[0]
        block1 = int(np.floor(np.sqrt(NVOX / 3)))
        # Visit samples in a fresh random order each pass.
        permute1 = permutation(NVOX)
        p_x_white = x_white[:, permute1].astype(np.float32)
        for start in range(0, NVOX, block1):
            if start + block1 < NVOX:
                tt2 = start + block1
            else:
                tt2 = NVOX
                block1 = NVOX - start
            max_w, isnan = self.w_up_fun(p_x_white[:, start:tt2], lrate1, block1)
            # Checking if W blows up
            if isnan or max_w > MAX_W:
                # Numeric error: anneal the learning rate and restart from
                # identity weights / zero bias.
                lrate1 = lrate1 * ANNEAL
                self.T_weights.set_value(np.eye(NCOMP, dtype=np.float32))
                self.T_bias.set_value(np.zeros((NCOMP, 1), dtype=np.float32))
                error = 1
                # NOTE(review): the 1e-6 literals below duplicate the module
                # constant MIN_LRATE (= 1e-6) -- consider using it here.
                if lrate1 > 1e-6 and \
                   matrix_rank(x_white) < NCOMP:
                    # Data is rank deficient: cannot compute NCOMP components.
                    return (0, 1)
                if lrate1 < 1e-6:
                    # Learning rate underflow: weight matrix may not be invertible.
                    return (0, 1)
        return(lrate1, error)

    def __infomax(self, x_white):
        """Computes ICA infomax in whitened data.

        Decomposes x_white as x_white = A S.
        *Input
        x_white: whitened data (use __pca_whiten)
        *Output
        A : mixing matrix
        S : source matrix
        W : unmixing matrix
        """
        NCOMP = self.n_comp
        # Initialization
        self.T_weights.set_value(np.eye(NCOMP, dtype=np.float32))
        weights = np.eye(NCOMP)
        old_weights = np.eye(NCOMP)
        d_weigths = np.zeros(NCOMP)
        old_d_weights = np.zeros(NCOMP)
        lrate = 0.005 / np.log(NCOMP)
        self.T_bias.set_value(np.zeros((NCOMP, 1), dtype=np.float32))
        change = 1
        angle_delta = 0
        step = 1
        # Iterate until the weight change is small or MAX_STEP is reached.
        while step < MAX_STEP and change > W_STOP:
            (lrate, error) = self.__w_update(x_white, lrate)
            if error != 0:
                # Blow-up during the pass: anneal and restart optimization.
                step = 1
                error = 0
                lrate = lrate * ANNEAL
                self.T_weights.set_value(np.eye(NCOMP, dtype=np.float32))
                old_weights = np.eye(NCOMP)
                d_weigths = np.zeros(NCOMP)
                old_d_weights = np.zeros(NCOMP)
                self.T_bias.set_value(np.zeros((NCOMP, 1), dtype=np.float32))
            else:
                weights = self.T_weights.get_value()
                d_weigths = weights - old_weights
                change = np.linalg.norm(d_weigths, 'fro')**2
                # Angle between successive weight updates; a large angle
                # (> 60 degrees) triggers learning-rate annealing.
                if step > 2:
                    angle_delta = np.arccos(np.sum(d_weigths * old_d_weights) /
                                            (np.linalg.norm(d_weigths, 'fro')) /
                                            (np.linalg.norm(old_d_weights, 'fro')))
                    angle_delta = angle_delta * 180 / np.pi
                old_weights = np.copy(weights)
                if angle_delta > 60:
                    lrate = lrate * ANNEAL
                    old_d_weights = np.copy(d_weigths)
                elif step == 1:
                    old_d_weights = np.copy(d_weigths)
            step = step + 1
        # A,S,W
        return (inv(weights), dot(weights, x_white), weights)

    def fit(self, x_raw):
        '''
        Single modality Independent Component Analysis.

        Whitens x_raw, runs Infomax, stores loading/sources/weights on the
        instance and returns (loading, sources).
        '''
        x_white, _, dewhite = self.__pca_whiten(x_raw)
        loading, self.sources, self.weights = self.__infomax(x_white)
        # Map the loading matrix back to the original (unwhitened) space.
        self.loading = dot(dewhite, loading)
        return (self.loading, self.sources)
| 34.938053 | 117 | 0.535968 |
1e2d1ec5bf1f3c59d03350398b6a1c1fa3b03927 | 71,375 | py | Python | sdks/python/apache_beam/runners/dataflow/dataflow_runner.py | chishankar-work/beam | b822366d7ab72c609de276255faadd9475e4e2d2 | [
"Apache-2.0"
] | null | null | null | sdks/python/apache_beam/runners/dataflow/dataflow_runner.py | chishankar-work/beam | b822366d7ab72c609de276255faadd9475e4e2d2 | [
"Apache-2.0"
] | null | null | null | sdks/python/apache_beam/runners/dataflow/dataflow_runner.py | chishankar-work/beam | b822366d7ab72c609de276255faadd9475e4e2d2 | [
"Apache-2.0"
] | null | null | null | #
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
"""A runner implementation that submits a job for remote execution.
The runner will create a JSON description of the job graph and then submit it
to the Dataflow Service for remote execution by a worker.
"""
# pytype: skip-file
import base64
import logging
import os
import threading
import time
import traceback
from collections import defaultdict
from subprocess import DEVNULL
from typing import TYPE_CHECKING
from typing import List
from urllib.parse import quote
from urllib.parse import quote_from_bytes
from urllib.parse import unquote_to_bytes
import apache_beam as beam
from apache_beam import coders
from apache_beam import error
from apache_beam.internal import pickler
from apache_beam.internal.gcp import json_value
from apache_beam.options.pipeline_options import DebugOptions
from apache_beam.options.pipeline_options import GoogleCloudOptions
from apache_beam.options.pipeline_options import SetupOptions
from apache_beam.options.pipeline_options import StandardOptions
from apache_beam.options.pipeline_options import TestOptions
from apache_beam.options.pipeline_options import TypeOptions
from apache_beam.options.pipeline_options import WorkerOptions
from apache_beam.portability import common_urns
from apache_beam.portability.api import beam_runner_api_pb2
from apache_beam.pvalue import AsSideInput
from apache_beam.runners.common import DoFnSignature
from apache_beam.runners.common import group_by_key_input_visitor
from apache_beam.runners.dataflow.internal import names
from apache_beam.runners.dataflow.internal.clients import dataflow as dataflow_api
from apache_beam.runners.dataflow.internal.names import PropertyNames
from apache_beam.runners.dataflow.internal.names import TransformNames
from apache_beam.runners.runner import PipelineResult
from apache_beam.runners.runner import PipelineRunner
from apache_beam.runners.runner import PipelineState
from apache_beam.runners.runner import PValueCache
from apache_beam.transforms import window
from apache_beam.transforms.display import DisplayData
from apache_beam.transforms.sideinputs import SIDE_INPUT_PREFIX
from apache_beam.typehints import typehints
from apache_beam.utils import processes
from apache_beam.utils import proto_utils
from apache_beam.utils.interactive_utils import is_in_notebook
from apache_beam.utils.plugin import BeamPlugin
if TYPE_CHECKING:
from apache_beam.pipeline import PTransformOverride
# Public API of this module.
__all__ = ['DataflowRunner']

_LOGGER = logging.getLogger(__name__)

# Message raised when the deprecated Read(BigQuerySource(...)) pattern is used
# on stacks that no longer support it (Fn API / Dataflow Runner v2).
BQ_SOURCE_UW_ERROR = (
    'The Read(BigQuerySource(...)) transform is not supported with newer stack '
    'features (Fn API, Dataflow Runner V2, etc). Please use the transform '
    'apache_beam.io.gcp.bigquery.ReadFromBigQuery instead.')
class DataflowRunner(PipelineRunner):
"""A runner that creates job graphs and submits them for remote execution.
Every execution of the run() method will submit an independent job for
remote execution that consists of the nodes reachable from the passed in
node argument or entire graph if node is None. The run() method returns
after the service created the job and will not wait for the job to finish
if blocking is set to False.
"""
# A list of PTransformOverride objects to be applied before running a pipeline
# using DataflowRunner.
# Currently this only works for overrides where the input and output types do
# not change.
# For internal SDK use only. This should not be updated by Beam pipeline
# authors.
# Imported here to avoid circular dependencies.
# TODO: Remove the apache_beam.pipeline dependency in CreatePTransformOverride
from apache_beam.runners.dataflow.ptransform_overrides import CombineValuesPTransformOverride
from apache_beam.runners.dataflow.ptransform_overrides import CreatePTransformOverride
from apache_beam.runners.dataflow.ptransform_overrides import JrhReadPTransformOverride
from apache_beam.runners.dataflow.ptransform_overrides import ReadPTransformOverride
from apache_beam.runners.dataflow.ptransform_overrides import NativeReadPTransformOverride
# These overrides should be applied before the proto representation of the
# graph is created.
# Overrides applied before the proto representation of the graph is created
# (see the comment block above and run_pipeline).
_PTRANSFORM_OVERRIDES = [
    CombineValuesPTransformOverride(),
    NativeReadPTransformOverride(),
] # type: List[PTransformOverride]

# Overrides applied only on the JRH path, i.e. when FnAPI is enabled but the
# unified worker is not (see run_pipeline).
_JRH_PTRANSFORM_OVERRIDES = [
    JrhReadPTransformOverride(),
] # type: List[PTransformOverride]

# These overrides should be applied after the proto representation of the
# graph is created.
_NON_PORTABLE_PTRANSFORM_OVERRIDES = [
    CreatePTransformOverride(),
    ReadPTransformOverride(),
] # type: List[PTransformOverride]
def __init__(self, cache=None):
# Cache of CloudWorkflowStep protos generated while the runner
# "executes" a pipeline.
self._cache = cache if cache is not None else PValueCache()
self._unique_step_id = 0
self._default_environment = None
def is_fnapi_compatible(self):
  """This legacy job-graph submission path is not FnAPI compatible."""
  return False
def apply(self, transform, input, options):
  # Ensure the 'beam_fn_api' experiment is present whenever the unified
  # worker is enabled, before delegating to the base runner's apply().
  self._maybe_add_unified_worker_missing_options(options)
  return super().apply(transform, input, options)
def _get_unique_step_name(self):
self._unique_step_id += 1
return 's%s' % self._unique_step_id
@staticmethod
def poll_for_job_completion(runner, result, duration):
  """Polls for the specified job to finish running (successfully or not).

  Updates the result with the new job information before returning.

  Args:
    runner: DataflowRunner instance to use for polling job state.
    result: DataflowPipelineResult instance used for job information.
    duration (int): The time to wait (in milliseconds) for job to finish.
      If it is set to :data:`None`, it will wait indefinitely until the job
      is finished.
  """
  last_message_time = None
  current_seen_messages = set()

  last_error_rank = float('-inf')
  last_error_msg = None
  last_job_state = None
  # How long to wait after pipeline failure for the error
  # message to show up giving the reason for the failure.
  # It typically takes about 30 seconds.
  final_countdown_timer_secs = 50.0
  sleep_secs = 5.0

  # Try to prioritize the user-level traceback, if any.
  def rank_error(msg):
    if 'work item was attempted' in msg:
      return -1
    elif 'Traceback' in msg:
      return 1
    return 0

  if duration:
    start_secs = time.time()
    duration_secs = duration // 1000

  job_id = result.job_id()
  while True:
    response = runner.dataflow_client.get_job(job_id)
    # If get() is called very soon after Create() the response may not contain
    # an initialized 'currentState' field.
    if response.currentState is not None:
      if response.currentState != last_job_state:
        _LOGGER.info('Job %s is in state %s', job_id, response.currentState)
        last_job_state = response.currentState
      if str(response.currentState) != 'JOB_STATE_RUNNING':
        # Stop checking for new messages on timeout, explanatory
        # message received, success, or a terminal job state caused
        # by the user that therefore doesn't require explanation.
        if (final_countdown_timer_secs <= 0.0 or last_error_msg is not None or
            str(response.currentState) == 'JOB_STATE_DONE' or
            str(response.currentState) == 'JOB_STATE_CANCELLED' or
            str(response.currentState) == 'JOB_STATE_UPDATED' or
            str(response.currentState) == 'JOB_STATE_DRAINED'):
          break

        # Check that job is in a post-preparation state before starting the
        # final countdown.
        if (str(response.currentState) not in ('JOB_STATE_PENDING',
                                               'JOB_STATE_QUEUED')):
          # The job has failed; ensure we see any final error messages.
          sleep_secs = 1.0  # poll faster during the final countdown
          final_countdown_timer_secs -= sleep_secs

    time.sleep(sleep_secs)

    # Get all messages since beginning of the job run or since last message.
    page_token = None
    while True:
      messages, page_token = runner.dataflow_client.list_messages(
          job_id, page_token=page_token, start_time=last_message_time)
      for m in messages:
        message = '%s: %s: %s' % (m.time, m.messageImportance, m.messageText)

        if not last_message_time or m.time > last_message_time:
          last_message_time = m.time
          current_seen_messages = set()

        if message in current_seen_messages:
          # Skip the message if it has already been seen at the current
          # time. This could be the case since the list_messages API is
          # queried starting at last_message_time.
          continue
        else:
          current_seen_messages.add(message)
        # Skip empty messages.
        if m.messageImportance is None:
          continue
        _LOGGER.info(message)
        if str(m.messageImportance) == 'JOB_MESSAGE_ERROR':
          # Keep the highest-ranked (most user-relevant) error seen so far.
          if rank_error(m.messageText) >= last_error_rank:
            last_error_rank = rank_error(m.messageText)
            last_error_msg = m.messageText
      if not page_token:
        break

    if duration:
      passed_secs = time.time() - start_secs
      if passed_secs > duration_secs:
        _LOGGER.warning(
            'Timing out on waiting for job %s after %d seconds',
            job_id,
            passed_secs)
        break

  result._job = response
  runner.last_error_msg = last_error_msg
@staticmethod
def _only_element(iterable):
# type: (Iterable[T]) -> T
element, = iterable
return element
@staticmethod
def side_input_visitor(
    use_unified_worker=False,
    use_fn_api=False,
    deterministic_key_coders=True):
  """Returns a visitor that rewrites ParDo side inputs into Dataflow form."""
  # Imported here to avoid circular dependencies.
  # pylint: disable=wrong-import-order, wrong-import-position
  from apache_beam.pipeline import PipelineVisitor
  from apache_beam.transforms.core import ParDo

  class SideInputVisitor(PipelineVisitor):
    """Ensures input `PCollection` used as a side inputs has a `KV` type.

    TODO(BEAM-115): Once Python SDK is compatible with the new Runner API,
    we could directly replace the coder instead of mutating the element type.
    """
    def visit_transform(self, transform_node):
      if isinstance(transform_node.transform, ParDo):
        new_side_inputs = []
        for ix, side_input in enumerate(transform_node.side_inputs):
          access_pattern = side_input._side_input_data().access_pattern
          if access_pattern == common_urns.side_inputs.ITERABLE.urn:
            if use_unified_worker or not use_fn_api:
              # TODO(BEAM-9173): Stop patching up the access pattern to
              # appease Dataflow when using the UW and hardcode the output
              # type to be Any since the Dataflow JSON and pipeline proto
              # can differ in coders which leads to encoding/decoding issues
              # within the runner.
              side_input.pvalue.element_type = typehints.Any
              new_side_input = _DataflowIterableSideInput(side_input)
            else:
              # Add a map to ('', value) as Dataflow currently only handles
              # keyed side inputs when using the JRH.
              pipeline = side_input.pvalue.pipeline
              new_side_input = _DataflowIterableAsMultimapSideInput(
                  side_input)
              new_side_input.pvalue = beam.pvalue.PCollection(
                  pipeline,
                  element_type=typehints.KV[bytes,
                                            side_input.pvalue.element_type],
                  is_bounded=side_input.pvalue.is_bounded)
              # Splice a MapToVoidKey transform into the graph so the side
              # input arrives keyed by the empty bytes key.
              parent = transform_node.parent or pipeline._root_transform()
              map_to_void_key = beam.pipeline.AppliedPTransform(
                  parent,
                  beam.Map(lambda x: (b'', x)),
                  transform_node.full_label + '/MapToVoidKey%s' % ix,
                  {'input': side_input.pvalue})
              new_side_input.pvalue.producer = map_to_void_key
              map_to_void_key.add_output(new_side_input.pvalue, None)
              parent.add_part(map_to_void_key)
          elif access_pattern == common_urns.side_inputs.MULTIMAP.urn:
            # Ensure the input coder is a KV coder and patch up the
            # access pattern to appease Dataflow.
            side_input.pvalue.element_type = typehints.coerce_to_kv_type(
                side_input.pvalue.element_type, transform_node.full_label)
            side_input.pvalue.requires_deterministic_key_coder = (
                deterministic_key_coders and transform_node.full_label)
            new_side_input = _DataflowMultimapSideInput(side_input)
          else:
            raise ValueError(
                'Unsupported access pattern for %r: %r' %
                (transform_node.full_label, access_pattern))
          new_side_inputs.append(new_side_input)
        # The rewritten side inputs are only installed on the FnAPI path.
        if use_fn_api:
          transform_node.side_inputs = new_side_inputs
          transform_node.transform.side_inputs = new_side_inputs

  return SideInputVisitor()
@staticmethod
def flatten_input_visitor():
  """Returns a visitor that aligns Flatten input types with the output."""
  # Imported here to avoid circular dependencies.
  from apache_beam.pipeline import PipelineVisitor

  class FlattenInputVisitor(PipelineVisitor):
    """A visitor that replaces the element type for input ``PCollections``s of
    a ``Flatten`` transform with that of the output ``PCollection``.
    """
    def visit_transform(self, transform_node):
      # Imported here to avoid circular dependencies.
      # pylint: disable=wrong-import-order, wrong-import-position
      from apache_beam import Flatten
      if isinstance(transform_node.transform, Flatten):
        # Dataflow Runner v1 requires Flatten inputs and output to share an
        # element type, so propagate the output type back onto each input.
        output_pcoll = DataflowRunner._only_element(
            transform_node.outputs.values())
        for input_pcoll in transform_node.inputs:
          input_pcoll.element_type = output_pcoll.element_type

  return FlattenInputVisitor()
@staticmethod
def combinefn_visitor():
  """Returns a visitor that rejects CombineFns the legacy worker can't run."""
  # Imported here to avoid circular dependencies.
  from apache_beam.pipeline import PipelineVisitor
  from apache_beam import core

  class CombineFnVisitor(PipelineVisitor):
    """Checks if `CombineFn` has non-default setup or teardown methods.

    If yes, raises `ValueError`.
    """
    def visit_transform(self, applied_transform):
      transform = applied_transform.transform
      if isinstance(transform, core.ParDo) and isinstance(
          transform.fn, core.CombineValuesDoFn):
        if self._overrides_setup_or_teardown(transform.fn.combinefn):
          raise ValueError(
              'CombineFn.setup and CombineFn.teardown are '
              'not supported with non-portable Dataflow '
              'runner. Please use Dataflow Runner V2 instead.')

    @staticmethod
    def _overrides_setup_or_teardown(combinefn):
      # TODO(BEAM-3736): provide an implementation for this method
      # (currently the check is a no-op and always passes).
      return False

  return CombineFnVisitor()
def _adjust_pipeline_for_dataflow_v2(self, pipeline):
  """Coerces GroupByKey inputs to KV types, as the Dataflow service requires."""
  # Dataflow runner requires a KV type for GBK inputs, hence we enforce that
  # here.
  require_deterministic = not pipeline._options.view_as(
      TypeOptions).allow_non_deterministic_key_coders
  pipeline.visit(group_by_key_input_visitor(require_deterministic))
def _check_for_unsupported_features_on_non_portable_worker(self, pipeline):
  # Raises ValueError (via combinefn_visitor) for CombineFns the legacy
  # non-portable worker cannot execute.
  pipeline.visit(self.combinefn_visitor())
def run_pipeline(self, pipeline, options):
  """Remotely executes entire pipeline or parts reachable from node.

  Args:
    pipeline: the Pipeline to translate and submit.
    options: PipelineOptions controlling translation and submission.

  Returns:
    A DataflowPipelineResult (or a dummy DONE PipelineResult on dry runs).
  """
  # Label goog-dataflow-notebook if job is started from notebook.
  if is_in_notebook():
    notebook_version = (
        'goog-dataflow-notebook=' +
        beam.version.__version__.replace('.', '_'))
    if options.view_as(GoogleCloudOptions).labels:
      options.view_as(GoogleCloudOptions).labels.append(notebook_version)
    else:
      options.view_as(GoogleCloudOptions).labels = [notebook_version]

  # Import here to avoid adding the dependency for local running scenarios.
  try:
    # pylint: disable=wrong-import-order, wrong-import-position
    from apache_beam.runners.dataflow.internal import apiclient
  except ImportError:
    raise ImportError(
        'Google Cloud Dataflow runner not available, '
        'please install apache_beam[gcp]')

  debug_options = options.view_as(DebugOptions)
  if pipeline.contains_external_transforms:
    if not apiclient._use_unified_worker(options):
      _LOGGER.info(
          'Automatically enabling Dataflow Runner v2 since the '
          'pipeline used cross-language transforms.')
      # This has to be done before any Fn API specific setup.
      debug_options.add_experiment("use_runner_v2")
    # Dataflow multi-language pipelines require portable job submission.
    if not debug_options.lookup_experiment('use_portable_job_submission'):
      debug_options.add_experiment("use_portable_job_submission")

  self._maybe_add_unified_worker_missing_options(options)

  use_fnapi = apiclient._use_fnapi(options)

  if not use_fnapi:
    self._check_for_unsupported_features_on_non_portable_worker(pipeline)

  # Convert all side inputs into a form acceptable to Dataflow.
  pipeline.visit(
      self.side_input_visitor(
          apiclient._use_unified_worker(options),
          apiclient._use_fnapi(options),
          deterministic_key_coders=not options.view_as(
              TypeOptions).allow_non_deterministic_key_coders))

  # Performing configured PTransform overrides. Note that this is currently
  # done before Runner API serialization, since the new proto needs to contain
  # any added PTransforms.
  pipeline.replace_all(DataflowRunner._PTRANSFORM_OVERRIDES)

  # These two overrides need pipeline/options at construction time, so they
  # cannot live in the class-level override lists.
  from apache_beam.runners.dataflow.ptransform_overrides import WriteToBigQueryPTransformOverride
  from apache_beam.runners.dataflow.ptransform_overrides import GroupIntoBatchesWithShardedKeyPTransformOverride
  pipeline.replace_all([
      WriteToBigQueryPTransformOverride(pipeline, options),
      GroupIntoBatchesWithShardedKeyPTransformOverride(self, options)
  ])

  if use_fnapi and not apiclient._use_unified_worker(options):
    pipeline.replace_all(DataflowRunner._JRH_PTRANSFORM_OVERRIDES)

  from apache_beam.transforms import environments
  if options.view_as(SetupOptions).prebuild_sdk_container_engine:
    # if prebuild_sdk_container_engine is specified we will build a new sdk
    # container image with dependencies pre-installed and use that image,
    # instead of using the inferred default container image.
    self._default_environment = (
        environments.DockerEnvironment.from_options(options))
    options.view_as(WorkerOptions).sdk_container_image = (
        self._default_environment.container_image)
  else:
    self._default_environment = (
        environments.DockerEnvironment.from_container_image(
            apiclient.get_container_image_from_options(options),
            artifacts=environments.python_sdk_dependencies(options),
            resource_hints=environments.resource_hints_from_options(options)))

  # This has to be performed before pipeline proto is constructed to make sure
  # that the changes are reflected in the portable job submission path.
  self._adjust_pipeline_for_dataflow_v2(pipeline)

  # Snapshot the pipeline in a portable proto.
  self.proto_pipeline, self.proto_context = pipeline.to_runner_api(
      return_context=True, default_environment=self._default_environment)

  # Optimize the pipeline if it not streaming and the pre_optimize
  # experiment is set.
  if not options.view_as(StandardOptions).streaming:
    pre_optimize = options.view_as(DebugOptions).lookup_experiment(
        'pre_optimize', 'default').lower()
    from apache_beam.runners.portability.fn_api_runner import translations
    if pre_optimize == 'none':
      phases = []
    elif pre_optimize == 'default' or pre_optimize == 'all':
      phases = [translations.pack_combiners, translations.sort_stages]
    else:
      phases = []
      for phase_name in pre_optimize.split(','):
        # For now, these are all we allow.
        if phase_name in ('pack_combiners', ):
          phases.append(getattr(translations, phase_name))
        else:
          raise ValueError(
              'Unknown or inapplicable phase for pre_optimize: %s' %
              phase_name)
      phases.append(translations.sort_stages)

    if phases:
      self.proto_pipeline = translations.optimize_pipeline(
          self.proto_pipeline,
          phases=phases,
          known_runner_urns=frozenset(),
          partial=True)

  if not use_fnapi:
    # Performing configured PTransform overrides which should not be reflected
    # in the proto representation of the graph.
    pipeline.replace_all(DataflowRunner._NON_PORTABLE_PTRANSFORM_OVERRIDES)

  # Add setup_options for all the BeamPlugin imports
  setup_options = options.view_as(SetupOptions)
  plugins = BeamPlugin.get_all_plugin_paths()
  if setup_options.beam_plugins is not None:
    plugins = list(set(plugins + setup_options.beam_plugins))
  setup_options.beam_plugins = plugins

  # Elevate "min_cpu_platform" to pipeline option, but using the existing
  # experiment.
  debug_options = options.view_as(DebugOptions)
  worker_options = options.view_as(WorkerOptions)
  if worker_options.min_cpu_platform:
    debug_options.add_experiment(
        'min_cpu_platform=' + worker_options.min_cpu_platform)

  # Elevate "enable_streaming_engine" to pipeline option, but using the
  # existing experiment.
  google_cloud_options = options.view_as(GoogleCloudOptions)
  if google_cloud_options.enable_streaming_engine:
    debug_options.add_experiment("enable_windmill_service")
    debug_options.add_experiment("enable_streaming_engine")
  elif (apiclient._use_fnapi(options) and
        apiclient._use_unified_worker(options) and
        options.view_as(StandardOptions).streaming):
    debug_options.add_experiment("enable_windmill_service")
    debug_options.add_experiment("enable_streaming_engine")
  else:
    if (debug_options.lookup_experiment("enable_windmill_service") or
        debug_options.lookup_experiment("enable_streaming_engine")):
      raise ValueError(
          """Streaming engine both disabled and enabled:
          --enable_streaming_engine flag is not set, but
          enable_windmill_service
          and/or enable_streaming_engine experiments are present.
          It is recommended you only set the --enable_streaming_engine flag.""")

  dataflow_worker_jar = getattr(worker_options, 'dataflow_worker_jar', None)
  if dataflow_worker_jar is not None:
    if not apiclient._use_fnapi(options):
      _LOGGER.warning(
          'Typical end users should not use this worker jar feature. '
          'It can only be used when FnAPI is enabled.')
    else:
      debug_options.add_experiment('use_staged_dataflow_worker_jar')

  # Make Dataflow workers use FastAvro on Python 3 unless use_avro experiment
  # is set. Note that use_avro is only interpreted by the Dataflow runner
  # at job submission and is not interpreted by Dataflow service or workers,
  # which by default use avro library unless use_fastavro experiment is set.
  if not debug_options.lookup_experiment('use_avro'):
    debug_options.add_experiment('use_fastavro')

  self.job = apiclient.Job(options, self.proto_pipeline)

  # Dataflow Runner v1 requires output type of the Flatten to be the same as
  # the inputs, hence we enforce that here. Dataflow Runner v2 does not
  # require this.
  pipeline.visit(self.flatten_input_visitor())

  # Trigger a traversal of all reachable nodes.
  self.visit_transforms(pipeline, options)

  test_options = options.view_as(TestOptions)
  # If it is a dry run, return without submitting the job.
  if test_options.dry_run:
    result = PipelineResult(PipelineState.DONE)
    result.wait_until_finish = lambda duration=None: None
    return result

  # Get a Dataflow API client and set its options
  self.dataflow_client = apiclient.DataflowApplicationClient(options)

  # Create the job description and send a request to the service. The result
  # can be None if there is no need to send a request to the service (e.g.
  # template creation). If a request was sent and failed then the call will
  # raise an exception.
  result = DataflowPipelineResult(
      self.dataflow_client.create_job(self.job), self)

  # TODO(BEAM-4274): Circular import runners-metrics. Requires refactoring.
  from apache_beam.runners.dataflow.dataflow_metrics import DataflowMetrics
  self._metrics = DataflowMetrics(self.dataflow_client, result, self.job)
  result.metric_results = self._metrics
  return result
def _maybe_add_unified_worker_missing_options(self, options):
  """Adds the 'beam_fn_api' experiment when the unified worker is in use.

  No-op when the unified-worker experiment flag is absent.
  """
  from apache_beam.runners.dataflow.internal import apiclient
  if not apiclient._use_unified_worker(options):
    return
  debug_options = options.view_as(DebugOptions)
  if not debug_options.lookup_experiment('beam_fn_api'):
    debug_options.add_experiment('beam_fn_api')
def _get_typehint_based_encoding(self, typehint, window_coder):
  """Returns an encoding based on a typehint object."""
  coder = self._get_coder(typehint, window_coder=window_coder)
  return self._get_cloud_encoding(coder)
@staticmethod
def _get_coder(typehint, window_coder):
  """Returns a coder based on a typehint object.

  Wraps the element coder in a WindowedValueCoder when a window coder is
  supplied.
  """
  element_coder = coders.registry.get_coder(typehint)
  if not window_coder:
    return element_coder
  return coders.WindowedValueCoder(element_coder, window_coder=window_coder)
def _get_cloud_encoding(self, coder, unused=None):
  """Returns an encoding based on a coder object.

  Raises:
    TypeError: when *coder* is not a coders.Coder instance.
  """
  if isinstance(coder, coders.Coder):
    return coder.as_cloud_object(self.proto_context.coders)
  raise TypeError(
      'Coder object must inherit from coders.Coder: %s.' % str(coder))
def _get_side_input_encoding(self, input_encoding):
"""Returns an encoding for the output of a view transform.
Args:
input_encoding: encoding of current transform's input. Side inputs need
this because the service will check that input and output types match.
Returns:
An encoding that matches the output and input encoding. This is essential
for the View transforms introduced to produce side inputs to a ParDo.
"""
return {
'@type': 'kind:stream',
'component_encodings': [input_encoding],
'is_stream_like': {
'value': True
},
}
def _get_encoded_output_coder(
    self, transform_node, window_value=True, output_tag=None):
  """Returns the cloud encoding of the coder for the output of a transform.

  Args:
    transform_node: AppliedPTransform whose output coder is encoded.
    window_value: when True, wrap the element coder in a windowed-value
      coder using the output's window coder.
    output_tag: which output to inspect; defaults to the sole output when
      the transform has exactly one.
  """
  if output_tag in transform_node.outputs:
    element_type = transform_node.outputs[output_tag].element_type
  elif len(transform_node.outputs) == 1:
    output_tag = DataflowRunner._only_element(transform_node.outputs.keys())
    # TODO(robertwb): Handle type hints for multi-output transforms.
    element_type = transform_node.outputs[output_tag].element_type
  else:
    # TODO(silviuc): Remove this branch (and assert) when typehints are
    # propagated everywhere. Returning an 'Any' as type hint will trigger
    # usage of the fallback coder (i.e., cPickler).
    element_type = typehints.Any
  if window_value:
    # All outputs have the same windowing. So getting the coder from an
    # arbitrary window is fine.
    output_tag = next(iter(transform_node.outputs.keys()))
    window_coder = (
        transform_node.outputs[output_tag].windowing.windowfn.
        get_window_coder())
  else:
    window_coder = None
  return self._get_typehint_based_encoding(element_type, window_coder)
def get_pcoll_with_auto_sharding(self):
  """Returns unique names of PCollections using autosharding (may be empty)."""
  return getattr(self, '_pcoll_with_auto_sharding', set())
def add_pcoll_with_auto_sharding(self, applied_ptransform):
if not hasattr(self, '_pcoll_with_auto_sharding'):
self.__setattr__('_pcoll_with_auto_sharding', set())
output = DataflowRunner._only_element(applied_ptransform.outputs.keys())
self._pcoll_with_auto_sharding.add(
applied_ptransform.outputs[output]._unique_name())
def _add_step(self, step_kind, step_label, transform_node, side_tags=()):
  """Creates a Step object and adds it to the cache.

  Args:
    step_kind: one of the TransformNames step kinds.
    step_label: user-visible name recorded on the step.
    transform_node: AppliedPTransform this step represents.
    side_tags: extra output tags to cache alongside the main output.

  Returns:
    The newly created apiclient.Step, already appended to the job proto.
  """
  # Import here to avoid adding the dependency for local running scenarios.
  # pylint: disable=wrong-import-order, wrong-import-position
  from apache_beam.runners.dataflow.internal import apiclient
  step = apiclient.Step(step_kind, self._get_unique_step_name())
  self.job.proto.steps.append(step.proto)
  step.add_property(PropertyNames.USER_NAME, step_label)
  # Cache the node/step association for the main output of the transform node.
  # External transforms may not use 'None' as an output tag.
  # NOTE: the conditional expression below groups as
  #   ([None] + list(side_tags)) if None in outputs else list(outputs)
  # i.e. side_tags are only prepended when a main (None-tagged) output exists.
  output_tags = ([None] +
                 list(side_tags) if None in transform_node.outputs.keys() else
                 list(transform_node.outputs.keys()))

  # We have to cache output for all tags since some transforms may produce
  # multiple outputs.
  for output_tag in output_tags:
    self._cache.cache_output(transform_node, output_tag, step)

  # Finally, we add the display data items to the pipeline step.
  # If the transform contains no display data then an empty list is added.
  step.add_property(
      PropertyNames.DISPLAY_DATA,
      [
          item.get_dict()
          for item in DisplayData.create_from(transform_node.transform).items
      ])

  if transform_node.resource_hints:
    step.add_property(
        PropertyNames.RESOURCE_HINTS,
        {
            hint: quote_from_bytes(value)
            for (hint, value) in transform_node.resource_hints.items()
        })

  return step
def _add_singleton_step(
    self,
    label,
    full_label,
    tag,
    input_step,
    windowing_strategy,
    access_pattern):
  """Creates a CollectionToSingleton step used to handle ParDo side inputs.

  Args:
    label: short step name.
    full_label: user-visible step name.
    tag: output tag of the upstream step feeding this singleton.
    input_step: the upstream apiclient.Step.
    windowing_strategy: windowing of the side-input PCollection.
    access_pattern: side-input access pattern URN (iterable or multimap).

  Returns:
    The newly created apiclient.Step.
  """
  # Import here to avoid adding the dependency for local running scenarios.
  from apache_beam.runners.dataflow.internal import apiclient
  step = apiclient.Step(TransformNames.COLLECTION_TO_SINGLETON, label)
  self.job.proto.steps.append(step.proto)
  step.add_property(PropertyNames.USER_NAME, full_label)
  step.add_property(
      PropertyNames.PARALLEL_INPUT,
      {
          '@type': 'OutputReference',
          PropertyNames.STEP_NAME: input_step.proto.name,
          PropertyNames.OUTPUT_NAME: input_step.get_output(tag)
      })
  # Output encoding must match the input encoding (service checks this).
  step.encoding = self._get_side_input_encoding(input_step.encoding)

  output_info = {
      PropertyNames.USER_NAME: '%s.%s' % (full_label, PropertyNames.OUTPUT),
      PropertyNames.ENCODING: step.encoding,
      PropertyNames.OUTPUT_NAME: PropertyNames.OUT
  }
  if common_urns.side_inputs.MULTIMAP.urn == access_pattern:
    # Multimap side inputs are materialized in indexed format.
    output_info[PropertyNames.USE_INDEXED_FORMAT] = True
  step.add_property(PropertyNames.OUTPUT_INFO, [output_info])

  step.add_property(
      PropertyNames.WINDOWING_STRATEGY,
      self.serialize_windowing_strategy(
          windowing_strategy, self._default_environment))
  return step
def run_Impulse(self, transform_node, options):
  """Translates an Impulse transform into a Dataflow Read step."""
  standard_options = options.view_as(StandardOptions)
  debug_options = options.view_as(DebugOptions)
  use_fn_api = (
      debug_options.experiments and
      'beam_fn_api' in debug_options.experiments)
  use_streaming_engine = (
      debug_options.experiments and
      'enable_streaming_engine' in debug_options.experiments and
      'enable_windmill_service' in debug_options.experiments)

  step = self._add_step(
      TransformNames.READ, transform_node.full_label, transform_node)
  if (standard_options.streaming and
      (not use_fn_api or not use_streaming_engine)):
    # Legacy streaming: impulse is emulated via a pubsub starting signal.
    step.add_property(PropertyNames.FORMAT, 'pubsub')
    step.add_property(PropertyNames.PUBSUB_SUBSCRIPTION, '_starting_signal/')
  else:
    step.add_property(PropertyNames.FORMAT, 'impulse')
    # The impulse element is a single empty-bytes value in the global window.
    encoded_impulse_element = coders.WindowedValueCoder(
        coders.BytesCoder(),
        coders.coders.GlobalWindowCoder()).get_impl().encode_nested(
            window.GlobalWindows.windowed_value(b''))

    if use_fn_api:
      encoded_impulse_as_str = self.byte_array_to_json_string(
          encoded_impulse_element)
    else:
      encoded_impulse_as_str = base64.b64encode(
          encoded_impulse_element).decode('ascii')

    step.add_property(PropertyNames.IMPULSE_ELEMENT, encoded_impulse_as_str)

  step.encoding = self._get_encoded_output_coder(transform_node)
  step.add_property(
      PropertyNames.OUTPUT_INFO,
      [{
          PropertyNames.USER_NAME: (
              '%s.%s' % (transform_node.full_label, PropertyNames.OUT)),
          PropertyNames.ENCODING: step.encoding,
          PropertyNames.OUTPUT_NAME: PropertyNames.OUT
      }])
def run_Flatten(self, transform_node, options):
  """Translates a Flatten transform into a Dataflow Flatten step."""
  step = self._add_step(
      TransformNames.FLATTEN, transform_node.full_label, transform_node)
  # Reference every input PCollection's producing step/output.
  inputs = []
  for one_input in transform_node.inputs:
    input_step = self._cache.get_pvalue(one_input)
    inputs.append({
        '@type': 'OutputReference',
        PropertyNames.STEP_NAME: input_step.proto.name,
        PropertyNames.OUTPUT_NAME: input_step.get_output(one_input.tag)
    })
  step.add_property(PropertyNames.INPUTS, inputs)
  step.encoding = self._get_encoded_output_coder(transform_node)
  step.add_property(
      PropertyNames.OUTPUT_INFO,
      [{
          PropertyNames.USER_NAME: (
              '%s.%s' % (transform_node.full_label, PropertyNames.OUT)),
          PropertyNames.ENCODING: step.encoding,
          PropertyNames.OUTPUT_NAME: PropertyNames.OUT
      }])
# TODO(srohde): Remove this after internal usages have been removed.
def apply_GroupByKey(self, transform, pcoll, options):
  """Expands the GroupByKey transform directly (legacy hook)."""
  expanded = transform.expand(pcoll)
  return expanded
def _verify_gbk_coders(self, transform, pcoll):
# Infer coder of parent.
#
# TODO(ccy): make Coder inference and checking less specialized and more
# comprehensive.
parent = pcoll.producer
if parent:
coder = parent.transform._infer_output_coder() # pylint: disable=protected-access
if not coder:
coder = self._get_coder(pcoll.element_type or typehints.Any, None)
if not coder.is_kv_coder():
raise ValueError((
'Coder for the GroupByKey operation "%s" is not a '
'key-value coder: %s.') % (transform.label, coder))
# TODO(robertwb): Update the coder itself if it changed.
coders.registry.verify_deterministic(
coder.key_coder(), 'GroupByKey operation "%s"' % transform.label)
def run_GroupByKey(self, transform_node, options):
  """Translates a GroupByKey transform into a Dataflow Group step."""
  input_tag = transform_node.inputs[0].tag
  input_step = self._cache.get_pvalue(transform_node.inputs[0])

  # Verify that the GBK's parent has a KV coder.
  self._verify_gbk_coders(transform_node.transform, transform_node.inputs[0])

  step = self._add_step(
      TransformNames.GROUP, transform_node.full_label, transform_node)
  step.add_property(
      PropertyNames.PARALLEL_INPUT,
      {
          '@type': 'OutputReference',
          PropertyNames.STEP_NAME: input_step.proto.name,
          PropertyNames.OUTPUT_NAME: input_step.get_output(input_tag)
      })
  step.encoding = self._get_encoded_output_coder(transform_node)
  step.add_property(
      PropertyNames.OUTPUT_INFO,
      [{
          PropertyNames.USER_NAME: (
              '%s.%s' % (transform_node.full_label, PropertyNames.OUT)),
          PropertyNames.ENCODING: step.encoding,
          PropertyNames.OUTPUT_NAME: PropertyNames.OUT
      }])
  # The service needs the windowing strategy to execute the grouping.
  windowing = transform_node.transform.get_windowing(transform_node.inputs)
  step.add_property(
      PropertyNames.SERIALIZED_FN,
      self.serialize_windowing_strategy(windowing, self._default_environment))
def run_ExternalTransform(self, transform_node, options):
  # Adds a dummy step to the Dataflow job description so that inputs and
  # outputs are mapped correctly in the presence of external transforms.
  #
  # Note that Dataflow Python multi-language pipelines use Portable Job
  # Submission by default, hence this step and rest of the Dataflow step
  # definitions defined here are not used at Dataflow service but we have to
  # maintain the mapping correctly till we can fully drop the Dataflow step
  # definitions from the SDK.

  # AppliedTransform node outputs have to be updated to correctly map the
  # outputs for external transforms: re-key each output PCollection by its
  # own tag (external transforms may not use 'None' as an output tag).
  transform_node.outputs = ({
      output.tag: output
      for output in transform_node.outputs.values()
  })

  self.run_Impulse(transform_node, options)
def run_ParDo(self, transform_node, options):
    """Translate a ParDo transform into a Dataflow DO step.

    Handles side inputs (each becomes its own collection-to-singleton step),
    patches side-input ids in the pipeline proto to be globally unique,
    serializes the DoFn payload, and describes main/tagged outputs. The
    statement order matters: protos are patched before SERIALIZED_FN is
    chosen, and output descriptions are derived from the patched proto.
    """
    transform = transform_node.transform
    input_tag = transform_node.inputs[0].tag
    input_step = self._cache.get_pvalue(transform_node.inputs[0])

    # Attach side inputs.
    si_dict = {}
    si_labels = {}
    full_label_counts = defaultdict(int)
    lookup_label = lambda side_pval: si_labels[side_pval]
    named_inputs = transform_node.named_inputs()
    label_renames = {}
    for ix, side_pval in enumerate(transform_node.side_inputs):
        assert isinstance(side_pval, AsSideInput)
        step_name = 'SideInput-' + self._get_unique_step_name()
        # New label is unique pipeline-wide; the old positional label is
        # remembered so proto references can be renamed below.
        si_label = ((SIDE_INPUT_PREFIX + '%d-%s') %
                    (ix, transform_node.full_label))
        old_label = (SIDE_INPUT_PREFIX + '%d') % ix
        label_renames[old_label] = si_label
        assert old_label in named_inputs
        pcollection_label = '%s.%s' % (
            side_pval.pvalue.producer.full_label.split('/')[-1],
            side_pval.pvalue.tag if side_pval.pvalue.tag else 'out')
        si_full_label = '%s/%s(%s.%s)' % (
            transform_node.full_label,
            side_pval.__class__.__name__,
            pcollection_label,
            full_label_counts[pcollection_label])
        # Count the number of times the same PCollection is a side input
        # to the same ParDo.
        full_label_counts[pcollection_label] += 1
        # Emit the helper step that materializes the side input.
        self._add_singleton_step(
            step_name,
            si_full_label,
            side_pval.pvalue.tag,
            self._cache.get_pvalue(side_pval.pvalue),
            side_pval.pvalue.windowing,
            side_pval._side_input_data().access_pattern)
        si_dict[si_label] = {
            '@type': 'OutputReference',
            PropertyNames.STEP_NAME: step_name,
            PropertyNames.OUTPUT_NAME: PropertyNames.OUT
        }
        si_labels[side_pval] = si_label

    # Now create the step for the ParDo transform being handled.
    transform_name = transform_node.full_label.rsplit('/', 1)[-1]
    step = self._add_step(
        TransformNames.DO,
        transform_node.full_label +
        ('/{}'.format(transform_name) if transform_node.side_inputs else ''),
        transform_node,
        transform_node.transform.output_tags)
    # Import here to avoid adding the dependency for local running scenarios.
    # pylint: disable=wrong-import-order, wrong-import-position
    from apache_beam.runners.dataflow.internal import apiclient
    transform_proto = self.proto_context.transforms.get_proto(transform_node)
    transform_id = self.proto_context.transforms.get_id(transform_node)
    use_fnapi = apiclient._use_fnapi(options)
    use_unified_worker = apiclient._use_unified_worker(options)
    # Patch side input ids to be unique across a given pipeline.
    if (label_renames and
        transform_proto.spec.urn == common_urns.primitives.PAR_DO.urn):
        # Patch PTransform proto.
        for old, new in label_renames.items():
            transform_proto.inputs[new] = transform_proto.inputs[old]
            del transform_proto.inputs[old]

        # Patch ParDo proto.
        proto_type, _ = beam.PTransform._known_urns[transform_proto.spec.urn]
        proto = proto_utils.parse_Bytes(transform_proto.spec.payload, proto_type)
        for old, new in label_renames.items():
            proto.side_inputs[new].CopyFrom(proto.side_inputs[old])
            del proto.side_inputs[old]
        transform_proto.spec.payload = proto.SerializeToString()
        # We need to update the pipeline proto (delete-then-copy to replace
        # the entry in place).
        del self.proto_pipeline.components.transforms[transform_id]
        (
            self.proto_pipeline.components.transforms[transform_id].CopyFrom(
                transform_proto))
    # The data transmitted in SERIALIZED_FN is different depending on whether
    # this is a fnapi pipeline or not.
    if (use_fnapi and
        (transform_proto.spec.urn == common_urns.primitives.PAR_DO.urn or
         use_unified_worker)):
        serialized_data = transform_id
    else:
        serialized_data = pickler.dumps(
            self._pardo_fn_data(transform_node, lookup_label))
    step.add_property(PropertyNames.SERIALIZED_FN, serialized_data)
    # TODO(BEAM-8882): Enable once dataflow service doesn't reject this.
    # step.add_property(PropertyNames.PIPELINE_PROTO_TRANSFORM_ID, transform_id)
    step.add_property(
        PropertyNames.PARALLEL_INPUT,
        {
            '@type': 'OutputReference',
            PropertyNames.STEP_NAME: input_step.proto.name,
            PropertyNames.OUTPUT_NAME: input_step.get_output(input_tag)
        })
    # Add side inputs if any.
    step.add_property(PropertyNames.NON_PARALLEL_INPUTS, si_dict)

    # Generate description for the outputs. The output names
    # will be 'None' for main output and '<tag>' for a tagged output.
    outputs = []

    all_output_tags = list(transform_proto.outputs.keys())

    # Some external transforms require output tags to not be modified.
    # So we randomly select one of the output tags as the main output and
    # leave others as side outputs. Transform execution should not change
    # dependending on which output tag we choose as the main output here.
    # Also, some SDKs do not work correctly if output tags are modified. So for
    # external transforms, we leave tags unmodified.
    #
    # Python SDK uses 'None' as the tag of the main output.
    main_output_tag = 'None'

    step.encoding = self._get_encoded_output_coder(
        transform_node, output_tag=main_output_tag)

    side_output_tags = set(all_output_tags).difference({main_output_tag})

    # Add the main output to the description.
    outputs.append({
        PropertyNames.USER_NAME: (
            '%s.%s' % (transform_node.full_label, PropertyNames.OUT)),
        PropertyNames.ENCODING: step.encoding,
        PropertyNames.OUTPUT_NAME: main_output_tag
    })
    for side_tag in side_output_tags:
        # The assumption here is that all outputs will have the same typehint
        # and coder as the main output. This is certainly the case right now
        # but conceivably it could change in the future.
        encoding = self._get_encoded_output_coder(
            transform_node, output_tag=side_tag)
        outputs.append({
            PropertyNames.USER_NAME: (
                '%s.%s' % (transform_node.full_label, side_tag)),
            PropertyNames.ENCODING: encoding,
            PropertyNames.OUTPUT_NAME: side_tag
        })

    step.add_property(PropertyNames.OUTPUT_INFO, outputs)

    # Add the restriction encoding if we are a splittable DoFn
    # and are using the Fn API on the unified worker.
    restriction_coder = transform.get_restriction_coder()
    if restriction_coder:
        step.add_property(
            PropertyNames.RESTRICTION_ENCODING,
            self._get_cloud_encoding(restriction_coder))

    if options.view_as(StandardOptions).streaming:
        is_stateful_dofn = (DoFnSignature(transform.dofn).is_stateful_dofn())
        if is_stateful_dofn:
            step.add_property(PropertyNames.USES_KEYED_STATE, 'true')

            # Also checks whether the step allows shardable keyed states.
            # TODO(BEAM-11360): remove this when migrated to portable job
            # submission since we only consider supporting the property in runner
            # v2.
            for pcoll in transform_node.outputs.values():
                if pcoll._unique_name() in self.get_pcoll_with_auto_sharding():
                    step.add_property(PropertyNames.ALLOWS_SHARDABLE_STATE, 'true')
                    # Currently we only allow auto-sharding to be enabled through the
                    # GroupIntoBatches transform. So we also add the following property
                    # which GroupIntoBatchesDoFn has, to allow the backend to perform
                    # graph optimization.
                    step.add_property(PropertyNames.PRESERVES_KEYS, 'true')
                    break
@staticmethod
def _pardo_fn_data(transform_node, get_label):
    """Return the picklable payload describing a ParDo's DoFn.

    The tuple mirrors what the legacy (non-FnAPI) worker expects:
    (fn, args, kwargs, side-input descriptors, input windowing).
    `get_label` maps each side-input placeholder to its unique step label.
    """
    transform = transform_node.transform
    si_tags_and_types = [  # pylint: disable=protected-access
        (get_label(side_pval), side_pval.__class__, side_pval._view_options())
        for side_pval in transform_node.side_inputs]
    return (
        transform.fn,
        transform.args,
        transform.kwargs,
        si_tags_and_types,
        transform_node.inputs[0].windowing)
def run_CombineValuesReplacement(self, transform_node, options):
    """Translate a replaced CombineValues transform into a COMBINE step.

    Note the double `.transform`: the node wraps the replacement transform,
    whose own `.transform` is the original CombineValues.
    """
    transform = transform_node.transform.transform
    input_tag = transform_node.inputs[0].tag
    input_step = self._cache.get_pvalue(transform_node.inputs[0])
    step = self._add_step(
        TransformNames.COMBINE, transform_node.full_label, transform_node)
    transform_id = self.proto_context.transforms.get_id(transform_node.parent)

    # The data transmitted in SERIALIZED_FN is different depending on whether
    # this is a fnapi pipeline or not.
    from apache_beam.runners.dataflow.internal import apiclient
    use_fnapi = apiclient._use_fnapi(options)
    if use_fnapi:
        # Fnapi pipelines send the transform ID of the CombineValues transform's
        # parent composite because Dataflow expects the ID of a CombinePerKey
        # transform.
        serialized_data = transform_id
    else:
        # Combiner functions do not take deferred side-inputs (i.e. PValues) and
        # therefore the code to handle extra args/kwargs is simpler than for the
        # DoFn's of the ParDo transform. In the last, empty argument is where
        # side inputs information would go.
        serialized_data = pickler.dumps(
            (transform.fn, transform.args, transform.kwargs, ()))
    step.add_property(PropertyNames.SERIALIZED_FN, serialized_data)
    # TODO(BEAM-8882): Enable once dataflow service doesn't reject this.
    # step.add_property(PropertyNames.PIPELINE_PROTO_TRANSFORM_ID, transform_id)
    step.add_property(
        PropertyNames.PARALLEL_INPUT,
        {
            '@type': 'OutputReference',
            PropertyNames.STEP_NAME: input_step.proto.name,
            PropertyNames.OUTPUT_NAME: input_step.get_output(input_tag)
        })
    # Note that the accumulator must not have a WindowedValue encoding, while
    # the output of this step does in fact have a WindowedValue encoding.
    accumulator_encoding = self._get_cloud_encoding(
        transform.fn.get_accumulator_coder())
    output_encoding = self._get_encoded_output_coder(transform_node)

    step.encoding = output_encoding
    step.add_property(PropertyNames.ENCODING, accumulator_encoding)
    # Generate description for main output 'out.'
    outputs = []
    # Add the main output to the description.
    outputs.append({
        PropertyNames.USER_NAME: (
            '%s.%s' % (transform_node.full_label, PropertyNames.OUT)),
        PropertyNames.ENCODING: step.encoding,
        PropertyNames.OUTPUT_NAME: PropertyNames.OUT
    })
    step.add_property(PropertyNames.OUTPUT_INFO, outputs)
def run_Read(self, transform_node, options):
    """Translate a Read transform into a Dataflow READ step.

    Dispatches on the source kind: a custom (pickled) source when the source
    object has no `format` attribute, otherwise the built-in 'text',
    'bigquery', or 'pubsub' formats. Raises ValueError for unsupported
    formats or for format/streaming-mode mismatches.
    """
    transform = transform_node.transform
    step = self._add_step(
        TransformNames.READ, transform_node.full_label, transform_node)
    # TODO(mairbek): refactor if-else tree to use registerable functions.
    # Initialize the source specific properties.

    standard_options = options.view_as(StandardOptions)
    if not hasattr(transform.source, 'format'):
        # If a format is not set, we assume the source to be a custom source.
        source_dict = {}

        source_dict['spec'] = {
            '@type': names.SOURCE_TYPE,
            names.SERIALIZED_SOURCE_KEY: pickler.dumps(transform.source)
        }

        try:
            source_dict['metadata'] = {
                'estimated_size_bytes': json_value.get_typed_value_descriptor(
                    transform.source.estimate_size())
            }
        except error.RuntimeValueProviderError:
            # Size estimation is best effort, and this error is by value provider.
            _LOGGER.info(
                'Could not estimate size of source %r due to ' + \
                'RuntimeValueProviderError', transform.source)
        except Exception:  # pylint: disable=broad-except
            # Size estimation is best effort. So we log the error and continue.
            _LOGGER.info(
                'Could not estimate size of source %r due to an exception: %s',
                transform.source,
                traceback.format_exc())

        step.add_property(PropertyNames.SOURCE_STEP_INPUT, source_dict)
    elif transform.source.format == 'text':
        step.add_property(PropertyNames.FILE_PATTERN, transform.source.path)
    elif transform.source.format == 'bigquery':
        if standard_options.streaming:
            raise ValueError(
                'BigQuery source is not currently available for use '
                'in streaming pipelines.')
        debug_options = options.view_as(DebugOptions)
        use_fn_api = (
            debug_options.experiments and
            'beam_fn_api' in debug_options.experiments)
        if use_fn_api:
            raise ValueError(BQ_SOURCE_UW_ERROR)
        step.add_property(PropertyNames.BIGQUERY_EXPORT_FORMAT, 'FORMAT_AVRO')
        # TODO(silviuc): Add table validation if transform.source.validate.
        if transform.source.table_reference is not None:
            step.add_property(
                PropertyNames.BIGQUERY_DATASET,
                transform.source.table_reference.datasetId)
            step.add_property(
                PropertyNames.BIGQUERY_TABLE,
                transform.source.table_reference.tableId)
            # If project owning the table was not specified then the project owning
            # the workflow (current project) will be used.
            if transform.source.table_reference.projectId is not None:
                step.add_property(
                    PropertyNames.BIGQUERY_PROJECT,
                    transform.source.table_reference.projectId)
        elif transform.source.query is not None:
            step.add_property(PropertyNames.BIGQUERY_QUERY, transform.source.query)
            step.add_property(
                PropertyNames.BIGQUERY_USE_LEGACY_SQL,
                transform.source.use_legacy_sql)
            step.add_property(
                PropertyNames.BIGQUERY_FLATTEN_RESULTS,
                transform.source.flatten_results)
        else:
            raise ValueError(
                'BigQuery source %r must specify either a table or'
                ' a query' % transform.source)
        if transform.source.kms_key is not None:
            step.add_property(
                PropertyNames.BIGQUERY_KMS_KEY, transform.source.kms_key)
    elif transform.source.format == 'pubsub':
        if not standard_options.streaming:
            raise ValueError(
                'Cloud Pub/Sub is currently available for use '
                'only in streaming pipelines.')
        # Only one of topic or subscription should be set.
        if transform.source.full_subscription:
            step.add_property(
                PropertyNames.PUBSUB_SUBSCRIPTION,
                transform.source.full_subscription)
        elif transform.source.full_topic:
            step.add_property(
                PropertyNames.PUBSUB_TOPIC, transform.source.full_topic)
        if transform.source.id_label:
            step.add_property(
                PropertyNames.PUBSUB_ID_LABEL, transform.source.id_label)
        if transform.source.with_attributes:
            # Setting this property signals Dataflow runner to return full
            # PubsubMessages instead of just the data part of the payload.
            step.add_property(PropertyNames.PUBSUB_SERIALIZED_ATTRIBUTES_FN, '')

        if transform.source.timestamp_attribute is not None:
            step.add_property(
                PropertyNames.PUBSUB_TIMESTAMP_ATTRIBUTE,
                transform.source.timestamp_attribute)
    else:
        raise ValueError(
            'Source %r has unexpected format %s.' %
            (transform.source, transform.source.format))

    if not hasattr(transform.source, 'format'):
        step.add_property(PropertyNames.FORMAT, names.SOURCE_FORMAT)
    else:
        step.add_property(PropertyNames.FORMAT, transform.source.format)

    # Wrap coder in WindowedValueCoder: this is necessary as the encoding of a
    # step should be the type of value outputted by each step. Read steps
    # automatically wrap output values in a WindowedValue wrapper, if necessary.
    # This is also necessary for proper encoding for size estimation.
    # Using a GlobalWindowCoder as a place holder instead of the default
    # PickleCoder because GlobalWindowCoder is known coder.
    # TODO(robertwb): Query the collection for the windowfn to extract the
    # correct coder.
    coder = coders.WindowedValueCoder(
        coders.registry.get_coder(transform_node.outputs[None].element_type),
        coders.coders.GlobalWindowCoder())

    step.encoding = self._get_cloud_encoding(coder)
    step.add_property(
        PropertyNames.OUTPUT_INFO,
        [{
            PropertyNames.USER_NAME: (
                '%s.%s' % (transform_node.full_label, PropertyNames.OUT)),
            PropertyNames.ENCODING: step.encoding,
            PropertyNames.OUTPUT_NAME: PropertyNames.OUT
        }])
def run__NativeWrite(self, transform_node, options):
    """Translate a native Write transform into a Dataflow WRITE step.

    Dispatches on the sink format ('text', 'bigquery', 'pubsub'); raises
    ValueError for anything else or for pubsub in a batch pipeline.
    """
    transform = transform_node.transform
    input_tag = transform_node.inputs[0].tag
    input_step = self._cache.get_pvalue(transform_node.inputs[0])
    step = self._add_step(
        TransformNames.WRITE, transform_node.full_label, transform_node)
    # TODO(mairbek): refactor if-else tree to use registerable functions.
    # Initialize the sink specific properties.
    if transform.sink.format == 'text':
        # Note that it is important to use typed properties (@type/value dicts)
        # for non-string properties and also for empty strings. For example,
        # in the code below the num_shards must have type and also
        # file_name_suffix and shard_name_template (could be empty strings).
        step.add_property(
            PropertyNames.FILE_NAME_PREFIX,
            transform.sink.file_name_prefix,
            with_type=True)
        step.add_property(
            PropertyNames.FILE_NAME_SUFFIX,
            transform.sink.file_name_suffix,
            with_type=True)
        step.add_property(
            PropertyNames.SHARD_NAME_TEMPLATE,
            transform.sink.shard_name_template,
            with_type=True)
        if transform.sink.num_shards > 0:
            step.add_property(
                PropertyNames.NUM_SHARDS, transform.sink.num_shards, with_type=True)
        # TODO(silviuc): Implement sink validation.
        step.add_property(PropertyNames.VALIDATE_SINK, False, with_type=True)
    elif transform.sink.format == 'bigquery':
        # TODO(silviuc): Add table validation if transform.sink.validate.
        step.add_property(
            PropertyNames.BIGQUERY_DATASET,
            transform.sink.table_reference.datasetId)
        step.add_property(
            PropertyNames.BIGQUERY_TABLE, transform.sink.table_reference.tableId)
        # If project owning the table was not specified then the project owning
        # the workflow (current project) will be used.
        if transform.sink.table_reference.projectId is not None:
            step.add_property(
                PropertyNames.BIGQUERY_PROJECT,
                transform.sink.table_reference.projectId)
        step.add_property(
            PropertyNames.BIGQUERY_CREATE_DISPOSITION,
            transform.sink.create_disposition)
        step.add_property(
            PropertyNames.BIGQUERY_WRITE_DISPOSITION,
            transform.sink.write_disposition)
        if transform.sink.table_schema is not None:
            step.add_property(
                PropertyNames.BIGQUERY_SCHEMA, transform.sink.schema_as_json())
        if transform.sink.kms_key is not None:
            step.add_property(
                PropertyNames.BIGQUERY_KMS_KEY, transform.sink.kms_key)
    elif transform.sink.format == 'pubsub':
        standard_options = options.view_as(StandardOptions)
        if not standard_options.streaming:
            raise ValueError(
                'Cloud Pub/Sub is currently available for use '
                'only in streaming pipelines.')
        step.add_property(PropertyNames.PUBSUB_TOPIC, transform.sink.full_topic)
        if transform.sink.id_label:
            step.add_property(
                PropertyNames.PUBSUB_ID_LABEL, transform.sink.id_label)
        # Setting this property signals Dataflow runner that the PCollection
        # contains PubsubMessage objects instead of just raw data.
        step.add_property(PropertyNames.PUBSUB_SERIALIZED_ATTRIBUTES_FN, '')
        if transform.sink.timestamp_attribute is not None:
            step.add_property(
                PropertyNames.PUBSUB_TIMESTAMP_ATTRIBUTE,
                transform.sink.timestamp_attribute)
    else:
        raise ValueError(
            'Sink %r has unexpected format %s.' %
            (transform.sink, transform.sink.format))
    step.add_property(PropertyNames.FORMAT, transform.sink.format)

    # Wrap coder in WindowedValueCoder: this is necessary for proper encoding
    # for size estimation. Using a GlobalWindowCoder as a place holder instead
    # of the default PickleCoder because GlobalWindowCoder is known coder.
    # TODO(robertwb): Query the collection for the windowfn to extract the
    # correct coder.
    coder = coders.WindowedValueCoder(
        transform.sink.coder, coders.coders.GlobalWindowCoder())
    step.encoding = self._get_cloud_encoding(coder)
    step.add_property(PropertyNames.ENCODING, step.encoding)
    step.add_property(
        PropertyNames.PARALLEL_INPUT,
        {
            '@type': 'OutputReference',
            PropertyNames.STEP_NAME: input_step.proto.name,
            PropertyNames.OUTPUT_NAME: input_step.get_output(input_tag)
        })
def run_TestStream(self, transform_node, options):
    """Translate a TestStream transform into a streaming READ step.

    Serializes the TestStream's events (elements, processing-time advances,
    watermark advances) into a TestStreamPayload proto attached to the step.
    Only valid in streaming pipelines.
    """
    from apache_beam.testing.test_stream import ElementEvent
    from apache_beam.testing.test_stream import ProcessingTimeEvent
    from apache_beam.testing.test_stream import WatermarkEvent
    standard_options = options.view_as(StandardOptions)
    if not standard_options.streaming:
        raise ValueError(
            'TestStream is currently available for use '
            'only in streaming pipelines.')

    transform = transform_node.transform
    step = self._add_step(
        TransformNames.READ, transform_node.full_label, transform_node)
    step.add_property(
        PropertyNames.SERIALIZED_FN,
        self.proto_context.transforms.get_id(transform_node))
    step.add_property(PropertyNames.FORMAT, 'test_stream')
    test_stream_payload = beam_runner_api_pb2.TestStreamPayload()
    # TestStream source doesn't do any decoding of elements,
    # so we won't set test_stream_payload.coder_id.
    output_coder = transform._infer_output_coder()  # pylint: disable=protected-access
    for event in transform._events:
        new_event = test_stream_payload.events.add()
        if isinstance(event, ElementEvent):
            for tv in event.timestamped_values:
                element = new_event.element_event.elements.add()
                element.encoded_element = output_coder.encode(tv.value)
                element.timestamp = tv.timestamp.micros
        elif isinstance(event, ProcessingTimeEvent):
            new_event.processing_time_event.advance_duration = (
                event.advance_by.micros)
        elif isinstance(event, WatermarkEvent):
            new_event.watermark_event.new_watermark = event.new_watermark.micros
    serialized_payload = self.byte_array_to_json_string(
        test_stream_payload.SerializeToString())
    step.add_property(PropertyNames.SERIALIZED_TEST_STREAM, serialized_payload)

    step.encoding = self._get_encoded_output_coder(transform_node)
    step.add_property(
        PropertyNames.OUTPUT_INFO,
        [{
            PropertyNames.USER_NAME: (
                '%s.%s' % (transform_node.full_label, PropertyNames.OUT)),
            PropertyNames.ENCODING: step.encoding,
            PropertyNames.OUTPUT_NAME: PropertyNames.OUT
        }])

# We must mark this method as not a test or else its name is a matcher for
# nosetest tests.
run_TestStream.__test__ = False  # type: ignore[attr-defined]
@classmethod
def serialize_windowing_strategy(cls, windowing, default_environment):
    """Serialize a windowing strategy to a JSON-safe string.

    Converts the strategy to its runner-API proto, bundles it with its
    pipeline components, and percent-encodes the serialized bytes.
    """
    from apache_beam.runners import pipeline_context
    ctx = pipeline_context.PipelineContext(
        default_environment=default_environment)
    strategy_proto = windowing.to_runner_api(ctx)
    message = beam_runner_api_pb2.MessageWithComponents(
        components=ctx.to_runner_api(),
        windowing_strategy=strategy_proto)
    return cls.byte_array_to_json_string(message.SerializeToString())
@classmethod
def deserialize_windowing_strategy(cls, serialized_data):
    """Inverse of serialize_windowing_strategy.

    Decodes the percent-encoded bytes, parses the MessageWithComponents
    proto, and rebuilds the Windowing object from its runner-API form.
    """
    # Imported here to avoid circular dependencies.
    # pylint: disable=wrong-import-order, wrong-import-position
    from apache_beam.runners import pipeline_context
    from apache_beam.transforms.core import Windowing
    proto = beam_runner_api_pb2.MessageWithComponents()
    proto.ParseFromString(cls.json_string_to_byte_array(serialized_data))
    return Windowing.from_runner_api(
        proto.windowing_strategy,
        pipeline_context.PipelineContext(proto.components))
@staticmethod
def byte_array_to_json_string(raw_bytes):
"""Implements org.apache.beam.sdk.util.StringUtils.byteArrayToJsonString."""
return quote(raw_bytes)
@staticmethod
def json_string_to_byte_array(encoded_string):
"""Implements org.apache.beam.sdk.util.StringUtils.jsonStringToByteArray."""
return unquote_to_bytes(encoded_string)
def get_default_gcp_region(self):
    """Get a default value for Google Cloud region according to
    https://cloud.google.com/compute/docs/gcloud-compute/#default-properties.
    If no default can be found, returns None.

    Resolution order: the CLOUDSDK_COMPUTE_REGION environment variable, then
    the gcloud CLI's configured compute/region, else None.
    """
    environment_region = os.environ.get('CLOUDSDK_COMPUTE_REGION')
    if environment_region:
        _LOGGER.info(
            'Using default GCP region %s from $CLOUDSDK_COMPUTE_REGION',
            environment_region)
        return environment_region
    try:
        cmd = ['gcloud', 'config', 'get-value', 'compute/region']
        # Spawning `gcloud` is best effort; stderr is suppressed so a missing
        # or unconfigured CLI stays quiet.
        raw_output = processes.check_output(cmd, stderr=DEVNULL)
        formatted_output = raw_output.decode('utf-8').strip()
        if formatted_output:
            _LOGGER.info(
                'Using default GCP region %s from `%s`',
                formatted_output,
                ' '.join(cmd))
            return formatted_output
    except RuntimeError:
        # gcloud missing or failed: fall through to None.
        pass
    return None
class _DataflowSideInput(beam.pvalue.AsSideInput):
    """Wraps a side input as a dataflow-compatible side input.

    Subclasses are expected to set ``self._data`` (a SideInputData) in their
    constructors; this base class just exposes it through the AsSideInput
    protocol methods.
    """
    def _view_options(self):
        # Legacy-worker serialization of the side-input configuration.
        return {
            'data': self._data,
        }

    def _side_input_data(self):
        return self._data
class _DataflowIterableAsMultimapSideInput(_DataflowSideInput):
    """Wraps an iterable side input as dataflow-compatible side input.

    Presents an ITERABLE-access side input to Dataflow using the MULTIMAP
    access pattern: all values are stored under the single empty key ``b''``
    and the original iterable view_fn is applied to that key's values.
    """
    def __init__(self, side_input):
        # pylint: disable=protected-access
        side_input_data = side_input._side_input_data()
        assert (
            side_input_data.access_pattern == common_urns.side_inputs.ITERABLE.urn)
        iterable_view_fn = side_input_data.view_fn
        self._data = beam.pvalue.SideInputData(
            common_urns.side_inputs.MULTIMAP.urn,
            side_input_data.window_mapping_fn,
            # Look up the empty key and reuse the iterable view function.
            lambda multimap: iterable_view_fn(multimap[b'']))
class _DataflowIterableSideInput(_DataflowSideInput):
    """Wraps an iterable side input as dataflow-compatible side input.

    Pass-through wrapper: keeps the ITERABLE access pattern and the original
    window-mapping and view functions unchanged.
    """
    def __init__(self, side_input):
        # pylint: disable=protected-access
        self.pvalue = side_input.pvalue
        side_input_data = side_input._side_input_data()
        assert (
            side_input_data.access_pattern == common_urns.side_inputs.ITERABLE.urn)
        self._data = beam.pvalue.SideInputData(
            common_urns.side_inputs.ITERABLE.urn,
            side_input_data.window_mapping_fn,
            side_input_data.view_fn)
class _DataflowMultimapSideInput(_DataflowSideInput):
    """Wraps a multimap side input as dataflow-compatible side input.

    Pass-through wrapper: keeps the MULTIMAP access pattern and the original
    window-mapping and view functions unchanged.
    """
    def __init__(self, side_input):
        # pylint: disable=protected-access
        self.pvalue = side_input.pvalue
        side_input_data = side_input._side_input_data()
        assert (
            side_input_data.access_pattern == common_urns.side_inputs.MULTIMAP.urn)
        self._data = beam.pvalue.SideInputData(
            common_urns.side_inputs.MULTIMAP.urn,
            side_input_data.window_mapping_fn,
            side_input_data.view_fn)
class DataflowPipelineResult(PipelineResult):
    """Represents the state of a pipeline run on the Dataflow service."""
    def __init__(self, job, runner):
        """Initialize a new DataflowPipelineResult instance.

        Args:
          job: Job message from the Dataflow API. Could be :data:`None` if a job
            request was not sent to Dataflow service (e.g. template jobs).
          runner: DataflowRunner instance.
        """
        self._job = job
        self._runner = runner
        self.metric_results = None

    def _update_job(self):
        # We need the job id to be able to update job information. There is no need
        # to update the job if we are in a known terminal state.
        if self.has_job and not self.is_in_terminal_state():
            self._job = self._runner.dataflow_client.get_job(self.job_id())

    def job_id(self):
        return self._job.id

    def metrics(self):
        return self.metric_results

    @property
    def has_job(self):
        # True when a job message was actually obtained from the service.
        return self._job is not None

    def _get_job_state(self):
        """Map the Dataflow API job state enum to a PipelineState value."""
        values_enum = dataflow_api.Job.CurrentStateValueValuesEnum

        # Ordered by the enum values. Values that may be introduced in
        # future versions of Dataflow API are considered UNRECOGNIZED by the SDK.
        api_jobstate_map = defaultdict(
            lambda: PipelineState.UNRECOGNIZED,
            {
                values_enum.JOB_STATE_UNKNOWN: PipelineState.UNKNOWN,
                values_enum.JOB_STATE_STOPPED: PipelineState.STOPPED,
                values_enum.JOB_STATE_RUNNING: PipelineState.RUNNING,
                values_enum.JOB_STATE_DONE: PipelineState.DONE,
                values_enum.JOB_STATE_FAILED: PipelineState.FAILED,
                values_enum.JOB_STATE_CANCELLED: PipelineState.CANCELLED,
                values_enum.JOB_STATE_UPDATED: PipelineState.UPDATED,
                values_enum.JOB_STATE_DRAINING: PipelineState.DRAINING,
                values_enum.JOB_STATE_DRAINED: PipelineState.DRAINED,
                values_enum.JOB_STATE_PENDING: PipelineState.PENDING,
                values_enum.JOB_STATE_CANCELLING: PipelineState.CANCELLING,
            })

        return (
            api_jobstate_map[self._job.currentState]
            if self._job.currentState else PipelineState.UNKNOWN)

    @property
    def state(self):
        """Return the current state of the remote job.

        Returns:
          A PipelineState object.
        """
        if not self.has_job:
            return PipelineState.UNKNOWN

        self._update_job()

        return self._get_job_state()

    def is_in_terminal_state(self):
        # A result without a job is treated as terminal (nothing to wait on).
        if not self.has_job:
            return True

        return PipelineState.is_terminal(self._get_job_state())

    def wait_until_finish(self, duration=None):
        """Block until the job terminates (or `duration` elapses).

        Raises DataflowRuntimeException when the job terminates in a
        non-DONE state.
        """
        if not self.is_in_terminal_state():
            if not self.has_job:
                raise IOError('Failed to get the Dataflow job id.')

            thread = threading.Thread(
                target=DataflowRunner.poll_for_job_completion,
                args=(self._runner, self, duration))

            # Mark the thread as a daemon thread so a keyboard interrupt on the main
            # thread will terminate everything. This is also the reason we will not
            # use thread.join() to wait for the polling thread.
            thread.daemon = True
            thread.start()
            while thread.is_alive():
                time.sleep(5.0)

            # TODO: Merge the termination code in poll_for_job_completion and
            # is_in_terminal_state.
            terminated = self.is_in_terminal_state()
            assert duration or terminated, (
                'Job did not reach to a terminal state after waiting indefinitely.')

            if terminated and self.state != PipelineState.DONE:
                # TODO(BEAM-1290): Consider converting this to an error log based on
                # the resolution of the issue.
                raise DataflowRuntimeException(
                    'Dataflow pipeline failed. State: %s, Error:\n%s' %
                    (self.state, getattr(self._runner, 'last_error_msg', None)),
                    self)
        return self.state

    def cancel(self):
        """Request cancellation of the remote job; returns the final state."""
        if not self.has_job:
            raise IOError('Failed to get the Dataflow job id.')

        self._update_job()

        if self.is_in_terminal_state():
            _LOGGER.warning(
                'Cancel failed because job %s is already terminated in state %s.',
                self.job_id(),
                self.state)
        else:
            if not self._runner.dataflow_client.modify_job_state(
                self.job_id(), 'JOB_STATE_CANCELLED'):
                cancel_failed_message = (
                    'Failed to cancel job %s, please go to the Developers Console to '
                    'cancel it manually.') % self.job_id()
                _LOGGER.error(cancel_failed_message)
                raise DataflowRuntimeException(cancel_failed_message, self)
        return self.state

    def __str__(self):
        return '<%s %s %s>' % (self.__class__.__name__, self.job_id(), self.state)

    def __repr__(self):
        return '<%s %s at %s>' % (self.__class__.__name__, self._job, hex(id(self)))
class DataflowRuntimeException(Exception):
    """Indicates an error has occurred in running this pipeline.

    Carries the associated pipeline ``result`` so callers can inspect the
    failed run.
    """
    def __init__(self, msg, result):
        self.result = result
        super().__init__(msg)
| 42.688397 | 114 | 0.707895 |
e82ad0b03850129eb32f3d4dcbf8cb1d1c9bcc08 | 1,221 | py | Python | examples/resource.py | ROZBEH/rdflib | 5b9da927714a92a8888407f42b46249002964e8e | [
"BSD-3-Clause"
] | 2 | 2021-02-06T17:36:05.000Z | 2021-04-21T07:33:39.000Z | examples/resource.py | ROZBEH/rdflib | 5b9da927714a92a8888407f42b46249002964e8e | [
"BSD-3-Clause"
] | 8 | 2021-07-19T07:05:46.000Z | 2022-03-28T07:08:07.000Z | examples/resource.py | ROZBEH/rdflib | 5b9da927714a92a8888407f42b46249002964e8e | [
"BSD-3-Clause"
] | 4 | 2020-05-08T08:36:19.000Z | 2020-05-28T07:23:23.000Z | """
RDFLib has a :class:`~rdflib.resource.Resource` class, for a resource-centric API.
A resource acts like a URIRef with an associated graph, and allows
quickly adding or querying for triples where this resource is the
subject.
"""
from rdflib import Graph, RDF, RDFS, Literal
from rdflib.namespace import FOAF
if __name__ == "__main__":
    # Demonstrates the resource-centric API: each Resource is a node plus the
    # graph it lives in, so triples can be added/queried without repeating the
    # subject.
    g = Graph()

    bob = g.resource("urn:bob")

    bob.set(RDF.type, FOAF.Person)  # .set replaces all other values
    bob.set(FOAF.name, Literal("Bob"))

    bill = g.resource("urn:bill")

    bill.add(RDF.type, FOAF.Person)  # adds to existing values
    bill.add(RDF.type, FOAF.Agent)
    bill.set(RDFS.label, Literal("Bill"))

    bill.add(FOAF.knows, bob)

    # Resources returned when querying are 'auto-boxed' as resources:
    print("Bill's friend: ", bill.value(FOAF.knows).value(FOAF.name))

    # slicing ([] syntax) can also be used:
    print("Bill knows: ")
    for friend in bill[FOAF.knows]:
        print(next(friend[FOAF.name]))

    # or even quicker with paths:
    print("Bill knows: ")
    for friend in bill[FOAF.knows / FOAF.name]:
        print(friend)

    # setting single properties is also possible:
    bill[RDFS.label] = Literal("William")
| 26.543478 | 82 | 0.671581 |
5dc721fbc6163ab0e93a01c8a66ea267b0719514 | 1,426 | py | Python | algos/patterns/heaps/k_ClosestPoints.py | iamlmn/PyDS | 11b00629a91e8231eea7f8feb7c3c6065fdb1ce5 | [
"MIT"
] | null | null | null | algos/patterns/heaps/k_ClosestPoints.py | iamlmn/PyDS | 11b00629a91e8231eea7f8feb7c3c6065fdb1ce5 | [
"MIT"
] | null | null | null | algos/patterns/heaps/k_ClosestPoints.py | iamlmn/PyDS | 11b00629a91e8231eea7f8feb7c3c6065fdb1ce5 | [
"MIT"
] | null | null | null | # Given an array of points in the a 2D2D plane, find ‘K’ closest points to the origin.
# Example 1:
# Input: points = [[1,2],[1,3]], K = 1
# Output: [[1,2]]
# Explanation: The Euclidean distance between (1, 2) and the origin is sqrt(5).
# The Euclidean distance between (1, 3) and the origin is sqrt(10).
# Since sqrt(5) < sqrt(10), therefore (1, 2) is closer to the origin.
# Example 2:
# Input: point = [[1, 3], [3, 4], [2, -1]], K = 2
# Output: [[1, 3], [2, -1]]
import math
from heapq import *
class Points:
    """A point in the 2D plane, identified by its x and y coordinates."""

    def __init__(self, x, y):
        self.x = x
        self.y = y

    def print_point(self):
        """Print the point as ``[x,y]`` without a trailing newline."""
        print("[%s,%s]" % (self.x, self.y), end="")
def find_distance(x, y):
    """Return the Euclidean distance of point (x, y) from the origin.

    Uses math.hypot, which is always non-negative (the previous abs() around
    sqrt was redundant) and avoids intermediate overflow for large inputs.
    """
    return math.hypot(x, y)
def find_closest_points(points, k):
    """Return the k points closest to the origin.

    Maintains a max-heap of size k keyed on negated distance, so the root is
    always the farthest retained point; a new point replaces it only when it
    is strictly closer.

    Bug fixes versus the original:
      * The original compared ``d > maxHeap[0][0]`` — a positive distance
        against the *negated* stored distance — which is true for almost
        every point, so it evicted on every element and kept the wrong set.
        The correct test is ``d < -heap[0][0]``.
      * Heap entries now carry an insertion index as a tie-breaker so equal
        distances never fall through to comparing Points objects (which
        have no ordering and would raise TypeError).
      * Removed a leftover debug print.

    Args:
      points: iterable of objects with numeric ``x`` and ``y`` attributes.
      k: number of closest points to keep (fewer are returned if
        ``len(points) < k``).

    Returns:
      A list of ``(-distance, point)`` pairs (same pair shape as the
      original heap entries), in no particular order.
    """
    heap = []  # entries: (-distance, insertion_index, point)
    for idx, point in enumerate(points):
        d = math.hypot(point.x, point.y)
        if len(heap) < k:
            heappush(heap, (-d, idx, point))
        elif d < -heap[0][0]:
            # Closer than the farthest retained point: replace the root.
            heapreplace(heap, (-d, idx, point))
    return [(neg_d, p) for neg_d, _, p in heap]
if __name__ == '__main__':
    # Demo: find the two points nearest the origin and print them.
    closest = find_closest_points([Points(1, 3), Points(3, 4), Points(2, -1)], 2)
    print("Here are the k points closest to the origin.", closest)
    for dist, pt in closest:
        print([pt.x, pt.y])
| 26.407407 | 86 | 0.569425 |
08027d5807d8efa11c907d7fbed9ed83ab5cd415 | 112,898 | py | Python | venv/lib/python3.8/site-packages/pandas/tests/frame/test_constructors.py | johncollinsai/post-high-frequency-data | 88533b0e0afc7e7f82fee1d3ca4b68abc30aaeb4 | [
"MIT"
] | 1 | 2022-03-17T12:56:14.000Z | 2022-03-17T12:56:14.000Z | venv/lib/python3.8/site-packages/pandas/tests/frame/test_constructors.py | johncollinsai/post-high-frequency-data | 88533b0e0afc7e7f82fee1d3ca4b68abc30aaeb4 | [
"MIT"
] | 5 | 2022-02-13T14:38:04.000Z | 2022-02-15T00:13:07.000Z | venv/lib/python3.8/site-packages/pandas/tests/frame/test_constructors.py | johncollinsai/post-high-frequency-data | 88533b0e0afc7e7f82fee1d3ca4b68abc30aaeb4 | [
"MIT"
] | 4 | 2022-02-04T22:58:27.000Z | 2022-02-14T19:29:18.000Z | from collections import (
OrderedDict,
abc,
)
from datetime import (
date,
datetime,
timedelta,
)
import functools
import itertools
import re
import warnings
import numpy as np
import numpy.ma as ma
import numpy.ma.mrecords as mrecords
import pytest
import pytz
from pandas.compat import np_version_under1p19
import pandas.util._test_decorators as td
from pandas.core.dtypes.common import is_integer_dtype
from pandas.core.dtypes.dtypes import (
DatetimeTZDtype,
IntervalDtype,
PandasDtype,
PeriodDtype,
)
import pandas as pd
from pandas import (
Categorical,
CategoricalIndex,
DataFrame,
DatetimeIndex,
Index,
Interval,
MultiIndex,
Period,
RangeIndex,
Series,
Timedelta,
Timestamp,
cut,
date_range,
isna,
)
import pandas._testing as tm
from pandas.arrays import (
DatetimeArray,
IntervalArray,
PeriodArray,
SparseArray,
)
from pandas.core.api import Int64Index
# NumPy float dtypes exercised by the mixed-dtype constructor tests below.
MIXED_FLOAT_DTYPES = ["float16", "float32", "float64"]
# NumPy unsigned/signed integer dtypes exercised by the same tests.
MIXED_INT_DTYPES = [
    "uint8",
    "uint16",
    "uint32",
    "uint64",
    "int8",
    "int16",
    "int32",
    "int64",
]
class TestDataFrameConstructors:
def test_constructor_from_2d_datetimearray(self, using_array_manager):
    """DataFrame from a 2-D tz-aware DatetimeArray matches per-column construction."""
    dti = date_range("2016-01-01", periods=6, tz="US/Pacific")
    dta = dti._data.reshape(3, 2)

    df = DataFrame(dta)
    expected = DataFrame({0: dta[:, 0], 1: dta[:, 1]})
    tm.assert_frame_equal(df, expected)
    if not using_array_manager:
        # GH#44724 big performance hit if we de-consolidate
        assert len(df._mgr.blocks) == 1
def test_constructor_dict_with_tzaware_scalar(self):
    """A tz-aware Timestamp scalar dict value behaves like a one-element list (GH#42505)."""
    # GH#42505
    dt = Timestamp("2019-11-03 01:00:00-0700").tz_convert("America/Los_Angeles")

    df = DataFrame({"dt": dt}, index=[0])
    expected = DataFrame({"dt": [dt]})
    tm.assert_frame_equal(df, expected)

    # Non-homogeneous
    df = DataFrame({"dt": dt, "value": [1]})
    expected = DataFrame({"dt": [dt], "value": [1]})
    tm.assert_frame_equal(df, expected)
def test_construct_ndarray_with_nas_and_int_dtype(self):
    """NaNs in an ndarray are not cast to int when dtype='i8' is requested; a warning is raised (GH#26919)."""
    # GH#26919 match Series by not casting np.nan to meaningless int
    arr = np.array([[1, np.nan], [2, 3]])
    with tm.assert_produces_warning(FutureWarning):
        df = DataFrame(arr, dtype="i8")
    assert df.values.dtype == arr.dtype
    assert isna(df.iloc[0, 1])

    # check this matches Series behavior
    with tm.assert_produces_warning(FutureWarning):
        ser = Series(arr[0], dtype="i8", name=0)
    expected = df.iloc[0]
    tm.assert_series_equal(ser, expected)
def test_construct_from_list_of_datetimes(self):
    """A list of datetime objects becomes an M8[ns] column."""
    df = DataFrame([datetime.now(), datetime.now()])
    assert df[0].dtype == np.dtype("M8[ns]")
def test_constructor_from_tzaware_datetimeindex(self):
    """Series built from a tz-aware DatetimeIndex keeps the index's dtype (GH#6032)."""
    # don't cast a DatetimeIndex WITH a tz, leave as object
    # GH#6032
    naive = DatetimeIndex(["2013-1-1 13:00", "2013-1-2 14:00"], name="B")
    idx = naive.tz_localize("US/Pacific")

    # Building from the list of scalars is expected to round-trip the dtype.
    expected = Series(np.array(idx.tolist(), dtype="object"), name="B")
    assert expected.dtype == idx.dtype

    # convert index to series
    result = Series(idx)
    tm.assert_series_equal(result, expected)
def test_array_of_dt64_nat_with_td64dtype_raises(self, frame_or_series):
    """datetime64 NaT values with a requested m8[ns] dtype raise ValueError (GH#39462)."""
    # GH#39462
    nat = np.datetime64("NaT", "ns")
    arr = np.array([nat], dtype=object)
    if frame_or_series is DataFrame:
        arr = arr.reshape(1, 1)

    # Message differs by construction path, so accept either.
    msg = "|".join(
        [
            "Could not convert object to NumPy timedelta",
            "Invalid type for timedelta scalar: <class 'numpy.datetime64'>",
        ]
    )
    with pytest.raises(ValueError, match=msg):
        frame_or_series(arr, dtype="m8[ns]")
@pytest.mark.parametrize("kind", ["m", "M"])
def test_datetimelike_values_with_object_dtype(self, kind, frame_or_series):
    """With dtype=object, dt64/td64 values become Timestamp/Timedelta scalars, not numpy scalars."""
    # with dtype=object, we should cast dt64 values to Timestamps, not pydatetimes
    if kind == "M":
        dtype = "M8[ns]"
        scalar_type = Timestamp
    else:
        dtype = "m8[ns]"
        scalar_type = Timedelta

    arr = np.arange(6, dtype="i8").view(dtype).reshape(3, 2)
    if frame_or_series is Series:
        arr = arr[:, 0]

    obj = frame_or_series(arr, dtype=object)
    assert obj._mgr.arrays[0].dtype == object
    assert isinstance(obj._mgr.arrays[0].ravel()[0], scalar_type)

    # go through a different path in internals.construction
    obj = frame_or_series(frame_or_series(arr), dtype=object)
    assert obj._mgr.arrays[0].dtype == object
    assert isinstance(obj._mgr.arrays[0].ravel()[0], scalar_type)

    obj = frame_or_series(frame_or_series(arr), dtype=PandasDtype(object))
    assert obj._mgr.arrays[0].dtype == object
    assert isinstance(obj._mgr.arrays[0].ravel()[0], scalar_type)

    if frame_or_series is DataFrame:
        # other paths through internals.construction
        sers = [Series(x) for x in arr]
        obj = frame_or_series(sers, dtype=object)
        assert obj._mgr.arrays[0].dtype == object
        assert isinstance(obj._mgr.arrays[0].ravel()[0], scalar_type)
def test_series_with_name_not_matching_column(self):
    """A Series whose name does not match the requested column yields an empty column (GH#9232)."""
    # GH#9232
    x = Series(range(5), name=1)
    y = Series(range(5), name=0)

    result = DataFrame(x, columns=[0])
    expected = DataFrame([], columns=[0])

    tm.assert_frame_equal(result, expected)

    result = DataFrame(y, columns=[1])
    expected = DataFrame([], columns=[1])

    tm.assert_frame_equal(result, expected)
@pytest.mark.parametrize(
    "constructor",
    [
        lambda: DataFrame(),
        lambda: DataFrame(None),
        lambda: DataFrame({}),
        lambda: DataFrame(()),
        lambda: DataFrame([]),
        lambda: DataFrame(_ for _ in []),
        lambda: DataFrame(range(0)),
        lambda: DataFrame(data=None),
        lambda: DataFrame(data={}),
        lambda: DataFrame(data=()),
        lambda: DataFrame(data=[]),
        lambda: DataFrame(data=(_ for _ in [])),
        lambda: DataFrame(data=range(0)),
    ],
)
def test_empty_constructor(self, constructor):
    """All empty-ish constructor inputs produce a frame with empty index and columns."""
    expected = DataFrame()
    result = constructor()
    assert len(result.index) == 0
    assert len(result.columns) == 0
    tm.assert_frame_equal(result, expected)
@pytest.mark.parametrize(
    "emptylike,expected_index,expected_columns",
    [
        ([[]], RangeIndex(1), RangeIndex(0)),
        ([[], []], RangeIndex(2), RangeIndex(0)),
        ([(_ for _ in [])], RangeIndex(1), RangeIndex(0)),
    ],
)
def test_emptylike_constructor(self, emptylike, expected_index, expected_columns):
    """Lists of empty rows produce a frame with one index entry per row and zero columns."""
    expected = DataFrame(index=expected_index, columns=expected_columns)
    result = DataFrame(emptylike)
    tm.assert_frame_equal(result, expected)
def test_constructor_mixed(self, float_string_frame):
    """Smoke-test construction from a mixed-type dict; string column infers object dtype."""
    index, data = tm.getMixedTypeDict()

    # TODO(wesm), incomplete test?
    indexed_frame = DataFrame(data, index=index)  # noqa
    unindexed_frame = DataFrame(data)  # noqa

    assert float_string_frame["foo"].dtype == np.object_
def test_constructor_cast_failure(self):
    """Uncastable string data with dtype=float warns and stays object; 2-D arrays can't be set as a column."""
    msg = "either all columns will be cast to that dtype, or a TypeError will"
    with tm.assert_produces_warning(FutureWarning, match=msg):
        foo = DataFrame({"a": ["a", "b", "c"]}, dtype=np.float64)
    assert foo["a"].dtype == object

    # GH 3010, constructing with odd arrays
    df = DataFrame(np.ones((4, 2)))

    # this is ok
    df["foo"] = np.ones((4, 2)).tolist()

    # this is not ok
    msg = "Expected a 1D array, got an array with shape \\(4, 2\\)"
    with pytest.raises(ValueError, match=msg):
        df["test"] = np.ones((4, 2))

    # this is ok
    df["foo2"] = np.ones((4, 2)).tolist()
def test_constructor_dtype_copy(self):
    """copy=True makes the new frame independent: writes don't reach the original."""
    orig_df = DataFrame({"col1": [1.0], "col2": [2.0], "col3": [3.0]})

    new_df = DataFrame(orig_df, dtype=float, copy=True)

    new_df["col1"] = 200.0
    assert orig_df["col1"][0] == 1.0
def test_constructor_dtype_nocast_view_dataframe(self):
    """Constructing from a frame with a matching dtype returns a view: writes propagate back."""
    df = DataFrame([[1, 2]])
    should_be_view = DataFrame(df, dtype=df[0].dtype)
    should_be_view[0][0] = 99
    assert df.values[0, 0] == 99
def test_constructor_dtype_nocast_view_2d_array(self, using_array_manager):
    """With BlockManager, DataFrame(ndarray) with matching dtype is a view; ArrayManager keeps contiguous copies."""
    df = DataFrame([[1, 2], [3, 4]], dtype="int64")
    if not using_array_manager:
        should_be_view = DataFrame(df.values, dtype=df[0].dtype)
        should_be_view[0][0] = 97
        assert df.values[0, 0] == 97
    else:
        # INFO(ArrayManager) DataFrame(ndarray) doesn't necessarily preserve
        # a view on the array to ensure contiguous 1D arrays
        df2 = DataFrame(df.values, dtype=df[0].dtype)
        assert df2._mgr.arrays[0].flags.c_contiguous
@td.skip_array_manager_invalid_test
def test_1d_object_array_does_not_copy(self):
    """A 1-D object ndarray is wrapped without copying (GH#39272)."""
    # https://github.com/pandas-dev/pandas/issues/39272
    arr = np.array(["a", "b"], dtype="object")
    df = DataFrame(arr)
    assert np.shares_memory(df.values, arr)
@td.skip_array_manager_invalid_test
def test_2d_object_array_does_not_copy(self):
    """A 2-D object ndarray is wrapped without copying (GH#39272)."""
    # https://github.com/pandas-dev/pandas/issues/39272
    arr = np.array([["a", "b"], ["c", "d"]], dtype="object")
    df = DataFrame(arr)
    assert np.shares_memory(df.values, arr)
def test_constructor_dtype_list_data(self):
    """dtype=object preserves None and string values exactly as given."""
    df = DataFrame([[1, "2"], [None, "a"]], dtype=object)
    assert df.loc[1, 0] is None
    assert df.loc[0, 1] == "2"
@pytest.mark.skipif(np_version_under1p19, reason="NumPy change.")
def test_constructor_list_of_2d_raises(self):
    """A list of 2-D objects (frames or arrays) raises ValueError naming the 3-D shape (GH#32289)."""
    # https://github.com/pandas-dev/pandas/issues/32289
    a = DataFrame()
    b = np.empty((0, 0))
    with pytest.raises(ValueError, match=r"shape=\(1, 0, 0\)"):
        DataFrame([a])

    with pytest.raises(ValueError, match=r"shape=\(1, 0, 0\)"):
        DataFrame([b])

    a = DataFrame({"A": [1, 2]})
    with pytest.raises(ValueError, match=r"shape=\(2, 2, 1\)"):
        DataFrame([a, a])
def test_constructor_mixed_dtypes(self):
    """Mixed float and int dtypes coexist in one frame without upcasting (GH 622)."""

    def _make_mixed_dtypes_df(typ, ad=None):
        # Build a frame with one column per dtype in the chosen family,
        # optionally merged with extra columns passed via `ad`.
        if typ == "int":
            dtypes = MIXED_INT_DTYPES
            arrays = [np.array(np.random.rand(10), dtype=d) for d in dtypes]
        elif typ == "float":
            dtypes = MIXED_FLOAT_DTYPES
            arrays = [
                np.array(np.random.randint(10, size=10), dtype=d) for d in dtypes
            ]

        for d, a in zip(dtypes, arrays):
            assert a.dtype == d
        if ad is None:
            ad = {}
        ad.update({d: a for d, a in zip(dtypes, arrays)})
        return DataFrame(ad)

    def _check_mixed_dtypes(df, dtypes=None):
        # Each dtype-named column must have kept exactly its dtype.
        if dtypes is None:
            dtypes = MIXED_FLOAT_DTYPES + MIXED_INT_DTYPES
        for d in dtypes:
            if d in df:
                assert df.dtypes[d] == d

    # mixed floating and integer coexist in the same frame
    df = _make_mixed_dtypes_df("float")
    _check_mixed_dtypes(df)

    # add lots of types
    df = _make_mixed_dtypes_df("float", {"A": 1, "B": "foo", "C": "bar"})
    _check_mixed_dtypes(df)

    # GH 622
    df = _make_mixed_dtypes_df("int")
    _check_mixed_dtypes(df)
def test_constructor_complex_dtypes(self):
    """complex64 and complex128 columns keep their dtypes (GH10952)."""
    # GH10952
    a = np.random.rand(10).astype(np.complex64)
    b = np.random.rand(10).astype(np.complex128)

    df = DataFrame({"a": a, "b": b})
    assert a.dtype == df.a.dtype
    assert b.dtype == df.b.dtype
def test_constructor_dtype_str_na_values(self, string_dtype):
# https://github.com/pandas-dev/pandas/issues/21083
df = DataFrame({"A": ["x", None]}, dtype=string_dtype)
result = df.isna()
expected = DataFrame({"A": [False, True]})
tm.assert_frame_equal(result, expected)
assert df.iloc[1, 0] is None
df = DataFrame({"A": ["x", np.nan]}, dtype=string_dtype)
assert np.isnan(df.iloc[1, 0])
def test_constructor_rec(self, float_frame):
    """Construction from a numpy recarray uses the record field names as columns."""
    rec = float_frame.to_records(index=False)
    rec.dtype.names = list(rec.dtype.names)[::-1]

    index = float_frame.index

    df = DataFrame(rec)
    tm.assert_index_equal(df.columns, Index(rec.dtype.names))

    df2 = DataFrame(rec, index=index)
    tm.assert_index_equal(df2.columns, Index(rec.dtype.names))
    tm.assert_index_equal(df2.index, index)

    # case with columns != the ones we would infer from the data
    rng = np.arange(len(rec))[::-1]
    df3 = DataFrame(rec, index=rng, columns=["C", "B"])
    expected = DataFrame(rec, index=rng).reindex(columns=["C", "B"])
    tm.assert_frame_equal(df3, expected)
def test_constructor_bool(self):
    """Two all-bool columns give a frame whose values block is bool dtype."""
    data = {
        0: np.ones(10, dtype=bool),
        1: np.zeros(10, dtype=bool),
    }
    frame = DataFrame(data)
    assert frame.values.dtype == np.bool_
def test_constructor_overflow_int64(self):
    """Values above int64 range keep uint64 dtype instead of overflowing (gh-14881, gh-2355)."""
    # see gh-14881
    values = np.array([2 ** 64 - i for i in range(1, 10)], dtype=np.uint64)

    result = DataFrame({"a": values})
    assert result["a"].dtype == np.uint64

    # see gh-2355
    data_scores = [
        (6311132704823138710, 273),
        (2685045978526272070, 23),
        (8921811264899370420, 45),
        (17019687244989530680, 270),
        (9930107427299601010, 273),
    ]
    dtype = [("uid", "u8"), ("score", "u8")]
    data = np.zeros((len(data_scores),), dtype=dtype)
    data[:] = data_scores
    df_crawls = DataFrame(data)
    assert df_crawls["uid"].dtype == np.uint64
@pytest.mark.parametrize(
    "values",
    [
        np.array([2 ** 64], dtype=object),
        np.array([2 ** 65]),
        [2 ** 64 + 1],
        np.array([-(2 ** 63) - 4], dtype=object),
        np.array([-(2 ** 64) - 1]),
        [-(2 ** 65) - 2],
    ],
)
def test_constructor_int_overflow(self, values):
    """Integers outside the int64/uint64 range fall back to object dtype, value preserved (gh-18584)."""
    # see gh-18584
    value = values[0]
    result = DataFrame(values)

    assert result[0].dtype == object
    assert result[0][0] == value
def test_constructor_ordereddict(self):
    """OrderedDict input preserves key insertion order as the column order."""
    import random

    nitems = 100
    nums = list(range(nitems))
    random.shuffle(nums)
    expected = [f"A{i:d}" for i in nums]
    df = DataFrame(OrderedDict(zip(expected, [[0]] * nitems)))
    assert expected == list(df.columns)
def test_constructor_dict(self):
    """Dict-of-Series construction aligns on the union index, padding shorter series with NaN."""
    datetime_series = tm.makeTimeSeries(nper=30)
    # test expects index shifted by 5
    datetime_series_short = tm.makeTimeSeries(nper=30)[5:]

    frame = DataFrame({"col1": datetime_series, "col2": datetime_series_short})

    # col2 is padded with NaN
    assert len(datetime_series) == 30
    assert len(datetime_series_short) == 25

    tm.assert_series_equal(frame["col1"], datetime_series.rename("col1"))

    exp = Series(
        np.concatenate([[np.nan] * 5, datetime_series_short.values]),
        index=datetime_series.index,
        name="col2",
    )
    tm.assert_series_equal(exp, frame["col2"])

    frame = DataFrame(
        {"col1": datetime_series, "col2": datetime_series_short},
        columns=["col2", "col3", "col4"],
    )

    # Explicit columns select/extend: col1 dropped, col3/col4 all-NaN.
    assert len(frame) == len(datetime_series_short)
    assert "col1" not in frame
    assert isna(frame["col3"]).all()

    # Corner cases
    assert len(DataFrame()) == 0

    # mix dict and array, wrong size - no spec for which error should raise
    # first
    msg = "Mixing dicts with non-Series may lead to ambiguous ordering."
    with pytest.raises(ValueError, match=msg):
        DataFrame({"A": {"a": "a", "b": "b"}, "B": ["a", "b", "c"]})
def test_constructor_dict_length1(self):
    """A single-column dict-of-dict keeps the inner dict's keys as the index."""
    # Length-one dict micro-optimization
    data = {"A": {"1": 1, "2": 2}}
    result = DataFrame(data)
    expected_index = Index(["1", "2"])
    tm.assert_index_equal(result.index, expected_index)
def test_constructor_dict_with_index(self):
    """An empty dict with an explicit index reuses that exact Index object."""
    # empty dict plus index
    passed_index = Index([0, 1, 2])
    result = DataFrame({}, index=passed_index)
    assert result.index is passed_index
def test_constructor_dict_with_index_and_columns(self):
    """An empty dict with explicit index and columns reuses both objects and creates one Series per column."""
    # empty dict with index and columns
    idx = Index([0, 1, 2])
    frame = DataFrame({}, index=idx, columns=idx)
    assert frame.index is idx
    assert frame.columns is idx
    assert len(frame._series) == 3
def test_constructor_dict_of_empty_lists(self):
    """A dict of empty lists yields an empty RangeIndex."""
    # with dict of empty list and Series
    frame = DataFrame({"A": [], "B": []}, columns=["A", "B"])
    tm.assert_index_equal(frame.index, RangeIndex(0), exact=True)
def test_constructor_dict_with_none(self):
    """A bare None value behaves the same as [None] (GH 14381)."""
    # GH 14381
    # Dict with None value
    frame_none = DataFrame({"a": None}, index=[0])
    frame_none_list = DataFrame({"a": [None]}, index=[0])
    assert frame_none._get_value(0, "a") is None
    assert frame_none_list._get_value(0, "a") is None
    tm.assert_frame_equal(frame_none, frame_none_list)
def test_constructor_dict_errors(self):
    """All-scalar dict values without an index raise ValueError, even with columns given (GH10856)."""
    # GH10856
    # dict with scalar values should raise error, even if columns passed
    msg = "If using all scalar values, you must pass an index"
    with pytest.raises(ValueError, match=msg):
        DataFrame({"a": 0.7})

    with pytest.raises(ValueError, match=msg):
        DataFrame({"a": 0.7}, columns=["a"])
@pytest.mark.parametrize("scalar", [2, np.nan, None, "D"])
def test_constructor_invalid_items_unused(self, scalar):
    """A scalar dict value raises no error when `columns` excludes that key."""
    # No error if invalid (scalar) value is in fact not used:
    result = DataFrame({"a": scalar}, columns=["b"])
    expected = DataFrame(columns=["b"])
    tm.assert_frame_equal(result, expected)
@pytest.mark.parametrize("value", [2, np.nan, None, float("nan")])
def test_constructor_dict_nan_key(self, value):
    """NaN/None as dict keys and index labels still align correctly (GH 18455)."""
    # GH 18455
    cols = [1, value, 3]
    idx = ["a", value]
    values = [[0, 3], [1, 4], [2, 5]]
    data = {cols[c]: Series(values[c], index=idx) for c in range(3)}
    result = DataFrame(data).sort_values(1).sort_values("a", axis=1)
    expected = DataFrame(
        np.arange(6, dtype="int64").reshape(2, 3), index=idx, columns=cols
    )
    tm.assert_frame_equal(result, expected)

    result = DataFrame(data, index=idx).sort_values("a", axis=1)
    tm.assert_frame_equal(result, expected)

    result = DataFrame(data, index=idx, columns=cols)
    tm.assert_frame_equal(result, expected)
@pytest.mark.parametrize("value", [np.nan, None, float("nan")])
def test_constructor_dict_nan_tuple_key(self, value):
    """Tuples containing NaN/None as keys/labels still align correctly (GH 18455)."""
    # GH 18455
    cols = Index([(11, 21), (value, 22), (13, value)])
    idx = Index([("a", value), (value, 2)])
    values = [[0, 3], [1, 4], [2, 5]]
    data = {cols[c]: Series(values[c], index=idx) for c in range(3)}
    result = DataFrame(data).sort_values((11, 21)).sort_values(("a", value), axis=1)
    expected = DataFrame(
        np.arange(6, dtype="int64").reshape(2, 3), index=idx, columns=cols
    )
    tm.assert_frame_equal(result, expected)

    result = DataFrame(data, index=idx).sort_values(("a", value), axis=1)
    tm.assert_frame_equal(result, expected)

    result = DataFrame(data, index=idx, columns=cols)
    tm.assert_frame_equal(result, expected)
def test_constructor_dict_order_insertion(self):
    """Column order follows dict insertion order (GH19018)."""
    datetime_series = tm.makeTimeSeries(nper=30)
    datetime_series_short = tm.makeTimeSeries(nper=25)

    # GH19018
    # initialization ordering: by insertion order if python>= 3.6
    d = {"b": datetime_series_short, "a": datetime_series}
    frame = DataFrame(data=d)
    expected = DataFrame(data=d, columns=list("ba"))
    tm.assert_frame_equal(frame, expected)
def test_constructor_dict_nan_key_and_columns(self):
    """NaN may appear both as a dict key and in `columns` (GH 16894)."""
    # GH 16894
    result = DataFrame({np.nan: [1, 2], 2: [2, 3]}, columns=[np.nan, 2])
    expected = DataFrame([[1, 2], [2, 3]], columns=[np.nan, 2])
    tm.assert_frame_equal(result, expected)
def test_constructor_multi_index(self):
    """An all-NaN frame can be built with a MultiIndex on both axes (GH 4078)."""
    # GH 4078
    # construction error with mi and all-nan frame
    tuples = [(2, 3), (3, 3), (3, 3)]
    mi = MultiIndex.from_tuples(tuples)
    df = DataFrame(index=mi, columns=mi)
    assert isna(df).values.ravel().all()

    tuples = [(3, 3), (2, 3), (3, 3)]
    mi = MultiIndex.from_tuples(tuples)
    df = DataFrame(index=mi, columns=mi)
    assert isna(df).values.ravel().all()
def test_constructor_2d_index(self):
    """Nested-list index/columns are interpreted as MultiIndex levels (GH 25416)."""
    # GH 25416
    # handling of 2d index in construction
    df = DataFrame([[1]], columns=[[1]], index=[1, 2])
    expected = DataFrame(
        [1, 1],
        index=Int64Index([1, 2], dtype="int64"),
        columns=MultiIndex(levels=[[1]], codes=[[0]]),
    )
    tm.assert_frame_equal(df, expected)

    df = DataFrame([[1]], columns=[[1]], index=[[1, 2]])
    expected = DataFrame(
        [1, 1],
        index=MultiIndex(levels=[[1, 2]], codes=[[0, 1]]),
        columns=MultiIndex(levels=[[1]], codes=[[0]]),
    )
    tm.assert_frame_equal(df, expected)
def test_constructor_error_msgs(self):
    """Malformed inputs raise ValueError with specific, documented messages."""
    msg = "Empty data passed with indices specified."
    # passing an empty array with columns specified.
    with pytest.raises(ValueError, match=msg):
        DataFrame(np.empty(0), columns=list("abc"))

    msg = "Mixing dicts with non-Series may lead to ambiguous ordering."
    # mix dict and array, wrong size
    with pytest.raises(ValueError, match=msg):
        DataFrame({"A": {"a": "a", "b": "b"}, "B": ["a", "b", "c"]})

    # wrong size ndarray, GH 3105
    msg = r"Shape of passed values is \(4, 3\), indices imply \(3, 3\)"
    with pytest.raises(ValueError, match=msg):
        DataFrame(
            np.arange(12).reshape((4, 3)),
            columns=["foo", "bar", "baz"],
            index=date_range("2000-01-01", periods=3),
        )

    arr = np.array([[4, 5, 6]])
    msg = r"Shape of passed values is \(1, 3\), indices imply \(1, 4\)"
    with pytest.raises(ValueError, match=msg):
        DataFrame(index=[0], columns=range(0, 4), data=arr)

    arr = np.array([4, 5, 6])
    msg = r"Shape of passed values is \(3, 1\), indices imply \(1, 4\)"
    with pytest.raises(ValueError, match=msg):
        DataFrame(index=[0], columns=range(0, 4), data=arr)

    # higher dim raise exception
    with pytest.raises(ValueError, match="Must pass 2-d input"):
        DataFrame(np.zeros((3, 3, 3)), columns=["A", "B", "C"], index=[1])

    # wrong size axis labels
    msg = r"Shape of passed values is \(2, 3\), indices imply \(1, 3\)"
    with pytest.raises(ValueError, match=msg):
        DataFrame(np.random.rand(2, 3), columns=["A", "B", "C"], index=[1])

    msg = r"Shape of passed values is \(2, 3\), indices imply \(2, 2\)"
    with pytest.raises(ValueError, match=msg):
        DataFrame(np.random.rand(2, 3), columns=["A", "B"], index=[1, 2])

    # gh-26429
    msg = "2 columns passed, passed data had 10 columns"
    with pytest.raises(ValueError, match=msg):
        DataFrame((range(10), range(10, 20)), columns=("ones", "twos"))

    msg = "If using all scalar values, you must pass an index"
    with pytest.raises(ValueError, match=msg):
        DataFrame({"a": False, "b": True})
def test_constructor_subclass_dict(self, dict_subclass):
    """dict subclasses construct identically to plain dicts, at both levels of nesting."""
    # Test for passing dict subclass to constructor
    data = {
        "col1": dict_subclass((x, 10.0 * x) for x in range(10)),
        "col2": dict_subclass((x, 20.0 * x) for x in range(10)),
    }
    df = DataFrame(data)
    refdf = DataFrame({col: dict(val.items()) for col, val in data.items()})
    tm.assert_frame_equal(refdf, df)

    data = dict_subclass(data.items())
    df = DataFrame(data)
    tm.assert_frame_equal(refdf, df)
def test_constructor_defaultdict(self, float_frame):
    """collections.defaultdict values round-trip through the constructor."""
    # try with defaultdict
    from collections import defaultdict

    data = {}
    float_frame["B"][:10] = np.nan
    for k, v in float_frame.items():
        dct = defaultdict(dict)
        dct.update(v.to_dict())
        data[k] = dct
    frame = DataFrame(data)
    expected = frame.reindex(index=float_frame.index)
    tm.assert_frame_equal(float_frame, expected)
def test_constructor_dict_block(self):
    """Explicit `columns` order determines the values layout."""
    expected = np.array([[4.0, 3.0, 2.0, 1.0]])
    df = DataFrame(
        {"d": [4.0], "c": [3.0], "b": [2.0], "a": [1.0]},
        columns=["d", "c", "b", "a"],
    )
    tm.assert_numpy_array_equal(df.values, expected)
def test_constructor_dict_cast(self):
    """dtype=float casts numeric-string columns; without it, strings stay object."""
    # cast float tests
    test_data = {"A": {"1": 1, "2": 2}, "B": {"1": "1", "2": "2", "3": "3"}}
    frame = DataFrame(test_data, dtype=float)
    assert len(frame) == 3
    assert frame["B"].dtype == np.float64
    assert frame["A"].dtype == np.float64

    frame = DataFrame(test_data)
    assert len(frame) == 3
    assert frame["B"].dtype == np.object_
    assert frame["A"].dtype == np.float64
def test_constructor_dict_cast2(self):
    """Uncastable string data with dtype=float warns and keeps object dtype for that column."""
    # can't cast to float
    test_data = {
        "A": dict(zip(range(20), tm.makeStringIndex(20))),
        "B": dict(zip(range(15), np.random.randn(15))),
    }
    msg = "either all columns will be cast to that dtype, or a TypeError will"
    with tm.assert_produces_warning(FutureWarning, match=msg):
        frame = DataFrame(test_data, dtype=float)

    assert len(frame) == 20
    assert frame["A"].dtype == np.object_
    assert frame["B"].dtype == np.float64
def test_constructor_dict_dont_upcast(self):
    """A NaN in an object column stays a float NaN rather than forcing conversion."""
    d = {"Col1": {"Row1": "A String", "Row2": np.nan}}
    df = DataFrame(d)
    assert isinstance(df["Col1"]["Row2"], float)
def test_constructor_dict_dont_upcast2(self):
    """Mixed int/str rows keep the int element as a Python int, not upcast."""
    dm = DataFrame([[1, 2], ["a", "b"]], index=[1, 2], columns=[1, 2])
    assert isinstance(dm[1][1], int)
def test_constructor_dict_of_tuples(self):
    """Tuples as dict values behave like lists (GH #1491)."""
    # GH #1491
    data = {"a": (1, 2, 3), "b": (4, 5, 6)}

    result = DataFrame(data)
    expected = DataFrame({k: list(v) for k, v in data.items()})
    tm.assert_frame_equal(result, expected, check_dtype=False)
def test_constructor_dict_of_ranges(self):
    """range objects as dict values are materialized into columns (GH 26356)."""
    ranges = {"a": range(3), "b": range(3, 6)}
    result = DataFrame(ranges)
    expected = DataFrame({"a": list(range(3)), "b": list(range(3, 6))})
    tm.assert_frame_equal(result, expected)
def test_constructor_dict_of_iterators(self):
    """Iterators as dict values are consumed into columns (GH 26349)."""
    iterators = {"a": iter(range(3)), "b": reversed(range(3))}
    result = DataFrame(iterators)
    expected = DataFrame({"a": [0, 1, 2], "b": [2, 1, 0]})
    tm.assert_frame_equal(result, expected)
def test_constructor_dict_of_generators(self):
    """Generators as dict values are consumed into columns (GH 26349)."""
    # GH 26349
    data = {"a": (i for i in (range(3))), "b": (i for i in reversed(range(3)))}
    result = DataFrame(data)
    expected = DataFrame({"a": [0, 1, 2], "b": [2, 1, 0]})
    tm.assert_frame_equal(result, expected)
def test_constructor_dict_multiindex(self):
    """All-tuple dict keys become a MultiIndex; a mixed scalar key falls back to plain Index."""
    d = {
        ("a", "a"): {("i", "i"): 0, ("i", "j"): 1, ("j", "i"): 2},
        ("b", "a"): {("i", "i"): 6, ("i", "j"): 5, ("j", "i"): 4},
        ("b", "c"): {("i", "i"): 7, ("i", "j"): 8, ("j", "i"): 9},
    }
    _d = sorted(d.items())
    df = DataFrame(d)
    expected = DataFrame(
        [x[1] for x in _d], index=MultiIndex.from_tuples([x[0] for x in _d])
    ).T
    expected.index = MultiIndex.from_tuples(expected.index)
    tm.assert_frame_equal(
        df,
        expected,
    )

    # Adding the scalar key "z" means columns can no longer be tupleized.
    d["z"] = {"y": 123.0, ("i", "i"): 111, ("i", "j"): 111, ("j", "i"): 111}
    _d.insert(0, ("z", d["z"]))
    expected = DataFrame(
        [x[1] for x in _d], index=Index([x[0] for x in _d], tupleize_cols=False)
    ).T
    expected.index = Index(expected.index, tupleize_cols=False)
    df = DataFrame(d)
    df = df.reindex(columns=expected.columns, index=expected.index)
    tm.assert_frame_equal(df, expected)
def test_constructor_dict_datetime64_index(self):
    """np.datetime64, datetime, and Timestamp keys all produce the same Timestamp index (GH 10160)."""
    # GH 10160
    dates_as_str = ["1984-02-19", "1988-11-06", "1989-12-03", "1990-03-15"]

    def create_data(constructor):
        # One column per date; value 2*i at that date's row only.
        return {i: {constructor(s): 2 * i} for i, s in enumerate(dates_as_str)}

    data_datetime64 = create_data(np.datetime64)
    data_datetime = create_data(lambda x: datetime.strptime(x, "%Y-%m-%d"))
    data_Timestamp = create_data(Timestamp)

    expected = DataFrame(
        [
            {0: 0, 1: None, 2: None, 3: None},
            {0: None, 1: 2, 2: None, 3: None},
            {0: None, 1: None, 2: 4, 3: None},
            {0: None, 1: None, 2: None, 3: 6},
        ],
        index=[Timestamp(dt) for dt in dates_as_str],
    )

    result_datetime64 = DataFrame(data_datetime64)
    result_datetime = DataFrame(data_datetime)
    result_Timestamp = DataFrame(data_Timestamp)
    tm.assert_frame_equal(result_datetime64, expected)
    tm.assert_frame_equal(result_datetime, expected)
    tm.assert_frame_equal(result_Timestamp, expected)
def test_constructor_dict_timedelta64_index(self):
    """np.timedelta64, timedelta, and Timedelta keys all produce the same Timedelta index (GH 10160)."""
    # GH 10160
    td_as_int = [1, 2, 3, 4]

    def create_data(constructor):
        # One column per duration; value 2*i at that duration's row only.
        return {i: {constructor(s): 2 * i} for i, s in enumerate(td_as_int)}

    data_timedelta64 = create_data(lambda x: np.timedelta64(x, "D"))
    data_timedelta = create_data(lambda x: timedelta(days=x))
    data_Timedelta = create_data(lambda x: Timedelta(x, "D"))

    expected = DataFrame(
        [
            {0: 0, 1: None, 2: None, 3: None},
            {0: None, 1: 2, 2: None, 3: None},
            {0: None, 1: None, 2: 4, 3: None},
            {0: None, 1: None, 2: None, 3: 6},
        ],
        index=[Timedelta(td, "D") for td in td_as_int],
    )

    result_timedelta64 = DataFrame(data_timedelta64)
    result_timedelta = DataFrame(data_timedelta)
    result_Timedelta = DataFrame(data_Timedelta)
    tm.assert_frame_equal(result_timedelta64, expected)
    tm.assert_frame_equal(result_timedelta, expected)
    tm.assert_frame_equal(result_Timedelta, expected)
def test_constructor_period_dict(self):
    """PeriodIndex values and lists of Period scalars both keep period dtype."""
    # PeriodIndex
    a = pd.PeriodIndex(["2012-01", "NaT", "2012-04"], freq="M")
    b = pd.PeriodIndex(["2012-02-01", "2012-03-01", "NaT"], freq="D")
    df = DataFrame({"a": a, "b": b})
    assert df["a"].dtype == a.dtype
    assert df["b"].dtype == b.dtype

    # list of periods
    df = DataFrame({"a": a.astype(object).tolist(), "b": b.astype(object).tolist()})
    assert df["a"].dtype == a.dtype
    assert df["b"].dtype == b.dtype
def test_constructor_dict_extension_scalar(self, ea_scalar_and_dtype):
    """An extension-array scalar dict value infers the matching extension dtype."""
    ea_scalar, ea_dtype = ea_scalar_and_dtype
    df = DataFrame({"a": ea_scalar}, index=[0])
    assert df["a"].dtype == ea_dtype

    expected = DataFrame(index=[0], columns=["a"], data=ea_scalar)

    tm.assert_frame_equal(df, expected)
@pytest.mark.parametrize(
    "data,dtype",
    [
        (Period("2020-01"), PeriodDtype("M")),
        (Interval(left=0, right=5), IntervalDtype("int64", "right")),
        (
            Timestamp("2011-01-01", tz="US/Eastern"),
            DatetimeTZDtype(tz="US/Eastern"),
        ),
    ],
)
def test_constructor_extension_scalar_data(self, data, dtype):
    """An EA scalar passed as `data` is broadcast to every cell with its extension dtype (GH 34832)."""
    # GH 34832
    df = DataFrame(index=[0, 1], columns=["a", "b"], data=data)

    assert df["a"].dtype == dtype
    assert df["b"].dtype == dtype

    arr = pd.array([data] * 2, dtype=dtype)
    expected = DataFrame({"a": arr, "b": arr})

    tm.assert_frame_equal(df, expected)
def test_nested_dict_frame_constructor(self):
    """A frame round-trips through nested dicts, keyed column-first or row-first."""
    rng = pd.period_range("1/1/2000", periods=5)
    df = DataFrame(np.random.randn(10, 5), columns=rng)

    data = {}
    for col in df.columns:
        for row in df.index:
            data.setdefault(col, {})[row] = df._get_value(row, col)

    result = DataFrame(data, columns=rng)
    tm.assert_frame_equal(result, df)

    data = {}
    for col in df.columns:
        for row in df.index:
            data.setdefault(row, {})[col] = df._get_value(row, col)

    # Row-keyed dict needs a transpose to match the original layout.
    result = DataFrame(data, index=rng).T
    tm.assert_frame_equal(result, df)
def _check_basic_constructor(self, empty):
    """Shared checks for ndarray-like inputs.

    `empty` is a factory taking a shape (np.ones or ma.masked_all); the same
    assertions are run for plain and masked arrays: 1-D/2-D input, dtype
    requests, wrong-size label errors, >2-D rejection, automatic labeling,
    and 0-length axes.
    """
    # mat: 2d matrix with shape (3, 2) to input. empty - makes sized
    # objects
    mat = empty((2, 3), dtype=float)
    # 2-D input
    frame = DataFrame(mat, columns=["A", "B", "C"], index=[1, 2])

    assert len(frame.index) == 2
    assert len(frame.columns) == 3

    # 1-D input
    frame = DataFrame(empty((3,)), columns=["A"], index=[1, 2, 3])
    assert len(frame.index) == 3
    assert len(frame.columns) == 1

    # Casting masked NaNs to int is deprecated, so the masked factory warns.
    warn = None if empty is np.ones else FutureWarning
    with tm.assert_produces_warning(warn):
        frame = DataFrame(
            mat, columns=["A", "B", "C"], index=[1, 2], dtype=np.int64
        )
    if empty is np.ones:
        # passing dtype casts
        assert frame.values.dtype == np.int64
    else:
        # i.e. ma.masked_all
        # Since we have NaNs, refuse to cast to int dtype, which would take NaN
        # to meaningless integers. This matches Series behavior. GH#26919
        assert frame.isna().all().all()
        assert frame.values.dtype == np.float64
        assert isna(frame.values).all()

    # wrong size axis labels
    msg = r"Shape of passed values is \(2, 3\), indices imply \(1, 3\)"
    with pytest.raises(ValueError, match=msg):
        DataFrame(mat, columns=["A", "B", "C"], index=[1])
    msg = r"Shape of passed values is \(2, 3\), indices imply \(2, 2\)"
    with pytest.raises(ValueError, match=msg):
        DataFrame(mat, columns=["A", "B"], index=[1, 2])

    # higher dim raise exception
    with pytest.raises(ValueError, match="Must pass 2-d input"):
        DataFrame(empty((3, 3, 3)), columns=["A", "B", "C"], index=[1])

    # automatic labeling
    frame = DataFrame(mat)
    tm.assert_index_equal(frame.index, Index(range(2)), exact=True)
    tm.assert_index_equal(frame.columns, Index(range(3)), exact=True)

    frame = DataFrame(mat, index=[1, 2])
    tm.assert_index_equal(frame.columns, Index(range(3)), exact=True)

    frame = DataFrame(mat, columns=["A", "B", "C"])
    tm.assert_index_equal(frame.index, Index(range(2)), exact=True)

    # 0-length axis
    frame = DataFrame(empty((0, 3)))
    assert len(frame.index) == 0

    frame = DataFrame(empty((3, 0)))
    assert len(frame.columns) == 0
def test_constructor_ndarray(self):
    """Run the shared ndarray checks, plus a 1-D string-list case."""
    self._check_basic_constructor(np.ones)

    frame = DataFrame(["foo", "bar"], index=[0, 1], columns=["A"])
    assert len(frame) == 2
def test_constructor_maskedarray(self):
    """Run the shared checks with ma.masked_all; unmasked cells pass through, masked become NaN."""
    self._check_basic_constructor(ma.masked_all)

    # Check non-masked values
    mat = ma.masked_all((2, 3), dtype=float)
    mat[0, 0] = 1.0
    mat[1, 2] = 2.0
    frame = DataFrame(mat, columns=["A", "B", "C"], index=[1, 2])
    assert 1.0 == frame["A"][1]
    assert 2.0 == frame["C"][2]

    # what is this even checking??
    # NaN != NaN, so a fully-masked frame compares unequal to itself everywhere.
    mat = ma.masked_all((2, 3), dtype=float)
    frame = DataFrame(mat, columns=["A", "B", "C"], index=[1, 2])
    assert np.all(~np.asarray(frame == frame))
def test_constructor_maskedarray_nonfloat(self):
    """Masked int promotes to float, masked M8 keeps NaT, masked bool promotes to object."""
    # masked int promoted to float
    mat = ma.masked_all((2, 3), dtype=int)
    # 2-D input
    frame = DataFrame(mat, columns=["A", "B", "C"], index=[1, 2])

    assert len(frame.index) == 2
    assert len(frame.columns) == 3
    assert np.all(~np.asarray(frame == frame))

    # cast type
    frame = DataFrame(mat, columns=["A", "B", "C"], index=[1, 2], dtype=np.float64)
    assert frame.values.dtype == np.float64

    # Check non-masked values
    mat2 = ma.copy(mat)
    mat2[0, 0] = 1
    mat2[1, 2] = 2
    frame = DataFrame(mat2, columns=["A", "B", "C"], index=[1, 2])
    assert 1 == frame["A"][1]
    assert 2 == frame["C"][2]

    # masked np.datetime64 stays (use NaT as null)
    mat = ma.masked_all((2, 3), dtype="M8[ns]")
    # 2-D input
    frame = DataFrame(mat, columns=["A", "B", "C"], index=[1, 2])

    assert len(frame.index) == 2
    assert len(frame.columns) == 3
    assert isna(frame).values.all()

    # cast type
    # Requesting int64 for datetime data is deprecated (FutureWarning).
    msg = r"datetime64\[ns\] values and dtype=int64"
    with tm.assert_produces_warning(FutureWarning, match=msg):
        with warnings.catch_warnings():
            warnings.filterwarnings(
                "ignore",
                category=DeprecationWarning,
                message="elementwise comparison failed",
            )
            frame = DataFrame(
                mat, columns=["A", "B", "C"], index=[1, 2], dtype=np.int64
            )
    assert frame.values.dtype == np.int64

    # Check non-masked values
    mat2 = ma.copy(mat)
    mat2[0, 0] = 1
    mat2[1, 2] = 2
    frame = DataFrame(mat2, columns=["A", "B", "C"], index=[1, 2])
    assert 1 == frame["A"].view("i8")[1]
    assert 2 == frame["C"].view("i8")[2]

    # masked bool promoted to object
    mat = ma.masked_all((2, 3), dtype=bool)
    # 2-D input
    frame = DataFrame(mat, columns=["A", "B", "C"], index=[1, 2])

    assert len(frame.index) == 2
    assert len(frame.columns) == 3
    assert np.all(~np.asarray(frame == frame))

    # cast type
    frame = DataFrame(mat, columns=["A", "B", "C"], index=[1, 2], dtype=object)
    assert frame.values.dtype == object

    # Check non-masked values
    mat2 = ma.copy(mat)
    mat2[0, 0] = True
    mat2[1, 2] = False
    frame = DataFrame(mat2, columns=["A", "B", "C"], index=[1, 2])
    assert frame["A"][1] is True
    assert frame["C"][2] is False
def test_constructor_maskedarray_hardened(self):
# Check numpy masked arrays with hard masks -- from GH24574
mat_hard = ma.masked_all((2, 2), dtype=float).harden_mask()
result = DataFrame(mat_hard, columns=["A", "B"], index=[1, 2])
expected = DataFrame(
{"A": [np.nan, np.nan], "B": [np.nan, np.nan]},
columns=["A", "B"],
index=[1, 2],
dtype=float,
)
tm.assert_frame_equal(result, expected)
# Check case where mask is hard but no data are masked
mat_hard = ma.ones((2, 2), dtype=float).harden_mask()
result = DataFrame(mat_hard, columns=["A", "B"], index=[1, 2])
expected = DataFrame(
{"A": [1.0, 1.0], "B": [1.0, 1.0]},
columns=["A", "B"],
index=[1, 2],
dtype=float,
)
tm.assert_frame_equal(result, expected)
def test_constructor_maskedrecarray_dtype(self):
# Ensure constructor honors dtype
data = np.ma.array(
np.ma.zeros(5, dtype=[("date", "<f8"), ("price", "<f8")]), mask=[False] * 5
)
data = data.view(mrecords.mrecarray)
with tm.assert_produces_warning(FutureWarning):
# Support for MaskedRecords deprecated
result = DataFrame(data, dtype=int)
expected = DataFrame(np.zeros((5, 2), dtype=int), columns=["date", "price"])
tm.assert_frame_equal(result, expected)
# GH#40363 check that the alternative suggested in the deprecation
# warning behaves as expected
alt = DataFrame({name: data[name] for name in data.dtype.names}, dtype=int)
tm.assert_frame_equal(result, alt)
@pytest.mark.slow
def test_constructor_mrecarray(self):
# Ensure mrecarray produces frame identical to dict of masked arrays
# from GH3479
assert_fr_equal = functools.partial(
tm.assert_frame_equal, check_index_type=True, check_column_type=True
)
arrays = [
("float", np.array([1.5, 2.0])),
("int", np.array([1, 2])),
("str", np.array(["abc", "def"])),
]
for name, arr in arrays[:]:
arrays.append(
("masked1_" + name, np.ma.masked_array(arr, mask=[False, True]))
)
arrays.append(("masked_all", np.ma.masked_all((2,))))
arrays.append(("masked_none", np.ma.masked_array([1.0, 2.5], mask=False)))
# call assert_frame_equal for all selections of 3 arrays
for comb in itertools.combinations(arrays, 3):
names, data = zip(*comb)
mrecs = mrecords.fromarrays(data, names=names)
# fill the comb
comb = {k: (v.filled() if hasattr(v, "filled") else v) for k, v in comb}
with tm.assert_produces_warning(FutureWarning):
# Support for MaskedRecords deprecated
result = DataFrame(mrecs)
expected = DataFrame(comb, columns=names)
assert_fr_equal(result, expected)
# specify columns
with tm.assert_produces_warning(FutureWarning):
# Support for MaskedRecords deprecated
result = DataFrame(mrecs, columns=names[::-1])
expected = DataFrame(comb, columns=names[::-1])
assert_fr_equal(result, expected)
# specify index
with tm.assert_produces_warning(FutureWarning):
# Support for MaskedRecords deprecated
result = DataFrame(mrecs, index=[1, 2])
expected = DataFrame(comb, columns=names, index=[1, 2])
assert_fr_equal(result, expected)
def test_constructor_corner_shape(self):
df = DataFrame(index=[])
assert df.values.shape == (0, 0)
@pytest.mark.parametrize(
"data, index, columns, dtype, expected",
[
(None, list(range(10)), ["a", "b"], object, np.object_),
(None, None, ["a", "b"], "int64", np.dtype("int64")),
(None, list(range(10)), ["a", "b"], int, np.dtype("float64")),
({}, None, ["foo", "bar"], None, np.object_),
({"b": 1}, list(range(10)), list("abc"), int, np.dtype("float64")),
],
)
def test_constructor_dtype(self, data, index, columns, dtype, expected):
df = DataFrame(data, index, columns, dtype)
assert df.values.dtype == expected
@pytest.mark.parametrize(
"data,input_dtype,expected_dtype",
(
([True, False, None], "boolean", pd.BooleanDtype),
([1.0, 2.0, None], "Float64", pd.Float64Dtype),
([1, 2, None], "Int64", pd.Int64Dtype),
(["a", "b", "c"], "string", pd.StringDtype),
),
)
def test_constructor_dtype_nullable_extension_arrays(
self, data, input_dtype, expected_dtype
):
df = DataFrame({"a": data}, dtype=input_dtype)
assert df["a"].dtype == expected_dtype()
def test_constructor_scalar_inference(self):
data = {"int": 1, "bool": True, "float": 3.0, "complex": 4j, "object": "foo"}
df = DataFrame(data, index=np.arange(10))
assert df["int"].dtype == np.int64
assert df["bool"].dtype == np.bool_
assert df["float"].dtype == np.float64
assert df["complex"].dtype == np.complex128
assert df["object"].dtype == np.object_
def test_constructor_arrays_and_scalars(self):
df = DataFrame({"a": np.random.randn(10), "b": True})
exp = DataFrame({"a": df["a"].values, "b": [True] * 10})
tm.assert_frame_equal(df, exp)
with pytest.raises(ValueError, match="must pass an index"):
DataFrame({"a": False, "b": True})
def test_constructor_DataFrame(self, float_frame):
df = DataFrame(float_frame)
tm.assert_frame_equal(df, float_frame)
df_casted = DataFrame(float_frame, dtype=np.int64)
assert df_casted.values.dtype == np.int64
def test_constructor_empty_dataframe(self):
# GH 20624
actual = DataFrame(DataFrame(), dtype="object")
expected = DataFrame([], dtype="object")
tm.assert_frame_equal(actual, expected)
def test_constructor_more(self, float_frame):
# used to be in test_matrix.py
arr = np.random.randn(10)
dm = DataFrame(arr, columns=["A"], index=np.arange(10))
assert dm.values.ndim == 2
arr = np.random.randn(0)
dm = DataFrame(arr)
assert dm.values.ndim == 2
assert dm.values.ndim == 2
# no data specified
dm = DataFrame(columns=["A", "B"], index=np.arange(10))
assert dm.values.shape == (10, 2)
dm = DataFrame(columns=["A", "B"])
assert dm.values.shape == (0, 2)
dm = DataFrame(index=np.arange(10))
assert dm.values.shape == (10, 0)
# can't cast
mat = np.array(["foo", "bar"], dtype=object).reshape(2, 1)
msg = "could not convert string to float: 'foo'"
with pytest.raises(ValueError, match=msg):
DataFrame(mat, index=[0, 1], columns=[0], dtype=float)
dm = DataFrame(DataFrame(float_frame._series))
tm.assert_frame_equal(dm, float_frame)
# int cast
dm = DataFrame(
{"A": np.ones(10, dtype=int), "B": np.ones(10, dtype=np.float64)},
index=np.arange(10),
)
assert len(dm.columns) == 2
assert dm.values.dtype == np.float64
def test_constructor_empty_list(self):
df = DataFrame([], index=[])
expected = DataFrame(index=[])
tm.assert_frame_equal(df, expected)
# GH 9939
df = DataFrame([], columns=["A", "B"])
expected = DataFrame({}, columns=["A", "B"])
tm.assert_frame_equal(df, expected)
# Empty generator: list(empty_gen()) == []
def empty_gen():
return
yield
df = DataFrame(empty_gen(), columns=["A", "B"])
tm.assert_frame_equal(df, expected)
def test_constructor_list_of_lists(self):
# GH #484
df = DataFrame(data=[[1, "a"], [2, "b"]], columns=["num", "str"])
assert is_integer_dtype(df["num"])
assert df["str"].dtype == np.object_
# GH 4851
# list of 0-dim ndarrays
expected = DataFrame({0: np.arange(10)})
data = [np.array(x) for x in range(10)]
result = DataFrame(data)
tm.assert_frame_equal(result, expected)
def test_nested_pandasarray_matches_nested_ndarray(self):
# GH#43986
ser = Series([1, 2])
arr = np.array([None, None], dtype=object)
arr[0] = ser
arr[1] = ser * 2
df = DataFrame(arr)
expected = DataFrame(pd.array(arr))
tm.assert_frame_equal(df, expected)
assert df.shape == (2, 1)
tm.assert_numpy_array_equal(df[0].values, arr)
def test_constructor_list_like_data_nested_list_column(self):
# GH 32173
arrays = [list("abcd"), list("cdef")]
result = DataFrame([[1, 2, 3, 4], [4, 5, 6, 7]], columns=arrays)
mi = MultiIndex.from_arrays(arrays)
expected = DataFrame([[1, 2, 3, 4], [4, 5, 6, 7]], columns=mi)
tm.assert_frame_equal(result, expected)
def test_constructor_wrong_length_nested_list_column(self):
# GH 32173
arrays = [list("abc"), list("cde")]
msg = "3 columns passed, passed data had 4"
with pytest.raises(ValueError, match=msg):
DataFrame([[1, 2, 3, 4], [4, 5, 6, 7]], columns=arrays)
def test_constructor_unequal_length_nested_list_column(self):
# GH 32173
arrays = [list("abcd"), list("cde")]
# exception raised inside MultiIndex constructor
msg = "all arrays must be same length"
with pytest.raises(ValueError, match=msg):
DataFrame([[1, 2, 3, 4], [4, 5, 6, 7]], columns=arrays)
@pytest.mark.parametrize(
"data",
[
[[Timestamp("2021-01-01")]],
[{"x": Timestamp("2021-01-01")}],
{"x": [Timestamp("2021-01-01")]},
{"x": Timestamp("2021-01-01")},
],
)
def test_constructor_one_element_data_list(self, data):
# GH#42810
result = DataFrame(data, index=[0, 1, 2], columns=["x"])
expected = DataFrame({"x": [Timestamp("2021-01-01")] * 3})
tm.assert_frame_equal(result, expected)
def test_constructor_sequence_like(self):
# GH 3783
# collections.Sequence like
class DummyContainer(abc.Sequence):
def __init__(self, lst):
self._lst = lst
def __getitem__(self, n):
return self._lst.__getitem__(n)
def __len__(self, n):
return self._lst.__len__()
lst_containers = [DummyContainer([1, "a"]), DummyContainer([2, "b"])]
columns = ["num", "str"]
result = DataFrame(lst_containers, columns=columns)
expected = DataFrame([[1, "a"], [2, "b"]], columns=columns)
tm.assert_frame_equal(result, expected, check_dtype=False)
def test_constructor_stdlib_array(self):
# GH 4297
# support Array
import array
result = DataFrame({"A": array.array("i", range(10))})
expected = DataFrame({"A": list(range(10))})
tm.assert_frame_equal(result, expected, check_dtype=False)
expected = DataFrame([list(range(10)), list(range(10))])
result = DataFrame([array.array("i", range(10)), array.array("i", range(10))])
tm.assert_frame_equal(result, expected, check_dtype=False)
def test_constructor_range(self):
# GH26342
result = DataFrame(range(10))
expected = DataFrame(list(range(10)))
tm.assert_frame_equal(result, expected)
def test_constructor_list_of_ranges(self):
result = DataFrame([range(10), range(10)])
expected = DataFrame([list(range(10)), list(range(10))])
tm.assert_frame_equal(result, expected)
def test_constructor_iterable(self):
# GH 21987
class Iter:
def __iter__(self):
for i in range(10):
yield [1, 2, 3]
expected = DataFrame([[1, 2, 3]] * 10)
result = DataFrame(Iter())
tm.assert_frame_equal(result, expected)
def test_constructor_iterator(self):
result = DataFrame(iter(range(10)))
expected = DataFrame(list(range(10)))
tm.assert_frame_equal(result, expected)
def test_constructor_list_of_iterators(self):
result = DataFrame([iter(range(10)), iter(range(10))])
expected = DataFrame([list(range(10)), list(range(10))])
tm.assert_frame_equal(result, expected)
def test_constructor_generator(self):
# related #2305
gen1 = (i for i in range(10))
gen2 = (i for i in range(10))
expected = DataFrame([list(range(10)), list(range(10))])
result = DataFrame([gen1, gen2])
tm.assert_frame_equal(result, expected)
gen = ([i, "a"] for i in range(10))
result = DataFrame(gen)
expected = DataFrame({0: range(10), 1: "a"})
tm.assert_frame_equal(result, expected, check_dtype=False)
def test_constructor_list_of_dicts(self):
result = DataFrame([{}])
expected = DataFrame(index=[0])
tm.assert_frame_equal(result, expected)
@pytest.mark.parametrize("dict_type", [dict, OrderedDict])
def test_constructor_ordered_dict_preserve_order(self, dict_type):
# see gh-13304
expected = DataFrame([[2, 1]], columns=["b", "a"])
data = dict_type()
data["b"] = [2]
data["a"] = [1]
result = DataFrame(data)
tm.assert_frame_equal(result, expected)
data = dict_type()
data["b"] = 2
data["a"] = 1
result = DataFrame([data])
tm.assert_frame_equal(result, expected)
@pytest.mark.parametrize("dict_type", [dict, OrderedDict])
def test_constructor_ordered_dict_conflicting_orders(self, dict_type):
# the first dict element sets the ordering for the DataFrame,
# even if there are conflicting orders from subsequent ones
row_one = dict_type()
row_one["b"] = 2
row_one["a"] = 1
row_two = dict_type()
row_two["a"] = 1
row_two["b"] = 2
row_three = {"b": 2, "a": 1}
expected = DataFrame([[2, 1], [2, 1]], columns=["b", "a"])
result = DataFrame([row_one, row_two])
tm.assert_frame_equal(result, expected)
expected = DataFrame([[2, 1], [2, 1], [2, 1]], columns=["b", "a"])
result = DataFrame([row_one, row_two, row_three])
tm.assert_frame_equal(result, expected)
def test_constructor_list_of_series_aligned_index(self):
series = [Series(i, index=["b", "a", "c"], name=str(i)) for i in range(3)]
result = DataFrame(series)
expected = DataFrame(
{"b": [0, 1, 2], "a": [0, 1, 2], "c": [0, 1, 2]},
columns=["b", "a", "c"],
index=["0", "1", "2"],
)
tm.assert_frame_equal(result, expected)
def test_constructor_list_of_derived_dicts(self):
class CustomDict(dict):
pass
d = {"a": 1.5, "b": 3}
data_custom = [CustomDict(d)]
data = [d]
result_custom = DataFrame(data_custom)
result = DataFrame(data)
tm.assert_frame_equal(result, result_custom)
    def test_constructor_ragged(self):
        # Dict values of unequal lengths cannot be aligned without an
        # explicit index, so construction must raise ValueError.
        data = {"A": np.random.randn(10), "B": np.random.randn(8)}
        with pytest.raises(ValueError, match="All arrays must be of the same length"):
            DataFrame(data)
def test_constructor_scalar(self):
idx = Index(range(3))
df = DataFrame({"a": 0}, index=idx)
expected = DataFrame({"a": [0, 0, 0]}, index=idx)
tm.assert_frame_equal(df, expected, check_dtype=False)
    def test_constructor_Series_copy_bug(self, float_frame):
        # Regression check: constructing a frame from a Series with an
        # explicit index and columns, then copying it, must not raise.
        df = DataFrame(float_frame["A"], index=float_frame.index, columns=["A"])
        df.copy()
def test_constructor_mixed_dict_and_Series(self):
data = {}
data["A"] = {"foo": 1, "bar": 2, "baz": 3}
data["B"] = Series([4, 3, 2, 1], index=["bar", "qux", "baz", "foo"])
result = DataFrame(data)
assert result.index.is_monotonic
# ordering ambiguous, raise exception
with pytest.raises(ValueError, match="ambiguous ordering"):
DataFrame({"A": ["a", "b"], "B": {"a": "a", "b": "b"}})
# this is OK though
result = DataFrame({"A": ["a", "b"], "B": Series(["a", "b"], index=["a", "b"])})
expected = DataFrame({"A": ["a", "b"], "B": ["a", "b"]}, index=["a", "b"])
tm.assert_frame_equal(result, expected)
def test_constructor_mixed_type_rows(self):
# Issue 25075
data = [[1, 2], (3, 4)]
result = DataFrame(data)
expected = DataFrame([[1, 2], [3, 4]])
tm.assert_frame_equal(result, expected)
@pytest.mark.parametrize(
"tuples,lists",
[
((), []),
((()), []),
(((), ()), [(), ()]),
(((), ()), [[], []]),
(([], []), [[], []]),
(([1], [2]), [[1], [2]]), # GH 32776
(([1, 2, 3], [4, 5, 6]), [[1, 2, 3], [4, 5, 6]]),
],
)
def test_constructor_tuple(self, tuples, lists):
# GH 25691
result = DataFrame(tuples)
expected = DataFrame(lists)
tm.assert_frame_equal(result, expected)
def test_constructor_list_of_tuples(self):
result = DataFrame({"A": [(1, 2), (3, 4)]})
expected = DataFrame({"A": Series([(1, 2), (3, 4)])})
tm.assert_frame_equal(result, expected)
def test_constructor_list_of_namedtuples(self):
# GH11181
from collections import namedtuple
named_tuple = namedtuple("Pandas", list("ab"))
tuples = [named_tuple(1, 3), named_tuple(2, 4)]
expected = DataFrame({"a": [1, 2], "b": [3, 4]})
result = DataFrame(tuples)
tm.assert_frame_equal(result, expected)
# with columns
expected = DataFrame({"y": [1, 2], "z": [3, 4]})
result = DataFrame(tuples, columns=["y", "z"])
tm.assert_frame_equal(result, expected)
def test_constructor_list_of_dataclasses(self):
# GH21910
from dataclasses import make_dataclass
Point = make_dataclass("Point", [("x", int), ("y", int)])
data = [Point(0, 3), Point(1, 3)]
expected = DataFrame({"x": [0, 1], "y": [3, 3]})
result = DataFrame(data)
tm.assert_frame_equal(result, expected)
def test_constructor_list_of_dataclasses_with_varying_types(self):
# GH21910
from dataclasses import make_dataclass
# varying types
Point = make_dataclass("Point", [("x", int), ("y", int)])
HLine = make_dataclass("HLine", [("x0", int), ("x1", int), ("y", int)])
data = [Point(0, 3), HLine(1, 3, 3)]
expected = DataFrame(
{"x": [0, np.nan], "y": [3, 3], "x0": [np.nan, 1], "x1": [np.nan, 3]}
)
result = DataFrame(data)
tm.assert_frame_equal(result, expected)
def test_constructor_list_of_dataclasses_error_thrown(self):
# GH21910
from dataclasses import make_dataclass
Point = make_dataclass("Point", [("x", int), ("y", int)])
# expect TypeError
msg = "asdict() should be called on dataclass instances"
with pytest.raises(TypeError, match=re.escape(msg)):
DataFrame([Point(0, 0), {"x": 1, "y": 0}])
def test_constructor_list_of_dict_order(self):
# GH10056
data = [
{"First": 1, "Second": 4, "Third": 7, "Fourth": 10},
{"Second": 5, "First": 2, "Fourth": 11, "Third": 8},
{"Second": 6, "First": 3, "Fourth": 12, "Third": 9, "YYY": 14, "XXX": 13},
]
expected = DataFrame(
{
"First": [1, 2, 3],
"Second": [4, 5, 6],
"Third": [7, 8, 9],
"Fourth": [10, 11, 12],
"YYY": [None, None, 14],
"XXX": [None, None, 13],
}
)
result = DataFrame(data)
tm.assert_frame_equal(result, expected)
def test_constructor_Series_named(self):
a = Series([1, 2, 3], index=["a", "b", "c"], name="x")
df = DataFrame(a)
assert df.columns[0] == "x"
tm.assert_index_equal(df.index, a.index)
# ndarray like
arr = np.random.randn(10)
s = Series(arr, name="x")
df = DataFrame(s)
expected = DataFrame({"x": s})
tm.assert_frame_equal(df, expected)
s = Series(arr, index=range(3, 13))
df = DataFrame(s)
expected = DataFrame({0: s})
tm.assert_frame_equal(df, expected)
msg = r"Shape of passed values is \(10, 1\), indices imply \(10, 2\)"
with pytest.raises(ValueError, match=msg):
DataFrame(s, columns=[1, 2])
# #2234
a = Series([], name="x", dtype=object)
df = DataFrame(a)
assert df.columns[0] == "x"
# series with name and w/o
s1 = Series(arr, name="x")
df = DataFrame([s1, arr]).T
expected = DataFrame({"x": s1, "Unnamed 0": arr}, columns=["x", "Unnamed 0"])
tm.assert_frame_equal(df, expected)
# this is a bit non-intuitive here; the series collapse down to arrays
df = DataFrame([arr, s1]).T
expected = DataFrame({1: s1, 0: arr}, columns=[0, 1])
tm.assert_frame_equal(df, expected)
def test_constructor_Series_named_and_columns(self):
# GH 9232 validation
s0 = Series(range(5), name=0)
s1 = Series(range(5), name=1)
# matching name and column gives standard frame
tm.assert_frame_equal(DataFrame(s0, columns=[0]), s0.to_frame())
tm.assert_frame_equal(DataFrame(s1, columns=[1]), s1.to_frame())
# non-matching produces empty frame
assert DataFrame(s0, columns=[1]).empty
assert DataFrame(s1, columns=[0]).empty
def test_constructor_Series_differently_indexed(self):
# name
s1 = Series([1, 2, 3], index=["a", "b", "c"], name="x")
# no name
s2 = Series([1, 2, 3], index=["a", "b", "c"])
other_index = Index(["a", "b"])
df1 = DataFrame(s1, index=other_index)
exp1 = DataFrame(s1.reindex(other_index))
assert df1.columns[0] == "x"
tm.assert_frame_equal(df1, exp1)
df2 = DataFrame(s2, index=other_index)
exp2 = DataFrame(s2.reindex(other_index))
assert df2.columns[0] == 0
tm.assert_index_equal(df2.index, other_index)
tm.assert_frame_equal(df2, exp2)
@pytest.mark.parametrize(
"name_in1,name_in2,name_in3,name_out",
[
("idx", "idx", "idx", "idx"),
("idx", "idx", None, None),
("idx", None, None, None),
("idx1", "idx2", None, None),
("idx1", "idx1", "idx2", None),
("idx1", "idx2", "idx3", None),
(None, None, None, None),
],
)
def test_constructor_index_names(self, name_in1, name_in2, name_in3, name_out):
# GH13475
indices = [
Index(["a", "b", "c"], name=name_in1),
Index(["b", "c", "d"], name=name_in2),
Index(["c", "d", "e"], name=name_in3),
]
series = {
c: Series([0, 1, 2], index=i) for i, c in zip(indices, ["x", "y", "z"])
}
result = DataFrame(series)
exp_ind = Index(["a", "b", "c", "d", "e"], name=name_out)
expected = DataFrame(
{
"x": [0, 1, 2, np.nan, np.nan],
"y": [np.nan, 0, 1, 2, np.nan],
"z": [np.nan, np.nan, 0, 1, 2],
},
index=exp_ind,
)
tm.assert_frame_equal(result, expected)
def test_constructor_manager_resize(self, float_frame):
index = list(float_frame.index[:5])
columns = list(float_frame.columns[:3])
result = DataFrame(float_frame._mgr, index=index, columns=columns)
tm.assert_index_equal(result.index, Index(index))
tm.assert_index_equal(result.columns, Index(columns))
def test_constructor_mix_series_nonseries(self, float_frame):
df = DataFrame(
{"A": float_frame["A"], "B": list(float_frame["B"])}, columns=["A", "B"]
)
tm.assert_frame_equal(df, float_frame.loc[:, ["A", "B"]])
msg = "does not match index length"
with pytest.raises(ValueError, match=msg):
DataFrame({"A": float_frame["A"], "B": list(float_frame["B"])[:-2]})
def test_constructor_miscast_na_int_dtype(self):
msg = "float-dtype values containing NaN and an integer dtype"
with tm.assert_produces_warning(FutureWarning, match=msg):
df = DataFrame([[np.nan, 1], [1, 0]], dtype=np.int64)
expected = DataFrame([[np.nan, 1], [1, 0]])
tm.assert_frame_equal(df, expected)
def test_constructor_column_duplicates(self):
# it works! #2079
df = DataFrame([[8, 5]], columns=["a", "a"])
edf = DataFrame([[8, 5]])
edf.columns = ["a", "a"]
tm.assert_frame_equal(df, edf)
idf = DataFrame.from_records([(8, 5)], columns=["a", "a"])
tm.assert_frame_equal(idf, edf)
    def test_constructor_empty_with_string_dtype(self):
        # GH 9428
        # An empty frame requested with any string dtype spelling (builtin
        # str, the numpy str_/unicode_ aliases, or a sized "U" dtype) falls
        # back to object dtype, since there is no data to cast.
        expected = DataFrame(index=[0, 1], columns=[0, 1], dtype=object)
        df = DataFrame(index=[0, 1], columns=[0, 1], dtype=str)
        tm.assert_frame_equal(df, expected)
        df = DataFrame(index=[0, 1], columns=[0, 1], dtype=np.str_)
        tm.assert_frame_equal(df, expected)
        df = DataFrame(index=[0, 1], columns=[0, 1], dtype=np.unicode_)
        tm.assert_frame_equal(df, expected)
        df = DataFrame(index=[0, 1], columns=[0, 1], dtype="U5")
        tm.assert_frame_equal(df, expected)
def test_constructor_empty_with_string_extension(self, nullable_string_dtype):
# GH 34915
expected = DataFrame(index=[], columns=["c1"], dtype=nullable_string_dtype)
df = DataFrame(columns=["c1"], dtype=nullable_string_dtype)
tm.assert_frame_equal(df, expected)
def test_constructor_single_value(self):
# expecting single value upcasting here
df = DataFrame(0.0, index=[1, 2, 3], columns=["a", "b", "c"])
tm.assert_frame_equal(
df, DataFrame(np.zeros(df.shape).astype("float64"), df.index, df.columns)
)
df = DataFrame(0, index=[1, 2, 3], columns=["a", "b", "c"])
tm.assert_frame_equal(
df, DataFrame(np.zeros(df.shape).astype("int64"), df.index, df.columns)
)
df = DataFrame("a", index=[1, 2], columns=["a", "c"])
tm.assert_frame_equal(
df,
DataFrame(
np.array([["a", "a"], ["a", "a"]], dtype=object),
index=[1, 2],
columns=["a", "c"],
),
)
msg = "DataFrame constructor not properly called!"
with pytest.raises(ValueError, match=msg):
DataFrame("a", [1, 2])
with pytest.raises(ValueError, match=msg):
DataFrame("a", columns=["a", "c"])
msg = "incompatible data and dtype"
with pytest.raises(TypeError, match=msg):
DataFrame("a", [1, 2], ["a", "c"], float)
def test_constructor_with_datetimes(self):
intname = np.dtype(np.int_).name
floatname = np.dtype(np.float_).name
datetime64name = np.dtype("M8[ns]").name
objectname = np.dtype(np.object_).name
# single item
df = DataFrame(
{
"A": 1,
"B": "foo",
"C": "bar",
"D": Timestamp("20010101"),
"E": datetime(2001, 1, 2, 0, 0),
},
index=np.arange(10),
)
result = df.dtypes
expected = Series(
[np.dtype("int64")]
+ [np.dtype(objectname)] * 2
+ [np.dtype(datetime64name)] * 2,
index=list("ABCDE"),
)
tm.assert_series_equal(result, expected)
# check with ndarray construction ndim==0 (e.g. we are passing a ndim 0
# ndarray with a dtype specified)
df = DataFrame(
{
"a": 1.0,
"b": 2,
"c": "foo",
floatname: np.array(1.0, dtype=floatname),
intname: np.array(1, dtype=intname),
},
index=np.arange(10),
)
result = df.dtypes
expected = Series(
[np.dtype("float64")]
+ [np.dtype("int64")]
+ [np.dtype("object")]
+ [np.dtype("float64")]
+ [np.dtype(intname)],
index=["a", "b", "c", floatname, intname],
)
tm.assert_series_equal(result, expected)
# check with ndarray construction ndim>0
df = DataFrame(
{
"a": 1.0,
"b": 2,
"c": "foo",
floatname: np.array([1.0] * 10, dtype=floatname),
intname: np.array([1] * 10, dtype=intname),
},
index=np.arange(10),
)
result = df.dtypes
expected = Series(
[np.dtype("float64")]
+ [np.dtype("int64")]
+ [np.dtype("object")]
+ [np.dtype("float64")]
+ [np.dtype(intname)],
index=["a", "b", "c", floatname, intname],
)
tm.assert_series_equal(result, expected)
def test_constructor_with_datetimes1(self):
# GH 2809
ind = date_range(start="2000-01-01", freq="D", periods=10)
datetimes = [ts.to_pydatetime() for ts in ind]
datetime_s = Series(datetimes)
assert datetime_s.dtype == "M8[ns]"
def test_constructor_with_datetimes2(self):
# GH 2810
ind = date_range(start="2000-01-01", freq="D", periods=10)
datetimes = [ts.to_pydatetime() for ts in ind]
dates = [ts.date() for ts in ind]
df = DataFrame(datetimes, columns=["datetimes"])
df["dates"] = dates
result = df.dtypes
expected = Series(
[np.dtype("datetime64[ns]"), np.dtype("object")],
index=["datetimes", "dates"],
)
tm.assert_series_equal(result, expected)
def test_constructor_with_datetimes3(self):
# GH 7594
# don't coerce tz-aware
tz = pytz.timezone("US/Eastern")
dt = tz.localize(datetime(2012, 1, 1))
df = DataFrame({"End Date": dt}, index=[0])
assert df.iat[0, 0] == dt
tm.assert_series_equal(
df.dtypes, Series({"End Date": "datetime64[ns, US/Eastern]"})
)
df = DataFrame([{"End Date": dt}])
assert df.iat[0, 0] == dt
tm.assert_series_equal(
df.dtypes, Series({"End Date": "datetime64[ns, US/Eastern]"})
)
def test_constructor_with_datetimes4(self):
# tz-aware (UTC and other tz's)
# GH 8411
dr = date_range("20130101", periods=3)
df = DataFrame({"value": dr})
assert df.iat[0, 0].tz is None
dr = date_range("20130101", periods=3, tz="UTC")
df = DataFrame({"value": dr})
assert str(df.iat[0, 0].tz) == "UTC"
dr = date_range("20130101", periods=3, tz="US/Eastern")
df = DataFrame({"value": dr})
assert str(df.iat[0, 0].tz) == "US/Eastern"
def test_constructor_with_datetimes5(self):
# GH 7822
# preserver an index with a tz on dict construction
i = date_range("1/1/2011", periods=5, freq="10s", tz="US/Eastern")
expected = DataFrame({"a": i.to_series().reset_index(drop=True)})
df = DataFrame()
df["a"] = i
tm.assert_frame_equal(df, expected)
df = DataFrame({"a": i})
tm.assert_frame_equal(df, expected)
def test_constructor_with_datetimes6(self):
# multiples
i = date_range("1/1/2011", periods=5, freq="10s", tz="US/Eastern")
i_no_tz = date_range("1/1/2011", periods=5, freq="10s")
df = DataFrame({"a": i, "b": i_no_tz})
expected = DataFrame({"a": i.to_series().reset_index(drop=True), "b": i_no_tz})
tm.assert_frame_equal(df, expected)
@pytest.mark.parametrize(
"arr",
[
np.array([None, None, None, None, datetime.now(), None]),
np.array([None, None, datetime.now(), None]),
[[np.datetime64("NaT")], [None]],
[[np.datetime64("NaT")], [pd.NaT]],
[[None], [np.datetime64("NaT")]],
[[None], [pd.NaT]],
[[pd.NaT], [np.datetime64("NaT")]],
[[pd.NaT], [None]],
],
)
def test_constructor_datetimes_with_nulls(self, arr):
# gh-15869, GH#11220
result = DataFrame(arr).dtypes
expected = Series([np.dtype("datetime64[ns]")])
tm.assert_series_equal(result, expected)
@pytest.mark.parametrize("order", ["K", "A", "C", "F"])
@pytest.mark.parametrize(
"dtype",
[
"datetime64[M]",
"datetime64[D]",
"datetime64[h]",
"datetime64[m]",
"datetime64[s]",
"datetime64[ms]",
"datetime64[us]",
"datetime64[ns]",
],
)
def test_constructor_datetimes_non_ns(self, order, dtype):
na = np.array(
[
["2015-01-01", "2015-01-02", "2015-01-03"],
["2017-01-01", "2017-01-02", "2017-02-03"],
],
dtype=dtype,
order=order,
)
df = DataFrame(na)
expected = DataFrame(
[
["2015-01-01", "2015-01-02", "2015-01-03"],
["2017-01-01", "2017-01-02", "2017-02-03"],
]
)
expected = expected.astype(dtype=dtype)
tm.assert_frame_equal(df, expected)
@pytest.mark.parametrize("order", ["K", "A", "C", "F"])
@pytest.mark.parametrize(
"dtype",
[
"timedelta64[D]",
"timedelta64[h]",
"timedelta64[m]",
"timedelta64[s]",
"timedelta64[ms]",
"timedelta64[us]",
"timedelta64[ns]",
],
)
def test_constructor_timedelta_non_ns(self, order, dtype):
na = np.array(
[
[np.timedelta64(1, "D"), np.timedelta64(2, "D")],
[np.timedelta64(4, "D"), np.timedelta64(5, "D")],
],
dtype=dtype,
order=order,
)
df = DataFrame(na).astype("timedelta64[ns]")
expected = DataFrame(
[
[Timedelta(1, "D"), Timedelta(2, "D")],
[Timedelta(4, "D"), Timedelta(5, "D")],
],
)
tm.assert_frame_equal(df, expected)
def test_constructor_for_list_with_dtypes(self):
# test list of lists/ndarrays
df = DataFrame([np.arange(5) for x in range(5)])
result = df.dtypes
expected = Series([np.dtype("int")] * 5)
tm.assert_series_equal(result, expected)
df = DataFrame([np.array(np.arange(5), dtype="int32") for x in range(5)])
result = df.dtypes
expected = Series([np.dtype("int32")] * 5)
tm.assert_series_equal(result, expected)
# overflow issue? (we always expected int64 upcasting here)
df = DataFrame({"a": [2 ** 31, 2 ** 31 + 1]})
assert df.dtypes.iloc[0] == np.dtype("int64")
# GH #2751 (construction with no index specified), make sure we cast to
# platform values
df = DataFrame([1, 2])
assert df.dtypes.iloc[0] == np.dtype("int64")
df = DataFrame([1.0, 2.0])
assert df.dtypes.iloc[0] == np.dtype("float64")
df = DataFrame({"a": [1, 2]})
assert df.dtypes.iloc[0] == np.dtype("int64")
df = DataFrame({"a": [1.0, 2.0]})
assert df.dtypes.iloc[0] == np.dtype("float64")
df = DataFrame({"a": 1}, index=range(3))
assert df.dtypes.iloc[0] == np.dtype("int64")
df = DataFrame({"a": 1.0}, index=range(3))
assert df.dtypes.iloc[0] == np.dtype("float64")
# with object list
df = DataFrame(
{
"a": [1, 2, 4, 7],
"b": [1.2, 2.3, 5.1, 6.3],
"c": list("abcd"),
"d": [datetime(2000, 1, 1) for i in range(4)],
"e": [1.0, 2, 4.0, 7],
}
)
result = df.dtypes
expected = Series(
[
np.dtype("int64"),
np.dtype("float64"),
np.dtype("object"),
np.dtype("datetime64[ns]"),
np.dtype("float64"),
],
index=list("abcde"),
)
tm.assert_series_equal(result, expected)
def test_constructor_frame_copy(self, float_frame):
cop = DataFrame(float_frame, copy=True)
cop["A"] = 5
assert (cop["A"] == 5).all()
assert not (float_frame["A"] == 5).all()
def test_constructor_ndarray_copy(self, float_frame, using_array_manager):
if not using_array_manager:
df = DataFrame(float_frame.values)
float_frame.values[5] = 5
assert (df.values[5] == 5).all()
df = DataFrame(float_frame.values, copy=True)
float_frame.values[6] = 6
assert not (df.values[6] == 6).all()
else:
arr = float_frame.values.copy()
# default: copy to ensure contiguous arrays
df = DataFrame(arr)
assert df._mgr.arrays[0].flags.c_contiguous
arr[0, 0] = 100
assert df.iloc[0, 0] != 100
# manually specify copy=False
df = DataFrame(arr, copy=False)
assert not df._mgr.arrays[0].flags.c_contiguous
arr[0, 0] = 1000
assert df.iloc[0, 0] == 1000
# TODO(ArrayManager) keep view on Series?
@td.skip_array_manager_not_yet_implemented
def test_constructor_series_copy(self, float_frame):
series = float_frame._series
df = DataFrame({"A": series["A"]}, copy=True)
df["A"][:] = 5
assert not (series["A"] == 5).all()
@pytest.mark.parametrize(
"df",
[
DataFrame([[1, 2, 3], [4, 5, 6]], index=[1, np.nan]),
DataFrame([[1, 2, 3], [4, 5, 6]], columns=[1.1, 2.2, np.nan]),
DataFrame([[0, 1, 2, 3], [4, 5, 6, 7]], columns=[np.nan, 1.1, 2.2, np.nan]),
DataFrame(
[[0.0, 1, 2, 3.0], [4, 5, 6, 7]], columns=[np.nan, 1.1, 2.2, np.nan]
),
DataFrame([[0.0, 1, 2, 3.0], [4, 5, 6, 7]], columns=[np.nan, 1, 2, 2]),
],
)
def test_constructor_with_nas(self, df):
# GH 5016
# na's in indices
# GH 21428 (non-unique columns)
for i in range(len(df.columns)):
df.iloc[:, i]
indexer = np.arange(len(df.columns))[isna(df.columns)]
# No NaN found -> error
if len(indexer) == 0:
with pytest.raises(KeyError, match="^nan$"):
df.loc[:, np.nan]
# single nan should result in Series
elif len(indexer) == 1:
tm.assert_series_equal(df.iloc[:, indexer[0]], df.loc[:, np.nan])
# multiple nans should result in DataFrame
else:
tm.assert_frame_equal(df.iloc[:, indexer], df.loc[:, np.nan])
def test_constructor_lists_to_object_dtype(self):
# from #1074
d = DataFrame({"a": [np.nan, False]})
assert d["a"].dtype == np.object_
assert not d["a"][1]
def test_constructor_ndarray_categorical_dtype(self):
cat = Categorical(["A", "B", "C"])
arr = np.array(cat).reshape(-1, 1)
arr = np.broadcast_to(arr, (3, 4))
result = DataFrame(arr, dtype=cat.dtype)
expected = DataFrame({0: cat, 1: cat, 2: cat, 3: cat})
tm.assert_frame_equal(result, expected)
def test_constructor_categorical(self):
# GH8626
# dict creation
df = DataFrame({"A": list("abc")}, dtype="category")
expected = Series(list("abc"), dtype="category", name="A")
tm.assert_series_equal(df["A"], expected)
# to_frame
s = Series(list("abc"), dtype="category")
result = s.to_frame()
expected = Series(list("abc"), dtype="category", name=0)
tm.assert_series_equal(result[0], expected)
result = s.to_frame(name="foo")
expected = Series(list("abc"), dtype="category", name="foo")
tm.assert_series_equal(result["foo"], expected)
# list-like creation
df = DataFrame(list("abc"), dtype="category")
expected = Series(list("abc"), dtype="category", name=0)
tm.assert_series_equal(df[0], expected)
    def test_construct_from_1item_list_of_categorical(self):
        """A one-element list of Categorical still builds column-wise (deprecated; warns FutureWarning)."""
        # ndim != 1
        msg = "will be changed to match the behavior"
        with tm.assert_produces_warning(FutureWarning, match=msg):
            df = DataFrame([Categorical(list("abc"))])
        expected = DataFrame({0: Series(list("abc"), dtype="category")})
        tm.assert_frame_equal(df, expected)
    def test_construct_from_list_of_categoricals(self):
        """Two Categoricals passed as rows become two categorical columns (deprecated path; warns)."""
        msg = "will be changed to match the behavior"
        with tm.assert_produces_warning(FutureWarning, match=msg):
            df = DataFrame([Categorical(list("abc")), Categorical(list("abd"))])
        expected = DataFrame(
            {
                0: Series(list("abc"), dtype="category"),
                1: Series(list("abd"), dtype="category"),
            },
            columns=[0, 1],
        )
        tm.assert_frame_equal(df, expected)
    def test_from_nested_listlike_mixed_types(self):
        """Mixing a Categorical row with a plain list keeps per-column dtypes (deprecated path; warns)."""
        # mixed
        msg = "will be changed to match the behavior"
        with tm.assert_produces_warning(FutureWarning, match=msg):
            df = DataFrame([Categorical(list("abc")), list("def")])
        expected = DataFrame(
            {0: Series(list("abc"), dtype="category"), 1: list("def")}, columns=[0, 1]
        )
        tm.assert_frame_equal(df, expected)
    def test_construct_from_listlikes_mismatched_lengths(self):
        """Row list-likes of different lengths raise ValueError (after the deprecation warning fires)."""
        # invalid (shape)
        msg = "|".join(
            [
                r"Length of values \(6\) does not match length of index \(3\)",
            ]
        )
        msg2 = "will be changed to match the behavior"
        with pytest.raises(ValueError, match=msg):
            with tm.assert_produces_warning(FutureWarning, match=msg2):
                DataFrame([Categorical(list("abc")), Categorical(list("abdefg"))])
    def test_constructor_categorical_series(self):
        """Series(dtype="category") matches astype; reindexing onto a disjoint frame index gives all-NaN (GH 8076)."""
        items = [1, 2, 3, 1]
        exp = Series(items).astype("category")
        res = Series(items, dtype="category")
        tm.assert_series_equal(res, exp)
        items = ["a", "b", "c", "a"]
        exp = Series(items).astype("category")
        res = Series(items, dtype="category")
        tm.assert_series_equal(res, exp)
        # insert into frame with different index
        # GH 8076
        index = date_range("20000101", periods=3)
        expected = Series(
            Categorical(values=[np.nan, np.nan, np.nan], categories=["a", "b", "c"])
        )
        expected.index = index
        expected = DataFrame({"x": expected})
        df = DataFrame({"x": Series(["a", "b", "c"], dtype="category")}, index=index)
        tm.assert_frame_equal(df, expected)
    @pytest.mark.parametrize(
        "dtype",
        tm.ALL_INT_NUMPY_DTYPES
        + tm.ALL_INT_EA_DTYPES
        + tm.FLOAT_NUMPY_DTYPES
        + tm.COMPLEX_DTYPES
        + tm.DATETIME64_DTYPES
        + tm.TIMEDELTA64_DTYPES
        + tm.BOOL_DTYPES,
    )
    def test_check_dtype_empty_numeric_column(self, dtype):
        """The requested numeric-like dtype is kept even for an all-empty column."""
        # GH24386: Ensure dtypes are set correctly for an empty DataFrame.
        # Empty DataFrame is generated via dictionary data with non-overlapping columns.
        data = DataFrame({"a": [1, 2]}, columns=["b"], dtype=dtype)
        assert data.b.dtype == dtype
    @pytest.mark.parametrize(
        "dtype", tm.STRING_DTYPES + tm.BYTES_DTYPES + tm.OBJECT_DTYPES
    )
    def test_check_dtype_empty_string_column(self, request, dtype, using_array_manager):
        """String/bytes/object dtypes collapse to object for an all-empty column."""
        # GH24386: Ensure dtypes are set correctly for an empty DataFrame.
        # Empty DataFrame is generated via dictionary data with non-overlapping columns.
        data = DataFrame({"a": [1, 2]}, columns=["b"], dtype=dtype)
        if using_array_manager and dtype in tm.BYTES_DTYPES:
            # TODO(ArrayManager) astype to bytes dtypes does not yet give object dtype
            td.mark_array_manager_not_yet_implemented(request)
        assert data.b.dtype.name == "object"
    def test_to_frame_with_falsey_names(self):
        """A falsey Series name (0) still becomes a valid column label (GH 16114)."""
        # GH 16114
        result = Series(name=0, dtype=object).to_frame().dtypes
        expected = Series({0: object})
        tm.assert_series_equal(result, expected)
        result = DataFrame(Series(name=0, dtype=object)).dtypes
        tm.assert_series_equal(result, expected)
    @pytest.mark.arm_slow
    @pytest.mark.parametrize("dtype", [None, "uint8", "category"])
    def test_constructor_range_dtype(self, dtype):
        """range() data is materialized with the requested dtype (or default int64)."""
        expected = DataFrame({"A": [0, 1, 2, 3, 4]}, dtype=dtype or "int64")
        # GH 26342
        result = DataFrame(range(5), columns=["A"], dtype=dtype)
        tm.assert_frame_equal(result, expected)
        # GH 16804
        result = DataFrame({"A": range(5)}, dtype=dtype)
        tm.assert_frame_equal(result, expected)
def test_frame_from_list_subclass(self):
# GH21226
class List(list):
pass
expected = DataFrame([[1, 2, 3], [4, 5, 6]])
result = DataFrame(List([List([1, 2, 3]), List([4, 5, 6])]))
tm.assert_frame_equal(result, expected)
    @pytest.mark.parametrize(
        "extension_arr",
        [
            Categorical(list("aabbc")),
            SparseArray([1, np.nan, np.nan, np.nan]),
            IntervalArray([Interval(0, 1), Interval(1, 5)]),
            PeriodArray(pd.period_range(start="1/1/2017", end="1/1/2018", freq="M")),
        ],
    )
    def test_constructor_with_extension_array(self, extension_arr):
        """DataFrame(EA) matches DataFrame(Series(EA)) for each extension array type (GH11363)."""
        # GH11363
        expected = DataFrame(Series(extension_arr))
        result = DataFrame(extension_arr)
        tm.assert_frame_equal(result, expected)
    def test_datetime_date_tuple_columns_from_dict(self):
        """A tuple of dates works as a single (non-MultiIndex) column key (GH 10863)."""
        # GH 10863
        v = date.today()
        tup = v, v
        result = DataFrame({tup: Series(range(3), index=range(3))}, columns=[tup])
        expected = DataFrame([0, 1, 2], columns=Index(Series([tup])))
        tm.assert_frame_equal(result, expected)
    def test_construct_with_two_categoricalindex_series(self):
        """Rows with different CategoricalIndexes align on the union of their categories (GH 14600)."""
        # GH 14600
        s1 = Series([39, 6, 4], index=CategoricalIndex(["female", "male", "unknown"]))
        s2 = Series(
            [2, 152, 2, 242, 150],
            index=CategoricalIndex(["f", "female", "m", "male", "unknown"]),
        )
        result = DataFrame([s1, s2])
        expected = DataFrame(
            np.array([[39, 6, 4, np.nan, np.nan], [152.0, 242.0, 150.0, 2.0, 2.0]]),
            columns=["female", "male", "unknown", "f", "m"],
        )
        tm.assert_frame_equal(result, expected)
    def test_constructor_series_nonexact_categoricalindex(self):
        """Two value_counts halves sharing the same cut() bins align on the full CategoricalIndex (GH 42424)."""
        # GH 42424
        ser = Series(range(0, 100))
        ser1 = cut(ser, 10).value_counts().head(5)
        ser2 = cut(ser, 10).value_counts().tail(5)
        result = DataFrame({"1": ser1, "2": ser2})
        index = CategoricalIndex(
            [
                Interval(-0.099, 9.9, closed="right"),
                Interval(9.9, 19.8, closed="right"),
                Interval(19.8, 29.7, closed="right"),
                Interval(29.7, 39.6, closed="right"),
                Interval(39.6, 49.5, closed="right"),
                Interval(49.5, 59.4, closed="right"),
                Interval(59.4, 69.3, closed="right"),
                Interval(69.3, 79.2, closed="right"),
                Interval(79.2, 89.1, closed="right"),
                Interval(89.1, 99, closed="right"),
            ],
            ordered=True,
        )
        expected = DataFrame(
            {"1": [10] * 5 + [np.nan] * 5, "2": [np.nan] * 5 + [10] * 5}, index=index
        )
        tm.assert_frame_equal(expected, result)
    def test_from_M8_structured(self):
        """Structured M8[us] record fields become datetime columns with Timestamp scalars."""
        dates = [(datetime(2012, 9, 9, 0, 0), datetime(2012, 9, 8, 15, 10))]
        arr = np.array(dates, dtype=[("Date", "M8[us]"), ("Forecasting", "M8[us]")])
        df = DataFrame(arr)
        assert df["Date"][0] == dates[0][0]
        assert df["Forecasting"][0] == dates[0][1]
        s = Series(arr["Date"])
        assert isinstance(s[0], Timestamp)
        assert s[0] == dates[0][0]
    def test_from_datetime_subclass(self):
        """datetime subclasses are still inferred as datetime64[ns] (GH21142)."""
        # GH21142 Verify whether Datetime subclasses are also of dtype datetime
        class DatetimeSubclass(datetime):
            pass
        data = DataFrame({"datetime": [DatetimeSubclass(2020, 1, 1, 1, 1)]})
        assert data.datetime.dtype == "datetime64[ns]"
    def test_with_mismatched_index_length_raises(self):
        """An index whose length disagrees with 1D datetime data raises ValueError (GH#33437)."""
        # GH#33437
        dti = date_range("2016-01-01", periods=3, tz="US/Pacific")
        msg = "Shape of passed values|Passed arrays should have the same length"
        with pytest.raises(ValueError, match=msg):
            DataFrame(dti, index=range(4))
    def test_frame_ctor_datetime64_column(self):
        """An ndarray of datetime64 values becomes an M8[ns] column."""
        rng = date_range("1/1/2000 00:00:00", "1/1/2000 1:59:50", freq="10s")
        dates = np.asarray(rng)
        df = DataFrame({"A": np.random.randn(len(rng)), "B": dates})
        assert np.issubdtype(df["B"].dtype, np.dtype("M8[ns]"))
    def test_dataframe_constructor_infer_multiindex(self):
        """A list of equal-length arrays passed as index/columns is promoted to a MultiIndex."""
        index_lists = [["a", "a", "b", "b"], ["x", "y", "x", "y"]]
        multi = DataFrame(
            np.random.randn(4, 4),
            index=[np.array(x) for x in index_lists],
        )
        assert isinstance(multi.index, MultiIndex)
        assert not isinstance(multi.columns, MultiIndex)
        multi = DataFrame(np.random.randn(4, 4), columns=index_lists)
        assert isinstance(multi.columns, MultiIndex)
    @pytest.mark.parametrize(
        "input_vals",
        [
            ([1, 2]),
            (["1", "2"]),
            (list(date_range("1/1/2011", periods=2, freq="H"))),
            (list(date_range("1/1/2011", periods=2, freq="H", tz="US/Eastern"))),
            ([Interval(left=0, right=5)]),
        ],
    )
    def test_constructor_list_str(self, input_vals, string_dtype):
        """dtype=str at construction equals construct-then-astype(str) for varied element types (GH#16605)."""
        # GH#16605
        # Ensure that data elements are converted to strings when
        # dtype is str, 'str', or 'U'
        result = DataFrame({"A": input_vals}, dtype=string_dtype)
        expected = DataFrame({"A": input_vals}).astype({"A": string_dtype})
        tm.assert_frame_equal(result, expected)
    def test_constructor_list_str_na(self, string_dtype):
        """None survives str-dtype casting; only the floats are stringified."""
        result = DataFrame({"A": [1.0, 2.0, None]}, dtype=string_dtype)
        expected = DataFrame({"A": ["1.0", "2.0", None]}, dtype=object)
        tm.assert_frame_equal(result, expected)
    @pytest.mark.parametrize("copy", [False, True])
    def test_dict_nocopy(
        self, request, copy, any_numeric_ea_dtype, any_numpy_dtype, using_array_manager
    ):
        """copy=False must share the caller's buffers; copy=True must isolate them (GH#38939)."""
        if using_array_manager and not (
            (any_numpy_dtype in (tm.STRING_DTYPES + tm.BYTES_DTYPES))
            or (
                any_numpy_dtype
                in (tm.DATETIME64_DTYPES + tm.TIMEDELTA64_DTYPES + tm.BOOL_DTYPES)
                and copy
            )
        ):
            # TODO(ArrayManager) properly honor copy keyword for dict input
            td.mark_array_manager_not_yet_implemented(request)
        a = np.array([1, 2], dtype=any_numpy_dtype)
        b = np.array([3, 4], dtype=any_numpy_dtype)
        if b.dtype.kind in ["S", "U"]:
            # These get cast, making the checks below more cumbersome
            return
        c = pd.array([1, 2], dtype=any_numeric_ea_dtype)
        df = DataFrame({"a": a, "b": b, "c": c}, copy=copy)
        def get_base(obj):
            # Underlying ndarray whose .base exposes view relationships.
            if isinstance(obj, np.ndarray):
                return obj.base
            elif isinstance(obj.dtype, np.dtype):
                # i.e. DatetimeArray, TimedeltaArray
                return obj._ndarray.base
            else:
                raise TypeError
        def check_views():
            # written to work for either BlockManager or ArrayManager
            assert sum(x is c for x in df._mgr.arrays) == 1
            assert (
                sum(
                    get_base(x) is a
                    for x in df._mgr.arrays
                    if isinstance(x.dtype, np.dtype)
                )
                == 1
            )
            assert (
                sum(
                    get_base(x) is b
                    for x in df._mgr.arrays
                    if isinstance(x.dtype, np.dtype)
                )
                == 1
            )
        if not copy:
            # constructor preserves views
            check_views()
        df.iloc[0, 0] = 0
        df.iloc[0, 1] = 0
        if not copy:
            # Check that the underlying data behind df["c"] is still `c`
            # after setting with iloc. Since we don't know which entry in
            # df._mgr.arrays corresponds to df["c"], we just check that exactly
            # one of these arrays is `c`. GH#38939
            assert sum(x is c for x in df._mgr.arrays) == 1
            # TODO: we can call check_views if we stop consolidating
            # in setitem_with_indexer
        # FIXME(GH#35417): until GH#35417, iloc.setitem into EA values does not preserve
        # view, so we have to check in the other direction
        # df.iloc[0, 2] = 0
        # if not copy:
        # check_views()
        c[0] = 0
        if copy:
            if a.dtype.kind == "M":
                assert a[0] == a.dtype.type(1, "ns")
                assert b[0] == b.dtype.type(3, "ns")
            else:
                assert a[0] == a.dtype.type(1)
                assert b[0] == b.dtype.type(3)
            # FIXME(GH#35417): enable after GH#35417
            # assert c[0] == 1
            assert df.iloc[0, 2] == 1
        else:
            # TODO: we can call check_views if we stop consolidating
            # in setitem_with_indexer
            # FIXME(GH#35417): enable after GH#35417
            # assert b[0] == 0
            assert df.iloc[0, 2] == 0
    def test_from_series_with_name_with_columns(self):
        """columns= that excludes the Series' name drops the data, leaving an empty frame (GH 7893)."""
        # GH 7893
        result = DataFrame(Series(1, name="foo"), columns=["bar"])
        expected = DataFrame(columns=["bar"])
        tm.assert_frame_equal(result, expected)
def test_nested_list_columns(self):
# GH 14467
result = DataFrame(
[[1, 2, 3], [4, 5, 6]], columns=[["A", "A", "A"], ["a", "b", "c"]]
)
expected = DataFrame(
[[1, 2, 3], [4, 5, 6]],
columns=MultiIndex.from_tuples([("A", "a"), ("A", "b"), ("A", "c")]),
)
tm.assert_frame_equal(result, expected)
    def test_from_2d_object_array_of_periods_or_intervals(self):
        """2D object arrays of Period/Interval scalars infer their EA dtypes column-by-column."""
        # Period analogue to GH#26825
        pi = pd.period_range("2016-04-05", periods=3)
        data = pi._data.astype(object).reshape(1, -1)
        df = DataFrame(data)
        assert df.shape == (1, 3)
        assert (df.dtypes == pi.dtype).all()
        assert (df == pi).all().all()
        ii = pd.IntervalIndex.from_breaks([3, 4, 5, 6])
        data2 = ii._data.astype(object).reshape(1, -1)
        df2 = DataFrame(data2)
        assert df2.shape == (1, 3)
        assert (df2.dtypes == ii.dtype).all()
        assert (df2 == ii).all().all()
        # mixed
        data3 = np.r_[data, data2, data, data2].T
        df3 = DataFrame(data3)
        expected = DataFrame({0: pi, 1: ii, 2: pi, 3: ii})
        tm.assert_frame_equal(df3, expected)
    @pytest.mark.parametrize(
        "col_a, col_b",
        [
            ([[1], [2]], np.array([[1], [2]])),
            (np.array([[1], [2]]), [[1], [2]]),
            (np.array([[1], [2]]), np.array([[1], [2]])),
        ],
    )
    def test_error_from_2darray(self, col_a, col_b):
        """2D per-column values in a dict raise a clear ValueError."""
        msg = "Per-column arrays must each be 1-dimensional"
        with pytest.raises(ValueError, match=msg):
            DataFrame({"a": col_a, "b": col_b})
    def test_from_dict_with_missing_copy_false(self):
        """All-NaN filled columns must not share memory, even with copy=False (GH#45369)."""
        # GH#45369 filled columns should not be views of one another
        df = DataFrame(index=[1, 2, 3], columns=["a", "b", "c"], copy=False)
        assert not np.shares_memory(df["a"]._values, df["b"]._values)
        df.iloc[0, 0] = 0
        expected = DataFrame(
            {
                "a": [0, np.nan, np.nan],
                "b": [np.nan, np.nan, np.nan],
                "c": [np.nan, np.nan, np.nan],
            },
            index=[1, 2, 3],
            dtype=object,
        )
        tm.assert_frame_equal(df, expected)
class TestDataFrameConstructorIndexInference:
    """Index inference when dict-of-Series inputs carry heterogeneous indexes."""
    def test_frame_from_dict_of_series_overlapping_monthly_period_indexes(self):
        """Overlapping PeriodIndexes are unioned into a single PeriodIndex."""
        rng1 = pd.period_range("1/1/1999", "1/1/2012", freq="M")
        s1 = Series(np.random.randn(len(rng1)), rng1)
        rng2 = pd.period_range("1/1/1980", "12/1/2001", freq="M")
        s2 = Series(np.random.randn(len(rng2)), rng2)
        df = DataFrame({"s1": s1, "s2": s2})
        exp = pd.period_range("1/1/1980", "1/1/2012", freq="M")
        tm.assert_index_equal(df.index, exp)
    def test_frame_from_dict_with_mixed_tzaware_indexes(self):
        """Mixed-tz indexes concatenate (object dtype) in insertion order; mixing in tz-naive raises (GH#44091)."""
        # GH#44091
        dti = date_range("2016-01-01", periods=3)
        ser1 = Series(range(3), index=dti)
        ser2 = Series(range(3), index=dti.tz_localize("UTC"))
        ser3 = Series(range(3), index=dti.tz_localize("US/Central"))
        ser4 = Series(range(3))
        # no tz-naive, but we do have mixed tzs and a non-DTI
        df1 = DataFrame({"A": ser2, "B": ser3, "C": ser4})
        exp_index = Index(
            list(ser2.index) + list(ser3.index) + list(ser4.index), dtype=object
        )
        tm.assert_index_equal(df1.index, exp_index)
        df2 = DataFrame({"A": ser2, "C": ser4, "B": ser3})
        exp_index3 = Index(
            list(ser2.index) + list(ser4.index) + list(ser3.index), dtype=object
        )
        tm.assert_index_equal(df2.index, exp_index3)
        df3 = DataFrame({"B": ser3, "A": ser2, "C": ser4})
        exp_index3 = Index(
            list(ser3.index) + list(ser2.index) + list(ser4.index), dtype=object
        )
        tm.assert_index_equal(df3.index, exp_index3)
        df4 = DataFrame({"C": ser4, "B": ser3, "A": ser2})
        exp_index4 = Index(
            list(ser4.index) + list(ser3.index) + list(ser2.index), dtype=object
        )
        tm.assert_index_equal(df4.index, exp_index4)
        # TODO: not clear if these raising is desired (no extant tests),
        # but this is de facto behavior 2021-12-22
        msg = "Cannot join tz-naive with tz-aware DatetimeIndex"
        with pytest.raises(TypeError, match=msg):
            DataFrame({"A": ser2, "B": ser3, "C": ser4, "D": ser1})
        with pytest.raises(TypeError, match=msg):
            DataFrame({"A": ser2, "B": ser3, "D": ser1})
        with pytest.raises(TypeError, match=msg):
            DataFrame({"D": ser1, "A": ser2, "B": ser3})
class TestDataFrameConstructorWithDtypeCoercion:
    """Deprecation of silent casting when float data meets an integer dtype (GH#40110)."""
    def test_floating_values_integer_dtype(self):
        """Lossy float->int casts warn; lossless ones don't; NaN path matches astype's future error."""
        # GH#40110 make DataFrame behavior with arraylike floating data and
        # inty dtype match Series behavior
        arr = np.random.randn(10, 5)
        msg = "if they cannot be cast losslessly"
        with tm.assert_produces_warning(FutureWarning, match=msg):
            DataFrame(arr, dtype="i8")
        with tm.assert_produces_warning(None):
            # if they can be cast losslessly, no warning
            DataFrame(arr.round(), dtype="i8")
        # with NaNs, we go through a different path with a different warning
        arr[0, 0] = np.nan
        msg = "passing float-dtype values containing NaN"
        with tm.assert_produces_warning(FutureWarning, match=msg):
            DataFrame(arr, dtype="i8")
        with tm.assert_produces_warning(FutureWarning, match=msg):
            Series(arr[0], dtype="i8")
        # The future (raising) behavior matches what we would get via astype:
        msg = r"Cannot convert non-finite values \(NA or inf\) to integer"
        with pytest.raises(ValueError, match=msg):
            DataFrame(arr).astype("i8")
        with pytest.raises(ValueError, match=msg):
            Series(arr[0]).astype("i8")
class TestDataFrameConstructorWithDatetimeTZ:
@pytest.mark.parametrize("tz", ["US/Eastern", "dateutil/US/Eastern"])
def test_construction_preserves_tzaware_dtypes(self, tz):
# after GH#7822
# these retain the timezones on dict construction
dr = date_range("2011/1/1", "2012/1/1", freq="W-FRI")
dr_tz = dr.tz_localize(tz)
df = DataFrame({"A": "foo", "B": dr_tz}, index=dr)
tz_expected = DatetimeTZDtype("ns", dr_tz.tzinfo)
assert df["B"].dtype == tz_expected
# GH#2810 (with timezones)
datetimes_naive = [ts.to_pydatetime() for ts in dr]
datetimes_with_tz = [ts.to_pydatetime() for ts in dr_tz]
df = DataFrame({"dr": dr})
df["dr_tz"] = dr_tz
df["datetimes_naive"] = datetimes_naive
df["datetimes_with_tz"] = datetimes_with_tz
result = df.dtypes
expected = Series(
[
np.dtype("datetime64[ns]"),
DatetimeTZDtype(tz=tz),
np.dtype("datetime64[ns]"),
DatetimeTZDtype(tz=tz),
],
index=["dr", "dr_tz", "datetimes_naive", "datetimes_with_tz"],
)
tm.assert_series_equal(result, expected)
@pytest.mark.parametrize("pydt", [True, False])
def test_constructor_data_aware_dtype_naive(self, tz_aware_fixture, pydt):
# GH#25843, GH#41555, GH#33401
tz = tz_aware_fixture
ts = Timestamp("2019", tz=tz)
if pydt:
ts = ts.to_pydatetime()
ts_naive = Timestamp("2019")
with tm.assert_produces_warning(FutureWarning):
result = DataFrame({0: [ts]}, dtype="datetime64[ns]")
expected = DataFrame({0: [ts_naive]})
tm.assert_frame_equal(result, expected)
with tm.assert_produces_warning(FutureWarning):
result = DataFrame({0: ts}, index=[0], dtype="datetime64[ns]")
tm.assert_frame_equal(result, expected)
with tm.assert_produces_warning(FutureWarning):
result = DataFrame([ts], dtype="datetime64[ns]")
tm.assert_frame_equal(result, expected)
with tm.assert_produces_warning(FutureWarning):
result = DataFrame(np.array([ts], dtype=object), dtype="datetime64[ns]")
tm.assert_frame_equal(result, expected)
with tm.assert_produces_warning(FutureWarning):
result = DataFrame(ts, index=[0], columns=[0], dtype="datetime64[ns]")
tm.assert_frame_equal(result, expected)
with tm.assert_produces_warning(FutureWarning):
df = DataFrame([Series([ts])], dtype="datetime64[ns]")
tm.assert_frame_equal(result, expected)
with tm.assert_produces_warning(FutureWarning):
df = DataFrame([[ts]], columns=[0], dtype="datetime64[ns]")
tm.assert_equal(df, expected)
def test_from_dict(self):
# 8260
# support datetime64 with tz
idx = Index(date_range("20130101", periods=3, tz="US/Eastern"), name="foo")
dr = date_range("20130110", periods=3)
# construction
df = DataFrame({"A": idx, "B": dr})
assert df["A"].dtype, "M8[ns, US/Eastern"
assert df["A"].name == "A"
tm.assert_series_equal(df["A"], Series(idx, name="A"))
tm.assert_series_equal(df["B"], Series(dr, name="B"))
def test_from_index(self):
# from index
idx2 = date_range("20130101", periods=3, tz="US/Eastern", name="foo")
df2 = DataFrame(idx2)
tm.assert_series_equal(df2["foo"], Series(idx2, name="foo"))
df2 = DataFrame(Series(idx2))
tm.assert_series_equal(df2["foo"], Series(idx2, name="foo"))
idx2 = date_range("20130101", periods=3, tz="US/Eastern")
df2 = DataFrame(idx2)
tm.assert_series_equal(df2[0], Series(idx2, name=0))
df2 = DataFrame(Series(idx2))
tm.assert_series_equal(df2[0], Series(idx2, name=0))
def test_frame_dict_constructor_datetime64_1680(self):
dr = date_range("1/1/2012", periods=10)
s = Series(dr, index=dr)
# it works!
DataFrame({"a": "foo", "b": s}, index=dr)
DataFrame({"a": "foo", "b": s.values}, index=dr)
def test_frame_datetime64_mixed_index_ctor_1681(self):
dr = date_range("2011/1/1", "2012/1/1", freq="W-FRI")
ts = Series(dr)
# it works!
d = DataFrame({"A": "foo", "B": ts}, index=dr)
assert d["B"].isna().all()
def test_frame_timeseries_column(self):
# GH19157
dr = date_range(start="20130101T10:00:00", periods=3, freq="T", tz="US/Eastern")
result = DataFrame(dr, columns=["timestamps"])
expected = DataFrame(
{
"timestamps": [
Timestamp("20130101T10:00:00", tz="US/Eastern"),
Timestamp("20130101T10:01:00", tz="US/Eastern"),
Timestamp("20130101T10:02:00", tz="US/Eastern"),
]
}
)
tm.assert_frame_equal(result, expected)
def test_nested_dict_construction(self):
# GH22227
columns = ["Nevada", "Ohio"]
pop = {
"Nevada": {2001: 2.4, 2002: 2.9},
"Ohio": {2000: 1.5, 2001: 1.7, 2002: 3.6},
}
result = DataFrame(pop, index=[2001, 2002, 2003], columns=columns)
expected = DataFrame(
[(2.4, 1.7), (2.9, 3.6), (np.nan, np.nan)],
columns=columns,
index=Index([2001, 2002, 2003]),
)
tm.assert_frame_equal(result, expected)
def test_from_tzaware_object_array(self):
# GH#26825 2D object array of tzaware timestamps should not raise
dti = date_range("2016-04-05 04:30", periods=3, tz="UTC")
data = dti._data.astype(object).reshape(1, -1)
df = DataFrame(data)
assert df.shape == (1, 3)
assert (df.dtypes == dti.dtype).all()
assert (df == dti).all().all()
def test_from_tzaware_mixed_object_array(self):
# GH#26825
arr = np.array(
[
[
Timestamp("2013-01-01 00:00:00"),
Timestamp("2013-01-02 00:00:00"),
Timestamp("2013-01-03 00:00:00"),
],
[
Timestamp("2013-01-01 00:00:00-0500", tz="US/Eastern"),
pd.NaT,
Timestamp("2013-01-03 00:00:00-0500", tz="US/Eastern"),
],
[
Timestamp("2013-01-01 00:00:00+0100", tz="CET"),
pd.NaT,
Timestamp("2013-01-03 00:00:00+0100", tz="CET"),
],
],
dtype=object,
).T
res = DataFrame(arr, columns=["A", "B", "C"])
expected_dtypes = [
"datetime64[ns]",
"datetime64[ns, US/Eastern]",
"datetime64[ns, CET]",
]
assert (res.dtypes == expected_dtypes).all()
def test_from_2d_ndarray_with_dtype(self):
# GH#12513
array_dim2 = np.arange(10).reshape((5, 2))
df = DataFrame(array_dim2, dtype="datetime64[ns, UTC]")
expected = DataFrame(array_dim2).astype("datetime64[ns, UTC]")
tm.assert_frame_equal(df, expected)
@pytest.mark.parametrize("typ", [set, frozenset])
def test_construction_from_set_raises(self, typ):
# https://github.com/pandas-dev/pandas/issues/32582
values = typ({1, 2, 3})
msg = f"'{typ.__name__}' type is unordered"
with pytest.raises(TypeError, match=msg):
DataFrame({"a": values})
with pytest.raises(TypeError, match=msg):
Series(values)
def test_construction_from_ndarray_datetimelike(self):
# ensure the underlying arrays are properly wrapped as EA when
# constructed from 2D ndarray
arr = np.arange(0, 12, dtype="datetime64[ns]").reshape(4, 3)
df = DataFrame(arr)
assert all(isinstance(arr, DatetimeArray) for arr in df._mgr.arrays)
def test_construction_from_ndarray_with_eadtype_mismatched_columns(self):
arr = np.random.randn(10, 2)
dtype = pd.array([2.0]).dtype
msg = r"len\(arrays\) must match len\(columns\)"
with pytest.raises(ValueError, match=msg):
DataFrame(arr, columns=["foo"], dtype=dtype)
arr2 = pd.array([2.0, 3.0, 4.0])
with pytest.raises(ValueError, match=msg):
DataFrame(arr2, columns=["foo", "bar"])
def get1(obj):  # TODO: make a helper in tm?
    """Return the first scalar element of *obj* (a Series or a DataFrame)."""
    is_series = isinstance(obj, Series)
    return obj.iloc[0] if is_series else obj.iloc[0, 0]
class TestFromScalar:
    """Scalar-broadcast construction, parametrized over frame/series and list/dict/scalar boxing."""
    @pytest.fixture(params=[list, dict, None])
    def constructor(self, request, frame_or_series):
        # Returns a callable that builds the target box (DataFrame or Series)
        # from one scalar, via a list, a dict, or direct scalar broadcasting.
        box = request.param
        extra = {"index": range(2)}
        if frame_or_series is DataFrame:
            extra["columns"] = ["A"]
        if box is None:
            return functools.partial(frame_or_series, **extra)
        elif box is dict:
            if frame_or_series is Series:
                return lambda x, **kwargs: frame_or_series(
                    {0: x, 1: x}, **extra, **kwargs
                )
            else:
                return lambda x, **kwargs: frame_or_series({"A": x}, **extra, **kwargs)
        else:
            if frame_or_series is Series:
                return lambda x, **kwargs: frame_or_series([x, x], **extra, **kwargs)
            else:
                return lambda x, **kwargs: frame_or_series(
                    {"A": [x, x]}, **extra, **kwargs
                )
    @pytest.mark.parametrize("dtype", ["M8[ns]", "m8[ns]"])
    def test_from_nat_scalar(self, dtype, constructor):
        # NaT broadcasts to an all-NA datetime/timedelta box of the requested dtype
        obj = constructor(pd.NaT, dtype=dtype)
        assert np.all(obj.dtypes == dtype)
        assert np.all(obj.isna())
    def test_from_timedelta_scalar_preserves_nanos(self, constructor):
        # 1-nanosecond resolution must survive construction
        td = Timedelta(1)
        obj = constructor(td, dtype="m8[ns]")
        assert get1(obj) == td
    def test_from_timestamp_scalar_preserves_nanos(self, constructor, fixed_now_ts):
        # 1-nanosecond resolution must survive construction
        ts = fixed_now_ts + Timedelta(1)
        obj = constructor(ts, dtype="M8[ns]")
        assert get1(obj) == ts
    def test_from_timedelta64_scalar_object(self, constructor):
        # with dtype=object the raw np.timedelta64 scalar is kept as-is
        td = Timedelta(1)
        td64 = td.to_timedelta64()
        obj = constructor(td64, dtype=object)
        assert isinstance(get1(obj), np.timedelta64)
    @pytest.mark.parametrize("cls", [np.datetime64, np.timedelta64])
    def test_from_scalar_datetimelike_mismatched(self, constructor, cls):
        # datetime64 scalar with m8 dtype (and vice versa) must raise, for NaT too
        scalar = cls("NaT", "ns")
        dtype = {np.datetime64: "m8[ns]", np.timedelta64: "M8[ns]"}[cls]
        msg = "Cannot cast"
        if cls is np.datetime64:
            msg = "|".join(
                [
                    r"dtype datetime64\[ns\] cannot be converted to timedelta64\[ns\]",
                    "Cannot cast",
                ]
            )
        with pytest.raises(TypeError, match=msg):
            constructor(scalar, dtype=dtype)
        scalar = cls(4, "ns")
        with pytest.raises(TypeError, match=msg):
            constructor(scalar, dtype=dtype)
    @pytest.mark.parametrize("cls", [datetime, np.datetime64])
    def test_from_out_of_bounds_datetime(self, constructor, cls):
        # out-of-ns-bounds datetimes are kept as the original scalar type
        scalar = datetime(9999, 1, 1)
        if cls is np.datetime64:
            scalar = np.datetime64(scalar, "D")
        result = constructor(scalar)
        assert type(get1(result)) is cls
    @pytest.mark.parametrize("cls", [timedelta, np.timedelta64])
    def test_from_out_of_bounds_timedelta(self, constructor, cls):
        # out-of-ns-bounds timedeltas are kept as the original scalar type
        scalar = datetime(9999, 1, 1) - datetime(1970, 1, 1)
        if cls is np.timedelta64:
            scalar = np.timedelta64(scalar, "D")
        result = constructor(scalar)
        assert type(get1(result)) is cls
    def test_tzaware_data_tznaive_dtype(self, constructor):
        # tz-aware scalar + tz-naive dtype warns and localizes away the tz
        tz = "US/Eastern"
        ts = Timestamp("2019", tz=tz)
        ts_naive = Timestamp("2019")
        with tm.assert_produces_warning(FutureWarning, match="Data is timezone-aware"):
            result = constructor(ts, dtype="M8[ns]")
        assert np.all(result.dtypes == "M8[ns]")
        assert np.all(result == ts_naive)
| 36.560233 | 88 | 0.560373 |
8948ae2eb66e21bb200a6f0da8ab8d626721351f | 332 | py | Python | schrodinger/cli.py | dgiambra/schrodinger | ac1e283242d662f5bab151b46770f21f0ff0bbe0 | [
"MIT"
] | null | null | null | schrodinger/cli.py | dgiambra/schrodinger | ac1e283242d662f5bab151b46770f21f0ff0bbe0 | [
"MIT"
] | null | null | null | schrodinger/cli.py | dgiambra/schrodinger | ac1e283242d662f5bab151b46770f21f0ff0bbe0 | [
"MIT"
] | null | null | null | # -*- coding: utf-8 -*-
import click
@click.command()
def main(args=None):
    """Console script for schrodinger"""
    # Placeholder body generated by the cookiecutter template; replace with
    # real CLI logic.
    click.echo("Replace this message by putting your code into "
               "schrodinger.cli.main")
    click.echo("See click documentation at http://click.pocoo.org/")
# Allow running as a plain script: ``python cli.py``.
if __name__ == "__main__":
    main()
d751b8b1fd6c64bc0ef29b4f0a1a6db007c0ad00 | 13,104 | py | Python | differential_evolution.py | oleksandrr/differential_evolution | ee57db1317f79b9492fdee4ff1fe154fdb9129d7 | [
"Apache-2.0"
] | 4 | 2015-12-23T17:14:24.000Z | 2020-03-22T15:04:02.000Z | differential_evolution.py | oleksandrr/differential_evolution | ee57db1317f79b9492fdee4ff1fe154fdb9129d7 | [
"Apache-2.0"
] | null | null | null | differential_evolution.py | oleksandrr/differential_evolution | ee57db1317f79b9492fdee4ff1fe154fdb9129d7 | [
"Apache-2.0"
] | 2 | 2017-03-29T12:49:40.000Z | 2018-10-19T15:53:35.000Z | #!/usr/bin/python
from __future__ import division
__all__ = ['mutate', 'crossover', 'select', 'minimize']
import numpy; from numpy import (array, random)
def mutate(original_population, original_population_costs, F, method="rand/1"):
    """Return a mutant population derived from original_population.

    F is the differential weight; method selects the mutation strategy
    ("rand/1" default, "MDE5", "current-to-best/1", "rand/2/dir",
    "best/1/dither", "best/2").  The costs array is consulted only by the
    strategies that favour fitter individuals.
    """
    # In some cases random_unique_indices can become a bottleneck for the whole algorithm,
    # so further attention may be warranted. Circumstantially, it is found that the
    # uniqueness constraint on the parents is non-critical, so in principle one might
    # wish to choose the parents without restrictions for better mutation performance.
    # NOTE(review): the mutable default arguments below are never mutated
    # (list(inclusions) copies), so they are safe despite the usual smell.
    def random_unique_indices(source_length, count, inclusions=[], exclusions=[]):
        indices = list(inclusions)
        while len(indices) < count:
            next = random.randint(source_length)
            while next in indices or next in exclusions:
                next = random.randint(source_length)
            indices.append(next)
        return indices
    original_population = array(original_population,copy=False)
    original_population_costs = array(original_population_costs,copy=False)
    population_size = len(original_population)
    # One row of `count` distinct parent indices per individual, excluding itself.
    differential_evolution_indices = lambda count: array(
        [ random_unique_indices(population_size, count, exclusions=[index])
          for index in xrange(population_size) ]
    )
    if method == "MDE5":
        # Based on Thangaraj et al., Appl. Math. Comp. 216 (2), 532 (2010).
        best_individual = original_population[numpy.argmin(original_population_costs)]
        parents = original_population[differential_evolution_indices(2)]
        L = random.laplace(F, F/4, (population_size,1))
        return parents[:,0] + L*(best_individual - parents[:,1])
    elif method == "current-to-best/1":
        # Based on the current-to-p-best/1 method of JADE as presented by Jinqiao Zhang and
        # A. C. Sanderson in IEEE Trans. Evol. Comput. 13 (5), 945 (2009). Here we pick from
        # the first decile, but this proportion is arbitrary and may be varied.
        p = min(1, int(round(0.1*population_size)))
        p_best_individuals = original_population[numpy.argsort(original_population_costs)[:p]]
        random_p_best = p_best_individuals[random.randint(p, size=population_size)]
        parents = original_population[differential_evolution_indices(2)]
        return original_population + F*(
            random_p_best - original_population + parents[:,0] - parents[:,1]
        )
    elif method == "rand/2/dir":
        # This method behaves similarly to the reflection operator of the Nelder-Mead algorithm.
        # It is not particularly effective for low-dimensional problems, but performs well on
        # high-dimensional non-separable functions.
        # TODO: A more intelligent hybridization of DE and N-M is probably desirable.
        indices = differential_evolution_indices(5)
        sorted_indices = indices[ numpy.arange(population_size)[:,None],
                                  numpy.argsort(original_population_costs[indices]) ]
        parents = original_population[sorted_indices]
        return parents[:,0] + F*(parents[:,1] + parents[:,2] - parents[:,3] - parents[:,4])
    elif method == "best/1/dither":
        # The classical best/1 mutator strongly favours local search; here we add dither to
        # help avoid misconvergence by improving the diversity of the mutants.
        best_individual = original_population[numpy.argmin(original_population_costs)]
        parents = original_population[differential_evolution_indices(2)]
        N = random.normal(F, F/2, (population_size,1))
        return best_individual + N*(parents[:,0] - parents[:,1])
    elif method == "best/2":
        best_individual = original_population[numpy.argmin(original_population_costs)]
        parents = original_population[differential_evolution_indices(4)]
        return best_individual + F*(parents[:,0] + parents[:,1] - parents[:,2] - parents[:,3])
    else: # invalid or no method specified; use default method == "rand/1"
        parents = original_population[differential_evolution_indices(3)]
        return parents[:,0] + F*(parents[:,1] - parents[:,2])
# Here we can safely rely on mutant_population being an array, because it is produced in this
# form by mutate(). However, the first-generation original_population might be passed as a list,
# so we must wrap it in array() as a precaution. original_population and mutant_population will
# however have the same size and shape regardless of this potential type difference.
#
def crossover(original_population, mutant_population, C, method="binomial"):
    """Recombine each original vector with its mutant counterpart.

    "arithmetic" blends the two populations linearly with weight C; any
    other value of method (default "binomial") independently copies each
    mutant component with probability C and keeps the original otherwise.
    """
    originals = array(original_population, copy=False)
    if method == "arithmetic":
        # TODO: This method is questionably effective. A different (rotationally
        # invariant) one should be implemented instead if possible.
        return originals + C*(mutant_population - originals)
    # invalid or no method specified; use default method == "binomial"
    crossover_mask = random.random_sample(mutant_population.shape) < C
    return numpy.where(crossover_mask, mutant_population, originals)
# The size of the population need not be conserved from one generation to the next, so we may
# produce a final population by selecting n individuals with any n such that the uniqueness constraint
# on the parents is satisfiable (thus avoiding an infinite loop), i.e.:
# n > 2 (MDE5, current-to-best/1, best/1/dither)
# n > 3 (rand/1).
# n > 4 (best/2)
# n > 5 (rand/2/dir)
#
def select(objective_function,
        original_population, original_population_costs,
        trial_population, method="Storn-Price", stochastic=False,
        fitness_monitor=None, generation_monitor=None):
    """Choose the next generation from the original and trial populations.

    "Storn-Price" (default) keeps, position by position, whichever of the
    original and the trial vector has the lower cost; "elitist" keeps the
    better half of the combined populations regardless of pairing.
    Returns (new_population_costs, new_population).
    """
    # numpy.asarray == array(..., "copy only if needed"): same intent as the
    # old array(x, copy=False), but also valid under numpy >= 2.0 where
    # copy=False became "never copy" and raises for list inputs.
    original_population = numpy.asarray(original_population)
    original_population_costs = numpy.asarray(original_population_costs)
    # Here the objective function is evaluated for each trial vector. For brevity's sake, we will call
    # the objective function value associated with each vector its "cost" (which is to be minimized).
    #
    # Fix: build the cost list eagerly instead of wrapping a lazy `map`
    # object in array() — on Python 3 that produced a useless 0-d object
    # array instead of a float vector.
    trial_population_costs = numpy.asarray(
        [objective_function(individual) for individual in trial_population]
    )
    if method == "elitist":
        all_costs = numpy.concatenate((original_population_costs, trial_population_costs))
        indices = numpy.argsort(all_costs)[:len(all_costs)//2]
        new_population_costs = all_costs[indices]
        new_population = numpy.concatenate((original_population, trial_population))[indices]
    else: # invalid or no method specified; use default method == "Storn-Price"
        mask = original_population_costs < trial_population_costs
        new_population_costs = numpy.where(mask, original_population_costs, trial_population_costs)
        new_population = numpy.where(mask.reshape((-1,1)), original_population, trial_population)
    # For stochastic objective functions it is unhelpful to have the function values recorded for each
    # parameter vector once and for all, because vectors with small objective function values obtained
    # by chance tend to proliferate and disrupt the optimization process. Here we (re-)evaluate the
    # objective function if necessary in order to avoid this. Note that this option should only be
    # used for genuinely stochastic objective functions as it doubles the number of function
    # evaluations required at each generation.
    #
    if stochastic:
        new_population_costs = numpy.asarray(
            [objective_function(individual) for individual in new_population]
        )
    # generation_monitor() is a function that can receive a tuple of the form (array of objective
    # function values, array of parameter vectors) at each iteration. This allows results to be
    # displayed or written out to disk as the optimization proceeds, if required.
    #
    if generation_monitor is not None:
        generation_monitor((new_population_costs, new_population))
    # fitness_monitor() is a function that can receive an array of objective function values at each
    # iteration. This is intended to allow the algorithm to determine its rate of improvement over
    # successive generations and so to stop when the optimization has converged or if the population
    # has stagnated.
    #
    if fitness_monitor is not None:
        fitness_monitor(new_population_costs)
    return (new_population_costs, new_population)
def minimize(objective_function, initial_population, F, C,
        convergence_tolerance, convergence_history_length, max_unconverged_iterations,
        mutation_method="rand/1", crossover_method="binomial",
        selection_method="Storn-Price", stochastic=False,
        output_function=None):
    """Minimize objective_function by differential evolution.

    Iterates mutate/crossover/select generations starting from
    initial_population until the mean cost improvement over the last
    convergence_history_length generations falls below
    convergence_tolerance, or max_unconverged_iterations is reached.
    Returns (iterations, (costs, population)) describing the final
    generation.  output_function, when given, receives a
    (costs, population) tuple for every generation including the first.
    """
    if not (0.0 < F and F <= 2.0):
        raise ValueError(
            "Inappropriate value of differential mixing strength F. Suitable values are 0.0 < F <= 2.0."
        )
    if not (0.0 < C and C <= 1.0):
        raise ValueError(
            "Invalid value of crossover probability C. Allowable values are 0.0 < C <= 1.0."
        )
    if convergence_history_length < 2:
        raise ValueError(
            "Insufficient convergence history length. At least 2 generations are needed to assess convergence."
        )
    if mutation_method not in (
        # Classical methods
        "rand/1", "best/2",
        # Modified classical methods
        "best/1/dither", "rand/2/dir",
        # Non-classical methods
        "current-to-best/1", "MDE5"
    ):
        raise ValueError(
            "Unknown mutation method. Options are " +
            "\"rand/1\" (default), \"best/2\", " +
            "\"best/1/dither\", \"rand/2/dir\", " +
            "\"current-to-best/1\", or \"MDE5\"."
        )
    if crossover_method not in ("binomial", "arithmetic"):
        raise ValueError(
            "Unknown crossover method. Options are \"binomial\" (default) or \"arithmetic\"."
        )
    if selection_method not in ("Storn-Price", "elitist"):
        raise ValueError(
            "Unknown selection method. Options are \"Storn-Price\" (default) or \"elitist\"."
        )
    # convergence_monitor() is a coroutine that maintains an internal history of objective function values
    # over a specified number of generations. It functions as an iterator that terminates when the cumulative
    # improvement over this number of generations falls below a given tolerance or when a specified maximum
    # number of iterations is exceeded without achieving convergence. The minimum number of iterations
    # performed is equal to the lesser of convergence_history_length or max_unconverged_iterations.
    #
    def convergence_monitor(convergence_tolerance, convergence_history_length, max_unconverged_iterations):
        history = numpy.empty(convergence_history_length,dtype=numpy.float64); history[:] = numpy.nan
        mean_abs_difference = numpy.float64(numpy.nan)
        epsilon = numpy.finfo(numpy.float64).eps
        iteration_count = 1
        while iteration_count <= max_unconverged_iterations:
            new_fitness = yield iteration_count
            if new_fitness is not None:
                history[0] = new_fitness; history = numpy.roll(history, -1)
                mean_abs_difference = numpy.mean(
                    numpy.abs(numpy.diff(history))
                )
            else:
                iteration_count += 1
            if mean_abs_difference < convergence_tolerance + epsilon * mean_abs_difference:
                return
    # The convergence monitor must be instantiated and initialised before use. Cost values are then recorded
    # for each generation by setting the fitness_monitor parameter of select() to a function that accepts a
    # list of objective function values for a generation, reduces the list to a single number, and calls the
    # send() method with this number as a parameter: here this is done by update_cm().
    #
    cm = convergence_monitor(convergence_tolerance, convergence_history_length, max_unconverged_iterations)
    cm.send(None)
    def update_cm(costs):
        # Fix: cm.send() raises StopIteration as soon as the monitor decides
        # to stop, which previously escaped through select() and crashed the
        # run at the exact moment of convergence.  Swallow it here; the main
        # `for` loop then terminates cleanly on its next implicit next(cm).
        try:
            cm.send(numpy.mean(costs))
        except StopIteration:
            pass
    population = numpy.asarray(initial_population)
    # Fix: evaluate the costs into a list instead of keeping a lazy `map`
    # object — on Python 3 numpy.mean() cannot consume a map object, and the
    # costs are read again by select() and output_function() below.
    costs = [objective_function(individual) for individual in population]
    update_cm(costs)
    if output_function is not None:
        output_function((costs, population))
    # Since the initial population was taken to constitute the first generation, this loop will actually
    # perform at most (max_unconverged_iterations - 1) further iterations if convergence is not achieved.
    #
    iterations = 1  # robust default in case the monitor stops before yielding again
    for iterations in cm:
        (costs, population) = select(
            objective_function,
            population, costs,
            crossover(
                population,
                mutate(population, costs, F, method=mutation_method),
                C, method=crossover_method
            ), method=selection_method, stochastic=stochastic,
            fitness_monitor=update_cm, generation_monitor=output_function
        )
    return (iterations, (costs, population))
| 49.263158 | 109 | 0.68956 |
98fb6abc9db9906a939ab7a10492d73e4c3df175 | 11,841 | py | Python | tools/run_tests/xds_k8s_test_driver/framework/infrastructure/gcp/iam.py | Littlecowherd/grpc | d85552acf82e8376e46228dfc74052e2d404249f | [
"Apache-2.0"
] | 1 | 2021-06-21T19:56:01.000Z | 2021-06-21T19:56:01.000Z | tools/run_tests/xds_k8s_test_driver/framework/infrastructure/gcp/iam.py | Littlecowherd/grpc | d85552acf82e8376e46228dfc74052e2d404249f | [
"Apache-2.0"
] | null | null | null | tools/run_tests/xds_k8s_test_driver/framework/infrastructure/gcp/iam.py | Littlecowherd/grpc | d85552acf82e8376e46228dfc74052e2d404249f | [
"Apache-2.0"
] | null | null | null | # Copyright 2021 gRPC authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import dataclasses
import datetime
import functools
import logging
from typing import Any, Dict, FrozenSet, Optional
import googleapiclient.errors
from framework.helpers import retryers
from framework.infrastructure import gcp
logger = logging.getLogger(__name__)
# Type aliases
_timedelta = datetime.timedelta
class EtagConflict(gcp.api.Error):
    """Raised when an IAM policy write lost a concurrent-modification race.

    The IAM service detects stale policy "etag" values on write:
    https://cloud.google.com/iam/docs/policies#etag
    """
def handle_etag_conflict(func):
    """Decorator: retry *func* with exponential backoff on EtagConflict.

    IAM policy updates follow a read-modify-write cycle that can collide
    with concurrent writers; the retryer re-runs the whole cycle for up
    to two minutes before giving up.
    """
    # Fix: preserve the wrapped function's metadata (__name__, __doc__,
    # etc.) so logging and introspection see the real function, not
    # "wrap_retry_on_etag_conflict".  functools is already imported at
    # module level.
    @functools.wraps(func)
    def wrap_retry_on_etag_conflict(*args, **kwargs):
        retryer = retryers.exponential_retryer_with_timeout(
            retry_on_exceptions=(EtagConflict,),
            wait_min=_timedelta(seconds=1),
            wait_max=_timedelta(seconds=10),
            timeout=_timedelta(minutes=2))
        return retryer(func, *args, **kwargs)

    return wrap_retry_on_etag_conflict
def _replace_binding(policy: 'Policy', binding: 'Policy.Binding',
new_binding: 'Policy.Binding') -> 'Policy':
new_bindings = set(policy.bindings)
new_bindings.discard(binding)
new_bindings.add(new_binding)
return dataclasses.replace(policy, bindings=frozenset(new_bindings))
@dataclasses.dataclass(frozen=True)
class ServiceAccount:
    """An IAM service account.

    https://cloud.google.com/iam/docs/reference/rest/v1/projects.serviceAccounts
    Note: "etag" field is skipped because it's deprecated
    """
    name: str
    projectId: str
    uniqueId: str
    email: str
    oauth2ClientId: str
    displayName: str = ''
    description: str = ''
    disabled: bool = False

    @classmethod
    def from_response(cls, response: Dict[str, Any]) -> 'ServiceAccount':
        """Build a ServiceAccount from a GCP API response dict."""
        required = ('name', 'projectId', 'uniqueId', 'email', 'oauth2ClientId')
        fields = {key: response[key] for key in required}
        # The remaining fields may be absent from the response.
        fields['description'] = response.get('description', '')
        fields['displayName'] = response.get('displayName', '')
        fields['disabled'] = response.get('disabled', False)
        return cls(**fields)

    def as_dict(self) -> Dict[str, Any]:
        """Serialize back into a plain field-name -> value dict."""
        return dataclasses.asdict(self)
@dataclasses.dataclass(frozen=True)
class Expr:
    """A textual expression in Common Expression Language (CEL) syntax.

    https://cloud.google.com/iam/docs/reference/rest/v1/Expr
    """
    expression: str
    title: str = ''
    description: str = ''
    location: str = ''

    @classmethod
    def from_response(cls, response: Dict[str, Any]) -> 'Expr':
        # The REST representation maps one-to-one onto the dataclass fields.
        return cls(**response)

    def as_dict(self) -> Dict[str, Any]:
        """Serialize back into the REST representation."""
        return dataclasses.asdict(self)
@dataclasses.dataclass(frozen=True)
class Policy:
    """An Identity and Access Management (IAM) policy, which specifies
    access controls for Google Cloud resources.

    https://cloud.google.com/iam/docs/reference/rest/v1/Policy
    Note: auditConfigs not supported by this implementation.
    """

    @dataclasses.dataclass(frozen=True)
    class Binding:
        """Policy Binding. Associates members with a role.

        https://cloud.google.com/iam/docs/reference/rest/v1/Policy#binding
        """
        role: str
        members: FrozenSet[str]
        condition: Optional[Expr] = None

        @classmethod
        def from_response(cls, response: Dict[str, Any]) -> 'Policy.Binding':
            """Build a Binding from its REST representation."""
            fields = {
                'role': response['role'],
                'members': frozenset(response.get('members', [])),
            }
            if 'condition' in response:
                fields['condition'] = Expr.from_response(response['condition'])
            return cls(**fields)

        def as_dict(self) -> Dict[str, Any]:
            """Serialize to the REST representation; omits a None condition."""
            result = {
                'role': self.role,
                'members': list(self.members),
            }
            if self.condition is not None:
                result['condition'] = self.condition.as_dict()
            return result

    bindings: FrozenSet[Binding]
    etag: str
    version: Optional[int] = None

    def find_binding_for_role(
            self,
            role: str,
            condition: Optional[Expr] = None) -> Optional['Policy.Binding']:
        """Return the first binding matching role and condition, or None.

        Fix: this was decorated with functools.lru_cache, which on an
        instance method keys the cache on `self` and therefore keeps every
        Policy instance alive for the lifetime of the cache
        (flake8-bugbear B019).  The linear scan over the bindings is cheap,
        so the cache is simply dropped.
        """
        results = (binding for binding in self.bindings
                   if binding.role == role and binding.condition == condition)
        return next(results, None)

    @classmethod
    def from_response(cls, response: Dict[str, Any]) -> 'Policy':
        """Build a Policy (with its bindings) from a getIamPolicy response."""
        bindings = frozenset(
            cls.Binding.from_response(b) for b in response.get('bindings', []))
        return cls(bindings=bindings,
                   etag=response['etag'],
                   version=response.get('version'))

    def as_dict(self) -> Dict[str, Any]:
        """Serialize to the REST representation; omits a None version."""
        result = {
            'bindings': [binding.as_dict() for binding in self.bindings],
            'etag': self.etag,
        }
        if self.version is not None:
            result['version'] = self.version
        return result
class IamV1(gcp.api.GcpProjectApiResource):
    """
    Identity and Access Management (IAM) API.
    https://cloud.google.com/iam/docs/reference/rest
    Wraps the projects.serviceAccounts endpoints of a single GCP project
    and converts raw responses into the ServiceAccount/Policy dataclasses.
    """
    # Discovery resource for projects.serviceAccounts; bound in __init__.
    _service_accounts: gcp.api.discovery.Resource
    # Operations that affect conditional role bindings must specify version 3.
    # Otherwise conditions are omitted, and role names returned with a suffix,
    # f.e. roles/iam.workloadIdentityUser_withcond_f1ec33c9beb41857dbf0
    # https://cloud.google.com/iam/docs/reference/rest/v1/Policy#FIELDS.version
    POLICY_VERSION: int = 3
    def __init__(self, api_manager: gcp.api.GcpApiManager, project: str):
        # The base class stores the discovery client and the project id.
        super().__init__(api_manager.iam('v1'), project)
        # Shortcut to projects/*/serviceAccounts/ endpoints
        self._service_accounts = self.api.projects().serviceAccounts()
    def service_account_resource_name(self, account):
        """
        Returns full resource name of the service account.
        The resource name of the service account in the following format:
        projects/{PROJECT_ID}/serviceAccounts/{ACCOUNT}.
        The ACCOUNT value can be the email address or the uniqueId of the
        service account.
        Ref https://cloud.google.com/iam/docs/reference/rest/v1/projects.serviceAccounts/get
        Args:
            account: The ACCOUNT value
        """
        return f'projects/{self.project}/serviceAccounts/{account}'
    def get_service_account(self, account: str) -> ServiceAccount:
        """Fetch a single service account (by email address or uniqueId)."""
        response: Dict[str, Any] = self._service_accounts.get(
            name=self.service_account_resource_name(account)).execute()
        logger.debug('Loaded Service Account:\n%s',
                     self._resource_pretty_format(response))
        return ServiceAccount.from_response(response)
    def get_service_account_iam_policy(self, account: str) -> Policy:
        """Read the IAM policy attached to a service account."""
        response: Dict[str, Any] = self._service_accounts.getIamPolicy(
            resource=self.service_account_resource_name(account),
            options_requestedPolicyVersion=self.POLICY_VERSION).execute()
        logger.debug('Loaded Service Account Policy:\n%s',
                     self._resource_pretty_format(response))
        return Policy.from_response(response)
    def set_service_account_iam_policy(self, account: str,
                                       policy: Policy) -> Policy:
        """Sets the IAM policy that is attached to a service account.
        https://cloud.google.com/iam/docs/reference/rest/v1/projects.serviceAccounts/setIamPolicy
        Raises EtagConflict on HTTP 409 so @handle_etag_conflict callers can
        re-read the policy and retry the whole read-modify-write cycle.
        """
        body = {'policy': policy.as_dict()}
        logger.debug('Updating Service Account %s policy:\n%s', account,
                     self._resource_pretty_format(body))
        try:
            response: Dict[str, Any] = self._service_accounts.setIamPolicy(
                resource=self.service_account_resource_name(account),
                body=body).execute()
            return Policy.from_response(response)
        except googleapiclient.errors.HttpError as error:
            # TODO(sergiitk) use status_code() when we upgrade googleapiclient
            if error.resp and error.resp.status == 409:
                # 409 signals a stale etag, i.e. a concurrent modification:
                # https://cloud.google.com/iam/docs/policies#etag
                logger.debug(error)
                raise EtagConflict from error
            else:
                raise gcp.api.Error from error
    @handle_etag_conflict
    def add_service_account_iam_policy_binding(self, account: str, role: str,
                                               member: str) -> None:
        """Add an IAM policy binding to an IAM service account.
        See for details on updating policy bindings:
        https://cloud.google.com/iam/docs/reference/rest/v1/projects.serviceAccounts/setIamPolicy
        Idempotent: a no-op when the member already holds the role.
        """
        policy: Policy = self.get_service_account_iam_policy(account)
        binding: Optional[Policy.Binding] = policy.find_binding_for_role(role)
        if binding and member in binding.members:
            logger.debug('Member %s already has role %s for Service Account %s',
                         member, role, account)
            return
        if binding is None:
            updated_binding = Policy.Binding(role, frozenset([member]))
        else:
            updated_members: FrozenSet[str] = binding.members.union({member})
            updated_binding: Policy.Binding = dataclasses.replace(
                binding, members=updated_members)
        # _replace_binding() tolerates binding=None: the new binding is
        # simply added to the set.
        updated_policy: Policy = _replace_binding(policy, binding,
                                                  updated_binding)
        self.set_service_account_iam_policy(account, updated_policy)
        logger.debug('Role %s granted to member %s for Service Account %s',
                     role, member, account)
    @handle_etag_conflict
    def remove_service_account_iam_policy_binding(self, account: str, role: str,
                                                  member: str) -> None:
        """Remove an IAM policy binding from the IAM policy of a service
        account.
        See for details on updating policy bindings:
        https://cloud.google.com/iam/docs/reference/rest/v1/projects.serviceAccounts/setIamPolicy
        Idempotent: a no-op when the role or the member is already absent.
        """
        policy: Policy = self.get_service_account_iam_policy(account)
        binding: Optional[Policy.Binding] = policy.find_binding_for_role(role)
        if binding is None:
            logger.debug('Noop: Service Account %s has no bindings for role %s',
                         account, role)
            return
        if member not in binding.members:
            logger.debug(
                'Noop: Service Account %s binding for role %s has no member %s',
                account, role, member)
            return
        updated_members: FrozenSet[str] = binding.members.difference({member})
        updated_binding: Policy.Binding = dataclasses.replace(
            binding, members=updated_members)
        updated_policy: Policy = _replace_binding(policy, binding,
                                                  updated_binding)
        self.set_service_account_iam_policy(account, updated_policy)
        logger.debug('Role %s revoked from member %s for Service Account %s',
                     role, member, account)
| 37.951923 | 97 | 0.637615 |
f16e870591e004ae8cfe6a89a4608c5a45b1ceff | 1,248 | py | Python | src/oci/resource_search/resource_search_client_composite_operations.py | LaudateCorpus1/oci-python-sdk | b0d3ce629d5113df4d8b83b7a6502b2c5bfa3015 | [
"Apache-2.0",
"BSD-3-Clause"
] | null | null | null | src/oci/resource_search/resource_search_client_composite_operations.py | LaudateCorpus1/oci-python-sdk | b0d3ce629d5113df4d8b83b7a6502b2c5bfa3015 | [
"Apache-2.0",
"BSD-3-Clause"
] | null | null | null | src/oci/resource_search/resource_search_client_composite_operations.py | LaudateCorpus1/oci-python-sdk | b0d3ce629d5113df4d8b83b7a6502b2c5bfa3015 | [
"Apache-2.0",
"BSD-3-Clause"
] | null | null | null | # coding: utf-8
# Copyright (c) 2016, 2022, Oracle and/or its affiliates. All rights reserved.
# This software is dual-licensed to you under the Universal Permissive License (UPL) 1.0 as shown at https://oss.oracle.com/licenses/upl or Apache License 2.0 as shown at http://www.apache.org/licenses/LICENSE-2.0. You may choose either license.
import oci # noqa: F401
from oci.util import WAIT_RESOURCE_NOT_FOUND # noqa: F401
class ResourceSearchClientCompositeOperations(object):
    """Convenience wrapper around :py:class:`~oci.resource_search.ResourceSearchClient`.

    Composite-operation classes offer helpers that chain an action with the
    waiting logic needed to reach a given state; here the class simply
    stores the wrapped service client for such use.
    """

    def __init__(self, client, **kwargs):
        """Create a new ResourceSearchClientCompositeOperations object.

        :param ResourceSearchClient client:
            The service client which will be wrapped by this object
        """
        self.client = client
d143568df431a31b3005ecb403c9baacba2c61bb | 735 | py | Python | examples/Simple/simple_client.py | vongostev/NumpySocket | f34d8c98af2961fc177151c2fd9306c1ab26b216 | [
"MIT"
] | 41 | 2018-03-04T14:50:46.000Z | 2022-03-15T22:01:38.000Z | examples/Simple/simple_client.py | vongostev/NumpySocket | f34d8c98af2961fc177151c2fd9306c1ab26b216 | [
"MIT"
] | 5 | 2019-03-05T21:10:12.000Z | 2021-05-06T07:57:41.000Z | examples/Simple/simple_client.py | vongostev/NumpySocket | f34d8c98af2961fc177151c2fd9306c1ab26b216 | [
"MIT"
] | 21 | 2018-06-01T06:49:58.000Z | 2022-03-22T11:09:49.000Z | #!/usr/bin/python3
import logging
from time import sleep
import numpy as np
from numpysocket import NumpySocket
logger = logging.getLogger('simple client')
logger.setLevel(logging.INFO)
host_ip = 'localhost' # change me
npSocket = NumpySocket()
while(True):
try:
npSocket.startClient(host_ip, 9999)
break
except:
logger.warning("connection failed, make sure `server` is running.")
sleep(1)
continue
logger.info("connected to server")
frame = np.arange(1000)
logger.info("sending numpy array:")
logger.info(frame)
npSocket.send(frame)
logger.info("array sent, closing connection")
try:
npSocket.close()
except OSError as err:
logging.error("client already disconnected")
| 20.416667 | 75 | 0.711565 |
6e1f4b2f397e28b9a8ee8715b5b17395580eb9ac | 47,204 | py | Python | Lib/test/test_compileall.py | cyyever/nogil | 2607880dd93de52cf34045f1b7e850639a06c137 | [
"0BSD"
] | 953 | 2021-10-08T17:12:34.000Z | 2022-03-31T18:31:50.000Z | Lib/test/test_compileall.py | cyyever/nogil | 2607880dd93de52cf34045f1b7e850639a06c137 | [
"0BSD"
] | 27 | 2021-10-13T20:54:09.000Z | 2022-03-27T14:41:13.000Z | Lib/test/test_compileall.py | cyyever/nogil | 2607880dd93de52cf34045f1b7e850639a06c137 | [
"0BSD"
] | 42 | 2021-10-08T16:05:57.000Z | 2022-03-18T13:06:12.000Z | import compileall
import contextlib
import filecmp
import importlib.util
import io
import itertools
import os
import pathlib
import py_compile
import shutil
import struct
import sys
import tempfile
import test.test_importlib.util
import time
import unittest
from unittest import mock, skipUnless
try:
from concurrent.futures import ProcessPoolExecutor
_have_multiprocessing = True
except ImportError:
_have_multiprocessing = False
from test import support
from test.support import script_helper
from .test_py_compile import without_source_date_epoch
from .test_py_compile import SourceDateEpochTestMeta
def get_pyc(script, opt):
    """Return the cached bytecode path for *script* at optimization *opt*.

    Falsy levels (None, 0, '') all map to the '' optimization tag, which is
    the unoptimized .pyc name.
    """
    tag = opt if opt else ''
    return importlib.util.cache_from_source(script, optimization=tag)
def get_pycs(script):
    """Return the cached bytecode paths for *script* at opt levels 0, 1, 2."""
    return [get_pyc(script, level) for level in range(3)]
def is_hardlink(filename1, filename2):
    """Return True if the two paths refer to the same inode (hardlink)."""
    return os.stat(filename1).st_ino == os.stat(filename2).st_ino
class CompileallTestsBase:
    def setUp(self):
        """Create a fixture tree: two modules at top level, one in a subdir."""
        self.directory = tempfile.mkdtemp()
        self.source_path = os.path.join(self.directory, '_test.py')
        self.bc_path = importlib.util.cache_from_source(self.source_path)
        with open(self.source_path, 'w') as file:
            file.write('x = 123\n')
        self.source_path2 = os.path.join(self.directory, '_test2.py')
        self.bc_path2 = importlib.util.cache_from_source(self.source_path2)
        shutil.copyfile(self.source_path, self.source_path2)
        self.subdirectory = os.path.join(self.directory, '_subdir')
        os.mkdir(self.subdirectory)
        self.source_path3 = os.path.join(self.subdirectory, '_test3.py')
        shutil.copyfile(self.source_path, self.source_path3)
    def tearDown(self):
        # Remove the whole fixture tree created in setUp().
        shutil.rmtree(self.directory)
    def add_bad_source_file(self):
        """Drop a syntactically invalid module into the fixture directory."""
        self.bad_source_path = os.path.join(self.directory, '_test_bad.py')
        with open(self.bad_source_path, 'w') as file:
            file.write('x (\n')
    def timestamp_metadata(self):
        """Return (actual, expected) 12-byte pyc headers for self.bc_path.

        Header layout: 4-byte magic number, 4-byte flags word (0 selects
        timestamp-based invalidation), then the source mtime truncated to
        32 bits.
        """
        with open(self.bc_path, 'rb') as file:
            data = file.read(12)
        mtime = int(os.stat(self.source_path).st_mtime)
        compare = struct.pack('<4sLL', importlib.util.MAGIC_NUMBER, 0,
                              mtime & 0xFFFF_FFFF)
        return data, compare
    def test_year_2038_mtime_compilation(self):
        """compile_file() must accept a source mtime just below 2**32."""
        # Test to make sure we can handle mtimes larger than what a 32-bit
        # signed number can hold as part of bpo-34990
        try:
            os.utime(self.source_path, (2**32 - 1, 2**32 - 1))
        except (OverflowError, OSError):
            # The filesystem (or the platform time_t) may reject such values.
            self.skipTest("filesystem doesn't support timestamps near 2**32")
        with contextlib.redirect_stdout(io.StringIO()):
            self.assertTrue(compileall.compile_file(self.source_path))
    def test_larger_than_32_bit_times(self):
        """compile_file() must also cope with mtimes well beyond 32 bits."""
        # This is similar to the test above but we skip it if the OS doesn't
        # support modification times larger than 32-bits.
        try:
            os.utime(self.source_path, (2**35, 2**35))
        except (OverflowError, OSError):
            self.skipTest("filesystem doesn't support large timestamps")
        with contextlib.redirect_stdout(io.StringIO()):
            self.assertTrue(compileall.compile_file(self.source_path))
def recreation_check(self, metadata):
"""Check that compileall recreates bytecode when the new metadata is
used."""
if os.environ.get('SOURCE_DATE_EPOCH'):
raise unittest.SkipTest('SOURCE_DATE_EPOCH is set')
py_compile.compile(self.source_path)
self.assertEqual(*self.timestamp_metadata())
with open(self.bc_path, 'rb') as file:
bc = file.read()[len(metadata):]
with open(self.bc_path, 'wb') as file:
file.write(metadata)
file.write(bc)
self.assertNotEqual(*self.timestamp_metadata())
compileall.compile_dir(self.directory, force=False, quiet=True)
self.assertTrue(*self.timestamp_metadata())
    def test_mtime(self):
        # Test a change in mtime leads to a new .pyc.
        # (struct layout: magic, flags=0, mtime=1 — only the mtime differs.)
        self.recreation_check(struct.pack('<4sLL', importlib.util.MAGIC_NUMBER,
                                          0, 1))
    def test_magic_number(self):
        # Test a change in the magic number leads to a new .pyc
        # (the original comment said "mtime" — a copy/paste from test_mtime).
        self.recreation_check(b'\0\0\0\0')
def test_compile_files(self):
# Test compiling a single file, and complete directory
for fn in (self.bc_path, self.bc_path2):
try:
os.unlink(fn)
except:
pass
self.assertTrue(compileall.compile_file(self.source_path,
force=False, quiet=True))
self.assertTrue(os.path.isfile(self.bc_path) and
not os.path.isfile(self.bc_path2))
os.unlink(self.bc_path)
self.assertTrue(compileall.compile_dir(self.directory, force=False,
quiet=True))
self.assertTrue(os.path.isfile(self.bc_path) and
os.path.isfile(self.bc_path2))
os.unlink(self.bc_path)
os.unlink(self.bc_path2)
# Test against bad files
self.add_bad_source_file()
self.assertFalse(compileall.compile_file(self.bad_source_path,
force=False, quiet=2))
self.assertFalse(compileall.compile_dir(self.directory,
force=False, quiet=2))
    def test_compile_file_pathlike(self):
        """compile_file() accepts a pathlib.Path and prints the plain path."""
        self.assertFalse(os.path.isfile(self.bc_path))
        # we should also test the output
        with support.captured_stdout() as stdout:
            self.assertTrue(compileall.compile_file(pathlib.Path(self.source_path)))
        self.assertRegex(stdout.getvalue(), r'Compiling ([^WindowsPath|PosixPath].*)')
        self.assertTrue(os.path.isfile(self.bc_path))
    def test_compile_file_pathlike_ddir(self):
        """The ddir argument may be a pathlib.Path as well."""
        self.assertFalse(os.path.isfile(self.bc_path))
        self.assertTrue(compileall.compile_file(pathlib.Path(self.source_path),
                                                ddir=pathlib.Path('ddir_path'),
                                                quiet=2))
        self.assertTrue(os.path.isfile(self.bc_path))
    def test_compile_path(self):
        """compile_path() walks sys.path; fails when a bad source is present."""
        with test.test_importlib.util.import_state(path=[self.directory]):
            self.assertTrue(compileall.compile_path(quiet=2))
        with test.test_importlib.util.import_state(path=[self.directory]):
            self.add_bad_source_file()
            self.assertFalse(compileall.compile_path(skip_curdir=False,
                                                     force=True, quiet=2))
    def test_no_pycache_in_non_package(self):
        # Bug 8563 reported that __pycache__ directories got created by
        # compile_file() for non-.py files.
        data_dir = os.path.join(self.directory, 'data')
        data_file = os.path.join(data_dir, 'file')
        os.mkdir(data_dir)
        # touch data/file
        with open(data_file, 'w'):
            pass
        compileall.compile_file(data_file)
        # No bytecode cache must appear next to a non-Python file.
        self.assertFalse(os.path.exists(os.path.join(data_dir, '__pycache__')))
    def test_compile_file_encoding_fallback(self):
        # Bug 44666 reported that compile_file failed when sys.stdout.encoding is None
        self.add_bad_source_file()
        # io.StringIO has no real encoding; printing the error must not crash.
        with contextlib.redirect_stdout(io.StringIO()):
            self.assertFalse(compileall.compile_file(self.bad_source_path))
    def test_optimize(self):
        # make sure compiling with different optimization settings than the
        # interpreter's creates the correct file names
        # Under -O the interpreter runs at level 1 already, so compile at the
        # other level to force a distinct optimization tag.
        optimize, opt = (1, 1) if __debug__ else (0, '')
        compileall.compile_dir(self.directory, quiet=True, optimize=optimize)
        cached = importlib.util.cache_from_source(self.source_path,
                                                  optimization=opt)
        self.assertTrue(os.path.isfile(cached))
        cached2 = importlib.util.cache_from_source(self.source_path2,
                                                   optimization=opt)
        self.assertTrue(os.path.isfile(cached2))
        cached3 = importlib.util.cache_from_source(self.source_path3,
                                                   optimization=opt)
        self.assertTrue(os.path.isfile(cached3))
    def test_compile_dir_pathlike(self):
        """compile_dir() accepts a pathlib.Path and prints the plain path."""
        self.assertFalse(os.path.isfile(self.bc_path))
        with support.captured_stdout() as stdout:
            compileall.compile_dir(pathlib.Path(self.directory))
        line = stdout.getvalue().splitlines()[0]
        self.assertRegex(line, r'Listing ([^WindowsPath|PosixPath].*)')
        self.assertTrue(os.path.isfile(self.bc_path))
    @mock.patch('concurrent.futures.ProcessPoolExecutor')
    def test_compile_pool_called(self, pool_mock):
        # workers > 1 must route compilation through a process pool.
        compileall.compile_dir(self.directory, quiet=True, workers=5)
        self.assertTrue(pool_mock.called)
    def test_compile_workers_non_positive(self):
        # Negative worker counts are rejected up front.
        with self.assertRaisesRegex(ValueError,
                                    "workers must be greater or equal to 0"):
            compileall.compile_dir(self.directory, workers=-1)
    @mock.patch('concurrent.futures.ProcessPoolExecutor')
    def test_compile_workers_cpu_count(self, pool_mock):
        # workers=0 means "let the executor pick" (max_workers=None).
        compileall.compile_dir(self.directory, quiet=True, workers=0)
        self.assertEqual(pool_mock.call_args[1]['max_workers'], None)
    @mock.patch('concurrent.futures.ProcessPoolExecutor')
    @mock.patch('compileall.compile_file')
    def test_compile_one_worker(self, compile_file_mock, pool_mock):
        # The default (single worker) must compile inline, without a pool.
        compileall.compile_dir(self.directory, quiet=True)
        self.assertFalse(pool_mock.called)
        self.assertTrue(compile_file_mock.called)
    @mock.patch('concurrent.futures.ProcessPoolExecutor', new=None)
    @mock.patch('compileall.compile_file')
    def test_compile_missing_multiprocessing(self, compile_file_mock):
        # Without ProcessPoolExecutor available, fall back to inline compiles.
        compileall.compile_dir(self.directory, quiet=True, workers=5)
        self.assertTrue(compile_file_mock.called)
    def test_compile_dir_maxlevels(self):
        # Test the actual impact of maxlevels parameter
        # Build dir_1/dir_2/dir_3, each level holding a copy of the script;
        # `source` ends up pointing at the deepest copy.
        depth = 3
        path = self.directory
        for i in range(1, depth + 1):
            path = os.path.join(path, f"dir_{i}")
            source = os.path.join(path, 'script.py')
            os.mkdir(path)
            shutil.copyfile(self.source_path, source)
        pyc_filename = importlib.util.cache_from_source(source)
        compileall.compile_dir(self.directory, quiet=True, maxlevels=depth - 1)
        self.assertFalse(os.path.isfile(pyc_filename))
        compileall.compile_dir(self.directory, quiet=True, maxlevels=depth)
        self.assertTrue(os.path.isfile(pyc_filename))
    def _test_ddir_only(self, *, ddir, parallel=True):
        """Recursive compile_dir ddir must contain package paths; bpo39769."""
        # Build package test/foo with a module that raises at import time,
        # so running its pyc lets us inspect the traceback's file names.
        fullpath = ["test", "foo"]
        path = self.directory
        mods = []
        for subdir in fullpath:
            path = os.path.join(path, subdir)
            os.mkdir(path)
            script_helper.make_script(path, "__init__", "")
            mods.append(script_helper.make_script(path, "mod",
                                                  "def fn(): 1/0\nfn()\n"))
        compileall.compile_dir(
            self.directory, quiet=True, ddir=ddir,
            workers=2 if parallel else 1)
        self.assertTrue(mods)
        for mod in mods:
            self.assertTrue(mod.startswith(self.directory), mod)
            modcode = importlib.util.cache_from_source(mod)
            modpath = mod[len(self.directory+os.sep):]
            _, _, err = script_helper.assert_python_failure(modcode)
            # co_filename must be the ddir prefix + package-relative path.
            expected_in = os.path.join(ddir, modpath)
            mod_code_obj = test.test_importlib.util.get_code_from_pyc(modcode)
            self.assertEqual(mod_code_obj.co_filename, expected_in)
            self.assertIn(f'"{expected_in}"', os.fsdecode(err))
    def test_ddir_only_one_worker(self):
        """Recursive compile_dir with non-empty ddir, serial path; bpo39769."""
        return self._test_ddir_only(ddir="<a prefix>", parallel=False)
    def test_ddir_multiple_workers(self):
        """Recursive compile_dir with non-empty ddir, parallel path; bpo39769."""
        return self._test_ddir_only(ddir="<a prefix>", parallel=True)
    def test_ddir_empty_only_one_worker(self):
        """Recursive compile_dir with ddir='', serial path; bpo39769."""
        return self._test_ddir_only(ddir="", parallel=False)
    def test_ddir_empty_multiple_workers(self):
        """Recursive compile_dir with ddir='', parallel path; bpo39769."""
        return self._test_ddir_only(ddir="", parallel=True)
    def test_strip_only(self):
        """stripdir removes the given prefix from co_filename in the pyc."""
        fullpath = ["test", "build", "real", "path"]
        path = os.path.join(self.directory, *fullpath)
        os.makedirs(path)
        script = script_helper.make_script(path, "test", "1 / 0")
        bc = importlib.util.cache_from_source(script)
        stripdir = os.path.join(self.directory, *fullpath[:2])
        compileall.compile_dir(path, quiet=True, stripdir=stripdir)
        rc, out, err = script_helper.assert_python_failure(bc)
        # The traceback must show the stripped, relative remainder only.
        expected_in = os.path.join(*fullpath[2:])
        self.assertIn(
            expected_in,
            str(err, encoding=sys.getdefaultencoding())
        )
        self.assertNotIn(
            stripdir,
            str(err, encoding=sys.getdefaultencoding())
        )
    def test_prepend_only(self):
        """prependdir adds the given prefix to co_filename in the pyc."""
        fullpath = ["test", "build", "real", "path"]
        path = os.path.join(self.directory, *fullpath)
        os.makedirs(path)
        script = script_helper.make_script(path, "test", "1 / 0")
        bc = importlib.util.cache_from_source(script)
        prependdir = "/foo"
        compileall.compile_dir(path, quiet=True, prependdir=prependdir)
        rc, out, err = script_helper.assert_python_failure(bc)
        # The traceback must show the original path with the prefix added.
        expected_in = os.path.join(prependdir, self.directory, *fullpath)
        self.assertIn(
            expected_in,
            str(err, encoding=sys.getdefaultencoding())
        )
    def test_strip_and_prepend(self):
        """stripdir= and prependdir= combined: the pyc records the path with
        the strip prefix removed and the prepend prefix substituted."""
        fullpath = ["test", "build", "real", "path"]
        path = os.path.join(self.directory, *fullpath)
        os.makedirs(path)
        script = script_helper.make_script(path, "test", "1 / 0")
        bc = importlib.util.cache_from_source(script)
        stripdir = os.path.join(self.directory, *fullpath[:2])
        prependdir = "/foo"
        compileall.compile_dir(path, quiet=True,
                               stripdir=stripdir, prependdir=prependdir)
        rc, out, err = script_helper.assert_python_failure(bc)
        expected_in = os.path.join(prependdir, *fullpath[2:])
        self.assertIn(
            expected_in,
            str(err, encoding=sys.getdefaultencoding())
        )
        self.assertNotIn(
            stripdir,
            str(err, encoding=sys.getdefaultencoding())
        )
    def test_strip_prepend_and_ddir(self):
        """ddir= is mutually exclusive with stripdir=/prependdir=."""
        fullpath = ["test", "build", "real", "path", "ddir"]
        path = os.path.join(self.directory, *fullpath)
        os.makedirs(path)
        script_helper.make_script(path, "test", "1 / 0")
        with self.assertRaises(ValueError):
            compileall.compile_dir(path, quiet=True, ddir="/bar",
                                   stripdir="/foo", prependdir="/bar")
    def test_multiple_optimization_levels(self):
        """compile_file(optimize=[...]) writes one pyc per requested level."""
        script = script_helper.make_script(self.directory,
                                           "test_optimization",
                                           "a = 0")
        # bc[n] is the cache path for optimization level n (bc[0] is the
        # un-suffixed default pyc, from optimization='').
        bc = []
        for opt_level in "", 1, 2, 3:
            bc.append(importlib.util.cache_from_source(script,
                                                       optimization=opt_level))
        # Levels here are ints, so bc[opt_level] below indexes correctly
        # (contrast the CLI variant of this test which uses strings).
        test_combinations = [[0, 1], [1, 2], [0, 2], [0, 1, 2]]
        for opt_combination in test_combinations:
            compileall.compile_file(script, quiet=True,
                                    optimize=opt_combination)
            for opt_level in opt_combination:
                self.assertTrue(os.path.isfile(bc[opt_level]))
                try:
                    # Best effort: remove the pyc so the next combination
                    # starts from a clean slate.
                    os.unlink(bc[opt_level])
                except Exception:
                    pass
    @support.skip_unless_symlink
    def test_ignore_symlink_destination(self):
        """compile_dir(limit_sl_dest=...) compiles only symlinks whose
        target resolves inside the allowed directory."""
        # Create folders for allowed files, symlinks and prohibited area
        allowed_path = os.path.join(self.directory, "test", "dir", "allowed")
        symlinks_path = os.path.join(self.directory, "test", "dir", "symlinks")
        prohibited_path = os.path.join(self.directory, "test", "dir", "prohibited")
        os.makedirs(allowed_path)
        os.makedirs(symlinks_path)
        os.makedirs(prohibited_path)
        # Create scripts and symlinks and remember their byte-compiled versions
        allowed_script = script_helper.make_script(allowed_path, "test_allowed", "a = 0")
        prohibited_script = script_helper.make_script(prohibited_path, "test_prohibited", "a = 0")
        allowed_symlink = os.path.join(symlinks_path, "test_allowed.py")
        prohibited_symlink = os.path.join(symlinks_path, "test_prohibited.py")
        os.symlink(allowed_script, allowed_symlink)
        os.symlink(prohibited_script, prohibited_symlink)
        allowed_bc = importlib.util.cache_from_source(allowed_symlink)
        prohibited_bc = importlib.util.cache_from_source(prohibited_symlink)
        compileall.compile_dir(symlinks_path, quiet=True, limit_sl_dest=allowed_path)
        self.assertTrue(os.path.isfile(allowed_bc))
        self.assertFalse(os.path.isfile(prohibited_bc))
# Run the full CompileallTestsBase suite twice: once with SOURCE_DATE_EPOCH
# set (hash-based pycs) and once without (timestamp-based pycs).
class CompileallTestsWithSourceEpoch(CompileallTestsBase,
                                     unittest.TestCase,
                                     metaclass=SourceDateEpochTestMeta,
                                     source_date_epoch=True):
    pass
class CompileallTestsWithoutSourceEpoch(CompileallTestsBase,
                                        unittest.TestCase,
                                        metaclass=SourceDateEpochTestMeta,
                                        source_date_epoch=False):
    pass
class EncodingTest(unittest.TestCase):
    """Issue 6716: compileall should escape source code when printing errors
    to stdout."""
    def setUp(self):
        # The source deliberately mixes Python 2 syntax with a euro sign so
        # that compilation fails and the error report must be escaped.
        self.directory = tempfile.mkdtemp()
        self.source_path = os.path.join(self.directory, '_test.py')
        with open(self.source_path, 'w', encoding='utf-8') as file:
            file.write('# -*- coding: utf-8 -*-\n')
            file.write('print u"\u20ac"\n')
    def tearDown(self):
        shutil.rmtree(self.directory)
    def test_error(self):
        # Passes if compile_dir() reports the error to an ascii-only stdout
        # without raising (e.g. UnicodeEncodeError); no explicit assertion.
        try:
            orig_stdout = sys.stdout
            sys.stdout = io.TextIOWrapper(io.BytesIO(),encoding='ascii')
            compileall.compile_dir(self.directory)
        finally:
            sys.stdout = orig_stdout
class CommandLineTestsBase:
    """Test compileall's CLI."""
    @classmethod
    def setUpClass(cls):
        # Probe every directory on sys.path by creating __pycache__/test.try
        # in each.  If any is not writable, tests that compile sys.path must
        # be skipped (see _skip_if_sys_path_not_writable below).
        for path in filter(os.path.isdir, sys.path):
            directory_created = False
            directory = pathlib.Path(path) / '__pycache__'
            path = directory / 'test.try'
            try:
                if not directory.is_dir():
                    directory.mkdir()
                    directory_created = True
                with path.open('w') as file:
                    file.write('# for test_compileall')
            except OSError:
                sys_path_writable = False
                break
            finally:
                # Always undo the probe, including the directory we created.
                support.unlink(str(path))
                if directory_created:
                    directory.rmdir()
        else:
            # Loop finished without break: every entry was writable.
            sys_path_writable = True
        cls._sys_path_writable = sys_path_writable
    def _skip_if_sys_path_not_writable(self):
        if not self._sys_path_writable:
            raise unittest.SkipTest('not all entries on sys.path are writable')
    def _get_run_args(self, args):
        # Build argv for "-m compileall" with -S, inheriting the current
        # interpreter's optimization flags.
        return [*support.optim_args_from_interpreter_flags(),
                '-S', '-m', 'compileall',
                *args]
    def assertRunOK(self, *args, **env_vars):
        """Run the CLI; assert exit 0 and empty stderr; return stdout."""
        rc, out, err = script_helper.assert_python_ok(
            *self._get_run_args(args), **env_vars,
            PYTHONIOENCODING='utf-8')
        self.assertEqual(b'', err)
        return out
    def assertRunNotOK(self, *args, **env_vars):
        """Run the CLI; assert a nonzero exit; return (rc, stdout, stderr)."""
        rc, out, err = script_helper.assert_python_failure(
            *self._get_run_args(args), **env_vars,
            PYTHONIOENCODING='utf-8')
        return rc, out, err
    def assertCompiled(self, fn):
        # True when fn's PEP 3147 cache file exists.
        path = importlib.util.cache_from_source(fn)
        self.assertTrue(os.path.exists(path))
    def assertNotCompiled(self, fn):
        path = importlib.util.cache_from_source(fn)
        self.assertFalse(os.path.exists(path))
    def setUp(self):
        # Fixture: a temp dir holding package "foo" with __init__.py and bar.py.
        self.directory = tempfile.mkdtemp()
        self.addCleanup(support.rmtree, self.directory)
        self.pkgdir = os.path.join(self.directory, 'foo')
        os.mkdir(self.pkgdir)
        self.pkgdir_cachedir = os.path.join(self.pkgdir, '__pycache__')
        # Create the __init__.py and a package module.
        self.initfn = script_helper.make_script(self.pkgdir, '__init__', '')
        self.barfn = script_helper.make_script(self.pkgdir, 'bar', '')
    def test_no_args_compiles_path(self):
        # Note that -l is implied for the no args case.
        self._skip_if_sys_path_not_writable()
        bazfn = script_helper.make_script(self.directory, 'baz', '')
        self.assertRunOK(PYTHONPATH=self.directory)
        self.assertCompiled(bazfn)
        self.assertNotCompiled(self.initfn)
        self.assertNotCompiled(self.barfn)
    @without_source_date_epoch # timestamp invalidation test
    def test_no_args_respects_force_flag(self):
        self._skip_if_sys_path_not_writable()
        bazfn = script_helper.make_script(self.directory, 'baz', '')
        self.assertRunOK(PYTHONPATH=self.directory)
        pycpath = importlib.util.cache_from_source(bazfn)
        # Set atime/mtime backward to avoid file timestamp resolution issues
        os.utime(pycpath, (time.time()-60,)*2)
        mtime = os.stat(pycpath).st_mtime
        # Without force, no recompilation
        self.assertRunOK(PYTHONPATH=self.directory)
        mtime2 = os.stat(pycpath).st_mtime
        self.assertEqual(mtime, mtime2)
        # Now force it.
        self.assertRunOK('-f', PYTHONPATH=self.directory)
        mtime2 = os.stat(pycpath).st_mtime
        self.assertNotEqual(mtime, mtime2)
    def test_no_args_respects_quiet_flag(self):
        self._skip_if_sys_path_not_writable()
        script_helper.make_script(self.directory, 'baz', '')
        noisy = self.assertRunOK(PYTHONPATH=self.directory)
        self.assertIn(b'Listing ', noisy)
        quiet = self.assertRunOK('-q', PYTHONPATH=self.directory)
        self.assertNotIn(b'Listing ', quiet)
    # Ensure that the default behavior of compileall's CLI is to create
    # PEP 3147/PEP 488 pyc files.
    for name, ext, switch in [
            ('normal', 'pyc', []),
            ('optimize', 'opt-1.pyc', ['-O']),
            ('doubleoptimize', 'opt-2.pyc', ['-OO']),
    ]:
        def f(self, ext=ext, switch=switch):
            # ext/switch are bound as default arguments so each generated
            # test captures its own values (avoids late-binding closures).
            script_helper.assert_python_ok(*(switch +
                ['-m', 'compileall', '-q', self.pkgdir]))
            # Verify the __pycache__ directory contents.
            self.assertTrue(os.path.exists(self.pkgdir_cachedir))
            expected = sorted(base.format(sys.implementation.cache_tag, ext)
                              for base in ('__init__.{}.{}', 'bar.{}.{}'))
            self.assertEqual(sorted(os.listdir(self.pkgdir_cachedir)), expected)
            # Make sure there are no .pyc files in the source directory.
            self.assertFalse([fn for fn in os.listdir(self.pkgdir)
                              if fn.endswith(ext)])
        # Inject the generated test under a descriptive name.
        locals()['test_pep3147_paths_' + name] = f
    def test_legacy_paths(self):
        # Ensure that with the proper switch, compileall leaves legacy
        # pyc files, and no __pycache__ directory.
        self.assertRunOK('-b', '-q', self.pkgdir)
        # Verify the __pycache__ directory contents.
        self.assertFalse(os.path.exists(self.pkgdir_cachedir))
        expected = sorted(['__init__.py', '__init__.pyc', 'bar.py',
                           'bar.pyc'])
        self.assertEqual(sorted(os.listdir(self.pkgdir)), expected)
    def test_multiple_runs(self):
        # Bug 8527 reported that multiple calls produced empty
        # __pycache__/__pycache__ directories.
        self.assertRunOK('-q', self.pkgdir)
        # Verify the __pycache__ directory contents.
        self.assertTrue(os.path.exists(self.pkgdir_cachedir))
        cachecachedir = os.path.join(self.pkgdir_cachedir, '__pycache__')
        self.assertFalse(os.path.exists(cachecachedir))
        # Call compileall again.
        self.assertRunOK('-q', self.pkgdir)
        self.assertTrue(os.path.exists(self.pkgdir_cachedir))
        self.assertFalse(os.path.exists(cachecachedir))
    @without_source_date_epoch # timestamp invalidation test
    def test_force(self):
        self.assertRunOK('-q', self.pkgdir)
        pycpath = importlib.util.cache_from_source(self.barfn)
        # set atime/mtime backward to avoid file timestamp resolution issues
        os.utime(pycpath, (time.time()-60,)*2)
        mtime = os.stat(pycpath).st_mtime
        # without force, no recompilation
        self.assertRunOK('-q', self.pkgdir)
        mtime2 = os.stat(pycpath).st_mtime
        self.assertEqual(mtime, mtime2)
        # now force it.
        self.assertRunOK('-q', '-f', self.pkgdir)
        mtime2 = os.stat(pycpath).st_mtime
        self.assertNotEqual(mtime, mtime2)
    def test_recursion_control(self):
        """-l restricts compilation to the top directory only."""
        subpackage = os.path.join(self.pkgdir, 'spam')
        os.mkdir(subpackage)
        subinitfn = script_helper.make_script(subpackage, '__init__', '')
        hamfn = script_helper.make_script(subpackage, 'ham', '')
        self.assertRunOK('-q', '-l', self.pkgdir)
        self.assertNotCompiled(subinitfn)
        self.assertFalse(os.path.exists(os.path.join(subpackage, '__pycache__')))
        self.assertRunOK('-q', self.pkgdir)
        self.assertCompiled(subinitfn)
        self.assertCompiled(hamfn)
    def test_recursion_limit(self):
        """-r N compiles exactly N directory levels below the root."""
        subpackage = os.path.join(self.pkgdir, 'spam')
        subpackage2 = os.path.join(subpackage, 'ham')
        subpackage3 = os.path.join(subpackage2, 'eggs')
        for pkg in (subpackage, subpackage2, subpackage3):
            script_helper.make_pkg(pkg)
        subinitfn = os.path.join(subpackage, '__init__.py')
        hamfn = script_helper.make_script(subpackage, 'ham', '')
        spamfn = script_helper.make_script(subpackage2, 'spam', '')
        eggfn = script_helper.make_script(subpackage3, 'egg', '')
        self.assertRunOK('-q', '-r 0', self.pkgdir)
        self.assertNotCompiled(subinitfn)
        self.assertFalse(
            os.path.exists(os.path.join(subpackage, '__pycache__')))
        self.assertRunOK('-q', '-r 1', self.pkgdir)
        self.assertCompiled(subinitfn)
        self.assertCompiled(hamfn)
        self.assertNotCompiled(spamfn)
        self.assertRunOK('-q', '-r 2', self.pkgdir)
        self.assertCompiled(subinitfn)
        self.assertCompiled(hamfn)
        self.assertCompiled(spamfn)
        self.assertNotCompiled(eggfn)
        self.assertRunOK('-q', '-r 5', self.pkgdir)
        self.assertCompiled(subinitfn)
        self.assertCompiled(hamfn)
        self.assertCompiled(spamfn)
        self.assertCompiled(eggfn)
    @support.skip_unless_symlink
    def test_symlink_loop(self):
        # Currently, compileall ignores symlinks to directories.
        # If that limitation is ever lifted, it should protect against
        # recursion in symlink loops.
        pkg = os.path.join(self.pkgdir, 'spam')
        script_helper.make_pkg(pkg)
        os.symlink('.', os.path.join(pkg, 'evil'))
        os.symlink('.', os.path.join(pkg, 'evil2'))
        self.assertRunOK('-q', self.pkgdir)
        self.assertCompiled(os.path.join(
            self.pkgdir, 'spam', 'evil', 'evil2', '__init__.py'
        ))
    def test_quiet(self):
        noisy = self.assertRunOK(self.pkgdir)
        quiet = self.assertRunOK('-q', self.pkgdir)
        self.assertNotEqual(b'', noisy)
        self.assertEqual(b'', quiet)
    def test_silent(self):
        # -qq suppresses even error output; single -q only progress output.
        script_helper.make_script(self.pkgdir, 'crunchyfrog', 'bad(syntax')
        _, quiet, _ = self.assertRunNotOK('-q', self.pkgdir)
        _, silent, _ = self.assertRunNotOK('-qq', self.pkgdir)
        self.assertNotEqual(b'', quiet)
        self.assertEqual(b'', silent)
    def test_regexp(self):
        # -x skips any path matching the regexp (here: names starting "ba").
        self.assertRunOK('-q', '-x', r'ba[^\\/]*$', self.pkgdir)
        self.assertNotCompiled(self.barfn)
        self.assertCompiled(self.initfn)
    def test_multiple_dirs(self):
        pkgdir2 = os.path.join(self.directory, 'foo2')
        os.mkdir(pkgdir2)
        init2fn = script_helper.make_script(pkgdir2, '__init__', '')
        bar2fn = script_helper.make_script(pkgdir2, 'bar2', '')
        self.assertRunOK('-q', self.pkgdir, pkgdir2)
        self.assertCompiled(self.initfn)
        self.assertCompiled(self.barfn)
        self.assertCompiled(init2fn)
        self.assertCompiled(bar2fn)
    def test_d_compile_error(self):
        # -d substitutes the given directory into compile-time error output.
        script_helper.make_script(self.pkgdir, 'crunchyfrog', 'bad(syntax')
        rc, out, err = self.assertRunNotOK('-q', '-d', 'dinsdale', self.pkgdir)
        self.assertRegex(out, b'File "dinsdale')
    def test_d_runtime_error(self):
        # -d is also baked into the pyc, so runtime tracebacks from the
        # (legacy-located) pyc show the substituted directory.
        bazfn = script_helper.make_script(self.pkgdir, 'baz', 'raise Exception')
        self.assertRunOK('-q', '-d', 'dinsdale', self.pkgdir)
        fn = script_helper.make_script(self.pkgdir, 'bing', 'import baz')
        pyc = importlib.util.cache_from_source(bazfn)
        os.rename(pyc, os.path.join(self.pkgdir, 'baz.pyc'))
        os.remove(bazfn)
        rc, out, err = script_helper.assert_python_failure(fn, __isolated=False)
        self.assertRegex(err, b'File "dinsdale')
    def test_include_bad_file(self):
        rc, out, err = self.assertRunNotOK(
            '-i', os.path.join(self.directory, 'nosuchfile'), self.pkgdir)
        self.assertRegex(out, b'rror.*nosuchfile')
        self.assertNotRegex(err, b'Traceback')
        self.assertFalse(os.path.exists(importlib.util.cache_from_source(
            self.pkgdir_cachedir)))
    def test_include_file_with_arg(self):
        # -i FILE compiles the listed files in addition to the CLI args.
        f1 = script_helper.make_script(self.pkgdir, 'f1', '')
        f2 = script_helper.make_script(self.pkgdir, 'f2', '')
        f3 = script_helper.make_script(self.pkgdir, 'f3', '')
        f4 = script_helper.make_script(self.pkgdir, 'f4', '')
        with open(os.path.join(self.directory, 'l1'), 'w') as l1:
            l1.write(os.path.join(self.pkgdir, 'f1.py')+os.linesep)
            l1.write(os.path.join(self.pkgdir, 'f2.py')+os.linesep)
        self.assertRunOK('-i', os.path.join(self.directory, 'l1'), f4)
        self.assertCompiled(f1)
        self.assertCompiled(f2)
        self.assertNotCompiled(f3)
        self.assertCompiled(f4)
    def test_include_file_no_arg(self):
        f1 = script_helper.make_script(self.pkgdir, 'f1', '')
        f2 = script_helper.make_script(self.pkgdir, 'f2', '')
        f3 = script_helper.make_script(self.pkgdir, 'f3', '')
        f4 = script_helper.make_script(self.pkgdir, 'f4', '')
        with open(os.path.join(self.directory, 'l1'), 'w') as l1:
            l1.write(os.path.join(self.pkgdir, 'f2.py')+os.linesep)
        self.assertRunOK('-i', os.path.join(self.directory, 'l1'))
        self.assertNotCompiled(f1)
        self.assertCompiled(f2)
        self.assertNotCompiled(f3)
        self.assertNotCompiled(f4)
    def test_include_on_stdin(self):
        # "-i -" reads the file list from stdin.
        f1 = script_helper.make_script(self.pkgdir, 'f1', '')
        f2 = script_helper.make_script(self.pkgdir, 'f2', '')
        f3 = script_helper.make_script(self.pkgdir, 'f3', '')
        f4 = script_helper.make_script(self.pkgdir, 'f4', '')
        p = script_helper.spawn_python(*(self._get_run_args(()) + ['-i', '-']))
        p.stdin.write((f3+os.linesep).encode('ascii'))
        script_helper.kill_python(p)
        self.assertNotCompiled(f1)
        self.assertNotCompiled(f2)
        self.assertCompiled(f3)
        self.assertNotCompiled(f4)
    def test_compiles_as_much_as_possible(self):
        # One bad file must not stop the remaining files from compiling.
        bingfn = script_helper.make_script(self.pkgdir, 'bing', 'syntax(error')
        rc, out, err = self.assertRunNotOK('nosuchfile', self.initfn,
                                           bingfn, self.barfn)
        self.assertRegex(out, b'rror')
        self.assertNotCompiled(bingfn)
        self.assertCompiled(self.initfn)
        self.assertCompiled(self.barfn)
    def test_invalid_arg_produces_message(self):
        out = self.assertRunOK('badfilename')
        self.assertRegex(out, b"Can't list 'badfilename'")
    def test_pyc_invalidation_mode(self):
        script_helper.make_script(self.pkgdir, 'f1', '')
        pyc = importlib.util.cache_from_source(
            os.path.join(self.pkgdir, 'f1.py'))
        self.assertRunOK('--invalidation-mode=checked-hash', self.pkgdir)
        with open(pyc, 'rb') as fp:
            data = fp.read()
        # Flags word (bytes 4:8, little-endian) per PEP 552:
        # 0b11 = hash-based + check_source, 0b01 = hash-based unchecked.
        self.assertEqual(int.from_bytes(data[4:8], 'little'), 0b11)
        self.assertRunOK('--invalidation-mode=unchecked-hash', self.pkgdir)
        with open(pyc, 'rb') as fp:
            data = fp.read()
        self.assertEqual(int.from_bytes(data[4:8], 'little'), 0b01)
    @skipUnless(_have_multiprocessing, "requires multiprocessing")
    def test_workers(self):
        # -j 0 lets compileall choose the worker count itself.
        bar2fn = script_helper.make_script(self.directory, 'bar2', '')
        files = []
        for suffix in range(5):
            pkgdir = os.path.join(self.directory, 'foo{}'.format(suffix))
            os.mkdir(pkgdir)
            fn = script_helper.make_script(pkgdir, '__init__', '')
            files.append(script_helper.make_script(pkgdir, 'bar2', ''))
        self.assertRunOK(self.directory, '-j', '0')
        self.assertCompiled(bar2fn)
        for file in files:
            self.assertCompiled(file)
    @mock.patch('compileall.compile_dir')
    def test_workers_available_cores(self, compile_dir):
        # -j0 must be forwarded to compile_dir() as workers=0.
        with mock.patch("sys.argv",
                        new=[sys.executable, self.directory, "-j0"]):
            compileall.main()
        self.assertTrue(compile_dir.called)
        self.assertEqual(compile_dir.call_args[-1]['workers'], 0)
    def test_strip_and_prepend(self):
        """CLI -s/-p mirror compile_dir()'s stripdir=/prependdir=."""
        fullpath = ["test", "build", "real", "path"]
        path = os.path.join(self.directory, *fullpath)
        os.makedirs(path)
        script = script_helper.make_script(path, "test", "1 / 0")
        bc = importlib.util.cache_from_source(script)
        stripdir = os.path.join(self.directory, *fullpath[:2])
        prependdir = "/foo"
        self.assertRunOK("-s", stripdir, "-p", prependdir, path)
        rc, out, err = script_helper.assert_python_failure(bc)
        expected_in = os.path.join(prependdir, *fullpath[2:])
        self.assertIn(
            expected_in,
            str(err, encoding=sys.getdefaultencoding())
        )
        self.assertNotIn(
            stripdir,
            str(err, encoding=sys.getdefaultencoding())
        )
def test_multiple_optimization_levels(self):
path = os.path.join(self.directory, "optimizations")
os.makedirs(path)
script = script_helper.make_script(path,
"test_optimization",
"a = 0")
bc = []
for opt_level in "", 1, 2, 3:
bc.append(importlib.util.cache_from_source(script,
optimization=opt_level))
test_combinations = [["0", "1"],
["1", "2"],
["0", "2"],
["0", "1", "2"]]
for opt_combination in test_combinations:
self.assertRunOK(path, *("-o" + str(n) for n in opt_combination))
for opt_level in opt_combination:
self.assertTrue(os.path.isfile(bc[int(opt_level)]))
try:
os.unlink(bc[opt_level])
except Exception:
pass
    @support.skip_unless_symlink
    def test_ignore_symlink_destination(self):
        """CLI -e mirrors compile_dir()'s limit_sl_dest=."""
        # Create folders for allowed files, symlinks and prohibited area
        allowed_path = os.path.join(self.directory, "test", "dir", "allowed")
        symlinks_path = os.path.join(self.directory, "test", "dir", "symlinks")
        prohibited_path = os.path.join(self.directory, "test", "dir", "prohibited")
        os.makedirs(allowed_path)
        os.makedirs(symlinks_path)
        os.makedirs(prohibited_path)
        # Create scripts and symlinks and remember their byte-compiled versions
        allowed_script = script_helper.make_script(allowed_path, "test_allowed", "a = 0")
        prohibited_script = script_helper.make_script(prohibited_path, "test_prohibited", "a = 0")
        allowed_symlink = os.path.join(symlinks_path, "test_allowed.py")
        prohibited_symlink = os.path.join(symlinks_path, "test_prohibited.py")
        os.symlink(allowed_script, allowed_symlink)
        os.symlink(prohibited_script, prohibited_symlink)
        allowed_bc = importlib.util.cache_from_source(allowed_symlink)
        prohibited_bc = importlib.util.cache_from_source(prohibited_symlink)
        self.assertRunOK(symlinks_path, "-e", allowed_path)
        self.assertTrue(os.path.isfile(allowed_bc))
        self.assertFalse(os.path.isfile(prohibited_bc))
    def test_hardlink_bad_args(self):
        # Bad arguments combination, hardlink deduplication make sense
        # only for more than one optimization level
        self.assertRunNotOK(self.directory, "-o 1", "--hardlink-dupes")
    def test_hardlink(self):
        # 'a = 0' code produces the same bytecode for the 3 optimization
        # levels. All three .pyc files must have the same inode (hardlinks).
        #
        # If deduplication is disabled, all pyc files must have different
        # inodes.
        for dedup in (True, False):
            with tempfile.TemporaryDirectory() as path:
                with self.subTest(dedup=dedup):
                    script = script_helper.make_script(path, "script", "a = 0")
                    pycs = get_pycs(script)
                    args = ["-q", "-o 0", "-o 1", "-o 2"]
                    if dedup:
                        args.append("--hardlink-dupes")
                    self.assertRunOK(path, *args)
                    self.assertEqual(is_hardlink(pycs[0], pycs[1]), dedup)
                    self.assertEqual(is_hardlink(pycs[1], pycs[2]), dedup)
                    self.assertEqual(is_hardlink(pycs[0], pycs[2]), dedup)
# Run the CLI suite with and without SOURCE_DATE_EPOCH (hash-based vs
# timestamp-based pyc invalidation).
class CommandLineTestsWithSourceEpoch(CommandLineTestsBase,
                                     unittest.TestCase,
                                     metaclass=SourceDateEpochTestMeta,
                                     source_date_epoch=True):
    pass
class CommandLineTestsNoSourceEpoch(CommandLineTestsBase,
                                  unittest.TestCase,
                                  metaclass=SourceDateEpochTestMeta,
                                  source_date_epoch=False):
    pass
class HardlinkDedupTestsBase:
    # Test hardlink_dupes parameter of compileall.compile_dir()
    def setUp(self):
        # self.path is only valid inside temporary_directory().
        self.path = None
    @contextlib.contextmanager
    def temporary_directory(self):
        # Expose self.path to make_script()/compile_dir() while the context
        # is active, then reset it.
        with tempfile.TemporaryDirectory() as path:
            self.path = path
            yield path
        self.path = None
    def make_script(self, code, name="script"):
        return script_helper.make_script(self.path, name, code)
    def compile_dir(self, *, dedup=True, optimize=(0, 1, 2), force=False):
        compileall.compile_dir(self.path, quiet=True, optimize=optimize,
                               hardlink_dupes=dedup, force=force)
    def test_bad_args(self):
        # Bad arguments combination, hardlink deduplication make sense
        # only for more than one optimization level
        with self.temporary_directory():
            self.make_script("pass")
            with self.assertRaises(ValueError):
                compileall.compile_dir(self.path, quiet=True, optimize=0,
                                       hardlink_dupes=True)
            with self.assertRaises(ValueError):
                # same optimization level specified twice:
                # compile_dir() removes duplicates
                compileall.compile_dir(self.path, quiet=True, optimize=[0, 0],
                                       hardlink_dupes=True)
    def create_code(self, docstring=False, assertion=False):
        # The docstring/assertion switches control which optimization levels
        # produce identical bytecode (and may therefore be hardlinked).
        lines = []
        if docstring:
            lines.append("'module docstring'")
        lines.append('x = 1')
        if assertion:
            lines.append("assert x == 1")
        return '\n'.join(lines)
    def iter_codes(self):
        # Yield (code, docstring, assertion) for all four combinations.
        for docstring in (False, True):
            for assertion in (False, True):
                code = self.create_code(docstring=docstring, assertion=assertion)
                yield (code, docstring, assertion)
    def test_disabled(self):
        # Deduplication disabled, no hardlinks
        for code, docstring, assertion in self.iter_codes():
            with self.subTest(docstring=docstring, assertion=assertion):
                with self.temporary_directory():
                    script = self.make_script(code)
                    pycs = get_pycs(script)
                    self.compile_dir(dedup=False)
                    self.assertFalse(is_hardlink(pycs[0], pycs[1]))
                    self.assertFalse(is_hardlink(pycs[0], pycs[2]))
                    self.assertFalse(is_hardlink(pycs[1], pycs[2]))
    def check_hardlinks(self, script, docstring=False, assertion=False):
        # Two levels are hardlinked exactly when their bytecode is identical:
        # an assertion distinguishes level 0 from 1/2, a docstring level 2
        # from 0/1 (see the boolean expectations below).
        pycs = get_pycs(script)
        self.assertEqual(is_hardlink(pycs[0], pycs[1]),
                         not assertion)
        self.assertEqual(is_hardlink(pycs[0], pycs[2]),
                         not assertion and not docstring)
        self.assertEqual(is_hardlink(pycs[1], pycs[2]),
                         not docstring)
    def test_hardlink(self):
        # Test deduplication on all combinations
        for code, docstring, assertion in self.iter_codes():
            with self.subTest(docstring=docstring, assertion=assertion):
                with self.temporary_directory():
                    script = self.make_script(code)
                    self.compile_dir()
                    self.check_hardlinks(script, docstring, assertion)
    def test_only_two_levels(self):
        # Don't build the 3 optimization levels, but only 2
        for opts in ((0, 1), (1, 2), (0, 2)):
            with self.subTest(opts=opts):
                with self.temporary_directory():
                    # code with no dostring and no assertion:
                    # same bytecode for all optimization levels
                    script = self.make_script(self.create_code())
                    self.compile_dir(optimize=opts)
                    pyc1 = get_pyc(script, opts[0])
                    pyc2 = get_pyc(script, opts[1])
                    self.assertTrue(is_hardlink(pyc1, pyc2))
    def test_duplicated_levels(self):
        # compile_dir() must not fail if optimize contains duplicated
        # optimization levels and/or if optimization levels are not sorted.
        with self.temporary_directory():
            # code with no dostring and no assertion:
            # same bytecode for all optimization levels
            script = self.make_script(self.create_code())
            self.compile_dir(optimize=[1, 0, 1, 0])
            pyc1 = get_pyc(script, 0)
            pyc2 = get_pyc(script, 1)
            self.assertTrue(is_hardlink(pyc1, pyc2))
    def test_recompilation(self):
        # Test compile_dir() when pyc files already exists and the script
        # content changed
        with self.temporary_directory():
            script = self.make_script("a = 0")
            self.compile_dir()
            # All three levels have the same inode
            self.check_hardlinks(script)
            pycs = get_pycs(script)
            inode = os.stat(pycs[0]).st_ino
            # Change of the module content
            script = self.make_script("print(0)")
            # Recompilation without -o 1
            self.compile_dir(optimize=[0, 2], force=True)
            # opt-1.pyc should have the same inode as before and others should not
            self.assertEqual(inode, os.stat(pycs[1]).st_ino)
            self.assertTrue(is_hardlink(pycs[0], pycs[2]))
            self.assertNotEqual(inode, os.stat(pycs[2]).st_ino)
            # opt-1.pyc and opt-2.pyc have different content
            self.assertFalse(filecmp.cmp(pycs[1], pycs[2], shallow=True))
    def test_import(self):
        # Test that import updates a single pyc file when pyc files already
        # exists and the script content changed
        with self.temporary_directory():
            script = self.make_script(self.create_code(), name="module")
            self.compile_dir()
            # All three levels have the same inode
            self.check_hardlinks(script)
            pycs = get_pycs(script)
            inode = os.stat(pycs[0]).st_ino
            # Change of the module content
            script = self.make_script("print(0)", name="module")
            # Import the module in Python with -O (optimization level 1)
            script_helper.assert_python_ok(
                "-O", "-c", "import module", __isolated=False, PYTHONPATH=self.path
            )
            # Only opt-1.pyc is changed
            self.assertEqual(inode, os.stat(pycs[0]).st_ino)
            self.assertEqual(inode, os.stat(pycs[2]).st_ino)
            self.assertFalse(is_hardlink(pycs[1], pycs[2]))
            # opt-1.pyc and opt-2.pyc have different content
            self.assertFalse(filecmp.cmp(pycs[1], pycs[2], shallow=True))
# Run the hardlink-dedup suite with and without SOURCE_DATE_EPOCH.
class HardlinkDedupTestsWithSourceEpoch(HardlinkDedupTestsBase,
                                        unittest.TestCase,
                                        metaclass=SourceDateEpochTestMeta,
                                        source_date_epoch=True):
    pass
class HardlinkDedupTestsNoSourceEpoch(HardlinkDedupTestsBase,
                                      unittest.TestCase,
                                      metaclass=SourceDateEpochTestMeta,
                                      source_date_epoch=False):
    pass
if __name__ == "__main__":
    unittest.main()
| 43.108676 | 98 | 0.614079 |
246c8686c6bacd19d5f185a0d5db10cc12d2ffaa | 4,119 | py | Python | gcal.py | frangiz/birthday-reminder | 92ec6d5e93f0e2428c94d22795cdb994c2024b4f | [
"MIT"
] | null | null | null | gcal.py | frangiz/birthday-reminder | 92ec6d5e93f0e2428c94d22795cdb994c2024b4f | [
"MIT"
] | null | null | null | gcal.py | frangiz/birthday-reminder | 92ec6d5e93f0e2428c94d22795cdb994c2024b4f | [
"MIT"
] | null | null | null | import pickle
import os.path
from googleapiclient.discovery import build
from google_auth_oauthlib.flow import InstalledAppFlow
from google.auth.transport.requests import Request
import googleapiclient.errors
from models import EventResponse
from pydantic import parse_obj_as
from datetime import datetime
from typing import List, Dict
from pathlib import Path
# If modifying these scopes, delete the file token.pickle.
# Full read/write calendar access is needed for the event create/delete
# operations performed by Gcal below.
SCOPES = ['https://www.googleapis.com/auth/calendar']
class Gcal():
    """Thin wrapper around the Google Calendar v3 API.

    OAuth credentials are cached in ``token.pickle``; a calendar must be
    chosen with select_calendar() before event operations are used.
    """
    def __init__(self):
        self.service = self._get_calendar_service()
        # No calendar selected yet; event methods need select_calendar().
        self.calendar_id = None
    def _find_credentials_filename(self):
        # First client_secret*.json in the working directory.
        # NOTE(review): raises IndexError if no such file exists — consider
        # a clearer error message.
        return list(Path('.').glob('client_secret*.json'))[0]
    def _get_calendar_service(self):
        """Run the installed-app OAuth flow (or reuse/refresh cached
        credentials) and build the Calendar v3 service object."""
        creds = None
        # The file token.pickle stores the user's access and refresh tokens, and is
        # created automatically when the authorization flow completes for the first
        # time.
        if os.path.exists('token.pickle'):
            with open('token.pickle', 'rb') as token:
                creds = pickle.load(token)
        # If there are no (valid) credentials available, let the user log in.
        if not creds or not creds.valid:
            if creds and creds.expired and creds.refresh_token:
                creds.refresh(Request())
            else:
                flow = InstalledAppFlow.from_client_secrets_file(
                    self._find_credentials_filename(), SCOPES)
                creds = flow.run_local_server(port=0)
            # Save the credentials for the next run
            with open('token.pickle', 'wb') as token:
                pickle.dump(creds, token)
        return build('calendar', 'v3', credentials=creds)
    def get_calendars(self):
        """Return (id, summary, primary) tuples for the user's calendars."""
        calendars_result = self.service.calendarList().list().execute()
        calendars = calendars_result.get('items', [])
        res = []
        for calendar in calendars:
            summary = calendar['summary']
            id = calendar['id']
            primary = calendar.get('primary')
            res.append((id, summary, primary))
        return res
    def list_calendars(self):
        """Print each calendar as "summary<TAB>id<TAB>[Primary]"."""
        print('Getting list of calendars')
        calendars = self.get_calendars()
        if not calendars:
            print('No calendars found.')
        for id, summary, primary in calendars:
            primary = "Primary" if primary else ""
            print(f"{summary}\t{id}\t{primary}")
    def select_calendar(self, calendar_id: str):
        # Remember which calendar subsequent event operations target.
        self.calendar_id = calendar_id
    def create_event(self, body: Dict[str, object]):
        """Insert one event (Calendar API event resource dict) and log it."""
        event_result = self.service.events().insert(calendarId=self.calendar_id, body=body).execute()
        print(f"created event: {event_result['id']=}, {event_result['start']=}, {event_result['description']=}")
    def list_events(self) -> List[EventResponse]:
        """Return all generated-birthday events, following pagination."""
        #now = datetime.utcnow().isoformat() + 'Z' # 'Z' indicates UTC time
        # NOTE(review): window start is hard-coded to 2020-01-01 rather than
        # "now" (commented line above) — confirm this is intentional.
        now = "2020-01-01T00:00:00.000001Z"
        events = []
        has_next = True
        page_token = None
        while has_next:
            # Only events tagged by this tool are returned, via the private
            # extended property filter.
            events_result = self.service.events().list(calendarId=self.calendar_id, timeMin=now,
                                                       singleEvents=True,
                                                       orderBy='startTime', pageToken=page_token,
                                                       privateExtendedProperty="tag=generated-birthday-event").execute()
            events.extend(parse_obj_as(List[EventResponse], events_result.get('items', [])))
            page_token = events_result.get('nextPageToken', "")
            has_next = (page_token != "")
        return events
    def del_event(self, event_id: str) -> None:
        """Delete one event by id; API failures are logged, not raised."""
        try:
            self.service.events().delete(
                calendarId=self.calendar_id,
                eventId=event_id,
            ).execute()
        except googleapiclient.errors.HttpError:
            print("Failed to delete event")
            return
        #print("Event deleted")
| 38.495327 | 114 | 0.595047 |
bac33723c00df6463daa9ec5d8ecb411a2c9290a | 12,553 | py | Python | design_baselines/cma_es/relabel_experiments.py | young-geng/design-baselines-icml | ce4183babac304ed5f83151a602d75de6c4740c4 | [
"MIT"
] | 28 | 2020-06-30T00:40:33.000Z | 2022-03-22T05:16:27.000Z | design_baselines/cma_es/relabel_experiments.py | young-geng/design-baselines-icml | ce4183babac304ed5f83151a602d75de6c4740c4 | [
"MIT"
] | 5 | 2020-09-17T07:15:52.000Z | 2022-03-31T23:33:27.000Z | design_baselines/cma_es/relabel_experiments.py | young-geng/design-baselines-icml | ce4183babac304ed5f83151a602d75de6c4740c4 | [
"MIT"
] | 3 | 2020-09-17T06:55:53.000Z | 2021-08-09T20:27:26.000Z | from ray import tune
import click
import ray
import os
# Click entry point; each benchmark task below registers itself as a
# sub-command via @cli.command().
@click.group()
def cli():
    """A group of experiments for training Conservative Score Models
    and reproducing our ICLR 2021 results.
    """
#############
@cli.command()
@click.option('--local-dir', type=str, default='cma-es-dkitty')
@click.option('--cpus', type=int, default=24)
@click.option('--gpus', type=int, default=1)
@click.option('--num-parallel', type=int, default=1)
@click.option('--num-samples', type=int, default=1)
def dkitty(local_dir, cpus, gpus, num_parallel, num_samples):
    """Evaluate CMA-ES on DKittyMorphology-Exact-v0
    """
    # Final Version
    # Import lazily so the CLI loads without the heavy experiment deps.
    from design_baselines.cma_es import cma_es
    ray.init(num_cpus=cpus,
             num_gpus=gpus,
             include_dashboard=False,
             _temp_dir=os.path.expanduser('~/tmp'))
    tune.run(cma_es, config={
        "logging_dir": "data",
        "normalize_ys": True,
        "normalize_xs": True,
        "task": "DKittyMorphology-Exact-v0",
        "task_kwargs": {"relabel": True},
        "bootstraps": 5,
        "val_size": 200,
        "optimize_ground_truth": False,
        "ensemble_batch_size": 100,
        "hidden_size": 256,
        "num_layers": 1,
        "initial_max_std": 0.2,
        "initial_min_std": 0.1,
        "ensemble_lr": 0.001,
        "ensemble_epochs": 100,
        "cma_max_iterations": 100,
        "cma_sigma": 0.5,
        "solver_samples": 128},
        num_samples=num_samples,
        local_dir=local_dir,
        # Fractional GPU request: the -0.01 margin presumably lets
        # num_parallel trials share the GPU(s) — verify against Ray docs.
        resources_per_trial={'cpu': cpus // num_parallel,
                             'gpu': gpus / num_parallel - 0.01})
@cli.command()
@click.option('--local-dir', type=str, default='cma-es-ant')
@click.option('--cpus', type=int, default=24)
@click.option('--gpus', type=int, default=1)
@click.option('--num-parallel', type=int, default=1)
@click.option('--num-samples', type=int, default=1)
def ant(local_dir, cpus, gpus, num_parallel, num_samples):
    """Evaluate CMA-ES on AntMorphology-Exact-v0
    """
    # Final Version
    # Same launcher as dkitty() with only the task name changed.
    from design_baselines.cma_es import cma_es
    ray.init(num_cpus=cpus,
             num_gpus=gpus,
             include_dashboard=False,
             _temp_dir=os.path.expanduser('~/tmp'))
    tune.run(cma_es, config={
        "logging_dir": "data",
        "normalize_ys": True,
        "normalize_xs": True,
        "task": "AntMorphology-Exact-v0",
        "task_kwargs": {"relabel": True},
        "bootstraps": 5,
        "val_size": 200,
        "optimize_ground_truth": False,
        "ensemble_batch_size": 100,
        "hidden_size": 256,
        "num_layers": 1,
        "initial_max_std": 0.2,
        "initial_min_std": 0.1,
        "ensemble_lr": 0.001,
        "ensemble_epochs": 100,
        "cma_max_iterations": 100,
        "cma_sigma": 0.5,
        "solver_samples": 128},
        num_samples=num_samples,
        local_dir=local_dir,
        resources_per_trial={'cpu': cpus // num_parallel,
                             'gpu': gpus / num_parallel - 0.01})
@cli.command()
@click.option('--local-dir', type=str, default='cma-es-hopper')
@click.option('--cpus', type=int, default=24)
@click.option('--gpus', type=int, default=1)
@click.option('--num-parallel', type=int, default=1)
@click.option('--num-samples', type=int, default=1)
def hopper(local_dir, cpus, gpus, num_parallel, num_samples):
    """Run the CMA-ES baseline on HopperController-Exact-v0."""

    # Deferred import keeps the CLI snappy when other commands are used.
    from design_baselines.cma_es import cma_es

    # Bring up the local ray cluster that tune schedules trials on.
    ray.init(num_cpus=cpus,
             num_gpus=gpus,
             include_dashboard=False,
             _temp_dir=os.path.expanduser('~/tmp'))

    # Hyperparameters for CMA-ES and the learned ensemble objective.
    config = {
        "logging_dir": "data",
        "normalize_ys": True,
        "normalize_xs": True,
        "task": "HopperController-Exact-v0",
        "task_kwargs": {"relabel": True},
        "bootstraps": 5,
        "val_size": 200,
        "optimize_ground_truth": False,
        "ensemble_batch_size": 100,
        "hidden_size": 256,
        "num_layers": 1,
        "initial_max_std": 0.2,
        "initial_min_std": 0.1,
        "ensemble_lr": 0.001,
        "ensemble_epochs": 100,
        "cma_max_iterations": 100,
        "cma_sigma": 0.5,
        "solver_samples": 128,
    }

    # Share the machine evenly over parallel trials; the small GPU epsilon
    # keeps ray from demanding one full GPU per trial.
    resources = {'cpu': cpus // num_parallel,
                 'gpu': gpus / num_parallel - 0.01}

    tune.run(cma_es,
             config=config,
             num_samples=num_samples,
             local_dir=local_dir,
             resources_per_trial=resources)
@cli.command()
@click.option('--local-dir', type=str, default='cma-es-superconductor')
@click.option('--cpus', type=int, default=24)
@click.option('--gpus', type=int, default=1)
@click.option('--num-parallel', type=int, default=1)
@click.option('--num-samples', type=int, default=1)
def superconductor(local_dir, cpus, gpus, num_parallel, num_samples):
    """Run the CMA-ES baseline on Superconductor-RandomForest-v0."""

    # Deferred import keeps the CLI snappy when other commands are used.
    from design_baselines.cma_es import cma_es

    # Bring up the local ray cluster that tune schedules trials on.
    ray.init(num_cpus=cpus,
             num_gpus=gpus,
             include_dashboard=False,
             _temp_dir=os.path.expanduser('~/tmp'))

    # Hyperparameters for CMA-ES and the learned ensemble objective.
    config = {
        "logging_dir": "data",
        "normalize_ys": True,
        "normalize_xs": True,
        "task": "Superconductor-RandomForest-v0",
        "task_kwargs": {"relabel": True},
        "bootstraps": 5,
        "val_size": 200,
        "optimize_ground_truth": False,
        "ensemble_batch_size": 100,
        "hidden_size": 256,
        "num_layers": 1,
        "initial_max_std": 0.2,
        "initial_min_std": 0.1,
        "ensemble_lr": 0.001,
        "ensemble_epochs": 100,
        "cma_max_iterations": 100,
        "cma_sigma": 0.5,
        "solver_samples": 128,
    }

    # Share the machine evenly over parallel trials; the small GPU epsilon
    # keeps ray from demanding one full GPU per trial.
    resources = {'cpu': cpus // num_parallel,
                 'gpu': gpus / num_parallel - 0.01}

    tune.run(cma_es,
             config=config,
             num_samples=num_samples,
             local_dir=local_dir,
             resources_per_trial=resources)
@cli.command()
@click.option('--local-dir', type=str, default='cma-es-chembl')
@click.option('--cpus', type=int, default=24)
@click.option('--gpus', type=int, default=1)
@click.option('--num-parallel', type=int, default=1)
@click.option('--num-samples', type=int, default=1)
def chembl(local_dir, cpus, gpus, num_parallel, num_samples):
    """Run the CMA-ES baseline on ChEMBL-ResNet-v0."""

    # Deferred import keeps the CLI snappy when other commands are used.
    from design_baselines.cma_es import cma_es

    # Bring up the local ray cluster that tune schedules trials on.
    ray.init(num_cpus=cpus,
             num_gpus=gpus,
             include_dashboard=False,
             _temp_dir=os.path.expanduser('~/tmp'))

    # Hyperparameters for CMA-ES and the learned ensemble objective.
    # Unlike the continuous tasks, this discrete task enables the VAE
    # (``use_vae``) and leaves the raw inputs unnormalized.
    config = {
        "logging_dir": "data",
        "normalize_ys": True,
        "normalize_xs": False,
        "task": "ChEMBL-ResNet-v0",
        "task_kwargs": {"relabel": True},
        "bootstraps": 5,
        "val_size": 200,
        "optimize_ground_truth": False,
        "use_vae": True,
        "vae_beta": 0.01,
        "vae_epochs": 20,
        "vae_batch_size": 128,
        "vae_hidden_size": 64,
        "vae_latent_size": 256,
        "vae_activation": "relu",
        "vae_kernel_size": 3,
        "vae_num_blocks": 4,
        "vae_lr": 0.001,
        "ensemble_batch_size": 100,
        "hidden_size": 256,
        "num_layers": 1,
        "initial_max_std": 0.2,
        "initial_min_std": 0.1,
        "ensemble_lr": 0.001,
        "ensemble_epochs": 50,
        "cma_max_iterations": 100,
        "cma_sigma": 0.5,
        "solver_samples": 128,
    }

    # Share the machine evenly over parallel trials; the small GPU epsilon
    # keeps ray from demanding one full GPU per trial.
    resources = {'cpu': cpus // num_parallel,
                 'gpu': gpus / num_parallel - 0.01}

    tune.run(cma_es,
             config=config,
             num_samples=num_samples,
             local_dir=local_dir,
             resources_per_trial=resources)
@cli.command()
@click.option('--local-dir', type=str, default='cma-es-gfp')
@click.option('--cpus', type=int, default=24)
@click.option('--gpus', type=int, default=1)
@click.option('--num-parallel', type=int, default=1)
@click.option('--num-samples', type=int, default=1)
def gfp(local_dir, cpus, gpus, num_parallel, num_samples):
    """Run the CMA-ES baseline on GFP-Transformer-v0."""

    # Deferred import keeps the CLI snappy when other commands are used.
    from design_baselines.cma_es import cma_es

    # Bring up the local ray cluster that tune schedules trials on.
    ray.init(num_cpus=cpus,
             num_gpus=gpus,
             include_dashboard=False,
             _temp_dir=os.path.expanduser('~/tmp'))

    # Hyperparameters for CMA-ES and the learned ensemble objective.
    # ``use_vae`` is False here, so the vae_* entries are inert defaults.
    config = {
        "logging_dir": "data",
        "normalize_ys": True,
        "normalize_xs": True,
        "task": "GFP-Transformer-v0",
        "task_kwargs": {"relabel": True},
        "bootstraps": 5,
        "val_size": 200,
        "optimize_ground_truth": False,
        "use_vae": False,
        "vae_beta": 0.01,
        "vae_epochs": 20,
        "vae_batch_size": 128,
        "vae_hidden_size": 64,
        "vae_latent_size": 256,
        "vae_activation": "relu",
        "vae_kernel_size": 3,
        "vae_num_blocks": 4,
        "vae_lr": 0.001,
        "ensemble_batch_size": 100,
        "hidden_size": 256,
        "num_layers": 1,
        "initial_max_std": 0.2,
        "initial_min_std": 0.1,
        "ensemble_lr": 0.001,
        "ensemble_epochs": 100,
        "cma_max_iterations": 100,
        "cma_sigma": 0.5,
        "solver_samples": 128,
    }

    # Share the machine evenly over parallel trials; the small GPU epsilon
    # keeps ray from demanding one full GPU per trial.
    resources = {'cpu': cpus // num_parallel,
                 'gpu': gpus / num_parallel - 0.01}

    tune.run(cma_es,
             config=config,
             num_samples=num_samples,
             local_dir=local_dir,
             resources_per_trial=resources)
@cli.command()
@click.option('--local-dir', type=str, default='cma-es-tf-bind-8')
@click.option('--cpus', type=int, default=24)
@click.option('--gpus', type=int, default=1)
@click.option('--num-parallel', type=int, default=1)
@click.option('--num-samples', type=int, default=1)
def tf_bind_8(local_dir, cpus, gpus, num_parallel, num_samples):
    """Run the CMA-ES baseline on TFBind8-Exact-v0."""

    # Deferred import keeps the CLI snappy when other commands are used.
    from design_baselines.cma_es import cma_es

    # Bring up the local ray cluster that tune schedules trials on.
    ray.init(num_cpus=cpus,
             num_gpus=gpus,
             include_dashboard=False,
             _temp_dir=os.path.expanduser('~/tmp'))

    # Hyperparameters for CMA-ES and the learned ensemble objective.
    # ``use_vae`` is False here, so the vae_* entries are inert defaults.
    config = {
        "logging_dir": "data",
        "normalize_ys": True,
        "normalize_xs": True,
        "task": "TFBind8-Exact-v0",
        "task_kwargs": {"relabel": True},
        "bootstraps": 5,
        "val_size": 200,
        "optimize_ground_truth": False,
        "use_vae": False,
        "vae_beta": 0.01,
        "vae_epochs": 20,
        "vae_batch_size": 128,
        "vae_hidden_size": 64,
        "vae_latent_size": 256,
        "vae_activation": "relu",
        "vae_kernel_size": 3,
        "vae_num_blocks": 3,
        "vae_lr": 0.001,
        "ensemble_batch_size": 100,
        "hidden_size": 256,
        "num_layers": 1,
        "initial_max_std": 0.2,
        "initial_min_std": 0.1,
        "ensemble_lr": 0.001,
        "ensemble_epochs": 100,
        "cma_max_iterations": 100,
        "cma_sigma": 0.5,
        "solver_samples": 128,
    }

    # Share the machine evenly over parallel trials; the small GPU epsilon
    # keeps ray from demanding one full GPU per trial.
    resources = {'cpu': cpus // num_parallel,
                 'gpu': gpus / num_parallel - 0.01}

    tune.run(cma_es,
             config=config,
             num_samples=num_samples,
             local_dir=local_dir,
             resources_per_trial=resources)
@cli.command()
@click.option('--local-dir', type=str, default='cma-es-utr')
@click.option('--cpus', type=int, default=24)
@click.option('--gpus', type=int, default=1)
@click.option('--num-parallel', type=int, default=1)
@click.option('--num-samples', type=int, default=1)
def utr(local_dir, cpus, gpus, num_parallel, num_samples):
    """Run the CMA-ES baseline on UTR-ResNet-v0."""

    # Deferred import keeps the CLI snappy when other commands are used.
    from design_baselines.cma_es import cma_es

    # Bring up the local ray cluster that tune schedules trials on.
    ray.init(num_cpus=cpus,
             num_gpus=gpus,
             include_dashboard=False,
             _temp_dir=os.path.expanduser('~/tmp'))

    # Hyperparameters for CMA-ES and the learned ensemble objective.
    # ``use_vae`` is False here, so the vae_* entries are inert defaults.
    config = {
        "logging_dir": "data",
        "normalize_ys": True,
        "normalize_xs": True,
        "task": "UTR-ResNet-v0",
        "task_kwargs": {"relabel": True},
        "bootstraps": 5,
        "val_size": 200,
        "optimize_ground_truth": False,
        "use_vae": False,
        "vae_beta": 0.01,
        "vae_epochs": 20,
        "vae_batch_size": 128,
        "vae_hidden_size": 64,
        "vae_latent_size": 256,
        "vae_activation": "relu",
        "vae_kernel_size": 3,
        "vae_num_blocks": 4,
        "vae_lr": 0.001,
        "ensemble_batch_size": 100,
        "hidden_size": 256,
        "num_layers": 1,
        "initial_max_std": 0.2,
        "initial_min_std": 0.1,
        "ensemble_lr": 0.001,
        "ensemble_epochs": 100,
        "cma_max_iterations": 100,
        "cma_sigma": 0.5,
        "solver_samples": 128,
    }

    # Share the machine evenly over parallel trials; the small GPU epsilon
    # keeps ray from demanding one full GPU per trial.
    resources = {'cpu': cpus // num_parallel,
                 'gpu': gpus / num_parallel - 0.01}

    tune.run(cma_es,
             config=config,
             num_samples=num_samples,
             local_dir=local_dir,
             resources_per_trial=resources)
| 32.022959 | 71 | 0.582889 |
a3a9b21b037a7735cb67a7cf2c803facb0c85819 | 4,918 | py | Python | tests/approaches/test_random_options_approach.py | Learning-and-Intelligent-Systems/predicators | 0b2e71cacf86ba2bfdc1d9059c3a78016d0a4d7e | [
"MIT"
] | 24 | 2021-11-20T16:35:41.000Z | 2022-03-30T03:49:52.000Z | tests/approaches/test_random_options_approach.py | Learning-and-Intelligent-Systems/predicators | 0b2e71cacf86ba2bfdc1d9059c3a78016d0a4d7e | [
"MIT"
] | 214 | 2021-10-12T01:17:50.000Z | 2022-03-31T20:18:36.000Z | tests/approaches/test_random_options_approach.py | Learning-and-Intelligent-Systems/predicators | 0b2e71cacf86ba2bfdc1d9059c3a78016d0a4d7e | [
"MIT"
] | 1 | 2022-02-15T20:24:17.000Z | 2022-02-15T20:24:17.000Z | """Test cases for the random options approach class."""
from gym.spaces import Box
from predicators.src import utils
from predicators.src.approaches.random_options_approach import \
RandomOptionsApproach
from predicators.src.structs import Action, DefaultState, \
ParameterizedOption, Predicate, State, Task, Type
def test_random_options_approach():
    """Tests for RandomOptionsApproach class.

    NOTE(review): the final assertion pins an exact string of sampled
    actions, so this test depends on the exact order in which random
    numbers are consumed — do not reorder statements here.
    """
    utils.reset_config({"env": "cover"})
    cup_type = Type("cup_type", ["feat1"])
    cup = cup_type("cup")
    state = State({cup: [0.5]})

    # Toy 1-D simulator: each action adds its single parameter to the
    # cup's only feature.
    def _simulator(s, a):
        ns = s.copy()
        assert a.arr.shape == (1, )
        ns[cup][0] += a.arr.item()
        return ns

    params_space = Box(0, 1, (1, ))

    def _policy(_1, _2, _3, p):
        return Action(p)

    # Options with a parameter <= 0.25 cannot be initiated, so sampled
    # parameters are effectively restricted to (0.25, 1).
    def _initiable(_1, _2, _3, p):
        return p > 0.25

    def _terminal(s, _1, _2, _3):
        return s[cup][0] > 9.9

    parameterized_option = ParameterizedOption("Move", [], params_space,
                                               _policy, _initiable, _terminal)

    def _solved_classifier(s, o):
        return s[o[0]][0] > 7.5

    Solved = Predicate("Solved", [cup_type], _solved_classifier)
    task = Task(state, {Solved([cup])})
    approach = RandomOptionsApproach({Solved}, {parameterized_option},
                                     {cup_type}, params_space, [task])
    assert not approach.is_learning_based
    policy = approach.solve(task, 500)
    solved = False
    act_var = None
    for _ in range(10):
        act = policy(state)
        assert act.has_option()
        if act_var is None:
            act_var = act.arr.item()
        else:
            # RandomOptionsApproach should use the same option all the way
            # to the end of the execution when the task is solved, so the
            # parameter should always be the same.
            assert abs(act_var - act.arr.item()) < 1e-3
        state = _simulator(state, act)
        if task.goal.issubset(utils.abstract(state, {Solved})):
            solved = True
            break
    assert solved
    # Test what happens when there's no initializable option.
    parameterized_option2 = ParameterizedOption("Move", [], params_space,
                                                _policy,
                                                lambda _1, _2, _3, _4: False,
                                                _terminal)
    task = Task(state, {Solved([cup])})
    # NOTE(review): a bare ``task`` is passed here where the first
    # construction passed ``[task]`` — presumably the training tasks are
    # unused by this approach; confirm against the constructor.
    approach = RandomOptionsApproach({Solved}, {parameterized_option2},
                                     {cup_type}, params_space, task)
    policy = approach.solve(task, 500)
    act = policy(state)
    assert not act.has_option()  # should have fallen back to random action
    # Test what happens when there's no object of the right type.
    parameterized_option3 = ParameterizedOption("Move", [cup_type],
                                                params_space, _policy,
                                                lambda _1, _2, _3, _4: False,
                                                _terminal)
    task = Task(state, {Solved([cup])})
    approach = RandomOptionsApproach({Solved}, {parameterized_option3},
                                     {cup_type}, params_space, task)
    policy = approach.solve(task, 500)
    act = policy(DefaultState)
    assert not act.has_option()  # should have fallen back to random action
    # Test what happens when the option is always terminal.
    parameterized_option4 = ParameterizedOption("Move", [], params_space,
                                                _policy, _initiable,
                                                lambda _1, _2, _3, _4: True)
    task = Task(state, {Solved([cup])})
    approach = RandomOptionsApproach({Solved}, {parameterized_option4},
                                     {cup_type}, params_space, [task])
    policy = approach.solve(task, 500)
    act_var = None
    actions = []
    for _ in range(10):
        act = policy(state)
        actions.append(act)
        assert act.has_option()
        if act_var is None:
            act_var = act.arr.item()
        else:
            # RandomOptionsApproach should use different options on each step.
            assert abs(act_var - act.arr.item()) > 1e-3
        act_var = act.arr.item()
        state = _simulator(state, act)
    # Test reproducibility
    assert str(actions) == "[Action(_arr=array([0.70787615], dtype=float32)), Action(_arr=array([0.3698764], dtype=float32)), Action(_arr=array([0.29010695], dtype=float32)), Action(_arr=array([0.9975787], dtype=float32)), Action(_arr=array([0.9942262], dtype=float32)), Action(_arr=array([0.98252517], dtype=float32)), Action(_arr=array([0.55868745], dtype=float32)), Action(_arr=array([0.68523175], dtype=float32)), Action(_arr=array([0.99104315], dtype=float32)), Action(_arr=array([0.8620031], dtype=float32))]"  # pylint: disable=line-too-long
| 44.709091 | 548 | 0.580724 |
b7d24f1ba810cf673dd976b6ca446d3f7a770fd9 | 1,876 | py | Python | scripts/code_tutorials/predict_EL.py | mickvanhulst-TomTom/REL | 2c62807cf11f8f4b97ccf730c83b9758211a012e | [
"MIT"
] | null | null | null | scripts/code_tutorials/predict_EL.py | mickvanhulst-TomTom/REL | 2c62807cf11f8f4b97ccf730c83b9758211a012e | [
"MIT"
] | null | null | null | scripts/code_tutorials/predict_EL.py | mickvanhulst-TomTom/REL | 2c62807cf11f8f4b97ccf730c83b9758211a012e | [
"MIT"
] | null | null | null | from REL.entity_disambiguation import EntityDisambiguation
from REL.mention_detection import MentionDetection
from REL.ner import Cmns, load_flair_ner
from REL.utils import process_results
def example_preprocessing():
    """Build a toy ``{doc_name: [text, spans]}`` mapping for the EL pipeline.

    Both documents share the same sample text, and the span list is left
    empty so that mention detection discovers the mentions itself.
    """
    sample_text = """Obama will visit Germany. And have a meeting with Merkel tomorrow.
    Obama will visit Germany. And have a meeting with Merkel tomorrow. Go all the way or blah blah Charles Bukowski."""
    # A hand-labelled alternative would look like [(0, 5), (17, 7), (50, 6)].
    gold_spans = []
    return {"test_doc": [sample_text, gold_spans],
            "test_doc2": [sample_text, gold_spans]}
# Root folder holding the REL wiki data and trained models.
base_url = "/users/vanhulsm/Desktop/projects/data/"
wiki_version = "wiki_2014"

# 1. Prepare the input documents ({doc_name: [text, spans]}).
input_text = example_preprocessing()

# 2. Mention detection. Two options are shown below; only the result of
# the second (find_mentions) is actually used.
mention_detection = MentionDetection(base_url, wiki_version)

# 2a. If you bring your own MD system (or n-gram detection), supply your
# spans and only reformat them here.
# NOTE(review): this result is immediately overwritten by find_mentions
# below, so this call is illustrative only.
mentions_dataset, n_mentions = mention_detection.format_spans(input_text)

# 2b. Otherwise run a tagger; flair NER here, or the commented n-gram one.
tagger_ner = load_flair_ner("ner-fast")
# tagger_ngram = Cmns(base_url, wiki_version, n=5)
mentions_dataset, n_mentions = mention_detection.find_mentions(input_text, tagger_ner)

# 3. Load the entity disambiguation model in evaluation mode.
config = {
    "mode": "eval",
    "model_path": "{}/{}/generated/model".format(base_url, wiki_version),
}
model = EntityDisambiguation(base_url, wiki_version, config)

# 4. Entity disambiguation.
predictions, timing = model.predict(mentions_dataset)

# 5. Optionally convert the raw predictions into a usable format.
result = process_results(mentions_dataset, predictions, input_text)
print(result)
| 37.52 | 119 | 0.747868 |
93b318e898fcf9cf008c4cc2f7bcfd96ee6e2316 | 1,585 | py | Python | tests/test_utils/test_util_plots.py | moonson619/AI4Water-1 | 285d46824502b6a787e42570b72432f4f6acf45e | [
"MIT"
] | 17 | 2021-05-21T13:01:52.000Z | 2022-03-19T15:17:10.000Z | tests/test_utils/test_util_plots.py | moonson619/AI4Water-1 | 285d46824502b6a787e42570b72432f4f6acf45e | [
"MIT"
] | 3 | 2021-10-31T22:40:28.000Z | 2021-11-08T02:28:35.000Z | tests/test_utils/test_util_plots.py | moonson619/AI4Water-1 | 285d46824502b6a787e42570b72432f4f6acf45e | [
"MIT"
] | 7 | 2021-08-06T07:27:50.000Z | 2022-01-26T00:38:32.000Z |
import unittest
import numpy as np
import matplotlib.pyplot as plt
from ai4water.utils.visualizations import murphy_diagram, fdc_plot
class TestMurphyDiagrams(unittest.TestCase):
    """Smoke tests for ``murphy_diagram``."""

    # Shared synthetic data: 100 integer observations and two competing
    # integer forecasts drawn from the same range.
    observed = np.random.randint(1, 1000, 100)
    forecast_a = np.random.randint(1, 1000, 100)
    forecast_b = np.random.randint(1, 1000, 100)

    def test_basic(self):
        axis = murphy_diagram(self.observed, self.forecast_a, self.forecast_b,
                              show=False)
        assert isinstance(axis, plt.Axes)

    def test_basic_diff(self):
        murphy_diagram(self.observed, self.forecast_a, self.forecast_b,
                       plot_type="diff", show=False)

    def test_raise_error(self):
        # A reference model without accompanying inputs is expected to
        # raise AssertionError.
        with self.assertRaises(AssertionError):
            murphy_diagram(observed=self.observed,
                           predicted=self.forecast_a,
                           reference_model="LinearRegression",
                           plot_type="diff")

    def test_with_reference_model(self):
        features = np.random.random((100, 2))
        axis = murphy_diagram(self.observed,
                              self.forecast_a,
                              reference_model="LinearRegression",
                              inputs=features,
                              plot_type="diff",
                              show=False)
        assert isinstance(axis, plt.Axes)
class TestFDC(unittest.TestCase):
    """Smoke test for the flow-duration-curve plot."""

    def test_basic(self):
        # Random series are enough to exercise the plotting code path.
        sim, obs = np.random.random(100), np.random.random(100)
        axis = fdc_plot(sim, obs, show=False)
        assert isinstance(axis, plt.Axes)
# Run the full suite when executed directly: ``python test_util_plots.py``.
if __name__ == "__main__":
    unittest.main()
52226fb81897960859a6fa297c62ad0f96e18ff1 | 1,228 | py | Python | docs/tools/nb_to_doc.py | Keris/yasc | eb55000e730fb2e60515a39dac82b851239a2c57 | [
"MIT"
] | 4 | 2020-05-21T06:35:48.000Z | 2021-04-12T10:49:55.000Z | docs/tools/nb_to_doc.py | Keris/yasc | eb55000e730fb2e60515a39dac82b851239a2c57 | [
"MIT"
] | null | null | null | docs/tools/nb_to_doc.py | Keris/yasc | eb55000e730fb2e60515a39dac82b851239a2c57 | [
"MIT"
] | null | null | null | #! /usr/bin/env python
"""
Convert empty IPython notebook to a sphinx doc page.
"""
import sys
from subprocess import check_call as sh
def convert_nb(nbname):
    """Execute a notebook in place and convert it to a Sphinx ``.rst`` page.

    Runs three ``jupyter nbconvert`` passes: execute the notebook, export
    it to reStructuredText (dropping cells tagged ``hide`` / ``hide-input``
    / ``hide-output``), then clear the stored outputs so the notebook file
    itself stays clean in version control. Finally the ``.rst`` file's
    mtime is bumped so Sphinx sees it as newer than the source notebook.

    Args:
        nbname: Notebook path. NOTE(review): the generated page is
            ``nbname + ".rst"``, which suggests the argument is passed
            without its ``.ipynb`` suffix — confirm against the caller.

    Raises:
        subprocess.CalledProcessError: If any ``jupyter nbconvert`` call
            exits with a non-zero status (``sh`` is ``check_call``).
    """
    # Execute the notebook in place so the exported page has fresh output.
    sh([
        "jupyter",
        "nbconvert",
        "--to",
        "notebook",
        "--execute",
        "--inplace",
        nbname,
    ])

    # Convert to .rst for Sphinx, honouring the hide-* cell tags.
    sh([
        "jupyter",
        "nbconvert",
        "--to",
        "rst",
        nbname,
        "--TagRemovePreprocessor.remove_cell_tags={'hide'}",
        "--TagRemovePreprocessor.remove_input_tags={'hide-input'}",
        "--TagRemovePreprocessor.remove_all_outputs_tags={'hide-output'}",
    ])

    # Clear notebook output so the executed results are not committed.
    sh([
        "jupyter",
        "nbconvert",
        "--to",
        "notebook",
        "--inplace",
        "--ClearOutputPreprocessor.enabled=True",
        nbname,
    ])

    # Touch the .rst file so it has a later modify time than the source.
    # Use pathlib instead of shelling out to ``touch``, which is not
    # available on Windows.
    from pathlib import Path
    Path(nbname + ".rst").touch()
# Convert every notebook path given on the command line.
if __name__ == "__main__":
    for nbname in sys.argv[1:]:
        convert_nb(nbname)
| 20.466667 | 78 | 0.482085 |
899f50179fbea94df448603f7a74fc92f6f19a5d | 36,674 | py | Python | mmdet/core/anchor/anchor_generator.py | Bin-ze/Food_detection | 1c1a067f12644f2b0289e49aec4637d580722f70 | [
"Apache-2.0"
] | 4 | 2021-08-15T04:31:49.000Z | 2022-03-10T07:11:48.000Z | mmdet/core/anchor/anchor_generator.py | Bin-ze/Food_detection | 1c1a067f12644f2b0289e49aec4637d580722f70 | [
"Apache-2.0"
] | null | null | null | mmdet/core/anchor/anchor_generator.py | Bin-ze/Food_detection | 1c1a067f12644f2b0289e49aec4637d580722f70 | [
"Apache-2.0"
] | null | null | null | import warnings
import mmcv
import numpy as np
import torch
from torch.nn.modules.utils import _pair
from .builder import PRIOR_GENERATORS
@PRIOR_GENERATORS.register_module()
class AnchorGenerator:
    """Standard anchor generator for 2D anchor-based detectors.

    Args:
        strides (list[int] | list[tuple[int, int]]): Strides of anchors
            in multiple feature levels in order (w, h).
        ratios (list[float]): The list of ratios between the height and width
            of anchors in a single level.
        scales (list[int] | None): Anchor scales for anchors in a single level.
            It cannot be set at the same time if `octave_base_scale` and
            `scales_per_octave` are set.
        base_sizes (list[int] | None): The basic sizes
            of anchors in multiple levels.
            If None is given, strides will be used as base_sizes.
            (If strides are non square, the shortest stride is taken.)
        scale_major (bool): Whether to multiply scales first when generating
            base anchors. If true, the anchors in the same row will have the
            same scales. By default it is True in V2.0
        octave_base_scale (int): The base scale of octave.
        scales_per_octave (int): Number of scales for each octave.
            `octave_base_scale` and `scales_per_octave` are usually used in
            retinanet and the `scales` should be None when they are set.
        centers (list[tuple[float, float]] | None): The centers of the anchor
            relative to the feature grid center in multiple feature levels.
            By default it is set to be None and not used. If a list of tuple of
            float is given, they will be used to shift the centers of anchors.
        center_offset (float): The offset of center in proportion to anchors'
            width and height. By default it is 0 in V2.0.

    Examples:
        >>> from mmdet.core import AnchorGenerator
        >>> self = AnchorGenerator([16], [1.], [1.], [9])
        >>> all_anchors = self.grid_anchors([(2, 2)], device='cpu')
        >>> print(all_anchors)
        [tensor([[-4.5000, -4.5000, 4.5000, 4.5000],
        [11.5000, -4.5000, 20.5000, 4.5000],
        [-4.5000, 11.5000, 4.5000, 20.5000],
        [11.5000, 11.5000, 20.5000, 20.5000]])]
        >>> self = AnchorGenerator([16, 32], [1.], [1.], [9, 18])
        >>> all_anchors = self.grid_anchors([(2, 2), (1, 1)], device='cpu')
        >>> print(all_anchors)
        [tensor([[-4.5000, -4.5000, 4.5000, 4.5000],
        [11.5000, -4.5000, 20.5000, 4.5000],
        [-4.5000, 11.5000, 4.5000, 20.5000],
        [11.5000, 11.5000, 20.5000, 20.5000]]), \
        tensor([[-9., -9., 9., 9.]])]
    """

    def __init__(self,
                 strides,
                 ratios,
                 scales=None,
                 base_sizes=None,
                 scale_major=True,
                 octave_base_scale=None,
                 scales_per_octave=None,
                 centers=None,
                 center_offset=0.):
        # check center and center_offset
        if center_offset != 0:
            assert centers is None, 'center cannot be set when center_offset' \
                f'!=0, {centers} is given.'
        if not (0 <= center_offset <= 1):
            raise ValueError('center_offset should be in range [0, 1], '
                             f'{center_offset} is given.')
        if centers is not None:
            assert len(centers) == len(strides), \
                'The number of strides should be the same as centers, got ' \
                f'{strides} and {centers}'

        # calculate base sizes of anchors; for non-square strides the
        # shorter side becomes the base size.
        self.strides = [_pair(stride) for stride in strides]
        self.base_sizes = [min(stride) for stride in self.strides
                           ] if base_sizes is None else base_sizes
        assert len(self.base_sizes) == len(self.strides), \
            'The number of strides should be the same as base sizes, got ' \
            f'{self.strides} and {self.base_sizes}'

        # calculate scales of anchors: exactly one of `scales` or the
        # (`octave_base_scale`, `scales_per_octave`) pair must be given.
        assert ((octave_base_scale is not None
                 and scales_per_octave is not None) ^ (scales is not None)), \
            'scales and octave_base_scale with scales_per_octave cannot' \
            ' be set at the same time'
        if scales is not None:
            self.scales = torch.Tensor(scales)
        elif octave_base_scale is not None and scales_per_octave is not None:
            # geometric progression 2^(i / scales_per_octave) within one
            # octave, scaled by the octave base scale
            octave_scales = np.array(
                [2**(i / scales_per_octave) for i in range(scales_per_octave)])
            scales = octave_scales * octave_base_scale
            self.scales = torch.Tensor(scales)
        else:
            raise ValueError('Either scales or octave_base_scale with '
                             'scales_per_octave should be set')

        self.octave_base_scale = octave_base_scale
        self.scales_per_octave = scales_per_octave
        self.ratios = torch.Tensor(ratios)
        self.scale_major = scale_major
        self.centers = centers
        self.center_offset = center_offset
        # base anchors are precomputed once per level and shifted over the
        # feature grid later
        self.base_anchors = self.gen_base_anchors()

    @property
    def num_base_anchors(self):
        """list[int]: total number of base anchors in a feature grid"""
        return self.num_base_priors

    @property
    def num_base_priors(self):
        """list[int]: The number of priors (anchors) at a point
        on the feature grid"""
        return [base_anchors.size(0) for base_anchors in self.base_anchors]

    @property
    def num_levels(self):
        """int: number of feature levels that the generator will be applied"""
        return len(self.strides)

    def gen_base_anchors(self):
        """Generate base anchors.

        Returns:
            list(torch.Tensor): Base anchors of a feature grid in multiple \
                feature levels.
        """
        multi_level_base_anchors = []
        for i, base_size in enumerate(self.base_sizes):
            center = None
            if self.centers is not None:
                center = self.centers[i]
            multi_level_base_anchors.append(
                self.gen_single_level_base_anchors(
                    base_size,
                    scales=self.scales,
                    ratios=self.ratios,
                    center=center))
        return multi_level_base_anchors

    def gen_single_level_base_anchors(self,
                                      base_size,
                                      scales,
                                      ratios,
                                      center=None):
        """Generate base anchors of a single level.

        Args:
            base_size (int | float): Basic size of an anchor.
            scales (torch.Tensor): Scales of the anchor.
            ratios (torch.Tensor): The ratio between between the height
                and width of anchors in a single level.
            center (tuple[float], optional): The center of the base anchor
                related to a single feature grid. Defaults to None.

        Returns:
            torch.Tensor: Anchors in a single-level feature maps.
        """
        w = base_size
        h = base_size
        if center is None:
            x_center = self.center_offset * w
            y_center = self.center_offset * h
        else:
            x_center, y_center = center

        # hs * ws == (base_size * scale)**2 for every ratio: the ratio
        # reshapes the box while preserving its area
        h_ratios = torch.sqrt(ratios)
        w_ratios = 1 / h_ratios
        if self.scale_major:
            ws = (w * w_ratios[:, None] * scales[None, :]).view(-1)
            hs = (h * h_ratios[:, None] * scales[None, :]).view(-1)
        else:
            ws = (w * scales[:, None] * w_ratios[None, :]).view(-1)
            hs = (h * scales[:, None] * h_ratios[None, :]).view(-1)

        # use float anchor and the anchor's center is aligned with the
        # pixel center
        base_anchors = [
            x_center - 0.5 * ws, y_center - 0.5 * hs, x_center + 0.5 * ws,
            y_center + 0.5 * hs
        ]
        base_anchors = torch.stack(base_anchors, dim=-1)

        return base_anchors

    def _meshgrid(self, x, y, row_major=True):
        """Generate mesh grid of x and y.

        Args:
            x (torch.Tensor): Grids of x dimension.
            y (torch.Tensor): Grids of y dimension.
            row_major (bool, optional): Whether to return y grids first.
                Defaults to True.

        Returns:
            tuple[torch.Tensor]: The mesh grids of x and y.
        """
        # use shape instead of len to keep tracing while exporting to onnx
        xx = x.repeat(y.shape[0])
        yy = y.view(-1, 1).repeat(1, x.shape[0]).view(-1)
        if row_major:
            return xx, yy
        else:
            return yy, xx

    def grid_priors(self, featmap_sizes, device='cuda'):
        """Generate grid anchors in multiple feature levels.

        Args:
            featmap_sizes (list[tuple]): List of feature map sizes in
                multiple feature levels.
            device (str): The device where the anchors will be put on.

        Return:
            list[torch.Tensor]: Anchors in multiple feature levels. \
                The sizes of each tensor should be [N, 4], where \
                N = width * height * num_base_anchors, width and height \
                are the sizes of the corresponding feature level, \
                num_base_anchors is the number of anchors for that level.
        """
        assert self.num_levels == len(featmap_sizes)
        multi_level_anchors = []
        for i in range(self.num_levels):
            anchors = self.single_level_grid_priors(
                featmap_sizes[i], level_idx=i, device=device)
            multi_level_anchors.append(anchors)
        return multi_level_anchors

    def single_level_grid_priors(self, featmap_size, level_idx, device='cuda'):
        """Generate grid anchors of a single level.

        Note:
            This function is usually called by method ``self.grid_priors``.

        Args:
            featmap_size (tuple[int]): Size of the feature maps.
            level_idx (int): The index of corresponding feature map level.
            device (str, optional): The device the tensor will be put on.
                Defaults to 'cuda'.

        Returns:
            torch.Tensor: Anchors in the overall feature maps.
        """
        base_anchors = self.base_anchors[level_idx].to(device)
        feat_h, feat_w = featmap_size
        stride_w, stride_h = self.strides[level_idx]
        # grid offsets of every cell's origin in image coordinates
        shift_x = torch.arange(0, feat_w, device=device) * stride_w
        shift_y = torch.arange(0, feat_h, device=device) * stride_h

        shift_xx, shift_yy = self._meshgrid(shift_x, shift_y)
        shifts = torch.stack([shift_xx, shift_yy, shift_xx, shift_yy], dim=-1)
        shifts = shifts.type_as(base_anchors)
        # first feat_w elements correspond to the first row of shifts
        # add A anchors (1, A, 4) to K shifts (K, 1, 4) to get
        # shifted anchors (K, A, 4), reshape to (K*A, 4)
        all_anchors = base_anchors[None, :, :] + shifts[:, None, :]
        all_anchors = all_anchors.view(-1, 4)
        # first A rows correspond to A anchors of (0, 0) in feature map,
        # then (0, 1), (0, 2), ...
        return all_anchors

    def sparse_priors(self,
                      prior_idxs,
                      featmap_size,
                      level_idx,
                      dtype=torch.float32,
                      device='cuda'):
        """Generate sparse anchors according to the ``prior_idxs``.

        Args:
            prior_idxs (Tensor): The index of corresponding anchors
                in the feature map.
            featmap_size (tuple[int]): feature map size arrange as (h, w).
            level_idx (int): The level index of corresponding feature
                map.
            dtype (obj:`torch.dtype`): Date type of points.Defaults to
                ``torch.float32``.
            device (obj:`torch.device`): The device where the points is
                located.
        Returns:
            Tensor: Anchor with shape (N, 4), N should be equal to
                the length of ``prior_idxs``.
        """
        height, width = featmap_size
        num_base_anchors = self.num_base_anchors[level_idx]
        # decompose flat index = (y * width + x) * num_base + base_id
        base_anchor_id = prior_idxs % num_base_anchors
        x = (prior_idxs //
             num_base_anchors) % width * self.strides[level_idx][0]
        y = (prior_idxs // width //
             num_base_anchors) % height * self.strides[level_idx][1]
        priors = torch.stack([x, y, x, y], 1).to(dtype).to(device) + \
            self.base_anchors[level_idx][base_anchor_id, :].to(device)

        return priors

    def grid_anchors(self, featmap_sizes, device='cuda'):
        """Generate grid anchors in multiple feature levels.

        Args:
            featmap_sizes (list[tuple]): List of feature map sizes in
                multiple feature levels.
            device (str): Device where the anchors will be put on.

        Return:
            list[torch.Tensor]: Anchors in multiple feature levels. \
                The sizes of each tensor should be [N, 4], where \
                N = width * height * num_base_anchors, width and height \
                are the sizes of the corresponding feature level, \
                num_base_anchors is the number of anchors for that level.
        """
        # NOTE: deprecation warning intentionally disabled; prefer
        # ``grid_priors`` for new code.
        assert self.num_levels == len(featmap_sizes)
        multi_level_anchors = []
        for i in range(self.num_levels):
            anchors = self.single_level_grid_anchors(
                self.base_anchors[i].to(device),
                featmap_sizes[i],
                self.strides[i],
                device=device)
            multi_level_anchors.append(anchors)
        return multi_level_anchors

    def single_level_grid_anchors(self,
                                  base_anchors,
                                  featmap_size,
                                  stride=(16, 16),
                                  device='cuda'):
        """Generate grid anchors of a single level.

        Note:
            This function is usually called by method ``self.grid_anchors``.

        Args:
            base_anchors (torch.Tensor): The base anchors of a feature grid.
            featmap_size (tuple[int]): Size of the feature maps.
            stride (tuple[int], optional): Stride of the feature map in order
                (w, h). Defaults to (16, 16).
            device (str, optional): Device the tensor will be put on.
                Defaults to 'cuda'.

        Returns:
            torch.Tensor: Anchors in the overall feature maps.
        """
        # NOTE: deprecation warning intentionally disabled; prefer
        # ``single_level_grid_priors`` for new code.

        # keep featmap_size as Tensor instead of int, so that we
        # can covert to ONNX correctly
        feat_h, feat_w = featmap_size
        shift_x = torch.arange(0, feat_w, device=device) * stride[0]
        shift_y = torch.arange(0, feat_h, device=device) * stride[1]

        shift_xx, shift_yy = self._meshgrid(shift_x, shift_y)
        shifts = torch.stack([shift_xx, shift_yy, shift_xx, shift_yy], dim=-1)
        shifts = shifts.type_as(base_anchors)
        # first feat_w elements correspond to the first row of shifts
        # add A anchors (1, A, 4) to K shifts (K, 1, 4) to get
        # shifted anchors (K, A, 4), reshape to (K*A, 4)
        all_anchors = base_anchors[None, :, :] + shifts[:, None, :]
        all_anchors = all_anchors.view(-1, 4)
        # first A rows correspond to A anchors of (0, 0) in feature map,
        # then (0, 1), (0, 2), ...
        return all_anchors

    def valid_flags(self, featmap_sizes, pad_shape, device='cuda'):
        """Generate valid flags of anchors in multiple feature levels.

        Args:
            featmap_sizes (list(tuple)): List of feature map sizes in
                multiple feature levels.
            pad_shape (tuple): The padded shape of the image.
            device (str): Device where the anchors will be put on.

        Return:
            list(torch.Tensor): Valid flags of anchors in multiple levels.
        """
        assert self.num_levels == len(featmap_sizes)
        multi_level_flags = []
        for i in range(self.num_levels):
            anchor_stride = self.strides[i]
            feat_h, feat_w = featmap_sizes[i]
            h, w = pad_shape[:2]
            # portion of the feature map actually covered by the (padded)
            # image; cells beyond it hold anchors on pure padding
            valid_feat_h = min(int(np.ceil(h / anchor_stride[1])), feat_h)
            valid_feat_w = min(int(np.ceil(w / anchor_stride[0])), feat_w)
            flags = self.single_level_valid_flags((feat_h, feat_w),
                                                  (valid_feat_h, valid_feat_w),
                                                  self.num_base_anchors[i],
                                                  device=device)
            multi_level_flags.append(flags)
        return multi_level_flags

    def single_level_valid_flags(self,
                                 featmap_size,
                                 valid_size,
                                 num_base_anchors,
                                 device='cuda'):
        """Generate the valid flags of anchor in a single feature map.

        Args:
            featmap_size (tuple[int]): The size of feature maps, arrange
                as (h, w).
            valid_size (tuple[int]): The valid size of the feature maps.
            num_base_anchors (int): The number of base anchors.
            device (str, optional): Device where the flags will be put on.
                Defaults to 'cuda'.

        Returns:
            torch.Tensor: The valid flags of each anchor in a single level \
                feature map.
        """
        feat_h, feat_w = featmap_size
        valid_h, valid_w = valid_size
        assert valid_h <= feat_h and valid_w <= feat_w
        valid_x = torch.zeros(feat_w, dtype=torch.bool, device=device)
        valid_y = torch.zeros(feat_h, dtype=torch.bool, device=device)
        valid_x[:valid_w] = 1
        valid_y[:valid_h] = 1
        # a grid cell is valid iff both its row and column are valid
        valid_xx, valid_yy = self._meshgrid(valid_x, valid_y)
        valid = valid_xx & valid_yy
        # replicate each cell flag for all of its base anchors
        valid = valid[:, None].expand(valid.size(0),
                                      num_base_anchors).contiguous().view(-1)
        return valid

    def __repr__(self):
        """str: a string that describes the module"""
        indent_str = '    '
        repr_str = self.__class__.__name__ + '(\n'
        repr_str += f'{indent_str}strides={self.strides},\n'
        repr_str += f'{indent_str}ratios={self.ratios},\n'
        repr_str += f'{indent_str}scales={self.scales},\n'
        repr_str += f'{indent_str}base_sizes={self.base_sizes},\n'
        repr_str += f'{indent_str}scale_major={self.scale_major},\n'
        repr_str += f'{indent_str}octave_base_scale='
        repr_str += f'{self.octave_base_scale},\n'
        repr_str += f'{indent_str}scales_per_octave='
        repr_str += f'{self.scales_per_octave},\n'
        repr_str += f'{indent_str}num_levels={self.num_levels}\n'
        repr_str += f'{indent_str}centers={self.centers},\n'
        repr_str += f'{indent_str}center_offset={self.center_offset})'
        return repr_str
@PRIOR_GENERATORS.register_module()
class SSDAnchorGenerator(AnchorGenerator):
    """Anchor generator for SSD.
    Args:
        strides (list[int] | list[tuple[int, int]]): Strides of anchors
            in multiple feature levels.
        ratios (list[float]): The list of ratios between the height and width
            of anchors in a single level.
        min_sizes (list[float]): The list of minimum anchor sizes on each
            level.
        max_sizes (list[float]): The list of maximum anchor sizes on each
            level.
        basesize_ratio_range (tuple(float)): Ratio range of anchors. Being
            used when not setting min_sizes and max_sizes.
        input_size (int): Size of feature map, 300 for SSD300, 512 for
            SSD512. Being used when not setting min_sizes and max_sizes.
        scale_major (bool): Whether to multiply scales first when generating
            base anchors. If true, the anchors in the same row will have the
            same scales. It is always set to be False in SSD.
    """

    def __init__(self,
                 strides,
                 ratios,
                 min_sizes=None,
                 max_sizes=None,
                 basesize_ratio_range=(0.15, 0.9),
                 input_size=300,
                 scale_major=True):
        assert len(strides) == len(ratios)
        # min_sizes and max_sizes must be given together or not at all.
        assert not (min_sizes is None) ^ (max_sizes is None)
        self.strides = [_pair(stride) for stride in strides]
        self.centers = [(stride[0] / 2., stride[1] / 2.)
                        for stride in self.strides]
        # BUG FIX: set these unconditionally so __repr__ does not raise
        # AttributeError when explicit min_sizes/max_sizes are supplied.
        self.input_size = input_size
        self.basesize_ratio_range = basesize_ratio_range
        if min_sizes is None and max_sizes is None:
            # use hard code to generate SSD anchors
            assert mmcv.is_tuple_of(basesize_ratio_range, float)
            # calculate anchor ratios and sizes
            min_ratio, max_ratio = basesize_ratio_range
            min_ratio = int(min_ratio * 100)
            max_ratio = int(max_ratio * 100)
            # NOTE(review): np.floor is applied to the (integer) difference
            # before the division; verify against upstream whether the floor
            # of the quotient was intended.
            step = int(np.floor(max_ratio - min_ratio) / (self.num_levels - 2))
            min_sizes = []
            max_sizes = []
            for ratio in range(int(min_ratio), int(max_ratio) + 1, step):
                min_sizes.append(int(self.input_size * ratio / 100))
                max_sizes.append(int(self.input_size * (ratio + step) / 100))
            # The first (highest-resolution) level uses special-cased sizes
            # that depend on the dataset convention (COCO vs VOC).
            if self.input_size == 300:
                if basesize_ratio_range[0] == 0.15:  # SSD300 COCO
                    min_sizes.insert(0, int(self.input_size * 7 / 100))
                    max_sizes.insert(0, int(self.input_size * 15 / 100))
                elif basesize_ratio_range[0] == 0.2:  # SSD300 VOC
                    min_sizes.insert(0, int(self.input_size * 10 / 100))
                    max_sizes.insert(0, int(self.input_size * 20 / 100))
                else:
                    raise ValueError(
                        'basesize_ratio_range[0] should be either 0.15'
                        'or 0.2 when input_size is 300, got '
                        f'{basesize_ratio_range[0]}.')
            elif self.input_size == 512:
                if basesize_ratio_range[0] == 0.1:  # SSD512 COCO
                    min_sizes.insert(0, int(self.input_size * 4 / 100))
                    max_sizes.insert(0, int(self.input_size * 10 / 100))
                elif basesize_ratio_range[0] == 0.15:  # SSD512 VOC
                    min_sizes.insert(0, int(self.input_size * 7 / 100))
                    max_sizes.insert(0, int(self.input_size * 15 / 100))
                else:
                    raise ValueError(
                        'When not setting min_sizes and max_sizes,'
                        'basesize_ratio_range[0] should be either 0.1'
                        'or 0.15 when input_size is 512, got'
                        f' {basesize_ratio_range[0]}.')
            else:
                raise ValueError(
                    'Only support 300 or 512 in SSDAnchorGenerator when '
                    'not setting min_sizes and max_sizes, '
                    f'got {self.input_size}.')
        assert len(min_sizes) == len(max_sizes) == len(strides)
        anchor_ratios = []
        anchor_scales = []
        for k in range(len(self.strides)):
            # Scale 1 plus the geometric mean of min/max size ratio.
            scales = [1., np.sqrt(max_sizes[k] / min_sizes[k])]
            anchor_ratio = [1.]
            for r in ratios[k]:
                anchor_ratio += [1 / r, r]  # 4 or 6 ratio
            anchor_ratios.append(torch.Tensor(anchor_ratio))
            anchor_scales.append(torch.Tensor(scales))
        self.base_sizes = min_sizes
        self.scales = anchor_scales
        self.ratios = anchor_ratios
        self.scale_major = scale_major
        self.center_offset = 0
        self.base_anchors = self.gen_base_anchors()

    def gen_base_anchors(self):
        """Generate base anchors.
        Returns:
            list(torch.Tensor): Base anchors of a feature grid in multiple \
                feature levels.
        """
        multi_level_base_anchors = []
        for i, base_size in enumerate(self.base_sizes):
            base_anchors = self.gen_single_level_base_anchors(
                base_size,
                scales=self.scales[i],
                ratios=self.ratios[i],
                center=self.centers[i])
            # Reorder so the second anchor is the extra-scale square anchor,
            # matching the SSD anchor ordering convention.
            indices = list(range(len(self.ratios[i])))
            indices.insert(1, len(indices))
            base_anchors = torch.index_select(base_anchors, 0,
                                              torch.LongTensor(indices))
            multi_level_base_anchors.append(base_anchors)
        return multi_level_base_anchors

    def __repr__(self):
        """str: a string that describes the module"""
        indent_str = '    '
        repr_str = self.__class__.__name__ + '(\n'
        repr_str += f'{indent_str}strides={self.strides},\n'
        # BUG FIX: 'scales' was emitted twice (once here, once after
        # 'input_size'); the duplicate line was removed.
        repr_str += f'{indent_str}scales={self.scales},\n'
        repr_str += f'{indent_str}scale_major={self.scale_major},\n'
        repr_str += f'{indent_str}input_size={self.input_size},\n'
        repr_str += f'{indent_str}ratios={self.ratios},\n'
        repr_str += f'{indent_str}num_levels={self.num_levels},\n'
        repr_str += f'{indent_str}base_sizes={self.base_sizes},\n'
        repr_str += f'{indent_str}basesize_ratio_range='
        repr_str += f'{self.basesize_ratio_range})'
        return repr_str
@PRIOR_GENERATORS.register_module()
class LegacyAnchorGenerator(AnchorGenerator):
    """Legacy anchor generator used in MMDetection V1.x.
    Note:
        Difference to the V2.0 anchor generator:
        1. The center offset of V1.x anchors are set to be 0.5 rather than 0.
        2. The width/height are minused by 1 when calculating the anchors' \
            centers and corners to meet the V1.x coordinate system.
        3. The anchors' corners are quantized.
    Args:
        strides (list[int] | list[tuple[int]]): Strides of anchors
            in multiple feature levels.
        ratios (list[float]): The list of ratios between the height and width
            of anchors in a single level.
        scales (list[int] | None): Anchor scales for anchors in a single level.
            It cannot be set at the same time if `octave_base_scale` and
            `scales_per_octave` are set.
        base_sizes (list[int]): The basic sizes of anchors in multiple levels.
            If None is given, strides will be used to generate base_sizes.
        scale_major (bool): Whether to multiply scales first when generating
            base anchors. If true, the anchors in the same row will have the
            same scales. By default it is True in V2.0
        octave_base_scale (int): The base scale of octave.
        scales_per_octave (int): Number of scales for each octave.
            `octave_base_scale` and `scales_per_octave` are usually used in
            retinanet and the `scales` should be None when they are set.
        centers (list[tuple[float, float]] | None): The centers of the anchor
            relative to the feature grid center in multiple feature levels.
            By default it is set to be None and not used. If a list of float
            is given, this list will be used to shift the centers of anchors.
        center_offset (float): The offset of center in proportion to anchors'
            width and height. By default it is 0 in V2.0 but it should be 0.5
            in v1.x models.
    Examples:
        >>> from mmdet.core import LegacyAnchorGenerator
        >>> self = LegacyAnchorGenerator(
        >>>     [16], [1.], [1.], [9], center_offset=0.5)
        >>> all_anchors = self.grid_anchors(((2, 2),), device='cpu')
        >>> print(all_anchors)
        [tensor([[ 0.,  0.,  8.,  8.],
                [16.,  0., 24.,  8.],
                [ 0., 16.,  8., 24.],
                [16., 16., 24., 24.]])]
    """

    def gen_single_level_base_anchors(self,
                                      base_size,
                                      scales,
                                      ratios,
                                      center=None):
        """Generate base anchors of a single level.
        Note:
            The width/height of anchors are minused by 1 when calculating \
                the centers and corners to meet the V1.x coordinate system.
        Args:
            base_size (int | float): Basic size of an anchor.
            scales (torch.Tensor): Scales of the anchor.
            ratios (torch.Tensor): The ratio between between the height.
                and width of anchors in a single level.
            center (tuple[float], optional): The center of the base anchor
                related to a single feature grid. Defaults to None.
        Returns:
            torch.Tensor: Anchors in a single-level feature map.
        """
        w = base_size
        h = base_size
        if center is not None:
            x_center, y_center = center
        else:
            # Legacy (v1.x) centers are placed inside a (w-1, h-1) box.
            x_center = self.center_offset * (w - 1)
            y_center = self.center_offset * (h - 1)
        h_ratios = torch.sqrt(ratios)
        w_ratios = 1 / h_ratios
        if self.scale_major:
            ws = (w * w_ratios[:, None] * scales[None, :]).view(-1)
            hs = (h * h_ratios[:, None] * scales[None, :]).view(-1)
        else:
            ws = (w * scales[:, None] * w_ratios[None, :]).view(-1)
            hs = (h * scales[:, None] * h_ratios[None, :]).view(-1)
        # Half extents use (size - 1) so corners land on the V1.x grid.
        half_ws = 0.5 * (ws - 1)
        half_hs = 0.5 * (hs - 1)
        corners = [
            x_center - half_ws, y_center - half_hs,
            x_center + half_ws, y_center + half_hs
        ]
        # V1.x quantizes the corner coordinates with round().
        return torch.stack(corners, dim=-1).round()
@PRIOR_GENERATORS.register_module()
class LegacySSDAnchorGenerator(SSDAnchorGenerator, LegacyAnchorGenerator):
    """Legacy anchor generator used in MMDetection V1.x.
    The difference between `LegacySSDAnchorGenerator` and `SSDAnchorGenerator`
    can be found in `LegacyAnchorGenerator`.
    """

    def __init__(self,
                 strides,
                 ratios,
                 basesize_ratio_range,
                 input_size=300,
                 scale_major=True):
        super(LegacySSDAnchorGenerator, self).__init__(
            strides=strides,
            ratios=ratios,
            basesize_ratio_range=basesize_ratio_range,
            input_size=input_size,
            scale_major=scale_major)
        # BUG FIX: iterate over the normalized ``self.strides`` pairs set by
        # the parent __init__ instead of the raw ``strides`` argument, which
        # raised a TypeError when strides were given as tuples (the documented
        # list[tuple[int, int]] form). Identical result for int strides.
        self.centers = [((stride[0] - 1) / 2., (stride[1] - 1) / 2.)
                        for stride in self.strides]
        # Centers changed, so the cached base anchors must be regenerated.
        self.base_anchors = self.gen_base_anchors()
@PRIOR_GENERATORS.register_module()
class YOLOAnchorGenerator(AnchorGenerator):
    """Anchor generator for YOLO.
    Args:
        strides (list[int] | list[tuple[int, int]]): Strides of anchors
            in multiple feature levels.
        base_sizes (list[list[tuple[int, int]]]): The basic sizes
            of anchors in multiple levels.
    """
    def __init__(self, strides, base_sizes):
        self.strides = [_pair(stride) for stride in strides]
        # Anchor centers sit in the middle of each grid cell.
        self.centers = [(stride[0] / 2., stride[1] / 2.)
                        for stride in self.strides]
        self.base_sizes = []
        num_anchor_per_level = len(base_sizes[0])
        # Every level must declare the same number of anchors.
        for base_sizes_per_level in base_sizes:
            assert num_anchor_per_level == len(base_sizes_per_level)
            self.base_sizes.append(
                [_pair(base_size) for base_size in base_sizes_per_level])
        self.base_anchors = self.gen_base_anchors()
    @property
    def num_levels(self):
        """int: number of feature levels that the generator will be applied"""
        return len(self.base_sizes)
    def gen_base_anchors(self):
        """Generate base anchors.
        Returns:
            list(torch.Tensor): Base anchors of a feature grid in multiple \
                feature levels.
        """
        multi_level_base_anchors = []
        for i, base_sizes_per_level in enumerate(self.base_sizes):
            center = None
            # self.centers is always set in __init__, so center is non-None
            # in practice; the guard mirrors the base-class convention.
            if self.centers is not None:
                center = self.centers[i]
            multi_level_base_anchors.append(
                self.gen_single_level_base_anchors(base_sizes_per_level,
                                                   center))
        return multi_level_base_anchors
    def gen_single_level_base_anchors(self, base_sizes_per_level, center=None):
        """Generate base anchors of a single level.
        Args:
            base_sizes_per_level (list[tuple[int, int]]): Basic sizes of
                anchors.
            center (tuple[float], optional): The center of the base anchor
                related to a single feature grid. Defaults to None.
        Returns:
            torch.Tensor: Anchors in a single-level feature maps.
        """
        # NOTE(review): despite the default, a None center would raise here;
        # callers always pass self.centers[i] — confirm before relying on it.
        x_center, y_center = center
        base_anchors = []
        for base_size in base_sizes_per_level:
            w, h = base_size
            # use float anchor and the anchor's center is aligned with the
            # pixel center
            base_anchor = torch.Tensor([
                x_center - 0.5 * w, y_center - 0.5 * h, x_center + 0.5 * w,
                y_center + 0.5 * h
            ])
            base_anchors.append(base_anchor)
        # Shape (num_base_anchors, 4) in (x1, y1, x2, y2) order.
        base_anchors = torch.stack(base_anchors, dim=0)
        return base_anchors
    def responsible_flags(self, featmap_sizes, gt_bboxes, device='cuda'):
        """Generate responsible anchor flags of grid cells in multiple scales.
        Args:
            featmap_sizes (list(tuple)): List of feature map sizes in multiple
                feature levels.
            gt_bboxes (Tensor): Ground truth boxes, shape (n, 4).
            device (str): Device where the anchors will be put on.
        Return:
            list(torch.Tensor): responsible flags of anchors in multiple level
        """
        assert self.num_levels == len(featmap_sizes)
        multi_level_responsible_flags = []
        for i in range(self.num_levels):
            anchor_stride = self.strides[i]
            flags = self.single_level_responsible_flags(
                featmap_sizes[i],
                gt_bboxes,
                anchor_stride,
                self.num_base_anchors[i],
                device=device)
            multi_level_responsible_flags.append(flags)
        return multi_level_responsible_flags
    def single_level_responsible_flags(self,
                                       featmap_size,
                                       gt_bboxes,
                                       stride,
                                       num_base_anchors,
                                       device='cuda'):
        """Generate the responsible flags of anchor in a single feature map.
        Args:
            featmap_size (tuple[int]): The size of feature maps.
            gt_bboxes (Tensor): Ground truth boxes, shape (n, 4).
            stride (tuple(int)): stride of current level
            num_base_anchors (int): The number of base anchors.
            device (str, optional): Device where the flags will be put on.
                Defaults to 'cuda'.
        Returns:
            torch.Tensor: The valid flags of each anchor in a single level \
                feature map.
        """
        feat_h, feat_w = featmap_size
        # Box centers in image coordinates; boxes are (x1, y1, x2, y2).
        gt_bboxes_cx = ((gt_bboxes[:, 0] + gt_bboxes[:, 2]) * 0.5).to(device)
        gt_bboxes_cy = ((gt_bboxes[:, 1] + gt_bboxes[:, 3]) * 0.5).to(device)
        # Grid cell containing each box center at this level's stride.
        # NOTE(review): centers outside the padded image would produce
        # out-of-range indices below — confirm inputs are clipped upstream.
        gt_bboxes_grid_x = torch.floor(gt_bboxes_cx / stride[0]).long()
        gt_bboxes_grid_y = torch.floor(gt_bboxes_cy / stride[1]).long()
        # row major indexing
        gt_bboxes_grid_idx = gt_bboxes_grid_y * feat_w + gt_bboxes_grid_x
        responsible_grid = torch.zeros(
            feat_h * feat_w, dtype=torch.uint8, device=device)
        responsible_grid[gt_bboxes_grid_idx] = 1
        # Repeat each cell flag for every base anchor at that cell, keeping
        # the (cell-major, anchor-minor) flattening order.
        responsible_grid = responsible_grid[:, None].expand(
            responsible_grid.size(0), num_base_anchors).contiguous().view(-1)
        return responsible_grid
| 42.793466 | 79 | 0.570977 |
1245552c15aa7009842a797583daa87aba0a6961 | 50,276 | py | Python | allennlp/training/trainer.py | eitanhaimashiah/allennlp | 8f0baa229fe0312d140d18dc1bdb894beb2150c7 | [
"Apache-2.0"
] | null | null | null | allennlp/training/trainer.py | eitanhaimashiah/allennlp | 8f0baa229fe0312d140d18dc1bdb894beb2150c7 | [
"Apache-2.0"
] | 1 | 2018-06-08T21:12:54.000Z | 2018-06-08T21:12:54.000Z | allennlp/training/trainer.py | eitanhaimashiah/allennlp | 8f0baa229fe0312d140d18dc1bdb894beb2150c7 | [
"Apache-2.0"
] | 1 | 2018-10-22T18:52:14.000Z | 2018-10-22T18:52:14.000Z | """
A :class:`~allennlp.training.trainer.Trainer` is responsible for training a
:class:`~allennlp.models.model.Model`.
Typically you might create a configuration file specifying the model and
training parameters and then use :mod:`~allennlp.commands.train`
rather than instantiating a ``Trainer`` yourself.
"""
# pylint: disable=too-many-lines
import logging
import os
import shutil
import time
import re
import datetime
import traceback
from typing import Dict, Optional, List, Tuple, Union, Iterable, Any, Set
import torch
import torch.optim.lr_scheduler
from torch.nn.parallel import replicate, parallel_apply
from torch.nn.parallel.scatter_gather import scatter_kwargs, gather
from tensorboardX import SummaryWriter
from allennlp.common import Params
from allennlp.common.checks import ConfigurationError
from allennlp.common.util import peak_memory_mb, gpu_memory_mb
from allennlp.common.tqdm import Tqdm
from allennlp.data.instance import Instance
from allennlp.data.iterators.data_iterator import DataIterator
from allennlp.models.model import Model
from allennlp.nn import util
from allennlp.training.learning_rate_schedulers import LearningRateScheduler
from allennlp.training.optimizers import Optimizer
logger = logging.getLogger(__name__) # pylint: disable=invalid-name
def is_sparse(tensor) -> bool:
    """Return ``True`` if ``tensor`` uses a sparse (COO) layout."""
    # Annotated for consistency with ``sparse_clip_norm`` below.
    return tensor.is_sparse
def sparse_clip_norm(parameters, max_norm, norm_type=2) -> float:
    """Clips gradient norm of an iterable of parameters.
    The norm is computed over all gradients together, as if they were
    concatenated into a single vector. Gradients are modified in-place.
    Supports sparse gradients.
    Parameters
    ----------
    parameters : ``(Iterable[torch.Tensor])``
        An iterable of Tensors that will have gradients normalized.
    max_norm : ``float``
        The max norm of the gradients.
    norm_type : ``float``
        The type of the used p-norm. Can be ``'inf'`` for infinity norm.
    Returns
    -------
    Total norm of the parameters (viewed as a single vector).
    """
    # pylint: disable=invalid-name,protected-access
    # Idiom fix: list comprehension instead of list(filter(lambda ...));
    # the trivial ``is_sparse`` helper is inlined as ``.is_sparse``.
    parameters = [p for p in parameters if p.grad is not None]
    max_norm = float(max_norm)
    norm_type = float(norm_type)
    if norm_type == float('inf'):
        total_norm = max(p.grad.data.abs().max() for p in parameters)
    else:
        total_norm = 0
        for p in parameters:
            if p.grad.is_sparse:
                # Coalesce sums duplicated indices so the norm of the stored
                # values equals the true norm of the sparse gradient.
                grad = p.grad.data.coalesce()
                param_norm = grad._values().norm(norm_type)
            else:
                param_norm = p.grad.data.norm(norm_type)
            total_norm += param_norm ** norm_type
        total_norm = total_norm ** (1. / norm_type)
    # Rescale only when the total norm exceeds max_norm (1e-6 avoids
    # division by zero).
    clip_coef = max_norm / (total_norm + 1e-6)
    if clip_coef < 1:
        for p in parameters:
            if p.grad.is_sparse:
                p.grad.data._values().mul_(clip_coef)
            else:
                p.grad.data.mul_(clip_coef)
    return total_norm
def move_optimizer_to_cuda(optimizer):
    """
    Move the optimizer state to GPU, if necessary.
    After calling, any parameter specific state in the optimizer
    will be located on the same device as the parameter.
    """
    for group in optimizer.param_groups:
        for param in group['params']:
            if not param.is_cuda:
                continue
            state = optimizer.state[param]
            # Only tensor-valued state (e.g. momentum buffers) is moved;
            # scalar entries such as step counts are left untouched.
            for key, value in state.items():
                if isinstance(value, torch.Tensor):
                    state[key] = value.cuda(device=param.get_device())
class TensorboardWriter:
    """
    Wraps a pair of ``SummaryWriter`` instances but is a no-op if they're ``None``.
    Allows Tensorboard logging without always checking for Nones first.
    """
    def __init__(self, train_log: SummaryWriter = None, validation_log: SummaryWriter = None) -> None:
        self._train_log = train_log
        self._validation_log = validation_log

    @staticmethod
    def _item(value: Any):
        # Tensors expose .item(); plain Python numbers pass through unchanged.
        return value.item() if hasattr(value, 'item') else value

    def add_train_scalar(self, name: str, value: float, global_step: int) -> None:
        if self._train_log is None:
            return
        self._train_log.add_scalar(name, self._item(value), global_step)

    def add_train_histogram(self, name: str, values: torch.Tensor, global_step: int) -> None:
        if self._train_log is None:
            return
        # Non-tensor values are silently ignored, matching scalar behavior.
        if isinstance(values, torch.Tensor):
            values_to_write = values.cpu().data.numpy().flatten()
            self._train_log.add_histogram(name, values_to_write, global_step)

    def add_validation_scalar(self, name: str, value: float, global_step: int) -> None:
        if self._validation_log is None:
            return
        self._validation_log.add_scalar(name, self._item(value), global_step)
def time_to_str(timestamp: int) -> str:
    """
    Convert seconds past Epoch to human readable string.
    """
    # strftime with zero-padded fields produces the same
    # 'YYYY-MM-DD-hh-mm-ss' layout as manual formatting.
    return datetime.datetime.fromtimestamp(timestamp).strftime(
        '%Y-%m-%d-%H-%M-%S')
def str_to_time(time_str: str) -> datetime.datetime:
    """
    Convert human readable string to datetime.datetime.
    """
    # Split on '-' and feed the integer pieces straight into the
    # datetime constructor (year, month, day, hour, minute, second).
    return datetime.datetime(*map(int, time_str.split('-')))
class Trainer:
def __init__(self,
model: Model,
optimizer: torch.optim.Optimizer,
iterator: DataIterator,
train_dataset: Iterable[Instance],
validation_dataset: Optional[Iterable[Instance]] = None,
patience: Optional[int] = None,
validation_metric: str = "-loss",
validation_iterator: DataIterator = None,
shuffle: bool = True,
num_epochs: int = 20,
serialization_dir: Optional[str] = None,
num_serialized_models_to_keep: int = 20,
keep_serialized_model_every_num_seconds: int = None,
model_save_interval: float = None,
cuda_device: Union[int, List] = -1,
grad_norm: Optional[float] = None,
grad_clipping: Optional[float] = None,
learning_rate_scheduler: Optional[LearningRateScheduler] = None,
summary_interval: int = 100,
histogram_interval: int = None) -> None:
"""
Parameters
----------
model : ``Model``, required.
An AllenNLP model to be optimized. Pytorch Modules can also be optimized if
their ``forward`` method returns a dictionary with a "loss" key, containing a
scalar tensor representing the loss function to be optimized.
optimizer : ``torch.nn.Optimizer``, required.
An instance of a Pytorch Optimizer, instantiated with the parameters of the
model to be optimized.
iterator : ``DataIterator``, required.
A method for iterating over a ``Dataset``, yielding padded indexed batches.
train_dataset : ``Dataset``, required.
A ``Dataset`` to train on. The dataset should have already been indexed.
validation_dataset : ``Dataset``, optional, (default = None).
A ``Dataset`` to evaluate on. The dataset should have already been indexed.
patience : Optional[int] > 0, optional (default=None)
Number of epochs to be patient before early stopping: the training is stopped
after ``patience`` epochs with no improvement. If given, it must be ``> 0``.
If None, early stopping is disabled.
validation_metric : str, optional (default="loss")
Validation metric to measure for whether to stop training using patience
and whether to serialize an ``is_best`` model each epoch. The metric name
must be prepended with either "+" or "-", which specifies whether the metric
is an increasing or decreasing function.
validation_iterator : ``DataIterator``, optional (default=None)
An iterator to use for the validation set. If ``None``, then
use the training `iterator`.
shuffle: ``bool``, optional (default=True)
Whether to shuffle the instances in the iterator or not.
num_epochs : int, optional (default = 20)
Number of training epochs.
serialization_dir : str, optional (default=None)
Path to directory for saving and loading model files. Models will not be saved if
this parameter is not passed.
num_serialized_models_to_keep : ``int``, optional (default=20)
Number of previous model checkpoints to retain. Default is to keep 20 checkpoints.
A value of None or -1 means all checkpoints will be kept.
keep_serialized_model_every_num_seconds : ``int``, optional (default=None)
If num_serialized_models_to_keep is not None, then occasionally it's useful to
save models at a given interval in addition to the last num_serialized_models_to_keep.
To do so, specify keep_serialized_model_every_num_seconds as the number of seconds
between permanently saved checkpoints. Note that this option is only used if
num_serialized_models_to_keep is not None, otherwise all checkpoints are kept.
model_save_interval : ``float``, optional (default=None)
If provided, then serialize models every ``model_save_interval``
seconds within single epochs. In all cases, models are also saved
at the end of every epoch if ``serialization_dir`` is provided.
cuda_device : ``int``, optional (default = -1)
An integer specifying the CUDA device to use. If -1, the CPU is used.
grad_norm : ``float``, optional, (default = None).
If provided, gradient norms will be rescaled to have a maximum of this value.
grad_clipping : ``float``, optional (default = ``None``).
If provided, gradients will be clipped `during the backward pass` to have an (absolute)
maximum of this value. If you are getting ``NaNs`` in your gradients during training
that are not solved by using ``grad_norm``, you may need this.
learning_rate_scheduler : ``PytorchLRScheduler``, optional, (default = None)
A Pytorch learning rate scheduler. The learning rate will be decayed with respect to
this schedule at the end of each epoch. If you use
:class:`torch.optim.lr_scheduler.ReduceLROnPlateau`, this will use the ``validation_metric``
provided to determine if learning has plateaued. To support updating the learning
rate on every batch, this can optionally implement ``step_batch(batch_num_total)`` which
updates the learning rate given the batch number.
summary_interval: ``int``, optional, (default = 100)
Number of batches between logging scalars to tensorboard
histogram_interval : ``int``, optional, (default = ``None``)
If not None, then log histograms to tensorboard every ``histogram_interval`` batches.
When this parameter is specified, the following additional logging is enabled:
* Histograms of model parameters
* The ratio of parameter update norm to parameter norm
* Histogram of layer activations
We log histograms of the parameters returned by
``model.get_parameters_for_histogram_tensorboard_logging``.
The layer activations are logged for any modules in the ``Model`` that have
the attribute ``should_log_activations`` set to ``True``. Logging
histograms requires a number of GPU-CPU copies during training and is typically
slow, so we recommend logging histograms relatively infrequently.
Note: only Modules that return tensors, tuples of tensors or dicts
with tensors as values currently support activation logging.
"""
self._model = model
self._iterator = iterator
self._validation_iterator = validation_iterator
self._shuffle = shuffle
self._optimizer = optimizer
self._train_data = train_dataset
self._validation_data = validation_dataset
if patience is None: # no early stopping
if validation_dataset:
logger.warning('You provided a validation dataset but patience was set to None, '
'meaning that early stopping is disabled')
elif (not isinstance(patience, int)) or patience <= 0:
raise ConfigurationError('{} is an invalid value for "patience": it must be a positive integer '
'or None (if you want to disable early stopping)'.format(patience))
self._patience = patience
self._num_epochs = num_epochs
self._serialization_dir = serialization_dir
self._num_serialized_models_to_keep = num_serialized_models_to_keep
self._keep_serialized_model_every_num_seconds = keep_serialized_model_every_num_seconds
self._serialized_paths: List[Any] = []
self._last_permanent_saved_checkpoint_time = time.time()
self._model_save_interval = model_save_interval
self._grad_norm = grad_norm
self._grad_clipping = grad_clipping
self._learning_rate_scheduler = learning_rate_scheduler
increase_or_decrease = validation_metric[0]
if increase_or_decrease not in ["+", "-"]:
raise ConfigurationError("Validation metrics must specify whether they should increase "
"or decrease by pre-pending the metric name with a +/-.")
self._validation_metric = validation_metric[1:]
self._validation_metric_decreases = increase_or_decrease == "-"
if not isinstance(cuda_device, int) and not isinstance(cuda_device, list):
raise ConfigurationError("Expected an int or list for cuda_device, got {}".format(cuda_device))
if isinstance(cuda_device, list):
logger.warning(f"Multiple GPU support is experimental not recommended for use. "
"In some cases it may lead to incorrect results or undefined behavior.")
self._multiple_gpu = True
self._cuda_devices = cuda_device
# data_parallel will take care of transfering to cuda devices,
# so the iterator keeps data on CPU.
self._iterator_device = -1
else:
self._multiple_gpu = False
self._cuda_devices = [cuda_device]
self._iterator_device = cuda_device
if self._cuda_devices[0] != -1:
self._model = self._model.cuda(self._cuda_devices[0])
self._log_interval = 10 # seconds
self._summary_interval = summary_interval
self._histogram_interval = histogram_interval
self._log_histograms_this_batch = False
# We keep the total batch number as a class variable because it
# is used inside a closure for the hook which logs activations in
# ``_enable_activation_logging``.
self._batch_num_total = 0
self._last_log = 0.0 # time of last logging
if serialization_dir is not None:
train_log = SummaryWriter(os.path.join(serialization_dir, "log", "train"))
validation_log = SummaryWriter(os.path.join(serialization_dir, "log", "validation"))
self._tensorboard = TensorboardWriter(train_log, validation_log)
else:
self._tensorboard = TensorboardWriter()
self._warned_tqdm_ignores_underscores = False
def _enable_gradient_clipping(self) -> None:
if self._grad_clipping is not None:
# Pylint is unable to tell that we're in the case that _grad_clipping is not None...
# pylint: disable=invalid-unary-operand-type
clip_function = lambda grad: grad.clamp(-self._grad_clipping, self._grad_clipping)
for parameter in self._model.parameters():
if parameter.requires_grad:
parameter.register_hook(clip_function)
    def _enable_activation_logging(self) -> None:
        """
        Log activations to tensorboard
        Registers a forward hook on every sub-module that opts in via a
        truthy ``should_log_activations`` attribute. The hook sends the
        module's outputs to tensorboard, but only on batches for which
        ``self._log_histograms_this_batch`` is True.
        """
        if self._histogram_interval is not None:
            # To log activation histograms to the forward pass, we register
            # a hook on forward to capture the output tensors.
            # This uses a closure on self._log_histograms_this_batch to
            # determine whether to send the activations to tensorboard,
            # since we don't want them on every call.
            for _, module in self._model.named_modules():
                if not getattr(module, 'should_log_activations', False):
                    # skip it
                    continue
                def hook(module_, inputs, outputs):
                    # pylint: disable=unused-argument,cell-var-from-loop
                    log_prefix = 'activation_histogram/{0}'.format(module_.__class__)
                    if self._log_histograms_this_batch:
                        # Supported output shapes: a lone tensor, a
                        # list/tuple of tensors, or a dict of tensors.
                        # Anything else is silently skipped.
                        if isinstance(outputs, torch.Tensor):
                            log_name = log_prefix
                            self._tensorboard.add_train_histogram(log_name,
                                                                  outputs,
                                                                  self._batch_num_total)
                        elif isinstance(outputs, (list, tuple)):
                            for i, output in enumerate(outputs):
                                log_name = "{0}_{1}".format(log_prefix, i)
                                self._tensorboard.add_train_histogram(log_name,
                                                                      output,
                                                                      self._batch_num_total)
                        elif isinstance(outputs, dict):
                            for k, tensor in outputs.items():
                                log_name = "{0}_{1}".format(log_prefix, k)
                                self._tensorboard.add_train_histogram(log_name,
                                                                      tensor,
                                                                      self._batch_num_total)
                        else:
                            # skip it
                            pass
                module.register_forward_hook(hook)
def _rescale_gradients(self) -> Optional[float]:
"""
Performs gradient rescaling. Is a no-op if gradient rescaling is not enabled.
"""
if self._grad_norm:
parameters_to_clip = [p for p in self._model.parameters()
if p.grad is not None]
return sparse_clip_norm(parameters_to_clip, self._grad_norm)
return None
    def _data_parallel(self, batch):
        """
        Do the forward pass using multiple GPUs. This is a simplification
        of torch.nn.parallel.data_parallel to support the allennlp model
        interface.
        """
        # Shard the batch kwargs across the configured devices; positional
        # inputs are empty because the model is called with keyword args.
        inputs, module_kwargs = scatter_kwargs((), batch, self._cuda_devices, 0)
        # A batch smaller than the device count yields fewer shards, so only
        # the devices that actually received data are used.
        used_device_ids = self._cuda_devices[:len(inputs)]
        replicas = replicate(self._model, used_device_ids)
        outputs = parallel_apply(replicas, inputs, module_kwargs, used_device_ids)
        # Only the 'loss' is needed.
        # a (num_gpu, ) tensor with loss on each GPU
        losses = gather([output['loss'].unsqueeze(0) for output in outputs], used_device_ids[0], 0)
        # Average the per-GPU losses into a single scalar.
        return {'loss': losses.mean()}
def _batch_loss(self, batch: torch.Tensor, for_training: bool) -> torch.Tensor:
"""
Does a forward pass on the given batch and returns the ``loss`` value in the result.
If ``for_training`` is `True` also applies regularization penalty.
"""
if self._multiple_gpu:
output_dict = self._data_parallel(batch)
else:
output_dict = self._model(**batch)
try:
loss = output_dict["loss"]
if for_training:
loss += self._model.get_regularization_penalty()
except KeyError:
if for_training:
raise RuntimeError("The model you are trying to optimize does not contain a"
" 'loss' key in the output of model.forward(inputs).")
loss = None
return loss
def _get_metrics(self, total_loss: float, num_batches: int, reset: bool = False) -> Dict[str, float]:
"""
Gets the metrics but sets ``"loss"`` to
the total loss divided by the ``num_batches`` so that
the ``"loss"`` metric is "average loss per batch".
"""
metrics = self._model.get_metrics(reset=reset)
metrics["loss"] = float(total_loss / num_batches) if num_batches > 0 else 0.0
return metrics
    def _train_epoch(self, epoch: int) -> Dict[str, float]:
        """
        Trains one epoch and returns metrics.

        Parameters
        ----------
        epoch : int
            Zero-based index of the epoch, used for logging and for naming
            mid-epoch checkpoints.

        Returns
        -------
        Dict[str, float]
            The model's metrics for this epoch (with ``"loss"`` averaged
            per batch), obtained with ``reset=True``.
        """
        logger.info("Epoch %d/%d", epoch, self._num_epochs - 1)
        logger.info(f"Peak CPU memory usage MB: {peak_memory_mb()}")
        for gpu, memory in gpu_memory_mb().items():
            logger.info(f"GPU {gpu} memory usage MB: {memory}")
        train_loss = 0.0
        # Set the model to "train" mode.
        self._model.train()
        # Get tqdm for the training batches
        train_generator = self._iterator(self._train_data,
                                         num_epochs=1,
                                         shuffle=self._shuffle,
                                         cuda_device=self._iterator_device)
        num_training_batches = self._iterator.get_num_batches(self._train_data)
        self._last_log = time.time()
        last_save_time = time.time()
        batches_this_epoch = 0
        # _batch_num_total persists across epochs; it drives per-batch
        # tensorboard logging and the per-batch LR scheduler steps.
        if self._batch_num_total is None:
            self._batch_num_total = 0
        if self._histogram_interval is not None:
            histogram_parameters = set(self._model.get_parameters_for_histogram_tensorboard_logging())
        logger.info("Training")
        train_generator_tqdm = Tqdm.tqdm(train_generator,
                                         total=num_training_batches)
        for batch in train_generator_tqdm:
            batches_this_epoch += 1
            self._batch_num_total += 1
            batch_num_total = self._batch_num_total
            # Decide up front whether this batch gets histogram logging; the
            # flag is also read below when choosing how to step the optimizer.
            self._log_histograms_this_batch = self._histogram_interval is not None and (
                    batch_num_total % self._histogram_interval == 0)
            self._optimizer.zero_grad()
            loss = self._batch_loss(batch, for_training=True)
            loss.backward()
            train_loss += loss.item()
            batch_grad_norm = self._rescale_gradients()
            # This does nothing if batch_num_total is None or you are using an
            # LRScheduler which doesn't update per batch.
            if self._learning_rate_scheduler:
                self._learning_rate_scheduler.step_batch(batch_num_total)
            if self._log_histograms_this_batch:
                # get the magnitude of parameter updates for logging
                # We need a copy of current parameters to compute magnitude of updates,
                # and copy them to CPU so large models won't go OOM on the GPU.
                param_updates = {name: param.detach().cpu().clone()
                                 for name, param in self._model.named_parameters()}
                self._optimizer.step()
                for name, param in self._model.named_parameters():
                    # update = old_param - new_param; log ||update|| / ||param||.
                    param_updates[name].sub_(param.detach().cpu())
                    update_norm = torch.norm(param_updates[name].view(-1, ))
                    param_norm = torch.norm(param.view(-1, )).cpu()
                    self._tensorboard.add_train_scalar("gradient_update/" + name,
                                                       update_norm / (param_norm + 1e-7),
                                                       batch_num_total)
            else:
                self._optimizer.step()
            # Update the description with the latest metrics
            metrics = self._get_metrics(train_loss, batches_this_epoch)
            description = self._description_from_metrics(metrics)
            train_generator_tqdm.set_description(description, refresh=False)
            # Log parameter values to Tensorboard
            if batch_num_total % self._summary_interval == 0:
                self._parameter_and_gradient_statistics_to_tensorboard(batch_num_total, batch_grad_norm)
                self._tensorboard.add_train_scalar("loss/loss_train", metrics["loss"], batch_num_total)
                self._metrics_to_tensorboard(batch_num_total,
                                             {"epoch_metrics/" + k: v for k, v in metrics.items()})
            if self._log_histograms_this_batch:
                self._histograms_to_tensorboard(batch_num_total, histogram_parameters)
            # Save model if needed.  Mid-epoch checkpoints are named
            # "<epoch>.<timestamp>" and carry an empty metric history.
            if self._model_save_interval is not None and (
                    time.time() - last_save_time > self._model_save_interval
            ):
                last_save_time = time.time()
                self._save_checkpoint(
                        '{0}.{1}'.format(epoch, time_to_str(int(last_save_time))), [], is_best=False
                )
        return self._get_metrics(train_loss, batches_this_epoch, reset=True)
def _should_stop_early(self, metric_history: List[float]) -> bool:
"""
uses patience and the validation metric to determine if training should stop early
"""
if self._patience and self._patience < len(metric_history):
# Pylint can't figure out that in this branch `self._patience` is an int.
# pylint: disable=invalid-unary-operand-type
# Is the best score in the past N epochs worse than or equal the best score overall?
if self._validation_metric_decreases:
return min(metric_history[-self._patience:]) >= min(metric_history[:-self._patience])
else:
return max(metric_history[-self._patience:]) <= max(metric_history[:-self._patience])
return False
    def _parameter_and_gradient_statistics_to_tensorboard(self, # pylint: disable=invalid-name
                                                          epoch: int,
                                                          batch_grad_norm: float) -> None:
        """
        Send the mean and std of all parameters and gradients to tensorboard, as well
        as logging the average gradient norm.

        NOTE: despite the parameter name, ``_train_epoch`` passes the global
        batch number as ``epoch``, so these scalars are logged per batch.
        """
        # Log parameter values to Tensorboard
        for name, param in self._model.named_parameters():
            self._tensorboard.add_train_scalar("parameter_mean/" + name,
                                               param.data.mean(),
                                               epoch)
            self._tensorboard.add_train_scalar("parameter_std/" + name, param.data.std(), epoch)
            if param.grad is not None:
                if is_sparse(param.grad):
                    # For sparse gradients only the stored values are
                    # meaningful for mean/std statistics.
                    # pylint: disable=protected-access
                    grad_data = param.grad.data._values()
                else:
                    grad_data = param.grad.data
                # skip empty gradients (zero total elements)
                if torch.prod(torch.tensor(grad_data.shape)).item() > 0: # pylint: disable=not-callable
                    self._tensorboard.add_train_scalar("gradient_mean/" + name,
                                                       grad_data.mean(),
                                                       epoch)
                    self._tensorboard.add_train_scalar("gradient_std/" + name,
                                                       grad_data.std(),
                                                       epoch)
                else:
                    # no gradient for a parameter with sparse gradients
                    logger.info("No gradient for %s, skipping tensorboard logging.", name)
        # norm of gradients, as returned by _rescale_gradients (None when
        # gradient rescaling is disabled)
        if batch_grad_norm is not None:
            self._tensorboard.add_train_scalar("gradient_norm",
                                               batch_grad_norm,
                                               epoch)
def _histograms_to_tensorboard(self, epoch: int, histogram_parameters: Set[str]) -> None:
"""
Send histograms of parameters to tensorboard.
"""
for name, param in self._model.named_parameters():
if name in histogram_parameters:
self._tensorboard.add_train_histogram("parameter_histogram/" + name,
param,
epoch)
def _metrics_to_tensorboard(self,
epoch: int,
train_metrics: dict,
val_metrics: dict = None) -> None:
"""
Sends all of the train metrics (and validation metrics, if provided) to tensorboard.
"""
metric_names = set(train_metrics.keys())
if val_metrics is not None:
metric_names.update(val_metrics.keys())
val_metrics = val_metrics or {}
for name in metric_names:
train_metric = train_metrics.get(name)
if train_metric is not None:
self._tensorboard.add_train_scalar(name, train_metric, epoch)
val_metric = val_metrics.get(name)
if val_metric is not None:
self._tensorboard.add_validation_scalar(name, val_metric, epoch)
def _metrics_to_console(self, # pylint: disable=no-self-use
train_metrics: dict,
val_metrics: dict = None) -> None:
"""
Logs all of the train metrics (and validation metrics, if provided) to the console.
"""
val_metrics = val_metrics or {}
dual_message_template = "%s | %8.3f | %8.3f"
no_val_message_template = "%s | %8.3f | %8s"
no_train_message_template = "%s | %8s | %8.3f"
header_template = "%s | %-10s"
metric_names = set(train_metrics.keys())
if val_metrics:
metric_names.update(val_metrics.keys())
name_length = max([len(x) for x in metric_names])
logger.info(header_template, "Training".rjust(name_length + 13), "Validation")
for name in metric_names:
train_metric = train_metrics.get(name)
val_metric = val_metrics.get(name)
if val_metric is not None and train_metric is not None:
logger.info(dual_message_template, name.ljust(name_length), train_metric, val_metric)
elif val_metric is not None:
logger.info(no_train_message_template, name.ljust(name_length), "N/A", val_metric)
elif train_metric is not None:
logger.info(no_val_message_template, name.ljust(name_length), train_metric, "N/A")
def _validation_loss(self) -> Tuple[float, int]:
"""
Computes the validation loss. Returns it and the number of batches.
"""
logger.info("Validating")
self._model.eval()
if self._validation_iterator is not None:
val_iterator = self._validation_iterator
else:
val_iterator = self._iterator
val_generator = val_iterator(self._validation_data,
num_epochs=1,
shuffle=False,
cuda_device=self._iterator_device)
num_validation_batches = val_iterator.get_num_batches(self._validation_data)
val_generator_tqdm = Tqdm.tqdm(val_generator,
total=num_validation_batches)
batches_this_epoch = 0
val_loss = 0
for batch in val_generator_tqdm:
loss = self._batch_loss(batch, for_training=False)
if loss is not None:
# You shouldn't necessarily have to compute a loss for validation, so we allow for
# `loss` to be None. We need to be careful, though - `batches_this_epoch` is
# currently only used as the divisor for the loss function, so we can safely only
# count those batches for which we actually have a loss. If this variable ever
# gets used for something else, we might need to change things around a bit.
batches_this_epoch += 1
val_loss += loss.detach().cpu().numpy()
# Update the description with the latest metrics
val_metrics = self._get_metrics(val_loss, batches_this_epoch)
description = self._description_from_metrics(val_metrics)
val_generator_tqdm.set_description(description, refresh=False)
return val_loss, batches_this_epoch
    def train(self) -> Dict[str, Any]:
        """
        Trains the supplied model with the supplied parameters.

        Returns
        -------
        Dict[str, Any]
            Summary metrics: training duration / start epoch / epochs
            trained, the final training and validation metrics, and (when
            validation data was supplied) the best validation metrics and
            the epoch that achieved them.
        """
        try:
            # Resume from the latest checkpoint in the serialization
            # directory, if any; otherwise starts at epoch 0 with an empty
            # validation-metric history.
            epoch_counter, validation_metric_per_epoch = self._restore_checkpoint()
        except RuntimeError:
            traceback.print_exc()
            raise ConfigurationError("Could not recover training from the checkpoint.  Did you mean to output to "
                                     "a different serialization directory or delete the existing serialization "
                                     "directory?")
        self._enable_gradient_clipping()
        self._enable_activation_logging()
        logger.info("Beginning training.")
        train_metrics: Dict[str, float] = {}
        val_metrics: Dict[str, float] = {}
        best_epoch_val_metrics: Dict[str, float] = {}
        epochs_trained = 0
        training_start_time = time.time()
        for epoch in range(epoch_counter, self._num_epochs):
            epoch_start_time = time.time()
            train_metrics = self._train_epoch(epoch)
            if self._validation_data is not None:
                with torch.no_grad():
                    # We have a validation set, so compute all the metrics on it.
                    val_loss, num_batches = self._validation_loss()
                    val_metrics = self._get_metrics(val_loss, num_batches, reset=True)
                    # Check validation metric for early stopping
                    this_epoch_val_metric = val_metrics[self._validation_metric]
                    # Check validation metric to see if it's the best so far
                    is_best_so_far = self._is_best_so_far(this_epoch_val_metric, validation_metric_per_epoch)
                    if is_best_so_far:
                        best_epoch_val_metrics = val_metrics.copy()
                    validation_metric_per_epoch.append(this_epoch_val_metric)
                    if self._should_stop_early(validation_metric_per_epoch):
                        logger.info("Ran out of patience. Stopping training.")
                        break
            else:
                # No validation set, so just assume it's the best so far.
                is_best_so_far = True
                val_metrics = {}
                best_epoch_val_metrics = {}
                this_epoch_val_metric = None
            self._save_checkpoint(epoch, validation_metric_per_epoch, is_best=is_best_so_far)
            self._metrics_to_tensorboard(epoch, train_metrics, val_metrics=val_metrics)
            self._metrics_to_console(train_metrics, val_metrics)
            # Log the learning rate of every optimizer parameter group.
            for index, param_group in enumerate(self._optimizer.param_groups):
                learning_rate = param_group.get("lr")
                if learning_rate is not None:
                    self._tensorboard.add_train_scalar(
                            f"learning_rate/param_group{index:d}", learning_rate, epoch)
            if self._learning_rate_scheduler:
                # The LRScheduler API is agnostic to whether your schedule requires a validation metric -
                # if it doesn't, the validation metric passed here is ignored.
                self._learning_rate_scheduler.step(this_epoch_val_metric, epoch)
            epoch_elapsed_time = time.time() - epoch_start_time
            logger.info("Epoch duration: %s", time.strftime("%H:%M:%S", time.gmtime(epoch_elapsed_time)))
            if epoch < self._num_epochs - 1:
                # Extrapolate remaining wall-clock time from the average
                # time per epoch trained in this run.
                training_elapsed_time = time.time() - training_start_time
                estimated_time_remaining = training_elapsed_time * \
                    ((self._num_epochs - epoch_counter) / float(epoch - epoch_counter + 1) - 1)
                formatted_time = str(datetime.timedelta(seconds=int(estimated_time_remaining)))
                logger.info("Estimated training time remaining: %s", formatted_time)
            epochs_trained += 1
        training_elapsed_time = time.time() - training_start_time
        metrics = {
                "training_duration": time.strftime("%H:%M:%S", time.gmtime(training_elapsed_time)),
                "training_start_epoch": epoch_counter,
                "training_epochs": epochs_trained
        }
        for key, value in train_metrics.items():
            metrics["training_" + key] = value
        for key, value in val_metrics.items():
            metrics["validation_" + key] = value
        if validation_metric_per_epoch:
            # We may not have had validation data, so we need to hide this behind an if.
            if self._validation_metric_decreases:
                best_validation_metric = min(validation_metric_per_epoch)
            else:
                best_validation_metric = max(validation_metric_per_epoch)
            metrics.update({f"best_validation_{k}": v for k, v in best_epoch_val_metrics.items()})
            # Report the last epoch that achieved the best metric value.
            metrics['best_epoch'] = [i for i, value in enumerate(validation_metric_per_epoch)
                                     if value == best_validation_metric][-1]
        return metrics
def _is_best_so_far(self,
this_epoch_val_metric: float,
validation_metric_per_epoch: List[float]):
if not validation_metric_per_epoch:
return True
elif self._validation_metric_decreases:
return this_epoch_val_metric < min(validation_metric_per_epoch)
else:
return this_epoch_val_metric > max(validation_metric_per_epoch)
def _description_from_metrics(self, metrics: Dict[str, float]) -> str:
if (not self._warned_tqdm_ignores_underscores and
any(metric_name.startswith("_") for metric_name in metrics)):
logger.warning("Metrics with names beginning with \"_\" will "
"not be logged to the tqdm progress bar.")
self._warned_tqdm_ignores_underscores = True
return ', '.join(["%s: %.4f" % (name, value) for name, value in
metrics.items() if not name.startswith("_")]) + " ||"
    def _save_checkpoint(self,
                         epoch: Union[int, str],
                         val_metric_per_epoch: List[float],
                         is_best: Optional[bool] = None) -> None:
        """
        Saves a checkpoint of the model to self._serialization_dir.
        Is a no-op if self._serialization_dir is None.
        Parameters
        ----------
        epoch : Union[int, str], required.
            The epoch of training. If the checkpoint is saved in the middle
            of an epoch, the parameter is a string with the epoch and timestamp.
        val_metric_per_epoch : List[float], required.
            Per-epoch validation-metric history; stored in the training
            state so it can be restored when training resumes.
        is_best: bool, optional (default = None)
            A flag which causes the model weights at the given epoch to
            be copied to a "best.th" file. The value of this flag should
            be based on some validation metric computed by your model.
        """
        if self._serialization_dir is not None:
            # Model weights and training state are written as separate files.
            model_path = os.path.join(self._serialization_dir, "model_state_epoch_{}.th".format(epoch))
            model_state = self._model.state_dict()
            torch.save(model_state, model_path)
            training_state = {'epoch': epoch,
                              'val_metric_per_epoch': val_metric_per_epoch,
                              'optimizer': self._optimizer.state_dict(),
                              'batch_num_total': self._batch_num_total}
            training_path = os.path.join(self._serialization_dir,
                                         "training_state_epoch_{}.th".format(epoch))
            torch.save(training_state, training_path)
            if is_best:
                logger.info("Best validation performance so far. "
                            "Copying weights to '%s/best.th'.", self._serialization_dir)
                shutil.copyfile(model_path, os.path.join(self._serialization_dir, "best.th"))
            # Rolling retention: keep only the newest
            # _num_serialized_models_to_keep checkpoints, except that one
            # checkpoint may be kept permanently every
            # _keep_serialized_model_every_num_seconds (when configured).
            if self._num_serialized_models_to_keep and self._num_serialized_models_to_keep >= 0:
                self._serialized_paths.append([time.time(), model_path, training_path])
                if len(self._serialized_paths) > self._num_serialized_models_to_keep:
                    paths_to_remove = self._serialized_paths.pop(0)
                    # Check to see if we should keep this checkpoint, if it has been longer
                    # then self._keep_serialized_model_every_num_seconds since the last
                    # kept checkpoint.
                    remove_path = True
                    if self._keep_serialized_model_every_num_seconds is not None:
                        save_time = paths_to_remove[0]
                        time_since_checkpoint_kept = save_time - self._last_permanent_saved_checkpoint_time
                        if time_since_checkpoint_kept > self._keep_serialized_model_every_num_seconds:
                            # We want to keep this checkpoint.
                            remove_path = False
                            self._last_permanent_saved_checkpoint_time = save_time
                    if remove_path:
                        # Remove both the model and training-state files.
                        for fname in paths_to_remove[1:]:
                            os.remove(fname)
def find_latest_checkpoint(self) -> Tuple[str, str]:
"""
Return the location of the latest model and training state files.
If there isn't a valid checkpoint then return None.
"""
have_checkpoint = (self._serialization_dir is not None and
any("model_state_epoch_" in x for x in os.listdir(self._serialization_dir)))
if not have_checkpoint:
return None
serialization_files = os.listdir(self._serialization_dir)
model_checkpoints = [x for x in serialization_files if "model_state_epoch" in x]
# Get the last checkpoint file. Epochs are specified as either an
# int (for end of epoch files) or with epoch and timestamp for
# within epoch checkpoints, e.g. 5.2018-02-02-15-33-42
found_epochs = [
# pylint: disable=anomalous-backslash-in-string
re.search("model_state_epoch_([0-9\.\-]+)\.th", x).group(1)
for x in model_checkpoints
]
int_epochs: Any = []
for epoch in found_epochs:
pieces = epoch.split('.')
if len(pieces) == 1:
# Just a single epoch without timestamp
int_epochs.append([int(pieces[0]), 0])
else:
# has a timestamp
int_epochs.append([int(pieces[0]), pieces[1]])
last_epoch = sorted(int_epochs, reverse=True)[0]
if last_epoch[1] == 0:
epoch_to_load = str(last_epoch[0])
else:
epoch_to_load = '{0}.{1}'.format(last_epoch[0], last_epoch[1])
model_path = os.path.join(self._serialization_dir,
"model_state_epoch_{}.th".format(epoch_to_load))
training_state_path = os.path.join(self._serialization_dir,
"training_state_epoch_{}.th".format(epoch_to_load))
return (model_path, training_state_path)
    def _restore_checkpoint(self) -> Tuple[int, List[float]]:
        """
        Restores a model from a serialization_dir to the last saved checkpoint.
        This includes an epoch count and optimizer state, which is serialized separately
        from model parameters. This function should only be used to continue training -
        if you wish to load a model for inference/load parts of a model into a new
        computation graph, you should use the native Pytorch functions:
        `` model.load_state_dict(torch.load("/path/to/model/weights.th"))``
        If ``self._serialization_dir`` does not exist or does not contain any checkpointed weights,
        this function will do nothing and return 0 with an empty metric history.

        Returns
        -------
        epoch: int
            The epoch at which to resume training, which should be one after the epoch
            in the saved training state.
        val_metric_per_epoch: List[float]
            The per-epoch validation-metric history stored with the
            checkpoint (empty for older checkpoints that predate it).
        """
        latest_checkpoint = self.find_latest_checkpoint()
        if latest_checkpoint is None:
            # No checkpoint to restore, start at 0
            return 0, []
        model_path, training_state_path = latest_checkpoint
        # Load the parameters onto CPU, then transfer to GPU.
        # This avoids potential OOM on GPU for large models that
        # load parameters onto GPU then make a new GPU copy into the parameter
        # buffer. The GPU transfer happens implicitly in load_state_dict.
        model_state = torch.load(model_path, map_location=util.device_mapping(-1))
        training_state = torch.load(training_state_path, map_location=util.device_mapping(-1))
        self._model.load_state_dict(model_state)
        self._optimizer.load_state_dict(training_state["optimizer"])
        move_optimizer_to_cuda(self._optimizer)
        # We didn't used to save `validation_metric_per_epoch`, so we can't assume
        # that it's part of the trainer state. If it's not there, an empty list is all
        # we can do.
        if "val_metric_per_epoch" not in training_state:
            logger.warning("trainer state `val_metric_per_epoch` not found, using empty list")
            val_metric_per_epoch: List[float] = []
        else:
            val_metric_per_epoch = training_state["val_metric_per_epoch"]
        # A mid-epoch checkpoint stores the epoch as "<epoch>.<timestamp>";
        # in both cases training resumes at <epoch> + 1.
        if isinstance(training_state["epoch"], int):
            epoch_to_return = training_state["epoch"] + 1
        else:
            epoch_to_return = int(training_state["epoch"].split('.')[0]) + 1
        # For older checkpoints with batch_num_total missing, default to old behavior where
        # it is unchanged.
        batch_num_total = training_state.get('batch_num_total')
        if batch_num_total is not None:
            self._batch_num_total = batch_num_total
        return epoch_to_return, val_metric_per_epoch
    # Requires custom from_params.
    @classmethod
    def from_params(cls,
                    model: Model,
                    serialization_dir: str,
                    iterator: DataIterator,
                    train_data: Iterable[Instance],
                    validation_data: Optional[Iterable[Instance]],
                    params: Params,
                    validation_iterator: DataIterator = None) -> 'Trainer':
        """
        Construct a ``Trainer`` from a ``Params`` configuration object.

        Pops each trainer option off ``params`` (with defaults), builds the
        optimizer and optional LR scheduler from the config, and asserts the
        config has been fully consumed before instantiating the Trainer.
        """
        patience = params.pop_int("patience", None)
        validation_metric = params.pop("validation_metric", "-loss")
        shuffle = params.pop_bool("shuffle", True)
        num_epochs = params.pop_int("num_epochs", 20)
        cuda_device = params.pop_int("cuda_device", -1)
        grad_norm = params.pop_float("grad_norm", None)
        grad_clipping = params.pop_float("grad_clipping", None)
        lr_scheduler_params = params.pop("learning_rate_scheduler", None)
        # Move the model to the GPU before collecting parameters so the
        # optimizer is built over the CUDA tensors.
        if cuda_device >= 0:
            model = model.cuda(cuda_device)
        parameters = [[n, p] for n, p in model.named_parameters() if p.requires_grad]
        optimizer = Optimizer.from_params(parameters, params.pop("optimizer"))
        if lr_scheduler_params:
            scheduler = LearningRateScheduler.from_params(optimizer, lr_scheduler_params)
        else:
            scheduler = None
        num_serialized_models_to_keep = params.pop_int("num_serialized_models_to_keep", 20)
        keep_serialized_model_every_num_seconds = params.pop_int(
                "keep_serialized_model_every_num_seconds", None)
        model_save_interval = params.pop_float("model_save_interval", None)
        summary_interval = params.pop_int("summary_interval", 100)
        histogram_interval = params.pop_int("histogram_interval", None)
        # Fail loudly on any config keys that were not consumed above.
        params.assert_empty(cls.__name__)
        return Trainer(model, optimizer, iterator,
                       train_data, validation_data,
                       patience=patience,
                       validation_metric=validation_metric,
                       validation_iterator=validation_iterator,
                       shuffle=shuffle,
                       num_epochs=num_epochs,
                       serialization_dir=serialization_dir,
                       cuda_device=cuda_device,
                       grad_norm=grad_norm,
                       grad_clipping=grad_clipping,
                       learning_rate_scheduler=scheduler,
                       num_serialized_models_to_keep=num_serialized_models_to_keep,
                       keep_serialized_model_every_num_seconds=keep_serialized_model_every_num_seconds,
                       model_save_interval=model_save_interval,
                       summary_interval=summary_interval,
                       histogram_interval=histogram_interval)
| 49.097656 | 114 | 0.607129 |
8d983c7c5b8c9675dae4653164da9a28df98ed17 | 2,683 | gyp | Python | syzygy/trace/rpc/rpc.gyp | nzeh/syzygy | 3573e3d458dbb4285753c28a7cb42ced739f9f55 | [
"Apache-2.0"
] | 343 | 2015-01-07T05:58:44.000Z | 2022-03-15T14:55:21.000Z | syzygy/trace/rpc/rpc.gyp | nzeh/syzygy-nzeh | 3757e53f850644721284073de318e218224dd411 | [
"Apache-2.0"
] | 61 | 2015-03-19T18:20:21.000Z | 2019-10-23T12:58:23.000Z | syzygy/trace/rpc/rpc.gyp | nzeh/syzygy-nzeh | 3757e53f850644721284073de318e218224dd411 | [
"Apache-2.0"
] | 66 | 2015-01-20T15:35:05.000Z | 2021-11-25T16:49:41.000Z | # Copyright 2012 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
{
  # GYP build configuration: two RPC stub/proxy static libraries, one per
  # IDL interface (call-trace and logger), each compiled via midl_rpc.gypi.
  'variables': {
    'chromium_code': 1,
    'midl_out_dir': '<(SHARED_INTERMEDIATE_DIR)/syzygy/trace/rpc',
  },
  'target_defaults': {
    'all_dependent_settings': {
      'include_dirs': ['<(SHARED_INTERMEDIATE_DIR)'],
    },
  },
  'targets': [
    {
      'target_name': 'call_trace_rpc_lib',
      'type': 'static_library',
      # Build our IDL file to the shared intermediate directory using the
      # midl_rpc.gypi include (because the default rules for .idl files are
      # specific to COM interfaces). This include expects the prefix and
      # midl_out_dir variables to be defined.
      'variables': {
        'prefix': 'CallTrace',
      },
      'includes': ['../../build/midl_rpc.gypi'],
      'sources': ['call_trace_rpc.idl'],
      'dependencies': [
        '<(src)/syzygy/common/rpc/rpc.gyp:common_rpc_lib',
        '<(src)/syzygy/trace/protocol/protocol.gyp:protocol_lib',
      ],
      # MIDL-generated header plus client/server stub sources.
      'outputs': [
        '<(SHARED_INTERMEDIATE_DIR)/syzygy/trace/rpc/call_trace_rpc.h',
        '<(SHARED_INTERMEDIATE_DIR)/syzygy/trace/rpc/call_trace_rpc_c.cc',
        '<(SHARED_INTERMEDIATE_DIR)/syzygy/trace/rpc/call_trace_rpc_s.cc',
      ],
      'process_outputs_as_sources': 1,
      # This target exports a hard dependency because it generates header files.
      'hard_dependency': 1,
    },
    {
      'target_name': 'logger_rpc_lib',
      'type': 'static_library',
      # Build our IDL file to the shared intermediate directory using the
      # midl_rpc.gypi include (because the default rules for .idl files are
      # specific to COM interfaces). This include expects the prefix and
      # midl_out_dir variables to be defined.
      'variables': {
        'prefix': 'Logger',
      },
      'includes': ['../../build/midl_rpc.gypi'],
      'sources': ['logger_rpc.idl'],
      'dependencies': [
        '<(src)/syzygy/common/rpc/rpc.gyp:common_rpc_lib',
        '<(src)/syzygy/trace/protocol/protocol.gyp:protocol_lib',
      ],
      # This target exports a hard dependency because it generates header files.
      'hard_dependency': 1,
    },
  ],
}
| 37.263889 | 80 | 0.657846 |
e18fe0209231f479c5989ac428717aad07b303b4 | 4,701 | py | Python | app.py | JohannesJolkkonen/mock-business-data-generator | 64c93d4e722bffa78aa72c1fd31ab959a2f9771f | [
"MIT"
] | null | null | null | app.py | JohannesJolkkonen/mock-business-data-generator | 64c93d4e722bffa78aa72c1fd31ab959a2f9771f | [
"MIT"
] | null | null | null | app.py | JohannesJolkkonen/mock-business-data-generator | 64c93d4e722bffa78aa72c1fd31ab959a2f9771f | [
"MIT"
] | null | null | null | import pandas as pd
import datetime
from mimesis import Generic
import random
import numpy as np
# Column schemas for the four generated CSV tables.
products_head = ['ID', 'Name', 'Price', 'Unit Cost', 'Manufacturer']
customers_head = ['id', 'Name', 'Address', 'City', 'Country', 'Website', 'Email', 'Phone', 'Registration Date']
staff_head = ['id', 'Name', 'Title', 'Address', 'Contract Date', 'Telephone', 'Email', 'Termination Date', 'Office', 'Salary']
sales_head = ['Tx Id', 'Customer id', 'Product ID', 'Sales Date', 'Sales Manager', 'Point of Sales', 'Quantity', 'Total Price']
def main():
    """Generate all mock-data CSV tables.

    generate_sales must run last: it reads the products/customers/employees
    CSV files written by the preceding calls.
    """
    generate_products(340)
    generate_staff(400)
    generate_customers(4000)
    generate_sales('./products.csv', './customers.csv', './employees.csv', 100000)
def generate_customers(n):
    """Create ``n`` fake customer records and write them to ./customers.csv."""
    ### Timer, mimesis generator, and a row accumulator
    begin_timer = datetime.datetime.now()
    gen = Generic('en')
    rows = []
    ### Build one record per customer; ids start at 21000
    for i in range(n):
        rows.append([
            21000 + i,
            gen.business.company(),
            gen.address.address(),
            gen.address.city(),
            gen.address.country(),
            gen.internet.home_page(),
            gen.person.email(),
            gen.person.telephone(),
            gen.datetime.datetime(),
        ])
    df = pd.DataFrame(rows, columns=customers_head)
    print(f'Generated customer-table in {datetime.datetime.now() - begin_timer}\n')
    df.to_csv('./customers.csv', index=False)
def generate_products(n):
    """Create ``n`` fake product records and write them to ./products.csv.

    Product names rotate between graphics cards (every 4th index), CPUs
    (every remaining 5th index) and phone models (everything else).
    """
    ### Initialize timer, mimesis-class and dataframe
    begin_timer = datetime.datetime.now()
    gen = Generic('en')
    df = pd.DataFrame(columns=products_head)
    ### Generate information for n products and add them into dataframe
    for i in range(n):
        # BUG FIX: the second branch was a plain `if`, so whenever
        # i % 4 == 0 and i % 5 != 0 the graphics name chosen by the first
        # branch was immediately overwritten by the `else` branch and
        # graphics products never appeared in the output. A single
        # if/elif/else chain implements the intended rotation.
        if i % 4 == 0:
            name = gen.hardware.graphics()
        elif i % 5 == 0:
            name = gen.hardware.cpu()
        else:
            name = gen.hardware.phone_model()
        # ID: first three letters of the name + a random 3-digit number.
        id = name[:3].upper() + str(gen.numbers.integer_number(300, 900))
        price = gen.random.uniform(200, 1040, 2)
        # Unit cost is a random 20-95% fraction of the sales price.
        unit_cost = round(gen.random.uniform(0.2, 0.95, 2) * price, 2)
        manufacturer = name.split(' ')[0]
        df.loc[i] = [id, name, price, unit_cost, manufacturer]
    print(f'Generated product-table in {datetime.datetime.now() - begin_timer}\n')
    df.to_csv('./products.csv', index=False)
def generate_staff(n):
    """Create ``n`` fake employee records and write them to ./employees.csv."""
    ### Timer, mimesis generator, and a row accumulator
    begin_timer = datetime.datetime.now()
    gen = Generic('en')
    rows = []
    ### Build one record per employee
    for i in range(n):
        name = gen.person.full_name()
        title = gen.person.occupation()
        address = gen.address.address()
        phone = gen.person.telephone()
        office = gen.address.continent()
        contract_date = gen.datetime.date(2012, 2021)
        email = gen.person.email()
        # Salaries normally distributed around 3200
        salary = int(np.random.normal(loc=3200, scale=1000))
        if i % 4 == 0:  # Lazy way to insert expired contracts into the data
            termination_date = contract_date + datetime.timedelta(random.randint(120, 900))
        else:
            termination_date = np.nan
        rows.append([i, name, title, address, contract_date, phone, email,
                     termination_date, office, salary])
    df = pd.DataFrame(rows, columns=staff_head)
    print(f'Generated staff-table in {datetime.datetime.now() - begin_timer}\n')
    df.to_csv('./employees.csv', index=False)
def generate_sales(products, customers, staff, n):
    """Create ``n`` fake sales transactions and write them to ./sales.csv.

    Parameters
    ----------
    products, customers, staff : str
        Paths to the CSV files previously written by the generate_* functions.
    n : int
        Number of transactions to generate.
    """
    begin_timer = datetime.datetime.now()
    ### Reduce input tables to numpy arrays to make sampling faster
    cust = pd.read_csv(customers).id.values
    prod = pd.read_csv(products).values
    staff = pd.read_csv(staff).id.values
    gen = Generic('en')
    # PERF: accumulate rows in a plain list and build the DataFrame once at
    # the end. Growing a DataFrame with df.loc[i] re-allocates on every
    # insert and is quadratic, which is very slow for n ~ 100k.
    rows = []
    ### Select random customers, products and employees and generate sales events for them
    for i in range(n):
        cust_id = cust[np.random.choice(cust.shape[0])]
        product = prod[np.random.choice(prod.shape[0])]
        sales_person_id = staff[np.random.choice(staff.shape[0])]
        sales_date = gen.datetime.datetime(2012, 2021)
        pos = random.choice(['Webstore', 'Wholesale', 'Flagship', 'Reseller'])
        qty = np.random.randint(6, 400)
        # product[0] is the product ID column, product[2] the unit price.
        total_price = qty * product[2]
        rows.append([i, cust_id, product[0], sales_date, sales_person_id, pos, qty, total_price])
    df = pd.DataFrame(rows, columns=sales_head)
    print(f'Generated sales-table in {datetime.datetime.now() - begin_timer}\n')
    df.to_csv('./sales.csv', index=False)
# Only run the generators when executed as a script, not on import.
if __name__ == "__main__":
    main()
694f37d612d4c46e673dc894b05a0a446190732c | 1,400 | py | Python | python/paddle/v2/framework/tests/test_top_k_op.py | AI-books/Paddle | 5b5f4f514047975ac09ec42b31e46dabf235e7dd | [
"Apache-2.0"
] | null | null | null | python/paddle/v2/framework/tests/test_top_k_op.py | AI-books/Paddle | 5b5f4f514047975ac09ec42b31e46dabf235e7dd | [
"Apache-2.0"
] | null | null | null | python/paddle/v2/framework/tests/test_top_k_op.py | AI-books/Paddle | 5b5f4f514047975ac09ec42b31e46dabf235e7dd | [
"Apache-2.0"
] | 1 | 2020-06-04T04:27:15.000Z | 2020-06-04T04:27:15.000Z | import unittest
import numpy as np
from op_test import OpTest
class TestTopkOp(OpTest):
def setUp(self):
self.op_type = "top_k"
k = 1
input = np.random.random((32, 84)).astype("float32")
output = np.ndarray((32, k))
indices = np.ndarray((32, k))
self.inputs = {'X': input}
self.attrs = {'k': k}
for rowid in xrange(32):
row = input[rowid]
output[rowid] = np.sort(row)[-k:]
indices[rowid] = row.argsort()[-k:]
self.outputs = {'Out': output, 'Indices': indices}
def test_check_output(self):
self.check_output()
class TestTopkOp3d(OpTest):
    """Checks top_k on a 3d input, flattened to 2d (see FIXME below)."""

    def setUp(self):
        self.op_type = "top_k"
        k = 1
        input = np.random.random((32, 2, 84)).astype("float32")
        input_flat_2d = input.reshape(64, 84)
        output = np.ndarray((64, k))
        indices = np.ndarray((64, k)).astype("int")

        # FIXME: should use 'X': input for a 3d input
        self.inputs = {'X': input_flat_2d}
        self.attrs = {'k': k}

        # Reference: per flattened row, the k largest values and positions.
        for row_index in xrange(64):
            row = input_flat_2d[row_index]
            order = row.argsort()
            output[row_index] = np.sort(row)[-k:]
            indices[row_index] = order[-k:]

        self.outputs = {'Out': output, 'Indices': indices}

    def test_check_output(self):
        self.check_output()
# Standard unittest entry point so the module can be run directly.
if __name__ == "__main__":
    unittest.main()
| 25.925926 | 63 | 0.55 |
6a835d3b001efaa29e71486e6b86d6597cd63fa4 | 670 | py | Python | angel-ps/python/pyangel/ml/regression/__init__.py | weien8899/angel | 829ce1a02e147d1f93b6375c2d07208ea31e53a2 | [
"Apache-2.0",
"BSD-3-Clause"
] | 1 | 2017-11-24T07:40:30.000Z | 2017-11-24T07:40:30.000Z | angel-ps/python/pyangel/ml/regression/__init__.py | weien8899/angel | 829ce1a02e147d1f93b6375c2d07208ea31e53a2 | [
"Apache-2.0",
"BSD-3-Clause"
] | null | null | null | angel-ps/python/pyangel/ml/regression/__init__.py | weien8899/angel | 829ce1a02e147d1f93b6375c2d07208ea31e53a2 | [
"Apache-2.0",
"BSD-3-Clause"
] | 1 | 2018-09-28T00:31:04.000Z | 2018-09-28T00:31:04.000Z | #
# Tencent is pleased to support the open source community by making Angel available.
#
# Copyright (C) 2017 THL A29 Limited, a Tencent company. All rights reserved.
#
# Licensed under the BSD 3-Clause License (the "License"); you may not use this file except in
# compliance with the License. You may obtain a copy of the License at
#
# https://opensource.org/licenses/BSD-3-Clause
#
# Unless required by applicable law or agreed to in writing, software distributed under the License is
# distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND,
# either express or implied. See the License for the specific language governing permissions and
#
| 41.875 | 102 | 0.770149 |
0bc8c93bdb51323da52274bd9d27a91524604558 | 66,517 | py | Python | src/models/sklearnwrapper.py | softsys4ai/athena | 26f60aaf431fa8e9c9f5498ed52cdf10cfd7acdd | [
"MIT"
] | 38 | 2020-01-03T19:25:54.000Z | 2022-01-26T09:22:01.000Z | src/models/sklearnwrapper.py | softsys4ai/athena | 26f60aaf431fa8e9c9f5498ed52cdf10cfd7acdd | [
"MIT"
] | 11 | 2020-01-06T19:31:50.000Z | 2021-09-18T05:32:48.000Z | src/models/sklearnwrapper.py | softsys4ai/athena | 26f60aaf431fa8e9c9f5498ed52cdf10cfd7acdd | [
"MIT"
] | 10 | 2020-01-07T00:50:10.000Z | 2022-03-21T09:45:23.000Z | """
Implement weak defense model for Athena on top of IBM Trusted-AI ART 1.2.0.
@author: Ying Meng (y(dot)meng201011(at)gmail(dot)com)
"""
from __future__ import absolute_import, division, print_function, unicode_literals
# MIT License
#
# Copyright (C) IBM Corporation 2018
#
# Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated
# documentation files (the "Software"), to deal in the Software without restriction, including without limitation the
# rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit
# persons to whom the Software is furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all copies or substantial portions of the
# Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE
# WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
# TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
"""
This module implements the classifiers for scikit-learn models.
"""
import logging
import importlib
import numpy as np
from art.classifiers.classifier import Classifier, ClassifierGradients, ClassifierDecisionTree
from art.utils import to_categorical
from models.image_processor import transform
logger = logging.getLogger(__name__)
# pylint: disable=C0103
def WeakDefense(
    model, trans_configs, image_shape, clip_values=(0., 1.),
    preprocessing_defences=None, postprocessing_defences=None,
    preprocessing=(0, 1)
):
    """
    Create a `Classifier` instance from a scikit-learn Classifier model. This is a convenience function that
    instantiates the correct wrapper class for the given scikit-learn model.

    :param model: scikit-learn Classifier model.
    :type model: `sklearn.base.BaseEstimator`
    :param trans_configs: Configuration of the input transformation (the weak defense) applied to
           inputs before they are passed to the wrapped model.
    :type trans_configs: `dict`
    :param image_shape: Shape of a single input image as `(height, width, channels)`, used to restore
           flattened inputs to images before the transformation is applied.
    :type image_shape: `tuple`
    :param clip_values: Tuple of the form `(min, max)` representing the minimum and maximum values allowed
           for features.
    :type clip_values: `tuple`
    :param preprocessing_defences: Preprocessing defence(s) to be applied by the classifier.
    :type preprocessing_defences: :class:`.Preprocessor` or `list(Preprocessor)` instances
    :param postprocessing_defences: Postprocessing defence(s) to be applied by the classifier.
    :type postprocessing_defences: :class:`.Postprocessor` or `list(Postprocessor)` instances
    :param preprocessing: Tuple of the form `(subtractor, divider)` of floats or `np.ndarray` of values to be
           used for data preprocessing. The first value will be subtracted from the input. The input will then
           be divided by the second one.
    :type preprocessing: `tuple`
    :raises TypeError: If `model` is not a scikit-learn estimator.
    """
    if model.__class__.__module__.split(".")[0] != "sklearn":
        raise TypeError(f"Model is not an sklearn model. Received '{model.__class__}'")

    sklearn_name = model.__class__.__name__
    module = importlib.import_module("models.sklearnwrapper")
    # Dispatch to a model-specific wrapper (e.g. ScikitlearnSVC) when one is defined in this module.
    wrapper_name = f"Scikitlearn{sklearn_name}"
    if hasattr(module, wrapper_name):
        return getattr(module, wrapper_name)(
            model=model,
            trans_configs=trans_configs,
            image_shape=image_shape,
            clip_values=clip_values,
            preprocessing_defences=preprocessing_defences,
            postprocessing_defences=postprocessing_defences,
            preprocessing=preprocessing,
        )

    # Fall back to the generic wrapper, which handles `fit`, `predict` and `save`.
    return ScikitlearnClassifier(model, trans_configs, image_shape, clip_values,
                                 preprocessing_defences, postprocessing_defences,
                                 preprocessing)
class ScikitlearnClassifier(Classifier):
    """
    Wrapper class for scikit-learn classifier models that applies an input
    transformation (the weak defense) before every `fit`/`predict` call.
    """

    def __init__(
        self,
        model,
        trans_configs,
        image_shape,
        clip_values=None,
        preprocessing_defences=None,
        postprocessing_defences=None,
        preprocessing=(0, 1),
    ):
        """
        Create a `Classifier` instance from a scikit-learn classifier model.

        :param model: scikit-learn classifier model.
        :type model: `sklearn.base.BaseEstimator`
        :param trans_configs: Configuration of the input transformation applied by
               `transform` before the model is queried or fitted.
        :type trans_configs: `dict`
        :param image_shape: Shape of a single input image as `(height, width, channels)`,
               used to restore flattened inputs to images before transformation.
        :type image_shape: `tuple`
        :param clip_values: Tuple of the form `(min, max)` representing the minimum and maximum values allowed
               for features.
        :type clip_values: `tuple`
        :param preprocessing_defences: Preprocessing defence(s) to be applied by the classifier.
        :type preprocessing_defences: :class:`.Preprocessor` or `list(Preprocessor)` instances
        :param postprocessing_defences: Postprocessing defence(s) to be applied by the classifier.
        :type postprocessing_defences: :class:`.Postprocessor` or `list(Postprocessor)` instances
        :param preprocessing: Tuple of the form `(subtractor, divider)` of floats or `np.ndarray` of values to be
               used for data preprocessing. The first value will be subtracted from the input. The input will then
               be divided by the second one.
        :type preprocessing: `tuple`
        """
        super(ScikitlearnClassifier, self).__init__(
            clip_values=clip_values,
            preprocessing_defences=preprocessing_defences,
            postprocessing_defences=postprocessing_defences,
            preprocessing=preprocessing,
        )

        self.classifier = model
        self._trans_configs = trans_configs
        self._image_shape = image_shape
        # Per-sample input shape inferred from the (possibly not yet fitted) model;
        # None when the model exposes no shape information.
        self._input_shape = self._get_input_shape(model)
        self._nb_classes = self._get_nb_classes()
        # Counts examples the model has been queried with (see `num_queries`).
        self._num_queries = 0

    def fit(self, x, y, **kwargs):
        """
        Fit the classifier on the training set `(x, y)`.

        :param x: Training data, either image-shaped `(nb_samples, h, w, c)` or
               flattened `(nb_samples, nb_features)` / `(nb_features,)`.
        :type x: `np.ndarray`
        :param y: Target values (class labels) one-hot-encoded of shape (nb_samples, nb_classes) or indices of shape
               (nb_samples,).
        :type y: `np.ndarray`
        :param kwargs: Dictionary of framework-specific arguments. These should be parameters supported by the
               `fit` function in `sklearn` classifier and will be passed to this function as such.
        :type kwargs: `dict`
        :return: `None`
        """
        # Restore flattened inputs to image shape so the transformation can be applied.
        # Same guard as in `predict`; the previous `x.shape == input_shape` check missed
        # 2-D batches of flattened samples.
        if len(x.shape) <= 2:
            x = x.reshape((-1, self._image_shape[0], self._image_shape[1], self._image_shape[2]))
        x_preprocessed = transform(x, self._trans_configs)
        # Flatten each transformed sample back to a feature vector for scikit-learn.
        # Reshaping the whole batch to the per-sample input shape (as done before)
        # raised ValueError for batches with more than one sample.
        x_preprocessed = x_preprocessed.reshape((x_preprocessed.shape[0], -1))

        # Apply preprocessing
        x_preprocessed, y_preprocessed = self._apply_preprocessing(x_preprocessed, y, fit=True)
        # scikit-learn expects class indices, not one-hot labels.
        y_preprocessed = np.argmax(y_preprocessed, axis=1)

        self.classifier.fit(x_preprocessed, y_preprocessed, **kwargs)
        # Refresh cached metadata now that the model is fitted.
        self._input_shape = self._get_input_shape(self.classifier)
        self._nb_classes = self._get_nb_classes()

    def predict(self, x, **kwargs):
        """
        Perform prediction for a batch of inputs.

        :param x: Test set, either image-shaped `(nb_samples, h, w, c)` or flattened.
        :type x: `np.ndarray`
        :return: Array of predictions of shape `(nb_inputs, nb_classes)`.
        :rtype: `np.ndarray`
        :raises ValueError: If the wrapped model provides neither `predict_proba` nor `predict`.
        """
        # Restore flattened inputs to image shape, apply the transformation, then
        # flatten back to the model's expected per-sample input shape.
        num_examples = x.shape[0]
        if len(x.shape) <= 2:
            x = x.reshape((-1, self._image_shape[0], self._image_shape[1], self._image_shape[2]))
        x_preprocessed = transform(x, self._trans_configs)
        x_preprocessed = x_preprocessed.reshape((num_examples,) + tuple(self._input_shape))

        # Apply defences
        x_preprocessed, _ = self._apply_preprocessing(x_preprocessed, y=None, fit=False)

        if hasattr(self.classifier, "predict_proba") and callable(getattr(self.classifier, "predict_proba")):
            y_pred = self.classifier.predict_proba(x_preprocessed)
        elif hasattr(self.classifier, "predict") and callable(getattr(self.classifier, "predict")):
            # Hard labels only; convert to one-hot to keep a uniform return shape.
            y_pred = to_categorical(self.classifier.predict(x_preprocessed),
                                    nb_classes=self.classifier.classes_.shape[0])
        else:
            raise ValueError("The provided model does not have methods `predict_proba` or `predict`.")

        # Apply postprocessing
        predictions = self._apply_postprocessing(preds=y_pred, fit=False)

        # Increase the number of model queries
        self._num_queries += x.shape[0]

        return predictions

    def nb_classes(self):
        """
        Return the number of output classes.

        :return: Number of classes in the data, or `None` if the model is not fitted.
        :rtype: `int` or `None`
        """
        # Mirror `_get_nb_classes`: fall back to `classes_` for estimators
        # (e.g. SVC) that do not expose `n_classes_`.
        if hasattr(self.classifier, "n_classes_"):
            return self.classifier.n_classes_
        if hasattr(self.classifier, "classes_"):
            return self.classifier.classes_.shape[0]
        return None

    def save(self, filename, path=None):
        """
        Save the wrapped scikit-learn model to `filename + ".pickle"`.

        :param filename: Target file name without extension.
        :type filename: `str`
        :param path: Unused; kept for interface compatibility.
        :return: `None`
        """
        import pickle

        with open(filename + ".pickle", "wb") as file_pickle:
            pickle.dump(self.classifier, file=file_pickle)

    def reset_model_queries(self):
        """
        Set the number of model queries to 0
        """
        self._num_queries = 0

    @property
    def num_queries(self):
        """
        Return the number of model queries
        """
        return self._num_queries

    def _get_input_shape(self, model):
        """
        Infer the per-sample input shape from whichever fitted attribute the
        estimator exposes; returns `None` for unfitted/unknown models.
        """
        if hasattr(model, "n_features_"):
            _input_shape = (model.n_features_,)
        elif hasattr(model, "feature_importances_"):
            _input_shape = (len(model.feature_importances_),)
        elif hasattr(model, "coef_"):
            if len(model.coef_.shape) == 1:
                _input_shape = (model.coef_.shape[0],)
            else:
                _input_shape = (model.coef_.shape[1],)
        elif hasattr(model, "support_vectors_"):
            _input_shape = (model.support_vectors_.shape[1],)
        elif hasattr(model, "steps"):
            # Pipeline: the first step determines the expected input shape.
            _input_shape = self._get_input_shape(model.steps[0][1])
        else:
            logger.warning("Input shape not recognised. The model might not have been fitted.")
            _input_shape = None
        return _input_shape

    def _get_nb_classes(self):
        """
        Infer the number of classes from the fitted estimator; returns `None`
        (with a warning) for unfitted/unknown models.
        """
        if hasattr(self.classifier, "n_classes_"):
            _nb_classes = self.classifier.n_classes_
        elif hasattr(self.classifier, "classes_"):
            _nb_classes = self.classifier.classes_.shape[0]
        else:
            logger.warning("Number of classes not recognised. The model might not have been fitted.")
            _nb_classes = None
        return _nb_classes
#
# class ScikitlearnDecisionTreeClassifier(ScikitlearnClassifier):
# """
# Wrapper class for scikit-learn Decision Tree Classifier models.
# """
#
# def __init__(
# self, model, clip_values=None, preprocessing_defences=None, postprocessing_defences=None, preprocessing=(0, 1)
# ):
# """
# Create a `Classifier` instance from a scikit-learn Decision Tree Classifier model.
#
# :param model: scikit-learn Decision Tree Classifier model.
# :type model: `sklearn.tree.DecisionTreeClassifier`
# :param clip_values: Tuple of the form `(min, max)` representing the minimum and maximum values allowed
# for features.
# :type clip_values: `tuple`
# :param preprocessing_defences: Preprocessing defence(s) to be applied by the classifier.
# :type preprocessing_defences: :class:`.Preprocessor` or `list(Preprocessor)` instances
# :param postprocessing_defences: Postprocessing defence(s) to be applied by the classifier.
# :type postprocessing_defences: :class:`.Postprocessor` or `list(Postprocessor)` instances
# :param preprocessing: Tuple of the form `(subtractor, divider)` of floats or `np.ndarray` of values to be
# used for data preprocessing. The first value will be subtracted from the input. The input will then
# be divided by the second one.
# :type preprocessing: `tuple`
# """
# # pylint: disable=E0001
# from sklearn.tree import DecisionTreeClassifier
#
# if not isinstance(model, DecisionTreeClassifier) and model is not None:
# raise TypeError("Model must be of type sklearn.tree.DecisionTreeClassifier.")
#
# super(ScikitlearnDecisionTreeClassifier, self).__init__(
# model=model,
# clip_values=clip_values,
# preprocessing_defences=preprocessing_defences,
# postprocessing_defences=postprocessing_defences,
# preprocessing=preprocessing,
# )
# self.classifier = model
#
# def get_classes_at_node(self, node_id):
# """
# Returns the classification for a given node.
#
# :return: major class in node.
# :rtype: `float`
# """
# return np.argmax(self.classifier.tree_.value[node_id])
#
# def get_threshold_at_node(self, node_id):
# """
# Returns the threshold of given id for a node.
#
# :return: threshold value of feature split in this node.
# :rtype: `float`
# """
# return self.classifier.tree_.threshold[node_id]
#
# def get_feature_at_node(self, node_id):
# """
# Returns the feature of given id for a node.
#
# :return: feature index of feature split in this node.
# :rtype: `int`
# """
# return self.classifier.tree_.feature[node_id]
#
# def get_left_child(self, node_id):
# """
# Returns the id of the left child node of node_id.
#
# :return: the indices of the left child in the tree.
# :rtype: `int`
# """
# return self.classifier.tree_.children_left[node_id]
#
# def get_right_child(self, node_id):
# """
# Returns the id of the right child node of node_id.
#
# :return: the indices of the right child in the tree.
# :rtype: `int`
# """
# return self.classifier.tree_.children_right[node_id]
#
# def get_decision_path(self, x):
# """
# Returns the path through nodes in the tree when classifying x. Last one is leaf, first one root node.
#
# :return: the indices of the nodes in the array structure of the tree.
# :rtype: `np.ndarray`
# """
# if len(np.shape(x)) == 1:
# return self.classifier.decision_path(x.reshape(1, -1)).indices
#
# return self.classifier.decision_path(x).indices
#
# def get_values_at_node(self, node_id):
# """
# Returns the feature of given id for a node.
#
# :return: Normalized values at node node_id.
# :rtype: `nd.array`
# """
# return self.classifier.tree_.value[node_id] / np.linalg.norm(self.classifier.tree_.value[node_id])
#
# def _get_leaf_nodes(self, node_id, i_tree, class_label, box):
# from copy import deepcopy
# from art.metrics.verification_decisions_trees import LeafNode, Box, Interval
#
# leaf_nodes = list()
#
# if self.get_left_child(node_id) != self.get_right_child(node_id):
#
# node_left = self.get_left_child(node_id)
# node_right = self.get_right_child(node_id)
#
# box_left = deepcopy(box)
# box_right = deepcopy(box)
#
# feature = self.get_feature_at_node(node_id)
# box_split_left = Box(intervals={feature: Interval(-np.inf, self.get_threshold_at_node(node_id))})
# box_split_right = Box(intervals={feature: Interval(self.get_threshold_at_node(node_id), np.inf)})
#
# if box.intervals:
# box_left.intersect_with_box(box_split_left)
# box_right.intersect_with_box(box_split_right)
# else:
# box_left = box_split_left
# box_right = box_split_right
#
# leaf_nodes += self._get_leaf_nodes(node_left, i_tree, class_label, box_left)
# leaf_nodes += self._get_leaf_nodes(node_right, i_tree, class_label, box_right)
#
# else:
# leaf_nodes.append(
# LeafNode(
# tree_id=i_tree,
# class_label=class_label,
# node_id=node_id,
# box=box,
# value=self.get_values_at_node(node_id)[0, class_label],
# )
# )
#
# return leaf_nodes
#
#
# class ScikitlearnDecisionTreeRegressor(ScikitlearnDecisionTreeClassifier):
# """
# Wrapper class for scikit-learn Decision Tree Regressor models.
# """
#
# def __init__(
# self, model, clip_values=None, preprocessing_defences=None, postprocessing_defences=None, preprocessing=(0, 1)
# ):
# """
# Create a `Regressor` instance from a scikit-learn Decision Tree Regressor model.
#
# :param model: scikit-learn Decision Tree Regressor model.
# :type model: `sklearn.tree.DecisionTreeRegressor`
# :param clip_values: Tuple of the form `(min, max)` representing the minimum and maximum values allowed
# for features.
# :type clip_values: `tuple`
# :param preprocessing_defences: Preprocessing defence(s) to be applied by the classifier.
# :type preprocessing_defences: :class:`.Preprocessor` or `list(Preprocessor)` instances
# :param postprocessing_defences: Postprocessing defence(s) to be applied by the classifier.
# :type postprocessing_defences: :class:`.Postprocessor` or `list(Postprocessor)` instances
# :param preprocessing: Tuple of the form `(subtractor, divider)` of floats or `np.ndarray` of values to be
# used for data preprocessing. The first value will be subtracted from the input. The input will then
# be divided by the second one.
# :type preprocessing: `tuple`
# """
# # pylint: disable=E0001
# from sklearn.tree import DecisionTreeRegressor
#
# if not isinstance(model, DecisionTreeRegressor):
# raise TypeError("Model must be of type sklearn.tree.DecisionTreeRegressor.")
#
# ScikitlearnDecisionTreeClassifier.__init__(
# self,
# model=None,
# clip_values=clip_values,
# preprocessing_defences=preprocessing_defences,
# postprocessing_defences=postprocessing_defences,
# preprocessing=preprocessing,
# )
# self.classifier = model
#
# def get_values_at_node(self, node_id):
# """
# Returns the feature of given id for a node.
#
# :return: Normalized values at node node_id.
# :rtype: `nd.array`
# """
# return self.classifier.tree_.value[node_id]
#
# def _get_leaf_nodes(self, node_id, i_tree, class_label, box):
# from copy import deepcopy
# from art.metrics.verification_decisions_trees import LeafNode, Box, Interval
#
# leaf_nodes = list()
#
# if self.get_left_child(node_id) != self.get_right_child(node_id):
#
# node_left = self.get_left_child(node_id)
# node_right = self.get_right_child(node_id)
#
# box_left = deepcopy(box)
# box_right = deepcopy(box)
#
# feature = self.get_feature_at_node(node_id)
# box_split_left = Box(intervals={feature: Interval(-np.inf, self.get_threshold_at_node(node_id))})
# box_split_right = Box(intervals={feature: Interval(self.get_threshold_at_node(node_id), np.inf)})
#
# if box.intervals:
# box_left.intersect_with_box(box_split_left)
# box_right.intersect_with_box(box_split_right)
# else:
# box_left = box_split_left
# box_right = box_split_right
#
# leaf_nodes += self._get_leaf_nodes(node_left, i_tree, class_label, box_left)
# leaf_nodes += self._get_leaf_nodes(node_right, i_tree, class_label, box_right)
#
# else:
# leaf_nodes.append(
# LeafNode(
# tree_id=i_tree,
# class_label=class_label,
# node_id=node_id,
# box=box,
# value=self.get_values_at_node(node_id)[0, 0],
# )
# )
#
# return leaf_nodes
#
#
# class ScikitlearnExtraTreeClassifier(ScikitlearnDecisionTreeClassifier):
# """
# Wrapper class for scikit-learn Extra TreeClassifier Classifier models.
# """
#
# def __init__(
# self, model, clip_values=None, preprocessing_defences=None, postprocessing_defences=None, preprocessing=(0, 1)
# ):
# """
# Create a `Classifier` instance from a scikit-learn Extra TreeClassifier Classifier model.
#
# :param model: scikit-learn Extra TreeClassifier Classifier model.
# :type model: `sklearn.tree.ExtraTreeClassifier`
# :param clip_values: Tuple of the form `(min, max)` representing the minimum and maximum values allowed
# for features.
# :type clip_values: `tuple`
# :param preprocessing_defences: Preprocessing defence(s) to be applied by the classifier.
# :type preprocessing_defences: :class:`.Preprocessor` or `list(Preprocessor)` instances
# :param postprocessing_defences: Postprocessing defence(s) to be applied by the classifier.
# :type postprocessing_defences: :class:`.Postprocessor` or `list(Postprocessor)` instances
# :param preprocessing: Tuple of the form `(subtractor, divider)` of floats or `np.ndarray` of values to be
# used for data preprocessing. The first value will be subtracted from the input. The input will then
# be divided by the second one.
# :type preprocessing: `tuple`
# """
# # pylint: disable=E0001
# from sklearn.tree import ExtraTreeClassifier
#
# if not isinstance(model, ExtraTreeClassifier):
# raise TypeError("Model must be of type sklearn.tree.ExtraTreeClassifier.")
#
# super(ScikitlearnExtraTreeClassifier, self).__init__(
# model=model,
# clip_values=clip_values,
# preprocessing_defences=preprocessing_defences,
# postprocessing_defences=postprocessing_defences,
# preprocessing=preprocessing,
# )
# self.classifier = model
#
#
# class ScikitlearnAdaBoostClassifier(ScikitlearnClassifier):
# """
# Wrapper class for scikit-learn AdaBoost Classifier models.
# """
#
# def __init__(
# self, model, clip_values=None, preprocessing_defences=None, postprocessing_defences=None, preprocessing=(0, 1)
# ):
# """
# Create a `Classifier` instance from a scikit-learn AdaBoost Classifier model.
#
# :param model: scikit-learn AdaBoost Classifier model.
# :type model: `sklearn.ensemble.AdaBoostClassifier`
# :param clip_values: Tuple of the form `(min, max)` representing the minimum and maximum values allowed
# for features.
# :type clip_values: `tuple`
# :param preprocessing_defences: Preprocessing defence(s) to be applied by the classifier.
# :type preprocessing_defences: :class:`.Preprocessor` or `list(Preprocessor)` instances
# :param postprocessing_defences: Postprocessing defence(s) to be applied by the classifier.
# :type postprocessing_defences: :class:`.Postprocessor` or `list(Postprocessor)` instances
# :param preprocessing: Tuple of the form `(subtractor, divider)` of floats or `np.ndarray` of values to be
# used for data preprocessing. The first value will be subtracted from the input. The input will then
# be divided by the second one.
# :type preprocessing: `tuple`
# """
# # pylint: disable=E0001
# from sklearn.ensemble import AdaBoostClassifier
#
# if not isinstance(model, AdaBoostClassifier):
# raise TypeError("Model must be of type sklearn.ensemble.AdaBoostClassifier.")
#
# super(ScikitlearnAdaBoostClassifier, self).__init__(
# model=model,
# clip_values=clip_values,
# preprocessing_defences=preprocessing_defences,
# postprocessing_defences=postprocessing_defences,
# preprocessing=preprocessing,
# )
# self.classifier = model
#
#
# class ScikitlearnBaggingClassifier(ScikitlearnClassifier):
# """
# Wrapper class for scikit-learn Bagging Classifier models.
# """
#
# def __init__(
# self, model, clip_values=None, preprocessing_defences=None, postprocessing_defences=None, preprocessing=(0, 1)
# ):
# """
# Create a `Classifier` instance from a scikit-learn Bagging Classifier model.
#
# :param model: scikit-learn Bagging Classifier model.
# :type model: `sklearn.ensemble.BaggingClassifier`
# :param clip_values: Tuple of the form `(min, max)` representing the minimum and maximum values allowed
# for features.
# :type clip_values: `tuple`
# :param preprocessing_defences: Preprocessing defence(s) to be applied by the classifier.
# :type preprocessing_defences: :class:`.Preprocessor` or `list(Preprocessor)` instances
# :param postprocessing_defences: Postprocessing defence(s) to be applied by the classifier.
# :type postprocessing_defences: :class:`.Postprocessor` or `list(Postprocessor)` instances
# :param preprocessing: Tuple of the form `(subtractor, divider)` of floats or `np.ndarray` of values to be
# used for data preprocessing. The first value will be subtracted from the input. The input will then
# be divided by the second one.
# :type preprocessing: `tuple`
# """
# # pylint: disable=E0001
# from sklearn.ensemble import BaggingClassifier
#
# if not isinstance(model, BaggingClassifier):
# raise TypeError("Model must be of type sklearn.ensemble.BaggingClassifier.")
#
# super(ScikitlearnBaggingClassifier, self).__init__(
# model=model,
# clip_values=clip_values,
# preprocessing_defences=preprocessing_defences,
# postprocessing_defences=postprocessing_defences,
# preprocessing=preprocessing,
# )
# self.classifier = model
#
#
# class ScikitlearnExtraTreesClassifier(ScikitlearnClassifier, ClassifierDecisionTree):
# """
# Wrapper class for scikit-learn Extra Trees Classifier models.
# """
#
# def __init__(
# self, model, clip_values=None, preprocessing_defences=None, postprocessing_defences=None, preprocessing=(0, 1)
# ):
# """
# Create a `Classifier` instance from a scikit-learn Extra Trees Classifier model.
#
# :param model: scikit-learn Extra Trees Classifier model.
# :type model: `sklearn.ensemble.ExtraTreesClassifier`
# :param clip_values: Tuple of the form `(min, max)` representing the minimum and maximum values allowed
# for features.
# :type clip_values: `tuple`
# :param preprocessing_defences: Preprocessing defence(s) to be applied by the classifier.
# :type preprocessing_defences: :class:`.Preprocessor` or `list(Preprocessor)` instances
# :param postprocessing_defences: Postprocessing defence(s) to be applied by the classifier.
# :type postprocessing_defences: :class:`.Postprocessor` or `list(Postprocessor)` instances
# :param preprocessing: Tuple of the form `(subtractor, divider)` of floats or `np.ndarray` of values to be
# used for data preprocessing. The first value will be subtracted from the input. The input will then
# be divided by the second one.
# :type preprocessing: `tuple`
# """
# # pylint: disable=E0001
# from sklearn.ensemble import ExtraTreesClassifier
#
# if not isinstance(model, ExtraTreesClassifier):
# raise TypeError("Model must be of type sklearn.ensemble.ExtraTreesClassifier.")
#
# super(ScikitlearnExtraTreesClassifier, self).__init__(
# model=model,
# clip_values=clip_values,
# preprocessing_defences=preprocessing_defences,
# postprocessing_defences=postprocessing_defences,
# preprocessing=preprocessing,
# )
# self.classifier = model
#
# def get_trees(self): # lgtm [py/similar-function]
# """
# Get the decision trees.
#
# :return: A list of decision trees.
# :rtype: `[Tree]`
# """
# from art.metrics.verification_decisions_trees import Box, Tree
#
# trees = list()
#
# for i_tree, decision_tree_model in enumerate(self.classifier.estimators_):
# box = Box()
#
# # if num_classes == 2:
# # class_label = -1
# # else:
# # class_label = i_tree % num_classes
#
# extra_tree_classifier = ScikitlearnExtraTreeClassifier(model=decision_tree_model)
#
# for i_class in range(self.classifier.n_classes_):
# class_label = i_class
#
# # pylint: disable=W0212
# trees.append(
# Tree(
# class_id=class_label,
# leaf_nodes=extra_tree_classifier._get_leaf_nodes(0, i_tree, class_label, box),
# )
# )
#
# return trees
#
#
# class ScikitlearnGradientBoostingClassifier(ScikitlearnClassifier, ClassifierDecisionTree):
# """
# Wrapper class for scikit-learn Gradient Boosting Classifier models.
# """
#
# def __init__(
# self, model, clip_values=None, preprocessing_defences=None, postprocessing_defences=None, preprocessing=(0, 1)
# ):
# """
# Create a `Classifier` instance from a scikit-learn Gradient Boosting Classifier model.
#
# :param model: scikit-learn Gradient Boosting Classifier model.
# :type model: `sklearn.ensemble.GradientBoostingClassifier`
# :param clip_values: Tuple of the form `(min, max)` representing the minimum and maximum values allowed
# for features.
# :type clip_values: `tuple`
# :param preprocessing_defences: Preprocessing defence(s) to be applied by the classifier.
# :type preprocessing_defences: :class:`.Preprocessor` or `list(Preprocessor)` instances
# :param postprocessing_defences: Postprocessing defence(s) to be applied by the classifier.
# :type postprocessing_defences: :class:`.Postprocessor` or `list(Postprocessor)` instances
# :param preprocessing: Tuple of the form `(subtractor, divider)` of floats or `np.ndarray` of values to be
# used for data preprocessing. The first value will be subtracted from the input. The input will then
# be divided by the second one.
# :type preprocessing: `tuple`
# """
# # pylint: disable=E0001
# from sklearn.ensemble import GradientBoostingClassifier
#
# if not isinstance(model, GradientBoostingClassifier):
# raise TypeError("Model must be of type sklearn.ensemble.GradientBoostingClassifier.")
#
# super(ScikitlearnGradientBoostingClassifier, self).__init__(
# model=model,
# clip_values=clip_values,
# preprocessing_defences=preprocessing_defences,
# postprocessing_defences=postprocessing_defences,
# preprocessing=preprocessing,
# )
# self.classifier = model
#
# def get_trees(self):
# """
# Get the decision trees.
#
# :return: A list of decision trees.
# :rtype: `[Tree]`
# """
# from art.metrics.verification_decisions_trees import Box, Tree
#
# trees = list()
# num_trees, num_classes = self.classifier.estimators_.shape
#
# for i_tree in range(num_trees):
# box = Box()
#
# for i_class in range(num_classes):
# decision_tree_classifier = ScikitlearnDecisionTreeRegressor(
# model=self.classifier.estimators_[i_tree, i_class]
# )
#
# if num_classes == 2:
# class_label = None
# else:
# class_label = i_class
#
# # pylint: disable=W0212
# trees.append(
# Tree(
# class_id=class_label,
# leaf_nodes=decision_tree_classifier._get_leaf_nodes(0, i_tree, class_label, box),
# )
# )
#
# return trees
#
#
# class ScikitlearnRandomForestClassifier(ScikitlearnClassifier):
# """
# Wrapper class for scikit-learn Random Forest Classifier models.
# """
#
# def __init__(
# self, model, clip_values=None, preprocessing_defences=None, postprocessing_defences=None, preprocessing=(0, 1)
# ):
# """
# Create a `Classifier` instance from a scikit-learn Random Forest Classifier model.
#
# :param model: scikit-learn Random Forest Classifier model.
# :type model: `sklearn.ensemble.RandomForestClassifier`
# :param clip_values: Tuple of the form `(min, max)` representing the minimum and maximum values allowed
# for features.
# :type clip_values: `tuple`
# :param preprocessing_defences: Preprocessing defence(s) to be applied by the classifier.
# :type preprocessing_defences: :class:`.Preprocessor` or `list(Preprocessor)` instances
# :param postprocessing_defences: Postprocessing defence(s) to be applied by the classifier.
# :type postprocessing_defences: :class:`.Postprocessor` or `list(Postprocessor)` instances
# :param preprocessing: Tuple of the form `(subtractor, divider)` of floats or `np.ndarray` of values to be
# used for data preprocessing. The first value will be subtracted from the input. The input will then
# be divided by the second one.
# :type preprocessing: `tuple`
# """
# # pylint: disable=E0001
# from sklearn.ensemble import RandomForestClassifier
#
# if not isinstance(model, RandomForestClassifier):
# raise TypeError("Model must be of type sklearn.ensemble.RandomForestClassifier.")
#
# super(ScikitlearnRandomForestClassifier, self).__init__(
# model=model,
# clip_values=clip_values,
# preprocessing_defences=preprocessing_defences,
# postprocessing_defences=postprocessing_defences,
# preprocessing=preprocessing,
# )
# self.classifier = model
#
# def get_trees(self): # lgtm [py/similar-function]
# """
# Get the decision trees.
#
# :return: A list of decision trees.
# :rtype: `[Tree]`
# """
# from art.metrics.verification_decisions_trees import Box, Tree
#
# trees = list()
#
# for i_tree, decision_tree_model in enumerate(self.classifier.estimators_):
# box = Box()
#
# # if num_classes == 2:
# # class_label = -1
# # else:
# # class_label = i_tree % num_classes
#
# decision_tree_classifier = ScikitlearnDecisionTreeClassifier(model=decision_tree_model)
#
# for i_class in range(self.classifier.n_classes_):
# class_label = i_class
#
# # pylint: disable=W0212
# trees.append(
# Tree(
# class_id=class_label,
# leaf_nodes=decision_tree_classifier._get_leaf_nodes(0, i_tree, class_label, box),
# )
# )
#
# return trees
#
#
# class ScikitlearnLogisticRegression(ScikitlearnClassifier, ClassifierGradients):
# """
# Wrapper class for scikit-learn Logistic Regression models.
# """
#
# def __init__(
# self, model, clip_values=None, preprocessing_defences=None, postprocessing_defences=None, preprocessing=(0, 1)
# ):
# """
# Create a `Classifier` instance from a scikit-learn Logistic Regression model.
#
# :param model: scikit-learn LogisticRegression model
# :type model: `sklearn.linear_model.LogisticRegression`
# :param clip_values: Tuple of the form `(min, max)` representing the minimum and maximum values allowed
# for features.
# :type clip_values: `tuple`
# :param preprocessing_defences: Preprocessing defence(s) to be applied by the classifier.
# :type preprocessing_defences: :class:`.Preprocessor` or `list(Preprocessor)` instances
# :param postprocessing_defences: Postprocessing defence(s) to be applied by the classifier.
# :type postprocessing_defences: :class:`.Postprocessor` or `list(Postprocessor)` instances
# :param preprocessing: Tuple of the form `(subtractor, divider)` of floats or `np.ndarray` of values to be
# used for data preprocessing. The first value will be subtracted from the input. The input will then
# be divided by the second one.
# :type preprocessing: `tuple`
# """
# super(ScikitlearnLogisticRegression, self).__init__(
# model=model,
# clip_values=clip_values,
# preprocessing_defences=preprocessing_defences,
# postprocessing_defences=postprocessing_defences,
# preprocessing=preprocessing,
# )
# self.classifier = model
#
# def nb_classes(self):
# """
# Return the number of output classes.
#
# :return: Number of classes in the data.
# :rtype: `int` or `None`
# """
# if hasattr(self.classifier, "coef_"):
# _nb_classes = self.classifier.classes_.shape[0]
# else:
# _nb_classes = None
# return _nb_classes
#
# def class_gradient(self, x, label=None, **kwargs):
# """
# Compute per-class derivatives w.r.t. `x`.
#
# | Paper link: http://cs229.stanford.edu/proj2016/report/ItkinaWu-AdversarialAttacksonImageRecognition-report.pdf
# | Typo in https://arxiv.org/abs/1605.07277 (equation 6)
#
# :param x: Sample input with shape as expected by the model.
# :type x: `np.ndarray`
# :param label: Index of a specific per-class derivative. If an integer is provided, the gradient of that class
# output is computed for all samples. If multiple values as provided, the first dimension should
# match the batch size of `x`, and each value will be used as target for its corresponding sample in
# `x`. If `None`, then gradients for all classes will be computed for each sample.
# :type label: `int` or `list`
# :return: Array of gradients of input features w.r.t. each class in the form
# `(batch_size, nb_classes, input_shape)` when computing for all classes, otherwise shape becomes
# `(batch_size, 1, input_shape)` when `label` parameter is specified.
# :rtype: `np.ndarray`
# """
# if not hasattr(self.classifier, "coef_"):
# raise ValueError(
# """Model has not been fitted. Run function `fit(x, y)` of classifier first or provide a
# fitted model."""
# )
#
# nb_samples = x.shape[0]
#
# # Apply preprocessing
# x_preprocessed, _ = self._apply_preprocessing(x, y=None, fit=False)
#
# y_pred = self.classifier.predict_proba(X=x_preprocessed)
# weights = self.classifier.coef_
#
# if self.nb_classes() > 2:
# w_weighted = np.matmul(y_pred, weights)
#
# def _f_class_gradient(i_class, i_sample):
# if self.nb_classes() == 2:
# return (-1.0) ** (i_class + 1.0) * y_pred[i_sample, 0] * y_pred[i_sample, 1] * weights[0, :]
#
# return weights[i_class, :] - w_weighted[i_sample, :]
#
# if label is None:
# # Compute the gradients w.r.t. all classes
# class_gradients = list()
#
# for i_class in range(self.nb_classes()):
# class_gradient = np.zeros(x.shape)
# for i_sample in range(nb_samples):
# class_gradient[i_sample, :] += _f_class_gradient(i_class, i_sample)
# class_gradients.append(class_gradient)
#
# gradients = np.swapaxes(np.array(class_gradients), 0, 1)
#
# elif isinstance(label, (int, np.integer)):
# # Compute the gradients only w.r.t. the provided label
# class_gradient = np.zeros(x.shape)
# for i_sample in range(nb_samples):
# class_gradient[i_sample, :] += _f_class_gradient(label, i_sample)
#
# gradients = np.swapaxes(np.array([class_gradient]), 0, 1)
#
# elif (
# (isinstance(label, list) and len(label) == nb_samples)
# or isinstance(label, np.ndarray)
# and label.shape == (nb_samples,)
# ):
# # For each sample, compute the gradients w.r.t. the indicated target class (possibly distinct)
# class_gradients = list()
# unique_labels = list(np.unique(label))
#
# for unique_label in unique_labels:
# class_gradient = np.zeros(x.shape)
# for i_sample in range(nb_samples):
# # class_gradient[i_sample, :] += label[i_sample, unique_label] * (weights[unique_label, :]
# # - w_weighted[i_sample, :])
# class_gradient[i_sample, :] += _f_class_gradient(unique_label, i_sample)
#
# class_gradients.append(class_gradient)
#
# gradients = np.swapaxes(np.array(class_gradients), 0, 1)
# lst = [unique_labels.index(i) for i in label]
# gradients = np.expand_dims(gradients[np.arange(len(gradients)), lst], axis=1)
#
# else:
# raise TypeError("Unrecognized type for argument `label` with type " + str(type(label)))
#
# gradients = self._apply_preprocessing_gradient(x, gradients)
#
# return gradients
#
# def loss_gradient(self, x, y, **kwargs):
# """
# Compute the gradient of the loss function w.r.t. `x`.
#
# | Paper link: http://cs229.stanford.edu/proj2016/report/ItkinaWu-AdversarialAttacksonImageRecognition-report.pdf
# | Typo in https://arxiv.org/abs/1605.07277 (equation 6)
#
# :param x: Sample input with shape as expected by the model.
# :type x: `np.ndarray`
# :param y: Target values (class labels) one-hot-encoded of shape (nb_samples, nb_classes) or indices of shape
# (nb_samples,).
# :type y: `np.ndarray`
# :return: Array of gradients of the same shape as `x`.
# :rtype: `np.ndarray`
# """
# # pylint: disable=E0001
# from sklearn.utils.class_weight import compute_class_weight
#
# if not hasattr(self.classifier, "coef_"):
# raise ValueError(
# """Model has not been fitted. Run function `fit(x, y)` of classifier first or provide a
# fitted model."""
# )
#
# # Apply preprocessing
# x_preprocessed, y_preprocessed = self._apply_preprocessing(x, y, fit=False)
#
# num_samples, _ = x_preprocessed.shape
# gradients = np.zeros(x_preprocessed.shape)
#
# y_index = np.argmax(y_preprocessed, axis=1)
# if self.classifier.class_weight is None or self.classifier.class_weight == "balanced":
# class_weight = np.ones(self.nb_classes())
# else:
# class_weight = compute_class_weight(
# class_weight=self.classifier.class_weight, classes=self.classifier.classes_, y=y_index
# )
#
# y_pred = self.classifier.predict_proba(X=x_preprocessed)
# weights = self.classifier.coef_
#
# # Consider the special case of a binary logistic regression model:
# if self.nb_classes() == 2:
# for i_sample in range(num_samples):
# gradients[i_sample, :] += (
# class_weight[1] * (1.0 - y_preprocessed[i_sample, 1])
# - class_weight[0] * (1.0 - y_preprocessed[i_sample, 0])
# ) * (y_pred[i_sample, 0] * y_pred[i_sample, 1] * weights[0, :])
# else:
# w_weighted = np.matmul(y_pred, weights)
#
# for i_sample in range(num_samples):
# for i_class in range(self.nb_classes()):
# gradients[i_sample, :] += (
# class_weight[i_class]
# * (1.0 - y_preprocessed[i_sample, i_class])
# * (weights[i_class, :] - w_weighted[i_sample, :])
# )
#
# gradients = self._apply_preprocessing_gradient(x, gradients)
#
# return gradients
class ScikitlearnSVC(ScikitlearnClassifier, ClassifierGradients):
    """
    Wrapper class for scikit-learn C-Support Vector Classification models.

    Wraps either `sklearn.svm.SVC` or `sklearn.svm.LinearSVC` and exposes
    class/loss gradients computed analytically from the fitted model's dual
    coefficients (SVC) or primal weights (LinearSVC). `predict` additionally
    applies a configured image transformation before classification and
    counts model queries.
    """
    def __init__(
        self,
        model,
        trans_configs,
        image_shape,
        clip_values=None,
        preprocessing_defences=None,
        postprocessing_defences=None,
        preprocessing=(0, 1)
    ):
        """
        Create a `Classifier` instance from a scikit-learn C-Support Vector Classification model.
        :param model: scikit-learn C-Support Vector Classification model.
        :type model: `sklearn.svm.SVC` or `sklearn.svm.LinearSVC`
        :param trans_configs: the corresponding transformation(s)
        :type trans_configs: dictionary.
        :param image_shape: shape of one input image; indexed as height/width/channels
               by `predict` when restoring flattened inputs.
        :type image_shape: `tuple`
        :param clip_values: Tuple of the form `(min, max)` representing the minimum and maximum values allowed
               for features.
        :type clip_values: `tuple`
        :param preprocessing_defences: Preprocessing defence(s) to be applied by the classifier.
        :type preprocessing_defences: :class:`.Preprocessor` or `list(Preprocessor)` instances
        :param postprocessing_defences: Postprocessing defence(s) to be applied by the classifier.
        :type postprocessing_defences: :class:`.Postprocessor` or `list(Postprocessor)` instances
        :param preprocessing: Tuple of the form `(subtractor, divider)` of floats or `np.ndarray` of values to be
               used for data preprocessing. The first value will be subtracted from the input. The input will then
               be divided by the second one.
        :type preprocessing: `tuple`
        """
        # pylint: disable=E0001
        from sklearn.svm import SVC, LinearSVC
        # Only the two supported sklearn SVM types are accepted.
        if not isinstance(model, SVC) and not isinstance(model, LinearSVC):
            raise TypeError(
                "Model must be of type sklearn.svm.SVC or sklearn.svm.LinearSVC. Found type {}".format(type(model))
            )
        super(ScikitlearnSVC, self).__init__(
            model=model,
            trans_configs=trans_configs,
            image_shape=image_shape,
            clip_values=clip_values,
            preprocessing_defences=preprocessing_defences,
            postprocessing_defences=postprocessing_defences,
            preprocessing=preprocessing,
        )
        self.classifier = model
        self._trans_configs = trans_configs
        self._image_shape = image_shape
        # Resolve the kernel callable once; reused by q_submatrix.
        self._kernel = self._kernel_func()
        # Counter of how many samples have been sent through `predict`.
        self._num_queries = 0
    def class_gradient(self, x, label=None, **kwargs):
        """
        Compute per-class derivatives w.r.t. `x`.
        :param x: Sample input with shape as expected by the model.
        :type x: `np.ndarray`
        :param label: Index of a specific per-class derivative. If an integer is provided, the gradient of that class
                      output is computed for all samples. If multiple values as provided, the first dimension should
                      match the batch size of `x`, and each value will be used as target for its corresponding sample in
                      `x`. If `None`, then gradients for all classes will be computed for each sample.
        :type label: `int` or `list`
        :return: Array of gradients of input features w.r.t. each class in the form
                 `(batch_size, nb_classes, input_shape)` when computing for all classes, otherwise shape becomes
                 `(batch_size, 1, input_shape)` when `label` parameter is specified.
        :rtype: `np.ndarray`
        """
        # pylint: disable=E0001
        from sklearn.svm import SVC, LinearSVC
        # Apply preprocessing
        x_preprocessed, _ = self._apply_preprocessing(x, y=None, fit=False)
        num_samples, _ = x_preprocessed.shape
        if isinstance(self.classifier, SVC):
            # sklearn sets fit_status_ to non-zero when fitting did not converge correctly.
            if self.classifier.fit_status_:
                raise AssertionError("Model has not been fitted correctly.")
            # support_indices[k]:support_indices[k+1] is the slice of support vectors
            # belonging to class k (n_support_ counts support vectors per class).
            support_indices = [0] + list(np.cumsum(self.classifier.n_support_))
            # Binary SVC stores the decision function with flipped sign relative to
            # the multi-class case, hence the sign flip below.
            if self.nb_classes() == 2:
                sign_multiplier = -1
            else:
                sign_multiplier = 1
            if label is None:
                # Gradient of every class score for every sample.
                gradients = np.zeros((x_preprocessed.shape[0], self.nb_classes(), x_preprocessed.shape[1]))
                for i_label in range(self.nb_classes()):
                    for i_sample in range(num_samples):
                        for not_label in range(self.nb_classes()):
                            if i_label != not_label:
                                # dual_coef_ has shape (n_classes - 1, n_SV); the ternary
                                # expressions map the "other" class index into that
                                # reduced row coordinate system.
                                if not_label < i_label:
                                    label_multiplier = -1
                                else:
                                    label_multiplier = 1
                                for label_sv in range(support_indices[i_label], support_indices[i_label + 1]):
                                    alpha_i_k_y_i = self.classifier.dual_coef_[
                                        not_label if not_label < i_label else not_label - 1, label_sv
                                    ]
                                    grad_kernel = self._get_kernel_gradient_sv(label_sv, x_preprocessed[i_sample])
                                    gradients[i_sample, i_label] += label_multiplier * alpha_i_k_y_i * grad_kernel
                                for not_label_sv in range(support_indices[not_label], support_indices[not_label + 1]):
                                    alpha_i_k_y_i = self.classifier.dual_coef_[
                                        i_label if i_label < not_label else i_label - 1, not_label_sv
                                    ]
                                    grad_kernel = self._get_kernel_gradient_sv(not_label_sv, x_preprocessed[i_sample])
                                    gradients[i_sample, i_label] += label_multiplier * alpha_i_k_y_i * grad_kernel
            elif isinstance(label, (int, np.integer)):
                # Single target class for all samples.
                gradients = np.zeros((x_preprocessed.shape[0], 1, x_preprocessed.shape[1]))
                for i_sample in range(num_samples):
                    for not_label in range(self.nb_classes()):
                        if label != not_label:
                            if not_label < label:
                                label_multiplier = -1
                            else:
                                label_multiplier = 1
                            for label_sv in range(support_indices[label], support_indices[label + 1]):
                                alpha_i_k_y_i = self.classifier.dual_coef_[
                                    not_label if not_label < label else not_label - 1, label_sv
                                ]
                                grad_kernel = self._get_kernel_gradient_sv(label_sv, x_preprocessed[i_sample])
                                gradients[i_sample, 0] += label_multiplier * alpha_i_k_y_i * grad_kernel
                            for not_label_sv in range(support_indices[not_label], support_indices[not_label + 1]):
                                alpha_i_k_y_i = self.classifier.dual_coef_[
                                    label if label < not_label else label - 1, not_label_sv
                                ]
                                grad_kernel = self._get_kernel_gradient_sv(not_label_sv, x_preprocessed[i_sample])
                                gradients[i_sample, 0] += label_multiplier * alpha_i_k_y_i * grad_kernel
            elif (
                (isinstance(label, list) and len(label) == num_samples)
                or isinstance(label, np.ndarray)
                and label.shape == (num_samples,)
            ):
                # Per-sample target class.
                gradients = np.zeros((x_preprocessed.shape[0], 1, x_preprocessed.shape[1]))
                for i_sample in range(num_samples):
                    for not_label in range(self.nb_classes()):
                        if label[i_sample] != not_label:
                            if not_label < label[i_sample]:
                                label_multiplier = -1
                            else:
                                label_multiplier = 1
                            for label_sv in range(
                                support_indices[label[i_sample]], support_indices[label[i_sample] + 1]
                            ):
                                alpha_i_k_y_i = self.classifier.dual_coef_[
                                    not_label if not_label < label[i_sample] else not_label - 1, label_sv
                                ]
                                grad_kernel = self._get_kernel_gradient_sv(label_sv, x_preprocessed[i_sample])
                                gradients[i_sample, 0] += label_multiplier * alpha_i_k_y_i * grad_kernel
                            for not_label_sv in range(support_indices[not_label], support_indices[not_label + 1]):
                                alpha_i_k_y_i = self.classifier.dual_coef_[
                                    label[i_sample] if label[i_sample] < not_label else label[i_sample] - 1,
                                    not_label_sv,
                                ]
                                grad_kernel = self._get_kernel_gradient_sv(not_label_sv, x_preprocessed[i_sample])
                                gradients[i_sample, 0] += label_multiplier * alpha_i_k_y_i * grad_kernel
            else:
                raise TypeError("Unrecognized type for argument `label` with type " + str(type(label)))
            gradients = self._apply_preprocessing_gradient(x, gradients * sign_multiplier)
        elif isinstance(self.classifier, LinearSVC):
            # Linear SVM: the gradient of each class score is simply the weight row
            # (with a sign flip for the single decision function in the binary case).
            if label is None:
                gradients = np.zeros((x_preprocessed.shape[0], self.nb_classes(), x_preprocessed.shape[1]))
                for i in range(self.nb_classes()):
                    for i_sample in range(num_samples):
                        if self.nb_classes() == 2:
                            gradients[i_sample, i] = self.classifier.coef_[0] * (2 * i - 1)
                        else:
                            gradients[i_sample, i] = self.classifier.coef_[i]
            elif isinstance(label, (int, np.integer)):
                gradients = np.zeros((x_preprocessed.shape[0], 1, x_preprocessed.shape[1]))
                for i_sample in range(num_samples):
                    if self.nb_classes() == 2:
                        gradients[i_sample, 0] = self.classifier.coef_[0] * (2 * label - 1)
                    else:
                        gradients[i_sample, 0] = self.classifier.coef_[label]
            elif (
                (isinstance(label, list) and len(label) == num_samples)
                or isinstance(label, np.ndarray)
                and label.shape == (num_samples,)
            ):
                gradients = np.zeros((x_preprocessed.shape[0], 1, x_preprocessed.shape[1]))
                for i_sample in range(num_samples):
                    if self.nb_classes() == 2:
                        gradients[i_sample, 0] = self.classifier.coef_[0] * (2 * label[i_sample] - 1)
                    else:
                        gradients[i_sample, 0] = self.classifier.coef_[label[i_sample]]
            else:
                raise TypeError("Unrecognized type for argument `label` with type " + str(type(label)))
            gradients = self._apply_preprocessing_gradient(x, gradients)
        return gradients
    def _kernel_grad(self, sv, x_sample):
        """
        Applies the kernel gradient to a support vector.
        :param sv: A support vector.
        :type sv: `np.ndarray`
        :param x_sample: The sample the gradient is taken with respect to.
        :type x_sample: `np.ndarray`
        :return: the kernel gradient.
        :rtype: `np.ndarray`
        """
        # pylint: disable=W0212
        if self.classifier.kernel == "linear":
            # d/dx <x, sv> = sv
            grad = sv
        elif self.classifier.kernel == "poly":
            grad = (
                self.classifier.degree
                * (self.classifier._gamma * np.sum(x_sample * sv) + self.classifier.coef0) ** (self.classifier.degree - 1)
                * sv
            )
        elif self.classifier.kernel == "rbf":
            # NOTE(review): the exponent uses the L2 norm, not the squared norm; the
            # sklearn rbf kernel is exp(-gamma * ||x - sv||^2), whose gradient would
            # use the squared norm here. Confirm whether this is intentional.
            grad = (
                2
                * self.classifier._gamma
                * (-1)
                * np.exp(-self.classifier._gamma * np.linalg.norm(x_sample - sv, ord=2))
                * (x_sample - sv)
            )
        elif self.classifier.kernel == "sigmoid":
            raise NotImplementedError
        else:
            raise NotImplementedError("Loss gradients for kernel '{}' are not implemented.".format(self.classifier.kernel))
        return grad
    def _get_kernel_gradient_sv(self, i_sv, x_sample):
        """
        Applies the kernel gradient to all of a model's support vectors.
        :param i_sv: A support vector index.
        :type i_sv: `int`
        :param x_sample: A sample vector.
        :type x_sample: `np.ndarray`
        :return: The kernelized product of the vectors.
        :rtype: `np.ndarray`
        """
        x_i = self.classifier.support_vectors_[i_sv, :]
        return self._kernel_grad(x_i, x_sample)
    def loss_gradient(self, x, y, **kwargs):
        """
        Compute the gradient of the loss function w.r.t. `x`.
        Following equation (1) with lambda=0.
        | Paper link: https://pralab.diee.unica.it/sites/default/files/biggio14-svm-chapter.pdf
        :param x: Sample input with shape as expected by the model.
        :type x: `np.ndarray`
        :param y: Target values (class labels) one-hot-encoded of shape (nb_samples, nb_classes) or indices of shape
                  (nb_samples,).
        :type y: `np.ndarray`
        :return: Array of gradients of the same shape as `x`.
        :rtype: `np.ndarray`
        """
        # pylint: disable=E0001
        from sklearn.svm import SVC, LinearSVC
        # Apply preprocessing
        x_preprocessed, y_preprocessed = self._apply_preprocessing(x, y, fit=False)
        num_samples, _ = x_preprocessed.shape
        gradients = np.zeros_like(x_preprocessed)
        # Recover integer class indices from the one-hot labels.
        y_index = np.argmax(y_preprocessed, axis=1)
        if isinstance(self.classifier, SVC):
            if self.classifier.fit_status_:
                raise AssertionError("Model has not been fitted correctly.")
            # Binary SVC flips the sign of the decision function vs. multi-class.
            if y_preprocessed.shape[1] == 2:
                sign_multiplier = 1
            else:
                sign_multiplier = -1
            i_not_label_i = None
            label_multiplier = None
            # Per-class slices of the support vector array (see class_gradient).
            support_indices = [0] + list(np.cumsum(self.classifier.n_support_))
            for i_sample in range(num_samples):
                i_label = y_index[i_sample]
                for i_not_label in range(self.nb_classes()):
                    if i_label != i_not_label:
                        # Map the competing class into dual_coef_'s (n_classes - 1) rows.
                        if i_not_label < i_label:
                            i_not_label_i = i_not_label
                            label_multiplier = -1
                        elif i_not_label > i_label:
                            i_not_label_i = i_not_label - 1
                            label_multiplier = 1
                        for i_label_sv in range(support_indices[i_label], support_indices[i_label + 1]):
                            alpha_i_k_y_i = self.classifier.dual_coef_[i_not_label_i, i_label_sv] * label_multiplier
                            grad_kernel = self._get_kernel_gradient_sv(i_label_sv, x_preprocessed[i_sample])
                            gradients[i_sample, :] += sign_multiplier * alpha_i_k_y_i * grad_kernel
                        for i_not_label_sv in range(support_indices[i_not_label], support_indices[i_not_label + 1]):
                            alpha_i_k_y_i = self.classifier.dual_coef_[i_not_label_i, i_not_label_sv] * label_multiplier
                            grad_kernel = self._get_kernel_gradient_sv(i_not_label_sv, x_preprocessed[i_sample])
                            gradients[i_sample, :] += sign_multiplier * alpha_i_k_y_i * grad_kernel
        elif isinstance(self.classifier, LinearSVC):
            for i_sample in range(num_samples):
                i_label = y_index[i_sample]
                if self.nb_classes() == 2:
                    # Single weight row; sign depends on which class is the target.
                    i_label_i = 0
                    if i_label == 0:
                        label_multiplier = 1
                    elif i_label == 1:
                        label_multiplier = -1
                    else:
                        raise ValueError("Label index not recognized because it is not 0 or 1.")
                else:
                    i_label_i = i_label
                    label_multiplier = -1
                gradients[i_sample] = label_multiplier * self.classifier.coef_[i_label_i]
        else:
            raise TypeError("Model not recognized.")
        gradients = self._apply_preprocessing_gradient(x, gradients)
        return gradients
    def _kernel_func(self):
        """
        Return the function for the kernel of this SVM.
        :return: A callable kernel function.
        :rtype: `str` or callable
        """
        # pylint: disable=E0001
        from sklearn.svm import SVC, LinearSVC
        from sklearn.metrics.pairwise import polynomial_kernel, linear_kernel, rbf_kernel
        # LinearSVC has no `kernel` attribute; it is linear by definition.
        if isinstance(self.classifier, LinearSVC):
            kernel = "linear"
        elif isinstance(self.classifier, SVC):
            kernel = self.classifier.kernel
        else:
            raise NotImplementedError("SVM model not yet supported.")
        if kernel == "linear":
            kernel_func = linear_kernel
        elif kernel == "poly":
            kernel_func = polynomial_kernel
        elif kernel == "rbf":
            kernel_func = rbf_kernel
        elif callable(kernel):
            # A user-supplied custom kernel callable is used as-is.
            kernel_func = kernel
        else:
            raise NotImplementedError("Kernel '{}' not yet supported.".format(kernel))
        return kernel_func
    def q_submatrix(self, rows, cols):
        """
        Returns the q submatrix of this SVM indexed by the arrays at rows and columns.
        :param rows: the row vectors.
        :type rows: `np.ndarray`
        :param cols: the column vectors.
        :type cols: `np.ndarray`
        :return: a submatrix of Q.
        :rtype: `np.ndarray`
        """
        submatrix_shape = (rows.shape[0], cols.shape[0])
        y_row = self.classifier.predict(rows)
        y_col = self.classifier.predict(cols)
        # Remap label 0 to -1 so labels live in {-1, +1} as in the SVM dual
        # formulation Q[i, j] = y_i * y_j * K(x_i, x_j).
        y_row[y_row == 0] = -1
        y_col[y_col == 0] = -1
        q_rc = np.zeros(submatrix_shape)
        for row in range(q_rc.shape[0]):
            for col in range(q_rc.shape[1]):
                q_rc[row][col] = self._kernel([rows[row]], [cols[col]])[0][0] * y_row[row] * y_col[col]
        return q_rc
    def predict(self, x, **kwargs):
        """
        Perform prediction for a batch of inputs.
        :param x: Test set.
        :type x: `np.ndarray`
        :return: Array of predictions of shape `(nb_inputs, nb_classes)`.
        :rtype: `np.ndarray`
        """
        # pylint: disable=E0001
        from sklearn.svm import SVC
        # Apply transformation
        _num_examples = x.shape[0]
        if len(x.shape) <= 2:
            # Flattened input: restore the (N, H, W, C) image layout before transforming.
            x = x.reshape((-1, self._image_shape[0], self._image_shape[1], self._image_shape[2]))
        # NOTE(review): `transform` and `self._input_shape` are not defined in this
        # class body -- presumably supplied by the enclosing module and the base
        # class respectively; confirm.
        x_preprocessed = transform(x, self._trans_configs)
        _input_shape = [i for i in self._input_shape]
        _input_shape = tuple([_num_examples] + _input_shape)
        x_preprocessed = x_preprocessed.reshape(_input_shape)
        # Apply defences
        x_preprocessed, _ = self._apply_preprocessing(x_preprocessed, y=None, fit=False)
        if isinstance(self.classifier, SVC) and self.classifier.probability:
            # SVC trained with probability=True can emit calibrated probabilities.
            y_pred = self.classifier.predict_proba(X=x_preprocessed)
        else:
            # Otherwise one-hot encode the hard label predictions.
            y_pred_label = self.classifier.predict(X=x_preprocessed)
            targets = np.array(y_pred_label).reshape(-1)
            one_hot_targets = np.eye(self.nb_classes())[targets]
            y_pred = one_hot_targets
        # increase the number of model queries
        self._num_queries += x.shape[0]
        return y_pred
    def reset_model_queries(self):
        """
        Set the number of model queries to 0
        """
        self._num_queries = 0
    @property
    def queries(self):
        """
        Return the number of model queries
        """
        return self._num_queries
    def nb_classes(self):
        """
        Return the number of output classes.
        :return: Number of classes in the data.
        :rtype: `int` or `None`
        """
        # classes_ only exists once the model has been fitted.
        if hasattr(self.classifier, "classes_"):
            _nb_classes = len(self.classifier.classes_)
        else:
            _nb_classes = None
        return _nb_classes
# Backwards-compatible alias: LinearSVC models are served by the same wrapper,
# which branches on the concrete sklearn type internally.
ScikitlearnLinearSVC = ScikitlearnSVC
| 43.503597 | 123 | 0.606356 |
0665ec84c5b902d0afd5fea227ebb748721af55d | 2,182 | py | Python | src/utils/graph_utils.py | tumeteor/ST-GCN | aaf682df01c5ce6cca94b4b6d43513e87755f094 | [
"Apache-2.0"
] | 1 | 2021-01-05T21:48:02.000Z | 2021-01-05T21:48:02.000Z | src/utils/graph_utils.py | tumeteor/ST-GCN | aaf682df01c5ce6cca94b4b6d43513e87755f094 | [
"Apache-2.0"
] | null | null | null | src/utils/graph_utils.py | tumeteor/ST-GCN | aaf682df01c5ce6cca94b4b6d43513e87755f094 | [
"Apache-2.0"
] | null | null | null | import pandas as pd
import numpy as np
def get_berlin_graph(g, berlin_lat=52, berlin_lon=13):
    """Restrict a JURBEY graph to the Berlin area, in place.

    Every node whose coordinates lie more than one degree away from the
    rounded Berlin latitude/longitude is removed from the graph.

    Args:
        g (JURBEY): the map graph (mutated in place)
        berlin_lat (int): the rounded Berlin latitude
        berlin_lon (int): the rounded Berlin longitude

    Returns:
        JURBEY: the same graph instance with out-of-area nodes removed
    """
    # Iterate over a snapshot of the node ids so removal is safe mid-loop.
    for node_id in list(g.nodes()):
        coord = g.nodes[node_id]['data'].coord
        too_far_lat = abs(coord.lat - berlin_lat) > 1
        too_far_lon = abs(coord.lon - berlin_lon) > 1
        if too_far_lat or too_far_lon:
            g.remove_node(node_id)
    return g
def get_node_coord(g, node):
    """Fetch the coordinates of a node from the map.

    Args:
        g (JURBEY): The map
        node (int): Node id

    Returns:
        (float, float): ``(lat, lon)`` tuple; ``(nan, nan)`` when the node
        or its data entry is missing from the graph.
    """
    try:
        coord = g.nodes[node]['data'].coord
        lat, lon = coord.lat, coord.lon
    except KeyError:
        # Unknown node: signal "no position" with NaNs instead of raising.
        lat = lon = float('nan')
    return lat, lon
def get_bounding_box(g, nb_cols, nb_rows):
    """Build evenly spaced lat/lon buckets spanning the graph's bounding box.

    Args:
        g (JURBEY): the map
        nb_cols (int): number of latitude buckets
        nb_rows (int): number of longitude buckets

    Returns:
        (np.ndarray, np.ndarray): latitude and longitude bucket edges
        covering every node coordinate in the graph.
    """
    coords = [get_node_coord(g, node) for node in list(g.nodes)]
    lats = [lat for lat, _ in coords]
    lons = [lon for _, lon in coords]
    lat_axis = np.linspace(min(lats), max(lats), nb_cols)
    lon_axis = np.linspace(min(lons), max(lons), nb_rows)
    return lat_axis, lon_axis
def get_cluster_index(g, lat_grid, lon_grid, nb_col):
    """Compute the cluster ID for every node of the graph.

    Each node is bucketed by latitude and longitude with
    :func:`np.searchsorted` against the ``np.linspace`` grids, and the two
    bucket indices are flattened into a single ID as
    ``lat_bucket * nb_col + lon_bucket``.

    Args:
        g (JURBEY): the map
        lat_grid (np.linspace): Latitude buckets
        lon_grid (np.linspace): Longitude buckets
        nb_col (int): number of columns in the grid, used to flatten the ID

    Returns:
        pd.Series: the cluster ID of each node
    """
    coords = [get_node_coord(g, node) for node in list(g.nodes)]
    buckets = pd.DataFrame(np.searchsorted(lat_grid, [lat for lat, _ in coords]))
    buckets[1] = np.searchsorted(lon_grid, [lon for _, lon in coords])
    return buckets[0] * nb_col + buckets[1]
| 28.710526 | 101 | 0.623281 |
90b7043b4eef2e27eaae577e250534a4b7dc3362 | 689 | py | Python | Networks/CNNCifar.py | Soptq/Overlap-FedAvg | dbdbb77a6fc83979a710f88922fabbf540433a7e | [
"MIT"
] | 7 | 2021-06-21T16:39:29.000Z | 2022-03-17T05:11:59.000Z | Networks/CNNCifar.py | Soptq/Overlap-FedAvg | dbdbb77a6fc83979a710f88922fabbf540433a7e | [
"MIT"
] | null | null | null | Networks/CNNCifar.py | Soptq/Overlap-FedAvg | dbdbb77a6fc83979a710f88922fabbf540433a7e | [
"MIT"
] | 1 | 2021-12-23T08:20:22.000Z | 2021-12-23T08:20:22.000Z | import torch
from torch import nn
import torch.nn.functional as F
class CNNCifar(nn.Module):
    """Small convolutional classifier for 32x32 RGB (CIFAR-style) images.

    Two conv + ReLU + max-pool stages followed by two fully connected
    layers; the forward pass returns per-class softmax probabilities.

    NOTE(review): the ReLU applied to the fc2 scores right before the
    softmax, and returning probabilities (not logits) from ``forward``,
    are unusual -- confirm the training loss expects this.
    """

    def __init__(self, num_classes):
        """Build the layers.

        Args:
            num_classes (int): number of target classes (output width).
        """
        super(CNNCifar, self).__init__()
        # Attribute names and construction order are kept stable: they define
        # the state_dict layout (and the parameter-init RNG sequence).
        self.conv1 = nn.Conv2d(3, 32, 5)
        self.pool = nn.MaxPool2d(2, 2)
        self.conv2 = nn.Conv2d(32, 64, 5)
        self.fc1 = nn.Linear(64 * 5 * 5, 512)
        self.fc2 = nn.Linear(512, num_classes)
        self.softmax = nn.Softmax(dim=1)

    def forward(self, x):
        """Run the forward pass.

        Args:
            x: input batch -- assumed shape (N, 3, 32, 32); TODO confirm.

        Returns:
            Tensor of shape (N, num_classes) holding softmax probabilities.
        """
        features = self.pool(F.relu(self.conv1(x)))
        features = self.pool(F.relu(self.conv2(features)))
        flat = features.view(-1, 64 * 5 * 5)
        hidden = F.relu(self.fc1(flat))
        scores = F.relu(self.fc2(hidden))
        return self.softmax(scores)
e8eb084843b5350c98e92ffddf6b2ea05ee444cb | 13,066 | py | Python | cdr_plugin_folder_to_folder/processing/Loops.py | samarthdd/cdr-plugin-folder-to-folder | def780f5590f63066194ff7b348fd256d7f74a10 | [
"Apache-2.0"
] | null | null | null | cdr_plugin_folder_to_folder/processing/Loops.py | samarthdd/cdr-plugin-folder-to-folder | def780f5590f63066194ff7b348fd256d7f74a10 | [
"Apache-2.0"
] | null | null | null | cdr_plugin_folder_to_folder/processing/Loops.py | samarthdd/cdr-plugin-folder-to-folder | def780f5590f63066194ff7b348fd256d7f74a10 | [
"Apache-2.0"
] | null | null | null | import os
import os.path
import sys
import threading
import asyncio
import subprocess
import shutil
from multiprocessing.pool import ThreadPool
from osbot_utils.testing.Duration import Duration
from osbot_utils.utils.Files import create_folder, folder_exists, folder_delete_all
from cdr_plugin_folder_to_folder.common_settings.Config import Config, API_VERSION
from cdr_plugin_folder_to_folder.processing.Events_Log import Events_Log
from cdr_plugin_folder_to_folder.processing.Events_Log_Elastic import Events_Log_Elastic
from cdr_plugin_folder_to_folder.processing.File_Processing import File_Processing
from cdr_plugin_folder_to_folder.metadata.Metadata_Service import Metadata_Service
from cdr_plugin_folder_to_folder.pre_processing.Status import Status, FileStatus
from cdr_plugin_folder_to_folder.pre_processing.Hash_Json import Hash_Json
from cdr_plugin_folder_to_folder.processing.Report_Elastic import Report_Elastic
from cdr_plugin_folder_to_folder.storage.Storage import Storage
from elasticsearch import Elasticsearch
from datetime import datetime
from cdr_plugin_folder_to_folder.utils.Log_Duration import log_duration
from cdr_plugin_folder_to_folder.utils.Logging import log_error, log_info
from cdr_plugin_folder_to_folder.processing.Analysis_Elastic import Analysis_Elastic
class Loops(object):
continue_processing = False
processing_started = False
lock = asyncio.Lock()
def __init__(self):
self.use_es = False
self.config = Config()
self.status = Status()
self.storage = Storage()
self.hash_json = Hash_Json()
self.events = Events_Log(self.config.hd2_status_location)
self.events_elastic = Events_Log_Elastic()
self.hash=None
self.report_elastic = Report_Elastic()
self.analysis_elastic = Analysis_Elastic()
self.report_elastic.setup()
self.analysis_elastic.setup()
create_folder(self.storage.hd2_processed())
create_folder(self.storage.hd2_not_processed())
def IsProcessing(self):
return Loops.processing_started
def StopProcessing(self):
Loops.continue_processing = False
def HasBeenStopped(self):
return not Loops.continue_processing
def git_commit(self):
git_commit = 'Not available'
try:
git_commit = subprocess.check_output(['git', 'rev-parse', 'HEAD']).decode("utf-8").rstrip()
except Exception as e:
pass
return git_commit
def ProcessDirectoryWithEndpoint(self, itempath, file_hash, endpoint_index):
if not os.path.isdir(itempath):
return False
log_info(message=f"Starting ProcessDirectoryWithEndpoint on endpoint # {endpoint_index} for file {file_hash}")
meta_service = Metadata_Service()
original_file_path = meta_service.get_original_file_paths(itempath)
events = Events_Log(itempath)
endpoint = "http://" + self.config.endpoints['Endpoints'][endpoint_index]['IP'] + ":" + self.config.endpoints['Endpoints'][endpoint_index]['Port']
events.add_log("Processing with: " + endpoint)
meta_service.set_f2f_plugin_version(itempath, API_VERSION)
meta_service.set_f2f_plugin_git_commit(itempath, self.git_commit())
try:
file_processing = File_Processing(events, self.events_elastic, self.report_elastic, self.analysis_elastic, meta_service)
if not file_processing.processDirectory(endpoint, itempath):
events.add_log("CANNOT be processed")
return False
log_data = {
'file': original_file_path,
'status': FileStatus.COMPLETED,
'error': 'none',
'timestamp': datetime.now(),
}
log_info('ProcessDirectoryWithEndpoint', data=log_data)
meta_service.set_error(itempath, "none")
meta_service.set_status(itempath, FileStatus.COMPLETED)
self.hash_json.update_status(file_hash, FileStatus.COMPLETED)
events.add_log("Has been processed")
return True
except Exception as error:
log_data = {
'file': original_file_path,
'status': FileStatus.FAILED,
'error': str(error),
}
log_error(message='error in ProcessDirectoryWithEndpoint', data=log_data)
meta_service.set_error(itempath, str(error))
meta_service.set_status(itempath, FileStatus.FAILED)
self.hash_json.update_status(file_hash, FileStatus.FAILED)
events.add_log("ERROR:" + str(error))
return False
def ProcessDirectory(self, thread_data):
(itempath, file_hash, process_index) = thread_data
endpoint_index = process_index % self.config.endpoints_count
if not Loops.continue_processing:
return False
tik = datetime.now()
process_result = self.ProcessDirectoryWithEndpoint(itempath, file_hash, endpoint_index)
if process_result:
self.status.add_completed()
tok = datetime.now()
delta = tok - tik
meta_service = Metadata_Service()
meta_service.set_hd2_to_hd3_copy_time(itempath, delta.total_seconds())
else:
self.status.add_failed()
return process_result
# note: removing retries from this method (it should not be handled like this
#for idx in range(self.config.endpoints_count):
# if self.ProcessDirectoryWithEndpoint(itempath, file_hash, endpoint_index):
# return
# # The Endpoint failed to process the file
# # Retry it with the next one
# endpoint_index = (endpoint_index + 1) % self.config.endpoints_count
def updateHashJson(self):
self.hash_json.reset()
meta_service = Metadata_Service()
for hash_folder in os.listdir(self.storage.hd2_data()):
metadata_folder = self.storage.hd2_data(hash_folder)
if not os.path.isdir(metadata_folder):
continue
metadata = meta_service.get_from_file(metadata_folder)
file_name = metadata.get_file_name()
original_hash = metadata.get_original_hash()
status = metadata.get_rebuild_status()
if status != FileStatus.COMPLETED:
self.hash_json.add_file(original_hash, file_name)
self.hash_json.save()
self.status.set_processing_counters(len(self.hash_json.data()))
return self.hash_json.data()
def moveProcessedFiles(self):
json_list = self.hash_json.data()
for key in json_list:
source_path = self.storage.hd2_data(key)
if (FileStatus.COMPLETED == json_list[key]["file_status"]):
destination_path = self.storage.hd2_processed(key)
if folder_exists(destination_path):
folder_delete_all(destination_path)
shutil.move(source_path, destination_path)
if (FileStatus.FAILED == json_list[key]["file_status"]):
meta_service = Metadata_Service()
meta_service.get_from_file(source_path)
metadata = meta_service.metadata
if ("Engine response could not be decoded" == metadata.get_error()) and \
metadata.get_original_file_extension() in ['.xml', '.json']:
destination_path = self.storage.hd2_not_processed(key)
if folder_exists(destination_path):
folder_delete_all(destination_path)
shutil.move(source_path, destination_path)
def LoopHashDirectoriesInternal(self, thread_count, do_single):
if folder_exists(self.storage.hd2_data()) is False:
log_message = "ERROR: rootdir does not exist: " + self.storage.hd2_data()
log_error(log_message)
return False
if not isinstance(thread_count,int):
raise TypeError("thread_count must be a integer")
if not isinstance(do_single,bool):
raise TypeError("thread_count must be a integer")
log_message = f"LoopHashDirectoriesInternal started with {thread_count} threads"
self.events.add_log(log_message)
log_info(log_message)
json_list = self.updateHashJson()
log_message = f"LoopHashDirectoriesInternal started with {thread_count} threads"
self.events.add_log(log_message)
log_info(log_message)
threads = list()
process_index = 0
log_info(message=f'before Mapping thread_data for {len(json_list)} files')
thread_data = []
for key in json_list:
file_hash = key
itempath = self.storage.hd2_data(key)
if (FileStatus.COMPLETED == json_list[key]["file_status"]):
self.events.add_log(f"The file processing has been already completed")
continue
if not os.path.exists(itempath):
self.events.add_log(f"ERROR: Path \"{itempath}\" does not exist")
json_list[key]["file_status"] = FileStatus.FAILED
continue
process_index += 1
thread_data.append((itempath, file_hash, process_index,))
# # limit the number of parallel threads
#
# if process_index % int(thread_count) == 0: # todo: refactor this workflow to use multiprocess and queues
# # Clean up the threads
# for index, thread in enumerate(threads): # todo: since at the moment this will block allocating new threads until
# thread.join() # all have finishing execution
#
# process_index += 1
# log_info(message=f"in LoopHashDirectoriesInternal process_index={process_index} , thread #{process_index % int(thread_count) }")
# x = threading.Thread(target=self.ProcessDirectory, args=(itempath, file_hash, process_index,))
# threads.append(x)
# x.start()
#
# if do_single:
# break
#
# if not Loops.continue_processing:
# break
# for index, thread in enumerate(threads):
# thread.join()
log_info(message=f'after mapped thread_data, there are {len(thread_data)} mapped items')
#thread_data = thread_data[:500]
#log_info(message=f'to start with only processing {len(thread_data)} thread_data items')
pool = ThreadPool(thread_count)
results = pool.map(self.ProcessDirectory, thread_data)
pool.close()
pool.join()
self.moveProcessedFiles()
self.events.add_log("LoopHashDirectoriesInternal finished")
return True
async def LoopHashDirectoriesAsync(self, thread_count, do_single = False):
    """Run the hashing loop while holding Loops.lock so that only one run
    can be active at a time.

    NOTE(review): indentation was reconstructed from a flattened listing —
    the cleanup block below is assumed to sit entirely inside the `finally`
    clause; confirm against the original file.
    """
    await Loops.lock.acquire()
    try:
        Loops.continue_processing = True
        Loops.processing_started = True
        self.status.set_started()
        # Synchronous call: this blocks the event loop for the whole run.
        self.LoopHashDirectoriesInternal(thread_count, do_single)
    finally:
        # Always clear the running flag and release the lock, even if the
        # internal loop raised.
        Loops.processing_started = False
        Loops.lock.release()
        self.status.set_stopped()
        self.hash_json.save()
@log_duration
def LoopHashDirectories(self, thread_count=None):
    """Kick off one threaded hashing run; returns False when a run is
    already in progress, True once the run has completed."""
    # Allow only a single loop to be run at a time.
    if self.IsProcessing():
        log_error(message="ERROR: Attempt to start processing while processing is in progress")
        return False

    self.status.StartStatusThread()
    if not thread_count:
        # Fall back to the configured worker count.
        thread_count = self.config.thread_count
    log_info(message="in LoopHashDirectories, about to start main loop")

    event_loop = asyncio.new_event_loop()
    asyncio.set_event_loop(event_loop)
    event_loop.run_until_complete(self.LoopHashDirectoriesAsync(thread_count))

    log_info(message="in LoopHashDirectories, Loop completed")
    self.status.StopStatusThread()
    return True
@log_duration
def LoopHashDirectoriesSequential(self):
    """Run the hashing loop with a single worker thread; returns False when
    a run is already in progress."""
    # Allow only a single loop to be run at a time.
    if self.IsProcessing():
        log_error("ERROR: Attempt to start processing while processing is in progress")
        return False

    event_loop = asyncio.new_event_loop()
    asyncio.set_event_loop(event_loop)
    event_loop.run_until_complete(self.LoopHashDirectoriesAsync(1))
    return True
@log_duration
def ProcessSingleFile(self):
    """Run the hashing loop with one thread in do_single mode; returns False
    when a run is already in progress."""
    if self.IsProcessing():
        log_error("ERROR: Attempt to start processing while processing is in progress")
        return False

    event_loop = asyncio.new_event_loop()
    asyncio.set_event_loop(event_loop)
    event_loop.run_until_complete(self.LoopHashDirectoriesAsync(1, True))
    return True
| 39.355422 | 154 | 0.650543 |
1578852a68f7a6d2c6c5dfecd6b5cfc2c062ab29 | 255 | py | Python | foundryapp/foundryapp/doctype/mapping/mapping.py | umaepoch/foundryapp | 75e20cb399b114d416d3bdd286edd8c5a4690c75 | [
"MIT"
] | null | null | null | foundryapp/foundryapp/doctype/mapping/mapping.py | umaepoch/foundryapp | 75e20cb399b114d416d3bdd286edd8c5a4690c75 | [
"MIT"
] | null | null | null | foundryapp/foundryapp/doctype/mapping/mapping.py | umaepoch/foundryapp | 75e20cb399b114d416d3bdd286edd8c5a4690c75 | [
"MIT"
] | null | null | null | # -*- coding: utf-8 -*-
# Copyright (c) 2021, yashwanth and contributors
# For license information, please see license.txt
from __future__ import unicode_literals
# import frappe
from frappe.model.document import Document
class Mapping(Document):
    # Frappe doctype controller for "Mapping"; no custom behaviour beyond
    # the framework defaults.
    pass
| 23.181818 | 49 | 0.772549 |
fbf53c7146e3ca7f0d19f6405176785d9dbbd2c3 | 3,028 | py | Python | neuralprocess/data/step.py | MIC-DKFZ/gpconvcnp | 4f29412f9a928362c5d3626703df4a5d5b1489d1 | [
"MIT"
] | 8 | 2021-07-03T01:48:32.000Z | 2021-12-17T13:11:07.000Z | neuralprocess/data/step.py | MIC-DKFZ/gpconvcnp | 4f29412f9a928362c5d3626703df4a5d5b1489d1 | [
"MIT"
] | null | null | null | neuralprocess/data/step.py | MIC-DKFZ/gpconvcnp | 4f29412f9a928362c5d3626703df4a5d5b1489d1 | [
"MIT"
] | 3 | 2021-08-04T12:58:58.000Z | 2021-09-18T18:34:58.000Z | import numpy as np
import torch
from neuralprocess.data.base import FunctionGenerator
class StepFunctionGenerator(FunctionGenerator):
    """
    Generate random step functions.

    Args:
        batch_size (int): Batch size.
        y_range (tuple): Allowed y range for the step levels.
        number_of_steps (int or 2-sequence): Number of steps in the interval.
            If a list or tuple, the count is drawn uniformly from
            [low, high).
        min_step_width (float): Steps must be at least this far apart along
            the x axis, otherwise generation fails.
        min_step_height (float): Consecutive step levels must differ by at
            least this much on the y axis.
    """

    def __init__(
        self,
        batch_size,
        y_range=(-3, 3),          # tuple default: avoids the mutable-list default-arg pitfall
        number_of_steps=(3, 10),
        min_step_width=0.1,
        min_step_height=0.1,
        *args,
        **kwargs
    ):
        super().__init__(batch_size, *args, **kwargs)

        self.y_range = y_range
        self.number_of_steps = number_of_steps
        self.min_step_width = min_step_width
        self.min_step_height = min_step_height

    def _next_levels(self, old_values):
        """Draw new per-batch step levels, pushed at least min_step_height
        away from old_values (shape (B, 1))."""
        new_values = np.random.uniform(*self.y_range, size=(self.batch_size, 1))
        if self.min_step_height > 0:
            diffs = new_values - old_values
            ind = np.where(np.abs(diffs) < self.min_step_height)
            # Nudge too-close levels apart in the direction they already differ.
            new_values[ind] += (np.sign(diffs) * self.min_step_height)[ind]
        return new_values

    def apply(self, x):
        """
        Generate step-function y values for the given inputs.

        Args:
            x (np.ndarray): x values, shape (1, N, 1)

        Returns:
            np.ndarray: y values, shape (B, N, 1)

        Raises:
            RuntimeError: If the randomly drawn step locations violate
                min_step_width.
        """
        if hasattr(self.number_of_steps, "__iter__"):
            number_of_steps = np.random.randint(*self.number_of_steps)
        else:
            number_of_steps = self.number_of_steps

        # Random step locations along x, sorted so segments are contiguous.
        step_indices = np.random.randint(0, x.shape[1], number_of_steps)
        step_indices.sort()
        step_x = x[0, :, 0][step_indices]
        step_width = np.abs(step_x[1:] - step_x[:-1])
        if np.any(step_width < self.min_step_width):
            # Fixed typo in the error message ("Couln't").
            raise RuntimeError("Couldn't generate step function with wide enough steps.")

        y = np.zeros((self.batch_size, *x.shape[1:]), dtype=np.float32)

        # First segment: unconstrained random levels.
        new_values = np.random.uniform(*self.y_range, size=(self.batch_size, 1))
        y[:, : step_indices[0], 0] = np.repeat(new_values, step_indices[0], 1)

        # Middle segments: each level differs from the previous one by at
        # least min_step_height.
        for i in range(number_of_steps - 1):
            new_values = self._next_levels(new_values)
            y[:, step_indices[i] : step_indices[i + 1], 0] = np.repeat(
                new_values, step_indices[i + 1] - step_indices[i], 1
            )

        # Last segment: from the final step to the end of the interval.
        new_values = self._next_levels(new_values)
        y[:, step_indices[-1] :, 0] = np.repeat(
            new_values, y.shape[1] - step_indices[-1], 1
        )

        return y
83ab5d37c9f2c8599fd47ca852b3c217e3e7a1a2 | 5,509 | py | Python | wagtail/images/forms.py | brownaa/wagtail | c97bc56c6822eb1b6589d5c33e07f71acfc48845 | [
"BSD-3-Clause"
] | 8,851 | 2016-12-09T19:01:45.000Z | 2022-03-31T04:45:06.000Z | wagtail/images/forms.py | brownaa/wagtail | c97bc56c6822eb1b6589d5c33e07f71acfc48845 | [
"BSD-3-Clause"
] | 5,197 | 2016-12-09T19:24:37.000Z | 2022-03-31T22:17:55.000Z | wagtail/images/forms.py | brownaa/wagtail | c97bc56c6822eb1b6589d5c33e07f71acfc48845 | [
"BSD-3-Clause"
] | 2,548 | 2016-12-09T18:16:55.000Z | 2022-03-31T21:34:38.000Z | from django import forms
from django.conf import settings
from django.forms.models import modelform_factory
from django.utils.text import capfirst
from django.utils.translation import gettext as _
from wagtail.admin import widgets
from wagtail.admin.forms.collections import (
BaseCollectionMemberForm, CollectionChoiceField, collection_member_permission_formset_factory)
from wagtail.core.models import Collection
from wagtail.images.fields import WagtailImageField
from wagtail.images.formats import get_image_formats
from wagtail.images.models import Image
from wagtail.images.permissions import permission_policy as images_permission_policy
# Callback to allow us to override the default form field for the image file field and collection field.
def formfield_for_dbfield(db_field, **kwargs):
    """Override the default form field for the image 'file' and 'collection'
    model fields; every other field keeps its stock form field."""
    field_name = db_field.name
    if field_name == 'file':
        return WagtailImageField(label=capfirst(db_field.verbose_name), **kwargs)
    if field_name == 'collection':
        return CollectionChoiceField(
            label=_("Collection"),
            queryset=Collection.objects.all(),
            empty_label=None,
            **kwargs
        )
    # Any other field: delegate to the model field's own formfield() method.
    return db_field.formfield(**kwargs)
class BaseImageForm(BaseCollectionMemberForm):
    """Default base form for image create/edit; widget overrides live on Meta."""
    permission_policy = images_permission_policy

    class Meta:
        # set the 'file' widget to a FileInput rather than the default ClearableFileInput
        # so that when editing, we don't get the 'currently: ...' banner which is
        # a bit pointless here
        widgets = {
            'tags': widgets.AdminTagWidget,
            'file': forms.FileInput(),
            # Focal point values are carried as hidden inputs (the CSS classes
            # below identify them to the front-end).
            'focal_point_x': forms.HiddenInput(attrs={'class': 'focal_point_x'}),
            'focal_point_y': forms.HiddenInput(attrs={'class': 'focal_point_y'}),
            'focal_point_width': forms.HiddenInput(attrs={'class': 'focal_point_width'}),
            'focal_point_height': forms.HiddenInput(attrs={'class': 'focal_point_height'}),
        }
def get_image_base_form():
    """Return the base form class for images, honouring the
    WAGTAILIMAGES_IMAGE_FORM_BASE setting when it is set."""
    override_path = getattr(settings, "WAGTAILIMAGES_IMAGE_FORM_BASE", "")
    if not override_path:
        return BaseImageForm
    # Import lazily: only needed when an override is configured.
    from django.utils.module_loading import import_string
    return import_string(override_path)
def get_image_form(model):
    """Build a ModelForm class for the given image model based on its
    admin_form_fields."""
    form_fields = model.admin_form_fields
    if 'collection' not in form_fields:
        # force addition of the 'collection' field, because leaving it out can
        # cause dubious results when multiple collections exist (e.g adding the
        # document to the root collection where the user may not have permission) -
        # and when only one collection exists, it will get hidden anyway.
        form_fields = [*form_fields, 'collection']

    return modelform_factory(
        model,
        form=get_image_base_form(),
        fields=form_fields,
        formfield_callback=formfield_for_dbfield,
    )
def get_image_multi_form(model_class):
    """Return the edit form used inside the multiple-image uploader: the
    standard image form minus the file and focal point fields."""
    base_form = get_image_form(model_class)

    class ImageEditForm(base_form):
        class Meta(base_form.Meta):
            model = model_class
            exclude = (
                'file',
                'focal_point_x',
                'focal_point_y',
                'focal_point_width',
                'focal_point_height',
            )

    return ImageEditForm
class ImageInsertionForm(forms.Form):
    """
    Form for selecting parameters of the image (e.g. format) prior to insertion
    into a rich text area
    """
    format = forms.ChoiceField(
        label=_("Format"),
        choices=[(fmt.name, fmt.label) for fmt in get_image_formats()],
        widget=forms.RadioSelect
    )
    image_is_decorative = forms.BooleanField(required=False, label=_("Image is decorative"))
    alt_text = forms.CharField(required=False, label=_("Alt text"))

    def clean_alt_text(self):
        """Blank out alt text for decorative images; otherwise require it."""
        alt_text = self.cleaned_data['alt_text']
        if self.cleaned_data['image_is_decorative']:
            # Decorative images must not carry alt text.
            return ''
        if not alt_text:
            # Alt text is required if image is not decorative.
            self.add_error(
                'alt_text',
                _("Please add some alt text for your image or mark it as decorative")
            )
        return alt_text
class URLGeneratorForm(forms.Form):
    """Form selecting a resize filter (filter_method) together with its
    width/height/closeness parameters."""
    filter_method = forms.ChoiceField(
        label=_("Filter"),
        choices=(
            ('original', _("Original size")),
            ('width', _("Resize to width")),
            ('height', _("Resize to height")),
            ('min', _("Resize to min")),
            ('max', _("Resize to max")),
            ('fill', _("Resize to fill")),
        ),
    )
    width = forms.IntegerField(label=_("Width"), min_value=0)
    height = forms.IntegerField(label=_("Height"), min_value=0)
    closeness = forms.IntegerField(label=_("Closeness"), min_value=0, initial=0)
# Formset for editing a group's per-collection image permissions
# (add / change / choose), rendered with the template named below.
GroupImagePermissionFormSet = collection_member_permission_formset_factory(
    Image,
    [
        ('add_image', _("Add"), _("Add/edit images you own")),
        ('change_image', _("Edit"), _("Edit any image")),
        ('choose_image', _("Choose"), _("Select images in choosers")),
    ],
    'wagtailimages/permissions/includes/image_permissions_formset.html'
)
| 37.732877 | 122 | 0.672173 |
edca1d68600070456de1772820b62287b9b2d4a9 | 1,466 | py | Python | geo/app/app/backend/eclipse2017_app.py | isabella232/eclipse2017 | a328c6d437638b1fe89aa8a24579406f01fe970d | [
"Apache-2.0"
] | 18 | 2017-03-17T18:10:55.000Z | 2022-03-30T17:35:55.000Z | geo/app/app/backend/eclipse2017_app.py | google/eclipse2017 | a328c6d437638b1fe89aa8a24579406f01fe970d | [
"Apache-2.0"
] | 1 | 2021-06-27T16:01:10.000Z | 2021-06-27T16:21:50.000Z | geo/app/app/backend/eclipse2017_app.py | isabella232/eclipse2017 | a328c6d437638b1fe89aa8a24579406f01fe970d | [
"Apache-2.0"
] | 8 | 2017-03-28T13:34:50.000Z | 2021-06-27T15:39:10.000Z | #
# Copyright 2017 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from flask import Flask
from geo import geo
from routes import Routes
# Blueprint instances paired with the URL prefix they are mounted under.
blueprints = (
    (geo.create_blueprint(), '/services/geo'),)

base_routes = Routes()
class Eclipse2017GeoApp(Flask):
    """Flask application for the Eclipse 2017 geo service."""

    def __init__(
            self, project_id, session_enc_key, google_oauth2_client_id,
            google_oauth2_client_secret, debug=False,
            blueprints=blueprints, routes=base_routes, geo=geo,
            **kwargs):
        super(Eclipse2017GeoApp, self).__init__(__name__, **kwargs)
        # Stash the deployment credentials/settings in the Flask config.
        self.config.update(
            PROJECT_ID=project_id,
            SECRET_KEY=session_enc_key,
            GOOGLE_OAUTH2_CLIENT_ID=google_oauth2_client_id,
            GOOGLE_OAUTH2_CLIENT_SECRET=google_oauth2_client_secret,
        )
        self.geo = geo
        self.debug = debug
        # Attach all blueprints/routes to this application instance.
        routes.register(self, blueprints)
| 29.918367 | 80 | 0.706685 |
419430a0b5ee11430d108335867ffe836d0deec0 | 7,648 | py | Python | flopco/flopco.py | ABaaaC/flopco-pytorch | 85465b493fade1b73b8209caa38d82d1c8d2a0ef | [
"MIT"
] | null | null | null | flopco/flopco.py | ABaaaC/flopco-pytorch | 85465b493fade1b73b8209caa38d82d1c8d2a0ef | [
"MIT"
] | null | null | null | flopco/flopco.py | ABaaaC/flopco-pytorch | 85465b493fade1b73b8209caa38d82d1c8d2a0ef | [
"MIT"
] | null | null | null | import torch
import torch.nn as nn
import numpy as np
from collections import defaultdict
from functools import partial
import copy
from flopco.compute_layer_flops import *
class FlopCo():
    """Collect per-layer statistics of a PyTorch model — FLOPs, MACs,
    parameter counts and input/output shapes — by attaching temporary
    forward hooks and running a single forward pass.

    After construction the absolute totals (total_flops, total_macs,
    total_params) and per-layer relative shares (relative_flops,
    relative_macs, relative_params) are available; the model reference is
    dropped afterwards to free memory.
    """

    def __init__(self, model, img_size = (1, 3, 224, 224), custom_tensor = None, device = 'cpu', instances = None):
        '''
        model: the model to profile; it is executed once.
        img_size: shape of the random input batch used when custom_tensor
            is None.
        custom_tensor: optional list (positional args) or dict (keyword
            args) passed to the model instead of a random tensor.
        instances: list of layer types,
            supported types are [nn.Conv2d, nn.Linear,
            nn.BatchNorm2d, nn.ReLU, nn.MaxPool2d, nn.AvgPool2d, nn.Softmax]
        '''
        self.device = device
        self.model = model
        self.img_size = img_size
        self.custom_tensor = custom_tensor

        self.input_shapes = None
        self.output_shapes = None

        self.flops = None
        self.macs = None
        self.params = None

        if instances is not None:
            self.instances = instances
        else:
            # Default: only count the layers that usually dominate compute.
            self.instances = [nn.Conv2d, nn.Linear]

        self.ltypes = None
        self.get_ltypes()

        self.get_stats(shapes = True, flops = True, macs = True, params = True)

        self.total_flops = sum(sum(v) for v in self.flops.values())
        self.total_macs = sum(sum(v) for v in self.macs.values())
        self.total_params = sum(self.params.values())

        self.relative_flops = defaultdict(None,
                                          {k: sum(v) / self.total_flops
                                           for k, v in self.flops.items()})

        self.relative_macs = defaultdict(None,
                                         {k: sum(v) / self.total_macs
                                          for k, v in self.macs.items()})

        self.relative_params = defaultdict(None,
                                           {k: v / self.total_params
                                            for k, v in self.params.items()})

        # The model itself is no longer needed once the stats are collected.
        del self.model
        torch.cuda.empty_cache()

    def __str__(self):
        print_info = "\n".join([str({k: v}) for k, v in self.__dict__.items()])
        return str(self.__class__) + ": \n" + print_info

    def get_ltypes(self):
        """Record the type (plus conv hyper-parameters) of every counted layer."""
        self.ltypes = defaultdict(defaultdict)

        for mname, m in self.model.named_modules():
            # Exact class match (subclasses are deliberately not included).
            if m.__class__ in self.instances:
                self.ltypes[mname]['type'] = type(m)

                if isinstance(m, nn.Conv2d):
                    self.ltypes[mname]['kernel_size'] = m.kernel_size
                    self.ltypes[mname]['groups'] = m.groups

    def count_params(self):
        """Count the number of parameters of every counted layer."""
        self.params = defaultdict(int)

        for mname, m in self.model.named_modules():
            if m.__class__ in self.instances:
                self.params[mname] = 0
                for p in m.parameters():
                    self.params[mname] += p.numel()

    def _layer_flops(self, mod, inp_shape, out_shape, macs=False):
        """Dispatch to the per-layer-type counter from compute_layer_flops.

        Shared by _save_flops and _save_macs (which previously duplicated
        this whole chain). Returns -1 for unsupported layer types.
        """
        if isinstance(mod, nn.Conv2d):
            return compute_conv2d_flops(mod, inp_shape, out_shape, macs=macs)
        if isinstance(mod, nn.Conv1d):
            return compute_conv1d_flops(mod, inp_shape, out_shape, macs=macs)
        if isinstance(mod, nn.Linear):
            return compute_fc_flops(mod, inp_shape, out_shape, macs=macs)
        if isinstance(mod, nn.BatchNorm2d):
            return compute_bn2d_flops(mod, inp_shape, out_shape, macs=macs)
        if isinstance(mod, nn.ReLU):
            return compute_relu_flops(mod, inp_shape, out_shape, macs=macs)
        if isinstance(mod, nn.MaxPool2d):
            return compute_maxpool2d_flops(mod, inp_shape, out_shape, macs=macs)
        if isinstance(mod, nn.AvgPool2d):
            return compute_avgpool2d_flops(mod, inp_shape, out_shape, macs=macs)
        if isinstance(mod, nn.Softmax):
            return compute_softmax_flops(mod, inp_shape, out_shape, macs=macs)
        return -1

    def _save_shapes(self, name, mod, inp, out):
        """Forward hook: record input/output shapes of one invocation."""
        self.input_shapes[name].append(inp[0].shape)
        self.output_shapes[name].append(out.shape)

    def _save_flops(self, name, mod, inp, out):
        """Forward hook: record FLOPs of one invocation of module `name`."""
        self.flops[name].append(self._layer_flops(mod, inp[0].shape, out.shape, macs=False))

    def _save_macs(self, name, mod, inp, out):
        """Forward hook: record MACs of one invocation of module `name`."""
        self.macs[name].append(self._layer_flops(mod, inp[0].shape, out.shape, macs=True))

    def get_stats(self, shapes = True, flops = False, macs = False, params = False):
        """Attach the requested forward hooks, run one forward pass, then
        remove all hooks again.

        Raises:
            TypeError: if custom_tensor is set but is neither a list nor a dict.
        """
        if params:
            self.count_params()

        if shapes:
            self.input_shapes = defaultdict(list)
            self.output_shapes = defaultdict(list)
        if flops:
            self.flops = defaultdict(list)
        if macs:
            self.macs = defaultdict(list)

        with torch.no_grad():
            for name, m in self.model.named_modules():
                if any(isinstance(m, inst) for inst in self.instances):
                    if shapes:
                        m.register_forward_hook(partial(self._save_shapes, name))
                    if flops:
                        m.register_forward_hook(partial(self._save_flops, name))
                    if macs:
                        m.register_forward_hook(partial(self._save_macs, name))

            if self.custom_tensor is None:
                batch = torch.rand(*self.img_size).to(self.device)
                self.model(batch)
            else:
                batch = self.custom_tensor
                if isinstance(batch, list):
                    self.model(*batch)
                elif isinstance(batch, dict):
                    self.model(**batch)
                else:
                    raise TypeError(f'Input tensor should be of type list or dict, got {type(batch)}')
            batch = None

        # Drop all hooks again so the model is left unmodified.
        for name, m in self.model.named_modules():
            m._forward_pre_hooks.clear()
            m._forward_hooks.clear()

        torch.cuda.empty_cache()
| 34.295964 | 115 | 0.504446 |
256a936eca18ba091adf9a6db3118de56833f342 | 4,258 | py | Python | locallibrary/settings.py | valerie528/django_local_library | 9e7827f45b2c0dc6d9bd89b7038a2f8dcf7a048f | [
"Apache-2.0"
] | null | null | null | locallibrary/settings.py | valerie528/django_local_library | 9e7827f45b2c0dc6d9bd89b7038a2f8dcf7a048f | [
"Apache-2.0"
] | null | null | null | locallibrary/settings.py | valerie528/django_local_library | 9e7827f45b2c0dc6d9bd89b7038a2f8dcf7a048f | [
"Apache-2.0"
] | null | null | null | """
Django settings for locallibrary project.

Generated by 'django-admin startproject' using Django 2.2.16.

For more information on this file, see
https://docs.djangoproject.com/en/2.2/topics/settings/

For the full list of settings and their values, see
https://docs.djangoproject.com/en/2.2/ref/settings/
"""

import os

# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))


# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/2.2/howto/deployment/checklist/

# SECURITY WARNING: keep the secret key used in production secret!
# SECRET_KEY = 'l7!vlb3r1z!7syztk69-l9dw-_#u473wzv*)@w3o-0g^rs!39y'
# NOTE(review): duplicate `import os` (os is already imported above) — harmless
# but redundant.
import os
# Falls back to a hard-coded key when DJANGO_SECRET_KEY is unset (dev only).
SECRET_KEY = os.environ.get('DJANGO_SECRET_KEY', 'cg#p$g+j9tax!#a3cup@1$8obt2_+&k3q+pmu)5%asj6yjpkag')

# SECURITY WARNING: don't run with debug turned on in production!
# DEBUG = True
# Debug stays on unless DJANGO_DEBUG is set to the exact string 'False'.
DEBUG = os.environ.get('DJANGO_DEBUG', '') != 'False'

ALLOWED_HOSTS = []


# Application definition

INSTALLED_APPS = [
    'django.contrib.admin',
    'django.contrib.auth',
    'django.contrib.contenttypes',
    'django.contrib.sessions',
    'django.contrib.messages',
    'django.contrib.staticfiles',
    'catalog',
]

MIDDLEWARE = [
    'django.middleware.security.SecurityMiddleware',
    # WhiteNoise serves static files straight from the WSGI app (Heroku setup).
    'whitenoise.middleware.WhiteNoiseMiddleware',
    'django.contrib.sessions.middleware.SessionMiddleware',
    'django.middleware.common.CommonMiddleware',
    'django.middleware.csrf.CsrfViewMiddleware',
    'django.contrib.auth.middleware.AuthenticationMiddleware',
    'django.contrib.messages.middleware.MessageMiddleware',
    'django.middleware.clickjacking.XFrameOptionsMiddleware',
]

ROOT_URLCONF = 'locallibrary.urls'

TEMPLATES = [
    {
        'BACKEND': 'django.template.backends.django.DjangoTemplates',
        'DIRS': [os.path.join(BASE_DIR, 'templates')],
        'APP_DIRS': True,
        'OPTIONS': {
            'context_processors': [
                'django.template.context_processors.debug',
                'django.template.context_processors.request',
                'django.contrib.auth.context_processors.auth',
                'django.contrib.messages.context_processors.messages',
            ],
        },
    },
]

WSGI_APPLICATION = 'locallibrary.wsgi.application'


# Database
# https://docs.djangoproject.com/en/2.2/ref/settings/#databases

DATABASES = {
    'default': {
        'ENGINE': 'django.db.backends.sqlite3',
        'NAME': os.path.join(BASE_DIR, 'db.sqlite3'),
    }
}

# Password validation
# https://docs.djangoproject.com/en/2.2/ref/settings/#auth-password-validators

AUTH_PASSWORD_VALIDATORS = [
    {
        'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
    },
    {
        'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
    },
    {
        'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
    },
    {
        'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
    },
]

# Internationalization
# https://docs.djangoproject.com/en/2.2/topics/i18n/

LANGUAGE_CODE = 'en-us'

TIME_ZONE = 'Singapore'

USE_I18N = True

USE_L10N = True

USE_TZ = True

# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/2.2/howto/static-files/

STATIC_URL = '/static/'

# Redirect to home URL after login (Default redirects to /accounts/profile/)
LOGIN_REDIRECT_URL = '/'

EMAIL_BACKEND = 'django.core.mail.backends.console.EmailBackend'

# Heroku: Update database configuration from $DATABASE_URL.
import dj_database_url
db_from_env = dj_database_url.config(conn_max_age=500)
DATABASES['default'].update(db_from_env)

# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/2.1/howto/static-files/

# The absolute path to the directory where collectstatic will collect static files for deployment.
STATIC_ROOT = os.path.join(BASE_DIR, 'staticfiles')

# The URL to use when referring to static files (where they will be served from)
# NOTE(review): STATIC_URL is re-assigned here with the same value as above —
# redundant but harmless.
STATIC_URL = '/static/'

# Simplified static file serving.
# https://warehouse.python.org/project/whitenoise/
STATICFILES_STORAGE = 'whitenoise.storage.CompressedManifestStaticFilesStorage'
e8b2ffd1eb21914b567b8a521e07baf296cbef6e | 3,416 | py | Python | certbot-dns-ovh/setup.py | silverbacknet/certbot | 270b5535e24fd3dab4c05fa8929adca8117942f1 | [
"Apache-2.0"
] | null | null | null | certbot-dns-ovh/setup.py | silverbacknet/certbot | 270b5535e24fd3dab4c05fa8929adca8117942f1 | [
"Apache-2.0"
] | null | null | null | certbot-dns-ovh/setup.py | silverbacknet/certbot | 270b5535e24fd3dab4c05fa8929adca8117942f1 | [
"Apache-2.0"
] | null | null | null | from distutils.version import LooseVersion
import os
import sys
from setuptools import __version__ as setuptools_version
from setuptools import find_packages
from setuptools import setup
from setuptools.command.test import test as TestCommand
version = '1.7.0.dev0'

# Remember to update local-oldest-requirements.txt when changing the minimum
# acme/certbot version.
install_requires = [
    'dns-lexicon>=2.7.14', # Correct proxy use on OVH provider
    'setuptools',
    'zope.interface',
]

# acme/certbot may be excluded via EXCLUDE_CERTBOT_DEPS, but wheels must
# always ship with them.
if not os.environ.get('EXCLUDE_CERTBOT_DEPS'):
    install_requires.extend([
        'acme>=0.31.0',
        'certbot>=1.1.0',
    ])
elif 'bdist_wheel' in sys.argv[1:]:
    raise RuntimeError('Unset EXCLUDE_CERTBOT_DEPS when building wheels '
                       'to include certbot dependencies.')

# setuptools >= 36.2 understands environment markers; older versions need a
# plain conditional append instead.
setuptools_known_environment_markers = (LooseVersion(setuptools_version) >= LooseVersion('36.2'))

if setuptools_known_environment_markers:
    install_requires.append('mock ; python_version < "3.3"')
elif 'bdist_wheel' in sys.argv[1:]:
    raise RuntimeError('Error, you are trying to build certbot wheels using an old version '
                       'of setuptools. Version 36.2+ of setuptools is required.')
elif sys.version_info < (3,3):
    install_requires.append('mock')

docs_extras = [
    'Sphinx>=1.0', # autodoc_member_order = 'bysource', autodoc_default_flags
    'sphinx_rtd_theme',
]
class PyTest(TestCommand):
    """setuptools `test` command that delegates to pytest."""

    user_options = []

    def initialize_options(self):
        TestCommand.initialize_options(self)
        self.pytest_args = ''

    def run_tests(self):
        # import here, cause outside the eggs aren't loaded
        import shlex
        import pytest
        exit_code = pytest.main(shlex.split(self.pytest_args))
        sys.exit(exit_code)
setup(
    name='certbot-dns-ovh',
    version=version,
    description="OVH DNS Authenticator plugin for Certbot",
    url='https://github.com/certbot/certbot',
    author="Certbot Project",
    author_email='client-dev@letsencrypt.org',
    license='Apache License 2.0',
    python_requires='>=2.7, !=3.0.*, !=3.1.*, !=3.2.*, !=3.3.*, !=3.4.*',
    classifiers=[
        'Development Status :: 5 - Production/Stable',
        'Environment :: Plugins',
        'Intended Audience :: System Administrators',
        'License :: OSI Approved :: Apache Software License',
        'Operating System :: POSIX :: Linux',
        'Programming Language :: Python',
        'Programming Language :: Python :: 2',
        'Programming Language :: Python :: 2.7',
        'Programming Language :: Python :: 3',
        'Programming Language :: Python :: 3.5',
        'Programming Language :: Python :: 3.6',
        'Programming Language :: Python :: 3.7',
        'Programming Language :: Python :: 3.8',
        'Topic :: Internet :: WWW/HTTP',
        'Topic :: Security',
        'Topic :: System :: Installation/Setup',
        'Topic :: System :: Networking',
        'Topic :: System :: Systems Administration',
        'Topic :: Utilities',
    ],
    packages=find_packages(),
    include_package_data=True,
    install_requires=install_requires,
    extras_require={
        'docs': docs_extras,
    },
    # Registers the authenticator under certbot's plugin entry-point group.
    entry_points={
        'certbot.plugins': [
            'dns-ovh = certbot_dns_ovh._internal.dns_ovh:Authenticator',
        ],
    },
    tests_require=["pytest"],
    test_suite='certbot_dns_ovh',
    cmdclass={"test": PyTest},
)
| 33.165049 | 97 | 0.647834 |
31672554d7790e95af3c77ace000773f82a2ed2d | 41,020 | py | Python | psono/restapi/tests/recoverycode.py | dirigeant/psono-server | a18c5b3c4d8bbbe4ecf1615b210d99fb77752205 | [
"Apache-2.0",
"CC0-1.0"
] | 48 | 2018-04-19T15:50:58.000Z | 2022-01-23T15:58:11.000Z | psono/restapi/tests/recoverycode.py | dirigeant/psono-server | a18c5b3c4d8bbbe4ecf1615b210d99fb77752205 | [
"Apache-2.0",
"CC0-1.0"
] | 9 | 2018-09-13T14:56:18.000Z | 2020-01-17T16:44:33.000Z | psono/restapi/tests/recoverycode.py | dirigeant/psono-server | a18c5b3c4d8bbbe4ecf1615b210d99fb77752205 | [
"Apache-2.0",
"CC0-1.0"
] | 11 | 2019-09-20T11:53:47.000Z | 2021-07-18T22:41:31.000Z | from django.urls import reverse
from django.conf import settings
from django.contrib.auth.hashers import check_password, make_password
from django.utils import timezone
from rest_framework import status
from restapi import models
from .base import APITestCaseExtended
from datetime import timedelta
from ..utils import readbuffer
import random
import string
import os
import datetime
import json
import binascii
import nacl.utils
import nacl.secret
from nacl.public import PrivateKey, PublicKey, Box
class RecoveryCodeTests(APITestCaseExtended):
def setUp(self):
    """Create a user with all second factors (Duo, Google Authenticator,
    Yubikey OTP) enabled, as the fixture for the recovery code tests."""
    # Randomised identifiers so repeated runs don't collide on unique columns.
    self.test_email = ''.join(random.choice(string.ascii_lowercase) for _ in range(10)) + 'test@example.com'
    self.test_email_bcrypt = 'a'
    self.test_username = ''.join(random.choice(string.ascii_lowercase) for _ in range(10)) + 'test@psono.pw'
    # Random hex blobs standing in for the client-side derived key material.
    self.test_authkey = binascii.hexlify(os.urandom(settings.AUTH_KEY_LENGTH_BYTES)).decode()
    self.test_public_key = binascii.hexlify(os.urandom(settings.USER_PUBLIC_KEY_LENGTH_BYTES)).decode()
    self.test_private_key = binascii.hexlify(os.urandom(settings.USER_PRIVATE_KEY_LENGTH_BYTES)).decode()
    self.test_private_key_nonce = binascii.hexlify(os.urandom(settings.NONCE_LENGTH_BYTES)).decode()
    self.test_secret_key = binascii.hexlify(os.urandom(settings.USER_SECRET_KEY_LENGTH_BYTES)).decode()
    self.test_secret_key_nonce = binascii.hexlify(os.urandom(settings.NONCE_LENGTH_BYTES)).decode()
    self.test_user_sauce = 'ef37b3192178b9a97b551572314388058c14a4dabdbf63d022bcba9951809b6d'

    self.test_user_obj = models.User.objects.create(
        email=self.test_email,
        email_bcrypt=self.test_email_bcrypt,
        username=self.test_username,
        authkey=make_password(self.test_authkey),
        public_key=self.test_public_key,
        private_key=self.test_private_key,
        private_key_nonce=self.test_private_key_nonce,
        secret_key=self.test_secret_key,
        secret_key_nonce=self.test_secret_key_nonce,
        duo_enabled=True,
        google_authenticator_enabled=True,
        yubikey_otp_enabled=True,
        user_sauce=self.test_user_sauce,
        is_email_active=True
    )

    models.Google_Authenticator.objects.create(
        user=self.test_user_obj,
        title= 'My TItle',
        secret = '1234'
    )

    models.Duo.objects.create(
        user=self.test_user_obj,
        title= 'My Sweet Title',
        duo_integration_key = 'duo_integration_key',
        duo_secret_key = 'duo_secret_key',
        # NOTE(review): duo_host carries the 'duo_secret_key' placeholder —
        # looks like a copy-paste slip, though any dummy value works here.
        duo_host = 'duo_secret_key',
        enrollment_user_id = 'enrollment_user_id',
        enrollment_activation_code = 'enrollment_activation_code',
        enrollment_expiration_date = timezone.now() + timedelta(seconds=600),
        active = False,
    )

    models.Yubikey_OTP.objects.create(
        user=self.test_user_obj,
        title= 'My Sweet Title',
        yubikey_id = '1234'
    )
def test_put_recoverycode(self):
    """
    Tests PUT method on recoverycode
    """
    self.client.force_authenticate(user=self.test_user_obj)
    target_url = reverse('recoverycode')
    response = self.client.put(target_url, {})
    self.assertEqual(response.status_code, status.HTTP_405_METHOD_NOT_ALLOWED)
def test_get_recoverycode(self):
    """
    Tests GET method on recoverycode
    """
    self.client.force_authenticate(user=self.test_user_obj)
    target_url = reverse('recoverycode')
    response = self.client.get(target_url, {})
    self.assertEqual(response.status_code, status.HTTP_405_METHOD_NOT_ALLOWED)
def test_new_recoverycode_with_empty_recovery_authkey(self):
    """
    Tests that creating a new recoverycode with an empty recovery_authkey is rejected
    """
    url = reverse('recoverycode')

    data = {
        'recovery_authkey': '',
        'recovery_data': '123456678',
        # Fixed: the key previously had a trailing space
        # ('recovery_data_nonce '), so the field was never actually sent.
        'recovery_data_nonce': '123456788',
        'recovery_sauce': '123456788',
    }

    self.client.force_authenticate(user=self.test_user_obj)
    response = self.client.post(url, data)

    self.assertEqual(response.status_code, status.HTTP_400_BAD_REQUEST)
    # assertIn reports the actual contents on failure, unlike assertTrue(in).
    self.assertIn('recovery_authkey', response.data)
def test_new_recoverycode_with_no_recovery_authkey(self):
    """
    Tests that creating a new recoverycode without a recovery_authkey is rejected
    """
    url = reverse('recoverycode')

    data = {
        'recovery_data': '123456678',
        # Fixed: the key previously had a trailing space
        # ('recovery_data_nonce '), so the field was never actually sent.
        'recovery_data_nonce': '123456788',
        'recovery_sauce': '123456788',
    }

    self.client.force_authenticate(user=self.test_user_obj)
    response = self.client.post(url, data)

    self.assertEqual(response.status_code, status.HTTP_400_BAD_REQUEST)
    self.assertIn('recovery_authkey', response.data)
def test_new_recoverycode_with_empty_recovery_data(self):
    """
    Tests that creating a new recoverycode with empty recovery_data is rejected
    """
    url = reverse('recoverycode')

    data = {
        'recovery_authkey': '123456678',
        'recovery_data': '',
        # Fixed: the key previously had a trailing space
        # ('recovery_data_nonce '), so the field was never actually sent.
        'recovery_data_nonce': '123456788',
        'recovery_sauce': '123456788',
    }

    self.client.force_authenticate(user=self.test_user_obj)
    response = self.client.post(url, data)

    self.assertEqual(response.status_code, status.HTTP_400_BAD_REQUEST)
    self.assertIn('recovery_data', response.data)
def test_new_recoverycode_with_no_recovery_data(self):
"""
Tests to create a new recoverycode with no recovery_data
"""
url = reverse('recoverycode')
data = {
'recovery_authkey': '123456678',
'recovery_data_nonce ': '123456788',
'recovery_sauce': '123456788',
}
self.client.force_authenticate(user=self.test_user_obj)
response = self.client.post(url, data)
self.assertEqual(response.status_code, status.HTTP_400_BAD_REQUEST)
self.assertTrue('recovery_data' in response.data)
def test_new_recoverycode_with_recovery_data_not_in_hex(self):
"""
Tests to create a new recoverycode with recovery_data not in hex
"""
url = reverse('recoverycode')
data = {
'recovery_authkey': '123456678',
'recovery_data': '123456788X',
'recovery_data_nonce ': '123456788',
'recovery_sauce': '123456788',
}
self.client.force_authenticate(user=self.test_user_obj)
response = self.client.post(url, data)
self.assertEqual(response.status_code, status.HTTP_400_BAD_REQUEST)
self.assertTrue('recovery_data' in response.data)
def test_new_recoverycode_with_empty_recovery_data_nonce(self):
"""
Tests to create a new recoverycode with an empty recovery_data_nonce
"""
url = reverse('recoverycode')
data = {
'recovery_authkey': '123456678',
'recovery_data': '123456788',
'recovery_data_nonce ': '',
'recovery_sauce': '123456788',
}
self.client.force_authenticate(user=self.test_user_obj)
response = self.client.post(url, data)
self.assertEqual(response.status_code, status.HTTP_400_BAD_REQUEST)
self.assertTrue('recovery_data_nonce' in response.data)
def test_new_recoverycode_with_no_recovery_data_nonce(self):
"""
Tests to create a new recoverycode with no recovery_data_nonce
"""
url = reverse('recoverycode')
data = {
'recovery_authkey': '123456678',
'recovery_data': '123456788',
'recovery_sauce': '123456788',
}
self.client.force_authenticate(user=self.test_user_obj)
response = self.client.post(url, data)
self.assertEqual(response.status_code, status.HTTP_400_BAD_REQUEST)
self.assertTrue('recovery_data_nonce' in response.data)
def test_new_recoverycode_with_recovery_data_nonce_not_in_hex(self):
"""
Tests to create a new recoverycode with recovery_data_nonce not in hex
"""
url = reverse('recoverycode')
data = {
'recovery_authkey': '123456678',
'recovery_data': '123456788',
'recovery_data_nonce ': '123456788X',
'recovery_sauce': '123456788',
}
self.client.force_authenticate(user=self.test_user_obj)
response = self.client.post(url, data)
self.assertEqual(response.status_code, status.HTTP_400_BAD_REQUEST)
self.assertTrue('recovery_data_nonce' in response.data)
def test_new_recoverycode_with_empty_recovery_sauce(self):
"""
Tests to create a new recoverycode with an empty recovery_sauce
"""
url = reverse('recoverycode')
data = {
'recovery_authkey': '123456678',
'recovery_data': '123456788',
'recovery_data_nonce ': '123456788',
'recovery_sauce': '',
}
self.client.force_authenticate(user=self.test_user_obj)
response = self.client.post(url, data)
self.assertEqual(response.status_code, status.HTTP_400_BAD_REQUEST)
self.assertTrue('recovery_sauce' in response.data)
def test_new_recoverycode_with_no_recovery_sauce(self):
"""
Tests to create a new recoverycode with no recovery_sauce
"""
url = reverse('recoverycode')
data = {
'recovery_authkey': '123456678',
'recovery_data': '123456788',
'recovery_data_nonce ': '123456788',
}
self.client.force_authenticate(user=self.test_user_obj)
response = self.client.post(url, data)
self.assertEqual(response.status_code, status.HTTP_400_BAD_REQUEST)
self.assertTrue('recovery_sauce' in response.data)
def test_new_recoverycode_without_authentication(self):
"""
Tests to create a new recoverycode without authentication
"""
url = reverse('recoverycode')
data = {
'recovery_authkey': 'asdf',
'recovery_data': '123456678',
'recovery_data_nonce ': '123456788',
'recovery_sauce': '123456788',
}
response = self.client.post(url, data)
self.assertEqual(response.status_code, status.HTTP_401_UNAUTHORIZED)
def test_new_recoverycode(self):
"""
Tests to create a new recoverycode
"""
url = reverse('recoverycode')
data = {
'recovery_authkey': 'asdf',
'recovery_data': '123456678',
'recovery_data_nonce ': '123456788',
'recovery_sauce': '123456788',
}
self.client.force_authenticate(user=self.test_user_obj)
response = self.client.post(url, data)
self.assertEqual(response.status_code, status.HTTP_200_OK)
self.assertTrue('recovery_code_id' in response.data)
    def test_delete_recoverycode(self):
        """
        Tests DELETE method on recoverycode (must not be allowed)
        """
        url = reverse('recoverycode')
        data = {}
        self.client.force_authenticate(user=self.test_user_obj)
        response = self.client.delete(url, data)
        self.assertEqual(response.status_code, status.HTTP_405_METHOD_NOT_ALLOWED)
class PasswordTests(APITestCaseExtended):
    """Tests for the 'password' endpoint used during account recovery.

    POST activates a recovery code (returns the encrypted recovery data and a
    verifier public key); PUT submits the encrypted 'update_data' blob that
    sets the new authkey / private key / secret key.
    """
    def setUp(self):
        # Fixed NaCl key material for the recovery "verifier" crypto used below.
        self.verifier_private_key = '4491f6c03d8196d65f45f7f6ab693088b1e8cd14e728201e5cca8333f2a88b4e'
        self.verifier_public_key = '7a372bb1558b0d42eaac3e238e633efd997f6496c62302bdb56c3a729a7ce41c'
        self.user_public_key = '618ccedc6edc9ee8110f8a75e7bb24238759fe43f638ad41d399dae7043f9d1d'
        self.user_private_key = '07b04506e36faf9c2b478383e7db6b54b6674322d8eadc9d2c1e4aa15390e315'
        # User 1: owns a valid recovery code (created further down).
        self.test_email = ''.join(random.choice(string.ascii_lowercase) for _ in range(10)) + 'test@example.com'
        self.test_email_bcrypt = 'a'
        self.test_username = ''.join(random.choice(string.ascii_lowercase) for _ in range(10)) + 'test@psono.pw'
        self.test_authkey = binascii.hexlify(os.urandom(settings.AUTH_KEY_LENGTH_BYTES)).decode()
        self.test_private_key = binascii.hexlify(os.urandom(settings.USER_PRIVATE_KEY_LENGTH_BYTES)).decode()
        self.test_private_key_nonce = binascii.hexlify(os.urandom(settings.NONCE_LENGTH_BYTES)).decode()
        self.test_secret_key = binascii.hexlify(os.urandom(settings.USER_SECRET_KEY_LENGTH_BYTES)).decode()
        self.test_secret_key_nonce = binascii.hexlify(os.urandom(settings.NONCE_LENGTH_BYTES)).decode()
        self.test_user_sauce = '1f3f3c0f4c8a52fb0d83144bb4e7aaf04d552d43ec7a60792654ef664af17dad'
        self.test_user_obj = models.User.objects.create(
            email=self.test_email,
            email_bcrypt=self.test_email_bcrypt,
            username=self.test_username,
            authkey=make_password(self.test_authkey),
            public_key=self.user_public_key,
            private_key=self.test_private_key,
            private_key_nonce=self.test_private_key_nonce,
            secret_key=self.test_secret_key,
            secret_key_nonce=self.test_secret_key_nonce,
            user_sauce=self.test_user_sauce,
            is_email_active=True
        )
        # User 2: has no recovery code at all.
        self.test_email2 = ''.join(random.choice(string.ascii_lowercase) for _ in range(10)) + 'test@example.com'
        self.test_email_bcrypt2 = 'b'
        self.test_username2 = ''.join(random.choice(string.ascii_lowercase) for _ in range(10)) + 'test@psono.pw'
        self.test_authkey2 = binascii.hexlify(os.urandom(settings.AUTH_KEY_LENGTH_BYTES)).decode()
        self.test_private_key2 = binascii.hexlify(os.urandom(settings.USER_PRIVATE_KEY_LENGTH_BYTES)).decode()
        self.test_private_key_nonce2 = binascii.hexlify(os.urandom(settings.NONCE_LENGTH_BYTES)).decode()
        self.test_secret_key2 = binascii.hexlify(os.urandom(settings.USER_SECRET_KEY_LENGTH_BYTES)).decode()
        self.test_secret_key_nonce2 = binascii.hexlify(os.urandom(settings.NONCE_LENGTH_BYTES)).decode()
        self.test_user_sauce2 = 'fbcd7106abf5ef076af9a1ab59e98ff5f4f81f524ede6d7155500e059b25b8b0'
        self.test_user_obj2 = models.User.objects.create(
            email=self.test_email2,
            email_bcrypt=self.test_email_bcrypt2,
            username=self.test_username2,
            authkey=make_password(self.test_authkey2),
            public_key=self.user_public_key,
            private_key=self.test_private_key2,
            private_key_nonce=self.test_private_key_nonce2,
            secret_key=self.test_secret_key2,
            secret_key_nonce=self.test_secret_key_nonce2,
            user_sauce=self.test_user_sauce2,
            is_email_active=True
        )
        # User 3: has a recovery code whose verifier has already expired.
        self.test_email3 = ''.join(random.choice(string.ascii_lowercase) for _ in range(10)) + 'test@example.com'
        self.test_email_bcrypt3 = 'c'
        self.test_username3 = ''.join(random.choice(string.ascii_lowercase) for _ in range(10)) + 'test@psono.pw'
        self.test_authkey3 = binascii.hexlify(os.urandom(settings.AUTH_KEY_LENGTH_BYTES)).decode()
        self.test_private_key3 = binascii.hexlify(os.urandom(settings.USER_PRIVATE_KEY_LENGTH_BYTES)).decode()
        self.test_private_key_nonce3 = binascii.hexlify(os.urandom(settings.NONCE_LENGTH_BYTES)).decode()
        self.test_secret_key3 = binascii.hexlify(os.urandom(settings.USER_SECRET_KEY_LENGTH_BYTES)).decode()
        self.test_secret_key_nonce3 = binascii.hexlify(os.urandom(settings.NONCE_LENGTH_BYTES)).decode()
        self.test_user_sauce3 = 'de6096562c48b5f58aaabfa9dfab3a59930daf57aa50f53a4d80d8205a91ba17'
        self.test_user_obj3 = models.User.objects.create(
            email=self.test_email3,
            email_bcrypt=self.test_email_bcrypt3,
            username=self.test_username3,
            authkey=make_password(self.test_authkey3),
            public_key=self.user_public_key,
            private_key=self.test_private_key3,
            private_key_nonce=self.test_private_key_nonce3,
            secret_key=self.test_secret_key3,
            secret_key_nonce=self.test_secret_key_nonce3,
            user_sauce=self.test_user_sauce3,
            is_email_active=True
        )
        # Valid recovery code belonging to user 1 (verifier issued just now).
        self.test_recovery_authkey = 'asdf'
        self.test_recovery_data = 'test_recovery_data'
        self.test_recovery_data_nonce = 'test_recovery_data_nonce'
        self.test_recovery_sauce = 'test_recovery_sauce'
        self.test_recovery_code_obj = models.Recovery_Code.objects.create(
            user = self.test_user_obj,
            recovery_authkey = make_password(self.test_recovery_authkey),
            recovery_data = readbuffer(self.test_recovery_data),
            recovery_data_nonce = self.test_recovery_data_nonce,
            verifier = self.verifier_private_key,
            verifier_issue_date = timezone.now(),
            recovery_sauce = self.test_recovery_sauce
        )
        # Recovery code for user 3 whose verifier_issue_date lies exactly
        # RECOVERY_VERIFIER_TIME_VALID seconds in the past, i.e. expired.
        self.test_recovery_authkey2 = 'asdf123'
        self.test_recovery_data2 = 'test_recovery_data2'
        self.test_recovery_data_nonce2 = 'test_recovery_data_nonce2'
        self.test_recovery_sauce2 = 'test_recovery_sauce2'
        self.test_recovery_code_obj_expired = models.Recovery_Code.objects.create(
            user = self.test_user_obj3,
            recovery_authkey = make_password(self.test_recovery_authkey2),
            recovery_data = readbuffer(self.test_recovery_data2),
            recovery_data_nonce = self.test_recovery_data_nonce2,
            verifier = self.verifier_private_key,
            verifier_issue_date = timezone.now() - datetime.timedelta(0, settings.RECOVERY_VERIFIER_TIME_VALID),
            recovery_sauce = self.test_recovery_sauce2
        )
    def test_get_password(self):
        """
        Tests GET method on password (must not be allowed)
        """
        url = reverse('password')
        data = {}
        self.client.force_authenticate(user=self.test_user_obj)
        response = self.client.get(url, data)
        self.assertEqual(response.status_code, status.HTTP_405_METHOD_NOT_ALLOWED)
    def test_put_password_with_no_username(self):
        """
        Tests PUT method on password with no username
        """
        url = reverse('password')
        data = {
            'recovery_authkey': self.test_recovery_authkey,
            'update_data': 'C6B13DB4699FF60CF0C60E38C5130500E62235C152FD6129D801CDDCF0604C7D',
            'update_data_nonce': '39F0F10BFC6497F74563127CA08B8DC3A8729B789BB463AF0A3B6BD1CEE9DF77',
        }
        self.client.force_authenticate(user=self.test_user_obj)
        response = self.client.put(url, data)
        self.assertEqual(response.status_code, status.HTTP_400_BAD_REQUEST)
        self.assertTrue('username' in response.data)
    def test_put_password_with_no_recovery_authkey(self):
        """
        Tests PUT method on password with no recovery authkey
        """
        url = reverse('password')
        data = {
            'username': self.test_username,
            'update_data': 'C6B13DB4699FF60CF0C60E38C5130500E62235C152FD6129D801CDDCF0604C7D',
            'update_data_nonce': '39F0F10BFC6497F74563127CA08B8DC3A8729B789BB463AF0A3B6BD1CEE9DF77',
        }
        self.client.force_authenticate(user=self.test_user_obj)
        response = self.client.put(url, data)
        self.assertEqual(response.status_code, status.HTTP_400_BAD_REQUEST)
        self.assertTrue('recovery_authkey' in response.data)
    def test_put_password_with_no_email_like_username(self):
        """
        Tests PUT method on password with no email like username
        """
        url = reverse('password')
        data = {
            'username': 'username',
            'recovery_authkey': self.test_recovery_authkey,
            'update_data': 'C6B13DB4699FF60CF0C60E38C5130500E62235C152FD6129D801CDDCF0604C7D',
            'update_data_nonce': '39F0F10BFC6497F74563127CA08B8DC3A8729B789BB463AF0A3B6BD1CEE9DF77',
        }
        self.client.force_authenticate(user=self.test_user_obj)
        response = self.client.put(url, data)
        self.assertEqual(response.status_code, status.HTTP_400_BAD_REQUEST)
        self.assertTrue('username' in response.data)
    def test_put_password_with_incorrect_username(self):
        """
        Tests PUT method on password with incorrect username
        """
        url = reverse('password')
        data = {
            'username': 'asdf@asdf.com',
            'recovery_authkey': self.test_recovery_authkey,
            'update_data': 'C6B13DB4699FF60CF0C60E38C5130500E62235C152FD6129D801CDDCF0604C7D',
            'update_data_nonce': '39F0F10BFC6497F74563127CA08B8DC3A8729B789BB463AF0A3B6BD1CEE9DF77',
        }
        self.client.force_authenticate(user=self.test_user_obj)
        response = self.client.put(url, data)
        self.assertEqual(response.status_code, status.HTTP_400_BAD_REQUEST)
    def test_put_password_with_incorrect_authkey(self):
        """
        Tests PUT method on password with incorrect authkey
        """
        url = reverse('password')
        data = {
            'username': self.test_username,
            'recovery_authkey': 'WrongAuthKey',
            'update_data': 'C6B13DB4699FF60CF0C60E38C5130500E62235C152FD6129D801CDDCF0604C7D',
            'update_data_nonce': '39F0F10BFC6497F74563127CA08B8DC3A8729B789BB463AF0A3B6BD1CEE9DF77',
        }
        self.client.force_authenticate(user=self.test_user_obj)
        response = self.client.put(url, data)
        self.assertEqual(response.status_code, status.HTTP_400_BAD_REQUEST)
    def test_put_password_with_user_who_has_no_recovery_key(self):
        """
        Tests PUT method on password with user who has no recovery key
        """
        url = reverse('password')
        data = {
            'username': self.test_username2,
            'recovery_authkey': self.test_recovery_authkey,
            'update_data': 'C6B13DB4699FF60CF0C60E38C5130500E62235C152FD6129D801CDDCF0604C7D',
            'update_data_nonce': '39F0F10BFC6497F74563127CA08B8DC3A8729B789BB463AF0A3B6BD1CEE9DF77',
        }
        self.client.force_authenticate(user=self.test_user_obj)
        response = self.client.put(url, data)
        self.assertEqual(response.status_code, status.HTTP_400_BAD_REQUEST)
    def test_put_password_with_expired_recovery_code_verifier(self):
        """
        Tests PUT method on password with expired recovery code verifier
        """
        url = reverse('password')
        data = {
            'username': self.test_username3,
            'recovery_authkey': self.test_recovery_authkey2,
            'update_data': 'C6B13DB4699FF60CF0C60E38C5130500E62235C152FD6129D801CDDCF0604C7D',
            'update_data_nonce': '39F0F10BFC6497F74563127CA08B8DC3A8729B789BB463AF0A3B6BD1CEE9DF77',
        }
        self.client.force_authenticate(user=self.test_user_obj)
        response = self.client.put(url, data)
        self.assertEqual(response.status_code, status.HTTP_400_BAD_REQUEST)
    def test_put_password_with_missing_authkey(self):
        """
        Tests PUT method on password with missing authkey
        """
        url = reverse('password')
        # Encrypt the update payload exactly like a client would (user private
        # key against the verifier public key), but omit the 'authkey' field.
        update_data_nonce = nacl.utils.random(Box.NONCE_SIZE)
        update_data_nonce_hex = nacl.encoding.HexEncoder.encode(update_data_nonce)
        crypto_box = Box(PrivateKey(self.user_private_key, encoder=nacl.encoding.HexEncoder),
                         PublicKey(self.verifier_public_key, encoder=nacl.encoding.HexEncoder))
        update_data_dec = crypto_box.encrypt(json.dumps({
            'private_key': 'private_key',
            'private_key_nonce': 'private_key_nonce',
            'secret_key': 'secret_key',
            'secret_key_nonce': 'secret_key_nonce',
        }).encode("utf-8"), update_data_nonce)
        # PyNaCl prepends the nonce to the ciphertext; strip it off again.
        update_data = update_data_dec[len(update_data_nonce):]
        update_data_hex = nacl.encoding.HexEncoder.encode(update_data)
        data = {
            'username': self.test_username,
            'recovery_authkey': self.test_recovery_authkey,
            'update_data': update_data_hex.decode(),
            'update_data_nonce': update_data_nonce_hex.decode(),
        }
        self.client.force_authenticate(user=self.test_user_obj)
        response = self.client.put(url, data)
        self.assertEqual(response.status_code, status.HTTP_403_FORBIDDEN)
    def test_put_password_with_missing_private_key(self):
        """
        Tests PUT method on password with missing private_key
        """
        url = reverse('password')
        update_data_nonce = nacl.utils.random(Box.NONCE_SIZE)
        update_data_nonce_hex = nacl.encoding.HexEncoder.encode(update_data_nonce)
        crypto_box = Box(PrivateKey(self.user_private_key, encoder=nacl.encoding.HexEncoder),
                         PublicKey(self.verifier_public_key, encoder=nacl.encoding.HexEncoder))
        update_data_dec = crypto_box.encrypt(json.dumps({
            'authkey': 'authkey',
            'private_key_nonce': 'private_key_nonce',
            'secret_key': 'secret_key',
            'secret_key_nonce': 'secret_key_nonce',
        }).encode("utf-8"), update_data_nonce)
        update_data = update_data_dec[len(update_data_nonce):]
        update_data_hex = nacl.encoding.HexEncoder.encode(update_data)
        data = {
            'username': self.test_username,
            'recovery_authkey': self.test_recovery_authkey,
            'update_data': update_data_hex.decode(),
            'update_data_nonce': update_data_nonce_hex.decode(),
        }
        self.client.force_authenticate(user=self.test_user_obj)
        response = self.client.put(url, data)
        self.assertEqual(response.status_code, status.HTTP_403_FORBIDDEN)
    def test_put_password_with_missing_private_key_nonce(self):
        """
        Tests PUT method on password with missing private_key_nonce
        """
        url = reverse('password')
        update_data_nonce = nacl.utils.random(Box.NONCE_SIZE)
        update_data_nonce_hex = nacl.encoding.HexEncoder.encode(update_data_nonce)
        crypto_box = Box(PrivateKey(self.user_private_key, encoder=nacl.encoding.HexEncoder),
                         PublicKey(self.verifier_public_key, encoder=nacl.encoding.HexEncoder))
        update_data_dec = crypto_box.encrypt(json.dumps({
            'authkey': 'authkey',
            'private_key': 'private_key',
            'secret_key': 'secret_key',
            'secret_key_nonce': 'secret_key_nonce',
        }).encode("utf-8"), update_data_nonce)
        update_data = update_data_dec[len(update_data_nonce):]
        update_data_hex = nacl.encoding.HexEncoder.encode(update_data)
        data = {
            'username': self.test_username,
            'recovery_authkey': self.test_recovery_authkey,
            'update_data': update_data_hex.decode(),
            'update_data_nonce': update_data_nonce_hex.decode(),
        }
        self.client.force_authenticate(user=self.test_user_obj)
        response = self.client.put(url, data)
        self.assertEqual(response.status_code, status.HTTP_403_FORBIDDEN)
    def test_put_password_with_missing_secret_key(self):
        """
        Tests PUT method on password with missing secret_key
        """
        url = reverse('password')
        update_data_nonce = nacl.utils.random(Box.NONCE_SIZE)
        update_data_nonce_hex = nacl.encoding.HexEncoder.encode(update_data_nonce)
        crypto_box = Box(PrivateKey(self.user_private_key, encoder=nacl.encoding.HexEncoder),
                         PublicKey(self.verifier_public_key, encoder=nacl.encoding.HexEncoder))
        update_data_dec = crypto_box.encrypt(json.dumps({
            'authkey': 'authkey',
            'private_key': 'private_key',
            'private_key_nonce': 'private_key_nonce',
            'secret_key_nonce': 'secret_key_nonce',
        }).encode("utf-8"), update_data_nonce)
        update_data = update_data_dec[len(update_data_nonce):]
        update_data_hex = nacl.encoding.HexEncoder.encode(update_data)
        data = {
            'username': self.test_username,
            'recovery_authkey': self.test_recovery_authkey,
            'update_data': update_data_hex.decode(),
            'update_data_nonce': update_data_nonce_hex.decode(),
        }
        self.client.force_authenticate(user=self.test_user_obj)
        response = self.client.put(url, data)
        self.assertEqual(response.status_code, status.HTTP_403_FORBIDDEN)
    def test_put_password_with_missing_secret_key_nonce(self):
        """
        Tests PUT method on password with missing secret_key_nonce
        """
        url = reverse('password')
        update_data_nonce = nacl.utils.random(Box.NONCE_SIZE)
        update_data_nonce_hex = nacl.encoding.HexEncoder.encode(update_data_nonce)
        crypto_box = Box(PrivateKey(self.user_private_key, encoder=nacl.encoding.HexEncoder),
                         PublicKey(self.verifier_public_key, encoder=nacl.encoding.HexEncoder))
        update_data_dec = crypto_box.encrypt(json.dumps({
            'authkey': 'authkey',
            'private_key': 'private_key',
            'private_key_nonce': 'private_key_nonce',
            'secret_key': 'secret_key',
        }).encode("utf-8"), update_data_nonce)
        update_data = update_data_dec[len(update_data_nonce):]
        update_data_hex = nacl.encoding.HexEncoder.encode(update_data)
        data = {
            'username': self.test_username,
            'recovery_authkey': self.test_recovery_authkey,
            'update_data': update_data_hex.decode(),
            'update_data_nonce': update_data_nonce_hex.decode(),
        }
        self.client.force_authenticate(user=self.test_user_obj)
        response = self.client.put(url, data)
        self.assertEqual(response.status_code, status.HTTP_403_FORBIDDEN)
    def test_put_password_with_invalid_json(self):
        """
        Tests PUT method on password with invalid json
        """
        url = reverse('password')
        update_data_nonce = nacl.utils.random(Box.NONCE_SIZE)
        update_data_nonce_hex = nacl.encoding.HexEncoder.encode(update_data_nonce)
        crypto_box = Box(PrivateKey(self.user_private_key, encoder=nacl.encoding.HexEncoder),
                         PublicKey(self.verifier_public_key, encoder=nacl.encoding.HexEncoder))
        # Correctly encrypted, but the plaintext is not valid JSON.
        update_data_dec = crypto_box.encrypt('narf'.encode("utf-8"), update_data_nonce)
        update_data = update_data_dec[len(update_data_nonce):]
        update_data_hex = nacl.encoding.HexEncoder.encode(update_data)
        data = {
            'username': self.test_username,
            'recovery_authkey': self.test_recovery_authkey,
            'update_data': update_data_hex.decode(),
            'update_data_nonce': update_data_nonce_hex.decode(),
        }
        self.client.force_authenticate(user=self.test_user_obj)
        response = self.client.put(url, data)
        self.assertEqual(response.status_code, status.HTTP_403_FORBIDDEN)
    def test_put_password_with_no_validation(self):
        """
        Tests PUT method on password with no validation
        """
        url = reverse('password')
        data = {
            'username': self.test_username,
            'recovery_authkey': self.test_recovery_authkey,
            'update_data': 'C6B13DB4699FF60CF0C60E38C5130500E62235C152FD6129D801CDDCF0604C7D',
            'update_data_nonce': '39F0F10BFC6497F74563127CA08B8DC3A8729B789BB463AF0A3B6BD1CEE9DF77',
        }
        self.client.force_authenticate(user=self.test_user_obj)
        response = self.client.put(url, data)
        self.assertEqual(response.status_code, status.HTTP_403_FORBIDDEN)
    def test_put_password_with_update_data_being_no_hex(self):
        """
        Tests PUT method on password with update_data being no hex
        """
        url = reverse('password')
        data = {
            'username': self.test_username,
            'recovery_authkey': self.test_recovery_authkey,
            'update_data': 'X',
            'update_data_nonce': '39F0F10BFC6497F74563127CA08B8DC3A8729B789BB463AF0A3B6BD1CEE9DF77',
        }
        self.client.force_authenticate(user=self.test_user_obj)
        response = self.client.put(url, data)
        self.assertEqual(response.status_code, status.HTTP_400_BAD_REQUEST)
    def test_put_password_with_update_data_nonce_being_no_hex(self):
        """
        Tests PUT method on password with update_data_nonce being no hex
        """
        url = reverse('password')
        data = {
            'username': self.test_username,
            'recovery_authkey': self.test_recovery_authkey,
            'update_data': 'C6B13DB4699FF60CF0C60E38C5130500E62235C152FD6129D801CDDCF0604C7D',
            'update_data_nonce': '39F0F10BFC6497F74563127CA08B8DC3A8729B789BB463AF0A3B6BD1CEE9DF7X',
        }
        self.client.force_authenticate(user=self.test_user_obj)
        response = self.client.put(url, data)
        self.assertEqual(response.status_code, status.HTTP_400_BAD_REQUEST)
    def test_put_password(self):
        """
        Tests PUT method on password (happy path)
        """
        url = reverse('password')
        update_data_nonce = nacl.utils.random(Box.NONCE_SIZE)
        update_data_nonce_hex = nacl.encoding.HexEncoder.encode(update_data_nonce).decode()
        crypto_box = Box(PrivateKey(self.user_private_key, encoder=nacl.encoding.HexEncoder),
                         PublicKey(self.verifier_public_key, encoder=nacl.encoding.HexEncoder))
        new_authkey = 'authkey'
        new_private_key = 'private_key'
        new_private_key_nonce = 'private_key_nonce'
        new_secret_key = 'secret_key'
        new_secret_key_nonce = 'secret_key_nonce'
        update_data_dec = crypto_box.encrypt(json.dumps({
            'authkey': new_authkey,
            'private_key': new_private_key,
            'private_key_nonce': new_private_key_nonce,
            'secret_key': new_secret_key,
            'secret_key_nonce': new_secret_key_nonce,
        }).encode("utf-8"), update_data_nonce)
        update_data = update_data_dec[len(update_data_nonce):]
        update_data_hex = nacl.encoding.HexEncoder.encode(update_data).decode()
        data = {
            'username': self.test_username,
            'recovery_authkey': self.test_recovery_authkey,
            'update_data': update_data_hex,
            'update_data_nonce': update_data_nonce_hex,
        }
        self.client.force_authenticate(user=self.test_user_obj)
        response = self.client.put(url, data)
        self.assertEqual(response.status_code, status.HTTP_200_OK)
        # Update was accepted, now lets check that the old verifier details have been deleted in the database and the
        # user details like authkey and private / secret key have been updated
        db_user = models.User.objects.get(pk=self.test_user_obj.id)
        self.assertEqual(db_user.private_key, new_private_key)
        self.assertEqual(db_user.private_key_nonce, new_private_key_nonce)
        self.assertEqual(db_user.secret_key, new_secret_key)
        self.assertEqual(db_user.secret_key_nonce, new_secret_key_nonce)
        # A password reset must also disable / remove every second factor.
        self.assertEqual(db_user.duo_enabled, False)
        self.assertEqual(db_user.google_authenticator_enabled, False)
        self.assertEqual(db_user.yubikey_otp_enabled, False)
        self.assertTrue(check_password(new_authkey, db_user.authkey))
        self.assertFalse(models.Google_Authenticator.objects.filter(user=self.test_user_obj).exists())
        self.assertFalse(models.Yubikey_OTP.objects.filter(user=self.test_user_obj).exists())
        self.assertFalse(models.Duo.objects.filter(user=self.test_user_obj).exists())
        db_recovery_code = models.Recovery_Code.objects.get(pk=self.test_recovery_code_obj.id)
        self.assertEqual(db_recovery_code.verifier, '')
        self.assertIsNone(db_recovery_code.verifier_issue_date)
    def test_post_password_with_no_username(self):
        """
        Tests POST method on password with no username
        """
        url = reverse('password')
        data = {
            'recovery_authkey': self.test_recovery_authkey,
        }
        self.client.force_authenticate(user=self.test_user_obj)
        response = self.client.post(url, data)
        self.assertEqual(response.status_code, status.HTTP_400_BAD_REQUEST)
        self.assertTrue('username' in response.data)
    def test_post_password_with_no_recovery_authkey(self):
        """
        Tests POST method on password with no recovery authkey
        """
        url = reverse('password')
        data = {
            'username': self.test_username,
        }
        self.client.force_authenticate(user=self.test_user_obj)
        response = self.client.post(url, data)
        self.assertEqual(response.status_code, status.HTTP_400_BAD_REQUEST)
        self.assertTrue('recovery_authkey' in response.data)
    def test_post_password_with_no_email_like_username(self):
        """
        Tests POST method on password with no email like username
        """
        url = reverse('password')
        data = {
            'recovery_authkey': self.test_recovery_authkey,
            'username': 'username',
        }
        self.client.force_authenticate(user=self.test_user_obj)
        response = self.client.post(url, data)
        self.assertEqual(response.status_code, status.HTTP_400_BAD_REQUEST)
        self.assertTrue('username' in response.data)
    def test_post_password_with_incorrect_username(self):
        """
        Tests POST method on password with incorrect username
        """
        url = reverse('password')
        data = {
            'recovery_authkey': self.test_recovery_authkey,
            'username': 'asdf@asdf.com',
        }
        self.client.force_authenticate(user=self.test_user_obj)
        response = self.client.post(url, data)
        self.assertEqual(response.status_code, status.HTTP_400_BAD_REQUEST)
    def test_post_password_with_incorrect_authkey(self):
        """
        Tests POST method on password with incorrect authkey
        """
        url = reverse('password')
        data = {
            'recovery_authkey': 'WrongAuthKey',
            'username': self.test_username,
        }
        self.client.force_authenticate(user=self.test_user_obj)
        response = self.client.post(url, data)
        self.assertEqual(response.status_code, status.HTTP_400_BAD_REQUEST)
    def test_post_password_with_user_who_has_no_recovery_key(self):
        """
        Tests POST method on password with user who has no recovery key
        """
        url = reverse('password')
        data = {
            'recovery_authkey': self.test_recovery_authkey,
            'username': self.test_username2,
        }
        self.client.force_authenticate(user=self.test_user_obj2)
        response = self.client.post(url, data)
        self.assertEqual(response.status_code, status.HTTP_400_BAD_REQUEST)
    def test_post_password(self):
        """
        Tests POST method on password (happy path: activates the recovery code)
        """
        url = reverse('password')
        data = {
            'recovery_authkey': self.test_recovery_authkey,
            'username': self.test_username,
        }
        self.client.force_authenticate(user=self.test_user_obj)
        response = self.client.post(url, data)
        self.assertEqual(response.status_code, status.HTTP_200_OK)
        self.assertTrue('recovery_data' in response.data)
        self.assertEqual(response.data['recovery_data'], self.test_recovery_data)
        self.assertTrue('recovery_data_nonce' in response.data)
        self.assertEqual(response.data['recovery_data_nonce'], self.test_recovery_data_nonce)
        self.assertTrue('user_sauce' in response.data)
        self.assertEqual(response.data['user_sauce'], self.test_user_sauce)
        self.assertTrue('verifier_time_valid' in response.data)
        self.assertEqual(response.data['verifier_time_valid'], settings.RECOVERY_VERIFIER_TIME_VALID)
        self.assertTrue('recovery_sauce' in response.data)
        self.assertEqual(response.data['recovery_sauce'], self.test_recovery_sauce)
        self.assertTrue('verifier_public_key' in response.data)
        # 32-byte curve25519 public key, hex encoded.
        self.assertEqual(len(response.data['verifier_public_key']), 64)
    def test_delete_password(self):
        """
        Tests DELETE method on password (must not be allowed)
        """
        url = reverse('password')
        data = {}
        self.client.force_authenticate(user=self.test_user_obj)
        response = self.client.delete(url, data)
        self.assertEqual(response.status_code, status.HTTP_405_METHOD_NOT_ALLOWED)
| 36.723366 | 117 | 0.670624 |
60387fc7215a34ece20e860661209110d8f6ad7d | 1,448 | py | Python | flask_headless_cms/skeleton/app/helpers/json_builder.py | sartim/flask-headless-cms | ff568acd1687661bcdf62c5d05babf5ab446c877 | [
"MIT"
] | 5 | 2019-04-28T12:52:51.000Z | 2021-05-24T07:05:31.000Z | flask_headless_cms/skeleton/app/helpers/json_builder.py | sartim/flask-headless-cms | ff568acd1687661bcdf62c5d05babf5ab446c877 | [
"MIT"
] | 59 | 2020-03-19T07:57:32.000Z | 2021-07-14T05:52:03.000Z | flask_headless_cms/skeleton/app/helpers/json_builder.py | sartim/flask-headless-cms | ff568acd1687661bcdf62c5d05babf5ab446c877 | [
"MIT"
] | null | null | null | import flask
from flask import jsonify
from app.helpers.exceptions import JsonException
def success_js_response(dict_ctx=None, success=True, message=None):
    """Build a JSON success/error response.

    Adds ``success`` and ``message`` keys to *dict_ctx* and returns a
    ``(jsonify(dict_ctx), 200)`` tuple.

    Raises:
        JsonException: if ``success`` is falsy and no ``message`` was given.

    Note: the default used to be the mutable literal ``{}``, which Python
    shares across calls, so keys from one response could leak into the next;
    a fresh dict is now created per call.
    """
    if dict_ctx is None:
        dict_ctx = {}
    dict_ctx["success"] = success
    if not success and message is None:
        raise JsonException("must define a message if returning an error")
    dict_ctx["message"] = message
    return jsonify(dict_ctx), 200
def response_dict(obj, results, path, id=None, **kwargs):
    """Assemble a paginated API payload with absolute next/previous URLs.

    `obj` looks like a Flask-SQLAlchemy Pagination object (it exposes
    next_num, prev_num, total and has_next) -- TODO confirm at the call sites.
    `kwargs` is presumably at most one extra filter query parameter; with
    several kwargs the ' '.join of the keys would build a nonsensical URL.
    Must run inside a Flask request context (uses flask.request.url_root).
    """
    domain = flask.request.url_root
    # Build the "next" URL: id filter takes precedence, then an extra kwarg
    # filter, otherwise plain page-based pagination.
    if id:
        next_url = "{}{}?id={}".format(domain, path, id)
    elif kwargs:
        next_url = "{}{}?{}={}&page={}".format(domain, path, ' '.join(kwargs.keys()),
                                               kwargs[' '.join(kwargs.keys())], obj.next_num)
    elif not id:
        next_url = "{}{}?page={}".format(domain, path, obj.next_num)
    # Build the "previous" URL; NOTE(review): the branch order differs from the
    # "next" block above (kwargs wins over id here) -- confirm this is intended.
    if kwargs:
        prev_url = "{}{}?{}={}&page={}".format(domain, path, ' '.join(kwargs.keys()),
                                               kwargs[' '.join(kwargs.keys())], obj.prev_num)
    elif not id:
        prev_url = "{0}{1}?page={2}".format(domain, path, obj.prev_num)
    elif id:
        prev_url = "{}{}?id={}&page={}".format(domain, path, id, obj.prev_num)
    # Empty strings stand in for missing next/previous pages.
    if obj.has_next:
        data = dict(count=obj.total, results=results, next=next_url, previous=prev_url if obj.prev_num else "")
    else:
        data = dict(count=obj.total, results=results, next="", previous="")
    return data
| 37.128205 | 111 | 0.585635 |
6288f71c531a0077b15dfeba8f959cdbe32a6979 | 528 | py | Python | nelfurion/nelfurion/wsgi.py | nelfurion/nelfurion | dd4623fa2f04629231b25092fa8f4bb284cbe879 | [
"MIT"
] | null | null | null | nelfurion/nelfurion/wsgi.py | nelfurion/nelfurion | dd4623fa2f04629231b25092fa8f4bb284cbe879 | [
"MIT"
] | null | null | null | nelfurion/nelfurion/wsgi.py | nelfurion/nelfurion | dd4623fa2f04629231b25092fa8f4bb284cbe879 | [
"MIT"
] | null | null | null | """
WSGI config for nelfurion project.
It exposes the WSGI callable as a module-level variable named ``application``.
For more information on this file, see
https://docs.djangoproject.com/en/2.0/howto/deployment/wsgi/
"""
import os
from django.core.wsgi import get_wsgi_application
from whitenoise import WhiteNoise
from nelfurion.settings import STATIC_ROOT
# Point Django at the project settings module before building the application.
os.environ.setdefault("DJANGO_SETTINGS_MODULE", "nelfurion.settings")
application = get_wsgi_application()
# Wrap the WSGI app in WhiteNoise so static files are served from STATIC_ROOT
# without a separate web server.
application = WhiteNoise(application, root=STATIC_ROOT)
e7900acef9478d60666bc488172937ef14e33dda | 10,187 | py | Python | src/BERT_experiments/tokenization.py | danieldeutsch/SumQE | 98ffbf57adc072a32a3478f6acc91bf0664433a8 | [
"MIT"
] | 20 | 2019-09-04T13:01:02.000Z | 2021-03-05T08:50:45.000Z | src/BERT_experiments/tokenization.py | danieldeutsch/SumQE | 98ffbf57adc072a32a3478f6acc91bf0664433a8 | [
"MIT"
] | 6 | 2019-11-04T22:19:55.000Z | 2022-03-11T23:58:41.000Z | src/BERT_experiments/tokenization.py | danieldeutsch/SumQE | 98ffbf57adc072a32a3478f6acc91bf0664433a8 | [
"MIT"
] | 5 | 2019-10-12T14:10:46.000Z | 2020-12-07T11:58:00.000Z | import collections
import unicodedata
import six
import tensorflow as tf
from numpy import unicode
def convert_to_unicode(text):
    """Converts `text` to Unicode (if it's not already), assuming utf-8 input.

    On Python 3 the result is `str`; on Python 2 it is `unicode`.  Raises
    ValueError for unsupported input types or unknown interpreter versions.
    """
    if six.PY3:
        if isinstance(text, str):
            return text
        if isinstance(text, bytes):
            return text.decode("utf-8", "ignore")
        raise ValueError("Unsupported string type: %s" % (type(text)))
    if six.PY2:
        if isinstance(text, str):
            return text.decode("utf-8", "ignore")
        if isinstance(text, unicode):
            return text
        raise ValueError("Unsupported string type: %s" % (type(text)))
    raise ValueError("Not running on Python2 or Python 3?")
def load_vocab(vocab_file):
    """Loads a vocabulary file into a dictionary.

    Each line of the file is one token; its (0-based) line number becomes
    its id.  Returns an OrderedDict mapping token -> id.
    """
    vocab = collections.OrderedDict()
    with tf.gfile.GFile(vocab_file, "r") as reader:
        index = 0
        while True:
            line = convert_to_unicode(reader.readline())
            if not line:
                # Empty string means EOF (blank lines still contain "\n").
                break
            vocab[line.strip()] = index
            index += 1
    return vocab
def convert_by_vocab(vocab, items):
    """Converts a sequence of [tokens|ids] using the vocab.

    Works in both directions (token->id and id->token); raises KeyError
    for any item missing from `vocab`.
    """
    return [vocab[item] for item in items]
def convert_tokens_to_ids(vocab, tokens):
    """Convert a sequence of token strings to ids via the `vocab` mapping."""
    return convert_by_vocab(vocab, tokens)


def convert_ids_to_tokens(inv_vocab, ids):
    """Convert a sequence of ids back to token strings via `inv_vocab`."""
    return convert_by_vocab(inv_vocab, ids)
def whitespace_tokenize(text):
    """Runs basic whitespace cleaning and splitting on a piece of text.

    Returns [] for empty or all-whitespace input.
    """
    # str.split() with no arguments already collapses runs of whitespace
    # and yields [] for an empty string, so no explicit emptiness check
    # is required.
    return text.strip().split()
class FullTokenizer(object):
    """Runs end-to-end tokenization: basic tokenization then WordPiece."""

    def __init__(self, vocab_file, do_lower_case=True):
        self.vocab = load_vocab(vocab_file)
        # Reverse mapping, used to decode ids back into token strings.
        self.inv_vocab = {index: token for token, index in self.vocab.items()}
        self.basic_tokenizer = BasicTokenizer(do_lower_case=do_lower_case)
        self.wordpiece_tokenizer = WordpieceTokenizer(vocab=self.vocab)

    def tokenize(self, text):
        """Split `text` into basic tokens, then each token into WordPieces."""
        return [piece
                for word in self.basic_tokenizer.tokenize(text)
                for piece in self.wordpiece_tokenizer.tokenize(word)]

    def convert_tokens_to_ids(self, tokens):
        """Map token strings to their integer vocabulary ids."""
        return convert_by_vocab(self.vocab, tokens)

    def convert_ids_to_tokens(self, ids):
        """Map integer vocabulary ids back to token strings."""
        return convert_by_vocab(self.inv_vocab, ids)
class BasicTokenizer(object):
    """Runs basic tokenization (punctuation splitting, lower casing, etc.)."""

    def __init__(self, do_lower_case=True):
        """Constructs a BasicTokenizer.

        Args:
            do_lower_case: Whether to lower case the input.
        """
        self.do_lower_case = do_lower_case

    def tokenize(self, text):
        """Tokenizes a piece of text.

        Pipeline: drop invalid/control chars -> isolate CJK chars ->
        whitespace split -> (optionally) lower-case and strip accents ->
        split off punctuation.  Returns a list of string tokens.
        """
        text = convert_to_unicode(text)
        text = self._clean_text(text)

        # This was added on November 1st, 2018 for the multilingual and Chinese
        # models. This is also applied to the English models now, but it doesn't
        # matter since the English models were not trained on any Chinese data
        # and generally don't have any Chinese data in them (there are Chinese
        # characters in the vocabulary because Wikipedia does have some Chinese
        # words in the English Wikipedia.).
        text = self._tokenize_chinese_chars(text)

        orig_tokens = whitespace_tokenize(text)
        split_tokens = []
        for token in orig_tokens:
            if self.do_lower_case:
                token = token.lower()
                # Accents are stripped only in the lower-cased ("uncased")
                # configuration.
                token = self._run_strip_accents(token)
            split_tokens.extend(self._run_split_on_punc(token))

        # Re-join and re-split so punctuation pieces become separate tokens.
        output_tokens = whitespace_tokenize(" ".join(split_tokens))
        return output_tokens

    def _run_strip_accents(self, text):
        """Strips accents from a piece of text."""
        # NFD decomposition separates base characters from combining marks;
        # dropping category "Mn" (nonspacing marks) removes the accents.
        text = unicodedata.normalize("NFD", text)
        output = []
        for char in text:
            cat = unicodedata.category(char)
            if cat == "Mn":
                continue
            output.append(char)
        return "".join(output)

    def _run_split_on_punc(self, text):
        """Splits punctuation on a piece of text."""
        # Builds a list of character groups: each punctuation char becomes
        # its own single-char group, while runs of non-punctuation chars
        # stay together; the groups are then joined back into strings.
        chars = list(text)
        i = 0
        start_new_word = True
        output = []
        while i < len(chars):
            char = chars[i]
            if _is_punctuation(char):
                output.append([char])
                start_new_word = True
            else:
                if start_new_word:
                    output.append([])
                start_new_word = False
                output[-1].append(char)
            i += 1

        return ["".join(x) for x in output]

    def _tokenize_chinese_chars(self, text):
        """Adds whitespace around any CJK character."""
        output = []
        for char in text:
            cp = ord(char)
            if self._is_chinese_char(cp):
                output.append(" ")
                output.append(char)
                output.append(" ")
            else:
                output.append(char)
        return "".join(output)

    def _is_chinese_char(self, cp):
        """Checks whether CP is the codepoint of a CJK character."""
        # This defines a "chinese character" as anything in the CJK Unicode block:
        # https://en.wikipedia.org/wiki/CJK_Unified_Ideographs_(Unicode_block)
        #
        # Note that the CJK Unicode block is NOT all Japanese and Korean characters,
        # despite its name. The modern Korean Hangul alphabet is a different block,
        # as is Japanese Hiragana and Katakana. Those alphabets are used to write
        # space-separated words, so they are not treated specially and handled
        # like the all of the other languages.
        if ((cp >= 0x4E00 and cp <= 0x9FFF) or #
            (cp >= 0x3400 and cp <= 0x4DBF) or #
            (cp >= 0x20000 and cp <= 0x2A6DF) or #
            (cp >= 0x2A700 and cp <= 0x2B73F) or #
            (cp >= 0x2B740 and cp <= 0x2B81F) or #
            (cp >= 0x2B820 and cp <= 0x2CEAF) or
            (cp >= 0xF900 and cp <= 0xFAFF) or #
            (cp >= 0x2F800 and cp <= 0x2FA1F)): #
            return True

        return False

    def _clean_text(self, text):
        """Performs invalid character removal and whitespace cleanup on text."""
        output = []
        for char in text:
            cp = ord(char)
            # Drop NUL, the Unicode replacement character, and control chars;
            # normalize every whitespace variant to a single space.
            if cp == 0 or cp == 0xfffd or _is_control(char):
                continue
            if _is_whitespace(char):
                output.append(" ")
            else:
                output.append(char)
        return "".join(output)
class WordpieceTokenizer(object):
    """Greedy longest-match-first WordPiece tokenizer."""

    def __init__(self, vocab, unk_token="[UNK]", max_input_chars_per_word=200):
        self.vocab = vocab
        self.unk_token = unk_token
        self.max_input_chars_per_word = max_input_chars_per_word

    def tokenize(self, text):
        """Tokenizes a piece of text into its word pieces.

        Uses a greedy longest-match-first algorithm against the vocabulary.
        Non-initial pieces carry a "##" prefix, e.g.:

            input  = "unaffable"
            output = ["un", "##aff", "##able"]

        Args:
            text: A single token or whitespace separated tokens, already
                passed through `BasicTokenizer`.

        Returns:
            A list of wordpiece tokens; words that cannot be decomposed
            (or are over-long) become the unknown token.
        """
        text = convert_to_unicode(text)
        output = []
        for word in whitespace_tokenize(text):
            chars = list(word)
            if len(chars) > self.max_input_chars_per_word:
                output.append(self.unk_token)
                continue

            pieces = []
            pos = 0
            bad = False
            while pos < len(chars):
                # Try the longest remaining substring first, shrinking the
                # right edge until a vocabulary entry matches.
                stop = len(chars)
                match = None
                while pos < stop:
                    piece = "".join(chars[pos:stop])
                    if pos > 0:
                        piece = "##" + piece
                    if piece in self.vocab:
                        match = piece
                        break
                    stop -= 1
                if match is None:
                    bad = True
                    break
                pieces.append(match)
                pos = stop

            if bad:
                output.append(self.unk_token)
            else:
                output.extend(pieces)
        return output
def _is_whitespace(char):
"""Checks whether `chars` is a whitespace character."""
# \t, \n, and \r are technically contorl characters but we treat them
# as whitespace since they are generally considered as such.
if char == " " or char == "\t" or char == "\n" or char == "\r":
return True
cat = unicodedata.category(char)
if cat == "Zs":
return True
return False
def _is_control(char):
"""Checks whether `chars` is a control character."""
# These are technically control characters but we count them as whitespace
# characters.
if char == "\t" or char == "\n" or char == "\r":
return False
cat = unicodedata.category(char)
if cat.startswith("C"):
return True
return False
def _is_punctuation(char):
"""Checks whether `chars` is a punctuation character."""
cp = ord(char)
# We treat all non-letter/number ASCII as punctuation.
# Characters such as "^", "$", and "`" are not in the Unicode
# Punctuation class but we treat them as punctuation anyways, for
# consistency.
if ((33 <= cp <= 47) or (cp >= 58 and cp <= 64) or
(cp >= 91 and cp <= 96) or (cp >= 123 and cp <= 126)):
return True
cat = unicodedata.category(char)
if cat.startswith("P"):
return True
return False
| 33.18241 | 84 | 0.579955 |
4a7c7664af3211c5afc6847bdc43fb5bffda7bd9 | 1,300 | py | Python | https.py | crazyguitar/ktls.py | c1f4f9707d21f2f4c2accc7492c9c9ea2398d3c9 | [
"MIT"
] | 5 | 2017-12-07T08:13:49.000Z | 2019-08-02T14:51:39.000Z | https.py | crazyguitar/ktls.py | c1f4f9707d21f2f4c2accc7492c9c9ea2398d3c9 | [
"MIT"
] | null | null | null | https.py | crazyguitar/ktls.py | c1f4f9707d21f2f4c2accc7492c9c9ea2398d3c9 | [
"MIT"
] | 1 | 2020-10-22T06:49:47.000Z | 2020-10-22T06:49:47.000Z | #!/usr/bin/env python3.6
import types
import ssl
import os
from http.server import (
HTTPServer,
SimpleHTTPRequestHandler)
from ktls.utils import set_ktls_sockopt
def sendall(self, b):
    """Replacement for socket.sendall on a kTLS-enabled socket.

    Writes the payload through the raw file descriptor with os.write() so
    the kernel TLS layer encrypts it.  Unlike the original single-call
    version, this loops until every byte is written: os.write() may do a
    partial write, and sendall's contract is to deliver the whole buffer.

    ref: cpython/Lib/socketserver.py +791
    """
    fd = self.fileno()
    view = memoryview(b)
    written = 0
    while written < len(view):
        written += os.write(fd, view[written:])
class HTTPSServer(HTTPServer):
    """HTTPServer variant that enables kernel TLS on every accepted socket."""

    def get_request(self):
        """Accept a connection, switch it to kTLS, and patch its sendall.

        ref: cpython/Lib/socketserver.py +490
        """
        sock, client_address = super().get_request()
        # Enable the kernel-TLS socket options on the accepted connection,
        # then rebind sendall so writes go straight to the raw fd.
        sock = set_ktls_sockopt(sock)
        sock.sendall = types.MethodType(sendall, sock)
        return sock, client_address
def run():
    """Serve the current directory over TLS using the kTLS-enabled server."""
    host = "localhost"
    port = 4433
    cert = 'ktls/ca/cert.pem'
    key = 'ktls/ca/key.pem'
    handler = SimpleHTTPRequestHandler

    # Build the TLS context with the demo certificate and a cipher the
    # kernel TLS implementation supports.
    context = ssl.SSLContext(ssl.PROTOCOL_SSLv23)
    context.load_cert_chain(certfile=cert, keyfile=key)
    context.set_ciphers('ECDH-ECDSA-AES128-GCM-SHA256')

    # Wrap the listening socket and serve until interrupted.
    with HTTPSServer((host, port), handler) as server:
        server.socket = context.wrap_socket(server.socket,
                                            server_side=True)
        server.serve_forever()
# Start the demo server; Ctrl-C (KeyboardInterrupt) shuts it down quietly.
try:
    run()
except KeyboardInterrupt:
    pass
| 21.666667 | 57 | 0.643846 |
8af87da12ccf92e87ba6953829ccb0f892b57bed | 2,531 | py | Python | src/borland/file.py | glgomes/clonebuilder | ab8cd12536d201cc3f228877307705f8e20ceb8f | [
"MIT"
] | 1 | 2020-06-23T17:02:55.000Z | 2020-06-23T17:02:55.000Z | src/borland/file.py | glgomes/clonebuilder | ab8cd12536d201cc3f228877307705f8e20ceb8f | [
"MIT"
] | null | null | null | src/borland/file.py | glgomes/clonebuilder | ab8cd12536d201cc3f228877307705f8e20ceb8f | [
"MIT"
] | null | null | null | # -*- coding: UTF-8 -*-
"""
@author rpereira
Apr 18, 2012
Colocar aqui todo tratamento especifico de arquivo da borland:
bpk
bpg
bpr
res
rc
"""
import re
import os
from controller.front import FrontController
# Recognized Borland file extensions, grouped by role.
GROUP_EXTS = ["bpg"]     # project groups
PACKAGE_EXTS = ["bpk"]   # packages
EXE_EXTS = ["bpr"]       # executable projects

# Wildcard string for opening files of these extensions in FileDialogs.
# (The original used a bare string expression as a comment, which is a
# no-op statement; replaced with a real comment.)
__pkgsep = "; *."
PKG_OPEN_WILDCARD = (
    "Borland Package " +
    "(*." +
    __pkgsep.join(PACKAGE_EXTS) +
    __pkgsep + __pkgsep.join(EXE_EXTS) +
    __pkgsep + __pkgsep.join(GROUP_EXTS) + ")|" +
    "*." +
    __pkgsep.join(PACKAGE_EXTS) +
    __pkgsep + __pkgsep.join(EXE_EXTS) +
    __pkgsep + __pkgsep.join(GROUP_EXTS))
import fnmatch
#TODO: criar classe File, dai criar com composicao as classes BPK BPR BPG RES etc
class ProjectGroup(object):
    """Reads a Borland project group file and lists the projects it contains."""

    def __init__(self, file_path):
        self.file_path = file_path
        # Extensions considered "packages": .bpk plus executable .bpr projects.
        self.__pkg_exts = PACKAGE_EXTS + EXE_EXTS

    def find_list(self):
        """Return the project names from the file's PROJECTS section.

        NOTE(review): `file` shadows the builtin and is never closed —
        prefer a `with` block.  When the path does not exist, __find_file()
        returns a *list* of tuples, yet `.readlines()` is called on it,
        which would raise AttributeError; confirm the intended contract.
        """
        if os.path.exists(self.file_path):
            file = open(self.file_path, 'r')
        else:
            file = self.__find_file()
        buff = file.readlines()
        exes = []
        flag = False
        for line in buff:
            if flag:
                # A "#---" line marks the end of the PROJECTS section.
                if re.match("^#---", line, 1):
                    break
                if re.match("^PROJECTS", line, 1):
                    # Skip the 11-character "PROJECTS = " prefix.
                    line = line[11:]
                exes = exes + line.split()
            else:
                if re.match("^PROJECTS", line, 1):
                    line = line[11:]
                    flag = True
                    exes = exes + line.split()
        # Drop the "\" line-continuation markers between project names.
        # NOTE(review): the third argument `1` to re.match is `flags=1`
        # (an obscure flag value) — presumably re.IGNORECASE was intended;
        # confirm.  On Python 3 `filter` returns an iterator, so callers
        # expecting a list suggest this is Python 2 code.
        exes = filter(lambda a: a != "\\", exes)
        return exes

    def __find_file(self):
        """Search the workspace include paths for the named file with a
        package extension; returns (basename, filename, directory) tuples.

        NOTE(review): the inner loop rebinds `filename`, clobbering the
        search name after the first match, and the bare `break` stops
        os.walk after the first directory of each path — both look
        unintentional; verify before relying on this.
        """
        filename = self.file_path
        matches = []
        for path in FrontController().workspace.data['include_path']:
            for root, dirnames, filenames in os.walk(path):
                for ext in self.__pkg_exts:
                    for filename in fnmatch.filter(filenames,
                                                   filename + "." + ext):
                        matches.append((os.path.splitext(filename)[0],
                                        filename, root+os.sep))
                break
        return matches
| 28.761364 | 95 | 0.472936 |
f7dbe8ac0c6793a82e012209701e89be028fc50a | 8,099 | py | Python | vitrage_tempest_plugin/tests/common/tempest_clients.py | openstack/vitrage-tempest-plugin | 69acc7f3ea26f8c3a652cdf9d1fd842dbf9af58f | [
"Apache-2.0"
] | 6 | 2018-08-02T12:11:09.000Z | 2019-03-05T11:45:09.000Z | vitrage_tempest_plugin/tests/common/tempest_clients.py | openstack/vitrage-tempest-plugin | 69acc7f3ea26f8c3a652cdf9d1fd842dbf9af58f | [
"Apache-2.0"
] | null | null | null | vitrage_tempest_plugin/tests/common/tempest_clients.py | openstack/vitrage-tempest-plugin | 69acc7f3ea26f8c3a652cdf9d1fd842dbf9af58f | [
"Apache-2.0"
] | 1 | 2018-08-22T12:29:54.000Z | 2018-08-22T12:29:54.000Z | # Copyright 2016 Nokia
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from keystoneauth1 import loading as ka_loading
from keystoneauth1 import session as ka_session
from keystoneclient.v3 import client as ks_client_v3
from neutronclient.v2_0 import client as neutron_client
from oslo_utils import importutils as utils
from tempest.common import credentials_factory as common_creds
from tempest import config
from vitrageclient import client as vc
CONF = config.CONF
# Maps a driver name to the dotted path of the module providing its
# OpenStack client; resolved lazily by driver_module() so missing client
# libraries only fail when actually requested.
_client_modules = {
    'aodh': 'aodhclient.client',
    'ceilometer': 'ceilometerclient.client',
    'nova': 'novaclient.client',
    'cinder': 'cinderclient.client',
    'glance': 'glanceclient.client',
    'neutron': 'neutronclient.v2_0.client',
    'heat': 'heatclient.client',
    'mistral': 'mistralclient.api.v2.client',
    'gnocchi': 'gnocchiclient.v1.client',
    'trove': 'troveclient.v1.client'
}
def driver_module(driver):
    """Import and return the client module registered for *driver*.

    Raises KeyError for names not present in _client_modules.
    """
    dotted_path = _client_modules[driver]
    return utils.import_module(dotted_path)
class TempestClients(object):
    """Lazily-constructed, class-level cache of OpenStack service clients.

    class_init() must be called first with the test credentials; each
    accessor then builds its client on first use (admin session unless
    the method name says otherwise) and caches it on the class.
    """

    @classmethod
    def class_init(cls, creds):
        # Store the per-test credentials and reset every cached client so a
        # new test run does not reuse clients built for old credentials.
        cls.creds = creds
        cls._vitrage = None
        cls._ceilometer = None
        cls._nova = None
        cls._cinder = None
        cls._glance = None
        cls._neutron = None
        cls._heat = None
        cls._mistral = None
        cls._aodh = None
        cls._keystone = None
        cls._gnocchi = None

    @classmethod
    def vitrage(cls):
        """vitrage client

        :rtype: vitrageclient.v1.client.Client
        """
        if not cls._vitrage:
            cls._vitrage = vc.Client(
                '1', session=cls._get_session_for_admin())
        return cls._vitrage

    @classmethod
    def vitrage_client_for_user(cls):
        """vitrage client for a specific user and tenant

        :rtype: vitrageclient.v1.client.Client
        """
        # Not cached: always built fresh with the user (non-admin) session.
        session = cls._get_session_for_user()
        return vc.Client('1', session=session)

    @classmethod
    def neutron_client_for_user(cls):
        """neutron client authenticated with the user (non-admin) session."""
        session = cls._get_session_for_user()
        return neutron_client.Client(session=session)

    @classmethod
    def ceilometer(cls):
        """ceilometer client

        :rtype: ceilometerclient.v2.client.Client
        """
        if not cls._ceilometer:
            cm_client = driver_module('ceilometer')
            client = cm_client.get_client(
                version=CONF.root_cause_analysis_service.ceilometer_version,
                session=cls._get_session_for_admin(),
            )
            cls._ceilometer = client
        return cls._ceilometer

    @classmethod
    def nova(cls):
        """nova client

        :rtype: novaclient.v2.client.Client
        """
        if not cls._nova:
            n_client = driver_module('nova')
            client = n_client.Client(
                version=CONF.root_cause_analysis_service.nova_version,
                session=cls._get_session_for_admin(),
            )
            cls._nova = client
        return cls._nova

    @classmethod
    def cinder(cls):
        """cinder client

        :rtype: cinderclient.v2.client.Client
        """
        if not cls._cinder:
            cin_client = driver_module('cinder')
            client = cin_client.Client(
                version=CONF.root_cause_analysis_service.cinder_version,
                session=cls._get_session_for_admin(),
            )
            cls._cinder = client
        return cls._cinder

    @classmethod
    def glance(cls):
        """glance client

        :rtype: glanceclient.v2.client.Client
        """
        if not cls._glance:
            glan_client = driver_module('glance')
            client = glan_client.Client(
                version=CONF.root_cause_analysis_service.glance_version,
                session=cls._get_session_for_admin(),
            )
            cls._glance = client
        return cls._glance

    @classmethod
    def neutron(cls):
        """neutron client

        :rtype: neutronclient.v2_0.client.Client
        """
        if not cls._neutron:
            ne_client = driver_module('neutron')
            client = ne_client.Client(
                session=cls._get_session_for_admin()
            )
            cls._neutron = client
        return cls._neutron

    @classmethod
    def heat(cls):
        """heat client

        :rtype: heatclient.v1.client.Client
        """
        if not cls._heat:
            he_client = driver_module('heat')
            client = he_client.Client(
                version=CONF.root_cause_analysis_service.heat_version,
                session=cls._get_session_for_admin()
            )
            cls._heat = client
        return cls._heat

    @classmethod
    def mistral(cls):
        """mistral client

        :rtype: mistralclient.v2.client.Client
        """
        if not cls._mistral:
            mi_client = driver_module('mistral')
            client = mi_client.Client(
                session=cls._get_session_for_admin(),
            )
            cls._mistral = client
        return cls._mistral

    @classmethod
    def aodh(cls):
        """aodh client

        :rtype: aodhclient.v2.client.Client
        """
        if not cls._aodh:
            ao_client = driver_module('aodh')
            client = ao_client.Client(
                CONF.root_cause_analysis_service.aodh_version,
                session=cls._get_session_for_admin())
            cls._aodh = client
        return cls._aodh

    @classmethod
    def keystone(cls):
        """keystone client

        :rtype: keystoneclient.v3.client.Client
        """
        if not cls._keystone:
            sess = cls._get_session_for_admin()
            cls._keystone = ks_client_v3.Client(session=sess)
        return cls._keystone

    @classmethod
    def gnocchi(cls):
        """gnocchi client

        :rtype: gnocchiclient.v1.client.Client
        """
        if not cls._gnocchi:
            gn_client = driver_module('gnocchi')
            client = gn_client.Client(
                session=cls._get_session_for_admin())
            cls._gnocchi = client
        return cls._gnocchi

    @classmethod
    def _get_session_for_admin(cls):
        # Admin credentials come from the tempest configuration, not from
        # the per-test credentials passed to class_init().
        admin_creds = common_creds.get_configured_admin_credentials()
        password = admin_creds.password
        username = admin_creds.username
        user_domain_id = admin_creds.user_domain_id
        project_name = admin_creds.project_name
        project_domain_id = admin_creds.project_domain_id
        return cls._get_session(username, password, user_domain_id,
                                project_name, project_domain_id)

    @classmethod
    def _get_session_for_user(cls):
        # Uses the per-test credentials stored by class_init().
        username = cls.creds.username
        password = cls.creds.password
        user_domain_id = cls.creds.user_domain_id
        project_name = cls.creds.project_name
        project_domain_id = cls.creds.project_domain_id
        return cls._get_session(username, password, user_domain_id,
                                project_name, project_domain_id)

    @classmethod
    def _get_session(cls, username, password, user_domain_id, project_name,
                     project_domain_id):
        # Builds a keystone v3 password-auth session shared by all clients.
        loader = ka_loading.get_plugin_loader('password')
        auth_url = CONF.identity.uri_v3
        auth_plugin = loader.load_from_options(
            auth_url=auth_url,
            username=username, password=password, project_name=project_name,
            project_domain_id=project_domain_id,
            user_domain_id=user_domain_id)

        return ka_session.Session(auth=auth_plugin)
| 30.794677 | 78 | 0.621064 |
08b839624d3509d757e299d31f5f7c8e83c2704e | 3,307 | py | Python | examples/cube_mesh.py | lukius/mlab-tools | 04b0f64a26305354febcbf3f235e2817cbe9077c | [
"MIT"
] | null | null | null | examples/cube_mesh.py | lukius/mlab-tools | 04b0f64a26305354febcbf3f235e2817cbe9077c | [
"MIT"
] | 1 | 2017-07-13T04:33:55.000Z | 2017-07-13T04:40:35.000Z | examples/cube_mesh.py | lukius/mlab-tools | 04b0f64a26305354febcbf3f235e2817cbe9077c | [
"MIT"
] | null | null | null | import sys
sys.path.append('../')
from mlab_tools.animation import Animation, StopAnimation
from mlab_tools.geometry import GeometryParser
from mlab_tools.polyline import AnimatedPolyLine
class CubeMeshAnimation(Animation):

    """This example shows how to use a VTK geometry file to render a 3D polyhedron mesh."""

    # NOTE(review): uses a Python 2 `print` statement (on_frame below), so
    # this module only runs under Python 2.

    def initialize(self):
        """Load the mesh and trajectory data and set the initial camera."""
        self.parse_volumes()
        self.parse_trajectory()

        parser = GeometryParser.from_VTK('data/cube.vtk')
        self.geometry = parser.parse()

        # Keep a flat list of polyhedra so on_frame can rotate all of them.
        self.polys = list()
        for _, polyhedron in self.geometry:
            self.polys.append(polyhedron)
            self.add_object(polyhedron, opacity=0.1)

        self.update_camera(focalpoint=[0,-30,120],
                           distance=2700,
                           azimuth=0,
                           elevation=90,
                           roll=-90)

    def parse_volumes(self):
        """Read (time, volume-name) pairs from the volumes data file."""
        self.volumes = list()
        self.boundary = list()

        with open('data/retQSS_volumes_cube', 'r') as _file:
            lines = _file.readlines()

        for line in lines:
            fields = line.split()
            time = float(fields[0])
            volume = fields[1]
            self.volumes.append((time, volume))

        # Cursor into self.volumes and the last highlighted polyhedron,
        # both advanced by on_frame as the trajectory crosses volumes.
        self.current_volume_idx = 0
        self.last_polyhedron = None

    def parse_trajectory(self):
        """Read trajectory points and their timestamps, build the polyline."""
        points = list()
        self.times = list()

        with open('data/retQSS_trajectory_cube', 'r') as _file:
            lines = _file.readlines()

        for line in lines:
            # Each line holds: time x y z <trailing field dropped by [:-1]>.
            time, x, y, z = map(float, line.split()[:-1])
            points.append((x,y,z))
            self.times.append(time)

        self.trajectory = AnimatedPolyLine(points)
        self.add_object(self.trajectory, color=(0,0,1))

    def on_frame(self, frame_no):
        """Per-frame hook: rotate the scene, zoom in, highlight volumes."""
        current_point_idx = self.trajectory.current_point()
        time = self.times[current_point_idx]

        # Slowly rotate the whole scene around the z axis.
        for poly in self.polys:
            poly.transform(rotate=(0,0,0.3))
        self.trajectory.transform(rotate=(0,0,0.3))

        # Zoom the camera in until it reaches a fixed distance.
        dist = self.get_camera().parameters()['distance']
        if dist > 525:
            self.update_camera(distance=-10)

        # Highlight every volume the trajectory has entered up to `time`,
        # dimming the previously highlighted one.
        while self.current_volume_idx < len(self.volumes) and\
              time >= self.volumes[self.current_volume_idx][0]:
            if self.last_polyhedron is not None:
                self.last_polyhedron.update_properties(color=(0.5,0,0), opacity=0.05)

            volume_name = self.volumes[self.current_volume_idx][1]
            print 'Entering volume {}...'.format(volume_name)

            polyhedron = self.geometry.get_polyhedron(volume_name)
            polyhedron.update_properties(color=(0.7,0,0), opacity=0.3)
            self.last_polyhedron = polyhedron

            self.current_volume_idx += 1

        # Stop once the trajectory's last point has been reached.
        if current_point_idx == self.trajectory.num_points()-1:
            StopAnimation()
def run_animation():
    """Build the cube-mesh animation and play it, saving frames to 'cube'."""
    anim = CubeMeshAnimation(640, 480)
    anim.run(delay=20, save_to='cube')


if __name__ == '__main__':
    run_animation()
| 33.744898 | 91 | 0.554279 |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.