body_hash
stringlengths
64
64
body
stringlengths
23
109k
docstring
stringlengths
1
57k
path
stringlengths
4
198
name
stringlengths
1
115
repository_name
stringlengths
7
111
repository_stars
float64
0
191k
lang
stringclasses
1 value
body_without_docstring
stringlengths
14
108k
unified
stringlengths
45
133k
fb4a1046051e8c3f849c5ee1319645bff2143ea73a50989b92c7d61b454516bc
def create_parity_check_matrix(G): '\n Function that generates a parity check matrix from a given k x n generator matrix of form [Ik | P] where Ik is\n a identity matrix of rank k.\n\n The parity check matrix is in form [PT | I]\n where PT is transpose of matrix P and I is a identity matrix of rank n-k.\n :param G: a generator matrix in form [Ik | P]\n :return: H: parity check matrix in form [PT | I]\n ' rows = G.shape[0] columns = G.shape[1] P = G[(:, rows:columns)] PT = np.transpose(P) I = np.identity((columns - rows)) H = np.concatenate((PT, I), axis=1) return H
Function that generates a parity check matrix from a given k x n generator matrix of form [Ik | P] where Ik is a identity matrix of rank k. The parity check matrix is in form [PT | I] where PT is transpose of matrix P and I is a identity matrix of rank n-k. :param G: a generator matrix in form [Ik | P] :return: H: parity check matrix in form [PT | I]
Linear Coding.py
create_parity_check_matrix
Mythrillo/Information-Theory
0
python
def create_parity_check_matrix(G): '\n Function that generates a parity check matrix from a given k x n generator matrix of form [Ik | P] where Ik is\n a identity matrix of rank k.\n\n The parity check matrix is in form [PT | I]\n where PT is transpose of matrix P and I is a identity matrix of rank n-k.\n :param G: a generator matrix in form [Ik | P]\n :return: H: parity check matrix in form [PT | I]\n ' rows = G.shape[0] columns = G.shape[1] P = G[(:, rows:columns)] PT = np.transpose(P) I = np.identity((columns - rows)) H = np.concatenate((PT, I), axis=1) return H
def create_parity_check_matrix(G): '\n Function that generates a parity check matrix from a given k x n generator matrix of form [Ik | P] where Ik is\n a identity matrix of rank k.\n\n The parity check matrix is in form [PT | I]\n where PT is transpose of matrix P and I is a identity matrix of rank n-k.\n :param G: a generator matrix in form [Ik | P]\n :return: H: parity check matrix in form [PT | I]\n ' rows = G.shape[0] columns = G.shape[1] P = G[(:, rows:columns)] PT = np.transpose(P) I = np.identity((columns - rows)) H = np.concatenate((PT, I), axis=1) return H<|docstring|>Function that generates a parity check matrix from a given k x n generator matrix of form [Ik | P] where Ik is a identity matrix of rank k. The parity check matrix is in form [PT | I] where PT is transpose of matrix P and I is a identity matrix of rank n-k. :param G: a generator matrix in form [Ik | P] :return: H: parity check matrix in form [PT | I]<|endoftext|>
5cfa5d5748fb8c977151d8f18829c81322a039896b677d1a93183c6753f9813a
def code_message(G, msg): '\n The way the message is coded is simple as we only need to multiply the message with the generator matrix and\n take modulo 2 of the result.\n :param G: a generator matrix in systematic form\n :param msg: message we wish to code\n :return:\n ' return (msg.dot(G) % 2)
The way the message is coded is simple as we only need to multiply the message with the generator matrix and take modulo 2 of the result. :param G: a generator matrix in systematic form :param msg: message we wish to code :return:
Linear Coding.py
code_message
Mythrillo/Information-Theory
0
python
def code_message(G, msg): '\n The way the message is coded is simple as we only need to multiply the message with the generator matrix and\n take modulo 2 of the result.\n :param G: a generator matrix in systematic form\n :param msg: message we wish to code\n :return:\n ' return (msg.dot(G) % 2)
def code_message(G, msg): '\n The way the message is coded is simple as we only need to multiply the message with the generator matrix and\n take modulo 2 of the result.\n :param G: a generator matrix in systematic form\n :param msg: message we wish to code\n :return:\n ' return (msg.dot(G) % 2)<|docstring|>The way the message is coded is simple as we only need to multiply the message with the generator matrix and take modulo 2 of the result. :param G: a generator matrix in systematic form :param msg: message we wish to code :return:<|endoftext|>
e55b69b29743dd8201e710e0f2e50fef68d83ba77f8896a6e40566b83b33419e
def swap_columns(a, b, array): '\n Function that swaps columns of a given matrix\n :param a: int\n :param b: int\n :param array: numpy array\n :return: array_swapped: numpy array with the columns swapped\n ' array_swapped = array.copy() array_swapped[(:, a)] = array[(:, b)] array_swapped[(:, b)] = array[(:, a)] return array_swapped
Function that swaps columns of a given matrix :param a: int :param b: int :param array: numpy array :return: array_swapped: numpy array with the columns swapped
Linear Coding.py
swap_columns
Mythrillo/Information-Theory
0
python
def swap_columns(a, b, array): '\n Function that swaps columns of a given matrix\n :param a: int\n :param b: int\n :param array: numpy array\n :return: array_swapped: numpy array with the columns swapped\n ' array_swapped = array.copy() array_swapped[(:, a)] = array[(:, b)] array_swapped[(:, b)] = array[(:, a)] return array_swapped
def swap_columns(a, b, array): '\n Function that swaps columns of a given matrix\n :param a: int\n :param b: int\n :param array: numpy array\n :return: array_swapped: numpy array with the columns swapped\n ' array_swapped = array.copy() array_swapped[(:, a)] = array[(:, b)] array_swapped[(:, b)] = array[(:, a)] return array_swapped<|docstring|>Function that swaps columns of a given matrix :param a: int :param b: int :param array: numpy array :return: array_swapped: numpy array with the columns swapped<|endoftext|>
cf2be77258ed4469fcd818780fedb108b8027cbca5c7ff9deb11757125de0623
def decode_message(G, coded_msg): '\n Function that decodes given message using the given generator matrix.\n Firsly we create all combinations of length n, where n is the number of rows of the matrix, of zeros and ones.\n Then each combinations is transposed and multiplied by the generator matrix until the result is the given coded\n message in which case the transposed combination is returned. If all combinations are exhausted then a 1x1 numpy\n array is returned with the value 2.\n :param G: generator matrix in systematic form\n :param coded_msg: numpy array\n :return: rowT: decoded message\n ' '\n Dekoduje zadaną wiadomość dla zadanej macierzy G.\n Początkowo tworzymy listę wszystkich poprawnych kombinacji długości liczby wierszów macierzy G stworzoną z 0 i 1,\n przez którą następnie iterujemy i mnożymy każdą kombinację razy macierz G.\n Jeśli dla którejś kombinacji okaże się, że wynik mnożenia jest naszą zakodowaną wiadomością to zwracamy kombinację.\n W przeciwnym wypadku zwracamy ustalony array.\n ' code_words = list(itertools.product([0, 1], repeat=G.shape[0])) code_words_arr = np.array(code_words) for row in code_words_arr: rowT = np.transpose(row) if ((rowT.dot(G) % 2) == coded_msg).all(): return rowT return np.array([[2]])
Function that decodes given message using the given generator matrix. Firsly we create all combinations of length n, where n is the number of rows of the matrix, of zeros and ones. Then each combinations is transposed and multiplied by the generator matrix until the result is the given coded message in which case the transposed combination is returned. If all combinations are exhausted then a 1x1 numpy array is returned with the value 2. :param G: generator matrix in systematic form :param coded_msg: numpy array :return: rowT: decoded message
Linear Coding.py
decode_message
Mythrillo/Information-Theory
0
python
def decode_message(G, coded_msg): '\n Function that decodes given message using the given generator matrix.\n Firsly we create all combinations of length n, where n is the number of rows of the matrix, of zeros and ones.\n Then each combinations is transposed and multiplied by the generator matrix until the result is the given coded\n message in which case the transposed combination is returned. If all combinations are exhausted then a 1x1 numpy\n array is returned with the value 2.\n :param G: generator matrix in systematic form\n :param coded_msg: numpy array\n :return: rowT: decoded message\n ' '\n Dekoduje zadaną wiadomość dla zadanej macierzy G.\n Początkowo tworzymy listę wszystkich poprawnych kombinacji długości liczby wierszów macierzy G stworzoną z 0 i 1,\n przez którą następnie iterujemy i mnożymy każdą kombinację razy macierz G.\n Jeśli dla którejś kombinacji okaże się, że wynik mnożenia jest naszą zakodowaną wiadomością to zwracamy kombinację.\n W przeciwnym wypadku zwracamy ustalony array.\n ' code_words = list(itertools.product([0, 1], repeat=G.shape[0])) code_words_arr = np.array(code_words) for row in code_words_arr: rowT = np.transpose(row) if ((rowT.dot(G) % 2) == coded_msg).all(): return rowT return np.array([[2]])
def decode_message(G, coded_msg): '\n Function that decodes given message using the given generator matrix.\n Firsly we create all combinations of length n, where n is the number of rows of the matrix, of zeros and ones.\n Then each combinations is transposed and multiplied by the generator matrix until the result is the given coded\n message in which case the transposed combination is returned. If all combinations are exhausted then a 1x1 numpy\n array is returned with the value 2.\n :param G: generator matrix in systematic form\n :param coded_msg: numpy array\n :return: rowT: decoded message\n ' '\n Dekoduje zadaną wiadomość dla zadanej macierzy G.\n Początkowo tworzymy listę wszystkich poprawnych kombinacji długości liczby wierszów macierzy G stworzoną z 0 i 1,\n przez którą następnie iterujemy i mnożymy każdą kombinację razy macierz G.\n Jeśli dla którejś kombinacji okaże się, że wynik mnożenia jest naszą zakodowaną wiadomością to zwracamy kombinację.\n W przeciwnym wypadku zwracamy ustalony array.\n ' code_words = list(itertools.product([0, 1], repeat=G.shape[0])) code_words_arr = np.array(code_words) for row in code_words_arr: rowT = np.transpose(row) if ((rowT.dot(G) % 2) == coded_msg).all(): return rowT return np.array([[2]])<|docstring|>Function that decodes given message using the given generator matrix. Firsly we create all combinations of length n, where n is the number of rows of the matrix, of zeros and ones. Then each combinations is transposed and multiplied by the generator matrix until the result is the given coded message in which case the transposed combination is returned. If all combinations are exhausted then a 1x1 numpy array is returned with the value 2. :param G: generator matrix in systematic form :param coded_msg: numpy array :return: rowT: decoded message<|endoftext|>
961752af43a593c2e442ca708c8a53e11dff8382f89350a1ec177ee5aab0f346
def convert_to_systematic_matrix(G): "\n Function that converts any generator matrix to it's systematic form [I | P] using the following operations:\n swapping columns, adding rows modulo 2. It returns a 1x1 array with value 2 if the given matrix\n is not a generator matrix.\n :param G: generator matrix\n :return: numpy array\n " temp_array = G.copy() rows = temp_array.shape[0] columns = temp_array.shape[1] limit = rows i = 0 while (i < limit): found = False for j in range(i, columns): if (temp_array[(i, j)] == 1): found = True temp_array = swap_columns(j, i, temp_array) break if found: for k in range(0, rows): if (k == i): continue if (temp_array[(k, i)] == 1): temp_array[(k, :)] = (temp_array[(k, :)] + temp_array[(i, :)]) temp_array = (temp_array.copy() % 2) i = (i + 1) else: return np.array([[2]]) return temp_array
Function that converts any generator matrix to it's systematic form [I | P] using the following operations: swapping columns, adding rows modulo 2. It returns a 1x1 array with value 2 if the given matrix is not a generator matrix. :param G: generator matrix :return: numpy array
Linear Coding.py
convert_to_systematic_matrix
Mythrillo/Information-Theory
0
python
def convert_to_systematic_matrix(G): "\n Function that converts any generator matrix to it's systematic form [I | P] using the following operations:\n swapping columns, adding rows modulo 2. It returns a 1x1 array with value 2 if the given matrix\n is not a generator matrix.\n :param G: generator matrix\n :return: numpy array\n " temp_array = G.copy() rows = temp_array.shape[0] columns = temp_array.shape[1] limit = rows i = 0 while (i < limit): found = False for j in range(i, columns): if (temp_array[(i, j)] == 1): found = True temp_array = swap_columns(j, i, temp_array) break if found: for k in range(0, rows): if (k == i): continue if (temp_array[(k, i)] == 1): temp_array[(k, :)] = (temp_array[(k, :)] + temp_array[(i, :)]) temp_array = (temp_array.copy() % 2) i = (i + 1) else: return np.array([[2]]) return temp_array
def convert_to_systematic_matrix(G): "\n Function that converts any generator matrix to it's systematic form [I | P] using the following operations:\n swapping columns, adding rows modulo 2. It returns a 1x1 array with value 2 if the given matrix\n is not a generator matrix.\n :param G: generator matrix\n :return: numpy array\n " temp_array = G.copy() rows = temp_array.shape[0] columns = temp_array.shape[1] limit = rows i = 0 while (i < limit): found = False for j in range(i, columns): if (temp_array[(i, j)] == 1): found = True temp_array = swap_columns(j, i, temp_array) break if found: for k in range(0, rows): if (k == i): continue if (temp_array[(k, i)] == 1): temp_array[(k, :)] = (temp_array[(k, :)] + temp_array[(i, :)]) temp_array = (temp_array.copy() % 2) i = (i + 1) else: return np.array([[2]]) return temp_array<|docstring|>Function that converts any generator matrix to it's systematic form [I | P] using the following operations: swapping columns, adding rows modulo 2. It returns a 1x1 array with value 2 if the given matrix is not a generator matrix. :param G: generator matrix :return: numpy array<|endoftext|>
e63378c343b126bab8768ceae3e6da31ed48b2bd3f560f603dbe7495dbd574c1
def fail_next_request(self, status_code): 'Fail the next request with the given status_code.' self.log_d('fail_next_request: id={} self={} status_code={}'.format(id(self), self, status_code)) self._fail_next_request.value = status_code
Fail the next request with the given status_code.
lib/bes/web/web_server.py
fail_next_request
reconstruir/bes
0
python
def fail_next_request(self, status_code): self.log_d('fail_next_request: id={} self={} status_code={}'.format(id(self), self, status_code)) self._fail_next_request.value = status_code
def fail_next_request(self, status_code): self.log_d('fail_next_request: id={} self={} status_code={}'.format(id(self), self, status_code)) self._fail_next_request.value = status_code<|docstring|>Fail the next request with the given status_code.<|endoftext|>
b2d28ccda10011e4602fc00246621ca2ffa6a9d689de88d68663ea15cd0bf7f5
def __init__(self): 'Creates an instance of ValidationError' self.__api_name = None self.__info_message = None self.__message = None self.__index = None self.__parent_api_name = None self.__key_modified = dict()
Creates an instance of ValidationError
zcrmsdk/src/com/zoho/crm/api/blue_print/validation_error.py
__init__
zoho/zohocrm-python-sdk-2.1
0
python
def __init__(self): self.__api_name = None self.__info_message = None self.__message = None self.__index = None self.__parent_api_name = None self.__key_modified = dict()
def __init__(self): self.__api_name = None self.__info_message = None self.__message = None self.__index = None self.__parent_api_name = None self.__key_modified = dict()<|docstring|>Creates an instance of ValidationError<|endoftext|>
375e86904e106c831724b0d2bc60a3625348b63c73318704431ab6ec62135d36
def get_api_name(self): '\n\t\tThe method to get the api_name\n\n\t\tReturns:\n\t\t\tstring: A string representing the api_name\n\t\t' return self.__api_name
The method to get the api_name Returns: string: A string representing the api_name
zcrmsdk/src/com/zoho/crm/api/blue_print/validation_error.py
get_api_name
zoho/zohocrm-python-sdk-2.1
0
python
def get_api_name(self): '\n\t\tThe method to get the api_name\n\n\t\tReturns:\n\t\t\tstring: A string representing the api_name\n\t\t' return self.__api_name
def get_api_name(self): '\n\t\tThe method to get the api_name\n\n\t\tReturns:\n\t\t\tstring: A string representing the api_name\n\t\t' return self.__api_name<|docstring|>The method to get the api_name Returns: string: A string representing the api_name<|endoftext|>
a6e8a06882cb37d5a84e1061aa91bf4c03accc5ab8a3bdadb8bfe2cb0c20af9b
def set_api_name(self, api_name): '\n\t\tThe method to set the value to api_name\n\n\t\tParameters:\n\t\t\tapi_name (string) : A string representing the api_name\n\t\t' if ((api_name is not None) and (not isinstance(api_name, str))): raise SDKException(Constants.DATA_TYPE_ERROR, 'KEY: api_name EXPECTED TYPE: str', None, None) self.__api_name = api_name self.__key_modified['api_name'] = 1
The method to set the value to api_name Parameters: api_name (string) : A string representing the api_name
zcrmsdk/src/com/zoho/crm/api/blue_print/validation_error.py
set_api_name
zoho/zohocrm-python-sdk-2.1
0
python
def set_api_name(self, api_name): '\n\t\tThe method to set the value to api_name\n\n\t\tParameters:\n\t\t\tapi_name (string) : A string representing the api_name\n\t\t' if ((api_name is not None) and (not isinstance(api_name, str))): raise SDKException(Constants.DATA_TYPE_ERROR, 'KEY: api_name EXPECTED TYPE: str', None, None) self.__api_name = api_name self.__key_modified['api_name'] = 1
def set_api_name(self, api_name): '\n\t\tThe method to set the value to api_name\n\n\t\tParameters:\n\t\t\tapi_name (string) : A string representing the api_name\n\t\t' if ((api_name is not None) and (not isinstance(api_name, str))): raise SDKException(Constants.DATA_TYPE_ERROR, 'KEY: api_name EXPECTED TYPE: str', None, None) self.__api_name = api_name self.__key_modified['api_name'] = 1<|docstring|>The method to set the value to api_name Parameters: api_name (string) : A string representing the api_name<|endoftext|>
600484d8c95b3ac87a7394ee5f0c5cfebc5b6ccc30a91b85c181abd29f992701
def get_info_message(self): '\n\t\tThe method to get the info_message\n\n\t\tReturns:\n\t\t\tstring: A string representing the info_message\n\t\t' return self.__info_message
The method to get the info_message Returns: string: A string representing the info_message
zcrmsdk/src/com/zoho/crm/api/blue_print/validation_error.py
get_info_message
zoho/zohocrm-python-sdk-2.1
0
python
def get_info_message(self): '\n\t\tThe method to get the info_message\n\n\t\tReturns:\n\t\t\tstring: A string representing the info_message\n\t\t' return self.__info_message
def get_info_message(self): '\n\t\tThe method to get the info_message\n\n\t\tReturns:\n\t\t\tstring: A string representing the info_message\n\t\t' return self.__info_message<|docstring|>The method to get the info_message Returns: string: A string representing the info_message<|endoftext|>
765a6939ba5db3235bcf3a6f2ce5c088f000e34012adbc9b089fd1b27868f08d
def set_info_message(self, info_message): '\n\t\tThe method to set the value to info_message\n\n\t\tParameters:\n\t\t\tinfo_message (string) : A string representing the info_message\n\t\t' if ((info_message is not None) and (not isinstance(info_message, str))): raise SDKException(Constants.DATA_TYPE_ERROR, 'KEY: info_message EXPECTED TYPE: str', None, None) self.__info_message = info_message self.__key_modified['info_message'] = 1
The method to set the value to info_message Parameters: info_message (string) : A string representing the info_message
zcrmsdk/src/com/zoho/crm/api/blue_print/validation_error.py
set_info_message
zoho/zohocrm-python-sdk-2.1
0
python
def set_info_message(self, info_message): '\n\t\tThe method to set the value to info_message\n\n\t\tParameters:\n\t\t\tinfo_message (string) : A string representing the info_message\n\t\t' if ((info_message is not None) and (not isinstance(info_message, str))): raise SDKException(Constants.DATA_TYPE_ERROR, 'KEY: info_message EXPECTED TYPE: str', None, None) self.__info_message = info_message self.__key_modified['info_message'] = 1
def set_info_message(self, info_message): '\n\t\tThe method to set the value to info_message\n\n\t\tParameters:\n\t\t\tinfo_message (string) : A string representing the info_message\n\t\t' if ((info_message is not None) and (not isinstance(info_message, str))): raise SDKException(Constants.DATA_TYPE_ERROR, 'KEY: info_message EXPECTED TYPE: str', None, None) self.__info_message = info_message self.__key_modified['info_message'] = 1<|docstring|>The method to set the value to info_message Parameters: info_message (string) : A string representing the info_message<|endoftext|>
2d82acb01ae9665960fab95db14735467dc5c36b9229c3be5fe181c6d7254550
def get_message(self): '\n\t\tThe method to get the message\n\n\t\tReturns:\n\t\t\tstring: A string representing the message\n\t\t' return self.__message
The method to get the message Returns: string: A string representing the message
zcrmsdk/src/com/zoho/crm/api/blue_print/validation_error.py
get_message
zoho/zohocrm-python-sdk-2.1
0
python
def get_message(self): '\n\t\tThe method to get the message\n\n\t\tReturns:\n\t\t\tstring: A string representing the message\n\t\t' return self.__message
def get_message(self): '\n\t\tThe method to get the message\n\n\t\tReturns:\n\t\t\tstring: A string representing the message\n\t\t' return self.__message<|docstring|>The method to get the message Returns: string: A string representing the message<|endoftext|>
c53a981891f28eab188ca486bc53f4a78f31de4190464351b69fa1187875478a
def set_message(self, message): '\n\t\tThe method to set the value to message\n\n\t\tParameters:\n\t\t\tmessage (string) : A string representing the message\n\t\t' if ((message is not None) and (not isinstance(message, str))): raise SDKException(Constants.DATA_TYPE_ERROR, 'KEY: message EXPECTED TYPE: str', None, None) self.__message = message self.__key_modified['message'] = 1
The method to set the value to message Parameters: message (string) : A string representing the message
zcrmsdk/src/com/zoho/crm/api/blue_print/validation_error.py
set_message
zoho/zohocrm-python-sdk-2.1
0
python
def set_message(self, message): '\n\t\tThe method to set the value to message\n\n\t\tParameters:\n\t\t\tmessage (string) : A string representing the message\n\t\t' if ((message is not None) and (not isinstance(message, str))): raise SDKException(Constants.DATA_TYPE_ERROR, 'KEY: message EXPECTED TYPE: str', None, None) self.__message = message self.__key_modified['message'] = 1
def set_message(self, message): '\n\t\tThe method to set the value to message\n\n\t\tParameters:\n\t\t\tmessage (string) : A string representing the message\n\t\t' if ((message is not None) and (not isinstance(message, str))): raise SDKException(Constants.DATA_TYPE_ERROR, 'KEY: message EXPECTED TYPE: str', None, None) self.__message = message self.__key_modified['message'] = 1<|docstring|>The method to set the value to message Parameters: message (string) : A string representing the message<|endoftext|>
3f16935a45fd91247751c2226d404a2a720321169dc091359b5df132a1f4ec5f
def get_index(self): '\n\t\tThe method to get the index\n\n\t\tReturns:\n\t\t\tint: An int representing the index\n\t\t' return self.__index
The method to get the index Returns: int: An int representing the index
zcrmsdk/src/com/zoho/crm/api/blue_print/validation_error.py
get_index
zoho/zohocrm-python-sdk-2.1
0
python
def get_index(self): '\n\t\tThe method to get the index\n\n\t\tReturns:\n\t\t\tint: An int representing the index\n\t\t' return self.__index
def get_index(self): '\n\t\tThe method to get the index\n\n\t\tReturns:\n\t\t\tint: An int representing the index\n\t\t' return self.__index<|docstring|>The method to get the index Returns: int: An int representing the index<|endoftext|>
cb9ba12b3f2510a0695905b2449445378535252e19034e53b49806dc37e6aacc
def set_index(self, index): '\n\t\tThe method to set the value to index\n\n\t\tParameters:\n\t\t\tindex (int) : An int representing the index\n\t\t' if ((index is not None) and (not isinstance(index, int))): raise SDKException(Constants.DATA_TYPE_ERROR, 'KEY: index EXPECTED TYPE: int', None, None) self.__index = index self.__key_modified['index'] = 1
The method to set the value to index Parameters: index (int) : An int representing the index
zcrmsdk/src/com/zoho/crm/api/blue_print/validation_error.py
set_index
zoho/zohocrm-python-sdk-2.1
0
python
def set_index(self, index): '\n\t\tThe method to set the value to index\n\n\t\tParameters:\n\t\t\tindex (int) : An int representing the index\n\t\t' if ((index is not None) and (not isinstance(index, int))): raise SDKException(Constants.DATA_TYPE_ERROR, 'KEY: index EXPECTED TYPE: int', None, None) self.__index = index self.__key_modified['index'] = 1
def set_index(self, index): '\n\t\tThe method to set the value to index\n\n\t\tParameters:\n\t\t\tindex (int) : An int representing the index\n\t\t' if ((index is not None) and (not isinstance(index, int))): raise SDKException(Constants.DATA_TYPE_ERROR, 'KEY: index EXPECTED TYPE: int', None, None) self.__index = index self.__key_modified['index'] = 1<|docstring|>The method to set the value to index Parameters: index (int) : An int representing the index<|endoftext|>
5c8c170d08aa04c624801ea8c8cd1e9a830eee51dd77ea58fa222637fa33af34
def get_parent_api_name(self): '\n\t\tThe method to get the parent_api_name\n\n\t\tReturns:\n\t\t\tstring: A string representing the parent_api_name\n\t\t' return self.__parent_api_name
The method to get the parent_api_name Returns: string: A string representing the parent_api_name
zcrmsdk/src/com/zoho/crm/api/blue_print/validation_error.py
get_parent_api_name
zoho/zohocrm-python-sdk-2.1
0
python
def get_parent_api_name(self): '\n\t\tThe method to get the parent_api_name\n\n\t\tReturns:\n\t\t\tstring: A string representing the parent_api_name\n\t\t' return self.__parent_api_name
def get_parent_api_name(self): '\n\t\tThe method to get the parent_api_name\n\n\t\tReturns:\n\t\t\tstring: A string representing the parent_api_name\n\t\t' return self.__parent_api_name<|docstring|>The method to get the parent_api_name Returns: string: A string representing the parent_api_name<|endoftext|>
07b5f67ab30a0024eedb8071241b5fb15157abd7705a1fd6baec99266cd48421
def set_parent_api_name(self, parent_api_name): '\n\t\tThe method to set the value to parent_api_name\n\n\t\tParameters:\n\t\t\tparent_api_name (string) : A string representing the parent_api_name\n\t\t' if ((parent_api_name is not None) and (not isinstance(parent_api_name, str))): raise SDKException(Constants.DATA_TYPE_ERROR, 'KEY: parent_api_name EXPECTED TYPE: str', None, None) self.__parent_api_name = parent_api_name self.__key_modified['parent_api_name'] = 1
The method to set the value to parent_api_name Parameters: parent_api_name (string) : A string representing the parent_api_name
zcrmsdk/src/com/zoho/crm/api/blue_print/validation_error.py
set_parent_api_name
zoho/zohocrm-python-sdk-2.1
0
python
def set_parent_api_name(self, parent_api_name): '\n\t\tThe method to set the value to parent_api_name\n\n\t\tParameters:\n\t\t\tparent_api_name (string) : A string representing the parent_api_name\n\t\t' if ((parent_api_name is not None) and (not isinstance(parent_api_name, str))): raise SDKException(Constants.DATA_TYPE_ERROR, 'KEY: parent_api_name EXPECTED TYPE: str', None, None) self.__parent_api_name = parent_api_name self.__key_modified['parent_api_name'] = 1
def set_parent_api_name(self, parent_api_name): '\n\t\tThe method to set the value to parent_api_name\n\n\t\tParameters:\n\t\t\tparent_api_name (string) : A string representing the parent_api_name\n\t\t' if ((parent_api_name is not None) and (not isinstance(parent_api_name, str))): raise SDKException(Constants.DATA_TYPE_ERROR, 'KEY: parent_api_name EXPECTED TYPE: str', None, None) self.__parent_api_name = parent_api_name self.__key_modified['parent_api_name'] = 1<|docstring|>The method to set the value to parent_api_name Parameters: parent_api_name (string) : A string representing the parent_api_name<|endoftext|>
6152561f54bd76aa304fe9f8a38d9e50dba468272cb4813995b474b1959b3939
def is_key_modified(self, key): '\n\t\tThe method to check if the user has modified the given key\n\n\t\tParameters:\n\t\t\tkey (string) : A string representing the key\n\n\t\tReturns:\n\t\t\tint: An int representing the modification\n\t\t' if ((key is not None) and (not isinstance(key, str))): raise SDKException(Constants.DATA_TYPE_ERROR, 'KEY: key EXPECTED TYPE: str', None, None) if (key in self.__key_modified): return self.__key_modified.get(key) return None
The method to check if the user has modified the given key Parameters: key (string) : A string representing the key Returns: int: An int representing the modification
zcrmsdk/src/com/zoho/crm/api/blue_print/validation_error.py
is_key_modified
zoho/zohocrm-python-sdk-2.1
0
python
def is_key_modified(self, key): '\n\t\tThe method to check if the user has modified the given key\n\n\t\tParameters:\n\t\t\tkey (string) : A string representing the key\n\n\t\tReturns:\n\t\t\tint: An int representing the modification\n\t\t' if ((key is not None) and (not isinstance(key, str))): raise SDKException(Constants.DATA_TYPE_ERROR, 'KEY: key EXPECTED TYPE: str', None, None) if (key in self.__key_modified): return self.__key_modified.get(key) return None
def is_key_modified(self, key): '\n\t\tThe method to check if the user has modified the given key\n\n\t\tParameters:\n\t\t\tkey (string) : A string representing the key\n\n\t\tReturns:\n\t\t\tint: An int representing the modification\n\t\t' if ((key is not None) and (not isinstance(key, str))): raise SDKException(Constants.DATA_TYPE_ERROR, 'KEY: key EXPECTED TYPE: str', None, None) if (key in self.__key_modified): return self.__key_modified.get(key) return None<|docstring|>The method to check if the user has modified the given key Parameters: key (string) : A string representing the key Returns: int: An int representing the modification<|endoftext|>
f2951784b1a2b8a82caa24a10f5d6e7a37be6cac4daf41c0d862c785f6589acc
def set_key_modified(self, key, modification): '\n\t\tThe method to mark the given key as modified\n\n\t\tParameters:\n\t\t\tkey (string) : A string representing the key\n\t\t\tmodification (int) : An int representing the modification\n\t\t' if ((key is not None) and (not isinstance(key, str))): raise SDKException(Constants.DATA_TYPE_ERROR, 'KEY: key EXPECTED TYPE: str', None, None) if ((modification is not None) and (not isinstance(modification, int))): raise SDKException(Constants.DATA_TYPE_ERROR, 'KEY: modification EXPECTED TYPE: int', None, None) self.__key_modified[key] = modification
The method to mark the given key as modified Parameters: key (string) : A string representing the key modification (int) : An int representing the modification
zcrmsdk/src/com/zoho/crm/api/blue_print/validation_error.py
set_key_modified
zoho/zohocrm-python-sdk-2.1
0
python
def set_key_modified(self, key, modification): '\n\t\tThe method to mark the given key as modified\n\n\t\tParameters:\n\t\t\tkey (string) : A string representing the key\n\t\t\tmodification (int) : An int representing the modification\n\t\t' if ((key is not None) and (not isinstance(key, str))): raise SDKException(Constants.DATA_TYPE_ERROR, 'KEY: key EXPECTED TYPE: str', None, None) if ((modification is not None) and (not isinstance(modification, int))): raise SDKException(Constants.DATA_TYPE_ERROR, 'KEY: modification EXPECTED TYPE: int', None, None) self.__key_modified[key] = modification
def set_key_modified(self, key, modification): '\n\t\tThe method to mark the given key as modified\n\n\t\tParameters:\n\t\t\tkey (string) : A string representing the key\n\t\t\tmodification (int) : An int representing the modification\n\t\t' if ((key is not None) and (not isinstance(key, str))): raise SDKException(Constants.DATA_TYPE_ERROR, 'KEY: key EXPECTED TYPE: str', None, None) if ((modification is not None) and (not isinstance(modification, int))): raise SDKException(Constants.DATA_TYPE_ERROR, 'KEY: modification EXPECTED TYPE: int', None, None) self.__key_modified[key] = modification<|docstring|>The method to mark the given key as modified Parameters: key (string) : A string representing the key modification (int) : An int representing the modification<|endoftext|>
6a3b06db42960d62f7feee719e87d4824c4e4076bb024178a0dbcfa0c5a4d399
def concat_dataset(data_path: Path = DATA_PATH) -> pd.DataFrame:
    """Concatenate the yearly skaters CSV files (2009-2020) into one DataFrame.

    Parameters
    ----------
    data_path : Path
        Root data directory containing ``raw/skaters_<year>.csv`` files.

    Returns
    -------
    pd.DataFrame
        All yearly files stacked vertically (original per-file indices kept,
        matching the previous append-based behaviour).
    """
    frames = []
    for year in range(2009, 2021):
        print(f'Data year: {year}')
        frames.append(pd.read_csv(data_path.joinpath(f'raw/skaters_{year}.csv')))
    # DataFrame.append was deprecated and removed in pandas 2.0; building a
    # list and concatenating once is the supported API and avoids quadratic
    # re-copying on every iteration.
    return pd.concat(frames)
Concatenate skaters datasets.
src/dataset.py
concat_dataset
Charles-de-Montigny/fluxion-ch
0
python
def concat_dataset(data_path: Path=DATA_PATH) -> pd.DataFrame: '\n \n ' years = range(2009, 2021) df = pd.DataFrame() for year in years: print(f'Data year: {year}') df = df.append(pd.read_csv(data_path.joinpath(f'raw/skaters_{year}.csv'))) return df
def concat_dataset(data_path: Path=DATA_PATH) -> pd.DataFrame: '\n \n ' years = range(2009, 2021) df = pd.DataFrame() for year in years: print(f'Data year: {year}') df = df.append(pd.read_csv(data_path.joinpath(f'raw/skaters_{year}.csv'))) return df<|docstring|>Concatenate skaters datasets.<|endoftext|>
64b902bc855d94b27a213648ed4788c05ec8096be596694cf26d4b19d3fc35d1
def clean_df(df: pd.DataFrame) -> pd.DataFrame:
    """Filter skaters to all-situation rows with at least 6000 seconds of
    icetime, ordered by player name then season (the old index is kept as
    an ``index`` column)."""
    cleaned = df.query("situation == 'all'").query('icetime >= 6000')
    cleaned = cleaned.sort_values(['name', 'season'], ascending=True)
    return cleaned.reset_index()
Clean DataFrame.
src/dataset.py
clean_df
Charles-de-Montigny/fluxion-ch
0
python
def clean_df(df: pd.DataFrame) -> pd.DataFrame: '\n \n ' df = df.query("situation == 'all'") df = df.query('icetime >= 6000') df.sort_values(['name', 'season'], ascending=True, inplace=True) df.reset_index(inplace=True) return df
def clean_df(df: pd.DataFrame) -> pd.DataFrame: '\n \n ' df = df.query("situation == 'all'") df = df.query('icetime >= 6000') df.sort_values(['name', 'season'], ascending=True, inplace=True) df.reset_index(inplace=True) return df<|docstring|>Clean DataFrame.<|endoftext|>
4f33f6b6dc01826d53927da29ca4176303dd425a3acb60c391a345d990b15d31
def make_dataset(data_path: Path, project_path=Path('./')):
    """Create the final dataset for the dashboard.

    Loads and cleans the raw skaters data, then scores it against the
    configured player rosters.
    """
    skaters = clean_df(concat_dataset(data_path=data_path))
    roster = read_json(project_path.joinpath('config/players.json'))
    scorer = Score(skaters=skaters, players=roster)
    scorer.make()
    return scorer.shooting_df
Create the final dataset for the dashboard.
src/dataset.py
make_dataset
Charles-de-Montigny/fluxion-ch
0
python
def make_dataset(data_path: Path, project_path=Path('./')): '\n \n ' df = concat_dataset(data_path=data_path) df = clean_df(df) players = read_json(project_path.joinpath('config/players.json')) score = Score(skaters=df, players=players) score.make() return score.shooting_df
def make_dataset(data_path: Path, project_path=Path('./')): '\n \n ' df = concat_dataset(data_path=data_path) df = clean_df(df) players = read_json(project_path.joinpath('config/players.json')) score = Score(skaters=df, players=players) score.make() return score.shooting_df<|docstring|>Create the final dataset for the dashboard.<|endoftext|>
c41d36d408e9db573d5211af0d9fcfe40aa871bbe4e491cbfabf3d3f68ca2280
def shooting(self):
    """
    Create the shooting DataFrame for the MTL habs.

    Builds per-game rate features, converts them to within-season
    percentile scores (shooting / passing / physical / experience /
    defending), smooths each player's scores over seasons with an
    exponentially weighted mean, keeps only the Canadiens rosters for
    the 2020 and 2021 seasons, and rescales every score to a 0-100 int.
    """
    # Skeleton: one row per skater-season; score columns are added below.
    self.shooting_df = self.skaters[['season', 'name']].copy(deep=True)
    # Per-shot / per-game rate features derived from raw season totals.
    self.skaters['goal_pct'] = (self.skaters['I_F_goals'] / self.skaters['I_F_shotsOnGoal'])
    self.skaters['goal_per_game'] = (self.skaters['I_F_goals'] / self.skaters['games_played'])
    self.skaters['shot_per_game'] = (self.skaters['I_F_highDangerShots'] / self.skaters['games_played'])
    self.skaters['primary_per_game'] = (self.skaters['I_F_primaryAssists'] / self.skaters['games_played'])
    self.skaters['secondary_per_game'] = (self.skaters['I_F_secondaryAssists'] / self.skaters['games_played'])
    self.skaters['hits_per_game'] = (self.skaters['I_F_hits'] / self.skaters['games_played'])
    self.skaters['pim_per_game'] = (self.skaters['I_F_penalityMinutes'] / self.skaters['games_played'])
    # Expected-goal differential while on ice, per game.
    self.skaters['plus-minus'] = ((self.skaters['OnIce_F_xGoals'] - self.skaters['OnIce_A_xGoals']) / self.skaters['games_played'])
    self.skaters['block_shot_per_game'] = (self.skaters['shotsBlockedByPlayer'] / self.skaters['games_played'])
    self.skaters['dzone_faceoff_per_game'] = (self.skaters['I_F_dZoneShiftStarts'] / self.skaters['games_played'])
    # Inverted rate so FEWER defensive-zone giveaways score higher; the
    # np.where guards against division by zero when giveaways are 0.
    self.skaters['giveaway_per_game'] = np.where((self.skaters['I_F_dZoneGiveaways'] != 0), (1 / (self.skaters['I_F_dZoneGiveaways'] / self.skaters['games_played'])), 0)
    # Career-to-date totals per player; assumes rows are already sorted by
    # name/season so the cumsum means "experience so far" -- see clean_df.
    exp_cumsum = self.skaters.groupby('name')[['games_played', 'icetime']].cumsum().rename(columns={'games_played': 'games_played_sum', 'icetime': 'icetime_sum'})
    self.skaters[exp_cumsum.columns] = exp_cumsum
    # Percentile (0-1) rank within each season, averaged over the
    # component features, for each score dimension.
    self.shooting_df['shooting'] = self.skaters.groupby(['season'])[['goal_per_game', 'shot_per_game', 'goal_pct']].rank(pct=True).mean(axis=1)
    self.shooting_df['passing'] = self.skaters.groupby('season')[['primary_per_game', 'secondary_per_game']].rank(pct=True).mean(axis=1)
    self.shooting_df['physical'] = self.skaters.groupby('season')[['hits_per_game', 'pim_per_game']].rank(pct=True).mean(axis=1)
    self.shooting_df['experience'] = self.skaters.groupby('season')[['games_played_sum', 'icetime_sum']].rank(pct=True).mean(axis=1)
    # Defending is ranked within season AND position so forwards are not
    # compared with defensemen.
    self.shooting_df['defending'] = self.skaters.groupby(['season', 'position'])[['block_shot_per_game', 'giveaway_per_game', 'dzone_faceoff_per_game']].rank(pct=True).mean(axis=1)
    # Exponentially weighted mean over each player's seasons (com=0.2
    # weights the most recent season heavily); reset_index produces a
    # 'level_1' helper column that is dropped immediately.
    adjusted_shooting = self.shooting_df.groupby(['name'])[('shooting', 'passing', 'physical', 'defending')].ewm(com=0.2).mean().reset_index().drop('level_1', axis=1)
    adjusted_shooting.columns = ['name_x', 'adjusted_shooting', 'adjusted_passing', 'adjusted_physical', 'adjusted_defending']
    # NOTE(review): this column-wise concat assumes adjusted_shooting rows
    # line up positionally with shooting_df -- confirm ordering upstream.
    self.shooting_df = pd.concat([self.shooting_df, adjusted_shooting], axis=1)
    self.shooting_df.drop(['name_x', 'shooting', 'passing', 'physical', 'defending'], axis=1, inplace=True)
    # Keep only Canadiens roster players: the 2020 roster is scored on the
    # 2019 season, the 2021 roster on the 2020 season.
    # NOTE(review): '(mask, :)' inside .loc looks like an AST round-trip
    # artifact of 'df.loc[mask, :]' -- confirm against the original source.
    mtl2020 = self.shooting_df.loc[(self.shooting_df['name'].apply((lambda x: (x in self.players['2020']))), :)].query('season==2019')
    mtl2021 = self.shooting_df.loc[(self.shooting_df['name'].apply((lambda x: (x in self.players['2021']))), :)].query('season==2020')
    self.shooting_df = pd.concat([mtl2020, mtl2021], axis=0)
    # Rescale each 0-1 percentile to an integer 0-100 display score.
    self.shooting_df['shooting_score'] = self.shooting_df['adjusted_shooting'].apply((lambda x: int((x * 100))))
    self.shooting_df['passing_score'] = self.shooting_df['adjusted_passing'].apply((lambda x: int((x * 100))))
    self.shooting_df['physical_score'] = self.shooting_df['adjusted_physical'].apply((lambda x: int((x * 100))))
    self.shooting_df['exp_score'] = self.shooting_df['experience'].apply((lambda x: int((x * 100))))
    self.shooting_df['defending_score'] = self.shooting_df['adjusted_defending'].apply((lambda x: int((x * 100))))
    # Drop the intermediate columns, leaving only the *_score outputs,
    # then order by physical score for display.
    self.shooting_df.drop(['adjusted_shooting', 'adjusted_passing', 'adjusted_physical', 'experience', 'adjusted_defending'], axis=1, inplace=True)
    self.shooting_df.sort_values('physical_score', ascending=False, inplace=True)
    self.shooting_df.reset_index(drop=True, inplace=True)
Create the shooting DataFrame for the MTL habs.
src/dataset.py
shooting
Charles-de-Montigny/fluxion-ch
0
python
def shooting(self): '\n \n ' self.shooting_df = self.skaters[['season', 'name']].copy(deep=True) self.skaters['goal_pct'] = (self.skaters['I_F_goals'] / self.skaters['I_F_shotsOnGoal']) self.skaters['goal_per_game'] = (self.skaters['I_F_goals'] / self.skaters['games_played']) self.skaters['shot_per_game'] = (self.skaters['I_F_highDangerShots'] / self.skaters['games_played']) self.skaters['primary_per_game'] = (self.skaters['I_F_primaryAssists'] / self.skaters['games_played']) self.skaters['secondary_per_game'] = (self.skaters['I_F_secondaryAssists'] / self.skaters['games_played']) self.skaters['hits_per_game'] = (self.skaters['I_F_hits'] / self.skaters['games_played']) self.skaters['pim_per_game'] = (self.skaters['I_F_penalityMinutes'] / self.skaters['games_played']) self.skaters['plus-minus'] = ((self.skaters['OnIce_F_xGoals'] - self.skaters['OnIce_A_xGoals']) / self.skaters['games_played']) self.skaters['block_shot_per_game'] = (self.skaters['shotsBlockedByPlayer'] / self.skaters['games_played']) self.skaters['dzone_faceoff_per_game'] = (self.skaters['I_F_dZoneShiftStarts'] / self.skaters['games_played']) self.skaters['giveaway_per_game'] = np.where((self.skaters['I_F_dZoneGiveaways'] != 0), (1 / (self.skaters['I_F_dZoneGiveaways'] / self.skaters['games_played'])), 0) exp_cumsum = self.skaters.groupby('name')[['games_played', 'icetime']].cumsum().rename(columns={'games_played': 'games_played_sum', 'icetime': 'icetime_sum'}) self.skaters[exp_cumsum.columns] = exp_cumsum self.shooting_df['shooting'] = self.skaters.groupby(['season'])[['goal_per_game', 'shot_per_game', 'goal_pct']].rank(pct=True).mean(axis=1) self.shooting_df['passing'] = self.skaters.groupby('season')[['primary_per_game', 'secondary_per_game']].rank(pct=True).mean(axis=1) self.shooting_df['physical'] = self.skaters.groupby('season')[['hits_per_game', 'pim_per_game']].rank(pct=True).mean(axis=1) self.shooting_df['experience'] = self.skaters.groupby('season')[['games_played_sum', 
'icetime_sum']].rank(pct=True).mean(axis=1) self.shooting_df['defending'] = self.skaters.groupby(['season', 'position'])[['block_shot_per_game', 'giveaway_per_game', 'dzone_faceoff_per_game']].rank(pct=True).mean(axis=1) adjusted_shooting = self.shooting_df.groupby(['name'])[('shooting', 'passing', 'physical', 'defending')].ewm(com=0.2).mean().reset_index().drop('level_1', axis=1) adjusted_shooting.columns = ['name_x', 'adjusted_shooting', 'adjusted_passing', 'adjusted_physical', 'adjusted_defending'] self.shooting_df = pd.concat([self.shooting_df, adjusted_shooting], axis=1) self.shooting_df.drop(['name_x', 'shooting', 'passing', 'physical', 'defending'], axis=1, inplace=True) mtl2020 = self.shooting_df.loc[(self.shooting_df['name'].apply((lambda x: (x in self.players['2020']))), :)].query('season==2019') mtl2021 = self.shooting_df.loc[(self.shooting_df['name'].apply((lambda x: (x in self.players['2021']))), :)].query('season==2020') self.shooting_df = pd.concat([mtl2020, mtl2021], axis=0) self.shooting_df['shooting_score'] = self.shooting_df['adjusted_shooting'].apply((lambda x: int((x * 100)))) self.shooting_df['passing_score'] = self.shooting_df['adjusted_passing'].apply((lambda x: int((x * 100)))) self.shooting_df['physical_score'] = self.shooting_df['adjusted_physical'].apply((lambda x: int((x * 100)))) self.shooting_df['exp_score'] = self.shooting_df['experience'].apply((lambda x: int((x * 100)))) self.shooting_df['defending_score'] = self.shooting_df['adjusted_defending'].apply((lambda x: int((x * 100)))) self.shooting_df.drop(['adjusted_shooting', 'adjusted_passing', 'adjusted_physical', 'experience', 'adjusted_defending'], axis=1, inplace=True) self.shooting_df.sort_values('physical_score', ascending=False, inplace=True) self.shooting_df.reset_index(drop=True, inplace=True)
def shooting(self): '\n \n ' self.shooting_df = self.skaters[['season', 'name']].copy(deep=True) self.skaters['goal_pct'] = (self.skaters['I_F_goals'] / self.skaters['I_F_shotsOnGoal']) self.skaters['goal_per_game'] = (self.skaters['I_F_goals'] / self.skaters['games_played']) self.skaters['shot_per_game'] = (self.skaters['I_F_highDangerShots'] / self.skaters['games_played']) self.skaters['primary_per_game'] = (self.skaters['I_F_primaryAssists'] / self.skaters['games_played']) self.skaters['secondary_per_game'] = (self.skaters['I_F_secondaryAssists'] / self.skaters['games_played']) self.skaters['hits_per_game'] = (self.skaters['I_F_hits'] / self.skaters['games_played']) self.skaters['pim_per_game'] = (self.skaters['I_F_penalityMinutes'] / self.skaters['games_played']) self.skaters['plus-minus'] = ((self.skaters['OnIce_F_xGoals'] - self.skaters['OnIce_A_xGoals']) / self.skaters['games_played']) self.skaters['block_shot_per_game'] = (self.skaters['shotsBlockedByPlayer'] / self.skaters['games_played']) self.skaters['dzone_faceoff_per_game'] = (self.skaters['I_F_dZoneShiftStarts'] / self.skaters['games_played']) self.skaters['giveaway_per_game'] = np.where((self.skaters['I_F_dZoneGiveaways'] != 0), (1 / (self.skaters['I_F_dZoneGiveaways'] / self.skaters['games_played'])), 0) exp_cumsum = self.skaters.groupby('name')[['games_played', 'icetime']].cumsum().rename(columns={'games_played': 'games_played_sum', 'icetime': 'icetime_sum'}) self.skaters[exp_cumsum.columns] = exp_cumsum self.shooting_df['shooting'] = self.skaters.groupby(['season'])[['goal_per_game', 'shot_per_game', 'goal_pct']].rank(pct=True).mean(axis=1) self.shooting_df['passing'] = self.skaters.groupby('season')[['primary_per_game', 'secondary_per_game']].rank(pct=True).mean(axis=1) self.shooting_df['physical'] = self.skaters.groupby('season')[['hits_per_game', 'pim_per_game']].rank(pct=True).mean(axis=1) self.shooting_df['experience'] = self.skaters.groupby('season')[['games_played_sum', 
'icetime_sum']].rank(pct=True).mean(axis=1) self.shooting_df['defending'] = self.skaters.groupby(['season', 'position'])[['block_shot_per_game', 'giveaway_per_game', 'dzone_faceoff_per_game']].rank(pct=True).mean(axis=1) adjusted_shooting = self.shooting_df.groupby(['name'])[('shooting', 'passing', 'physical', 'defending')].ewm(com=0.2).mean().reset_index().drop('level_1', axis=1) adjusted_shooting.columns = ['name_x', 'adjusted_shooting', 'adjusted_passing', 'adjusted_physical', 'adjusted_defending'] self.shooting_df = pd.concat([self.shooting_df, adjusted_shooting], axis=1) self.shooting_df.drop(['name_x', 'shooting', 'passing', 'physical', 'defending'], axis=1, inplace=True) mtl2020 = self.shooting_df.loc[(self.shooting_df['name'].apply((lambda x: (x in self.players['2020']))), :)].query('season==2019') mtl2021 = self.shooting_df.loc[(self.shooting_df['name'].apply((lambda x: (x in self.players['2021']))), :)].query('season==2020') self.shooting_df = pd.concat([mtl2020, mtl2021], axis=0) self.shooting_df['shooting_score'] = self.shooting_df['adjusted_shooting'].apply((lambda x: int((x * 100)))) self.shooting_df['passing_score'] = self.shooting_df['adjusted_passing'].apply((lambda x: int((x * 100)))) self.shooting_df['physical_score'] = self.shooting_df['adjusted_physical'].apply((lambda x: int((x * 100)))) self.shooting_df['exp_score'] = self.shooting_df['experience'].apply((lambda x: int((x * 100)))) self.shooting_df['defending_score'] = self.shooting_df['adjusted_defending'].apply((lambda x: int((x * 100)))) self.shooting_df.drop(['adjusted_shooting', 'adjusted_passing', 'adjusted_physical', 'experience', 'adjusted_defending'], axis=1, inplace=True) self.shooting_df.sort_values('physical_score', ascending=False, inplace=True) self.shooting_df.reset_index(drop=True, inplace=True)<|docstring|>Create the shooting DataFrame for the MTL habs.<|endoftext|>
6ea7e61b75358ee186da806baa6c2f58ec679effc5441abcbc8c02d84f6d66f6
def __init__(self, IP='127.0.0.1', Port=19997):
    """
    Instantiate FourRoomGridWorld.

    Parameters
    ----------
    IP: string default = '127.0.0.1'
        IP address to connect V-REP server.

    Port: int default = 19997
        Port to communicate with V-REP server.
    """
    print('Initialize FourRoomGridWorld ...')
    # Close any stale remote-API connections before opening a fresh one.
    vrep.simxFinish((- 1))
    self.clientID = vrep.simxStart(IP, Port, True, True, 5000, 5)
    # simxStart returns -1 on failure; any other value is a usable client id.
    # Note: on failure we only print -- the rest of the setup still runs.
    if (self.clientID != (- 1)):
        print('FourRoomGridWorld connected to remote V-REP API server')
    else:
        print('FourRoomGridWorld failed connecting to remote V-REP API server')
    # Blocking mode: every remote call waits for the server's reply.
    self._def_op_mode = vrep.simx_opmode_blocking
    vrep.simxStartSimulation(self.clientID, vrep.simx_opmode_blocking)
    # Fetch handles and names for every scene-object category (walls,
    # floor tiles, hallways, goals, participant) from the running scene.
    (self.wallBrickHandles, self.wallBrickNames, self.floorTileHandles, self.floorTileNames, self.hallwayHandles, self.hallwayNames, self.goalHandles, self.goalNames, self.standingParticipantHandles, self.standingParticipantNames) = get_all_object_name_and_handle(self.clientID, self._def_op_mode, vrep)
    # Cache world-frame positions for each object category.
    self.wallBrickPositions = get_object_position(self.wallBrickHandles, self.clientID, self._def_op_mode, vrep)
    self.floorTilePositions = get_object_position(self.floorTileHandles, self.clientID, self._def_op_mode, vrep)
    self.hallwayPositions = get_object_position(self.hallwayHandles, self.clientID, self._def_op_mode, vrep)
    self.goalPositions = get_object_position(self.goalHandles, self.clientID, self._def_op_mode, vrep)
    self.standingParticipantPositions = get_object_position(self.standingParticipantHandles, self.clientID, self._def_op_mode, vrep)
    # Remember the starting pose so an episode reset can restore it.
    self.initial_standingParticipantPositions = self.standingParticipantPositions
    print('Initialize action and observation space...')
    # States are floor tiles plus hallway cells; a state index is the
    # position's index within observation_positions (see _self_observe).
    self.observation_handles = np.concatenate((self.floorTileHandles, self.hallwayHandles))
    self.observation_positions = np.concatenate((self.floorTilePositions, self.hallwayPositions))
    self.observation_dim = len(self.observation_handles)
    self.observation_space = spaces.Discrete(self.observation_dim)
    # Four primitive moves (presumably up/down/left/right) -- TODO confirm
    # against _transition_model.
    self.action_dim = 4
    self.action_space = spaces.Discrete(self.action_dim)
    print('Initialization of FourRoomGridWorld done!')
Instantiate FourRoomGridWorld. Parameters ---------- IP: string default = '127.0.0.1' IP address to connect V-REP server. Port: int default = 19997 Port to communicate with V-REP server.
Environment/FourRoomGridWorld.py
__init__
LinghengMeng/4_Room_World_Environment
6
python
def __init__(self, IP='127.0.0.1', Port=19997): "\n Instantiate FourRoomGridWorld. \n \n Parameters\n ----------\n IP: string default = '127.0.0.1'\n IP address to connect V-REP server.\n \n Port: int default = 19997\n Port to communicate with V-REP server.\n \n " print('Initialize FourRoomGridWorld ...') vrep.simxFinish((- 1)) self.clientID = vrep.simxStart(IP, Port, True, True, 5000, 5) if (self.clientID != (- 1)): print('FourRoomGridWorld connected to remote V-REP API server') else: print('FourRoomGridWorld failed connecting to remote V-REP API server') self._def_op_mode = vrep.simx_opmode_blocking vrep.simxStartSimulation(self.clientID, vrep.simx_opmode_blocking) (self.wallBrickHandles, self.wallBrickNames, self.floorTileHandles, self.floorTileNames, self.hallwayHandles, self.hallwayNames, self.goalHandles, self.goalNames, self.standingParticipantHandles, self.standingParticipantNames) = get_all_object_name_and_handle(self.clientID, self._def_op_mode, vrep) self.wallBrickPositions = get_object_position(self.wallBrickHandles, self.clientID, self._def_op_mode, vrep) self.floorTilePositions = get_object_position(self.floorTileHandles, self.clientID, self._def_op_mode, vrep) self.hallwayPositions = get_object_position(self.hallwayHandles, self.clientID, self._def_op_mode, vrep) self.goalPositions = get_object_position(self.goalHandles, self.clientID, self._def_op_mode, vrep) self.standingParticipantPositions = get_object_position(self.standingParticipantHandles, self.clientID, self._def_op_mode, vrep) self.initial_standingParticipantPositions = self.standingParticipantPositions print('Initialize action and observation space...') self.observation_handles = np.concatenate((self.floorTileHandles, self.hallwayHandles)) self.observation_positions = np.concatenate((self.floorTilePositions, self.hallwayPositions)) self.observation_dim = len(self.observation_handles) self.observation_space = spaces.Discrete(self.observation_dim) self.action_dim = 4 self.action_space = 
spaces.Discrete(self.action_dim) print('Initialization of FourRoomGridWorld done!')
def __init__(self, IP='127.0.0.1', Port=19997): "\n Instantiate FourRoomGridWorld. \n \n Parameters\n ----------\n IP: string default = '127.0.0.1'\n IP address to connect V-REP server.\n \n Port: int default = 19997\n Port to communicate with V-REP server.\n \n " print('Initialize FourRoomGridWorld ...') vrep.simxFinish((- 1)) self.clientID = vrep.simxStart(IP, Port, True, True, 5000, 5) if (self.clientID != (- 1)): print('FourRoomGridWorld connected to remote V-REP API server') else: print('FourRoomGridWorld failed connecting to remote V-REP API server') self._def_op_mode = vrep.simx_opmode_blocking vrep.simxStartSimulation(self.clientID, vrep.simx_opmode_blocking) (self.wallBrickHandles, self.wallBrickNames, self.floorTileHandles, self.floorTileNames, self.hallwayHandles, self.hallwayNames, self.goalHandles, self.goalNames, self.standingParticipantHandles, self.standingParticipantNames) = get_all_object_name_and_handle(self.clientID, self._def_op_mode, vrep) self.wallBrickPositions = get_object_position(self.wallBrickHandles, self.clientID, self._def_op_mode, vrep) self.floorTilePositions = get_object_position(self.floorTileHandles, self.clientID, self._def_op_mode, vrep) self.hallwayPositions = get_object_position(self.hallwayHandles, self.clientID, self._def_op_mode, vrep) self.goalPositions = get_object_position(self.goalHandles, self.clientID, self._def_op_mode, vrep) self.standingParticipantPositions = get_object_position(self.standingParticipantHandles, self.clientID, self._def_op_mode, vrep) self.initial_standingParticipantPositions = self.standingParticipantPositions print('Initialize action and observation space...') self.observation_handles = np.concatenate((self.floorTileHandles, self.hallwayHandles)) self.observation_positions = np.concatenate((self.floorTilePositions, self.hallwayPositions)) self.observation_dim = len(self.observation_handles) self.observation_space = spaces.Discrete(self.observation_dim) self.action_dim = 4 self.action_space = 
spaces.Discrete(self.action_dim) print('Initialization of FourRoomGridWorld done!')<|docstring|>Instantiate FourRoomGridWorld. Parameters ---------- IP: string default = '127.0.0.1' IP address to connect V-REP server. Port: int default = 19997 Port to communicate with V-REP server.<|endoftext|>
7c79d2cc0ce4df8baa4f860659c115849f4996277069bb72823ebf85506d5ba9
def step(self, action):
    """
    Take one step of interaction.

    Parameters
    ----------
    action: int

    Returns
    -------
    observation: int
        obervation of environment after taking an action
    reward: float
        reward after taking an action
    done: bool
        whether simulation is done or not.
    info: list
        some information for debugging
    """
    # Reject NaN and out-of-range actions before touching the simulator.
    if np.sum(np.isnan(action)) != 0:
        raise ValueError('Find nan value in action!')
    if not self.action_space.contains(int(action)):
        raise ValueError('action value: {} is not in action_space'.format(action))
    self._act(action)
    # Give the simulator a moment to settle before observing.
    time.sleep(0.01)
    self.observation = self._self_observe()
    self.reward, done = self._reward_function(self.observation)
    return self.observation, self.reward, done, []
Take one step of interaction. Parameters ---------- action: int Returns ------- observation: int obervation of environment after taking an action reward: float reward after taking an action done: bool whether simulation is done or not. info: some information for debugging
Environment/FourRoomGridWorld.py
step
LinghengMeng/4_Room_World_Environment
6
python
def step(self, action): '\n Take one step of interaction.\n \n Parameters\n ----------\n action: int\n \n Returns\n -------\n observation: int\n obervation of environment after taking an action\n reward: float\n reward after taking an action\n done: bool\n whether simulation is done or not.\n info:\n some information for debugging\n ' if (np.sum(np.isnan(action)) != 0): raise ValueError('Find nan value in action!') if (not self.action_space.contains(int(action))): raise ValueError('action value: {} is not in action_space'.format(action)) self._act(action) time.sleep(0.01) self.observation = self._self_observe() (self.reward, done) = self._reward_function(self.observation) info = [] return (self.observation, self.reward, done, info)
def step(self, action): '\n Take one step of interaction.\n \n Parameters\n ----------\n action: int\n \n Returns\n -------\n observation: int\n obervation of environment after taking an action\n reward: float\n reward after taking an action\n done: bool\n whether simulation is done or not.\n info:\n some information for debugging\n ' if (np.sum(np.isnan(action)) != 0): raise ValueError('Find nan value in action!') if (not self.action_space.contains(int(action))): raise ValueError('action value: {} is not in action_space'.format(action)) self._act(action) time.sleep(0.01) self.observation = self._self_observe() (self.reward, done) = self._reward_function(self.observation) info = [] return (self.observation, self.reward, done, info)<|docstring|>Take one step of interaction. Parameters ---------- action: int Returns ------- observation: int obervation of environment after taking an action reward: float reward after taking an action done: bool whether simulation is done or not. info: some information for debugging<|endoftext|>
4d813713f2d2c47957e129c1c8b0e014f14d7f11b49d9110b6d34b52d2bda396
def _self_observe(self):
    """
    Map the participant's current (x, y) position to its state index in
    self.observation_positions (floor tiles followed by hallways).

    Returns
    -------
    new_state: int
    """
    current = get_object_position(self.standingParticipantHandles, self.clientID, self._def_op_mode, vrep)
    px, py = current[0][0], current[0][1]
    # Linear scan for the tile whose (x, y) matches exactly; -1 is the
    # "not found" sentinel.
    new_state = next(
        (idx for idx, pos in enumerate(self.observation_positions) if pos[0] == px and pos[1] == py),
        -1,
    )
    if new_state == -1:
        raise Exception('Did not find new_state.')
    return new_state
return state of participant by mapping position to index in: self.floorTileHandles and self.hallwayHandles Returns ------- new_state: int
Environment/FourRoomGridWorld.py
_self_observe
LinghengMeng/4_Room_World_Environment
6
python
def _self_observe(self): '\n return state of participant by mapping position to index in:\n self.floorTileHandles and self.hallwayHandles\n \n Returns\n -------\n new_state: int\n ' standingParticipantPositions = get_object_position(self.standingParticipantHandles, self.clientID, self._def_op_mode, vrep) new_state = (- 1) for (index, position) in enumerate(self.observation_positions): if ((standingParticipantPositions[0][0] == position[0]) and (standingParticipantPositions[0][1] == position[1])): new_state = index break if (new_state == (- 1)): raise Exception('Did not find new_state.') return new_state
def _self_observe(self): '\n return state of participant by mapping position to index in:\n self.floorTileHandles and self.hallwayHandles\n \n Returns\n -------\n new_state: int\n ' standingParticipantPositions = get_object_position(self.standingParticipantHandles, self.clientID, self._def_op_mode, vrep) new_state = (- 1) for (index, position) in enumerate(self.observation_positions): if ((standingParticipantPositions[0][0] == position[0]) and (standingParticipantPositions[0][1] == position[1])): new_state = index break if (new_state == (- 1)): raise Exception('Did not find new_state.') return new_state<|docstring|>return state of participant by mapping position to index in: self.floorTileHandles and self.hallwayHandles Returns ------- new_state: int<|endoftext|>
d43ed3500d973d0a69187c6c46d5656aa717bbd32a613301651a96ab08f71f20
def _reward_function(self, observation): '\n If current state is in goal state, reward = 1.0 and done = True. \n Otherwise, reward = 0.0 and done = False.\n \n Parameters\n ----------\n observation: int\n state of participant which is also the index in \n self.observation_handles and self.observation_positions.\n \n Returns\n -------\n reward: float\n \n done: bool\n if current state is in goal state, done = True. Otherwise, done = False\n ' reward = 0.0 done = False for goal_position in self.goalPositions: x = self.observation_positions[observation][0] y = self.observation_positions[observation][1] if ((x == goal_position[0]) and (y == goal_position[1])): reward = 1.0 done = True break return (reward, done)
If current state is in goal state, reward = 1.0 and done = True. Otherwise, reward = 0.0 and done = False. Parameters ---------- observation: int state of participant which is also the index in self.observation_handles and self.observation_positions. Returns ------- reward: float done: bool if current state is in goal state, done = True. Otherwise, done = False
Environment/FourRoomGridWorld.py
_reward_function
LinghengMeng/4_Room_World_Environment
6
python
def _reward_function(self, observation): '\n If current state is in goal state, reward = 1.0 and done = True. \n Otherwise, reward = 0.0 and done = False.\n \n Parameters\n ----------\n observation: int\n state of participant which is also the index in \n self.observation_handles and self.observation_positions.\n \n Returns\n -------\n reward: float\n \n done: bool\n if current state is in goal state, done = True. Otherwise, done = False\n ' reward = 0.0 done = False for goal_position in self.goalPositions: x = self.observation_positions[observation][0] y = self.observation_positions[observation][1] if ((x == goal_position[0]) and (y == goal_position[1])): reward = 1.0 done = True break return (reward, done)
def _reward_function(self, observation): '\n If current state is in goal state, reward = 1.0 and done = True. \n Otherwise, reward = 0.0 and done = False.\n \n Parameters\n ----------\n observation: int\n state of participant which is also the index in \n self.observation_handles and self.observation_positions.\n \n Returns\n -------\n reward: float\n \n done: bool\n if current state is in goal state, done = True. Otherwise, done = False\n ' reward = 0.0 done = False for goal_position in self.goalPositions: x = self.observation_positions[observation][0] y = self.observation_positions[observation][1] if ((x == goal_position[0]) and (y == goal_position[1])): reward = 1.0 done = True break return (reward, done)<|docstring|>If current state is in goal state, reward = 1.0 and done = True. Otherwise, reward = 0.0 and done = False. Parameters ---------- observation: int state of participant which is also the index in self.observation_handles and self.observation_positions. Returns ------- reward: float done: bool if current state is in goal state, done = True. Otherwise, done = False<|endoftext|>
baf4541553ab398d441475131d2493b8f7da0497448fecc8ac37a2b84549666a
def _act(self, action):
    """Apply *action* by teleporting the participant to the pose produced
    by the transition model."""
    new_position, new_orientation = self._transition_model(action)
    participant = self.standingParticipantHandles[0]
    # -1 means poses are expressed in the world (absolute) frame.
    vrep.simxSetObjectPosition(self.clientID, participant, -1, new_position, vrep.simx_opmode_blocking)
    vrep.simxSetObjectOrientation(self.clientID, participant, -1, new_orientation, vrep.simx_opmode_blocking)
Take the action in V-REP
Environment/FourRoomGridWorld.py
_act
LinghengMeng/4_Room_World_Environment
6
python
def _act(self, action): '\n \n ' (targetPosition, targetOrientation) = self._transition_model(action) vrep.simxSetObjectPosition(self.clientID, self.standingParticipantHandles[0], (- 1), targetPosition, vrep.simx_opmode_blocking) vrep.simxSetObjectOrientation(self.clientID, self.standingParticipantHandles[0], (- 1), targetOrientation, vrep.simx_opmode_blocking)
def _act(self, action): '\n \n ' (targetPosition, targetOrientation) = self._transition_model(action) vrep.simxSetObjectPosition(self.clientID, self.standingParticipantHandles[0], (- 1), targetPosition, vrep.simx_opmode_blocking) vrep.simxSetObjectOrientation(self.clientID, self.standingParticipantHandles[0], (- 1), targetOrientation, vrep.simx_opmode_blocking)<|docstring|>Take the action in V-REP<|endoftext|>
1ac494f4291b0cb4fd616de4ca4299747302e7b88974ae7614cb4a4e9cd796cc
def _stochastic_primitive_action(self, action): '\n Get the stochatic action given action chosen by agent.\n \n Parameters\n ----------\n action: int\n \n Returns\n -------\n stochastic_action: int\n ' random.seed(1) rand_number = random.random() if (rand_number < (2 / 3)): stochastic_action = action else: rest_actions = [] for i in range(self.action_space.n): if (i != action): rest_actions.append(i) act_index = random.randint(0, len(rest_actions)) stochastic_action = rest_actions[act_index] return stochastic_action
Get the stochatic action given action chosen by agent. Parameters ---------- action: int Returns ------- stochastic_action: int
Environment/FourRoomGridWorld.py
_stochastic_primitive_action
LinghengMeng/4_Room_World_Environment
6
python
def _stochastic_primitive_action(self, action): '\n Get the stochatic action given action chosen by agent.\n \n Parameters\n ----------\n action: int\n \n Returns\n -------\n stochastic_action: int\n ' random.seed(1) rand_number = random.random() if (rand_number < (2 / 3)): stochastic_action = action else: rest_actions = [] for i in range(self.action_space.n): if (i != action): rest_actions.append(i) act_index = random.randint(0, len(rest_actions)) stochastic_action = rest_actions[act_index] return stochastic_action
def _stochastic_primitive_action(self, action): '\n Get the stochatic action given action chosen by agent.\n \n Parameters\n ----------\n action: int\n \n Returns\n -------\n stochastic_action: int\n ' random.seed(1) rand_number = random.random() if (rand_number < (2 / 3)): stochastic_action = action else: rest_actions = [] for i in range(self.action_space.n): if (i != action): rest_actions.append(i) act_index = random.randint(0, len(rest_actions)) stochastic_action = rest_actions[act_index] return stochastic_action<|docstring|>Get the stochatic action given action chosen by agent. Parameters ---------- action: int Returns ------- stochastic_action: int<|endoftext|>
ca8b22b27226b3900c857d709f0ff15eb7f1e2f321c63894155cab48643abec6
def _transition_model(self, action): '\n Return the target position of participant after taking action.\n \n Parameters\n ----------\n action: int\n \n Returns\n -------\n targetPosition: [x, y, z]\n targetOrientation: \n 0. up: [0,0,90]\n 1. down: [0,0,-90]\n 2. left: [0,0,180]\n 3. right: [0,0,0]\n ' standingParticipantPositions = get_object_position(self.standingParticipantHandles, self.clientID, self._def_op_mode, vrep) x = standingParticipantPositions[0][0] y = standingParticipantPositions[0][1] z = standingParticipantPositions[0][2] stochastic_action = self._stochastic_primitive_action(action) if (stochastic_action == 0): targetPosition = [x, (y + 1), z] targetOrientation = [0, 0, (math.pi / 2)] elif (stochastic_action == 1): targetPosition = [x, (y - 1), z] targetOrientation = [0, 0, ((- math.pi) / 2)] elif (stochastic_action == 2): targetPosition = [(x - 1), y, z] targetOrientation = [0, 0, math.pi] elif (stochastic_action == 3): targetPosition = [(x + 1), y, z] targetOrientation = [0, 0, 0] else: raise ValueError('Wrong stochastic_action value.') for wall_brick in self.wallBrickPositions: if ((targetPosition[0] == wall_brick[0]) and (targetPosition[1] == wall_brick[1])): targetPosition = standingParticipantPositions[0] break return (targetPosition, targetOrientation)
Return the target position of participant after taking action. Parameters ---------- action: int Returns ------- targetPosition: [x, y, z] targetOrientation: 0. up: [0,0,90] 1. down: [0,0,-90] 2. left: [0,0,180] 3. right: [0,0,0]
Environment/FourRoomGridWorld.py
_transition_model
LinghengMeng/4_Room_World_Environment
6
python
def _transition_model(self, action): '\n Return the target position of participant after taking action.\n \n Parameters\n ----------\n action: int\n \n Returns\n -------\n targetPosition: [x, y, z]\n targetOrientation: \n 0. up: [0,0,90]\n 1. down: [0,0,-90]\n 2. left: [0,0,180]\n 3. right: [0,0,0]\n ' standingParticipantPositions = get_object_position(self.standingParticipantHandles, self.clientID, self._def_op_mode, vrep) x = standingParticipantPositions[0][0] y = standingParticipantPositions[0][1] z = standingParticipantPositions[0][2] stochastic_action = self._stochastic_primitive_action(action) if (stochastic_action == 0): targetPosition = [x, (y + 1), z] targetOrientation = [0, 0, (math.pi / 2)] elif (stochastic_action == 1): targetPosition = [x, (y - 1), z] targetOrientation = [0, 0, ((- math.pi) / 2)] elif (stochastic_action == 2): targetPosition = [(x - 1), y, z] targetOrientation = [0, 0, math.pi] elif (stochastic_action == 3): targetPosition = [(x + 1), y, z] targetOrientation = [0, 0, 0] else: raise ValueError('Wrong stochastic_action value.') for wall_brick in self.wallBrickPositions: if ((targetPosition[0] == wall_brick[0]) and (targetPosition[1] == wall_brick[1])): targetPosition = standingParticipantPositions[0] break return (targetPosition, targetOrientation)
def _transition_model(self, action): '\n Return the target position of participant after taking action.\n \n Parameters\n ----------\n action: int\n \n Returns\n -------\n targetPosition: [x, y, z]\n targetOrientation: \n 0. up: [0,0,90]\n 1. down: [0,0,-90]\n 2. left: [0,0,180]\n 3. right: [0,0,0]\n ' standingParticipantPositions = get_object_position(self.standingParticipantHandles, self.clientID, self._def_op_mode, vrep) x = standingParticipantPositions[0][0] y = standingParticipantPositions[0][1] z = standingParticipantPositions[0][2] stochastic_action = self._stochastic_primitive_action(action) if (stochastic_action == 0): targetPosition = [x, (y + 1), z] targetOrientation = [0, 0, (math.pi / 2)] elif (stochastic_action == 1): targetPosition = [x, (y - 1), z] targetOrientation = [0, 0, ((- math.pi) / 2)] elif (stochastic_action == 2): targetPosition = [(x - 1), y, z] targetOrientation = [0, 0, math.pi] elif (stochastic_action == 3): targetPosition = [(x + 1), y, z] targetOrientation = [0, 0, 0] else: raise ValueError('Wrong stochastic_action value.') for wall_brick in self.wallBrickPositions: if ((targetPosition[0] == wall_brick[0]) and (targetPosition[1] == wall_brick[1])): targetPosition = standingParticipantPositions[0] break return (targetPosition, targetOrientation)<|docstring|>Return the target position of participant after taking action. Parameters ---------- action: int Returns ------- targetPosition: [x, y, z] targetOrientation: 0. up: [0,0,90] 1. down: [0,0,-90] 2. left: [0,0,180] 3. right: [0,0,0]<|endoftext|>
9f5f02302d12725b5347f4ded145424619a9cf9d77332dba511342d81db84ade
def reset(self): '\n Returns\n -------\n obseravtion: int\n one of 104 valid states\n ' vrep.simxSetObjectPosition(self.clientID, self.standingParticipantHandles[0], (- 1), self.initial_standingParticipantPositions[0], vrep.simx_opmode_blocking) self.observation = self._self_observe() return self.observation
Returns ------- obseravtion: int one of 104 valid states
Environment/FourRoomGridWorld.py
reset
LinghengMeng/4_Room_World_Environment
6
python
def reset(self): '\n Returns\n -------\n obseravtion: int\n one of 104 valid states\n ' vrep.simxSetObjectPosition(self.clientID, self.standingParticipantHandles[0], (- 1), self.initial_standingParticipantPositions[0], vrep.simx_opmode_blocking) self.observation = self._self_observe() return self.observation
def reset(self): '\n Returns\n -------\n obseravtion: int\n one of 104 valid states\n ' vrep.simxSetObjectPosition(self.clientID, self.standingParticipantHandles[0], (- 1), self.initial_standingParticipantPositions[0], vrep.simx_opmode_blocking) self.observation = self._self_observe() return self.observation<|docstring|>Returns ------- obseravtion: int one of 104 valid states<|endoftext|>
c091f458ba5c5acd433f0010ee16bff97d9dc97199c1ee139f9878c397437006
def hw4_1(): '\n Use\n 1. steepest descent\n 2. Newton algorithm\n 3. Cauchy point\n 4. Conjugate Gradient\n 5. Quasi-newton method\n\n to minimize the Rosenbrock function (2.22). First try the initial point\n x0 = (1.2, 1.2)T and then the more difficult starting point x0 = (1.2, 1)T\n ' (alpha_max, rho, c) = (1, 0.5, 0.5) (delta_max, eta) = (2, (1.0 / 8)) (x_p, f_obj, alpha, iter) = (([[]] * 5), ([[]] * 5), ([[]] * 5), ([[]] * 5)) leg = ['Trust Region (Cauchy Point)', 'Steepest Descent', 'Newton', 'Conjugate Gradient', 'Quasi-Newton'] f = (rb_function, rb_gradient, rb_hessian) (x_0, i) = (array([[1.2], [1.2]]), 0) (x_p[i], f_obj[i], alpha[i], iter[i]) = trust_region_min(f, x_0, delta_max, eta) i += 1 (x_p[i], f_obj[i], alpha[i], iter[i]) = backtracking_min(f, x_0, 'steepest descent', alpha_max, rho, c) i += 1 (x_p[i], f_obj[i], alpha[i], iter[i]) = backtracking_min(f, x_0, 'newton', alpha_max, rho, c) i += 1 f = (rb_function, rb_gradient, [[], False]) (x_p[i], f_obj[i], alpha[i], iter[i]) = backtracking_min(f, x_0, 'cg-fr', alpha_max, rho, c) i += 1 f = (rb_function, rb_gradient, [rb_hessian(x_0), x_0.copy(), [], False]) (x_p[i], f_obj[i], alpha[i], iter[i]) = backtracking_min(f, x_0, 'quasi-newton', alpha_max, rho, c) plot_results('out1.pdf', x_p, f_obj, alpha, iter, '[1.2, 1.2].T', leg) (x_0, i) = (array([[1.2], [1.0]]), 0) f = (rb_function, rb_gradient, rb_hessian) (x_p[i], f_obj[i], alpha[i], iter[i]) = trust_region_min(f, x_0, delta_max, eta) i += 1 (x_p[i], f_obj[i], alpha[i], iter[i]) = backtracking_min(f, x_0, 'steepest descent', alpha_max, rho, c) i += 1 (x_p[i], f_obj[i], alpha[i], iter[i]) = backtracking_min(f, x_0, 'newton', alpha_max, rho, c) i += 1 f = (rb_function, rb_gradient, [[], False]) (x_p[i], f_obj[i], alpha[i], iter[i]) = backtracking_min(f, x_0, 'cg-fr', alpha_max, rho, c) i += 1 f = (rb_function, rb_gradient, [rb_hessian(x_0), x_0.copy(), [], False]) (x_p[i], f_obj[i], alpha[i], iter[i]) = backtracking_min(f, x_0, 'quasi-newton', 
alpha_max, rho, c) plot_results('out2.pdf', x_p, f_obj, alpha, iter, '[1.2, 1.0].T', leg) return None
Use 1. steepest descent 2. Newton algorithm 3. Cauchy point 4. Conjugate Gradient 5. Quasi-newton method to minimize the Rosenbrock function (2.22). First try the initial point x0 = (1.2, 1.2)T and then the more difficult starting point x0 = (1.2, 1)T
hw4/hw4_1.py
hw4_1
escorciav/amcs211
2
python
def hw4_1(): '\n Use\n 1. steepest descent\n 2. Newton algorithm\n 3. Cauchy point\n 4. Conjugate Gradient\n 5. Quasi-newton method\n\n to minimize the Rosenbrock function (2.22). First try the initial point\n x0 = (1.2, 1.2)T and then the more difficult starting point x0 = (1.2, 1)T\n ' (alpha_max, rho, c) = (1, 0.5, 0.5) (delta_max, eta) = (2, (1.0 / 8)) (x_p, f_obj, alpha, iter) = (([[]] * 5), ([[]] * 5), ([[]] * 5), ([[]] * 5)) leg = ['Trust Region (Cauchy Point)', 'Steepest Descent', 'Newton', 'Conjugate Gradient', 'Quasi-Newton'] f = (rb_function, rb_gradient, rb_hessian) (x_0, i) = (array([[1.2], [1.2]]), 0) (x_p[i], f_obj[i], alpha[i], iter[i]) = trust_region_min(f, x_0, delta_max, eta) i += 1 (x_p[i], f_obj[i], alpha[i], iter[i]) = backtracking_min(f, x_0, 'steepest descent', alpha_max, rho, c) i += 1 (x_p[i], f_obj[i], alpha[i], iter[i]) = backtracking_min(f, x_0, 'newton', alpha_max, rho, c) i += 1 f = (rb_function, rb_gradient, [[], False]) (x_p[i], f_obj[i], alpha[i], iter[i]) = backtracking_min(f, x_0, 'cg-fr', alpha_max, rho, c) i += 1 f = (rb_function, rb_gradient, [rb_hessian(x_0), x_0.copy(), [], False]) (x_p[i], f_obj[i], alpha[i], iter[i]) = backtracking_min(f, x_0, 'quasi-newton', alpha_max, rho, c) plot_results('out1.pdf', x_p, f_obj, alpha, iter, '[1.2, 1.2].T', leg) (x_0, i) = (array([[1.2], [1.0]]), 0) f = (rb_function, rb_gradient, rb_hessian) (x_p[i], f_obj[i], alpha[i], iter[i]) = trust_region_min(f, x_0, delta_max, eta) i += 1 (x_p[i], f_obj[i], alpha[i], iter[i]) = backtracking_min(f, x_0, 'steepest descent', alpha_max, rho, c) i += 1 (x_p[i], f_obj[i], alpha[i], iter[i]) = backtracking_min(f, x_0, 'newton', alpha_max, rho, c) i += 1 f = (rb_function, rb_gradient, [[], False]) (x_p[i], f_obj[i], alpha[i], iter[i]) = backtracking_min(f, x_0, 'cg-fr', alpha_max, rho, c) i += 1 f = (rb_function, rb_gradient, [rb_hessian(x_0), x_0.copy(), [], False]) (x_p[i], f_obj[i], alpha[i], iter[i]) = backtracking_min(f, x_0, 'quasi-newton', 
alpha_max, rho, c) plot_results('out2.pdf', x_p, f_obj, alpha, iter, '[1.2, 1.0].T', leg) return None
def hw4_1(): '\n Use\n 1. steepest descent\n 2. Newton algorithm\n 3. Cauchy point\n 4. Conjugate Gradient\n 5. Quasi-newton method\n\n to minimize the Rosenbrock function (2.22). First try the initial point\n x0 = (1.2, 1.2)T and then the more difficult starting point x0 = (1.2, 1)T\n ' (alpha_max, rho, c) = (1, 0.5, 0.5) (delta_max, eta) = (2, (1.0 / 8)) (x_p, f_obj, alpha, iter) = (([[]] * 5), ([[]] * 5), ([[]] * 5), ([[]] * 5)) leg = ['Trust Region (Cauchy Point)', 'Steepest Descent', 'Newton', 'Conjugate Gradient', 'Quasi-Newton'] f = (rb_function, rb_gradient, rb_hessian) (x_0, i) = (array([[1.2], [1.2]]), 0) (x_p[i], f_obj[i], alpha[i], iter[i]) = trust_region_min(f, x_0, delta_max, eta) i += 1 (x_p[i], f_obj[i], alpha[i], iter[i]) = backtracking_min(f, x_0, 'steepest descent', alpha_max, rho, c) i += 1 (x_p[i], f_obj[i], alpha[i], iter[i]) = backtracking_min(f, x_0, 'newton', alpha_max, rho, c) i += 1 f = (rb_function, rb_gradient, [[], False]) (x_p[i], f_obj[i], alpha[i], iter[i]) = backtracking_min(f, x_0, 'cg-fr', alpha_max, rho, c) i += 1 f = (rb_function, rb_gradient, [rb_hessian(x_0), x_0.copy(), [], False]) (x_p[i], f_obj[i], alpha[i], iter[i]) = backtracking_min(f, x_0, 'quasi-newton', alpha_max, rho, c) plot_results('out1.pdf', x_p, f_obj, alpha, iter, '[1.2, 1.2].T', leg) (x_0, i) = (array([[1.2], [1.0]]), 0) f = (rb_function, rb_gradient, rb_hessian) (x_p[i], f_obj[i], alpha[i], iter[i]) = trust_region_min(f, x_0, delta_max, eta) i += 1 (x_p[i], f_obj[i], alpha[i], iter[i]) = backtracking_min(f, x_0, 'steepest descent', alpha_max, rho, c) i += 1 (x_p[i], f_obj[i], alpha[i], iter[i]) = backtracking_min(f, x_0, 'newton', alpha_max, rho, c) i += 1 f = (rb_function, rb_gradient, [[], False]) (x_p[i], f_obj[i], alpha[i], iter[i]) = backtracking_min(f, x_0, 'cg-fr', alpha_max, rho, c) i += 1 f = (rb_function, rb_gradient, [rb_hessian(x_0), x_0.copy(), [], False]) (x_p[i], f_obj[i], alpha[i], iter[i]) = backtracking_min(f, x_0, 'quasi-newton', 
alpha_max, rho, c) plot_results('out2.pdf', x_p, f_obj, alpha, iter, '[1.2, 1.0].T', leg) return None<|docstring|>Use 1. steepest descent 2. Newton algorithm 3. Cauchy point 4. Conjugate Gradient 5. Quasi-newton method to minimize the Rosenbrock function (2.22). First try the initial point x0 = (1.2, 1.2)T and then the more difficult starting point x0 = (1.2, 1)T<|endoftext|>
cdd76ce0e79c8878829be9f14ade932dfaeb410374da4745bd99e41feddf1afc
def __init__(self, env: LlvmEnv, db_path: Path, commit_frequency_in_seconds: int=300, max_step_buffer_length: int=5000): 'Constructor.\n\n :param env: The environment to wrap.\n\n :param db_path: The path of the database to log to. This file may\n already exist. If it does, new entries are appended. If the files\n does not exist, it is created.\n\n :param commit_frequency_in_seconds: The maximum amount of time to elapse\n before writing pending logs to the database.\n\n :param max_step_buffer_length: The maximum number of calls to\n :code:`step()` before writing pending logs to the database.\n ' super().__init__(env) if (not hasattr(env, 'unwrapped')): raise TypeError('Requires LlvmEnv base environment') if (not isinstance(self.unwrapped, LlvmEnv)): raise TypeError('Requires LlvmEnv base environment') db_path.parent.mkdir(exist_ok=True, parents=True) self.connection = sqlite3.connect(str(db_path)) self.cursor = self.connection.cursor() self.commit_frequency = commit_frequency_in_seconds self.max_step_buffer_length = max_step_buffer_length self.cursor.executescript(DB_CREATION_SCRIPT) self.connection.commit() self.last_commit = time() self.observations_buffer = {} self.step_buffer = [] self._observations = [self.env.observation.spaces['IrSha1'], self.env.observation.spaces['Ir'], self.env.observation.spaces['Programl'], self.env.observation.spaces['Autophase'], self.env.observation.spaces['InstCount'], self.env.observation.spaces['IrInstructionCount']] self._rewards = [self.env.reward.spaces['IrInstructionCountOz'], self.env.reward.spaces['IrInstructionCount']] self._reward_totals = np.zeros(len(self._rewards))
Constructor. :param env: The environment to wrap. :param db_path: The path of the database to log to. This file may already exist. If it does, new entries are appended. If the files does not exist, it is created. :param commit_frequency_in_seconds: The maximum amount of time to elapse before writing pending logs to the database. :param max_step_buffer_length: The maximum number of calls to :code:`step()` before writing pending logs to the database.
compiler_gym/wrappers/sqlite_logger.py
__init__
thecoblack/CompilerGym
0
python
def __init__(self, env: LlvmEnv, db_path: Path, commit_frequency_in_seconds: int=300, max_step_buffer_length: int=5000): 'Constructor.\n\n :param env: The environment to wrap.\n\n :param db_path: The path of the database to log to. This file may\n already exist. If it does, new entries are appended. If the files\n does not exist, it is created.\n\n :param commit_frequency_in_seconds: The maximum amount of time to elapse\n before writing pending logs to the database.\n\n :param max_step_buffer_length: The maximum number of calls to\n :code:`step()` before writing pending logs to the database.\n ' super().__init__(env) if (not hasattr(env, 'unwrapped')): raise TypeError('Requires LlvmEnv base environment') if (not isinstance(self.unwrapped, LlvmEnv)): raise TypeError('Requires LlvmEnv base environment') db_path.parent.mkdir(exist_ok=True, parents=True) self.connection = sqlite3.connect(str(db_path)) self.cursor = self.connection.cursor() self.commit_frequency = commit_frequency_in_seconds self.max_step_buffer_length = max_step_buffer_length self.cursor.executescript(DB_CREATION_SCRIPT) self.connection.commit() self.last_commit = time() self.observations_buffer = {} self.step_buffer = [] self._observations = [self.env.observation.spaces['IrSha1'], self.env.observation.spaces['Ir'], self.env.observation.spaces['Programl'], self.env.observation.spaces['Autophase'], self.env.observation.spaces['InstCount'], self.env.observation.spaces['IrInstructionCount']] self._rewards = [self.env.reward.spaces['IrInstructionCountOz'], self.env.reward.spaces['IrInstructionCount']] self._reward_totals = np.zeros(len(self._rewards))
def __init__(self, env: LlvmEnv, db_path: Path, commit_frequency_in_seconds: int=300, max_step_buffer_length: int=5000): 'Constructor.\n\n :param env: The environment to wrap.\n\n :param db_path: The path of the database to log to. This file may\n already exist. If it does, new entries are appended. If the files\n does not exist, it is created.\n\n :param commit_frequency_in_seconds: The maximum amount of time to elapse\n before writing pending logs to the database.\n\n :param max_step_buffer_length: The maximum number of calls to\n :code:`step()` before writing pending logs to the database.\n ' super().__init__(env) if (not hasattr(env, 'unwrapped')): raise TypeError('Requires LlvmEnv base environment') if (not isinstance(self.unwrapped, LlvmEnv)): raise TypeError('Requires LlvmEnv base environment') db_path.parent.mkdir(exist_ok=True, parents=True) self.connection = sqlite3.connect(str(db_path)) self.cursor = self.connection.cursor() self.commit_frequency = commit_frequency_in_seconds self.max_step_buffer_length = max_step_buffer_length self.cursor.executescript(DB_CREATION_SCRIPT) self.connection.commit() self.last_commit = time() self.observations_buffer = {} self.step_buffer = [] self._observations = [self.env.observation.spaces['IrSha1'], self.env.observation.spaces['Ir'], self.env.observation.spaces['Programl'], self.env.observation.spaces['Autophase'], self.env.observation.spaces['InstCount'], self.env.observation.spaces['IrInstructionCount']] self._rewards = [self.env.reward.spaces['IrInstructionCountOz'], self.env.reward.spaces['IrInstructionCount']] self._reward_totals = np.zeros(len(self._rewards))<|docstring|>Constructor. :param env: The environment to wrap. :param db_path: The path of the database to log to. This file may already exist. If it does, new entries are appended. If the files does not exist, it is created. :param commit_frequency_in_seconds: The maximum amount of time to elapse before writing pending logs to the database. 
:param max_step_buffer_length: The maximum number of calls to :code:`step()` before writing pending logs to the database.<|endoftext|>
2851a8d719d84bde65bf94025865ab4b2f3e678b47e87587eac9089f3bcad1fc
def flush(self) -> None: 'Flush the buffered steps and observations to database.' (n_steps, n_observations) = (len(self.step_buffer), len(self.observations_buffer)) if (not n_steps): return with Timer() as flush_time: self.cursor.executemany('INSERT OR IGNORE INTO States VALUES (?, ?, ?, ?, ?)', self.step_buffer) self.cursor.executemany('INSERT OR IGNORE INTO Observations VALUES (?, ?, ?, ?, ?, ?)', ((k, *v) for (k, v) in self.observations_buffer.items())) self.step_buffer = [] self.observations_buffer = {} self.connection.commit() logging.info('Wrote %d state records and %d observations in %s. Last flush %s ago', n_steps, n_observations, flush_time, humanize_duration((time() - self.last_commit))) self.last_commit = time()
Flush the buffered steps and observations to database.
compiler_gym/wrappers/sqlite_logger.py
flush
thecoblack/CompilerGym
0
python
def flush(self) -> None: (n_steps, n_observations) = (len(self.step_buffer), len(self.observations_buffer)) if (not n_steps): return with Timer() as flush_time: self.cursor.executemany('INSERT OR IGNORE INTO States VALUES (?, ?, ?, ?, ?)', self.step_buffer) self.cursor.executemany('INSERT OR IGNORE INTO Observations VALUES (?, ?, ?, ?, ?, ?)', ((k, *v) for (k, v) in self.observations_buffer.items())) self.step_buffer = [] self.observations_buffer = {} self.connection.commit() logging.info('Wrote %d state records and %d observations in %s. Last flush %s ago', n_steps, n_observations, flush_time, humanize_duration((time() - self.last_commit))) self.last_commit = time()
def flush(self) -> None: (n_steps, n_observations) = (len(self.step_buffer), len(self.observations_buffer)) if (not n_steps): return with Timer() as flush_time: self.cursor.executemany('INSERT OR IGNORE INTO States VALUES (?, ?, ?, ?, ?)', self.step_buffer) self.cursor.executemany('INSERT OR IGNORE INTO Observations VALUES (?, ?, ?, ?, ?, ?)', ((k, *v) for (k, v) in self.observations_buffer.items())) self.step_buffer = [] self.observations_buffer = {} self.connection.commit() logging.info('Wrote %d state records and %d observations in %s. Last flush %s ago', n_steps, n_observations, flush_time, humanize_duration((time() - self.last_commit))) self.last_commit = time()<|docstring|>Flush the buffered steps and observations to database.<|endoftext|>
cd4e7928a4e7f33b3001d3d21b1d064e9359b82fb2db8fc3e3c35c17d3f0f65f
def collect_stdlibs(args: argparse.Namespace): '\n Collect the lists of standard library_names for each language Babelfish can parse.\n ' log = logging.getLogger('collect_stdlibs') output_path = path_with_suffix(args.output_path, '.asdf') check_remove_filepath(output_path, log, args.force) process = CrawlerProcess(settings={'BOT_NAME': 'vegeta', 'ITEM_PIPELINES': {'sourced.ml.mining.cmd.collect_stdlibs.StdlibPipeline': 100}, 'LOG_ENABLED': 0, 'OUTPUT_PATH': output_path}) logging.getLogger('scrapy').setLevel(logging.WARNING) for spider in [CppStdlibSpider, CSharpStdlibSpider, GoStdlibSpider, JavaStdlibSpider, PythonStdlibSpider, RubyStdlibSpider]: process.crawl(spider) process.start() process.stop()
Collect the lists of standard library_names for each language Babelfish can parse.
sourced/ml/mining/cmd/collect_stdlibs.py
collect_stdlibs
src-d/ml-mining
9
python
def collect_stdlibs(args: argparse.Namespace): '\n \n ' log = logging.getLogger('collect_stdlibs') output_path = path_with_suffix(args.output_path, '.asdf') check_remove_filepath(output_path, log, args.force) process = CrawlerProcess(settings={'BOT_NAME': 'vegeta', 'ITEM_PIPELINES': {'sourced.ml.mining.cmd.collect_stdlibs.StdlibPipeline': 100}, 'LOG_ENABLED': 0, 'OUTPUT_PATH': output_path}) logging.getLogger('scrapy').setLevel(logging.WARNING) for spider in [CppStdlibSpider, CSharpStdlibSpider, GoStdlibSpider, JavaStdlibSpider, PythonStdlibSpider, RubyStdlibSpider]: process.crawl(spider) process.start() process.stop()
def collect_stdlibs(args: argparse.Namespace): '\n \n ' log = logging.getLogger('collect_stdlibs') output_path = path_with_suffix(args.output_path, '.asdf') check_remove_filepath(output_path, log, args.force) process = CrawlerProcess(settings={'BOT_NAME': 'vegeta', 'ITEM_PIPELINES': {'sourced.ml.mining.cmd.collect_stdlibs.StdlibPipeline': 100}, 'LOG_ENABLED': 0, 'OUTPUT_PATH': output_path}) logging.getLogger('scrapy').setLevel(logging.WARNING) for spider in [CppStdlibSpider, CSharpStdlibSpider, GoStdlibSpider, JavaStdlibSpider, PythonStdlibSpider, RubyStdlibSpider]: process.crawl(spider) process.start() process.stop()<|docstring|>Collect the lists of standard library_names for each language Babelfish can parse.<|endoftext|>
33c989e95d1d77a48c8a792d55859e2585bf8109fb736dc13bdae05bd2127ea8
def __init__(self, output_path): 'Create the item pipeline.' self.output_path = output_path
Create the item pipeline.
sourced/ml/mining/cmd/collect_stdlibs.py
__init__
src-d/ml-mining
9
python
def __init__(self, output_path): self.output_path = output_path
def __init__(self, output_path): self.output_path = output_path<|docstring|>Create the item pipeline.<|endoftext|>
5ee1479b22c588cbdc8978f4bba8d252c1da0ba1fc8af42236d8381067cee1da
@classmethod def from_crawler(cls, crawler): 'Pass the output path to the constructor when instantiating from the crawler.' return cls(output_path=crawler.settings['OUTPUT_PATH'])
Pass the output path to the constructor when instantiating from the crawler.
sourced/ml/mining/cmd/collect_stdlibs.py
from_crawler
src-d/ml-mining
9
python
@classmethod def from_crawler(cls, crawler): return cls(output_path=crawler.settings['OUTPUT_PATH'])
@classmethod def from_crawler(cls, crawler): return cls(output_path=crawler.settings['OUTPUT_PATH'])<|docstring|>Pass the output path to the constructor when instantiating from the crawler.<|endoftext|>
694c77a587a76c21f0f5a5bd1f6e0825d3fbaa1e9ae1209ef57806e95071b833
def open_spider(self, spider): 'Open a spider.' type(self).pending_spiders += 1 log = logging.getLogger(spider.name) log.info('Opened spider, pending: %d', self.pending_spiders)
Open a spider.
sourced/ml/mining/cmd/collect_stdlibs.py
open_spider
src-d/ml-mining
9
python
def open_spider(self, spider): type(self).pending_spiders += 1 log = logging.getLogger(spider.name) log.info('Opened spider, pending: %d', self.pending_spiders)
def open_spider(self, spider): type(self).pending_spiders += 1 log = logging.getLogger(spider.name) log.info('Opened spider, pending: %d', self.pending_spiders)<|docstring|>Open a spider.<|endoftext|>
8357a4b59a6b5a1a4ff585666ba0c83e02f0106f14519eadae51d5167ea8de96
def close_spider(self, spider): 'Close a spider, and save the model if it is the last to terminate.' log = logging.getLogger(spider.name) type(self).pending_spiders -= 1 log.info('Closed spider, pending: %d', self.pending_spiders) if (self.pending_spiders == 0): log.info('No more spiders are running, creating the model ...') StandardLibraries(log_level=logging.INFO).construct(self.library_names, self.library_metadata).save(self.output_path, series='stdlib') log.info('Saved model to %s', self.output_path)
Close a spider, and save the model if it is the last to terminate.
sourced/ml/mining/cmd/collect_stdlibs.py
close_spider
src-d/ml-mining
9
python
def close_spider(self, spider): log = logging.getLogger(spider.name) type(self).pending_spiders -= 1 log.info('Closed spider, pending: %d', self.pending_spiders) if (self.pending_spiders == 0): log.info('No more spiders are running, creating the model ...') StandardLibraries(log_level=logging.INFO).construct(self.library_names, self.library_metadata).save(self.output_path, series='stdlib') log.info('Saved model to %s', self.output_path)
def close_spider(self, spider): log = logging.getLogger(spider.name) type(self).pending_spiders -= 1 log.info('Closed spider, pending: %d', self.pending_spiders) if (self.pending_spiders == 0): log.info('No more spiders are running, creating the model ...') StandardLibraries(log_level=logging.INFO).construct(self.library_names, self.library_metadata).save(self.output_path, series='stdlib') log.info('Saved model to %s', self.output_path)<|docstring|>Close a spider, and save the model if it is the last to terminate.<|endoftext|>
59750438c464fa068efc3d6ecf632250c905489414bfe1b25ff603c93e4c9fae
def process_item(self, item, spider): 'Process an item returned by one of the spiders.' (lang, library_name) = (item['lang'], item['library_name']) if (lang not in self.library_names): self.library_names[lang] = set() self.library_metadata[lang] = {} self.library_names[lang].add(library_name) for meta in item['library_metadata']: if (meta not in self.library_metadata[lang]): self.library_metadata[lang][meta] = set() self.library_metadata[lang][meta].add(library_name)
Process an item returned by one of the spiders.
sourced/ml/mining/cmd/collect_stdlibs.py
process_item
src-d/ml-mining
9
python
def process_item(self, item, spider): (lang, library_name) = (item['lang'], item['library_name']) if (lang not in self.library_names): self.library_names[lang] = set() self.library_metadata[lang] = {} self.library_names[lang].add(library_name) for meta in item['library_metadata']: if (meta not in self.library_metadata[lang]): self.library_metadata[lang][meta] = set() self.library_metadata[lang][meta].add(library_name)
def process_item(self, item, spider): (lang, library_name) = (item['lang'], item['library_name']) if (lang not in self.library_names): self.library_names[lang] = set() self.library_metadata[lang] = {} self.library_names[lang].add(library_name) for meta in item['library_metadata']: if (meta not in self.library_metadata[lang]): self.library_metadata[lang][meta] = set() self.library_metadata[lang][meta].add(library_name)<|docstring|>Process an item returned by one of the spiders.<|endoftext|>
af77f3acfac883d48189009e9d0d913582d4014c8d385a7100a0ef3610df46c2
def _numericalize_segments(lines): 'From lines w/ separator, returns\n [(line, seg_id),]\n ' numericalized = [] borders = [] count = 0 for line in lines[1:]: if is_separator(line): count += 1 borders += [1] else: n_line = (line, count) numericalized.append(n_line) borders += [0] borders = borders[:(- 1)] return (numericalized, borders)
From lines w/ separator, returns [(line, seg_id),]
autoalign/external/run_c99.py
_numericalize_segments
pltrdy/autoalign
6
python
def _numericalize_segments(lines): 'From lines w/ separator, returns\n [(line, seg_id),]\n ' numericalized = [] borders = [] count = 0 for line in lines[1:]: if is_separator(line): count += 1 borders += [1] else: n_line = (line, count) numericalized.append(n_line) borders += [0] borders = borders[:(- 1)] return (numericalized, borders)
def _numericalize_segments(lines): 'From lines w/ separator, returns\n [(line, seg_id),]\n ' numericalized = [] borders = [] count = 0 for line in lines[1:]: if is_separator(line): count += 1 borders += [1] else: n_line = (line, count) numericalized.append(n_line) borders += [0] borders = borders[:(- 1)] return (numericalized, borders)<|docstring|>From lines w/ separator, returns [(line, seg_id),]<|endoftext|>
ca44ce7600212ef7b97f211c901e210f8216d27f5f5e1c4609033b3100c32e59
@skipUnless((db.backend_name == 'firebird'), 'Firebird-only test')
def test_firebird_double_index_creation_1317(self):
    """
    Tests foreign key creation, especially uppercase (see #61).

    Presumably a regression test for issue #1317 (double index
    creation on Firebird): an uppercase primary-key column ('ID') and
    an FK column named after an SQL keyword ('UNIQUE') must survive
    table creation plus deferred-SQL execution without raising.
    """
    # Stand-in model so the FK below can target test5a's uppercase PK.
    Test = db.mock_model(model_name='Test', db_table='test5a', db_tablespace='', pk_field_name='ID', pk_field_type=models.AutoField, pk_field_args=[])
    db.create_table('test5a', [('ID', models.AutoField(verbose_name='ID', primary_key=True, auto_created=True))])
    # FK column deliberately named 'UNIQUE' to exercise keyword quoting.
    db.create_table('test5b', [('id', models.AutoField(verbose_name='ID', primary_key=True, auto_created=True)), ('UNIQUE', models.ForeignKey(Test))])
    # FK/index creation is deferred by south; run it so failures surface.
    db.execute_deferred_sql()
Tests foreign key creation, especially uppercase (see #61)
blogproject/thirdparty/south/tests/db_firebird.py
test_firebird_double_index_creation_1317
recall704/django-r3call-blog
11
python
@skipUnless((db.backend_name == 'firebird'), 'Firebird-only test') def test_firebird_double_index_creation_1317(self): '\n \n ' Test = db.mock_model(model_name='Test', db_table='test5a', db_tablespace=, pk_field_name='ID', pk_field_type=models.AutoField, pk_field_args=[]) db.create_table('test5a', [('ID', models.AutoField(verbose_name='ID', primary_key=True, auto_created=True))]) db.create_table('test5b', [('id', models.AutoField(verbose_name='ID', primary_key=True, auto_created=True)), ('UNIQUE', models.ForeignKey(Test))]) db.execute_deferred_sql()
@skipUnless((db.backend_name == 'firebird'), 'Firebird-only test') def test_firebird_double_index_creation_1317(self): '\n \n ' Test = db.mock_model(model_name='Test', db_table='test5a', db_tablespace=, pk_field_name='ID', pk_field_type=models.AutoField, pk_field_args=[]) db.create_table('test5a', [('ID', models.AutoField(verbose_name='ID', primary_key=True, auto_created=True))]) db.create_table('test5b', [('id', models.AutoField(verbose_name='ID', primary_key=True, auto_created=True)), ('UNIQUE', models.ForeignKey(Test))]) db.execute_deferred_sql()<|docstring|>Tests foreign key creation, especially uppercase (see #61)<|endoftext|>
9fa6aeb76239e8d2ed0a48e7fe67c0cb8eb8ddb594c6d07bc823dd2fcb571300
def bpr_loss(self, users, pos, neg):
    """
    Compute the BPR (Bayesian Personalized Ranking) loss terms.

    Parameters:
        users: users list
        pos: positive items for corresponding users
        neg: negative items for corresponding users
    Return:
        (log-loss, l2-loss)
    """
    # Abstract hook: concrete recommender models must override this.
    raise NotImplementedError
Parameters: users: users list pos: positive items for corresponding users neg: negative items for corresponding users Return: (log-loss, l2-loss)
code/model.py
bpr_loss
jeongwhanchoi/LT-OCF
10
python
def bpr_loss(self, users, pos, neg): '\n Parameters:\n users: users list \n pos: positive items for corresponding users\n neg: negative items for corresponding users\n Return:\n (log-loss, l2-loss)\n ' raise NotImplementedError
def bpr_loss(self, users, pos, neg): '\n Parameters:\n users: users list \n pos: positive items for corresponding users\n neg: negative items for corresponding users\n Return:\n (log-loss, l2-loss)\n ' raise NotImplementedError<|docstring|>Parameters: users: users list pos: positive items for corresponding users neg: negative items for corresponding users Return: (log-loss, l2-loss)<|endoftext|>
90989ad2094bf813a15564c0570e6b80811fe6b9bc0bc087c1786093a7b9c733
def computer(self):
    """Propagate embeddings through the LightGCN layers and average them.

    Returns the (users, items) split of the mean over all layer
    outputs, including the layer-0 (ego) embeddings.
    """
    # One joint matrix: first num_users rows are users, the rest items.
    ego = torch.cat([self.embedding_user.weight, self.embedding_item.weight])
    layer_outputs = [ego]
    # Edge dropout on the adjacency is applied during training only.
    if self.config['dropout'] and self.training:
        graph = self.__dropout(self.keep_prob)
    else:
        graph = self.Graph
    for _ in range(self.n_layers):
        if self.A_split:
            # Adjacency stored in folds: multiply each fold, re-join rows.
            ego = torch.cat([torch.sparse.mm(part, ego) for part in graph], dim=0)
        else:
            ego = torch.sparse.mm(graph, ego)
        layer_outputs.append(ego)
    combined = torch.mean(torch.stack(layer_outputs, dim=1), dim=1)
    users, items = torch.split(combined, [self.num_users, self.num_items])
    return users, items
propagate methods for lightGCN
code/model.py
computer
jeongwhanchoi/LT-OCF
10
python
def computer(self): '\n \n ' users_emb = self.embedding_user.weight items_emb = self.embedding_item.weight all_emb = torch.cat([users_emb, items_emb]) embs = [all_emb] if self.config['dropout']: if self.training: g_droped = self.__dropout(self.keep_prob) else: g_droped = self.Graph else: g_droped = self.Graph for layer in range(self.n_layers): if self.A_split: temp_emb = [] for f in range(len(g_droped)): temp_emb.append(torch.sparse.mm(g_droped[f], all_emb)) side_emb = torch.cat(temp_emb, dim=0) all_emb = side_emb else: all_emb = torch.sparse.mm(g_droped, all_emb) embs.append(all_emb) embs = torch.stack(embs, dim=1) light_out = torch.mean(embs, dim=1) (users, items) = torch.split(light_out, [self.num_users, self.num_items]) return (users, items)
def computer(self): '\n \n ' users_emb = self.embedding_user.weight items_emb = self.embedding_item.weight all_emb = torch.cat([users_emb, items_emb]) embs = [all_emb] if self.config['dropout']: if self.training: g_droped = self.__dropout(self.keep_prob) else: g_droped = self.Graph else: g_droped = self.Graph for layer in range(self.n_layers): if self.A_split: temp_emb = [] for f in range(len(g_droped)): temp_emb.append(torch.sparse.mm(g_droped[f], all_emb)) side_emb = torch.cat(temp_emb, dim=0) all_emb = side_emb else: all_emb = torch.sparse.mm(g_droped, all_emb) embs.append(all_emb) embs = torch.stack(embs, dim=1) light_out = torch.mean(embs, dim=1) (users, items) = torch.split(light_out, [self.num_users, self.num_items]) return (users, items)<|docstring|>propagate methods for lightGCN<|endoftext|>
176f5c9d065f2f33272580d99c731ae74e6fc74c417b7081c0b032311ce78c36
def computer(self):
    """
    propagate methods for LT-NCF

    Concatenates user/item embeddings and pushes them through four ODE
    blocks — with learnable time points when
    world.config['learnable_time'] is True, fixed blocks otherwise —
    collecting each stage's output (as a residual w.r.t. its input
    unless dual_res is set), then averages all collected embeddings and
    splits the result back into (users, items).
    """
    users_emb = self.embedding_user.weight
    items_emb = self.embedding_item.weight
    # Joint matrix: first num_users rows are users, the rest are items.
    all_emb = torch.cat([users_emb, items_emb])
    embs = [all_emb]
    if self.config['dropout']:
        if self.training:
            # Edge dropout on the normalized adjacency (training only).
            g_droped = self.__dropout(self.keep_prob)
        else:
            g_droped = self.Graph
    else:
        g_droped = self.Graph
    # NOTE(review): g_droped is never used below — the ODE blocks read
    # their graph from self; confirm whether dropout should apply here.
    '\n layers\n '
    if (world.config['learnable_time'] == True):
        # Learnable time points: each block integrates between trained
        # odetime_* boundaries.
        out_1 = self.ode_block_test_1(all_emb, self.odetime_1)
        if (world.config['dual_res'] == False):
            out_1 = (out_1 - all_emb)  # residual w.r.t. block input
        embs.append(out_1)
        out_2 = self.ode_block_test_2(out_1, self.odetime_1, self.odetime_2)
        if (world.config['dual_res'] == False):
            out_2 = (out_2 - out_1)
        embs.append(out_2)
        out_3 = self.ode_block_test_3(out_2, self.odetime_2, self.odetime_3)
        if (world.config['dual_res'] == False):
            out_3 = (out_3 - out_2)
        embs.append(out_3)
        out_4 = self.ode_block_test_4(out_3, self.odetime_3)
        if (world.config['dual_res'] == False):
            out_4 = (out_4 - out_3)
        embs.append(out_4)
    elif (world.config['learnable_time'] == False):
        # Fixed time points: the residual is always taken.
        all_emb_1 = self.ode_block_1(all_emb)
        all_emb_1 = (all_emb_1 - all_emb)
        embs.append(all_emb_1)
        all_emb_2 = self.ode_block_2(all_emb_1)
        all_emb_2 = (all_emb_2 - all_emb_1)
        embs.append(all_emb_2)
        all_emb_3 = self.ode_block_3(all_emb_2)
        all_emb_3 = (all_emb_3 - all_emb_2)
        embs.append(all_emb_3)
        all_emb_4 = self.ode_block_4(all_emb_3)
        all_emb_4 = (all_emb_4 - all_emb_3)
        embs.append(all_emb_4)
    # Layer combination: mean over all collected embeddings.
    embs = torch.stack(embs, dim=1)
    light_out = torch.mean(embs, dim=1)
    (users, items) = torch.split(light_out, [self.num_users, self.num_items])
    return (users, items)
propagate methods for LT-NCF
code/model.py
computer
jeongwhanchoi/LT-OCF
10
python
def computer(self): '\n \n ' users_emb = self.embedding_user.weight items_emb = self.embedding_item.weight all_emb = torch.cat([users_emb, items_emb]) embs = [all_emb] if self.config['dropout']: if self.training: g_droped = self.__dropout(self.keep_prob) else: g_droped = self.Graph else: g_droped = self.Graph '\n layers\n ' if (world.config['learnable_time'] == True): out_1 = self.ode_block_test_1(all_emb, self.odetime_1) if (world.config['dual_res'] == False): out_1 = (out_1 - all_emb) embs.append(out_1) out_2 = self.ode_block_test_2(out_1, self.odetime_1, self.odetime_2) if (world.config['dual_res'] == False): out_2 = (out_2 - out_1) embs.append(out_2) out_3 = self.ode_block_test_3(out_2, self.odetime_2, self.odetime_3) if (world.config['dual_res'] == False): out_3 = (out_3 - out_2) embs.append(out_3) out_4 = self.ode_block_test_4(out_3, self.odetime_3) if (world.config['dual_res'] == False): out_4 = (out_4 - out_3) embs.append(out_4) elif (world.config['learnable_time'] == False): all_emb_1 = self.ode_block_1(all_emb) all_emb_1 = (all_emb_1 - all_emb) embs.append(all_emb_1) all_emb_2 = self.ode_block_2(all_emb_1) all_emb_2 = (all_emb_2 - all_emb_1) embs.append(all_emb_2) all_emb_3 = self.ode_block_3(all_emb_2) all_emb_3 = (all_emb_3 - all_emb_2) embs.append(all_emb_3) all_emb_4 = self.ode_block_4(all_emb_3) all_emb_4 = (all_emb_4 - all_emb_3) embs.append(all_emb_4) embs = torch.stack(embs, dim=1) light_out = torch.mean(embs, dim=1) (users, items) = torch.split(light_out, [self.num_users, self.num_items]) return (users, items)
def computer(self): '\n \n ' users_emb = self.embedding_user.weight items_emb = self.embedding_item.weight all_emb = torch.cat([users_emb, items_emb]) embs = [all_emb] if self.config['dropout']: if self.training: g_droped = self.__dropout(self.keep_prob) else: g_droped = self.Graph else: g_droped = self.Graph '\n layers\n ' if (world.config['learnable_time'] == True): out_1 = self.ode_block_test_1(all_emb, self.odetime_1) if (world.config['dual_res'] == False): out_1 = (out_1 - all_emb) embs.append(out_1) out_2 = self.ode_block_test_2(out_1, self.odetime_1, self.odetime_2) if (world.config['dual_res'] == False): out_2 = (out_2 - out_1) embs.append(out_2) out_3 = self.ode_block_test_3(out_2, self.odetime_2, self.odetime_3) if (world.config['dual_res'] == False): out_3 = (out_3 - out_2) embs.append(out_3) out_4 = self.ode_block_test_4(out_3, self.odetime_3) if (world.config['dual_res'] == False): out_4 = (out_4 - out_3) embs.append(out_4) elif (world.config['learnable_time'] == False): all_emb_1 = self.ode_block_1(all_emb) all_emb_1 = (all_emb_1 - all_emb) embs.append(all_emb_1) all_emb_2 = self.ode_block_2(all_emb_1) all_emb_2 = (all_emb_2 - all_emb_1) embs.append(all_emb_2) all_emb_3 = self.ode_block_3(all_emb_2) all_emb_3 = (all_emb_3 - all_emb_2) embs.append(all_emb_3) all_emb_4 = self.ode_block_4(all_emb_3) all_emb_4 = (all_emb_4 - all_emb_3) embs.append(all_emb_4) embs = torch.stack(embs, dim=1) light_out = torch.mean(embs, dim=1) (users, items) = torch.split(light_out, [self.num_users, self.num_items]) return (users, items)<|docstring|>propagate methods for LT-NCF<|endoftext|>
ca81d57e4a30000e042a73208fcd9eff431ef654c8b62d4ef9cf73aca3e7a658
def computer(self):
    """
    propagate methods for Continuous-Time lightGCN

    Three-block variant: embeddings pass through three ODE blocks
    (learnable time points when world.config['learnable_time'] is
    True, fixed blocks otherwise); each stage's output is collected —
    as a residual unless dual_res is set — then everything is averaged
    and split into (users, items).
    """
    users_emb = self.embedding_user.weight
    items_emb = self.embedding_item.weight
    # Joint matrix: first num_users rows are users, the rest are items.
    all_emb = torch.cat([users_emb, items_emb])
    embs = [all_emb]
    if self.config['dropout']:
        if self.training:
            # Edge dropout on the normalized adjacency (training only).
            g_droped = self.__dropout(self.keep_prob)
        else:
            g_droped = self.Graph
    else:
        g_droped = self.Graph
    # NOTE(review): g_droped is never used below — the ODE blocks read
    # their graph from self; confirm whether dropout should apply here.
    '\n layers\n '
    if (world.config['learnable_time'] == True):
        out_1 = self.ode_block_test_1(all_emb, self.odetime_1)
        if (world.config['dual_res'] == False):
            out_1 = (out_1 - all_emb)  # residual w.r.t. block input
        embs.append(out_1)
        out_2 = self.ode_block_test_2(out_1, self.odetime_1, self.odetime_2)
        if (world.config['dual_res'] == False):
            out_2 = (out_2 - out_1)
        embs.append(out_2)
        out_3 = self.ode_block_test_3(out_2, self.odetime_2)
        if (world.config['dual_res'] == False):
            out_3 = (out_3 - out_2)
        embs.append(out_3)
    elif (world.config['learnable_time'] == False):
        # Fixed time points: the residual is always taken.
        all_emb_1 = self.ode_block_1(all_emb)
        all_emb_1 = (all_emb_1 - all_emb)
        embs.append(all_emb_1)
        all_emb_2 = self.ode_block_2(all_emb_1)
        all_emb_2 = (all_emb_2 - all_emb_1)
        embs.append(all_emb_2)
        all_emb_3 = self.ode_block_3(all_emb_2)
        all_emb_3 = (all_emb_3 - all_emb_2)
        embs.append(all_emb_3)
    # Layer combination: mean over all collected embeddings.
    embs = torch.stack(embs, dim=1)
    light_out = torch.mean(embs, dim=1)
    (users, items) = torch.split(light_out, [self.num_users, self.num_items])
    return (users, items)
propagate methods for Continuous-Time lightGCN
code/model.py
computer
jeongwhanchoi/LT-OCF
10
python
def computer(self): '\n \n ' users_emb = self.embedding_user.weight items_emb = self.embedding_item.weight all_emb = torch.cat([users_emb, items_emb]) embs = [all_emb] if self.config['dropout']: if self.training: g_droped = self.__dropout(self.keep_prob) else: g_droped = self.Graph else: g_droped = self.Graph '\n layers\n ' if (world.config['learnable_time'] == True): out_1 = self.ode_block_test_1(all_emb, self.odetime_1) if (world.config['dual_res'] == False): out_1 = (out_1 - all_emb) embs.append(out_1) out_2 = self.ode_block_test_2(out_1, self.odetime_1, self.odetime_2) if (world.config['dual_res'] == False): out_2 = (out_2 - out_1) embs.append(out_2) out_3 = self.ode_block_test_3(out_2, self.odetime_2) if (world.config['dual_res'] == False): out_3 = (out_3 - out_2) embs.append(out_3) elif (world.config['learnable_time'] == False): all_emb_1 = self.ode_block_1(all_emb) all_emb_1 = (all_emb_1 - all_emb) embs.append(all_emb_1) all_emb_2 = self.ode_block_2(all_emb_1) all_emb_2 = (all_emb_2 - all_emb_1) embs.append(all_emb_2) all_emb_3 = self.ode_block_3(all_emb_2) all_emb_3 = (all_emb_3 - all_emb_2) embs.append(all_emb_3) embs = torch.stack(embs, dim=1) light_out = torch.mean(embs, dim=1) (users, items) = torch.split(light_out, [self.num_users, self.num_items]) return (users, items)
def computer(self): '\n \n ' users_emb = self.embedding_user.weight items_emb = self.embedding_item.weight all_emb = torch.cat([users_emb, items_emb]) embs = [all_emb] if self.config['dropout']: if self.training: g_droped = self.__dropout(self.keep_prob) else: g_droped = self.Graph else: g_droped = self.Graph '\n layers\n ' if (world.config['learnable_time'] == True): out_1 = self.ode_block_test_1(all_emb, self.odetime_1) if (world.config['dual_res'] == False): out_1 = (out_1 - all_emb) embs.append(out_1) out_2 = self.ode_block_test_2(out_1, self.odetime_1, self.odetime_2) if (world.config['dual_res'] == False): out_2 = (out_2 - out_1) embs.append(out_2) out_3 = self.ode_block_test_3(out_2, self.odetime_2) if (world.config['dual_res'] == False): out_3 = (out_3 - out_2) embs.append(out_3) elif (world.config['learnable_time'] == False): all_emb_1 = self.ode_block_1(all_emb) all_emb_1 = (all_emb_1 - all_emb) embs.append(all_emb_1) all_emb_2 = self.ode_block_2(all_emb_1) all_emb_2 = (all_emb_2 - all_emb_1) embs.append(all_emb_2) all_emb_3 = self.ode_block_3(all_emb_2) all_emb_3 = (all_emb_3 - all_emb_2) embs.append(all_emb_3) embs = torch.stack(embs, dim=1) light_out = torch.mean(embs, dim=1) (users, items) = torch.split(light_out, [self.num_users, self.num_items]) return (users, items)<|docstring|>propagate methods for Continuous-Time lightGCN<|endoftext|>
7f3d349387412f906e44136879b3bbc96393a00b9a5d5a0d797a25c818d773f5
def computer(self):
    """
    propagate methods for Continuous-Time lightGCN

    Two-block variant: embeddings pass through two ODE blocks
    (learnable time points when world.config['learnable_time'] is
    True, fixed blocks otherwise); each stage's output is collected —
    as a residual unless dual_res is set — then everything is averaged
    and split into (users, items).
    """
    users_emb = self.embedding_user.weight
    items_emb = self.embedding_item.weight
    # Joint matrix: first num_users rows are users, the rest are items.
    all_emb = torch.cat([users_emb, items_emb])
    embs = [all_emb]
    if self.config['dropout']:
        if self.training:
            # Edge dropout on the normalized adjacency (training only).
            g_droped = self.__dropout(self.keep_prob)
        else:
            g_droped = self.Graph
    else:
        g_droped = self.Graph
    # NOTE(review): g_droped is never used below — the ODE blocks read
    # their graph from self; confirm whether dropout should apply here.
    '\n layers\n '
    if (world.config['learnable_time'] == True):
        out_1 = self.ode_block_test_1(all_emb, self.odetime_1)
        if (world.config['dual_res'] == False):
            out_1 = (out_1 - all_emb)  # residual w.r.t. block input
        embs.append(out_1)
        out_2 = self.ode_block_test_2(out_1, self.odetime_1)
        if (world.config['dual_res'] == False):
            out_2 = (out_2 - out_1)
        embs.append(out_2)
    elif (world.config['learnable_time'] == False):
        # Fixed time points: the residual is always taken.
        all_emb_1 = self.ode_block_1(all_emb)
        all_emb_1 = (all_emb_1 - all_emb)
        embs.append(all_emb_1)
        all_emb_2 = self.ode_block_2(all_emb_1)
        all_emb_2 = (all_emb_2 - all_emb_1)
        embs.append(all_emb_2)
    # Layer combination: mean over all collected embeddings.
    embs = torch.stack(embs, dim=1)
    light_out = torch.mean(embs, dim=1)
    (users, items) = torch.split(light_out, [self.num_users, self.num_items])
    return (users, items)
propagate methods for Continuous-Time lightGCN
code/model.py
computer
jeongwhanchoi/LT-OCF
10
python
def computer(self): '\n \n ' users_emb = self.embedding_user.weight items_emb = self.embedding_item.weight all_emb = torch.cat([users_emb, items_emb]) embs = [all_emb] if self.config['dropout']: if self.training: g_droped = self.__dropout(self.keep_prob) else: g_droped = self.Graph else: g_droped = self.Graph '\n layers\n ' if (world.config['learnable_time'] == True): out_1 = self.ode_block_test_1(all_emb, self.odetime_1) if (world.config['dual_res'] == False): out_1 = (out_1 - all_emb) embs.append(out_1) out_2 = self.ode_block_test_2(out_1, self.odetime_1) if (world.config['dual_res'] == False): out_2 = (out_2 - out_1) embs.append(out_2) elif (world.config['learnable_time'] == False): all_emb_1 = self.ode_block_1(all_emb) all_emb_1 = (all_emb_1 - all_emb) embs.append(all_emb_1) all_emb_2 = self.ode_block_2(all_emb_1) all_emb_2 = (all_emb_2 - all_emb_1) embs.append(all_emb_2) embs = torch.stack(embs, dim=1) light_out = torch.mean(embs, dim=1) (users, items) = torch.split(light_out, [self.num_users, self.num_items]) return (users, items)
def computer(self): '\n \n ' users_emb = self.embedding_user.weight items_emb = self.embedding_item.weight all_emb = torch.cat([users_emb, items_emb]) embs = [all_emb] if self.config['dropout']: if self.training: g_droped = self.__dropout(self.keep_prob) else: g_droped = self.Graph else: g_droped = self.Graph '\n layers\n ' if (world.config['learnable_time'] == True): out_1 = self.ode_block_test_1(all_emb, self.odetime_1) if (world.config['dual_res'] == False): out_1 = (out_1 - all_emb) embs.append(out_1) out_2 = self.ode_block_test_2(out_1, self.odetime_1) if (world.config['dual_res'] == False): out_2 = (out_2 - out_1) embs.append(out_2) elif (world.config['learnable_time'] == False): all_emb_1 = self.ode_block_1(all_emb) all_emb_1 = (all_emb_1 - all_emb) embs.append(all_emb_1) all_emb_2 = self.ode_block_2(all_emb_1) all_emb_2 = (all_emb_2 - all_emb_1) embs.append(all_emb_2) embs = torch.stack(embs, dim=1) light_out = torch.mean(embs, dim=1) (users, items) = torch.split(light_out, [self.num_users, self.num_items]) return (users, items)<|docstring|>propagate methods for Continuous-Time lightGCN<|endoftext|>
5efa94a2f6c9db2f6547f94f9139acb9bc4ed2514e868829e244b3e6531f05b7
def __init__(__self__,
             resource_name: str,
             opts: Optional[pulumi.ResourceOptions] = None,
             group_ids: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]] = None,
             parent_name: Optional[pulumi.Input[str]] = None,
             parent_type: Optional[pulumi.Input[str]] = None,
             private_endpoint: Optional[pulumi.Input[pulumi.InputType['PrivateEndpointArgs']]] = None,
             private_endpoint_connection_name: Optional[pulumi.Input[str]] = None,
             private_link_service_connection_state: Optional[pulumi.Input[pulumi.InputType['ConnectionStateArgs']]] = None,
             provisioning_state: Optional[pulumi.Input[Union[str, 'ResourceProvisioningState']]] = None,
             resource_group_name: Optional[pulumi.Input[str]] = None,
             __props__=None,
             __name__=None,
             __opts__=None):
    """
    Latest API Version: 2020-06-01.

    :param str resource_name: The name of the resource.
    :param pulumi.ResourceOptions opts: Options for the resource.
    :param pulumi.Input[Sequence[pulumi.Input[str]]] group_ids: GroupIds from the private link service resource.
    :param pulumi.Input[str] parent_name: The name of the parent resource (namely, either, the topic name or domain name).
    :param pulumi.Input[str] parent_type: The type of the parent resource. This can be either 'topics' or 'domains'.
    :param pulumi.Input[pulumi.InputType['PrivateEndpointArgs']] private_endpoint: The Private Endpoint resource for this Connection.
    :param pulumi.Input[str] private_endpoint_connection_name: The name of the private endpoint connection connection.
    :param pulumi.Input[pulumi.InputType['ConnectionStateArgs']] private_link_service_connection_state: Details about the state of the connection.
    :param pulumi.Input[Union[str, 'ResourceProvisioningState']] provisioning_state: Provisioning state of the Private Endpoint Connection.
    :param pulumi.Input[str] resource_group_name: The name of the resource group within the user's subscription.
    """
    # Whole 'latest' module is deprecated; warn on every instantiation.
    pulumi.log.warn("""PrivateEndpointConnection is deprecated: The 'latest' version is deprecated. 
Please migrate to the resource in the top-level module: 'azure-nextgen:eventgrid:PrivateEndpointConnection'.""")
    # Legacy positional escape hatches, kept for backward compatibility.
    if (__name__ is not None):
        warnings.warn('explicit use of __name__ is deprecated', DeprecationWarning)
        resource_name = __name__
    if (__opts__ is not None):
        warnings.warn("explicit use of __opts__ is deprecated, use 'opts' instead", DeprecationWarning)
        opts = __opts__
    if (opts is None):
        opts = pulumi.ResourceOptions()
    if (not isinstance(opts, pulumi.ResourceOptions)):
        raise TypeError('Expected resource options to be a ResourceOptions instance')
    if (opts.version is None):
        opts.version = _utilities.get_version()
    # No opts.id means we are creating (not looking up) the resource,
    # so the input properties are assembled and validated here.
    if (opts.id is None):
        if (__props__ is not None):
            raise TypeError('__props__ is only valid when passed in combination with a valid opts.id to get an existing resource')
        __props__ = dict()
        __props__['group_ids'] = group_ids
        if ((parent_name is None) and (not opts.urn)):
            raise TypeError("Missing required property 'parent_name'")
        __props__['parent_name'] = parent_name
        if ((parent_type is None) and (not opts.urn)):
            raise TypeError("Missing required property 'parent_type'")
        __props__['parent_type'] = parent_type
        __props__['private_endpoint'] = private_endpoint
        __props__['private_endpoint_connection_name'] = private_endpoint_connection_name
        __props__['private_link_service_connection_state'] = private_link_service_connection_state
        __props__['provisioning_state'] = provisioning_state
        if ((resource_group_name is None) and (not opts.urn)):
            raise TypeError("Missing required property 'resource_group_name'")
        __props__['resource_group_name'] = resource_group_name
        # Output-only properties start undefined; the provider fills them.
        __props__['name'] = None
        __props__['type'] = None
    # Aliases let the engine match states created under other API versions.
    alias_opts = pulumi.ResourceOptions(aliases=[pulumi.Alias(type_='azure-nextgen:eventgrid:PrivateEndpointConnection'), pulumi.Alias(type_='azure-nextgen:eventgrid/v20200401preview:PrivateEndpointConnection'), pulumi.Alias(type_='azure-nextgen:eventgrid/v20200601:PrivateEndpointConnection'), pulumi.Alias(type_='azure-nextgen:eventgrid/v20201015preview:PrivateEndpointConnection')])
    opts = pulumi.ResourceOptions.merge(opts, alias_opts)
    super(PrivateEndpointConnection, __self__).__init__('azure-nextgen:eventgrid/latest:PrivateEndpointConnection', resource_name, __props__, opts)
Latest API Version: 2020-06-01. :param str resource_name: The name of the resource. :param pulumi.ResourceOptions opts: Options for the resource. :param pulumi.Input[Sequence[pulumi.Input[str]]] group_ids: GroupIds from the private link service resource. :param pulumi.Input[str] parent_name: The name of the parent resource (namely, either, the topic name or domain name). :param pulumi.Input[str] parent_type: The type of the parent resource. This can be either 'topics' or 'domains'. :param pulumi.Input[pulumi.InputType['PrivateEndpointArgs']] private_endpoint: The Private Endpoint resource for this Connection. :param pulumi.Input[str] private_endpoint_connection_name: The name of the private endpoint connection connection. :param pulumi.Input[pulumi.InputType['ConnectionStateArgs']] private_link_service_connection_state: Details about the state of the connection. :param pulumi.Input[Union[str, 'ResourceProvisioningState']] provisioning_state: Provisioning state of the Private Endpoint Connection. :param pulumi.Input[str] resource_group_name: The name of the resource group within the user's subscription.
sdk/python/pulumi_azure_nextgen/eventgrid/latest/private_endpoint_connection.py
__init__
pulumi/pulumi-azure-nextgen
31
python
def __init__(__self__, resource_name: str, opts: Optional[pulumi.ResourceOptions]=None, group_ids: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]]=None, parent_name: Optional[pulumi.Input[str]]=None, parent_type: Optional[pulumi.Input[str]]=None, private_endpoint: Optional[pulumi.Input[pulumi.InputType['PrivateEndpointArgs']]]=None, private_endpoint_connection_name: Optional[pulumi.Input[str]]=None, private_link_service_connection_state: Optional[pulumi.Input[pulumi.InputType['ConnectionStateArgs']]]=None, provisioning_state: Optional[pulumi.Input[Union[(str, 'ResourceProvisioningState')]]]=None, resource_group_name: Optional[pulumi.Input[str]]=None, __props__=None, __name__=None, __opts__=None): "\n Latest API Version: 2020-06-01.\n\n :param str resource_name: The name of the resource.\n :param pulumi.ResourceOptions opts: Options for the resource.\n :param pulumi.Input[Sequence[pulumi.Input[str]]] group_ids: GroupIds from the private link service resource.\n :param pulumi.Input[str] parent_name: The name of the parent resource (namely, either, the topic name or domain name).\n :param pulumi.Input[str] parent_type: The type of the parent resource. This can be either 'topics' or 'domains'.\n :param pulumi.Input[pulumi.InputType['PrivateEndpointArgs']] private_endpoint: The Private Endpoint resource for this Connection.\n :param pulumi.Input[str] private_endpoint_connection_name: The name of the private endpoint connection connection.\n :param pulumi.Input[pulumi.InputType['ConnectionStateArgs']] private_link_service_connection_state: Details about the state of the connection.\n :param pulumi.Input[Union[str, 'ResourceProvisioningState']] provisioning_state: Provisioning state of the Private Endpoint Connection.\n :param pulumi.Input[str] resource_group_name: The name of the resource group within the user's subscription.\n " pulumi.log.warn("PrivateEndpointConnection is deprecated: The 'latest' version is deprecated. 
Please migrate to the resource in the top-level module: 'azure-nextgen:eventgrid:PrivateEndpointConnection'.") if (__name__ is not None): warnings.warn('explicit use of __name__ is deprecated', DeprecationWarning) resource_name = __name__ if (__opts__ is not None): warnings.warn("explicit use of __opts__ is deprecated, use 'opts' instead", DeprecationWarning) opts = __opts__ if (opts is None): opts = pulumi.ResourceOptions() if (not isinstance(opts, pulumi.ResourceOptions)): raise TypeError('Expected resource options to be a ResourceOptions instance') if (opts.version is None): opts.version = _utilities.get_version() if (opts.id is None): if (__props__ is not None): raise TypeError('__props__ is only valid when passed in combination with a valid opts.id to get an existing resource') __props__ = dict() __props__['group_ids'] = group_ids if ((parent_name is None) and (not opts.urn)): raise TypeError("Missing required property 'parent_name'") __props__['parent_name'] = parent_name if ((parent_type is None) and (not opts.urn)): raise TypeError("Missing required property 'parent_type'") __props__['parent_type'] = parent_type __props__['private_endpoint'] = private_endpoint __props__['private_endpoint_connection_name'] = private_endpoint_connection_name __props__['private_link_service_connection_state'] = private_link_service_connection_state __props__['provisioning_state'] = provisioning_state if ((resource_group_name is None) and (not opts.urn)): raise TypeError("Missing required property 'resource_group_name'") __props__['resource_group_name'] = resource_group_name __props__['name'] = None __props__['type'] = None alias_opts = pulumi.ResourceOptions(aliases=[pulumi.Alias(type_='azure-nextgen:eventgrid:PrivateEndpointConnection'), pulumi.Alias(type_='azure-nextgen:eventgrid/v20200401preview:PrivateEndpointConnection'), pulumi.Alias(type_='azure-nextgen:eventgrid/v20200601:PrivateEndpointConnection'), 
pulumi.Alias(type_='azure-nextgen:eventgrid/v20201015preview:PrivateEndpointConnection')]) opts = pulumi.ResourceOptions.merge(opts, alias_opts) super(PrivateEndpointConnection, __self__).__init__('azure-nextgen:eventgrid/latest:PrivateEndpointConnection', resource_name, __props__, opts)
def __init__(__self__, resource_name: str, opts: Optional[pulumi.ResourceOptions]=None, group_ids: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]]=None, parent_name: Optional[pulumi.Input[str]]=None, parent_type: Optional[pulumi.Input[str]]=None, private_endpoint: Optional[pulumi.Input[pulumi.InputType['PrivateEndpointArgs']]]=None, private_endpoint_connection_name: Optional[pulumi.Input[str]]=None, private_link_service_connection_state: Optional[pulumi.Input[pulumi.InputType['ConnectionStateArgs']]]=None, provisioning_state: Optional[pulumi.Input[Union[(str, 'ResourceProvisioningState')]]]=None, resource_group_name: Optional[pulumi.Input[str]]=None, __props__=None, __name__=None, __opts__=None): "\n Latest API Version: 2020-06-01.\n\n :param str resource_name: The name of the resource.\n :param pulumi.ResourceOptions opts: Options for the resource.\n :param pulumi.Input[Sequence[pulumi.Input[str]]] group_ids: GroupIds from the private link service resource.\n :param pulumi.Input[str] parent_name: The name of the parent resource (namely, either, the topic name or domain name).\n :param pulumi.Input[str] parent_type: The type of the parent resource. This can be either 'topics' or 'domains'.\n :param pulumi.Input[pulumi.InputType['PrivateEndpointArgs']] private_endpoint: The Private Endpoint resource for this Connection.\n :param pulumi.Input[str] private_endpoint_connection_name: The name of the private endpoint connection connection.\n :param pulumi.Input[pulumi.InputType['ConnectionStateArgs']] private_link_service_connection_state: Details about the state of the connection.\n :param pulumi.Input[Union[str, 'ResourceProvisioningState']] provisioning_state: Provisioning state of the Private Endpoint Connection.\n :param pulumi.Input[str] resource_group_name: The name of the resource group within the user's subscription.\n " pulumi.log.warn("PrivateEndpointConnection is deprecated: The 'latest' version is deprecated. 
Please migrate to the resource in the top-level module: 'azure-nextgen:eventgrid:PrivateEndpointConnection'.") if (__name__ is not None): warnings.warn('explicit use of __name__ is deprecated', DeprecationWarning) resource_name = __name__ if (__opts__ is not None): warnings.warn("explicit use of __opts__ is deprecated, use 'opts' instead", DeprecationWarning) opts = __opts__ if (opts is None): opts = pulumi.ResourceOptions() if (not isinstance(opts, pulumi.ResourceOptions)): raise TypeError('Expected resource options to be a ResourceOptions instance') if (opts.version is None): opts.version = _utilities.get_version() if (opts.id is None): if (__props__ is not None): raise TypeError('__props__ is only valid when passed in combination with a valid opts.id to get an existing resource') __props__ = dict() __props__['group_ids'] = group_ids if ((parent_name is None) and (not opts.urn)): raise TypeError("Missing required property 'parent_name'") __props__['parent_name'] = parent_name if ((parent_type is None) and (not opts.urn)): raise TypeError("Missing required property 'parent_type'") __props__['parent_type'] = parent_type __props__['private_endpoint'] = private_endpoint __props__['private_endpoint_connection_name'] = private_endpoint_connection_name __props__['private_link_service_connection_state'] = private_link_service_connection_state __props__['provisioning_state'] = provisioning_state if ((resource_group_name is None) and (not opts.urn)): raise TypeError("Missing required property 'resource_group_name'") __props__['resource_group_name'] = resource_group_name __props__['name'] = None __props__['type'] = None alias_opts = pulumi.ResourceOptions(aliases=[pulumi.Alias(type_='azure-nextgen:eventgrid:PrivateEndpointConnection'), pulumi.Alias(type_='azure-nextgen:eventgrid/v20200401preview:PrivateEndpointConnection'), pulumi.Alias(type_='azure-nextgen:eventgrid/v20200601:PrivateEndpointConnection'), 
pulumi.Alias(type_='azure-nextgen:eventgrid/v20201015preview:PrivateEndpointConnection')]) opts = pulumi.ResourceOptions.merge(opts, alias_opts) super(PrivateEndpointConnection, __self__).__init__('azure-nextgen:eventgrid/latest:PrivateEndpointConnection', resource_name, __props__, opts)<|docstring|>Latest API Version: 2020-06-01. :param str resource_name: The name of the resource. :param pulumi.ResourceOptions opts: Options for the resource. :param pulumi.Input[Sequence[pulumi.Input[str]]] group_ids: GroupIds from the private link service resource. :param pulumi.Input[str] parent_name: The name of the parent resource (namely, either, the topic name or domain name). :param pulumi.Input[str] parent_type: The type of the parent resource. This can be either 'topics' or 'domains'. :param pulumi.Input[pulumi.InputType['PrivateEndpointArgs']] private_endpoint: The Private Endpoint resource for this Connection. :param pulumi.Input[str] private_endpoint_connection_name: The name of the private endpoint connection connection. :param pulumi.Input[pulumi.InputType['ConnectionStateArgs']] private_link_service_connection_state: Details about the state of the connection. :param pulumi.Input[Union[str, 'ResourceProvisioningState']] provisioning_state: Provisioning state of the Private Endpoint Connection. :param pulumi.Input[str] resource_group_name: The name of the resource group within the user's subscription.<|endoftext|>
8af4b373da91160a8646ea83299189bb34963d25f3a5125344e202d6790b51a1
@staticmethod def get(resource_name: str, id: pulumi.Input[str], opts: Optional[pulumi.ResourceOptions]=None) -> 'PrivateEndpointConnection': "\n Get an existing PrivateEndpointConnection resource's state with the given name, id, and optional extra\n properties used to qualify the lookup.\n\n :param str resource_name: The unique name of the resulting resource.\n :param pulumi.Input[str] id: The unique provider ID of the resource to lookup.\n :param pulumi.ResourceOptions opts: Options for the resource.\n " opts = pulumi.ResourceOptions.merge(opts, pulumi.ResourceOptions(id=id)) __props__ = dict() return PrivateEndpointConnection(resource_name, opts=opts, __props__=__props__)
Get an existing PrivateEndpointConnection resource's state with the given name, id, and optional extra properties used to qualify the lookup. :param str resource_name: The unique name of the resulting resource. :param pulumi.Input[str] id: The unique provider ID of the resource to lookup. :param pulumi.ResourceOptions opts: Options for the resource.
sdk/python/pulumi_azure_nextgen/eventgrid/latest/private_endpoint_connection.py
get
pulumi/pulumi-azure-nextgen
31
python
@staticmethod def get(resource_name: str, id: pulumi.Input[str], opts: Optional[pulumi.ResourceOptions]=None) -> 'PrivateEndpointConnection': "\n Get an existing PrivateEndpointConnection resource's state with the given name, id, and optional extra\n properties used to qualify the lookup.\n\n :param str resource_name: The unique name of the resulting resource.\n :param pulumi.Input[str] id: The unique provider ID of the resource to lookup.\n :param pulumi.ResourceOptions opts: Options for the resource.\n " opts = pulumi.ResourceOptions.merge(opts, pulumi.ResourceOptions(id=id)) __props__ = dict() return PrivateEndpointConnection(resource_name, opts=opts, __props__=__props__)
@staticmethod def get(resource_name: str, id: pulumi.Input[str], opts: Optional[pulumi.ResourceOptions]=None) -> 'PrivateEndpointConnection': "\n Get an existing PrivateEndpointConnection resource's state with the given name, id, and optional extra\n properties used to qualify the lookup.\n\n :param str resource_name: The unique name of the resulting resource.\n :param pulumi.Input[str] id: The unique provider ID of the resource to lookup.\n :param pulumi.ResourceOptions opts: Options for the resource.\n " opts = pulumi.ResourceOptions.merge(opts, pulumi.ResourceOptions(id=id)) __props__ = dict() return PrivateEndpointConnection(resource_name, opts=opts, __props__=__props__)<|docstring|>Get an existing PrivateEndpointConnection resource's state with the given name, id, and optional extra properties used to qualify the lookup. :param str resource_name: The unique name of the resulting resource. :param pulumi.Input[str] id: The unique provider ID of the resource to lookup. :param pulumi.ResourceOptions opts: Options for the resource.<|endoftext|>
318a2ddceb17cbd9349392e58aeb7df68b0533c61d0ee9f3b4a69b72d4685df8
@property @pulumi.getter(name='groupIds') def group_ids(self) -> pulumi.Output[Optional[Sequence[str]]]: '\n GroupIds from the private link service resource.\n ' return pulumi.get(self, 'group_ids')
GroupIds from the private link service resource.
sdk/python/pulumi_azure_nextgen/eventgrid/latest/private_endpoint_connection.py
group_ids
pulumi/pulumi-azure-nextgen
31
python
@property @pulumi.getter(name='groupIds') def group_ids(self) -> pulumi.Output[Optional[Sequence[str]]]: '\n \n ' return pulumi.get(self, 'group_ids')
@property @pulumi.getter(name='groupIds') def group_ids(self) -> pulumi.Output[Optional[Sequence[str]]]: '\n \n ' return pulumi.get(self, 'group_ids')<|docstring|>GroupIds from the private link service resource.<|endoftext|>
bcd4e491ec7afad8a93b7cf0506d839503a2771afbda9c7e0d5ae437a7f86ab2
@property @pulumi.getter def name(self) -> pulumi.Output[str]: '\n Name of the resource.\n ' return pulumi.get(self, 'name')
Name of the resource.
sdk/python/pulumi_azure_nextgen/eventgrid/latest/private_endpoint_connection.py
name
pulumi/pulumi-azure-nextgen
31
python
@property @pulumi.getter def name(self) -> pulumi.Output[str]: '\n \n ' return pulumi.get(self, 'name')
@property @pulumi.getter def name(self) -> pulumi.Output[str]: '\n \n ' return pulumi.get(self, 'name')<|docstring|>Name of the resource.<|endoftext|>
d60e83cbd9868dd5586c057be65a5ff5462cf86c9ca93a9d63907065d1d52005
@property @pulumi.getter(name='privateEndpoint') def private_endpoint(self) -> pulumi.Output[Optional['outputs.PrivateEndpointResponse']]: '\n The Private Endpoint resource for this Connection.\n ' return pulumi.get(self, 'private_endpoint')
The Private Endpoint resource for this Connection.
sdk/python/pulumi_azure_nextgen/eventgrid/latest/private_endpoint_connection.py
private_endpoint
pulumi/pulumi-azure-nextgen
31
python
@property @pulumi.getter(name='privateEndpoint') def private_endpoint(self) -> pulumi.Output[Optional['outputs.PrivateEndpointResponse']]: '\n \n ' return pulumi.get(self, 'private_endpoint')
@property @pulumi.getter(name='privateEndpoint') def private_endpoint(self) -> pulumi.Output[Optional['outputs.PrivateEndpointResponse']]: '\n \n ' return pulumi.get(self, 'private_endpoint')<|docstring|>The Private Endpoint resource for this Connection.<|endoftext|>
a284ff04eaa15e46518cd2596015029557616536973c90a4bca06a981c2c18e9
@property @pulumi.getter(name='privateLinkServiceConnectionState') def private_link_service_connection_state(self) -> pulumi.Output[Optional['outputs.ConnectionStateResponse']]: '\n Details about the state of the connection.\n ' return pulumi.get(self, 'private_link_service_connection_state')
Details about the state of the connection.
sdk/python/pulumi_azure_nextgen/eventgrid/latest/private_endpoint_connection.py
private_link_service_connection_state
pulumi/pulumi-azure-nextgen
31
python
@property @pulumi.getter(name='privateLinkServiceConnectionState') def private_link_service_connection_state(self) -> pulumi.Output[Optional['outputs.ConnectionStateResponse']]: '\n \n ' return pulumi.get(self, 'private_link_service_connection_state')
@property @pulumi.getter(name='privateLinkServiceConnectionState') def private_link_service_connection_state(self) -> pulumi.Output[Optional['outputs.ConnectionStateResponse']]: '\n \n ' return pulumi.get(self, 'private_link_service_connection_state')<|docstring|>Details about the state of the connection.<|endoftext|>
936e227375c47c766c96603cacb77c9dcd92d86624860d483bcf1bd64da4284c
@property @pulumi.getter(name='provisioningState') def provisioning_state(self) -> pulumi.Output[Optional[str]]: '\n Provisioning state of the Private Endpoint Connection.\n ' return pulumi.get(self, 'provisioning_state')
Provisioning state of the Private Endpoint Connection.
sdk/python/pulumi_azure_nextgen/eventgrid/latest/private_endpoint_connection.py
provisioning_state
pulumi/pulumi-azure-nextgen
31
python
@property @pulumi.getter(name='provisioningState') def provisioning_state(self) -> pulumi.Output[Optional[str]]: '\n \n ' return pulumi.get(self, 'provisioning_state')
@property @pulumi.getter(name='provisioningState') def provisioning_state(self) -> pulumi.Output[Optional[str]]: '\n \n ' return pulumi.get(self, 'provisioning_state')<|docstring|>Provisioning state of the Private Endpoint Connection.<|endoftext|>
457c6f0908ee597dc451e7f877aac89c8dfb4133c38a42a4736de0ed4c34700a
@property @pulumi.getter def type(self) -> pulumi.Output[str]: '\n Type of the resource.\n ' return pulumi.get(self, 'type')
Type of the resource.
sdk/python/pulumi_azure_nextgen/eventgrid/latest/private_endpoint_connection.py
type
pulumi/pulumi-azure-nextgen
31
python
@property @pulumi.getter def type(self) -> pulumi.Output[str]: '\n \n ' return pulumi.get(self, 'type')
@property @pulumi.getter def type(self) -> pulumi.Output[str]: '\n \n ' return pulumi.get(self, 'type')<|docstring|>Type of the resource.<|endoftext|>
da685dbd2e72394aa514e907774775aa2450227e0179372c105888c9b29aa3b4
def __init__(self, n_differences=1, enhance=True, vectorizer=Vectorizer(complexity=3), n_jobs=(- 1), random_state=1): "Generate sequences.\n\n Start from input sequences that are 'better' if enhance is set to True\n ('worse' otherwise) given the set of sequences used in the fit phase.\n\n Parameters\n ----------\n n_differences : int (default 1)\n Number of characters that differ for the generated sequence from\n the original input sequence.\n\n enhance : bool (default True)\n If set to True then the score computed by the estimator will be\n higher for the sequences generated than for the input sequences.\n If False than the score will be lower.\n\n vectorizer : EDeN sequence vectorizer\n The vectorizer to map sequences to sparse vectors.\n\n n_jobs : int (default -1)\n The number of cores to use in parallel. -1 indicates all available.\n\n random_state: int (default 1)\n The random seed.\n " self.random_state = random_state self.n_jobs = n_jobs self.n_differences = n_differences self.enhance = enhance self.vectorizer = vectorizer self.estimator = None
Generate sequences. Start from input sequences that are 'better' if enhance is set to True ('worse' otherwise) given the set of sequences used in the fit phase. Parameters ---------- n_differences : int (default 1) Number of characters that differ for the generated sequence from the original input sequence. enhance : bool (default True) If set to True then the score computed by the estimator will be higher for the sequences generated than for the input sequences. If False than the score will be lower. vectorizer : EDeN sequence vectorizer The vectorizer to map sequences to sparse vectors. n_jobs : int (default -1) The number of cores to use in parallel. -1 indicates all available. random_state: int (default 1) The random seed.
GArDen/construct/sequence_generator.py
__init__
rgerkin/EDeN
0
python
def __init__(self, n_differences=1, enhance=True, vectorizer=Vectorizer(complexity=3), n_jobs=(- 1), random_state=1): "Generate sequences.\n\n Start from input sequences that are 'better' if enhance is set to True\n ('worse' otherwise) given the set of sequences used in the fit phase.\n\n Parameters\n ----------\n n_differences : int (default 1)\n Number of characters that differ for the generated sequence from\n the original input sequence.\n\n enhance : bool (default True)\n If set to True then the score computed by the estimator will be\n higher for the sequences generated than for the input sequences.\n If False than the score will be lower.\n\n vectorizer : EDeN sequence vectorizer\n The vectorizer to map sequences to sparse vectors.\n\n n_jobs : int (default -1)\n The number of cores to use in parallel. -1 indicates all available.\n\n random_state: int (default 1)\n The random seed.\n " self.random_state = random_state self.n_jobs = n_jobs self.n_differences = n_differences self.enhance = enhance self.vectorizer = vectorizer self.estimator = None
def __init__(self, n_differences=1, enhance=True, vectorizer=Vectorizer(complexity=3), n_jobs=(- 1), random_state=1): "Generate sequences.\n\n Start from input sequences that are 'better' if enhance is set to True\n ('worse' otherwise) given the set of sequences used in the fit phase.\n\n Parameters\n ----------\n n_differences : int (default 1)\n Number of characters that differ for the generated sequence from\n the original input sequence.\n\n enhance : bool (default True)\n If set to True then the score computed by the estimator will be\n higher for the sequences generated than for the input sequences.\n If False than the score will be lower.\n\n vectorizer : EDeN sequence vectorizer\n The vectorizer to map sequences to sparse vectors.\n\n n_jobs : int (default -1)\n The number of cores to use in parallel. -1 indicates all available.\n\n random_state: int (default 1)\n The random seed.\n " self.random_state = random_state self.n_jobs = n_jobs self.n_differences = n_differences self.enhance = enhance self.vectorizer = vectorizer self.estimator = None<|docstring|>Generate sequences. Start from input sequences that are 'better' if enhance is set to True ('worse' otherwise) given the set of sequences used in the fit phase. Parameters ---------- n_differences : int (default 1) Number of characters that differ for the generated sequence from the original input sequence. enhance : bool (default True) If set to True then the score computed by the estimator will be higher for the sequences generated than for the input sequences. If False than the score will be lower. vectorizer : EDeN sequence vectorizer The vectorizer to map sequences to sparse vectors. n_jobs : int (default -1) The number of cores to use in parallel. -1 indicates all available. random_state: int (default 1) The random seed.<|endoftext|>
069459e67e92aeb431fd4f40e07de27f1c1b1cda11da9e66439d57e81a3ad561
def fit(self, pos_seqs, neg_seqs=None, times=2, order=2): 'Fit an estimator to discriminate the pos_seqs from the neg_seqs.\n\n Parameters\n ----------\n pos_seqs : iterable strings\n Input sequences.\n\n neg_seqs : iterable strings (default: None)\n If not None the program uses these as negative examples. If\n it is None, then negative sequences are generated as random\n shuffling of the positive sequences.\n\n times: int (default: 2)\n Factor between number of negatives and number of positives.\n\n order: int (default: 2)\n Size of the minimum block to shuffle: 1 means shuffling single\n characters, 2 means shuffling pairs of characters, etc.\n\n Returns\n -------\n self.\n ' if (neg_seqs is None): neg_seqs = list(seq_to_seq(pos_seqs, modifier=shuffle_modifier, times=times, order=order)) self.estimator = fit(pos_seqs, neg_seqs, self.vectorizer, n_jobs=self.n_jobs, cv=10, n_iter_search=1, random_state=self.random_state, n_blocks=5, block_size=None) return self
Fit an estimator to discriminate the pos_seqs from the neg_seqs. Parameters ---------- pos_seqs : iterable strings Input sequences. neg_seqs : iterable strings (default: None) If not None the program uses these as negative examples. If it is None, then negative sequences are generated as random shuffling of the positive sequences. times: int (default: 2) Factor between number of negatives and number of positives. order: int (default: 2) Size of the minimum block to shuffle: 1 means shuffling single characters, 2 means shuffling pairs of characters, etc. Returns ------- self.
GArDen/construct/sequence_generator.py
fit
rgerkin/EDeN
0
python
def fit(self, pos_seqs, neg_seqs=None, times=2, order=2): 'Fit an estimator to discriminate the pos_seqs from the neg_seqs.\n\n Parameters\n ----------\n pos_seqs : iterable strings\n Input sequences.\n\n neg_seqs : iterable strings (default: None)\n If not None the program uses these as negative examples. If\n it is None, then negative sequences are generated as random\n shuffling of the positive sequences.\n\n times: int (default: 2)\n Factor between number of negatives and number of positives.\n\n order: int (default: 2)\n Size of the minimum block to shuffle: 1 means shuffling single\n characters, 2 means shuffling pairs of characters, etc.\n\n Returns\n -------\n self.\n ' if (neg_seqs is None): neg_seqs = list(seq_to_seq(pos_seqs, modifier=shuffle_modifier, times=times, order=order)) self.estimator = fit(pos_seqs, neg_seqs, self.vectorizer, n_jobs=self.n_jobs, cv=10, n_iter_search=1, random_state=self.random_state, n_blocks=5, block_size=None) return self
def fit(self, pos_seqs, neg_seqs=None, times=2, order=2): 'Fit an estimator to discriminate the pos_seqs from the neg_seqs.\n\n Parameters\n ----------\n pos_seqs : iterable strings\n Input sequences.\n\n neg_seqs : iterable strings (default: None)\n If not None the program uses these as negative examples. If\n it is None, then negative sequences are generated as random\n shuffling of the positive sequences.\n\n times: int (default: 2)\n Factor between number of negatives and number of positives.\n\n order: int (default: 2)\n Size of the minimum block to shuffle: 1 means shuffling single\n characters, 2 means shuffling pairs of characters, etc.\n\n Returns\n -------\n self.\n ' if (neg_seqs is None): neg_seqs = list(seq_to_seq(pos_seqs, modifier=shuffle_modifier, times=times, order=order)) self.estimator = fit(pos_seqs, neg_seqs, self.vectorizer, n_jobs=self.n_jobs, cv=10, n_iter_search=1, random_state=self.random_state, n_blocks=5, block_size=None) return self<|docstring|>Fit an estimator to discriminate the pos_seqs from the neg_seqs. Parameters ---------- pos_seqs : iterable strings Input sequences. neg_seqs : iterable strings (default: None) If not None the program uses these as negative examples. If it is None, then negative sequences are generated as random shuffling of the positive sequences. times: int (default: 2) Factor between number of negatives and number of positives. order: int (default: 2) Size of the minimum block to shuffle: 1 means shuffling single characters, 2 means shuffling pairs of characters, etc. Returns ------- self.<|endoftext|>
6fa4bab93dc3a240d42da1ef711efe53a78880822ba3537ba6499e0a45164713
def transform(self, seqs, n_seqs=1, show_score=False, enhance=None, n_differences=None): "Generate sequences.\n\n Start from input sequences that are 'better' if enhance is set to True\n ('worse' otherwise) given the set of sequences used in the fit phase.\n\n Parameters\n ----------\n seqs : iterable strings\n Input sequences.\n\n n_seqs : int (default: 1)\n Number of sequences to be generated starting from each sequence\n in input.\n\n show_score: bool (default: False)\n If True the return type is a pair consisting of a score and a\n sequence. If False the return type is a sequence.\n\n enhance : bool (default None)\n If set to True then the score computed by the estimator will be\n higher for the sequences generated than for the input sequences.\n If False than the score will be lower. If None the state set in\n the initializer is used.\n\n n_differences : int (default None)\n Number of characters that differ for the generated sequence from\n the original input sequence. If None the number set in the\n initializer is used.\n\n Returns\n -------\n sequences : iterable sequences\n List of sequences or (score, sequence) pairs if show_score is True.\n " try: if (enhance is not None): self.enhance = enhance if (n_differences is not None): self.n_differences = n_differences for seq in seqs: if show_score: preds = predict(iterable=[seq], estimator=self.estimator, vectorizer=self.vectorizer, mode='decision_function', n_blocks=5, block_size=None, n_jobs=self.n_jobs) logger.debug(('%s\n%+.3f %s' % (seq[0], preds[0], seq[1]))) gen_seqs = self._generate(seq, n_seqs=n_seqs, show_score=show_score) for gen_seq in gen_seqs: (yield gen_seq) except Exception as e: logger.debug(('Failed iteration. Reason: %s' % e)) logger.debug('Exception', exc_info=True)
Generate sequences. Start from input sequences that are 'better' if enhance is set to True ('worse' otherwise) given the set of sequences used in the fit phase. Parameters ---------- seqs : iterable strings Input sequences. n_seqs : int (default: 1) Number of sequences to be generated starting from each sequence in input. show_score: bool (default: False) If True the return type is a pair consisting of a score and a sequence. If False the return type is a sequence. enhance : bool (default None) If set to True then the score computed by the estimator will be higher for the sequences generated than for the input sequences. If False than the score will be lower. If None the state set in the initializer is used. n_differences : int (default None) Number of characters that differ for the generated sequence from the original input sequence. If None the number set in the initializer is used. Returns ------- sequences : iterable sequences List of sequences or (score, sequence) pairs if show_score is True.
GArDen/construct/sequence_generator.py
transform
rgerkin/EDeN
0
python
def transform(self, seqs, n_seqs=1, show_score=False, enhance=None, n_differences=None): "Generate sequences.\n\n Start from input sequences that are 'better' if enhance is set to True\n ('worse' otherwise) given the set of sequences used in the fit phase.\n\n Parameters\n ----------\n seqs : iterable strings\n Input sequences.\n\n n_seqs : int (default: 1)\n Number of sequences to be generated starting from each sequence\n in input.\n\n show_score: bool (default: False)\n If True the return type is a pair consisting of a score and a\n sequence. If False the return type is a sequence.\n\n enhance : bool (default None)\n If set to True then the score computed by the estimator will be\n higher for the sequences generated than for the input sequences.\n If False than the score will be lower. If None the state set in\n the initializer is used.\n\n n_differences : int (default None)\n Number of characters that differ for the generated sequence from\n the original input sequence. If None the number set in the\n initializer is used.\n\n Returns\n -------\n sequences : iterable sequences\n List of sequences or (score, sequence) pairs if show_score is True.\n " try: if (enhance is not None): self.enhance = enhance if (n_differences is not None): self.n_differences = n_differences for seq in seqs: if show_score: preds = predict(iterable=[seq], estimator=self.estimator, vectorizer=self.vectorizer, mode='decision_function', n_blocks=5, block_size=None, n_jobs=self.n_jobs) logger.debug(('%s\n%+.3f %s' % (seq[0], preds[0], seq[1]))) gen_seqs = self._generate(seq, n_seqs=n_seqs, show_score=show_score) for gen_seq in gen_seqs: (yield gen_seq) except Exception as e: logger.debug(('Failed iteration. Reason: %s' % e)) logger.debug('Exception', exc_info=True)
def transform(self, seqs, n_seqs=1, show_score=False, enhance=None, n_differences=None): "Generate sequences.\n\n Start from input sequences that are 'better' if enhance is set to True\n ('worse' otherwise) given the set of sequences used in the fit phase.\n\n Parameters\n ----------\n seqs : iterable strings\n Input sequences.\n\n n_seqs : int (default: 1)\n Number of sequences to be generated starting from each sequence\n in input.\n\n show_score: bool (default: False)\n If True the return type is a pair consisting of a score and a\n sequence. If False the return type is a sequence.\n\n enhance : bool (default None)\n If set to True then the score computed by the estimator will be\n higher for the sequences generated than for the input sequences.\n If False than the score will be lower. If None the state set in\n the initializer is used.\n\n n_differences : int (default None)\n Number of characters that differ for the generated sequence from\n the original input sequence. If None the number set in the\n initializer is used.\n\n Returns\n -------\n sequences : iterable sequences\n List of sequences or (score, sequence) pairs if show_score is True.\n " try: if (enhance is not None): self.enhance = enhance if (n_differences is not None): self.n_differences = n_differences for seq in seqs: if show_score: preds = predict(iterable=[seq], estimator=self.estimator, vectorizer=self.vectorizer, mode='decision_function', n_blocks=5, block_size=None, n_jobs=self.n_jobs) logger.debug(('%s\n%+.3f %s' % (seq[0], preds[0], seq[1]))) gen_seqs = self._generate(seq, n_seqs=n_seqs, show_score=show_score) for gen_seq in gen_seqs: (yield gen_seq) except Exception as e: logger.debug(('Failed iteration. Reason: %s' % e)) logger.debug('Exception', exc_info=True)<|docstring|>Generate sequences. Start from input sequences that are 'better' if enhance is set to True ('worse' otherwise) given the set of sequences used in the fit phase. 
Parameters ---------- seqs : iterable strings Input sequences. n_seqs : int (default: 1) Number of sequences to be generated starting from each sequence in input. show_score: bool (default: False) If True the return type is a pair consisting of a score and a sequence. If False the return type is a sequence. enhance : bool (default None) If set to True then the score computed by the estimator will be higher for the sequences generated than for the input sequences. If False than the score will be lower. If None the state set in the initializer is used. n_differences : int (default None) Number of characters that differ for the generated sequence from the original input sequence. If None the number set in the initializer is used. Returns ------- sequences : iterable sequences List of sequences or (score, sequence) pairs if show_score is True.<|endoftext|>
f16becd7287bef0d05c271315b669c33ad7d19964900409ac43565b5e5d55eca
def __getattr__(cls, name): "Return the enum member matching `name`\n We use __getattr__ instead of descriptors or inserting into the enum\n class' __dict__ in order to support `name` and `value` being both\n properties for enum members (which live in the class' __dict__) and\n enum members themselves.\n " try: return cls._member_map_[name.upper()] except KeyError: raise AttributeError(name)
Return the enum member matching `name` We use __getattr__ instead of descriptors or inserting into the enum class' __dict__ in order to support `name` and `value` being both properties for enum members (which live in the class' __dict__) and enum members themselves.
src/test/scenarios/datafactory/output/src/datafactory/azext_datafactory/vendored_sdks/datafactory/models/_dfaz_management_client_enums.py
__getattr__
colawwj/autorest.az
0
python
def __getattr__(cls, name): "Return the enum member matching `name`\n We use __getattr__ instead of descriptors or inserting into the enum\n class' __dict__ in order to support `name` and `value` being both\n properties for enum members (which live in the class' __dict__) and\n enum members themselves.\n " try: return cls._member_map_[name.upper()] except KeyError: raise AttributeError(name)
def __getattr__(cls, name): "Return the enum member matching `name`\n We use __getattr__ instead of descriptors or inserting into the enum\n class' __dict__ in order to support `name` and `value` being both\n properties for enum members (which live in the class' __dict__) and\n enum members themselves.\n " try: return cls._member_map_[name.upper()] except KeyError: raise AttributeError(name)<|docstring|>Return the enum member matching `name` We use __getattr__ instead of descriptors or inserting into the enum class' __dict__ in order to support `name` and `value` being both properties for enum members (which live in the class' __dict__) and enum members themselves.<|endoftext|>
345670a2bab30dad6407575eb148cf72c50185c87d1bbceabed965dd3859f153
def has_object_permission(self, request, view, obj): '\n If owner return true, otherwise return false.\n ' if (request.method in permissions.SAFE_METHODS): return True return (obj.author == request.user)
If owner return true, otherwise return false.
authors/apps/articles/permissions.py
has_object_permission
SilasKenneth/ah-technocrats
1
python
def has_object_permission(self, request, view, obj): '\n \n ' if (request.method in permissions.SAFE_METHODS): return True return (obj.author == request.user)
def has_object_permission(self, request, view, obj): '\n \n ' if (request.method in permissions.SAFE_METHODS): return True return (obj.author == request.user)<|docstring|>If owner return true, otherwise return false.<|endoftext|>
6ed6c4f4fefd3067fef06f88cef2a9e2a3727961bbbe347d0d3040a352389db1
def __init__(self, size=0): 'Initializes Square with size' self.size = size
Initializes Square with size
0x06-python-classes/4-square.py
__init__
flourishcodes/holbertonschool-higher_level_programming
0
python
def __init__(self, size=0): self.size = size
def __init__(self, size=0): self.size = size<|docstring|>Initializes Square with size<|endoftext|>
505d4c6d70fd17d00ac7ffb2e37372e48e2b5b77ad35a3c97c10588acd20debc
@property def size(self): 'Defines the size of square and returns its value' return self.__size
Defines the size of square and returns its value
0x06-python-classes/4-square.py
size
flourishcodes/holbertonschool-higher_level_programming
0
python
@property def size(self): return self.__size
@property def size(self): return self.__size<|docstring|>Defines the size of square and returns its value<|endoftext|>
077db03eec0471a727df600438416adb41848ebb8651403a7502cf5610d76570
@size.setter def size(self, value): 'Defines the value of size of square and checks if >= 0' self.__size = value if (type(value) is not int): raise TypeError('size must be an integer') if (value < 0): raise ValueError('size must be >= 0')
Defines the value of size of square and checks if >= 0
0x06-python-classes/4-square.py
size
flourishcodes/holbertonschool-higher_level_programming
0
python
@size.setter def size(self, value): self.__size = value if (type(value) is not int): raise TypeError('size must be an integer') if (value < 0): raise ValueError('size must be >= 0')
@size.setter def size(self, value): self.__size = value if (type(value) is not int): raise TypeError('size must be an integer') if (value < 0): raise ValueError('size must be >= 0')<|docstring|>Defines the value of size of square and checks if >= 0<|endoftext|>
11d1399309e6f39b74eb7d432152f7aa1cfa9221aaa6db1d9bbcb47dd9687640
def area(self): 'Defines the area of a square' return (self.__size * self.__size)
Defines the area of a square
0x06-python-classes/4-square.py
area
flourishcodes/holbertonschool-higher_level_programming
0
python
def area(self): return (self.__size * self.__size)
def area(self): return (self.__size * self.__size)<|docstring|>Defines the area of a square<|endoftext|>
31c1143dca58857308666881290a0f73a16488e75f55271e15562772ad0c879c
def on_network_update(self, chan: Channel, funding_tx_depth: int): '\n Only called when the channel is OPEN.\n\n Runs on the Network thread.\n ' if ((not chan.config[LOCAL].was_announced) and (funding_tx_depth >= 6)): return chan.config[LOCAL].was_announced = True self.lnworker.save_channel(chan) coro = self.handle_announcements(chan) asyncio.run_coroutine_threadsafe(coro, self.network.asyncio_loop)
Only called when the channel is OPEN. Runs on the Network thread.
electrum/lnpeer.py
on_network_update
Coldcard/electrum
7
python
def on_network_update(self, chan: Channel, funding_tx_depth: int): '\n Only called when the channel is OPEN.\n\n Runs on the Network thread.\n ' if ((not chan.config[LOCAL].was_announced) and (funding_tx_depth >= 6)): return chan.config[LOCAL].was_announced = True self.lnworker.save_channel(chan) coro = self.handle_announcements(chan) asyncio.run_coroutine_threadsafe(coro, self.network.asyncio_loop)
def on_network_update(self, chan: Channel, funding_tx_depth: int): '\n Only called when the channel is OPEN.\n\n Runs on the Network thread.\n ' if ((not chan.config[LOCAL].was_announced) and (funding_tx_depth >= 6)): return chan.config[LOCAL].was_announced = True self.lnworker.save_channel(chan) coro = self.handle_announcements(chan) asyncio.run_coroutine_threadsafe(coro, self.network.asyncio_loop)<|docstring|>Only called when the channel is OPEN. Runs on the Network thread.<|endoftext|>
2421d57e203fea2712e0fe824effbb0665fb5c839677a56dce4ee65bb99863a8
async def bitcoin_fee_update(self, chan: Channel): '\n called when our fee estimates change\n ' if (not chan.constraints.is_initiator): return feerate_per_kw = self.lnworker.current_feerate_per_kw() chan_fee = chan.get_next_feerate(REMOTE) self.logger.info(f'(chan: {chan.get_id_for_log()}) current pending feerate {chan_fee}. new feerate {feerate_per_kw}') if (feerate_per_kw < (chan_fee / 2)): self.logger.info('FEES HAVE FALLEN') elif (feerate_per_kw > (chan_fee * 2)): self.logger.info('FEES HAVE RISEN') else: return chan.update_fee(feerate_per_kw, True) remote_ctn = chan.get_latest_ctn(REMOTE) self.send_message('update_fee', channel_id=chan.channel_id, feerate_per_kw=feerate_per_kw) (await self.await_remote(chan, remote_ctn))
called when our fee estimates change
electrum/lnpeer.py
bitcoin_fee_update
Coldcard/electrum
7
python
async def bitcoin_fee_update(self, chan: Channel): '\n \n ' if (not chan.constraints.is_initiator): return feerate_per_kw = self.lnworker.current_feerate_per_kw() chan_fee = chan.get_next_feerate(REMOTE) self.logger.info(f'(chan: {chan.get_id_for_log()}) current pending feerate {chan_fee}. new feerate {feerate_per_kw}') if (feerate_per_kw < (chan_fee / 2)): self.logger.info('FEES HAVE FALLEN') elif (feerate_per_kw > (chan_fee * 2)): self.logger.info('FEES HAVE RISEN') else: return chan.update_fee(feerate_per_kw, True) remote_ctn = chan.get_latest_ctn(REMOTE) self.send_message('update_fee', channel_id=chan.channel_id, feerate_per_kw=feerate_per_kw) (await self.await_remote(chan, remote_ctn))
async def bitcoin_fee_update(self, chan: Channel): '\n \n ' if (not chan.constraints.is_initiator): return feerate_per_kw = self.lnworker.current_feerate_per_kw() chan_fee = chan.get_next_feerate(REMOTE) self.logger.info(f'(chan: {chan.get_id_for_log()}) current pending feerate {chan_fee}. new feerate {feerate_per_kw}') if (feerate_per_kw < (chan_fee / 2)): self.logger.info('FEES HAVE FALLEN') elif (feerate_per_kw > (chan_fee * 2)): self.logger.info('FEES HAVE RISEN') else: return chan.update_fee(feerate_per_kw, True) remote_ctn = chan.get_latest_ctn(REMOTE) self.send_message('update_fee', channel_id=chan.channel_id, feerate_per_kw=feerate_per_kw) (await self.await_remote(chan, remote_ctn))<|docstring|>called when our fee estimates change<|endoftext|>
231847946848fe87fa3d0f2ddb01bc1daa0b71db2174a2842601031baf2ab3b2
def ls_fit_iter(niter, xt, yt, x, y, Nsigma, arcsec=True, verbose=True): '\n This funciton finds the standard deviation and mean from all points, then subtracts that mean from\n each point and compares it with the true values to reject all the points that are Nsigma away. It\n iterates until no more points are being rejected.\n Args:\n niter = number of max iterations\n xt = input measured x-centroid converted to V2\n yt = input measured y-centroid converted to V3\n x = numpy array of true V2 positions\n y = numpy array of true V3 positions\n Nsigma = sigma limit to reject stars\n arcsec = True or False, give delta theta in arcsecs?\n Returns:\n deltas, sigmas, lines2print, rejected_elements_idx\n deltas = list of means for x, y, and theta\n sigmas = list of standard deviations for x, y, and theta\n lines2print = list of lines to pretty print results on screen and/or in a file\n rejected_elements_idx = list of the index of the rejected points\n nit = integer, number of iterations\n ' original_elements = len(x) original_true_centroids = copy.deepcopy(x) for nit in range(niter): n = len(x) sum_tot = 0.0 sum_x = 0.0 sum_y = 0.0 sum_xt = 0.0 sum_yt = 0.0 sum_xt2 = 0.0 sum_yt2 = 0.0 sum_xyt = 0.0 sum_xty = 0.0 for i in range(n): sum_tot += 1.0 sum_x = (sum_x + x[i]) sum_y = (sum_y + y[i]) sum_xt = (sum_xt + xt[i]) sum_yt = (sum_yt + yt[i]) sum_xt2 = (sum_xt2 + (xt[i] * xt[i])) sum_yt2 = (sum_yt2 + (yt[i] * yt[i])) sum_xyt = (sum_xyt + (x[i] * yt[i])) sum_xty = (sum_xty + (xt[i] * y[i])) det = (((sum_tot * sum_tot) * (sum_xt2 + sum_yt2)) - (sum_tot * ((sum_xt * sum_xt) + (sum_yt * sum_yt)))) delta_x = (((sum_tot * (sum_xt2 + sum_yt2)) - (sum_xt * sum_xt)) * (sum_x - sum_xt)) delta_x = ((delta_x - (((sum_y - sum_yt) * sum_xt) * sum_yt)) - ((sum_tot * sum_yt) * (sum_xyt - sum_xty))) delta_x /= det delta_y = (((sum_tot * (sum_xt2 + sum_yt2)) - (sum_yt * sum_yt)) * (sum_y - sum_yt)) delta_y = ((delta_y - (((sum_x - sum_xt) * sum_xt) * sum_yt)) - ((sum_tot * sum_xt) * 
(sum_xty - sum_xyt))) delta_y /= det delta_theta = (sum_tot * (((sum_xt * sum_y) - (sum_xt * sum_yt)) + (sum_tot * (sum_xyt - sum_xty)))) delta_theta /= det line1 = '(least_squares_iterate): iteration number: {}'.format(nit) if arcsec: delta_theta = ((delta_theta * (180.0 / np.pi)) * 3600.0) line2 = '(least_squares_iterate): delta_x = {} delta_y = {} delta_theta = {} arcsec'.format(delta_x, delta_y, delta_theta) else: line2 = '(least_squares_iterate): delta_x = {} delta_y = {} delta_theta = {} radians'.format(delta_x, delta_y, delta_theta) deltas = [delta_x, delta_y, delta_theta] sum_delta_x2 = 0.0 sum_delta_y2 = 0.0 sum_delta_theta2 = 0.0 for i in range(n): sum_delta_x2 += ((((- xt[i]) + x[i]) - delta_x) * (((- xt[i]) + x[i]) - delta_x)) sum_delta_y2 += ((((- yt[i]) + y[i]) - delta_y) * (((- yt[i]) + y[i]) - delta_y)) sigma_x = np.sqrt((sum_delta_x2 / n)) sigma_y = np.sqrt((sum_delta_y2 / n)) sigma_theta = (- 999.0) line3 = '(least_squares_iterate): sigma_x = {} sigma_y = {} sigma_theta = {}'.format(sigma_x, sigma_y, sigma_theta) sigmas = [sigma_x, sigma_y, sigma_theta] xnewpos = (x - delta_x) xdiff = (xnewpos - xt) ynewpos = (y - delta_y) ydiff = (ynewpos - yt) thres_x = (Nsigma * sigma_x) thres_y = (Nsigma * sigma_y) var_clip = xdiff[np.where(((np.abs(xdiff) <= thres_x) & (np.abs(ydiff) <= thres_y)))] xcentroids_new = xt[np.where(((np.abs(xdiff) <= thres_x) & (np.abs(ydiff) <= thres_y)))] ycentroids_new = yt[np.where(((np.abs(xdiff) <= thres_x) & (np.abs(ydiff) <= thres_y)))] x_new = x[np.where(((np.abs(xdiff) <= thres_x) & (np.abs(ydiff) <= thres_y)))] y_new = y[np.where(((np.abs(xdiff) <= thres_x) & (np.abs(ydiff) <= thres_y)))] '\n zerop7 = 0.7\n var_clip = xdiff[(np.where((np.abs(xdiff)<=thres_x) & (np.abs(ydiff)<=thres_y) & (np.abs(xdiff)<=zerop7) & (np.abs(ydiff)<=zerop7)))]\n xcentroids_new = xt[(np.where((np.abs(xdiff)<=thres_x) & (np.abs(ydiff)<=thres_y) & (np.abs(xdiff)<=zerop7) & (np.abs(ydiff)<=zerop7)))]\n ycentroids_new = 
yt[(np.where((np.abs(xdiff)<=thres_x) & (np.abs(ydiff)<=thres_y) & (np.abs(xdiff)<=zerop7) & (np.abs(ydiff)<=zerop7)))]\n x_new = x[(np.where((np.abs(xdiff)<=thres_x) & (np.abs(ydiff)<=thres_y) & (np.abs(xdiff)<=zerop7) & (np.abs(ydiff)<=zerop7)))]\n y_new = y[(np.where((np.abs(xdiff)<=thres_x) & (np.abs(ydiff)<=thres_y) & (np.abs(xdiff)<=zerop7) & (np.abs(ydiff)<=zerop7)))]\n ' elements_left = len(xcentroids_new) line4 = '(least_squares_iterate): elements_left={} out of original_elements={}'.format(elements_left, original_elements) if (len(xcentroids_new) == len(xt)): break else: xt = xcentroids_new yt = ycentroids_new x = x_new y = y_new if verbose: print(line1) print(line2) print(line3) print(line4) lines2print = [line1, line2, line3, line4] rejected_elements_idx = [] for (i, centroid) in enumerate(original_true_centroids): if (centroid not in x): rejected_elements_idx.append(i) return (deltas, sigmas, lines2print, rejected_elements_idx, nit)
This funciton finds the standard deviation and mean from all points, then subtracts that mean from each point and compares it with the true values to reject all the points that are Nsigma away. It iterates until no more points are being rejected. Args: niter = number of max iterations xt = input measured x-centroid converted to V2 yt = input measured y-centroid converted to V3 x = numpy array of true V2 positions y = numpy array of true V3 positions Nsigma = sigma limit to reject stars arcsec = True or False, give delta theta in arcsecs? Returns: deltas, sigmas, lines2print, rejected_elements_idx deltas = list of means for x, y, and theta sigmas = list of standard deviations for x, y, and theta lines2print = list of lines to pretty print results on screen and/or in a file rejected_elements_idx = list of the index of the rejected points nit = integer, number of iterations
least_squares_iterate.py
ls_fit_iter
penaguerrero/nirspec_TA
0
python
def ls_fit_iter(niter, xt, yt, x, y, Nsigma, arcsec=True, verbose=True): '\n This funciton finds the standard deviation and mean from all points, then subtracts that mean from\n each point and compares it with the true values to reject all the points that are Nsigma away. It\n iterates until no more points are being rejected.\n Args:\n niter = number of max iterations\n xt = input measured x-centroid converted to V2\n yt = input measured y-centroid converted to V3\n x = numpy array of true V2 positions\n y = numpy array of true V3 positions\n Nsigma = sigma limit to reject stars\n arcsec = True or False, give delta theta in arcsecs?\n Returns:\n deltas, sigmas, lines2print, rejected_elements_idx\n deltas = list of means for x, y, and theta\n sigmas = list of standard deviations for x, y, and theta\n lines2print = list of lines to pretty print results on screen and/or in a file\n rejected_elements_idx = list of the index of the rejected points\n nit = integer, number of iterations\n ' original_elements = len(x) original_true_centroids = copy.deepcopy(x) for nit in range(niter): n = len(x) sum_tot = 0.0 sum_x = 0.0 sum_y = 0.0 sum_xt = 0.0 sum_yt = 0.0 sum_xt2 = 0.0 sum_yt2 = 0.0 sum_xyt = 0.0 sum_xty = 0.0 for i in range(n): sum_tot += 1.0 sum_x = (sum_x + x[i]) sum_y = (sum_y + y[i]) sum_xt = (sum_xt + xt[i]) sum_yt = (sum_yt + yt[i]) sum_xt2 = (sum_xt2 + (xt[i] * xt[i])) sum_yt2 = (sum_yt2 + (yt[i] * yt[i])) sum_xyt = (sum_xyt + (x[i] * yt[i])) sum_xty = (sum_xty + (xt[i] * y[i])) det = (((sum_tot * sum_tot) * (sum_xt2 + sum_yt2)) - (sum_tot * ((sum_xt * sum_xt) + (sum_yt * sum_yt)))) delta_x = (((sum_tot * (sum_xt2 + sum_yt2)) - (sum_xt * sum_xt)) * (sum_x - sum_xt)) delta_x = ((delta_x - (((sum_y - sum_yt) * sum_xt) * sum_yt)) - ((sum_tot * sum_yt) * (sum_xyt - sum_xty))) delta_x /= det delta_y = (((sum_tot * (sum_xt2 + sum_yt2)) - (sum_yt * sum_yt)) * (sum_y - sum_yt)) delta_y = ((delta_y - (((sum_x - sum_xt) * sum_xt) * sum_yt)) - ((sum_tot * sum_xt) * 
(sum_xty - sum_xyt))) delta_y /= det delta_theta = (sum_tot * (((sum_xt * sum_y) - (sum_xt * sum_yt)) + (sum_tot * (sum_xyt - sum_xty)))) delta_theta /= det line1 = '(least_squares_iterate): iteration number: {}'.format(nit) if arcsec: delta_theta = ((delta_theta * (180.0 / np.pi)) * 3600.0) line2 = '(least_squares_iterate): delta_x = {} delta_y = {} delta_theta = {} arcsec'.format(delta_x, delta_y, delta_theta) else: line2 = '(least_squares_iterate): delta_x = {} delta_y = {} delta_theta = {} radians'.format(delta_x, delta_y, delta_theta) deltas = [delta_x, delta_y, delta_theta] sum_delta_x2 = 0.0 sum_delta_y2 = 0.0 sum_delta_theta2 = 0.0 for i in range(n): sum_delta_x2 += ((((- xt[i]) + x[i]) - delta_x) * (((- xt[i]) + x[i]) - delta_x)) sum_delta_y2 += ((((- yt[i]) + y[i]) - delta_y) * (((- yt[i]) + y[i]) - delta_y)) sigma_x = np.sqrt((sum_delta_x2 / n)) sigma_y = np.sqrt((sum_delta_y2 / n)) sigma_theta = (- 999.0) line3 = '(least_squares_iterate): sigma_x = {} sigma_y = {} sigma_theta = {}'.format(sigma_x, sigma_y, sigma_theta) sigmas = [sigma_x, sigma_y, sigma_theta] xnewpos = (x - delta_x) xdiff = (xnewpos - xt) ynewpos = (y - delta_y) ydiff = (ynewpos - yt) thres_x = (Nsigma * sigma_x) thres_y = (Nsigma * sigma_y) var_clip = xdiff[np.where(((np.abs(xdiff) <= thres_x) & (np.abs(ydiff) <= thres_y)))] xcentroids_new = xt[np.where(((np.abs(xdiff) <= thres_x) & (np.abs(ydiff) <= thres_y)))] ycentroids_new = yt[np.where(((np.abs(xdiff) <= thres_x) & (np.abs(ydiff) <= thres_y)))] x_new = x[np.where(((np.abs(xdiff) <= thres_x) & (np.abs(ydiff) <= thres_y)))] y_new = y[np.where(((np.abs(xdiff) <= thres_x) & (np.abs(ydiff) <= thres_y)))] '\n zerop7 = 0.7\n var_clip = xdiff[(np.where((np.abs(xdiff)<=thres_x) & (np.abs(ydiff)<=thres_y) & (np.abs(xdiff)<=zerop7) & (np.abs(ydiff)<=zerop7)))]\n xcentroids_new = xt[(np.where((np.abs(xdiff)<=thres_x) & (np.abs(ydiff)<=thres_y) & (np.abs(xdiff)<=zerop7) & (np.abs(ydiff)<=zerop7)))]\n ycentroids_new = 
yt[(np.where((np.abs(xdiff)<=thres_x) & (np.abs(ydiff)<=thres_y) & (np.abs(xdiff)<=zerop7) & (np.abs(ydiff)<=zerop7)))]\n x_new = x[(np.where((np.abs(xdiff)<=thres_x) & (np.abs(ydiff)<=thres_y) & (np.abs(xdiff)<=zerop7) & (np.abs(ydiff)<=zerop7)))]\n y_new = y[(np.where((np.abs(xdiff)<=thres_x) & (np.abs(ydiff)<=thres_y) & (np.abs(xdiff)<=zerop7) & (np.abs(ydiff)<=zerop7)))]\n ' elements_left = len(xcentroids_new) line4 = '(least_squares_iterate): elements_left={} out of original_elements={}'.format(elements_left, original_elements) if (len(xcentroids_new) == len(xt)): break else: xt = xcentroids_new yt = ycentroids_new x = x_new y = y_new if verbose: print(line1) print(line2) print(line3) print(line4) lines2print = [line1, line2, line3, line4] rejected_elements_idx = [] for (i, centroid) in enumerate(original_true_centroids): if (centroid not in x): rejected_elements_idx.append(i) return (deltas, sigmas, lines2print, rejected_elements_idx, nit)
def ls_fit_iter(niter, xt, yt, x, y, Nsigma, arcsec=True, verbose=True): '\n This funciton finds the standard deviation and mean from all points, then subtracts that mean from\n each point and compares it with the true values to reject all the points that are Nsigma away. It\n iterates until no more points are being rejected.\n Args:\n niter = number of max iterations\n xt = input measured x-centroid converted to V2\n yt = input measured y-centroid converted to V3\n x = numpy array of true V2 positions\n y = numpy array of true V3 positions\n Nsigma = sigma limit to reject stars\n arcsec = True or False, give delta theta in arcsecs?\n Returns:\n deltas, sigmas, lines2print, rejected_elements_idx\n deltas = list of means for x, y, and theta\n sigmas = list of standard deviations for x, y, and theta\n lines2print = list of lines to pretty print results on screen and/or in a file\n rejected_elements_idx = list of the index of the rejected points\n nit = integer, number of iterations\n ' original_elements = len(x) original_true_centroids = copy.deepcopy(x) for nit in range(niter): n = len(x) sum_tot = 0.0 sum_x = 0.0 sum_y = 0.0 sum_xt = 0.0 sum_yt = 0.0 sum_xt2 = 0.0 sum_yt2 = 0.0 sum_xyt = 0.0 sum_xty = 0.0 for i in range(n): sum_tot += 1.0 sum_x = (sum_x + x[i]) sum_y = (sum_y + y[i]) sum_xt = (sum_xt + xt[i]) sum_yt = (sum_yt + yt[i]) sum_xt2 = (sum_xt2 + (xt[i] * xt[i])) sum_yt2 = (sum_yt2 + (yt[i] * yt[i])) sum_xyt = (sum_xyt + (x[i] * yt[i])) sum_xty = (sum_xty + (xt[i] * y[i])) det = (((sum_tot * sum_tot) * (sum_xt2 + sum_yt2)) - (sum_tot * ((sum_xt * sum_xt) + (sum_yt * sum_yt)))) delta_x = (((sum_tot * (sum_xt2 + sum_yt2)) - (sum_xt * sum_xt)) * (sum_x - sum_xt)) delta_x = ((delta_x - (((sum_y - sum_yt) * sum_xt) * sum_yt)) - ((sum_tot * sum_yt) * (sum_xyt - sum_xty))) delta_x /= det delta_y = (((sum_tot * (sum_xt2 + sum_yt2)) - (sum_yt * sum_yt)) * (sum_y - sum_yt)) delta_y = ((delta_y - (((sum_x - sum_xt) * sum_xt) * sum_yt)) - ((sum_tot * sum_xt) * 
(sum_xty - sum_xyt))) delta_y /= det delta_theta = (sum_tot * (((sum_xt * sum_y) - (sum_xt * sum_yt)) + (sum_tot * (sum_xyt - sum_xty)))) delta_theta /= det line1 = '(least_squares_iterate): iteration number: {}'.format(nit) if arcsec: delta_theta = ((delta_theta * (180.0 / np.pi)) * 3600.0) line2 = '(least_squares_iterate): delta_x = {} delta_y = {} delta_theta = {} arcsec'.format(delta_x, delta_y, delta_theta) else: line2 = '(least_squares_iterate): delta_x = {} delta_y = {} delta_theta = {} radians'.format(delta_x, delta_y, delta_theta) deltas = [delta_x, delta_y, delta_theta] sum_delta_x2 = 0.0 sum_delta_y2 = 0.0 sum_delta_theta2 = 0.0 for i in range(n): sum_delta_x2 += ((((- xt[i]) + x[i]) - delta_x) * (((- xt[i]) + x[i]) - delta_x)) sum_delta_y2 += ((((- yt[i]) + y[i]) - delta_y) * (((- yt[i]) + y[i]) - delta_y)) sigma_x = np.sqrt((sum_delta_x2 / n)) sigma_y = np.sqrt((sum_delta_y2 / n)) sigma_theta = (- 999.0) line3 = '(least_squares_iterate): sigma_x = {} sigma_y = {} sigma_theta = {}'.format(sigma_x, sigma_y, sigma_theta) sigmas = [sigma_x, sigma_y, sigma_theta] xnewpos = (x - delta_x) xdiff = (xnewpos - xt) ynewpos = (y - delta_y) ydiff = (ynewpos - yt) thres_x = (Nsigma * sigma_x) thres_y = (Nsigma * sigma_y) var_clip = xdiff[np.where(((np.abs(xdiff) <= thres_x) & (np.abs(ydiff) <= thres_y)))] xcentroids_new = xt[np.where(((np.abs(xdiff) <= thres_x) & (np.abs(ydiff) <= thres_y)))] ycentroids_new = yt[np.where(((np.abs(xdiff) <= thres_x) & (np.abs(ydiff) <= thres_y)))] x_new = x[np.where(((np.abs(xdiff) <= thres_x) & (np.abs(ydiff) <= thres_y)))] y_new = y[np.where(((np.abs(xdiff) <= thres_x) & (np.abs(ydiff) <= thres_y)))] '\n zerop7 = 0.7\n var_clip = xdiff[(np.where((np.abs(xdiff)<=thres_x) & (np.abs(ydiff)<=thres_y) & (np.abs(xdiff)<=zerop7) & (np.abs(ydiff)<=zerop7)))]\n xcentroids_new = xt[(np.where((np.abs(xdiff)<=thres_x) & (np.abs(ydiff)<=thres_y) & (np.abs(xdiff)<=zerop7) & (np.abs(ydiff)<=zerop7)))]\n ycentroids_new = 
yt[(np.where((np.abs(xdiff)<=thres_x) & (np.abs(ydiff)<=thres_y) & (np.abs(xdiff)<=zerop7) & (np.abs(ydiff)<=zerop7)))]\n x_new = x[(np.where((np.abs(xdiff)<=thres_x) & (np.abs(ydiff)<=thres_y) & (np.abs(xdiff)<=zerop7) & (np.abs(ydiff)<=zerop7)))]\n y_new = y[(np.where((np.abs(xdiff)<=thres_x) & (np.abs(ydiff)<=thres_y) & (np.abs(xdiff)<=zerop7) & (np.abs(ydiff)<=zerop7)))]\n ' elements_left = len(xcentroids_new) line4 = '(least_squares_iterate): elements_left={} out of original_elements={}'.format(elements_left, original_elements) if (len(xcentroids_new) == len(xt)): break else: xt = xcentroids_new yt = ycentroids_new x = x_new y = y_new if verbose: print(line1) print(line2) print(line3) print(line4) lines2print = [line1, line2, line3, line4] rejected_elements_idx = [] for (i, centroid) in enumerate(original_true_centroids): if (centroid not in x): rejected_elements_idx.append(i) return (deltas, sigmas, lines2print, rejected_elements_idx, nit)<|docstring|>This funciton finds the standard deviation and mean from all points, then subtracts that mean from each point and compares it with the true values to reject all the points that are Nsigma away. It iterates until no more points are being rejected. Args: niter = number of max iterations xt = input measured x-centroid converted to V2 yt = input measured y-centroid converted to V3 x = numpy array of true V2 positions y = numpy array of true V3 positions Nsigma = sigma limit to reject stars arcsec = True or False, give delta theta in arcsecs? Returns: deltas, sigmas, lines2print, rejected_elements_idx deltas = list of means for x, y, and theta sigmas = list of standard deviations for x, y, and theta lines2print = list of lines to pretty print results on screen and/or in a file rejected_elements_idx = list of the index of the rejected points nit = integer, number of iterations<|endoftext|>
c891c863a2ad220d7edfad14b159485fcc4d04d35ca5dab2ff2d18579695fe91
def _zero_on_type_error(column_fn): 'Wrap a function on an np.ndarray to return 0 on a type error.' if (not column_fn): return column_fn if (not callable(column_fn)): raise TypeError('column functions must be callable') @functools.wraps(column_fn) def wrapped(column): try: return column_fn(column) except TypeError: if isinstance(column, np.ndarray): return column.dtype.type() else: raise return wrapped
Wrap a function on an np.ndarray to return 0 on a type error.
digital-assyriology-review/datascience/tables.py
_zero_on_type_error
ds-modules/NESTUD-190A
6
python
def _zero_on_type_error(column_fn): if (not column_fn): return column_fn if (not callable(column_fn)): raise TypeError('column functions must be callable') @functools.wraps(column_fn) def wrapped(column): try: return column_fn(column) except TypeError: if isinstance(column, np.ndarray): return column.dtype.type() else: raise return wrapped
def _zero_on_type_error(column_fn): if (not column_fn): return column_fn if (not callable(column_fn)): raise TypeError('column functions must be callable') @functools.wraps(column_fn) def wrapped(column): try: return column_fn(column) except TypeError: if isinstance(column, np.ndarray): return column.dtype.type() else: raise return wrapped<|docstring|>Wrap a function on an np.ndarray to return 0 on a type error.<|endoftext|>
b022b0c91d1cb16a776299520ea627475940dc0d1dc86f3c6b52c2a5bad39963
def _fill_with_zeros(partials, rows, zero=None): 'Find and return values from rows for all partials. In cases where no\n row matches a partial, zero is assumed as value. For a row, the first\n (n-1) fields are assumed to be the partial, and the last field,\n the value, where n is the total number of fields in each row. It is\n assumed that there is a unique row for each partial.\n partials -- single field values or tuples of field values\n rows -- table rows\n zero -- value used when no rows match a particular partial\n ' assert (len(rows) > 0) if (not _is_non_string_iterable(partials)): partials = [(partial,) for partial in partials] mapping = {} for row in rows: mapping[tuple(row[:(- 1)])] = row[(- 1)] if (zero is None): array = np.array(tuple(mapping.values())) if (len(array.shape) == 1): zero = array.dtype.type() return np.array([mapping.get(partial, zero) for partial in partials])
Find and return values from rows for all partials. In cases where no row matches a partial, zero is assumed as value. For a row, the first (n-1) fields are assumed to be the partial, and the last field, the value, where n is the total number of fields in each row. It is assumed that there is a unique row for each partial. partials -- single field values or tuples of field values rows -- table rows zero -- value used when no rows match a particular partial
digital-assyriology-review/datascience/tables.py
_fill_with_zeros
ds-modules/NESTUD-190A
6
python
def _fill_with_zeros(partials, rows, zero=None): 'Find and return values from rows for all partials. In cases where no\n row matches a partial, zero is assumed as value. For a row, the first\n (n-1) fields are assumed to be the partial, and the last field,\n the value, where n is the total number of fields in each row. It is\n assumed that there is a unique row for each partial.\n partials -- single field values or tuples of field values\n rows -- table rows\n zero -- value used when no rows match a particular partial\n ' assert (len(rows) > 0) if (not _is_non_string_iterable(partials)): partials = [(partial,) for partial in partials] mapping = {} for row in rows: mapping[tuple(row[:(- 1)])] = row[(- 1)] if (zero is None): array = np.array(tuple(mapping.values())) if (len(array.shape) == 1): zero = array.dtype.type() return np.array([mapping.get(partial, zero) for partial in partials])
def _fill_with_zeros(partials, rows, zero=None): 'Find and return values from rows for all partials. In cases where no\n row matches a partial, zero is assumed as value. For a row, the first\n (n-1) fields are assumed to be the partial, and the last field,\n the value, where n is the total number of fields in each row. It is\n assumed that there is a unique row for each partial.\n partials -- single field values or tuples of field values\n rows -- table rows\n zero -- value used when no rows match a particular partial\n ' assert (len(rows) > 0) if (not _is_non_string_iterable(partials)): partials = [(partial,) for partial in partials] mapping = {} for row in rows: mapping[tuple(row[:(- 1)])] = row[(- 1)] if (zero is None): array = np.array(tuple(mapping.values())) if (len(array.shape) == 1): zero = array.dtype.type() return np.array([mapping.get(partial, zero) for partial in partials])<|docstring|>Find and return values from rows for all partials. In cases where no row matches a partial, zero is assumed as value. For a row, the first (n-1) fields are assumed to be the partial, and the last field, the value, where n is the total number of fields in each row. It is assumed that there is a unique row for each partial. partials -- single field values or tuples of field values rows -- table rows zero -- value used when no rows match a particular partial<|endoftext|>
d8f2ed093a44c211440ede890a0a0cb642f2d6b81902f6f760e9a79d03f82b3e
def _as_labels(column_or_columns): 'Return a list of labels for a label or labels.' if (not _is_non_string_iterable(column_or_columns)): return [column_or_columns] else: return column_or_columns
Return a list of labels for a label or labels.
digital-assyriology-review/datascience/tables.py
_as_labels
ds-modules/NESTUD-190A
6
python
def _as_labels(column_or_columns): if (not _is_non_string_iterable(column_or_columns)): return [column_or_columns] else: return column_or_columns
def _as_labels(column_or_columns): if (not _is_non_string_iterable(column_or_columns)): return [column_or_columns] else: return column_or_columns<|docstring|>Return a list of labels for a label or labels.<|endoftext|>
b9f0f7efd27ad2231387c7ec62488032de80118593cad8cbf71ebab9d2fa31c0
def _varargs_labels_as_list(label_list): 'Return a list of labels for a list of labels or singleton list of list\n of labels.' if (len(label_list) == 0): return [] elif (not _is_non_string_iterable(label_list[0])): return label_list elif (len(label_list) == 1): return label_list[0] else: raise ValueError('Labels {} contain more than list.'.format(label_list), 'Pass just one list of labels.')
Return a list of labels for a list of labels or singleton list of list of labels.
digital-assyriology-review/datascience/tables.py
_varargs_labels_as_list
ds-modules/NESTUD-190A
6
python
def _varargs_labels_as_list(label_list): 'Return a list of labels for a list of labels or singleton list of list\n of labels.' if (len(label_list) == 0): return [] elif (not _is_non_string_iterable(label_list[0])): return label_list elif (len(label_list) == 1): return label_list[0] else: raise ValueError('Labels {} contain more than list.'.format(label_list), 'Pass just one list of labels.')
def _varargs_labels_as_list(label_list): 'Return a list of labels for a list of labels or singleton list of list\n of labels.' if (len(label_list) == 0): return [] elif (not _is_non_string_iterable(label_list[0])): return label_list elif (len(label_list) == 1): return label_list[0] else: raise ValueError('Labels {} contain more than list.'.format(label_list), 'Pass just one list of labels.')<|docstring|>Return a list of labels for a list of labels or singleton list of list of labels.<|endoftext|>
3b38f7d8230189ef4d7e23e8191d99bc0cceb58b0752b2444243bf046161c127
def _assert_same(values): 'Assert that all values are identical and return the unique value.' assert (len(values) > 0) (first, rest) = (values[0], values[1:]) for v in rest: assert (v == first) return first
Assert that all values are identical and return the unique value.
digital-assyriology-review/datascience/tables.py
_assert_same
ds-modules/NESTUD-190A
6
python
def _assert_same(values): assert (len(values) > 0) (first, rest) = (values[0], values[1:]) for v in rest: assert (v == first) return first
def _assert_same(values): assert (len(values) > 0) (first, rest) = (values[0], values[1:]) for v in rest: assert (v == first) return first<|docstring|>Assert that all values are identical and return the unique value.<|endoftext|>
6ed3cd781ad091874acc117fca5ac427691b6acde073af6a517dafad495c9be1
def _collected_label(collect, label): 'Label of a collected column.' if (not collect.__name__.startswith('<')): return ((label + ' ') + collect.__name__) else: return label
Label of a collected column.
digital-assyriology-review/datascience/tables.py
_collected_label
ds-modules/NESTUD-190A
6
python
def _collected_label(collect, label): if (not collect.__name__.startswith('<')): return ((label + ' ') + collect.__name__) else: return label
def _collected_label(collect, label): if (not collect.__name__.startswith('<')): return ((label + ' ') + collect.__name__) else: return label<|docstring|>Label of a collected column.<|endoftext|>
3d59aa179aebb5724de8f69c983cd6a83b740086d99eafa46b4f82ee789fe23d
def _is_non_string_iterable(value): 'Whether a value is iterable.' if isinstance(value, str): return False if hasattr(value, '__iter__'): return True if isinstance(value, collections.abc.Sequence): return True return False
Whether a value is iterable.
digital-assyriology-review/datascience/tables.py
_is_non_string_iterable
ds-modules/NESTUD-190A
6
python
def _is_non_string_iterable(value): if isinstance(value, str): return False if hasattr(value, '__iter__'): return True if isinstance(value, collections.abc.Sequence): return True return False
def _is_non_string_iterable(value): if isinstance(value, str): return False if hasattr(value, '__iter__'): return True if isinstance(value, collections.abc.Sequence): return True return False<|docstring|>Whether a value is iterable.<|endoftext|>
72329dabdec9a37079e9a14f01a0f453e8fedcbb23c32d873edf1ccc06c27707
def _vertical_x(axis, ticks=None, max_width=5): 'Switch labels to vertical if they are long.' if (ticks is None): ticks = axis.get_xticks() if (np.array(ticks) == np.rint(ticks)).all(): ticks = np.rint(ticks).astype(np.int) if (max([len(str(tick)) for tick in ticks]) > max_width): axis.set_xticklabels(ticks, rotation='vertical')
Switch labels to vertical if they are long.
digital-assyriology-review/datascience/tables.py
_vertical_x
ds-modules/NESTUD-190A
6
python
def _vertical_x(axis, ticks=None, max_width=5): if (ticks is None): ticks = axis.get_xticks() if (np.array(ticks) == np.rint(ticks)).all(): ticks = np.rint(ticks).astype(np.int) if (max([len(str(tick)) for tick in ticks]) > max_width): axis.set_xticklabels(ticks, rotation='vertical')
def _vertical_x(axis, ticks=None, max_width=5): if (ticks is None): ticks = axis.get_xticks() if (np.array(ticks) == np.rint(ticks)).all(): ticks = np.rint(ticks).astype(np.int) if (max([len(str(tick)) for tick in ticks]) > max_width): axis.set_xticklabels(ticks, rotation='vertical')<|docstring|>Switch labels to vertical if they are long.<|endoftext|>
8a6459d3494a3916959312e326f07dee09594dc51277dc4265b7a9fd753cb6ff
def __init__(self, labels=None, _deprecated=None, *, formatter=_formats.default_formatter): "Create an empty table with column labels.\n\n >>> tiles = Table(make_array('letter', 'count', 'points'))\n >>> tiles\n letter | count | points\n\n Args:\n ``labels`` (list of strings): The column labels.\n\n ``formatter`` (Formatter): An instance of :class:`Formatter` that\n formats the columns' values.\n " self._columns = collections.OrderedDict() self._formats = dict() self.formatter = formatter if (_deprecated is not None): warnings.warn('Two-argument __init__ is deprecated. Use Table().with_columns(...)', FutureWarning) (columns, labels) = (labels, _deprecated) columns = (columns if (columns is not None) else []) labels = (labels if (labels is not None) else []) assert (len(labels) == len(columns)), 'label/column number mismatch' else: labels = (labels if (labels is not None) else []) columns = [[] for _ in labels] self._num_rows = (0 if (len(columns) is 0) else len(columns[0])) for (column, label) in zip(columns, labels): self[label] = column self.take = _RowTaker(self) self.exclude = _RowExcluder(self)
Create an empty table with column labels. >>> tiles = Table(make_array('letter', 'count', 'points')) >>> tiles letter | count | points Args: ``labels`` (list of strings): The column labels. ``formatter`` (Formatter): An instance of :class:`Formatter` that formats the columns' values.
digital-assyriology-review/datascience/tables.py
__init__
ds-modules/NESTUD-190A
6
python
def __init__(self, labels=None, _deprecated=None, *, formatter=_formats.default_formatter): "Create an empty table with column labels.\n\n >>> tiles = Table(make_array('letter', 'count', 'points'))\n >>> tiles\n letter | count | points\n\n Args:\n ``labels`` (list of strings): The column labels.\n\n ``formatter`` (Formatter): An instance of :class:`Formatter` that\n formats the columns' values.\n " self._columns = collections.OrderedDict() self._formats = dict() self.formatter = formatter if (_deprecated is not None): warnings.warn('Two-argument __init__ is deprecated. Use Table().with_columns(...)', FutureWarning) (columns, labels) = (labels, _deprecated) columns = (columns if (columns is not None) else []) labels = (labels if (labels is not None) else []) assert (len(labels) == len(columns)), 'label/column number mismatch' else: labels = (labels if (labels is not None) else []) columns = [[] for _ in labels] self._num_rows = (0 if (len(columns) is 0) else len(columns[0])) for (column, label) in zip(columns, labels): self[label] = column self.take = _RowTaker(self) self.exclude = _RowExcluder(self)
def __init__(self, labels=None, _deprecated=None, *, formatter=_formats.default_formatter): "Create an empty table with column labels.\n\n >>> tiles = Table(make_array('letter', 'count', 'points'))\n >>> tiles\n letter | count | points\n\n Args:\n ``labels`` (list of strings): The column labels.\n\n ``formatter`` (Formatter): An instance of :class:`Formatter` that\n formats the columns' values.\n " self._columns = collections.OrderedDict() self._formats = dict() self.formatter = formatter if (_deprecated is not None): warnings.warn('Two-argument __init__ is deprecated. Use Table().with_columns(...)', FutureWarning) (columns, labels) = (labels, _deprecated) columns = (columns if (columns is not None) else []) labels = (labels if (labels is not None) else []) assert (len(labels) == len(columns)), 'label/column number mismatch' else: labels = (labels if (labels is not None) else []) columns = [[] for _ in labels] self._num_rows = (0 if (len(columns) is 0) else len(columns[0])) for (column, label) in zip(columns, labels): self[label] = column self.take = _RowTaker(self) self.exclude = _RowExcluder(self)<|docstring|>Create an empty table with column labels. >>> tiles = Table(make_array('letter', 'count', 'points')) >>> tiles letter | count | points Args: ``labels`` (list of strings): The column labels. ``formatter`` (Formatter): An instance of :class:`Formatter` that formats the columns' values.<|endoftext|>
6148ff44b2193ccbf02e7ab116e66252f01e0c4d22caa54303f980f9e94dc933
@classmethod def empty(cls, labels=None): 'Creates an empty table. Column labels are optional. [Deprecated]\n\n Args:\n ``labels`` (None or list): If ``None``, a table with 0\n columns is created.\n If a list, each element is a column label in a table with\n 0 rows.\n\n Returns:\n A new instance of ``Table``.\n ' warnings.warn('Table.empty(labels) is deprecated. Use Table(labels)', FutureWarning) if (labels is None): return cls() values = [[] for label in labels] return cls(values, labels)
Creates an empty table. Column labels are optional. [Deprecated] Args: ``labels`` (None or list): If ``None``, a table with 0 columns is created. If a list, each element is a column label in a table with 0 rows. Returns: A new instance of ``Table``.
digital-assyriology-review/datascience/tables.py
empty
ds-modules/NESTUD-190A
6
python
@classmethod def empty(cls, labels=None): 'Creates an empty table. Column labels are optional. [Deprecated]\n\n Args:\n ``labels`` (None or list): If ``None``, a table with 0\n columns is created.\n If a list, each element is a column label in a table with\n 0 rows.\n\n Returns:\n A new instance of ``Table``.\n ' warnings.warn('Table.empty(labels) is deprecated. Use Table(labels)', FutureWarning) if (labels is None): return cls() values = [[] for label in labels] return cls(values, labels)
@classmethod def empty(cls, labels=None): 'Creates an empty table. Column labels are optional. [Deprecated]\n\n Args:\n ``labels`` (None or list): If ``None``, a table with 0\n columns is created.\n If a list, each element is a column label in a table with\n 0 rows.\n\n Returns:\n A new instance of ``Table``.\n ' warnings.warn('Table.empty(labels) is deprecated. Use Table(labels)', FutureWarning) if (labels is None): return cls() values = [[] for label in labels] return cls(values, labels)<|docstring|>Creates an empty table. Column labels are optional. [Deprecated] Args: ``labels`` (None or list): If ``None``, a table with 0 columns is created. If a list, each element is a column label in a table with 0 rows. Returns: A new instance of ``Table``.<|endoftext|>
8bbb2ed2fd72120259ecb2dd38b0f2d4108b37e89159e9523923df60cfb20123
@classmethod def from_rows(cls, rows, labels): 'Create a table from a sequence of rows (fixed-length sequences). [Deprecated]' warnings.warn('Table.from_rows is deprecated. Use Table(labels).with_rows(...)', FutureWarning) return cls(labels).with_rows(rows)
Create a table from a sequence of rows (fixed-length sequences). [Deprecated]
digital-assyriology-review/datascience/tables.py
from_rows
ds-modules/NESTUD-190A
6
python
@classmethod def from_rows(cls, rows, labels): warnings.warn('Table.from_rows is deprecated. Use Table(labels).with_rows(...)', FutureWarning) return cls(labels).with_rows(rows)
@classmethod def from_rows(cls, rows, labels): warnings.warn('Table.from_rows is deprecated. Use Table(labels).with_rows(...)', FutureWarning) return cls(labels).with_rows(rows)<|docstring|>Create a table from a sequence of rows (fixed-length sequences). [Deprecated]<|endoftext|>
207d505bbd38198014dc19280c6ac1a54c7fe478747918015c3b9fa7c01fffaf
@classmethod def from_records(cls, records): 'Create a table from a sequence of records (dicts with fixed keys).' if (not records): return cls() labels = sorted(list(records[0].keys())) columns = [[rec[label] for rec in records] for label in labels] return cls().with_columns(zip(labels, columns))
Create a table from a sequence of records (dicts with fixed keys).
digital-assyriology-review/datascience/tables.py
from_records
ds-modules/NESTUD-190A
6
python
@classmethod def from_records(cls, records): if (not records): return cls() labels = sorted(list(records[0].keys())) columns = [[rec[label] for rec in records] for label in labels] return cls().with_columns(zip(labels, columns))
@classmethod def from_records(cls, records): if (not records): return cls() labels = sorted(list(records[0].keys())) columns = [[rec[label] for rec in records] for label in labels] return cls().with_columns(zip(labels, columns))<|docstring|>Create a table from a sequence of records (dicts with fixed keys).<|endoftext|>
9c5ab747e652ee6ac558d449aa40c1903acf5c80ac2df8123c3dd5333de2025f
@classmethod def from_columns_dict(cls, columns): 'Create a table from a mapping of column labels to column values. [Deprecated]' warnings.warn('Table.from_columns_dict is deprecated. Use Table().with_columns(...)', FutureWarning) return cls().with_columns(columns.items())
Create a table from a mapping of column labels to column values. [Deprecated]
digital-assyriology-review/datascience/tables.py
from_columns_dict
ds-modules/NESTUD-190A
6
python
@classmethod def from_columns_dict(cls, columns): warnings.warn('Table.from_columns_dict is deprecated. Use Table().with_columns(...)', FutureWarning) return cls().with_columns(columns.items())
@classmethod def from_columns_dict(cls, columns): warnings.warn('Table.from_columns_dict is deprecated. Use Table().with_columns(...)', FutureWarning) return cls().with_columns(columns.items())<|docstring|>Create a table from a mapping of column labels to column values. [Deprecated]<|endoftext|>
afe32877d035910fa9a24d654e6b6673bfcd8eae4a85c4e0f3b0676949a8e918
@classmethod def read_table(cls, filepath_or_buffer, *args, **vargs): 'Read a table from a file or web address.\n\n filepath_or_buffer -- string or file handle / StringIO; The string\n could be a URL. Valid URL schemes include http,\n ftp, s3, and file.\n ' try: path = urllib.parse.urlparse(filepath_or_buffer).path if ('data8.berkeley.edu' in filepath_or_buffer): raise ValueError('data8.berkeley.edu requires authentication, which is not supported.') except AttributeError: path = filepath_or_buffer try: if (('sep' not in vargs) and path.endswith('.csv')): vargs['sep'] = ',' except AttributeError: pass df = pandas.read_table(filepath_or_buffer, *args, **vargs) return cls.from_df(df)
Read a table from a file or web address. filepath_or_buffer -- string or file handle / StringIO; The string could be a URL. Valid URL schemes include http, ftp, s3, and file.
digital-assyriology-review/datascience/tables.py
read_table
ds-modules/NESTUD-190A
6
python
@classmethod def read_table(cls, filepath_or_buffer, *args, **vargs): 'Read a table from a file or web address.\n\n filepath_or_buffer -- string or file handle / StringIO; The string\n could be a URL. Valid URL schemes include http,\n ftp, s3, and file.\n ' try: path = urllib.parse.urlparse(filepath_or_buffer).path if ('data8.berkeley.edu' in filepath_or_buffer): raise ValueError('data8.berkeley.edu requires authentication, which is not supported.') except AttributeError: path = filepath_or_buffer try: if (('sep' not in vargs) and path.endswith('.csv')): vargs['sep'] = ',' except AttributeError: pass df = pandas.read_table(filepath_or_buffer, *args, **vargs) return cls.from_df(df)
@classmethod def read_table(cls, filepath_or_buffer, *args, **vargs): 'Read a table from a file or web address.\n\n filepath_or_buffer -- string or file handle / StringIO; The string\n could be a URL. Valid URL schemes include http,\n ftp, s3, and file.\n ' try: path = urllib.parse.urlparse(filepath_or_buffer).path if ('data8.berkeley.edu' in filepath_or_buffer): raise ValueError('data8.berkeley.edu requires authentication, which is not supported.') except AttributeError: path = filepath_or_buffer try: if (('sep' not in vargs) and path.endswith('.csv')): vargs['sep'] = ',' except AttributeError: pass df = pandas.read_table(filepath_or_buffer, *args, **vargs) return cls.from_df(df)<|docstring|>Read a table from a file or web address. filepath_or_buffer -- string or file handle / StringIO; The string could be a URL. Valid URL schemes include http, ftp, s3, and file.<|endoftext|>
0d45df9836a506340633100dd8ea7be3068a16ed3ef0a2132ed389d5125a9caa
def _with_columns(self, columns): 'Create a table from a sequence of columns, copying column labels.' table = type(self)() for (label, column) in zip(self.labels, columns): self._add_column_and_format(table, label, column) return table
Create a table from a sequence of columns, copying column labels.
digital-assyriology-review/datascience/tables.py
_with_columns
ds-modules/NESTUD-190A
6
python
def _with_columns(self, columns): table = type(self)() for (label, column) in zip(self.labels, columns): self._add_column_and_format(table, label, column) return table
def _with_columns(self, columns): table = type(self)() for (label, column) in zip(self.labels, columns): self._add_column_and_format(table, label, column) return table<|docstring|>Create a table from a sequence of columns, copying column labels.<|endoftext|>
a9b57393f09754b504c0fec64fa307fa82adf5538dd69e0ee66315512f3f9147
def _add_column_and_format(self, table, label, column): 'Add a column to table, copying the formatter from self.' label = self._as_label(label) table[label] = column if (label in self._formats): table._formats[label] = self._formats[label]
Add a column to table, copying the formatter from self.
digital-assyriology-review/datascience/tables.py
_add_column_and_format
ds-modules/NESTUD-190A
6
python
def _add_column_and_format(self, table, label, column): label = self._as_label(label) table[label] = column if (label in self._formats): table._formats[label] = self._formats[label]
def _add_column_and_format(self, table, label, column): label = self._as_label(label) table[label] = column if (label in self._formats): table._formats[label] = self._formats[label]<|docstring|>Add a column to table, copying the formatter from self.<|endoftext|>
ddc012ef2ee7338a34e8c15e2b6852c0204943871b98a50b8300ea4c1038629e
@classmethod def from_df(cls, df): 'Convert a Pandas DataFrame into a Table.' t = cls() labels = df.columns for label in df.columns: t.append_column(label, df[label]) return t
Convert a Pandas DataFrame into a Table.
digital-assyriology-review/datascience/tables.py
from_df
ds-modules/NESTUD-190A
6
python
@classmethod def from_df(cls, df): t = cls() labels = df.columns for label in df.columns: t.append_column(label, df[label]) return t
@classmethod def from_df(cls, df): t = cls() labels = df.columns for label in df.columns: t.append_column(label, df[label]) return t<|docstring|>Convert a Pandas DataFrame into a Table.<|endoftext|>
87e389c1e90f92839bf47893d4a64aff52f7a07e1def7d8d1932d3617b6c01d9
@classmethod def from_array(cls, arr): 'Convert a structured NumPy array into a Table.' return cls().with_columns([(f, arr[f]) for f in arr.dtype.names])
Convert a structured NumPy array into a Table.
digital-assyriology-review/datascience/tables.py
from_array
ds-modules/NESTUD-190A
6
python
@classmethod def from_array(cls, arr): return cls().with_columns([(f, arr[f]) for f in arr.dtype.names])
@classmethod def from_array(cls, arr): return cls().with_columns([(f, arr[f]) for f in arr.dtype.names])<|docstring|>Convert a structured NumPy array into a Table.<|endoftext|>
a5726a4f03b90cbbc2ea229d1fd915c10df3c71da563f78be35faacae69c05ba
def __getattr__(self, attr): 'Return a method that applies to all columns or a table of attributes. [Deprecated]\n\n E.g., t.sum() on a Table will return a table with the sum of each column.\n ' if (self.columns and all((hasattr(c, attr) for c in self.columns))): warnings.warn('Implicit column method lookup is deprecated.', FutureWarning) attrs = [getattr(c, attr) for c in self.columns] if all((callable(attr) for attr in attrs)): @functools.wraps(attrs[0]) def method(*args, **vargs): 'Create a table from the results of calling attrs.' columns = [attr(*args, **vargs) for attr in attrs] return self._with_columns(columns) return method else: return self._with_columns([[attr] for attr in attrs]) else: msg = "'{0}' object has no attribute '{1}'".format(type(self).__name__, attr) raise AttributeError(msg)
Return a method that applies to all columns or a table of attributes. [Deprecated] E.g., t.sum() on a Table will return a table with the sum of each column.
digital-assyriology-review/datascience/tables.py
__getattr__
ds-modules/NESTUD-190A
6
python
def __getattr__(self, attr): 'Return a method that applies to all columns or a table of attributes. [Deprecated]\n\n E.g., t.sum() on a Table will return a table with the sum of each column.\n ' if (self.columns and all((hasattr(c, attr) for c in self.columns))): warnings.warn('Implicit column method lookup is deprecated.', FutureWarning) attrs = [getattr(c, attr) for c in self.columns] if all((callable(attr) for attr in attrs)): @functools.wraps(attrs[0]) def method(*args, **vargs): 'Create a table from the results of calling attrs.' columns = [attr(*args, **vargs) for attr in attrs] return self._with_columns(columns) return method else: return self._with_columns([[attr] for attr in attrs]) else: msg = "'{0}' object has no attribute '{1}'".format(type(self).__name__, attr) raise AttributeError(msg)
def __getattr__(self, attr): 'Return a method that applies to all columns or a table of attributes. [Deprecated]\n\n E.g., t.sum() on a Table will return a table with the sum of each column.\n ' if (self.columns and all((hasattr(c, attr) for c in self.columns))): warnings.warn('Implicit column method lookup is deprecated.', FutureWarning) attrs = [getattr(c, attr) for c in self.columns] if all((callable(attr) for attr in attrs)): @functools.wraps(attrs[0]) def method(*args, **vargs): 'Create a table from the results of calling attrs.' columns = [attr(*args, **vargs) for attr in attrs] return self._with_columns(columns) return method else: return self._with_columns([[attr] for attr in attrs]) else: msg = "'{0}' object has no attribute '{1}'".format(type(self).__name__, attr) raise AttributeError(msg)<|docstring|>Return a method that applies to all columns or a table of attributes. [Deprecated] E.g., t.sum() on a Table will return a table with the sum of each column.<|endoftext|>
58797d73e01f4187248e7c69884e3f92e5ad3db741d4ad5d534a07797b11b97a
@property def num_rows(self): 'Number of rows.' return self._num_rows
Number of rows.
digital-assyriology-review/datascience/tables.py
num_rows
ds-modules/NESTUD-190A
6
python
@property def num_rows(self): return self._num_rows
@property def num_rows(self): return self._num_rows<|docstring|>Number of rows.<|endoftext|>
5545512a277394935d7026e74edb8484578f775ed9e3de279feca5643d07175e
@property def rows(self): 'Return a view of all rows.' return self.Rows(self)
Return a view of all rows.
digital-assyriology-review/datascience/tables.py
rows
ds-modules/NESTUD-190A
6
python
@property def rows(self): return self.Rows(self)
@property def rows(self): return self.Rows(self)<|docstring|>Return a view of all rows.<|endoftext|>
01a0e3f39e38d7c203c80654bf8ad08a2d6e5c156618d60c2027abca77caff91
def row(self, index): 'Return a row.' return self.rows[index]
Return a row.
digital-assyriology-review/datascience/tables.py
row
ds-modules/NESTUD-190A
6
python
def row(self, index): return self.rows[index]
def row(self, index): return self.rows[index]<|docstring|>Return a row.<|endoftext|>
4a8730464ad46698fba30125434e6bb92236c7f705fc96aa765e1fe3f1cf206d
@property def labels(self): 'Return a tuple of column labels.' return tuple(self._columns.keys())
Return a tuple of column labels.
digital-assyriology-review/datascience/tables.py
labels
ds-modules/NESTUD-190A
6
python
@property def labels(self): return tuple(self._columns.keys())
@property def labels(self): return tuple(self._columns.keys())<|docstring|>Return a tuple of column labels.<|endoftext|>
c9139fa69227ea9223a0ea4034815e5ee9a6faef7c01753e1379494d4ab998b4
@property def column_labels(self): 'Return a tuple of column labels. [Deprecated]' warnings.warn('column_labels is deprecated; use labels', FutureWarning) return self.labels
Return a tuple of column labels. [Deprecated]
digital-assyriology-review/datascience/tables.py
column_labels
ds-modules/NESTUD-190A
6
python
@property def column_labels(self): warnings.warn('column_labels is deprecated; use labels', FutureWarning) return self.labels
@property def column_labels(self): warnings.warn('column_labels is deprecated; use labels', FutureWarning) return self.labels<|docstring|>Return a tuple of column labels. [Deprecated]<|endoftext|>
2f6a13e76f41ed4496945dac30797ea5be7b7d18a57635bb5fdf5b942e20831a
@property def num_columns(self): 'Number of columns.' return len(self.labels)
Number of columns.
digital-assyriology-review/datascience/tables.py
num_columns
ds-modules/NESTUD-190A
6
python
@property def num_columns(self): return len(self.labels)
@property def num_columns(self): return len(self.labels)<|docstring|>Number of columns.<|endoftext|>
7814deb9dd069061f273b143208503915a348b5d19fe030e47cb12a2bfb0d1e0
def column(self, index_or_label): "Return the values of a column as an array.\n\n table.column(label) is equivalent to table[label].\n\n >>> tiles = Table().with_columns(\n ... 'letter', make_array('c', 'd'),\n ... 'count', make_array(2, 4),\n ... )\n\n >>> tiles.column('letter')\n array(['c', 'd'],\n dtype='<U1')\n >>> tiles.column(1)\n array([2, 4])\n\n Args:\n label (int or str): The index or label of a column\n\n Returns:\n An instance of ``numpy.array``.\n\n Raises:\n ``ValueError``: When the ``index_or_label`` is not in the table.\n " if (isinstance(index_or_label, str) and (index_or_label not in self.labels)): raise ValueError('The column "{}" is not in the table. The table contains these columns: {}'.format(index_or_label, ', '.join(self.labels))) if (isinstance(index_or_label, int) and (not (0 <= index_or_label < len(self.labels)))): raise ValueError('The index {} is not in the table. Only indices between 0 and {} are valid'.format(index_or_label, (len(self.labels) - 1))) return self._columns[self._as_label(index_or_label)]
Return the values of a column as an array. table.column(label) is equivalent to table[label]. >>> tiles = Table().with_columns( ... 'letter', make_array('c', 'd'), ... 'count', make_array(2, 4), ... ) >>> tiles.column('letter') array(['c', 'd'], dtype='<U1') >>> tiles.column(1) array([2, 4]) Args: label (int or str): The index or label of a column Returns: An instance of ``numpy.array``. Raises: ``ValueError``: When the ``index_or_label`` is not in the table.
digital-assyriology-review/datascience/tables.py
column
ds-modules/NESTUD-190A
6
python
def column(self, index_or_label): "Return the values of a column as an array.\n\n table.column(label) is equivalent to table[label].\n\n >>> tiles = Table().with_columns(\n ... 'letter', make_array('c', 'd'),\n ... 'count', make_array(2, 4),\n ... )\n\n >>> tiles.column('letter')\n array(['c', 'd'],\n dtype='<U1')\n >>> tiles.column(1)\n array([2, 4])\n\n Args:\n label (int or str): The index or label of a column\n\n Returns:\n An instance of ``numpy.array``.\n\n Raises:\n ``ValueError``: When the ``index_or_label`` is not in the table.\n " if (isinstance(index_or_label, str) and (index_or_label not in self.labels)): raise ValueError('The column "{}" is not in the table. The table contains these columns: {}'.format(index_or_label, ', '.join(self.labels))) if (isinstance(index_or_label, int) and (not (0 <= index_or_label < len(self.labels)))): raise ValueError('The index {} is not in the table. Only indices between 0 and {} are valid'.format(index_or_label, (len(self.labels) - 1))) return self._columns[self._as_label(index_or_label)]
def column(self, index_or_label): "Return the values of a column as an array.\n\n table.column(label) is equivalent to table[label].\n\n >>> tiles = Table().with_columns(\n ... 'letter', make_array('c', 'd'),\n ... 'count', make_array(2, 4),\n ... )\n\n >>> tiles.column('letter')\n array(['c', 'd'],\n dtype='<U1')\n >>> tiles.column(1)\n array([2, 4])\n\n Args:\n label (int or str): The index or label of a column\n\n Returns:\n An instance of ``numpy.array``.\n\n Raises:\n ``ValueError``: When the ``index_or_label`` is not in the table.\n " if (isinstance(index_or_label, str) and (index_or_label not in self.labels)): raise ValueError('The column "{}" is not in the table. The table contains these columns: {}'.format(index_or_label, ', '.join(self.labels))) if (isinstance(index_or_label, int) and (not (0 <= index_or_label < len(self.labels)))): raise ValueError('The index {} is not in the table. Only indices between 0 and {} are valid'.format(index_or_label, (len(self.labels) - 1))) return self._columns[self._as_label(index_or_label)]<|docstring|>Return the values of a column as an array. table.column(label) is equivalent to table[label]. >>> tiles = Table().with_columns( ... 'letter', make_array('c', 'd'), ... 'count', make_array(2, 4), ... ) >>> tiles.column('letter') array(['c', 'd'], dtype='<U1') >>> tiles.column(1) array([2, 4]) Args: label (int or str): The index or label of a column Returns: An instance of ``numpy.array``. Raises: ``ValueError``: When the ``index_or_label`` is not in the table.<|endoftext|>
b7a5507397b8080121e639cd4aede919b8b07d382f047704729e2c5229bff233
@property def values(self): 'Return data in `self` as a numpy array.\n\n If all columns are the same dtype, the resulting array\n will have this dtype. If there are >1 dtypes in columns,\n then the resulting array will have dtype `object`.\n ' dtypes = [col.dtype for col in self.columns] if (len(set(dtypes)) > 1): dtype = object else: dtype = None return np.array(self.columns, dtype=dtype).T
Return data in `self` as a numpy array. If all columns are the same dtype, the resulting array will have this dtype. If there are >1 dtypes in columns, then the resulting array will have dtype `object`.
digital-assyriology-review/datascience/tables.py
values
ds-modules/NESTUD-190A
6
python
@property def values(self): 'Return data in `self` as a numpy array.\n\n If all columns are the same dtype, the resulting array\n will have this dtype. If there are >1 dtypes in columns,\n then the resulting array will have dtype `object`.\n ' dtypes = [col.dtype for col in self.columns] if (len(set(dtypes)) > 1): dtype = object else: dtype = None return np.array(self.columns, dtype=dtype).T
@property def values(self): 'Return data in `self` as a numpy array.\n\n If all columns are the same dtype, the resulting array\n will have this dtype. If there are >1 dtypes in columns,\n then the resulting array will have dtype `object`.\n ' dtypes = [col.dtype for col in self.columns] if (len(set(dtypes)) > 1): dtype = object else: dtype = None return np.array(self.columns, dtype=dtype).T<|docstring|>Return data in `self` as a numpy array. If all columns are the same dtype, the resulting array will have this dtype. If there are >1 dtypes in columns, then the resulting array will have dtype `object`.<|endoftext|>
ac2a202b373a27cd3fdf2502245d53f8424fff8a43deee5e8be4a9f822a51744
def column_index(self, label): 'Return the index of a column by looking up its label.' return self.labels.index(label)
Return the index of a column by looking up its label.
digital-assyriology-review/datascience/tables.py
column_index
ds-modules/NESTUD-190A
6
python
def column_index(self, label): return self.labels.index(label)
def column_index(self, label): return self.labels.index(label)<|docstring|>Return the index of a column by looking up its label.<|endoftext|>
8f0eb5f3261a748873d839a28a873479435b020609a3ed672479d6390b030a78
def apply(self, fn, *column_or_columns): 'Apply ``fn`` to each element or elements of ``column_or_columns``.\n If no ``column_or_columns`` provided, `fn`` is applied to each row.\n\n Args:\n ``fn`` (function) -- The function to apply.\n ``column_or_columns``: Columns containing the arguments to ``fn``\n as either column labels (``str``) or column indices (``int``).\n The number of columns must match the number of arguments\n that ``fn`` expects.\n\n Raises:\n ``ValueError`` -- if ``column_label`` is not an existing\n column in the table.\n ``TypeError`` -- if insufficent number of ``column_label`` passed\n to ``fn``.\n\n Returns:\n An array consisting of results of applying ``fn`` to elements\n specified by ``column_label`` in each row.\n\n >>> t = Table().with_columns(\n ... \'letter\', make_array(\'a\', \'b\', \'c\', \'z\'),\n ... \'count\', make_array(9, 3, 3, 1),\n ... \'points\', make_array(1, 2, 2, 10))\n >>> t\n letter | count | points\n a | 9 | 1\n b | 3 | 2\n c | 3 | 2\n z | 1 | 10\n >>> t.apply(lambda x: x - 1, \'points\')\n array([0, 1, 1, 9])\n >>> t.apply(lambda x, y: x * y, \'count\', \'points\')\n array([ 9, 6, 6, 10])\n >>> t.apply(lambda x: x - 1, \'count\', \'points\')\n Traceback (most recent call last):\n ...\n TypeError: <lambda>() takes 1 positional argument but 2 were given\n >>> t.apply(lambda x: x - 1, \'counts\')\n Traceback (most recent call last):\n ...\n ValueError: The column "counts" is not in the table. 
The table contains these columns: letter, count, points\n\n Whole rows are passed to the function if no columns are specified.\n\n >>> t.apply(lambda row: row[1] * 2)\n array([18, 6, 6, 2])\n ' if (not column_or_columns): return np.array([fn(row) for row in self.rows]) else: if ((len(column_or_columns) == 1) and _is_non_string_iterable(column_or_columns[0])): warnings.warn('column lists are deprecated; pass each as an argument', FutureWarning) column_or_columns = column_or_columns[0] rows = zip(*self.select(*column_or_columns).columns) return np.array([fn(*row) for row in rows])
Apply ``fn`` to each element or elements of ``column_or_columns``. If no ``column_or_columns`` provided, `fn`` is applied to each row. Args: ``fn`` (function) -- The function to apply. ``column_or_columns``: Columns containing the arguments to ``fn`` as either column labels (``str``) or column indices (``int``). The number of columns must match the number of arguments that ``fn`` expects. Raises: ``ValueError`` -- if ``column_label`` is not an existing column in the table. ``TypeError`` -- if insufficent number of ``column_label`` passed to ``fn``. Returns: An array consisting of results of applying ``fn`` to elements specified by ``column_label`` in each row. >>> t = Table().with_columns( ... 'letter', make_array('a', 'b', 'c', 'z'), ... 'count', make_array(9, 3, 3, 1), ... 'points', make_array(1, 2, 2, 10)) >>> t letter | count | points a | 9 | 1 b | 3 | 2 c | 3 | 2 z | 1 | 10 >>> t.apply(lambda x: x - 1, 'points') array([0, 1, 1, 9]) >>> t.apply(lambda x, y: x * y, 'count', 'points') array([ 9, 6, 6, 10]) >>> t.apply(lambda x: x - 1, 'count', 'points') Traceback (most recent call last): ... TypeError: <lambda>() takes 1 positional argument but 2 were given >>> t.apply(lambda x: x - 1, 'counts') Traceback (most recent call last): ... ValueError: The column "counts" is not in the table. The table contains these columns: letter, count, points Whole rows are passed to the function if no columns are specified. >>> t.apply(lambda row: row[1] * 2) array([18, 6, 6, 2])
digital-assyriology-review/datascience/tables.py
apply
ds-modules/NESTUD-190A
6
python
def apply(self, fn, *column_or_columns): 'Apply ``fn`` to each element or elements of ``column_or_columns``.\n If no ``column_or_columns`` provided, `fn`` is applied to each row.\n\n Args:\n ``fn`` (function) -- The function to apply.\n ``column_or_columns``: Columns containing the arguments to ``fn``\n as either column labels (``str``) or column indices (``int``).\n The number of columns must match the number of arguments\n that ``fn`` expects.\n\n Raises:\n ``ValueError`` -- if ``column_label`` is not an existing\n column in the table.\n ``TypeError`` -- if insufficent number of ``column_label`` passed\n to ``fn``.\n\n Returns:\n An array consisting of results of applying ``fn`` to elements\n specified by ``column_label`` in each row.\n\n >>> t = Table().with_columns(\n ... \'letter\', make_array(\'a\', \'b\', \'c\', \'z\'),\n ... \'count\', make_array(9, 3, 3, 1),\n ... \'points\', make_array(1, 2, 2, 10))\n >>> t\n letter | count | points\n a | 9 | 1\n b | 3 | 2\n c | 3 | 2\n z | 1 | 10\n >>> t.apply(lambda x: x - 1, \'points\')\n array([0, 1, 1, 9])\n >>> t.apply(lambda x, y: x * y, \'count\', \'points\')\n array([ 9, 6, 6, 10])\n >>> t.apply(lambda x: x - 1, \'count\', \'points\')\n Traceback (most recent call last):\n ...\n TypeError: <lambda>() takes 1 positional argument but 2 were given\n >>> t.apply(lambda x: x - 1, \'counts\')\n Traceback (most recent call last):\n ...\n ValueError: The column "counts" is not in the table. 
The table contains these columns: letter, count, points\n\n Whole rows are passed to the function if no columns are specified.\n\n >>> t.apply(lambda row: row[1] * 2)\n array([18, 6, 6, 2])\n ' if (not column_or_columns): return np.array([fn(row) for row in self.rows]) else: if ((len(column_or_columns) == 1) and _is_non_string_iterable(column_or_columns[0])): warnings.warn('column lists are deprecated; pass each as an argument', FutureWarning) column_or_columns = column_or_columns[0] rows = zip(*self.select(*column_or_columns).columns) return np.array([fn(*row) for row in rows])
def apply(self, fn, *column_or_columns): 'Apply ``fn`` to each element or elements of ``column_or_columns``.\n If no ``column_or_columns`` provided, `fn`` is applied to each row.\n\n Args:\n ``fn`` (function) -- The function to apply.\n ``column_or_columns``: Columns containing the arguments to ``fn``\n as either column labels (``str``) or column indices (``int``).\n The number of columns must match the number of arguments\n that ``fn`` expects.\n\n Raises:\n ``ValueError`` -- if ``column_label`` is not an existing\n column in the table.\n ``TypeError`` -- if insufficent number of ``column_label`` passed\n to ``fn``.\n\n Returns:\n An array consisting of results of applying ``fn`` to elements\n specified by ``column_label`` in each row.\n\n >>> t = Table().with_columns(\n ... \'letter\', make_array(\'a\', \'b\', \'c\', \'z\'),\n ... \'count\', make_array(9, 3, 3, 1),\n ... \'points\', make_array(1, 2, 2, 10))\n >>> t\n letter | count | points\n a | 9 | 1\n b | 3 | 2\n c | 3 | 2\n z | 1 | 10\n >>> t.apply(lambda x: x - 1, \'points\')\n array([0, 1, 1, 9])\n >>> t.apply(lambda x, y: x * y, \'count\', \'points\')\n array([ 9, 6, 6, 10])\n >>> t.apply(lambda x: x - 1, \'count\', \'points\')\n Traceback (most recent call last):\n ...\n TypeError: <lambda>() takes 1 positional argument but 2 were given\n >>> t.apply(lambda x: x - 1, \'counts\')\n Traceback (most recent call last):\n ...\n ValueError: The column "counts" is not in the table. 
The table contains these columns: letter, count, points\n\n Whole rows are passed to the function if no columns are specified.\n\n >>> t.apply(lambda row: row[1] * 2)\n array([18, 6, 6, 2])\n ' if (not column_or_columns): return np.array([fn(row) for row in self.rows]) else: if ((len(column_or_columns) == 1) and _is_non_string_iterable(column_or_columns[0])): warnings.warn('column lists are deprecated; pass each as an argument', FutureWarning) column_or_columns = column_or_columns[0] rows = zip(*self.select(*column_or_columns).columns) return np.array([fn(*row) for row in rows])<|docstring|>Apply ``fn`` to each element or elements of ``column_or_columns``. If no ``column_or_columns`` provided, `fn`` is applied to each row. Args: ``fn`` (function) -- The function to apply. ``column_or_columns``: Columns containing the arguments to ``fn`` as either column labels (``str``) or column indices (``int``). The number of columns must match the number of arguments that ``fn`` expects. Raises: ``ValueError`` -- if ``column_label`` is not an existing column in the table. ``TypeError`` -- if insufficent number of ``column_label`` passed to ``fn``. Returns: An array consisting of results of applying ``fn`` to elements specified by ``column_label`` in each row. >>> t = Table().with_columns( ... 'letter', make_array('a', 'b', 'c', 'z'), ... 'count', make_array(9, 3, 3, 1), ... 'points', make_array(1, 2, 2, 10)) >>> t letter | count | points a | 9 | 1 b | 3 | 2 c | 3 | 2 z | 1 | 10 >>> t.apply(lambda x: x - 1, 'points') array([0, 1, 1, 9]) >>> t.apply(lambda x, y: x * y, 'count', 'points') array([ 9, 6, 6, 10]) >>> t.apply(lambda x: x - 1, 'count', 'points') Traceback (most recent call last): ... TypeError: <lambda>() takes 1 positional argument but 2 were given >>> t.apply(lambda x: x - 1, 'counts') Traceback (most recent call last): ... ValueError: The column "counts" is not in the table. 
The table contains these columns: letter, count, points Whole rows are passed to the function if no columns are specified. >>> t.apply(lambda row: row[1] * 2) array([18, 6, 6, 2])<|endoftext|>
3ff4c951fbabf438552f37c3521656881e0d156f2995652855e77402e6b49e1f
def set_format(self, column_or_columns, formatter): 'Set the format of a column.' if inspect.isclass(formatter): formatter = formatter() if (callable(formatter) and (not hasattr(formatter, 'format_column'))): formatter = _formats.FunctionFormatter(formatter) if (not hasattr(formatter, 'format_column')): raise Exception(('Expected Formatter or function: ' + str(formatter))) for label in self._as_labels(column_or_columns): if formatter.converts_values: self[label] = formatter.convert_column(self[label]) self._formats[label] = formatter return self
Set the format of a column.
digital-assyriology-review/datascience/tables.py
set_format
ds-modules/NESTUD-190A
6
python
def set_format(self, column_or_columns, formatter): if inspect.isclass(formatter): formatter = formatter() if (callable(formatter) and (not hasattr(formatter, 'format_column'))): formatter = _formats.FunctionFormatter(formatter) if (not hasattr(formatter, 'format_column')): raise Exception(('Expected Formatter or function: ' + str(formatter))) for label in self._as_labels(column_or_columns): if formatter.converts_values: self[label] = formatter.convert_column(self[label]) self._formats[label] = formatter return self
def set_format(self, column_or_columns, formatter): if inspect.isclass(formatter): formatter = formatter() if (callable(formatter) and (not hasattr(formatter, 'format_column'))): formatter = _formats.FunctionFormatter(formatter) if (not hasattr(formatter, 'format_column')): raise Exception(('Expected Formatter or function: ' + str(formatter))) for label in self._as_labels(column_or_columns): if formatter.converts_values: self[label] = formatter.convert_column(self[label]) self._formats[label] = formatter return self<|docstring|>Set the format of a column.<|endoftext|>
18eec77f85197cc120890502bdac7d928ee73ba72079365c0ced5daef84780d0
def move_to_start(self, column_label): 'Move a column to the first in order.' self._columns.move_to_end(column_label, last=False) return self
Move a column to the first in order.
digital-assyriology-review/datascience/tables.py
move_to_start
ds-modules/NESTUD-190A
6
python
def move_to_start(self, column_label): self._columns.move_to_end(column_label, last=False) return self
def move_to_start(self, column_label): self._columns.move_to_end(column_label, last=False) return self<|docstring|>Move a column to the first in order.<|endoftext|>
6fa4acb59749020250053a3c8aab4b691b46a834bb9cee0242799dc3cf41f12d
def move_to_end(self, column_label): 'Move a column to the last in order.' self._columns.move_to_end(column_label) return self
Move a column to the last in order.
digital-assyriology-review/datascience/tables.py
move_to_end
ds-modules/NESTUD-190A
6
python
def move_to_end(self, column_label): self._columns.move_to_end(column_label) return self
def move_to_end(self, column_label): self._columns.move_to_end(column_label) return self<|docstring|>Move a column to the last in order.<|endoftext|>
e14aae553a7599f3ceb854da551798f3e1ee4448c6604fe2693ea11a3eeebf00
def append(self, row_or_table): 'Append a row or all rows of a table. An appended table must have all\n columns of self.' if (not row_or_table): return if isinstance(row_or_table, Table): t = row_or_table columns = list(t.select(self.labels)._columns.values()) n = t.num_rows else: if (len(list(row_or_table)) != self.num_columns): raise Exception((('Row should have ' + str(self.num_columns)) + ' columns')) (columns, n) = ([[value] for value in row_or_table], 1) for (i, column) in enumerate(self._columns): if self.num_rows: self._columns[column] = np.append(self[column], columns[i]) else: self._columns[column] = np.array(columns[i]) self._num_rows += n return self
Append a row or all rows of a table. An appended table must have all columns of self.
digital-assyriology-review/datascience/tables.py
append
ds-modules/NESTUD-190A
6
python
def append(self, row_or_table): 'Append a row or all rows of a table. An appended table must have all\n columns of self.' if (not row_or_table): return if isinstance(row_or_table, Table): t = row_or_table columns = list(t.select(self.labels)._columns.values()) n = t.num_rows else: if (len(list(row_or_table)) != self.num_columns): raise Exception((('Row should have ' + str(self.num_columns)) + ' columns')) (columns, n) = ([[value] for value in row_or_table], 1) for (i, column) in enumerate(self._columns): if self.num_rows: self._columns[column] = np.append(self[column], columns[i]) else: self._columns[column] = np.array(columns[i]) self._num_rows += n return self
def append(self, row_or_table): 'Append a row or all rows of a table. An appended table must have all\n columns of self.' if (not row_or_table): return if isinstance(row_or_table, Table): t = row_or_table columns = list(t.select(self.labels)._columns.values()) n = t.num_rows else: if (len(list(row_or_table)) != self.num_columns): raise Exception((('Row should have ' + str(self.num_columns)) + ' columns')) (columns, n) = ([[value] for value in row_or_table], 1) for (i, column) in enumerate(self._columns): if self.num_rows: self._columns[column] = np.append(self[column], columns[i]) else: self._columns[column] = np.array(columns[i]) self._num_rows += n return self<|docstring|>Append a row or all rows of a table. An appended table must have all columns of self.<|endoftext|>