body_hash
stringlengths
64
64
body
stringlengths
23
109k
docstring
stringlengths
1
57k
path
stringlengths
4
198
name
stringlengths
1
115
repository_name
stringlengths
7
111
repository_stars
float64
0
191k
lang
stringclasses
1 value
body_without_docstring
stringlengths
14
108k
unified
stringlengths
45
133k
1e0d0a78a222d0944b5606af39e96d18819f7dd4b596af931a0b9f671d95aad2
def detect(self):
    """Detect the IP address and record it as the current value.

    Chooses the address family from ``self.opts_family`` (IPv6 public vs
    IPv4), queries it via ``detect_ip``, and stores the result with
    ``set_current_value`` — even when detection failed and it is None.

    :return: the detected IP, or None if detection raised GetIpException
    """
    address_kind = IPV6_PUBLIC if self.opts_family == AF_INET6 else IPV4
    detected = None
    try:
        detected = detect_ip(address_kind)
    except GetIpException:
        # Best effort: log the failure and fall through with None.
        LOG.exception('socket detector raised an exception:')
    self.set_current_value(detected)
    return detected
Detect the IP address.
dyndnsc/detector/socket_ip.py
detect
infothrill/python-dyndnsc
35
python
def detect(self): if (self.opts_family == AF_INET6): kind = IPV6_PUBLIC else: kind = IPV4 theip = None try: theip = detect_ip(kind) except GetIpException: LOG.exception('socket detector raised an exception:') self.set_current_value(theip) return theip
def detect(self): if (self.opts_family == AF_INET6): kind = IPV6_PUBLIC else: kind = IPV4 theip = None try: theip = detect_ip(kind) except GetIpException: LOG.exception('socket detector raised an exception:') self.set_current_value(theip) return theip<|docstring|>Detect the IP address.<|endoftext|>
713d7e7f4927cbd79c86788aaae265afb5ead0f85a2cdd5b6301b717324e06fb
def compare_sockets(cs, socket1, socket2):
    """Recursively compare two Socket objects for semantic equality.

    Buffer attributes are compared with `compare_buffers` (solver-backed,
    under constraint set *cs*); peer Sockets are compared by recursion.

    :param cs: ConstraintSet used by `Z3Solver.instance().must_be_true()`
        when comparing the buffers
    :param socket1: first Socket object (or None)
    :param socket2: second Socket object (or None)
    :return: True if the Socket objects are equal, False otherwise
    """
    if socket1 is None or socket2 is None:
        # Missing endpoints are equal only when both are missing.
        return socket1 is socket2
    buffers_equal = compare_buffers(cs, socket1.buffer, socket2.buffer)
    return buffers_equal and compare_sockets(cs, socket1.peer, socket2.peer)
This method compares Socket objects for equality using the buffer and peer attributes. It uses `compare_buffers` for checking buffer attributes for equality. It calls itself for comparing peer Socket objects. Returns True if the Socket objects are equal, false otherwise. :param cs: ConstraintSet to be used for checking Socket.buffer for semantic equality using `Z3Solver.instance().must_be_true()` :param socket1: one of two Socket objects to be compared for equality against socket2 :param socket2: one of two Socket objects to be compared for equality against socket1 :return: True, if the Socket objects are found to be equal, False otherwise
manticore/native/state_merging.py
compare_sockets
dmgress/manticore
0
python
def compare_sockets(cs, socket1, socket2): '\n This method compares Socket objects for equality using the buffer and peer attributes.\n It uses `compare_buffers` for checking buffer attributes for equality.\n It calls itself for comparing peer Socket objects.\n Returns True if the Socket objects are equal, false otherwise.\n :param cs: ConstraintSet to be used for checking Socket.buffer for semantic equality using `Z3Solver.instance().must_be_true()`\n :param socket1: one of two Socket objects to be compared for equality against socket2\n :param socket2: one of two Socket objects to be compared for equality against socket1\n :return: True, if the Socket objects are found to be equal, False otherwise\n ' if (socket1 is None): return (socket2 is None) if (socket2 is None): return (socket1 is None) if (not compare_buffers(cs, socket1.buffer, socket2.buffer)): return False return compare_sockets(cs, socket1.peer, socket2.peer)
def compare_sockets(cs, socket1, socket2): '\n This method compares Socket objects for equality using the buffer and peer attributes.\n It uses `compare_buffers` for checking buffer attributes for equality.\n It calls itself for comparing peer Socket objects.\n Returns True if the Socket objects are equal, false otherwise.\n :param cs: ConstraintSet to be used for checking Socket.buffer for semantic equality using `Z3Solver.instance().must_be_true()`\n :param socket1: one of two Socket objects to be compared for equality against socket2\n :param socket2: one of two Socket objects to be compared for equality against socket1\n :return: True, if the Socket objects are found to be equal, False otherwise\n ' if (socket1 is None): return (socket2 is None) if (socket2 is None): return (socket1 is None) if (not compare_buffers(cs, socket1.buffer, socket2.buffer)): return False return compare_sockets(cs, socket1.peer, socket2.peer)<|docstring|>This method compares Socket objects for equality using the buffer and peer attributes. It uses `compare_buffers` for checking buffer attributes for equality. It calls itself for comparing peer Socket objects. Returns True if the Socket objects are equal, false otherwise. :param cs: ConstraintSet to be used for checking Socket.buffer for semantic equality using `Z3Solver.instance().must_be_true()` :param socket1: one of two Socket objects to be compared for equality against socket2 :param socket2: one of two Socket objects to be compared for equality against socket1 :return: True, if the Socket objects are found to be equal, False otherwise<|endoftext|>
66216e71e35eecb4e813de483ad48c32b5c34256ee43d793fa70a4c9db108fff
def compare_buffers(cs, buffer1, buffer2):
    """Compare two List objects element-wise for semantic equality.

    Each element pair must be provably equal via
    `Z3Solver.instance().must_be_true()` under constraint set *cs*.

    :param cs: ConstraintSet used for the solver queries
    :param buffer1: first List object
    :param buffer2: second List object
    :return: True if the List objects are equal, False otherwise
    """
    if len(buffer1) != len(buffer2):
        return False
    # Lazily queries the solver; vacuously True for empty buffers.
    return all(
        Z3Solver.instance().must_be_true(cs, lhs == rhs)
        for lhs, rhs in zip(buffer1, buffer2)
    )
This method compares the two List objects for equality using the `Z3Solver.instance().must_be_true()` call. :param cs: ConstraintSet to be used for checking buffer1 for semantic equality with buffer2 using `Z3Solver.instance().must_be_true()` :param buffer1: one of two List objects to be compared for equality against buffer2 :param buffer2: one of two List objects to be compared for equality against buffer1 :return: True, if the List objects are equal, False otherwise
manticore/native/state_merging.py
compare_buffers
dmgress/manticore
0
python
def compare_buffers(cs, buffer1, buffer2): '\n This method compares the two List objects for equality using the `Z3Solver.instance().must_be_true()` call.\n :param cs: ConstraintSet to be used for checking buffer1 for semantic equality with buffer2 using `Z3Solver.instance().must_be_true()`\n :param buffer1: one of two List objects to be compared for equality against buffer2\n :param buffer2: one of two List objects to be compared for equality against buffer1\n :return: True, if the List objects are equal, False otherwise\n ' if (len(buffer1) != len(buffer2)): return False for (b1, b2) in zip(buffer1, buffer2): if (not Z3Solver.instance().must_be_true(cs, (b1 == b2))): return False return True
def compare_buffers(cs, buffer1, buffer2): '\n This method compares the two List objects for equality using the `Z3Solver.instance().must_be_true()` call.\n :param cs: ConstraintSet to be used for checking buffer1 for semantic equality with buffer2 using `Z3Solver.instance().must_be_true()`\n :param buffer1: one of two List objects to be compared for equality against buffer2\n :param buffer2: one of two List objects to be compared for equality against buffer1\n :return: True, if the List objects are equal, False otherwise\n ' if (len(buffer1) != len(buffer2)): return False for (b1, b2) in zip(buffer1, buffer2): if (not Z3Solver.instance().must_be_true(cs, (b1 == b2))): return False return True<|docstring|>This method compares the two List objects for equality using the `Z3Solver.instance().must_be_true()` call. :param cs: ConstraintSet to be used for checking buffer1 for semantic equality with buffer2 using `Z3Solver.instance().must_be_true()` :param buffer1: one of two List objects to be compared for equality against buffer2 :param buffer2: one of two List objects to be compared for equality against buffer1 :return: True, if the List objects are equal, False otherwise<|endoftext|>
ed58b83dc4d32102d66896c8e33fb59124d0be26022cb28a512452488b50d4a7
def merge_constraints(constraints1, constraints2):
    """Combine two ConstraintSet objects into a disjunctive merge.

    :param constraints1: one of two ConstraintSet objects to be merged
    :param constraints2: second of two ConstraintSet objects to be merged
    :return: (exp1, exp2, merged) where exp1 and exp2 are the conjunctions
        of all constraints in constraints1 and constraints2 respectively,
        and merged is a new ConstraintSet holding the single constraint
        (exp1 | exp2)
    """
    def _conjoin(constraint_set):
        # Fold the constraint list into a single conjunction expression.
        # Note: requires at least one constraint, as in the original code.
        combined = constraint_set.constraints[0]
        for extra in constraint_set.constraints[1:]:
            combined = combined & extra
        return combined

    exp1 = _conjoin(constraints1)
    exp2 = _conjoin(constraints2)
    merged_constraint = ConstraintSet()
    merged_constraint.add(exp1 | exp2)
    return (exp1, exp2, merged_constraint)
:param constraints1: one of two ConstraintSet objects to be merged :param constraints2: second of two ConstraintSet objects to be merged :return: (Expression, Expression, ConstraintSet) where the first and second Expression objects are conjunctions of of all the constraints in constraints1 and constraints2 respectively. The ConstraintSet is an object that contains a single constraint that is a logical OR of these two Expression objects.
manticore/native/state_merging.py
merge_constraints
dmgress/manticore
0
python
def merge_constraints(constraints1, constraints2): '\n :param constraints1: one of two ConstraintSet objects to be merged\n :param constraints2: second of two ConstraintSet objects to be merged\n :return: (Expression, Expression, ConstraintSet) where the first and second Expression objects are conjunctions of\n of all the constraints in constraints1 and constraints2 respectively. The ConstraintSet is an object that contains\n a single constraint that is a logical OR of these two Expression objects.\n ' exp1 = constraints1.constraints[0] for i in range(1, len(constraints1.constraints)): exp1 = (exp1 & constraints1.constraints[i]) exp2 = constraints2.constraints[0] for i in range(1, len(constraints2.constraints)): exp2 = (exp2 & constraints2.constraints[i]) merged_constraint = ConstraintSet() merged_constraint.add((exp1 | exp2)) return (exp1, exp2, merged_constraint)
def merge_constraints(constraints1, constraints2): '\n :param constraints1: one of two ConstraintSet objects to be merged\n :param constraints2: second of two ConstraintSet objects to be merged\n :return: (Expression, Expression, ConstraintSet) where the first and second Expression objects are conjunctions of\n of all the constraints in constraints1 and constraints2 respectively. The ConstraintSet is an object that contains\n a single constraint that is a logical OR of these two Expression objects.\n ' exp1 = constraints1.constraints[0] for i in range(1, len(constraints1.constraints)): exp1 = (exp1 & constraints1.constraints[i]) exp2 = constraints2.constraints[0] for i in range(1, len(constraints2.constraints)): exp2 = (exp2 & constraints2.constraints[i]) merged_constraint = ConstraintSet() merged_constraint.add((exp1 | exp2)) return (exp1, exp2, merged_constraint)<|docstring|>:param constraints1: one of two ConstraintSet objects to be merged :param constraints2: second of two ConstraintSet objects to be merged :return: (Expression, Expression, ConstraintSet) where the first and second Expression objects are conjunctions of of all the constraints in constraints1 and constraints2 respectively. The ConstraintSet is an object that contains a single constraint that is a logical OR of these two Expression objects.<|endoftext|>
52c021d1a65832d3d417f0904a75e178ad8305be94c6e59f23489a5112bda5c8
def compare_byte_vals(mem1, mem2, addr, merged_constraint):
    """Compare the 1-byte values at address *addr* in two memory objects.

    :param mem1: first memory object
    :param mem2: second memory object
    :param addr: address at which the byte values are compared
    :param merged_constraint: ConstraintSet used by the
        `Z3Solver.instance().must_be_true()` query
    :return: True if the bytes at *addr* are semantically equal, else False
    """
    byte1 = mem1.read(addr, 1)
    byte2 = mem2.read(addr, 1)
    # Reads are for exactly one byte each.
    assert len(byte1) == 1 and len(byte2) == 1
    return bool(Z3Solver.instance().must_be_true(merged_constraint, byte1[0] == byte2[0]))
Compares values in memory at address `addr`, returns True if they are semantically equal, False otherwise :param mem1: first of two memory objects we want to use for comparison :param mem2: second of two memory objects we want to use for comparison :param addr: address at which bytes values are to be compared :param merged_constraint: ConstraintSet to be used when using the call to `Z3Solver.instance().must_be_true()` :return: returns True if 1 byte values at address `addr` in `mem1` and `mem2` are semantically equal, False otherwise
manticore/native/state_merging.py
compare_byte_vals
dmgress/manticore
0
python
def compare_byte_vals(mem1, mem2, addr, merged_constraint): '\n Compares values in memory at address `addr`, returns True if they are semantically equal, False otherwise\n :param mem1: first of two memory objects we want to use for comparison\n :param mem2: second of two memory objects we want to use for comparison\n :param addr: address at which bytes values are to be compared\n :param merged_constraint: ConstraintSet to be used when using the call to `Z3Solver.instance().must_be_true()`\n :return: returns True if 1 byte values at address `addr` in `mem1` and `mem2` are semantically equal, False otherwise\n ' val1 = mem1.read(addr, 1) val2 = mem2.read(addr, 1) assert ((len(val1) == 1) and (len(val2) == 1)) cond_to_check = (val1[0] == val2[0]) if (not Z3Solver.instance().must_be_true(merged_constraint, cond_to_check)): return False else: return True
def compare_byte_vals(mem1, mem2, addr, merged_constraint): '\n Compares values in memory at address `addr`, returns True if they are semantically equal, False otherwise\n :param mem1: first of two memory objects we want to use for comparison\n :param mem2: second of two memory objects we want to use for comparison\n :param addr: address at which bytes values are to be compared\n :param merged_constraint: ConstraintSet to be used when using the call to `Z3Solver.instance().must_be_true()`\n :return: returns True if 1 byte values at address `addr` in `mem1` and `mem2` are semantically equal, False otherwise\n ' val1 = mem1.read(addr, 1) val2 = mem2.read(addr, 1) assert ((len(val1) == 1) and (len(val2) == 1)) cond_to_check = (val1[0] == val2[0]) if (not Z3Solver.instance().must_be_true(merged_constraint, cond_to_check)): return False else: return True<|docstring|>Compares values in memory at address `addr`, returns True if they are semantically equal, False otherwise :param mem1: first of two memory objects we want to use for comparison :param mem2: second of two memory objects we want to use for comparison :param addr: address at which bytes values are to be compared :param merged_constraint: ConstraintSet to be used when using the call to `Z3Solver.instance().must_be_true()` :return: returns True if 1 byte values at address `addr` in `mem1` and `mem2` are semantically equal, False otherwise<|endoftext|>
8f5fb88d0df55f45254abbf4d9d8092da1cb24a78ca6b3069dd02d6e68342c5d
def compare_mem(mem1, mem2, merged_constraint):
    """Compare two memory objects for (semantic) equality.

    Compares the number of maps, the maps themselves (name, permissions,
    start, end), and their concrete byte contents. If those all match and
    the memories are symbolic (SMemory), every symbolic address from either
    side is additionally compared via `compare_byte_vals`.

    Fix over the original: the symbolic-byte phases were guarded with
    ``ret_val is not None``, which is True after a mismatch was already
    found — so every solver query still ran before returning False. Early
    returns skip that wasted work; the returned value is unchanged.

    :param mem1: one of two memory objects to be compared
    :param mem2: second of two memory objects to be compared
    :param merged_constraint: ConstraintSet used with
        `Z3Solver.instance().must_be_true()` for semantic byte comparison
    :return: True, if the memory objects are equal, False otherwise
    """
    maps1 = sorted(mem1.maps)
    maps2 = sorted(mem2.maps)
    if len(maps1) != len(maps2):
        return False
    for m1, m2 in zip(maps1, maps2):
        # Map metadata (name, permissions, start, end) must match exactly.
        if m1 != m2:
            return False
        # Concrete byte contents must match as well.
        if m1[m1.start:m1.end] != m2[m2.start:m2.end]:
            return False
    # Symbolic memories also carry per-address symbolic bytes; compare every
    # symbolic address from either side exactly once (set gives O(1) lookup).
    checked_addrs = set()
    if mem1.__class__.__name__ == 'SMemory':
        for addr in mem1._symbols:
            checked_addrs.add(addr)
            if not compare_byte_vals(mem1, mem2, addr, merged_constraint):
                return False
    if mem2.__class__.__name__ == 'SMemory':
        for addr in mem2._symbols:
            if addr not in checked_addrs:
                if not compare_byte_vals(mem1, mem2, addr, merged_constraint):
                    return False
    return True
This method compares the number of maps, and then their names, permissions, start, and end values. If they all match, then it compares the concrete byte values for equality. If those match too, it then compares _symbols attribute values for equality if the two memory objects are of type SMemory. :param mem1: one of two memory objects to be compared :param mem2: second of two memory objects to be compared :param merged_constraint: ConstraintSet object that is to be used with `Z3Solver.instance().must_be_true()` calls to check the memory objects for semantic equality :return: True, if the memory objects are equal, False otherwise
manticore/native/state_merging.py
compare_mem
dmgress/manticore
0
python
def compare_mem(mem1, mem2, merged_constraint): '\n This method compares the number of maps, and then their names, permissions, start, and end values.\n If they all match, then it compares the concrete byte values for equality.\n If those match too, it then compares _symbols attribute values for equality if the two memory objects are of\n type SMemory.\n :param mem1: one of two memory objects to be compared\n :param mem2: second of two memory objects to be compared\n :param merged_constraint: ConstraintSet object that is to be used with `Z3Solver.instance().must_be_true()` calls to check the\n memory objects for semantic equality\n :return: True, if the memory objects are equal, False otherwise\n ' maps1 = sorted(list(mem1.maps)) maps2 = sorted(list(mem2.maps)) if (len(maps1) != len(maps2)): return False ret_val = None for (m1, m2) in zip(maps1, maps2): if (m1 != m2): ret_val = False break bytes1 = m1[m1.start:m1.end] bytes2 = m2[m2.start:m2.end] if (bytes1 != bytes2): ret_val = False break checked_addrs = [] if ((mem1.__class__.__name__ == 'SMemory') and (ret_val is not None)): for (addr1, _) in mem1._symbols.items(): checked_addrs.append(addr1) if (not compare_byte_vals(mem1, mem2, addr1, merged_constraint)): ret_val = False break if ((mem2.__class__.__name__ == 'SMemory') and (ret_val is not None)): for (addr2, _) in mem2._symbols.items(): if (addr2 not in checked_addrs): if (not compare_byte_vals(mem1, mem2, addr2, merged_constraint)): ret_val = False break if (ret_val is not None): return ret_val else: return True
def compare_mem(mem1, mem2, merged_constraint): '\n This method compares the number of maps, and then their names, permissions, start, and end values.\n If they all match, then it compares the concrete byte values for equality.\n If those match too, it then compares _symbols attribute values for equality if the two memory objects are of\n type SMemory.\n :param mem1: one of two memory objects to be compared\n :param mem2: second of two memory objects to be compared\n :param merged_constraint: ConstraintSet object that is to be used with `Z3Solver.instance().must_be_true()` calls to check the\n memory objects for semantic equality\n :return: True, if the memory objects are equal, False otherwise\n ' maps1 = sorted(list(mem1.maps)) maps2 = sorted(list(mem2.maps)) if (len(maps1) != len(maps2)): return False ret_val = None for (m1, m2) in zip(maps1, maps2): if (m1 != m2): ret_val = False break bytes1 = m1[m1.start:m1.end] bytes2 = m2[m2.start:m2.end] if (bytes1 != bytes2): ret_val = False break checked_addrs = [] if ((mem1.__class__.__name__ == 'SMemory') and (ret_val is not None)): for (addr1, _) in mem1._symbols.items(): checked_addrs.append(addr1) if (not compare_byte_vals(mem1, mem2, addr1, merged_constraint)): ret_val = False break if ((mem2.__class__.__name__ == 'SMemory') and (ret_val is not None)): for (addr2, _) in mem2._symbols.items(): if (addr2 not in checked_addrs): if (not compare_byte_vals(mem1, mem2, addr2, merged_constraint)): ret_val = False break if (ret_val is not None): return ret_val else: return True<|docstring|>This method compares the number of maps, and then their names, permissions, start, and end values. If they all match, then it compares the concrete byte values for equality. If those match too, it then compares _symbols attribute values for equality if the two memory objects are of type SMemory. 
:param mem1: one of two memory objects to be compared :param mem2: second of two memory objects to be compared :param merged_constraint: ConstraintSet object that is to be used with `Z3Solver.instance().must_be_true()` calls to check the memory objects for semantic equality :return: True, if the memory objects are equal, False otherwise<|endoftext|>
79b1989fc59aaff6673b41ae861936e752d9a07d7cefdf1ebb8f8a8927d4dcf7
def is_merge_possible(state1, state2, merged_constraint):
    """Decide whether two states can be merged.

    Input/output sockets, symbolic files, the syscall trace, and memory are
    checked for (semantic) equality, in that order, under
    *merged_constraint*.

    :param state1: one of two possible states we want to check for mergeability
    :param state2: second of two possible states we want to check for mergeability
    :param merged_constraint: ConstraintSet of merged constraints for state1 and state2
    :return: (True, None) when mergeable, otherwise (False, reason)
    """
    platform1 = state1.platform
    platform2 = state2.platform
    sockets_equal = (
        compare_sockets(merged_constraint, platform1.input, platform2.input)
        and compare_sockets(merged_constraint, platform1.output, platform2.output)
    )
    if not sockets_equal:
        return (False, 'inequivalent socket operations')
    if platform1.symbolic_files != platform2.symbolic_files:
        return (False, 'inequivalent symbolic files')
    if len(platform1.syscall_trace) != len(platform2.syscall_trace):
        return (False, 'inequivalent syscall trace lengths')
    for (name1, fd1, data1), (name2, fd2, data2) in zip(
        platform1.syscall_trace, platform2.syscall_trace
    ):
        if not (name1 == name2 and fd1 == fd2
                and compare_buffers(merged_constraint, data1, data2)):
            return (False, 'inequivalent syscall traces')
    if not compare_mem(state1.mem, state2.mem, merged_constraint):
        return (False, 'inequivalent memory')
    return (True, None)
Checks if a merge is possible by checking Input, Output sockets, symbolic_files, syscall_trace, and memory for equality. :param state1: one of two possible states we want to check for mergeability :param state2: second of two possible states we want to check for mergeability :param merged_constraint: ConstraintSet of merged constraints for state1 and state2 :return: True, if state1 and state2 can be merged, False if otherwise
manticore/native/state_merging.py
is_merge_possible
dmgress/manticore
0
python
def is_merge_possible(state1, state2, merged_constraint): '\n Checks if a merge is possible by checking Input, Output sockets, symbolic_files, syscall_trace, and memory\n for equality.\n :param state1: one of two possible states we want to check for mergeability\n :param state2: second of two possible states we want to check for mergeability\n :param merged_constraint: ConstraintSet of merged constraints for state1 and state2\n :return: True, if state1 and state2 can be merged, False if otherwise\n ' platform1 = state1.platform platform2 = state2.platform ret_val = None if ((not compare_sockets(merged_constraint, platform1.input, platform2.input)) or (not compare_sockets(merged_constraint, platform1.output, platform2.output))): ret_val = (False, 'inequivalent socket operations') if ((ret_val is None) and (platform1.symbolic_files != platform2.symbolic_files)): ret_val = (False, 'inequivalent symbolic files') if ((ret_val is None) and (len(platform1.syscall_trace) != len(platform2.syscall_trace))): ret_val = (False, 'inequivalent syscall trace lengths') if (ret_val is None): for (i, (name1, fd1, data1)) in enumerate(platform1.syscall_trace): (name2, fd2, data2) = platform2.syscall_trace[i] if (not ((name1 == name2) and (fd1 == fd2) and compare_buffers(merged_constraint, data1, data2))): ret_val = (False, 'inequivalent syscall traces') break if ((ret_val is None) and (not compare_mem(state1.mem, state2.mem, merged_constraint))): ret_val = (False, 'inequivalent memory') if (ret_val is not None): return ret_val else: return (True, None)
def is_merge_possible(state1, state2, merged_constraint): '\n Checks if a merge is possible by checking Input, Output sockets, symbolic_files, syscall_trace, and memory\n for equality.\n :param state1: one of two possible states we want to check for mergeability\n :param state2: second of two possible states we want to check for mergeability\n :param merged_constraint: ConstraintSet of merged constraints for state1 and state2\n :return: True, if state1 and state2 can be merged, False if otherwise\n ' platform1 = state1.platform platform2 = state2.platform ret_val = None if ((not compare_sockets(merged_constraint, platform1.input, platform2.input)) or (not compare_sockets(merged_constraint, platform1.output, platform2.output))): ret_val = (False, 'inequivalent socket operations') if ((ret_val is None) and (platform1.symbolic_files != platform2.symbolic_files)): ret_val = (False, 'inequivalent symbolic files') if ((ret_val is None) and (len(platform1.syscall_trace) != len(platform2.syscall_trace))): ret_val = (False, 'inequivalent syscall trace lengths') if (ret_val is None): for (i, (name1, fd1, data1)) in enumerate(platform1.syscall_trace): (name2, fd2, data2) = platform2.syscall_trace[i] if (not ((name1 == name2) and (fd1 == fd2) and compare_buffers(merged_constraint, data1, data2))): ret_val = (False, 'inequivalent syscall traces') break if ((ret_val is None) and (not compare_mem(state1.mem, state2.mem, merged_constraint))): ret_val = (False, 'inequivalent memory') if (ret_val is not None): return ret_val else: return (True, None)<|docstring|>Checks if a merge is possible by checking Input, Output sockets, symbolic_files, syscall_trace, and memory for equality. 
:param state1: one of two possible states we want to check for mergeability :param state2: second of two possible states we want to check for mergeability :param merged_constraint: ConstraintSet of merged constraints for state1 and state2 :return: True, if state1 and state2 can be merged, False if otherwise<|endoftext|>
7c2e34078e703da8e7d36d4df4397a61c7653064700b0be4b66e9dbd9fefb885
def merge_cpu(cpu1, cpu2, state, exp1, merged_constraint):
    """Merge the register files of two CPU objects into state.cpu.

    For every canonical register whose two values may differ under
    *merged_constraint* (as proven by `Z3Solver.instance().must_be_true()`),
    write an if-then-else expression into state.cpu that takes cpu1's value
    when *exp1* holds and cpu2's value otherwise.

    :param cpu1: one of two CPU objects that we wish to merge
    :param cpu2: second of two CPU objects that we wish to merge
    :param state: the state whose CPU attribute we will be updating
    :param exp1: the condition selecting cpu1's register values over cpu2's
    :param merged_constraint: ConstraintSet under which inequality between
        the register values is checked
    :return: List of registers that were merged
    """
    merged_regs = []
    for reg in cpu1.canonical_registers:
        val1 = cpu1.read_register(reg)
        val2 = cpu2.read_register(reg)
        if isinstance(val1, BitVec) and isinstance(val2, BitVec):
            assert val1.size == val2.size
        may_differ = issymbolic(val1) or issymbolic(val2) or val1 != val2
        if may_differ and Z3Solver.instance().must_be_true(merged_constraint, val1 != val2):
            merged_regs.append(reg)
            width = cpu1.regfile.sizeof(reg)
            if width == 1:
                # 1-bit registers (flags) use the plain boolean ITE form.
                merged_value = Operators.ITE(exp1, val1, val2)
            else:
                merged_value = Operators.ITEBV(width, exp1, val1, val2)
            state.cpu.write_register(reg, merged_value)
    return merged_regs
Merge CPU objects into the state.CPU :param cpu1: one of two CPU objects that we wish to merge :param cpu2: second of two CPU objects that we wish to merge :param state: the state whose CPU attribute we will be updating :param exp1: the expression that if satisfiable will cause the CPU registers to take corresponding values from `cpu1`, else they will take corresponding values from `cpu2` :param merged_constraint: ConstraintSet under which we would want inequality between CPU register values to be satisfiable as checked using `Z3Solver.instance().must_be_true()` :return: List of registers that were merged
manticore/native/state_merging.py
merge_cpu
dmgress/manticore
0
python
def merge_cpu(cpu1, cpu2, state, exp1, merged_constraint): '\n Merge CPU objects into the state.CPU\n :param cpu1: one of two CPU objects that we wish to merge\n :param cpu2: second of two CPU objects that we wish to merge\n :param state: the state whose CPU attribute we will be updating\n :param exp1: the expression that if satisfiable will cause the CPU registers to take corresponding values from\n `cpu1`, else they will take corresponding values from `cpu2`\n :param merged_constraint: ConstraintSet under which we would want inequality between CPU register values to be\n satisfiable as checked using `Z3Solver.instance().must_be_true()`\n :return: List of registers that were merged\n ' merged_regs = [] for reg in cpu1.canonical_registers: val1 = cpu1.read_register(reg) val2 = cpu2.read_register(reg) if (isinstance(val1, BitVec) and isinstance(val2, BitVec)): assert (val1.size == val2.size) if (issymbolic(val1) or issymbolic(val2) or (val1 != val2)): if Z3Solver.instance().must_be_true(merged_constraint, (val1 != val2)): merged_regs.append(reg) if (cpu1.regfile.sizeof(reg) == 1): state.cpu.write_register(reg, Operators.ITE(exp1, val1, val2)) else: state.cpu.write_register(reg, Operators.ITEBV(cpu1.regfile.sizeof(reg), exp1, val1, val2)) return merged_regs
def merge_cpu(cpu1, cpu2, state, exp1, merged_constraint): '\n Merge CPU objects into the state.CPU\n :param cpu1: one of two CPU objects that we wish to merge\n :param cpu2: second of two CPU objects that we wish to merge\n :param state: the state whose CPU attribute we will be updating\n :param exp1: the expression that if satisfiable will cause the CPU registers to take corresponding values from\n `cpu1`, else they will take corresponding values from `cpu2`\n :param merged_constraint: ConstraintSet under which we would want inequality between CPU register values to be\n satisfiable as checked using `Z3Solver.instance().must_be_true()`\n :return: List of registers that were merged\n ' merged_regs = [] for reg in cpu1.canonical_registers: val1 = cpu1.read_register(reg) val2 = cpu2.read_register(reg) if (isinstance(val1, BitVec) and isinstance(val2, BitVec)): assert (val1.size == val2.size) if (issymbolic(val1) or issymbolic(val2) or (val1 != val2)): if Z3Solver.instance().must_be_true(merged_constraint, (val1 != val2)): merged_regs.append(reg) if (cpu1.regfile.sizeof(reg) == 1): state.cpu.write_register(reg, Operators.ITE(exp1, val1, val2)) else: state.cpu.write_register(reg, Operators.ITEBV(cpu1.regfile.sizeof(reg), exp1, val1, val2)) return merged_regs<|docstring|>Merge CPU objects into the state.CPU :param cpu1: one of two CPU objects that we wish to merge :param cpu2: second of two CPU objects that we wish to merge :param state: the state whose CPU attribute we will be updating :param exp1: the expression that if satisfiable will cause the CPU registers to take corresponding values from `cpu1`, else they will take corresponding values from `cpu2` :param merged_constraint: ConstraintSet under which we would want inequality between CPU register values to be satisfiable as checked using `Z3Solver.instance().must_be_true()` :return: List of registers that were merged<|endoftext|>
ce06dc4c3b7a5d689c52afa82c04a820c20163c1db20d361feddaa412cb75577
def merge(state1, state2, exp1, merged_constraint): '\n Merge state1 and state2 into a single state\n :param state1:\n :param state2:\n :param exp1:\n :param merged_constraint:\n :return: the state that is the result of the merging of `state1` and `state2`\n ' merged_state = state1 merged_reg_list = merge_cpu(state1.cpu, state2.cpu, merged_state, exp1, merged_constraint) print('Merged registers: ') print(*merged_reg_list, sep=',') merged_state.constraints = merged_constraint return merged_state
Merge state1 and state2 into a single state :param state1: :param state2: :param exp1: :param merged_constraint: :return: the state that is the result of the merging of `state1` and `state2`
manticore/native/state_merging.py
merge
dmgress/manticore
0
python
def merge(state1, state2, exp1, merged_constraint): '\n Merge state1 and state2 into a single state\n :param state1:\n :param state2:\n :param exp1:\n :param merged_constraint:\n :return: the state that is the result of the merging of `state1` and `state2`\n ' merged_state = state1 merged_reg_list = merge_cpu(state1.cpu, state2.cpu, merged_state, exp1, merged_constraint) print('Merged registers: ') print(*merged_reg_list, sep=',') merged_state.constraints = merged_constraint return merged_state
def merge(state1, state2, exp1, merged_constraint): '\n Merge state1 and state2 into a single state\n :param state1:\n :param state2:\n :param exp1:\n :param merged_constraint:\n :return: the state that is the result of the merging of `state1` and `state2`\n ' merged_state = state1 merged_reg_list = merge_cpu(state1.cpu, state2.cpu, merged_state, exp1, merged_constraint) print('Merged registers: ') print(*merged_reg_list, sep=',') merged_state.constraints = merged_constraint return merged_state<|docstring|>Merge state1 and state2 into a single state :param state1: :param state2: :param exp1: :param merged_constraint: :return: the state that is the result of the merging of `state1` and `state2`<|endoftext|>
c7d823734a4ed03a03c7333bc8b40d2e3ab30f765899399e20c9b8558b1d7466
def __init__(self, *args, **kwargs): '\n Map widget to plot glacier velocities\n ' self.dct = dctools() self.config = {'plot': 'v', 'min_separation_days': 5, 'max_separation_days': 90, 'color_by': 'points'} self.color_index = 0 self.icon_color_index = 0 self._last_click = None self._initialize_widgets()
Map widget to plot glacier velocities
notebooks/velocity_widget.py
__init__
nasa-jpl/its_live
18
python
def __init__(self, *args, **kwargs): '\n \n ' self.dct = dctools() self.config = {'plot': 'v', 'min_separation_days': 5, 'max_separation_days': 90, 'color_by': 'points'} self.color_index = 0 self.icon_color_index = 0 self._last_click = None self._initialize_widgets()
def __init__(self, *args, **kwargs): '\n \n ' self.dct = dctools() self.config = {'plot': 'v', 'min_separation_days': 5, 'max_separation_days': 90, 'color_by': 'points'} self.color_index = 0 self.icon_color_index = 0 self._last_click = None self._initialize_widgets()<|docstring|>Map widget to plot glacier velocities<|endoftext|>
cb620f170aea52ef1a35e97c76a1e5fa407a8c4ed2b65d6879d5ed902ade5abc
def runningMean(self, mid_dates, variable, minpts, tFreq): '\n mid_dates: center dates of `variable` data [datetime64]\n variable: data to be average\n minpts: minimum number of points needed for a valid value, else filled with nan\n tFreq: the spacing between centered averages in Days, default window size = tFreq*2\n ' tsmin = pd.Timestamp(np.min(mid_dates)) tsmax = pd.Timestamp(np.max(mid_dates)) ts = pd.date_range(start=tsmin, end=tsmax, freq=f'{tFreq}D') ts = pd.to_datetime(ts).values idx0 = (~ np.isnan(variable)) runmean = np.empty([(len(ts) - 1), 1]) runmean[:] = np.nan tsmean = ts[0:(- 1)] t_np = mid_dates.astype(np.int64) for i in range((len(ts) - 1)): idx = (((mid_dates >= (ts[i] - np.timedelta64(int((tFreq / 2)), 'D'))) & (mid_dates < (ts[(i + 1)] + np.timedelta64(int((tFreq / 2)), 'D')))) & idx0) if (sum(idx) >= minpts): runmean[i] = np.mean(variable[idx]) tsmean[i] = np.mean(t_np[idx]) tsmean = pd.to_datetime(tsmean).values return (runmean, tsmean)
mid_dates: center dates of `variable` data [datetime64] variable: data to be average minpts: minimum number of points needed for a valid value, else filled with nan tFreq: the spacing between centered averages in Days, default window size = tFreq*2
notebooks/velocity_widget.py
runningMean
nasa-jpl/its_live
18
python
def runningMean(self, mid_dates, variable, minpts, tFreq): '\n mid_dates: center dates of `variable` data [datetime64]\n variable: data to be average\n minpts: minimum number of points needed for a valid value, else filled with nan\n tFreq: the spacing between centered averages in Days, default window size = tFreq*2\n ' tsmin = pd.Timestamp(np.min(mid_dates)) tsmax = pd.Timestamp(np.max(mid_dates)) ts = pd.date_range(start=tsmin, end=tsmax, freq=f'{tFreq}D') ts = pd.to_datetime(ts).values idx0 = (~ np.isnan(variable)) runmean = np.empty([(len(ts) - 1), 1]) runmean[:] = np.nan tsmean = ts[0:(- 1)] t_np = mid_dates.astype(np.int64) for i in range((len(ts) - 1)): idx = (((mid_dates >= (ts[i] - np.timedelta64(int((tFreq / 2)), 'D'))) & (mid_dates < (ts[(i + 1)] + np.timedelta64(int((tFreq / 2)), 'D')))) & idx0) if (sum(idx) >= minpts): runmean[i] = np.mean(variable[idx]) tsmean[i] = np.mean(t_np[idx]) tsmean = pd.to_datetime(tsmean).values return (runmean, tsmean)
def runningMean(self, mid_dates, variable, minpts, tFreq): '\n mid_dates: center dates of `variable` data [datetime64]\n variable: data to be average\n minpts: minimum number of points needed for a valid value, else filled with nan\n tFreq: the spacing between centered averages in Days, default window size = tFreq*2\n ' tsmin = pd.Timestamp(np.min(mid_dates)) tsmax = pd.Timestamp(np.max(mid_dates)) ts = pd.date_range(start=tsmin, end=tsmax, freq=f'{tFreq}D') ts = pd.to_datetime(ts).values idx0 = (~ np.isnan(variable)) runmean = np.empty([(len(ts) - 1), 1]) runmean[:] = np.nan tsmean = ts[0:(- 1)] t_np = mid_dates.astype(np.int64) for i in range((len(ts) - 1)): idx = (((mid_dates >= (ts[i] - np.timedelta64(int((tFreq / 2)), 'D'))) & (mid_dates < (ts[(i + 1)] + np.timedelta64(int((tFreq / 2)), 'D')))) & idx0) if (sum(idx) >= minpts): runmean[i] = np.mean(variable[idx]) tsmean[i] = np.mean(t_np[idx]) tsmean = pd.to_datetime(tsmean).values return (runmean, tsmean)<|docstring|>mid_dates: center dates of `variable` data [datetime64] variable: data to be average minpts: minimum number of points needed for a valid value, else filled with nan tFreq: the spacing between centered averages in Days, default window size = tFreq*2<|endoftext|>
f77f562c6d30a1e896df5d0eacf148986d36fdf133ae4ecc7875de4f0f3e29da
def removeDuplicates(self, nums): '\n :type nums: List[int]\n :rtype: int\n ' if (len(nums) < 2): return len(nums) previous = 0 for i in range(1, len(nums)): if (nums[i] != nums[previous]): previous += 1 nums[previous] = nums[i] return (previous + 1)
:type nums: List[int] :rtype: int
Misc/026_RemoveDuplicatesFromAnArray.py
removeDuplicates
PsiPhiTheta/LeetCode
1
python
def removeDuplicates(self, nums): '\n :type nums: List[int]\n :rtype: int\n ' if (len(nums) < 2): return len(nums) previous = 0 for i in range(1, len(nums)): if (nums[i] != nums[previous]): previous += 1 nums[previous] = nums[i] return (previous + 1)
def removeDuplicates(self, nums): '\n :type nums: List[int]\n :rtype: int\n ' if (len(nums) < 2): return len(nums) previous = 0 for i in range(1, len(nums)): if (nums[i] != nums[previous]): previous += 1 nums[previous] = nums[i] return (previous + 1)<|docstring|>:type nums: List[int] :rtype: int<|endoftext|>
7f7580ac232ac5bfc56983fab87bae584d6667fb648691952ad3658fb8172e3b
def _not_none_and_len(string: str) -> bool: 'helper to figure out if not none and string is populated' is_str = isinstance(string, str) has_len = (False if (re.match('\\S{5,}', '') is None) else True) status = (True if (has_len and is_str) else False) return status
helper to figure out if not none and string is populated
src/ck_tools/main.py
_not_none_and_len
knu2xs/pdal-explor
0
python
def _not_none_and_len(string: str) -> bool: is_str = isinstance(string, str) has_len = (False if (re.match('\\S{5,}', ) is None) else True) status = (True if (has_len and is_str) else False) return status
def _not_none_and_len(string: str) -> bool: is_str = isinstance(string, str) has_len = (False if (re.match('\\S{5,}', ) is None) else True) status = (True if (has_len and is_str) else False) return status<|docstring|>helper to figure out if not none and string is populated<|endoftext|>
16de9f2fbc848ab7636e18096d74b3a471215b53a9cc7a7aa081df33154262a3
def add_group(gis: GIS=None, group_name: str=None) -> Group: '\n Add a group to the GIS for the project for saving resources.\n\n Args:\n gis: Optional\n arcgis.gis.GIS object instance.\n group_name: Optional\n Group to be added to the cloud GIS for storing project resources. Default\n is to load from the .env file. If a group name is not provided, and one is\n not located in the .env file, an exception will be raised.\n\n Returns: Group\n ' load_dotenv(find_dotenv()) if ((gis is None) and isinstance(active_gis, GIS)): gis = active_gis if ((gis is None) and (not isinstance(active_gis, GIS))): url = os.getenv('ESRI_GIS_URL') usr = os.getenv('ESRI_GIS_USERNAME') pswd = os.getenv('ESRI_GIS_PASSWORD') if (group_name is None): group_name = os.getenv('ESRI_GIS_GROUP') err_msg = 'A group name must either be defined in the .env file or explicitly provided.' assert isinstance(group_name, str), err_msg assert len(group_name), err_msg cmgr = gis.groups assert (len([grp for grp in cmgr.search() if (grp.title.lower() == group_name.lower())]) is 0), f'A group named "{group_name}" already exists. Please select another group name.' grp = cmgr.create(group_name) assert isinstance(grp, Group), 'Failed to create the group in the Cloud GIS.' return grp
Add a group to the GIS for the project for saving resources. Args: gis: Optional arcgis.gis.GIS object instance. group_name: Optional Group to be added to the cloud GIS for storing project resources. Default is to load from the .env file. If a group name is not provided, and one is not located in the .env file, an exception will be raised. Returns: Group
src/ck_tools/main.py
add_group
knu2xs/pdal-explor
0
python
def add_group(gis: GIS=None, group_name: str=None) -> Group: '\n Add a group to the GIS for the project for saving resources.\n\n Args:\n gis: Optional\n arcgis.gis.GIS object instance.\n group_name: Optional\n Group to be added to the cloud GIS for storing project resources. Default\n is to load from the .env file. If a group name is not provided, and one is\n not located in the .env file, an exception will be raised.\n\n Returns: Group\n ' load_dotenv(find_dotenv()) if ((gis is None) and isinstance(active_gis, GIS)): gis = active_gis if ((gis is None) and (not isinstance(active_gis, GIS))): url = os.getenv('ESRI_GIS_URL') usr = os.getenv('ESRI_GIS_USERNAME') pswd = os.getenv('ESRI_GIS_PASSWORD') if (group_name is None): group_name = os.getenv('ESRI_GIS_GROUP') err_msg = 'A group name must either be defined in the .env file or explicitly provided.' assert isinstance(group_name, str), err_msg assert len(group_name), err_msg cmgr = gis.groups assert (len([grp for grp in cmgr.search() if (grp.title.lower() == group_name.lower())]) is 0), f'A group named "{group_name}" already exists. Please select another group name.' grp = cmgr.create(group_name) assert isinstance(grp, Group), 'Failed to create the group in the Cloud GIS.' return grp
def add_group(gis: GIS=None, group_name: str=None) -> Group: '\n Add a group to the GIS for the project for saving resources.\n\n Args:\n gis: Optional\n arcgis.gis.GIS object instance.\n group_name: Optional\n Group to be added to the cloud GIS for storing project resources. Default\n is to load from the .env file. If a group name is not provided, and one is\n not located in the .env file, an exception will be raised.\n\n Returns: Group\n ' load_dotenv(find_dotenv()) if ((gis is None) and isinstance(active_gis, GIS)): gis = active_gis if ((gis is None) and (not isinstance(active_gis, GIS))): url = os.getenv('ESRI_GIS_URL') usr = os.getenv('ESRI_GIS_USERNAME') pswd = os.getenv('ESRI_GIS_PASSWORD') if (group_name is None): group_name = os.getenv('ESRI_GIS_GROUP') err_msg = 'A group name must either be defined in the .env file or explicitly provided.' assert isinstance(group_name, str), err_msg assert len(group_name), err_msg cmgr = gis.groups assert (len([grp for grp in cmgr.search() if (grp.title.lower() == group_name.lower())]) is 0), f'A group named "{group_name}" already exists. Please select another group name.' grp = cmgr.create(group_name) assert isinstance(grp, Group), 'Failed to create the group in the Cloud GIS.' return grp<|docstring|>Add a group to the GIS for the project for saving resources. Args: gis: Optional arcgis.gis.GIS object instance. group_name: Optional Group to be added to the cloud GIS for storing project resources. Default is to load from the .env file. If a group name is not provided, and one is not located in the .env file, an exception will be raised. Returns: Group<|endoftext|>
688db6657438bb4c5be1653725661d6724fa83ca052009dc40d3cdfbb4726e63
def _assert_maps(maps, galaxy): 'Basic checks for a Maps object.' assert (maps is not None) assert (maps.plateifu == galaxy.plateifu) assert (maps.mangaid == galaxy.mangaid) assert (maps.wcs is not None) assert (maps.bintype == galaxy.bintype) assert (len(maps._shape) == len(galaxy.shape)) for ii in range(len(maps._shape)): assert (maps._shape[ii] == galaxy.shape[ii])
Basic checks for a Maps object.
tests/tools/test_maps.py
_assert_maps
karenlmasters/marvin
49
python
def _assert_maps(maps, galaxy): assert (maps is not None) assert (maps.plateifu == galaxy.plateifu) assert (maps.mangaid == galaxy.mangaid) assert (maps.wcs is not None) assert (maps.bintype == galaxy.bintype) assert (len(maps._shape) == len(galaxy.shape)) for ii in range(len(maps._shape)): assert (maps._shape[ii] == galaxy.shape[ii])
def _assert_maps(maps, galaxy): assert (maps is not None) assert (maps.plateifu == galaxy.plateifu) assert (maps.mangaid == galaxy.mangaid) assert (maps.wcs is not None) assert (maps.bintype == galaxy.bintype) assert (len(maps._shape) == len(galaxy.shape)) for ii in range(len(maps._shape)): assert (maps._shape[ii] == galaxy.shape[ii])<|docstring|>Basic checks for a Maps object.<|endoftext|>
161fd4beb20d39aa8676943b8f0c23a8bdb3caf21a79e62bc58c5b18db6311c8
def lockout(func): '\n Decorator which requires that the target instrument be unlocked or the supplied key matches the lock holder\n ' @wraps(func) def inner(driver_id): key = get_from_request('key') locker = page.lock_manager[driver_id] if ((locker is None) or (locker == key)): return func(driver_id) raise Locked({'locked-by': locker}) return inner
Decorator which requires that the target instrument be unlocked or the supplied key matches the lock holder
ooi_instrument_agent/views.py
lockout
oceanobservatories/ooi-instrument-agent
0
python
def lockout(func): '\n \n ' @wraps(func) def inner(driver_id): key = get_from_request('key') locker = page.lock_manager[driver_id] if ((locker is None) or (locker == key)): return func(driver_id) raise Locked({'locked-by': locker}) return inner
def lockout(func): '\n \n ' @wraps(func) def inner(driver_id): key = get_from_request('key') locker = page.lock_manager[driver_id] if ((locker is None) or (locker == key)): return func(driver_id) raise Locked({'locked-by': locker}) return inner<|docstring|>Decorator which requires that the target instrument be unlocked or the supplied key matches the lock holder<|endoftext|>
4ab82a7267b48913d321d77a911fd8e4e8db97925cc8de0949655ae439f5c922
def __init__(self, url, token, name='', verify_ssl=True, lazy=False): '\n Parameters\n ----------\n url : str\n API URL to your REDCap server\n token : str\n API token to your project\n name : str, optional\n name for project\n verify_ssl : boolean, str\n Verify SSL, default True. Can pass path to CA_BUNDLE.\n ' self.token = token self.name = name self.url = url self.verify = verify_ssl self.metadata = None self.redcap_version = None self.field_names = None self.def_field = None self.field_labels = None self.forms = None self.events = None self.arm_nums = None self.arm_names = None self.configured = False if (not lazy): self.configure()
Parameters ---------- url : str API URL to your REDCap server token : str API token to your project name : str, optional name for project verify_ssl : boolean, str Verify SSL, default True. Can pass path to CA_BUNDLE.
redcap/project.py
__init__
sujaypatil96/PyCap
0
python
def __init__(self, url, token, name=, verify_ssl=True, lazy=False): '\n Parameters\n ----------\n url : str\n API URL to your REDCap server\n token : str\n API token to your project\n name : str, optional\n name for project\n verify_ssl : boolean, str\n Verify SSL, default True. Can pass path to CA_BUNDLE.\n ' self.token = token self.name = name self.url = url self.verify = verify_ssl self.metadata = None self.redcap_version = None self.field_names = None self.def_field = None self.field_labels = None self.forms = None self.events = None self.arm_nums = None self.arm_names = None self.configured = False if (not lazy): self.configure()
def __init__(self, url, token, name=, verify_ssl=True, lazy=False): '\n Parameters\n ----------\n url : str\n API URL to your REDCap server\n token : str\n API token to your project\n name : str, optional\n name for project\n verify_ssl : boolean, str\n Verify SSL, default True. Can pass path to CA_BUNDLE.\n ' self.token = token self.name = name self.url = url self.verify = verify_ssl self.metadata = None self.redcap_version = None self.field_names = None self.def_field = None self.field_labels = None self.forms = None self.events = None self.arm_nums = None self.arm_names = None self.configured = False if (not lazy): self.configure()<|docstring|>Parameters ---------- url : str API URL to your REDCap server token : str API token to your project name : str, optional name for project verify_ssl : boolean, str Verify SSL, default True. Can pass path to CA_BUNDLE.<|endoftext|>
fc314251f952f4a9ca50e35eaa993f36a708e803f8e03e5248e57f9a4f97532a
def __md(self): "Return the project's metadata structure" p_l = self.__basepl('metadata') p_l['content'] = 'metadata' return self._call_api(p_l, 'metadata')[0]
Return the project's metadata structure
redcap/project.py
__md
sujaypatil96/PyCap
0
python
def __md(self): p_l = self.__basepl('metadata') p_l['content'] = 'metadata' return self._call_api(p_l, 'metadata')[0]
def __md(self): p_l = self.__basepl('metadata') p_l['content'] = 'metadata' return self._call_api(p_l, 'metadata')[0]<|docstring|>Return the project's metadata structure<|endoftext|>
8f4e437c4662fc677c63e42d114b6d6803cc64fd4dae9f019e8f32c735a379d5
def __basepl(self, content, rec_type='flat', format='json'): 'Return a dictionary which can be used as is or added to for\n payloads' d = {'token': self.token, 'content': content, 'format': format} if (content not in ['metadata', 'file']): d['type'] = rec_type return d
Return a dictionary which can be used as is or added to for payloads
redcap/project.py
__basepl
sujaypatil96/PyCap
0
python
def __basepl(self, content, rec_type='flat', format='json'): 'Return a dictionary which can be used as is or added to for\n payloads' d = {'token': self.token, 'content': content, 'format': format} if (content not in ['metadata', 'file']): d['type'] = rec_type return d
def __basepl(self, content, rec_type='flat', format='json'): 'Return a dictionary which can be used as is or added to for\n payloads' d = {'token': self.token, 'content': content, 'format': format} if (content not in ['metadata', 'file']): d['type'] = rec_type return d<|docstring|>Return a dictionary which can be used as is or added to for payloads<|endoftext|>
ab222c64781b143e1de3eabba51d29a656e47b4fbd4146c472d1033e6f01426c
def is_longitudinal(self): '\n Returns\n -------\n boolean :\n longitudinal status of this project\n ' return ((len(self.events) > 0) and (len(self.arm_nums) > 0) and (len(self.arm_names) > 0))
Returns ------- boolean : longitudinal status of this project
redcap/project.py
is_longitudinal
sujaypatil96/PyCap
0
python
def is_longitudinal(self): '\n Returns\n -------\n boolean :\n longitudinal status of this project\n ' return ((len(self.events) > 0) and (len(self.arm_nums) > 0) and (len(self.arm_names) > 0))
def is_longitudinal(self): '\n Returns\n -------\n boolean :\n longitudinal status of this project\n ' return ((len(self.events) > 0) and (len(self.arm_nums) > 0) and (len(self.arm_names) > 0))<|docstring|>Returns ------- boolean : longitudinal status of this project<|endoftext|>
660cdf0d035e10aef2c2b6b97954cba6541d0248528ae4b2939a7172a8f3267a
def filter_metadata(self, key): "\n Return a list of values for the metadata key from each field\n of the project's metadata.\n\n Parameters\n ----------\n key: str\n A known key in the metadata structure\n\n Returns\n -------\n filtered :\n attribute list from each field\n " filtered = [field[key] for field in self.metadata if (key in field)] if (len(filtered) == 0): raise KeyError('Key not found in metadata') return filtered
Return a list of values for the metadata key from each field of the project's metadata. Parameters ---------- key: str A known key in the metadata structure Returns ------- filtered : attribute list from each field
redcap/project.py
filter_metadata
sujaypatil96/PyCap
0
python
def filter_metadata(self, key): "\n Return a list of values for the metadata key from each field\n of the project's metadata.\n\n Parameters\n ----------\n key: str\n A known key in the metadata structure\n\n Returns\n -------\n filtered :\n attribute list from each field\n " filtered = [field[key] for field in self.metadata if (key in field)] if (len(filtered) == 0): raise KeyError('Key not found in metadata') return filtered
def filter_metadata(self, key): "\n Return a list of values for the metadata key from each field\n of the project's metadata.\n\n Parameters\n ----------\n key: str\n A known key in the metadata structure\n\n Returns\n -------\n filtered :\n attribute list from each field\n " filtered = [field[key] for field in self.metadata if (key in field)] if (len(filtered) == 0): raise KeyError('Key not found in metadata') return filtered<|docstring|>Return a list of values for the metadata key from each field of the project's metadata. Parameters ---------- key: str A known key in the metadata structure Returns ------- filtered : attribute list from each field<|endoftext|>
359dcb9e39a8fb85b55c4ba5a1411a7953223b7fb385983c317972628fffc8c0
def _kwargs(self): 'Private method to build a dict for sending to RCRequest\n\n Other default kwargs to the http library should go here' return {'verify': self.verify}
Private method to build a dict for sending to RCRequest Other default kwargs to the http library should go here
redcap/project.py
_kwargs
sujaypatil96/PyCap
0
python
def _kwargs(self): 'Private method to build a dict for sending to RCRequest\n\n Other default kwargs to the http library should go here' return {'verify': self.verify}
def _kwargs(self): 'Private method to build a dict for sending to RCRequest\n\n Other default kwargs to the http library should go here' return {'verify': self.verify}<|docstring|>Private method to build a dict for sending to RCRequest Other default kwargs to the http library should go here<|endoftext|>
b8e07b00da74a8825bdd561a92bff5c401ffeb8b8ac214e516cbc916886f5404
def export_fem(self, arms=None, format='json', df_kwargs=None): "\n Export the project's form to event mapping\n\n Parameters\n ----------\n arms : list\n Limit exported form event mappings to these arm numbers\n format : (``'json'``), ``'csv'``, ``'xml'``\n Return the form event mappings in native objects,\n csv or xml, ``'df''`` will return a ``pandas.DataFrame``\n df_kwargs : dict\n Passed to pandas.read_csv to control construction of\n returned DataFrame\n\n Returns\n -------\n fem : list, str, ``pandas.DataFrame``\n form-event mapping for the project\n " ret_format = format if (format == 'df'): from pandas import read_csv ret_format = 'csv' pl = self.__basepl('formEventMapping', format=ret_format) if arms: for (i, value) in enumerate(arms): pl['arms[{}]'.format(i)] = value (response, _) = self._call_api(pl, 'exp_fem') if (format in ('json', 'csv', 'xml')): return response elif (format == 'df'): if (not df_kwargs): return read_csv(StringIO(response)) else: return read_csv(StringIO(response), **df_kwargs)
Export the project's form to event mapping Parameters ---------- arms : list Limit exported form event mappings to these arm numbers format : (``'json'``), ``'csv'``, ``'xml'`` Return the form event mappings in native objects, csv or xml, ``'df''`` will return a ``pandas.DataFrame`` df_kwargs : dict Passed to pandas.read_csv to control construction of returned DataFrame Returns ------- fem : list, str, ``pandas.DataFrame`` form-event mapping for the project
redcap/project.py
export_fem
sujaypatil96/PyCap
0
python
def export_fem(self, arms=None, format='json', df_kwargs=None): "\n Export the project's form to event mapping\n\n Parameters\n ----------\n arms : list\n Limit exported form event mappings to these arm numbers\n format : (``'json'``), ``'csv'``, ``'xml'``\n Return the form event mappings in native objects,\n csv or xml, ``'df`` will return a ``pandas.DataFrame``\n df_kwargs : dict\n Passed to pandas.read_csv to control construction of\n returned DataFrame\n\n Returns\n -------\n fem : list, str, ``pandas.DataFrame``\n form-event mapping for the project\n " ret_format = format if (format == 'df'): from pandas import read_csv ret_format = 'csv' pl = self.__basepl('formEventMapping', format=ret_format) if arms: for (i, value) in enumerate(arms): pl['arms[{}]'.format(i)] = value (response, _) = self._call_api(pl, 'exp_fem') if (format in ('json', 'csv', 'xml')): return response elif (format == 'df'): if (not df_kwargs): return read_csv(StringIO(response)) else: return read_csv(StringIO(response), **df_kwargs)
def export_fem(self, arms=None, format='json', df_kwargs=None): "\n Export the project's form to event mapping\n\n Parameters\n ----------\n arms : list\n Limit exported form event mappings to these arm numbers\n format : (``'json'``), ``'csv'``, ``'xml'``\n Return the form event mappings in native objects,\n csv or xml, ``'df`` will return a ``pandas.DataFrame``\n df_kwargs : dict\n Passed to pandas.read_csv to control construction of\n returned DataFrame\n\n Returns\n -------\n fem : list, str, ``pandas.DataFrame``\n form-event mapping for the project\n " ret_format = format if (format == 'df'): from pandas import read_csv ret_format = 'csv' pl = self.__basepl('formEventMapping', format=ret_format) if arms: for (i, value) in enumerate(arms): pl['arms[{}]'.format(i)] = value (response, _) = self._call_api(pl, 'exp_fem') if (format in ('json', 'csv', 'xml')): return response elif (format == 'df'): if (not df_kwargs): return read_csv(StringIO(response)) else: return read_csv(StringIO(response), **df_kwargs)<|docstring|>Export the project's form to event mapping Parameters ---------- arms : list Limit exported form event mappings to these arm numbers format : (``'json'``), ``'csv'``, ``'xml'`` Return the form event mappings in native objects, csv or xml, ``'df''`` will return a ``pandas.DataFrame`` df_kwargs : dict Passed to pandas.read_csv to control construction of returned DataFrame Returns ------- fem : list, str, ``pandas.DataFrame`` form-event mapping for the project<|endoftext|>
de6fd39c6b1619c45afb25b3f69e96cd66e774447d0773c0ab020ceb8c6ee2fd
def export_metadata(self, fields=None, forms=None, format='json', df_kwargs=None): "\n Export the project's metadata\n\n Parameters\n ----------\n fields : list\n Limit exported metadata to these fields\n forms : list\n Limit exported metadata to these forms\n format : (``'json'``), ``'csv'``, ``'xml'``, ``'df'``\n Return the metadata in native objects, csv or xml.\n ``'df'`` will return a ``pandas.DataFrame``.\n df_kwargs : dict\n Passed to ``pandas.read_csv`` to control construction of\n returned DataFrame.\n by default ``{'index_col': 'field_name'}``\n\n Returns\n -------\n metadata : list, str, ``pandas.DataFrame``\n metadata sttructure for the project.\n " ret_format = format if (format == 'df'): from pandas import read_csv ret_format = 'csv' pl = self.__basepl('metadata', format=ret_format) to_add = [fields, forms] str_add = ['fields', 'forms'] for (key, data) in zip(str_add, to_add): if data: for (i, value) in enumerate(data): pl['{}[{}]'.format(key, i)] = value (response, _) = self._call_api(pl, 'metadata') if (format in ('json', 'csv', 'xml')): return response elif (format == 'df'): if (not df_kwargs): df_kwargs = {'index_col': 'field_name'} return read_csv(StringIO(response), **df_kwargs)
Export the project's metadata Parameters ---------- fields : list Limit exported metadata to these fields forms : list Limit exported metadata to these forms format : (``'json'``), ``'csv'``, ``'xml'``, ``'df'`` Return the metadata in native objects, csv or xml. ``'df'`` will return a ``pandas.DataFrame``. df_kwargs : dict Passed to ``pandas.read_csv`` to control construction of returned DataFrame. by default ``{'index_col': 'field_name'}`` Returns ------- metadata : list, str, ``pandas.DataFrame`` metadata sttructure for the project.
redcap/project.py
export_metadata
sujaypatil96/PyCap
0
python
def export_metadata(self, fields=None, forms=None, format='json', df_kwargs=None): "\n Export the project's metadata\n\n Parameters\n ----------\n fields : list\n Limit exported metadata to these fields\n forms : list\n Limit exported metadata to these forms\n format : (``'json'``), ``'csv'``, ``'xml'``, ``'df'``\n Return the metadata in native objects, csv or xml.\n ``'df'`` will return a ``pandas.DataFrame``.\n df_kwargs : dict\n Passed to ``pandas.read_csv`` to control construction of\n returned DataFrame.\n by default ``{'index_col': 'field_name'}``\n\n Returns\n -------\n metadata : list, str, ``pandas.DataFrame``\n metadata sttructure for the project.\n " ret_format = format if (format == 'df'): from pandas import read_csv ret_format = 'csv' pl = self.__basepl('metadata', format=ret_format) to_add = [fields, forms] str_add = ['fields', 'forms'] for (key, data) in zip(str_add, to_add): if data: for (i, value) in enumerate(data): pl['{}[{}]'.format(key, i)] = value (response, _) = self._call_api(pl, 'metadata') if (format in ('json', 'csv', 'xml')): return response elif (format == 'df'): if (not df_kwargs): df_kwargs = {'index_col': 'field_name'} return read_csv(StringIO(response), **df_kwargs)
def export_metadata(self, fields=None, forms=None, format='json', df_kwargs=None): "\n Export the project's metadata\n\n Parameters\n ----------\n fields : list\n Limit exported metadata to these fields\n forms : list\n Limit exported metadata to these forms\n format : (``'json'``), ``'csv'``, ``'xml'``, ``'df'``\n Return the metadata in native objects, csv or xml.\n ``'df'`` will return a ``pandas.DataFrame``.\n df_kwargs : dict\n Passed to ``pandas.read_csv`` to control construction of\n returned DataFrame.\n by default ``{'index_col': 'field_name'}``\n\n Returns\n -------\n metadata : list, str, ``pandas.DataFrame``\n metadata sttructure for the project.\n " ret_format = format if (format == 'df'): from pandas import read_csv ret_format = 'csv' pl = self.__basepl('metadata', format=ret_format) to_add = [fields, forms] str_add = ['fields', 'forms'] for (key, data) in zip(str_add, to_add): if data: for (i, value) in enumerate(data): pl['{}[{}]'.format(key, i)] = value (response, _) = self._call_api(pl, 'metadata') if (format in ('json', 'csv', 'xml')): return response elif (format == 'df'): if (not df_kwargs): df_kwargs = {'index_col': 'field_name'} return read_csv(StringIO(response), **df_kwargs)<|docstring|>Export the project's metadata Parameters ---------- fields : list Limit exported metadata to these fields forms : list Limit exported metadata to these forms format : (``'json'``), ``'csv'``, ``'xml'``, ``'df'`` Return the metadata in native objects, csv or xml. ``'df'`` will return a ``pandas.DataFrame``. df_kwargs : dict Passed to ``pandas.read_csv`` to control construction of returned DataFrame. by default ``{'index_col': 'field_name'}`` Returns ------- metadata : list, str, ``pandas.DataFrame`` metadata sttructure for the project.<|endoftext|>
bc773bdaa07ba33785c359858270901d8aa2acffcbe28d274033545ca7e23f15
def export_records(self, records=None, fields=None, forms=None, events=None, raw_or_label='raw', event_name='label', format='json', export_survey_fields=False, export_data_access_groups=False, df_kwargs=None, export_checkbox_labels=False, filter_logic=None): '\n Export data from the REDCap project.\n\n Parameters\n ----------\n records : list\n array of record names specifying specific records to export.\n by default, all records are exported\n fields : list\n array of field names specifying specific fields to pull\n by default, all fields are exported\n forms : list\n array of form names to export. If in the web UI, the form\n name has a space in it, replace the space with an underscore\n by default, all forms are exported\n events : list\n an array of unique event names from which to export records\n\n :note: this only applies to longitudinal projects\n raw_or_label : (``\'raw\'``), ``\'label\'``, ``\'both\'``\n export the raw coded values or labels for the options of\n multiple choice fields, or both\n event_name : (``\'label\'``), ``\'unique\'``\n export the unique event name or the event label\n format : (``\'json\'``), ``\'csv\'``, ``\'xml\'``, ``\'df\'``\n Format of returned data. ``\'json\'`` returns json-decoded\n objects while ``\'csv\'`` and ``\'xml\'`` return other formats.\n ``\'df\'`` will attempt to return a ``pandas.DataFrame``.\n export_survey_fields : (``False``), True\n specifies whether or not to export the survey identifier\n field (e.g., "redcap_survey_identifier") or survey timestamp\n fields (e.g., form_name+"_timestamp") when surveys are\n utilized in the project.\n export_data_access_groups : (``False``), ``True``\n specifies whether or not to export the\n ``"redcap_data_access_group"`` field when data access groups\n are utilized in the project.\n\n :note: This flag is only viable if the user whose token is\n being used to make the API request is *not* in a data\n access group. 
If the user is in a group, then this flag\n will revert to its default value.\n df_kwargs : dict\n Passed to ``pandas.read_csv`` to control construction of\n returned DataFrame.\n by default, ``{\'index_col\': self.def_field}``\n export_checkbox_labels : (``False``), ``True``\n specify whether to export checkbox values as their label on\n export.\n filter_logic : string\n specify the filterLogic to be sent to the API.\n\n Returns\n -------\n data : list, str, ``pandas.DataFrame``\n exported data\n ' ret_format = format if (format == 'df'): from pandas import read_csv ret_format = 'csv' pl = self.__basepl('record', format=ret_format) fields = self.backfill_fields(fields, forms) keys_to_add = (records, fields, forms, events, raw_or_label, event_name, export_survey_fields, export_data_access_groups, export_checkbox_labels) str_keys = ('records', 'fields', 'forms', 'events', 'rawOrLabel', 'eventName', 'exportSurveyFields', 'exportDataAccessGroups', 'exportCheckboxLabel') for (key, data) in zip(str_keys, keys_to_add): if data: if (key in ('fields', 'records', 'forms', 'events')): for (i, value) in enumerate(data): pl['{}[{}]'.format(key, i)] = value else: pl[key] = data if filter_logic: pl['filterLogic'] = filter_logic (response, _) = self._call_api(pl, 'exp_record') if (format in ('json', 'csv', 'xml')): return response elif (format == 'df'): if (not df_kwargs): if self.is_longitudinal(): df_kwargs = {'index_col': [self.def_field, 'redcap_event_name']} else: df_kwargs = {'index_col': self.def_field} buf = StringIO(response) df = read_csv(buf, **df_kwargs) buf.close() return df
Export data from the REDCap project. Parameters ---------- records : list array of record names specifying specific records to export. by default, all records are exported fields : list array of field names specifying specific fields to pull by default, all fields are exported forms : list array of form names to export. If in the web UI, the form name has a space in it, replace the space with an underscore by default, all forms are exported events : list an array of unique event names from which to export records :note: this only applies to longitudinal projects raw_or_label : (``'raw'``), ``'label'``, ``'both'`` export the raw coded values or labels for the options of multiple choice fields, or both event_name : (``'label'``), ``'unique'`` export the unique event name or the event label format : (``'json'``), ``'csv'``, ``'xml'``, ``'df'`` Format of returned data. ``'json'`` returns json-decoded objects while ``'csv'`` and ``'xml'`` return other formats. ``'df'`` will attempt to return a ``pandas.DataFrame``. export_survey_fields : (``False``), True specifies whether or not to export the survey identifier field (e.g., "redcap_survey_identifier") or survey timestamp fields (e.g., form_name+"_timestamp") when surveys are utilized in the project. export_data_access_groups : (``False``), ``True`` specifies whether or not to export the ``"redcap_data_access_group"`` field when data access groups are utilized in the project. :note: This flag is only viable if the user whose token is being used to make the API request is *not* in a data access group. If the user is in a group, then this flag will revert to its default value. df_kwargs : dict Passed to ``pandas.read_csv`` to control construction of returned DataFrame. by default, ``{'index_col': self.def_field}`` export_checkbox_labels : (``False``), ``True`` specify whether to export checkbox values as their label on export. filter_logic : string specify the filterLogic to be sent to the API. 
Returns ------- data : list, str, ``pandas.DataFrame`` exported data
redcap/project.py
export_records
sujaypatil96/PyCap
0
python
def export_records(self, records=None, fields=None, forms=None, events=None, raw_or_label='raw', event_name='label', format='json', export_survey_fields=False, export_data_access_groups=False, df_kwargs=None, export_checkbox_labels=False, filter_logic=None): '\n Export data from the REDCap project.\n\n Parameters\n ----------\n records : list\n array of record names specifying specific records to export.\n by default, all records are exported\n fields : list\n array of field names specifying specific fields to pull\n by default, all fields are exported\n forms : list\n array of form names to export. If in the web UI, the form\n name has a space in it, replace the space with an underscore\n by default, all forms are exported\n events : list\n an array of unique event names from which to export records\n\n :note: this only applies to longitudinal projects\n raw_or_label : (``\'raw\'``), ``\'label\'``, ``\'both\'``\n export the raw coded values or labels for the options of\n multiple choice fields, or both\n event_name : (``\'label\'``), ``\'unique\'``\n export the unique event name or the event label\n format : (``\'json\'``), ``\'csv\'``, ``\'xml\'``, ``\'df\'``\n Format of returned data. ``\'json\'`` returns json-decoded\n objects while ``\'csv\'`` and ``\'xml\'`` return other formats.\n ``\'df\'`` will attempt to return a ``pandas.DataFrame``.\n export_survey_fields : (``False``), True\n specifies whether or not to export the survey identifier\n field (e.g., "redcap_survey_identifier") or survey timestamp\n fields (e.g., form_name+"_timestamp") when surveys are\n utilized in the project.\n export_data_access_groups : (``False``), ``True``\n specifies whether or not to export the\n ``"redcap_data_access_group"`` field when data access groups\n are utilized in the project.\n\n :note: This flag is only viable if the user whose token is\n being used to make the API request is *not* in a data\n access group. 
If the user is in a group, then this flag\n will revert to its default value.\n df_kwargs : dict\n Passed to ``pandas.read_csv`` to control construction of\n returned DataFrame.\n by default, ``{\'index_col\': self.def_field}``\n export_checkbox_labels : (``False``), ``True``\n specify whether to export checkbox values as their label on\n export.\n filter_logic : string\n specify the filterLogic to be sent to the API.\n\n Returns\n -------\n data : list, str, ``pandas.DataFrame``\n exported data\n ' ret_format = format if (format == 'df'): from pandas import read_csv ret_format = 'csv' pl = self.__basepl('record', format=ret_format) fields = self.backfill_fields(fields, forms) keys_to_add = (records, fields, forms, events, raw_or_label, event_name, export_survey_fields, export_data_access_groups, export_checkbox_labels) str_keys = ('records', 'fields', 'forms', 'events', 'rawOrLabel', 'eventName', 'exportSurveyFields', 'exportDataAccessGroups', 'exportCheckboxLabel') for (key, data) in zip(str_keys, keys_to_add): if data: if (key in ('fields', 'records', 'forms', 'events')): for (i, value) in enumerate(data): pl['{}[{}]'.format(key, i)] = value else: pl[key] = data if filter_logic: pl['filterLogic'] = filter_logic (response, _) = self._call_api(pl, 'exp_record') if (format in ('json', 'csv', 'xml')): return response elif (format == 'df'): if (not df_kwargs): if self.is_longitudinal(): df_kwargs = {'index_col': [self.def_field, 'redcap_event_name']} else: df_kwargs = {'index_col': self.def_field} buf = StringIO(response) df = read_csv(buf, **df_kwargs) buf.close() return df
def export_records(self, records=None, fields=None, forms=None, events=None, raw_or_label='raw', event_name='label', format='json', export_survey_fields=False, export_data_access_groups=False, df_kwargs=None, export_checkbox_labels=False, filter_logic=None): '\n Export data from the REDCap project.\n\n Parameters\n ----------\n records : list\n array of record names specifying specific records to export.\n by default, all records are exported\n fields : list\n array of field names specifying specific fields to pull\n by default, all fields are exported\n forms : list\n array of form names to export. If in the web UI, the form\n name has a space in it, replace the space with an underscore\n by default, all forms are exported\n events : list\n an array of unique event names from which to export records\n\n :note: this only applies to longitudinal projects\n raw_or_label : (``\'raw\'``), ``\'label\'``, ``\'both\'``\n export the raw coded values or labels for the options of\n multiple choice fields, or both\n event_name : (``\'label\'``), ``\'unique\'``\n export the unique event name or the event label\n format : (``\'json\'``), ``\'csv\'``, ``\'xml\'``, ``\'df\'``\n Format of returned data. ``\'json\'`` returns json-decoded\n objects while ``\'csv\'`` and ``\'xml\'`` return other formats.\n ``\'df\'`` will attempt to return a ``pandas.DataFrame``.\n export_survey_fields : (``False``), True\n specifies whether or not to export the survey identifier\n field (e.g., "redcap_survey_identifier") or survey timestamp\n fields (e.g., form_name+"_timestamp") when surveys are\n utilized in the project.\n export_data_access_groups : (``False``), ``True``\n specifies whether or not to export the\n ``"redcap_data_access_group"`` field when data access groups\n are utilized in the project.\n\n :note: This flag is only viable if the user whose token is\n being used to make the API request is *not* in a data\n access group. 
If the user is in a group, then this flag\n will revert to its default value.\n df_kwargs : dict\n Passed to ``pandas.read_csv`` to control construction of\n returned DataFrame.\n by default, ``{\'index_col\': self.def_field}``\n export_checkbox_labels : (``False``), ``True``\n specify whether to export checkbox values as their label on\n export.\n filter_logic : string\n specify the filterLogic to be sent to the API.\n\n Returns\n -------\n data : list, str, ``pandas.DataFrame``\n exported data\n ' ret_format = format if (format == 'df'): from pandas import read_csv ret_format = 'csv' pl = self.__basepl('record', format=ret_format) fields = self.backfill_fields(fields, forms) keys_to_add = (records, fields, forms, events, raw_or_label, event_name, export_survey_fields, export_data_access_groups, export_checkbox_labels) str_keys = ('records', 'fields', 'forms', 'events', 'rawOrLabel', 'eventName', 'exportSurveyFields', 'exportDataAccessGroups', 'exportCheckboxLabel') for (key, data) in zip(str_keys, keys_to_add): if data: if (key in ('fields', 'records', 'forms', 'events')): for (i, value) in enumerate(data): pl['{}[{}]'.format(key, i)] = value else: pl[key] = data if filter_logic: pl['filterLogic'] = filter_logic (response, _) = self._call_api(pl, 'exp_record') if (format in ('json', 'csv', 'xml')): return response elif (format == 'df'): if (not df_kwargs): if self.is_longitudinal(): df_kwargs = {'index_col': [self.def_field, 'redcap_event_name']} else: df_kwargs = {'index_col': self.def_field} buf = StringIO(response) df = read_csv(buf, **df_kwargs) buf.close() return df<|docstring|>Export data from the REDCap project. Parameters ---------- records : list array of record names specifying specific records to export. by default, all records are exported fields : list array of field names specifying specific fields to pull by default, all fields are exported forms : list array of form names to export. 
If in the web UI, the form name has a space in it, replace the space with an underscore by default, all forms are exported events : list an array of unique event names from which to export records :note: this only applies to longitudinal projects raw_or_label : (``'raw'``), ``'label'``, ``'both'`` export the raw coded values or labels for the options of multiple choice fields, or both event_name : (``'label'``), ``'unique'`` export the unique event name or the event label format : (``'json'``), ``'csv'``, ``'xml'``, ``'df'`` Format of returned data. ``'json'`` returns json-decoded objects while ``'csv'`` and ``'xml'`` return other formats. ``'df'`` will attempt to return a ``pandas.DataFrame``. export_survey_fields : (``False``), True specifies whether or not to export the survey identifier field (e.g., "redcap_survey_identifier") or survey timestamp fields (e.g., form_name+"_timestamp") when surveys are utilized in the project. export_data_access_groups : (``False``), ``True`` specifies whether or not to export the ``"redcap_data_access_group"`` field when data access groups are utilized in the project. :note: This flag is only viable if the user whose token is being used to make the API request is *not* in a data access group. If the user is in a group, then this flag will revert to its default value. df_kwargs : dict Passed to ``pandas.read_csv`` to control construction of returned DataFrame. by default, ``{'index_col': self.def_field}`` export_checkbox_labels : (``False``), ``True`` specify whether to export checkbox values as their label on export. filter_logic : string specify the filterLogic to be sent to the API. Returns ------- data : list, str, ``pandas.DataFrame`` exported data<|endoftext|>
731b1502673baedcda468b317e5c8c71807c5afbe418b832b4eff2fd9b6c391b
def metadata_type(self, field_name): "If the given field_name is validated by REDCap, return it's type" return self.__meta_metadata(field_name, 'text_validation_type_or_show_slider_number')
If the given field_name is validated by REDCap, return it's type
redcap/project.py
metadata_type
sujaypatil96/PyCap
0
python
def metadata_type(self, field_name): return self.__meta_metadata(field_name, 'text_validation_type_or_show_slider_number')
def metadata_type(self, field_name): return self.__meta_metadata(field_name, 'text_validation_type_or_show_slider_number')<|docstring|>If the given field_name is validated by REDCap, return it's type<|endoftext|>
2f4b59f62a0e606b7da3aa0a05fd496833d1f1687ce8da655287176e0e5eb2fd
def __meta_metadata(self, field, key): 'Return the value for key for the field in the metadata' mf = '' try: mf = str([f[key] for f in self.metadata if (f['field_name'] == field)][0]) except IndexError: print(('%s not in metadata field:%s' % (key, field))) return mf else: return mf
Return the value for key for the field in the metadata
redcap/project.py
__meta_metadata
sujaypatil96/PyCap
0
python
def __meta_metadata(self, field, key): mf = try: mf = str([f[key] for f in self.metadata if (f['field_name'] == field)][0]) except IndexError: print(('%s not in metadata field:%s' % (key, field))) return mf else: return mf
def __meta_metadata(self, field, key): mf = try: mf = str([f[key] for f in self.metadata if (f['field_name'] == field)][0]) except IndexError: print(('%s not in metadata field:%s' % (key, field))) return mf else: return mf<|docstring|>Return the value for key for the field in the metadata<|endoftext|>
e189a0ffd049cc91bc70a79dc44628a3c00eee015405158ecefa65082e78113b
def backfill_fields(self, fields, forms): '\n Properly backfill fields to explicitly request specific\n keys. The issue is that >6.X servers *only* return requested fields\n so to improve backwards compatiblity for PyCap clients, add specific fields\n when required.\n\n Parameters\n ----------\n fields: list\n requested fields\n forms: list\n requested forms\n\n Returns\n -------\n new fields, forms\n ' if (forms and (not fields)): new_fields = [self.def_field] elif (fields and (self.def_field not in fields)): new_fields = list(fields) if (self.def_field not in fields): new_fields.append(self.def_field) elif (not fields): new_fields = self.field_names else: new_fields = list(fields) return new_fields
Properly backfill fields to explicitly request specific keys. The issue is that >6.X servers *only* return requested fields so to improve backwards compatiblity for PyCap clients, add specific fields when required. Parameters ---------- fields: list requested fields forms: list requested forms Returns ------- new fields, forms
redcap/project.py
backfill_fields
sujaypatil96/PyCap
0
python
def backfill_fields(self, fields, forms): '\n Properly backfill fields to explicitly request specific\n keys. The issue is that >6.X servers *only* return requested fields\n so to improve backwards compatiblity for PyCap clients, add specific fields\n when required.\n\n Parameters\n ----------\n fields: list\n requested fields\n forms: list\n requested forms\n\n Returns\n -------\n new fields, forms\n ' if (forms and (not fields)): new_fields = [self.def_field] elif (fields and (self.def_field not in fields)): new_fields = list(fields) if (self.def_field not in fields): new_fields.append(self.def_field) elif (not fields): new_fields = self.field_names else: new_fields = list(fields) return new_fields
def backfill_fields(self, fields, forms): '\n Properly backfill fields to explicitly request specific\n keys. The issue is that >6.X servers *only* return requested fields\n so to improve backwards compatiblity for PyCap clients, add specific fields\n when required.\n\n Parameters\n ----------\n fields: list\n requested fields\n forms: list\n requested forms\n\n Returns\n -------\n new fields, forms\n ' if (forms and (not fields)): new_fields = [self.def_field] elif (fields and (self.def_field not in fields)): new_fields = list(fields) if (self.def_field not in fields): new_fields.append(self.def_field) elif (not fields): new_fields = self.field_names else: new_fields = list(fields) return new_fields<|docstring|>Properly backfill fields to explicitly request specific keys. The issue is that >6.X servers *only* return requested fields so to improve backwards compatiblity for PyCap clients, add specific fields when required. Parameters ---------- fields: list requested fields forms: list requested forms Returns ------- new fields, forms<|endoftext|>
239d6aaa4ecdc9a9da8676c62beebb758dba60a198ca7e4c93959f324a0fe93a
def names_labels(self, do_print=False): 'Simple helper function to get all field names and labels ' if do_print: for (name, label) in zip(self.field_names, self.field_labels): print(('%s --> %s' % (str(name), str(label)))) return (self.field_names, self.field_labels)
Simple helper function to get all field names and labels
redcap/project.py
names_labels
sujaypatil96/PyCap
0
python
def names_labels(self, do_print=False): ' ' if do_print: for (name, label) in zip(self.field_names, self.field_labels): print(('%s --> %s' % (str(name), str(label)))) return (self.field_names, self.field_labels)
def names_labels(self, do_print=False): ' ' if do_print: for (name, label) in zip(self.field_names, self.field_labels): print(('%s --> %s' % (str(name), str(label)))) return (self.field_names, self.field_labels)<|docstring|>Simple helper function to get all field names and labels<|endoftext|>
b3cc4984cdc66515708c3a004df457c3e8a2424d38830d39d591f94274cf6b79
def import_records(self, to_import, overwrite='normal', format='json', return_format='json', return_content='count', date_format='YMD', force_auto_number=False): "\n Import data into the RedCap Project\n\n Parameters\n ----------\n to_import : array of dicts, csv/xml string, ``pandas.DataFrame``\n :note:\n If you pass a csv or xml string, you should use the\n ``format`` parameter appropriately.\n :note:\n Keys of the dictionaries should be subset of project's,\n fields, but this isn't a requirement. If you provide keys\n that aren't defined fields, the returned response will\n contain an ``'error'`` key.\n overwrite : ('normal'), 'overwrite'\n ``'overwrite'`` will erase values previously stored in the\n database if not specified in the to_import dictionaries.\n format : ('json'), 'xml', 'csv'\n Format of incoming data. By default, to_import will be json-encoded\n return_format : ('json'), 'csv', 'xml'\n Response format. By default, response will be json-decoded.\n return_content : ('count'), 'ids', 'nothing'\n By default, the response contains a 'count' key with the number of\n records just imported. By specifying 'ids', a list of ids\n imported will be returned. 'nothing' will only return\n the HTTP status code and no message.\n date_format : ('YMD'), 'DMY', 'MDY'\n Describes the formatting of dates. By default, date strings\n are formatted as 'YYYY-MM-DD' corresponding to 'YMD'. If date\n strings are formatted as 'MM/DD/YYYY' set this parameter as\n 'MDY' and if formatted as 'DD/MM/YYYY' set as 'DMY'. No\n other formattings are allowed.\n force_auto_number : ('False') Enables automatic assignment of record IDs\n of imported records by REDCap. 
If this is set to true, and auto-numbering\n for records is enabled for the project, auto-numbering of imported records\n will be enabled.\n\n Returns\n -------\n response : dict, str\n response from REDCap API, json-decoded if ``return_format`` == ``'json'``\n " pl = self.__basepl('record') if hasattr(to_import, 'to_csv'): buf = StringIO() if self.is_longitudinal(): csv_kwargs = {'index_label': [self.def_field, 'redcap_event_name']} else: csv_kwargs = {'index_label': self.def_field} to_import.to_csv(buf, **csv_kwargs) pl['data'] = buf.getvalue() buf.close() format = 'csv' elif (format == 'json'): pl['data'] = json.dumps(to_import, separators=(',', ':')) else: pl['data'] = to_import pl['overwriteBehavior'] = overwrite pl['format'] = format pl['returnFormat'] = return_format pl['returnContent'] = return_content pl['dateFormat'] = date_format pl['forceAutoNumber'] = force_auto_number response = self._call_api(pl, 'imp_record')[0] if ('error' in response): raise RedcapError(str(response)) return response
Import data into the RedCap Project Parameters ---------- to_import : array of dicts, csv/xml string, ``pandas.DataFrame`` :note: If you pass a csv or xml string, you should use the ``format`` parameter appropriately. :note: Keys of the dictionaries should be subset of project's, fields, but this isn't a requirement. If you provide keys that aren't defined fields, the returned response will contain an ``'error'`` key. overwrite : ('normal'), 'overwrite' ``'overwrite'`` will erase values previously stored in the database if not specified in the to_import dictionaries. format : ('json'), 'xml', 'csv' Format of incoming data. By default, to_import will be json-encoded return_format : ('json'), 'csv', 'xml' Response format. By default, response will be json-decoded. return_content : ('count'), 'ids', 'nothing' By default, the response contains a 'count' key with the number of records just imported. By specifying 'ids', a list of ids imported will be returned. 'nothing' will only return the HTTP status code and no message. date_format : ('YMD'), 'DMY', 'MDY' Describes the formatting of dates. By default, date strings are formatted as 'YYYY-MM-DD' corresponding to 'YMD'. If date strings are formatted as 'MM/DD/YYYY' set this parameter as 'MDY' and if formatted as 'DD/MM/YYYY' set as 'DMY'. No other formattings are allowed. force_auto_number : ('False') Enables automatic assignment of record IDs of imported records by REDCap. If this is set to true, and auto-numbering for records is enabled for the project, auto-numbering of imported records will be enabled. Returns ------- response : dict, str response from REDCap API, json-decoded if ``return_format`` == ``'json'``
redcap/project.py
import_records
sujaypatil96/PyCap
0
python
def import_records(self, to_import, overwrite='normal', format='json', return_format='json', return_content='count', date_format='YMD', force_auto_number=False): "\n Import data into the RedCap Project\n\n Parameters\n ----------\n to_import : array of dicts, csv/xml string, ``pandas.DataFrame``\n :note:\n If you pass a csv or xml string, you should use the\n ``format`` parameter appropriately.\n :note:\n Keys of the dictionaries should be subset of project's,\n fields, but this isn't a requirement. If you provide keys\n that aren't defined fields, the returned response will\n contain an ``'error'`` key.\n overwrite : ('normal'), 'overwrite'\n ``'overwrite'`` will erase values previously stored in the\n database if not specified in the to_import dictionaries.\n format : ('json'), 'xml', 'csv'\n Format of incoming data. By default, to_import will be json-encoded\n return_format : ('json'), 'csv', 'xml'\n Response format. By default, response will be json-decoded.\n return_content : ('count'), 'ids', 'nothing'\n By default, the response contains a 'count' key with the number of\n records just imported. By specifying 'ids', a list of ids\n imported will be returned. 'nothing' will only return\n the HTTP status code and no message.\n date_format : ('YMD'), 'DMY', 'MDY'\n Describes the formatting of dates. By default, date strings\n are formatted as 'YYYY-MM-DD' corresponding to 'YMD'. If date\n strings are formatted as 'MM/DD/YYYY' set this parameter as\n 'MDY' and if formatted as 'DD/MM/YYYY' set as 'DMY'. No\n other formattings are allowed.\n force_auto_number : ('False') Enables automatic assignment of record IDs\n of imported records by REDCap. 
If this is set to true, and auto-numbering\n for records is enabled for the project, auto-numbering of imported records\n will be enabled.\n\n Returns\n -------\n response : dict, str\n response from REDCap API, json-decoded if ``return_format`` == ``'json'``\n " pl = self.__basepl('record') if hasattr(to_import, 'to_csv'): buf = StringIO() if self.is_longitudinal(): csv_kwargs = {'index_label': [self.def_field, 'redcap_event_name']} else: csv_kwargs = {'index_label': self.def_field} to_import.to_csv(buf, **csv_kwargs) pl['data'] = buf.getvalue() buf.close() format = 'csv' elif (format == 'json'): pl['data'] = json.dumps(to_import, separators=(',', ':')) else: pl['data'] = to_import pl['overwriteBehavior'] = overwrite pl['format'] = format pl['returnFormat'] = return_format pl['returnContent'] = return_content pl['dateFormat'] = date_format pl['forceAutoNumber'] = force_auto_number response = self._call_api(pl, 'imp_record')[0] if ('error' in response): raise RedcapError(str(response)) return response
def import_records(self, to_import, overwrite='normal', format='json', return_format='json', return_content='count', date_format='YMD', force_auto_number=False): "\n Import data into the RedCap Project\n\n Parameters\n ----------\n to_import : array of dicts, csv/xml string, ``pandas.DataFrame``\n :note:\n If you pass a csv or xml string, you should use the\n ``format`` parameter appropriately.\n :note:\n Keys of the dictionaries should be subset of project's,\n fields, but this isn't a requirement. If you provide keys\n that aren't defined fields, the returned response will\n contain an ``'error'`` key.\n overwrite : ('normal'), 'overwrite'\n ``'overwrite'`` will erase values previously stored in the\n database if not specified in the to_import dictionaries.\n format : ('json'), 'xml', 'csv'\n Format of incoming data. By default, to_import will be json-encoded\n return_format : ('json'), 'csv', 'xml'\n Response format. By default, response will be json-decoded.\n return_content : ('count'), 'ids', 'nothing'\n By default, the response contains a 'count' key with the number of\n records just imported. By specifying 'ids', a list of ids\n imported will be returned. 'nothing' will only return\n the HTTP status code and no message.\n date_format : ('YMD'), 'DMY', 'MDY'\n Describes the formatting of dates. By default, date strings\n are formatted as 'YYYY-MM-DD' corresponding to 'YMD'. If date\n strings are formatted as 'MM/DD/YYYY' set this parameter as\n 'MDY' and if formatted as 'DD/MM/YYYY' set as 'DMY'. No\n other formattings are allowed.\n force_auto_number : ('False') Enables automatic assignment of record IDs\n of imported records by REDCap. 
If this is set to true, and auto-numbering\n for records is enabled for the project, auto-numbering of imported records\n will be enabled.\n\n Returns\n -------\n response : dict, str\n response from REDCap API, json-decoded if ``return_format`` == ``'json'``\n " pl = self.__basepl('record') if hasattr(to_import, 'to_csv'): buf = StringIO() if self.is_longitudinal(): csv_kwargs = {'index_label': [self.def_field, 'redcap_event_name']} else: csv_kwargs = {'index_label': self.def_field} to_import.to_csv(buf, **csv_kwargs) pl['data'] = buf.getvalue() buf.close() format = 'csv' elif (format == 'json'): pl['data'] = json.dumps(to_import, separators=(',', ':')) else: pl['data'] = to_import pl['overwriteBehavior'] = overwrite pl['format'] = format pl['returnFormat'] = return_format pl['returnContent'] = return_content pl['dateFormat'] = date_format pl['forceAutoNumber'] = force_auto_number response = self._call_api(pl, 'imp_record')[0] if ('error' in response): raise RedcapError(str(response)) return response<|docstring|>Import data into the RedCap Project Parameters ---------- to_import : array of dicts, csv/xml string, ``pandas.DataFrame`` :note: If you pass a csv or xml string, you should use the ``format`` parameter appropriately. :note: Keys of the dictionaries should be subset of project's, fields, but this isn't a requirement. If you provide keys that aren't defined fields, the returned response will contain an ``'error'`` key. overwrite : ('normal'), 'overwrite' ``'overwrite'`` will erase values previously stored in the database if not specified in the to_import dictionaries. format : ('json'), 'xml', 'csv' Format of incoming data. By default, to_import will be json-encoded return_format : ('json'), 'csv', 'xml' Response format. By default, response will be json-decoded. return_content : ('count'), 'ids', 'nothing' By default, the response contains a 'count' key with the number of records just imported. By specifying 'ids', a list of ids imported will be returned. 
'nothing' will only return the HTTP status code and no message. date_format : ('YMD'), 'DMY', 'MDY' Describes the formatting of dates. By default, date strings are formatted as 'YYYY-MM-DD' corresponding to 'YMD'. If date strings are formatted as 'MM/DD/YYYY' set this parameter as 'MDY' and if formatted as 'DD/MM/YYYY' set as 'DMY'. No other formattings are allowed. force_auto_number : ('False') Enables automatic assignment of record IDs of imported records by REDCap. If this is set to true, and auto-numbering for records is enabled for the project, auto-numbering of imported records will be enabled. Returns ------- response : dict, str response from REDCap API, json-decoded if ``return_format`` == ``'json'``<|endoftext|>
d61ce7c93c656a6a5c99ee93751f80cea32300af32853022c3759909fe92ea7e
def export_file(self, record, field, event=None, return_format='json'): "\n Export the contents of a file stored for a particular record\n\n Notes\n -----\n Unlike other export methods, this works on a single record.\n\n Parameters\n ----------\n record : str\n record ID\n field : str\n field name containing the file to be exported.\n event: str\n for longitudinal projects, specify the unique event here\n return_format: ('json'), 'csv', 'xml'\n format of error message\n\n Returns\n -------\n content : bytes\n content of the file\n content_map : dict\n content-type dictionary\n " self._check_file_field(field) pl = self.__basepl(content='file', format=return_format) del pl['format'] pl['returnFormat'] = return_format pl['action'] = 'export' pl['field'] = field pl['record'] = record if event: pl['event'] = event (content, headers) = self._call_api(pl, 'exp_file') if ('content-type' in headers): splat = [kv.strip() for kv in headers['content-type'].split(';')] kv = [(kv.split('=')[0], kv.split('=')[1].replace('"', '')) for kv in splat if ('=' in kv)] content_map = dict(kv) else: content_map = {} return (content, content_map)
Export the contents of a file stored for a particular record Notes ----- Unlike other export methods, this works on a single record. Parameters ---------- record : str record ID field : str field name containing the file to be exported. event: str for longitudinal projects, specify the unique event here return_format: ('json'), 'csv', 'xml' format of error message Returns ------- content : bytes content of the file content_map : dict content-type dictionary
redcap/project.py
export_file
sujaypatil96/PyCap
0
python
def export_file(self, record, field, event=None, return_format='json'): "\n Export the contents of a file stored for a particular record\n\n Notes\n -----\n Unlike other export methods, this works on a single record.\n\n Parameters\n ----------\n record : str\n record ID\n field : str\n field name containing the file to be exported.\n event: str\n for longitudinal projects, specify the unique event here\n return_format: ('json'), 'csv', 'xml'\n format of error message\n\n Returns\n -------\n content : bytes\n content of the file\n content_map : dict\n content-type dictionary\n " self._check_file_field(field) pl = self.__basepl(content='file', format=return_format) del pl['format'] pl['returnFormat'] = return_format pl['action'] = 'export' pl['field'] = field pl['record'] = record if event: pl['event'] = event (content, headers) = self._call_api(pl, 'exp_file') if ('content-type' in headers): splat = [kv.strip() for kv in headers['content-type'].split(';')] kv = [(kv.split('=')[0], kv.split('=')[1].replace('"', )) for kv in splat if ('=' in kv)] content_map = dict(kv) else: content_map = {} return (content, content_map)
def export_file(self, record, field, event=None, return_format='json'): "\n Export the contents of a file stored for a particular record\n\n Notes\n -----\n Unlike other export methods, this works on a single record.\n\n Parameters\n ----------\n record : str\n record ID\n field : str\n field name containing the file to be exported.\n event: str\n for longitudinal projects, specify the unique event here\n return_format: ('json'), 'csv', 'xml'\n format of error message\n\n Returns\n -------\n content : bytes\n content of the file\n content_map : dict\n content-type dictionary\n " self._check_file_field(field) pl = self.__basepl(content='file', format=return_format) del pl['format'] pl['returnFormat'] = return_format pl['action'] = 'export' pl['field'] = field pl['record'] = record if event: pl['event'] = event (content, headers) = self._call_api(pl, 'exp_file') if ('content-type' in headers): splat = [kv.strip() for kv in headers['content-type'].split(';')] kv = [(kv.split('=')[0], kv.split('=')[1].replace('"', )) for kv in splat if ('=' in kv)] content_map = dict(kv) else: content_map = {} return (content, content_map)<|docstring|>Export the contents of a file stored for a particular record Notes ----- Unlike other export methods, this works on a single record. Parameters ---------- record : str record ID field : str field name containing the file to be exported. event: str for longitudinal projects, specify the unique event here return_format: ('json'), 'csv', 'xml' format of error message Returns ------- content : bytes content of the file content_map : dict content-type dictionary<|endoftext|>
17910e999576e1c2be709d33a278a4a5d6b0914964d2ce6af5284df0806f61cc
def import_file(self, record, field, fname, fobj, event=None, repeat_instance=None, return_format='json'): "\n Import the contents of a file represented by fobj to a\n particular records field\n\n Parameters\n ----------\n record : str\n record ID\n field : str\n field name where the file will go\n fname : str\n file name visible in REDCap UI\n fobj : file object\n file object as returned by `open`\n event : str\n for longitudinal projects, specify the unique event here\n repeat_instance : int\n (only for projects with repeating instruments/events)\n The repeat instance number of the repeating event (if longitudinal)\n or the repeating instrument (if classic or longitudinal).\n return_format : ('json'), 'csv', 'xml'\n format of error message\n\n Returns\n -------\n response :\n response from server as specified by ``return_format``\n " self._check_file_field(field) pl = self.__basepl(content='file', format=return_format) del pl['format'] pl['returnFormat'] = return_format pl['action'] = 'import' pl['field'] = field pl['record'] = record if event: pl['event'] = event if repeat_instance: pl['repeat_instance'] = repeat_instance file_kwargs = {'files': {'file': (fname, fobj)}} return self._call_api(pl, 'imp_file', **file_kwargs)[0]
Import the contents of a file represented by fobj to a particular records field Parameters ---------- record : str record ID field : str field name where the file will go fname : str file name visible in REDCap UI fobj : file object file object as returned by `open` event : str for longitudinal projects, specify the unique event here repeat_instance : int (only for projects with repeating instruments/events) The repeat instance number of the repeating event (if longitudinal) or the repeating instrument (if classic or longitudinal). return_format : ('json'), 'csv', 'xml' format of error message Returns ------- response : response from server as specified by ``return_format``
redcap/project.py
import_file
sujaypatil96/PyCap
0
python
def import_file(self, record, field, fname, fobj, event=None, repeat_instance=None, return_format='json'): "\n Import the contents of a file represented by fobj to a\n particular records field\n\n Parameters\n ----------\n record : str\n record ID\n field : str\n field name where the file will go\n fname : str\n file name visible in REDCap UI\n fobj : file object\n file object as returned by `open`\n event : str\n for longitudinal projects, specify the unique event here\n repeat_instance : int\n (only for projects with repeating instruments/events)\n The repeat instance number of the repeating event (if longitudinal)\n or the repeating instrument (if classic or longitudinal).\n return_format : ('json'), 'csv', 'xml'\n format of error message\n\n Returns\n -------\n response :\n response from server as specified by ``return_format``\n " self._check_file_field(field) pl = self.__basepl(content='file', format=return_format) del pl['format'] pl['returnFormat'] = return_format pl['action'] = 'import' pl['field'] = field pl['record'] = record if event: pl['event'] = event if repeat_instance: pl['repeat_instance'] = repeat_instance file_kwargs = {'files': {'file': (fname, fobj)}} return self._call_api(pl, 'imp_file', **file_kwargs)[0]
def import_file(self, record, field, fname, fobj, event=None, repeat_instance=None, return_format='json'): "\n Import the contents of a file represented by fobj to a\n particular records field\n\n Parameters\n ----------\n record : str\n record ID\n field : str\n field name where the file will go\n fname : str\n file name visible in REDCap UI\n fobj : file object\n file object as returned by `open`\n event : str\n for longitudinal projects, specify the unique event here\n repeat_instance : int\n (only for projects with repeating instruments/events)\n The repeat instance number of the repeating event (if longitudinal)\n or the repeating instrument (if classic or longitudinal).\n return_format : ('json'), 'csv', 'xml'\n format of error message\n\n Returns\n -------\n response :\n response from server as specified by ``return_format``\n " self._check_file_field(field) pl = self.__basepl(content='file', format=return_format) del pl['format'] pl['returnFormat'] = return_format pl['action'] = 'import' pl['field'] = field pl['record'] = record if event: pl['event'] = event if repeat_instance: pl['repeat_instance'] = repeat_instance file_kwargs = {'files': {'file': (fname, fobj)}} return self._call_api(pl, 'imp_file', **file_kwargs)[0]<|docstring|>Import the contents of a file represented by fobj to a particular records field Parameters ---------- record : str record ID field : str field name where the file will go fname : str file name visible in REDCap UI fobj : file object file object as returned by `open` event : str for longitudinal projects, specify the unique event here repeat_instance : int (only for projects with repeating instruments/events) The repeat instance number of the repeating event (if longitudinal) or the repeating instrument (if classic or longitudinal). return_format : ('json'), 'csv', 'xml' format of error message Returns ------- response : response from server as specified by ``return_format``<|endoftext|>
e4c0e0bc36c588f559026280d585cca56c9ec333f2339755e13ea86bfecd5791
def delete_file(self, record, field, return_format='json', event=None): "\n Delete a file from REDCap\n\n Notes\n -----\n There is no undo button to this.\n\n Parameters\n ----------\n record : str\n record ID\n field : str\n field name\n return_format : (``'json'``), ``'csv'``, ``'xml'``\n return format for error message\n event : str\n If longitudinal project, event to delete file from\n\n Returns\n -------\n response : dict, str\n response from REDCap after deleting file\n " self._check_file_field(field) pl = self.__basepl(content='file', format=return_format) del pl['format'] pl['returnFormat'] = return_format pl['action'] = 'delete' pl['record'] = record pl['field'] = field if event: pl['event'] = event return self._call_api(pl, 'del_file')[0]
Delete a file from REDCap Notes ----- There is no undo button to this. Parameters ---------- record : str record ID field : str field name return_format : (``'json'``), ``'csv'``, ``'xml'`` return format for error message event : str If longitudinal project, event to delete file from Returns ------- response : dict, str response from REDCap after deleting file
redcap/project.py
delete_file
sujaypatil96/PyCap
0
python
def delete_file(self, record, field, return_format='json', event=None): "\n Delete a file from REDCap\n\n Notes\n -----\n There is no undo button to this.\n\n Parameters\n ----------\n record : str\n record ID\n field : str\n field name\n return_format : (``'json'``), ``'csv'``, ``'xml'``\n return format for error message\n event : str\n If longitudinal project, event to delete file from\n\n Returns\n -------\n response : dict, str\n response from REDCap after deleting file\n " self._check_file_field(field) pl = self.__basepl(content='file', format=return_format) del pl['format'] pl['returnFormat'] = return_format pl['action'] = 'delete' pl['record'] = record pl['field'] = field if event: pl['event'] = event return self._call_api(pl, 'del_file')[0]
def delete_file(self, record, field, return_format='json', event=None): "\n Delete a file from REDCap\n\n Notes\n -----\n There is no undo button to this.\n\n Parameters\n ----------\n record : str\n record ID\n field : str\n field name\n return_format : (``'json'``), ``'csv'``, ``'xml'``\n return format for error message\n event : str\n If longitudinal project, event to delete file from\n\n Returns\n -------\n response : dict, str\n response from REDCap after deleting file\n " self._check_file_field(field) pl = self.__basepl(content='file', format=return_format) del pl['format'] pl['returnFormat'] = return_format pl['action'] = 'delete' pl['record'] = record pl['field'] = field if event: pl['event'] = event return self._call_api(pl, 'del_file')[0]<|docstring|>Delete a file from REDCap Notes ----- There is no undo button to this. Parameters ---------- record : str record ID field : str field name return_format : (``'json'``), ``'csv'``, ``'xml'`` return format for error message event : str If longitudinal project, event to delete file from Returns ------- response : dict, str response from REDCap after deleting file<|endoftext|>
69d925ed0e1f6367fef31ff19eedf9b33d21046c65539132b6b62b67ed0c1404
def _check_file_field(self, field): 'Check that field exists and is a file field' is_field = (field in self.field_names) is_file = (self.__meta_metadata(field, 'field_type') == 'file') if (not (is_field and is_file)): msg = ("'%s' is not a field or not a 'file' field" % field) raise ValueError(msg) else: return True
Check that field exists and is a file field
redcap/project.py
_check_file_field
sujaypatil96/PyCap
0
python
def _check_file_field(self, field): is_field = (field in self.field_names) is_file = (self.__meta_metadata(field, 'field_type') == 'file') if (not (is_field and is_file)): msg = ("'%s' is not a field or not a 'file' field" % field) raise ValueError(msg) else: return True
def _check_file_field(self, field): is_field = (field in self.field_names) is_file = (self.__meta_metadata(field, 'field_type') == 'file') if (not (is_field and is_file)): msg = ("'%s' is not a field or not a 'file' field" % field) raise ValueError(msg) else: return True<|docstring|>Check that field exists and is a file field<|endoftext|>
71dd6c119070a8be940c17fa595eba0cd62e60bd661381e762762d25a22be5a5
def export_users(self, format='json'): "\n Export the users of the Project\n\n Notes\n -----\n Each user will have the following keys:\n\n * ``'firstname'`` : User's first name\n * ``'lastname'`` : User's last name\n * ``'email'`` : Email address\n * ``'username'`` : User's username\n * ``'expiration'`` : Project access expiration date\n * ``'data_access_group'`` : data access group ID\n * ``'data_export'`` : (0=no access, 2=De-Identified, 1=Full Data Set)\n * ``'forms'`` : a list of dicts with a single key as the form name and\n value is an integer describing that user's form rights,\n where: 0=no access, 1=view records/responses and edit\n records (survey responses are read-only), 2=read only, and\n 3=edit survey responses,\n\n\n Parameters\n ----------\n format : (``'json'``), ``'csv'``, ``'xml'``\n response return format\n\n Returns\n -------\n users: list, str\n list of users dicts when ``'format'='json'``,\n otherwise a string\n " pl = self.__basepl(content='user', format=format) return self._call_api(pl, 'exp_user')[0]
Export the users of the Project Notes ----- Each user will have the following keys: * ``'firstname'`` : User's first name * ``'lastname'`` : User's last name * ``'email'`` : Email address * ``'username'`` : User's username * ``'expiration'`` : Project access expiration date * ``'data_access_group'`` : data access group ID * ``'data_export'`` : (0=no access, 2=De-Identified, 1=Full Data Set) * ``'forms'`` : a list of dicts with a single key as the form name and value is an integer describing that user's form rights, where: 0=no access, 1=view records/responses and edit records (survey responses are read-only), 2=read only, and 3=edit survey responses, Parameters ---------- format : (``'json'``), ``'csv'``, ``'xml'`` response return format Returns ------- users: list, str list of users dicts when ``'format'='json'``, otherwise a string
redcap/project.py
export_users
sujaypatil96/PyCap
0
python
def export_users(self, format='json'): "\n Export the users of the Project\n\n Notes\n -----\n Each user will have the following keys:\n\n * ``'firstname'`` : User's first name\n * ``'lastname'`` : User's last name\n * ``'email'`` : Email address\n * ``'username'`` : User's username\n * ``'expiration'`` : Project access expiration date\n * ``'data_access_group'`` : data access group ID\n * ``'data_export'`` : (0=no access, 2=De-Identified, 1=Full Data Set)\n * ``'forms'`` : a list of dicts with a single key as the form name and\n value is an integer describing that user's form rights,\n where: 0=no access, 1=view records/responses and edit\n records (survey responses are read-only), 2=read only, and\n 3=edit survey responses,\n\n\n Parameters\n ----------\n format : (``'json'``), ``'csv'``, ``'xml'``\n response return format\n\n Returns\n -------\n users: list, str\n list of users dicts when ``'format'='json'``,\n otherwise a string\n " pl = self.__basepl(content='user', format=format) return self._call_api(pl, 'exp_user')[0]
def export_users(self, format='json'): "\n Export the users of the Project\n\n Notes\n -----\n Each user will have the following keys:\n\n * ``'firstname'`` : User's first name\n * ``'lastname'`` : User's last name\n * ``'email'`` : Email address\n * ``'username'`` : User's username\n * ``'expiration'`` : Project access expiration date\n * ``'data_access_group'`` : data access group ID\n * ``'data_export'`` : (0=no access, 2=De-Identified, 1=Full Data Set)\n * ``'forms'`` : a list of dicts with a single key as the form name and\n value is an integer describing that user's form rights,\n where: 0=no access, 1=view records/responses and edit\n records (survey responses are read-only), 2=read only, and\n 3=edit survey responses,\n\n\n Parameters\n ----------\n format : (``'json'``), ``'csv'``, ``'xml'``\n response return format\n\n Returns\n -------\n users: list, str\n list of users dicts when ``'format'='json'``,\n otherwise a string\n " pl = self.__basepl(content='user', format=format) return self._call_api(pl, 'exp_user')[0]<|docstring|>Export the users of the Project Notes ----- Each user will have the following keys: * ``'firstname'`` : User's first name * ``'lastname'`` : User's last name * ``'email'`` : Email address * ``'username'`` : User's username * ``'expiration'`` : Project access expiration date * ``'data_access_group'`` : data access group ID * ``'data_export'`` : (0=no access, 2=De-Identified, 1=Full Data Set) * ``'forms'`` : a list of dicts with a single key as the form name and value is an integer describing that user's form rights, where: 0=no access, 1=view records/responses and edit records (survey responses are read-only), 2=read only, and 3=edit survey responses, Parameters ---------- format : (``'json'``), ``'csv'``, ``'xml'`` response return format Returns ------- users: list, str list of users dicts when ``'format'='json'``, otherwise a string<|endoftext|>
03819c78f3fe06353cbb12c5cb6b06787f6291a0ef0661d67047ba5ea889de4f
def export_survey_participant_list(self, instrument, event=None, format='json'): '\n Export the Survey Participant List\n\n Notes\n -----\n The passed instrument must be set up as a survey instrument.\n\n Parameters\n ----------\n instrument: str\n Name of instrument as seen in second column of Data Dictionary.\n event: str\n Unique event name, only used in longitudinal projects\n format: (json, xml, csv), json by default\n Format of returned data\n ' pl = self.__basepl(content='participantList', format=format) pl['instrument'] = instrument if event: pl['event'] = event return self._call_api(pl, 'exp_survey_participant_list')
Export the Survey Participant List Notes ----- The passed instrument must be set up as a survey instrument. Parameters ---------- instrument: str Name of instrument as seen in second column of Data Dictionary. event: str Unique event name, only used in longitudinal projects format: (json, xml, csv), json by default Format of returned data
redcap/project.py
export_survey_participant_list
sujaypatil96/PyCap
0
python
def export_survey_participant_list(self, instrument, event=None, format='json'): '\n Export the Survey Participant List\n\n Notes\n -----\n The passed instrument must be set up as a survey instrument.\n\n Parameters\n ----------\n instrument: str\n Name of instrument as seen in second column of Data Dictionary.\n event: str\n Unique event name, only used in longitudinal projects\n format: (json, xml, csv), json by default\n Format of returned data\n ' pl = self.__basepl(content='participantList', format=format) pl['instrument'] = instrument if event: pl['event'] = event return self._call_api(pl, 'exp_survey_participant_list')
def export_survey_participant_list(self, instrument, event=None, format='json'): '\n Export the Survey Participant List\n\n Notes\n -----\n The passed instrument must be set up as a survey instrument.\n\n Parameters\n ----------\n instrument: str\n Name of instrument as seen in second column of Data Dictionary.\n event: str\n Unique event name, only used in longitudinal projects\n format: (json, xml, csv), json by default\n Format of returned data\n ' pl = self.__basepl(content='participantList', format=format) pl['instrument'] = instrument if event: pl['event'] = event return self._call_api(pl, 'exp_survey_participant_list')<|docstring|>Export the Survey Participant List Notes ----- The passed instrument must be set up as a survey instrument. Parameters ---------- instrument: str Name of instrument as seen in second column of Data Dictionary. event: str Unique event name, only used in longitudinal projects format: (json, xml, csv), json by default Format of returned data<|endoftext|>
a9e75ef545e9be0efc6c20005bf3e70b6acd16af449ef40f3b03dbb2cb0d944d
def export_project_info(self, format='json'): '\n Export Project Information\n\n Parameters\n ----------\n format: (json, xml, csv), json by default\n Format of returned data\n ' pl = self.__basepl(content='project', format=format) return self._call_api(pl, 'exp_proj')[0]
Export Project Information Parameters ---------- format: (json, xml, csv), json by default Format of returned data
redcap/project.py
export_project_info
sujaypatil96/PyCap
0
python
def export_project_info(self, format='json'): '\n Export Project Information\n\n Parameters\n ----------\n format: (json, xml, csv), json by default\n Format of returned data\n ' pl = self.__basepl(content='project', format=format) return self._call_api(pl, 'exp_proj')[0]
def export_project_info(self, format='json'): '\n Export Project Information\n\n Parameters\n ----------\n format: (json, xml, csv), json by default\n Format of returned data\n ' pl = self.__basepl(content='project', format=format) return self._call_api(pl, 'exp_proj')[0]<|docstring|>Export Project Information Parameters ---------- format: (json, xml, csv), json by default Format of returned data<|endoftext|>
8645630116d3b93a23dc704971a58ab1c0ffc9e3361e5023ee6500fb3b3b9d41
def add_evaluation_detections_1(gt_class_ids, gt_bboxes, config, class_pred_stats): '\n Use GT bboxes and a series of False Postivies as detections to pass on to FCN\n Genertes list of detections using GT and adds FP bboxes for all GT bboxes \n \n Scores are assigned using the AVERAGE CLASS SCORE + uniform random noise \n \n Inputs:\n ------\n gt_class_ids: \n gt_bboxes: \n config\n \n Returns:\n --------\n detections [M, (y1, x1, y2, x2, class_id, score)] After inserting false positives \n M = N * 2 (N: number of GT annotations)\n Number of FP insertions is equal to number of GT annotations\n ' verbose = config.VERBOSE verbose = False (height, width) = config.IMAGE_SHAPE[:2] gt_nz_idxs = np.where((gt_class_ids > 0))[0] gt_nz_class_ids = gt_class_ids[gt_nz_idxs] gt_nz_bboxes = gt_bboxes[gt_nz_idxs] mod_detections = np.empty((0, 7)) max_overlap = 0 for class_id in np.unique(gt_nz_class_ids): gt_class_ixs = np.where((gt_nz_class_ids == class_id))[0] tp_bboxes = gt_nz_bboxes[gt_class_ixs] fp_bboxes = flip_bbox(tp_bboxes, (height, width), flip_x=True, flip_y=True) gt_bbox_count = gt_class_ixs.shape[0] score_sz = (gt_bbox_count, 1) scale = 0.001 cls_avg_score = class_pred_stats['avg'][class_id] orig_tp_scores = np.full(score_sz, cls_avg_score) orig_fp_scores = np.full(score_sz, cls_avg_score) fp_noise = np.round(np.random.uniform(low=(- scale), high=scale, size=score_sz), 4) tp_noise = np.round(np.random.uniform(low=(- scale), high=scale, size=score_sz), 4) tp_scores = (orig_tp_scores + tp_noise) fp_scores = (orig_fp_scores + fp_noise) tp_scores = np.clip(tp_scores, 0.0, 1.0) fp_scores = np.clip(fp_scores, 0.0, 1.0) if verbose: print() print(' --------------------------------------------------------------------------------------') print(' class id : {:3d} class avg score: {:.4f} GT boxes : {:3d}'.format(class_id, cls_avg_score, gt_bbox_count)) print(' --------------------------------------------------------------------------------------') print('\n TP Boxes: \t box 
\t\t\t orig_score \t noise \t\t new_score') print(' ', ('-' * 80)) for (i, (box, orig_score, noise, new_score)) in enumerate(zip(tp_bboxes, orig_tp_scores, tp_noise, tp_scores)): print(' ', i, ' \t', box, '\t\t', orig_score, '\t', noise, '\t', new_score) print('\n FP Boxes: \t box \t\t\t orig_score \t noise \t\t new_score') print(' ', ('-' * 80)) for (i, (box, orig_score, noise, new_score)) in enumerate(zip(fp_bboxes, orig_fp_scores, fp_noise, fp_scores)): print(' ', i, ' \t', box, '\t\t', orig_score, '\t', noise, '\t', new_score) overlaps = compute_2D_iou(tp_bboxes, fp_bboxes) large_ious = np.where((overlaps > 0.8))[0] if (len(large_ious) > 0): if verbose: print('Large IOUS encountered!!!! :', large_ious) print('---------------------------------------------') print('class id :', class_id, 'class prediction avg:', cls_tp_score, cls_fp_score) print('---------------------------------------------') print(' pre adjustment overlaps ', overlaps.shape, ' max: ', overlaps.max()) print('---------------------------------------------') print(overlaps, '\n') print('fp_bboxes before adjustment ', fp_bboxes.shape) print(fp_bboxes[large_ious]) fp_bboxes[large_ious] = np.rint((fp_bboxes[large_ious].astype(np.float) * 0.85)) overlaps = compute_2D_iou(tp_bboxes, fp_bboxes) if verbose: print('fp_bboxes after adjustment ', fp_bboxes.shape) print(fp_bboxes[large_ious]) print(' post adjustment overlaps ', overlaps.shape, ' max: ', overlaps.max()) print(' ---------------------------------------------') print(overlaps, '\n') m_overlap = overlaps.max() max_overlap = (m_overlap if (m_overlap > max_overlap) else max_overlap) classes = np.expand_dims(gt_nz_class_ids[gt_class_ixs], axis=(- 1)) tp_ind = np.ones((gt_class_ixs.shape[0], 1)) class_tp = np.concatenate([gt_nz_bboxes[gt_class_ixs], classes, tp_scores, tp_ind], axis=(- 1)) class_fp = np.concatenate([fp_bboxes, classes, fp_scores, ((- 1) * tp_ind)], axis=(- 1)) mod_detections = np.vstack([mod_detections, class_tp, class_fp]) roi_count 
= config.DETECTION_MAX_INSTANCES top_ids = np.argsort(mod_detections[(:, 5)])[::(- 1)][:roi_count] if verbose: print() print('GT boxes + fp boxes with noise, prior to sort:', mod_detections.shape) print(('-' * 75)) print(mod_detections) print() print('GT boxes + fp_boxes with noise - After Sort (Final):', mod_detections.shape) print(('-' * 75)) print(mod_detections[top_ids]) print('\n MAX OVERLAP {:.5f}'.format(max_overlap)) return (mod_detections[top_ids], max_overlap)
Use GT bboxes and a series of False Postivies as detections to pass on to FCN Genertes list of detections using GT and adds FP bboxes for all GT bboxes Scores are assigned using the AVERAGE CLASS SCORE + uniform random noise Inputs: ------ gt_class_ids: gt_bboxes: config Returns: -------- detections [M, (y1, x1, y2, x2, class_id, score)] After inserting false positives M = N * 2 (N: number of GT annotations) Number of FP insertions is equal to number of GT annotations
mrcnn/detect_eval_layer.py
add_evaluation_detections_1
kbardool/mrcnn3
2
python
def add_evaluation_detections_1(gt_class_ids, gt_bboxes, config, class_pred_stats): '\n Use GT bboxes and a series of False Postivies as detections to pass on to FCN\n Genertes list of detections using GT and adds FP bboxes for all GT bboxes \n \n Scores are assigned using the AVERAGE CLASS SCORE + uniform random noise \n \n Inputs:\n ------\n gt_class_ids: \n gt_bboxes: \n config\n \n Returns:\n --------\n detections [M, (y1, x1, y2, x2, class_id, score)] After inserting false positives \n M = N * 2 (N: number of GT annotations)\n Number of FP insertions is equal to number of GT annotations\n ' verbose = config.VERBOSE verbose = False (height, width) = config.IMAGE_SHAPE[:2] gt_nz_idxs = np.where((gt_class_ids > 0))[0] gt_nz_class_ids = gt_class_ids[gt_nz_idxs] gt_nz_bboxes = gt_bboxes[gt_nz_idxs] mod_detections = np.empty((0, 7)) max_overlap = 0 for class_id in np.unique(gt_nz_class_ids): gt_class_ixs = np.where((gt_nz_class_ids == class_id))[0] tp_bboxes = gt_nz_bboxes[gt_class_ixs] fp_bboxes = flip_bbox(tp_bboxes, (height, width), flip_x=True, flip_y=True) gt_bbox_count = gt_class_ixs.shape[0] score_sz = (gt_bbox_count, 1) scale = 0.001 cls_avg_score = class_pred_stats['avg'][class_id] orig_tp_scores = np.full(score_sz, cls_avg_score) orig_fp_scores = np.full(score_sz, cls_avg_score) fp_noise = np.round(np.random.uniform(low=(- scale), high=scale, size=score_sz), 4) tp_noise = np.round(np.random.uniform(low=(- scale), high=scale, size=score_sz), 4) tp_scores = (orig_tp_scores + tp_noise) fp_scores = (orig_fp_scores + fp_noise) tp_scores = np.clip(tp_scores, 0.0, 1.0) fp_scores = np.clip(fp_scores, 0.0, 1.0) if verbose: print() print(' --------------------------------------------------------------------------------------') print(' class id : {:3d} class avg score: {:.4f} GT boxes : {:3d}'.format(class_id, cls_avg_score, gt_bbox_count)) print(' --------------------------------------------------------------------------------------') print('\n TP Boxes: \t box 
\t\t\t orig_score \t noise \t\t new_score') print(' ', ('-' * 80)) for (i, (box, orig_score, noise, new_score)) in enumerate(zip(tp_bboxes, orig_tp_scores, tp_noise, tp_scores)): print(' ', i, ' \t', box, '\t\t', orig_score, '\t', noise, '\t', new_score) print('\n FP Boxes: \t box \t\t\t orig_score \t noise \t\t new_score') print(' ', ('-' * 80)) for (i, (box, orig_score, noise, new_score)) in enumerate(zip(fp_bboxes, orig_fp_scores, fp_noise, fp_scores)): print(' ', i, ' \t', box, '\t\t', orig_score, '\t', noise, '\t', new_score) overlaps = compute_2D_iou(tp_bboxes, fp_bboxes) large_ious = np.where((overlaps > 0.8))[0] if (len(large_ious) > 0): if verbose: print('Large IOUS encountered!!!! :', large_ious) print('---------------------------------------------') print('class id :', class_id, 'class prediction avg:', cls_tp_score, cls_fp_score) print('---------------------------------------------') print(' pre adjustment overlaps ', overlaps.shape, ' max: ', overlaps.max()) print('---------------------------------------------') print(overlaps, '\n') print('fp_bboxes before adjustment ', fp_bboxes.shape) print(fp_bboxes[large_ious]) fp_bboxes[large_ious] = np.rint((fp_bboxes[large_ious].astype(np.float) * 0.85)) overlaps = compute_2D_iou(tp_bboxes, fp_bboxes) if verbose: print('fp_bboxes after adjustment ', fp_bboxes.shape) print(fp_bboxes[large_ious]) print(' post adjustment overlaps ', overlaps.shape, ' max: ', overlaps.max()) print(' ---------------------------------------------') print(overlaps, '\n') m_overlap = overlaps.max() max_overlap = (m_overlap if (m_overlap > max_overlap) else max_overlap) classes = np.expand_dims(gt_nz_class_ids[gt_class_ixs], axis=(- 1)) tp_ind = np.ones((gt_class_ixs.shape[0], 1)) class_tp = np.concatenate([gt_nz_bboxes[gt_class_ixs], classes, tp_scores, tp_ind], axis=(- 1)) class_fp = np.concatenate([fp_bboxes, classes, fp_scores, ((- 1) * tp_ind)], axis=(- 1)) mod_detections = np.vstack([mod_detections, class_tp, class_fp]) roi_count 
= config.DETECTION_MAX_INSTANCES top_ids = np.argsort(mod_detections[(:, 5)])[::(- 1)][:roi_count] if verbose: print() print('GT boxes + fp boxes with noise, prior to sort:', mod_detections.shape) print(('-' * 75)) print(mod_detections) print() print('GT boxes + fp_boxes with noise - After Sort (Final):', mod_detections.shape) print(('-' * 75)) print(mod_detections[top_ids]) print('\n MAX OVERLAP {:.5f}'.format(max_overlap)) return (mod_detections[top_ids], max_overlap)
def add_evaluation_detections_1(gt_class_ids, gt_bboxes, config, class_pred_stats): '\n Use GT bboxes and a series of False Postivies as detections to pass on to FCN\n Genertes list of detections using GT and adds FP bboxes for all GT bboxes \n \n Scores are assigned using the AVERAGE CLASS SCORE + uniform random noise \n \n Inputs:\n ------\n gt_class_ids: \n gt_bboxes: \n config\n \n Returns:\n --------\n detections [M, (y1, x1, y2, x2, class_id, score)] After inserting false positives \n M = N * 2 (N: number of GT annotations)\n Number of FP insertions is equal to number of GT annotations\n ' verbose = config.VERBOSE verbose = False (height, width) = config.IMAGE_SHAPE[:2] gt_nz_idxs = np.where((gt_class_ids > 0))[0] gt_nz_class_ids = gt_class_ids[gt_nz_idxs] gt_nz_bboxes = gt_bboxes[gt_nz_idxs] mod_detections = np.empty((0, 7)) max_overlap = 0 for class_id in np.unique(gt_nz_class_ids): gt_class_ixs = np.where((gt_nz_class_ids == class_id))[0] tp_bboxes = gt_nz_bboxes[gt_class_ixs] fp_bboxes = flip_bbox(tp_bboxes, (height, width), flip_x=True, flip_y=True) gt_bbox_count = gt_class_ixs.shape[0] score_sz = (gt_bbox_count, 1) scale = 0.001 cls_avg_score = class_pred_stats['avg'][class_id] orig_tp_scores = np.full(score_sz, cls_avg_score) orig_fp_scores = np.full(score_sz, cls_avg_score) fp_noise = np.round(np.random.uniform(low=(- scale), high=scale, size=score_sz), 4) tp_noise = np.round(np.random.uniform(low=(- scale), high=scale, size=score_sz), 4) tp_scores = (orig_tp_scores + tp_noise) fp_scores = (orig_fp_scores + fp_noise) tp_scores = np.clip(tp_scores, 0.0, 1.0) fp_scores = np.clip(fp_scores, 0.0, 1.0) if verbose: print() print(' --------------------------------------------------------------------------------------') print(' class id : {:3d} class avg score: {:.4f} GT boxes : {:3d}'.format(class_id, cls_avg_score, gt_bbox_count)) print(' --------------------------------------------------------------------------------------') print('\n TP Boxes: \t box 
\t\t\t orig_score \t noise \t\t new_score') print(' ', ('-' * 80)) for (i, (box, orig_score, noise, new_score)) in enumerate(zip(tp_bboxes, orig_tp_scores, tp_noise, tp_scores)): print(' ', i, ' \t', box, '\t\t', orig_score, '\t', noise, '\t', new_score) print('\n FP Boxes: \t box \t\t\t orig_score \t noise \t\t new_score') print(' ', ('-' * 80)) for (i, (box, orig_score, noise, new_score)) in enumerate(zip(fp_bboxes, orig_fp_scores, fp_noise, fp_scores)): print(' ', i, ' \t', box, '\t\t', orig_score, '\t', noise, '\t', new_score) overlaps = compute_2D_iou(tp_bboxes, fp_bboxes) large_ious = np.where((overlaps > 0.8))[0] if (len(large_ious) > 0): if verbose: print('Large IOUS encountered!!!! :', large_ious) print('---------------------------------------------') print('class id :', class_id, 'class prediction avg:', cls_tp_score, cls_fp_score) print('---------------------------------------------') print(' pre adjustment overlaps ', overlaps.shape, ' max: ', overlaps.max()) print('---------------------------------------------') print(overlaps, '\n') print('fp_bboxes before adjustment ', fp_bboxes.shape) print(fp_bboxes[large_ious]) fp_bboxes[large_ious] = np.rint((fp_bboxes[large_ious].astype(np.float) * 0.85)) overlaps = compute_2D_iou(tp_bboxes, fp_bboxes) if verbose: print('fp_bboxes after adjustment ', fp_bboxes.shape) print(fp_bboxes[large_ious]) print(' post adjustment overlaps ', overlaps.shape, ' max: ', overlaps.max()) print(' ---------------------------------------------') print(overlaps, '\n') m_overlap = overlaps.max() max_overlap = (m_overlap if (m_overlap > max_overlap) else max_overlap) classes = np.expand_dims(gt_nz_class_ids[gt_class_ixs], axis=(- 1)) tp_ind = np.ones((gt_class_ixs.shape[0], 1)) class_tp = np.concatenate([gt_nz_bboxes[gt_class_ixs], classes, tp_scores, tp_ind], axis=(- 1)) class_fp = np.concatenate([fp_bboxes, classes, fp_scores, ((- 1) * tp_ind)], axis=(- 1)) mod_detections = np.vstack([mod_detections, class_tp, class_fp]) roi_count 
= config.DETECTION_MAX_INSTANCES top_ids = np.argsort(mod_detections[(:, 5)])[::(- 1)][:roi_count] if verbose: print() print('GT boxes + fp boxes with noise, prior to sort:', mod_detections.shape) print(('-' * 75)) print(mod_detections) print() print('GT boxes + fp_boxes with noise - After Sort (Final):', mod_detections.shape) print(('-' * 75)) print(mod_detections[top_ids]) print('\n MAX OVERLAP {:.5f}'.format(max_overlap)) return (mod_detections[top_ids], max_overlap)<|docstring|>Use GT bboxes and a series of False Postivies as detections to pass on to FCN Genertes list of detections using GT and adds FP bboxes for all GT bboxes Scores are assigned using the AVERAGE CLASS SCORE + uniform random noise Inputs: ------ gt_class_ids: gt_bboxes: config Returns: -------- detections [M, (y1, x1, y2, x2, class_id, score)] After inserting false positives M = N * 2 (N: number of GT annotations) Number of FP insertions is equal to number of GT annotations<|endoftext|>
31b2024ea523151ed8f06e61934415717ef643cd48882de15714da66aa6c0bfa
def add_evaluation_detections_2(gt_class_ids, gt_bboxes, config, class_pred_stats): '\n Use GT bboxes and a series of False Postivies as detections to pass on to FCN\n Generates list of detections using GT and adds FP bboxes for all GT bboxes \n\n Scores are assigned using the 1st/3rd CLASS SCORE QUANTILES + uniform random noise \n GT and FP boxes are evenly split between 1st/3rd quantiles, with the 3rd quantile \n recieving the majority when there an odd number of GT boxes:\n \n Difference with add_evaluation_detections_3:\n \n In ODD number of bounding boxes, Q3 receives the lesser half of boxes\n ---------------------------------------------------------------------\n q3_count = gt_bbox_count // 2 \n q1_count = gt_bbox_count - q3_count \n \n Inputs:\n ------\n gt_class_ids: \n gt_bboxes: \n config\n \n Returns:\n --------\n detections [M, (y1, x1, y2, x2, class_id, score)] After inserting false positives \n M = N * 2 (N: number of GT annotations)\n Number of FP insertions is equal to number of GT annotations\n ' verbose = config.VERBOSE (height, width) = config.IMAGE_SHAPE[:2] gt_nz_idxs = np.where((gt_class_ids > 0))[0] gt_nz_class_ids = gt_class_ids[gt_nz_idxs] gt_nz_bboxes = gt_bboxes[gt_nz_idxs] mod_detections = np.empty((0, 7)) max_overlap = 0 for class_id in np.unique(gt_nz_class_ids): gt_class_ixs = np.where((gt_nz_class_ids == class_id))[0] tp_bboxes = gt_nz_bboxes[gt_class_ixs] fp_bboxes = flip_bbox(tp_bboxes, (height, width), flip_x=True, flip_y=True) gt_bbox_count = gt_class_ixs.shape[0] score_sz = (gt_bbox_count, 1) scale = 0.001 q3_count = (gt_bbox_count // 2) q1_count = (gt_bbox_count - q3_count) q1_score = class_pred_stats['pct'][class_id][0] q3_score = class_pred_stats['pct'][class_id][2] orig_tp_scores = np.zeros(score_sz) orig_fp_scores = np.zeros(score_sz) orig_tp_scores[:q3_count] = q3_score orig_tp_scores[q3_count:] = q1_score orig_fp_scores[:q3_count] = q3_score orig_fp_scores[q3_count:] = q1_score fp_noise = 
np.round(np.random.uniform(low=(- scale), high=scale, size=score_sz), 4) tp_noise = np.round(np.random.uniform(low=(- scale), high=scale, size=score_sz), 4) tp_scores = (orig_tp_scores + tp_noise) fp_scores = (orig_fp_scores + fp_noise) tp_scores = np.clip(tp_scores, 0.0, 1.0) fp_scores = np.clip(fp_scores, 0.0, 1.0) if verbose: print() print(' --------------------------------------------------------------------------------------') print(' class id : {:3d}'.format(class_id)) print(' --------------------------------------------------------------------------------------') print(' GT boxes : {:3d} q3 count/score: {:3d} / {:.4f} q1_count/score: {:3d} / {:.4f} '.format(gt_bbox_count, q3_count, q3_score, q1_count, q1_score)) print('\n TP Boxes: \t box \t\t\t orig_score \t noise \t\t new_score') print(' ', ('-' * 80)) for (i, (box, orig_score, noise, new_score)) in enumerate(zip(tp_bboxes, orig_tp_scores, tp_noise, tp_scores)): print(' ', i, ' \t', box, '\t\t', orig_score, '\t', noise, '\t', new_score) print('\n FP Boxes: \t box \t\t\t orig_score \t noise \t\t new_score') print(' ', ('-' * 80)) for (i, (box, orig_score, noise, new_score)) in enumerate(zip(fp_bboxes, orig_fp_scores, fp_noise, fp_scores)): print(' ', i, ' \t', box, '\t\t', orig_score, '\t', noise, '\t', new_score) overlaps = compute_2D_iou(tp_bboxes, fp_bboxes) large_ious = np.where((overlaps > 0.8))[0] if (len(large_ious) > 0): if verbose: print('Large IOUS encountered!!!! 
:', large_ious) print('---------------------------------------------') print('class id :', class_id, 'class prediction avg:', cls_tp_score, cls_fp_score) print('---------------------------------------------') print(' pre adjustment overlaps ', overlaps.shape, ' max: ', overlaps.max()) print('---------------------------------------------') print(overlaps, '\n') print('fp_bboxes before adjustment ', fp_bboxes.shape) print(fp_bboxes[large_ious]) fp_bboxes[large_ious] = np.rint((fp_bboxes[large_ious].astype(np.float) * 0.85)) overlaps = compute_2D_iou(tp_bboxes, fp_bboxes) if verbose: print('fp_bboxes after adjustment ', fp_bboxes.shape) print(fp_bboxes[large_ious]) print(' post adjustment overlaps ', overlaps.shape, ' max: ', overlaps.max()) print(' ---------------------------------------------') print(overlaps, '\n') m_overlap = overlaps.max() max_overlap = (m_overlap if (m_overlap > max_overlap) else max_overlap) classes = np.expand_dims(gt_nz_class_ids[gt_class_ixs], axis=(- 1)) tp_ind = np.ones((gt_class_ixs.shape[0], 1)) class_tp = np.concatenate([gt_nz_bboxes[gt_class_ixs], classes, tp_scores, tp_ind], axis=(- 1)) class_fp = np.concatenate([fp_bboxes, classes, fp_scores, ((- 1) * tp_ind)], axis=(- 1)) mod_detections = np.vstack([mod_detections, class_tp, class_fp]) roi_count = config.DETECTION_MAX_INSTANCES top_ids = np.argsort(mod_detections[(:, 5)])[::(- 1)][:roi_count] if verbose: print() print('GT boxes + fp boxes with noise, prior to sort:', mod_detections.shape) print(('-' * 75)) print(mod_detections) print() print('GT boxes + fp_boxes with noise - After Sort (Final):', mod_detections.shape) print(('-' * 75)) print(mod_detections[top_ids]) print('\n MAX OVERLAP {:.5f}'.format(max_overlap)) return (mod_detections[top_ids], max_overlap)
Use GT bboxes and a series of False Postivies as detections to pass on to FCN Generates list of detections using GT and adds FP bboxes for all GT bboxes Scores are assigned using the 1st/3rd CLASS SCORE QUANTILES + uniform random noise GT and FP boxes are evenly split between 1st/3rd quantiles, with the 3rd quantile recieving the majority when there an odd number of GT boxes: Difference with add_evaluation_detections_3: In ODD number of bounding boxes, Q3 receives the lesser half of boxes --------------------------------------------------------------------- q3_count = gt_bbox_count // 2 q1_count = gt_bbox_count - q3_count Inputs: ------ gt_class_ids: gt_bboxes: config Returns: -------- detections [M, (y1, x1, y2, x2, class_id, score)] After inserting false positives M = N * 2 (N: number of GT annotations) Number of FP insertions is equal to number of GT annotations
mrcnn/detect_eval_layer.py
add_evaluation_detections_2
kbardool/mrcnn3
2
python
def add_evaluation_detections_2(gt_class_ids, gt_bboxes, config, class_pred_stats): '\n Use GT bboxes and a series of False Postivies as detections to pass on to FCN\n Generates list of detections using GT and adds FP bboxes for all GT bboxes \n\n Scores are assigned using the 1st/3rd CLASS SCORE QUANTILES + uniform random noise \n GT and FP boxes are evenly split between 1st/3rd quantiles, with the 3rd quantile \n recieving the majority when there an odd number of GT boxes:\n \n Difference with add_evaluation_detections_3:\n \n In ODD number of bounding boxes, Q3 receives the lesser half of boxes\n ---------------------------------------------------------------------\n q3_count = gt_bbox_count // 2 \n q1_count = gt_bbox_count - q3_count \n \n Inputs:\n ------\n gt_class_ids: \n gt_bboxes: \n config\n \n Returns:\n --------\n detections [M, (y1, x1, y2, x2, class_id, score)] After inserting false positives \n M = N * 2 (N: number of GT annotations)\n Number of FP insertions is equal to number of GT annotations\n ' verbose = config.VERBOSE (height, width) = config.IMAGE_SHAPE[:2] gt_nz_idxs = np.where((gt_class_ids > 0))[0] gt_nz_class_ids = gt_class_ids[gt_nz_idxs] gt_nz_bboxes = gt_bboxes[gt_nz_idxs] mod_detections = np.empty((0, 7)) max_overlap = 0 for class_id in np.unique(gt_nz_class_ids): gt_class_ixs = np.where((gt_nz_class_ids == class_id))[0] tp_bboxes = gt_nz_bboxes[gt_class_ixs] fp_bboxes = flip_bbox(tp_bboxes, (height, width), flip_x=True, flip_y=True) gt_bbox_count = gt_class_ixs.shape[0] score_sz = (gt_bbox_count, 1) scale = 0.001 q3_count = (gt_bbox_count // 2) q1_count = (gt_bbox_count - q3_count) q1_score = class_pred_stats['pct'][class_id][0] q3_score = class_pred_stats['pct'][class_id][2] orig_tp_scores = np.zeros(score_sz) orig_fp_scores = np.zeros(score_sz) orig_tp_scores[:q3_count] = q3_score orig_tp_scores[q3_count:] = q1_score orig_fp_scores[:q3_count] = q3_score orig_fp_scores[q3_count:] = q1_score fp_noise = 
np.round(np.random.uniform(low=(- scale), high=scale, size=score_sz), 4) tp_noise = np.round(np.random.uniform(low=(- scale), high=scale, size=score_sz), 4) tp_scores = (orig_tp_scores + tp_noise) fp_scores = (orig_fp_scores + fp_noise) tp_scores = np.clip(tp_scores, 0.0, 1.0) fp_scores = np.clip(fp_scores, 0.0, 1.0) if verbose: print() print(' --------------------------------------------------------------------------------------') print(' class id : {:3d}'.format(class_id)) print(' --------------------------------------------------------------------------------------') print(' GT boxes : {:3d} q3 count/score: {:3d} / {:.4f} q1_count/score: {:3d} / {:.4f} '.format(gt_bbox_count, q3_count, q3_score, q1_count, q1_score)) print('\n TP Boxes: \t box \t\t\t orig_score \t noise \t\t new_score') print(' ', ('-' * 80)) for (i, (box, orig_score, noise, new_score)) in enumerate(zip(tp_bboxes, orig_tp_scores, tp_noise, tp_scores)): print(' ', i, ' \t', box, '\t\t', orig_score, '\t', noise, '\t', new_score) print('\n FP Boxes: \t box \t\t\t orig_score \t noise \t\t new_score') print(' ', ('-' * 80)) for (i, (box, orig_score, noise, new_score)) in enumerate(zip(fp_bboxes, orig_fp_scores, fp_noise, fp_scores)): print(' ', i, ' \t', box, '\t\t', orig_score, '\t', noise, '\t', new_score) overlaps = compute_2D_iou(tp_bboxes, fp_bboxes) large_ious = np.where((overlaps > 0.8))[0] if (len(large_ious) > 0): if verbose: print('Large IOUS encountered!!!! 
:', large_ious) print('---------------------------------------------') print('class id :', class_id, 'class prediction avg:', cls_tp_score, cls_fp_score) print('---------------------------------------------') print(' pre adjustment overlaps ', overlaps.shape, ' max: ', overlaps.max()) print('---------------------------------------------') print(overlaps, '\n') print('fp_bboxes before adjustment ', fp_bboxes.shape) print(fp_bboxes[large_ious]) fp_bboxes[large_ious] = np.rint((fp_bboxes[large_ious].astype(np.float) * 0.85)) overlaps = compute_2D_iou(tp_bboxes, fp_bboxes) if verbose: print('fp_bboxes after adjustment ', fp_bboxes.shape) print(fp_bboxes[large_ious]) print(' post adjustment overlaps ', overlaps.shape, ' max: ', overlaps.max()) print(' ---------------------------------------------') print(overlaps, '\n') m_overlap = overlaps.max() max_overlap = (m_overlap if (m_overlap > max_overlap) else max_overlap) classes = np.expand_dims(gt_nz_class_ids[gt_class_ixs], axis=(- 1)) tp_ind = np.ones((gt_class_ixs.shape[0], 1)) class_tp = np.concatenate([gt_nz_bboxes[gt_class_ixs], classes, tp_scores, tp_ind], axis=(- 1)) class_fp = np.concatenate([fp_bboxes, classes, fp_scores, ((- 1) * tp_ind)], axis=(- 1)) mod_detections = np.vstack([mod_detections, class_tp, class_fp]) roi_count = config.DETECTION_MAX_INSTANCES top_ids = np.argsort(mod_detections[(:, 5)])[::(- 1)][:roi_count] if verbose: print() print('GT boxes + fp boxes with noise, prior to sort:', mod_detections.shape) print(('-' * 75)) print(mod_detections) print() print('GT boxes + fp_boxes with noise - After Sort (Final):', mod_detections.shape) print(('-' * 75)) print(mod_detections[top_ids]) print('\n MAX OVERLAP {:.5f}'.format(max_overlap)) return (mod_detections[top_ids], max_overlap)
def add_evaluation_detections_2(gt_class_ids, gt_bboxes, config, class_pred_stats): '\n Use GT bboxes and a series of False Postivies as detections to pass on to FCN\n Generates list of detections using GT and adds FP bboxes for all GT bboxes \n\n Scores are assigned using the 1st/3rd CLASS SCORE QUANTILES + uniform random noise \n GT and FP boxes are evenly split between 1st/3rd quantiles, with the 3rd quantile \n recieving the majority when there an odd number of GT boxes:\n \n Difference with add_evaluation_detections_3:\n \n In ODD number of bounding boxes, Q3 receives the lesser half of boxes\n ---------------------------------------------------------------------\n q3_count = gt_bbox_count // 2 \n q1_count = gt_bbox_count - q3_count \n \n Inputs:\n ------\n gt_class_ids: \n gt_bboxes: \n config\n \n Returns:\n --------\n detections [M, (y1, x1, y2, x2, class_id, score)] After inserting false positives \n M = N * 2 (N: number of GT annotations)\n Number of FP insertions is equal to number of GT annotations\n ' verbose = config.VERBOSE (height, width) = config.IMAGE_SHAPE[:2] gt_nz_idxs = np.where((gt_class_ids > 0))[0] gt_nz_class_ids = gt_class_ids[gt_nz_idxs] gt_nz_bboxes = gt_bboxes[gt_nz_idxs] mod_detections = np.empty((0, 7)) max_overlap = 0 for class_id in np.unique(gt_nz_class_ids): gt_class_ixs = np.where((gt_nz_class_ids == class_id))[0] tp_bboxes = gt_nz_bboxes[gt_class_ixs] fp_bboxes = flip_bbox(tp_bboxes, (height, width), flip_x=True, flip_y=True) gt_bbox_count = gt_class_ixs.shape[0] score_sz = (gt_bbox_count, 1) scale = 0.001 q3_count = (gt_bbox_count // 2) q1_count = (gt_bbox_count - q3_count) q1_score = class_pred_stats['pct'][class_id][0] q3_score = class_pred_stats['pct'][class_id][2] orig_tp_scores = np.zeros(score_sz) orig_fp_scores = np.zeros(score_sz) orig_tp_scores[:q3_count] = q3_score orig_tp_scores[q3_count:] = q1_score orig_fp_scores[:q3_count] = q3_score orig_fp_scores[q3_count:] = q1_score fp_noise = 
np.round(np.random.uniform(low=(- scale), high=scale, size=score_sz), 4) tp_noise = np.round(np.random.uniform(low=(- scale), high=scale, size=score_sz), 4) tp_scores = (orig_tp_scores + tp_noise) fp_scores = (orig_fp_scores + fp_noise) tp_scores = np.clip(tp_scores, 0.0, 1.0) fp_scores = np.clip(fp_scores, 0.0, 1.0) if verbose: print() print(' --------------------------------------------------------------------------------------') print(' class id : {:3d}'.format(class_id)) print(' --------------------------------------------------------------------------------------') print(' GT boxes : {:3d} q3 count/score: {:3d} / {:.4f} q1_count/score: {:3d} / {:.4f} '.format(gt_bbox_count, q3_count, q3_score, q1_count, q1_score)) print('\n TP Boxes: \t box \t\t\t orig_score \t noise \t\t new_score') print(' ', ('-' * 80)) for (i, (box, orig_score, noise, new_score)) in enumerate(zip(tp_bboxes, orig_tp_scores, tp_noise, tp_scores)): print(' ', i, ' \t', box, '\t\t', orig_score, '\t', noise, '\t', new_score) print('\n FP Boxes: \t box \t\t\t orig_score \t noise \t\t new_score') print(' ', ('-' * 80)) for (i, (box, orig_score, noise, new_score)) in enumerate(zip(fp_bboxes, orig_fp_scores, fp_noise, fp_scores)): print(' ', i, ' \t', box, '\t\t', orig_score, '\t', noise, '\t', new_score) overlaps = compute_2D_iou(tp_bboxes, fp_bboxes) large_ious = np.where((overlaps > 0.8))[0] if (len(large_ious) > 0): if verbose: print('Large IOUS encountered!!!! 
:', large_ious) print('---------------------------------------------') print('class id :', class_id, 'class prediction avg:', cls_tp_score, cls_fp_score) print('---------------------------------------------') print(' pre adjustment overlaps ', overlaps.shape, ' max: ', overlaps.max()) print('---------------------------------------------') print(overlaps, '\n') print('fp_bboxes before adjustment ', fp_bboxes.shape) print(fp_bboxes[large_ious]) fp_bboxes[large_ious] = np.rint((fp_bboxes[large_ious].astype(np.float) * 0.85)) overlaps = compute_2D_iou(tp_bboxes, fp_bboxes) if verbose: print('fp_bboxes after adjustment ', fp_bboxes.shape) print(fp_bboxes[large_ious]) print(' post adjustment overlaps ', overlaps.shape, ' max: ', overlaps.max()) print(' ---------------------------------------------') print(overlaps, '\n') m_overlap = overlaps.max() max_overlap = (m_overlap if (m_overlap > max_overlap) else max_overlap) classes = np.expand_dims(gt_nz_class_ids[gt_class_ixs], axis=(- 1)) tp_ind = np.ones((gt_class_ixs.shape[0], 1)) class_tp = np.concatenate([gt_nz_bboxes[gt_class_ixs], classes, tp_scores, tp_ind], axis=(- 1)) class_fp = np.concatenate([fp_bboxes, classes, fp_scores, ((- 1) * tp_ind)], axis=(- 1)) mod_detections = np.vstack([mod_detections, class_tp, class_fp]) roi_count = config.DETECTION_MAX_INSTANCES top_ids = np.argsort(mod_detections[(:, 5)])[::(- 1)][:roi_count] if verbose: print() print('GT boxes + fp boxes with noise, prior to sort:', mod_detections.shape) print(('-' * 75)) print(mod_detections) print() print('GT boxes + fp_boxes with noise - After Sort (Final):', mod_detections.shape) print(('-' * 75)) print(mod_detections[top_ids]) print('\n MAX OVERLAP {:.5f}'.format(max_overlap)) return (mod_detections[top_ids], max_overlap)<|docstring|>Use GT bboxes and a series of False Postivies as detections to pass on to FCN Generates list of detections using GT and adds FP bboxes for all GT bboxes Scores are assigned using the 1st/3rd CLASS SCORE QUANTILES 
+ uniform random noise GT and FP boxes are evenly split between 1st/3rd quantiles, with the 3rd quantile recieving the majority when there an odd number of GT boxes: Difference with add_evaluation_detections_3: In ODD number of bounding boxes, Q3 receives the lesser half of boxes --------------------------------------------------------------------- q3_count = gt_bbox_count // 2 q1_count = gt_bbox_count - q3_count Inputs: ------ gt_class_ids: gt_bboxes: config Returns: -------- detections [M, (y1, x1, y2, x2, class_id, score)] After inserting false positives M = N * 2 (N: number of GT annotations) Number of FP insertions is equal to number of GT annotations<|endoftext|>
766e707bad892b1855136e8dc4c0a9f7a8c6219eac25a67a76da91b5ec9934ad
def add_evaluation_detections_2b(gt_class_ids, gt_bboxes, config, class_pred_stats): '\n Use GT bboxes and a series of False Postivies as detections to pass on to FCN\n Generates list of detections using GT and adds FP bboxes for all GT bboxes \n\n Scores are assigned using the 1st/3rd CLASS SCORE QUANTILES + uniform random noise \n GT and FP boxes are evenly split between 1st/3rd quantiles, with the 3rd Q getting\n majority when there an odd number of GT boxes:\n \n Difference with add_evaluation_detections_2:\n \n In ODD number of bounding boxes, Q1 receives the lesser half of boxes\n ---------------------------------------------------------------------\n q1_count = gt_bbox_count // 2 \n q3_count = gt_bbox_count - q1_count \n \n Inputs:\n ------\n gt_class_ids: \n gt_bboxes: \n config\n \n Returns:\n --------\n detections [M, (y1, x1, y2, x2, class_id, score)] After inserting false positives \n M = N * 2 (N: number of GT annotations)\n Number of FP insertions is equal to number of GT annotations\n ' verbose = config.VERBOSE (height, width) = config.IMAGE_SHAPE[:2] gt_nz_idxs = np.where((gt_class_ids > 0))[0] gt_nz_class_ids = gt_class_ids[gt_nz_idxs] gt_nz_bboxes = gt_bboxes[gt_nz_idxs] mod_detections = np.empty((0, 7)) max_overlap = 0 for class_id in np.unique(gt_nz_class_ids): gt_class_ixs = np.where((gt_nz_class_ids == class_id))[0] tp_bboxes = gt_nz_bboxes[gt_class_ixs] fp_bboxes = flip_bbox(tp_bboxes, (height, width), flip_x=True, flip_y=True) gt_bbox_count = gt_class_ixs.shape[0] score_sz = (gt_bbox_count, 1) scale = 0.001 q1_count = (gt_bbox_count // 2) q3_count = (gt_bbox_count - q1_count) q1_score = class_pred_stats['pct'][class_id][0] q3_score = class_pred_stats['pct'][class_id][2] orig_tp_scores = np.zeros(score_sz) orig_fp_scores = np.zeros(score_sz) orig_tp_scores[:q3_count] = q3_score orig_tp_scores[q3_count:] = q1_score orig_fp_scores[:q3_count] = q3_score orig_fp_scores[q3_count:] = q1_score fp_noise = np.round(np.random.uniform(low=(- scale), 
high=scale, size=score_sz), 4) tp_noise = np.round(np.random.uniform(low=(- scale), high=scale, size=score_sz), 4) tp_scores = (orig_tp_scores + tp_noise) fp_scores = (orig_fp_scores + fp_noise) tp_scores = np.clip(tp_scores, 0.0, 1.0) fp_scores = np.clip(fp_scores, 0.0, 1.0) if verbose: print() print(' --------------------------------------------------------------------------------------') print(' class id : {:3d}'.format(class_id)) print(' --------------------------------------------------------------------------------------') print(' GT boxes : {:3d} q3 count/score: {:3d} / {:.4f} q1_count/score: {:3d} / {:.4f} '.format(gt_bbox_count, q3_count, q3_score, q1_count, q1_score)) print('\n TP Boxes: \t box \t\t\t orig_score \t noise \t\t new_score') print(' ', ('-' * 80)) for (i, (box, orig_score, noise, new_score)) in enumerate(zip(tp_bboxes, orig_tp_scores, tp_noise, tp_scores)): print(' ', i, ' \t', box, '\t\t', orig_score, '\t', noise, '\t', new_score) print('\n FP Boxes: \t box \t\t\t orig_score \t noise \t\t new_score') print(' ', ('-' * 80)) for (i, (box, orig_score, noise, new_score)) in enumerate(zip(fp_bboxes, orig_fp_scores, fp_noise, fp_scores)): print(' ', i, ' \t', box, '\t\t', orig_score, '\t', noise, '\t', new_score) overlaps = compute_2D_iou(tp_bboxes, fp_bboxes) large_ious = np.where((overlaps > 0.8))[0] if (len(large_ious) > 0): if verbose: print('Large IOUS encountered!!!! 
:', large_ious) print('---------------------------------------------') print('class id :', class_id, 'class prediction avg:', cls_tp_score, cls_fp_score) print('---------------------------------------------') print(' pre adjustment overlaps ', overlaps.shape, ' max: ', overlaps.max()) print('---------------------------------------------') print(overlaps, '\n') print('fp_bboxes before adjustment ', fp_bboxes.shape) print(fp_bboxes[large_ious]) fp_bboxes[large_ious] = np.rint((fp_bboxes[large_ious].astype(np.float) * 0.85)) overlaps = compute_2D_iou(tp_bboxes, fp_bboxes) if verbose: print('fp_bboxes after adjustment ', fp_bboxes.shape) print(fp_bboxes[large_ious]) print(' post adjustment overlaps ', overlaps.shape, ' max: ', overlaps.max()) print(' ---------------------------------------------') print(overlaps, '\n') m_overlap = overlaps.max() max_overlap = (m_overlap if (m_overlap > max_overlap) else max_overlap) classes = np.expand_dims(gt_nz_class_ids[gt_class_ixs], axis=(- 1)) tp_ind = np.ones((gt_class_ixs.shape[0], 1)) class_tp = np.concatenate([gt_nz_bboxes[gt_class_ixs], classes, tp_scores, tp_ind], axis=(- 1)) class_fp = np.concatenate([fp_bboxes, classes, fp_scores, ((- 1) * tp_ind)], axis=(- 1)) mod_detections = np.vstack([mod_detections, class_tp, class_fp]) roi_count = config.DETECTION_MAX_INSTANCES top_ids = np.argsort(mod_detections[(:, 5)])[::(- 1)][:roi_count] if verbose: print() print('GT boxes + fp boxes with noise, prior to sort:', mod_detections.shape) print(('-' * 75)) print(mod_detections) print() print('GT boxes + fp_boxes with noise - After Sort (Final):', mod_detections.shape) print(('-' * 75)) print(mod_detections[top_ids]) print('\n MAX OVERLAP {:.5f}'.format(max_overlap)) return (mod_detections[top_ids], max_overlap)
Use GT bboxes and a series of False Postivies as detections to pass on to FCN Generates list of detections using GT and adds FP bboxes for all GT bboxes Scores are assigned using the 1st/3rd CLASS SCORE QUANTILES + uniform random noise GT and FP boxes are evenly split between 1st/3rd quantiles, with the 3rd Q getting majority when there an odd number of GT boxes: Difference with add_evaluation_detections_2: In ODD number of bounding boxes, Q1 receives the lesser half of boxes --------------------------------------------------------------------- q1_count = gt_bbox_count // 2 q3_count = gt_bbox_count - q1_count Inputs: ------ gt_class_ids: gt_bboxes: config Returns: -------- detections [M, (y1, x1, y2, x2, class_id, score)] After inserting false positives M = N * 2 (N: number of GT annotations) Number of FP insertions is equal to number of GT annotations
mrcnn/detect_eval_layer.py
add_evaluation_detections_2b
kbardool/mrcnn3
2
python
def add_evaluation_detections_2b(gt_class_ids, gt_bboxes, config, class_pred_stats): '\n Use GT bboxes and a series of False Postivies as detections to pass on to FCN\n Generates list of detections using GT and adds FP bboxes for all GT bboxes \n\n Scores are assigned using the 1st/3rd CLASS SCORE QUANTILES + uniform random noise \n GT and FP boxes are evenly split between 1st/3rd quantiles, with the 3rd Q getting\n majority when there an odd number of GT boxes:\n \n Difference with add_evaluation_detections_2:\n \n In ODD number of bounding boxes, Q1 receives the lesser half of boxes\n ---------------------------------------------------------------------\n q1_count = gt_bbox_count // 2 \n q3_count = gt_bbox_count - q1_count \n \n Inputs:\n ------\n gt_class_ids: \n gt_bboxes: \n config\n \n Returns:\n --------\n detections [M, (y1, x1, y2, x2, class_id, score)] After inserting false positives \n M = N * 2 (N: number of GT annotations)\n Number of FP insertions is equal to number of GT annotations\n ' verbose = config.VERBOSE (height, width) = config.IMAGE_SHAPE[:2] gt_nz_idxs = np.where((gt_class_ids > 0))[0] gt_nz_class_ids = gt_class_ids[gt_nz_idxs] gt_nz_bboxes = gt_bboxes[gt_nz_idxs] mod_detections = np.empty((0, 7)) max_overlap = 0 for class_id in np.unique(gt_nz_class_ids): gt_class_ixs = np.where((gt_nz_class_ids == class_id))[0] tp_bboxes = gt_nz_bboxes[gt_class_ixs] fp_bboxes = flip_bbox(tp_bboxes, (height, width), flip_x=True, flip_y=True) gt_bbox_count = gt_class_ixs.shape[0] score_sz = (gt_bbox_count, 1) scale = 0.001 q1_count = (gt_bbox_count // 2) q3_count = (gt_bbox_count - q1_count) q1_score = class_pred_stats['pct'][class_id][0] q3_score = class_pred_stats['pct'][class_id][2] orig_tp_scores = np.zeros(score_sz) orig_fp_scores = np.zeros(score_sz) orig_tp_scores[:q3_count] = q3_score orig_tp_scores[q3_count:] = q1_score orig_fp_scores[:q3_count] = q3_score orig_fp_scores[q3_count:] = q1_score fp_noise = np.round(np.random.uniform(low=(- scale), 
high=scale, size=score_sz), 4) tp_noise = np.round(np.random.uniform(low=(- scale), high=scale, size=score_sz), 4) tp_scores = (orig_tp_scores + tp_noise) fp_scores = (orig_fp_scores + fp_noise) tp_scores = np.clip(tp_scores, 0.0, 1.0) fp_scores = np.clip(fp_scores, 0.0, 1.0) if verbose: print() print(' --------------------------------------------------------------------------------------') print(' class id : {:3d}'.format(class_id)) print(' --------------------------------------------------------------------------------------') print(' GT boxes : {:3d} q3 count/score: {:3d} / {:.4f} q1_count/score: {:3d} / {:.4f} '.format(gt_bbox_count, q3_count, q3_score, q1_count, q1_score)) print('\n TP Boxes: \t box \t\t\t orig_score \t noise \t\t new_score') print(' ', ('-' * 80)) for (i, (box, orig_score, noise, new_score)) in enumerate(zip(tp_bboxes, orig_tp_scores, tp_noise, tp_scores)): print(' ', i, ' \t', box, '\t\t', orig_score, '\t', noise, '\t', new_score) print('\n FP Boxes: \t box \t\t\t orig_score \t noise \t\t new_score') print(' ', ('-' * 80)) for (i, (box, orig_score, noise, new_score)) in enumerate(zip(fp_bboxes, orig_fp_scores, fp_noise, fp_scores)): print(' ', i, ' \t', box, '\t\t', orig_score, '\t', noise, '\t', new_score) overlaps = compute_2D_iou(tp_bboxes, fp_bboxes) large_ious = np.where((overlaps > 0.8))[0] if (len(large_ious) > 0): if verbose: print('Large IOUS encountered!!!! 
:', large_ious) print('---------------------------------------------') print('class id :', class_id, 'class prediction avg:', cls_tp_score, cls_fp_score) print('---------------------------------------------') print(' pre adjustment overlaps ', overlaps.shape, ' max: ', overlaps.max()) print('---------------------------------------------') print(overlaps, '\n') print('fp_bboxes before adjustment ', fp_bboxes.shape) print(fp_bboxes[large_ious]) fp_bboxes[large_ious] = np.rint((fp_bboxes[large_ious].astype(np.float) * 0.85)) overlaps = compute_2D_iou(tp_bboxes, fp_bboxes) if verbose: print('fp_bboxes after adjustment ', fp_bboxes.shape) print(fp_bboxes[large_ious]) print(' post adjustment overlaps ', overlaps.shape, ' max: ', overlaps.max()) print(' ---------------------------------------------') print(overlaps, '\n') m_overlap = overlaps.max() max_overlap = (m_overlap if (m_overlap > max_overlap) else max_overlap) classes = np.expand_dims(gt_nz_class_ids[gt_class_ixs], axis=(- 1)) tp_ind = np.ones((gt_class_ixs.shape[0], 1)) class_tp = np.concatenate([gt_nz_bboxes[gt_class_ixs], classes, tp_scores, tp_ind], axis=(- 1)) class_fp = np.concatenate([fp_bboxes, classes, fp_scores, ((- 1) * tp_ind)], axis=(- 1)) mod_detections = np.vstack([mod_detections, class_tp, class_fp]) roi_count = config.DETECTION_MAX_INSTANCES top_ids = np.argsort(mod_detections[(:, 5)])[::(- 1)][:roi_count] if verbose: print() print('GT boxes + fp boxes with noise, prior to sort:', mod_detections.shape) print(('-' * 75)) print(mod_detections) print() print('GT boxes + fp_boxes with noise - After Sort (Final):', mod_detections.shape) print(('-' * 75)) print(mod_detections[top_ids]) print('\n MAX OVERLAP {:.5f}'.format(max_overlap)) return (mod_detections[top_ids], max_overlap)
def add_evaluation_detections_2b(gt_class_ids, gt_bboxes, config, class_pred_stats): '\n Use GT bboxes and a series of False Postivies as detections to pass on to FCN\n Generates list of detections using GT and adds FP bboxes for all GT bboxes \n\n Scores are assigned using the 1st/3rd CLASS SCORE QUANTILES + uniform random noise \n GT and FP boxes are evenly split between 1st/3rd quantiles, with the 3rd Q getting\n majority when there an odd number of GT boxes:\n \n Difference with add_evaluation_detections_2:\n \n In ODD number of bounding boxes, Q1 receives the lesser half of boxes\n ---------------------------------------------------------------------\n q1_count = gt_bbox_count // 2 \n q3_count = gt_bbox_count - q1_count \n \n Inputs:\n ------\n gt_class_ids: \n gt_bboxes: \n config\n \n Returns:\n --------\n detections [M, (y1, x1, y2, x2, class_id, score)] After inserting false positives \n M = N * 2 (N: number of GT annotations)\n Number of FP insertions is equal to number of GT annotations\n ' verbose = config.VERBOSE (height, width) = config.IMAGE_SHAPE[:2] gt_nz_idxs = np.where((gt_class_ids > 0))[0] gt_nz_class_ids = gt_class_ids[gt_nz_idxs] gt_nz_bboxes = gt_bboxes[gt_nz_idxs] mod_detections = np.empty((0, 7)) max_overlap = 0 for class_id in np.unique(gt_nz_class_ids): gt_class_ixs = np.where((gt_nz_class_ids == class_id))[0] tp_bboxes = gt_nz_bboxes[gt_class_ixs] fp_bboxes = flip_bbox(tp_bboxes, (height, width), flip_x=True, flip_y=True) gt_bbox_count = gt_class_ixs.shape[0] score_sz = (gt_bbox_count, 1) scale = 0.001 q1_count = (gt_bbox_count // 2) q3_count = (gt_bbox_count - q1_count) q1_score = class_pred_stats['pct'][class_id][0] q3_score = class_pred_stats['pct'][class_id][2] orig_tp_scores = np.zeros(score_sz) orig_fp_scores = np.zeros(score_sz) orig_tp_scores[:q3_count] = q3_score orig_tp_scores[q3_count:] = q1_score orig_fp_scores[:q3_count] = q3_score orig_fp_scores[q3_count:] = q1_score fp_noise = np.round(np.random.uniform(low=(- scale), 
high=scale, size=score_sz), 4) tp_noise = np.round(np.random.uniform(low=(- scale), high=scale, size=score_sz), 4) tp_scores = (orig_tp_scores + tp_noise) fp_scores = (orig_fp_scores + fp_noise) tp_scores = np.clip(tp_scores, 0.0, 1.0) fp_scores = np.clip(fp_scores, 0.0, 1.0) if verbose: print() print(' --------------------------------------------------------------------------------------') print(' class id : {:3d}'.format(class_id)) print(' --------------------------------------------------------------------------------------') print(' GT boxes : {:3d} q3 count/score: {:3d} / {:.4f} q1_count/score: {:3d} / {:.4f} '.format(gt_bbox_count, q3_count, q3_score, q1_count, q1_score)) print('\n TP Boxes: \t box \t\t\t orig_score \t noise \t\t new_score') print(' ', ('-' * 80)) for (i, (box, orig_score, noise, new_score)) in enumerate(zip(tp_bboxes, orig_tp_scores, tp_noise, tp_scores)): print(' ', i, ' \t', box, '\t\t', orig_score, '\t', noise, '\t', new_score) print('\n FP Boxes: \t box \t\t\t orig_score \t noise \t\t new_score') print(' ', ('-' * 80)) for (i, (box, orig_score, noise, new_score)) in enumerate(zip(fp_bboxes, orig_fp_scores, fp_noise, fp_scores)): print(' ', i, ' \t', box, '\t\t', orig_score, '\t', noise, '\t', new_score) overlaps = compute_2D_iou(tp_bboxes, fp_bboxes) large_ious = np.where((overlaps > 0.8))[0] if (len(large_ious) > 0): if verbose: print('Large IOUS encountered!!!! 
:', large_ious) print('---------------------------------------------') print('class id :', class_id, 'class prediction avg:', cls_tp_score, cls_fp_score) print('---------------------------------------------') print(' pre adjustment overlaps ', overlaps.shape, ' max: ', overlaps.max()) print('---------------------------------------------') print(overlaps, '\n') print('fp_bboxes before adjustment ', fp_bboxes.shape) print(fp_bboxes[large_ious]) fp_bboxes[large_ious] = np.rint((fp_bboxes[large_ious].astype(np.float) * 0.85)) overlaps = compute_2D_iou(tp_bboxes, fp_bboxes) if verbose: print('fp_bboxes after adjustment ', fp_bboxes.shape) print(fp_bboxes[large_ious]) print(' post adjustment overlaps ', overlaps.shape, ' max: ', overlaps.max()) print(' ---------------------------------------------') print(overlaps, '\n') m_overlap = overlaps.max() max_overlap = (m_overlap if (m_overlap > max_overlap) else max_overlap) classes = np.expand_dims(gt_nz_class_ids[gt_class_ixs], axis=(- 1)) tp_ind = np.ones((gt_class_ixs.shape[0], 1)) class_tp = np.concatenate([gt_nz_bboxes[gt_class_ixs], classes, tp_scores, tp_ind], axis=(- 1)) class_fp = np.concatenate([fp_bboxes, classes, fp_scores, ((- 1) * tp_ind)], axis=(- 1)) mod_detections = np.vstack([mod_detections, class_tp, class_fp]) roi_count = config.DETECTION_MAX_INSTANCES top_ids = np.argsort(mod_detections[(:, 5)])[::(- 1)][:roi_count] if verbose: print() print('GT boxes + fp boxes with noise, prior to sort:', mod_detections.shape) print(('-' * 75)) print(mod_detections) print() print('GT boxes + fp_boxes with noise - After Sort (Final):', mod_detections.shape) print(('-' * 75)) print(mod_detections[top_ids]) print('\n MAX OVERLAP {:.5f}'.format(max_overlap)) return (mod_detections[top_ids], max_overlap)<|docstring|>Use GT bboxes and a series of False Postivies as detections to pass on to FCN Generates list of detections using GT and adds FP bboxes for all GT bboxes Scores are assigned using the 1st/3rd CLASS SCORE QUANTILES 
+ uniform random noise GT and FP boxes are evenly split between 1st/3rd quantiles, with the 3rd Q getting majority when there an odd number of GT boxes: Difference with add_evaluation_detections_2: In ODD number of bounding boxes, Q1 receives the lesser half of boxes --------------------------------------------------------------------- q1_count = gt_bbox_count // 2 q3_count = gt_bbox_count - q1_count Inputs: ------ gt_class_ids: gt_bboxes: config Returns: -------- detections [M, (y1, x1, y2, x2, class_id, score)] After inserting false positives M = N * 2 (N: number of GT annotations) Number of FP insertions is equal to number of GT annotations<|endoftext|>
e38deaa4c0a66999b895b033012247bb167c53077403e0c28da8a7aff068f080
def add_evaluation_detections_3(gt_class_ids, gt_bboxes, config, class_pred_stats): '\n Use GT bboxes as detections to pass on to FCN as detections. Scores are assigned \n using the AVERAGE CLASS SCORE \n \n Inputs:\n ------\n gt_class_ids: \n gt_bboxes: \n config\n \n Returns:\n --------\n detections [M, (y1, x1, y2, x2, class_id, score)] \n M = N * 2 (N: number of GT annotations)\n Number of FP insertions is equal to number of GT annotations\n ' verbose = config.VERBOSE (height, width) = config.IMAGE_SHAPE[:2] gt_nz_idxs = np.where((gt_class_ids > 0))[0] gt_nz_class_ids = gt_class_ids[gt_nz_idxs] gt_nz_bboxes = gt_bboxes[gt_nz_idxs] mod_detections = np.empty((0, 7)) max_overlap = 0 for class_id in np.unique(gt_nz_class_ids): gt_class_ixs = np.where((gt_nz_class_ids == class_id))[0] tp_bboxes = gt_nz_bboxes[gt_class_ixs] gt_bbox_count = gt_class_ixs.shape[0] score_sz = (gt_bbox_count, 1) scale = 0.001 cls_avg_score = class_pred_stats['avg'][class_id] orig_tp_scores = np.full(score_sz, 0.5) tp_noise = np.zeros(score_sz) tp_scores = (orig_tp_scores + tp_noise) tp_scores = np.clip(tp_scores, 0.0, 1.0) if verbose: print() print(' --------------------------------------------------------------------------------------') print(' class id : {:3d} class avg score: {:.4f} GT boxes : {:3d}'.format(class_id, cls_avg_score, gt_bbox_count)) print(' --------------------------------------------------------------------------------------') print('\n TP Boxes: \t box \t\t\t orig_score \t noise \t\t new_score') print(' ', ('-' * 80)) for (i, (box, orig_score, noise, new_score)) in enumerate(zip(tp_bboxes, orig_tp_scores, tp_noise, tp_scores)): print(' ', i, ' \t', box, '\t\t', orig_score, '\t', noise, '\t', new_score) classes = np.expand_dims(gt_nz_class_ids[gt_class_ixs], axis=(- 1)) tp_ind = np.ones((gt_class_ixs.shape[0], 1)) class_tp = np.concatenate([gt_nz_bboxes[gt_class_ixs], classes, tp_scores, tp_ind], axis=(- 1)) mod_detections = np.vstack([mod_detections, class_tp]) 
roi_count = config.DETECTION_MAX_INSTANCES top_ids = np.argsort(mod_detections[(:, 5)])[::(- 1)][:roi_count] if verbose: print() print('GT boxes with additive noise, prior to sort:', mod_detections.shape) print(('-' * 75)) print(mod_detections) print() print('GT boxes with additive noise, after Sort (Final):', mod_detections.shape) print(('-' * 75)) print(mod_detections[top_ids]) return (mod_detections[top_ids], max_overlap)
Use GT bboxes as detections to pass on to FCN as detections. Scores are assigned using the AVERAGE CLASS SCORE Inputs: ------ gt_class_ids: gt_bboxes: config Returns: -------- detections [M, (y1, x1, y2, x2, class_id, score)] M = N * 2 (N: number of GT annotations) Number of FP insertions is equal to number of GT annotations
mrcnn/detect_eval_layer.py
add_evaluation_detections_3
kbardool/mrcnn3
2
python
def add_evaluation_detections_3(gt_class_ids, gt_bboxes, config, class_pred_stats): '\n Use GT bboxes as detections to pass on to FCN as detections. Scores are assigned \n using the AVERAGE CLASS SCORE \n \n Inputs:\n ------\n gt_class_ids: \n gt_bboxes: \n config\n \n Returns:\n --------\n detections [M, (y1, x1, y2, x2, class_id, score)] \n M = N * 2 (N: number of GT annotations)\n Number of FP insertions is equal to number of GT annotations\n ' verbose = config.VERBOSE (height, width) = config.IMAGE_SHAPE[:2] gt_nz_idxs = np.where((gt_class_ids > 0))[0] gt_nz_class_ids = gt_class_ids[gt_nz_idxs] gt_nz_bboxes = gt_bboxes[gt_nz_idxs] mod_detections = np.empty((0, 7)) max_overlap = 0 for class_id in np.unique(gt_nz_class_ids): gt_class_ixs = np.where((gt_nz_class_ids == class_id))[0] tp_bboxes = gt_nz_bboxes[gt_class_ixs] gt_bbox_count = gt_class_ixs.shape[0] score_sz = (gt_bbox_count, 1) scale = 0.001 cls_avg_score = class_pred_stats['avg'][class_id] orig_tp_scores = np.full(score_sz, 0.5) tp_noise = np.zeros(score_sz) tp_scores = (orig_tp_scores + tp_noise) tp_scores = np.clip(tp_scores, 0.0, 1.0) if verbose: print() print(' --------------------------------------------------------------------------------------') print(' class id : {:3d} class avg score: {:.4f} GT boxes : {:3d}'.format(class_id, cls_avg_score, gt_bbox_count)) print(' --------------------------------------------------------------------------------------') print('\n TP Boxes: \t box \t\t\t orig_score \t noise \t\t new_score') print(' ', ('-' * 80)) for (i, (box, orig_score, noise, new_score)) in enumerate(zip(tp_bboxes, orig_tp_scores, tp_noise, tp_scores)): print(' ', i, ' \t', box, '\t\t', orig_score, '\t', noise, '\t', new_score) classes = np.expand_dims(gt_nz_class_ids[gt_class_ixs], axis=(- 1)) tp_ind = np.ones((gt_class_ixs.shape[0], 1)) class_tp = np.concatenate([gt_nz_bboxes[gt_class_ixs], classes, tp_scores, tp_ind], axis=(- 1)) mod_detections = np.vstack([mod_detections, class_tp]) 
roi_count = config.DETECTION_MAX_INSTANCES top_ids = np.argsort(mod_detections[(:, 5)])[::(- 1)][:roi_count] if verbose: print() print('GT boxes with additive noise, prior to sort:', mod_detections.shape) print(('-' * 75)) print(mod_detections) print() print('GT boxes with additive noise, after Sort (Final):', mod_detections.shape) print(('-' * 75)) print(mod_detections[top_ids]) return (mod_detections[top_ids], max_overlap)
def add_evaluation_detections_3(gt_class_ids, gt_bboxes, config, class_pred_stats): '\n Use GT bboxes as detections to pass on to FCN as detections. Scores are assigned \n using the AVERAGE CLASS SCORE \n \n Inputs:\n ------\n gt_class_ids: \n gt_bboxes: \n config\n \n Returns:\n --------\n detections [M, (y1, x1, y2, x2, class_id, score)] \n M = N * 2 (N: number of GT annotations)\n Number of FP insertions is equal to number of GT annotations\n ' verbose = config.VERBOSE (height, width) = config.IMAGE_SHAPE[:2] gt_nz_idxs = np.where((gt_class_ids > 0))[0] gt_nz_class_ids = gt_class_ids[gt_nz_idxs] gt_nz_bboxes = gt_bboxes[gt_nz_idxs] mod_detections = np.empty((0, 7)) max_overlap = 0 for class_id in np.unique(gt_nz_class_ids): gt_class_ixs = np.where((gt_nz_class_ids == class_id))[0] tp_bboxes = gt_nz_bboxes[gt_class_ixs] gt_bbox_count = gt_class_ixs.shape[0] score_sz = (gt_bbox_count, 1) scale = 0.001 cls_avg_score = class_pred_stats['avg'][class_id] orig_tp_scores = np.full(score_sz, 0.5) tp_noise = np.zeros(score_sz) tp_scores = (orig_tp_scores + tp_noise) tp_scores = np.clip(tp_scores, 0.0, 1.0) if verbose: print() print(' --------------------------------------------------------------------------------------') print(' class id : {:3d} class avg score: {:.4f} GT boxes : {:3d}'.format(class_id, cls_avg_score, gt_bbox_count)) print(' --------------------------------------------------------------------------------------') print('\n TP Boxes: \t box \t\t\t orig_score \t noise \t\t new_score') print(' ', ('-' * 80)) for (i, (box, orig_score, noise, new_score)) in enumerate(zip(tp_bboxes, orig_tp_scores, tp_noise, tp_scores)): print(' ', i, ' \t', box, '\t\t', orig_score, '\t', noise, '\t', new_score) classes = np.expand_dims(gt_nz_class_ids[gt_class_ixs], axis=(- 1)) tp_ind = np.ones((gt_class_ixs.shape[0], 1)) class_tp = np.concatenate([gt_nz_bboxes[gt_class_ixs], classes, tp_scores, tp_ind], axis=(- 1)) mod_detections = np.vstack([mod_detections, class_tp]) 
roi_count = config.DETECTION_MAX_INSTANCES top_ids = np.argsort(mod_detections[(:, 5)])[::(- 1)][:roi_count] if verbose: print() print('GT boxes with additive noise, prior to sort:', mod_detections.shape) print(('-' * 75)) print(mod_detections) print() print('GT boxes with additive noise, after Sort (Final):', mod_detections.shape) print(('-' * 75)) print(mod_detections[top_ids]) return (mod_detections[top_ids], max_overlap)<|docstring|>Use GT bboxes as detections to pass on to FCN as detections. Scores are assigned using the AVERAGE CLASS SCORE Inputs: ------ gt_class_ids: gt_bboxes: config Returns: -------- detections [M, (y1, x1, y2, x2, class_id, score)] M = N * 2 (N: number of GT annotations) Number of FP insertions is equal to number of GT annotations<|endoftext|>
53c0482152bfc50e252817afe4cbc020dfd97141a86e2506cbeb8519e6610a1f
def test_file_upload(self): '\n Upload a text valid file\n ' file_uploaded = http_client().post(':5000/files', 'user_id=1 file@/sw/apps2/qualibrate-api/LICENSE') self.assertTrue((file_uploaded['mime'] == 'text/plain'))
Upload a text valid file
test/ep/file_test.py
test_file_upload
alanjimenez1/qualibrate-api
0
python
def test_file_upload(self): '\n \n ' file_uploaded = http_client().post(':5000/files', 'user_id=1 file@/sw/apps2/qualibrate-api/LICENSE') self.assertTrue((file_uploaded['mime'] == 'text/plain'))
def test_file_upload(self): '\n \n ' file_uploaded = http_client().post(':5000/files', 'user_id=1 file@/sw/apps2/qualibrate-api/LICENSE') self.assertTrue((file_uploaded['mime'] == 'text/plain'))<|docstring|>Upload a text valid file<|endoftext|>
d17364688c4cf415d3e057eda6adf5603b92818aa66c395494eb53add67eb029
def np_power(mat_g, alpha): 'Computes mat_g^alpha for a square symmetric matrix mat_g.' (mat_u, diag_d, mat_v) = np.linalg.svd(mat_g) diag_d = np.power(diag_d, alpha) return np.dot(np.dot(mat_u, np.diag(diag_d)), mat_v)
Computes mat_g^alpha for a square symmetric matrix mat_g.
tensorflow/contrib/opt/python/training/matrix_functions_test.py
np_power
nicolasoyharcabal/tensorflow
848
python
def np_power(mat_g, alpha): (mat_u, diag_d, mat_v) = np.linalg.svd(mat_g) diag_d = np.power(diag_d, alpha) return np.dot(np.dot(mat_u, np.diag(diag_d)), mat_v)
def np_power(mat_g, alpha): (mat_u, diag_d, mat_v) = np.linalg.svd(mat_g) diag_d = np.power(diag_d, alpha) return np.dot(np.dot(mat_u, np.diag(diag_d)), mat_v)<|docstring|>Computes mat_g^alpha for a square symmetric matrix mat_g.<|endoftext|>
4425dfb53c8fd0cd0fd2b401cf0041c274a342910fd608f1c2d0ce4569824a4a
def testMatrixSquareRootFunction(self): 'Tests for matrix square roots.' size = 20 mat_a = np.random.rand(size, size) mat = np.dot(mat_a, mat_a.T) expected_mat = np_power(mat, 0.5) mat_root = matrix_functions.matrix_square_root(mat, size) self.assertAllCloseAccordingToType(expected_mat, mat_root, atol=TOLERANCE, rtol=TOLERANCE)
Tests for matrix square roots.
tensorflow/contrib/opt/python/training/matrix_functions_test.py
testMatrixSquareRootFunction
nicolasoyharcabal/tensorflow
848
python
def testMatrixSquareRootFunction(self): size = 20 mat_a = np.random.rand(size, size) mat = np.dot(mat_a, mat_a.T) expected_mat = np_power(mat, 0.5) mat_root = matrix_functions.matrix_square_root(mat, size) self.assertAllCloseAccordingToType(expected_mat, mat_root, atol=TOLERANCE, rtol=TOLERANCE)
def testMatrixSquareRootFunction(self): size = 20 mat_a = np.random.rand(size, size) mat = np.dot(mat_a, mat_a.T) expected_mat = np_power(mat, 0.5) mat_root = matrix_functions.matrix_square_root(mat, size) self.assertAllCloseAccordingToType(expected_mat, mat_root, atol=TOLERANCE, rtol=TOLERANCE)<|docstring|>Tests for matrix square roots.<|endoftext|>
b9ac3921469f860c5a267fcff464aa0a69745d9b20719ef1e27dea159bee0a33
def testMatrixInversePthRootFunction(self): 'Tests for matrix inverse pth roots.' size = 20 mat_a = np.random.rand(size, size) mat = np.dot(mat_a, mat_a.T) expected_mat = np_power(mat, (- 0.125)) mat_root = matrix_functions.matrix_inverse_pth_root(mat, size, (- 0.125)) self.assertAllCloseAccordingToType(expected_mat, mat_root, atol=TOLERANCE, rtol=TOLERANCE)
Tests for matrix inverse pth roots.
tensorflow/contrib/opt/python/training/matrix_functions_test.py
testMatrixInversePthRootFunction
nicolasoyharcabal/tensorflow
848
python
def testMatrixInversePthRootFunction(self): size = 20 mat_a = np.random.rand(size, size) mat = np.dot(mat_a, mat_a.T) expected_mat = np_power(mat, (- 0.125)) mat_root = matrix_functions.matrix_inverse_pth_root(mat, size, (- 0.125)) self.assertAllCloseAccordingToType(expected_mat, mat_root, atol=TOLERANCE, rtol=TOLERANCE)
def testMatrixInversePthRootFunction(self): size = 20 mat_a = np.random.rand(size, size) mat = np.dot(mat_a, mat_a.T) expected_mat = np_power(mat, (- 0.125)) mat_root = matrix_functions.matrix_inverse_pth_root(mat, size, (- 0.125)) self.assertAllCloseAccordingToType(expected_mat, mat_root, atol=TOLERANCE, rtol=TOLERANCE)<|docstring|>Tests for matrix inverse pth roots.<|endoftext|>
85660944bae39703f692f9ca95018bcc688a2b3e92c6796cb610137cc70f7ca7
def __init__(self, interaction__matrix, driver_matrix, dtype=torch.float32, device=None): '\n Coontinuous time time-invariant linear dynamics of the form: `dx/dt = Ax + Bu`.\n :param interaction__matrix: The interaction matrix `A`, that determines how states of `n_nodes`\n nodes interact, shape `n_nodes x n_nodes`.\n :param driver_matrix: The driver matrix B, which determines how `k` control signals are\n applied in the linear dynamics, shape `n_nodes x k`.\n :param dtype: torch datatype of the calculations\n :param device: torch device of the calculations, usually "cpu" or "cuda:0"\n ' super().__init__(['x']) self.device = device self.dtype = dtype if (self.device is None): if torch.cuda.is_available(): self.device = ('cuda:' + str(torch.cuda.current_device())) else: self.device = torch.device('cpu') self.interaction__matrix = interaction__matrix.unsqueeze(0) self.driver_matrix = driver_matrix.unsqueeze(0) self.label = 'linear'
Coontinuous time time-invariant linear dynamics of the form: `dx/dt = Ax + Bu`. :param interaction__matrix: The interaction matrix `A`, that determines how states of `n_nodes` nodes interact, shape `n_nodes x n_nodes`. :param driver_matrix: The driver matrix B, which determines how `k` control signals are applied in the linear dynamics, shape `n_nodes x k`. :param dtype: torch datatype of the calculations :param device: torch device of the calculations, usually "cpu" or "cuda:0"
nnc/controllers/baselines/ct_lti/dynamics.py
__init__
ZZHPKU/nnc
17
python
def __init__(self, interaction__matrix, driver_matrix, dtype=torch.float32, device=None): '\n Coontinuous time time-invariant linear dynamics of the form: `dx/dt = Ax + Bu`.\n :param interaction__matrix: The interaction matrix `A`, that determines how states of `n_nodes`\n nodes interact, shape `n_nodes x n_nodes`.\n :param driver_matrix: The driver matrix B, which determines how `k` control signals are\n applied in the linear dynamics, shape `n_nodes x k`.\n :param dtype: torch datatype of the calculations\n :param device: torch device of the calculations, usually "cpu" or "cuda:0"\n ' super().__init__(['x']) self.device = device self.dtype = dtype if (self.device is None): if torch.cuda.is_available(): self.device = ('cuda:' + str(torch.cuda.current_device())) else: self.device = torch.device('cpu') self.interaction__matrix = interaction__matrix.unsqueeze(0) self.driver_matrix = driver_matrix.unsqueeze(0) self.label = 'linear'
def __init__(self, interaction__matrix, driver_matrix, dtype=torch.float32, device=None): '\n Coontinuous time time-invariant linear dynamics of the form: `dx/dt = Ax + Bu`.\n :param interaction__matrix: The interaction matrix `A`, that determines how states of `n_nodes`\n nodes interact, shape `n_nodes x n_nodes`.\n :param driver_matrix: The driver matrix B, which determines how `k` control signals are\n applied in the linear dynamics, shape `n_nodes x k`.\n :param dtype: torch datatype of the calculations\n :param device: torch device of the calculations, usually "cpu" or "cuda:0"\n ' super().__init__(['x']) self.device = device self.dtype = dtype if (self.device is None): if torch.cuda.is_available(): self.device = ('cuda:' + str(torch.cuda.current_device())) else: self.device = torch.device('cpu') self.interaction__matrix = interaction__matrix.unsqueeze(0) self.driver_matrix = driver_matrix.unsqueeze(0) self.label = 'linear'<|docstring|>Coontinuous time time-invariant linear dynamics of the form: `dx/dt = Ax + Bu`. :param interaction__matrix: The interaction matrix `A`, that determines how states of `n_nodes` nodes interact, shape `n_nodes x n_nodes`. :param driver_matrix: The driver matrix B, which determines how `k` control signals are applied in the linear dynamics, shape `n_nodes x k`. :param dtype: torch datatype of the calculations :param device: torch device of the calculations, usually "cpu" or "cuda:0"<|endoftext|>
a43cbc2b8d9d261e987d011d3adbb5a2cd3c3ca3929a9873ccf3c08664672a76
def forward(self, t: Union[(torch.Tensor, Number)], x: Union[(torch.Tensor, Iterable[torch.Tensor])], u: torch.Tensor=None): '\n Evaluation of the derivative or **amount of change** for controlled continuous-time\n time-invariant linear dynamics.\n :param x: current state values for nodes. Please ensure the input is not permuted, unless you know\n what you doing.\n :param t: time scalar, which is not used as the model is time invariant.\n :param u: control vectors. In this case please confirm it has proper dimensionality such\n that\n torch.matmul(driver_matrix, u) is possible.\n :return: the derivative tensor.\n ' if ((not isinstance(x, torch.Tensor)) and isinstance(x, Iterable)): x = torch.stack(list(x)) dx = torch.matmul(self.interaction__matrix, x.unsqueeze((- 1))).squeeze((- 1)) if (u is not None): control_term = torch.matmul(self.driver_matrix, u.unsqueeze((- 1))).squeeze((- 1)) dx += control_term return dx
Evaluation of the derivative or **amount of change** for controlled continuous-time time-invariant linear dynamics. :param x: current state values for nodes. Please ensure the input is not permuted, unless you know what you doing. :param t: time scalar, which is not used as the model is time invariant. :param u: control vectors. In this case please confirm it has proper dimensionality such that torch.matmul(driver_matrix, u) is possible. :return: the derivative tensor.
nnc/controllers/baselines/ct_lti/dynamics.py
forward
ZZHPKU/nnc
17
python
def forward(self, t: Union[(torch.Tensor, Number)], x: Union[(torch.Tensor, Iterable[torch.Tensor])], u: torch.Tensor=None): '\n Evaluation of the derivative or **amount of change** for controlled continuous-time\n time-invariant linear dynamics.\n :param x: current state values for nodes. Please ensure the input is not permuted, unless you know\n what you doing.\n :param t: time scalar, which is not used as the model is time invariant.\n :param u: control vectors. In this case please confirm it has proper dimensionality such\n that\n torch.matmul(driver_matrix, u) is possible.\n :return: the derivative tensor.\n ' if ((not isinstance(x, torch.Tensor)) and isinstance(x, Iterable)): x = torch.stack(list(x)) dx = torch.matmul(self.interaction__matrix, x.unsqueeze((- 1))).squeeze((- 1)) if (u is not None): control_term = torch.matmul(self.driver_matrix, u.unsqueeze((- 1))).squeeze((- 1)) dx += control_term return dx
def forward(self, t: Union[(torch.Tensor, Number)], x: Union[(torch.Tensor, Iterable[torch.Tensor])], u: torch.Tensor=None): '\n Evaluation of the derivative or **amount of change** for controlled continuous-time\n time-invariant linear dynamics.\n :param x: current state values for nodes. Please ensure the input is not permuted, unless you know\n what you doing.\n :param t: time scalar, which is not used as the model is time invariant.\n :param u: control vectors. In this case please confirm it has proper dimensionality such\n that\n torch.matmul(driver_matrix, u) is possible.\n :return: the derivative tensor.\n ' if ((not isinstance(x, torch.Tensor)) and isinstance(x, Iterable)): x = torch.stack(list(x)) dx = torch.matmul(self.interaction__matrix, x.unsqueeze((- 1))).squeeze((- 1)) if (u is not None): control_term = torch.matmul(self.driver_matrix, u.unsqueeze((- 1))).squeeze((- 1)) dx += control_term return dx<|docstring|>Evaluation of the derivative or **amount of change** for controlled continuous-time time-invariant linear dynamics. :param x: current state values for nodes. Please ensure the input is not permuted, unless you know what you doing. :param t: time scalar, which is not used as the model is time invariant. :param u: control vectors. In this case please confirm it has proper dimensionality such that torch.matmul(driver_matrix, u) is possible. :return: the derivative tensor.<|endoftext|>
42d7c0d21dc61fe6c75e0d97804fb3104c7fdbb016c73716afb8ef06bf87e8e1
def annotateImage(sess, logits, keep_prob, image_pl, image_file, image_shape): '\n Generate test output using the test images\n :param sess: TF session\n :param logits: TF Tensor for the logits\n :param keep_prob: TF Placeholder for the dropout keep robability\n :param image_pl: TF Placeholder for the image placeholder\n :param data_folder: Path to the folder that contains the datasets\n :param image_shape: Tuple - Shape of image\n :return: Output for for each test image\n ' image = scipy.misc.imresize(scipy.misc.imread(image_file), image_shape) im_softmax = sess.run([tf.nn.softmax(logits)], {keep_prob: 1.0, image_pl: [image]}) im_softmax = im_softmax[0][(:, 1)].reshape(image_shape[0], image_shape[1]) segmentation = (im_softmax > 0.5).reshape(image_shape[0], image_shape[1], 1) mask = np.dot(segmentation, np.array([[0, 255, 0, 127]])) mask = scipy.misc.toimage(mask, mode='RGBA') street_im = scipy.misc.toimage(image) street_im.paste(mask, box=None, mask=mask) return np.array(street_im)
Generate test output using the test images :param sess: TF session :param logits: TF Tensor for the logits :param keep_prob: TF Placeholder for the dropout keep robability :param image_pl: TF Placeholder for the image placeholder :param data_folder: Path to the folder that contains the datasets :param image_shape: Tuple - Shape of image :return: Output for for each test image
InferenceUtils.py
annotateImage
RomanoViolet/LaneDetectionViaSemanticSegmentation
4
python
def annotateImage(sess, logits, keep_prob, image_pl, image_file, image_shape): '\n Generate test output using the test images\n :param sess: TF session\n :param logits: TF Tensor for the logits\n :param keep_prob: TF Placeholder for the dropout keep robability\n :param image_pl: TF Placeholder for the image placeholder\n :param data_folder: Path to the folder that contains the datasets\n :param image_shape: Tuple - Shape of image\n :return: Output for for each test image\n ' image = scipy.misc.imresize(scipy.misc.imread(image_file), image_shape) im_softmax = sess.run([tf.nn.softmax(logits)], {keep_prob: 1.0, image_pl: [image]}) im_softmax = im_softmax[0][(:, 1)].reshape(image_shape[0], image_shape[1]) segmentation = (im_softmax > 0.5).reshape(image_shape[0], image_shape[1], 1) mask = np.dot(segmentation, np.array([[0, 255, 0, 127]])) mask = scipy.misc.toimage(mask, mode='RGBA') street_im = scipy.misc.toimage(image) street_im.paste(mask, box=None, mask=mask) return np.array(street_im)
def annotateImage(sess, logits, keep_prob, image_pl, image_file, image_shape): '\n Generate test output using the test images\n :param sess: TF session\n :param logits: TF Tensor for the logits\n :param keep_prob: TF Placeholder for the dropout keep robability\n :param image_pl: TF Placeholder for the image placeholder\n :param data_folder: Path to the folder that contains the datasets\n :param image_shape: Tuple - Shape of image\n :return: Output for for each test image\n ' image = scipy.misc.imresize(scipy.misc.imread(image_file), image_shape) im_softmax = sess.run([tf.nn.softmax(logits)], {keep_prob: 1.0, image_pl: [image]}) im_softmax = im_softmax[0][(:, 1)].reshape(image_shape[0], image_shape[1]) segmentation = (im_softmax > 0.5).reshape(image_shape[0], image_shape[1], 1) mask = np.dot(segmentation, np.array([[0, 255, 0, 127]])) mask = scipy.misc.toimage(mask, mode='RGBA') street_im = scipy.misc.toimage(image) street_im.paste(mask, box=None, mask=mask) return np.array(street_im)<|docstring|>Generate test output using the test images :param sess: TF session :param logits: TF Tensor for the logits :param keep_prob: TF Placeholder for the dropout keep robability :param image_pl: TF Placeholder for the image placeholder :param data_folder: Path to the folder that contains the datasets :param image_shape: Tuple - Shape of image :return: Output for for each test image<|endoftext|>
040ccd109232139ec1889b3f4e102184b88b36063923ac7e512c4d3e690f3395
@click.group() @click.pass_context def sprint(ctx): 'Sprint management' ctx.obj = Sprint(ctx.obj['CONFIG'])
Sprint management
atlssncli/sprint.py
sprint
bkryza/atlssncli
1
python
@click.group() @click.pass_context def sprint(ctx): ctx.obj = Sprint(ctx.obj['CONFIG'])
@click.group() @click.pass_context def sprint(ctx): ctx.obj = Sprint(ctx.obj['CONFIG'])<|docstring|>Sprint management<|endoftext|>
2d87fbde9fe3ae5a7fd3f4cbea1958c321261068a41affde59fa20ef897619bd
@sprint.command() @click.pass_context def help(ctx): 'Print sprint command help' click.echo(ctx.parent.get_help())
Print sprint command help
atlssncli/sprint.py
help
bkryza/atlssncli
1
python
@sprint.command() @click.pass_context def help(ctx): click.echo(ctx.parent.get_help())
@sprint.command() @click.pass_context def help(ctx): click.echo(ctx.parent.get_help())<|docstring|>Print sprint command help<|endoftext|>
273320ff5ba261b55c2fed22d14ff9f3b0d15ac2d6f3c99a1d2cca1468ed9334
@sprint.command() @click.argument('sprint_id', required=False) @pass_sprint def status(sprint, sprint_id): '\n Get sprint status.\n\n If no sprint id is provided, currently active sprint will be returned.\n ' LOG.debug('Getting sprint status %s', sprint_id) try: handler = SprintHandler(sprint.get_config()) handler.get_sprint_status(sprint_id) except Exception: traceback.print_exc() raise click.ClickException('Get sprint status failed')
Get sprint status. If no sprint id is provided, currently active sprint will be returned.
atlssncli/sprint.py
status
bkryza/atlssncli
1
python
@sprint.command() @click.argument('sprint_id', required=False) @pass_sprint def status(sprint, sprint_id): '\n Get sprint status.\n\n If no sprint id is provided, currently active sprint will be returned.\n ' LOG.debug('Getting sprint status %s', sprint_id) try: handler = SprintHandler(sprint.get_config()) handler.get_sprint_status(sprint_id) except Exception: traceback.print_exc() raise click.ClickException('Get sprint status failed')
@sprint.command() @click.argument('sprint_id', required=False) @pass_sprint def status(sprint, sprint_id): '\n Get sprint status.\n\n If no sprint id is provided, currently active sprint will be returned.\n ' LOG.debug('Getting sprint status %s', sprint_id) try: handler = SprintHandler(sprint.get_config()) handler.get_sprint_status(sprint_id) except Exception: traceback.print_exc() raise click.ClickException('Get sprint status failed')<|docstring|>Get sprint status. If no sprint id is provided, currently active sprint will be returned.<|endoftext|>
4186fce52135c8e6e00019df6390267f63e5020daf9f67e75e239a2b160ea25e
@sprint.command() @click.argument('board_id', required=False) @click.option('-n', '--name', required=False) @click.option('-s', '--start-date', 'start_date', required=False) @click.option('-d', '--duration', required=False) @pass_sprint def create(sprint, board_id, name, start_date, duration): '\n Create new sprint.\n\n If no board id is provided, it will be taken from the config.ini.\n ' LOG.debug('Creating new sprint on board %s', board_id) try: handler = SprintHandler(sprint.get_config()) handler.create_sprint(board_id, name, start_date, duration) except Exception: traceback.print_exc() raise click.ClickException('Creating sprint failed')
Create new sprint. If no board id is provided, it will be taken from the config.ini.
atlssncli/sprint.py
create
bkryza/atlssncli
1
python
@sprint.command() @click.argument('board_id', required=False) @click.option('-n', '--name', required=False) @click.option('-s', '--start-date', 'start_date', required=False) @click.option('-d', '--duration', required=False) @pass_sprint def create(sprint, board_id, name, start_date, duration): '\n Create new sprint.\n\n If no board id is provided, it will be taken from the config.ini.\n ' LOG.debug('Creating new sprint on board %s', board_id) try: handler = SprintHandler(sprint.get_config()) handler.create_sprint(board_id, name, start_date, duration) except Exception: traceback.print_exc() raise click.ClickException('Creating sprint failed')
@sprint.command() @click.argument('board_id', required=False) @click.option('-n', '--name', required=False) @click.option('-s', '--start-date', 'start_date', required=False) @click.option('-d', '--duration', required=False) @pass_sprint def create(sprint, board_id, name, start_date, duration): '\n Create new sprint.\n\n If no board id is provided, it will be taken from the config.ini.\n ' LOG.debug('Creating new sprint on board %s', board_id) try: handler = SprintHandler(sprint.get_config()) handler.create_sprint(board_id, name, start_date, duration) except Exception: traceback.print_exc() raise click.ClickException('Creating sprint failed')<|docstring|>Create new sprint. If no board id is provided, it will be taken from the config.ini.<|endoftext|>
c64d3f7864456f2570f132efa42c3c803915fd2a23a93628c44239c4194cbaf0
@sprint.command() @click.argument('sprint_id') @pass_sprint def delete(sprint, sprint_id): 'Delete specific sprint.' LOG.debug('Deleting sprint %s', sprint_id) try: handler = SprintHandler(sprint.get_config()) handler.delete_sprint(sprint_id) click.echo('Sprint {} deleted successfully'.format(sprint_id)) except Exception: traceback.print_exc() raise click.ClickException('Sprint delete failed')
Delete specific sprint.
atlssncli/sprint.py
delete
bkryza/atlssncli
1
python
@sprint.command() @click.argument('sprint_id') @pass_sprint def delete(sprint, sprint_id): LOG.debug('Deleting sprint %s', sprint_id) try: handler = SprintHandler(sprint.get_config()) handler.delete_sprint(sprint_id) click.echo('Sprint {} deleted successfully'.format(sprint_id)) except Exception: traceback.print_exc() raise click.ClickException('Sprint delete failed')
@sprint.command() @click.argument('sprint_id') @pass_sprint def delete(sprint, sprint_id): LOG.debug('Deleting sprint %s', sprint_id) try: handler = SprintHandler(sprint.get_config()) handler.delete_sprint(sprint_id) click.echo('Sprint {} deleted successfully'.format(sprint_id)) except Exception: traceback.print_exc() raise click.ClickException('Sprint delete failed')<|docstring|>Delete specific sprint.<|endoftext|>
f25a0065f54b47d7964f2c3d9f5d412980908dfe9cda0b46c6e4ba95a1468ead
@sprint.command() @click.argument('sprint_id', required=False) @click.argument('name') @pass_sprint def rename(sprint, sprint_id, name): 'Rename specific sprint.' LOG.debug('Renaming sprint %s', sprint_id) try: handler = SprintHandler(sprint.get_config()) handler.rename_sprint(sprint_id, name) except Exception: traceback.print_exc() raise click.ClickException('Sprint rename failed')
Rename specific sprint.
atlssncli/sprint.py
rename
bkryza/atlssncli
1
python
@sprint.command() @click.argument('sprint_id', required=False) @click.argument('name') @pass_sprint def rename(sprint, sprint_id, name): LOG.debug('Renaming sprint %s', sprint_id) try: handler = SprintHandler(sprint.get_config()) handler.rename_sprint(sprint_id, name) except Exception: traceback.print_exc() raise click.ClickException('Sprint rename failed')
@sprint.command() @click.argument('sprint_id', required=False) @click.argument('name') @pass_sprint def rename(sprint, sprint_id, name): LOG.debug('Renaming sprint %s', sprint_id) try: handler = SprintHandler(sprint.get_config()) handler.rename_sprint(sprint_id, name) except Exception: traceback.print_exc() raise click.ClickException('Sprint rename failed')<|docstring|>Rename specific sprint.<|endoftext|>
93cdec05e437641c2983789aab156c1e2b01168f47cfc2639b0393fd96f96d3f
@sprint.command() @click.argument('sprint_id') @click.option('-s', '--start-date', 'start_date', required=False) @click.option('-d', '--duration', required=False) @pass_sprint def start(sprint, sprint_id, start_date, duration): "\n Start specific sprint.\n\n If start_date and duration are not provided, sprint is started\n with the current time and default duration specified in config.ini.\n\n In order to provide duration, start_date must be also provided,\n but can be specified simply as 'now'.\n " LOG.debug('Starting sprint %s', sprint_id) try: handler = SprintHandler(sprint.get_config()) handler.start_sprint(sprint_id, start_date, duration) except Exception: traceback.print_exc() raise click.ClickException('Sprint start failed')
Start specific sprint. If start_date and duration are not provided, sprint is started with the current time and default duration specified in config.ini. In order to provide duration, start_date must be also provided, but can be specified simply as 'now'.
atlssncli/sprint.py
start
bkryza/atlssncli
1
python
@sprint.command() @click.argument('sprint_id') @click.option('-s', '--start-date', 'start_date', required=False) @click.option('-d', '--duration', required=False) @pass_sprint def start(sprint, sprint_id, start_date, duration): "\n Start specific sprint.\n\n If start_date and duration are not provided, sprint is started\n with the current time and default duration specified in config.ini.\n\n In order to provide duration, start_date must be also provided,\n but can be specified simply as 'now'.\n " LOG.debug('Starting sprint %s', sprint_id) try: handler = SprintHandler(sprint.get_config()) handler.start_sprint(sprint_id, start_date, duration) except Exception: traceback.print_exc() raise click.ClickException('Sprint start failed')
@sprint.command() @click.argument('sprint_id') @click.option('-s', '--start-date', 'start_date', required=False) @click.option('-d', '--duration', required=False) @pass_sprint def start(sprint, sprint_id, start_date, duration): "\n Start specific sprint.\n\n If start_date and duration are not provided, sprint is started\n with the current time and default duration specified in config.ini.\n\n In order to provide duration, start_date must be also provided,\n but can be specified simply as 'now'.\n " LOG.debug('Starting sprint %s', sprint_id) try: handler = SprintHandler(sprint.get_config()) handler.start_sprint(sprint_id, start_date, duration) except Exception: traceback.print_exc() raise click.ClickException('Sprint start failed')<|docstring|>Start specific sprint. If start_date and duration are not provided, sprint is started with the current time and default duration specified in config.ini. In order to provide duration, start_date must be also provided, but can be specified simply as 'now'.<|endoftext|>
0513084889369d8dd25155b73fc45f1a3f5802407267ff94b34fc986a4727604
@sprint.command() @click.argument('sprint_id', required=False) @pass_sprint def close(sprint, sprint_id): 'Close specific sprint.' LOG.debug('Closing sprint %s', sprint_id) try: handler = SprintHandler(sprint.get_config()) handler.close_sprint(sprint_id) except Exception: traceback.print_exc() raise click.ClickException('Sprint closing failed')
Close specific sprint.
atlssncli/sprint.py
close
bkryza/atlssncli
1
python
@sprint.command() @click.argument('sprint_id', required=False) @pass_sprint def close(sprint, sprint_id): LOG.debug('Closing sprint %s', sprint_id) try: handler = SprintHandler(sprint.get_config()) handler.close_sprint(sprint_id) except Exception: traceback.print_exc() raise click.ClickException('Sprint closing failed')
@sprint.command() @click.argument('sprint_id', required=False) @pass_sprint def close(sprint, sprint_id): LOG.debug('Closing sprint %s', sprint_id) try: handler = SprintHandler(sprint.get_config()) handler.close_sprint(sprint_id) except Exception: traceback.print_exc() raise click.ClickException('Sprint closing failed')<|docstring|>Close specific sprint.<|endoftext|>
be1f65370dfece498a48918adac188e7e7b86ecb6fc276ae44acc4533bbdfa53
@sprint.command() @click.argument('board_id', required=False) @click.option('-a', '--active', is_flag=True, help='Include active sprints', default=False) @click.option('-c', '--closed', is_flag=True, help='Include closed sprints', default=False) @click.option('-f', '--future', is_flag=True, help='Include future sprints', default=False) @pass_sprint def list(sprint, board_id, active, closed, future): '\n List sprints for board.\n\n If no board is provided on the command line, the active\n board from config.ini is used.\n ' LOG.debug('Getting list of sprints for board %s', board_id) try: handler = SprintHandler(sprint.get_config()) state = [] if active: state.append('active') if closed: state.append('closed') if future: state.append('future') handler.get_sprint_list(board_id, ','.join(state)) except Exception: traceback.print_exc() raise click.ClickException('Listing sprints failed')
List sprints for board. If no board is provided on the command line, the active board from config.ini is used.
atlssncli/sprint.py
list
bkryza/atlssncli
1
python
@sprint.command() @click.argument('board_id', required=False) @click.option('-a', '--active', is_flag=True, help='Include active sprints', default=False) @click.option('-c', '--closed', is_flag=True, help='Include closed sprints', default=False) @click.option('-f', '--future', is_flag=True, help='Include future sprints', default=False) @pass_sprint def list(sprint, board_id, active, closed, future): '\n List sprints for board.\n\n If no board is provided on the command line, the active\n board from config.ini is used.\n ' LOG.debug('Getting list of sprints for board %s', board_id) try: handler = SprintHandler(sprint.get_config()) state = [] if active: state.append('active') if closed: state.append('closed') if future: state.append('future') handler.get_sprint_list(board_id, ','.join(state)) except Exception: traceback.print_exc() raise click.ClickException('Listing sprints failed')
@sprint.command() @click.argument('board_id', required=False) @click.option('-a', '--active', is_flag=True, help='Include active sprints', default=False) @click.option('-c', '--closed', is_flag=True, help='Include closed sprints', default=False) @click.option('-f', '--future', is_flag=True, help='Include future sprints', default=False) @pass_sprint def list(sprint, board_id, active, closed, future): '\n List sprints for board.\n\n If no board is provided on the command line, the active\n board from config.ini is used.\n ' LOG.debug('Getting list of sprints for board %s', board_id) try: handler = SprintHandler(sprint.get_config()) state = [] if active: state.append('active') if closed: state.append('closed') if future: state.append('future') handler.get_sprint_list(board_id, ','.join(state)) except Exception: traceback.print_exc() raise click.ClickException('Listing sprints failed')<|docstring|>List sprints for board. If no board is provided on the command line, the active board from config.ini is used.<|endoftext|>
7613f3ec77cc9abc7b68f46c05590f5157f17d50d0adc0c98ca3ce1063d25902
@sprint.command() @click.argument('sprint_id', required=False) @click.option('-o', '--open', 'opened', is_flag=True, help='Include open tickets', default=False) @click.option('-c', '--closed', is_flag=True, help='Include closed tickets', default=False) @click.option('-p', '--in-progress', 'in_progress', is_flag=True, help='Include in progress tickets', default=False) @click.option('-r', '--resolved', is_flag=True, help='Include resolved tickets', default=False) @click.option('-a', '--assignee', help='Specify assignee username') @click.option('-q', '--jql', help='Specify custom JQL query to filter results') @pass_sprint def issues(sprint, sprint_id, assignee, opened, in_progress, closed, resolved, jql): '\n List issues for sprint.\n ' LOG.debug('Getting list of issues for sprint %s', sprint_id) try: handler = SprintHandler(sprint.get_config()) handler.get_sprint_issues(sprint_id, assignee, opened, in_progress, closed, resolved, jql) except Exception: traceback.print_exc() raise click.ClickException('Listing sprint issues failed')
List issues for sprint.
atlssncli/sprint.py
issues
bkryza/atlssncli
1
python
@sprint.command() @click.argument('sprint_id', required=False) @click.option('-o', '--open', 'opened', is_flag=True, help='Include open tickets', default=False) @click.option('-c', '--closed', is_flag=True, help='Include closed tickets', default=False) @click.option('-p', '--in-progress', 'in_progress', is_flag=True, help='Include in progress tickets', default=False) @click.option('-r', '--resolved', is_flag=True, help='Include resolved tickets', default=False) @click.option('-a', '--assignee', help='Specify assignee username') @click.option('-q', '--jql', help='Specify custom JQL query to filter results') @pass_sprint def issues(sprint, sprint_id, assignee, opened, in_progress, closed, resolved, jql): '\n \n ' LOG.debug('Getting list of issues for sprint %s', sprint_id) try: handler = SprintHandler(sprint.get_config()) handler.get_sprint_issues(sprint_id, assignee, opened, in_progress, closed, resolved, jql) except Exception: traceback.print_exc() raise click.ClickException('Listing sprint issues failed')
@sprint.command() @click.argument('sprint_id', required=False) @click.option('-o', '--open', 'opened', is_flag=True, help='Include open tickets', default=False) @click.option('-c', '--closed', is_flag=True, help='Include closed tickets', default=False) @click.option('-p', '--in-progress', 'in_progress', is_flag=True, help='Include in progress tickets', default=False) @click.option('-r', '--resolved', is_flag=True, help='Include resolved tickets', default=False) @click.option('-a', '--assignee', help='Specify assignee username') @click.option('-q', '--jql', help='Specify custom JQL query to filter results') @pass_sprint def issues(sprint, sprint_id, assignee, opened, in_progress, closed, resolved, jql): '\n \n ' LOG.debug('Getting list of issues for sprint %s', sprint_id) try: handler = SprintHandler(sprint.get_config()) handler.get_sprint_issues(sprint_id, assignee, opened, in_progress, closed, resolved, jql) except Exception: traceback.print_exc() raise click.ClickException('Listing sprint issues failed')<|docstring|>List issues for sprint.<|endoftext|>
733b6ae60c61b20b3613e98d635f50f14a520787639d3e8a25b33ba843c15428
def post(self, request, *args, **kwargs): '\n Follows a user\n ' username = kwargs.get('username') profile = Profile.objects.get(user__username=username) current_profile = request.user.profiles if (current_profile.user.username == profile.user.username): return Response({'message': error_msg['cannot_followself']}, status=status.HTTP_400_BAD_REQUEST) current_profile.toggle_follow(profile.user) serializer = self.serializer_class(profile, context={'request': request}) if serializer.data['following']: message = success_msg['success_followed'] else: message = success_msg['success_unfollowed'] return Response((serializer.data, {'message': message}), status=status.HTTP_201_CREATED)
Follows a user
authors/apps/profiles/views.py
post
andela/ah-alpha
1
python
def post(self, request, *args, **kwargs): '\n \n ' username = kwargs.get('username') profile = Profile.objects.get(user__username=username) current_profile = request.user.profiles if (current_profile.user.username == profile.user.username): return Response({'message': error_msg['cannot_followself']}, status=status.HTTP_400_BAD_REQUEST) current_profile.toggle_follow(profile.user) serializer = self.serializer_class(profile, context={'request': request}) if serializer.data['following']: message = success_msg['success_followed'] else: message = success_msg['success_unfollowed'] return Response((serializer.data, {'message': message}), status=status.HTTP_201_CREATED)
def post(self, request, *args, **kwargs): '\n \n ' username = kwargs.get('username') profile = Profile.objects.get(user__username=username) current_profile = request.user.profiles if (current_profile.user.username == profile.user.username): return Response({'message': error_msg['cannot_followself']}, status=status.HTTP_400_BAD_REQUEST) current_profile.toggle_follow(profile.user) serializer = self.serializer_class(profile, context={'request': request}) if serializer.data['following']: message = success_msg['success_followed'] else: message = success_msg['success_unfollowed'] return Response((serializer.data, {'message': message}), status=status.HTTP_201_CREATED)<|docstring|>Follows a user<|endoftext|>
7468786bc6bee069a6a3c8f98680153b1c1466cdbfe475d5b6f2075cef809572
def upload_data(prepend_id, message_for_URL, message_for_upload): 'User uploaded files.\n Args:\n prepend_id: Prepends to the designated it.\n ' if (prepend_id == 'isotopomer'): options = mrsimulator_examples else: options = [] select_examples_dropdown = html.Div([dbc.Label(f'Select an example {prepend_id}.', className='formtext'), dcc.Dropdown(id=f'example-{prepend_id}-dropbox', options=options, searchable=False, clearable=True, placeholder='Select an example ... ', style={'zIndex': '10'})], className='d-flex flex-column grow') data_from_url = html.Div([label_with_help_button(*message_for_URL, id=f'upload-{prepend_id}-url-help'), dbc.InputGroup([dbc.Input(type='url', id=f'upload-{prepend_id}-url', value='', placeholder='Paste URL ...', className='form-control'), dbc.Button('Submit', id=f'upload-{prepend_id}-url-submit', className='append-last')])], className='d-flex flex-column pb-1') upload_local_file_widget = html.Div([label_with_help_button(*message_for_upload, id=f'upload-{prepend_id}-local-help'), dcc.Upload(id=f'upload-{prepend_id}-local', children=html.Div(['Drag and drop, or ', html.A([html.I(className='fas fa-upload'), ' select'], className='formtext', href='#')], className='formtext'), style={'lineHeight': '55px', 'borderWidth': '.85px', 'borderStyle': 'dashed', 'borderRadius': '7px', 'textAlign': 'center', 'color': 'silver'}, multiple=False, className='control-upload')], className='d-flex flex-column pb-1') fields = [{'text': 'Example', 'icon_classname': 'fac fa-isotopomers', 'id': f'example-{prepend_id}-button', 'tooltip': 'Select an example.', 'active': False, 'collapsable': select_examples_dropdown}, {'text': 'Local', 'icon_classname': 'fas fa-hdd', 'id': f'upload-{prepend_id}-local-button', 'tooltip': 'Upload a local JSON file containing isotopomers.', 'active': False, 'collapsable': upload_local_file_widget}, {'text': 'URL', 'icon_classname': 'fas fa-at', 'id': f'upload-{prepend_id}-url-button', 'tooltip': 'Retrieve isotopomers from a remote JSON 
file.', 'active': False, 'collapsable': data_from_url}] input_buttons = [] for item in fields: input_buttons.append(custom_button(text=item['text'], icon_classname=item['icon_classname'], id=item['id'], tooltip=item['tooltip'], active=item['active'], style={'borderRadius': 0}, outline=True)) input_layout_0 = [] for item in fields: id_ = item['id'] input_layout_0.append(dbc.Collapse(item['collapsable'], id=f'{id_}-collapse')) input_layout = html.Div([html.Div(dbc.Button(html.I(className='fas fa-times'), id=f'upload-{prepend_id}-panel-hide-button', color='dark', size='sm'), className='d-flex justify-content-end'), html.Div([dbc.ButtonGroup(input_buttons, vertical=True, className='button no-round'), dbc.Col(input_layout_0)], className='d-flex justify-content-start')], className='top-navbar') @app.callback([*[Output((fields[j]['id'] + '-collapse'), 'is_open') for j in range(len(fields))], *[Output(fields[j]['id'], 'active') for j in range(len(fields))]], [Input(fields[j]['id'], 'n_clicks') for j in range(len(fields))], [*[State((fields[j]['id'] + '-collapse'), 'is_open') for j in range(len(fields))], *[State(fields[j]['id'], 'active') for j in range(len(fields))]]) def toggle_collapsible_input(n1, n2, n3, c1, c2, c3, a1, a2, a3): 'Toggle collapsible widget form url and upload-a-file button fields.' 
if (n1 is n2 is n3 is None): return [False, True, False, False, True, False] ctx = dash.callback_context if (not ctx.triggered): raise PreventUpdate else: button_id = ctx.triggered[0]['prop_id'].split('.')[0] if (button_id == fields[0]['id']): if (not c1): return [(not c1), False, False, (not a1), False, False] return [c1, False, False, a1, False, False] if (button_id == fields[1]['id']): if (not c2): return [False, (not c2), False, False, (not a2), False] return [False, c2, False, False, a2, False] if (button_id == fields[2]['id']): if (not c3): return [False, False, (not c3), False, False, (not a3)] return [False, False, c3, False, False, a3] drawer = dbc.Collapse(input_layout, id=f'upload-{prepend_id}-master-collapse') return drawer
User uploaded files. Args: prepend_id: Prepends to the designated it.
app/importer.py
upload_data
mVenetos97/mrsimulator-app
0
python
def upload_data(prepend_id, message_for_URL, message_for_upload): 'User uploaded files.\n Args:\n prepend_id: Prepends to the designated it.\n ' if (prepend_id == 'isotopomer'): options = mrsimulator_examples else: options = [] select_examples_dropdown = html.Div([dbc.Label(f'Select an example {prepend_id}.', className='formtext'), dcc.Dropdown(id=f'example-{prepend_id}-dropbox', options=options, searchable=False, clearable=True, placeholder='Select an example ... ', style={'zIndex': '10'})], className='d-flex flex-column grow') data_from_url = html.Div([label_with_help_button(*message_for_URL, id=f'upload-{prepend_id}-url-help'), dbc.InputGroup([dbc.Input(type='url', id=f'upload-{prepend_id}-url', value=, placeholder='Paste URL ...', className='form-control'), dbc.Button('Submit', id=f'upload-{prepend_id}-url-submit', className='append-last')])], className='d-flex flex-column pb-1') upload_local_file_widget = html.Div([label_with_help_button(*message_for_upload, id=f'upload-{prepend_id}-local-help'), dcc.Upload(id=f'upload-{prepend_id}-local', children=html.Div(['Drag and drop, or ', html.A([html.I(className='fas fa-upload'), ' select'], className='formtext', href='#')], className='formtext'), style={'lineHeight': '55px', 'borderWidth': '.85px', 'borderStyle': 'dashed', 'borderRadius': '7px', 'textAlign': 'center', 'color': 'silver'}, multiple=False, className='control-upload')], className='d-flex flex-column pb-1') fields = [{'text': 'Example', 'icon_classname': 'fac fa-isotopomers', 'id': f'example-{prepend_id}-button', 'tooltip': 'Select an example.', 'active': False, 'collapsable': select_examples_dropdown}, {'text': 'Local', 'icon_classname': 'fas fa-hdd', 'id': f'upload-{prepend_id}-local-button', 'tooltip': 'Upload a local JSON file containing isotopomers.', 'active': False, 'collapsable': upload_local_file_widget}, {'text': 'URL', 'icon_classname': 'fas fa-at', 'id': f'upload-{prepend_id}-url-button', 'tooltip': 'Retrieve isotopomers from a remote JSON 
file.', 'active': False, 'collapsable': data_from_url}] input_buttons = [] for item in fields: input_buttons.append(custom_button(text=item['text'], icon_classname=item['icon_classname'], id=item['id'], tooltip=item['tooltip'], active=item['active'], style={'borderRadius': 0}, outline=True)) input_layout_0 = [] for item in fields: id_ = item['id'] input_layout_0.append(dbc.Collapse(item['collapsable'], id=f'{id_}-collapse')) input_layout = html.Div([html.Div(dbc.Button(html.I(className='fas fa-times'), id=f'upload-{prepend_id}-panel-hide-button', color='dark', size='sm'), className='d-flex justify-content-end'), html.Div([dbc.ButtonGroup(input_buttons, vertical=True, className='button no-round'), dbc.Col(input_layout_0)], className='d-flex justify-content-start')], className='top-navbar') @app.callback([*[Output((fields[j]['id'] + '-collapse'), 'is_open') for j in range(len(fields))], *[Output(fields[j]['id'], 'active') for j in range(len(fields))]], [Input(fields[j]['id'], 'n_clicks') for j in range(len(fields))], [*[State((fields[j]['id'] + '-collapse'), 'is_open') for j in range(len(fields))], *[State(fields[j]['id'], 'active') for j in range(len(fields))]]) def toggle_collapsible_input(n1, n2, n3, c1, c2, c3, a1, a2, a3): 'Toggle collapsible widget form url and upload-a-file button fields.' 
if (n1 is n2 is n3 is None): return [False, True, False, False, True, False] ctx = dash.callback_context if (not ctx.triggered): raise PreventUpdate else: button_id = ctx.triggered[0]['prop_id'].split('.')[0] if (button_id == fields[0]['id']): if (not c1): return [(not c1), False, False, (not a1), False, False] return [c1, False, False, a1, False, False] if (button_id == fields[1]['id']): if (not c2): return [False, (not c2), False, False, (not a2), False] return [False, c2, False, False, a2, False] if (button_id == fields[2]['id']): if (not c3): return [False, False, (not c3), False, False, (not a3)] return [False, False, c3, False, False, a3] drawer = dbc.Collapse(input_layout, id=f'upload-{prepend_id}-master-collapse') return drawer
def upload_data(prepend_id, message_for_URL, message_for_upload): 'User uploaded files.\n Args:\n prepend_id: Prepends to the designated it.\n ' if (prepend_id == 'isotopomer'): options = mrsimulator_examples else: options = [] select_examples_dropdown = html.Div([dbc.Label(f'Select an example {prepend_id}.', className='formtext'), dcc.Dropdown(id=f'example-{prepend_id}-dropbox', options=options, searchable=False, clearable=True, placeholder='Select an example ... ', style={'zIndex': '10'})], className='d-flex flex-column grow') data_from_url = html.Div([label_with_help_button(*message_for_URL, id=f'upload-{prepend_id}-url-help'), dbc.InputGroup([dbc.Input(type='url', id=f'upload-{prepend_id}-url', value=, placeholder='Paste URL ...', className='form-control'), dbc.Button('Submit', id=f'upload-{prepend_id}-url-submit', className='append-last')])], className='d-flex flex-column pb-1') upload_local_file_widget = html.Div([label_with_help_button(*message_for_upload, id=f'upload-{prepend_id}-local-help'), dcc.Upload(id=f'upload-{prepend_id}-local', children=html.Div(['Drag and drop, or ', html.A([html.I(className='fas fa-upload'), ' select'], className='formtext', href='#')], className='formtext'), style={'lineHeight': '55px', 'borderWidth': '.85px', 'borderStyle': 'dashed', 'borderRadius': '7px', 'textAlign': 'center', 'color': 'silver'}, multiple=False, className='control-upload')], className='d-flex flex-column pb-1') fields = [{'text': 'Example', 'icon_classname': 'fac fa-isotopomers', 'id': f'example-{prepend_id}-button', 'tooltip': 'Select an example.', 'active': False, 'collapsable': select_examples_dropdown}, {'text': 'Local', 'icon_classname': 'fas fa-hdd', 'id': f'upload-{prepend_id}-local-button', 'tooltip': 'Upload a local JSON file containing isotopomers.', 'active': False, 'collapsable': upload_local_file_widget}, {'text': 'URL', 'icon_classname': 'fas fa-at', 'id': f'upload-{prepend_id}-url-button', 'tooltip': 'Retrieve isotopomers from a remote JSON 
file.', 'active': False, 'collapsable': data_from_url}] input_buttons = [] for item in fields: input_buttons.append(custom_button(text=item['text'], icon_classname=item['icon_classname'], id=item['id'], tooltip=item['tooltip'], active=item['active'], style={'borderRadius': 0}, outline=True)) input_layout_0 = [] for item in fields: id_ = item['id'] input_layout_0.append(dbc.Collapse(item['collapsable'], id=f'{id_}-collapse')) input_layout = html.Div([html.Div(dbc.Button(html.I(className='fas fa-times'), id=f'upload-{prepend_id}-panel-hide-button', color='dark', size='sm'), className='d-flex justify-content-end'), html.Div([dbc.ButtonGroup(input_buttons, vertical=True, className='button no-round'), dbc.Col(input_layout_0)], className='d-flex justify-content-start')], className='top-navbar') @app.callback([*[Output((fields[j]['id'] + '-collapse'), 'is_open') for j in range(len(fields))], *[Output(fields[j]['id'], 'active') for j in range(len(fields))]], [Input(fields[j]['id'], 'n_clicks') for j in range(len(fields))], [*[State((fields[j]['id'] + '-collapse'), 'is_open') for j in range(len(fields))], *[State(fields[j]['id'], 'active') for j in range(len(fields))]]) def toggle_collapsible_input(n1, n2, n3, c1, c2, c3, a1, a2, a3): 'Toggle collapsible widget form url and upload-a-file button fields.' 
if (n1 is n2 is n3 is None): return [False, True, False, False, True, False] ctx = dash.callback_context if (not ctx.triggered): raise PreventUpdate else: button_id = ctx.triggered[0]['prop_id'].split('.')[0] if (button_id == fields[0]['id']): if (not c1): return [(not c1), False, False, (not a1), False, False] return [c1, False, False, a1, False, False] if (button_id == fields[1]['id']): if (not c2): return [False, (not c2), False, False, (not a2), False] return [False, c2, False, False, a2, False] if (button_id == fields[2]['id']): if (not c3): return [False, False, (not c3), False, False, (not a3)] return [False, False, c3, False, False, a3] drawer = dbc.Collapse(input_layout, id=f'upload-{prepend_id}-master-collapse') return drawer<|docstring|>User uploaded files. Args: prepend_id: Prepends to the designated it.<|endoftext|>
8c58417e9b162e8cc4cf25b7ee4a0e139790380e54ac2e09084665e86cfc19ac
@app.callback([Output('alert-message-isotopomer', 'children'), Output('alert-message-isotopomer', 'is_open'), Output('local-isotopomers-data', 'data'), Output('filename_dataset', 'children'), Output('data_description', 'children'), Output('local-isotopomers-ui-data', 'data'), Output('isotopomer_list', 'children')], [Input('upload-isotopomer-local', 'contents'), Input('upload-isotopomer-url-submit', 'n_clicks'), Input('example-isotopomer-dropbox', 'value'), Input('upload-from-graph', 'contents')], [State('upload-isotopomer-url', 'value'), State('upload-isotopomer-local', 'filename'), State('local-isotopomers-data', 'data'), State('filename_dataset', 'children'), State('data_description', 'children'), State('upload-from-graph', 'filename'), State('local-isotopomers-ui-data', 'data'), State('isotopomer_list', 'children')]) def update_isotopomers(isotopomer_upload_content, n_click, example, from_graph_content, isotopomer_url, isotopomer_filename, existing_isotopomers_data, data_title, data_info, from_graph_filename, local_isotopomers_ui_data_state, isotopomer_list_state): 'Update the local isotopomers when a new file is imported.' ctx = dash.callback_context if (not ctx.triggered): raise PreventUpdate trigger_id = ctx.triggered[0]['prop_id'].split('.')[0] if (trigger_id == 'example-isotopomer-dropbox'): path = os.path.split(__file__)[0] if (example in ['', None]): raise PreventUpdate response = urlopen(get_absolute_url_path(example, path)) data = json.loads(response.read()) if (trigger_id == 'upload-isotopomer-url-submit'): if (isotopomer_url in ['', None]): raise PreventUpdate response = urlopen(isotopomer_url) try: data = json.loads(response.read()) except Exception: message = 'Error reading isotopomers.' 
return [message, True, existing_isotopomers_data, data_title, data_info, local_isotopomers_ui_data_state, isotopomer_list_state] if (trigger_id == 'upload-isotopomer-local'): if (isotopomer_upload_content is None): raise PreventUpdate try: data = parse_contents(isotopomer_upload_content, isotopomer_filename) except Exception: message = 'Error reading isotopomers.' return [message, True, existing_isotopomers_data, data_title, data_info, local_isotopomers_ui_data_state, isotopomer_list_state] if (trigger_id == 'upload-from-graph'): if (from_graph_content is None): raise PreventUpdate if (from_graph_filename.split('.')[1] != 'json'): raise PreventUpdate try: data = parse_contents(from_graph_content, from_graph_filename) except Exception: message = 'Error reading isotopomers.' return [message, True, existing_isotopomers_data, data_title, data_info, local_isotopomers_ui_data_state, isotopomer_list_state] local_isotopomers_ui_data_state = None isotopomer_ui = make_isotopomers_UI(data, uuid.uuid1()) if ('name' not in data): data['name'] = '' if ('description' not in data): data['description'] = '' return ['', False, data, data['name'], data['description'], isotopomer_ui, make_isotopomer_dropdown_UI(data)]
Update the local isotopomers when a new file is imported.
app/importer.py
update_isotopomers
mVenetos97/mrsimulator-app
0
python
@app.callback([Output('alert-message-isotopomer', 'children'), Output('alert-message-isotopomer', 'is_open'), Output('local-isotopomers-data', 'data'), Output('filename_dataset', 'children'), Output('data_description', 'children'), Output('local-isotopomers-ui-data', 'data'), Output('isotopomer_list', 'children')], [Input('upload-isotopomer-local', 'contents'), Input('upload-isotopomer-url-submit', 'n_clicks'), Input('example-isotopomer-dropbox', 'value'), Input('upload-from-graph', 'contents')], [State('upload-isotopomer-url', 'value'), State('upload-isotopomer-local', 'filename'), State('local-isotopomers-data', 'data'), State('filename_dataset', 'children'), State('data_description', 'children'), State('upload-from-graph', 'filename'), State('local-isotopomers-ui-data', 'data'), State('isotopomer_list', 'children')]) def update_isotopomers(isotopomer_upload_content, n_click, example, from_graph_content, isotopomer_url, isotopomer_filename, existing_isotopomers_data, data_title, data_info, from_graph_filename, local_isotopomers_ui_data_state, isotopomer_list_state): ctx = dash.callback_context if (not ctx.triggered): raise PreventUpdate trigger_id = ctx.triggered[0]['prop_id'].split('.')[0] if (trigger_id == 'example-isotopomer-dropbox'): path = os.path.split(__file__)[0] if (example in [, None]): raise PreventUpdate response = urlopen(get_absolute_url_path(example, path)) data = json.loads(response.read()) if (trigger_id == 'upload-isotopomer-url-submit'): if (isotopomer_url in [, None]): raise PreventUpdate response = urlopen(isotopomer_url) try: data = json.loads(response.read()) except Exception: message = 'Error reading isotopomers.' 
return [message, True, existing_isotopomers_data, data_title, data_info, local_isotopomers_ui_data_state, isotopomer_list_state] if (trigger_id == 'upload-isotopomer-local'): if (isotopomer_upload_content is None): raise PreventUpdate try: data = parse_contents(isotopomer_upload_content, isotopomer_filename) except Exception: message = 'Error reading isotopomers.' return [message, True, existing_isotopomers_data, data_title, data_info, local_isotopomers_ui_data_state, isotopomer_list_state] if (trigger_id == 'upload-from-graph'): if (from_graph_content is None): raise PreventUpdate if (from_graph_filename.split('.')[1] != 'json'): raise PreventUpdate try: data = parse_contents(from_graph_content, from_graph_filename) except Exception: message = 'Error reading isotopomers.' return [message, True, existing_isotopomers_data, data_title, data_info, local_isotopomers_ui_data_state, isotopomer_list_state] local_isotopomers_ui_data_state = None isotopomer_ui = make_isotopomers_UI(data, uuid.uuid1()) if ('name' not in data): data['name'] = if ('description' not in data): data['description'] = return [, False, data, data['name'], data['description'], isotopomer_ui, make_isotopomer_dropdown_UI(data)]
@app.callback([Output('alert-message-isotopomer', 'children'), Output('alert-message-isotopomer', 'is_open'), Output('local-isotopomers-data', 'data'), Output('filename_dataset', 'children'), Output('data_description', 'children'), Output('local-isotopomers-ui-data', 'data'), Output('isotopomer_list', 'children')], [Input('upload-isotopomer-local', 'contents'), Input('upload-isotopomer-url-submit', 'n_clicks'), Input('example-isotopomer-dropbox', 'value'), Input('upload-from-graph', 'contents')], [State('upload-isotopomer-url', 'value'), State('upload-isotopomer-local', 'filename'), State('local-isotopomers-data', 'data'), State('filename_dataset', 'children'), State('data_description', 'children'), State('upload-from-graph', 'filename'), State('local-isotopomers-ui-data', 'data'), State('isotopomer_list', 'children')]) def update_isotopomers(isotopomer_upload_content, n_click, example, from_graph_content, isotopomer_url, isotopomer_filename, existing_isotopomers_data, data_title, data_info, from_graph_filename, local_isotopomers_ui_data_state, isotopomer_list_state): ctx = dash.callback_context if (not ctx.triggered): raise PreventUpdate trigger_id = ctx.triggered[0]['prop_id'].split('.')[0] if (trigger_id == 'example-isotopomer-dropbox'): path = os.path.split(__file__)[0] if (example in [, None]): raise PreventUpdate response = urlopen(get_absolute_url_path(example, path)) data = json.loads(response.read()) if (trigger_id == 'upload-isotopomer-url-submit'): if (isotopomer_url in [, None]): raise PreventUpdate response = urlopen(isotopomer_url) try: data = json.loads(response.read()) except Exception: message = 'Error reading isotopomers.' 
return [message, True, existing_isotopomers_data, data_title, data_info, local_isotopomers_ui_data_state, isotopomer_list_state] if (trigger_id == 'upload-isotopomer-local'): if (isotopomer_upload_content is None): raise PreventUpdate try: data = parse_contents(isotopomer_upload_content, isotopomer_filename) except Exception: message = 'Error reading isotopomers.' return [message, True, existing_isotopomers_data, data_title, data_info, local_isotopomers_ui_data_state, isotopomer_list_state] if (trigger_id == 'upload-from-graph'): if (from_graph_content is None): raise PreventUpdate if (from_graph_filename.split('.')[1] != 'json'): raise PreventUpdate try: data = parse_contents(from_graph_content, from_graph_filename) except Exception: message = 'Error reading isotopomers.' return [message, True, existing_isotopomers_data, data_title, data_info, local_isotopomers_ui_data_state, isotopomer_list_state] local_isotopomers_ui_data_state = None isotopomer_ui = make_isotopomers_UI(data, uuid.uuid1()) if ('name' not in data): data['name'] = if ('description' not in data): data['description'] = return [, False, data, data['name'], data['description'], isotopomer_ui, make_isotopomer_dropdown_UI(data)]<|docstring|>Update the local isotopomers when a new file is imported.<|endoftext|>
febbd611b403f6f1e7b49213963e08feff5829b89113857e7a7f9c2b576da1ac
def parse_contents(contents, filename): 'Parse contents from the isotopomers file.' default_data = {'isotopomers': [], 'name': '', 'description': ''} if (filename is None): return default_data if ('json' in filename): content_string = contents.split(',')[1] decoded = base64.b64decode(content_string) data = json.loads(str(decoded, encoding='UTF-8')) if ('name' not in data.keys()): data['name'] = filename if ('description' not in data.keys()): data['description'] = '' return data else: raise Exception('File not recognized.')
Parse contents from the isotopomers file.
app/importer.py
parse_contents
mVenetos97/mrsimulator-app
0
python
def parse_contents(contents, filename): default_data = {'isotopomers': [], 'name': , 'description': } if (filename is None): return default_data if ('json' in filename): content_string = contents.split(',')[1] decoded = base64.b64decode(content_string) data = json.loads(str(decoded, encoding='UTF-8')) if ('name' not in data.keys()): data['name'] = filename if ('description' not in data.keys()): data['description'] = return data else: raise Exception('File not recognized.')
def parse_contents(contents, filename): default_data = {'isotopomers': [], 'name': , 'description': } if (filename is None): return default_data if ('json' in filename): content_string = contents.split(',')[1] decoded = base64.b64decode(content_string) data = json.loads(str(decoded, encoding='UTF-8')) if ('name' not in data.keys()): data['name'] = filename if ('description' not in data.keys()): data['description'] = return data else: raise Exception('File not recognized.')<|docstring|>Parse contents from the isotopomers file.<|endoftext|>
a8aae189820099eb9532a6e9af6f0c601b991d1a206b41c7b472d7e157333d90
@app.callback([Output('alert-message-spectrum', 'children'), Output('alert-message-spectrum', 'is_open'), Output('local-csdm-data', 'data')], [Input('upload-spectrum-local', 'contents'), Input('upload-from-graph', 'contents')], [State('local-csdm-data', 'data'), State('upload-from-graph', 'filename')]) def update_csdm_file(csdm_upload_content, csdm_upload_content_graph, existing_data, filename): 'Update a local CSDM file.' ctx = dash.callback_context print(ctx.triggered[0]['prop_id']) if ((csdm_upload_content is None) and (csdm_upload_content_graph is None)): raise PreventUpdate if (not ctx.triggered): raise PreventUpdate file_extension = filename.split('.')[1] if (file_extension not in ['csdf', 'json']): return [f'Expecting a .csdf or .json file, found .{file_extension}.', True, existing_data] if (file_extension != 'csdf'): raise PreventUpdate trigger_id = ctx.triggered[0]['prop_id'].split('.')[0] if (trigger_id == 'upload-spectrum-local'): content_string = csdm_upload_content if (trigger_id == 'upload-from-graph'): content_string = csdm_upload_content_graph content_string = content_string.split(',')[1] decoded = base64.b64decode(content_string) (success, data, error_message) = load_json(decoded) if success: return ['', False, data] else: return [f'Invalid JSON file. {error_message}', True, existing_data]
Update a local CSDM file.
app/importer.py
update_csdm_file
mVenetos97/mrsimulator-app
0
python
@app.callback([Output('alert-message-spectrum', 'children'), Output('alert-message-spectrum', 'is_open'), Output('local-csdm-data', 'data')], [Input('upload-spectrum-local', 'contents'), Input('upload-from-graph', 'contents')], [State('local-csdm-data', 'data'), State('upload-from-graph', 'filename')]) def update_csdm_file(csdm_upload_content, csdm_upload_content_graph, existing_data, filename): ctx = dash.callback_context print(ctx.triggered[0]['prop_id']) if ((csdm_upload_content is None) and (csdm_upload_content_graph is None)): raise PreventUpdate if (not ctx.triggered): raise PreventUpdate file_extension = filename.split('.')[1] if (file_extension not in ['csdf', 'json']): return [f'Expecting a .csdf or .json file, found .{file_extension}.', True, existing_data] if (file_extension != 'csdf'): raise PreventUpdate trigger_id = ctx.triggered[0]['prop_id'].split('.')[0] if (trigger_id == 'upload-spectrum-local'): content_string = csdm_upload_content if (trigger_id == 'upload-from-graph'): content_string = csdm_upload_content_graph content_string = content_string.split(',')[1] decoded = base64.b64decode(content_string) (success, data, error_message) = load_json(decoded) if success: return [, False, data] else: return [f'Invalid JSON file. {error_message}', True, existing_data]
@app.callback([Output('alert-message-spectrum', 'children'), Output('alert-message-spectrum', 'is_open'), Output('local-csdm-data', 'data')], [Input('upload-spectrum-local', 'contents'), Input('upload-from-graph', 'contents')], [State('local-csdm-data', 'data'), State('upload-from-graph', 'filename')]) def update_csdm_file(csdm_upload_content, csdm_upload_content_graph, existing_data, filename): ctx = dash.callback_context print(ctx.triggered[0]['prop_id']) if ((csdm_upload_content is None) and (csdm_upload_content_graph is None)): raise PreventUpdate if (not ctx.triggered): raise PreventUpdate file_extension = filename.split('.')[1] if (file_extension not in ['csdf', 'json']): return [f'Expecting a .csdf or .json file, found .{file_extension}.', True, existing_data] if (file_extension != 'csdf'): raise PreventUpdate trigger_id = ctx.triggered[0]['prop_id'].split('.')[0] if (trigger_id == 'upload-spectrum-local'): content_string = csdm_upload_content if (trigger_id == 'upload-from-graph'): content_string = csdm_upload_content_graph content_string = content_string.split(',')[1] decoded = base64.b64decode(content_string) (success, data, error_message) = load_json(decoded) if success: return [, False, data] else: return [f'Invalid JSON file. {error_message}', True, existing_data]<|docstring|>Update a local CSDM file.<|endoftext|>
7e5c8637d1cd74369df9ac6a9672e210a34b940444379b3ec4eebc79c5d0d1bd
def load_json(content): 'Load a JSON file. Return a list with members\n - Success: True if file is read correctly,\n - Data: File content is success, otherwise an empty string,\n - message: An error message when JSON file load fails, else an empty string.\n ' content = str(content, encoding='UTF-8') try: data = cp.loads(content).to_dict() return (True, data, '') except Exception as e: return (False, '', e)
Load a JSON file. Return a list with members - Success: True if file is read correctly, - Data: File content is success, otherwise an empty string, - message: An error message when JSON file load fails, else an empty string.
app/importer.py
load_json
mVenetos97/mrsimulator-app
0
python
def load_json(content): 'Load a JSON file. Return a list with members\n - Success: True if file is read correctly,\n - Data: File content is success, otherwise an empty string,\n - message: An error message when JSON file load fails, else an empty string.\n ' content = str(content, encoding='UTF-8') try: data = cp.loads(content).to_dict() return (True, data, ) except Exception as e: return (False, , e)
def load_json(content): 'Load a JSON file. Return a list with members\n - Success: True if file is read correctly,\n - Data: File content is success, otherwise an empty string,\n - message: An error message when JSON file load fails, else an empty string.\n ' content = str(content, encoding='UTF-8') try: data = cp.loads(content).to_dict() return (True, data, ) except Exception as e: return (False, , e)<|docstring|>Load a JSON file. Return a list with members - Success: True if file is read correctly, - Data: File content is success, otherwise an empty string, - message: An error message when JSON file load fails, else an empty string.<|endoftext|>
87fb9f5a7c82ebf256325deffffa536a8798faad27bca7a2953d86e72167effe
@app.callback([*[Output((fields[j]['id'] + '-collapse'), 'is_open') for j in range(len(fields))], *[Output(fields[j]['id'], 'active') for j in range(len(fields))]], [Input(fields[j]['id'], 'n_clicks') for j in range(len(fields))], [*[State((fields[j]['id'] + '-collapse'), 'is_open') for j in range(len(fields))], *[State(fields[j]['id'], 'active') for j in range(len(fields))]]) def toggle_collapsible_input(n1, n2, n3, c1, c2, c3, a1, a2, a3): 'Toggle collapsible widget form url and upload-a-file button fields.' if (n1 is n2 is n3 is None): return [False, True, False, False, True, False] ctx = dash.callback_context if (not ctx.triggered): raise PreventUpdate else: button_id = ctx.triggered[0]['prop_id'].split('.')[0] if (button_id == fields[0]['id']): if (not c1): return [(not c1), False, False, (not a1), False, False] return [c1, False, False, a1, False, False] if (button_id == fields[1]['id']): if (not c2): return [False, (not c2), False, False, (not a2), False] return [False, c2, False, False, a2, False] if (button_id == fields[2]['id']): if (not c3): return [False, False, (not c3), False, False, (not a3)] return [False, False, c3, False, False, a3]
Toggle collapsible widget form url and upload-a-file button fields.
app/importer.py
toggle_collapsible_input
mVenetos97/mrsimulator-app
0
python
@app.callback([*[Output((fields[j]['id'] + '-collapse'), 'is_open') for j in range(len(fields))], *[Output(fields[j]['id'], 'active') for j in range(len(fields))]], [Input(fields[j]['id'], 'n_clicks') for j in range(len(fields))], [*[State((fields[j]['id'] + '-collapse'), 'is_open') for j in range(len(fields))], *[State(fields[j]['id'], 'active') for j in range(len(fields))]]) def toggle_collapsible_input(n1, n2, n3, c1, c2, c3, a1, a2, a3): if (n1 is n2 is n3 is None): return [False, True, False, False, True, False] ctx = dash.callback_context if (not ctx.triggered): raise PreventUpdate else: button_id = ctx.triggered[0]['prop_id'].split('.')[0] if (button_id == fields[0]['id']): if (not c1): return [(not c1), False, False, (not a1), False, False] return [c1, False, False, a1, False, False] if (button_id == fields[1]['id']): if (not c2): return [False, (not c2), False, False, (not a2), False] return [False, c2, False, False, a2, False] if (button_id == fields[2]['id']): if (not c3): return [False, False, (not c3), False, False, (not a3)] return [False, False, c3, False, False, a3]
@app.callback([*[Output((fields[j]['id'] + '-collapse'), 'is_open') for j in range(len(fields))], *[Output(fields[j]['id'], 'active') for j in range(len(fields))]], [Input(fields[j]['id'], 'n_clicks') for j in range(len(fields))], [*[State((fields[j]['id'] + '-collapse'), 'is_open') for j in range(len(fields))], *[State(fields[j]['id'], 'active') for j in range(len(fields))]]) def toggle_collapsible_input(n1, n2, n3, c1, c2, c3, a1, a2, a3): if (n1 is n2 is n3 is None): return [False, True, False, False, True, False] ctx = dash.callback_context if (not ctx.triggered): raise PreventUpdate else: button_id = ctx.triggered[0]['prop_id'].split('.')[0] if (button_id == fields[0]['id']): if (not c1): return [(not c1), False, False, (not a1), False, False] return [c1, False, False, a1, False, False] if (button_id == fields[1]['id']): if (not c2): return [False, (not c2), False, False, (not a2), False] return [False, c2, False, False, a2, False] if (button_id == fields[2]['id']): if (not c3): return [False, False, (not c3), False, False, (not a3)] return [False, False, c3, False, False, a3]<|docstring|>Toggle collapsible widget form url and upload-a-file button fields.<|endoftext|>
0fe3aaa8705d1e25a313b64c3ba051298689a06df075ecfb59a1d71efbfdbe07
def kill(self, msg): 'Break out of the engine loop.' return 'kill'
Break out of the engine loop.
distarray/mpi_engine.py
kill
jrodgers01d/distarray
66
python
def kill(self, msg): return 'kill'
def kill(self, msg): return 'kill'<|docstring|>Break out of the engine loop.<|endoftext|>
e3560f00dc869daebc916c5cfca3ee53c51e6fde17a51145b06fd787c4416e19
def test_path(): 'Verify the output of `calc_addition` function' output = set_constants.get_paths() assert (output == 'data/flattened_csvs')
Verify the output of `calc_addition` function
test_1.py
test_path
RGGH/exa-data
0
python
def test_path(): output = set_constants.get_paths() assert (output == 'data/flattened_csvs')
def test_path(): output = set_constants.get_paths() assert (output == 'data/flattened_csvs')<|docstring|>Verify the output of `calc_addition` function<|endoftext|>
a589f4e1a146fbc87d242f94f1657603fb5c9bfe334b34ed3889cd3dcd07f936
def makeNoseConfig(env): 'Load a Config, pre-filled with user config files if any are\n found.\n ' cfg_files = all_config_files() manager = DefaultPluginManager() return Config(env=env, files=cfg_files, plugins=manager)
Load a Config, pre-filled with user config files if any are found.
ipython_nose.py
makeNoseConfig
datacamp/ipython_nose
1
python
def makeNoseConfig(env): 'Load a Config, pre-filled with user config files if any are\n found.\n ' cfg_files = all_config_files() manager = DefaultPluginManager() return Config(env=env, files=cfg_files, plugins=manager)
def makeNoseConfig(env): 'Load a Config, pre-filled with user config files if any are\n found.\n ' cfg_files = all_config_files() manager = DefaultPluginManager() return Config(env=env, files=cfg_files, plugins=manager)<|docstring|>Load a Config, pre-filled with user config files if any are found.<|endoftext|>
3f8f4fe0216bc413ed26330994379b13194100d1fc05ad4567622ef6b5b1901e
def __init__(self, type, storage, path, filename, outpath, pageimage, jsonfilename, tablefilename, customfilename, formulartype): 'Constructor containg all the parameters for processing a file\n\n Args:\n type ([int]): Type of declaration (welth of interest)\n storage ([type]): Type of storage: azure or something else\n path ([type]): Relative path to the file to process\n filename ([type]): Name of the file to be processed\n outpath ([type]): Relative path where the output files should be saved\n pageimage ([type]): Relative path where the page images should be saved\n jsonfilename ([type]): Relative path where the JSON file obtained from OCR service should be saved\n tablefilename ([type]): Relative path where the JSON file obtained from \n processing the file obtained from OCR services should be saved\n customfilename ([str]): Relative path where the custom JSON file will be saved \n (obtained after processing the data from OCR service)\n formulartype ([int]): Type of the formular (structure)\n ' self.type = type self.storage = storage self.path = path self.filename = filename self.out_path = outpath self.page_image_filename = pageimage self.ocr_json_filename = jsonfilename self.ocr_table_json_filename = tablefilename self.ocr_custom_json_filename = customfilename self.formular_type = formulartype
Constructor containg all the parameters for processing a file Args: type ([int]): Type of declaration (welth of interest) storage ([type]): Type of storage: azure or something else path ([type]): Relative path to the file to process filename ([type]): Name of the file to be processed outpath ([type]): Relative path where the output files should be saved pageimage ([type]): Relative path where the page images should be saved jsonfilename ([type]): Relative path where the JSON file obtained from OCR service should be saved tablefilename ([type]): Relative path where the JSON file obtained from processing the file obtained from OCR services should be saved customfilename ([str]): Relative path where the custom JSON file will be saved (obtained after processing the data from OCR service) formulartype ([int]): Type of the formular (structure)
NewDeclarationInQueue/preprocess/document_location.py
__init__
it-pebune/ani-research-data-extraction
0
python
def __init__(self, type, storage, path, filename, outpath, pageimage, jsonfilename, tablefilename, customfilename, formulartype): 'Constructor containg all the parameters for processing a file\n\n Args:\n type ([int]): Type of declaration (welth of interest)\n storage ([type]): Type of storage: azure or something else\n path ([type]): Relative path to the file to process\n filename ([type]): Name of the file to be processed\n outpath ([type]): Relative path where the output files should be saved\n pageimage ([type]): Relative path where the page images should be saved\n jsonfilename ([type]): Relative path where the JSON file obtained from OCR service should be saved\n tablefilename ([type]): Relative path where the JSON file obtained from \n processing the file obtained from OCR services should be saved\n customfilename ([str]): Relative path where the custom JSON file will be saved \n (obtained after processing the data from OCR service)\n formulartype ([int]): Type of the formular (structure)\n ' self.type = type self.storage = storage self.path = path self.filename = filename self.out_path = outpath self.page_image_filename = pageimage self.ocr_json_filename = jsonfilename self.ocr_table_json_filename = tablefilename self.ocr_custom_json_filename = customfilename self.formular_type = formulartype
def __init__(self, type, storage, path, filename, outpath, pageimage, jsonfilename, tablefilename, customfilename, formulartype): 'Constructor containg all the parameters for processing a file\n\n Args:\n type ([int]): Type of declaration (welth of interest)\n storage ([type]): Type of storage: azure or something else\n path ([type]): Relative path to the file to process\n filename ([type]): Name of the file to be processed\n outpath ([type]): Relative path where the output files should be saved\n pageimage ([type]): Relative path where the page images should be saved\n jsonfilename ([type]): Relative path where the JSON file obtained from OCR service should be saved\n tablefilename ([type]): Relative path where the JSON file obtained from \n processing the file obtained from OCR services should be saved\n customfilename ([str]): Relative path where the custom JSON file will be saved \n (obtained after processing the data from OCR service)\n formulartype ([int]): Type of the formular (structure)\n ' self.type = type self.storage = storage self.path = path self.filename = filename self.out_path = outpath self.page_image_filename = pageimage self.ocr_json_filename = jsonfilename self.ocr_table_json_filename = tablefilename self.ocr_custom_json_filename = customfilename self.formular_type = formulartype<|docstring|>Constructor containg all the parameters for processing a file Args: type ([int]): Type of declaration (welth of interest) storage ([type]): Type of storage: azure or something else path ([type]): Relative path to the file to process filename ([type]): Name of the file to be processed outpath ([type]): Relative path where the output files should be saved pageimage ([type]): Relative path where the page images should be saved jsonfilename ([type]): Relative path where the JSON file obtained from OCR service should be saved tablefilename ([type]): Relative path where the JSON file obtained from processing the file obtained from OCR services should be saved 
customfilename ([str]): Relative path where the custom JSON file will be saved (obtained after processing the data from OCR service) formulartype ([int]): Type of the formular (structure)<|endoftext|>
0125af6fe97dc54ff48558311ff9c1572dac6f54ae7921cac8a392b10181234b
def __init__(self, live_dir=None, live_retain=None, schedule=None, scheduled_dir=None, scheduled_retain=None): 'SettingsReportsSettings - a model defined in Swagger' self._live_dir = None self._live_retain = None self._schedule = None self._scheduled_dir = None self._scheduled_retain = None self.discriminator = None self.live_dir = live_dir self.live_retain = live_retain self.schedule = schedule self.scheduled_dir = scheduled_dir self.scheduled_retain = scheduled_retain
SettingsReportsSettings - a model defined in Swagger
isi_sdk_8_1_0/isi_sdk_8_1_0/models/settings_reports_settings.py
__init__
mohitjain97/isilon_sdk_python
24
python
def __init__(self, live_dir=None, live_retain=None, schedule=None, scheduled_dir=None, scheduled_retain=None): self._live_dir = None self._live_retain = None self._schedule = None self._scheduled_dir = None self._scheduled_retain = None self.discriminator = None self.live_dir = live_dir self.live_retain = live_retain self.schedule = schedule self.scheduled_dir = scheduled_dir self.scheduled_retain = scheduled_retain
def __init__(self, live_dir=None, live_retain=None, schedule=None, scheduled_dir=None, scheduled_retain=None): self._live_dir = None self._live_retain = None self._schedule = None self._scheduled_dir = None self._scheduled_retain = None self.discriminator = None self.live_dir = live_dir self.live_retain = live_retain self.schedule = schedule self.scheduled_dir = scheduled_dir self.scheduled_retain = scheduled_retain<|docstring|>SettingsReportsSettings - a model defined in Swagger<|endoftext|>
93266d45b516cbf75d4f27881ecc1b6bd6c77940a86f1e7eaf006be7528c684c
@property def live_dir(self): 'Gets the live_dir of this SettingsReportsSettings. # noqa: E501\n\n The directory on /ifs where manual or live reports will be placed. # noqa: E501\n\n :return: The live_dir of this SettingsReportsSettings. # noqa: E501\n :rtype: str\n ' return self._live_dir
Gets the live_dir of this SettingsReportsSettings. # noqa: E501 The directory on /ifs where manual or live reports will be placed. # noqa: E501 :return: The live_dir of this SettingsReportsSettings. # noqa: E501 :rtype: str
isi_sdk_8_1_0/isi_sdk_8_1_0/models/settings_reports_settings.py
live_dir
mohitjain97/isilon_sdk_python
24
python
@property def live_dir(self): 'Gets the live_dir of this SettingsReportsSettings. # noqa: E501\n\n The directory on /ifs where manual or live reports will be placed. # noqa: E501\n\n :return: The live_dir of this SettingsReportsSettings. # noqa: E501\n :rtype: str\n ' return self._live_dir
@property def live_dir(self): 'Gets the live_dir of this SettingsReportsSettings. # noqa: E501\n\n The directory on /ifs where manual or live reports will be placed. # noqa: E501\n\n :return: The live_dir of this SettingsReportsSettings. # noqa: E501\n :rtype: str\n ' return self._live_dir<|docstring|>Gets the live_dir of this SettingsReportsSettings. # noqa: E501 The directory on /ifs where manual or live reports will be placed. # noqa: E501 :return: The live_dir of this SettingsReportsSettings. # noqa: E501 :rtype: str<|endoftext|>
9825acb9bd2d6eca4d3589a75c7b5b3e9c6c7a48a0a6a40b6d7b763a34b1318d
@live_dir.setter def live_dir(self, live_dir): 'Sets the live_dir of this SettingsReportsSettings.\n\n The directory on /ifs where manual or live reports will be placed. # noqa: E501\n\n :param live_dir: The live_dir of this SettingsReportsSettings. # noqa: E501\n :type: str\n ' if (live_dir is None): raise ValueError('Invalid value for `live_dir`, must not be `None`') self._live_dir = live_dir
Sets the live_dir of this SettingsReportsSettings. The directory on /ifs where manual or live reports will be placed. # noqa: E501 :param live_dir: The live_dir of this SettingsReportsSettings. # noqa: E501 :type: str
isi_sdk_8_1_0/isi_sdk_8_1_0/models/settings_reports_settings.py
live_dir
mohitjain97/isilon_sdk_python
24
python
@live_dir.setter def live_dir(self, live_dir): 'Sets the live_dir of this SettingsReportsSettings.\n\n The directory on /ifs where manual or live reports will be placed. # noqa: E501\n\n :param live_dir: The live_dir of this SettingsReportsSettings. # noqa: E501\n :type: str\n ' if (live_dir is None): raise ValueError('Invalid value for `live_dir`, must not be `None`') self._live_dir = live_dir
@live_dir.setter def live_dir(self, live_dir): 'Sets the live_dir of this SettingsReportsSettings.\n\n The directory on /ifs where manual or live reports will be placed. # noqa: E501\n\n :param live_dir: The live_dir of this SettingsReportsSettings. # noqa: E501\n :type: str\n ' if (live_dir is None): raise ValueError('Invalid value for `live_dir`, must not be `None`') self._live_dir = live_dir<|docstring|>Sets the live_dir of this SettingsReportsSettings. The directory on /ifs where manual or live reports will be placed. # noqa: E501 :param live_dir: The live_dir of this SettingsReportsSettings. # noqa: E501 :type: str<|endoftext|>
d0e078cacfeb4bd31a869fb85a5775679c5f7d56ef15ba5027ce5c683d6fcfd0
@property def live_retain(self): 'Gets the live_retain of this SettingsReportsSettings. # noqa: E501\n\n The number of manual reports to keep. # noqa: E501\n\n :return: The live_retain of this SettingsReportsSettings. # noqa: E501\n :rtype: int\n ' return self._live_retain
Gets the live_retain of this SettingsReportsSettings. # noqa: E501 The number of manual reports to keep. # noqa: E501 :return: The live_retain of this SettingsReportsSettings. # noqa: E501 :rtype: int
isi_sdk_8_1_0/isi_sdk_8_1_0/models/settings_reports_settings.py
live_retain
mohitjain97/isilon_sdk_python
24
python
@property def live_retain(self): 'Gets the live_retain of this SettingsReportsSettings. # noqa: E501\n\n The number of manual reports to keep. # noqa: E501\n\n :return: The live_retain of this SettingsReportsSettings. # noqa: E501\n :rtype: int\n ' return self._live_retain
@property def live_retain(self): 'Gets the live_retain of this SettingsReportsSettings. # noqa: E501\n\n The number of manual reports to keep. # noqa: E501\n\n :return: The live_retain of this SettingsReportsSettings. # noqa: E501\n :rtype: int\n ' return self._live_retain<|docstring|>Gets the live_retain of this SettingsReportsSettings. # noqa: E501 The number of manual reports to keep. # noqa: E501 :return: The live_retain of this SettingsReportsSettings. # noqa: E501 :rtype: int<|endoftext|>
931f43984bbc95ca12b39801bf262a09e4f835d16564fedcb9988ab372cefa73
@live_retain.setter def live_retain(self, live_retain): 'Sets the live_retain of this SettingsReportsSettings.\n\n The number of manual reports to keep. # noqa: E501\n\n :param live_retain: The live_retain of this SettingsReportsSettings. # noqa: E501\n :type: int\n ' if (live_retain is None): raise ValueError('Invalid value for `live_retain`, must not be `None`') if ((live_retain is not None) and (live_retain < 1)): raise ValueError('Invalid value for `live_retain`, must be a value greater than or equal to `1`') self._live_retain = live_retain
Sets the live_retain of this SettingsReportsSettings. The number of manual reports to keep. # noqa: E501 :param live_retain: The live_retain of this SettingsReportsSettings. # noqa: E501 :type: int
isi_sdk_8_1_0/isi_sdk_8_1_0/models/settings_reports_settings.py
live_retain
mohitjain97/isilon_sdk_python
24
python
@live_retain.setter def live_retain(self, live_retain): 'Sets the live_retain of this SettingsReportsSettings.\n\n The number of manual reports to keep. # noqa: E501\n\n :param live_retain: The live_retain of this SettingsReportsSettings. # noqa: E501\n :type: int\n ' if (live_retain is None): raise ValueError('Invalid value for `live_retain`, must not be `None`') if ((live_retain is not None) and (live_retain < 1)): raise ValueError('Invalid value for `live_retain`, must be a value greater than or equal to `1`') self._live_retain = live_retain
@live_retain.setter def live_retain(self, live_retain): 'Sets the live_retain of this SettingsReportsSettings.\n\n The number of manual reports to keep. # noqa: E501\n\n :param live_retain: The live_retain of this SettingsReportsSettings. # noqa: E501\n :type: int\n ' if (live_retain is None): raise ValueError('Invalid value for `live_retain`, must not be `None`') if ((live_retain is not None) and (live_retain < 1)): raise ValueError('Invalid value for `live_retain`, must be a value greater than or equal to `1`') self._live_retain = live_retain<|docstring|>Sets the live_retain of this SettingsReportsSettings. The number of manual reports to keep. # noqa: E501 :param live_retain: The live_retain of this SettingsReportsSettings. # noqa: E501 :type: int<|endoftext|>
7cbb7e033ffbd2332ec0ceb6c271606860aeec943eecd70b00cdff7d23f4fae9
@property def schedule(self): 'Gets the schedule of this SettingsReportsSettings. # noqa: E501\n\n The isidate schedule used to generate reports. # noqa: E501\n\n :return: The schedule of this SettingsReportsSettings. # noqa: E501\n :rtype: str\n ' return self._schedule
Gets the schedule of this SettingsReportsSettings. # noqa: E501 The isidate schedule used to generate reports. # noqa: E501 :return: The schedule of this SettingsReportsSettings. # noqa: E501 :rtype: str
isi_sdk_8_1_0/isi_sdk_8_1_0/models/settings_reports_settings.py
schedule
mohitjain97/isilon_sdk_python
24
python
@property def schedule(self): 'Gets the schedule of this SettingsReportsSettings. # noqa: E501\n\n The isidate schedule used to generate reports. # noqa: E501\n\n :return: The schedule of this SettingsReportsSettings. # noqa: E501\n :rtype: str\n ' return self._schedule
@property def schedule(self): 'Gets the schedule of this SettingsReportsSettings. # noqa: E501\n\n The isidate schedule used to generate reports. # noqa: E501\n\n :return: The schedule of this SettingsReportsSettings. # noqa: E501\n :rtype: str\n ' return self._schedule<|docstring|>Gets the schedule of this SettingsReportsSettings. # noqa: E501 The isidate schedule used to generate reports. # noqa: E501 :return: The schedule of this SettingsReportsSettings. # noqa: E501 :rtype: str<|endoftext|>
3d89c4876a37445e805498899540133d8f627f7dea345765a1317b65010ad552
@schedule.setter def schedule(self, schedule): 'Sets the schedule of this SettingsReportsSettings.\n\n The isidate schedule used to generate reports. # noqa: E501\n\n :param schedule: The schedule of this SettingsReportsSettings. # noqa: E501\n :type: str\n ' if (schedule is None): raise ValueError('Invalid value for `schedule`, must not be `None`') self._schedule = schedule
Sets the schedule of this SettingsReportsSettings. The isidate schedule used to generate reports. # noqa: E501 :param schedule: The schedule of this SettingsReportsSettings. # noqa: E501 :type: str
isi_sdk_8_1_0/isi_sdk_8_1_0/models/settings_reports_settings.py
schedule
mohitjain97/isilon_sdk_python
24
python
@schedule.setter def schedule(self, schedule): 'Sets the schedule of this SettingsReportsSettings.\n\n The isidate schedule used to generate reports. # noqa: E501\n\n :param schedule: The schedule of this SettingsReportsSettings. # noqa: E501\n :type: str\n ' if (schedule is None): raise ValueError('Invalid value for `schedule`, must not be `None`') self._schedule = schedule
@schedule.setter def schedule(self, schedule): 'Sets the schedule of this SettingsReportsSettings.\n\n The isidate schedule used to generate reports. # noqa: E501\n\n :param schedule: The schedule of this SettingsReportsSettings. # noqa: E501\n :type: str\n ' if (schedule is None): raise ValueError('Invalid value for `schedule`, must not be `None`') self._schedule = schedule<|docstring|>Sets the schedule of this SettingsReportsSettings. The isidate schedule used to generate reports. # noqa: E501 :param schedule: The schedule of this SettingsReportsSettings. # noqa: E501 :type: str<|endoftext|>
503e2741f4a8b3d1fb14f5c610b654fa2a39c66307df4f24d9a3d3ae5fd81067
@property def scheduled_dir(self): 'Gets the scheduled_dir of this SettingsReportsSettings. # noqa: E501\n\n The directory on /ifs where schedule reports will be placed. # noqa: E501\n\n :return: The scheduled_dir of this SettingsReportsSettings. # noqa: E501\n :rtype: str\n ' return self._scheduled_dir
Gets the scheduled_dir of this SettingsReportsSettings. # noqa: E501 The directory on /ifs where schedule reports will be placed. # noqa: E501 :return: The scheduled_dir of this SettingsReportsSettings. # noqa: E501 :rtype: str
isi_sdk_8_1_0/isi_sdk_8_1_0/models/settings_reports_settings.py
scheduled_dir
mohitjain97/isilon_sdk_python
24
python
@property def scheduled_dir(self): 'Gets the scheduled_dir of this SettingsReportsSettings. # noqa: E501\n\n The directory on /ifs where schedule reports will be placed. # noqa: E501\n\n :return: The scheduled_dir of this SettingsReportsSettings. # noqa: E501\n :rtype: str\n ' return self._scheduled_dir
@property def scheduled_dir(self): 'Gets the scheduled_dir of this SettingsReportsSettings. # noqa: E501\n\n The directory on /ifs where schedule reports will be placed. # noqa: E501\n\n :return: The scheduled_dir of this SettingsReportsSettings. # noqa: E501\n :rtype: str\n ' return self._scheduled_dir<|docstring|>Gets the scheduled_dir of this SettingsReportsSettings. # noqa: E501 The directory on /ifs where schedule reports will be placed. # noqa: E501 :return: The scheduled_dir of this SettingsReportsSettings. # noqa: E501 :rtype: str<|endoftext|>
95143ec16f740b83bcc3c4d0d01eb0b1251fd9a6f11034e98b0ae62fd655130d
@scheduled_dir.setter def scheduled_dir(self, scheduled_dir): 'Sets the scheduled_dir of this SettingsReportsSettings.\n\n The directory on /ifs where schedule reports will be placed. # noqa: E501\n\n :param scheduled_dir: The scheduled_dir of this SettingsReportsSettings. # noqa: E501\n :type: str\n ' if (scheduled_dir is None): raise ValueError('Invalid value for `scheduled_dir`, must not be `None`') self._scheduled_dir = scheduled_dir
Sets the scheduled_dir of this SettingsReportsSettings. The directory on /ifs where schedule reports will be placed. # noqa: E501 :param scheduled_dir: The scheduled_dir of this SettingsReportsSettings. # noqa: E501 :type: str
isi_sdk_8_1_0/isi_sdk_8_1_0/models/settings_reports_settings.py
scheduled_dir
mohitjain97/isilon_sdk_python
24
python
@scheduled_dir.setter def scheduled_dir(self, scheduled_dir): 'Sets the scheduled_dir of this SettingsReportsSettings.\n\n The directory on /ifs where schedule reports will be placed. # noqa: E501\n\n :param scheduled_dir: The scheduled_dir of this SettingsReportsSettings. # noqa: E501\n :type: str\n ' if (scheduled_dir is None): raise ValueError('Invalid value for `scheduled_dir`, must not be `None`') self._scheduled_dir = scheduled_dir
@scheduled_dir.setter def scheduled_dir(self, scheduled_dir): 'Sets the scheduled_dir of this SettingsReportsSettings.\n\n The directory on /ifs where schedule reports will be placed. # noqa: E501\n\n :param scheduled_dir: The scheduled_dir of this SettingsReportsSettings. # noqa: E501\n :type: str\n ' if (scheduled_dir is None): raise ValueError('Invalid value for `scheduled_dir`, must not be `None`') self._scheduled_dir = scheduled_dir<|docstring|>Sets the scheduled_dir of this SettingsReportsSettings. The directory on /ifs where schedule reports will be placed. # noqa: E501 :param scheduled_dir: The scheduled_dir of this SettingsReportsSettings. # noqa: E501 :type: str<|endoftext|>
44f98d0f57fba6006555b23c0af3ea29ac1aece5edd2ba7b09edeace6aee738f
@property def scheduled_retain(self): 'Gets the scheduled_retain of this SettingsReportsSettings. # noqa: E501\n\n The number of scheduled reports to keep. # noqa: E501\n\n :return: The scheduled_retain of this SettingsReportsSettings. # noqa: E501\n :rtype: int\n ' return self._scheduled_retain
Gets the scheduled_retain of this SettingsReportsSettings. # noqa: E501 The number of scheduled reports to keep. # noqa: E501 :return: The scheduled_retain of this SettingsReportsSettings. # noqa: E501 :rtype: int
isi_sdk_8_1_0/isi_sdk_8_1_0/models/settings_reports_settings.py
scheduled_retain
mohitjain97/isilon_sdk_python
24
python
@property def scheduled_retain(self): 'Gets the scheduled_retain of this SettingsReportsSettings. # noqa: E501\n\n The number of scheduled reports to keep. # noqa: E501\n\n :return: The scheduled_retain of this SettingsReportsSettings. # noqa: E501\n :rtype: int\n ' return self._scheduled_retain
@property def scheduled_retain(self): 'Gets the scheduled_retain of this SettingsReportsSettings. # noqa: E501\n\n The number of scheduled reports to keep. # noqa: E501\n\n :return: The scheduled_retain of this SettingsReportsSettings. # noqa: E501\n :rtype: int\n ' return self._scheduled_retain<|docstring|>Gets the scheduled_retain of this SettingsReportsSettings. # noqa: E501 The number of scheduled reports to keep. # noqa: E501 :return: The scheduled_retain of this SettingsReportsSettings. # noqa: E501 :rtype: int<|endoftext|>
b0475e454b8d74c361ca20aed7364a4a1c37310e7c3cb2d440e31a1398e66ad5
@scheduled_retain.setter def scheduled_retain(self, scheduled_retain): 'Sets the scheduled_retain of this SettingsReportsSettings.\n\n The number of scheduled reports to keep. # noqa: E501\n\n :param scheduled_retain: The scheduled_retain of this SettingsReportsSettings. # noqa: E501\n :type: int\n ' if (scheduled_retain is None): raise ValueError('Invalid value for `scheduled_retain`, must not be `None`') if ((scheduled_retain is not None) and (scheduled_retain < 1)): raise ValueError('Invalid value for `scheduled_retain`, must be a value greater than or equal to `1`') self._scheduled_retain = scheduled_retain
Sets the scheduled_retain of this SettingsReportsSettings. The number of scheduled reports to keep. # noqa: E501 :param scheduled_retain: The scheduled_retain of this SettingsReportsSettings. # noqa: E501 :type: int
isi_sdk_8_1_0/isi_sdk_8_1_0/models/settings_reports_settings.py
scheduled_retain
mohitjain97/isilon_sdk_python
24
python
@scheduled_retain.setter def scheduled_retain(self, scheduled_retain): 'Sets the scheduled_retain of this SettingsReportsSettings.\n\n The number of scheduled reports to keep. # noqa: E501\n\n :param scheduled_retain: The scheduled_retain of this SettingsReportsSettings. # noqa: E501\n :type: int\n ' if (scheduled_retain is None): raise ValueError('Invalid value for `scheduled_retain`, must not be `None`') if ((scheduled_retain is not None) and (scheduled_retain < 1)): raise ValueError('Invalid value for `scheduled_retain`, must be a value greater than or equal to `1`') self._scheduled_retain = scheduled_retain
@scheduled_retain.setter def scheduled_retain(self, scheduled_retain): 'Sets the scheduled_retain of this SettingsReportsSettings.\n\n The number of scheduled reports to keep. # noqa: E501\n\n :param scheduled_retain: The scheduled_retain of this SettingsReportsSettings. # noqa: E501\n :type: int\n ' if (scheduled_retain is None): raise ValueError('Invalid value for `scheduled_retain`, must not be `None`') if ((scheduled_retain is not None) and (scheduled_retain < 1)): raise ValueError('Invalid value for `scheduled_retain`, must be a value greater than or equal to `1`') self._scheduled_retain = scheduled_retain<|docstring|>Sets the scheduled_retain of this SettingsReportsSettings. The number of scheduled reports to keep. # noqa: E501 :param scheduled_retain: The scheduled_retain of this SettingsReportsSettings. # noqa: E501 :type: int<|endoftext|>
137ba0f026bd6074febc2e7ebe1fec840dba70990f936f32b47eaf0fb048bd4a
def to_dict(self): 'Returns the model properties as a dict' result = {} for (attr, _) in six.iteritems(self.swagger_types): value = getattr(self, attr) if isinstance(value, list): result[attr] = list(map((lambda x: (x.to_dict() if hasattr(x, 'to_dict') else x)), value)) elif hasattr(value, 'to_dict'): result[attr] = value.to_dict() elif isinstance(value, dict): result[attr] = dict(map((lambda item: ((item[0], item[1].to_dict()) if hasattr(item[1], 'to_dict') else item)), value.items())) else: result[attr] = value return result
Returns the model properties as a dict
isi_sdk_8_1_0/isi_sdk_8_1_0/models/settings_reports_settings.py
to_dict
mohitjain97/isilon_sdk_python
24
python
def to_dict(self): result = {} for (attr, _) in six.iteritems(self.swagger_types): value = getattr(self, attr) if isinstance(value, list): result[attr] = list(map((lambda x: (x.to_dict() if hasattr(x, 'to_dict') else x)), value)) elif hasattr(value, 'to_dict'): result[attr] = value.to_dict() elif isinstance(value, dict): result[attr] = dict(map((lambda item: ((item[0], item[1].to_dict()) if hasattr(item[1], 'to_dict') else item)), value.items())) else: result[attr] = value return result
def to_dict(self): result = {} for (attr, _) in six.iteritems(self.swagger_types): value = getattr(self, attr) if isinstance(value, list): result[attr] = list(map((lambda x: (x.to_dict() if hasattr(x, 'to_dict') else x)), value)) elif hasattr(value, 'to_dict'): result[attr] = value.to_dict() elif isinstance(value, dict): result[attr] = dict(map((lambda item: ((item[0], item[1].to_dict()) if hasattr(item[1], 'to_dict') else item)), value.items())) else: result[attr] = value return result<|docstring|>Returns the model properties as a dict<|endoftext|>
cbb19eaa2fc8a113d9e32f924ef280a7e97563f8915f94f65dab438997af2e99
def to_str(self): 'Returns the string representation of the model' return pprint.pformat(self.to_dict())
Returns the string representation of the model
isi_sdk_8_1_0/isi_sdk_8_1_0/models/settings_reports_settings.py
to_str
mohitjain97/isilon_sdk_python
24
python
def to_str(self): return pprint.pformat(self.to_dict())
def to_str(self): return pprint.pformat(self.to_dict())<|docstring|>Returns the string representation of the model<|endoftext|>
772243a2c2b3261a9b954d07aaf295e3c1242a579a495e2d6a5679c677861703
def __repr__(self): 'For `print` and `pprint`' return self.to_str()
For `print` and `pprint`
isi_sdk_8_1_0/isi_sdk_8_1_0/models/settings_reports_settings.py
__repr__
mohitjain97/isilon_sdk_python
24
python
def __repr__(self): return self.to_str()
def __repr__(self): return self.to_str()<|docstring|>For `print` and `pprint`<|endoftext|>
d416fcf913dc0fa27f3cb57d24d23d9df963709707155be01a0af40f3b266fad
def __eq__(self, other): 'Returns true if both objects are equal' if (not isinstance(other, SettingsReportsSettings)): return False return (self.__dict__ == other.__dict__)
Returns true if both objects are equal
isi_sdk_8_1_0/isi_sdk_8_1_0/models/settings_reports_settings.py
__eq__
mohitjain97/isilon_sdk_python
24
python
def __eq__(self, other): if (not isinstance(other, SettingsReportsSettings)): return False return (self.__dict__ == other.__dict__)
def __eq__(self, other): if (not isinstance(other, SettingsReportsSettings)): return False return (self.__dict__ == other.__dict__)<|docstring|>Returns true if both objects are equal<|endoftext|>
43dc6740163eb9fc1161d09cb2208a64c7ad0cc8d9c8637ac3264522d3ec7e42
def __ne__(self, other): 'Returns true if both objects are not equal' return (not (self == other))
Returns true if both objects are not equal
isi_sdk_8_1_0/isi_sdk_8_1_0/models/settings_reports_settings.py
__ne__
mohitjain97/isilon_sdk_python
24
python
def __ne__(self, other): return (not (self == other))
def __ne__(self, other): return (not (self == other))<|docstring|>Returns true if both objects are not equal<|endoftext|>
a9629e7eb6a2010710d1ef349914b677683ba423f82f06b08fd23ddf31095f44
def dump_empty(schema_or_field): 'Return a full marc21 record dict with empty values.' if isinstance(schema_or_field, (Schema,)): schema = schema_or_field return {k: dump_empty(v) for (k, v) in schema.fields.items()} if isinstance(schema_or_field, SchemaMeta): schema = schema_or_field() return {k: dump_empty(v) for (k, v) in schema.fields.items()} if isinstance(schema_or_field, fields.List): field = schema_or_field return [dump_empty(field.inner)] if isinstance(schema_or_field, NestedAttribute): field = schema_or_field return dump_empty(field.nested) return None
Return a full marc21 record dict with empty values.
invenio_records_marc21/ui/theme/deposit.py
dump_empty
philippgualdi/invenio-records-marc21
1
python
def dump_empty(schema_or_field): if isinstance(schema_or_field, (Schema,)): schema = schema_or_field return {k: dump_empty(v) for (k, v) in schema.fields.items()} if isinstance(schema_or_field, SchemaMeta): schema = schema_or_field() return {k: dump_empty(v) for (k, v) in schema.fields.items()} if isinstance(schema_or_field, fields.List): field = schema_or_field return [dump_empty(field.inner)] if isinstance(schema_or_field, NestedAttribute): field = schema_or_field return dump_empty(field.nested) return None
def dump_empty(schema_or_field): if isinstance(schema_or_field, (Schema,)): schema = schema_or_field return {k: dump_empty(v) for (k, v) in schema.fields.items()} if isinstance(schema_or_field, SchemaMeta): schema = schema_or_field() return {k: dump_empty(v) for (k, v) in schema.fields.items()} if isinstance(schema_or_field, fields.List): field = schema_or_field return [dump_empty(field.inner)] if isinstance(schema_or_field, NestedAttribute): field = schema_or_field return dump_empty(field.nested) return None<|docstring|>Return a full marc21 record dict with empty values.<|endoftext|>
dfc8e2aaccfadcf9a6943a6a9fc512c8d3446f2650368a030867e9d8879ff429
def empty_record(): 'Create an empty record.' record = dump_empty(Marc21RecordSchema) record['metadata'] = '<record> <leader>00000nam a2200000zca4500</leader></record>' record['is_published'] = False record['files'] = {'enabled': True} return record
Create an empty record.
invenio_records_marc21/ui/theme/deposit.py
empty_record
philippgualdi/invenio-records-marc21
1
python
def empty_record(): record = dump_empty(Marc21RecordSchema) record['metadata'] = '<record> <leader>00000nam a2200000zca4500</leader></record>' record['is_published'] = False record['files'] = {'enabled': True} return record
def empty_record(): record = dump_empty(Marc21RecordSchema) record['metadata'] = '<record> <leader>00000nam a2200000zca4500</leader></record>' record['is_published'] = False record['files'] = {'enabled': True} return record<|docstring|>Create an empty record.<|endoftext|>
9571f8b2952edc59921c7366745fcd3fd341b375c2562d2e33f1d8006c212605
def deposit_templates(): 'Retrieve from DB the tamplates for marc21 deposit form.' templates = current_records_marc21.templates_service.get_templates() if templates: return [template.to_dict() for template in templates] return []
Retrieve from DB the tamplates for marc21 deposit form.
invenio_records_marc21/ui/theme/deposit.py
deposit_templates
philippgualdi/invenio-records-marc21
1
python
def deposit_templates(): templates = current_records_marc21.templates_service.get_templates() if templates: return [template.to_dict() for template in templates] return []
def deposit_templates(): templates = current_records_marc21.templates_service.get_templates() if templates: return [template.to_dict() for template in templates] return []<|docstring|>Retrieve from DB the tamplates for marc21 deposit form.<|endoftext|>
059727b3709024970b8d1555b1fc485f099c62c7dc015de028cbb236553440b0
def deposit_config(**kwargs): 'Create an deposit configuration.' jsonschema = current_app.extensions['invenio-jsonschemas'] schema = {} if jsonschema: schema = jsonschema.get_schema(path='marc21/marc21-structure-v1.0.0.json') config = {**kwargs} config.setdefault('error', '') config.setdefault('schema', schema) config.setdefault('createUrl', '/api/marc21') return config
Create an deposit configuration.
invenio_records_marc21/ui/theme/deposit.py
deposit_config
philippgualdi/invenio-records-marc21
1
python
def deposit_config(**kwargs): jsonschema = current_app.extensions['invenio-jsonschemas'] schema = {} if jsonschema: schema = jsonschema.get_schema(path='marc21/marc21-structure-v1.0.0.json') config = {**kwargs} config.setdefault('error', ) config.setdefault('schema', schema) config.setdefault('createUrl', '/api/marc21') return config
def deposit_config(**kwargs): jsonschema = current_app.extensions['invenio-jsonschemas'] schema = {} if jsonschema: schema = jsonschema.get_schema(path='marc21/marc21-structure-v1.0.0.json') config = {**kwargs} config.setdefault('error', ) config.setdefault('schema', schema) config.setdefault('createUrl', '/api/marc21') return config<|docstring|>Create an deposit configuration.<|endoftext|>
e3a7947e665ceaf8fe188a776eba1a03ad5ae2dfd826665c199d2c65000d7956
def ensure_listitem_in_list(doc: DocCursor) -> DocCursor: "We expect all listitems to have a parent list. However, the parser has\n emitted lists followed by bare listitems. We'll place the listitem in the\n list that imemdiately precedes it." for li in doc.filter((lambda n: (n.node_type == 'listitem'))): parent = li.parent() prev_sibling = li.left_sibling() if (not parent): logger.warning('Root of %s is an li.', doc.policy.title_with_number) continue if (parent.node_type == 'list'): continue if (prev_sibling and (prev_sibling.node_type == 'list')): li.append_to(prev_sibling) return ensure_listitem_in_list(doc) logger.warning('Could not fix li in %s', doc.policy.title_with_number) return doc
We expect all listitems to have a parent list. However, the parser has emitted lists followed by bare listitems. We'll place the listitem in the list that imemdiately precedes it.
api/ombpdf/management/commands/migrate_documents.py
ensure_listitem_in_list
18F/omb-eregs
10
python
def ensure_listitem_in_list(doc: DocCursor) -> DocCursor: "We expect all listitems to have a parent list. However, the parser has\n emitted lists followed by bare listitems. We'll place the listitem in the\n list that imemdiately precedes it." for li in doc.filter((lambda n: (n.node_type == 'listitem'))): parent = li.parent() prev_sibling = li.left_sibling() if (not parent): logger.warning('Root of %s is an li.', doc.policy.title_with_number) continue if (parent.node_type == 'list'): continue if (prev_sibling and (prev_sibling.node_type == 'list')): li.append_to(prev_sibling) return ensure_listitem_in_list(doc) logger.warning('Could not fix li in %s', doc.policy.title_with_number) return doc
def ensure_listitem_in_list(doc: DocCursor) -> DocCursor: "We expect all listitems to have a parent list. However, the parser has\n emitted lists followed by bare listitems. We'll place the listitem in the\n list that imemdiately precedes it." for li in doc.filter((lambda n: (n.node_type == 'listitem'))): parent = li.parent() prev_sibling = li.left_sibling() if (not parent): logger.warning('Root of %s is an li.', doc.policy.title_with_number) continue if (parent.node_type == 'list'): continue if (prev_sibling and (prev_sibling.node_type == 'list')): li.append_to(prev_sibling) return ensure_listitem_in_list(doc) logger.warning('Could not fix li in %s', doc.policy.title_with_number) return doc<|docstring|>We expect all listitems to have a parent list. However, the parser has emitted lists followed by bare listitems. We'll place the listitem in the list that imemdiately precedes it.<|endoftext|>
341389eb5719431eeb7b0cfd6f555ec15c58959d10ab74b0ebcb55abdc2ef8b1
def ensure_section_has_heading(doc: DocCursor) -> DocCursor: 'We expect all sections to have a heading. Fill the missing data with\n placeholder text.' secs = list(doc.filter((lambda n: (n.node_type == 'sec')))) for sec in secs: children = list(sec.children()) if ((not children) or (children[0].node_type != 'heading')): sec.add_child('heading', insert_pos=0, text='--Missing Heading--', policy_id=doc.policy_id) return doc
We expect all sections to have a heading. Fill the missing data with placeholder text.
api/ombpdf/management/commands/migrate_documents.py
ensure_section_has_heading
18F/omb-eregs
10
python
def ensure_section_has_heading(doc: DocCursor) -> DocCursor: 'We expect all sections to have a heading. Fill the missing data with\n placeholder text.' secs = list(doc.filter((lambda n: (n.node_type == 'sec')))) for sec in secs: children = list(sec.children()) if ((not children) or (children[0].node_type != 'heading')): sec.add_child('heading', insert_pos=0, text='--Missing Heading--', policy_id=doc.policy_id) return doc
def ensure_section_has_heading(doc: DocCursor) -> DocCursor: 'We expect all sections to have a heading. Fill the missing data with\n placeholder text.' secs = list(doc.filter((lambda n: (n.node_type == 'sec')))) for sec in secs: children = list(sec.children()) if ((not children) or (children[0].node_type != 'heading')): sec.add_child('heading', insert_pos=0, text='--Missing Heading--', policy_id=doc.policy_id) return doc<|docstring|>We expect all sections to have a heading. Fill the missing data with placeholder text.<|endoftext|>
6ebd311e4b300b7d6acf79aaff452b670b4571efbad538b3fe0727c2b91ab2b9
def migrate_doc(doc: DocCursor) -> DocCursor: 'Apply all transforms to a given document. Save it and return.' for transform in transforms: doc = transform(doc) doc.nested_set_renumber(bulk_create=False) for node in doc.walk(): node.save() return doc
Apply all transforms to a given document. Save it and return.
api/ombpdf/management/commands/migrate_documents.py
migrate_doc
18F/omb-eregs
10
python
def migrate_doc(doc: DocCursor) -> DocCursor: for transform in transforms: doc = transform(doc) doc.nested_set_renumber(bulk_create=False) for node in doc.walk(): node.save() return doc
def migrate_doc(doc: DocCursor) -> DocCursor: for transform in transforms: doc = transform(doc) doc.nested_set_renumber(bulk_create=False) for node in doc.walk(): node.save() return doc<|docstring|>Apply all transforms to a given document. Save it and return.<|endoftext|>
015db49be32e2204a27d7e8dc3ab26f89986885bbf3d88865e5e86b5743e5e62
def test_import_opening_hours_period_comments(requests_mock, importer, places_response): 'Import openinghours_exception as OpeningHoursPeriod.comment' requests_mock.get(PLACES_URL, json=places_response) importer.import_features() f = Feature.objects.get(source_id=416) assert (f.opening_hours_periods.count() == 1) ohp = f.opening_hours_periods.first() assert (ohp.comment == pytest_regex('^Suomenlinna on Helsingin kaupunginosa.*'))
Import openinghours_exception as OpeningHoursPeriod.comment
features/importers/myhelsinki_places/tests/test_import_opening_hours.py
test_import_opening_hours_period_comments
City-of-Helsinki/ah
0
python
def test_import_opening_hours_period_comments(requests_mock, importer, places_response): requests_mock.get(PLACES_URL, json=places_response) importer.import_features() f = Feature.objects.get(source_id=416) assert (f.opening_hours_periods.count() == 1) ohp = f.opening_hours_periods.first() assert (ohp.comment == pytest_regex('^Suomenlinna on Helsingin kaupunginosa.*'))
def test_import_opening_hours_period_comments(requests_mock, importer, places_response): requests_mock.get(PLACES_URL, json=places_response) importer.import_features() f = Feature.objects.get(source_id=416) assert (f.opening_hours_periods.count() == 1) ohp = f.opening_hours_periods.first() assert (ohp.comment == pytest_regex('^Suomenlinna on Helsingin kaupunginosa.*'))<|docstring|>Import openinghours_exception as OpeningHoursPeriod.comment<|endoftext|>
9e5939068c485b6a6a299e2a0c8c3b43e87e1a9dd0ca7de1426ee32e477ce3db
def test_empty_opening_hours_are_not_imported(requests_mock, importer, places_response): 'Empty / null opening hours i.e. no opening hours' requests_mock.get(PLACES_URL, json=places_response) importer.import_features() f = Feature.objects.get(source_id=416) assert (f.opening_hours_periods.count() == 1) ohp = f.opening_hours_periods.first() assert (ohp.opening_hours.count() == 0) f = Feature.objects.get(source_id=2792) assert (f.opening_hours_periods.count() == 0)
Empty / null opening hours i.e. no opening hours
features/importers/myhelsinki_places/tests/test_import_opening_hours.py
test_empty_opening_hours_are_not_imported
City-of-Helsinki/ah
0
python
def test_empty_opening_hours_are_not_imported(requests_mock, importer, places_response): requests_mock.get(PLACES_URL, json=places_response) importer.import_features() f = Feature.objects.get(source_id=416) assert (f.opening_hours_periods.count() == 1) ohp = f.opening_hours_periods.first() assert (ohp.opening_hours.count() == 0) f = Feature.objects.get(source_id=2792) assert (f.opening_hours_periods.count() == 0)
def test_empty_opening_hours_are_not_imported(requests_mock, importer, places_response): requests_mock.get(PLACES_URL, json=places_response) importer.import_features() f = Feature.objects.get(source_id=416) assert (f.opening_hours_periods.count() == 1) ohp = f.opening_hours_periods.first() assert (ohp.opening_hours.count() == 0) f = Feature.objects.get(source_id=2792) assert (f.opening_hours_periods.count() == 0)<|docstring|>Empty / null opening hours i.e. no opening hours<|endoftext|>
33b6a88947dd114679a602af5e62badb4186e48a2cabc591e04d7c5060c71a27
def __init__(self, **kwargs): 'BuilderConfig for DCQA.\n\n Args:\n **kwargs: keyword arguments forwarded to super.\n ' super(DcqaConfig, self).__init__(**kwargs)
BuilderConfig for DCQA. Args: **kwargs: keyword arguments forwarded to super.
datasets/dcqa/dcqa.py
__init__
ExpressAI/DataLab
54
python
def __init__(self, **kwargs): 'BuilderConfig for DCQA.\n\n Args:\n **kwargs: keyword arguments forwarded to super.\n ' super(DcqaConfig, self).__init__(**kwargs)
def __init__(self, **kwargs): 'BuilderConfig for DCQA.\n\n Args:\n **kwargs: keyword arguments forwarded to super.\n ' super(DcqaConfig, self).__init__(**kwargs)<|docstring|>BuilderConfig for DCQA. Args: **kwargs: keyword arguments forwarded to super.<|endoftext|>
22d9ab17d5fa7defcb5992ecdc3809d5e5ed76696539d7f20cd923aaa1eff971
def _generate_examples(self, filepath): 'This function returns the examples in the raw (text) form.' logger.info('generating examples from = %s', filepath) key = 0 id_sample = 0 with open(filepath, encoding='utf-8') as f: dcqa = json.load(f) for data in dcqa: question = data['question'] context = data['context'] answer = data['answer'] AnchorSentenceID = data['AnchorSentenceID'] QuestionID = data['QuestionID'] (yield (key, {'id': str((id_sample + 1)), 'question': question, 'context': context, 'answer': answer, 'AnchorSentenceID': AnchorSentenceID, 'QuestionID': QuestionID})) key += 1
This function returns the examples in the raw (text) form.
datasets/dcqa/dcqa.py
_generate_examples
ExpressAI/DataLab
54
python
def _generate_examples(self, filepath): logger.info('generating examples from = %s', filepath) key = 0 id_sample = 0 with open(filepath, encoding='utf-8') as f: dcqa = json.load(f) for data in dcqa: question = data['question'] context = data['context'] answer = data['answer'] AnchorSentenceID = data['AnchorSentenceID'] QuestionID = data['QuestionID'] (yield (key, {'id': str((id_sample + 1)), 'question': question, 'context': context, 'answer': answer, 'AnchorSentenceID': AnchorSentenceID, 'QuestionID': QuestionID})) key += 1
def _generate_examples(self, filepath): logger.info('generating examples from = %s', filepath) key = 0 id_sample = 0 with open(filepath, encoding='utf-8') as f: dcqa = json.load(f) for data in dcqa: question = data['question'] context = data['context'] answer = data['answer'] AnchorSentenceID = data['AnchorSentenceID'] QuestionID = data['QuestionID'] (yield (key, {'id': str((id_sample + 1)), 'question': question, 'context': context, 'answer': answer, 'AnchorSentenceID': AnchorSentenceID, 'QuestionID': QuestionID})) key += 1<|docstring|>This function returns the examples in the raw (text) form.<|endoftext|>
f3582dfb01f1c77b6f202ce1554f47ecca32375dcad7e3113b044dd6ea35962f
@commands.command(name='roll', aliases=['r']) async def roll_command(self, ctx: Context, expression: str): '\n Classic dice roller.\n\n For `dice` type `{x}d{y}`.\n Where `x` - count of dices and `y` - edges of dices.\n\n Examples:\n `1d10` -> Rolls 1 dice with 10 edges. Returns a number from 1 to 10.\n `2d15` -> Rolls 2 dices with 15 edges. Returns two numbers from 1 to 15.\n `1d5+1` -> Rolls 1 dice with 5 edges. Returns a number from 1 to 5, to which 1 is added.\n `1d5-1` -> Rolls 1 dice with 5 edges. Returns a number from 1 to 5, from which 1 will be subtracted.\n ' if (len(expression) < 3): (await ctx.send('Too short!')) return expression_split = expression.split('d') count = int(expression_split[0]) sides_and_mod = re.split('(\\+|\\-|\\*|//|/)', expression_split[1]) sides = int(sides_and_mod[0]) mod_str = ' '.join(sides_and_mod[1:]) rolls = [] while (count > 0): roll = random.randint(1, sides) rolls.append(roll) count -= 1 rolls_mod = [] for roll in rolls: expr = (str(roll) + mod_str) rolls_mod.append(int(eval(expr))) message = 'Here you go\n' for (roll, roll_modded) in zip(rolls, rolls_mod): roll_string = f'{roll} [ {mod_str} ] ➞ **{roll_modded}**' message += (roll_string + '\n') roll_summ = 0 for roll in rolls_mod: roll_summ += roll message += f''' **Total**: {roll_summ} ''' embed = Embed() embed.set_author(name='Roll result') embed.description = message (await ctx.send(embed=embed))
Classic dice roller. For `dice` type `{x}d{y}`. Where `x` - count of dices and `y` - edges of dices. Examples: `1d10` -> Rolls 1 dice with 10 edges. Returns a number from 1 to 10. `2d15` -> Rolls 2 dices with 15 edges. Returns two numbers from 1 to 15. `1d5+1` -> Rolls 1 dice with 5 edges. Returns a number from 1 to 5, to which 1 is added. `1d5-1` -> Rolls 1 dice with 5 edges. Returns a number from 1 to 5, from which 1 will be subtracted.
incarn/exts/games/dice_roll.py
roll_command
SidVeld/Incarn
0
python
@commands.command(name='roll', aliases=['r']) async def roll_command(self, ctx: Context, expression: str): '\n Classic dice roller.\n\n For `dice` type `{x}d{y}`.\n Where `x` - count of dices and `y` - edges of dices.\n\n Examples:\n `1d10` -> Rolls 1 dice with 10 edges. Returns a number from 1 to 10.\n `2d15` -> Rolls 2 dices with 15 edges. Returns two numbers from 1 to 15.\n `1d5+1` -> Rolls 1 dice with 5 edges. Returns a number from 1 to 5, to which 1 is added.\n `1d5-1` -> Rolls 1 dice with 5 edges. Returns a number from 1 to 5, from which 1 will be subtracted.\n ' if (len(expression) < 3): (await ctx.send('Too short!')) return expression_split = expression.split('d') count = int(expression_split[0]) sides_and_mod = re.split('(\\+|\\-|\\*|//|/)', expression_split[1]) sides = int(sides_and_mod[0]) mod_str = ' '.join(sides_and_mod[1:]) rolls = [] while (count > 0): roll = random.randint(1, sides) rolls.append(roll) count -= 1 rolls_mod = [] for roll in rolls: expr = (str(roll) + mod_str) rolls_mod.append(int(eval(expr))) message = 'Here you go\n' for (roll, roll_modded) in zip(rolls, rolls_mod): roll_string = f'{roll} [ {mod_str} ] ➞ **{roll_modded}**' message += (roll_string + '\n') roll_summ = 0 for roll in rolls_mod: roll_summ += roll message += f' **Total**: {roll_summ} ' embed = Embed() embed.set_author(name='Roll result') embed.description = message (await ctx.send(embed=embed))
@commands.command(name='roll', aliases=['r']) async def roll_command(self, ctx: Context, expression: str): '\n Classic dice roller.\n\n For `dice` type `{x}d{y}`.\n Where `x` - count of dices and `y` - edges of dices.\n\n Examples:\n `1d10` -> Rolls 1 dice with 10 edges. Returns a number from 1 to 10.\n `2d15` -> Rolls 2 dices with 15 edges. Returns two numbers from 1 to 15.\n `1d5+1` -> Rolls 1 dice with 5 edges. Returns a number from 1 to 5, to which 1 is added.\n `1d5-1` -> Rolls 1 dice with 5 edges. Returns a number from 1 to 5, from which 1 will be subtracted.\n ' if (len(expression) < 3): (await ctx.send('Too short!')) return expression_split = expression.split('d') count = int(expression_split[0]) sides_and_mod = re.split('(\\+|\\-|\\*|//|/)', expression_split[1]) sides = int(sides_and_mod[0]) mod_str = ' '.join(sides_and_mod[1:]) rolls = [] while (count > 0): roll = random.randint(1, sides) rolls.append(roll) count -= 1 rolls_mod = [] for roll in rolls: expr = (str(roll) + mod_str) rolls_mod.append(int(eval(expr))) message = 'Here you go\n' for (roll, roll_modded) in zip(rolls, rolls_mod): roll_string = f'{roll} [ {mod_str} ] ➞ **{roll_modded}**' message += (roll_string + '\n') roll_summ = 0 for roll in rolls_mod: roll_summ += roll message += f' **Total**: {roll_summ} ' embed = Embed() embed.set_author(name='Roll result') embed.description = message (await ctx.send(embed=embed))<|docstring|>Classic dice roller. For `dice` type `{x}d{y}`. Where `x` - count of dices and `y` - edges of dices. Examples: `1d10` -> Rolls 1 dice with 10 edges. Returns a number from 1 to 10. `2d15` -> Rolls 2 dices with 15 edges. Returns two numbers from 1 to 15. `1d5+1` -> Rolls 1 dice with 5 edges. Returns a number from 1 to 5, to which 1 is added. `1d5-1` -> Rolls 1 dice with 5 edges. Returns a number from 1 to 5, from which 1 will be subtracted.<|endoftext|>
ac5835141e567ae2891f4256b566a44e48cdef7720f643560bd53d717ad59a08
@pytest.fixture(scope='module', name='project_id') def project_id_fixture() -> str: 'NOTE: since pytest does not allow to use fixtures inside parametrizations,\n this trick allows to re-use the same function in a fixture with a same "fixture" name' return project_id()
NOTE: since pytest does not allow to use fixtures inside parametrizations, this trick allows to re-use the same function in a fixture with a same "fixture" name
packages/simcore-sdk/tests/unit/test_node_ports_v2_port.py
project_id_fixture
colinRawlings/osparc-simcore
25
python
@pytest.fixture(scope='module', name='project_id') def project_id_fixture() -> str: 'NOTE: since pytest does not allow to use fixtures inside parametrizations,\n this trick allows to re-use the same function in a fixture with a same "fixture" name' return project_id()
@pytest.fixture(scope='module', name='project_id') def project_id_fixture() -> str: 'NOTE: since pytest does not allow to use fixtures inside parametrizations,\n this trick allows to re-use the same function in a fixture with a same "fixture" name' return project_id()<|docstring|>NOTE: since pytest does not allow to use fixtures inside parametrizations, this trick allows to re-use the same function in a fixture with a same "fixture" name<|endoftext|>
fdad293d07e1664f6ea08d5c4025d790f1a3ed8e725e606ee2ee6c859bd401bb
@pytest.fixture(scope='module', name='node_uuid') def node_uuid_fixture() -> str: 'NOTE: since pytest does not allow to use fixtures inside parametrizations,\n this trick allows to re-use the same function in a fixture with a same "fixture" name' return node_uuid()
NOTE: since pytest does not allow to use fixtures inside parametrizations, this trick allows to re-use the same function in a fixture with a same "fixture" name
packages/simcore-sdk/tests/unit/test_node_ports_v2_port.py
node_uuid_fixture
colinRawlings/osparc-simcore
25
python
@pytest.fixture(scope='module', name='node_uuid') def node_uuid_fixture() -> str: 'NOTE: since pytest does not allow to use fixtures inside parametrizations,\n this trick allows to re-use the same function in a fixture with a same "fixture" name' return node_uuid()
@pytest.fixture(scope='module', name='node_uuid') def node_uuid_fixture() -> str: 'NOTE: since pytest does not allow to use fixtures inside parametrizations,\n this trick allows to re-use the same function in a fixture with a same "fixture" name' return node_uuid()<|docstring|>NOTE: since pytest does not allow to use fixtures inside parametrizations, this trick allows to re-use the same function in a fixture with a same "fixture" name<|endoftext|>
b263f0c8002dee83c3fe89b5af097d02cd0a45303f3d7cea12b888cf6ed7967e
@pytest.fixture(scope='module', name='user_id') def user_id_fixture() -> int: 'NOTE: since pytest does not allow to use fixtures inside parametrizations,\n this trick allows to re-use the same function in a fixture with a same "fixture" name' return user_id()
NOTE: since pytest does not allow to use fixtures inside parametrizations, this trick allows to re-use the same function in a fixture with a same "fixture" name
packages/simcore-sdk/tests/unit/test_node_ports_v2_port.py
user_id_fixture
colinRawlings/osparc-simcore
25
python
@pytest.fixture(scope='module', name='user_id') def user_id_fixture() -> int: 'NOTE: since pytest does not allow to use fixtures inside parametrizations,\n this trick allows to re-use the same function in a fixture with a same "fixture" name' return user_id()
@pytest.fixture(scope='module', name='user_id') def user_id_fixture() -> int: 'NOTE: since pytest does not allow to use fixtures inside parametrizations,\n this trick allows to re-use the same function in a fixture with a same "fixture" name' return user_id()<|docstring|>NOTE: since pytest does not allow to use fixtures inside parametrizations, this trick allows to re-use the same function in a fixture with a same "fixture" name<|endoftext|>