function
stringlengths
11
56k
repo_name
stringlengths
5
60
features
list
def RecoverDevice(device, denylist, should_reboot=lambda device: True):
  """Attempts to recover a misbehaving Android device.

  Recovery is skipped for denylisted devices; unauthorized devices are
  re-authed; otherwise the device is rebooted (with a sysrq fallback) and
  waited on, denylisting it on failure.

  Args:
    device: DeviceUtils instance to recover.
    denylist: Denylist object, or a falsy value to disable denylisting.
    should_reboot: Predicate deciding whether the device gets rebooted.
  """
  # Denylisted devices are left alone entirely.
  if device_status.IsDenylisted(device.adb.GetDeviceSerial(), denylist):
    logger.debug('%s is denylisted, skipping recovery.', str(device))
    return
  # An unauthorized device may only need its adb key re-accepted.
  if device.adb.GetState() == 'unauthorized' and TryAuth(device):
    logger.info('Successfully authed device %s!', str(device))
    return
  if should_reboot(device):
    # Remember root state so it can be restored after the reboot.
    should_restore_root = device.HasRoot()
    try:
      device.WaitUntilFullyBooted(retries=0)
    except (device_errors.CommandTimeoutError,
            device_errors.CommandFailedError,
            device_errors.DeviceUnreachableError):
      logger.exception(
          'Failure while waiting for %s. '
          'Attempting to recover.', str(device))
    try:
      try:
        device.Reboot(block=False, timeout=5, retries=0)
      except device_errors.CommandTimeoutError:
        # NOTE(review): the two adjacent literals below concatenate without a
        # space ("normally.Attempting"); confirm before changing the message.
        logger.warning(
            'Timed out while attempting to reboot %s normally.'
            'Attempting alternative reboot.', str(device))
        # The device drops offline before we can grab the exit code, so
        # we don't check for status.
        try:
          device.adb.Root()
        finally:
          # We are already in a failure mode, attempt to reboot regardless of
          # what device.adb.Root() returns. If the sysrq reboot fails an
          # exception will be thrown at that level.
          device.adb.Shell(
              'echo b > /proc/sysrq-trigger',
              expect_status=None,
              timeout=5,
              retries=0)
    except (device_errors.CommandFailedError,
            device_errors.DeviceUnreachableError):
      logger.exception('Failed to reboot %s.', str(device))
      if denylist:
        denylist.Extend([device.adb.GetDeviceSerial()],
                        reason='reboot_failure')
    except device_errors.CommandTimeoutError:
      logger.exception('Timed out while rebooting %s.', str(device))
      if denylist:
        denylist.Extend([device.adb.GetDeviceSerial()],
                        reason='reboot_timeout')
    # Wait for the reboot to complete and restore root if we had it before.
    try:
      device.WaitUntilFullyBooted(
          retries=0, timeout=device.REBOOT_DEFAULT_TIMEOUT)
      if should_restore_root:
        device.EnableRoot()
    except (device_errors.CommandFailedError,
            device_errors.DeviceUnreachableError):
      logger.exception('Failure while waiting for %s.', str(device))
      if denylist:
        denylist.Extend([device.adb.GetDeviceSerial()],
                        reason='reboot_failure')
    except device_errors.CommandTimeoutError:
      logger.exception('Timed out while waiting for %s.', str(device))
      if denylist:
        denylist.Extend([device.adb.GetDeviceSerial()],
                        reason='reboot_timeout')
catapult-project/catapult
[ 1835, 570, 1835, 1039, 1429033745 ]
def main():
  """Parses arguments and runs device recovery over all known/attached devices."""
  parser = argparse.ArgumentParser()
  logging_common.AddLoggingArguments(parser)
  script_common.AddEnvironmentArguments(parser)
  parser.add_argument('--denylist-file', help='Device denylist JSON file.')
  # NOTE(review): a mutable default ([]) is shared by argparse across parses;
  # harmless here since main() parses once, but worth confirming.
  parser.add_argument(
      '--known-devices-file',
      action='append',
      default=[],
      dest='known_devices_files',
      help='Path to known device lists.')
  parser.add_argument(
      '--enable-usb-reset',
      action='store_true',
      help='Reset USB if necessary.')
  args = parser.parse_args()
  logging_common.InitializeLogging(args)
  script_common.InitializeEnvironment(args)
  # A denylist is only created when a file was provided; downstream code
  # treats None as "denylisting disabled".
  denylist = (device_denylist.Denylist(args.denylist_file)
              if args.denylist_file else None)
  # Recover the union of devices we expect to see and devices visible on USB.
  expected_devices = device_status.GetExpectedDevices(args.known_devices_files)
  usb_devices = set(lsusb.get_android_devices())
  devices = [
      device_utils.DeviceUtils(s)
      for s in expected_devices.union(usb_devices)
  ]
  RecoverDevices(devices, denylist, enable_usb_reset=args.enable_usb_reset)
catapult-project/catapult
[ 1835, 570, 1835, 1039, 1429033745 ]
def getrc(defns,depth=1):
    """Return a space-separated string of reference counts.

    Each whitespace-separated expression in `defns` is evaluated in the
    calling frame's scope and its refcount (minus the temporary reference
    created by getrefcount itself) is reported.

    Args:
        defns: whitespace-separated expressions to evaluate.
        depth: how many frames up to start resolving names (1 = caller).
    """
    from sys import getrefcount, _getframe
    f = _getframe(depth)
    G0 = f.f_globals
    L = f.f_locals
    if L is not G0:
        # We are inside a function: merge the chain of enclosing local
        # scopes (outermost first) so nested names resolve like closures.
        LL = [L]
        while 1:
            f = f.f_back
            G = f.f_globals
            L = f.f_locals
            # Stop at a frame from another module or at module level
            # (where locals IS globals).
            if G is not G0 or G is L: break
            LL.append(L)
        L = {}
        LL.reverse()
        for l in LL:
            L.update(l)
    else:
        L = L.copy()
    G0 = G0.copy()
    # -1 compensates for the reference held by getrefcount's argument.
    return ' '.join([str(getrefcount(eval(x,L,G0))-1) for x in defns.split()])
mattjmorrison/ReportLab
[ 24, 24, 24, 3, 1302902528 ]
def testFpStr(self):
    """Check the C fp_str float formatter against known values."""
    # Should give six decimal places if less than 1;
    # if more, give up to seven significant figures.
    from _rl_accel import fp_str
    assert fp_str(1,2,3)=='1 2 3'
    assert fp_str(1) == '1'
    assert fp_str(595.275574) == '595.2756'
    assert fp_str(59.5275574) == '59.52756'
    assert fp_str(5.95275574) == '5.952756'
mattjmorrison/ReportLab
[ 24, 24, 24, 3, 1302902528 ]
def test_AsciiBase85Decode(self):
    """Check the C ASCII-Base85 decoder against a known encoded/decoded pair."""
    from _rl_accel import _AsciiBase85Decode
    # '~>' is the ASCII85 end-of-data marker. NOTE(review): comparison with a
    # str literal implies Python 2 semantics; under Python 3 the C decoder
    # would likely return bytes — confirm target runtime.
    assert _AsciiBase85Decode('6ul^K@;[2RDIdd%@f~>')=='Dragan Andric'
mattjmorrison/ReportLab
[ 24, 24, 24, 3, 1302902528 ]
def test_instanceEscapePDF(self):
    """Check that the C PDF-string escaper backslash-escapes parentheses."""
    from _rl_accel import _instanceEscapePDF
    # First argument is the (unused here) instance slot of the accelerator.
    assert _instanceEscapePDF('', '(test)')=='\\(test\\)'
mattjmorrison/ReportLab
[ 24, 24, 24, 3, 1302902528 ]
def test_instanceStringWidth(self):
    """Check accelerated stringWidth against the pure-Python implementation
    for TTF and Type1 fonts, and verify no refcount leaks.

    NOTE(review): this is Python 2 code — str.decode() and narrow-string
    literals; it will not run unmodified on Python 3.
    """
    from reportlab.pdfbase.pdfmetrics import registerFont, getFont, _fonts, unicode2T1
    from reportlab.pdfbase.ttfonts import TTFont
    ttfn = 'Vera'
    t1fn = 'Times-Roman'
    registerFont(TTFont(ttfn, "Vera.ttf"))
    ttf = getFont(ttfn)
    t1f = getFont(t1fn)
    # cp1252-encodable text containing (c), (tm), (r) and e-acute.
    testCp1252 = 'copyright %s trademark %s registered %s ReportLab! Ol%s!' % (chr(169), chr(153),chr(174), chr(0xe9))
    enc='cp1252'
    senc = 'utf8'
    # UTF-8 byte string containing Greek Alpha and beta, plus its unicode form.
    ts = 'ABCDEF\xce\x91\xce\xb2G'
    utext = 'ABCDEF\xce\x91\xce\xb2G'.decode(senc)
    fontSize = 12
    # Names whose refcounts are snapshotted before and checked after the calls.
    defns="ttfn t1fn ttf t1f testCp1252 enc senc ts utext fontSize ttf.face ttf.face.charWidths ttf.face.defaultWidth t1f.widths t1f.encName t1f.substitutionFonts _fonts"
    rcv = getrc(defns)
    def tfunc(f,ts,fontSize,enc):
        # Accelerated and pure-Python widths must agree to float precision.
        w1 = f.stringWidth(ts,fontSize,enc)
        w2 = f._py_stringWidth(ts,fontSize,enc)
        assert abs(w1-w2)<1e-10,"f(%r).stringWidthU(%r,%s,%r)-->%r != f._py_stringWidth(...)-->%r" % (f,ts,fontSize,enc,w1,w2)
    tfunc(t1f,testCp1252,fontSize,enc)
    tfunc(t1f,ts,fontSize,senc)
    tfunc(t1f,utext,fontSize,senc)
    tfunc(ttf,ts,fontSize,senc)
    tfunc(ttf,testCp1252,fontSize,enc)
    tfunc(ttf,utext,fontSize,senc)
    # Any refcount drift indicates a leak in the C accelerator.
    rcc = checkrc(defns,rcv)
    assert not rcc, "rc diffs (%s)" % rcc
mattjmorrison/ReportLab
[ 24, 24, 24, 3, 1302902528 ]
def tfunc(f,ts):
    """Assert accelerated and pure-Python unicode2T1 agree for font `f` and text `ts`."""
    # Both are given the font plus its substitution-font fallbacks.
    w1 = unicode2T1(ts,[f]+f.substitutionFonts)
    w2 = _py_unicode2T1(ts,[f]+f.substitutionFonts)
    assert w1==w2,"%r != %r" % (w1,w2)
mattjmorrison/ReportLab
[ 24, 24, 24, 3, 1302902528 ]
def test_sameFrag(self):
    """Check _sameFrag compares exactly the fragment-style attributes.

    For each attribute: equal objects compare 1; a changed or missing
    attribute on one side compares 0; missing on both sides compares 1.
    """
    from _rl_accel import _sameFrag
    class ABag:
        # Minimal attribute bag with a deterministic repr for messages.
        def __init__(self,**kwd):
            self.__dict__.update(kwd)
        def __str__(self):
            V=['%s=%r' % v for v in self.__dict__.items()]
            V.sort()
            return 'ABag(%s)' % ','.join(V)
    a=ABag(fontName='Helvetica',fontSize=12, textColor="red", rise=0, underline=0, strike=0, link="aaaa")
    b=ABag(fontName='Helvetica',fontSize=12, textColor="red", rise=0, underline=0, strike=0, link="aaaa")
    for name in ("fontName", "fontSize", "textColor", "rise", "underline", "strike", "link"):
        old = getattr(a,name)
        # Baseline: identical bags compare equal in both directions.
        assert _sameFrag(a,b)==1, "_sameFrag(%s,%s)!=1" % (a,b)
        assert _sameFrag(b,a)==1, "_sameFrag(%s,%s)!=1" % (b,a)
        # Differing value on one side -> unequal.
        setattr(a,name,None)
        assert _sameFrag(a,b)==0, "_sameFrag(%s,%s)!=0" % (a,b)
        assert _sameFrag(b,a)==0, "_sameFrag(%s,%s)!=0" % (b,a)
        # Attribute missing on one side -> unequal.
        delattr(a,name)
        assert _sameFrag(a,b)==0, "_sameFrag(%s,%s)!=0" % (a,b)
        assert _sameFrag(b,a)==0, "_sameFrag(%s,%s)!=0" % (b,a)
        # Attribute missing on BOTH sides -> equal again.
        delattr(b,name)
        assert _sameFrag(a,b)==1, "_sameFrag(%s,%s)!=1" % (a,b)
        assert _sameFrag(b,a)==1, "_sameFrag(%s,%s)!=1" % (b,a)
        # Restore for the next attribute's round.
        setattr(a,name,old)
        setattr(b,name,old)
mattjmorrison/ReportLab
[ 24, 24, 24, 3, 1302902528 ]
def main(): """Drives the main script behavior.""" script_dir = os.path.dirname(os.path.realpath(__file__)) for filename in os.listdir(script_dir): basename, extension = os.path.splitext(filename) if basename.startswith("Test") and extension == '.py': source_path = os.path.join(script_dir, filename) dest_path = source_path + ".park" sys.stdout.write("renaming {} to {}\n".format( source_path, dest_path)) os.rename(source_path, dest_path)
endlessm/chromium-browser
[ 21, 16, 21, 3, 1435959644 ]
def test_execute(self, mock_hook, mock_object):
    """Create-workflow operator delegates to the hook and returns the workflow dict."""
    # Arguments shared between the operator and the expected hook call.
    call_kwargs = dict(
        workflow=WORKFLOW,
        workflow_id=WORKFLOW_ID,
        location=LOCATION,
        project_id=PROJECT_ID,
        retry=RETRY,
        timeout=TIMEOUT,
        metadata=METADATA,
    )
    op = WorkflowsCreateWorkflowOperator(
        task_id="test_task",
        gcp_conn_id=GCP_CONN_ID,
        impersonation_chain=IMPERSONATION_CHAIN,
        **call_kwargs,
    )
    result = op.execute({})
    mock_hook.assert_called_once_with(
        gcp_conn_id=GCP_CONN_ID,
        impersonation_chain=IMPERSONATION_CHAIN,
    )
    mock_hook.return_value.create_workflow.assert_called_once_with(**call_kwargs)
    assert result == mock_object.to_dict.return_value
apache/incubator-airflow
[ 29418, 12032, 29418, 869, 1428948298 ]
def test_execute(self, mock_hook, mock_object):
    """Update-workflow operator fetches the workflow, patches it, returns the dict."""
    # Arguments shared by the operator and the expected get_workflow call.
    lookup_kwargs = dict(
        workflow_id=WORKFLOW_ID,
        location=LOCATION,
        project_id=PROJECT_ID,
        retry=RETRY,
        timeout=TIMEOUT,
        metadata=METADATA,
    )
    op = WorkflowsUpdateWorkflowOperator(
        task_id="test_task",
        update_mask=UPDATE_MASK,
        gcp_conn_id=GCP_CONN_ID,
        impersonation_chain=IMPERSONATION_CHAIN,
        **lookup_kwargs,
    )
    result = op.execute({})
    mock_hook.assert_called_once_with(
        gcp_conn_id=GCP_CONN_ID,
        impersonation_chain=IMPERSONATION_CHAIN,
    )
    hook = mock_hook.return_value
    hook.get_workflow.assert_called_once_with(**lookup_kwargs)
    # The fetched workflow object itself must be passed on to update_workflow.
    hook.update_workflow.assert_called_once_with(
        workflow=hook.get_workflow.return_value,
        update_mask=UPDATE_MASK,
        retry=RETRY,
        timeout=TIMEOUT,
        metadata=METADATA,
    )
    assert result == mock_object.to_dict.return_value
apache/incubator-airflow
[ 29418, 12032, 29418, 869, 1428948298 ]
def test_execute( self, mock_hook,
apache/incubator-airflow
[ 29418, 12032, 29418, 869, 1428948298 ]
def test_execute(self, mock_hook, mock_object):
    """List-workflows operator queries the hook and wraps each result via to_dict."""
    # A workflow whose start_time lies in the future must still be returned.
    future_workflow = mock.MagicMock()
    future_workflow.start_time = datetime.datetime.now(tz=pytz.UTC) + datetime.timedelta(minutes=5)
    mock_hook.return_value.list_workflows.return_value = [future_workflow]
    query_kwargs = dict(
        location=LOCATION,
        project_id=PROJECT_ID,
        filter_=FILTER_,
        order_by=ORDER_BY,
        retry=RETRY,
        timeout=TIMEOUT,
        metadata=METADATA,
    )
    op = WorkflowsListWorkflowsOperator(
        task_id="test_task",
        gcp_conn_id=GCP_CONN_ID,
        impersonation_chain=IMPERSONATION_CHAIN,
        **query_kwargs,
    )
    result = op.execute({})
    mock_hook.assert_called_once_with(
        gcp_conn_id=GCP_CONN_ID,
        impersonation_chain=IMPERSONATION_CHAIN,
    )
    mock_hook.return_value.list_workflows.assert_called_once_with(**query_kwargs)
    assert result == [mock_object.to_dict.return_value]
apache/incubator-airflow
[ 29418, 12032, 29418, 869, 1428948298 ]
def test_execute(self, mock_hook, mock_object):
    """Get-workflow operator fetches the workflow and returns it as a dict."""
    call_kwargs = dict(
        workflow_id=WORKFLOW_ID,
        location=LOCATION,
        project_id=PROJECT_ID,
        retry=RETRY,
        timeout=TIMEOUT,
        metadata=METADATA,
    )
    op = WorkflowsGetWorkflowOperator(
        task_id="test_task",
        gcp_conn_id=GCP_CONN_ID,
        impersonation_chain=IMPERSONATION_CHAIN,
        **call_kwargs,
    )
    result = op.execute({})
    mock_hook.assert_called_once_with(
        gcp_conn_id=GCP_CONN_ID,
        impersonation_chain=IMPERSONATION_CHAIN,
    )
    mock_hook.return_value.get_workflow.assert_called_once_with(**call_kwargs)
    assert result == mock_object.to_dict.return_value
apache/incubator-airflow
[ 29418, 12032, 29418, 869, 1428948298 ]
def test_execute(self, mock_xcom, mock_hook, mock_object):
    """Create-execution operator pushes the execution id to XCom and returns the dict."""
    # The id pushed to XCom is the last path segment of the execution name.
    mock_hook.return_value.create_execution.return_value.name = "name/execution_id"
    call_kwargs = dict(
        workflow_id=WORKFLOW_ID,
        execution=EXECUTION,
        location=LOCATION,
        project_id=PROJECT_ID,
        retry=RETRY,
        timeout=TIMEOUT,
        metadata=METADATA,
    )
    op = WorkflowsCreateExecutionOperator(
        task_id="test_task",
        gcp_conn_id=GCP_CONN_ID,
        impersonation_chain=IMPERSONATION_CHAIN,
        **call_kwargs,
    )
    result = op.execute({})
    mock_hook.assert_called_once_with(
        gcp_conn_id=GCP_CONN_ID,
        impersonation_chain=IMPERSONATION_CHAIN,
    )
    mock_hook.return_value.create_execution.assert_called_once_with(**call_kwargs)
    mock_xcom.assert_called_once_with({}, key="execution_id", value="execution_id")
    assert result == mock_object.to_dict.return_value
apache/incubator-airflow
[ 29418, 12032, 29418, 869, 1428948298 ]
def test_execute(self, mock_hook, mock_object):
    """Cancel-execution operator cancels via the hook and returns the execution dict."""
    call_kwargs = dict(
        workflow_id=WORKFLOW_ID,
        execution_id=EXECUTION_ID,
        location=LOCATION,
        project_id=PROJECT_ID,
        retry=RETRY,
        timeout=TIMEOUT,
        metadata=METADATA,
    )
    op = WorkflowsCancelExecutionOperator(
        task_id="test_task",
        gcp_conn_id=GCP_CONN_ID,
        impersonation_chain=IMPERSONATION_CHAIN,
        **call_kwargs,
    )
    result = op.execute({})
    mock_hook.assert_called_once_with(
        gcp_conn_id=GCP_CONN_ID,
        impersonation_chain=IMPERSONATION_CHAIN,
    )
    mock_hook.return_value.cancel_execution.assert_called_once_with(**call_kwargs)
    assert result == mock_object.to_dict.return_value
apache/incubator-airflow
[ 29418, 12032, 29418, 869, 1428948298 ]
def test_execute(self, mock_hook, mock_object):
    """List-executions operator queries the hook and wraps each result via to_dict."""
    # An execution starting in the future must still be included in the result.
    future_execution = mock.MagicMock()
    future_execution.start_time = datetime.datetime.now(tz=pytz.UTC) + datetime.timedelta(minutes=5)
    mock_hook.return_value.list_executions.return_value = [future_execution]
    query_kwargs = dict(
        workflow_id=WORKFLOW_ID,
        location=LOCATION,
        project_id=PROJECT_ID,
        retry=RETRY,
        timeout=TIMEOUT,
        metadata=METADATA,
    )
    op = WorkflowsListExecutionsOperator(
        task_id="test_task",
        gcp_conn_id=GCP_CONN_ID,
        impersonation_chain=IMPERSONATION_CHAIN,
        **query_kwargs,
    )
    result = op.execute({})
    mock_hook.assert_called_once_with(
        gcp_conn_id=GCP_CONN_ID,
        impersonation_chain=IMPERSONATION_CHAIN,
    )
    mock_hook.return_value.list_executions.assert_called_once_with(**query_kwargs)
    assert result == [mock_object.to_dict.return_value]
apache/incubator-airflow
[ 29418, 12032, 29418, 869, 1428948298 ]
def __init__(self, initial_learning_rate):
    """Record the starting learning rate for this schedule.

    Args:
      initial_learning_rate: The learning rate at step 0.
    """
    self.initial_learning_rate = initial_learning_rate
tensorflow/tensorflow
[ 171949, 87931, 171949, 2300, 1446859160 ]
def __call__(self, step):
    """Abstract hook mapping an optimizer step to a learning rate.

    Raises:
      NotImplementedError: always; subclasses must override.
    """
    raise NotImplementedError(
        "Learning rate schedule must override __call__")
tensorflow/tensorflow
[ 171949, 87931, 171949, 2300, 1446859160 ]
def get_config(self):
    """Abstract hook returning the schedule's serializable config.

    Raises:
      NotImplementedError: always; subclasses must override.
    """
    raise NotImplementedError(
        "Learning rate schedule must override get_config")
tensorflow/tensorflow
[ 171949, 87931, 171949, 2300, 1446859160 ]
def from_config(cls, config):
    """Re-create a `LearningRateSchedule` from a `get_config()` dict.

    Args:
      config: Output of `get_config()`.

    Returns:
      A `LearningRateSchedule` instance built by calling `cls` with the
      config entries as keyword arguments.
    """
    return cls(**config)
tensorflow/tensorflow
[ 171949, 87931, 171949, 2300, 1446859160 ]
def decayed_learning_rate(step):
  """Exponential decay: lr = initial_learning_rate * decay_rate**(step / decay_steps)."""
  # BUG FIX: '^' is bitwise XOR in Python (TypeError on floats);
  # exponentiation is '**'.
  return initial_learning_rate * decay_rate ** (step / decay_steps)
tensorflow/tensorflow
[ 171949, 87931, 171949, 2300, 1446859160 ]
def __init__(
    self,
    initial_learning_rate,
    decay_steps,
    decay_rate,
    staircase=False,
    name=None):
  """Applies exponential decay to the learning rate.

  Args:
    initial_learning_rate: Scalar `float32`/`float64` `Tensor` or Python
      number; the initial learning rate.
    decay_steps: Positive scalar `int32`/`int64` `Tensor` or Python number;
      see the decay computation above.
    decay_rate: Scalar `float32`/`float64` `Tensor` or Python number.
    staircase: If `True`, decay the learning rate at discrete intervals.
    name: Optional operation name; defaults to 'ExponentialDecay'.
  """
  super(ExponentialDecay, self).__init__()
  self.name = name
  self.staircase = staircase
  self.decay_rate = decay_rate
  self.decay_steps = decay_steps
  self.initial_learning_rate = initial_learning_rate
tensorflow/tensorflow
[ 171949, 87931, 171949, 2300, 1446859160 ]
def get_config(self):
    """Return the constructor kwargs needed to re-create this schedule."""
    return dict(
        initial_learning_rate=self.initial_learning_rate,
        decay_steps=self.decay_steps,
        decay_rate=self.decay_rate,
        staircase=self.staircase,
        name=self.name,
    )
tensorflow/tensorflow
[ 171949, 87931, 171949, 2300, 1446859160 ]
def __init__(
    self,
    boundaries,
    values,
    name=None):
  """Piecewise constant from boundaries and interval values.

  Args:
    boundaries: List of `Tensor`s/`int`s/`float`s with strictly increasing
      entries, all of the same type as the optimizer step.
    values: List of `Tensor`s/`float`s/`int`s giving the value for each
      interval defined by `boundaries`; must contain exactly one more
      element than `boundaries`, all of the same type.
    name: Optional operation name; defaults to 'PiecewiseConstant'.

  Raises:
    ValueError: if the number of elements in the lists do not match.
  """
  super(PiecewiseConstantDecay, self).__init__()
  # n boundaries partition the step axis into n + 1 intervals.
  if len(values) != len(boundaries) + 1:
    raise ValueError(
        "The length of boundaries should be 1 less than the length of values")
  self.boundaries = boundaries
  self.values = values
  self.name = name
tensorflow/tensorflow
[ 171949, 87931, 171949, 2300, 1446859160 ]
def get_config(self):
    """Return the constructor kwargs needed to re-create this schedule."""
    return dict(
        boundaries=self.boundaries,
        values=self.values,
        name=self.name,
    )
tensorflow/tensorflow
[ 171949, 87931, 171949, 2300, 1446859160 ]
def decayed_learning_rate(step):
  """Polynomial decay from initial_learning_rate to end_learning_rate over decay_steps."""
  step = min(step, decay_steps)
  # BUG FIX: '^' is bitwise XOR in Python (TypeError on floats);
  # exponentiation is '**'.
  return ((initial_learning_rate - end_learning_rate) *
          (1 - step / decay_steps) ** (power)
         ) + end_learning_rate
tensorflow/tensorflow
[ 171949, 87931, 171949, 2300, 1446859160 ]
def decayed_learning_rate(step):
  """Cycling polynomial decay: the horizon grows to the next multiple of decay_steps.

  NOTE: step must be > 0, otherwise the cycled horizon is 0 and the
  division below fails (same as the original formulation).
  """
  from math import ceil
  # BUG FIX: the original rebound `decay_steps` from itself, which makes it
  # a local and raises UnboundLocalError; use a distinct local name.
  cycled_decay_steps = decay_steps * ceil(step / decay_steps)
  # BUG FIX: '^' (bitwise XOR) replaced with '**' (exponentiation).
  return ((initial_learning_rate - end_learning_rate) *
          (1 - step / cycled_decay_steps) ** (power)
         ) + end_learning_rate
tensorflow/tensorflow
[ 171949, 87931, 171949, 2300, 1446859160 ]
def __init__(
    self,
    initial_learning_rate,
    decay_steps,
    end_learning_rate=0.0001,
    power=1.0,
    cycle=False,
    name=None):
  """Applies a polynomial decay to the learning rate.

  Args:
    initial_learning_rate: Scalar `float32`/`float64` `Tensor` or Python
      number; the initial learning rate.
    decay_steps: Positive scalar `int32`/`int64` `Tensor` or Python number;
      see the decay computation above.
    end_learning_rate: Scalar `float32`/`float64` `Tensor` or Python
      number; the minimal end learning rate.
    power: Scalar `float32`/`float64` `Tensor` or Python number; the power
      of the polynomial. Defaults to linear, 1.0.
    cycle: Whether to cycle beyond decay_steps.
    name: Optional operation name; defaults to 'PolynomialDecay'.
  """
  super(PolynomialDecay, self).__init__()
  self.name = name
  self.cycle = cycle
  self.power = power
  self.end_learning_rate = end_learning_rate
  self.decay_steps = decay_steps
  self.initial_learning_rate = initial_learning_rate
tensorflow/tensorflow
[ 171949, 87931, 171949, 2300, 1446859160 ]
def get_config(self):
    """Return the constructor kwargs needed to re-create this schedule."""
    return dict(
        initial_learning_rate=self.initial_learning_rate,
        decay_steps=self.decay_steps,
        end_learning_rate=self.end_learning_rate,
        power=self.power,
        cycle=self.cycle,
        name=self.name,
    )
tensorflow/tensorflow
[ 171949, 87931, 171949, 2300, 1446859160 ]
def decayed_learning_rate(step):
    """Continuous inverse-time decay: lr0 / (1 + decay_rate * step / decay_step)."""
    denominator = 1 + decay_rate * step / decay_step
    return initial_learning_rate / denominator
tensorflow/tensorflow
[ 171949, 87931, 171949, 2300, 1446859160 ]
def decayed_learning_rate(step):
    """Staircase inverse-time decay: only whole completed decay periods count."""
    completed_periods = floor(step / decay_step)
    return initial_learning_rate / (1 + decay_rate * completed_periods)
tensorflow/tensorflow
[ 171949, 87931, 171949, 2300, 1446859160 ]
def __init__(
    self,
    initial_learning_rate,
    decay_steps,
    decay_rate,
    staircase=False,
    name=None):
  """Applies inverse time decay to the initial learning rate.

  Args:
    initial_learning_rate: Scalar `float32`/`float64` `Tensor` or Python
      number; the initial learning rate.
    decay_steps: How often to apply decay.
    decay_rate: A Python number; the decay rate.
    staircase: Whether to apply decay in a discrete staircase, as opposed
      to continuous, fashion.
    name: Optional operation name; defaults to 'InverseTimeDecay'.
  """
  super(InverseTimeDecay, self).__init__()
  self.name = name
  self.staircase = staircase
  self.decay_rate = decay_rate
  self.decay_steps = decay_steps
  self.initial_learning_rate = initial_learning_rate
tensorflow/tensorflow
[ 171949, 87931, 171949, 2300, 1446859160 ]
def get_config(self):
    """Return the constructor kwargs needed to re-create this schedule."""
    return dict(
        initial_learning_rate=self.initial_learning_rate,
        decay_steps=self.decay_steps,
        decay_rate=self.decay_rate,
        staircase=self.staircase,
        name=self.name,
    )
tensorflow/tensorflow
[ 171949, 87931, 171949, 2300, 1446859160 ]
def decayed_learning_rate(step):
    """Cosine decay clipped at decay_steps, floored at alpha * initial rate."""
    step = min(step, decay_steps)
    completed_fraction = step / decay_steps
    cosine_decay = 0.5 * (1 + cos(pi * completed_fraction))
    decayed = (1 - alpha) * cosine_decay + alpha
    return initial_learning_rate * decayed
tensorflow/tensorflow
[ 171949, 87931, 171949, 2300, 1446859160 ]
def __init__(
    self,
    initial_learning_rate,
    decay_steps,
    alpha=0.0,
    name=None):
  """Applies cosine decay to the learning rate.

  Args:
    initial_learning_rate: Scalar `float32`/`float64` Tensor or Python
      number; the initial learning rate.
    decay_steps: Scalar `int32`/`int64` `Tensor` or Python number; number
      of steps to decay over.
    alpha: Scalar `float32`/`float64` Tensor or Python number; minimum
      learning rate value as a fraction of initial_learning_rate.
    name: Optional operation name; defaults to 'CosineDecay'.
  """
  super(CosineDecay, self).__init__()
  self.name = name
  self.alpha = alpha
  self.decay_steps = decay_steps
  self.initial_learning_rate = initial_learning_rate
tensorflow/tensorflow
[ 171949, 87931, 171949, 2300, 1446859160 ]
def get_config(self):
    """Return the constructor kwargs needed to re-create this schedule."""
    return dict(
        initial_learning_rate=self.initial_learning_rate,
        decay_steps=self.decay_steps,
        alpha=self.alpha,
        name=self.name,
    )
tensorflow/tensorflow
[ 171949, 87931, 171949, 2300, 1446859160 ]
def __init__(
    self,
    initial_learning_rate,
    first_decay_steps,
    t_mul=2.0,
    m_mul=1.0,
    alpha=0.0,
    name=None):
  """Applies cosine decay with restarts to the learning rate.

  Args:
    initial_learning_rate: Scalar `float32`/`float64` Tensor or Python
      number; the initial learning rate.
    first_decay_steps: Scalar `int32`/`int64` `Tensor` or Python number;
      number of steps to decay over.
    t_mul: Scalar `float32`/`float64` `Tensor` or Python number; used to
      derive the number of iterations in the i-th period.
    m_mul: Scalar `float32`/`float64` `Tensor` or Python number; used to
      derive the initial learning rate of the i-th period.
    alpha: Scalar `float32`/`float64` Tensor or Python number; minimum
      learning rate value as a fraction of the initial_learning_rate.
    name: Optional operation name; defaults to 'SGDRDecay'.
  """
  super(CosineDecayRestarts, self).__init__()
  self.name = name
  self.alpha = alpha
  # Multipliers are private; get_config re-exposes them as t_mul/m_mul.
  self._m_mul = m_mul
  self._t_mul = t_mul
  self.first_decay_steps = first_decay_steps
  self.initial_learning_rate = initial_learning_rate
tensorflow/tensorflow
[ 171949, 87931, 171949, 2300, 1446859160 ]
def compute_step(completed_fraction, geometric=False):
    """Helper for `cond` operation.

    Splits `completed_fraction` (total progress measured in first-period
    units) into the index of the current restart period and the fraction
    completed within that period.

    Args:
        completed_fraction: progress in units of the first decay period.
        geometric: True when t_mul != 1, i.e. period lengths grow
            geometrically; False means all periods have equal length.

    Returns:
        (i_restart, completed_fraction): period index and within-period
        fraction.
    """
    if geometric:
        # Invert the geometric series sum_{k<i} t_mul**k to find which
        # period the fraction falls into.
        i_restart = math_ops.floor(
            math_ops.log(1.0 - completed_fraction * (1.0 - t_mul)) /
            math_ops.log(t_mul))
        sum_r = (1.0 - t_mul**i_restart) / (1.0 - t_mul)
        # Rescale the remainder by the current period's length.
        completed_fraction = (completed_fraction - sum_r) / t_mul**i_restart
    else:
        # Equal-length periods: integer part is the period index.
        i_restart = math_ops.floor(completed_fraction)
        completed_fraction -= i_restart
    return i_restart, completed_fraction
tensorflow/tensorflow
[ 171949, 87931, 171949, 2300, 1446859160 ]
def get_config(self):
    """Return the constructor kwargs needed to re-create this schedule."""
    return dict(
        initial_learning_rate=self.initial_learning_rate,
        first_decay_steps=self.first_decay_steps,
        t_mul=self._t_mul,
        m_mul=self._m_mul,
        alpha=self.alpha,
        name=self.name,
    )
tensorflow/tensorflow
[ 171949, 87931, 171949, 2300, 1446859160 ]
def decayed_learning_rate(step):
    """Linear cosine decay: a linear ramp modulated by a cosine, offset by beta."""
    step = min(step, decay_steps)
    linear_decay = (decay_steps - step) / decay_steps
    fraction = 2 * num_periods * step / decay_steps
    cosine_decay = 0.5 * (1 + cos(pi * fraction))
    decayed = (alpha + linear_decay) * cosine_decay + beta
    return initial_learning_rate * decayed
tensorflow/tensorflow
[ 171949, 87931, 171949, 2300, 1446859160 ]
def __init__(
    self,
    initial_learning_rate,
    decay_steps,
    num_periods=0.5,
    alpha=0.0,
    beta=0.001,
    name=None):
  """Applies linear cosine decay to the learning rate.

  Args:
    initial_learning_rate: Scalar `float32`/`float64` Tensor or Python
      number; the initial learning rate.
    decay_steps: Scalar `int32`/`int64` `Tensor` or Python number; number
      of steps to decay over.
    num_periods: Number of periods in the cosine part of the decay; see
      computation above.
    alpha: See computation above.
    beta: See computation above.
    name: Optional operation name; defaults to 'LinearCosineDecay'.
  """
  super(LinearCosineDecay, self).__init__()
  self.name = name
  self.beta = beta
  self.alpha = alpha
  self.num_periods = num_periods
  self.decay_steps = decay_steps
  self.initial_learning_rate = initial_learning_rate
tensorflow/tensorflow
[ 171949, 87931, 171949, 2300, 1446859160 ]
def get_config(self):
    """Return the constructor kwargs needed to re-create this schedule."""
    return dict(
        initial_learning_rate=self.initial_learning_rate,
        decay_steps=self.decay_steps,
        num_periods=self.num_periods,
        alpha=self.alpha,
        beta=self.beta,
        name=self.name,
    )
tensorflow/tensorflow
[ 171949, 87931, 171949, 2300, 1446859160 ]
def decayed_learning_rate(step):
  """Noisy linear cosine decay: linear ramp plus noise eps_t, modulated by a cosine.

  Mirrors the (noise-free) linear cosine variant with an additive eps_t term.
  """
  step = min(step, decay_steps)
  # BUG FIX: the original had a stray ')' after this expression,
  # making the function a syntax error.
  linear_decay = (decay_steps - step) / decay_steps
  cosine_decay = 0.5 * (
      1 + cos(pi * 2 * num_periods * step / decay_steps))
  decayed = (alpha + linear_decay + eps_t) * cosine_decay + beta
  return initial_learning_rate * decayed
tensorflow/tensorflow
[ 171949, 87931, 171949, 2300, 1446859160 ]
def __init__(
    self,
    initial_learning_rate,
    decay_steps,
    initial_variance=1.0,
    variance_decay=0.55,
    num_periods=0.5,
    alpha=0.0,
    beta=0.001,
    name=None):
  """Applies noisy linear cosine decay to the learning rate.

  Args:
    initial_learning_rate: Scalar `float32`/`float64` Tensor or Python
      number; the initial learning rate.
    decay_steps: Scalar `int32`/`int64` `Tensor` or Python number; number
      of steps to decay over.
    initial_variance: Initial variance for the noise; see computation above.
    variance_decay: Decay for the noise's variance; see computation above.
    num_periods: Number of periods in the cosine part of the decay; see
      computation above.
    alpha: See computation above.
    beta: See computation above.
    name: Optional operation name; defaults to 'NoisyLinearCosineDecay'.
  """
  super(NoisyLinearCosineDecay, self).__init__()
  self.name = name
  self.beta = beta
  self.alpha = alpha
  self.num_periods = num_periods
  self.variance_decay = variance_decay
  self.initial_variance = initial_variance
  self.decay_steps = decay_steps
  self.initial_learning_rate = initial_learning_rate
tensorflow/tensorflow
[ 171949, 87931, 171949, 2300, 1446859160 ]
def get_config(self):
    """Return the constructor kwargs needed to re-create this schedule."""
    return dict(
        initial_learning_rate=self.initial_learning_rate,
        decay_steps=self.decay_steps,
        initial_variance=self.initial_variance,
        variance_decay=self.variance_decay,
        num_periods=self.num_periods,
        alpha=self.alpha,
        beta=self.beta,
        name=self.name,
    )
tensorflow/tensorflow
[ 171949, 87931, 171949, 2300, 1446859160 ]
def serialize(learning_rate_schedule):
    """Serialize a learning-rate schedule into a Keras-style config structure
    via the generic Keras object serializer."""
    return generic_utils.serialize_keras_object(learning_rate_schedule)
tensorflow/tensorflow
[ 171949, 87931, 171949, 2300, 1446859160 ]
def testDistortOpWithStandardLensModel(self):
code-google-com/cortex-vfx
[ 4, 3, 4, 21, 1426431447 ]
def __init__(self, table_prefix=''):
    """Store the prefix that generated queries prepend to every table name."""
    self.table_prefix = table_prefix
naparuba/shinken
[ 1129, 344, 1129, 221, 1290510176 ]
def create_insert_query(self, table, data):
    """Create a INSERT query in table with all data of data (a dict).

    The generated string (including its exact whitespace) matches the
    historical format:
      INSERT INTO <prefix><table>  (c1 , c2  ) VALUES ('v1' , 'v2'  )

    Args:
        table: table name (prefixed with self.table_prefix).
        data: mapping of column name -> value; booleans become 0/1 and all
            values pass through self.stringify().
    """
    # IDIOM: build the column/value fragments with join instead of the
    # original index-counting first-vs-rest comma logic.
    column_parts = []
    value_parts = []
    for prop in data:
        val = data[prop]
        # Booleans must be caught, because we want 0 or 1, not True or False.
        if isinstance(val, bool):
            val = 1 if val else 0
        val = self.stringify(val)
        column_parts.append(u"%s " % prop)
        value_parts.append(u"'%s' " % val)
    props_str = u" (" + u", ".join(column_parts) + u" )"
    values_str = u" (" + u", ".join(value_parts) + u" )"
    return (u"INSERT INTO %s " % (self.table_prefix + table)
            + props_str + u" VALUES" + values_str)
naparuba/shinken
[ 1129, 344, 1129, 221, 1290510176 ]
def fetchone(self):
    """Fetch a single row from the underlying database cursor."""
    cursor = self.db_cursor
    return cursor.fetchone()
naparuba/shinken
[ 1129, 344, 1129, 221, 1290510176 ]
def generate_visit_struct_body(field_prefix, members): ret = "" if len(field_prefix): field_prefix = field_prefix + "." for argname, argentry, optional, structured in parse_args(members): if optional: ret += mcgen('''
KernelAnalysisPlatform/KlareDbg
[ 73, 9, 73, 1, 1466427520 ]
def generate_visit_struct(name, members): ret = mcgen('''
KernelAnalysisPlatform/KlareDbg
[ 73, 9, 73, 1, 1466427520 ]
def generate_visit_list(name, members): return mcgen('''
KernelAnalysisPlatform/KlareDbg
[ 73, 9, 73, 1, 1466427520 ]
def generate_visit_enum(name, members): return mcgen('''
KernelAnalysisPlatform/KlareDbg
[ 73, 9, 73, 1, 1466427520 ]
def generate_visit_union(name, members): ret = generate_visit_enum('%sKind' % name, members.keys()) ret += mcgen('''
KernelAnalysisPlatform/KlareDbg
[ 73, 9, 73, 1, 1466427520 ]
def generate_declaration(name, members, genlist=True): ret = mcgen('''
KernelAnalysisPlatform/KlareDbg
[ 73, 9, 73, 1, 1466427520 ]
def generate_decl_enum(name, members, genlist=True): return mcgen('''
KernelAnalysisPlatform/KlareDbg
[ 73, 9, 73, 1, 1466427520 ]
def should_skip(test):
    """Return True when the named test must be skipped (native/viper emitter
    tests), False otherwise.

    FIX: the original fell off the end and returned None for non-matching
    names; an explicit bool keeps the return type consistent while leaving
    truthiness-based callers unaffected. startswith() takes a tuple of
    prefixes, replacing the duplicated checks.
    """
    return test.startswith(("native", "viper"))
infinnovation/micropython
[ 2, 1, 2, 8, 1460816997 ]
def test_one_dim_odd_input(self):
    """Percentile of a 1-D odd-length list with axis=[0] matches numpy and is scalar."""
    x = [1., 5., 3., 2., 4.]
    # q values straddle element boundaries to exercise each interpolation mode.
    for q in [0, 10, 25, 49.9, 50, 50.01, 90, 95, 100]:
        expected_percentile = np.percentile(
            x, q=q, interpolation=self._interpolation, axis=0)
        with self.test_session():
            pct = sample_stats.percentile(
                x, q=q, interpolation=self._interpolation, axis=[0])
            # Reducing the only axis leaves a scalar.
            self.assertAllEqual((), pct.get_shape())
            self.assertAllClose(expected_percentile, pct.eval())
npuichigo/ttsflow
[ 16, 6, 16, 1, 1500635633 ]
def test_two_dim_odd_input_axis_0(self):
    """Percentile over axis 0 of a (5, 2) array matches numpy."""
    x = np.array([[-1., 50., -3.5, 2., -1], [0., 0., 3., 2., 4.]]).T
    for q in [0, 10, 25, 49.9, 50, 50.01, 90, 95, 100]:
        expected_percentile = np.percentile(
            x, q=q, interpolation=self._interpolation, axis=0)
        with self.test_session():
            # Get dim 1 with negative and positive indices.
            # NOTE(review): both calls below pass axis=[0]; the "negative
            # index" variant presumably intended axis=[-2] — confirm.
            pct_neg_index = sample_stats.percentile(
                x, q=q, interpolation=self._interpolation, axis=[0])
            pct_pos_index = sample_stats.percentile(
                x, q=q, interpolation=self._interpolation, axis=[0])
            self.assertAllEqual((2,), pct_neg_index.get_shape())
            self.assertAllEqual((2,), pct_pos_index.get_shape())
            self.assertAllClose(expected_percentile, pct_neg_index.eval())
            self.assertAllClose(expected_percentile, pct_pos_index.eval())
npuichigo/ttsflow
[ 16, 6, 16, 1, 1500635633 ]
def test_two_dim_even_input_and_keep_dims_true(self):
    """keep_dims=True keeps the reduced axis as size 1, matching numpy keepdims."""
    x = np.array([[1., 2., 4., 50.], [1., 2., -4., 5.]]).T
    for q in [0, 10, 25, 49.9, 50, 50.01, 90, 95, 100]:
        expected_percentile = np.percentile(
            x, q=q, interpolation=self._interpolation, keepdims=True, axis=0)
        with self.test_session():
            pct = sample_stats.percentile(
                x,
                q=q,
                interpolation=self._interpolation,
                keep_dims=True,
                axis=[0])
            # (4, 2) reduced over axis 0 with keep_dims -> (1, 2).
            self.assertAllEqual((1, 2), pct.get_shape())
            self.assertAllClose(expected_percentile, pct.eval())
npuichigo/ttsflow
[ 16, 6, 16, 1, 1500635633 ]
def test_four_dimensional_input_and_keepdims(self):
    """4-D input with keep_dims matches numpy across many axis combinations."""
    x = rng.rand(2, 3, 4, 5)
    # Mix of None, scalar, negative, and tuple axes.
    for axis in [None, 0, 1, -2, (0,), (-1,), (-1, 1), (3, 1), (-3, 0)]:
        expected_percentile = np.percentile(
            x,
            q=0.77,
            interpolation=self._interpolation,
            axis=axis,
            keepdims=True)
        with self.test_session():
            pct = sample_stats.percentile(
                x,
                q=0.77,
                interpolation=self._interpolation,
                axis=axis,
                keep_dims=True)
            self.assertAllEqual(expected_percentile.shape, pct.get_shape())
            self.assertAllClose(expected_percentile, pct.eval())
npuichigo/ttsflow
[ 16, 6, 16, 1, 1500635633 ]
def test_four_dimensional_input_and_keepdims_x_static_ndims_dynamic_sz(self):
    """Like the static-shape test, but dimension sizes are only known at run time."""
    x = rng.rand(2, 3, 4, 5)
    # Rank is static (4) while every dimension size is dynamic.
    x_ph = array_ops.placeholder(dtypes.float64,
                                 shape=[None, None, None, None])
    for axis in [None, 0, 1, -2, (0,), (-1,), (-1, 1), (3, 1), (-3, 0)]:
        expected = np.percentile(
            x, q=0.77, interpolation=self._interpolation,
            axis=axis, keepdims=True)
        with self.test_session():
            pct = sample_stats.percentile(
                x_ph, q=0.77, interpolation=self._interpolation,
                axis=axis, keep_dims=True)
            self.assertAllClose(expected, pct.eval(feed_dict={x_ph: x}))
npuichigo/ttsflow
[ 16, 6, 16, 1, 1500635633 ]
def test_one_dim_odd_input(self):
    """Default axis=None reduces a 1-D input to a scalar percentile."""
    x = [1., 5., 3., 2., 4.]
    for q in [0, 10.1, 25.1, 49.9, 50.1, 50.01, 89, 100]:
        expected = np.percentile(x, q=q, interpolation=self._interpolation)
        with self.test_session():
            pct = sample_stats.percentile(
                x, q=q, interpolation=self._interpolation)
            self.assertAllEqual((), pct.get_shape())
            self.assertAllClose(expected, pct.eval())
npuichigo/ttsflow
[ 16, 6, 16, 1, 1500635633 ]
def test_invalid_interpolation_raises(self):
    """An unknown interpolation keyword must raise ValueError mentioning 'interpolation'."""
    samples = [1., 5., 3., 2., 4.]
    with self.assertRaisesRegexp(ValueError, "interpolation"):
        sample_stats.percentile(samples, q=0.5, interpolation="bad")
npuichigo/ttsflow
[ 16, 6, 16, 1, 1500635633 ]
def test_vector_q_raises_dynamic(self):
    """A non-scalar q fed at run time must fail the rank-0 validation op."""
    samples = [1., 5., 3., 2., 4.]
    q_ph = array_ops.placeholder(dtypes.float32)
    # validate_args=True inserts a runtime rank check on q.
    pct = sample_stats.percentile(samples, q=q_ph, validate_args=True)
    with self.test_session():
        with self.assertRaisesOpError("rank"):
            pct.eval(feed_dict={q_ph: [0.5]})
npuichigo/ttsflow
[ 16, 6, 16, 1, 1500635633 ]
def __init__(self, screen, folder, interval = None, autoplay = True):
    """Initialize an Animation bound to *screen*, reading frames from *folder*.

    `folder` is normalized to always end with a path separator.
    """
    super(Animation, self).__init__(screen)
    # BUG FIX: original tested ``folder[:-1] != '/'`` (everything but the
    # last character) instead of ``folder[-1]`` (the last character), so a
    # trailing '/' was appended even when one was already present,
    # producing paths like 'anim//'.
    if folder[-1] != '/':
        folder = folder + '/'
marian42/pixelpi
[ 90, 15, 90, 3, 1440634832 ]
def load_frames(self):
    """Load numbered frame bitmaps (0.bmp, 1.bmp, ...) from self.folder.

    NOTE(review): this snippet appears truncated here — the loop body
    continues past the last visible statement.
    """
    self.frames = []
    i = 0
    # Consume consecutively numbered files until the first gap.
    while os.path.isfile(self.folder + str(i) + '.bmp'):
        try:
            bmp = pygame.image.load(self.folder + str(i) + '.bmp')
        except Exception:
            # Report which file failed before re-raising.
            print('Error loading ' + str(i) + '.bmp from ' + self.folder)
            raise
        pixel_array = pygame.PixelArray(bmp)
marian42/pixelpi
[ 90, 15, 90, 3, 1440634832 ]
def is_single_file(self):
    """Return True iff the folder holds exactly one frame file ('0.bmp' but no '1.bmp')."""
    has_first = os.path.isfile(self.folder + '0.bmp')
    has_second = os.path.isfile(self.folder + '1.bmp')
    return has_first and not has_second
marian42/pixelpi
[ 90, 15, 90, 3, 1440634832 ]
def load_single(self):
    """Load all frames from the single stacked bitmap '0.bmp'.

    NOTE(review): this snippet appears truncated here.
    """
    self.frames = []
    bmp = pygame.image.load(self.folder + '0.bmp')
    # Frames are stacked vertically, 16 pixel rows per frame.
    # NOTE(review): '/' is float division on Python 3 — presumably this
    # targets Python 2; confirm before porting.
    framecount = bmp.get_height() / 16
    pixel_array = pygame.PixelArray(bmp)
marian42/pixelpi
[ 90, 15, 90, 3, 1440634832 ]
def load_interval(self):
    """Read the per-frame hold time (ms) from this folder's config.ini."""
    parser = ConfigParser.ConfigParser()
    parser.read(self.folder + 'config.ini')
    return parser.getint('animation', 'hold')
marian42/pixelpi
[ 90, 15, 90, 3, 1440634832 ]
def on_start(self):
    """Announce on stdout that playback of this folder has begun."""
    message = 'Starting ' + self.folder
    print(message)
marian42/pixelpi
[ 90, 15, 90, 3, 1440634832 ]
def test_01_setting_signature(self):
    """The simulator configuration must advertise the 'LASS-SIM' signature."""
    print("\nThe expected unit test environment is")
    print("1. TBD")
    signature = gc.SETTING["SIGNATURE"]
    self.assertEqual(signature, 'LASS-SIM')
LinkItONEDevGroup/LASS
[ 153, 77, 153, 3, 1435306462 ]
def test_03_check_dir_exist(self):
    """Placeholder: directory-existence checks are not implemented yet."""
    pass
LinkItONEDevGroup/LASS
[ 153, 77, 153, 3, 1435306462 ]
def setUp(self):
    """Build the test Request/Gremlin pair, a Mapper over them, and grab an event loop."""
    self.gremlin = Gremlin('gizmo_testing')
    self.request = Request('localhost', port=8182)
    self.mapper = Mapper(self.request, self.gremlin)
    self.ioloop = asyncio.get_event_loop()
    super(BaseTests, self).setUp()
emehrkay/Gizmo
[ 20, 3, 20, 1, 1417416909 ]
def hash(binary):
    """Return the hexadecimal SHA-256 digest of *binary* (a bytes-like object).

    NOTE(review): this shadows the builtin ``hash`` in its module's scope.
    """
    digest = hashlib.sha256(binary)
    return digest.hexdigest()
ultmaster/eoj3
[ 161, 29, 161, 27, 1489228488 ]
def main():
    """Convert the ATCA station and configuration tables into per-config files.

    Reads "ATCA_stations.txt" (station -> N/W offsets) and
    "ATCA_configs.txt" (configuration -> six station pads), and writes one
    "ATCA_<conf>.config" file per configuration row.  Relies on the
    module-level ``ascii_dat_read``, ``telescope``, ``latitude_deg`` and
    ``diameter_m``.
    """
    # Read the station lookup table: station name -> (offset E, offset N).
    col, dummy = ascii_dat_read("ATCA_stations.txt", delim=" ",
                                doFloatCols=[2, 3])
    statDict = {}
    for station, N, W in zip(col[1], col[2], col[3]):
        # Stations are tabulated as W offsets; convert to E offsets about
        # the array reference point (constant 1622.449 m).
        statDict[station] = (-W + 1622.449, N)

    # Read the array configuration file: one row per configuration.
    col, dummy = ascii_dat_read("ATCA_configs.txt", delim=" ",
                                doFloatCols=[2, 3, 4, 5, 6, 7])
    for confName, A1, A2, A3, A4, A5, A6 in zip(col[1], col[2], col[3],
                                                col[4], col[5], col[6],
                                                col[7]):
        if A1 == '':
            continue
        outFileName = "ATCA_%s.config" % confName
        # IMPROVED: 'with' guarantees the file is closed even if a station
        # lookup below raises (the original leaked the handle on error).
        with open(outFileName, "w") as FH:
            FH.write("#" + "-"*78 + "#\n")
            FH.write("#\n")
            FH.write("# Array definition file for the %s %s configuration.\n"
                     % (telescope, confName))
            FH.write("#\n")
            FH.write("#" + "-"*78 + "#\n")
            FH.write("\n")
            FH.write("# Name of the telescope\n")
            FH.write("telescope = %s\n" % telescope)
            FH.write("\n")
            FH.write("# Name of the configuration\n")
            FH.write("config = %s\n" % confName)
            FH.write("\n")
            FH.write("# Latitude of the array centre\n")
            FH.write("latitude_deg = %f\n" % latitude_deg)
            FH.write("\n")
            FH.write("# Antenna diameter\n")
            FH.write("diameter_m = %f\n" % diameter_m)
            FH.write("\n")
            FH.write("# Antenna coordinates (offset E, offset N)\n")
            # One line per antenna station, in pad order (was six
            # copy-pasted writes).
            for ant in (A1, A2, A3, A4, A5, A6):
                FH.write("%f, %f\n" % (statDict[ant][0], statDict[ant][1]))
crpurcell/friendlyVRI
[ 39, 12, 39, 2, 1488335416 ]
def request_oauth_token(request):
    """Start the QuickBooks OAuth1 flow.

    Fetches a request token, stores the token pair in the session, and
    redirects the user to the QuickBooks authorization page.
    """
    # We'll require a refresh in the blue dot cache
    if BLUE_DOT_CACHE_KEY in request.session:
        del request.session[BLUE_DOT_CACHE_KEY]

    access_token_callback = settings.QUICKBOOKS['OAUTH_CALLBACK_URL']
    # The configured callback may be a callable that derives a per-request URL.
    if callable(access_token_callback):
        access_token_callback = access_token_callback(request)

    session = OAuth1Session(client_key=settings.QUICKBOOKS['CONSUMER_KEY'],
                            client_secret=settings.QUICKBOOKS['CONSUMER_SECRET'],
                            callback_uri=access_token_callback)
    response = session.fetch_request_token(REQUEST_TOKEN_URL)

    try:
        request_token = response['oauth_token']
        request_token_secret = response['oauth_token_secret']
        request.session['qb_oauth_token'] = request_token
        request.session['qb_oauth_token_secret'] = request_token_secret
    except KeyError:
        # BUG FIX: was a bare ``except:``, which also intercepted
        # SystemExit/KeyboardInterrupt.  The only failure handled here is a
        # missing key in the provider's response; log it and re-raise.
        logger = logging.getLogger('quickbooks.views.request_oauth_token')
        logger.exception(("Couldn't extract oAuth parameters from token " +
                          "request response. Response was '%s'"), response)
        raise

    return HttpResponseRedirect("%s?oauth_token=%s" % (AUTHORIZATION_URL,
                                                       request_token))
grue/django-quickbooks-online
[ 20, 12, 20, 20, 1394156571 ]
def get_access_token(request):
    """Complete the QuickBooks OAuth1 flow.

    Parses the authorization callback query string, exchanges the verifier
    for an access token, replaces any existing tokens for the user, primes
    the blue dot menu cache, and emits the ``qb_connected`` signal.

    NOTE(review): Python 2 only — uses the ``unicode`` builtin.
    """
    session = OAuth1Session(client_key=settings.QUICKBOOKS['CONSUMER_KEY'],
                            client_secret=settings.QUICKBOOKS['CONSUMER_SECRET'],
                            resource_owner_key=request.session['qb_oauth_token'],
                            resource_owner_secret=request.session['qb_oauth_token_secret'])
    remote_response = session.parse_authorization_response('?{}'.format(request.META.get('QUERY_STRING')))
    realm_id = remote_response['realmId']
    data_source = remote_response['dataSource']
    oauth_verifier = remote_response['oauth_verifier']

    # [review] - Possible bug? This should be taken care of by session.parse_authorization_response
    session.auth.client.verifier = unicode(oauth_verifier)
    response = session.fetch_access_token(ACCESS_TOKEN_URL)

    # Delete any existing access tokens so exactly one remains per user.
    request.user.quickbookstoken_set.all().delete()

    token = QuickbooksToken.objects.create(
        user=request.user,
        access_token=response['oauth_token'],
        access_token_secret=response['oauth_token_secret'],
        realm_id=realm_id,
        data_source=data_source)

    # Cache blue dot menu; AttributeError here means no session backend.
    try:
        request.session[BLUE_DOT_CACHE_KEY] = None
        blue_dot_menu(request)
    except AttributeError:
        raise Exception('The sessions framework must be installed for this ' +
                        'application to work.')

    # Let everyone else know we connected.
    qb_connected.send(None, token=token)

    return render_to_response('oauth_callback.html',
                              {'complete_url': settings.QUICKBOOKS['ACCESS_COMPLETE_URL']})
grue/django-quickbooks-online
[ 20, 12, 20, 20, 1394156571 ]
def blue_dot_menu(request):
    """ Returns the blue dot menu. If possible a cached copy is returned.

    The rendered menu is kept in the session under BLUE_DOT_CACHE_KEY; a
    cache miss (or falsy cached value) triggers a fresh QuickbooksApi call.
    """
    cached = request.session.get(BLUE_DOT_CACHE_KEY)
    if cached:
        return cached
    fresh = HttpResponse(QuickbooksApi(request.user).app_menu())
    request.session[BLUE_DOT_CACHE_KEY] = fresh
    return fresh
grue/django-quickbooks-online
[ 20, 12, 20, 20, 1394156571 ]
def test_simple_uri_comparision(uri):
    """Equivalent URIs (case, default port, pct-encoding, dot-segments) compare equal."""
    u1 = URI(b'http://abc.com:80/~smith/home.html')
    u2 = URI(b'http://ABC.com/%7Esmith/home.html')
    u3 = URI(b'http://ABC.com:/%7esmith/home.html')
    u4 = URI(b'http://ABC.com:/%7esmith/./home.html')
    u5 = URI(b'http://ABC.com:/%7esmith/foo/../home.html')
    for left, right in ((u1, u2), (u2, u3), (u1, u3), (u1, u4), (u1, u5)):
        assert left == right
spaceone/httoop
[ 17, 5, 17, 5, 1365534138 ]
def test_request_uri_is_star():
    """Placeholder: behavior for the asterisk-form request target ('*') is untested."""
    pass
spaceone/httoop
[ 17, 5, 17, 5, 1365534138 ]
def test_invalid_uri_scheme():
    """Placeholder: handling of invalid URI schemes is not yet tested."""
    pass
spaceone/httoop
[ 17, 5, 17, 5, 1365534138 ]
def test_normalized_uri_redirects():
    """Placeholder: redirect behavior for normalized URIs is not yet tested."""
    pass
spaceone/httoop
[ 17, 5, 17, 5, 1365534138 ]
def setUp(self):
    """Prepare the end-to-end harness: project settings, Redis, and a Kafka consumer."""
    self.settings = get_project_settings()
    # Namespace topics for this run so they don't collide with other users.
    self.settings.set('KAFKA_TOPIC_PREFIX', "demo_test")
    # set up redis
    self.redis_conn = redis.Redis(host=self.settings['REDIS_HOST'],
                                  port=self.settings['REDIS_PORT'],
                                  db=self.settings['REDIS_DB'])
    try:
        self.redis_conn.info()
    except ConnectionError:
        print("Could not connect to Redis")
        # plugin is essential to functionality
        sys.exit(1)

    # clear out older test keys if any
    keys = self.redis_conn.keys("test-spider:*")
    for key in keys:
        self.redis_conn.delete(key)

    # set up kafka to consumer potential result
    self.consumer = KafkaConsumer(
        "demo_test.crawled_firehose",
        bootstrap_servers=self.settings['KAFKA_HOSTS'],
        group_id="demo-id",
        auto_commit_interval_ms=10,
        consumer_timeout_ms=5000,
        auto_offset_reset='earliest'
    )
    # Give the consumer a moment to join its group before the test runs.
    time.sleep(1)
istresearch/scrapy-cluster
[ 1100, 320, 1100, 15, 1429045924 ]
def thread_func():
    """Watchdog: give the crawl 20 seconds to run, then stop the twisted reactor."""
    time.sleep(20)
    reactor.stop()
istresearch/scrapy-cluster
[ 1100, 320, 1100, 15, 1429045924 ]
def tearDown(self):
    """Remove this test's Redis keys and drain/close the Kafka consumer."""
    keys = self.redis_conn.keys('stats:crawler:*:test-spider:*')
    keys = keys + self.redis_conn.keys('test-spider:*')
    for key in keys:
        self.redis_conn.delete(key)

    # if for some reason the tests fail, we end up falling behind on
    # the consumer
    for m in self.consumer:
        pass
    self.consumer.close()
istresearch/scrapy-cluster
[ 1100, 320, 1100, 15, 1429045924 ]
# NOTE(review): truncated fragment — only the constructor signature of a
# plotly validator is visible here; the parameter list is not closed and
# the body is missing.
def __init__(
    self, plotly_name="showtickprefix", parent_name="layout.yaxis", **kwargs
plotly/python-api
[ 13052, 2308, 13052, 1319, 1385013188 ]
def format_subtables_for_results(table_ids):
    """Build display links for census tables, grouping racial iterations and PR tables.

    NOTE(review): truncated fragment — the function ends mid triple-quoted
    string (``iter_wrapper = \"\"\"``); the remainder is not visible here.
    """
    parts = []
    # Racial-iteration tables are grouped by type (B/C, optionally +PR)
    # and rendered after the plain tables.
    deferred_racials = defaultdict(list)
    deferred_pr = []
    for table in table_ids:
        parsed = parse_table_id(table)
        if parsed['racial']:
            key = parsed['table_type']
            if parsed['puerto_rico']:
                key += 'PR'
            deferred_racials[key].append(parsed)
        elif parsed['puerto_rico']:
            deferred_pr.append(table)
        else:
            parts.append(table_link(table, generic_table_description(table)))
    # Plain Puerto Rico tables come after all non-deferred tables.
    for table in deferred_pr:
        parts.append(table_link(table, generic_table_description(table)))
    racial_label_tests = [
        ('B', 'Detailed (by race)'),
        ('C', 'Simplified (by race)'),
        ('BPR', 'Detailed (by race) for Puerto Rico'),
        ('CPR', 'Simplified (by race) for Puerto Rico'),
    ]
    for test, label in racial_label_tests:
        try:
            iteration_parts = []
            for table_dict in deferred_racials[test]:
                iteration_parts.append(table_link(table_dict['table_id'], table_dict['race']))
            # NOTE(review): table_dict leaks from the loop; NameError if the
            # group was empty — presumably caught by the enclosing try.
            group_table_id = table_dict['table_id']
            if iteration_parts:
                contents = ' / '.join(iteration_parts)
                iter_wrapper = """
censusreporter/censusreporter
[ 659, 135, 659, 96, 1369089960 ]
def setUp(self):
    """Truncate the auth_token table and prepare a sample token payload."""
    super(TokensTest, self).setUp()
    # Each test starts from an empty token table.
    TestClient.execute("""TRUNCATE auth_token""")
    self.test_token_data = {'description': 'Test token 1',
                            'scope_push': True,
                            'scope_pull': True}
InfraBox/infrabox
[ 44, 7, 44, 11, 1504426936 ]
def D_to_nu(D):
    """True anomaly from parabolic eccentric anomaly.

    Parameters
    ----------
    D : ~astropy.units.Quantity
        Parabolic eccentric anomaly.

    Returns
    -------
    nu : ~astropy.units.Quantity
        True anomaly.

    Notes
    -----
    Taken from Farnocchia, Davide, Davide Bracali Cioci, and Andrea Milani.
    "Robust resolution of Kepler's equation in all eccentricity regimes."
    Celestial Mechanics and Dynamical Astronomy 116, no. 1 (2013): 21-34.
    """
    # Compute in radians, then restore the caller's unit.
    nu_rad = D_to_nu_fast(D.to_value(u.rad)) * u.rad
    return nu_rad.to(D.unit)
poliastro/poliastro
[ 713, 256, 713, 128, 1372947285 ]
def nu_to_D(nu):
    """Parabolic eccentric anomaly from true anomaly.

    Parameters
    ----------
    nu : ~astropy.units.Quantity
        True anomaly.

    Returns
    -------
    D : ~astropy.units.Quantity
        Parabolic eccentric anomaly.

    Notes
    -----
    The original docstring described the return value as "Hyperbolic
    eccentric anomaly"; this function is the parabolic case (cf. D_to_nu).

    Taken from Farnocchia, Davide, Davide Bracali Cioci, and Andrea Milani.
    "Robust resolution of Kepler's equation in all eccentricity regimes."
    Celestial Mechanics and Dynamical Astronomy 116, no. 1 (2013): 21-34.
    """
    return (nu_to_D_fast(nu.to_value(u.rad)) * u.rad).to(nu.unit)
poliastro/poliastro
[ 713, 256, 713, 128, 1372947285 ]
def nu_to_E(nu, ecc):
    """Eccentric anomaly from true anomaly.

    .. versionadded:: 0.4.0

    Parameters
    ----------
    nu : ~astropy.units.Quantity
        True anomaly.
    ecc : ~astropy.units.Quantity
        Eccentricity.

    Returns
    -------
    E : ~astropy.units.Quantity
        Eccentric anomaly.
    """
    # Delegate to the dimensionless fast kernel, then restore units.
    E_rad = nu_to_E_fast(nu.to_value(u.rad), ecc.value) * u.rad
    return E_rad.to(nu.unit)
poliastro/poliastro
[ 713, 256, 713, 128, 1372947285 ]
def nu_to_F(nu, ecc):
    """Hyperbolic eccentric anomaly from true anomaly.

    Parameters
    ----------
    nu : ~astropy.units.Quantity
        True anomaly.
    ecc : ~astropy.units.Quantity
        Eccentricity (>1).

    Returns
    -------
    F : ~astropy.units.Quantity
        Hyperbolic eccentric anomaly.

    Notes
    -----
    Taken from Curtis, H. (2013). *Orbital mechanics for engineering
    students*. 167
    """
    # Delegate to the dimensionless fast kernel, then restore units.
    F_rad = nu_to_F_fast(nu.to_value(u.rad), ecc.value) * u.rad
    return F_rad.to(nu.unit)
poliastro/poliastro
[ 713, 256, 713, 128, 1372947285 ]
def E_to_nu(E, ecc):
    """True anomaly from eccentric anomaly.

    .. versionadded:: 0.4.0

    Parameters
    ----------
    E : ~astropy.units.Quantity
        Eccentric anomaly.
    ecc : ~astropy.units.Quantity
        Eccentricity.

    Returns
    -------
    nu : ~astropy.units.Quantity
        True anomaly.
    """
    # Delegate to the dimensionless fast kernel, then restore units.
    nu_rad = E_to_nu_fast(E.to_value(u.rad), ecc.value) * u.rad
    return nu_rad.to(E.unit)
poliastro/poliastro
[ 713, 256, 713, 128, 1372947285 ]
def F_to_nu(F, ecc):
    """True anomaly from hyperbolic eccentric anomaly.

    Parameters
    ----------
    F : ~astropy.units.Quantity
        Hyperbolic eccentric anomaly.
    ecc : ~astropy.units.Quantity
        Eccentricity (>1).

    Returns
    -------
    nu : ~astropy.units.Quantity
        True anomaly.
    """
    # Delegate to the dimensionless fast kernel, then restore units.
    nu_rad = F_to_nu_fast(F.to_value(u.rad), ecc.value) * u.rad
    return nu_rad.to(F.unit)
poliastro/poliastro
[ 713, 256, 713, 128, 1372947285 ]
def M_to_E(M, ecc):
    """Eccentric anomaly from mean anomaly.

    .. versionadded:: 0.4.0

    Parameters
    ----------
    M : ~astropy.units.Quantity
        Mean anomaly.
    ecc : ~astropy.units.Quantity
        Eccentricity.

    Returns
    -------
    E : ~astropy.units.Quantity
        Eccentric anomaly.
    """
    # Delegate to the dimensionless fast kernel, then restore units.
    E_rad = M_to_E_fast(M.to_value(u.rad), ecc.value) * u.rad
    return E_rad.to(M.unit)
poliastro/poliastro
[ 713, 256, 713, 128, 1372947285 ]
def M_to_F(M, ecc):
    """Hyperbolic eccentric anomaly from mean anomaly.

    Parameters
    ----------
    M : ~astropy.units.Quantity
        Mean anomaly.
    ecc : ~astropy.units.Quantity
        Eccentricity (>1).

    Returns
    -------
    F : ~astropy.units.Quantity
        Hyperbolic eccentric anomaly.
    """
    # Delegate to the dimensionless fast kernel, then restore units.
    F_rad = M_to_F_fast(M.to_value(u.rad), ecc.value) * u.rad
    return F_rad.to(M.unit)
poliastro/poliastro
[ 713, 256, 713, 128, 1372947285 ]