function
stringlengths
11
56k
repo_name
stringlengths
5
60
features
list
def set_app_version():
    ''' Declare the application version to juju.

    Runs ``kubelet --version`` and reports the trailing version number
    (everything after the last " v") to juju.
    '''
    cmd = ['kubelet', '--version']
    version = check_output(cmd)
    # check_output returns bytes; the original passed the raw bytes slice to
    # application_version_set. Decode so juju records a plain string.
    hookenv.application_version_set(
        version.split(b' v')[-1].rstrip().decode('utf-8'))
cncf/cross-cloud
[ 164, 57, 164, 40, 1494451575 ]
def notify_user_transient_status(): ''' Notify to the user we are in a transient state and the application is still converging. Potentially remotely, or we may be in a detached loop wait state ''' # During deployment the worker has to start kubelet without cluster dns # configured. If this is the first unit online in a service pool waiting # to self host the dns pod, and configure itself to query the dns service # declared in the kube-system namespace hookenv.status_set('waiting', 'Waiting for cluster DNS.')
cncf/cross-cloud
[ 164, 57, 164, 40, 1494451575 ]
def charm_status(kube_control):
    '''Update the status message with the current status of kubelet.'''
    # kube_control is unused here — presumably required by the reactive
    # handler signature; the real work happens in update_kubelet_status().
    update_kubelet_status()
cncf/cross-cloud
[ 164, 57, 164, 40, 1494451575 ]
def send_data(tls):
    '''Send the data that is required to create a server certificate for
    this server.'''
    # The unit's public address doubles as the certificate Common Name.
    cn = hookenv.unit_public_ip()
    # Subject Alternative Names the tls layer will bake into the server cert.
    sans = [
        hookenv.unit_public_ip(),
        hookenv.unit_private_ip(),
        gethostname(),
    ]
    # Unit names contain '/', which is not path safe; swap it for '_'.
    cert_name = hookenv.local_unit().replace('/', '_')
    # Ask the tls layer for a server certificate with this identity.
    tls.request_server_cert(cn, sans, cert_name)
cncf/cross-cloud
[ 164, 57, 164, 40, 1494451575 ]
def watch_for_changes(kube_api, kube_control, cni):
    ''' Watch for configuration changes and signal if we need to restart the
    worker services '''
    servers = get_kube_api_servers(kube_api)
    dns = kube_control.get_dns()
    cluster_cidr = cni.get_config()['cidr']

    # data_changed() also *records* the new value. Evaluate every call instead
    # of chaining them with a short-circuiting `or`: otherwise, when an early
    # key reports a change, the later keys are never checkpointed and will
    # report "changed" again on the next hook, triggering a redundant restart.
    api_changed = data_changed('kube-api-servers', servers)
    dns_changed = data_changed('kube-dns', dns)
    cidr_changed = data_changed('cluster-cidr', cluster_cidr)
    if api_changed or dns_changed or cidr_changed:
        set_state('kubernetes-worker.restart-needed')
cncf/cross-cloud
[ 164, 57, 164, 40, 1494451575 ]
def start_worker(kube_api, kube_control, auth_control, cni):
    ''' Start kubelet using the provided API and DNS info.'''
    servers = get_kube_api_servers(kube_api)
    # Note that the DNS server doesn't necessarily exist at this point. We know
    # what its IP will eventually be, though, so we can go ahead and configure
    # kubelet with that info. This ensures that early pods are configured with
    # the correct DNS even though the server isn't ready yet.
    dns = kube_control.get_dns()
    cluster_cidr = cni.get_config()['cidr']

    # Without a cluster CIDR we cannot configure kube-proxy yet; bail and let
    # a later hook retry. auth_control is unused here — presumably required
    # by the reactive handler signature.
    if cluster_cidr is None:
        hookenv.log('Waiting for cluster cidr.')
        return

    # Checkpoint the cached credentials so later handlers only react to
    # genuine changes.
    creds = db.get('credentials')
    data_changed('kube-control.creds', creds)

    # set --allow-privileged flag for kubelet
    set_privileged()

    create_config(random.choice(servers), creds)
    configure_kubelet(dns)
    configure_kube_proxy(servers, cluster_cidr)
    set_state('kubernetes-worker.config.created')
    restart_unit_services()
    update_kubelet_status()
    apply_node_labels()
    remove_state('kubernetes-worker.restart-needed')
cncf/cross-cloud
[ 164, 57, 164, 40, 1494451575 ]
def configure_cni(cni):
    ''' Set worker configuration on the CNI relation. This lets the CNI
    subordinate know that we're the worker so it can respond accordingly. '''
    # kubeconfig_path is the module-level path to the kubelet kubeconfig.
    cni.set_config(is_master=False, kubeconfig_path=kubeconfig_path)
cncf/cross-cloud
[ 164, 57, 164, 40, 1494451575 ]
def toggle_ingress_state():
    ''' Ingress is a toggled state. Remove ingress.available if set when
    toggled '''
    # Clearing the state forces the ingress handlers to re-evaluate the
    # (possibly changed) 'ingress' config on the next dispatch.
    remove_state('kubernetes-worker.ingress.available')
cncf/cross-cloud
[ 164, 57, 164, 40, 1494451575 ]
def sdn_changed():
    '''The Software Defined Network changed on the container so restart the
    kubernetes services.'''
    restart_unit_services()
    update_kubelet_status()
    # Clear the flag so the next SDN reconfiguration is detected again.
    remove_state('docker.sdn.configured')
cncf/cross-cloud
[ 164, 57, 164, 40, 1494451575 ]
def render_and_launch_ingress():
    ''' If configuration has ingress daemon set enabled, launch the ingress load
    balancer and default http backend. Otherwise attempt deletion. '''
    config = hookenv.config()

    # Enabled: hand off to the controller launcher and stop here.
    if config.get('ingress'):
        launch_default_ingress_controller()
        return

    # Disabled: tear down both addon manifests and close the web ports.
    hookenv.log('Deleting the http backend and ingress.')
    kubectl_manifest('delete', '/root/cdk/addons/default-http-backend.yaml')
    kubectl_manifest('delete', '/root/cdk/addons/ingress-daemon-set.yaml')  # noqa
    hookenv.close_port(80)
    hookenv.close_port(443)
cncf/cross-cloud
[ 164, 57, 164, 40, 1494451575 ]
def apply_node_labels(): ''' Parse the labels configuration option and apply the labels to the node. ''' # scrub and try to format an array from the configuration option config = hookenv.config() user_labels = _parse_labels(config.get('labels')) # For diffing sake, iterate the previous label set if config.previous('labels'): previous_labels = _parse_labels(config.previous('labels')) hookenv.log('previous labels: {}'.format(previous_labels)) else: # this handles first time run if there is no previous labels config previous_labels = _parse_labels("") # Calculate label removal for label in previous_labels: if label not in user_labels: hookenv.log('Deleting node label {}'.format(label)) _apply_node_label(label, delete=True) # if the label is in user labels we do nothing here, it will get set # during the atomic update below. # Atomically set a label for label in user_labels: _apply_node_label(label, overwrite=True) # Set label for application name _apply_node_label('juju-application={}'.format(hookenv.service_name()), overwrite=True)
cncf/cross-cloud
[ 164, 57, 164, 40, 1494451575 ]
def extra_args_changed():
    '''Schedule a worker service restart so new extra args take effect.'''
    set_state('kubernetes-worker.restart-needed')
cncf/cross-cloud
[ 164, 57, 164, 40, 1494451575 ]
def docker_logins_changed():
    '''Sync docker registry logins with the 'docker-logins' config option.

    Logs out of registries that were removed from the config, logs in to all
    currently configured registries, then schedules a service restart.
    '''
    config = hookenv.config()
    previous_logins = config.previous('docker-logins')
    logins = config['docker-logins']
    # Config is a JSON-encoded list of {server, username, password} dicts.
    logins = json.loads(logins)

    if previous_logins:
        previous_logins = json.loads(previous_logins)
        next_servers = {login['server'] for login in logins}
        previous_servers = {login['server'] for login in previous_logins}
        # Only log out of servers that disappeared from the config.
        servers_to_logout = previous_servers - next_servers
        for server in servers_to_logout:
            cmd = ['docker', 'logout', server]
            subprocess.check_call(cmd)

    for login in logins:
        server = login['server']
        username = login['username']
        password = login['password']
        # NOTE(review): '-p password' exposes the secret on the process
        # command line; 'docker login --password-stdin' would avoid that.
        cmd = ['docker', 'login', server, '-u', username, '-p', password]
        subprocess.check_call(cmd)

    set_state('kubernetes-worker.restart-needed')
cncf/cross-cloud
[ 164, 57, 164, 40, 1494451575 ]
def create_config(server, creds): '''Create a kubernetes configuration for the worker unit.''' # Get the options from the tls-client layer. layer_options = layer.options('tls-client') # Get all the paths to the tls information required for kubeconfig. ca = layer_options.get('ca_certificate_path') # Create kubernetes configuration in the default location for ubuntu. create_kubeconfig('/home/ubuntu/.kube/config', server, ca, token=creds['client_token'], user='ubuntu') # Make the config dir readable by the ubuntu users so juju scp works. cmd = ['chown', '-R', 'ubuntu:ubuntu', '/home/ubuntu/.kube'] check_call(cmd) # Create kubernetes configuration in the default location for root. create_kubeconfig(kubeclientconfig_path, server, ca, token=creds['client_token'], user='root') # Create kubernetes configuration for kubelet, and kube-proxy services. create_kubeconfig(kubeconfig_path, server, ca, token=creds['kubelet_token'], user='kubelet') create_kubeconfig(kubeproxyconfig_path, server, ca, token=creds['proxy_token'], user='kube-proxy')
cncf/cross-cloud
[ 164, 57, 164, 40, 1494451575 ]
def configure_kubernetes_service(service, base_args, extra_args_key):
    '''Apply snap configuration for a kubernetes service.

    Combines base_args with operator-supplied extra args, explicitly nulls
    out any args set on a previous run that are no longer present, runs
    `snap set`, and records the applied args for the next diff.
    '''
    db = unitdata.kv()

    prev_args_key = 'kubernetes-worker.prev_args.' + service
    prev_args = db.get(prev_args_key) or {}

    extra_args = parse_extra_args(extra_args_key)

    args = {}
    for arg in prev_args:
        # remove previous args by setting to null
        args[arg] = 'null'
    for k, v in base_args.items():
        args[k] = v
    for k, v in extra_args.items():
        args[k] = v

    cmd = ['snap', 'set', service] + ['%s=%s' % item for item in args.items()]
    check_call(cmd)

    db.set(prev_args_key, args)
cncf/cross-cloud
[ 164, 57, 164, 40, 1494451575 ]
def configure_kube_proxy(api_servers, cluster_cidr):
    '''Build kube-proxy snap options and apply them.'''
    kube_proxy_opts = {}
    kube_proxy_opts['cluster-cidr'] = cluster_cidr
    kube_proxy_opts['kubeconfig'] = kubeproxyconfig_path
    kube_proxy_opts['logtostderr'] = 'true'
    kube_proxy_opts['v'] = '0'
    # Pin this proxy to one randomly chosen API server.
    kube_proxy_opts['master'] = random.choice(api_servers)

    # In LXC containers conntrack limits can't be raised; disable the
    # per-core conntrack sizing. (shell=True is safe here: fixed command.)
    if b'lxc' in check_output('virt-what', shell=True):
        kube_proxy_opts['conntrack-max-per-core'] = '0'

    configure_kubernetes_service('kube-proxy', kube_proxy_opts,
                                 'proxy-extra-args')
cncf/cross-cloud
[ 164, 57, 164, 40, 1494451575 ]
def launch_default_ingress_controller():
    ''' Launch the Kubernetes ingress controller & default backend (404).

    Renders and applies both addon manifests; on any kubectl failure the
    ports are closed and the function returns so a later update can retry.
    '''
    context = {}
    context['arch'] = arch()
    addon_path = '/root/cdk/addons/{}'

    # s390x needs arch-specific images; default to the amd64 ones.
    context['defaultbackend_image'] = \
        "gcr.io/google_containers/defaultbackend:1.4"
    if arch() == 's390x':
        context['defaultbackend_image'] = \
            "gcr.io/google_containers/defaultbackend-s390x:1.4"

    # Render the default http backend (404) replicationcontroller manifest
    manifest = addon_path.format('default-http-backend.yaml')
    render('default-http-backend.yaml', manifest, context)
    hookenv.log('Creating the default http backend.')
    try:
        kubectl('apply', '-f', manifest)
    except CalledProcessError as e:
        hookenv.log(e)
        hookenv.log('Failed to create default-http-backend. Will attempt again next update.')  # noqa
        hookenv.close_port(80)
        hookenv.close_port(443)
        return

    # Render the ingress daemon set controller manifest
    context['ingress_image'] = \
        "gcr.io/google_containers/nginx-ingress-controller:0.9.0-beta.13"
    if arch() == 's390x':
        context['ingress_image'] = \
            "docker.io/cdkbot/nginx-ingress-controller-s390x:0.9.0-beta.13"
    context['juju_application'] = hookenv.service_name()
    manifest = addon_path.format('ingress-daemon-set.yaml')
    render('ingress-daemon-set.yaml', manifest, context)
    hookenv.log('Creating the ingress daemon set.')
    try:
        kubectl('apply', '-f', manifest)
    except CalledProcessError as e:
        hookenv.log(e)
        hookenv.log('Failed to create ingress controller. Will attempt again next update.')  # noqa
        hookenv.close_port(80)
        hookenv.close_port(443)
        return

    # Both addons applied: advertise availability and open the web ports.
    set_state('kubernetes-worker.ingress.available')
    hookenv.open_port(80)
    hookenv.open_port(443)
cncf/cross-cloud
[ 164, 57, 164, 40, 1494451575 ]
def get_kube_api_servers(kube_api):
    '''Return the kubernetes api server address and port for this
    relationship.

    Each host advertised by each related service becomes one
    'https://<hostname>:<port>' URL.
    '''
    return ['https://{0}:{1}'.format(unit['hostname'], unit['port'])
            for service in kube_api.services()
            for unit in service['hosts']]
cncf/cross-cloud
[ 164, 57, 164, 40, 1494451575 ]
def kubectl_success(*args):
    ''' Runs kubectl with the given args. Returns True if succesful, False if
    not. '''
    try:
        kubectl(*args)
    except CalledProcessError:
        # Non-zero exit from kubectl is reported as plain failure.
        return False
    return True
cncf/cross-cloud
[ 164, 57, 164, 40, 1494451575 ]
def initial_nrpe_config(nagios=None):
    '''One-time NRPE bootstrap: record that initial config ran, then write
    the checks via update_nrpe_config().'''
    set_state('nrpe-external-master.initial-config')
    update_nrpe_config(nagios)
cncf/cross-cloud
[ 164, 57, 164, 40, 1494451575 ]
def update_nrpe_config(unused=None):
    '''(Re)write nagios NRPE init-service checks for the worker daemons.'''
    # Systemd units to monitor.
    services = ('snap.kubelet.daemon', 'snap.kube-proxy.daemon')
    hostname = nrpe.get_nagios_hostname()
    current_unit = nrpe.get_nagios_unit_name()
    nrpe_setup = nrpe.NRPE(hostname=hostname)
    nrpe.add_init_service_checks(nrpe_setup, services, current_unit)
    nrpe_setup.write()
cncf/cross-cloud
[ 164, 57, 164, 40, 1494451575 ]
def remove_nrpe_config(nagios=None): remove_state('nrpe-external-master.initial-config') # List of systemd services for which the checks will be removed services = ('snap.kubelet.daemon', 'snap.kube-proxy.daemon') # The current nrpe-external-master interface doesn't handle a lot of logic, # use the charm-helpers code for now. hostname = nrpe.get_nagios_hostname() nrpe_setup = nrpe.NRPE(hostname=hostname) for service in services: nrpe_setup.remove_check(shortname=service)
cncf/cross-cloud
[ 164, 57, 164, 40, 1494451575 ]
def on_config_allow_privileged_change():
    """React to changed 'allow-privileged' config value.
    """
    set_state('kubernetes-worker.restart-needed')
    # Clear the config-changed flag manually so this handler does not refire.
    remove_state('config.changed.allow-privileged')
cncf/cross-cloud
[ 164, 57, 164, 40, 1494451575 ]
def enable_gpu():
    """Enable GPU usage on this node.

    Requires privileged containers; verifies the NVIDIA driver responds,
    labels the node, and schedules a kubelet restart.
    """
    config = hookenv.config()
    if config['allow-privileged'] == "false":
        hookenv.status_set(
            'active',
            'GPUs available. Set allow-privileged="auto" to enable.'
        )
        return

    hookenv.log('Enabling gpu mode')
    try:
        # Not sure why this is necessary, but if you don't run this, k8s will
        # think that the node has 0 gpus (as shown by the output of
        # `kubectl get nodes -o yaml`)
        check_call(['nvidia-smi'])
    except CalledProcessError as cpe:
        hookenv.log('Unable to communicate with the NVIDIA driver.')
        hookenv.log(cpe)
        return

    # Apply node labels
    _apply_node_label('gpu=true', overwrite=True)
    _apply_node_label('cuda=true', overwrite=True)

    set_state('kubernetes-worker.gpu.enabled')
    set_state('kubernetes-worker.restart-needed')
cncf/cross-cloud
[ 164, 57, 164, 40, 1494451575 ]
def disable_gpu():
    """Disable GPU usage on this node.

    This handler fires when we're running in gpu mode, and then the operator
    sets allow-privileged="false". Since we can no longer run privileged
    containers, we need to disable gpu mode.
    """
    hookenv.log('Disabling gpu mode')

    # Remove node labels
    _apply_node_label('gpu', delete=True)
    _apply_node_label('cuda', delete=True)

    remove_state('kubernetes-worker.gpu.enabled')
    set_state('kubernetes-worker.restart-needed')
cncf/cross-cloud
[ 164, 57, 164, 40, 1494451575 ]
def notify_master_gpu_enabled(kube_control):
    """Notify kubernetes-master that we're gpu-enabled.
    """
    kube_control.set_gpu(True)
cncf/cross-cloud
[ 164, 57, 164, 40, 1494451575 ]
def notify_master_gpu_not_enabled(kube_control):
    """Notify kubernetes-master that we're not gpu-enabled.
    """
    kube_control.set_gpu(False)
cncf/cross-cloud
[ 164, 57, 164, 40, 1494451575 ]
def request_kubelet_and_proxy_credentials(kube_control):
    """ Request kubelet node authorization with a well formed kubelet user.
    This also implies that we are requesting kube-proxy auth. """
    # The kube-control interface is created to support RBAC.
    # At this point we might as well do the right thing and return the
    # hostname even if it will only be used when we enable RBAC.
    kube_control.set_auth_request(
        'system:node:' + gethostname().lower())
cncf/cross-cloud
[ 164, 57, 164, 40, 1494451575 ]
def catch_change_in_creds(kube_control):
    """Request a service restart in case credential updates were detected."""
    nodeuser = 'system:node:{}'.format(gethostname().lower())
    creds = kube_control.get_auth_credentials(nodeuser)
    # Only act on creds that exist, actually changed (data_changed also
    # checkpoints the new value), and belong to this node's user.
    if creds \
            and data_changed('kube-control.creds', creds) \
            and creds['user'] == nodeuser:
        # We need to cache the credentials here because if the
        # master changes (master leader dies and replaced by a new one)
        # the new master will have no recollection of our certs.
        db.set('credentials', creds)
        set_state('worker.auth.bootstrapped')
        set_state('kubernetes-worker.restart-needed')
cncf/cross-cloud
[ 164, 57, 164, 40, 1494451575 ]
def missing_kube_control():
    """Inform the operator they need to add the kube-control relation.

    If deploying via bundle this won't happen, but if operator is upgrading
    a charm in a deployment that pre-dates the kube-control relation, it'll
    be missing.
    """
    hookenv.status_set(
        'blocked',
        'Relate {}:kube-control kubernetes-master:kube-control'.format(
            hookenv.service_name()))
cncf/cross-cloud
[ 164, 57, 164, 40, 1494451575 ]
def fix_iptables_for_docker_1_13():
    """Fix iptables FORWARD policy for Docker >=1.13
    https://github.com/kubernetes/kubernetes/issues/40182
    https://github.com/kubernetes/kubernetes/issues/39823
    """
    # '-w 300' waits up to 300s for the xtables lock instead of failing fast.
    cmd = ['iptables', '-w', '300', '-P', 'FORWARD', 'ACCEPT']
    check_call(cmd)
cncf/cross-cloud
[ 164, 57, 164, 40, 1494451575 ]
def get_node_name():
    '''Return this host's kubernetes node name, retrying for up to 60s.

    Raises GetNodeNameFailed when the API is unreachable for the whole
    window or no node matches this host's hostname.
    '''
    # Get all the nodes in the cluster
    cmd = 'kubectl --kubeconfig={} get no -o=json'.format(kubeconfig_path)
    cmd = cmd.split()
    deadline = time.time() + 60
    while time.time() < deadline:
        try:
            raw = check_output(cmd)
            break
        except CalledProcessError:
            hookenv.log('Failed to get node name for node %s.'
                        ' Will retry.' % (gethostname()))
            time.sleep(1)
    else:
        # while/else: the deadline expired without a successful kubectl call.
        msg = 'Failed to get node name for node %s' % gethostname()
        raise GetNodeNameFailed(msg)
    result = json.loads(raw.decode('utf-8'))
    if 'items' in result:
        for node in result['items']:
            if 'status' not in node:
                continue
            if 'addresses' not in node['status']:
                continue
            # find the hostname
            for address in node['status']['addresses']:
                if address['type'] == 'Hostname':
                    if address['address'] == gethostname():
                        return node['metadata']['name']
            # if we didn't match, just bail to the next node
            # NOTE(review): this `break` actually exits the *node* loop, so
            # only the first node with addresses is ever examined — confirm
            # whether that is intended.
            break
    msg = 'Failed to get node name for node %s' % gethostname()
    raise GetNodeNameFailed(msg)
cncf/cross-cloud
[ 164, 57, 164, 40, 1494451575 ]
def _apply_node_label(label, delete=False, overwrite=False):
    ''' Invoke kubectl to apply node label changes.

    Retries for up to 60s; raises ApplyNodeLabelFailed when the deadline
    passes without a zero exit code.
    '''
    nodename = get_node_name()

    # TODO: Make this part of the kubectl calls instead of a special string
    cmd_base = 'kubectl --kubeconfig={0} label node {1} {2}'

    if delete is True:
        # kubectl removes a label when the key is suffixed with '-'.
        label_key = label.split('=')[0]
        cmd = cmd_base.format(kubeconfig_path, nodename, label_key)
        cmd = cmd + '-'
    else:
        cmd = cmd_base.format(kubeconfig_path, nodename, label)
        if overwrite:
            cmd = '{} --overwrite'.format(cmd)

    cmd = cmd.split()

    deadline = time.time() + 60
    while time.time() < deadline:
        code = subprocess.call(cmd)
        if code == 0:
            break
        hookenv.log('Failed to apply label %s, exit code %d. Will retry.' % (
            label, code))
        time.sleep(1)
    else:
        msg = 'Failed to apply label %s' % label
        raise ApplyNodeLabelFailed(msg)
cncf/cross-cloud
[ 164, 57, 164, 40, 1494451575 ]
def mm(self, barparam):
    # NOTE(review): empty stub — appears to be parser/introspection test
    # fixture data; the docstring is the payload under test.
    ''' @param barparam: this is barparam '''
aptana/Pydev
[ 239, 85, 239, 6, 1250792405 ]
def test_load_tsv_file(self):
    """load_tsv_file should parse the checked-in fake TSV into TSV_CONTENTS."""
    self.assertEqual(
        self.TSV_CONTENTS,
        generator_utils.load_tsv_file(
            os.path.join(
                SRC_DIR,
                "tools/traffic_annotation/scripts/test_data/fake_annotations.tsv"),
            False))
chromium/chromium
[ 14247, 5365, 14247, 62, 1517864132 ]
def test_xml_parser_build_placeholders(self):
    """XMLParser on the fixture grouping file should yield PLACEHOLDERS."""
    xml_parser = generator_utils.XMLParser(
        os.path.join(TESTS_DIR, "fake_grouping.xml"), self.ANNOTATIONS_MAPPING)
    self.assertEqual(self.PLACEHOLDERS, xml_parser.build_placeholders())
chromium/chromium
[ 14247, 5365, 14247, 62, 1517864132 ]
def test_find_last_index(self):
    """find_last_index should report the last content index of DOC_JSON."""
    last_index = generator_utils.find_last_index(self.DOC_JSON)
    self.assertEqual(2066, last_index)
chromium/chromium
[ 14247, 5365, 14247, 62, 1517864132 ]
def test_find_bold_ranges(self):
    """find_bold_ranges should extract every (start, end) bold span,
    compared order-insensitively."""
    expected_bold_ranges = [(1843, 1855), (1859, 1867), (1871, 1876),
                            (1880, 1889), (1893, 1900), (1918, 1930),
                            (1934, 1942), (1968, 1975), (1946, 1951),
                            (1955, 1964), (2001, 2013), (2017, 2025),
                            (2029, 2034), (2038, 2047), (2051, 2058)]
    bold_ranges = generator_utils.find_bold_ranges(self.DOC_JSON)
    # assertItemsEqual existed only in Python 2's unittest; assertCountEqual
    # is the same order-insensitive comparison on Python 3.
    self.assertCountEqual(expected_bold_ranges, bold_ranges)
chromium/chromium
[ 14247, 5365, 14247, 62, 1517864132 ]
def get_current_request_hostname():
    """
    Return the hostname (HTTP_HOST header) used in the current Django
    request, or None when no request is in flight.
    """
    request = get_current_request()
    if not request:
        return None
    return request.META.get('HTTP_HOST')
edx/edx-platform
[ 6290, 3437, 6290, 280, 1369945238 ]
def syntax(self):
    """Return the usage string advertised for this command's options."""
    return "[-v]"
openhatch/oh-mainline
[ 238, 323, 238, 203, 1321317235 ]
def add_options(self, parser):
    """Register this command's CLI options, extending the base options."""
    ScrapyCommand.add_options(self, parser)
    parser.add_option("--verbose", "-v", dest="verbose", action="store_true",
        help="also display twisted/python/platform info (useful for bug reports)")
openhatch/oh-mainline
[ 238, 323, 238, 203, 1321317235 ]
def regions():
    """
    Get all available regions for the AWS CloudHSM service.

    :rtype: list
    :return: A list of :class:`boto.regioninfo.RegionInfo`
    """
    # Imported here, not at module scope — presumably to avoid a circular
    # import between the service module and layer1; verify before hoisting.
    from boto.cloudhsm.layer1 import CloudHSMConnection
    return get_regions('cloudhsm', connection_cls=CloudHSMConnection)
catapult-project/catapult
[ 1835, 570, 1835, 1039, 1429033745 ]
def __init__(self, name="", point=None, attributes=None, **kwargs):
    """Initialize a Location with a name, optional point and attributes.

    Extra keyword arguments are merged into the attributes dict.
    """
    self.name = name
    # Always define self.point: the original only assigned it when a point
    # was supplied, so __repr__/__eq__/__getitem__ raised AttributeError on
    # point-less locations.
    self.point = Point(point) if point is not None else None
    if attributes is None:
        attributes = {}
    self.attributes = dict(attributes, **kwargs)
golismero/golismero
[ 814, 275, 814, 29, 1375181550 ]
def __getitem__(self, index):
    """Backwards compatibility with geopy 0.93 tuples."""
    pair = (self.name, self.point)
    return pair[index]
golismero/golismero
[ 814, 275, 814, 29, 1375181550 ]
def __repr__(self):
    """Unambiguous representation mirroring the constructor call."""
    return "Location({0!r}, {1!r})".format(self.name, self.point)
golismero/golismero
[ 814, 275, 814, 29, 1375181550 ]
def __iter__(self):
    """Iterate tuple-style over (name, point)."""
    pair = (self.name, self.point)
    return iter(pair)
golismero/golismero
[ 814, 275, 814, 29, 1375181550 ]
def __eq__(self, other):
    """Two locations are equal when both name and point match."""
    return self.name == other.name and self.point == other.point
golismero/golismero
[ 814, 275, 814, 29, 1375181550 ]
def __ne__(self, other):
    """Inverse of __eq__."""
    return not (self.name == other.name and self.point == other.point)
golismero/golismero
[ 814, 275, 814, 29, 1375181550 ]
def mknet(mkbn=lambda chan: df.BatchNormalization(chan, 0.95)):
    '''Build the re-id embedding CNN for 128x48 inputs.

    mkbn: factory producing a batch-norm layer for a given channel count,
    threaded through to every residual block. Returns a df.Sequential with
    `emb_mod`, `in_shape` and `scale_factor` attributes attached.
    '''
    kw = dict(mkbn=mkbn)
    net = df.Sequential(                                    # -> 128x48
        df.SpatialConvolutionCUDNN(3, 64, (7,7), border='same', bias=None),
        dfext.resblock(64, **kw),
        df.PoolingCUDNN((2,2)),  # -> 64x24
        dfext.resblock(64, **kw),
        dfext.resblock(64, **kw),
        dfext.resblock(64, 96, **kw),
        df.PoolingCUDNN((2,2)),  # -> 32x12
        dfext.resblock(96, **kw),
        dfext.resblock(96, **kw),
        df.PoolingCUDNN((2,2)),  # -> 16x6
        dfext.resblock(96, **kw),
        dfext.resblock(96, **kw),
        dfext.resblock(96, 128, **kw),
        df.PoolingCUDNN((2,2)),  # -> 8x3
        dfext.resblock(128, **kw),
        dfext.resblock(128, **kw),
        df.PoolingCUDNN((2,3)),  # -> 4x1
        dfext.resblock(128, **kw),
        # Eq. to flatten + linear
        df.SpatialConvolutionCUDNN(128, 256, (4,1), bias=None),
        mkbn(256),
        df.ReLU(),
        df.StoreOut(df.SpatialConvolutionCUDNN(256, 128, (1,1)))
    )

    # Expose the final (embedding) module and the geometry metadata callers
    # use to map embeddings back to input coordinates.
    net.emb_mod = net[-1]
    net.in_shape = (128, 48)
    net.scale_factor = (2*2*2*2*2, 2*2*2*2*3)

    print("Net has {:.2f}M params".format(df.utils.count_params(net)/1000/1000), flush=True)

    return net
VisualComputingInstitute/towards-reid-tracking
[ 82, 18, 82, 1, 1494600159 ]
def __init__(self, logdir):
    """Set up plugin state and seed the on-disk config on first run.

    logdir: the tensorboard log directory; plugin files live under
    <logdir>/plugins/<PLUGIN_NAME>.
    """
    self.PLUGIN_LOGDIR = logdir + '/plugins/' + PLUGIN_NAME

    self.is_recording = False
    self.video_writer = video_writing.VideoWriter(
        self.PLUGIN_LOGDIR,
        outputs=[video_writing.FFmpegVideoOutput, video_writing.PNGVideoOutput])

    self.last_image_shape = []
    self.last_update_time = time.time()
    self.config_last_modified_time = -1
    self.previous_config = dict(DEFAULT_CONFIG)

    # Seed the default config the first time through. Guard makedirs
    # separately: the directory may already exist without a config file
    # (the original called makedirs unconditionally here and crashed with
    # FileExistsError in that case).
    if not os.path.exists(self.PLUGIN_LOGDIR + '/config.pkl'):
        if not os.path.exists(self.PLUGIN_LOGDIR):
            os.makedirs(self.PLUGIN_LOGDIR)
        write_pickle(DEFAULT_CONFIG,
                     '{}/{}'.format(self.PLUGIN_LOGDIR, CONFIG_FILENAME))

    # self.visualizer = Visualizer(self.PLUGIN_LOGDIR)
lanpa/tensorboardX
[ 7545, 867, 7545, 76, 1497362059 ]
def _write_summary(self, frame):
    '''Writes the frame to disk as a tensor summary.'''
    path = '{}/{}'.format(self.PLUGIN_LOGDIR, SUMMARY_FILENAME)
    smd = SummaryMetadata()
    # Flatten the frame into a float tensor proto carrying its shape.
    # NOTE(review): assumes frame is a 3-D array (H, W, C) — confirm callers.
    tensor = TensorProto(
        dtype='DT_FLOAT',
        float_val=frame.reshape(-1).tolist(),
        tensor_shape=TensorShapeProto(
            dim=[TensorShapeProto.Dim(size=frame.shape[0]),
                 TensorShapeProto.Dim(size=frame.shape[1]),
                 TensorShapeProto.Dim(size=frame.shape[2])]
        )
    )
    summary = Summary(value=[Summary.Value(
        tag=TAG_NAME, metadata=smd, tensor=tensor)]).SerializeToString()
    write_file(summary, path)
lanpa/tensorboardX
[ 7545, 867, 7545, 76, 1497362059 ]
def stats(tensor_and_name):
    '''Summarize each (image, name) pair as a dict of display statistics.

    Numeric statistics are stringified so the result is JSON-friendly.
    '''
    def describe(img, name):
        lo, hi = img.min(), img.max()
        return {
            'height': img.shape[0],
            'max': str(hi),
            'mean': str(img.mean()),
            'min': str(lo),
            'name': name,
            'range': str(hi - lo),
            'shape': str((img.shape[1], img.shape[2])),
        }

    return [describe(img, name) for (img, name) in tensor_and_name]
lanpa/tensorboardX
[ 7545, 867, 7545, 76, 1497362059 ]
def _enough_time_has_passed(self, FPS):
    '''For limiting how often frames are computed.

    FPS == 0 means throttled entirely (never enough time).
    '''
    if FPS == 0:
        return False
    earliest_time = self.last_update_time + (1.0 / FPS)
    return time.time() >= earliest_time
lanpa/tensorboardX
[ 7545, 867, 7545, 76, 1497362059 ]
def _update_recording(self, frame, config):
    '''Adds a frame to the current video output.

    Transitions recording state based on config['is_recording'], finishing
    the writer when recording stops.
    '''
    # pylint: disable=redefined-variable-type
    should_record = config['is_recording']
    if should_record:
        if not self.is_recording:
            self.is_recording = True
            # The original passed logging-style '%s' args to print(), which
            # printed the literal format string and the name as two fields;
            # format the message properly instead.
            print('Starting recording using %s'
                  % self.video_writer.current_output().name())
        self.video_writer.write_frame(frame)
    elif self.is_recording:
        self.is_recording = False
        self.video_writer.finish()
        print('Finished recording')
lanpa/tensorboardX
[ 7545, 867, 7545, 76, 1497362059 ]
def update(self, trainable=None, arrays=None, frame=None):
    '''Creates a frame and writes it to disk.

    Args:
      trainable: a list of namedtuple (tensors, name).
      arrays: a list of namedtuple (tensors, name).
      frame: an explicit frame to visualize instead of deriving one.
    '''
    new_config = self._get_config()
    # NOTE(review): 'True or ...' disables the FPS throttle entirely — looks
    # like a debugging leftover; removing it would re-enable rate limiting,
    # so it is only flagged here, not changed.
    if True or self._enough_time_has_passed(self.previous_config['FPS']):
        # self.visualizer.update(new_config)
        self.last_update_time = time.time()
        final_image = self._update_frame(trainable, arrays, frame, new_config)
        self._update_recording(final_image, new_config)
lanpa/tensorboardX
[ 7545, 867, 7545, 76, 1497362059 ]
def _compute_scheduled_date(self):
    # Compute scheduled_date for the custom "transferring_*" interval types;
    # all other interval types fall back to the standard implementation.
    for rself in self:
        if rself.interval_type not in [
            "transferring_started",
            "transferring_finished",
        ]:
            # NOTE(review): returning from inside the loop hands the WHOLE
            # recordset to super and skips any remaining records in this
            # loop — confirm this is intended for mixed-type recordsets.
            return super(EventMailScheduler, rself)._compute_scheduled_date()
        if rself.event_id.state not in ["confirm", "done"]:
            rself.scheduled_date = False
        else:
            # Anchor on the event's creation date plus the configured
            # interval (sign is always +1 here).
            date, sign = rself.event_id.create_date, 1
            rself.scheduled_date = datetime.strptime(
                date, tools.DEFAULT_SERVER_DATETIME_FORMAT
            ) + _INTERVALS[rself.interval_unit](sign * rself.interval_nbr)
it-projects-llc/website-addons
[ 122, 304, 122, 13, 1431434015 ]
def findLadders(self, beginWord, endWord, wordlist):
    """
    :type beginWord: str
    :type endWord: str
    :type wordlist: Set[str]
    :rtype: List[List[str]]
    """
    # NOTE(review): the original docstring declared List[List[int]], but the
    # sibling helpers (bfs/add_path/construct_path) build paths of words.
gavinfish/leetcode-share
[ 181, 133, 181, 1, 1449815478 ]
def bfs(front_level, end_level, is_forward, word_set, path_dic):
    '''One level of bidirectional BFS between two frontiers.

    Expands the smaller frontier (swapping direction when needed), records
    parent->child edges into path_dic via add_path, and returns True once
    the two frontiers meet.
    '''
    if len(front_level) == 0:
        return False
    # Always expand the smaller frontier; flip is_forward so recorded edges
    # keep a consistent orientation in path_dic.
    if len(front_level) > len(end_level):
        return bfs(end_level, front_level, not is_forward, word_set, path_dic)
    # Remove both frontiers from the candidate pool so no path revisits them.
    for word in (front_level | end_level):
        word_set.discard(word)
    next_level = set()
    done = False
    while front_level:
        word = front_level.pop()
        for c in 'abcdefghijklmnopqrstuvwxyz':
            for i in range(len(word)):
                new_word = word[:i] + c + word[i + 1:]
                if new_word in end_level:
                    # Frontiers touched — finish this whole level so every
                    # shortest path is recorded, but don't expand further.
                    done = True
                    add_path(word, new_word, is_forward, path_dic)
                else:
                    if new_word in word_set:
                        next_level.add(new_word)
                        add_path(word, new_word, is_forward, path_dic)
    return done or bfs(next_level, end_level, is_forward, word_set, path_dic)
gavinfish/leetcode-share
[ 181, 133, 181, 1, 1449815478 ]
def add_path(word, new_word, is_forward, path_dic):
    '''Record one BFS edge in path_dic, always in forward orientation.

    When the search is running backwards, the edge is stored reversed so
    path_dic consistently maps predecessor -> successors.
    '''
    src, dst = (word, new_word) if is_forward else (new_word, word)
    path_dic[src] = path_dic.get(src, []) + [dst]
gavinfish/leetcode-share
[ 181, 133, 181, 1, 1449815478 ]
def construct_path(word, end_word, path_dic, path, paths):
    '''Depth-first walk of path_dic from word, collecting every complete
    path ending at end_word into paths.'''
    if word == end_word:
        paths.append(path)
        return
    for successor in path_dic.get(word, []):
        construct_path(successor, end_word, path_dic, path + [successor], paths)
gavinfish/leetcode-share
[ 181, 133, 181, 1, 1449815478 ]
def _prepare_docstring(s, ignore=1):
    # type: (unicode, int) -> List[unicode]
    """Convert a docstring into lines of parseable reST.  Remove common leading
    indentation, where the indentation of a given number of lines (usually
    just one) is ignored.

    Return the docstring as a list of lines usable for inserting into a
    docutils ViewList (used as argument of nested_parse().)  An empty line
    is added to act as a separator between this docstring and following
    content.
    """
    lines = s.expandtabs().splitlines()
    # Find minimum indentation of any non-blank lines after ignored lines.
    margin = sys.maxsize
    for line in lines[ignore:]:
        content = len(line.lstrip())
        if content:
            indent = len(line) - content
            margin = min(margin, indent)
    # Remove indentation from ignored lines.
    for i in range(ignore):
        if i < len(lines):
            lines[i] = lines[i].lstrip()
    # margin stays at maxsize when every measured line was blank — then
    # there is nothing to dedent.
    if margin < sys.maxsize:
        for i in range(ignore, len(lines)):
            lines[i] = lines[i][margin:]
    # Remove any leading blank lines.
    while lines and not lines[0]:
        lines.pop(0)
    # make sure there is an empty line at the end
    if lines and lines[-1]:
        lines.append('')
    return lines
ajbouh/tfi
[ 154, 12, 154, 3, 1505258255 ]
def __init__(self, docstring=None, what='', name='', obj=None, options=None):
    """Initialize the docstring parser and run the parse.

    Either `docstring` or `obj` (whose __doc__ is used) must be given.
    `what` is inferred from `obj` when not supplied.

    Raises:
        ValueError: if both docstring and obj are None.
    """
    if not what:
        if inspect.isclass(obj):
            what = 'class'
        elif inspect.ismodule(obj):
            what = 'module'
        elif callable(obj):
            # The original tested isinstance(obj, collections.Callable),
            # which was removed from the collections namespace in
            # Python 3.10; callable() is the equivalent check.
            what = 'function'
        else:
            what = 'object'
    if docstring is None:
        if obj is None:
            # The original raised a bare string, which is a TypeError on
            # Python 3; raise a real exception with the intended message.
            raise ValueError("If docstring is None, obj may not be")
        docstring = obj.__doc__
    self._what = what
    self._name = name
    self._obj = obj
    if isinstance(docstring, str):
        docstring = _prepare_docstring(docstring)
    self._lines = docstring
    # Peekable iterator over right-stripped docstring lines.
    self._line_iter = modify_iter(docstring, modifier=lambda s: s.rstrip())
    self._parsed_lines = []  # type: List[unicode]
    self._is_in_section = False
    self._section_indent = 0
    self._directive_sections = []  # type: List[unicode]
    # Sections whose entries are collected into _parsed_dicts ...
    self._entry_sections = {
        'args': self._parse_fields_section,
        'attributes': self._parse_fields_section,
        'returns': self._parse_fields_section,
        'yields': self._parse_fields_section,
        'example args': self._parse_fields_section,
    }  # type: Dict[unicode, Callable]
    # ... versus free-form sections kept as plain line lists.
    self._freeform_sections = {
        'example': self._parse_generic_section,
        'examples': self._parse_generic_section,
        'example returns': self._parse_generic_section,
        'note': self._parse_generic_section,
        'references': self._parse_generic_section,
        'see also': self._parse_generic_section,
        'todo': self._parse_generic_section,
    }  # type: Dict[unicode, Callable]
    self._sections = {
        name: value
        for name, value in [*self._entry_sections.items(),
                            *self._freeform_sections.items()]
    }
    self._parsed_dicts = {
        name: [] for name in self._entry_sections.keys()
    }
    self._parse()
ajbouh/tfi
[ 154, 12, 154, 3, 1505258255 ]
def result(self):
    # type: () -> Dict[unicode, Any]
    """Return the parse result.

    Returns
    -------
    dict
        'sections' maps to the parsed docstring lines; additionally one key
        per entry section ('args', 'attributes', 'returns', 'yields',
        'example args') maps to that section's parsed entries.
    """
    return {'sections': self._parsed_lines, **self._parsed_dicts}
ajbouh/tfi
[ 154, 12, 154, 3, 1505258255 ]
def _consume_contiguous(self):
    # type: () -> List[unicode]
    """Consume and return consecutive non-empty, non-section-header lines."""
    lines = []
    while (self._line_iter.has_next() and
           self._line_iter.peek() and
           not self._is_section_header()):
        lines.append(next(self._line_iter))  # type: ignore
    return lines
ajbouh/tfi
[ 154, 12, 154, 3, 1505258255 ]
def _consume_field(self, parse_type=True, prefer_type=False):
    # type: (bool, bool) -> Tuple[unicode, unicode, List[unicode]]
    """Consume one 'name (type): description' field plus its indented body.

    With prefer_type=True a lone token is treated as the type rather than
    the name (e.g. entries in a Raises section).
    """
    line = next(self._line_iter)  # type: ignore

    before, colon, after = self._partition_field_on_colon(line)
    _name, _type, _desc = before, '', after  # type: unicode, unicode, unicode

    if parse_type:
        match = _google_typed_arg_regex.match(before)  # type: ignore
        if match:
            _name = match.group(1)
            _type = match.group(2)

    # Escape leading */** so reST does not treat them as markup.
    _name = self._escape_args_and_kwargs(_name)

    if prefer_type and not _type:
        _type, _name = _name, _type
    # The description continues on lines indented past the field line.
    indent = self._get_indent(line) + 1
    _descs = [_desc] + self._dedent(self._consume_indented_block(indent))
    return _name, _type, _descs
ajbouh/tfi
[ 154, 12, 154, 3, 1505258255 ]
def _consume_section_header(self):
    # type: () -> unicode
    """Consume a section header line.

    Surrounding ':' is stripped only when the result names a known section,
    so unrecognized ':directive:' lines pass through untouched.
    """
    section = next(self._line_iter)  # type: ignore
    stripped_section = section.strip(':')
    if stripped_section.lower() in self._sections:
        section = stripped_section
    return section
ajbouh/tfi
[ 154, 12, 154, 3, 1505258255 ]
def _consume_to_next_section(self):
    # type: () -> List[unicode]
    """Consume and return all lines up to the next section break."""
    self._consume_empty()
    lines = []
    while not self._is_section_break():
        lines.append(next(self._line_iter))  # type: ignore
    # Trailing blank lines belong to this section as well.
    return lines + self._consume_empty()
ajbouh/tfi
[ 154, 12, 154, 3, 1505258255 ]
def _escape_args_and_kwargs(self, name):
    # type: (unicode) -> unicode
    """Escape a leading '*' or '**' so reST does not parse it as markup."""
    if name.startswith('**'):
        return r'\*\*' + name[2:]
    if name.startswith('*'):
        return r'\*' + name[1:]
    return name
ajbouh/tfi
[ 154, 12, 154, 3, 1505258255 ]
def _get_current_indent(self, peek_ahead=0):
    # type: (int) -> int
    """Return the indent of the next non-empty line without consuming it.

    Peeks progressively further ahead, skipping empty lines, until a
    non-empty line or the iterator's sentinel (end of input) is seen;
    returns 0 at end of input.
    """
    line = self._line_iter.peek(peek_ahead + 1)[peek_ahead]
    while line != self._line_iter.sentinel:
        if line:
            return self._get_indent(line)
        peek_ahead += 1
        line = self._line_iter.peek(peek_ahead + 1)[peek_ahead]
    return 0
ajbouh/tfi
[ 154, 12, 154, 3, 1505258255 ]
def _get_initial_indent(self, lines):
    # type: (List[unicode]) -> int
    """Return the indent of the first non-empty line, or 0 if none."""
    for candidate in lines:
        if not candidate:
            continue
        return self._get_indent(candidate)
    return 0
ajbouh/tfi
[ 154, 12, 154, 3, 1505258255 ]
def _indent(self, lines, n=4):
    # type: (List[unicode], int) -> List[unicode]
    """Return *lines* with each line prefixed by *n* spaces."""
    prefix = ' ' * n
    result = []
    for line in lines:
        result.append(prefix + line)
    return result
ajbouh/tfi
[ 154, 12, 154, 3, 1505258255 ]
def _is_list(self, lines):
    # type: (List[unicode]) -> bool
    """Heuristically decide whether *lines* start a reST list.

    True for an explicit bullet or enumerated marker, or when the next
    non-empty line hangs deeper than the first (hanging indent), except
    after a literal-block marker '::'.
    """
    if not lines:
        return False
    if _bullet_list_regex.match(lines[0]):  # type: ignore
        return True
    if _enumerated_list_regex.match(lines[0]):  # type: ignore
        return True
    # A single line, or a line opening a literal block, is not a list.
    if len(lines) < 2 or lines[0].endswith('::'):
        return False
    # Otherwise treat "next non-empty line indented deeper" as a list.
    indent = self._get_indent(lines[0])
    next_indent = indent
    for line in lines[1:]:
        if line:
            next_indent = self._get_indent(line)
            break
    return next_indent > indent
ajbouh/tfi
[ 154, 12, 154, 3, 1505258255 ]
def _is_section_break(self):
    # type: () -> bool
    """Return True when the current position ends the running section.

    Breaks at end of input, at a new section header, or -- while inside
    a section -- at a non-empty line no longer indented to the
    section's level.
    """
    line = self._line_iter.peek()
    return (not self._line_iter.has_next() or
            self._is_section_header() or
            (self._is_in_section and
                line and
                not self._is_indented(line, self._section_indent)))
ajbouh/tfi
[ 154, 12, 154, 3, 1505258255 ]
def _parse_fields_section(self):
    # type: () -> List[Tuple[unicode, unicode, List[unicode]]]
    """Parse an entry section (Args/Attributes/Returns/Yields) into
    (name, type, description-lines) tuples.

    NOTE(review): the original inline type comment suggested a
    section-name argument; this def takes none -- confirm how the
    dispatcher in ``self._sections`` invokes it.
    """
    fields = self._consume_fields()
    lines = []
    for _name, _type, _desc in fields:
        # Drop leading/trailing blank description lines; normalise the
        # remainder only when something is left.
        _desc = self._strip_empty(_desc)
        if any(_desc):
            _desc = self._fix_field_desc(_desc)
        lines.append((_name, _type, _desc))
    return lines
ajbouh/tfi
[ 154, 12, 154, 3, 1505258255 ]
def _partition_field_on_colon(self, line):
    # type: (unicode) -> Tuple[unicode, unicode, unicode]
    """Split *line* at the first colon that is not inside an xref.

    The line is first split on cross-reference markup; odd-indexed
    pieces are the xrefs themselves (e.g. ':obj:`...`'), whose colons
    must not count as the field separator.  Returns the stripped
    (before, colon, after) triple.
    """
    before_colon = []
    after_colon = []
    colon = ''
    found_colon = False
    for i, source in enumerate(_xref_regex.split(line)):  # type: ignore
        if found_colon:
            after_colon.append(source)
        else:
            m = _single_colon_regex.search(source)
            # Even indices are plain text between xrefs; only their
            # colons may terminate the field name.
            if (i % 2) == 0 and m:
                found_colon = True
                colon = source[m.start():m.end()]
                before_colon.append(source[:m.start()])
                after_colon.append(source[m.end():])
            else:
                before_colon.append(source)
    return ("".join(before_colon).strip(),
            colon,
            "".join(after_colon).strip())
ajbouh/tfi
[ 154, 12, 154, 3, 1505258255 ]
def __init__(self):
    """Wire up the Mattermost client, the plugin registry, and the
    message dispatcher from the bot settings."""
    client = MattermostClient(
        bot_settings.BOT_URL,
        bot_settings.BOT_TEAM,
        bot_settings.BOT_LOGIN,
        bot_settings.BOT_PASSWORD,
        bot_settings.SSL_VERIFY,
    )
    plugins = PluginsManager()
    plugins.init_plugins()
    self._client = client
    self._plugins = plugins
    self._dispatcher = MessageDispatcher(client, plugins)
seLain/mattermost_bot
[ 5, 1, 5, 7, 1505455582 ]
def update(levels):
    # Trackbar/redraw callback: repaint the contour visualisation for the
    # selected level.
    # NOTE(review): relies on module-level cvImg, contours and hierarchy
    # being defined before the first callback -- confirm at call site.
    vis = np.zeros((cvImg.height, cvImg.width, 3), np.uint8)
    levels = levels - 3
    # (-1, 3)[levels <= 0] indexes a tuple with a bool: contourIdx is 3
    # when levels <= 0, else -1 (draw all contours).  abs(levels) is the
    # maxLevel of the hierarchy to draw.
    # NOTE(review): cv2.CV_AA is a legacy OpenCV 2.x constant -- confirm
    # the installed cv2 version still exposes it.
    cv2.drawContours(
        vis, contours, (-1, 3)[levels <= 0], (128, 255, 255),
        3, cv2.CV_AA, hierarchy, abs(levels)
    )
    cv2.imshow('contours', vis)
hasadna/OpenPress
[ 2, 8, 2, 21, 1383756546 ]
def init_app(app):
    """Configuration hook; the base configuration needs no
    app-specific setup, so this is a no-op."""
    return None
miguelgrinberg/flasky
[ 8080, 4112, 8080, 19, 1387863492 ]
def init_app(cls, app):
    """Production setup: email application errors to the administrators.

    Derives SMTP credentials and TLS settings from the class
    configuration, then registers an ERROR-level SMTPHandler on the
    Flask app logger.
    """
    Config.init_app(app)

    import logging
    from logging.handlers import SMTPHandler

    username = getattr(cls, 'MAIL_USERNAME', None)
    credentials = None if username is None else (cls.MAIL_USERNAME,
                                                 cls.MAIL_PASSWORD)
    secure = () if getattr(cls, 'MAIL_USE_TLS', None) else None

    mail_handler = SMTPHandler(
        mailhost=(cls.MAIL_SERVER, cls.MAIL_PORT),
        fromaddr=cls.FLASKY_MAIL_SENDER,
        toaddrs=[cls.FLASKY_ADMIN],
        subject=cls.FLASKY_MAIL_SUBJECT_PREFIX + ' Application Error',
        credentials=credentials,
        secure=secure)
    mail_handler.setLevel(logging.ERROR)
    app.logger.addHandler(mail_handler)
miguelgrinberg/flasky
[ 8080, 4112, 8080, 19, 1387863492 ]
def init_app(cls, app):
    """Heroku setup: trust reverse-proxy headers and log to stderr."""
    ProductionConfig.init_app(app)

    # werkzeug moved ProxyFix in 1.0; support both import locations.
    try:
        from werkzeug.middleware.proxy_fix import ProxyFix
    except ImportError:
        from werkzeug.contrib.fixers import ProxyFix
    app.wsgi_app = ProxyFix(app.wsgi_app)

    import logging
    from logging import StreamHandler
    stderr_handler = StreamHandler()
    stderr_handler.setLevel(logging.INFO)
    app.logger.addHandler(stderr_handler)
miguelgrinberg/flasky
[ 8080, 4112, 8080, 19, 1387863492 ]
def init_app(cls, app):
    """Docker setup: send INFO-level log output to stderr."""
    ProductionConfig.init_app(app)

    import logging
    from logging import StreamHandler
    stderr_handler = StreamHandler()
    stderr_handler.setLevel(logging.INFO)
    app.logger.addHandler(stderr_handler)
miguelgrinberg/flasky
[ 8080, 4112, 8080, 19, 1387863492 ]
def init_app(cls, app):
    """Unix setup: route INFO-level log output to the local syslog."""
    ProductionConfig.init_app(app)

    import logging
    from logging.handlers import SysLogHandler
    handler = SysLogHandler()
    handler.setLevel(logging.INFO)
    app.logger.addHandler(handler)
miguelgrinberg/flasky
[ 8080, 4112, 8080, 19, 1387863492 ]
def run_app(uri, type, parameter, **kwargs):
    """Load the data object at *uri* and display it in a Qt window.

    None-valued keyword arguments are discarded before being handed to
    the SQL-transform step.
    """
    cleaned = {}
    for key, value in kwargs.items():
        if value is not None:
            cleaned[key] = value
    data_object = try_load(uri, type, parameters=parameter)
    data_object = try_apply_sql(data_object, cleaned)
    with qt_app():
        from boadata.gui.qt import DataObjectWindow
        window = DataObjectWindow(data_object)
        window.show()
        window.setWindowTitle(data_object.uri)
janpipek/boadata
[ 3, 1, 3, 10, 1410266406 ]
def higgs_signalstrength(wc_obj, par, name_prod, name_dec):
    """Higgs signal strength: production modifier times decay modifier,
    normalised by the total Higgs width."""
    scale = flavio.config['renormalization scale']['hdecays']
    C = wc_obj.get_wcxf(sector='all', scale=scale, par=par,
                        eft='SMEFT', basis='Warsaw')
    prod_factor = getattr(production, name_prod)(C)
    dec_factor = getattr(decay, name_dec)(C)
    total_width = width.Gamma_h(par, C)
    return prod_factor * dec_factor / total_width
flav-io/flavio
[ 66, 57, 66, 23, 1453802592 ]
def obs_fct(wc_obj, par):
    # Thin closure adapting higgs_signalstrength to the observable
    # prediction signature (wc_obj, par).
    # NOTE(review): name_prod / name_dec are captured from the enclosing
    # scope; if this def sits inside a loop, late binding would make
    # every obs_fct use the last loop values -- confirm the enclosing
    # definition site.
    return higgs_signalstrength(wc_obj, par, name_prod, name_dec)
flav-io/flavio
[ 66, 57, 66, 23, 1453802592 ]
def __init__( self, plotly_name="color", parent_name="surface.hoverlabel.font", **kwargs
plotly/plotly.py
[ 13052, 2308, 13052, 1319, 1385013188 ]
def clear(self) -> None:
    """Discard all buffered data.

    Abstract: concrete feature implementations must override this.
    """
    message = "implement in derived class"
    raise NotImplementedError(message)
ap--/python-seabreeze
[ 172, 72, 172, 35, 1413502221 ]
def get_number_of_elements(self) -> int:
    """Return the number of elements currently in the buffer.

    Abstract: concrete feature implementations must override this.
    """
    message = "implement in derived class"
    raise NotImplementedError(message)
ap--/python-seabreeze
[ 172, 72, 172, 35, 1413502221 ]
def set_buffer_capacity(self, capacity: int) -> None:
    """Resize the buffer to hold *capacity* elements.

    Abstract: concrete feature implementations must override this.
    """
    message = "implement in derived class"
    raise NotImplementedError(message)
ap--/python-seabreeze
[ 172, 72, 172, 35, 1413502221 ]
def __unicode__(self):
    """Human-readable representation of this object: its name."""
    display_name = self.name
    return display_name
bootcamptropa/django
[ 7, 6, 7, 1, 1447057534 ]
def setUp(self):
    """Run before each test: suppress all warnings for readable output.

    NOTE(review): simplefilter mutates the process-wide warning filter
    and is not restored in a tearDown here.
    """
    # There will be a lot of warnings in the feature calculators.
    # Just ignore all of them in these tests
    warnings.simplefilter("ignore")
blue-yonder/tsfresh
[ 7135, 1120, 7135, 61, 1477481357 ]
def assertIsNaN(self, result): self.assertTrue(np.isnan(result), msg="{} is not np.NaN")
blue-yonder/tsfresh
[ 7135, 1120, 7135, 61, 1477481357 ]
def assertTrueOnAllArrayTypes(self, f, input_to_f, *args, **kwargs): self.assertTrue(f(input_to_f, *args, **kwargs), msg="Not true for lists") self.assertTrue( f(np.array(input_to_f), *args, **kwargs), msg="Not true for numpy.arrays" ) self.assertTrue( f(pd.Series(input_to_f), *args, **kwargs), msg="Not true for pandas.Series" )
blue-yonder/tsfresh
[ 7135, 1120, 7135, 61, 1477481357 ]
def assertFalseOnAllArrayTypes(self, f, input_to_f, *args, **kwargs): self.assertFalse(f(input_to_f, *args, **kwargs), msg="Not false for lists") self.assertFalse( f(np.array(input_to_f), *args, **kwargs), msg="Not false for numpy.arrays" ) self.assertFalse( f(pd.Series(input_to_f), *args, **kwargs), msg="Not false for pandas.Series" )
blue-yonder/tsfresh
[ 7135, 1120, 7135, 61, 1477481357 ]
def assertAlmostEqualOnAllArrayTypes(self, f, input_to_f, result, *args, **kwargs):
    """Assert f(data, ...) is almost equal to *result* for the input as
    a list, a numpy.ndarray, and a float64 pandas.Series."""
    conversions = (
        (lambda x: x, "lists"),
        (np.array, "numpy.arrays"),
        (lambda x: pd.Series(x, dtype="float64"), "pandas.Series"),
    )
    for convert, label in conversions:
        actual = f(convert(input_to_f), *args, **kwargs)
        self.assertAlmostEqual(
            actual,
            result,
            msg="Not almost equal for {}: {} != {}".format(
                label, actual, result
            ),
        )
blue-yonder/tsfresh
[ 7135, 1120, 7135, 61, 1477481357 ]
def assertEqualPandasSeriesWrapper(self, f, input_to_f, result, *args, **kwargs):
    """Assert f(pd.Series(input_to_f), ...) equals *result*.

    Fix: f was previously evaluated twice -- once for the comparison and
    once for the failure message -- which is wasteful and wrong for
    stateful or non-deterministic f.  Evaluate once and reuse.
    """
    actual = f(pd.Series(input_to_f), *args, **kwargs)
    self.assertEqual(
        actual,
        result,
        msg="Not equal for pandas.Series: {} != {}".format(actual, result),
    )
blue-yonder/tsfresh
[ 7135, 1120, 7135, 61, 1477481357 ]
def test___get_length_sequences_where(self):
    """Run-lengths of truthy values: mixed 0/1/True inputs give the same
    runs; all-zero and empty inputs both yield [0]."""
    self.assertEqualOnAllArrayTypes(
        _get_length_sequences_where,
        [0, 1, 0, 0, 1, 1, 1, 0, 0, 1, 0, 1, 1],
        [1, 3, 1, 2],
    )
    # True and 1 are interchangeable for the truthiness test.
    self.assertEqualOnAllArrayTypes(
        _get_length_sequences_where,
        [0, True, 0, 0, True, True, True, 0, 0, True, 0, True, True],
        [1, 3, 1, 2],
    )
    self.assertEqualOnAllArrayTypes(
        _get_length_sequences_where,
        [0, True, 0, 0, 1, True, 1, 0, 0, True, 0, 1, True],
        [1, 3, 1, 2],
    )
    # Degenerate inputs: no truthy values at all, and an empty sequence.
    self.assertEqualOnAllArrayTypes(_get_length_sequences_where, [0] * 10, [0])
    self.assertEqualOnAllArrayTypes(_get_length_sequences_where, [], [0])
blue-yonder/tsfresh
[ 7135, 1120, 7135, 61, 1477481357 ]
def test_variance_larger_than_standard_deviation(self):
    """variance > std holds only when the variance exceeds 1."""
    cases = [
        ([-1, -1, 1, 1, 1], False),
        ([-1, -1, 1, 1, 2], True),
    ]
    for data, expected in cases:
        check = (self.assertTrueOnAllArrayTypes if expected
                 else self.assertFalseOnAllArrayTypes)
        check(variance_larger_than_standard_deviation, data)
blue-yonder/tsfresh
[ 7135, 1120, 7135, 61, 1477481357 ]
def test_symmetry_looking(self):
    """symmetry_looking at several r thresholds; each call checks a list
    of parameter dicts against all array types."""
    self.assertAllTrueOnAllArrayTypes(
        symmetry_looking, [-1, -1, 1, 1], [dict(r=0.05), dict(r=0.75)]
    )
    # r=0 tolerates no asymmetry at all.
    self.assertAllFalseOnAllArrayTypes(
        symmetry_looking, [-1, -1, 1, 1], [dict(r=0)]
    )
    self.assertAllFalseOnAllArrayTypes(
        symmetry_looking, [-1, -1, -1, -1, 1], [dict(r=0.05)]
    )
    self.assertAllTrueOnAllArrayTypes(
        symmetry_looking, [-2, -2, -2, -1, -1, -1], [dict(r=0.05)]
    )
    # Nearly identical values should look symmetric at a 5% tolerance.
    self.assertAllTrueOnAllArrayTypes(
        symmetry_looking, [-0.9, -0.900001], [dict(r=0.05)]
    )
blue-yonder/tsfresh
[ 7135, 1120, 7135, 61, 1477481357 ]
def test_has_duplicate_min(self):
    """has_duplicate_min: true iff the minimum value occurs twice or more."""
    duplicated_min = [
        [-2.1, 0, 0, -2.1, 1.1],
        np.array([1, 1, 1, 1]),
        np.array([1, 1]),
    ]
    unique_min = [
        [2.1, 0, -1, 2, 1.1],
        np.array([0]),
    ]
    for data in duplicated_min:
        self.assertTrueOnAllArrayTypes(has_duplicate_min, data)
    for data in unique_min:
        self.assertFalseOnAllArrayTypes(has_duplicate_min, data)
blue-yonder/tsfresh
[ 7135, 1120, 7135, 61, 1477481357 ]